id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27,100 | cmd_show_metadata.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_show_metadata.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import os
import sys
from calibre import prints
from calibre.ebooks.metadata.opf2 import OPFCreator
readonly = True
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, book_id):
with db.safe_read_lock:
if not db.has_id(book_id):
return
return db.get_metadata(book_id)
def option_parser(get_parser, args):
parser = get_parser(
_(
'''
%prog show_metadata [options] id
Show the metadata stored in the calibre database for the book identified by id.
id is an id number from the search command.
'''
)
)
parser.add_option(
'--as-opf',
default=False,
action='store_true',
help=_('Print metadata in OPF form (XML)')
)
return parser
def main(opts, args, dbctx):
if len(args) < 1:
raise SystemExit(_('You must specify an id'))
book_id = int(args[0])
mi = dbctx.run('show_metadata', book_id)
if mi is None:
raise SystemExit(f'Id #{book_id} is not present in database.')
if opts.as_opf:
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
mi = OPFCreator(os.getcwd(), mi)
mi.render(stdout)
else:
prints(str(mi))
return 0
| 1,360 | Python | .py | 44 | 25.363636 | 79 | 0.650307 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,101 | cmd_saved_searches.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_saved_searches.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
readonly = False
version = 0 # change this if you change signature of implementation()
from calibre import prints
from calibre.srv.changes import saved_searches
from polyglot.builtins import iteritems
def implementation(db, notify_changes, action, *args):
if action == 'list':
with db.safe_read_lock:
names = db.saved_search_names()
return {n: db.saved_search_lookup(n) for n in names}
if action == 'add':
name, val = args
db.saved_search_add(name, val)
if notify_changes is not None:
notify_changes(saved_searches(added=(name,)))
return
if action == 'remove':
name = args[0]
db.saved_search_delete(name)
if notify_changes is not None:
notify_changes(saved_searches(removed=(name,)))
return
def option_parser(get_parser, args):
parser = get_parser(
_(
'''\
%prog saved_searches [options] (list|add|remove)
Manage the saved searches stored in this database.
If you try to add a query with a name that already exists, it will be
replaced.
Syntax for adding:
%prog saved_searches add search_name search_expression
Syntax for removing:
%prog saved_searches remove search_name
'''
)
)
return parser
def main(opts, args, dbctx):
args = args or ['list']
if args[0] == 'list':
for name, value in iteritems(dbctx.run('saved_searches', 'list')):
prints(_('Name:'), name)
prints(_('Search string:'), value)
print()
elif args[0] == 'add':
if len(args) < 3:
raise SystemExit(_('Error: You must specify a name and a search string'))
dbctx.run('saved_searches', 'add', args[1], args[2])
prints(args[1], _('added'))
elif args[0] == 'remove':
if len(args) < 2:
raise SystemExit(_('Error: You must specify a name'))
dbctx.run('saved_searches', 'remove', args[1])
prints(args[1], _('removed'))
else:
raise SystemExit(
_(
'Error: Action %s not recognized, must be one '
'of: (add|remove|list)'
) % args[0]
)
return 0
| 2,295 | Python | .py | 65 | 28.107692 | 85 | 0.610835 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,102 | cmd_embed_metadata.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_embed_metadata.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre import prints
from calibre.db.cli import integers_from_string
from calibre.srv.changes import formats_added
readonly = False
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, book_id, only_fmts):
if book_id is None:
return db.all_book_ids()
with db.write_lock:
if db.has_id(book_id):
db.embed_metadata((book_id,), only_fmts=only_fmts)
if notify_changes is not None:
notify_changes(formats_added({book_id: db.formats(book_id)}))
return db.field_for('title', book_id)
def option_parser(get_parser, args):
parser = get_parser(_(
'''
%prog embed_metadata [options] book_id
Update the metadata in the actual book files stored in the calibre library from
the metadata in the calibre database. Normally, metadata is updated only when
exporting files from calibre, this command is useful if you want the files to
be updated in place. Note that different file formats support different amounts
of metadata. You can use the special value 'all' for book_id to update metadata
in all books. You can also specify many book ids separated by spaces and id ranges
separated by hyphens. For example: %prog embed_metadata 1 2 10-15 23'''))
parser.add_option('-f', '--only-formats', action='append', default=[], help=_(
'Only update metadata in files of the specified format. Specify it multiple'
' times for multiple formats. By default, all formats are updated.'))
return parser
def main(opts, args, dbctx):
ids = set()
for arg in args:
if arg == 'all':
ids = None
break
ids |= set(integers_from_string(arg))
only_fmts = opts.only_formats or None
if ids is None:
ids = dbctx.run('embed_metadata', None, None)
def progress(i, title):
prints(_('Processed {0} ({1} of {2})').format(title, i, len(ids)))
for i, book_id in enumerate(ids):
title = dbctx.run('embed_metadata', book_id, only_fmts)
progress(i+1, title or _('No book with id: {}').format(book_id))
return 0
| 2,232 | Python | .py | 47 | 41.87234 | 84 | 0.688306 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,103 | cmd_search.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_search.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre import prints
readonly = True
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, query):
from calibre.utils.search_query_parser import ParseException
try:
return db.search(query)
except ParseException as err:
e = ValueError(_('Failed to parse search query: ({0}) with error: {1}').format(query, err))
e.suppress_traceback = True
raise e from err
def option_parser(get_parser, args):
parser = get_parser(
_(
'''\
%prog search [options] search expression
Search the library for the specified search term, returning a comma separated
list of book ids matching the search expression. The output format is useful
to feed into other commands that accept a list of ids as input.
The search expression can be anything from calibre's powerful search query
language, for example: %prog search {0}
'''
).format('author:asimov \'title:"i robot"\'')
)
parser.add_option(
'-l',
'--limit',
default=-1,
type=int,
help=_('The maximum number of results to return. Default is all results.')
)
return parser
def main(opts, args, dbctx):
if len(args) < 1:
raise SystemExit(_('Error: You must specify the search expression'))
q = ' '.join(args)
try:
ids = dbctx.run('search', q)
except Exception as e:
if getattr(e, 'suppress_traceback', False):
raise SystemExit(str(e))
raise
if not ids:
raise SystemExit(_('No books matching the search expression:') + ' ' + q)
ids = sorted(ids)
if opts.limit > -1:
ids = ids[:opts.limit]
prints(','.join(map(str, ids)), end='')
return 0
| 1,856 | Python | .py | 51 | 30.705882 | 99 | 0.65942 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,104 | cmd_catalog.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_catalog.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import os
from calibre.customize.ui import available_catalog_formats, plugin_for_catalog_format
from calibre.db.cli import integers_from_string
readonly = True
version = 0 # change this if you change signature of implementation()
needs_srv_ctx = True
no_remote = True
def implementation(db, notify_changes, ctx):
raise NotImplementedError()
def option_parser(get_parser, args): # {{{
def add_plugin_parser_options(fmt, parser):
# Fetch the extension-specific CLI options from the plugin
# library.catalogs.<format>.py
plugin = plugin_for_catalog_format(fmt)
p = parser.add_option_group(_('{} OPTIONS').format(fmt.upper()))
for option in plugin.cli_options:
if option.action:
p.add_option(
option.option,
default=option.default,
dest=option.dest,
action=option.action,
help=option.help
)
else:
p.add_option(
option.option,
default=option.default,
dest=option.dest,
help=option.help
)
# Entry point
parser = get_parser(
_(
'''\
%prog catalog /path/to/destination.(csv|epub|mobi|xml...) [options]
Export a catalog in format specified by path/to/destination extension.
Options control how entries are displayed in the generated catalog output.
Note that different catalog formats support different sets of options. To
see the different options, specify the name of the output file and then the
{} option.
'''.format('--help')
)
)
# Add options common to all catalog plugins
parser.add_option(
'-i',
'--ids',
default=None,
dest='ids',
help=_(
"Comma-separated list of database IDs to catalog.\n"
"If declared, --search is ignored.\n"
"Default: all"
)
)
parser.add_option(
'-s',
'--search',
default=None,
dest='search_text',
help=_(
"Filter the results by the search query. "
"For the format of the search query, please see "
"the search-related documentation in the User Manual.\n"
"Default: no filtering"
)
)
parser.add_option(
'-v',
'--verbose',
default=False,
action='store_true',
dest='verbose',
help=_('Show detailed output information. Useful for debugging')
)
fmt = 'epub'
if args and '.' in args[0]:
fmt = args[0].rpartition('.')[-1].lower()
if fmt not in available_catalog_formats():
fmt = 'epub'
# Add options specific to fmt plugin
add_plugin_parser_options(fmt, parser)
return parser
# }}}
def main(opts, args, dbctx):
if len(args) < 1:
raise SystemExit(_('You must specify a catalog output file'))
if opts.ids:
opts.ids = list(integers_from_string(opts.ids))
fmt = args[0].rpartition('.')[-1].lower()
if fmt not in available_catalog_formats():
raise SystemExit(
_('Cannot generate a catalog in the {} format').format(fmt.upper())
)
# No support for connected device in CLI environment
# Parallel initialization in calibre.gui2.tools:generate_catalog()
opts.connected_device = {
'is_device_connected': False,
'kind': None,
'name': None,
'save_template': None,
'serial': None,
'storage': None,
}
dest = os.path.abspath(os.path.expanduser(args[0]))
plugin = plugin_for_catalog_format(fmt)
with plugin:
plugin.run(dest, opts, dbctx.db)
return 0
| 3,872 | Python | .py | 112 | 26.1875 | 85 | 0.597968 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,105 | cmd_remove.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_remove.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre.constants import trash_name
from calibre.db.cli import integers_from_string
from calibre.srv.changes import books_deleted
readonly = False
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, ids, permanent):
db.remove_books(ids, permanent=permanent)
if notify_changes is not None:
notify_changes(books_deleted(ids))
def option_parser(get_parser, args):
p = get_parser(
_(
'''\
%prog remove ids
Remove the books identified by ids from the database. ids should be a comma separated \
list of id numbers (you can get id numbers by using the search command). For example, \
23,34,57-85 (when specifying a range, the last number in the range is not included).
'''
)
)
p.add_option(
'--permanent',
default=False,
action='store_true',
help=_('Do not use the {}').format(trash_name())
)
return p
def main(opts, args, dbctx):
if len(args) < 1:
raise SystemExit(_('You must specify at least one book to remove'))
ids = set()
for arg in args:
ids |= set(integers_from_string(arg))
dbctx.run('remove', ids, opts.permanent)
return 0
| 1,330 | Python | .py | 37 | 31.027027 | 87 | 0.685938 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,106 | cmd_export.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_export.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import os
from calibre.db.cli import integers_from_string
from calibre.db.constants import DATA_FILE_PATTERN
from calibre.db.errors import NoSuchFormat
from calibre.library.save_to_disk import config, do_save_book_to_disk, get_formats, sanitize_args
from calibre.utils.formatter_functions import load_user_template_functions
readonly = True
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, action, *args):
is_remote = notify_changes is not None
if action == 'all_ids':
return db.all_book_ids()
if action == 'setup':
book_id, formats = args
if not db.has_id(book_id):
raise KeyError(f'No book with id {book_id} present')
mi = db.get_metadata(book_id)
plugboards = db.pref('plugboards', {})
formats = get_formats(db.formats(book_id), formats)
extra_files_for_export = tuple(ef.relpath for ef in db.list_extra_files(book_id, pattern=DATA_FILE_PATTERN))
plugboards['extra_files_for_export'] = extra_files_for_export
return mi, plugboards, formats, db.library_id, db.pref(
'user_template_functions', []
)
if action == 'cover':
return db.cover(args[0])
if action == 'fmt':
book_id, fmt, dest = args
if is_remote:
return db.format(book_id, fmt)
db.copy_format_to(book_id, fmt, dest)
if action == 'extra_file':
book_id, relpath, dest = args
if is_remote:
from io import BytesIO
output = BytesIO()
db.copy_extra_file_to(book_id, relpath, output)
return output.getvalue()
db.copy_extra_file_to(book_id, relpath, dest)
def option_parser(get_parser, args):
parser = get_parser(
_(
'''\
%prog export [options] ids
Export the books specified by ids (a comma separated list) to the filesystem.
The export operation saves all formats of the book, its cover and metadata (in
an OPF file). Any extra data files associated with the book are also saved.
You can get id numbers from the search command.
'''
)
)
parser.add_option(
'--all',
default=False,
action='store_true',
help=_('Export all books in database, ignoring the list of ids.')
)
parser.add_option(
'--to-dir',
default='.',
help=(
_('Export books to the specified folder. Default is') + ' %default'
)
)
parser.add_option(
'--single-dir',
default=False,
action='store_true',
help=_('Export all books into a single folder')
)
parser.add_option(
'--progress',
default=False,
action='store_true',
help=_('Report progress')
)
c = config()
for pref in ['asciiize', 'update_metadata', 'write_opf', 'save_cover', 'save_extra_files']:
opt = c.get_option(pref)
switch = '--dont-' + pref.replace('_', '-')
parser.add_option(
switch,
default=True,
action='store_false',
help=opt.help + ' ' +
_('Specifying this switch will turn '
'this behavior off.'),
dest=pref
)
for pref in ['timefmt', 'template', 'formats']:
opt = c.get_option(pref)
switch = '--' + pref
parser.add_option(switch, default=opt.default, help=opt.help, dest=pref)
for pref in ('replace_whitespace', 'to_lowercase'):
opt = c.get_option(pref)
switch = '--' + pref.replace('_', '-')
parser.add_option(switch, default=False, action='store_true', help=opt.help)
return parser
class DBProxy:
# Proxy to allow do_save_book_to_disk() to work with remote database
def __init__(self, dbctx):
self.dbctx = dbctx
def cover(self, book_id):
return self.dbctx.run('export', 'cover', book_id)
def copy_format_to(self, book_id, fmt, path):
fdata = self.dbctx.run('export', 'fmt', book_id, fmt, path)
if self.dbctx.is_remote:
if fdata is None:
raise NoSuchFormat(fmt)
with open(path, 'wb') as f:
f.write(fdata)
def copy_extra_file_to(self, book_id, relpath, path):
fdata = self.dbctx.run('export', 'extra_file', book_id, relpath, path)
if self.dbctx.is_remote:
if fdata is None:
raise FileNotFoundError(relpath)
with open(path, 'wb') as f:
f.write(fdata)
def export(opts, dbctx, book_id, dest, dbproxy, length, first):
mi, plugboards, formats, library_id, template_funcs = dbctx.run(
'export', 'setup', book_id, opts.formats
)
extra_files = plugboards.pop('extra_files_for_export', ())
if dbctx.is_remote and first:
load_user_template_functions(library_id, template_funcs)
return do_save_book_to_disk(
dbproxy, book_id, mi, plugboards, formats, dest, opts, length, extra_files
)
def main(opts, args, dbctx):
if len(args) < 1 and not opts.all:
raise SystemExit(_('You must specify some ids or the %s option') % '--all')
if opts.all:
book_ids = dbctx.run('export', 'all_ids')
else:
book_ids = set()
for arg in args:
book_ids |= set(integers_from_string(arg))
dest = os.path.abspath(os.path.expanduser(opts.to_dir))
dbproxy = DBProxy(dbctx)
dest, opts, length = sanitize_args(dest, opts)
total = len(book_ids)
for i, book_id in enumerate(book_ids):
export(opts, dbctx, book_id, dest, dbproxy, length, i == 0)
if opts.progress:
num = i + 1
print(f'\r {num / total:.0%} [{num}/{total}]', end=' '*20)
if opts.progress:
print()
return 0
| 5,887 | Python | .py | 151 | 31.225166 | 116 | 0.608962 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,107 | cmd_add.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_add.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import os
import sys
from contextlib import contextmanager
from optparse import OptionGroup, OptionValueError
from calibre import prints
from calibre.db.adding import cdb_find_in_dir, cdb_recursive_find, compile_rule, create_format_map, run_import_plugins, run_import_plugins_before_metadata
from calibre.db.utils import find_identical_books
from calibre.ebooks.metadata import MetaInformation, string_to_authors
from calibre.ebooks.metadata.book.serialize import read_cover, serialize_cover
from calibre.ebooks.metadata.meta import get_metadata, metadata_from_formats
from calibre.ptempfile import TemporaryDirectory
from calibre.srv.changes import books_added, formats_added
from calibre.utils.localization import canonicalize_lang
from calibre.utils.short_uuid import uuid4
readonly = False
version = 0 # change this if you change signature of implementation()
def empty(db, notify_changes, is_remote, args):
mi = args[0]
ids, duplicates = db.add_books([(mi, {})])
if is_remote:
notify_changes(books_added(ids))
db.dump_metadata()
return ids, bool(duplicates)
def cached_identical_book_data(db, request_id):
key = db.library_id, request_id
if getattr(cached_identical_book_data, 'key', None) != key:
cached_identical_book_data.key = key
cached_identical_book_data.ans = db.data_for_find_identical_books()
return cached_identical_book_data.ans
def do_adding(db, request_id, notify_changes, is_remote, mi, format_map, add_duplicates, oautomerge):
identical_book_list, added_ids, updated_ids = set(), set(), set()
duplicates = []
identical_books_data = None
def add_format(book_id, fmt):
db.add_format(book_id, fmt, format_map[fmt], replace=True, run_hooks=False)
updated_ids.add(book_id)
def add_book():
nonlocal added_ids
added_ids_, duplicates_ = db.add_books(
[(mi, format_map)], add_duplicates=True, run_hooks=False)
added_ids |= set(added_ids_)
duplicates.extend(duplicates_)
if oautomerge != 'disabled' or not add_duplicates:
identical_books_data = cached_identical_book_data(db, request_id)
identical_book_list = find_identical_books(mi, identical_books_data)
if oautomerge != 'disabled':
if identical_book_list:
needs_add = False
duplicated_formats = set()
for book_id in identical_book_list:
book_formats = {q.upper() for q in db.formats(book_id)}
input_formats = {q.upper():q for q in format_map}
common_formats = book_formats & set(input_formats)
if not common_formats:
for x in input_formats:
add_format(book_id, input_formats[x])
else:
new_formats = set(input_formats) - book_formats
if new_formats:
for x in new_formats:
add_format(book_id, input_formats[x])
if oautomerge == 'overwrite':
for x in common_formats:
add_format(book_id, input_formats[x])
elif oautomerge == 'ignore':
for x in common_formats:
duplicated_formats.add(input_formats[x])
elif oautomerge == 'new_record':
needs_add = True
if needs_add:
add_book()
if duplicated_formats:
duplicates.append((mi, {x: format_map[x] for x in duplicated_formats}))
else:
add_book()
else:
if identical_book_list:
duplicates.append((mi, format_map))
else:
add_book()
if added_ids and identical_books_data is not None:
for book_id in added_ids:
db.update_data_for_find_identical_books(book_id, identical_books_data)
if is_remote:
notify_changes(books_added(added_ids))
if updated_ids:
notify_changes(formats_added({book_id: tuple(format_map) for book_id in updated_ids}))
db.dump_metadata()
return added_ids, updated_ids, duplicates
def book(db, notify_changes, is_remote, args):
data, fname, fmt, add_duplicates, otitle, oauthors, oisbn, otags, oseries, oseries_index, ocover, oidentifiers, olanguages, oautomerge, request_id = args
with add_ctx(), TemporaryDirectory('add-single') as tdir, run_import_plugins_before_metadata(tdir):
if is_remote:
with open(os.path.join(tdir, fname), 'wb') as f:
f.write(data[1])
path = f.name
else:
path = data
path = run_import_plugins([path])[0]
fmt = os.path.splitext(path)[1]
fmt = (fmt[1:] if fmt else None) or 'unknown'
with open(path, 'rb') as stream:
mi = get_metadata(stream, stream_type=fmt, use_libprs_metadata=True)
if not mi.title:
mi.title = os.path.splitext(os.path.basename(path))[0]
if not mi.authors:
mi.authors = [_('Unknown')]
if oidentifiers:
ids = mi.get_identifiers()
ids.update(oidentifiers)
mi.set_identifiers(ids)
for x in ('title', 'authors', 'isbn', 'tags', 'series', 'languages'):
val = locals()['o' + x]
if val:
setattr(mi, x, val)
if oseries:
mi.series_index = oseries_index
if ocover:
mi.cover = None
mi.cover_data = ocover
identical_book_list, added_ids, updated_ids = set(), set(), set()
duplicates = []
identical_books_data = None
added_ids, updated_ids, duplicates = do_adding(
db, request_id, notify_changes, is_remote, mi, {fmt: path}, add_duplicates, oautomerge)
return added_ids, updated_ids, bool(duplicates), mi.title
def format_group(db, notify_changes, is_remote, args):
formats, add_duplicates, oautomerge, request_id, cover_data = args
with add_ctx(), TemporaryDirectory('add-multiple') as tdir, run_import_plugins_before_metadata(tdir):
updated_ids = {}
if is_remote:
paths = []
for name, data in formats:
with open(os.path.join(tdir, os.path.basename(name)), 'wb') as f:
f.write(data)
paths.append(f.name)
else:
paths = list(formats)
paths = run_import_plugins(paths)
mi = metadata_from_formats(paths)
if mi.title is None:
return None, set(), set(), False
if cover_data and not mi.cover_data or not mi.cover_data[1]:
mi.cover_data = 'jpeg', cover_data
format_map = create_format_map(paths)
added_ids, updated_ids, duplicates = do_adding(
db, request_id, notify_changes, is_remote, mi, format_map, add_duplicates, oautomerge)
return mi.title, set(added_ids), set(updated_ids), bool(duplicates)
def implementation(db, notify_changes, action, *args):
is_remote = notify_changes is not None
func = globals()[action]
return func(db, notify_changes, is_remote, args)
def do_add_empty(
dbctx, title, authors, isbn, tags, series, series_index, cover, identifiers,
languages
):
mi = MetaInformation(None)
if title is not None:
mi.title = title
if authors:
mi.authors = authors
if identifiers:
mi.set_identifiers(identifiers)
if isbn:
mi.isbn = isbn
if tags:
mi.tags = tags
if series:
mi.series, mi.series_index = series, series_index
if cover:
mi.cover = cover
if languages:
mi.languages = languages
ids, duplicates = dbctx.run('add', 'empty', read_cover(mi))
prints(_('Added book ids: %s') % ','.join(map(str, ids)))
@contextmanager
def add_ctx():
orig = sys.stdout
yield
sys.stdout = orig
def do_add(
dbctx, paths, one_book_per_directory, recurse, add_duplicates, otitle, oauthors,
oisbn, otags, oseries, oseries_index, ocover, oidentifiers, olanguages,
compiled_rules, oautomerge
):
request_id = uuid4()
with add_ctx():
files, dirs = [], []
for path in paths:
path = os.path.abspath(path)
if os.path.isdir(path):
dirs.append(path)
else:
if os.path.exists(path):
files.append(path)
else:
prints(path, 'not found')
file_duplicates, added_ids, merged_ids = [], set(), set()
for book in files:
fmt = os.path.splitext(book)[1]
fmt = fmt[1:] if fmt else None
if not fmt:
continue
aids, mids, dups, book_title = dbctx.run(
'add', 'book', dbctx.path(book), os.path.basename(book), fmt, add_duplicates,
otitle, oauthors, oisbn, otags, oseries, oseries_index, serialize_cover(ocover) if ocover else None,
oidentifiers, olanguages, oautomerge, request_id
)
added_ids |= set(aids)
merged_ids |= set(mids)
if dups:
file_duplicates.append((book_title, book))
dir_dups = []
scanner = cdb_recursive_find if recurse else cdb_find_in_dir
for dpath in dirs:
for formats in scanner(dpath, one_book_per_directory, compiled_rules):
cover_data = None
for fmt in formats:
if fmt.lower().endswith('.opf'):
with open(fmt, 'rb') as f:
mi = get_metadata(f, stream_type='opf')
if mi.cover_data and mi.cover_data[1]:
cover_data = mi.cover_data[1]
elif mi.cover:
try:
with open(mi.cover, 'rb') as f:
cover_data = f.read()
except OSError:
pass
book_title, ids, mids, dups = dbctx.run(
'add', 'format_group', tuple(map(dbctx.path, formats)), add_duplicates, oautomerge, request_id, cover_data)
if book_title is not None:
added_ids |= set(ids)
merged_ids |= set(mids)
if dups:
dir_dups.append((book_title, formats))
sys.stdout = sys.__stdout__
if dir_dups or file_duplicates:
prints(
_(
'The following books were not added as '
'they already exist in the database '
'(see --duplicates option or --automerge option):'
),
file=sys.stderr
)
for title, formats in dir_dups:
prints(' ', title, file=sys.stderr)
for path in formats:
prints(' ', path)
if file_duplicates:
for title, path in file_duplicates:
prints(' ', title, file=sys.stderr)
prints(' ', path)
if added_ids:
prints(_('Added book ids: %s') % (', '.join(map(str, added_ids))))
if merged_ids:
prints(_('Merged book ids: %s') % (', '.join(map(str, merged_ids))))
def option_parser(get_parser, args):
parser = get_parser(
_(
'''\
%prog add [options] file1 file2 file3 ...
Add the specified files as books to the database. You can also specify folders, see
the folder related options below.
'''
)
)
parser.add_option(
'-d',
'--duplicates',
action='store_true',
default=False,
help=_(
'Add books to database even if they already exist. Comparison is done based on book titles and authors.'
' Note that the {} option takes precedence.'
).format('--automerge')
)
parser.add_option(
'-m',
'--automerge',
type='choice',
choices=('disabled', 'ignore', 'overwrite', 'new_record'),
default='disabled',
help=_(
'If books with similar titles and authors are found, merge the incoming formats (files) automatically into'
' existing book records. A value of "ignore" means duplicate formats are discarded. A value of'
' "overwrite" means duplicate formats in the library are overwritten with the newly added files.'
' A value of "new_record" means duplicate formats are placed into a new book record.'
)
)
parser.add_option(
'-e',
'--empty',
action='store_true',
default=False,
help=_('Add an empty book (a book with no formats)')
)
parser.add_option(
'-t', '--title', default=None, help=_('Set the title of the added book(s)')
)
parser.add_option(
'-a',
'--authors',
default=None,
help=_('Set the authors of the added book(s)')
)
parser.add_option(
'-i', '--isbn', default=None, help=_('Set the ISBN of the added book(s)')
)
parser.add_option(
'-I',
'--identifier',
default=[],
action='append',
help=_('Set the identifiers for this book, e.g. -I asin:XXX -I isbn:YYY')
)
parser.add_option(
'-T', '--tags', default=None, help=_('Set the tags of the added book(s)')
)
parser.add_option(
'-s',
'--series',
default=None,
help=_('Set the series of the added book(s)')
)
parser.add_option(
'-S',
'--series-index',
default=1.0,
type=float,
help=_('Set the series number of the added book(s)')
)
parser.add_option(
'-c',
'--cover',
default=None,
help=_('Path to the cover to use for the added book')
)
parser.add_option(
'-l',
'--languages',
default=None,
help=_(
'A comma separated list of languages (best to use ISO639 language codes, though some language names may also be recognized)'
)
)
g = OptionGroup(
parser,
_('ADDING FROM FOLDERS'),
_(
'Options to control the adding of books from folders. By default only files that have extensions of known e-book file types are added.'
)
)
def filter_pat(option, opt, value, parser, action):
rule = {'match_type': 'glob', 'query': value, 'action': action}
try:
getattr(parser.values, option.dest).append(compile_rule(rule))
except Exception:
raise OptionValueError('%r is not a valid filename pattern' % value)
g.add_option(
'-1',
'--one-book-per-directory',
action='store_true',
default=False,
help=_(
'Assume that each folder has only a single logical book and that all files in it are different e-book formats of that book'
)
)
g.add_option(
'-r',
'--recurse',
action='store_true',
default=False,
help=_('Process folders recursively')
)
def fadd(opt, action, help):
g.add_option(
opt,
action='callback',
type='string',
nargs=1,
default=[],
callback=filter_pat,
dest='filters',
callback_args=(action, ),
metavar=_('GLOB PATTERN'),
help=help
)
fadd(
'--ignore', 'ignore',
_(
'A filename (glob) pattern, files matching this pattern will be ignored when scanning folders for files.'
' Can be specified multiple times for multiple patterns. For example: *.pdf will ignore all PDF files'
)
)
fadd(
'--add', 'add',
_(
'A filename (glob) pattern, files matching this pattern will be added when scanning folders for files,'
' even if they are not of a known e-book file type. Can be specified multiple times for multiple patterns.'
)
)
parser.add_option_group(g)
return parser
def main(opts, args, dbctx):
aut = string_to_authors(opts.authors) if opts.authors else []
tags = [x.strip() for x in opts.tags.split(',')] if opts.tags else []
lcodes = [canonicalize_lang(x) for x in (opts.languages or '').split(',')]
lcodes = [x for x in lcodes if x]
identifiers = (x.partition(':')[::2] for x in opts.identifier)
identifiers = {k.strip(): v.strip() for k, v in identifiers
if k.strip() and v.strip()}
if opts.empty:
do_add_empty(
dbctx, opts.title, aut, opts.isbn, tags, opts.series, opts.series_index,
opts.cover, identifiers, lcodes
)
return 0
if len(args) < 1:
raise SystemExit(_('You must specify at least one file to add'))
do_add(
dbctx, args, opts.one_book_per_directory, opts.recurse, opts.duplicates,
opts.title, aut, opts.isbn, tags, opts.series, opts.series_index, opts.cover,
identifiers, lcodes, opts.filters, opts.automerge
)
return 0
| 17,415 | Python | .py | 429 | 30.30303 | 157 | 0.574945 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,108 | cmd_fts_search.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_fts_search.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import re
readonly = True
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, query, adata):
    """Run a full text search on the library.

    :param query: the FTS query string
    :param adata: dict of options: restrict_to, threshold, include_snippets,
        use_stemming, start_marker, end_marker, as_tuple
    :return: ``(results, metadata_cache)`` where metadata_cache maps book_id
        to ``{'title': ..., 'authors': ...}`` for every matched book
    :raises Exception: with ``suppress_traceback`` set for expected,
        user-facing failures (FTS disabled, indexing incomplete, bad query)
    """
    rto = adata['restrict_to']
    restrict_to = None
    if not db.is_fts_enabled():
        # Fix: the message previously referred to a non-existent
        # --wait-until-complete switch; the actual option is --wait-for-completion
        err = Exception(_('Full text searching is not enabled on this library. Use the calibredb fts_index enable --wait-for-completion command to enable it'))
        err.suppress_traceback = True
        raise err
    l, t = db.fts_indexing_progress()[:2]
    # Refuse to search when more than `threshold` fraction of files is still
    # unindexed. Guard against t == 0 (empty library) which previously raised
    # an unhandled ZeroDivisionError.
    if t and l/t > (1 - adata['threshold']):
        err = Exception(_('{0} files out of {1} are not yet indexed, searching is disabled').format(l, t))
        err.suppress_traceback = True
        raise err
    if rto:
        # A string is a calibre search expression; anything else is an
        # iterable of book ids
        if isinstance(rto, str):
            restrict_to = db.search(rto)
        else:
            restrict_to = set(rto)
    metadata_cache = {}
    include_snippets = adata['include_snippets']

    def add_metadata(result):
        # Called for each raw FTS result: strip unneeded keys and lazily
        # collect title/authors for each matched book exactly once
        result.pop('id', None)
        if not include_snippets:
            result.pop('text', None)
        bid = result['book_id']
        if bid not in metadata_cache:
            with db.safe_read_lock:
                metadata_cache[bid] = {'title': db._field_for('title', bid), 'authors': db._field_for('authors', bid)}
        return result

    from calibre.db import FTSQueryError
    try:
        return db.fts_search(
            query, use_stemming=adata['use_stemming'], highlight_start=adata['start_marker'], highlight_end=adata['end_marker'],
            return_text=include_snippets, restrict_to_book_ids=restrict_to, result_type=tuple if adata['as_tuple'] else lambda x: x,
            process_each_result=add_metadata, snippet_size=64
        ), metadata_cache
    except FTSQueryError as e:
        # Malformed query: show the message without a traceback
        e.suppress_traceback = True
        raise e
def option_parser(get_parser, args):
    """Build the command line option parser for the fts_search command."""
    parser = get_parser(
        _(
            '''\
%prog fts_search [options] search expression
Do a full text search on the entire library or a subset of it.
'''
        ))
    parser.add_option(
        '--include-snippets',
        default=False,
        action='store_true',
        help=_('Include snippets of the text surrounding each match. Note that this makes searching much slower.')
    )
    # Default markers are ANSI escape codes: red for match start, reset for end
    parser.add_option(
        '--match-start-marker',
        default='\x1b[31m',
        help=_('The marker used to indicate the start of a matched word inside a snippet')
    )
    parser.add_option(
        '--match-end-marker',
        default='\x1b[m',
        help=_('The marker used to indicate the end of a matched word inside a snippet')
    )
    parser.add_option(
        '--do-not-match-on-related-words',
        default=True,
        dest='use_stemming',
        action='store_false',
        help=_('Only match on exact words not related words. So correction will not match correcting.')
    )
    parser.add_option(
        '--restrict-to',
        default='',
        help=_('Restrict the searched books, either using a search expression or ids.'
               ' For example: ids:1,2,3 to restrict by ids or search:tag:foo to restrict to books having the tag foo.')
    )
    parser.add_option(
        '--output-format', default='text', choices=('text', 'json'),
        help=_('The format to output the search results in. Either "text" for plain text or "json" for JSON output.')
    )
    # Expressed as a percentage; converted to a 0-1 fraction in main()
    parser.add_option(
        '--indexing-threshold', type=float, default=90.,
        help=_('How much of the library must be indexed before searching is allowed, as a percentage. Defaults to 90')
    )
    return parser
def output_results_as_text(results, metadata_cache, include_snippets):
    """Pretty-print FTS search results to stdout.

    Without snippets, results are grouped by book (one entry per book with
    all matched formats). With snippets, consecutive results for the same
    book and identical (whitespace-normalized) snippet text are merged so
    the snippet is printed once with all formats it occurred in.
    """
    from calibre.ebooks.metadata import authors_to_string
    from calibre.utils.terminal import geometry
    width = max(5, geometry()[0])
    separator = '─' * width
    if not include_snippets:
        # Group matched formats per book id
        bids = {}
        for result in results:
            bids.setdefault(result['book_id'], []).append(result['format'])
        for bid, fmts in bids.items():
            m = metadata_cache[bid]
            print(_('{0} by {1}').format(m['title'], authors_to_string(m['authors'])))
            print(f'Book id: {bid} Formats: {", ".join(fmts)}')
            print(separator)
        return
    # State for merging consecutive identical snippets
    current_text_q = ''
    current_id = -1
    current_formats = []
    pat = re.compile(r'\s+')
    def print_result():
        # Flush the currently accumulated (book, snippet, formats) entry
        m = metadata_cache[current_id]
        print(_('{0} by {1}').format(m['title'], authors_to_string(m['authors'])))
        print(f'Book id: {current_id} Formats: {", ".join(current_formats)}')
        print(current_text_q)
        print(separator)
    for result in results:
        # Normalize whitespace so snippets differing only in spacing merge
        textq = pat.sub(' ', result['text'])
        if result['book_id'] == current_id and textq == current_text_q:
            current_formats.append(result['format'])
        else:
            if current_id > -1:
                print_result()
            current_id, current_text_q, current_formats = result['book_id'], textq, [result['format']]
    if current_id > -1:
        print_result()
def main(opts, args, dbctx):
    """Entry point for calibredb fts_search: parse options, run the search
    (locally or remotely via dbctx.run) and display the results."""
    if len(args) < 1:
        dbctx.option_parser.print_help()
        raise SystemExit(_('Error: You must specify the search expression'))
    search_expression = ' '.join(args)
    restrict_to = ''
    if opts.restrict_to:
        # --restrict-to is either ids:1,2,3 or search:<expression>
        q, v = opts.restrict_to.partition(':')[::2]
        if q == 'ids':
            restrict_to = tuple(set(map(int, v.split(','))))
        elif q == 'search':
            restrict_to = v
        else:
            raise SystemExit('The --restrict-to option must start with either ids: or search:')
    from calibre.db import FTSQueryError
    try:
        results, metadata_cache = dbctx.run('fts_search', search_expression, {
            'start_marker': opts.match_start_marker, 'end_marker': opts.match_end_marker, 'use_stemming': opts.use_stemming,
            'include_snippets': opts.include_snippets, 'restrict_to': restrict_to, 'as_tuple': dbctx.is_remote,
            # Clamp the percentage to [0, 100] and convert to a fraction
            'threshold': max(0, min(opts.indexing_threshold, 100)) / 100
        })
        if opts.output_format == 'json':
            if not dbctx.is_remote:
                # Local results may be a lazy iterator; materialize for JSON
                results = tuple(results)
            for r in results:
                m = metadata_cache[r['book_id']]
                r['title'], r['authors'] = m['title'], m['authors']
            import json
            print(json.dumps(results, sort_keys=True, indent='  '))
        else:
            output_results_as_text(results, metadata_cache, opts.include_snippets)
    except FTSQueryError as e:
        raise SystemExit(str(e))
    except Exception as e:
        # Expected failures carry suppress_traceback; show just the message
        if getattr(e, 'suppress_traceback', False):
            raise SystemExit(str(e))
        raise
    return 0
| 6,809 | Python | .py | 162 | 33.753086 | 159 | 0.610533 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,109 | cmd_remove_custom_column.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_remove_custom_column.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from calibre import prints
from calibre.db.legacy import LibraryDatabase
readonly = False
version = 0 # change this if you change signature of implementation()
no_remote = True
def implementation(db, notify_changes, *args):
    # This command is marked no_remote: all the work happens in main() with
    # direct access to the library, so there is no remote implementation.
    raise NotImplementedError()
def option_parser(get_parser, args):
    """Build the command line option parser for remove_custom_column."""
    parser = get_parser(
        _(
            '''\
%prog remove_custom_column [options] label
Remove the custom column identified by label. You can see available
columns with the custom_columns command.
'''
        )
    )
    parser.add_option(
        '-f',
        '--force',
        default=False,
        action='store_true',
        help=_('Do not ask for confirmation')
    )
    return parser
def input_unicode(prompt):
    """Read a line from stdin, always returning text (str)."""
    response = input(prompt)
    # Defensive: decode legacy byte input (Python 2 era); on Python 3
    # input() already returns str, so this branch is normally dead.
    if isinstance(response, bytes):
        response = response.decode(sys.stdin.encoding)
    return response
def do_remove_custom_column(db, label, force):
    """Delete the custom column *label* from *db*, asking the user for
    confirmation unless *force* is True."""
    if not force:
        answer = input_unicode(
            _('You will lose all data in the column: %s.'
              ' Are you sure (y/n)? ') % label
        )
        if answer.lower().strip() != _('y'):
            return
    try:
        db.delete_custom_column(label=label)
    except KeyError:
        # delete_custom_column raises KeyError for an unknown label
        raise SystemExit(
            _(
                'No column named %s found. You must use column labels, not titles.'
                ' Use calibredb custom_columns to get a list of labels.'
            ) % label
        )
    prints('Column %r removed.' % label)
def main(opts, args, dbctx):
    """Entry point for calibredb remove_custom_column."""
    if len(args) < 1:
        raise SystemExit(_('Error: You must specify a column label'))
    do_remove_custom_column(dbctx.db, args[0], opts.force)
    # Update the stored field_metadata
    # The database must be re-opened so that the field_metadata reflects the
    # removed column before it is persisted as a preference.
    dbctx.db.close()
    db = LibraryDatabase(dbctx.db.library_path)
    m = db.field_metadata.all_metadata()
    db.new_api.set_pref('field_metadata', m)
    return 0
| 1,963 | Python | .py | 61 | 25.770492 | 83 | 0.632025 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,110 | cmd_set_custom.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_set_custom.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre import prints
from calibre.db import _get_series_values
from calibre.srv.changes import metadata
readonly = False
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, col, book_id, val, append):
    """Set the value of custom column *col* for *book_id*.

    Returns a ``(success, message)`` pair instead of raising, so the caller
    can decide how to report failure.
    """
    # notify_changes is only provided when running against a remote server
    is_remote = notify_changes is not None
    field = db.field_metadata.custom_field_prefix + col
    with db.write_lock:
        if not db.has_id(book_id):
            return False, _('No book with id {} exists').format(book_id)
        try:
            fm = db.field_metadata[field]
        except KeyError:
            return False, _('No column with name {} exists').format(col)
        if fm['datatype'] == 'series':
            # Series values may embed an index, e.g. "Series [3]"; if no
            # index was given, pick the next free one for that series
            val, s_index = _get_series_values(val)
            if s_index is None:
                s_index = db.get_next_series_num_for(val, field=field)
            # Two calls written as a tuple expression: set both the series
            # value and its companion _index field
            db.set_field(field, {book_id: val}), db.set_field(field + '_index', {book_id: s_index})
            msg = _('Data set to: {} [{}]').format(db.field_for(field, book_id), db.field_for(field + '_index', book_id))
        else:
            if append and fm['is_multiple']:
                # --append: extend the existing values instead of replacing
                val = list(db.field_for(field, book_id)) + [val]
            db.set_field(field, {book_id: val})
            val = db.field_for(field, book_id)
            if isinstance(val, (tuple, list)):
                val = fm['is_multiple']['list_to_ui'].join(val)
            msg = _('Data set to: {}').format(val)
    if is_remote:
        notify_changes(metadata((book_id,)))
    return True, msg
def option_parser(get_parser, args):
    """Build the command line option parser for set_custom."""
    parser = get_parser(
        _(
            '''\
%prog set_custom [options] column id value
Set the value of a custom column for the book identified by id.
You can get a list of ids using the search command.
You can get a list of custom column names using the custom_columns
command.
'''
        )
    )
    parser.add_option(
        '-a',
        '--append',
        default=False,
        action='store_true',
        help=_(
            'If the column stores multiple values, append the specified '
            'values to the existing ones, instead of replacing them.'
        )
    )
    return parser
def main(opts, args, dbctx):
    """Entry point for calibredb set_custom."""
    if len(args) < 3:
        raise SystemExit(_('Error: You must specify a field name, id and value'))
    success, message = dbctx.run('set_custom', args[0], int(args[1]), args[2], opts.append)
    if not success:
        raise SystemExit(message)
    prints(message)
    return 0
| 2,586 | Python | .py | 66 | 31.530303 | 121 | 0.604623 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,111 | cmd_custom_columns.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_custom_columns.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from pprint import pformat
from calibre import prints
from polyglot.builtins import iteritems
readonly = True
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, *args):
    # Read-only: expose the backend's mapping of custom column label -> its
    # definition/metadata dict (including the column's numeric id under 'num').
    return db.backend.custom_column_label_map
def option_parser(get_parser, args):
    """Build the command line option parser for custom_columns."""
    parser = get_parser(
        _(
            '''\
%prog custom_columns [options]
List available custom columns. Shows column labels and ids.
'''
        )
    )
    parser.add_option(
        '-d',
        '--details',
        default=False,
        action='store_true',
        help=_('Show details for each column.')
    )
    return parser
def main(opts, args, dbctx):
    """Entry point for calibredb custom_columns: print each column's label,
    with either full details (--details) or just its numeric id."""
    columns = dbctx.run('custom_columns')
    for label, info in iteritems(columns):
        if opts.details:
            prints(label)
            print()
            prints(pformat(info))
            print('\n')
        else:
            prints(label, '(%d)' % info['num'])
    return 0
| 1,054 | Python | .py | 36 | 22.972222 | 71 | 0.629593 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,112 | utils.py | kovidgoyal_calibre/src/calibre/db/cli/utils.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import unicodedata
eaw = unicodedata.east_asian_width
def chr_width(x):
    """Return the number of terminal cells occupied by the character *x*.

    Per Unicode UAX #11, both East Asian Wide ('W') and Fullwidth ('F')
    characters occupy two cells; everything else is treated as one.
    The previous implementation only counted 'W', mis-measuring fullwidth
    forms such as U+FF01.
    """
    return 2 if unicodedata.east_asian_width(x) in ('F', 'W') else 1
def str_width(x):
    """Total number of terminal cells needed to display the string *x*."""
    return sum(chr_width(ch) for ch in x)
| 263 | Python | .py | 8 | 30 | 71 | 0.733871 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,113 | __init__.py | kovidgoyal_calibre/src/calibre/db/cli/__init__.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import importlib
def module_for_cmd(cmd):
    """Import and return the calibre.db.cli.cmd_<cmd> module implementing *cmd*."""
    return importlib.import_module(f'calibre.db.cli.cmd_{cmd}')
def integers_from_string(arg, include_last_inrange=False):
    """Yield integers from a spec like ``1,3-5``.

    Ranges like ``3-5`` are half-open by default (3, 4); pass
    include_last_inrange=True to also yield the upper bound (3, 4, 5).
    """
    for part in arg.split(','):
        bounds = tuple(map(int, part.split('-')))
        if len(bounds) > 1:
            stop = bounds[1] + (1 if include_last_inrange else 0)
            yield from range(bounds[0], stop)
        else:
            yield bounds[0]
| 494 | Python | .py | 13 | 31 | 73 | 0.612632 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,114 | tests.py | kovidgoyal_calibre/src/calibre/db/cli/tests.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__docformat__ = 'restructuredtext en'
'''
Test the CLI of the calibre database management tool
'''
import csv
import unittest
from calibre.db.cli.cmd_check_library import _print_check_library_results
from polyglot.builtins import iteritems
from polyglot.io import PolyglotBytesIO
class Checker:
    """Minimal attribute bag used as a stand-in for a CheckLibrary instance:
    every key/value pair of the mapping becomes an instance attribute."""

    def __init__(self, kw):
        for attr_name, attr_value in iteritems(kw):
            setattr(self, attr_name, attr_value)
class PrintCheckLibraryResultsTest(unittest.TestCase):
    """
    Asserts the format of the output to the CLI to avoid regressions
    """
    # (attribute_name, human_readable_title) pair, mirroring entries of
    # calibre.library.check_library.CHECKS
    check = ('dummy_check', 'Dummy Check')
    def test_prints_nothing_if_no_errors(self):
        stdout = PolyglotBytesIO()
        # dict.fromkeys gives both attributes the value None, i.e. "check not run"
        checker = Checker(dict.fromkeys(self.check))
        _print_check_library_results(checker, self.check, as_csv=False, out=stdout)
        self.assertEqual(stdout.getvalue(), b'')
        _print_check_library_results(checker, self.check, as_csv=True, out=stdout)
        self.assertEqual(stdout.getvalue(), b'')
    def test_human_readable_output(self):
        """
        Basic check of the human-readable output.
        Does not test: the full line format, truncation
        """
        data = [['first', 'second']]
        checker = Checker(dict.fromkeys(self.check))
        setattr(checker, self.check[0], data)
        stdout = PolyglotBytesIO()
        _print_check_library_results(checker, self.check, out=stdout, as_csv=False)
        result = stdout.getvalue().decode('utf-8', 'replace').split('\n')
        # One title line, one line per data row, and a trailing empty line
        self.assertEqual(len(result), len(data)+2)
        self.assertEqual(result[0], self.check[1])
        result_first = result[1].split('-')[0].strip()
        result_second = result[1].split('-')[1].strip()
        self.assertEqual(result_first, 'first')
        self.assertEqual(result_second, 'second')
        self.assertEqual(result[-1], '')
    def test_basic_csv_output(self):
        """
        Test simple csv output
        """
        data = [['first', 'second']]
        checker = Checker(dict.fromkeys(self.check))
        setattr(checker, self.check[0], data)
        stdout = PolyglotBytesIO()
        _print_check_library_results(checker, self.check, as_csv=True, out=stdout)
        result = stdout.getvalue().decode('utf-8', 'replace').split('\n')
        parsed_result = [l for l in csv.reader(result) if l]
        self.assertEqual(parsed_result, [[self.check[1], data[0][0], data[0][1]]])
    def test_escaped_csv_output(self):
        """
        Test more complex csv output
        """
        # The embedded comma must be quoted by the csv writer and survive a
        # round-trip through csv.reader unchanged
        data = [['I, Caesar', 'second']]
        checker = Checker(dict.fromkeys(self.check))
        setattr(checker, self.check[0], data)
        stdout = PolyglotBytesIO()
        _print_check_library_results(checker, self.check, as_csv=True, out=stdout)
        result = stdout.getvalue().decode('utf-8', 'replace').split('\n')
        parsed_result = [l for l in csv.reader(result) if l]
        self.assertEqual(parsed_result, [[self.check[1], data[0][0], data[0][1]]])
def find_tests():
    """Return the unittest suite for this module."""
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(PrintCheckLibraryResultsTest)
| 3,143 | Python | .py | 71 | 37.028169 | 89 | 0.647753 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,115 | cmd_fts_index.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_fts_index.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from calibre.db.listeners import EventType
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes, action, adata=None):
    """Dispatch the fts_index sub-actions (status/enable/disable/reindex/wait).

    Most actions return a dict with the indexing progress: 'left' (files not
    yet indexed), 'total' (all files) and 'rate' (indexing rate), plus
    'enabled' where relevant.
    """
    if action == 'status':
        if db.is_fts_enabled():
            l, t, r = db.fts_indexing_progress()
            return {'enabled': True, 'left': l, 'total': t, 'rate': r}
        return {'enabled': False, 'left': -1, 'total': -1}
    if action == 'enable':
        # Idempotent: enabling twice is a no-op
        if not db.is_fts_enabled():
            db.enable_fts()
        l, t, r = db.fts_indexing_progress()
        return {'enabled': True, 'left': l, 'total': t, 'rate': r}
    if action == 'disable':
        if db.is_fts_enabled():
            db.enable_fts(enabled=False)
        return
    if action == 'reindex':
        if not db.is_fts_enabled():
            # suppress_traceback marks this as an expected, user-facing error
            a = Exception(_('Full text indexing is not enabled on this library'))
            a.suppress_traceback = True
            raise a
        # items is a tuple of (book_id, fmt1, fmt2, ...) specs; empty means
        # re-index the whole library
        items = adata.get('items')
        if items:
            for item in items:
                db.reindex_fts_book(*item)
        else:
            db.reindex_fts()
        l, t, r = db.fts_indexing_progress()
        return {'enabled': True, 'left': l, 'total': t, 'rate': r}
    if action == 'wait':
        if not db.is_fts_enabled():
            a = Exception(_('Full text indexing is not enabled on this library'))
            a.suppress_traceback = True
            raise a
        # Optionally toggle rate measurement and indexing speed before polling
        if 'measure_state' in adata:
            db.fts_start_measuring_rate(measure=adata['measure_state'])
        if adata.get('speed'):
            db.set_fts_speed(slow=adata['speed'] == 'slow')
        l, t, r = db.fts_indexing_progress()
        return {'left': l, 'total': t, 'rate': r}
def option_parser(get_parser, args):
    """Build the command line option parser for fts_index."""
    parser = get_parser(
        _(
            '''\
%prog fts_index [options] {enable}/{disable}/{status}/{reindex}
Control the Full text search indexing process.
{enable}
    Turns on FTS indexing for this library
{disable}
    Turns off FTS indexing for this library
{status}
    Shows the current indexing status
{reindex}
    Can be used to re-index either particular books or
    the entire library. To re-index particular books
    specify the book ids as additional arguments after the
    {reindex} command. If no book ids are specified the
    entire library is re-indexed.
''').format(enable='enable', disable='disable', status='status', reindex='reindex')
    )
    parser.add_option(
        '--wait-for-completion',
        default=False,
        action='store_true',
        help=_('Wait till all books are indexed, showing indexing progress periodically')
    )
    parser.add_option(
        '--indexing-speed',
        default='',
        choices=('fast', 'slow', ''),
        help=_('The speed of indexing. Use fast for fast indexing using all your computers resources'
               ' and slow for less resource intensive indexing. Note that the speed is reset to slow after every invocation.')
    )
    return parser
def run_job(dbctx, which, **data):
    """Invoke the fts_index implementation action *which* with *data*,
    converting expected (suppress_traceback) errors into clean exits."""
    try:
        return dbctx.run('fts_index', which, data)
    except Exception as err:
        if getattr(err, 'suppress_traceback', False):
            raise SystemExit(str(err))
        raise
def show_progress(left, total, rate):
    """Overwrite the current terminal line with FTS indexing progress."""
    from calibre.db.utils import IndexingProgress
    progress = IndexingProgress()
    progress.update(left, total, rate)
    done = total - left
    # \r\x1b[K: return to start of line and clear it, so updates overwrite in place
    print('\r\x1b[K' + _('{} of {} book files indexed, {}').format(done, total, progress.time_left), flush=True, end=' ...')
def remote_wait_for_completion(dbctx, indexing_speed):
    """Poll a remote server once a second until FTS indexing finishes,
    showing progress. Always resets the speed to slow on exit."""
    import time
    # First call sets the requested speed and turns on rate measurement
    s = run_job(dbctx, 'wait', speed=indexing_speed, measure_state=True)
    try:
        while s['left'] > 0:
            show_progress(s['left'], s['total'], s['rate'])
            time.sleep(1)
            s = run_job(dbctx, 'wait')
    finally:
        print()
        # Restore the server to low-impact indexing and stop measuring
        run_job(dbctx, 'wait', speed='slow', measure_state=False)
def local_wait_for_completion(db, indexing_speed):
    """Block until FTS indexing of the local library finishes, driven by
    indexing_progress_changed events rather than polling."""
    from queue import Queue
    q = Queue()
    def notifier(event_type, library_id, event_data):
        # Forward progress events from the db listener thread to this thread
        if event_type is EventType.indexing_progress_changed:
            q.put(event_data)
    db.add_listener(notifier)
    if indexing_speed:
        db.set_fts_speed(slow=indexing_speed == 'slow')
    db.fts_start_measuring_rate()
    l, t, r = db.fts_indexing_progress()
    while l > 0:
        show_progress(l, t, r)
        # Blocks until the next progress event arrives
        l, t, r = q.get()
    print()
def main(opts, args, dbctx):
    """Entry point for calibredb fts_index: dispatch on the action argument
    (status/enable/reindex/disable), then optionally wait for indexing."""
    if len(args) < 1:
        dbctx.option_parser.print_help()
        raise SystemExit(_('Error: You must specify the indexing action'))
    action = args[0]
    if action == 'status':
        s = run_job(dbctx, 'status')
        if s['enabled']:
            print(_('FTS Indexing is enabled'))
            print(_('{0} of {1} books files indexed').format(s['total'] - s['left'], s['total']))
        else:
            print(_('FTS Indexing is disabled'))
            # Exit code 2 signals "disabled" to scripts
            raise SystemExit(2)
    elif action == 'enable':
        s = run_job(dbctx, 'enable')
        print(_('FTS indexing has been enabled'))
        print(_('{0} of {1} books files indexed').format(s['total'] - s['left'], s['total']))
    elif action == 'reindex':
        items = args[1:]
        if not items:
            # Full re-index: require explicit confirmation
            print(_('Re-indexing the entire library can take a long time. Are you sure?'))
            while True:
                try:
                    q = input(_('Type {} to proceed, anything else to abort').format('"reindex"') + ': ')
                except KeyboardInterrupt:
                    # Suppress the traceback on Ctrl-C
                    sys.excepthook = lambda *a: None
                    raise
                # NOTE: any non-matching answer returns immediately, so the
                # while loop never actually repeats
                if q.strip('"') == 'reindex':
                    break
                else:
                    return 0
        def to_spec(x):
            # Parse "book_id" or "book_id:fmt1,fmt2" into a reindex spec tuple
            parts = x.split(':', 1)
            book_id = int(parts[0])
            if len(parts) == 1:
                return book_id,
            fmts = tuple(x.upper() for x in parts[1].split(','))
            return (book_id,) + fmts
        specs = tuple(map(to_spec, items))
        s = run_job(dbctx, 'reindex', items=specs)
        print(_('{0} of {1} books files indexed').format(s['total'] - s['left'], s['total']))
    elif action == 'disable':
        print(_('Disabling indexing will mean that all books will have to be re-checked when re-enabling indexing. Are you sure?'))
        while True:
            try:
                q = input(_('Type {} to proceed, anything else to abort').format('"disable"') + ': ')
            except KeyboardInterrupt:
                sys.excepthook = lambda *a: None
                raise
            if q in ('disable', '"disable"'):
                break
            else:
                return 0
        run_job(dbctx, 'disable')
        print(_('FTS indexing has been disabled'))
        return 0
    else:
        dbctx.option_parser.print_help()
        raise SystemExit(f'{action} is not a known action')
    # Only status/enable/reindex fall through to here
    if opts.wait_for_completion:
        print(_('Waiting for FTS indexing to complete, press Ctrl-C to abort...'))
        try:
            if dbctx.is_remote:
                remote_wait_for_completion(dbctx, opts.indexing_speed)
            else:
                local_wait_for_completion(dbctx.db.new_api, opts.indexing_speed)
        except KeyboardInterrupt:
            sys.excepthook = lambda *a: None
            raise
        print(_('All books indexed!'))
    return 0
| 7,499 | Python | .py | 187 | 31.016043 | 131 | 0.577414 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,116 | cmd_check_library.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_check_library.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import csv
import io
import sys
from calibre import prints
from calibre.db.legacy import LibraryDatabase
from calibre.library.check_library import CHECKS, CheckLibrary
readonly = False
version = 0 # change this if you change signature of implementation()
no_remote = True
def implementation(db, notify_changes, *args):
    # This command is marked no_remote: all the work happens in main() with
    # direct access to the library, so there is no remote implementation.
    raise NotImplementedError()
def option_parser(get_parser, args):
    """Build the command line option parser for check_library."""
    parser = get_parser(
        _(
            '''\
%prog check_library [options]
Perform some checks on the filesystem representing a library. Reports are {0}
'''
        ).format(', '.join([c[0] for c in CHECKS]))
    )
    parser.add_option(
        '-c', '--csv', default=False, action='store_true', help=_('Output in CSV')
    )
    parser.add_option(
        '-r',
        '--report',
        default=None,
        dest='report',
        help=_("Comma-separated list of reports.\n"
               "Default: all")
    )
    parser.add_option(
        '-e',
        '--ignore_extensions',
        default=None,
        dest='exts',
        help=_("Comma-separated list of extensions to ignore.\n"
               "Default: all")
    )
    parser.add_option(
        '-n',
        '--ignore_names',
        default=None,
        dest='names',
        help=_("Comma-separated list of names to ignore.\n"
               "Default: all")
    )
    parser.add_option(
        '--vacuum-fts-db',
        default=False,
        action='store_true',
        help=_('Vacuum the full text search database. This can be very slow and memory intensive, depending on the size of the database.')
    )
    return parser
def _print_check_library_results(checker, check, as_csv=False, out=sys.stdout):
attr = check[0]
list = getattr(checker, attr, None)
if list is None:
return
if as_csv:
to_output = [(check[1], i[0], i[1]) for i in list]
buf = io.StringIO(newline='')
csv_print = csv.writer(buf)
for line in to_output:
csv_print.writerow(line)
out.write(buf.getvalue())
else:
print(check[1], file=out)
for i in list:
print(' %-40.40s - %-40.40s' % (i[0], i[1]), file=out)
def main(opts, args, dbctx):
    """Entry point for calibredb check_library: select which checks to run,
    scan the library folder tree and print each check's report."""
    if opts.report is None:
        checks = CHECKS
    else:
        # Validate the requested report names against the known CHECKS
        checks = []
        for r in opts.report.split(','):
            found = False
            for c in CHECKS:
                if c[0] == r:
                    checks.append(c)
                    found = True
                    break
            if not found:
                prints(_('Unknown report check'), r)
                return 1
    if opts.names is None:
        names = []
    else:
        names = [f.strip() for f in opts.names.split(',') if f.strip()]
    if opts.exts is None:
        exts = []
    else:
        exts = [f.strip() for f in opts.exts.split(',') if f.strip()]
    if not LibraryDatabase.exists_at(dbctx.library_path):
        prints('No library found at', dbctx.library_path, file=sys.stderr)
        raise SystemExit(1)
    db = LibraryDatabase(dbctx.library_path)
    prints(_('Vacuuming database...'))
    db.new_api.vacuum(opts.vacuum_fts_db)
    checker = CheckLibrary(dbctx.library_path, db)
    checker.scan_library(names, exts)
    for check in checks:
        _print_check_library_results(checker, check, as_csv=opts.csv)
    return 0
| 3,450 | Python | .py | 106 | 24.943396 | 138 | 0.588377 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,117 | cmd_restore_database.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_restore_database.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre import prints
from calibre.db.restore import Restore
readonly = False
version = 0 # change this if you change signature of implementation()
no_remote = True
def implementation(db, notify_changes, *args):
    # This command is marked no_remote: all the work happens in main() with
    # direct access to the library, so there is no remote implementation.
    raise NotImplementedError()
def option_parser(get_parser, args):
    """Build the command line option parser for restore_database."""
    parser = get_parser(
        _(
            '''\
%prog restore_database [options]
Restore this database from the metadata stored in OPF files in each
folder of the calibre library. This is useful if your metadata.db file
has been corrupted.
WARNING: This command completely regenerates your database. You will lose
all saved searches, user categories, plugboards, stored per-book conversion
settings, and custom recipes. Restored metadata will only be as accurate as
what is found in the OPF files.
'''
        )
    )
    parser.add_option(
        '-r',
        '--really-do-it',
        default=False,
        action='store_true',
        help=_(
            'Really do the recovery. The command will not run '
            'unless this option is specified.'
        )
    )
    return parser
class Progress:
    """Progress callback for Restore: the first call (msg=None) records the
    total number of steps, subsequent calls print a percentage."""

    def __init__(self):
        self.total = 1

    def __call__(self, msg, step):
        if msg is None:
            # Initialization call: step carries the total step count
            self.total = float(step)
        else:
            percent = int(100 * (step / self.total))
            prints(msg, '...', '%d%%' % percent)
def main(opts, args, dbctx):
    """Entry point for calibredb restore_database: rebuild metadata.db from
    the per-book OPF files, reporting success/failure and any error report."""
    if not opts.really_do_it:
        raise SystemExit(
            _('You must provide the %s option to do a'
              ' recovery') % '--really-do-it'
        )
    r = Restore(dbctx.library_path, progress_callback=Progress())
    r.start()
    r.join()
    if r.tb is not None:
        prints('Restoring database failed with error:')
        prints(r.tb)
    else:
        prints('Restoring database succeeded')
        prints('old database saved as', r.olddb)
        if r.errors_occurred:
            name = 'calibre_db_restore_report.txt'
            # Fix: the file handle was previously leaked and the `name`
            # variable went unused; write via a context manager instead.
            with open(name, 'wb') as report:
                report.write(r.report.encode('utf-8'))
            prints('Some errors occurred. A detailed report was ' 'saved to', name)
    return 0
| 2,229 | Python | .py | 64 | 28.09375 | 83 | 0.637931 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,118 | cmd_backup_metadata.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_backup_metadata.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre import prints
readonly = True
version = 0 # change this if you change signature of implementation()
no_remote = True
def implementation(db, notify_changes, *args):
    # This command is marked no_remote: all the work happens in main() with
    # direct access to the library, so there is no remote implementation.
    raise NotImplementedError()
def option_parser(get_parser, args):
    """Build the command line option parser for backup_metadata."""
    parser = get_parser(
        _(
            '''\
%prog backup_metadata [options]
Backup the metadata stored in the database into individual OPF files in each
books folder. This normally happens automatically, but you can run this
command to force re-generation of the OPF files, with the --all option.
Note that there is normally no need to do this, as the OPF files are backed up
automatically, every time metadata is changed.
'''
        )
    )
    parser.add_option(
        '--all',
        default=False,
        action='store_true',
        help=_(
            'Normally, this command only operates on books that have'
            ' out of date OPF files. This option makes it operate on all'
            ' books.'
        )
    )
    return parser
class BackupProgress:
    """Callback for dump_metadata that prints one progress line per book.

    The first invocation passes mi=True with the total book count in
    book_id; later invocations carry the actual Metadata object (or not-ok
    status) for each processed book.
    """

    def __init__(self):
        self.total = 0
        self.count = 0

    def __call__(self, book_id, mi, ok):
        if mi is True:
            # Initialization call: book_id holds the total number of books
            self.total = book_id
        else:
            self.count += 1
            percent = (self.count * 100) / float(self.total)
            if ok:
                prints('{:.1f}% {} - {}'.format(percent, book_id, getattr(mi, 'title', 'Unknown')))
            else:
                prints(f'{percent:.1f}% {book_id} failed')
def main(opts, args, dbctx):
    """Entry point for calibredb backup_metadata."""
    db = dbctx.db
    ids = None
    if opts.all:
        # --all: mark every book dirty so all OPF files are regenerated
        ids = db.new_api.all_book_ids()
        db.new_api.mark_as_dirty(ids)
    db.dump_metadata(book_ids=ids, callback=BackupProgress())
    return 0
| 1,956 | Python | .py | 57 | 26.385965 | 93 | 0.596603 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,119 | cmd_list.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_list.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import json
import os
import sys
from textwrap import TextWrapper
from calibre.db.cli.utils import str_width
from calibre.ebooks.metadata import authors_to_string
from calibre.utils.date import isoformat
from polyglot.builtins import as_bytes, iteritems
readonly = True
version = 0 # change this if you change signature of implementation()
# Built-in fields that `calibredb list` can output; 'all' on the command line
# expands to this set (plus any custom columns). 'template' is presumably only
# honoured for local connections — see implementation() below.
FIELDS = {
    'title', 'authors', 'author_sort', 'publisher', 'rating', 'timestamp', 'size',
    'tags', 'comments', 'series', 'series_index', 'formats', 'isbn', 'uuid',
    'pubdate', 'cover', 'last_modified', 'identifiers', 'languages', 'template'
}
def formats(db, book_id):
    """Yield the absolute path (with forward slashes) of every format file of
    the book that actually exists on disk."""
    for fmt in db.formats(book_id, verify_formats=False):
        fpath = db.format_abspath(book_id, fmt)
        if fpath:
            yield fpath.replace(os.sep, '/')
def cover(db, book_id):
    # '__COVER_INTERNAL__' is a special format name understood by
    # format_abspath that resolves to the path of the book's cover file.
    return db.format_abspath(book_id, '__COVER_INTERNAL__')
def implementation(
    db, notify_changes, fields, sort_by, ascending, search_text, limit, template=None
):
    """Collect the data for calibredb list.

    Returns a dict with 'book_ids' (sorted, possibly limited), 'data'
    (field -> {book_id: value}), 'metadata' (field -> field metadata) and
    'fields' (the resolved field list), or an error string for unknown
    sort fields / fields.
    """
    is_remote = notify_changes is not None
    if is_remote:
        # templates allow arbitrary code execution via python templates. We
        # could possibly disallow only python templates but that is more work
        # than I feel like doing for this, so simply ignore templates on remote
        # connections.
        template = None
    formatter = None
    with db.safe_read_lock:
        fm = db.field_metadata
        # All selectable fields: builtins, 'id', and custom columns exposed
        # to the user as '*label' (stored internally as '#label')
        afields = set(FIELDS) | {'id'}
        for k in fm.custom_field_keys():
            afields.add('*' + k[1:])
        if 'all' in fields:
            fields = sorted(afields if template else (afields - {'template'}))
        sort_by = sort_by or 'id'
        sort_fields = sort_by.split(',')
        for sf in sort_fields:
            if sf not in afields:
                return f'Unknown sort field: {sf}'
        sort_spec = [(sf, ascending) for sf in sort_fields]
        if not set(fields).issubset(afields):
            return 'Unknown fields: {}'.format(', '.join(set(fields) - afields))
        if search_text:
            book_ids = db.multisort(sort_spec, ids_to_sort=db.search(search_text))
        else:
            book_ids = db.multisort(sort_spec)
        if limit > -1:
            book_ids = book_ids[:limit]
        data = {}
        metadata = {}
        for field in fields:
            # NOTE(review): substring test — also true for 'i'/'d'/'' — but
            # those can never occur here since fields ⊆ afields was verified
            if field in 'id':
                continue
            if field == 'isbn':
                # isbn is synthesized from the identifiers field
                x = db.all_field_for('identifiers', book_ids, default_value={})
                data[field] = {k: v.get('isbn') or '' for k, v in iteritems(x)}
                continue
            if field == 'template':
                if not template:
                    data['template'] = _('Template not allowed') if is_remote else _('No template specified')
                    continue
                vals = {}
                global_vars = {}
                if formatter is None:
                    from calibre.ebooks.metadata.book.formatter import SafeFormat
                    formatter = SafeFormat()
                # Evaluate the template once per book against its metadata
                for book_id in book_ids:
                    mi = db.get_proxy_metadata(book_id)
                    vals[book_id] = formatter.safe_format(template, {}, 'TEMPLATE ERROR', mi, global_vars=global_vars)
                data['template'] = vals
                continue
            # Map the user-facing '*label' form to the internal '#label' form
            field = field.replace('*', '#')
            metadata[field] = fm[field]
            if not is_remote:
                # Local connections resolve formats/cover to filesystem paths
                if field == 'formats':
                    data[field] = {k: list(formats(db, k)) for k in book_ids}
                    continue
                if field == 'cover':
                    data[field] = {k: cover(db, k) for k in book_ids}
                    continue
            data[field] = db.all_field_for(field, book_ids)
    return {'book_ids': book_ids, "data": data, 'metadata': metadata, 'fields':fields}
def stringify(data, metadata, for_machine):
    """Convert raw field values in *data* to display strings, in place.

    Authors are joined into a single string, datetimes are ISO formatted,
    and (for human output only) multi-valued fields are joined with their
    UI separator, with formats additionally wrapped in brackets.
    """
    for field, fm in iteritems(metadata):
        if field == 'authors':
            data[field] = {bid: authors_to_string(v) for bid, v in iteritems(data[field])}
            continue
        if fm['datatype'] == 'datetime':
            # Falsy dates become the literal string 'None'
            data[field] = {
                bid: (isoformat(v, as_utc=for_machine) if v else 'None')
                for bid, v in iteritems(data[field])
            }
            continue
        if for_machine:
            # Machine output keeps multi-valued fields as lists
            continue
        ism = fm['is_multiple']
        if ism:
            data[field] = {bid: ism['list_to_ui'].join(v) for bid, v in iteritems(data[field])}
            if field == 'formats':
                data[field] = {bid: '[' + v + ']' for bid, v in iteritems(data[field])}
def as_machine_data(book_ids, data, metadata):
    """Yield one JSON-friendly dict per book, mapping '#label' custom fields
    back to their user-facing '*label' form and skipping missing values."""
    for bid in book_ids:
        row = {'id': bid}
        for field, vals in iteritems(data):
            value = vals.get(bid)
            if value is not None:
                row[field.replace('#', '*')] = value
        yield row
def prepare_output_table(fields, book_ids, data, metadata):
    '''
    Build the table of display strings for the text renderer: one row per
    book, one cell per requested field, newlines flattened to spaces.
    Missing values come out as the string 'None'.
    '''
    table = []
    for book_id in book_ids:
        cells = []
        for field in fields:
            if field == 'id':
                cells.append(str(book_id))
            else:
                value = data.get(field.replace('*', '#'), {}).get(book_id)
                cells.append(str(value).replace('\n', ' '))
        table.append(cells)
    return table
def do_list(
    dbctx,
    fields,
    afields,
    sort_by,
    ascending,
    search_text,
    line_width,
    separator,
    prefix,
    limit,
    template,
    template_file,
    template_title,
    for_machine=False
):
    '''
    Fetch the requested book list via dbctx and render it, either as
    indented JSON (for_machine=True) or as a wrapped, fixed-width text
    table on stdout.

    :param dbctx: DBCtx used to run the server-side ``list`` implementation
    :param fields: field names requested by the user (may include 'template')
    :param afields: all available field names (not used here; kept for API symmetry)
    :param line_width: maximum table width; < 0 means autodetect from the terminal
    :param template: template source used when 'template' is in fields
    :param template_file: path to read the template from; overrides *template*
    :param template_title: column heading used for the template column
    '''
    if sort_by is None:
        ascending = True
    # Templates can execute arbitrary code, so refuse them for remote servers.
    if dbctx.is_remote and (template or template_file):
        raise SystemExit(_('The use of templates is disallowed when connecting to remote servers for security reasons'))
    if 'template' in (f.strip() for f in fields):
        if template_file:
            with open(template_file, 'rb') as f:
                template = f.read().decode('utf-8')
        if not template:
            raise SystemExit(_('You must provide a template'))
        ans = dbctx.run('list', fields, sort_by, ascending, search_text, limit, template)
    else:
        ans = dbctx.run('list', fields, sort_by, ascending, search_text, limit)
    try:
        book_ids, data, metadata = ans['book_ids'], ans['data'], ans['metadata']
    except TypeError:
        # The implementation returned an error string instead of a dict.
        raise SystemExit(ans)
    fields = list(ans['fields'])
    try:
        fields.remove('id')
    except ValueError:
        pass
    # The id column is always present and always first.
    fields = ['id'] + fields
    stringify(data, metadata, for_machine)
    if for_machine:
        raw = json.dumps(
            list(as_machine_data(book_ids, data, metadata)),
            indent=2,
            sort_keys=True
        )
        if not isinstance(raw, bytes):
            raw = raw.encode('utf-8')
        getattr(sys.stdout, 'buffer', sys.stdout).write(raw)
        return
    from calibre.utils.terminal import ColoredStream, geometry
    output_table = prepare_output_table(fields, book_ids, data, metadata)
    # Natural (unconstrained) width of every column.
    widths = list(map(lambda x: 0, fields))
    for record in output_table:
        for j in range(len(fields)):
            widths[j] = max(widths[j], str_width(record[j]))
    screen_width = geometry()[0] if line_width < 0 else line_width
    if not screen_width:
        screen_width = 80
    field_width = screen_width // len(fields)
    # Start from an even split, then grow columns one at a time until either
    # the screen is filled or every column fits its natural width.
    base_widths = list(map(lambda x: min(x + 1, field_width), widths))
    while sum(base_widths) < screen_width:
        adjusted = False
        for i in range(len(widths)):
            if base_widths[i] < widths[i]:
                base_widths[i] += min(
                    screen_width - sum(base_widths), widths[i] - base_widths[i]
                )
                adjusted = True
                break
        if not adjusted:
            break
    widths = list(base_widths)
    titles = map(
        lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths,
        [template_title if v == 'template' else v for v in fields]
    )
    with ColoredStream(sys.stdout, fg='green'):
        print(''.join(titles), flush=True)
    # Write UTF-8 bytes directly to the binary stdout stream so output is not
    # mangled by console text encodings.
    stdout = getattr(sys.stdout, 'buffer', sys.stdout)
    linesep = as_bytes(os.linesep)
    wrappers = [TextWrapper(x - 1).wrap if x > 1 else lambda y: y for x in widths]
    for record in output_table:
        text = [
            wrappers[i](record[i]) for i in range(len(fields))
        ]
        # A cell may wrap onto several lines; emit enough physical lines for
        # the tallest cell of this record.
        lines = max(map(len, text))
        for l in range(lines):
            for i in range(len(text)):
                ft = text[i][l] if l < len(text[i]) else ''
                stdout.write(ft.encode('utf-8'))
                if i < len(text) - 1:
                    filler = ('%*s' % (widths[i] - str_width(ft) - 1, ''))
                    stdout.write((filler + separator).encode('utf-8'))
            stdout.write(linesep)
def option_parser(get_parser, args):
    '''Build the command line option parser for ``calibredb list``.'''
    parser = get_parser(
        _(
            '''\
%prog list [options]
List the books available in the calibre database.
'''
        )
    )
    parser.add_option(
        '-f',
        '--fields',
        default='title,authors',
        help=_(
            'The fields to display when listing books in the'
            ' database. Should be a comma separated list of'
            ' fields.\nAvailable fields: %s\nDefault: %%default. The'
            ' special field "all" can be used to select all fields.'
            ' In addition to the builtin fields above, custom fields are'
            ' also available as *field_name, for example, for a custom field'
            ' #rating, use the name: *rating'
        ) % ', '.join(sorted(FIELDS))
    )
    parser.add_option(
        '--sort-by',
        default=None,
        help=_(
            'The field by which to sort the results. You can specify multiple fields by separating them with commas.\nAvailable fields: {0}\nDefault: {1}'
        ).format(', '.join(sorted(FIELDS)), 'id')
    )
    parser.add_option(
        '--ascending',
        default=False,
        action='store_true',
        help=_('Sort results in ascending order')
    )
    parser.add_option(
        '-s',
        '--search',
        default=None,
        help=_(
            'Filter the results by the search query. For the format of the search '
            'query, please see the search related documentation in the User '
            'Manual. Default is to do no filtering.'
        )
    )
    parser.add_option(
        '-w',
        '--line-width',
        default=-1,
        type=int,
        help=_(
            'The maximum width of a single line in the output. Defaults to detecting screen size.'
        )
    )
    parser.add_option(
        '--separator',
        default=' ',
        help=_('The string used to separate fields. Default is a space.')
    )
    parser.add_option(
        '--prefix',
        default=None,
        help=_(
            'The prefix for all file paths. Default is the absolute path to the library folder.'
        )
    )
    parser.add_option(
        '--limit',
        default=-1,
        type=int,
        help=_('The maximum number of results to display. Default: all')
    )
    parser.add_option(
        '--for-machine',
        default=False,
        action='store_true',
        help=_(
            'Generate output in JSON format, which is more suitable for machine '
            'parsing. Causes the line width and separator options to be ignored.'
        )
    )
    parser.add_option(
        '--template',
        default=None,
        help=_('The template to run if "{}" is in the field list. Note that templates are ignored while connecting to a calibre server.'
               ' Default: None').format('template')
    )
    parser.add_option(
        '--template_file',
        '-t',
        default=None,
        help=_('Path to a file containing the template to run if "{}" is in '
               'the field list. Default: None').format('template')
    )
    parser.add_option(
        '--template_heading',
        default='template',
        help=_('Heading for the template column. Default: %default. This option '
               'is ignored if the option {} is set').format('--for-machine')
    )
    return parser
def main(opts, args, dbctx):
    '''CLI entry point for ``calibredb list``: parse the field list and render.'''
    afields = set(FIELDS) | {'id'}
    requested = opts.fields.strip()
    if requested:
        field_names = [str(part.strip().lower()) for part in requested.split(',')]
    else:
        field_names = []
    do_list(
        dbctx,
        field_names,
        afields,
        opts.sort_by,
        opts.ascending,
        opts.search,
        opts.line_width,
        opts.separator,
        opts.prefix,
        opts.limit,
        opts.template,
        opts.template_file,
        opts.template_heading,
        for_machine=opts.for_machine
    )
    return 0
| 13,028 | Python | .py | 356 | 26.921348 | 154 | 0.553121 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,120 | cmd_list_categories.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_list_categories.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import csv
import sys
from textwrap import TextWrapper
from calibre import prints
from polyglot.builtins import as_bytes
readonly = True
version = 0 # change this if you change signature of implementation()
def implementation(db, notify_changes):
return db.get_categories(), db.field_metadata
def option_parser(get_parser, args):
parser = get_parser(
_(
'''\
%prog list_categories [options]
Produce a report of the category information in the database. The
information is the equivalent of what is shown in the Tag browser.
'''
)
)
parser.add_option(
'-i',
'--item_count',
default=False,
action='store_true',
help=_(
'Output only the number of items in a category instead of the '
'counts per item within the category'
)
)
parser.add_option(
'-c', '--csv', default=False, action='store_true', help=_('Output in CSV')
)
parser.add_option(
'--dialect',
default='excel',
choices=csv.list_dialects(),
help=_('The type of CSV file to produce. Choices: {}')
.format(', '.join(sorted(csv.list_dialects())))
)
parser.add_option(
'-r',
'--categories',
default='',
dest='report',
help=_("Comma-separated list of category lookup names. "
"Default: all")
)
parser.add_option(
'-w',
'--width',
default=-1,
type=int,
help=_(
'The maximum width of a single line in the output. '
'Defaults to detecting screen size.'
)
)
return parser
def do_list(fields, data, opts):
from calibre.utils.terminal import ColoredStream, geometry
separator = ' '
widths = list(map(lambda x: 0, fields))
for i in data:
for j, field in enumerate(fields):
widths[j] = max(widths[j], max(len(field), len(str(i[field]))))
screen_width = geometry()[0]
if not screen_width:
screen_width = 80
field_width = screen_width // len(fields)
base_widths = list(map(lambda x: min(x + 1, field_width), widths))
while sum(base_widths) < screen_width:
adjusted = False
for i in range(len(widths)):
if base_widths[i] < widths[i]:
base_widths[i] += min(
screen_width - sum(base_widths), widths[i] - base_widths[i]
)
adjusted = True
break
if not adjusted:
break
widths = list(base_widths)
titles = map(
lambda x, y: '%-*s%s' % (x - len(separator), y, separator), widths, fields
)
with ColoredStream(sys.stdout, fg='green'):
prints(''.join(titles))
wrappers = list(map(lambda x: TextWrapper(x - 1), widths))
for record in data:
text = [
wrappers[i].wrap(str(record[field]))
for i, field in enumerate(fields)
]
lines = max(map(len, text))
for l in range(lines):
for i, field in enumerate(text):
ft = text[i][l] if l < len(text[i]) else ''
filler = '%*s' % (widths[i] - len(ft) - 1, '')
print(ft.encode('utf-8') + filler.encode('utf-8'), end=separator)
print()
class StdoutWriter:
def __init__(self):
self.do_write = getattr(sys.stdout, 'buffer', sys.stdout).write
def write(self, x):
x = as_bytes(x)
self.do_write(x)
def do_csv(fields, data, opts):
csv_print = csv.writer(StdoutWriter(), opts.dialect)
csv_print.writerow(fields)
for d in data:
row = [d[f] for f in fields]
csv_print.writerow(row)
def main(opts, args, dbctx):
category_data, field_metadata = dbctx.run('list_categories')
data = []
report_on = [c.strip() for c in opts.report.split(',') if c.strip()]
def category_metadata(k):
return field_metadata.get(k)
categories = [
k for k in category_data.keys()
if category_metadata(k)['kind'] not in ['user', 'search'] and
(not report_on or k in report_on)
]
categories.sort(key=lambda x: x if x[0] != '#' else x[1:])
def fmtr(v):
v = v or 0
ans = '%.1f' % v
if ans.endswith('.0'):
ans = ans[:-2]
return ans
if not opts.item_count:
for category in categories:
is_rating = category_metadata(category)['datatype'] == 'rating'
for tag in category_data[category]:
if is_rating:
tag.name = str(len(tag.name))
data.append({
'category': category,
'tag_name': tag.name,
'count': str(tag.count),
'rating': fmtr(tag.avg_rating),
})
else:
for category in categories:
data.append({
'category': category,
'tag_name': _('CATEGORY ITEMS'),
'count': str(len(category_data[category])),
'rating': ''
})
fields = ['category', 'tag_name', 'count', 'rating']
func = do_csv if opts.csv else do_list
func(fields, data, opts)
return 0
| 5,350 | Python | .py | 156 | 25.621795 | 82 | 0.558915 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,121 | cmd_remove_format.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_remove_format.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre.srv.changes import formats_removed
readonly = False  # command metadata: this command modifies the library
version = 0  # change this if you change signature of implementation()
def implementation(db, notify_changes, book_id, fmt):
    '''Remove format *fmt* from book *book_id*; when running inside the
    Content server (notify_changes provided) broadcast the change.'''
    fmt_map = {book_id: (fmt, )}
    db.remove_formats(fmt_map)
    if notify_changes is not None:
        notify_changes(formats_removed(fmt_map))
def option_parser(get_parser, args):
    '''Build the command line option parser for ``calibredb remove_format``.'''
    return get_parser(
        _(
            '''
%prog remove_format [options] id fmt
Remove the format fmt from the logical book identified by id. \
You can get id by using the search command. fmt should be a file extension \
like LRF or TXT or EPUB. If the logical book does not have fmt available, \
do nothing.
'''
        )
    )
def main(opts, args, dbctx):
    '''
    Entry point for ``calibredb remove_format``.

    args must contain the book id and the format (a file extension); the
    format is upper-cased before dispatch. Raises SystemExit when arguments
    are missing.
    '''
    if len(args) < 2:
        # Fix: previously an unreachable ``return 1`` followed this raise;
        # SystemExit already terminates with a non-zero status.
        raise SystemExit(_('You must specify an id and a format'))
    # Renamed from ``id`` to avoid shadowing the builtin.
    book_id, fmt = int(args[0]), args[1].upper()
    dbctx.run('remove_format', book_id, fmt)
    return 0
| 1,074 | Python | .py | 30 | 31.366667 | 76 | 0.68635 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,122 | cmd_clone.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_clone.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import os
from calibre import patheq
from calibre.constants import iswindows
from calibre.db.legacy import LibraryDatabase
readonly = True  # command metadata: reads prefs only; the clone is written by main()
version = 0  # change this if you change signature of implementation()
def implementation(db, notify_changes):
    '''Return a copy of the library preferences and the library path.'''
    backend = db.backend
    return backend.prefs.copy(), backend.library_path
def option_parser(get_parser, args):
    '''Build the command line option parser for ``calibredb clone``.'''
    return get_parser(
        _(
            '''\
%prog clone path/to/new/library
Create a clone of the current library. This creates a new, empty library that has all the
same custom columns, Virtual libraries and other settings as the current library.
The cloned library will contain no books. If you want to create a full duplicate, including
all books, then simply use your filesystem tools to copy the library folder.
'''
        )
    )
def main(opts, args, dbctx):
    '''Entry point for ``calibredb clone``: create a new, empty library at
    args[0] with the same settings as the current one.'''
    if len(args) < 1:
        raise SystemExit(_('Error: You must specify the path to the cloned library'))
    prefs, library_path = dbctx.run('clone')
    dest = os.path.abspath(args[0])
    if not os.path.exists(dest):
        os.makedirs(dest)
    if patheq(dest, library_path):
        raise SystemExit(
            _('The location for the new library is the same as the current library')
        )
    # The destination must be an empty folder.
    if os.listdir(dest):
        raise SystemExit(
            _(
                '%s is not empty. You must choose an empty folder for the new library.'
            ) % dest
        )
    if iswindows and len(dest) > LibraryDatabase.WINDOWS_LIBRARY_PATH_LIMIT:
        raise SystemExit(
            _('Path to library too long. It must be less than'
              ' %d characters.') % LibraryDatabase.WINDOWS_LIBRARY_PATH_LIMIT
        )
    # Creating the database with default_prefs copies over all settings.
    LibraryDatabase(dest, default_prefs=prefs)
    return 0
| 1,853 | Python | .py | 47 | 33.319149 | 91 | 0.684152 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,123 | main.py | kovidgoyal_calibre/src/calibre/db/cli/main.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import json
import os
import sys
from calibre import browser, prints
from calibre.constants import __appname__, __version__, iswindows
from calibre.db.cli import module_for_cmd
from calibre.db.legacy import LibraryDatabase
from calibre.utils.config import OptionParser, prefs
from calibre.utils.localization import localize_user_manual_link
from calibre.utils.lock import singleinstance
from calibre.utils.serialize import MSGPACK_MIME
from polyglot import http_client
from polyglot.urllib import urlencode, urlparse, urlunparse
# Every subcommand calibredb knows about. Each name maps to a
# calibre.db.cli.cmd_<name> module (see module_for_cmd) providing
# option_parser() and main().
COMMANDS = (
    'list', 'add', 'remove', 'add_format', 'remove_format', 'show_metadata',
    'set_metadata', 'export', 'catalog', 'saved_searches', 'add_custom_column',
    'custom_columns', 'remove_custom_column', 'set_custom', 'restore_database',
    'check_library', 'list_categories', 'backup_metadata', 'clone', 'embed_metadata',
    'search', 'fts_index', 'fts_search',
)
def option_parser_for(cmd, args=()):
    '''Return a zero-argument factory that builds the option parser for *cmd*,
    deferring the import of the command module until it is actually needed.'''
    def build_parser():
        return module_for_cmd(cmd).option_parser(get_parser, args)
    return build_parser
def run_cmd(cmd, opts, args, dbctx):
    '''Run *cmd*'s implementation module, refusing commands that are marked
    as unsupported against remote (Content server) libraries.'''
    mod = module_for_cmd(cmd)
    if dbctx.is_remote and getattr(mod, 'no_remote', False):
        raise SystemExit(_('The {} command is not supported with remote (server based) libraries').format(cmd))
    return mod.main(opts, args, dbctx)
def get_parser(usage):
    '''Return an OptionParser pre-populated with the options shared by every
    calibredb subcommand (library location, credentials, timeout).'''
    parser = OptionParser(usage)
    go = parser.add_option_group(_('GLOBAL OPTIONS'))
    # Flag used elsewhere to identify this group when rendering help.
    go.is_global_options = True
    go.add_option(
        '--library-path',
        '--with-library',
        default=None,
        help=_(
            'Path to the calibre library. Default is to use the path stored in the settings.'
            ' You can also connect to a calibre Content server to perform actions on'
            ' remote libraries. To do so use a URL of the form: http://hostname:port/#library_id'
            ' for example, http://localhost:8080/#mylibrary. library_id is the library id'
            ' of the library you want to connect to on the Content server. You can use'
            ' the special library_id value of - to get a list of library ids available'
            ' on the server. For details on how to setup access via a Content server, see'
            ' {}.'
        ).format(localize_user_manual_link(
            'https://manual.calibre-ebook.com/generated/en/calibredb.html'
        ))
    )
    go.add_option(
        '-h', '--help', help=_('show this help message and exit'), action='help'
    )
    go.add_option(
        '--version',
        help=_("show program's version number and exit"),
        action='version'
    )
    go.add_option(
        '--username',
        help=_('Username for connecting to a calibre Content server')
    )
    go.add_option(
        '--password',
        help=_('Password for connecting to a calibre Content server.'
               ' To read the password from standard input, use the special value: {0}.'
               ' To read the password from a file, use: {1} (i.e. <f: followed by the full path to the file and a trailing >).'
               ' The angle brackets in the above are required, remember to escape them or use quotes'
               ' for your shell.').format(
                   '<stdin>', '<f:C:/path/to/file>' if iswindows else '<f:/path/to/file>')
    )
    go.add_option(
        '--timeout',
        type=float,
        default=120,
        help=_('The timeout, in seconds, when connecting to a calibre library over the network. The default is'
               ' two minutes.')
    )
    return parser
def option_parser():
    '''Top level option parser that just lists the available subcommands.'''
    return get_parser(
        _(
            '''\
%%prog command [options] [arguments]
%%prog is the command line interface to the calibre books database.
command is one of:
  %s
For help on an individual command: %%prog command --help
'''
        ) % '\n  '.join(COMMANDS)
    )
def read_credentials(opts):
    '''
    Return the (username, password) pair from the parsed global options.
    The password value supports two indirection forms: the literal
    ``<stdin>`` prompts for it interactively, and ``<f:/path>`` reads it
    from the named file (with trailing whitespace stripped).
    '''
    password = opts.password
    if password:
        if password == '<stdin>':
            from getpass import getpass
            password = getpass(_('Enter the password: '))
        elif password.startswith('<f:') and password.endswith('>'):
            path = password[3:-1]
            with open(path, 'rb') as src:
                password = src.read().decode('utf-8').rstrip()
    return opts.username, password
class DBCtx:
    '''
    Execution context for calibredb commands. Wraps either a local
    LibraryDatabase (opened lazily) or an HTTP connection to a running
    calibre Content server, and dispatches command implementations via
    run().
    '''
    def __init__(self, opts, option_parser):
        self.option_parser = option_parser
        self.library_path = opts.library_path or prefs['library_path']
        self.timeout = opts.timeout
        self.url = None
        if self.library_path is None:
            raise SystemExit(
                'No saved library path, either run the GUI or use the'
                ' --with-library option'
            )
        # An http(s) URL means we talk to a Content server instead of
        # opening the database files directly.
        if self.library_path.partition(':')[0] in ('http', 'https'):
            parts = urlparse(self.library_path)
            self.library_id = parts.fragment or None
            self.url = urlunparse(parts._replace(fragment='')).rstrip('/')
            self.br = browser(handle_refresh=False, user_agent=f'{__appname__} {__version__}')
            self.is_remote = True
            username, password = read_credentials(opts)
            self.has_credentials = False
            if username and password:
                self.br.add_password(self.url, username, password)
                self.has_credentials = True
            # The special library id '-' just lists the server's libraries.
            if self.library_id == '-':
                self.list_libraries()
                raise SystemExit()
        else:
            self.library_path = os.path.expanduser(self.library_path)
            # Only one process may open the database directly at a time.
            if not singleinstance('db'):
                ext = '.exe' if iswindows else ''
                raise SystemExit(_(
                    'Another calibre program such as {} or the main calibre program is running.'
                    ' Having multiple programs that can make changes to a calibre library'
                    ' running at the same time is a bad idea. calibredb can connect directly'
                    ' to a running calibre Content server, to make changes through it, instead.'
                    ' See the documentation of the {} option for details.'
                ).format('calibre-server' + ext, '--with-library')
                )
            self._db = None
            self.is_remote = False
    @property
    def db(self):
        # Lazily opened local database; never used in remote mode.
        if self._db is None:
            self._db = LibraryDatabase(self.library_path)
        return self._db
    def path(self, path):
        # In remote mode file contents must travel over the wire, so return
        # (path, bytes); locally the path alone suffices.
        if self.is_remote:
            with open(path, 'rb') as f:
                return path, f.read()
        return path
    def run(self, name, *args):
        # Dispatch to the command module's implementation(), locally or
        # via the Content server.
        m = module_for_cmd(name)
        if self.is_remote:
            return self.remote_run(name, m, *args)
        return m.implementation(self.db.new_api, None, *args)
    def interpret_http_error(self, err):
        # Translate common HTTP failures into friendly SystemExit messages;
        # unknown codes fall through so the caller can re-raise.
        if err.code == http_client.UNAUTHORIZED:
            if self.has_credentials:
                raise SystemExit('The username/password combination is incorrect')
            raise SystemExit('A username and password is required to access this server')
        if err.code == http_client.FORBIDDEN:
            raise SystemExit(err.reason)
        if err.code == http_client.NOT_FOUND:
            raise SystemExit(err.reason)
    def remote_run(self, name, m, *args):
        # Execute the command on the Content server via its /cdb/cmd
        # endpoint, serializing arguments and results with msgpack. The
        # command module's ``version`` is part of the URL so server and
        # client implementations stay in sync.
        from mechanize import HTTPError, Request
        from calibre.utils.serialize import msgpack_dumps, msgpack_loads
        url = self.url + '/cdb/cmd/{}/{}'.format(name, getattr(m, 'version', 0))
        if self.library_id:
            url += '?' + urlencode({'library_id':self.library_id})
        rq = Request(url, data=msgpack_dumps(args),
                     headers={'Accept': MSGPACK_MIME, 'Content-Type': MSGPACK_MIME})
        try:
            res = self.br.open_novisit(rq, timeout=self.timeout)
            ans = msgpack_loads(res.read())
        except HTTPError as err:
            self.interpret_http_error(err)
            raise
        if 'err' in ans:
            if ans['tb']:
                prints(ans['tb'])
            raise SystemExit(ans['err'])
        return ans['result']
    def list_libraries(self):
        # Print the ids of all libraries on the server, default library first.
        from mechanize import HTTPError
        url = self.url + '/ajax/library-info'
        try:
            res = self.br.open_novisit(url, timeout=self.timeout)
            ans = json.loads(res.read())
        except HTTPError as err:
            self.interpret_http_error(err)
            raise
        library_map, default_library = ans['library_map'], ans['default_library']
        for lid in sorted(library_map, key=lambda lid: (lid != default_library, lid)):
            prints(lid)
def main(args=sys.argv):
    '''Entry point for the calibredb tool: find the subcommand on the
    command line, build its parser and dispatch to run_cmd().'''
    top_parser = option_parser()
    if len(args) < 2:
        top_parser.print_help()
        return 1
    first = args[1]
    if first in ('-h', '--help'):
        top_parser.print_help()
        return 0
    if first == '--version':
        top_parser.print_version()
        return 0
    # Locate the first argument (after the program name) that names a command.
    cmd = cmd_pos = None
    for pos, arg in enumerate(args):
        if pos > 0 and arg in COMMANDS:
            cmd, cmd_pos = arg, pos
            break
    if cmd is None:
        top_parser.print_help()
        print()
        raise SystemExit(_('Error: You must specify a command from the list above'))
    # Remove the command itself; everything else goes to its own parser.
    del args[cmd_pos]
    cmd_parser = option_parser_for(cmd, args[1:])()
    opts, args = cmd_parser.parse_args(args)
    return run_cmd(cmd, opts, args[1:], DBCtx(opts, cmd_parser))
if __name__ == '__main__':
    # Script entry point when run directly rather than via the calibre launcher.
    main()
| 9,434 | Python | .py | 224 | 33.089286 | 127 | 0.603138 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,124 | cmd_add_custom_column.py | kovidgoyal_calibre/src/calibre/db/cli/cmd_add_custom_column.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import json
from calibre import prints
from calibre.db.legacy import LibraryDatabase
from calibre.library.custom_columns import CustomColumns
readonly = False  # command metadata: this command modifies the library
version = 0  # change this if you change signature of implementation()
no_remote = True  # refused for remote (Content server) libraries by run_cmd()
def implementation(db, notify_changes, *args):
    # Never called: this command is marked no_remote and main() operates on
    # the database directly, so there is no server-side implementation.
    raise NotImplementedError()
def option_parser(get_parser, args):
    '''Build the command line option parser for ``calibredb add_custom_column``.'''
    parser = get_parser(
        _(
            '''\
%prog add_custom_column [options] label name datatype
Create a custom column. label is the machine friendly name of the column. Should
not contain spaces or colons. name is the human friendly name of the column.
datatype is one of: {0}
'''
        ).format(', '.join(sorted(CustomColumns.CUSTOM_DATA_TYPES)))
    )
    parser.add_option(
        '--is-multiple',
        default=False,
        action='store_true',
        help=_(
            'This column stores tag like data (i.e. '
            'multiple comma separated values). Only '
            'applies if datatype is text.'
        )
    )
    parser.add_option(
        '--display',
        default='{}',
        help=_(
            'A dictionary of options to customize how '
            'the data in this column will be interpreted. This is a JSON '
            ' string. For enumeration columns, use '
            '--display="{\\"enum_values\\":[\\"val1\\", \\"val2\\"]}"'
            '\n'
            'There are many options that can go into the display variable.'
            'The options by column type are:\n'
            'composite: composite_template, composite_sort, make_category,'
            'contains_html, use_decorations\n'
            'datetime: date_format\n'
            'enumeration: enum_values, enum_colors, use_decorations\n'
            'int, float: number_format\n'
            'text: is_names, use_decorations\n'
            '\n'
            'The best way to find legal combinations is to create a custom '
            'column of the appropriate type in the GUI then look at the '
            'backup OPF for a book (ensure that a new OPF has been created '
            'since the column was added). You will see the JSON for the '
            '"display" for the new column in the OPF.'
        )
    )
    return parser
def do_add_custom_column(db, label, name, datatype, is_multiple, display):
    '''Create the custom column in *db* and print the new column id.'''
    col_id = db.create_custom_column(
        label, name, datatype, is_multiple, display=display
    )
    prints('Custom column created with id: %s' % col_id)
def main(opts, args, dbctx):
    '''Entry point for ``calibredb add_custom_column``: validate arguments,
    create the column and refresh the stored field metadata.'''
    if len(args) < 3:
        raise SystemExit(_('You must specify label, name and datatype'))
    do_add_custom_column(
        dbctx.db, args[0], args[1], args[2], opts.is_multiple,
        json.loads(opts.display)
    )
    # Reopen the library so the stored field_metadata pref includes the
    # newly created column.
    dbctx.db.close()
    reopened = LibraryDatabase(dbctx.db.library_path)
    metadata = reopened.field_metadata.all_metadata()
    reopened.new_api.set_pref('field_metadata', metadata)
    return 0
| 3,019 | Python | .py | 76 | 32.276316 | 80 | 0.634346 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,125 | iphlpapi.py | kovidgoyal_calibre/src/calibre/utils/iphlpapi.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
import ctypes
from collections import namedtuple
from contextlib import contextmanager
from ctypes import windll, wintypes
# Wraps (part of) the IPHelper API, useful to enumerate the network routes and
# adapters on the local machine
class GUID(ctypes.Structure):
    # ctypes mirror of the Win32 GUID structure. Field order and types are
    # ABI-critical and must not be changed.
    _fields_ = [
        ("data1", wintypes.DWORD),
        ("data2", wintypes.WORD),
        ("data3", wintypes.WORD),
        ("data4", wintypes.BYTE * 8)]
    def __init__(self, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8):
        # Initialize from the conventional 11-part GUID spelling.
        self.data1 = l
        self.data2 = w1
        self.data3 = w2
        self.data4[0] = b1
        self.data4[1] = b2
        self.data4[2] = b3
        self.data4[3] = b4
        self.data4[4] = b5
        self.data4[5] = b6
        self.data4[6] = b7
        self.data4[7] = b8
# ctypes mirrors of the Win32 structures used by GetAdaptersAddresses.
# In every structure below the field order, types and array sizes are
# ABI-critical: do not reorder or change them.
class SOCKADDR(ctypes.Structure):
    # Generic socket address (struct sockaddr).
    _fields_ = [
        ('sa_family', wintypes.USHORT),
        ('sa_data', ctypes.c_char * 14),
    ]
# Win32 error codes and IP Helper constants (values from the Windows SDK).
ERROR_SUCCESS = 0
ERROR_INSUFFICIENT_BUFFER = 122
ERROR_BUFFER_OVERFLOW = 111
MAX_ADAPTER_NAME_LENGTH = 256
MAX_ADAPTER_DESCRIPTION_LENGTH = 128
MAX_ADAPTER_ADDRESS_LENGTH = 8
# Do not return IPv6 anycast addresses.
GAA_FLAG_SKIP_ANYCAST = 2
GAA_FLAG_SKIP_MULTICAST = 4
IP_ADAPTER_DHCP_ENABLED = 4
IP_ADAPTER_IPV4_ENABLED = 0x80
IP_ADAPTER_IPV6_ENABLED = 0x0100
MAX_DHCPV6_DUID_LENGTH = 130
IF_TYPE_ETHERNET_CSMACD = 6
IF_TYPE_SOFTWARE_LOOPBACK = 24
IF_TYPE_IEEE80211 = 71
IF_TYPE_TUNNEL = 131
IP_ADAPTER_ADDRESSES_SIZE_2003 = 144
class SOCKET_ADDRESS(ctypes.Structure):
    _fields_ = [
        ('lpSockaddr', ctypes.POINTER(SOCKADDR)),
        ('iSockaddrLength', wintypes.INT),
    ]
class IP_ADAPTER_ADDRESSES_Struct1(ctypes.Structure):
    _fields_ = [
        ('Length', wintypes.ULONG),
        ('IfIndex', wintypes.DWORD),
    ]
class IP_ADAPTER_ADDRESSES_Union1(ctypes.Union):
    _fields_ = [
        ('Alignment', wintypes.ULARGE_INTEGER),
        ('Struct1', IP_ADAPTER_ADDRESSES_Struct1),
    ]
class IP_ADAPTER_UNICAST_ADDRESS(ctypes.Structure):
    # 'Next' is an untyped pointer; it is cast when the linked list is walked.
    _fields_ = [
        ('Union1', IP_ADAPTER_ADDRESSES_Union1),
        ('Next', wintypes.LPVOID),
        ('Address', SOCKET_ADDRESS),
        ('PrefixOrigin', wintypes.DWORD),
        ('SuffixOrigin', wintypes.DWORD),
        ('DadState', wintypes.DWORD),
        ('ValidLifetime', wintypes.ULONG),
        ('PreferredLifetime', wintypes.ULONG),
        ('LeaseLifetime', wintypes.ULONG),
    ]
class IP_ADAPTER_DNS_SERVER_ADDRESS_Struct1(ctypes.Structure):
    _fields_ = [
        ('Length', wintypes.ULONG),
        ('Reserved', wintypes.DWORD),
    ]
class IP_ADAPTER_DNS_SERVER_ADDRESS_Union1(ctypes.Union):
    _fields_ = [
        ('Alignment', wintypes.ULARGE_INTEGER),
        ('Struct1', IP_ADAPTER_DNS_SERVER_ADDRESS_Struct1),
    ]
class IP_ADAPTER_DNS_SERVER_ADDRESS(ctypes.Structure):
    _fields_ = [
        ('Union1', IP_ADAPTER_DNS_SERVER_ADDRESS_Union1),
        ('Next', wintypes.LPVOID),
        ('Address', SOCKET_ADDRESS),
    ]
class IP_ADAPTER_PREFIX_Struct1(ctypes.Structure):
    _fields_ = [
        ('Length', wintypes.ULONG),
        ('Flags', wintypes.DWORD),
    ]
class IP_ADAPTER_PREFIX_Union1(ctypes.Union):
    _fields_ = [
        ('Alignment', wintypes.ULARGE_INTEGER),
        ('Struct1', IP_ADAPTER_PREFIX_Struct1),
    ]
class IP_ADAPTER_PREFIX(ctypes.Structure):
    _fields_ = [
        ('Union1', IP_ADAPTER_PREFIX_Union1),
        ('Next', wintypes.LPVOID),
        ('Address', SOCKET_ADDRESS),
        ('PrefixLength', wintypes.ULONG),
    ]
class IP_ADAPTER_DNS_SUFFIX(ctypes.Structure):
    _fields_ = [
        ('Next', wintypes.LPVOID),
        ('String', wintypes.LPWSTR),
    ]
class NET_LUID_LH(ctypes.Union):
    _fields_ = [
        ('Value', wintypes.ULARGE_INTEGER),
        ('Info', wintypes.ULARGE_INTEGER),
    ]
class IP_ADAPTER_ADDRESSES(ctypes.Structure):
    # ctypes mirror of the Win32 IP_ADAPTER_ADDRESSES structure returned by
    # GetAdaptersAddresses. Field order and types are ABI-critical.
    _fields_ = [
        ('Union1', IP_ADAPTER_ADDRESSES_Union1),
        ('Next', wintypes.LPVOID),
        ('AdapterName', ctypes.c_char_p),
        ('FirstUnicastAddress',
         ctypes.POINTER(IP_ADAPTER_UNICAST_ADDRESS)),
        ('FirstAnycastAddress',
         ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS)),
        ('FirstMulticastAddress',
         ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS)),
        ('FirstDnsServerAddress',
         ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS)),
        ('DnsSuffix', wintypes.LPWSTR),
        ('Description', wintypes.LPWSTR),
        ('FriendlyName', wintypes.LPWSTR),
        ('PhysicalAddress', ctypes.c_ubyte * MAX_ADAPTER_ADDRESS_LENGTH),
        ('PhysicalAddressLength', wintypes.DWORD),
        ('Flags', wintypes.DWORD),
        ('Mtu', wintypes.DWORD),
        ('IfType', wintypes.DWORD),
        ('OperStatus', wintypes.DWORD),
        ('Ipv6IfIndex', wintypes.DWORD),
        ('ZoneIndices', wintypes.DWORD * 16),
        ('FirstPrefix', ctypes.POINTER(IP_ADAPTER_PREFIX)),
        # Vista and later
        ('TransmitLinkSpeed', wintypes.ULARGE_INTEGER),
        ('ReceiveLinkSpeed', wintypes.ULARGE_INTEGER),
        ('FirstWinsServerAddress',
         ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS)),
        ('FirstGatewayAddress',
         ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS)),
        ('Ipv4Metric', wintypes.ULONG),
        ('Ipv6Metric', wintypes.ULONG),
        ('Luid', NET_LUID_LH),
        ('Dhcpv4Server', SOCKET_ADDRESS),
        ('CompartmentId', wintypes.DWORD),
        ('NetworkGuid', GUID),
        ('ConnectionType', wintypes.DWORD),
        ('TunnelType', wintypes.DWORD),
        ('Dhcpv6Server', SOCKET_ADDRESS),
        ('Dhcpv6ClientDuid', ctypes.c_ubyte * MAX_DHCPV6_DUID_LENGTH),
        ('Dhcpv6ClientDuidLength', wintypes.ULONG),
        ('Dhcpv6Iaid', wintypes.ULONG),
        # Vista SP1 and later, so we comment it out as we dont need it
        # ('FirstDnsSuffix', ctypes.POINTER(IP_ADAPTER_DNS_SUFFIX)),
    ]
class Win32_MIB_IPFORWARDROW(ctypes.Structure):
    # One row of the IPv4 routing table (MIB_IPFORWARDROW); ABI-critical order.
    _fields_ = [
        ('dwForwardDest', wintypes.DWORD),
        ('dwForwardMask', wintypes.DWORD),
        ('dwForwardPolicy', wintypes.DWORD),
        ('dwForwardNextHop', wintypes.DWORD),
        ('dwForwardIfIndex', wintypes.DWORD),
        ('dwForwardType', wintypes.DWORD),
        ('dwForwardProto', wintypes.DWORD),
        ('dwForwardAge', wintypes.DWORD),
        ('dwForwardNextHopAS', wintypes.DWORD),
        ('dwForwardMetric1', wintypes.DWORD),
        ('dwForwardMetric2', wintypes.DWORD),
        ('dwForwardMetric3', wintypes.DWORD),
        ('dwForwardMetric4', wintypes.DWORD),
        ('dwForwardMetric5', wintypes.DWORD)
    ]
class Win32_MIB_IPFORWARDTABLE(ctypes.Structure):
    # Variable length table: 'table' is declared with one element and indexed
    # past its end after a cast, as is conventional with this Win32 API.
    _fields_ = [
        ('dwNumEntries', wintypes.DWORD),
        ('table', Win32_MIB_IPFORWARDROW * 1)
    ]
# Prototypes for the IP Helper and kernel32 functions used below; declaring
# argtypes/restype lets ctypes do correct conversions on 64-bit Windows.
GetAdaptersAddresses = windll.Iphlpapi.GetAdaptersAddresses
GetAdaptersAddresses.argtypes = [
    wintypes.ULONG, wintypes.ULONG, wintypes.LPVOID,
    ctypes.POINTER(IP_ADAPTER_ADDRESSES),
    ctypes.POINTER(wintypes.ULONG)]
GetAdaptersAddresses.restype = wintypes.ULONG
GetIpForwardTable = windll.Iphlpapi.GetIpForwardTable
GetIpForwardTable.argtypes = [
    ctypes.POINTER(Win32_MIB_IPFORWARDTABLE),
    ctypes.POINTER(wintypes.ULONG),
    wintypes.BOOL]
GetIpForwardTable.restype = wintypes.DWORD
GetProcessHeap = windll.kernel32.GetProcessHeap
GetProcessHeap.argtypes = []
GetProcessHeap.restype = wintypes.HANDLE
HeapAlloc = windll.kernel32.HeapAlloc
HeapAlloc.argtypes = [wintypes.HANDLE, wintypes.DWORD, ctypes.c_uint64]
HeapAlloc.restype = wintypes.LPVOID
HeapFree = windll.kernel32.HeapFree
HeapFree.argtypes = [wintypes.HANDLE, wintypes.DWORD, wintypes.LPVOID]
HeapFree.restype = wintypes.BOOL
ERROR_NO_DATA = 232
GAA_FLAG_INCLUDE_PREFIX = 0x0010
Ws2_32 = windll.Ws2_32
Ws2_32.inet_ntoa.restype = ctypes.c_char_p
def _heap_alloc(heap, size):
    # Allocate size.value bytes from the given process heap, raising
    # MemoryError on failure. The caller is responsible for HeapFree.
    table_mem = HeapAlloc(heap, 0, ctypes.c_size_t(size.value))
    if not table_mem:
        raise MemoryError('Unable to allocate memory for the IP forward table')
    return table_mem
@contextmanager
def _get_forward_table():
    # Yield a pointer to the IPv4 forward (routing) table. Follows the
    # documented two-call pattern: call with a NULL buffer to learn the
    # required size, allocate, retry. Retries are bounded because the table
    # can change size between calls. The heap buffer is freed on exit.
    # NOTE(review): if the very first call returns ERROR_NO_DATA we yield
    # None and then fall through to the 'changing rapidly' OSError after the
    # with-block resumes — confirm this is intended.
    heap = GetProcessHeap()
    size = wintypes.ULONG(0)
    p_forward_table = table_mem = None
    max_tries = 10
    try:
        while max_tries > 0:
            max_tries -= 1
            err = GetIpForwardTable(p_forward_table, ctypes.byref(size), 0)
            if err == ERROR_INSUFFICIENT_BUFFER:
                if p_forward_table is not None:
                    HeapFree(heap, 0, p_forward_table)
                    p_forward_table = None
                table_mem = _heap_alloc(heap, size)
                p_forward_table = ctypes.cast(table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
            elif err in (ERROR_SUCCESS, ERROR_NO_DATA):
                yield p_forward_table
                break
            else:
                raise OSError('Unable to get IP forward table. Error: %s' % err)
        if p_forward_table is None:
            raise OSError('Failed to get IP routing table, table appears to be changing rapidly')
    finally:
        if p_forward_table is not None:
            HeapFree(heap, 0, p_forward_table)
@contextmanager
def _get_adapters():
    '''Yield a pointer to the head of the IP_ADAPTER_ADDRESSES linked list
    for this machine, or None when there are no adapters (ERROR_NO_DATA).
    The backing heap memory is freed when the context exits.'''
    heap = GetProcessHeap()
    size = wintypes.ULONG(0)
    addresses = buf = None
    max_tries = 10
    yielded = False
    try:
        while max_tries > 0:
            max_tries -= 1
            err = GetAdaptersAddresses(0, GAA_FLAG_INCLUDE_PREFIX, None, addresses, ctypes.byref(size))
            if err in (ERROR_SUCCESS, ERROR_NO_DATA):
                yielded = True
                yield addresses
                break
            elif err == ERROR_BUFFER_OVERFLOW:
                # Buffer too small (or first probe): grow and retry
                if addresses is not None:
                    HeapFree(heap, 0, addresses)
                    addresses = None
                buf = _heap_alloc(heap, size)
                addresses = ctypes.cast(buf, ctypes.POINTER(IP_ADAPTER_ADDRESSES))
            else:
                raise OSError('Failed to determine size for adapters table with error: %s' % err)
        if not yielded:
            # Fix: the old check tested `addresses is None`, which could raise
            # AFTER a successful yield (no adapters yields None) and never
            # fired on genuine retry exhaustion, where addresses is non-None.
            raise OSError('Failed to get adapter addresses, table appears to be changing rapidly')
    finally:
        if addresses is not None:
            HeapFree(heap, 0, addresses)
# One record per network adapter; if_index/if_index6 are the IPv4/IPv6
# interface indexes used to correlate with routing table rows.
Adapter = namedtuple('Adapter', 'name if_index if_index6 friendly_name status transmit_speed receive_speed')
def adapters():
    ''' A list of adapters on this machine '''
    ans = []
    # IfOperStatus values from the Win32 API mapped to readable strings
    smap = {1:'up', 2:'down', 3:'testing', 4:'unknown', 5:'dormant', 6:'not-present', 7:'lower-layer-down'}
    with _get_adapters() as p_adapters_list:
        # Walk the linked list of IP_ADAPTER_ADDRESSES structures; a NULL
        # pointer (or None, for an empty adapter list) is falsy, ending the loop
        adapter = p_adapters_list
        while adapter:
            adapter = adapter.contents
            if not adapter:
                # NOTE(review): ctypes Structure instances appear to be always
                # truthy, so this break looks unreachable; termination actually
                # happens via the NULL Next pointer cast below. Confirm before
                # relying on this guard.
                break
            ans.append(Adapter(
                name=adapter.AdapterName.decode(),
                if_index=adapter.Union1.Struct1.IfIndex,
                if_index6=adapter.Ipv6IfIndex,
                friendly_name=adapter.FriendlyName,
                status=smap.get(adapter.OperStatus, 'unknown'),
                transmit_speed=adapter.TransmitLinkSpeed,
                receive_speed=adapter.ReceiveLinkSpeed
            ))
            adapter = ctypes.cast(adapter.Next, ctypes.POINTER(IP_ADAPTER_ADDRESSES))
    return ans
# One record per IPv4 routing table entry; interface is the adapter name
# (or None if the interface index is unknown).
Route = namedtuple('Route', 'destination gateway netmask interface metric flags')
def routes():
    ''' A list of routes on this machine '''
    ans = []
    # Map interface indexes to adapter names so routes can be labelled
    adapter_map = {a.if_index:a.name for a in adapters()}
    with _get_forward_table() as p_forward_table:
        if p_forward_table is None:
            # Empty routing table (ERROR_NO_DATA)
            return ans
        forward_table = p_forward_table.contents
        # The table member is declared with a fixed length; re-cast it to an
        # array of the actual number of entries reported by the API
        table = ctypes.cast(
            ctypes.addressof(forward_table.table),
            ctypes.POINTER(Win32_MIB_IPFORWARDROW * forward_table.dwNumEntries)
        ).contents
        for row in table:
            # dwForward* fields are raw in_addr values; inet_ntoa formats
            # them as dotted-quad byte strings
            destination = Ws2_32.inet_ntoa(row.dwForwardDest).decode()
            netmask = Ws2_32.inet_ntoa(row.dwForwardMask).decode()
            gateway = Ws2_32.inet_ntoa(row.dwForwardNextHop).decode()
            ans.append(Route(
                destination=destination,
                gateway=gateway,
                netmask=netmask,
                interface=adapter_map.get(row.dwForwardIfIndex),
                metric=row.dwForwardMetric1,
                flags=row.dwForwardProto
            ))
    return ans
if __name__ == '__main__':
    # Manual smoke test: dump the adapter list followed by the routing table
    from pprint import pprint
    for report in (adapters, routes):
        pprint(report())
| 12,476 | Python | .py | 323 | 30.876161 | 108 | 0.638252 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,126 | resources.py | kovidgoyal_calibre/src/calibre/utils/resources.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import sys
from calibre import config_dir
from polyglot.builtins import builtins
user_dir = os.path.join(config_dir, 'resources')
class PathResolver:
    '''Resolve resource paths against an ordered list of search locations:
    the user override directory, an optional development checkout (from the
    CALIBRE_DEVELOP_FROM environment variable) and the bundled resources
    directory. Lookups are cached.'''

    def __init__(self):
        self.locations = [sys.resources_location]
        self.cache = {}

        def suitable(path):
            # A location is usable only if it is a non-empty directory
            try:
                return os.path.exists(path) and os.path.isdir(path) and \
                        os.listdir(path)
            except OSError:
                # Was a bare except; only filesystem errors are expected here
                pass
            return False

        self.default_path = sys.resources_location

        dev_path = os.environ.get('CALIBRE_DEVELOP_FROM', None)
        self.using_develop_from = False
        if dev_path is not None:
            dev_path = os.path.join(os.path.abspath(
                os.path.dirname(dev_path)), 'resources')
            if suitable(dev_path):
                self.locations.insert(0, dev_path)
                self.default_path = dev_path
                self.using_develop_from = True

        self.user_path = None
        if suitable(user_dir):
            self.locations.insert(0, user_dir)
            self.user_path = user_dir

    def __call__(self, path, allow_user_override=True):
        '''Return the filesystem path for the resource *path* (always
        /-separated). Falls back to the default location even when the file
        does not exist anywhere, so callers get a deterministic answer.'''
        path = path.replace(os.sep, '/')
        key = (path, allow_user_override)
        ans = self.cache.get(key, None)
        if ans is None:
            for base in self.locations:
                if not allow_user_override and base == self.user_path:
                    continue
                fpath = os.path.join(base, *path.split('/'))
                if os.path.exists(fpath):
                    ans = fpath
                    break

            if ans is None:
                ans = os.path.join(self.default_path, *path.split('/'))

            self.cache[key] = ans

        return ans

    def set_data(self, path, data=None):
        '''Write *data* as the user override for resource *path*, or remove
        the override when *data* is None.'''
        self.cache.pop((path, True), None)
        fpath = os.path.join(user_dir, *path.split('/'))
        if data is None:
            if os.path.exists(fpath):
                os.remove(fpath)
        else:
            base = os.path.dirname(fpath)
            # exist_ok avoids the TOCTOU race of the old exists()+makedirs()
            os.makedirs(base, exist_ok=True)
            with open(fpath, 'wb') as f:
                f.write(data)
_resolver = PathResolver()
def get_path(path, data=False, allow_user_override=True):
    '''Return the filesystem path of the resource *path* (/-separated).
    When *data* is True, return the file contents as bytes instead. When
    *allow_user_override* is False, user resource overrides are ignored.'''
    fpath = _resolver(path, allow_user_override=allow_user_override)
    if not data:
        return fpath
    with open(fpath, 'rb') as f:
        return f.read()
def get_image_path(path, data=False, allow_user_override=True):
    '''Like get_path(), but relative to the images/ resource folder. An
    empty *path* resolves to the images folder itself.'''
    if not path:
        return get_path('images', allow_user_override=allow_user_override)
    return get_path(f'images/{path}', data=data, allow_user_override=allow_user_override)
def set_data(path, data=None):
    # Convenience wrapper: write (or remove, when data is None) the user
    # override for the resource at *path*. See PathResolver.set_data().
    return _resolver.set_data(path, data)
def get_user_path():
    # Directory containing user resource overrides, or None if it is unused
    return _resolver.user_path
# Install the historical global aliases used throughout calibre: P()
# resolves resource paths and I() resolves image resource paths, available
# everywhere without an explicit import.
builtins.__dict__['P'] = get_path
builtins.__dict__['I'] = get_image_path
| 3,067 | Python | .py | 79 | 28.949367 | 87 | 0.580798 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,127 | config_base.py | kovidgoyal_calibre/src/calibre/utils/config_base.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import numbers
import os
import re
import traceback
from collections import defaultdict
from contextlib import suppress
from copy import deepcopy
from functools import partial
from calibre.constants import CONFIG_DIR_MODE, config_dir, filesystem_encoding, get_umask, iswindows, preferred_encoding
from calibre.utils.localization import _
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems
# Directory into which user installed plugins are placed
plugin_dir = os.path.join(config_dir, 'plugins')
def parse_old_style(src):
    '''Parse a legacy (pre-JSON) config string, which was executable Python
    storing values via cPickle. Returns the namespace dict the code ran in.

    SECURITY NOTE: this exec()s the config contents; it must only ever be
    fed files from the trusted local configuration directory.
    '''
    import pickle as cPickle
    options = {'cPickle':cPickle}
    try:
        if not isinstance(src, str):
            src = src.decode('utf-8')
        # Old configs may reference PyQt4/5 names; rewrite to the current binding
        src = re.sub(r'PyQt(?:4|5).QtCore', r'PyQt6.QtCore', src)
        # Pickled payloads were written as str literals under python2; turn
        # them into bytes literals so cPickle.loads() accepts them
        src = re.sub(r'cPickle\.loads\(([\'"])', r'cPickle.loads(b\1', src)
        exec(src, options)
    except Exception as err:
        try:
            print(f'Failed to parse old style options string with error: {err}')
        except Exception:
            pass
    return options
def to_json(obj):
    '''JSON serialization hook (``default=``) for types the stdlib encoder
    cannot handle: bytearray, datetime, set/frozenset, bytes, QByteArray and
    integer-valued enums. Raises TypeError for anything else.'''
    import datetime
    if isinstance(obj, bytearray):
        from base64 import standard_b64encode
        encoded = standard_b64encode(bytes(obj)).decode('ascii')
        return {'__class__': 'bytearray', '__value__': encoded}
    if isinstance(obj, datetime.datetime):
        from calibre.utils.date import isoformat
        return {'__class__': 'datetime.datetime',
                '__value__': isoformat(obj, as_utc=True)}
    if isinstance(obj, (set, frozenset)):
        return {'__class__': 'set', '__value__': tuple(obj)}
    if isinstance(obj, bytes):
        return obj.decode('utf-8')
    if hasattr(obj, 'toBase64'):  # QByteArray
        return {'__class__': 'bytearray',
                '__value__': bytes(obj.toBase64()).decode('ascii')}
    enum_val = getattr(obj, 'value', None)
    if isinstance(enum_val, int):
        # Possibly an enum with integer values, like all the Qt enums
        return enum_val
    raise TypeError(repr(obj) + ' is not JSON serializable')
def safe_to_json(obj):
    '''Like to_json(), but returns None for unserializable objects instead
    of raising.'''
    with suppress(Exception):
        return to_json(obj)
def from_json(obj):
    '''Inverse of to_json(): an ``object_hook`` that rebuilds the special
    types marked with a __class__ key; all other dicts pass through.'''
    cls = obj.get('__class__')
    if cls == 'bytearray':
        from base64 import standard_b64decode
        return bytearray(standard_b64decode(obj['__value__'].encode('ascii')))
    if cls == 'datetime.datetime':
        from calibre.utils.iso8601 import parse_iso8601
        return parse_iso8601(obj['__value__'], assume_utc=True)
    if cls == 'set':
        return set(obj['__value__'])
    return obj
def force_unicode(x):
    '''Decode bytes *x* to str, trying the platform preferred encoding, then
    the filesystem encoding, finally falling back to lossy UTF-8.'''
    try:
        return x.decode('mbcs' if iswindows else preferred_encoding)
    except UnicodeDecodeError:
        pass
    try:
        return x.decode(filesystem_encoding)
    except UnicodeDecodeError:
        return x.decode('utf-8', 'replace')
def force_unicode_recursive(obj):
    '''Recursively decode every bytes object inside *obj* (which may be a
    nested structure of lists, tuples and dicts) to str.'''
    if isinstance(obj, bytes):
        return force_unicode(obj)
    if isinstance(obj, (list, tuple)):
        return type(obj)(force_unicode_recursive(item) for item in obj)
    if isinstance(obj, dict):
        return {force_unicode_recursive(k): force_unicode_recursive(v) for k, v in iteritems(obj)}
    return obj
def json_dumps(obj, ignore_unserializable=False):
    '''Serialize *obj* to pretty-printed, sorted UTF-8 JSON bytes, using the
    calibre type hooks. When *ignore_unserializable* is True, values that
    cannot be serialized are silently dropped (encoded as null).'''
    import json
    serializer = safe_to_json if ignore_unserializable else to_json
    try:
        ans = json.dumps(obj, indent=2, default=serializer, sort_keys=True, ensure_ascii=False)
    except UnicodeDecodeError:
        # Stray bytes somewhere inside obj: coerce everything to str and retry
        ans = json.dumps(force_unicode_recursive(obj), indent=2, default=serializer, sort_keys=True, ensure_ascii=False)
    return ans if isinstance(ans, bytes) else ans.encode('utf-8')
def json_loads(raw):
    '''Deserialize JSON produced by json_dumps(), rebuilding the special
    calibre types via the from_json object hook.'''
    import json
    text = raw.decode('utf-8') if isinstance(raw, bytes) else raw
    return json.loads(text, object_hook=from_json)
def make_config_dir():
    '''Ensure the calibre plugin directory (and therefore the config
    directory above it) exists.'''
    # exist_ok avoids the TOCTOU race of the old exists()+makedirs() pair
    # when two calibre processes start simultaneously
    os.makedirs(plugin_dir, mode=CONFIG_DIR_MODE, exist_ok=True)
class Option:
    '''Description of a single preference: its name, optional command line
    switches, type information and default value.'''

    def __init__(self, name, switches=[], help='', type=None, choices=None,
                 check=None, group=None, default=None, action=None, metavar=None):
        # NOTE: the mutable default for switches is safe only because it is
        # never mutated, merely stored.
        if choices:
            type = 'choice'
        self.name = name
        self.switches = switches
        # %default in help text is replaced eagerly with the repr of the default
        self.help = help.replace('%default', repr(default)) if help else None
        self.type = type
        if self.type is None and action is None and choices is None:
            # Infer a numeric type from the default; bool is excluded because
            # isinstance(True, numbers.Integral) is True
            if isinstance(default, float):
                self.type = 'float'
            elif isinstance(default, numbers.Integral) and not isinstance(default, bool):
                self.type = 'int'
        self.choices = choices
        self.check = check
        self.group = group
        self.default = default
        self.action = action
        self.metavar = metavar

    def __eq__(self, other):
        # Equality is by name only, so an Option also compares equal to its
        # bare name string; this makes `in` and list.index() work with names.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable in Python 3 — they are only ever stored in lists here.
        return self.name == getattr(other, 'name', other)

    def __repr__(self):
        return 'Option: '+self.name

    def __str__(self):
        return repr(self)
class OptionValues:
    '''Plain namespace holding parsed option values as attributes.'''

    def copy(self):
        # Deep copy so mutable values (lists/dicts) are not shared
        return deepcopy(self)
class OptionSet:
    '''A collection of Option objects with support for option groups,
    defaults, an optparse command line parser and (de)serialization of
    option values to/from JSON or legacy executable-python strings.'''

    OVERRIDE_PAT = re.compile(r'#{3,100} Override Options #{15}(.*?)#{3,100} End Override #{3,100}',
            re.DOTALL|re.IGNORECASE)

    def __init__(self, description=''):
        self.description = description
        self.defaults = {}
        self.preferences = []
        self.group_list = []
        self.groups = {}
        self.set_buffer = {}
        self.loads_pat = None

    def has_option(self, name_or_option_object):
        '''Return True if an option with the given name (or equal to the
        given Option object) exists in this set.'''
        if name_or_option_object in self.preferences:
            return True
        for p in self.preferences:
            if p.name == name_or_option_object:
                return True
        return False

    def get_option(self, name_or_option_object):
        '''Return the matching Option, or None when no option matches.'''
        try:
            # Option.__eq__ compares by name, so index() matches bare names
            # too. Fix: the old code tested `idx > -1`, but list.index()
            # never returns -1 — it raises ValueError for a missing name,
            # which made the intended None fallback unreachable.
            idx = self.preferences.index(name_or_option_object)
        except ValueError:
            return None
        return self.preferences[idx]

    def add_group(self, name, description=''):
        '''Register an option group; returns add_opt() pre-bound to it.'''
        if name in self.group_list:
            raise ValueError('A group by the name %s already exists in this set'%name)
        self.groups[name] = description
        self.group_list.append(name)
        return partial(self.add_opt, group=name)

    def update(self, other):
        '''Merge the groups and preferences of another OptionSet into this
        one; preferences from *other* replace same-named ones here.'''
        for name in other.groups.keys():
            self.groups[name] = other.groups[name]
            if name not in self.group_list:
                self.group_list.append(name)
        for pref in other.preferences:
            if pref in self.preferences:
                self.preferences.remove(pref)
            self.preferences.append(pref)

    def smart_update(self, opts1, opts2):
        '''
        Updates the preference values in opts1 using only the non-default preference values in opts2.
        '''
        for pref in self.preferences:
            new = getattr(opts2, pref.name, pref.default)
            if new != pref.default:
                setattr(opts1, pref.name, new)

    def remove_opt(self, name):
        '''Remove the option with the given name, if present.'''
        if name in self.preferences:
            self.preferences.remove(name)

    def add_opt(self, name, switches=[], help=None, type=None, choices=None,
                group=None, default=None, action=None, metavar=None):
        '''
        Add an option to this section.

        :param name: The name of this option. Must be a valid Python identifier.
                     Must also be unique in this OptionSet and all its subsets.
        :param switches: List of command line switches for this option
                         (as supplied to :module:`optparse`). If empty, this
                         option will not be added to the command line parser.
        :param help: Help text.
        :param type: Type checking of option values. Supported types are:
                     `None, 'choice', 'complex', 'float', 'int', 'string'`.
        :param choices: List of strings or `None`.
        :param group: Group this option belongs to. You must previously
                      have created this group with a call to :method:`add_group`.
        :param default: The default value for this option.
        :param action: The action to pass to optparse. Supported values are:
                       `None, 'count'`. For choices and boolean options,
                       action is automatically set correctly.
        '''
        # Fix: metavar was previously hardcoded to None here, silently
        # discarding the caller supplied metavar argument.
        pref = Option(name, switches=switches, help=help, type=type, choices=choices,
                group=group, default=default, action=action, metavar=metavar)
        if group is not None and group not in self.groups.keys():
            raise ValueError('Group %s has not been added to this section'%group)
        if pref in self.preferences:
            raise ValueError('An option with the name %s already exists in this set.'%name)
        self.preferences.append(pref)
        self.defaults[name] = default

    def retranslate_help(self):
        '''Re-run the help strings through gettext (used after a language
        change at runtime).'''
        t = _
        for opt in self.preferences:
            if opt.help:
                opt.help = t(opt.help)
                if opt.name == 'use_primary_find_in_search':
                    opt.help = opt.help.format('ñ')

    def option_parser(self, user_defaults=None, usage='', gui_mode=False):
        '''Build an optparse OptionParser exposing every option that defines
        command line switches; stored values in *user_defaults* override the
        built-in defaults.'''
        from calibre.utils.config import OptionParser
        parser = OptionParser(usage, gui_mode=gui_mode)
        groups = defaultdict(lambda : parser)
        for group, desc in self.groups.items():
            groups[group] = parser.add_option_group(group.upper(), desc)

        for pref in self.preferences:
            if not pref.switches:
                continue
            g = groups[pref.group]
            action = pref.action
            if action is None:
                action = 'store'
                # Boolean options get a store_true/store_false action that
                # toggles away from the default
                if pref.default is True or pref.default is False:
                    action = 'store_' + ('false' if pref.default else 'true')
            args = dict(
                    dest=pref.name,
                    help=pref.help,
                    metavar=pref.metavar,
                    type=pref.type,
                    choices=pref.choices,
                    default=getattr(user_defaults, pref.name, pref.default),
                    action=action,
                    )
            g.add_option(*pref.switches, **args)

        return parser

    def get_override_section(self, src):
        '''Return the "Override Options" section of *src*, or the empty
        string when there is none.'''
        match = self.OVERRIDE_PAT.search(src)
        if match:
            return match.group()
        return ''

    def parse_string(self, src):
        '''Parse a serialized options string (JSON, or the legacy executable
        python format) into an OptionValues instance; missing or invalid
        data falls back to the defaults.'''
        options = {}
        if src:
            # Legacy format files start with a '#' comment line
            is_old_style = (isinstance(src, bytes) and src.startswith(b'#')) or (isinstance(src, str) and src.startswith('#'))
            if is_old_style:
                options = parse_old_style(src)
            else:
                try:
                    options = json_loads(src)
                    if not isinstance(options, dict):
                        raise Exception('options is not a dictionary')
                except Exception as err:
                    try:
                        print(f'Failed to parse options string with error: {err}')
                    except Exception:
                        pass
        opts = OptionValues()
        for pref in self.preferences:
            val = options.get(pref.name, pref.default)
            # NOTE(review): relies on __builtins__ being a dict, which is
            # true for imported modules but not for __main__ — confirm if
            # this module is ever executed directly.
            formatter = __builtins__.get(pref.type, None)
            if callable(formatter):
                val = formatter(val)
            setattr(opts, pref.name, val)
        return opts

    def serialize(self, opts, ignore_unserializable=False):
        '''Serialize the values of all known options from *opts* to JSON bytes.'''
        data = {pref.name: getattr(opts, pref.name, pref.default) for pref in self.preferences}
        return json_dumps(data, ignore_unserializable=ignore_unserializable)
class ConfigInterface:
    '''Shared behavior for Config (file backed) and StringConfig (string
    backed): owns an OptionSet and re-exports its common methods.'''

    def __init__(self, description):
        self.option_set = OptionSet(description=description)
        # Convenience aliases that delegate directly to the option set
        self.add_opt = self.option_set.add_opt
        self.add_group = self.option_set.add_group
        self.remove_opt = self.remove = self.option_set.remove_opt
        self.parse_string = self.option_set.parse_string
        self.get_option = self.option_set.get_option
        self.preferences = self.option_set.preferences

    def update(self, other):
        self.option_set.update(other.option_set)

    def option_parser(self, usage='', gui_mode=False):
        # The currently stored values become the parser's defaults
        return self.option_set.option_parser(user_defaults=self.parse(),
                usage=usage, gui_mode=gui_mode)

    def smart_update(self, opts1, opts2):
        self.option_set.smart_update(opts1, opts2)
def retry_on_fail(func, *args, count=10, sleep_time=0.2):
    '''Call func(*args), retrying up to *count* times when Windows reports a
    sharing violation or access denied error (typically an antivirus or
    indexing service briefly holding the file open). FileNotFoundError and
    all other errors propagate immediately; on non-Windows systems every
    OSError propagates.'''
    import time
    ERROR_SHARING_VIOLATION = 32
    ACCESS_DENIED = 5
    transient = (ERROR_SHARING_VIOLATION, ACCESS_DENIED)
    for attempt in range(count):
        try:
            return func(*args)
        except FileNotFoundError:
            raise
        except OSError as e:
            # Windows stupidly gives us an ACCESS_DENIED rather than a
            # ERROR_SHARING_VIOLATION if the file is open
            is_last = attempt >= count - 1
            if not iswindows or is_last or e.winerror not in transient:
                raise
            # Try the operation repeatedly in case something like a virus
            # scanner has opened one of the files (I love windows)
            time.sleep(sleep_time)
def read_data(file_path):
    '''Read the raw bytes of *file_path*, retrying on transient Windows
    file-sharing errors.'''
    def _read():
        with open(file_path, 'rb') as f:
            return f.read()
    return retry_on_fail(_read)
def commit_data(file_path, data):
    '''Atomically write *data* (bytes) to *file_path* via a sibling temp
    file and os.replace(), retrying the replace on transient Windows
    sharing violations.'''
    import tempfile
    bdir = os.path.dirname(file_path)
    os.makedirs(bdir, exist_ok=True, mode=CONFIG_DIR_MODE)
    try:
        with tempfile.NamedTemporaryFile(dir=bdir, prefix=os.path.basename(file_path).split('.')[0] + '-atomic-', delete=False) as f:
            if hasattr(os, 'fchmod'):
                # NamedTemporaryFile creates mode 0o600; widen to the normal
                # umask-derived permissions
                os.fchmod(f.fileno(), 0o666 & ~get_umask())
            f.write(data)
        retry_on_fail(os.replace, f.name, file_path)
    finally:
        # NameError is suppressed because f is unbound if NamedTemporaryFile
        # itself failed; FileNotFoundError because a successful replace
        # already removed the temp name.
        with suppress(FileNotFoundError, NameError):
            os.remove(f.name)
class Config(ConfigInterface):
    '''
    A file based configuration.
    '''

    def __init__(self, basename, description=''):
        ConfigInterface.__init__(self, description)
        self.filename_base = basename

    @property
    def config_file_path(self):
        # Current (JSON) storage location; the legacy location is the same
        # path without the trailing .json
        return os.path.join(config_dir, self.filename_base + '.py.json')

    def parse(self):
        '''Read and parse the config file into an OptionValues, transparently
        migrating a legacy .py config file to the JSON format when found.'''
        src = ''
        migrate = False
        path = self.config_file_path
        with suppress(FileNotFoundError):
            src_bytes = read_data(path)
            try:
                src = src_bytes.decode('utf-8')
            except ValueError:
                print("Failed to parse", path)
                traceback.print_exc()
        if not src:
            # Fall back to the legacy file (same path without .json)
            path = path.rpartition('.')[0]
            from calibre.utils.shared_file import share_open
            try:
                with share_open(path, 'rb') as f:
                    src = f.read().decode('utf-8')
            except Exception:
                pass
            else:
                migrate = bool(src)
        ans = self.option_set.parse_string(src)
        if migrate:
            # Rewrite the legacy data in the new JSON format
            new_src = self.option_set.serialize(ans, ignore_unserializable=True)
            commit_data(self.config_file_path, new_src)
        return ans

    def set(self, name, val):
        '''Set option *name* to *val* and persist it (read-modify-write of
        the whole config file).'''
        if not self.option_set.has_option(name):
            raise ValueError('The option %s is not defined.'%name)
        if not os.path.exists(config_dir):
            make_config_dir()
        src = b''
        with suppress(FileNotFoundError):
            src = read_data(self.config_file_path)
        opts = self.option_set.parse_string(src)
        setattr(opts, name, val)
        src = self.option_set.serialize(opts)
        if isinstance(src, str):
            src = src.encode('utf-8')
        commit_data(self.config_file_path, src)
class StringConfig(ConfigInterface):
    '''A configuration whose backing store is an in-memory string rather
    than a file on disk.'''

    def __init__(self, src, description=''):
        ConfigInterface.__init__(self, description)
        self.set_src(src)

    def set_src(self, src):
        # The backing store is always kept as str
        self.src = src.decode('utf-8') if isinstance(src, bytes) else src

    def parse(self):
        '''Parse the stored string into an OptionValues instance.'''
        return self.option_set.parse_string(self.src)

    def set(self, name, val):
        '''Set option *name* to *val*, re-serializing into the backing string.'''
        if not self.option_set.has_option(name):
            raise ValueError('The option %s is not defined.'%name)
        parsed = self.option_set.parse_string(self.src)
        setattr(parsed, name, val)
        self.set_src(self.option_set.serialize(parsed))
class ConfigProxy:
    '''
    A caching proxy around a Config object, so that widely used settings do
    not cause a file read on every access. Supports dict-style item access.
    '''

    def __init__(self, config):
        self.__config = config
        self.__opts = None

    @property
    def defaults(self):
        return self.__config.option_set.defaults

    def refresh(self):
        # Re-read the backing store, discarding any cached values
        self.__opts = self.__config.parse()

    def retranslate_help(self):
        self.__config.option_set.retranslate_help()

    def __getitem__(self, key):
        return self.get(key)

    def __setitem__(self, key, val):
        return self.set(key, val)

    def __delitem__(self, key):
        # Deleting a key resets it to its default value
        self.set(key, self.defaults[key])

    def __ensure_loaded(self):
        if self.__opts is None:
            self.refresh()

    def get(self, key):
        self.__ensure_loaded()
        return getattr(self.__opts, key)

    def set(self, key, val):
        self.__ensure_loaded()
        setattr(self.__opts, key, val)
        return self.__config.set(key, val)

    def help(self, key):
        return self.__config.get_option(key).help
def create_global_prefs(conf_obj=None):
    '''Create (or populate) the Config object holding calibre wide
    preferences. When *conf_obj* is None a new Config named "global" is
    created; otherwise the options are added to the passed-in object.'''
    c = Config('global', 'calibre wide preferences') if conf_obj is None else conf_obj
    c.add_opt('database_path',
              default=os.path.expanduser('~/library1.db'),
              help=_('Path to the database in which books are stored'))
    c.add_opt('filename_pattern', default='(?P<title>.+) - (?P<author>[^_]+)',
              help=_('Pattern to guess metadata from filenames'))
    c.add_opt('isbndb_com_key', default='',
              help=_('Access key for isbndb.com'))
    c.add_opt('network_timeout', default=5,
              help=_('Default timeout for network operations (seconds)'))
    c.add_opt('library_path', default=None,
              help=_('Path to folder in which your library of books is stored'))
    c.add_opt('language', default=None,
              help=_('The language in which to display the user interface'))
    c.add_opt('output_format', default='EPUB',
              help=_('The default output format for e-book conversions. When auto-converting'
                  ' to send to a device this can be overridden by individual device preferences.'
                  ' These can be changed by right clicking the device icon in calibre and'
                  ' choosing "Configure".'))
    c.add_opt('input_format_order', default=['EPUB', 'AZW3', 'MOBI', 'LIT', 'PRC',
        'FB2', 'HTML', 'HTM', 'XHTM', 'SHTML', 'XHTML', 'ZIP', 'DOCX', 'ODT', 'RTF', 'PDF',
        'TXT'],
              help=_('Ordered list of formats to prefer for input.'))
    c.add_opt('read_file_metadata', default=True,
              help=_('Read metadata from files'))
    c.add_opt('worker_process_priority', default='normal',
              help=_('The priority of worker processes. A higher priority '
                  'means they run faster and consume more resources. '
                  'Most tasks like conversion/news download/adding books/etc. '
                  'are affected by this setting.'))
    c.add_opt('swap_author_names', default=False,
              help=_('Swap author first and last names when reading metadata'))
    c.add_opt('add_formats_to_existing', default=False,
              help=_('Add new formats to existing book records'))
    c.add_opt('check_for_dupes_on_ctl', default=False,
              help=_('Check for duplicates when copying to another library'))
    c.add_opt('installation_uuid', default=None, help='Installation UUID')
    c.add_opt('new_book_tags', default=[], help=_('Tags to apply to books added to the library'))
    c.add_opt('mark_new_books', default=False, help=_(
        'Mark newly added books. The mark is a temporary mark that is automatically removed when calibre is restarted.'))

    # these are here instead of the gui preferences because calibredb and
    # calibre server can execute searches
    c.add_opt('saved_searches', default={}, help=_('List of named saved searches'))
    c.add_opt('user_categories', default={}, help=_('User-created Tag browser categories'))
    c.add_opt('manage_device_metadata', default='manual',
        help=_('How and when calibre updates metadata on the device.'))
    c.add_opt('limit_search_columns', default=False,
        help=_('When searching for text without using lookup '
        'prefixes, as for example, Red instead of title:Red, '
        'limit the columns searched to those named below.'))
    c.add_opt('limit_search_columns_to',
        default=['title', 'authors', 'tags', 'series', 'publisher'],
        help=_('Choose columns to be searched when not using prefixes, '
            'as for example, when searching for Red instead of '
            'title:Red. Enter a list of search/lookup names '
            'separated by commas. Only takes effect if you set the option '
            'to limit search columns above.'))
    c.add_opt('use_primary_find_in_search', default=True,
        help=_('Characters typed in the search box will match their '
               'accented versions, based on the language you have chosen '
               'for the calibre interface. For example, in '
               'English, searching for n will match both {} and n, but if '
               'your language is Spanish it will only match n. Note that '
               'this is much slower than a simple search on very large '
               'libraries. Also, this option will have no effect if you turn '
               'on case-sensitive searching.'))
    c.add_opt('case_sensitive', default=False, help=_(
        'Make searches case-sensitive'))

    c.add_opt('numeric_collation', default=False,
        # Fix: added the missing space after "Book 2" — the adjacent string
        # literals previously concatenated to '..."Book 2"will sort...'
        help=_('Recognize numbers inside text when sorting. Setting this '
            'means that when sorting on text fields like title the text "Book 2" '
            'will sort before the text "Book 100". Note that setting this '
            'can cause problems with text that starts with numbers and is '
            'a little slower.'))

    c.add_opt('migrated', default=False, help='For Internal use. Don\'t modify.')
    return c
# Shared global preferences object; reads are cached inside the proxy
prefs = ConfigProxy(create_global_prefs())
if prefs['installation_uuid'] is None:
    # First run: generate a stable identifier for this installation
    import uuid
    prefs['installation_uuid'] = str(uuid.uuid4())
# Read tweaks
def tweaks_file():
    # Path of the JSON file storing the user's customized tweaks
    return os.path.join(config_dir, 'tweaks.json')
def make_unicode(obj):
    '''Recursively convert every bytes object inside *obj* to str. Lists
    and tuples both come back as lists; dict keys and values are converted.'''
    if isinstance(obj, bytes):
        try:
            return obj.decode('utf-8')
        except UnicodeDecodeError:
            return obj.decode(preferred_encoding, errors='replace')
    if isinstance(obj, (list, tuple)):
        return [make_unicode(item) for item in obj]
    if isinstance(obj, dict):
        return {make_unicode(k): make_unicode(v) for k, v in iteritems(obj)}
    return obj
def normalize_tweak(val):
    '''Canonicalize a tweak value for comparison: lists/tuples become
    tuples (recursively), dict values are normalized, scalars pass through.'''
    if isinstance(val, (list, tuple)):
        return tuple(normalize_tweak(item) for item in val)
    if isinstance(val, dict):
        return {k: normalize_tweak(v) for k, v in iteritems(val)}
    return val
def write_custom_tweaks(tweaks_dict):
    '''Persist only the tweaks whose values differ from the bundled
    defaults into tweaks.json.'''
    make_config_dir()
    tweaks_dict = make_unicode(tweaks_dict)
    defaults = exec_tweaks(default_tweaks_raw())
    changed = {
        key: val for key, val in iteritems(tweaks_dict)
        if not (key in defaults and normalize_tweak(val) == normalize_tweak(defaults[key]))
    }
    with open(tweaks_file(), 'wb') as f:
        f.write(json_dumps(changed))
def exec_tweaks(path):
    '''Execute a tweaks file (a filesystem path, or raw bytes of source)
    and return a dict of the names it defines.

    SECURITY NOTE: this exec()s the file contents; it must only ever be
    fed files from the trusted local configuration directory.
    '''
    if isinstance(path, bytes):
        raw, fname = path, '<string>'
    else:
        with open(path, 'rb') as f:
            raw = f.read()
            fname = f.name
    namespace = {}
    exec(compile(raw, fname, 'exec'), {'__file__': fname}, namespace)
    return namespace
def read_custom_tweaks():
    '''Return the user's customized tweaks as a dict, migrating an old
    style executable tweaks.py file to tweaks.json when necessary.'''
    make_config_dir()
    tf = tweaks_file()
    ans = {}
    if os.path.exists(tf):
        with open(tf, 'rb') as f:
            raw = f.read()
        raw = raw.strip()
        if not raw:
            return ans
        try:
            return json_loads(raw)
        except Exception:
            # Corrupt tweaks.json: report and fall back to empty tweaks
            import traceback
            traceback.print_exc()
            return ans
    # No JSON file yet: read the legacy executable tweaks.py and persist it
    # in the new JSON format
    old_tweaks_file = tf.rpartition('.')[0] + '.py'
    if os.path.exists(old_tweaks_file):
        ans = exec_tweaks(old_tweaks_file)
        ans = make_unicode(ans)
        write_custom_tweaks(ans)
    return ans
def default_tweaks_raw():
    # Raw bytes of the bundled default_tweaks.py, never the user's override
    return P('default_tweaks.py', data=True, allow_user_override=False)
def read_tweaks():
    '''Return the default tweaks overlaid with any readable user
    customizations; unreadable customizations are ignored.'''
    ans = exec_tweaks(default_tweaks_raw())
    try:
        ans.update(read_custom_tweaks())
    except Exception:
        pass
    return ans
# Global tweaks dict: bundled defaults overlaid with user customizations
tweaks = read_tweaks()


def migrate_tweaks_to_prefs():
    # This must happen after the tweaks are loaded. Moves the
    # numeric_collation tweak into the global prefs.
    if 'numeric_collation' in tweaks:
        prefs['numeric_collation'] = tweaks.pop('numeric_collation', False)
        write_custom_tweaks(tweaks)


migrate_tweaks_to_prefs()
def reset_tweaks_to_default():
    '''Discard all customizations in the in-memory tweaks dict, restoring
    the bundled defaults.'''
    fresh = exec_tweaks(default_tweaks_raw())
    tweaks.clear()
    tweaks.update(fresh)
class Tweak:
    '''Context manager that temporarily overrides a single tweak value,
    restoring the previous value on exit.'''

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __enter__(self):
        self.origval = tweaks[self.name]
        tweaks[self.name] = self.value

    def __exit__(self, *args):
        tweaks[self.name] = self.origval
| 26,463 | Python | .py | 607 | 33.998353 | 135 | 0.605651 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,128 | unrar.py | kovidgoyal_calibre/src/calibre/utils/unrar.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import shutil
from io import BytesIO
from calibre.constants import filesystem_encoding, iswindows
from calibre.ptempfile import PersistentTemporaryFile, TemporaryDirectory
from calibre.utils.filenames import make_long_path_useable
from polyglot.builtins import string_or_bytes
def as_unicode(x):
    '''Decode *x* from the filesystem encoding if it is bytes; str passes
    through unchanged.'''
    return x.decode(filesystem_encoding) if isinstance(x, bytes) else x
class StreamAsPath:
    '''Context manager exposing a stream (or a path) as a filesystem path,
    since the unrar backend operates on paths. If the stream has no usable
    backing file, its contents are copied to a temporary file that is
    removed on exit.'''

    def __init__(self, stream):
        self.stream = stream

    def __enter__(self):
        self.temppath = None
        if isinstance(self.stream, string_or_bytes):
            # Already a path
            return as_unicode(self.stream)
        name = getattr(self.stream, 'name', None)
        if name and os.access(name, os.R_OK):
            # A file object with a readable backing file: use it directly
            return as_unicode(name)
        # No backing file: spool to a temp file, restoring the stream position
        pos = self.stream.tell()
        with PersistentTemporaryFile('for-unar', 'wb') as f:
            shutil.copyfileobj(self.stream, f)
        self.stream.seek(pos)
        self.temppath = f.name
        return as_unicode(f.name)

    def __exit__(self, *a):
        if self.temppath is not None:
            # Best effort cleanup of the spool file
            try:
                os.remove(self.temppath)
            except OSError:
                pass
        self.temppath = None
def extract(path_or_stream, location):
    '''Extract all members of the RAR archive into the folder *location*.'''
    from unrardll import extract as unrar_extract
    with StreamAsPath(path_or_stream) as path:
        return unrar_extract(make_long_path_useable(path), make_long_path_useable(location, threshold=0))
def names(path_or_stream):
    '''Yield the names of all useful (non-directory) members of the archive.'''
    from unrardll import names as unrar_names
    with StreamAsPath(path_or_stream) as path:
        yield from unrar_names(make_long_path_useable(path), only_useful=True)
def headers(path_or_stream):
    '''Yield the header dict of every useful member of the archive.'''
    from unrardll import headers as unrar_headers, is_useful
    with StreamAsPath(path_or_stream) as path:
        yield from (h for h in unrar_headers(make_long_path_useable(path)) if is_useful(h))
def comment(path_or_stream):
    '''Return the archive-level comment of the RAR file.'''
    from unrardll import comment as unrar_comment
    with StreamAsPath(path_or_stream) as path:
        return unrar_comment(make_long_path_useable(path))
def extract_member(
        path_or_stream, match=re.compile(r'\.(jpg|jpeg|gif|png)\s*$', re.I), name=None):
    '''Extract a single member, selected either by exact *name* or by the
    *match* regex on the member filename. Returns (name, data) or None if
    nothing matched. Filenames are compared with / separators on Windows.'''
    from unrardll import extract_member as unrar_extract_member

    if iswindows and name is not None:
        name = name.replace(os.sep, '/')

    def is_match(header):
        fname = header['filename']
        if iswindows:
            fname = fname.replace(os.sep, '/')
        if name is not None and fname == name:
            return True
        return match is not None and match.search(fname) is not None

    with StreamAsPath(path_or_stream) as path:
        found_name, data = unrar_extract_member(make_long_path_useable(path), is_match)
        if found_name is not None:
            return found_name, data
def extract_members(path_or_stream, callback):
    '''Extract members chosen by *callback*; see unrardll.extract_members.'''
    from unrardll import extract_members as unrar_extract_members
    with StreamAsPath(path_or_stream) as path:
        unrar_extract_members(make_long_path_useable(path), callback)
def extract_first_alphabetically(stream):
    '''Extract the image member whose name sorts first in the archive.'''
    from calibre.libunzip import sort_key
    image_exts = {'png', 'jpg', 'jpeg', 'gif', 'webp'}
    candidates = [
        x for x in names(stream)
        if os.path.splitext(x)[1][1:].lower() in image_exts]
    candidates.sort(key=sort_key)
    return extract_member(stream, name=candidates[0], match=None)
def extract_cover_image(stream):
    '''Extract the first member (in sort order) whose name looks like a
    usable cover image, or None if there is none.'''
    from calibre.libunzip import name_ok, sort_key
    for candidate in sorted(names(stream), key=sort_key):
        if name_ok(candidate):
            return extract_member(stream, name=candidate, match=None)
# Test normal RAR file {{{
def test_basic():
stream = BytesIO( # {{{
b"Rar!\x1a\x07\x00\xcf\x90s\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x14\xe7z\x00\x80#\x00\x17\x00\x00\x00\r\x00\x00\x00\x03\xc2\xb3\x96o\x00\x00\x00\x00\x1d3\x03\x00\x00\x00\x00\x00CMT\x0c\x00\x8b\xec\x8e\xef\x14\xf6\xe6h\x04\x17\xff\xcd\x0f\xffk9b\x11]^\x80\xd3dt \x90+\x00\x14\x00\x00\x00\x08\x00\x00\x00\x03\xf1\x84\x93\\\xb9]yA\x1d3\t\x00\xa4\x81\x00\x001\\sub-one\x00\xc0\x0c\x00\x8f\xec\x89\xfe.JM\x86\x82\x0c_\xfd\xfd\xd7\x11\x1a\xef@\x9eHt \x80'\x00\x0e\x00\x00\x00\x04\x00\x00\x00\x03\x9f\xa8\x17\xf8\xaf]yA\x1d3\x07\x00\xa4\x81\x00\x00one.txt\x00\x08\xbf\x08\xae\xf3\xca\x87\xfeo\xfe\xd2n\x80-Ht \x82:\x00\x18\x00\x00\x00\x10\x00\x00\x00\x03\xa86\x81\xdf\xf9fyA\x1d3\x1a\x00\xa4\x81\x00\x00\xe8\xaf\xb6\xe6\xaf\x94\xe5\xb1\x81.txt\x00\x8bh\xf6\xd4kA\\.\x00txt\x0c\x00\x8b\xec\x8e\xef\x14\xf6\xe2l\x91\x189\xff\xdf\xfe\xc2\xd3:g\x9a\x19F=cYt \x928\x00\x11\x00\x00\x00\x08\x00\x00\x00\x03\x7f\xd6\xb6\x7f\xeafyA\x1d3\x16\x00\xa4\x81\x00\x00F\xc3\xbc\xc3\x9fe.txt\x00\x01\x00F\xfc\xdfe\x00.txt\x00\xc0<D\xfe\xc8\xef\xbc\xd1\x04I?\xfd\xff\xdbF)]\xe8\xb9\xe1t \x90/\x00\x13\x00\x00\x00\x08\x00\x00\x00\x03\x1a$\x932\xc2]yA\x1d3\r\x00\xa4\x81\x00\x002\\sub-two.txt\x00\xc0\x10\x00S\xec\xcb\x7f\x8b\xa5(\x0b\x01\xcb\xef\xdf\xf6t\x89\x97z\x0eft \x90)\x00\r\x00\x00\x00\r\x00\x00\x00\x03c\x89K\xd3\xc8fyA\x140\x07\x00\xff\xa1\x00\x00symlink\x00\xc02/sub-two.txt\xeb\x86t\xe0\x90#\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\xb9]yA\x140\x01\x00\xedA\x00\x001\x00\xc0\xe0Dt\xe0\x90#\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\xc2]yA\x140\x01\x00\xedA\x00\x002\x00\xc0u\xa1t \x80,\x00\r\x00\x00\x00\r\x00\x00\x00\x03T\xea\x04\xca\xe6\x84yA\x140\x0c\x00\xa4\x81\x00\x00uncompresseduncompressed\n\xda\x10t \x900\x00\x0e\x00\x00\x00\x04\x00\x00\x00\x035K.\xa6\x18\x85yA\x1d5\x0e\x00\xa4\x81\x00\x00max-compressed\x00\xc0\x00\x08\xbf\x08\xae\xf2\xcc\x01s\xf8\xff\xec\x96\xe8\xc4={\x00@\x07\x00") # noqa }}}
tdata = {
'1': b'',
'1/sub-one': b'sub-one\n',
'2': b'',
'2/sub-two.txt': b'sub-two\n',
'F\xfc\xdfe.txt': b'unicode\n',
'max-compressed': b'max\n',
'one.txt': b'one\n',
'symlink': b'2/sub-two.txt',
'uncompressed': b'uncompressed\n',
'\u8bf6\u6bd4\u5c41.txt': b'chinese unicode\n'}
def do_test(stream):
    # Exercise the full unrar API against the in-memory test archive:
    # comment reading, member listing, extraction to disk and
    # single-member extraction. Raises ValueError on any mismatch.
    c = comment(stream)
    expected = 'some comment\n'
    if c != expected:
        raise ValueError(f'Comment not read: {c!r} != {expected!r}')
    # Directories ('1', '2') and the symlink are not expected in names()
    if set(names(stream)) != {
        '1/sub-one', 'one.txt', '2/sub-two.txt', '诶比屁.txt', 'Füße.txt',
        'uncompressed', 'max-compressed'}:
        raise ValueError('Name list does not match')
    with TemporaryDirectory('test-unrar') as tdir:
        # Extract everything, then compare each regular file against tdata
        extract(stream, tdir)
        for name in tdata:
            if name not in '1 2 symlink'.split():
                with open(os.path.join(tdir, name), 'rb') as s:
                    if s.read() != tdata[name]:
                        raise ValueError('Did not extract %s properly' % name)
    for name in tdata:
        if name not in '1 2 symlink'.split():
            # extract_member() returns (header, data) for a single member
            d = extract_member(stream, name=name)
            if d is None or d[1] != tdata[name]:
                raise ValueError(
                    f'Failed to extract {name} {d!r} != {tdata[name]!r}')
do_test(stream)
with PersistentTemporaryFile('test-unrar') as f:
shutil.copyfileobj(stream, f)
with open(f.name, 'rb') as stream:
do_test(stream)
os.remove(f.name)
if __name__ == '__main__':
    # Run the self-test when this module is executed directly
    test_basic()
| 7,302 | Python | .py | 134 | 46.044776 | 1,932 | 0.648926 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,129 | terminal.py | kovidgoyal_calibre/src/calibre/utils/terminal.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import sys
from calibre.constants import iswindows
from calibre.prints import is_binary
from polyglot.builtins import iteritems
if iswindows:
    import ctypes.wintypes

    # Mirrors the Win32 CONSOLE_SCREEN_BUFFER_INFO struct from wincon.h,
    # used with kernel32.GetConsoleScreenBufferInfo to query console state.
    class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
        _fields_ = [
            ('dwSize', ctypes.wintypes._COORD),
            ('dwCursorPosition', ctypes.wintypes._COORD),
            ('wAttributes', ctypes.wintypes.WORD),
            ('srWindow', ctypes.wintypes._SMALL_RECT),
            ('dwMaximumWindowSize', ctypes.wintypes._COORD)
        ]
def fmt(code):
    # Build an ANSI SGR escape sequence, e.g. fmt(31) -> '\033[31m'
    return f'\033[{code:d}m'
def polyglot_write(stream, is_binary, encoding, text):
    """Write *text* (str or bytes) to *stream*, converting as needed.

    A binary stream gets bytes (str is encoded with *encoding*); a text
    stream gets str, with bytes routed to its underlying ``.buffer`` when
    one exists, else decoded as UTF-8. Returns the stream's write() result.
    """
    if isinstance(text, bytes):
        if is_binary:
            return stream.write(text)
        underlying = getattr(stream, 'buffer', None)
        if underlying is not None:
            return underlying.write(text)
        return stream.write(text.decode('utf-8', 'replace'))
    if is_binary:
        return stream.write(text.encode(encoding, 'replace'))
    return stream.write(text)
# Reverse maps: ANSI SGR code -> name. Codes 1-8 are text attributes,
# 41-47 background colours, 31-37 foreground colours. The forward maps
# below (name -> escape sequence) are what the rest of this module uses.
RATTRIBUTES = dict(
    zip(range(1, 9), (
        'bold',
        'dark',
        '',
        'underline',
        'blink',
        '',
        'reverse',
        'concealed'
    )
))
# Empty-string entries are unused SGR codes; drop them from the forward map
ATTRIBUTES = {v:fmt(k) for k, v in iteritems(RATTRIBUTES)}
del ATTRIBUTES['']
RBACKGROUNDS = dict(
    zip(range(41, 48), (
        'red',
        'green',
        'yellow',
        'blue',
        'magenta',
        'cyan',
        'white'
    ),
))
BACKGROUNDS = {v:fmt(k) for k, v in iteritems(RBACKGROUNDS)}
RCOLORS = dict(
    zip(range(31, 38), (
        'red',
        'green',
        'yellow',
        'blue',
        'magenta',
        'cyan',
        'white',
    ),
))
COLORS = {v:fmt(k) for k, v in iteritems(RCOLORS)}
# SGR 0: reset all attributes and colours
RESET = fmt(0)
def colored(text, fg=None, bg=None, bold=False):
    """Wrap *text* in ANSI escape codes for the given foreground colour,
    background colour and bold attribute, ending with a reset sequence.

    Accepts either str or bytes; the escapes are matched to the input type.
    """
    codes = []
    if fg is not None:
        codes.append(COLORS[fg])
    if bg is not None:
        codes.append(BACKGROUNDS[bg])
    if bold:
        codes.append(ATTRIBUTES['bold'])
    start = ''.join(codes)
    end = RESET
    if isinstance(text, bytes):
        start = start.encode('ascii')
        end = end.encode('ascii')
    return start + text + end
class Detect:
    """Probe *stream* capabilities: binary vs text mode, tty-ness, and
    whether ANSI escape codes can be emitted to it."""

    def __init__(self, stream):
        self.stream = stream or sys.stdout
        # is_binary() is a calibre helper (calibre.prints)
        self.is_binary = is_binary(self.stream)
        self.isatty = getattr(self.stream, 'isatty', lambda : False)()
        # CALIBRE_FORCE_ANSI forces colour output even when not on a tty
        force_ansi = 'CALIBRE_FORCE_ANSI' in os.environ
        if not self.isatty and force_ansi:
            self.isatty = True
        # Windows 10+ consoles understand ANSI escapes natively
        self.isansi = force_ansi or not iswindows or (iswindows and sys.getwindowsversion().major >= 10)
class ColoredStream(Detect):
    """Context manager that colours everything written to the wrapped
    stream while the ``with`` block is active, resetting on exit."""

    def __init__(self, stream=None, fg=None, bg=None, bold=False):
        Detect.__init__(self, stream)
        self.fg, self.bg, self.bold = fg, bg, bold

    def cwrite(self, what):
        # Match the payload type to the stream: bytes for binary streams,
        # text for everything else.
        if self.is_binary:
            if not isinstance(what, bytes):
                what = what.encode('utf-8')
        elif isinstance(what, bytes):
            what = what.decode('utf-8', 'replace')
        self.stream.write(what)

    def __enter__(self):
        if self.isatty and self.isansi:
            # Emit attribute first, then background, then foreground
            if self.bold:
                self.cwrite(ATTRIBUTES['bold'])
            if self.bg is not None:
                self.cwrite(BACKGROUNDS[self.bg])
            if self.fg is not None:
                self.cwrite(COLORS[self.fg])
        return self

    def __exit__(self, *args, **kwargs):
        if not self.isatty:
            return
        if not self.fg and not self.bg and not self.bold:
            return
        if self.isansi:
            self.cwrite(RESET)
        self.stream.flush()
class ANSIStream(Detect):
    """Stream wrapper that passes ANSI escapes through when writing to a
    capable tty and strips them otherwise."""

    ANSI_RE = r'\033\[((?:\d|;)*)([a-zA-Z])'

    def __init__(self, stream=None):
        super().__init__(stream)
        self.encoding = getattr(self.stream, 'encoding', None) or 'utf-8'
        # Compiled patterns are cached lazily, one for str and one for bytes
        self._ansi_re_bin = self._ansi_re_unicode = None

    def ansi_re(self, binary=False):
        attr = '_ansi_re_bin' if binary else '_ansi_re_unicode'
        pat = getattr(self, attr)
        if pat is None:
            src = self.ANSI_RE.encode('ascii') if binary else self.ANSI_RE
            pat = re.compile(src)
            setattr(self, attr, pat)
        return pat

    def write(self, text):
        if self.isatty and self.isansi:
            return self.polyglot_write(text)
        return self.strip_and_write(text)

    def polyglot_write(self, text):
        return polyglot_write(self.stream, self.is_binary, self.encoding, text)

    def strip_and_write(self, text):
        # Remove all ANSI escape sequences before writing
        binary = isinstance(text, bytes)
        empty = b'' if binary else ''
        return self.polyglot_write(self.ansi_re(binary).sub(empty, text))
def windows_terminfo():
    """Query the Win32 console attached to stdout for its screen buffer
    info (size, cursor position, window rectangle); raises if stdout is
    not a console."""
    from ctypes import Structure, byref
    from ctypes.wintypes import SHORT, WORD

    class COORD(Structure):
        """struct in wincon.h"""
        _fields_ = [
            ('X', SHORT),
            ('Y', SHORT),
        ]

    class SMALL_RECT(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT),
        ]

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
    csbi = CONSOLE_SCREEN_BUFFER_INFO()
    import msvcrt
    # Translate the CRT file descriptor into a Win32 HANDLE for kernel32
    file_handle = msvcrt.get_osfhandle(sys.stdout.fileno())
    from ctypes import windll
    success = windll.kernel32.GetConsoleScreenBufferInfo(file_handle,
                                                         byref(csbi))
    if not success:
        raise Exception('stdout is not a console?')
    return csbi
def get_term_geometry():
    """Return the controlling terminal's size as (lines, cols), or
    (None, None) when it cannot be determined (POSIX only)."""
    import fcntl
    import struct
    import termios

    def winsz(fd):
        # TIOCGWINSZ fills struct winsize {rows, cols, xpixel, ypixel};
        # only the first two fields are of interest here.
        try:
            return struct.unpack(b'HHHH', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\0'*8))[:2]
        except Exception:
            return None, None

    for stream in (sys.stdin, sys.stdout, sys.stderr):
        lines, cols = winsz(stream.fileno())
        if lines is not None:
            return lines, cols
    # None of the standard streams worked; ask the controlling terminal
    try:
        fd = os.open(os.ctermid(), os.O_RDONLY)
        try:
            lines, cols = winsz(fd)
            if lines is not None:
                return lines, cols
        finally:
            os.close(fd)
    except Exception:
        pass
    return None, None
def geometry():
    """Return the terminal geometry as (columns, lines), falling back to
    the classic 80x25 when it cannot be determined."""
    if iswindows:
        try:
            ti = windows_terminfo()
            return (ti.dwSize.X or 80, ti.dwSize.Y or 25)
        # Was a bare ``except:`` which would also swallow SystemExit and
        # KeyboardInterrupt; narrow it to Exception.
        except Exception:
            return 80, 25
    try:
        # get_term_geometry() returns (lines, cols); callers here expect
        # (cols, lines), hence the swap.
        lines, cols = get_term_geometry()
        if lines is not None:
            return cols, lines
    except Exception:
        pass
    return 80, 25
def test():
    # Visual smoke test: print each colour name in its own colour, normal
    # then bold, followed by non-ASCII text to exercise encoding handling.
    s = ANSIStream()
    text = [colored(t, fg=t)+'. '+colored(t, fg=t, bold=True)+'.' for t in
        ('red', 'yellow', 'green', 'white', 'cyan', 'magenta', 'blue',)]
    s.write('\n'.join(text))
    # NOTE(review): 'f√§llen' looks like mojibake of 'fällen' -- possibly
    # deliberate test data for the replace-on-encode path; confirm.
    u = '\u041c\u0438\u0445\u0430\u0438\u043b f√§llen'
    print()
    s.write(u)
    print()
| 7,766 | Python | .py | 239 | 23.485356 | 104 | 0.554054 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,130 | smtplib.py | kovidgoyal_calibre/src/calibre/utils/smtplib.py | #!/usr/bin/env python
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> print s.help()
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd("vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
# Enhanced debugging support by Kovid Goyal
#
# This was modified from the Python 1.5 library HTTP lib.
import base64
import email.utils
import hmac
import re
import socket
from email.base64mime import body_encode as encode_base64
from functools import partial
from sys import stderr
from polyglot.builtins import string_or_bytes
__all__ = ["SMTPException", "SMTPServerDisconnected", "SMTPResponseException",
"SMTPSenderRefused", "SMTPRecipientsRefused", "SMTPDataError",
"SMTPConnectError", "SMTPHeloError", "SMTPAuthenticationError",
"quoteaddr", "quotedata", "SMTP"]
SMTP_PORT = 25
SMTP_SSL_PORT = 465
CRLF = "\r\n"
_MAXLINE = 8192 # more than 8 times larger than RFC 821, 4.5.3
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
# Exception classes used by this module. Everything raised here derives
# from SMTPException so callers can catch the single base class.
class SMTPException(Exception):
    """Base class for all exceptions raised by this module."""


class SMTPServerDisconnected(SMTPException):
    """Not connected to any SMTP server.
    This exception is raised when the server unexpectedly disconnects,
    or when an attempt is made to use the SMTP instance before
    connecting it to a server.
    """


class SMTPResponseException(SMTPException):
    """Base class for all exceptions that include an SMTP error code.
    These exceptions are generated in some instances when the SMTP
    server returns an error code. The error code is stored in the
    `smtp_code' attribute of the error, and the `smtp_error' attribute
    is set to the error message.
    """

    def __init__(self, code, msg):
        # code: numeric SMTP reply code (e.g. 550); msg: server's error text
        self.smtp_code = code
        self.smtp_error = msg
        self.args = (code, msg)


class SMTPSenderRefused(SMTPResponseException):
    """Sender address refused.
    In addition to the attributes set by on all SMTPResponseException
    exceptions, this sets `sender' to the string that the SMTP refused.
    """

    def __init__(self, code, msg, sender):
        self.smtp_code = code
        self.smtp_error = msg
        self.sender = sender
        self.args = (code, msg, sender)


class SMTPRecipientsRefused(SMTPException):
    """All recipient addresses refused.
    The errors for each recipient are accessible through the attribute
    'recipients', which is a dictionary of exactly the same sort as
    SMTP.sendmail() returns.
    """

    def __init__(self, recipients):
        # recipients: {address: (smtp_code, smtp_error)}
        self.recipients = recipients
        self.args = (recipients,)


class SMTPDataError(SMTPResponseException):
    """The SMTP server didn't accept the data."""


class SMTPConnectError(SMTPResponseException):
    """Error during connection establishment."""


class SMTPHeloError(SMTPResponseException):
    """The server refused our HELO reply."""


class SMTPAuthenticationError(SMTPResponseException):
    """Authentication error.
    Most probably the server didn't accept the username/password
    combination provided.
    """
def quoteaddr(addr):
    """Quote a subset of the email addresses defined by RFC 821.

    Should be able to handle anything rfc822.parseaddr can handle.
    """
    parsed = (None, None)
    try:
        parsed = email.utils.parseaddr(addr)[1]
    except AttributeError:
        pass
    if parsed == (None, None):
        # Parse failure (or AttributeError above): fall back to the raw string
        return f'<{addr}>'
    if parsed is None:
        # The sender explicitly wants an empty return path
        return '<>'
    return f'<{parsed}>'
def _addr_only(addrstring):
displayname, addr = email.utils.parseaddr(addrstring)
if (displayname, addr) == ('', ''):
# parseaddr couldn't parse it, so use it as is.
return addrstring
return addr
def quotedata(data):
    """Quote data for email.

    Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
    Internet CRLF end-of-line.
    """
    # First normalise every line ending (\r\n, \n, bare \r) to CRLF...
    normalised = re.sub(r'(?:\r\n|\n|\r(?!\n))', '\r\n', data)
    # ...then apply RFC 821 dot-transparency to lines starting with '.'
    return re.sub(r'(?m)^\.', '..', normalised)
try:
    import ssl
except ImportError:
    _have_ssl = False
else:
    class SSLFakeFile:
        """A fake file like object that really wraps a SSLObject.
        It only supports what is needed in smtplib.
        """

        def __init__(self, sslobj):
            self.sslobj = sslobj

        def readline(self, size=-1):
            # Read one byte at a time until a newline or *size* is reached.
            # NOTE(review): the locals deliberately shadow the str/chr
            # builtins; kept as-is since this is a byte-for-byte port.
            if size < 0:
                size = None
            str = ""
            chr = None
            while chr != "\n":
                if size is not None and len(str) >= size:
                    break
                chr = self.sslobj.read(1)
                if not chr:
                    break
                str += chr
            return str

        def close(self):
            # The wrapped SSL object owns the connection; nothing to close here
            pass

    _have_ssl = True
class SMTP:
    """This class manages a connection to an SMTP or ESMTP server.
    SMTP Objects:
        SMTP objects have the following attributes:
            helo_resp
                This is the message given by the server in response to the
                most recent HELO command.
            ehlo_resp
                This is the message given by the server in response to the
                most recent EHLO command. This is usually multiline.
            does_esmtp
                This is a True value _after you do an EHLO command_, if the
                server supports ESMTP.
            esmtp_features
                This is a dictionary, which, if the server supports ESMTP,
                will _after you do an EHLO command_, contain the names of the
                SMTP service extensions this server supports, and their
                parameters (if any).
                Note, all extension names are mapped to lower case in the
                dictionary.
        See each method's docstrings for details. In general, there is a
        method of the same name to perform each SMTP command. There is also a
        method called 'sendmail' that will do an entire mail transaction.
        """
    # Class-level defaults, overwritten per instance as the session evolves
    debuglevel = 0
    file = None
    helo_resp = None
    # LMTP subclasses override this with "lhlo"
    ehlo_msg = "ehlo"
    ehlo_resp = None
    does_esmtp = 0
    default_port = SMTP_PORT
    def __init__(self, host='', port=0, local_hostname=None,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                 debug_to=partial(print, file=stderr)):
        """Initialize a new instance.
        If specified, `host' is the name of the remote host to which to
        connect. If specified, `port' specifies the port to which to connect.
        By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
        if the specified `host' doesn't respond correctly. If specified,
        `local_hostname` is used as the FQDN of the local host. By default,
        the local hostname is found using socket.getfqdn(). `debug_to`
        specifies where debug output is written to. By default it is written to
        sys.stderr. You should pass in a print function of your own to control
        where debug output is written.
        """
        self._host = host
        self.timeout = timeout
        self.debug = debug_to
        self.esmtp_features = {}
        if host:
            # NB: a non-empty host makes the constructor connect immediately,
            # so __init__ itself can raise SMTPConnectError
            (code, msg) = self.connect(host, port)
            if code != 220:
                raise SMTPConnectError(code, msg)
        if local_hostname is not None:
            self.local_hostname = local_hostname
        else:
            # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
            # if that can't be calculated, that we should use a domain literal
            # instead (essentially an encoded IP address like [A.B.C.D]).
            fqdn = socket.getfqdn()
            if '.' in fqdn:
                self.local_hostname = fqdn
            else:
                # We can't find an fqdn hostname, so use a domain literal
                addr = '127.0.0.1'
                try:
                    addr = socket.gethostbyname(socket.gethostname())
                except socket.gaierror:
                    pass
                self.local_hostname = '[%s]' % addr
def set_debuglevel(self, debuglevel):
"""Set the debug output level.
A value of 0 means no debug logging. A value of 1 means all interaction
with the server is logged except that long lines are truncated to 100
characters and AUTH messages are censored. A value of 2 or higher means
the complete session is logged.
"""
self.debuglevel = debuglevel
    def _get_socket(self, host, port, timeout):
        # This makes it simpler for SMTP_SSL to use the SMTP connect code
        # and just alter the socket connection bit.
        # Returns a plain TCP socket; SMTP_SSL overrides this to wrap it in TLS.
        if self.debuglevel > 0:
            self.debug('connect:', (host, port))
        return socket.create_connection((host, port), timeout)
    def connect(self, host='localhost', port=0):
        """Connect to a host on a given port.
        If the hostname ends with a colon (`:') followed by a number, and
        there is no port specified, that suffix will be stripped off and the
        number interpreted as the port number to use.
        Note: This method is automatically invoked by __init__, if a host is
        specified during instantiation.
        """
        # Only split on ':' when it occurs exactly once, so bare IPv6
        # literals (which contain several colons) are left untouched.
        if not port and (host.find(':') == host.rfind(':')):
            i = host.rfind(':')
            if i >= 0:
                host, port = host[:i], host[i + 1:]
                try:
                    port = int(port)
                except ValueError:
                    raise OSError("nonnumeric port")
        if not port:
            port = self.default_port
        if self.debuglevel > 0:
            self.debug('connect:', (host, port))
        self._host = host
        self.sock = self._get_socket(host, port, self.timeout)
        # The server greets with a 220 banner on successful connection
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            self.debug("connect:", msg)
        return (code, msg)
def send(self, str):
"""Send `str' to the server."""
if self.debuglevel > 0:
raw = repr(str)
self.debug('send:', raw)
if hasattr(self, 'sock') and self.sock:
try:
self.sock.sendall(str)
except OSError:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please run connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
str = f'{cmd}{CRLF}'
else:
str = f'{cmd} {args}{CRLF}'
self.send(str)
    def getreply(self):
        """Get a reply from the server.
        Returns a tuple consisting of:
          - server response code (e.g. '250', or such, if all goes well)
            Note: returns -1 if it can't read response code.
          - server response string corresponding to response code (multiline
            responses are converted to a single, multiline string).
        Raises SMTPServerDisconnected if end-of-file is reached.
        """
        resp = []
        if self.file is None:
            # NOTE(review): makefile('rb') yields bytes lines, while the
            # comparisons and join below operate on str -- confirm callers
            # (e.g. the SSL path's SSLFakeFile) provide text here.
            self.file = self.sock.makefile('rb')
        while True:
            try:
                # Read one byte past the limit so oversized lines are detectable
                line = self.file.readline(_MAXLINE + 1)
            except OSError as e:
                self.close()
                raise SMTPServerDisconnected("Connection unexpectedly closed: " + str(e))
            if line == '':
                self.close()
                raise SMTPServerDisconnected("Connection unexpectedly closed")
            if self.debuglevel > 0:
                self.debug('reply:', repr(line))
            if len(line) > _MAXLINE:
                raise SMTPResponseException(500, "Line too long.")
            # Reply format: 3-digit code, '-' (continuation) or ' ', then text
            resp.append(line[4:].strip())
            code = line[:3]
            # Check that the error code is syntactically correct.
            # Don't attempt to read a continuation line if it is broken.
            try:
                errcode = int(code)
            except ValueError:
                errcode = -1
                break
            # Check if multiline response.
            if line[3:4] != "-":
                break
        errmsg = "\n".join(resp)
        if self.debuglevel > 0:
            self.debug(f'reply: retcode ({errcode}); Msg: {errmsg}')
        return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.putcmd("helo", name or self.local_hostname)
(code, msg) = self.getreply()
self.helo_resp = msg
return (code, msg)
    def ehlo(self, name=''):
        """ SMTP 'ehlo' command.
        Hostname to send for this command defaults to the FQDN of the local
        host. On a 250 reply, parses the advertised ESMTP extensions into
        self.esmtp_features.
        """
        self.esmtp_features = {}
        self.putcmd(self.ehlo_msg, name or self.local_hostname)
        (code, msg) = self.getreply()
        # According to RFC1869 some (badly written)
        # MTA's will disconnect on an ehlo. Toss an exception if
        # that happens -ddm
        if code == -1 and len(msg) == 0:
            self.close()
            raise SMTPServerDisconnected("Server not connected")
        self.ehlo_resp = msg
        if code != 250:
            return (code, msg)
        self.does_esmtp = 1
        # parse the ehlo response -ddm
        # First line is the server greeting; the rest are extension lines
        resp = self.ehlo_resp.split('\n')
        del resp[0]
        for each in resp:
            # To be able to communicate with as many SMTP servers as possible,
            # we have to take the old-style auth advertisement into account,
            # because:
            # 1) Else our SMTP feature parser gets confused.
            # 2) There are some servers that only advertise the auth methods we
            # support using the old style.
            auth_match = OLDSTYLE_AUTH.match(each)
            if auth_match:
                # This doesn't remove duplicates, but that's no problem
                self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
                        + " " + auth_match.groups(0)[0]
                continue
            # RFC 1869 requires a space between ehlo keyword and parameters.
            # It's actually stricter, in that only spaces are allowed between
            # parameters, but were not going to check for that here. Note
            # that the space isn't present if there are no parameters.
            m = re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*) ?', each)
            if m:
                feature = m.group("feature").lower()
                params = m.string[m.end("feature"):].strip()
                if feature == "auth":
                    self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
                            + " " + params
                else:
                    self.esmtp_features[feature] = params
        return (code, msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
return opt.lower() in self.esmtp_features
    def help(self, args=''):
        """SMTP 'help' command.
        Returns help text from server."""
        self.putcmd("help", args)
        # Only the message text is returned; the reply code is discarded
        return self.getreply()[1]

    def rset(self):
        """SMTP 'rset' command -- resets session."""
        return self.docmd("rset")

    def noop(self):
        """SMTP 'noop' command -- doesn't do anything :>"""
        return self.docmd("noop")
def mail(self, sender, options=[]):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("mail", f"FROM:{quoteaddr(sender)}{optionlist}")
return self.getreply()
def rcpt(self, recip, options=[]):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd("rcpt", f"TO:{quoteaddr(recip)}{optionlist}")
return self.getreply()
    def data(self, msg):
        """SMTP 'DATA' command -- sends message data to server.
        Automatically quotes lines beginning with a period per rfc821.
        Raises SMTPDataError if there is an unexpected reply to the
        DATA command; the return value from this method is the final
        response code received when the all data is sent.
        """
        self.putcmd("data")
        (code, repl) = self.getreply()
        if self.debuglevel > 0:
            self.debug("data:", (code, repl))
        # 354 == 'Start mail input; end with <CRLF>.<CRLF>'
        if code != 354:
            raise SMTPDataError(code, repl)
        else:
            q = quotedata(msg)
            # Terminate the message with CRLF.<CRLF> per RFC 821
            if q[-2:] != CRLF:
                q = q + CRLF
            q = q + "." + CRLF
            self.send(q)
            (code, msg) = self.getreply()
            if self.debuglevel > 0 :
                self.debug("data:", (code, msg))
            return (code, msg)
    def verify(self, address):
        """SMTP 'verify' command -- checks for address validity."""
        # Strip any display name; VRFY takes a bare address
        self.putcmd("vrfy", _addr_only(address))
        return self.getreply()
    # a.k.a.
    vrfy = verify

    def expn(self, address):
        """SMTP 'expn' command -- expands a mailing list."""
        self.putcmd("expn", _addr_only(address))
        return self.getreply()
# some useful methods
def ehlo_or_helo_if_needed(self):
"""Call self.ehlo() and/or self.helo() if needed.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
def login(self, user, password):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPException No suitable authentication method was
found.
"""
def encode_cram_md5(challenge, user, password):
challenge = base64.decodestring(challenge)
if isinstance(password, str): # Added by Kovid, see http://bugs.python.org/issue5285
password = password.encode('utf-8')
response = user + " " + hmac.HMAC(password, challenge).hexdigest()
return encode_base64(response, eol="")
def encode_plain(user, password):
return encode_base64(f"\0{user}\0{password}", eol="")
AUTH_PLAIN = "PLAIN"
AUTH_CRAM_MD5 = "CRAM-MD5"
AUTH_LOGIN = "LOGIN"
self.ehlo_or_helo_if_needed()
if not self.has_extn("auth"):
raise SMTPException("SMTP AUTH extension not supported by server.")
# Authentication methods the server supports:
authlist = self.esmtp_features["auth"].split()
# List of authentication methods we support: from preferred to
# less preferred methods. Except for the purpose of testing the weaker
# ones, we prefer stronger methods like CRAM-MD5:
preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
# Determine the authentication method we'll use
authmethod = None
for method in preferred_auths:
if method in authlist:
authmethod = method
break
if authmethod == AUTH_CRAM_MD5:
(code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
if code == 503:
# 503 == 'Error: already authenticated'
return (code, resp)
(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
elif authmethod == AUTH_PLAIN:
(code, resp) = self.docmd("AUTH",
AUTH_PLAIN + " " + encode_plain(user, password))
elif authmethod == AUTH_LOGIN:
(code, resp) = self.docmd("AUTH",
"{} {}".format(AUTH_LOGIN, encode_base64(user, eol="")))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = self.docmd(encode_base64(password, eol=""))
elif authmethod is None:
raise SMTPException("No suitable authentication method found.")
if code not in (235, 503):
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
raise SMTPAuthenticationError(code, resp)
return (code, resp)
def starttls(self, context=None):
"""Puts the connection to the SMTP server into TLS mode.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
"""
self.ehlo_or_helo_if_needed()
if not self.has_extn("starttls"):
raise SMTPException("STARTTLS extension not supported by server.")
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
if not _have_ssl:
raise RuntimeError("No SSL support included in this Python")
if context is None:
self.sock = ssl.wrap_socket(self.sock)
else:
self.sock = context.wrap_socket(self.sock, server_hostname=self._host)
self.file = SSLFakeFile(self.sock)
# RFC 3207:
# The client MUST discard any knowledge obtained from
# the server, such as the list of SMTP service extensions,
# which was not obtained from the TLS negotiation itself.
self.helo_resp = None
self.ehlo_resp = None
self.esmtp_features = {}
self.does_esmtp = 0
else:
# RFC 3207:
# 501 Syntax error (no parameters allowed)
# 454 TLS not available due to temporary reason
raise SMTPResponseException(resp, reply)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=[],
rcpt_options=[]):
"""This command performs an entire mail transaction.
The arguments are:
- from_addr : The address sending this mail.
- to_addrs : A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
- msg : The message to send.
- mail_options : List of ESMTP options (such as 8bitmime) for the
mail command.
- rcpt_options : List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPRecipientsRefused The server rejected ALL recipients
(no mail was sent).
SMTPSenderRefused The server didn't accept the from_addr.
SMTPDataError The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
self.ehlo_or_helo_if_needed()
esmtp_opts = []
if self.does_esmtp:
# Hmmm? what's this? -ddm
# self.esmtp_features['7bit']=""
if self.has_extn('size'):
esmtp_opts.append("size=%d" % len(msg))
for option in mail_options:
esmtp_opts.append(option)
(code, resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs = {}
if isinstance(to_addrs, string_or_bytes):
to_addrs = [to_addrs]
for each in to_addrs:
(code, resp) = self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each] = (code,resp)
if len(senderrs) == len(to_addrs):
# the server refused all our recipients
self.rset()
raise SMTPRecipientsRefused(senderrs)
(code, resp) = self.data(msg)
if code != 250:
self.rset()
raise SMTPDataError(code, resp)
# if we got here then somebody got our mail
return senderrs
def close(self):
"""Close the connection to the SMTP server."""
try:
file = self.file
self.file = None
if file:
file.close()
finally:
sock = self.sock
self.sock = None
if sock:
sock.close()
def quit(self):
"""Terminate the SMTP session."""
res = self.docmd("quit")
# A new EHLO is required after reconnecting with connect()
self.ehlo_resp = self.helo_resp = None
self.esmtp_features = {}
self.does_esmtp = False
self.close()
return res
if _have_ssl:

    class SMTP_SSL(SMTP):
        """ This is a subclass derived from SMTP that connects over an SSL encrypted
        socket (to use this class you need a socket module that was compiled with SSL
        support). If host is not specified, '' (the local host) is used. If port is
        omitted, the standard SMTP-over-SSL port (465) is used. keyfile and certfile
        are also optional - they can contain a PEM formatted private key and
        certificate chain file for the SSL connection.
        """

        default_port = SMTP_SSL_PORT

        def __init__(self, host='', port=0, local_hostname=None,
                     keyfile=None, certfile=None,
                     timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                     debug_to=partial(print, file=stderr)):
            self.keyfile = keyfile
            self.certfile = certfile
            SMTP.__init__(self, host, port, local_hostname, timeout,
                          debug_to=debug_to)

        def _get_socket(self, host, port, timeout):
            if self.debuglevel > 0:
                self.debug('connect:', (host, port))
            new_socket = socket.create_connection((host, port), timeout)
            # ssl.wrap_socket() was removed in Python 3.12; build an
            # equivalent non-verifying client context, preserving the
            # optional client key/certificate behaviour.
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
            if self.certfile:
                context.load_cert_chain(self.certfile, self.keyfile)
            new_socket = context.wrap_socket(new_socket, server_hostname=host)
            self.file = SSLFakeFile(new_socket)
            return new_socket

    __all__.append("SMTP_SSL")
#
# LMTP extension
#
LMTP_PORT = 2003


class LMTP(SMTP):
    """LMTP - Local Mail Transfer Protocol
    The LMTP protocol, which is very similar to ESMTP, is heavily based
    on the standard SMTP client. It's common to use Unix sockets for LMTP,
    so our connect() method must support that as well as a regular
    host:port server. To specify a Unix socket, you must use an absolute
    path as the host, starting with a '/'.
    Authentication is supported, using the regular SMTP mechanism. When
    using a Unix socket, LMTP generally don't support or require any
    authentication, but your mileage might vary."""

    # LMTP greets with LHLO rather than EHLO (RFC 2033)
    ehlo_msg = "lhlo"

    def __init__(self, host='', port=LMTP_PORT, local_hostname=None):
        """Initialize a new instance."""
        SMTP.__init__(self, host, port, local_hostname)

    def connect(self, host='localhost', port=0):
        """Connect to the LMTP daemon, on either a Unix or a TCP socket."""
        if host[0] != '/':
            return SMTP.connect(self, host, port)
        # Handle Unix-domain sockets.
        try:
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(host)
        except OSError:
            if self.debuglevel > 0:
                self.debug('connect fail:', host)
            # NOTE(review): if socket.socket() itself raised, self.sock is
            # still unset here and this would raise AttributeError -- confirm.
            if self.sock:
                self.sock.close()
            self.sock = None
            raise
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            self.debug("connect:", msg)
        return (code, msg)
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
    import sys

    def prompt(label):
        # Ask for one line of input; note: do not shadow this function's
        # own name with its parameter.
        sys.stdout.write(label + ": ")
        return sys.stdin.readline().strip()

    fromaddr = prompt("From")
    toaddrs = prompt("To").split(',')
    print("Enter message, end with ^D:")
    # Read the whole message in one call instead of concatenating line by
    # line (repeated str += is quadratic in the message size).
    msg = sys.stdin.read()
    print("Message length is %d" % len(msg))
    server = SMTP('localhost')
    server.set_debuglevel(1)
    server.sendmail(fromaddr, toaddrs, msg)
    server.quit()
| 32,735 | Python | .py | 754 | 33.265252 | 97 | 0.594037 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,131 | lock.py | kovidgoyal_calibre/src/calibre/utils/lock.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import atexit
import errno
import os
import stat
import tempfile
import time
from functools import partial
from calibre.constants import __appname__, filesystem_encoding, islinux, ismacos, iswindows
from calibre.utils.monotonic import monotonic
from calibre_extensions import speedup
if iswindows:
import msvcrt
from calibre.constants import get_windows_username
from calibre_extensions import winutil
excl_file_mode = stat.S_IREAD | stat.S_IWRITE
else:
excl_file_mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
import fcntl
def unix_open(path):
    """Open path read-write (creating it if needed) with close-on-exec set,
    so the file descriptor does not leak into child processes."""
    flags = os.O_RDWR | os.O_CREAT
    cloexec_done = False
    if hasattr(speedup, 'O_CLOEXEC'):
        try:
            fd = os.open(path, flags | speedup.O_CLOEXEC, excl_file_mode)
            cloexec_done = True
        except OSError as err:
            # Old kernels may not support O_CLOEXEC at open() time
            if err.errno != errno.EINVAL:
                raise
    if not cloexec_done:
        # Fall back to setting the flag after the fact via fcntl
        fd = os.open(path, flags, excl_file_mode)
        fcntl.fcntl(fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    return os.fdopen(fd, 'r+b')
def unix_retry(err):
    """Return True if err is a transient locking failure worth retrying."""
    transient = (errno.EACCES, errno.EAGAIN, errno.ENOLCK, errno.EINTR)
    return err.errno in transient
def windows_open(path):
    """Open path for exclusive read/write access on Windows and return a
    Python file object wrapping the OS handle."""
    if isinstance(path, bytes):
        path = os.fsdecode(path)
    handle = winutil.create_file(
        path,
        winutil.GENERIC_READ |
        winutil.GENERIC_WRITE,  # Open for reading and writing
        0,  # No sharing flags: exclusive access
        winutil.OPEN_ALWAYS,  # Create the file if it does not exist
        winutil.FILE_ATTRIBUTE_NORMAL,  # Normal attributes
    )
    fd = msvcrt.open_osfhandle(int(handle), 0)
    f = os.fdopen(fd, 'r+b')
    # The CRT file descriptor now owns the OS handle, so release our
    # reference without closing it.
    handle.detach()
    return f
def windows_retry(err):
    """Return True if err is a transient Windows sharing/locking error."""
    retryable = (winutil.ERROR_SHARING_VIOLATION, winutil.ERROR_LOCK_VIOLATION)
    return err.winerror in retryable
def retry_for_a_time(timeout, sleep_time, func, error_retry, *args):
    """Call func(*args) until it succeeds, for at most `timeout` seconds.

    An OSError for which error_retry(err) is True is swallowed and the
    call retried after sleeping `sleep_time` seconds; any other error, or
    running past the deadline, re-raises.
    """
    deadline = monotonic() + timeout
    while True:
        try:
            return func(*args)
        except OSError as err:
            if not error_retry(err) or monotonic() > deadline:
                raise
        time.sleep(sleep_time)
def lock_file(path, timeout=15, sleep_time=0.2):
    """Open path and return a file object holding an exclusive lock on it,
    retrying transient failures for up to `timeout` seconds."""
    if iswindows:
        # On Windows the exclusive open itself acts as the lock
        return retry_for_a_time(
            timeout, sleep_time, windows_open, windows_retry, path
        )
    ans = unix_open(path)
    try:
        retry_for_a_time(
            timeout, sleep_time, fcntl.flock, unix_retry,
            ans.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB
        )
        return ans
    except Exception:
        # Never leak the fd if we failed to acquire the lock
        ans.close()
        raise
class ExclusiveFile:
    """Context manager yielding an open file on `path` with an exclusive
    lock held; the lock is released when the file is closed on exit."""

    def __init__(self, path, timeout=15, sleep_time=0.2):
        if iswindows and isinstance(path, bytes):
            path = path.decode(filesystem_encoding)
        self.path = path
        self.timeout = timeout
        self.sleep_time = sleep_time

    def __enter__(self):
        self.file = lock_file(self.path, self.timeout, self.sleep_time)
        return self.file

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Closing the file releases the lock
        self.file.close()
def _clean_lock_file(file_obj):
try:
os.remove(file_obj.name)
except OSError:
pass
try:
file_obj.close()
except OSError:
pass
if iswindows:
    def create_single_instance_mutex(name, per_user=True):
        # Returns a callable that releases the mutex, or None when another
        # process (for this user, when per_user) already holds it.
        mutexname = '{}-singleinstance-{}-{}'.format(
            __appname__, (get_windows_username() if per_user else ''), name
        )
        try:
            mutex = winutil.create_mutex(mutexname, False)
        except FileExistsError:
            # Mutex already exists: another instance is running
            return
        return mutex.close
elif islinux:
    def create_single_instance_mutex(name, per_user=True):
        # Returns a callable that releases the mutex, or None when another
        # process already holds it.
        import socket
        from calibre.utils.ipc import eintr_retry_call
        name = '{}-singleinstance-{}-{}'.format(
            __appname__, (os.geteuid() if per_user else ''), name
        )
        name = name
        # Leading NUL byte makes this a Linux abstract-namespace socket:
        # no filesystem entry is created and the kernel reclaims the name
        # automatically when the process dies.
        address = '\0' + name.replace(' ', '_')
        sock = socket.socket(family=socket.AF_UNIX)
        try:
            eintr_retry_call(sock.bind, address)
        except OSError as err:
            sock.close()
            if getattr(err, 'errno', None) == errno.EADDRINUSE:
                # Name already bound: another instance is running
                return
            raise
        fd = sock.fileno()
        # Set close-on-exec so the "lock" does not leak into child
        # processes across exec.
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
        return sock.close
else:
    def singleinstance_path(name, per_user=True):
        # Pick a writable directory for the lock file, preferring
        # system-wide locations, then $HOME, then the temp directory.
        name = '{}-singleinstance-{}-{}.lock'.format(
            __appname__, (os.geteuid() if per_user else ''), name
        )
        home = os.path.expanduser('~')
        locs = ['/var/lock', home, tempfile.gettempdir()]
        if ismacos:
            locs.insert(0, '/Library/Caches')
        for loc in locs:
            if os.access(loc, os.W_OK | os.R_OK | os.X_OK):
                # Hide the file with a leading dot when it lives in $HOME
                return os.path.join(loc, ('.' if loc is home else '') + name)
        raise OSError(
            'Failed to find a suitable filesystem location for the lock file'
        )
    def create_single_instance_mutex(name, per_user=True):
        # POSIX fallback: an exclusive fcntl lock on a well-known file.
        # Returns a callable that releases the lock, or None when another
        # process already holds it.
        from calibre.utils.ipc import eintr_retry_call
        path = singleinstance_path(name, per_user)
        f = open(path, 'w')
        try:
            eintr_retry_call(fcntl.lockf, f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            return partial(_clean_lock_file, f)
        except OSError as err:
            f.close()
            if err.errno not in (errno.EAGAIN, errno.EACCES):
                raise
class SingleInstance:
    """Context manager around a named single-instance mutex.

    __enter__ returns True if this process acquired the mutex (i.e. it is
    the only running instance for `name`), False otherwise. The mutex is
    released on exit.
    """

    def __init__(self, name):
        self.name = name
        self.release_mutex = None

    def __enter__(self):
        self.release_mutex = create_single_instance_mutex(self.name)
        return self.release_mutex is not None

    def __exit__(self, *a):
        releaser, self.release_mutex = self.release_mutex, None
        if releaser is not None:
            releaser()
def singleinstance(name):
    """Ensure only a single process exists with the specified mutex key.

    Returns True if this process acquired the mutex (it is released
    automatically at exit), False if another process already holds it.
    """
    releaser = create_single_instance_mutex(name)
    if releaser is None:
        return False
    atexit.register(releaser)
    return True
| 6,243 | Python | .py | 176 | 27.681818 | 91 | 0.61795 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,132 | wordcount.py | kovidgoyal_calibre/src/calibre/utils/wordcount.py | #!/usr/bin/env python
"""
Get word, character, and Asian character counts
1. Get a word count as a dictionary:
wc = get_wordcount(text)
words = wc['words'] # etc.
2. Get a word count as an object
wc = get_wordcount_obj(text)
words = wc.words # etc.
properties counted:
* characters
* chars_no_spaces
* asian_chars
* non_asian_words
* words
Sourced from:
http://ginstrom.com/scribbles/2008/05/17/counting-words-etc-in-an-html-file-with-python/
http://ginstrom.com/scribbles/2007/10/06/counting-words-characters-and-asian-characters-with-python/
"""
__version__ = 0.1
__author__ = "Ryan Ginstrom"
IDEOGRAPHIC_SPACE = 0x3000


def is_asian(char):
    """Is the character Asian?"""
    # Anything above U+3000 (the ideographic, i.e. double-byte, space)
    # is counted as an Asian character.
    return ord(char) > IDEOGRAPHIC_SPACE
def filter_jchars(c):
    """Filters Asian characters to spaces"""
    return ' ' if is_asian(c) else c
def nonj_len(word):
    """Returns number of non-Asian words in {word}

    - 日本語AアジアンB -> 2
    - hello -> 1

    @param word: A word, possibly containing Asian characters
    """
    # Replacing every Asian character with a space splits the text into
    # runs of non-Asian characters, which split() then counts as words.
    return len(''.join(filter_jchars(c) for c in word).split())
def get_wordcount(text):
    """Get the word/character count for text

    @param text: The text of the segment
    """
    asian_chars = sum(is_asian(c) for c in text)
    non_asian_words = nonj_len(text)
    # Each Asian character counts as one word on its own
    return dict(
        characters=len(text),
        chars_no_spaces=sum(1 for c in text if not c.isspace()),
        asian_chars=asian_chars,
        non_asian_words=non_asian_words,
        words=non_asian_words + asian_chars,
    )
def dict2obj(dictionary):
    """Transform a dictionary into an object"""
    class Obj:
        def __init__(self, data):
            # Expose each key as an instance attribute
            self.__dict__.update(data)
    return Obj(dictionary)
def get_wordcount_obj(text):
    """Get the wordcount as an object rather than a dictionary"""
    counts = get_wordcount(text)
    return dict2obj(counts)
| 2,337 | Python | .py | 69 | 28.434783 | 100 | 0.645553 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,133 | socket_inheritance.py | kovidgoyal_calibre/src/calibre/utils/socket_inheritance.py | #!/usr/bin/env python
'''
Created on 29 Nov 2013
@author: charles
Code taken from https://mail.python.org/pipermail/python-dev/2007-June/073745.html
modified to make it work
'''
def get_socket_inherit(s):
    '''
    Returns True if the socket has been set to allow inheritance across
    forks and execs to child processes, otherwise False
    '''
    try:
        inheritable = s.get_inheritable()
    except Exception:
        # Never propagate: just report the problem and return None
        import traceback
        traceback.print_exc()
        return None
    return inheritable
def set_socket_inherit(s, inherit=False):
    '''
    Mark a socket as inheritable or non-inheritable to child processes.

    Call this right after socket creation when you want to keep the socket
    out of child processes. Note that a socket returned from accept() can
    be inheritable even when the listener was not, so call this for the
    accepted socket as well.
    '''
    try:
        s.set_inheritable(inherit)
    except Exception:
        # Never propagate: just report the problem
        import traceback
        traceback.print_exc()
def test():
    # Self-test: flipping the inheritance flag must be observable
    import socket
    s = socket.socket()
    before = get_socket_inherit(s)
    set_socket_inherit(s, before ^ True)
    if before == get_socket_inherit(s):
        raise RuntimeError('Failed to change socket inheritance status')
    print('OK!')


if __name__ == '__main__':
    test()
| 1,354 | Python | .py | 41 | 28.073171 | 82 | 0.69746 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,134 | config.py | kovidgoyal_calibre/src/calibre/utils/config.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Manage application-wide preferences.
'''
import optparse
import os
from copy import deepcopy
from calibre.constants import CONFIG_DIR_MODE, __appname__, __author__, config_dir, get_version, iswindows
from calibre.utils.config_base import (
Config,
ConfigInterface,
ConfigProxy,
Option,
OptionSet,
OptionValues,
StringConfig,
commit_data,
from_json,
json_dumps,
json_loads,
make_config_dir,
plugin_dir,
prefs,
read_data,
to_json,
tweaks,
)
from calibre.utils.localization import _
from polyglot.builtins import native_string_type, string_or_bytes
# optparse uses gettext.gettext instead of _ from builtins, so we
# monkey patch it.
optparse._ = _
if False:
# Make pyflakes happy
Config, ConfigProxy, Option, OptionValues, StringConfig, OptionSet,
ConfigInterface, tweaks, plugin_dir, prefs, from_json, to_json, make_config_dir
def check_config_write_access():
    """Return True if the config directory is both writable and searchable."""
    needed = (os.W_OK, os.X_OK)
    return all(os.access(config_dir, mode) for mode in needed)
class CustomHelpFormatter(optparse.IndentedHelpFormatter):
    # Help formatter that colorizes the usage line, section headings and
    # option strings with ANSI escapes (see calibre.utils.terminal).
    def format_usage(self, usage):
        from calibre.utils.terminal import colored
        parts = usage.split(' ')
        if parts:
            # Highlight just the program name (the first word)
            parts[0] = colored(parts[0], fg='yellow', bold=True)
        usage = ' '.join(parts)
        return colored(_('Usage'), fg='blue', bold=True) + ': ' + usage
    def format_heading(self, heading):
        from calibre.utils.terminal import colored
        return "%*s%s:\n" % (self.current_indent, '',
                             colored(heading, fg='blue', bold=True))
    def format_option(self, option):
        import textwrap
        from calibre.utils.terminal import colored
        result = []
        opts = self.option_strings[option]
        opt_width = self.help_position - self.current_indent - 2
        if len(opts) > opt_width:
            # Option strings too wide: put them on their own line and start
            # the help text on the following line
            opts = "%*s%s\n" % (self.current_indent, "",
                                colored(opts, fg='green'))
            indent_first = self.help_position
        else:  # start help on same line as opts
            # Widen the padding field by the length of the (invisible) ANSI
            # escape codes so the visible columns still line up
            opts = "%*s%-*s  " % (self.current_indent, "", opt_width +
                    len(colored('', fg='green')), colored(opts, fg='green'))
            indent_first = 0
        result.append(opts)
        if option.help:
            help_text = self.expand_default(option).split('\n')
            help_lines = []
            # Wrap each paragraph of the help text to the help width
            for line in help_text:
                help_lines.extend(textwrap.wrap(line, self.help_width))
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (self.help_position, "", line)
                           for line in help_lines[1:]])
        elif opts[-1] != "\n":
            result.append("\n")
        return "".join(result)+'\n'
class OptionParser(optparse.OptionParser):
    # optparse.OptionParser subclass used throughout calibre: colored
    # output via CustomHelpFormatter/ANSIStream, a standard usage epilog,
    # and helpers to merge/regroup options from other parsers.
    def __init__(self,
                 usage='%prog [options] filename',
                 version=None,
                 epilog=None,
                 gui_mode=False,
                 conflict_handler='resolve',
                 **kwds):
        import textwrap
        from calibre.utils.terminal import colored
        usage = textwrap.dedent(usage)
        if epilog is None:
            epilog = _('Created by ')+colored(__author__, fg='cyan')
        # Append the standard note about quoting arguments with spaces
        usage += '\n\n'+_('''Whenever you pass arguments to %prog that have spaces in them, '''
                          '''enclose the arguments in quotation marks. For example: "{}"''').format(
                              "C:\\some path with spaces" if iswindows else '/some path/with spaces') +'\n'
        if version is None:
            version = '%%prog (%s %s)'%(__appname__, get_version())
        optparse.OptionParser.__init__(self, usage=usage, version=version, epilog=epilog,
                                       formatter=CustomHelpFormatter(),
                                       conflict_handler=conflict_handler, **kwds)
        # In GUI mode errors raise instead of exiting the process
        self.gui_mode = gui_mode
        if False:
            # Translatable strings from optparse, listed here only so the
            # message extractor picks them up; never executed
            _("Options")
            _("show this help message and exit")
            _("show program's version number and exit")
    def print_usage(self, file=None):
        # Route through ANSIStream so colors work cross-platform
        from calibre.utils.terminal import ANSIStream
        s = ANSIStream(file)
        optparse.OptionParser.print_usage(self, file=s)
    def print_help(self, file=None):
        from calibre.utils.terminal import ANSIStream
        s = ANSIStream(file)
        optparse.OptionParser.print_help(self, file=s)
    def print_version(self, file=None):
        from calibre.utils.terminal import ANSIStream
        s = ANSIStream(file)
        optparse.OptionParser.print_version(self, file=s)
    def error(self, msg):
        # In GUI mode we must not call sys.exit(), so raise instead
        if self.gui_mode:
            raise Exception(msg)
        optparse.OptionParser.error(self, msg)
    def merge(self, parser):
        '''
        Add options from parser to self. In case of conflicts, conflicting options from
        parser are skipped.
        '''
        opts = list(parser.option_list)
        groups = list(parser.option_groups)
        def merge_options(options, container):
            # Deep-copy so the source parser's option objects stay untouched
            for opt in deepcopy(options):
                if not self.has_option(opt.get_opt_string()):
                    container.add_option(opt)
        merge_options(opts, self)
        for group in groups:
            g = self.add_option_group(group.title)
            merge_options(group.option_list, g)
    def subsume(self, group_name, msg=''):
        '''
        Move all existing options into a subgroup named
        C{group_name} with description C{msg}.
        '''
        # --version and --help stay at the top level
        opts = [opt for opt in self.options_iter() if opt.get_opt_string() not in ('--version', '--help')]
        self.option_groups = []
        subgroup = self.add_option_group(group_name, msg)
        for opt in opts:
            self.remove_option(opt.get_opt_string())
            subgroup.add_option(opt)
    def options_iter(self):
        # Yield all options, both top-level and grouped, skipping blanks
        for opt in self.option_list:
            if native_string_type(opt).strip():
                yield opt
        for gr in self.option_groups:
            for opt in gr.option_list:
                if native_string_type(opt).strip():
                    yield opt
    def option_by_dest(self, dest):
        # Returns the option whose dest matches, or None if not found
        for opt in self.options_iter():
            if opt.dest == dest:
                return opt
    def merge_options(self, lower, upper):
        '''
        Merge options in lower and upper option lists into upper.
        Default values in upper are overridden by
        non default values in lower.
        '''
        for dest in lower.__dict__.keys():
            if dest not in upper.__dict__:
                continue
            opt = self.option_by_dest(dest)
            if lower.__dict__[dest] != opt.default and \
                    upper.__dict__[dest] == opt.default:
                upper.__dict__[dest] = lower.__dict__[dest]
    def add_option_group(self, *args, **kwargs):
        # optparse requires native strings for group titles
        if isinstance(args[0], string_or_bytes):
            args = list(args)
            args[0] = native_string_type(args[0])
        return optparse.OptionParser.add_option_group(self, *args, **kwargs)
class DynamicConfig(dict):
    '''
    A replacement for QSettings that supports dynamic config keys.
    Returns `None` if a config key is not found. Note that the config
    data is stored in a JSON file.
    '''
    def __init__(self, name='dynamic'):
        dict.__init__(self, {})
        self.name = name
        # Fallback values consulted when a key is missing from the dict
        self.defaults = {}
        self.refresh()
    @property
    def file_path(self):
        # The '.pickle' in the name is historical: data used to be pickled
        return os.path.join(config_dir, self.name+'.pickle.json')
    def decouple(self, prefix):
        # Switch to a prefixed, independent copy of this config
        self.name = prefix + self.name
        self.refresh()
    def read_old_serialized_representation(self):
        # Legacy format: a pickle file at the same path minus '.json'
        from calibre.utils.serialize import pickle_loads
        from calibre.utils.shared_file import share_open
        path = self.file_path.rpartition('.')[0]
        try:
            with share_open(path, 'rb') as f:
                raw = f.read()
        except OSError:
            raw = b''
        try:
            d = pickle_loads(raw).copy()
        except Exception:
            d = {}
        return d
    def refresh(self, clear_current=True):
        # Reload from disk, migrating the legacy pickle format to JSON if
        # no JSON file exists yet (or it is empty)
        d = {}
        migrate = False
        if clear_current:
            self.clear()
        try:
            raw = read_data(self.file_path)
        except FileNotFoundError:
            d = self.read_old_serialized_representation()
            migrate = bool(d)
        else:
            if raw:
                try:
                    d = json_loads(raw)
                except Exception as err:
                    print('Failed to de-serialize JSON representation of stored dynamic data for {} with error: {}'.format(
                        self.name, err))
            else:
                d = self.read_old_serialized_representation()
                migrate = bool(d)
        if migrate and d:
            # Persist the migrated data in the new JSON format
            raw = json_dumps(d, ignore_unserializable=True)
            commit_data(self.file_path, raw)
        self.update(d)
    def __getitem__(self, key):
        # Missing keys fall back to defaults, then None (never raises)
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.defaults.get(key, None)
    def get(self, key, default=None):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.defaults.get(key, default)
    def __setitem__(self, key, val):
        # Every assignment is written to disk immediately
        dict.__setitem__(self, key, val)
        self.commit()
    def set(self, key, val):
        self.__setitem__(key, val)
    def commit(self):
        # Serialize the whole dict to the JSON file
        if not getattr(self, 'name', None):
            return
        raw = json_dumps(self)
        commit_data(self.file_path, raw)
dynamic = DynamicConfig()
class XMLConfig(dict):
    '''
    Similar to :class:`DynamicConfig`, except that it uses an XML storage
    backend instead of a pickle file.

    See `https://docs.python.org/library/plistlib.html`_ for the supported
    data types.
    '''
    EXTENSION = '.plist'
    def __init__(self, rel_path_to_cf_file, base_path=config_dir):
        dict.__init__(self)
        # When True, commit() becomes a no-op (see __enter__/__exit__)
        self.no_commit = False
        self.defaults = {}
        self.file_path = os.path.join(base_path,
                *(rel_path_to_cf_file.split('/')))
        self.file_path = os.path.abspath(self.file_path)
        if not self.file_path.endswith(self.EXTENSION):
            self.file_path += self.EXTENSION
        self.refresh()
    def mtime(self):
        # Modification time of the backing file, or 0 if it does not exist
        try:
            return os.path.getmtime(self.file_path)
        except OSError:
            return 0
    def touch(self):
        try:
            os.utime(self.file_path, None)
        except OSError:
            pass
    def raw_to_object(self, raw):
        # Deserialize bytes from disk; subclasses override for other formats
        from polyglot.plistlib import loads
        return loads(raw)
    def to_raw(self):
        from polyglot.plistlib import dumps
        return dumps(self)
    def decouple(self, prefix):
        # Switch to a prefixed, independent copy of this config file
        self.file_path = os.path.join(os.path.dirname(self.file_path), prefix + os.path.basename(self.file_path))
        self.refresh()
    def refresh(self, clear_current=True):
        # Reload from disk; corrupt or unreadable data yields an empty dict
        d = {}
        try:
            raw = read_data(self.file_path)
        except FileNotFoundError:
            pass
        else:
            try:
                d = self.raw_to_object(raw) if raw.strip() else {}
            except SystemError:
                pass
            except:
                import traceback
                traceback.print_exc()
                d = {}
        if clear_current:
            self.clear()
        self.update(d)
    def has_key(self, key):
        return dict.__contains__(self, key)
    def __getitem__(self, key):
        # Missing keys fall back to defaults, then None (never raises)
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.defaults.get(key, None)
    def get(self, key, default=None):
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return self.defaults.get(key, default)
    def __setitem__(self, key, val):
        # Every assignment is written to disk immediately
        dict.__setitem__(self, key, val)
        self.commit()
    def set(self, key, val):
        self.__setitem__(key, val)
    def __delitem__(self, key):
        try:
            dict.__delitem__(self, key)
        except KeyError:
            pass  # ignore missing keys
        else:
            self.commit()
    def commit(self):
        # Serialize to disk, creating the parent directory if needed
        if self.no_commit:
            return
        if getattr(self, 'file_path', None):
            dpath = os.path.dirname(self.file_path)
            if not os.path.exists(dpath):
                os.makedirs(dpath, mode=CONFIG_DIR_MODE)
            commit_data(self.file_path, self.to_raw())
    def __enter__(self):
        # Batch mode: suspend disk writes until the with-block exits
        self.no_commit = True
    def __exit__(self, *args):
        self.no_commit = False
        self.commit()
class JSONConfig(XMLConfig):
    """An XMLConfig variant that stores its data as JSON instead of a
    plist. Unlike the base class, looking up a key that is present in
    neither the dict nor the defaults raises KeyError."""

    EXTENSION = '.json'

    def raw_to_object(self, raw):
        return json_loads(raw)

    def to_raw(self):
        return json_dumps(self)

    def __getitem__(self, key):
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        # May raise KeyError, by design
        return self.defaults[key]

    def get(self, key, default=None):
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        return self.defaults.get(key, default)

    def __setitem__(self, key, val):
        # Every assignment is persisted immediately
        dict.__setitem__(self, key, val)
        self.commit()
class DevicePrefs:
    """Wrapper over the global preferences that lets per-device overrides
    shadow individual keys."""

    def __init__(self, global_prefs):
        self.global_prefs = global_prefs
        self.overrides = {}

    def set_overrides(self, **kwargs):
        # Replaces any previously set overrides wholesale
        self.overrides = kwargs.copy()

    def __getitem__(self, key):
        # NB: the global value is looked up even when an override exists
        return self.overrides.get(key, self.global_prefs[key])
device_prefs = DevicePrefs(prefs)
| 13,959 | Python | .py | 373 | 27.670241 | 123 | 0.573439 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,135 | network.py | kovidgoyal_calibre/src/calibre/utils/network.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2010, Kovid Goyal <kovid at kovidgoyal.net>
from contextlib import suppress
from calibre.constants import isbsd, islinux, iswindows
from calibre.utils.config_base import tweaks
class LinuxNetworkStatus:
    # Map of NetworkManager connectivity values to their XDP/GLib equivalents
    NM_XDP_CONNECTIVITY_MAP = {
        0: 4,  # NM_CONNECTIVITY_UNKNOWN → Full network
        1: 1,  # NM_CONNECTIVITY_NONE → Local only
        2: 3,  # NM_CONNECTIVITY_PORTAL → Captive portal
        3: 2,  # NM_CONNECTIVITY_LIMITED → Limited connectivity
        4: 4,  # NM_CONNECTIVITY_FULL → Full network
    }
    def __init__(self):
        from jeepney import DBusAddress, Properties, new_method_call
        # Prefer desktop portal interface here since it can theoretically
        # work with network management solutions other than NetworkManager
        # and is controlled by the current desktop session
        #
        # There is no difference in terms of "features" provided between
        # the two APIs from our point of view.
        self.xdp_call = lambda : new_method_call(DBusAddress(
            '/org/freedesktop/portal/desktop',
            bus_name='org.freedesktop.portal.Desktop',
            interface="org.freedesktop.portal.NetworkMonitor"), 'GetConnectivity')
        self.nm_call = lambda : Properties(DBusAddress('/org/freedesktop/NetworkManager',
            bus_name='org.freedesktop.NetworkManager',
            interface="org.freedesktop.NetworkManager")).get('Connectivity')
        # Probe once at construction time and remember which backend works;
        # fall back to "always fully connected" when neither responds.
        if self.xdp() is not None:
            self.get_connectivity = self.xdp
        elif self.nm() is not None:
            self.get_connectivity = self.nm
        else:
            self.get_connectivity = lambda : 4
    def connect(self, which='SESSION'):
        # Lazily open (and cache) a blocking D-Bus connection
        from jeepney.io.blocking import open_dbus_connection
        if not hasattr(self, 'connection'):
            self.connection = open_dbus_connection(which)
    def xdp(self):
        # Query the XDG desktop portal; returns None on any failure
        with suppress(Exception):
            self.connect('SESSION')
            return self.send(self.xdp_call())
        # Only reached when the query failed: drop the possibly broken
        # connection so the next attempt reconnects from scratch
        if hasattr(self, 'connection'):
            self.connection.close()
            del self.connection
    def nm(self):
        # Query NetworkManager directly; returns None on any failure
        with suppress(Exception):
            self.connect('SYSTEM')
            return self.NM_XDP_CONNECTIVITY_MAP.get(self.send(self.nm_call()), 4)
        if hasattr(self, 'connection'):
            self.connection.close()
            del self.connection
    def send(self, msg):
        # Send a D-Bus message and return the first element of the reply
        # body, raising on a D-Bus error response
        from jeepney import DBusErrorResponse, MessageType
        reply = self.connection.send_and_get_reply(msg)
        if reply.header.message_type is MessageType.error:
            raise DBusErrorResponse(reply)
        return reply.body[0]
    def __call__(self):
        with suppress(Exception):
            # Meanings of returned XDP/GLib connectivity values:
            # * 1: Local only. The host is not configured with a route to the internet.
            # * 2: Limited connectivity. The host is connected to a network, but can't reach the full internet.
            # * 3: Captive portal. The host is behind a captive portal and cannot reach the full internet.
            # * 4: Full network. The host connected to a network, and can reach the full internet.
            return self.get_connectivity() == 4
        # On any error assume we are online
        return True
class WindowsNetworkStatus:
    """Connectivity check for Windows, backed by the winutil extension."""

    def __init__(self):
        from calibre_extensions import winutil
        self.winutil = winutil

    def __call__(self):
        # Defensive: without the extension, assume we are online
        if self.winutil is None:
            return True
        return self.winutil.internet_connected()
class DummyNetworkStatus:
    """Fallback connectivity checker that always reports online."""

    def __call__(self):
        return True
def internet_connected():
    """Return True if the machine appears to have Internet connectivity.

    The platform-specific checker is constructed lazily on first use and
    cached as an attribute on this function.
    """
    if tweaks['skip_network_check']:
        return True
    checker = getattr(internet_connected, 'checker', None)
    if checker is None:
        if iswindows:
            checker = WindowsNetworkStatus()
        elif islinux or isbsd:
            checker = LinuxNetworkStatus()
        else:
            checker = DummyNetworkStatus()
        internet_connected.checker = checker
    return checker()
def is_ipv6_addr(addr):
    """Return True if addr is a valid textual IPv6 address."""
    import socket
    try:
        socket.inet_pton(socket.AF_INET6, addr)
        return True
    except Exception:
        return False
def format_addr_for_url(addr):
    """Wrap IPv6 literals in brackets so they can be embedded in URLs."""
    return f'[{addr}]' if is_ipv6_addr(addr) else addr
def get_fallback_server_addr():
    """Return the wildcard listen address: '::' when a dual-stack IPv6
    socket is available, otherwise the IPv4 wildcard."""
    from socket import has_dualstack_ipv6
    if has_dualstack_ipv6():
        return '::'
    return '0.0.0.0'
| 4,479 | Python | .py | 101 | 35.722772 | 113 | 0.657749 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,136 | img.py | kovidgoyal_calibre/src/calibre/utils/img.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015-2019, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import os
import shutil
import subprocess
import sys
import tempfile
from contextlib import suppress
from io import BytesIO
from threading import Thread
from qt.core import QBuffer, QByteArray, QColor, QImage, QImageReader, QImageWriter, QIODevice, QPixmap, Qt, QTransform, qRgba
from calibre import fit_image, force_unicode
from calibre.constants import iswindows
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.config_base import tweaks
from calibre.utils.filenames import atomic_rename
from calibre.utils.imghdr import what
from calibre.utils.resources import get_image_path as I
from calibre_extensions import imageops
from polyglot.builtins import string_or_bytes
# Utilities {{{
class NotImage(ValueError):
    """Raised when data that is not a recognizable image is loaded."""
def normalize_format_name(fmt):
    """Lower-case an image format name, canonicalizing 'jpg' to 'jpeg'."""
    lowered = fmt.lower()
    return 'jpeg' if lowered == 'jpg' else lowered
def get_exe_path(name):
    """Return the path of the named helper binary that ships alongside
    pdftohtml, falling back to a bare name for PATH lookup."""
    from calibre.ebooks.pdf.pdftohtml import PDFTOHTML
    base = os.path.dirname(PDFTOHTML)
    if iswindows:
        name += '-calibre.exe'
    return os.path.join(base, name) if base else name
def load_jxr_data(data):
    """Decode JPEG-XR image data by shelling out to the bundled JxrDecApp
    converter, returning the result as a QImage.

    :raises NotImage: if the converter failed to produce a loadable image
    """
    with TemporaryDirectory() as tdir:
        if isinstance(tdir, bytes):
            tdir = os.fsdecode(tdir)
        with open(os.path.join(tdir, 'input.jxr'), 'wb') as f:
            f.write(data)
        cmd = [get_exe_path('JxrDecApp'), '-i', 'input.jxr', '-o', 'output.tif']
        creationflags = subprocess.DETACHED_PROCESS if iswindows else 0
        # Use subprocess.DEVNULL instead of open(os.devnull, 'wb'): the
        # original leaked the devnull file object (it was never closed).
        subprocess.run(cmd, cwd=tdir, stdout=subprocess.DEVNULL,
                       stderr=subprocess.STDOUT, creationflags=creationflags)
        i = QImage()
        if not i.load(os.path.join(tdir, 'output.tif')):
            raise NotImage('Failed to convert JPEG-XR image')
        return i
# }}}
# png <-> gif {{{
def png_data_to_gif_data(data):
    """Convert PNG image data to GIF, preserving transparency where the
    source has it."""
    from PIL import Image
    img = Image.open(BytesIO(data))
    buf = BytesIO()
    if img.mode in ('p', 'P'):
        # Already palettized: carry over its transparency slot, if any
        transparency = img.info.get('transparency')
        if transparency is None:
            img.save(buf, 'gif')
        else:
            img.save(buf, 'gif', transparency=transparency)
    elif img.mode in ('rgba', 'RGBA'):
        # Collapse the alpha channel into a single transparent palette
        # entry (index 255): pixels with alpha <= 128 become transparent
        alpha = img.split()[3]
        mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
        img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=255)
        img.paste(255, mask)
        img.save(buf, 'gif', transparency=255)
    else:
        # Opaque image: just palettize and save
        img = img.convert('P', palette=Image.ADAPTIVE)
        img.save(buf, 'gif')
    return buf.getvalue()
class AnimatedGIF(ValueError):
    """Raised when an animated GIF is converted without permission to
    discard its animation."""
def gif_data_to_png_data(data, discard_animation=False):
    """Convert GIF image data to PNG.

    :raises AnimatedGIF: if the GIF is animated and discard_animation is
        False; otherwise only the current frame is converted.
    """
    from PIL import Image
    img = Image.open(BytesIO(data))
    if img.is_animated and not discard_animation:
        raise AnimatedGIF()
    out = BytesIO()
    img.save(out, 'png')
    return out.getvalue()
# }}}
# Loading images {{{
def set_image_allocation_limit(size_in_mb=1024):
    """Set the maximum amount of memory a single image may allocate."""
    try:
        from calibre_extensions.progress_indicator import set_image_allocation_limit as impl
    except ImportError:
        # Compiled extension unavailable (e.g. running from source)
        return
    impl(size_in_mb)
def null_image():
    ' Create an invalid image. For internal use. '
    # A default-constructed QImage reports isNull() == True, which callers
    # can use as a sentinel for "no image".
    return QImage()
def image_from_data(data):
    ' Create an image object from data, which should be a bytestring. '
    if isinstance(data, QImage):
        return data
    set_image_allocation_limit()
    img = QImage()
    if img.loadFromData(data):
        return img
    # Qt could not decode the data: sniff the format ourselves
    detected = what(None, data)
    if detected == 'jxr':
        # JPEG-XR is handled by an external converter
        return load_jxr_data(data)
    raise NotImage(f'Not a valid image (detected type: {detected})')
def image_from_path(path):
    ' Load an image from the specified path. '
    with open(path, 'rb') as f:
        raw = f.read()
    return image_from_data(raw)
def image_from_x(x):
    ' Create an image from a bytestring or a path or a file like object. '
    # Note: the checks are ordered; e.g. the file-like check must come
    # before the bytes check so readable objects are consumed via read().
    if isinstance(x, str):
        return image_from_path(x)
    if hasattr(x, 'read'):
        return image_from_data(x.read())
    if isinstance(x, (bytes, QImage)):
        return image_from_data(x)
    if isinstance(x, bytearray):
        return image_from_data(bytes(x))
    if isinstance(x, QPixmap):
        return x.toImage()
    raise TypeError('Unknown image src type: %s' % type(x))
def image_and_format_from_data(data):
    ' Create an image object from the specified data which should be a bytestring and also return the format of the image '
    # Keep a reference to the QByteArray alive while the buffer reads it
    ba = QByteArray(data)
    buf = QBuffer(ba)
    buf.open(QIODevice.OpenModeFlag.ReadOnly)
    reader = QImageReader(buf)
    fmt = bytes(reader.format()).decode('utf-8')
    return reader.read(), fmt
# }}}
# Saving images {{{
def image_to_data(img, compression_quality=95, fmt='JPEG', png_compression_level=9, jpeg_optimized=True, jpeg_progressive=False):
    '''
    Serialize image to bytestring in the specified format.

    :param compression_quality: is for JPEG and WEBP and goes from 0 to 100.
        100 being lowest compression, highest image quality. For WEBP 100 means lossless with effort of 70.
    :param png_compression_level: is for PNG and goes from 0-9. 9 being highest compression.
    :param jpeg_optimized: Turns on the 'optimize' option for libjpeg which losslessly reduce file size
    :param jpeg_progressive: Turns on the 'progressive scan' option for libjpeg which allows JPEG images to be downloaded in streaming fashion
    '''
    fmt = fmt.upper()
    ba = QByteArray()
    buf = QBuffer(ba)
    buf.open(QIODevice.OpenModeFlag.WriteOnly)
    if fmt == 'GIF':
        # GIF output is produced by writing PNG via Qt and then converting
        # the PNG bytes with Pillow (png_data_to_gif_data)
        w = QImageWriter(buf, b'PNG')
        w.setQuality(90)
        if not w.write(img):
            raise ValueError('Failed to export image as ' + fmt + ' with error: ' + w.errorString())
        return png_data_to_gif_data(ba.data())
    is_jpeg = fmt in ('JPG', 'JPEG')
    w = QImageWriter(buf, fmt.encode('ascii'))
    if is_jpeg:
        if img.hasAlphaChannel():
            # JPEG cannot store alpha, so composite onto a background first
            img = blend_image(img)
        # QImageWriter only gained the following options in Qt 5.5
        if jpeg_optimized:
            w.setOptimizedWrite(True)
        if jpeg_progressive:
            w.setProgressiveScanWrite(True)
        w.setQuality(compression_quality)
    elif fmt == 'PNG':
        # Map the 0-9 compression level onto Qt's 0-100 quality scale
        # (higher compression level -> lower quality number)
        cl = min(9, max(0, png_compression_level))
        w.setQuality(10 * (9-cl))
    elif fmt == 'WEBP':
        w.setQuality(compression_quality)
    if not w.write(img):
        raise ValueError('Failed to export image as ' + fmt + ' with error: ' + w.errorString())
    return ba.data()
def save_image(img, path, **kw):
    ''' Save image to the specified path. Image format is taken from the file
    extension. You can pass the same keyword arguments as for the
    `image_to_data()` function. '''
    # An explicit fmt keyword wins over the file extension
    kw.setdefault('fmt', path.rpartition('.')[-1])
    with open(path, 'wb') as f:
        f.write(image_to_data(image_from_data(img), **kw))
def save_cover_data_to(
    data, path=None,
    bgcolor='#ffffff',
    resize_to=None,
    compression_quality=90,
    minify_to=None,
    grayscale=False,
    eink=False,
    letterbox=False,
    letterbox_color='#000000',
    data_fmt='jpeg'
):
    '''
    Saves image in data to path, in the format specified by the path
    extension. Removes any transparency. If there is no transparency and no
    resize and the input and output image formats are the same, no changes are
    made.

    :param data: Image data as bytestring
    :param path: If None img data is returned, in JPEG format
    :param data_fmt: The fmt to return data in when path is None. Defaults to JPEG
    :param compression_quality: The quality of the image after compression.
        Number between 1 and 100. 1 means highest compression, 100 means no
        compression (lossless). When generating PNG this number is divided by 10
        for the png_compression_level.
    :param bgcolor: The color for transparent pixels. Must be specified in hex.
    :param resize_to: A tuple (width, height) or None for no resizing
    :param minify_to: A tuple (width, height) to specify maximum target size.
        The image will be resized to fit into this target size. If None the
        value from the tweak is used.
    :param grayscale: If True, the image is converted to grayscale,
        if that's not already the case.
    :param eink: If True, the image is dithered down to the 16 specific shades
        of gray of the eInk palette.
        Works best with formats that actually support color indexing (i.e., PNG)
    :param letterbox: If True, in addition to fit resize_to inside minify_to,
        the image will be letterboxed (i.e., centered on a black background).
    :param letterbox_color: If letterboxing is used, this is the background color
        used. The default is black.
    '''
    # `changed` tracks whether a re-encode is required; while it stays False
    # the original bytes are passed through untouched.
    fmt = normalize_format_name(data_fmt if path is None else os.path.splitext(path)[1][1:])
    if isinstance(data, QImage):
        img = data
        changed = True
    else:
        img, orig_fmt = image_and_format_from_data(data)
        orig_fmt = normalize_format_name(orig_fmt)
        changed = fmt != orig_fmt
    if resize_to is not None:
        changed = True
        img = img.scaled(int(resize_to[0]), int(resize_to[1]), Qt.AspectRatioMode.IgnoreAspectRatio, Qt.TransformationMode.SmoothTransformation)
    owidth, oheight = img.width(), img.height()
    if minify_to is None:
        nwidth, nheight = tweaks['maximum_cover_size']
        nwidth, nheight = max(1, nwidth), max(1, nheight)
    else:
        nwidth, nheight = minify_to
    if letterbox:
        img = blend_on_canvas(img, nwidth, nheight, bgcolor=letterbox_color)
        # Check if we were minified
        if oheight != nheight or owidth != nwidth:
            changed = True
    else:
        scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
        if scaled:
            changed = True
            img = img.scaled(int(nwidth), int(nheight), Qt.AspectRatioMode.IgnoreAspectRatio, Qt.TransformationMode.SmoothTransformation)
    if img.hasAlphaChannel():
        # Transparency is always removed, flattening onto bgcolor
        changed = True
        img = blend_image(img, bgcolor)
    if grayscale and not eink:
        if not img.allGray():
            changed = True
            img = grayscale_image(img)
    if eink:
        # NOTE: Keep in mind that JPG does NOT actually support indexed colors, so the JPG algorithm will then smush everything back into a 256c mess...
        # Thankfully, Nickel handles PNG just fine, and we potentially generate smaller files to boot, because they can be properly color indexed ;).
        img = eink_dither_image(img)
        changed = True
    if path is None:
        return image_to_data(img, compression_quality, fmt, compression_quality // 10) if changed else data
    with open(path, 'wb') as f:
        f.write(image_to_data(img, compression_quality, fmt, compression_quality // 10) if changed else data)
# }}}
# Overlaying images {{{
def blend_on_canvas(img, width, height, bgcolor='#ffffff'):
    ' Blend the `img` onto a canvas with the specified background color and size '
    iw, ih = img.width(), img.height()
    needs_scaling, sw, sh = fit_image(iw, ih, width, height)
    if needs_scaling:
        img = img.scaled(int(sw), int(sh), Qt.AspectRatioMode.IgnoreAspectRatio, Qt.TransformationMode.SmoothTransformation)
        iw, ih = sw, sh
    canvas = QImage(int(width), int(height), QImage.Format.Format_RGB32)
    canvas.fill(QColor(bgcolor))
    # Center the (possibly scaled) image on the canvas
    overlay_image(img, canvas, (width - iw)//2, (height - ih)//2)
    return canvas
class Canvas:

    ''' A simple fixed-size painting surface onto which images can be
    composed, usable as a context manager. '''

    def __init__(self, width, height, bgcolor='#ffffff'):
        self.img = QImage(int(width), int(height), QImage.Format.Format_RGB32)
        self.img.fill(QColor(bgcolor))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass  # nothing to release

    def compose(self, img, x=0, y=0):
        # Accepts raw bytes or a QImage; draws it at (x, y)
        overlay_image(image_from_data(img), self.img, x, y)

    def export(self, fmt='JPEG', compression_quality=95):
        # Serialize the canvas contents to a bytestring
        return image_to_data(self.img, compression_quality=compression_quality, fmt=fmt)
def create_canvas(width, height, bgcolor='#ffffff'):
    'Create a blank canvas of the specified size and color '
    canvas = QImage(int(width), int(height), QImage.Format.Format_RGB32)
    canvas.fill(QColor(bgcolor))
    return canvas
def overlay_image(img, canvas=None, left=0, top=0):
    ' Overlay the `img` onto the canvas at the specified position '
    if canvas is None:
        # No canvas given: create a white one the same size as the image
        canvas = QImage(img.size(), QImage.Format.Format_RGB32)
        canvas.fill(Qt.GlobalColor.white)
    imageops.overlay(img, canvas, int(left), int(top))
    return canvas
def texture_image(canvas, texture):
    ' Repeatedly tile the image `texture` across and down the image `canvas` '
    # imageops requires an opaque canvas, so flatten transparency first
    base = blend_image(canvas) if canvas.hasAlphaChannel() else canvas
    return imageops.texture_image(base, texture)
def blend_image(img, bgcolor='#ffffff'):
    ' Used to convert images that have semi-transparent pixels to opaque by blending with the specified color '
    background = QImage(img.size(), QImage.Format.Format_RGB32)
    background.fill(QColor(bgcolor))
    overlay_image(img, background)
    return background
# }}}
# Image borders {{{
def add_borders_to_image(img, left=0, top=0, right=0, bottom=0, border_color='#ffffff'):
    ' Add solid-color borders of the given widths around the image '
    img = image_from_data(img)
    if left <= 0 and right <= 0 and top <= 0 and bottom <= 0:
        # Nothing to add, return the image unchanged
        return img
    canvas = QImage(int(img.width() + left + right), int(img.height() + top + bottom), QImage.Format.Format_RGB32)
    canvas.fill(QColor(border_color))
    overlay_image(img, canvas, left, top)
    return canvas
def remove_borders_from_image(img, fuzz=None):
    ''' Try to auto-detect and remove any borders from the image. Returns
    the image itself if no borders could be removed. `fuzz` is a measure of
    what colors are considered identical (must be a number between 0 and 255 in
    absolute intensity units). Default is from a tweak whose default value is 10. '''
    if fuzz is None:
        fuzz = tweaks['cover_trim_fuzz_value']
    img = image_from_data(img)
    trimmed = imageops.remove_borders(img, int(max(0, fuzz)))
    # A size change means borders were actually removed
    return img if trimmed.size() == img.size() else trimmed
# }}}
# Cropping/scaling of images {{{
def resize_image(img, width, height):
    ' Resize the image to exactly the given dimensions, ignoring aspect ratio '
    w, h = int(width), int(height)
    return img.scaled(w, h, Qt.AspectRatioMode.IgnoreAspectRatio, Qt.TransformationMode.SmoothTransformation)
def resize_to_fit(img, width, height):
    ''' Scale the image down, preserving aspect ratio, so it fits inside
    width x height. Returns (was_resized, image). '''
    img = image_from_data(img)
    needs_resize, nw, nh = fit_image(img.width(), img.height(), width, height)
    if not needs_resize:
        return False, img
    return True, resize_image(img, nw, nh)
def clone_image(img):
    ''' Returns a shallow copy of the image. However, the underlying data buffer
    will be automatically copied-on-write '''
    return QImage(img)
def scale_image(data, width=60, height=80, compression_quality=70, as_png=False, preserve_aspect_ratio=True):
    ''' Scale an image, returning it as either JPEG or PNG data (bytestring).
    Transparency is alpha blended with white when converting to JPEG. Is thread
    safe and does not require a QApplication. '''
    # We use Qt instead of ImageMagick here because ImageMagick seems to use
    # some kind of memory pool, causing memory consumption to sky rocket.
    img = image_from_data(data)
    if preserve_aspect_ratio:
        needed, nw, nh = fit_image(img.width(), img.height(), width, height)
        if needed:
            img = img.scaled(int(nw), int(nh), Qt.AspectRatioMode.KeepAspectRatio, Qt.TransformationMode.SmoothTransformation)
    elif (img.width(), img.height()) != (width, height):
        img = img.scaled(int(width), int(height), Qt.AspectRatioMode.IgnoreAspectRatio, Qt.TransformationMode.SmoothTransformation)
    serialized = image_to_data(img, compression_quality=compression_quality, fmt='PNG' if as_png else 'JPEG')
    return img.width(), img.height(), serialized
def crop_image(img, x, y, width, height):
    '''
    Return the specified section of the image.

    :param x, y: The top left corner of the crop box
    :param width, height: The width and height of the crop box. Note that if
        the crop box exceeds the source images dimensions, width and height will be
        auto-truncated.
    '''
    qimg = image_from_data(img)
    w = min(width, qimg.width() - x)
    h = min(height, qimg.height() - y)
    return qimg.copy(int(x), int(y), int(w), int(h))
# }}}
# Image transformations {{{
def grayscale_image(img):
    ' Convert the image to grayscale '
    qimg = image_from_data(img)
    return imageops.grayscale(qimg)
def set_image_opacity(img, alpha=0.5):
    ''' Change the opacity of `img`. Note that the alpha value is multiplied to
    any existing alpha values, so you cannot use this function to convert a
    semi-transparent image to an opaque one. For that use `blend_image()`. '''
    qimg = image_from_data(img)
    return imageops.set_opacity(qimg, alpha)
def flip_image(img, horizontal=False, vertical=False):
    ' Mirror the image about the horizontal and/or vertical axis '
    qimg = image_from_data(img)
    return qimg.mirrored(horizontal, vertical)
def image_has_transparent_pixels(img):
    ' Return True iff the image has at least one semi-transparent pixel '
    qimg = image_from_data(img)
    if qimg.isNull():
        # A null image has no pixels at all
        return False
    return imageops.has_transparent_pixels(qimg)
def rotate_image(img, degrees):
    ' Rotate the image by the specified number of degrees '
    transform = QTransform()
    transform.rotate(degrees)
    return image_from_data(img).transformed(transform)
def gaussian_sharpen_image(img, radius=0, sigma=3, high_quality=True):
    ' Sharpen the image with a gaussian operator; negative radii are clamped to 0 '
    qimg = image_from_data(img)
    return imageops.gaussian_sharpen(qimg, max(0, radius), sigma, high_quality)
def gaussian_blur_image(img, radius=-1, sigma=3):
    ' Blur the image with a gaussian operator; negative radii are clamped to 0 '
    qimg = image_from_data(img)
    return imageops.gaussian_blur(qimg, max(0, radius), sigma)
def despeckle_image(img):
    ' Remove speckle noise from the image '
    qimg = image_from_data(img)
    return imageops.despeckle(qimg)
def oil_paint_image(img, radius=-1, high_quality=True):
    ' Apply an oil-paint effect to the image '
    qimg = image_from_data(img)
    return imageops.oil_paint(qimg, radius, high_quality)
def normalize_image(img):
    ' Normalize the image (contrast stretch) '
    qimg = image_from_data(img)
    return imageops.normalize(qimg)
def quantize_image(img, max_colors=256, dither=True, palette=''):
    ''' Quantize the image to contain a maximum of `max_colors` colors. By
    default a palette is chosen automatically, if you want to use a fixed
    palette, then pass in a list of color names in the `palette` variable. If
    you specify a palette, `max_colors` is ignored. Note that it is possible
    for the actual number of colors used to be less than max_colors.

    :param max_colors: Max. number of colors in the auto-generated palette. Must be between 2 and 256.
    :param dither: Whether to use dithering or not. dithering is almost always a good thing.
    :param palette: Use a manually specified palette instead. For example: palette='red green blue #eee'
    '''
    img = image_from_data(img)
    if img.hasAlphaChannel():
        # Quantization needs an opaque image
        img = blend_image(img)
    if palette and isinstance(palette, string_or_bytes):
        palette = palette.split()
    fixed_colors = tuple(QColor(name).rgb() for name in palette)
    return imageops.quantize(img, int(max_colors), dither, fixed_colors)
def eink_dither_image(img):
    ''' Dither the source image down to the eInk palette of 16 shades of grey,
    using ImageMagick's OrderedDither algorithm.

    NOTE: No need to call grayscale_image first, as this will inline a grayscaling pass if need be.
    Returns a QImage in Grayscale8 pixel format.
    '''
    qimg = image_from_data(img)
    if qimg.hasAlphaChannel():
        qimg = blend_image(qimg)
    return imageops.ordered_dither(qimg)
# }}}
# Optimization of images {{{
def run_optimizer(file_path, cmd, as_filter=False, input_data=None):
    '''
    Run an external image optimizer tool on file_path, replacing the file in
    place only if the tool produced a smaller output.

    :param cmd: The argv list for the tool. When not running as a filter, the
        placeholder values True and False in cmd are replaced with the input and
        output file names respectively.
    :param as_filter: If True the tool reads from stdin and writes to stdout
        instead of using named files.
    :param input_data: Optional file-like object to feed the filter; defaults
        to reading file_path.
    :return: The tool's output text on failure, None on success.
    '''
    file_path = os.path.abspath(file_path)
    cwd = os.path.dirname(file_path)
    ext = os.path.splitext(file_path)[1]
    if not ext or len(ext) > 10 or not ext.startswith('.'):
        # Guard against pathological extensions; some tools sniff the suffix
        ext = '.jpg'
    fd, outfile = tempfile.mkstemp(dir=cwd, suffix=ext)
    try:
        if as_filter:
            outf = os.fdopen(fd, 'wb')
        else:
            os.close(fd)
        iname, oname = os.path.basename(file_path), os.path.basename(outfile)
        input_size = os.path.getsize(file_path)

        def repl(q, r):
            # Replace a placeholder value in cmd with the real file name
            cmd[cmd.index(q)] = r
        if not as_filter:
            repl(True, iname), repl(False, oname)
        stdin = subprocess.PIPE if as_filter else None
        stderr = subprocess.PIPE if as_filter else subprocess.STDOUT
        creationflags = subprocess.DETACHED_PROCESS if iswindows else 0
        p = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=stderr, stdin=stdin, creationflags=creationflags)
        # The stream we read diagnostics from differs between the two modes
        stderr = p.stderr if as_filter else p.stdout
        if as_filter:
            src = input_data or open(file_path, 'rb')

            def copy(src, dest):
                # Pump data between the process and our files on worker threads
                # to avoid deadlocking on full pipe buffers
                try:
                    shutil.copyfileobj(src, dest)
                finally:
                    src.close(), dest.close()
            inw = Thread(name='CopyInput', target=copy, args=(src, p.stdin))
            inw.daemon = True
            inw.start()
            outw = Thread(name='CopyOutput', target=copy, args=(p.stdout, outf))
            outw.daemon = True
            outw.start()
        raw = force_unicode(stderr.read())
        if p.wait() != 0:
            # Tool failed: report its output, after closing the pipes
            p.stdout.close()
            if as_filter:
                p.stderr.close()
                p.stdin.close()
            return raw
        else:
            if as_filter:
                # Give the pump threads a bounded time to finish
                outw.join(60.0), inw.join(60.0)
                p.stdin.close()
                p.stderr.close()
            p.stdout.close()
            try:
                sz = os.path.getsize(outfile)
            except OSError:
                sz = 0
            if sz < 1:
                return '%s returned a zero size image' % cmd[0]
            if sz < input_size:
                # Only replace the original when the result is actually smaller
                shutil.copystat(file_path, outfile)
                atomic_rename(outfile, file_path)
    finally:
        try:
            os.remove(outfile)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
        try:
            os.remove(outfile + '.bak')  # optipng creates these files
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
def optimize_jpeg(file_path):
    ' Losslessly optimize the JPEG at file_path, in place, using jpegtran '
    argv = [get_exe_path('jpegtran')] + '-copy none -optimize -progressive -maxmemory 100M -outfile'.split() + [False, True]
    return run_optimizer(file_path, argv)
def optimize_png(file_path, level=7):
    ' level goes from 1 to 7 with 7 being maximum compression '
    argv = [get_exe_path('optipng')] + f'-fix -clobber -strip all -o{level} -out'.split() + [False, True]
    return run_optimizer(file_path, argv)
def run_cwebp(file_path, lossless, q, m, metadata):
    ' Re-encode the image at file_path, in place, using the cwebp tool '
    # Clamp quality and method effort to cwebp's accepted ranges
    q = max(0, min(q, 100))
    m = max(0, min(m, 6))
    exe = get_exe_path('cwebp')
    argv = [exe] + f'-mt -metadata {metadata} -q {q} -m {m} -o'.split() + [False, True]
    if lossless:
        argv.insert(1, '-lossless')
    return run_optimizer(file_path, argv)
def optimize_webp(file_path, q=100, m=6, metadata='all'):
    ' metadata can be a comma separated list of all, none, exif, icc, xmp '
    return run_cwebp(file_path, True, q, m, metadata)
def encode_jpeg(file_path, quality=80):
    '''
    Re-encode the image at file_path as JPEG, in place, using the cjpeg tool
    run as a filter: the image is first decoded with Qt and exported as PPM,
    which is then piped into cjpeg.

    :param quality: JPEG quality, clamped to 0-100
    :return: cjpeg's output text on failure, None on success
    '''
    from calibre.utils.speedups import ReadOnlyFileBuffer
    quality = max(0, min(100, int(quality)))
    exe = get_exe_path('cjpeg')
    cmd = [exe] + '-optimize -progressive -maxmemory 100M -quality'.split() + [str(quality)]
    img = QImage()
    if not img.load(file_path):
        raise ValueError('%s is not a valid image file' % file_path)
    ba = QByteArray()
    buf = QBuffer(ba)
    buf.open(QIODevice.OpenModeFlag.WriteOnly)
    if not img.save(buf, 'PPM'):
        raise ValueError('Failed to export image to PPM')
    # Snapshot the PPM bytes before closing the Qt buffer
    data = ReadOnlyFileBuffer(ba.data())
    buf.close()
    return run_optimizer(file_path, cmd, as_filter=True, input_data=data)
def encode_webp(file_path, quality=75, m=6, metadata='all'):
    ' Lossily re-encode the image at file_path as WEBP, in place '
    return run_cwebp(file_path, False, quality, m, metadata)
# }}}
# PIL images {{{
def align8to32(bytes, width, mode):
    """
    Converts each scanline of data from 8 bit to 32 bit aligned.

    :param bytes: Raw pixel data with 8-bit-aligned scanlines
    :param width: Image width in pixels
    :param mode: PIL mode string; determines bits per pixel
    """
    bits_per_pixel = {"1": 1, "L": 8, "P": 8, "I;16": 16}[mode]
    # Bytes per unpadded scanline, rounding up for partial trailing bytes
    full, spare = divmod(bits_per_pixel * width, 8)
    line_bytes = full + (1 if spare else 0)
    pad = -line_bytes % 4
    if not pad:
        # Already 32 bit aligned by luck
        return bytes
    padding = b"\x00" * pad
    last = (len(bytes) // line_bytes) * line_bytes
    return b"".join(
        bytes[start:start + line_bytes] + padding
        for start in range(0, last, line_bytes)
    )
def convert_PIL_image_to_pixmap(im, device_pixel_ratio=1.0):
    '''
    Convert a PIL Image into a QPixmap, choosing a QImage pixel format that
    matches the PIL mode and supplying a color table for indexed modes.
    '''
    data = None
    colortable = None
    if im.mode == "RGBA":
        fmt = QImage.Format.Format_RGBA8888
        data = im.tobytes("raw", "RGBA")
    elif im.mode == "1":
        # 1-bit bilevel image
        fmt = QImage.Format.Format_Mono
    elif im.mode == "L":
        # 8-bit grayscale mapped through an identity gray palette
        fmt = QImage.Format.Format_Indexed8
        colortable = [qRgba(i, i, i, 255) & 0xFFFFFFFF for i in range(256)]
    elif im.mode == "P":
        # Paletted image: build the Qt color table from the PIL palette triples
        fmt = QImage.Format.Format_Indexed8
        palette = im.getpalette()
        colortable = [qRgba(*palette[i : i + 3], 255) & 0xFFFFFFFF for i in range(0, len(palette), 3)]
    elif im.mode == "I;16":
        # Scale 16-bit values so Qt's Grayscale16 sees the full range
        im = im.point(lambda i: i * 256)
        fmt = QImage.Format.Format_Grayscale16
    else:
        # Fallback: convert anything else to RGBA and drop the alpha in Qt
        fmt = QImage.Format.Format_RGBX8888
        data = im.convert("RGBA").tobytes("raw", "RGBA")
    size = im.size
    # Modes without explicit raw data above need their scanlines padded to
    # the 32-bit alignment QImage expects
    data = data or align8to32(im.tobytes(), size[0], im.mode)
    qimg = QImage(data, size[0], size[1], fmt)
    if device_pixel_ratio != 1.0:
        qimg.setDevicePixelRatio(device_pixel_ratio)
    if colortable:
        qimg.setColorTable(colortable)
    return QPixmap.fromImage(qimg)
def read_xmp_from_pil_image(im) -> str:
    '''
    Extract the raw XMP packet from a PIL image, looking in the
    format-specific location for JPEG, PNG, WEBP and TIFF.

    Returns an empty string when no XMP data is present.
    NOTE(review): for JPEG the value comes from the APP1 segment and is
    bytes, not str, despite the annotation — callers appear to pass it to an
    XML parser which accepts both; confirm before relying on the type.
    '''
    fmt = im.format.lower()
    xml = ''
    if fmt == 'jpeg':
        for segment, content in im.applist:
            if segment == "APP1":
                # APP1 payload is an identifying marker, a NUL, then the packet
                marker, xmp_tags = content.split(b"\x00")[:2]
                if marker == b"http://ns.adobe.com/xap/1.0/":
                    xml = xmp_tags
                    break
    elif fmt == 'png':
        xml = im.info.get('XML:com.adobe.xmp', '')
    elif fmt == 'webp':
        xml = im.info.get("xmp", '')
    elif fmt == 'tiff':
        # TIFF tag 700 is the XMP packet
        xml = im.tag_v2.get(700, '')
    return xml
def read_text_from_container(container, target_lang=''):
    ''' Pick the best matching text from an XMP language-alternative
    container, preferring an exact language match, then a canonicalized
    match, then the x-default entry. Returns '' when nothing matches. '''
    XML_LANG = '{http://www.w3.org/XML/1998/namespace}lang'
    texts = {}
    for item in container.xpath('descendant::*[local-name()="li"]'):
        if item.text:
            texts[item.attrib.get(XML_LANG, 'x-default')] = item.text
    if not target_lang and 'x-default' in texts:
        return texts['x-default']
    if target_lang in texts:
        return texts[target_lang]
    from calibre.utils.localization import canonicalize_lang
    target_lang = canonicalize_lang(target_lang)
    if target_lang:
        for lang, text in texts.items():
            if canonicalize_lang(lang) == target_lang:
                return text
    return texts.get('x-default', '')
def read_alt_text_from_xmp(xmp, target_lang='') -> str:
    ''' Extract alt text from an XMP packet, checking the Iptc4xmpCore
    AltTextAccessibility property first and falling back to dc:description.
    Returns '' when the packet cannot be parsed or contains no text. '''
    from lxml import etree
    try:
        root = etree.fromstring(xmp)
    except Exception:
        return ''
    for node in root.xpath('//*[local-name()="AltTextAccessibility"]'):
        text = read_text_from_container(node, target_lang)
        if text:
            return text
    find_descriptions = etree.XPath('//dc:description', namespaces={'dc': 'http://purl.org/dc/elements/1.1/'})
    for node in find_descriptions(root):
        text = read_text_from_container(node, target_lang)
        if text:
            return text
    return ''
def read_alt_text(pil_im_or_path, target_lang='') -> str:
    ''' Return the alt text for an image (a PIL Image or a path to one),
    preferring XMP data and falling back to the EXIF ImageDescription tag. '''
    im = pil_im_or_path
    if isinstance(im, str):
        from PIL import Image
        im = Image.open(im)
    xmp = read_xmp_from_pil_image(im)
    if xmp:
        alt = read_alt_text_from_xmp(xmp, target_lang).strip()
        if alt:
            return alt
    exif = im.getexif()
    if exif:
        # EXIF tag 270 is ImageDescription
        desc = exif.get(270)
        if desc:
            return desc.strip()
    return ''
# }}}
def test():  # {{{
    '''
    Smoke test for the bundled external image tools (jpegtran, cjpeg,
    optipng, cwebp, JxrDecApp) and the imageops helpers. Raises SystemExit
    on the first failure.
    '''
    from glob import glob

    from calibre import CurrentDir
    from calibre.ptempfile import TemporaryDirectory
    img = image_from_data(I('lt.png', data=True, allow_user_override=False))
    with TemporaryDirectory() as tdir, CurrentDir(tdir):
        save_image(img, 'test.jpg')
        ret = optimize_jpeg('test.jpg')
        if ret is not None:
            raise SystemExit('optimize_jpeg failed: %s' % ret)
        ret = encode_jpeg('test.jpg')
        if ret is not None:
            raise SystemExit('encode_jpeg failed: %s' % ret)
        shutil.copyfile(I('lt.png'), 'test.png')
        ret = optimize_png('test.png')
        if ret is not None:
            raise SystemExit('optimize_png failed: %s' % ret)
        if glob('*.bak'):
            # run_optimizer() is expected to clean up optipng's .bak files
            raise SystemExit('Spurious .bak files left behind')
        save_image(img, 'test.webp', compression_quality=100)
        ret = optimize_webp('test.webp')
        if ret is not None:
            raise SystemExit('optimize_webp failed: %s' % ret)
        # Exercise the imageops-backed transforms for crashes
        quantize_image(img)
        oil_paint_image(img)
        gaussian_sharpen_image(img)
        gaussian_blur_image(img)
        despeckle_image(img)
        remove_borders_from_image(img)
        image_to_data(img, fmt='GIF')
        p = subprocess.Popen([get_exe_path('JxrDecApp'), '-h'],
                creationflags=subprocess.DETACHED_PROCESS if iswindows else 0,
                stdout=subprocess.PIPE)
        raw, _ = p.communicate()
        p.wait()
        if b'JPEG XR Decoder Utility' not in raw:
            raise SystemExit('Failed to run JxrDecApp')
# }}}
if __name__ == '__main__':  # {{{
    # Simple CLI: img.py infile funcname [key=val ...] [outfile]
    # Applies the named transform to infile and writes the result to outfile
    # (defaulting to <name>-output.<ext> next to the current directory).
    args = sys.argv[1:]
    infile = args.pop(0)
    # Use a context manager so the input file handle is not leaked
    with open(infile, 'rb') as inf:
        img = image_from_data(inf.read())
    func = globals()[args.pop(0)]
    kw = {}
    outf = None
    for k in args:
        if '=' in k:
            n, v = k.partition('=')[::2]
            # Coerce the string value: bool, then int, then float, else leave as str.
            # The bool case must not fall through to int(), which would turn
            # True/False into 1/0.
            if v in ('True', 'False'):
                v = v == 'True'
            else:
                try:
                    v = int(v)
                except Exception:
                    try:
                        v = float(v)
                    except Exception:
                        pass
            kw[n] = v
        else:
            outf = k
    if outf is None:
        bn = os.path.basename(infile)
        # Fixed: previously produced 'name.-outputext'; now 'name-output.ext'
        outf = bn.rpartition('.')[0] + '-output.' + bn.rpartition('.')[-1]
    img = func(img, **kw)
    with open(outf, 'wb') as f:
        f.write(image_to_data(img, fmt=outf.rpartition('.')[-1]))
# }}}
| 30,931 | Python | .py | 718 | 36.162953 | 155 | 0.648405 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
#!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import hashlib
import io
import json
import os
import shutil
import struct
import sys
import tempfile
import time
import uuid
from collections import Counter
from typing import NamedTuple
from calibre import prints
from calibre.constants import config_dir, filesystem_encoding, iswindows
from calibre.utils.config import JSONConfig
from calibre.utils.config_base import StringConfig, create_global_prefs, prefs
from calibre.utils.filenames import samefile
from calibre.utils.localization import _
from polyglot.binary import as_hex_unicode
from polyglot.builtins import error_message, iteritems
# Export {{{
class FileDest:

    ''' A writable, file-like destination that streams data into an Exporter
    while recording the size and SHA1 of everything written. On close() the
    (part, position, size, digest, mtime) record is stored in the exporter's
    file_metadata under this destination's key, unless discard() was called. '''

    def __init__(self, key, exporter, mtime=None):
        self.exporter, self.key = exporter, key
        self.hasher = hashlib.sha1()
        self.start_part_number, self.start_pos = exporter.current_pos()
        self._discard = False
        self.mtime = mtime
        self.size = 0

    def discard(self):
        # Prevent close() from registering this file with the exporter
        self._discard = True

    def write(self, data):
        written = self.exporter.write(data)
        if written != len(data):
            raise RuntimeError(f'Exporter failed to write all data: {len(data)} != {written}')
        self.size += len(data)
        self.hasher.update(data)

    def flush(self):
        pass  # writes go straight to the exporter

    def close(self):
        if not self._discard:
            digest = str(self.hasher.hexdigest())
            entry = (self.start_part_number, self.start_pos, self.size, digest, self.mtime)
            self.exporter.file_metadata[self.key] = entry
        del self.exporter, self.hasher

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
class Exporter:

    '''
    Writes an export as a sequence of part files of bounded size. Each part
    ends with a fixed-size tail record (part number, format version, is-last
    flag). The last part additionally carries a JSON metadata blob followed
    by its length, so an Importer can locate it by seeking from the end.
    '''

    VERSION = 1  # bump when the on-disk layout changes incompatibly
    TAIL_FMT = b'!II?'  # part_num, version, is_last
    MDATA_SZ_FMT = b'!Q'  # length of the JSON metadata blob in the last part
    EXT = '.calibre-data'

    @classmethod
    def tail_size(cls):
        # Bytes occupied by the fixed tail record at the end of every part
        return struct.calcsize(cls.TAIL_FMT)

    def __init__(self, path_to_export_dir, part_size=None):
        # default part_size is 1 GB
        self.part_size = (1 << 30) if part_size is None else part_size
        self.base = os.path.abspath(path_to_export_dir)
        self.commited_parts = []
        self.current_part = None
        self.file_metadata = {}
        self.tail_sz = self.tail_size()
        self.metadata = {'file_metadata': self.file_metadata}

    def set_metadata(self, key, val):
        # Top-level metadata keys may only be set once
        if key in self.metadata:
            raise KeyError('The metadata already contains the key: %s' % key)
        self.metadata[key] = val

    def current_pos(self):
        '''
        Return (part_number, offset) where the next write will land, rolling
        over to a fresh part if the current one cannot hold any more payload.
        '''
        pos = 0
        if self.current_part is not None:
            pos = self.current_part.tell()
            if pos >= self.part_size - self.tail_sz:
                self.new_part()
                pos = 0
        return len(self.commited_parts) + 1, pos

    def write(self, data: bytes) -> int:
        ''' Write data, splitting it across part files as needed; returns the
        number of bytes written. '''
        written = 0
        data = memoryview(data)
        while len(data) > 0:
            if self.current_part is None:
                self.new_part()
            # Leave room for the tail record at the end of each part
            max_size = self.part_size - self.tail_sz - self.current_part.tell()
            if max_size <= 0:
                self.new_part()
                max_size = self.part_size - self.tail_sz
            chunk = data[:max_size]
            w = self.current_part.write(chunk)
            data = data[w:]
            written += w
        return written

    def new_part(self):
        # Finish the current part (writing its tail) and open the next one
        self.commit_part()
        self.current_part = open(os.path.join(
            self.base, f'part-{len(self.commited_parts) + 1:04d}{self.EXT}'), 'wb')

    def commit_part(self, is_last=False):
        # Append the tail record and close the part file
        if self.current_part is not None:
            self.current_part.write(struct.pack(self.TAIL_FMT, len(self.commited_parts) + 1, self.VERSION, is_last))
            self.current_part.close()
            self.commited_parts.append(self.current_part.name)
            self.current_part = None

    def commit(self):
        ''' Finalize the export: write the JSON metadata and its size into a
        final part marked is_last. '''
        raw = json.dumps(self.metadata, ensure_ascii=False)
        if not isinstance(raw, bytes):
            raw = raw.encode('utf-8')
        self.new_part()
        # Temporarily disable part splitting so metadata stays in one part
        orig, self.part_size = self.part_size, sys.maxsize
        self.write(raw)
        self.write(struct.pack(self.MDATA_SZ_FMT, len(raw)))
        self.part_size = orig
        self.commit_part(is_last=True)

    def add_file(self, fileobj, key):
        # Stream an open file into the export, preserving its mtime if available
        try:
            mtime = os.fstat(fileobj.fileno()).st_mtime
        except (io.UnsupportedOperation, OSError):
            mtime = None
        with self.start_file(key, mtime=mtime) as dest:
            shutil.copyfileobj(fileobj, dest)

    def start_file(self, key, mtime=None):
        # Return a writable destination that registers itself under key on close
        return FileDest(key, self, mtime=mtime)

    def export_dir(self, path, dir_key):
        ''' Recursively export every file under path; the list of
        (key, relative path) pairs is stored in the metadata under dir_key. '''
        pkey = as_hex_unicode(dir_key)
        self.metadata[dir_key] = files = []
        for dirpath, dirnames, filenames in os.walk(path):
            for fname in filenames:
                fpath = os.path.join(dirpath, fname)
                rpath = os.path.relpath(fpath, path).replace(os.sep, '/')
                key = f'{pkey}:{rpath}'
                try:
                    with open(fpath, 'rb') as f:
                        self.add_file(f, key)
                except OSError:
                    if not iswindows:
                        raise
                    # On windows the file may be transiently locked; retry once
                    time.sleep(1)
                    with open(fpath, 'rb') as f:
                        self.add_file(f, key)
                files.append((key, rpath))
files.append((key, rpath))
def all_known_libraries():
    ''' Return a mapping of library path -> usage count for every calibre
    library known from the GUI usage stats plus the current library,
    keeping only paths that contain a metadata.db and de-duplicating paths
    that point at the same database. '''
    from calibre.gui2 import gprefs
    stats = gprefs.get('library_usage_stats', {})
    candidates = set(stats)
    if prefs['library_path']:
        candidates.add(prefs['library_path'])
    ans = {}
    for candidate in candidates:
        mdb = os.path.join(candidate, 'metadata.db')
        if not os.path.exists(mdb):
            continue
        if any(samefile(mdb, os.path.join(seen, 'metadata.db')) for seen in ans):
            continue  # same library reached via a different path
        ans[candidate] = stats.get(candidate, 1)
    return ans
def export(destdir, library_paths=None, dbmap=None, progress1=None, progress2=None, abort=None):
    '''
    Export all the specified calibre libraries plus the configuration
    directory into destdir as a set of part files.

    :param library_paths: mapping of library path -> usage count; defaults to
        all known libraries
    :param dbmap: optional mapping of library path -> already-open db, to
        avoid re-opening libraries the caller has open
    :param progress1: callback(name, num, total) reporting per-library progress
    :param progress2: callback passed through to per-library export
    :param abort: optional Event-like object; when set, the export stops early
    '''
    from calibre.db.backend import DB
    from calibre.db.cache import Cache
    if library_paths is None:
        library_paths = all_known_libraries()
    dbmap = dbmap or {}
    dbmap = {os.path.normcase(os.path.abspath(k)):v for k, v in iteritems(dbmap)}
    exporter = Exporter(destdir)
    exporter.metadata['libraries'] = libraries = {}
    total = len(library_paths) + 1
    for i, (lpath, count) in enumerate(iteritems(library_paths)):
        if abort is not None and abort.is_set():
            return
        if progress1 is not None:
            progress1(lpath, i, total)
        key = os.path.normcase(os.path.abspath(lpath))
        db, closedb = dbmap.get(lpath), False
        if db is None:
            # Not already open: open a private Cache and close it when done
            db = Cache(DB(lpath, load_user_formatter_functions=False))
            db.init()
            closedb = True
        else:
            db = db.new_api
        db.export_library(key, exporter, progress=progress2, abort=abort)
        if closedb:
            db.close()
        libraries[key] = count
    if progress1 is not None:
        progress1(_('Settings and plugins'), total-1, total)
    if abort is not None and abort.is_set():
        return
    exporter.export_dir(config_dir, 'config_dir')
    exporter.commit()
    if progress1 is not None:
        progress1(_('Completed'), total, total)
# }}}
# Import {{{
class Chunk(NamedTuple):
    ' One contiguous piece of a logical exported file, stored in a single part file '
    part_num: int  # 1-based number of the part file holding this chunk
    pos_in_part: int  # byte offset of the chunk inside its part file
    size: int  # length of the chunk in bytes
    pos_in_file: int  # byte offset of the chunk inside the logical file
class Pos:

    '''
    A seekable read cursor over a logical file that is stored as one or more
    chunks spread across part files. Maintains at most one open part file at
    a time, reopening parts lazily as reads cross chunk boundaries.
    '''

    def __init__(self, part, pos_in_part, size, importer):
        self.size = size
        self.pos_in_file = 0
        self.chunks = chunks = []
        self.open_part = importer.open_part
        self.currently_open_part = None
        self.currently_open_chunk_index = -1

        # Split the logical file into per-part chunks, starting at
        # (part, pos_in_part) and spilling into subsequent parts as needed
        pos = 0
        while size > 0:
            part_size = importer.size_of_part(part)
            chunk_size = min(size, part_size - pos_in_part)
            if chunk_size > 0:
                chunks.append(Chunk(part, pos_in_part, chunk_size, pos))
                size -= chunk_size
                pos += chunk_size
            part += 1
            pos_in_part = 0

    def close(self):
        # Release the currently open part file handle, if any
        if self.currently_open_part is not None:
            self.currently_open_part.close()
            self.currently_open_part = None
        self.currently_open_chunk_index = -1

    def tell(self) -> int:
        return self.pos_in_file

    def seek(self, amt, whence=os.SEEK_SET) -> int:
        # NOTE(review): an unrecognized whence value leaves new_pos_in_file
        # unbound and raises NameError; callers only use the three os.SEEK_*
        # constants — confirm before hardening.
        if whence == os.SEEK_SET:
            new_pos_in_file = amt
        if whence == os.SEEK_END:
            new_pos_in_file = self.size + amt
        if whence == os.SEEK_CUR:
            new_pos_in_file = self.pos_in_file + amt
        # Clamp to [0, size] like a regular file
        self.pos_in_file = max(0, min(new_pos_in_file, self.size))
        return self.pos_in_file

    def read(self, size=None):
        ''' Read up to size bytes from the current position; None or a
        negative size reads to the end of the logical file. '''
        if size is None or size < 0:
            size = self.size
        size = min(size, self.size)
        amt_left = max(0, self.size - self.pos_in_file)
        amt_to_read = min(amt_left, size)
        if amt_to_read <= 0:
            return b''
        # Find the chunk containing pos_in_file, starting the scan at the
        # currently open chunk since sequential reads usually stay there
        start_chunk = max(0, self.currently_open_chunk_index)
        num = len(self.chunks)
        ans = []
        chunk_idx = -1
        for i in range(num):
            chunk_idx = (start_chunk + i) % num
            chunk = self.chunks[chunk_idx]
            if chunk.pos_in_file <= self.pos_in_file < chunk.pos_in_file + chunk.size:
                break
        else:
            raise ValueError(f'No chunk found containing {self.pos_in_file=}')
        # Read forward through consecutive chunks until satisfied or EOF
        while amt_to_read > 0:
            try:
                chunk = self.chunks[chunk_idx]
            except IndexError:
                break
            ans.append(self._read_chunk(chunk, amt_to_read, chunk_idx))
            amt_to_read -= len(ans[-1])
            chunk_idx += 1
        return b''.join(ans)

    def _read_chunk(self, chunk, size, chunk_idx):
        # Ensure the part file backing this chunk is open, then read at most
        # `size` bytes without crossing the chunk's end
        if self.currently_open_chunk_index != chunk_idx or self.currently_open_part is None:
            self.close()
            self.currently_open_part = self.open_part(chunk.part_num)
            self.currently_open_chunk_index = chunk_idx
        offset_from_start_of_chunk = self.pos_in_file - chunk.pos_in_file
        self.currently_open_part.seek(chunk.pos_in_part + offset_from_start_of_chunk, os.SEEK_SET)
        size = min(size, chunk.size - offset_from_start_of_chunk)
        ans = self.currently_open_part.read(size)
        self.pos_in_file += len(ans)
        return ans
class FileSource:

    ''' A read-only, file-like view of a single exported file. Data read is
    fed through a SHA1 hasher; on close(), a digest mismatch registers the
    file's description in the importer's corrupted_files list. '''

    def __init__(self, start_partnum, start_pos, size, digest, description, mtime, importer):
        self.size, self.digest, self.description = size, digest, description
        self.mtime = mtime
        self.start = start_pos
        self.start_partnum = start_partnum
        self.pos = Pos(start_partnum, start_pos, size, importer)
        self.hasher = hashlib.sha1()
        self.importer = importer
        self.check_hash = True

    def seekable(self):
        # Reported as non-seekable so generic consumers stream sequentially,
        # even though seek() is implemented (seeking would break hashing)
        return False

    def seek(self, amt, whence=os.SEEK_SET):
        return self.pos.seek(amt, whence)

    def tell(self):
        return self.pos.tell()

    def read(self, size=None):
        data = self.pos.read(size)
        if self.check_hash and data:
            self.hasher.update(data)
        return data

    def close(self):
        if self.check_hash and self.hasher.hexdigest() != self.digest:
            self.importer.corrupted_files.append(self.description)
        self.hasher = None
        self.pos.close()
        self.pos = None

    def __enter__(self):
        return self

    def __exit__(self, *a):
        self.close()
class Importer:
def __init__(self, path_to_export_dir):
self.corrupted_files = []
part_map = {}
self.tail_size = tail_size = struct.calcsize(Exporter.TAIL_FMT)
self.version = -1
for name in os.listdir(path_to_export_dir):
if name.lower().endswith(Exporter.EXT):
path = os.path.join(path_to_export_dir, name)
with open(path, 'rb') as f:
f.seek(0, os.SEEK_END)
size_of_part = f.tell()
f.seek(-tail_size, os.SEEK_END)
raw = f.read()
if len(raw) != tail_size:
raise ValueError('The exported data in %s is not valid, tail too small' % name)
part_num, version, is_last = struct.unpack(Exporter.TAIL_FMT, raw)
if version > Exporter.VERSION:
raise ValueError('The exported data in %s is not valid,'
' version (%d) is higher than maximum supported version.'
' You might need to upgrade calibre first.' % (name, version))
part_map[part_num] = path, is_last, size_of_part
if self.version == -1:
self.version = version
if version != self.version:
raise ValueError(f'The exported data in {name} is not valid as it contains a mix of parts with versions: {self.version} and {version}')
nums = sorted(part_map)
if not nums:
raise ValueError('No exported data found in: %s' % path_to_export_dir)
if nums[0] != 1:
raise ValueError('The first part of this exported data set is missing')
if not part_map[nums[-1]][1]:
raise ValueError('The last part of this exported data set is missing')
if len(nums) != nums[-1]:
raise ValueError('There are some parts of the exported data set missing')
self.part_map, self.part_size_map = {}, {}
for part_num, (path, is_last, size_of_part) in part_map.items():
self.part_map[part_num] = path
self.part_size_map[part_num] = size_of_part
msf = struct.calcsize(Exporter.MDATA_SZ_FMT)
offset = tail_size + msf
with self.open_part(nums[-1]) as f:
f.seek(-offset, os.SEEK_END)
sz, = struct.unpack(Exporter.MDATA_SZ_FMT, f.read(msf))
f.seek(- sz - offset, os.SEEK_END)
self.metadata = json.loads(f.read(sz))
self.file_metadata = self.metadata['file_metadata']
def size_of_part(self, num):
return self.part_size_map[num] - self.tail_size
def open_part(self, num):
    # Open the on-disk file backing part ``num`` for binary reading.
    path = self.part_map[num]
    return open(path, 'rb')
def start_file(self, key, description):
    # Return a FileSource positioned at the stored file identified by
    # ``key``. ``description`` is presumably only used in error reporting —
    # see the FileSource class (defined elsewhere in this file).
    partnum, pos, size, digest, mtime = self.file_metadata[key]
    return FileSource(partnum, pos, size, digest, description, mtime, self)
def save_file(self, key, description, output_path):
    # Extract the stored file identified by ``key`` into ``output_path``.
    with open(output_path, 'wb') as dest:
        with self.start_file(key, description) as src:
            shutil.copyfileobj(src, dest)
def export_config(self, base_dir, library_usage_stats):
    # Restore the exported calibre configuration directory into ``base_dir``
    # and rewrite global.py with a fresh installation_uuid and the most used
    # library as the default library_path.
    for key, relpath in self.metadata['config_dir']:
        with self.start_file(key, relpath) as f:
            path = os.path.join(base_dir, relpath.replace('/', os.sep))
            try:
                with open(path, 'wb') as dest:
                    shutil.copyfileobj(f, dest)
            except OSError:
                # Parent folder is missing: create it and retry once
                os.makedirs(os.path.dirname(path))
                with open(path, 'wb') as dest:
                    shutil.copyfileobj(f, dest)
    gpath = os.path.join(base_dir, 'global.py')
    try:
        with open(gpath, 'rb') as f:
            raw = f.read()
    except OSError:
        raw = b''
    try:
        # Most frequently used library becomes the default library_path
        lpath = library_usage_stats.most_common(1)[0][0]
    except Exception:
        lpath = None
    c = create_global_prefs(StringConfig(raw, 'calibre wide preferences'))
    # A new machine gets a new installation identity
    c.set('installation_uuid', str(uuid.uuid4()))
    c.set('library_path', lpath)
    raw = c.src
    if not isinstance(raw, bytes):
        raw = raw.encode('utf-8')
    with open(gpath, 'wb') as f:
        f.write(raw)
    gprefs = JSONConfig('gui', base_path=base_dir)
    gprefs['library_usage_stats'] = dict(library_usage_stats)
def import_data(importer, library_path_map, config_location=None, progress1=None, progress2=None, abort=None):
    # Import all libraries in ``library_path_map`` (exported key -> destination
    # folder), then the configuration directory, which is rebuilt in a sibling
    # temporary folder and renamed over ``config_location``.
    # progress1 reports per-library progress, progress2 is forwarded to the
    # library importer; ``abort`` is an optional threading.Event-like object.
    from calibre.db.cache import import_library
    config_location = config_location or config_dir
    config_location = os.path.abspath(os.path.realpath(config_location))
    total = len(library_path_map) + 1
    library_usage_stats = Counter()
    for i, (library_key, dest) in enumerate(iteritems(library_path_map)):
        if abort is not None and abort.is_set():
            return
        if isinstance(dest, bytes):
            dest = dest.decode(filesystem_encoding)
        if progress1 is not None:
            progress1(dest, i, total)
        try:
            os.makedirs(dest)
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
        if not os.path.isdir(dest):
            raise ValueError('%s is not a directory' % dest)
        import_library(library_key, importer, dest, progress=progress2, abort=abort).close()
        stats_key = os.path.abspath(dest).replace(os.sep, '/')
        library_usage_stats[stats_key] = importer.metadata['libraries'].get(library_key, 1)
    if progress1 is not None:
        progress1(_('Settings and plugins'), total - 1, total)
    if abort is not None and abort.is_set():
        return
    # Build the new config dir next to the final location so the rename below
    # stays on the same filesystem
    base_dir = tempfile.mkdtemp(dir=os.path.dirname(config_location))
    importer.export_config(base_dir, library_usage_stats)
    if os.path.lexists(config_location):
        if os.path.islink(config_location) or os.path.isfile(config_location):
            os.remove(config_location)
        else:
            shutil.rmtree(config_location, ignore_errors=True)
            if os.path.exists(config_location):
                try:
                    shutil.rmtree(config_location)
                except OSError:
                    # On Windows another process may briefly hold the folder
                    if not iswindows:
                        raise
                    time.sleep(1)
                    shutil.rmtree(config_location)
    try:
        os.rename(base_dir, config_location)
    except OSError:
        time.sleep(2)  # May fail on Windows if something has locked the folder
        os.rename(base_dir, config_location)
    from calibre.gui2 import gprefs
    gprefs.refresh()
    if progress1 is not None:
        progress1(_('Completed'), total, total)
def test_import(export_dir='/t/ex', import_dir='/t/imp'):
    # Developer helper: round-trip a previously created export through
    # import_data(), skipping any library whose path contains 'largelib'.
    importer = Importer(export_dir)
    if os.path.exists(import_dir):
        shutil.rmtree(import_dir)
    os.mkdir(import_dir)
    import_data(importer, {k:os.path.join(import_dir, os.path.basename(k)) for k in importer.metadata['libraries'] if 'largelib' not in k},
                config_location=os.path.join(import_dir, 'calibre-config'), progress1=print, progress2=print)
def cli_report(*args, **kwargs):
    """Best-effort console progress reporting: OSError is swallowed."""
    try:
        prints(*args, **kwargs)
    except OSError:
        # stdout may be gone (e.g. a closed pipe); reporting is best-effort
        pass
def input_unicode(prompt):
    """Wrapper around input() that always returns str, decoding bytes
    responses using the stdin encoding."""
    response = input(prompt)
    if isinstance(response, bytes):
        response = response.decode(sys.stdin.encoding)
    return response
def run_exporter(export_dir=None, args=None, check_known_libraries=True):
    # Entry point for exporting libraries. With ``args`` (CLI mode): args[0]
    # is the destination folder and the rest are library paths, or the
    # literal 'all'. Without args, prompts interactively on stdin.
    if args:
        if len(args) < 2:
            raise SystemExit('You must specify the export folder and libraries to export')
        export_dir = args[0]
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        if os.listdir(export_dir):
            raise SystemExit('%s is not empty' % export_dir)
        all_libraries = {os.path.normcase(os.path.abspath(path)):lus for path, lus in iteritems(all_known_libraries())}
        if 'all' in args[1:]:
            libraries = set(all_libraries)
        else:
            libraries = {os.path.normcase(os.path.abspath(os.path.expanduser(path))) for path in args[1:]}
        if check_known_libraries and libraries - set(all_libraries):
            raise SystemExit('Unknown library: ' + tuple(libraries - set(all_libraries))[0])
        # NOTE(review): with check_known_libraries=False, an unknown path
        # raises KeyError here — confirm that is intended
        libraries = {p: all_libraries[p] for p in libraries}
        print('Exporting libraries:', ', '.join(sorted(libraries)), 'to:', export_dir)
        export(export_dir, progress1=cli_report, progress2=cli_report, library_paths=libraries)
        return
    # Interactive mode
    export_dir = export_dir or input_unicode(
        'Enter path to an empty folder (all exported data will be saved inside it): ').rstrip('\r')
    if not os.path.exists(export_dir):
        os.makedirs(export_dir)
    if not os.path.isdir(export_dir):
        raise SystemExit('%s is not a folder' % export_dir)
    if os.listdir(export_dir):
        raise SystemExit('%s is not empty' % export_dir)
    library_paths = {}
    for lpath, lus in iteritems(all_known_libraries()):
        if input_unicode('Export the library %s [y/n]: ' % lpath).strip().lower() == 'y':
            library_paths[lpath] = lus
    if library_paths:
        export(export_dir, progress1=cli_report, progress2=cli_report, library_paths=library_paths)
    else:
        raise SystemExit('No libraries selected for export')
def run_importer():
    # Interactive entry point: prompt for the folder holding exported data
    # and an empty destination folder, then import every library found.
    export_dir = input_unicode('Enter path to folder containing previously exported data: ').rstrip('\r')
    if not os.path.isdir(export_dir):
        raise SystemExit('%s is not a folder' % export_dir)
    try:
        importer = Importer(export_dir)
    except ValueError as err:
        raise SystemExit(error_message(err))

    import_dir = input_unicode('Enter path to an empty folder (all libraries will be created inside this folder): ').rstrip('\r')
    if not os.path.exists(import_dir):
        os.makedirs(import_dir)
    if not os.path.isdir(import_dir):
        raise SystemExit('%s is not a folder' % import_dir)
    if os.listdir(import_dir):
        raise SystemExit('%s is not empty' % import_dir)
    import_data(importer, {
        k:os.path.join(import_dir, os.path.basename(k)) for k in importer.metadata['libraries']}, progress1=cli_report, progress2=cli_report)
# }}}
if __name__ == '__main__':
export(sys.argv[-1], progress1=print, progress2=print)
| 22,053 | Python | .py | 516 | 33.069767 | 155 | 0.599403 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,138 | unicode_names.py | kovidgoyal_calibre/src/calibre/utils/unicode_names.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
from collections import defaultdict
from calibre.utils.icu import ord_string
from polyglot.builtins import iteritems
def character_name_from_code(code):
    """Return the Unicode name for codepoint ``code``, or 'U+XXXX' when the
    extension module has no name for it."""
    from calibre_extensions.unicode_names import name_for_codepoint
    name = name_for_codepoint(code)
    return name or f'U+{code:X}'
def html_entities():
    # Map of lower-cased HTML5 entity names to the set of codepoints they
    # expand to; built lazily and memoized on the function object itself.
    ans = getattr(html_entities, 'ans', None)
    if ans is None:
        from calibre.ebooks.html_entities import html5_entities
        ans = defaultdict(set)
        for name, char in iteritems(html5_entities):
            try:
                ans[name.lower()].add(ord_string(char)[0])
            except TypeError:
                # Skip entities whose expansion cannot be reduced to a
                # single codepoint
                continue
        # U+202F (narrow no-break space) added explicitly — presumably
        # missing from the html5_entities table
        ans['nnbsp'].add(0x202F)
        ans = dict(ans)
        html_entities.ans = ans
    return ans
def points_for_word(w):
    """Returns the set of all codepoints that contain ``word`` in their names"""
    key = w.lower()
    cached = points_for_word.cache.get(key)
    if cached is not None:
        return cached
    from calibre_extensions.unicode_names import codepoints_for_word
    result = codepoints_for_word(key) | html_entities().get(key, set())
    points_for_word.cache[key] = result
    return result


points_for_word.cache = {}  # noqa
| 1,262 | Python | .py | 32 | 32.84375 | 80 | 0.670221 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,139 | ordered_dict.py | kovidgoyal_calibre/src/calibre/utils/ordered_dict.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
OrderedDict
| 198 | Python | .py | 6 | 31 | 58 | 0.741935 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,140 | icu_test.py | kovidgoyal_calibre/src/calibre/utils/icu_test.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
import unittest
from contextlib import contextmanager
import calibre.utils.icu as icu
from polyglot.builtins import cmp, iteritems
@contextmanager
def make_collation_func(name, locale, numeric=True, maker=icu.make_sort_key_func, func='strcmp'):
    # Yield a collation helper built by ``maker`` from a freshly constructed
    # ICU collator for ``locale``. ``name`` is unused; it only labels the
    # call site.
    def coll():
        ans = icu._icu.Collator(locale)
        ans.numeric = numeric
        return ans
    yield maker(coll, func)
class TestICU(unittest.TestCase):

    '''Tests for the ICU wrappers in calibre.utils.icu: sorting/collation,
    case changing, searching, break iteration and character naming.'''

    ae = unittest.TestCase.assertEqual
    ane = unittest.TestCase.assertNotEqual

    def setUp(self):
        icu.change_locale('en')

    def test_sorting(self):
        ' Test the various sorting APIs '
        german = '''Sonntag Montag Dienstag Januar Februar März Fuße Fluße Flusse flusse fluße flüße flüsse'''.split()
        german_good = '''Dienstag Februar flusse Flusse fluße Fluße flüsse flüße Fuße Januar März Montag Sonntag'''.split()
        french = '''dimanche lundi mardi janvier février mars déjà Meme deja même dejà bpef bœg Boef Mémé bœf boef bnef pêche pèché pêché pêche pêché'''.split()
        french_good = '''bnef boef Boef bœf bœg bpef deja dejà déjà dimanche février janvier lundi mardi mars Meme Mémé même pèché pêche pêche pêché pêché'''.split()  # noqa

        # Test corner cases
        sort_key = icu.sort_key
        s = '\U0001f431'
        self.ae(sort_key(s), sort_key(s.encode(sys.getdefaultencoding())), 'UTF-8 encoded object not correctly decoded to generate sort key')
        self.ae(s.encode('utf-16'), s.encode('utf-16'), 'Undecodable bytestring not returned as itself')
        self.ae(b'', sort_key(None))
        self.ae(0, icu.strcmp(None, b''))
        self.ae(0, icu.strcmp(s, s.encode(sys.getdefaultencoding())))

        # Test locales
        with make_collation_func('dsk', 'de', maker=icu.make_sort_key_func, func='sort_key') as dsk:
            self.ae(german_good, sorted(german, key=dsk))
            with make_collation_func('dcmp', 'de', maker=icu.make_two_arg_func, func='strcmp') as dcmp:
                for x in german:
                    for y in german:
                        self.ae(cmp(dsk(x), dsk(y)), dcmp(x, y))

        with make_collation_func('fsk', 'fr', maker=icu.make_sort_key_func, func='sort_key') as fsk:
            self.ae(french_good, sorted(french, key=fsk))
            with make_collation_func('fcmp', 'fr', maker=icu.make_two_arg_func) as fcmp:
                for x in french:
                    for y in french:
                        self.ae(cmp(fsk(x), fsk(y)), fcmp(x, y))

        with make_collation_func('ssk', 'es', maker=icu.make_sort_key_func, func='sort_key') as ssk:
            self.assertNotEqual(ssk('peña'), ssk('pena'))
            with make_collation_func('scmp', 'es', maker=icu.make_two_arg_func) as scmp:
                self.assertNotEqual(0, scmp('pena', 'peña'))

        for k, v in iteritems({'pèché': 'peche', 'flüße':'Flusse', 'Štepánek':'ŠtepaneK'}):
            self.ae(0, icu.primary_strcmp(k, v))

        # Test different types of collation
        # 'Aä' was mojibake ('A√§') in the previous revision
        self.ae(icu.primary_sort_key('Aä'), icu.primary_sort_key('aa'))
        self.assertLess(icu.numeric_sort_key('something 2'), icu.numeric_sort_key('something 11'))
        self.assertLess(icu.case_sensitive_sort_key('A'), icu.case_sensitive_sort_key('a'))
        self.ae(0, icu.strcmp('a', 'A'))
        self.ae(cmp('a', 'A'), icu.case_sensitive_strcmp('a', 'A'))
        # 'ä' was mojibake ('√§') in the previous revision
        self.ae(0, icu.primary_strcmp('ä', 'A'))

    def test_change_case(self):
        ' Test the various ways of changing the case '
        from calibre.utils.titlecase import titlecase
        # Test corner cases
        self.ae('A', icu.upper(b'a'))
        for x in ('', None, False, 1):
            self.ae(x, icu.capitalize(x))

        for x in ('a', 'Alice\'s code', 'macdonald\'s machIne', '02 the wars'):
            self.ae(icu.upper(x), x.upper())
            self.ae(icu.lower(x), x.lower())
            # ICU's title case algorithm is different from ours, when there are
            # capitals inside words
            self.ae(icu.title_case(x), titlecase(x).replace('machIne', 'Machine'))
            self.ae(icu.capitalize(x), x[0].upper() + x[1:].lower())
            self.ae(icu.swapcase(x), x.swapcase())

    def test_find(self):
        ' Test searching for substrings '
        self.ae((1, 1), icu.find(b'a', b'1ab'))
        self.ae((1, 1), icu.find('\U0001f431', 'x\U0001f431x'))
        self.ae((1, 1), icu.find('y', '\U0001f431y'))
        self.ae((0, 4), icu.primary_find('pena', 'peña'))
        for k, v in iteritems({'pèché': 'peche', 'flüße':'Flusse', 'Štepánek':'ŠtepaneK'}):
            self.ae((1, len(k)), icu.primary_find(v, ' ' + k), f'Failed to find {v} in {k}')
        self.assertTrue(icu.startswith(b'abc', b'ab'))
        self.assertTrue(icu.startswith('abc', 'abc'))
        self.assertFalse(icu.startswith('xyz', 'a'))
        self.assertTrue(icu.startswith('xxx', ''))
        self.assertTrue(icu.primary_startswith('pena', 'peña'))
        self.assertTrue(icu.contains('\U0001f431', '\U0001f431'))
        self.assertTrue(icu.contains('something', 'some other something else'))
        self.assertTrue(icu.contains('', 'a'))
        self.assertTrue(icu.contains('', ''))
        self.assertFalse(icu.contains('xxx', 'xx'))
        self.assertTrue(icu.primary_contains('pena', 'peña'))
        x = icu.primary_collator()
        self.ae(x.get_attribute(icu._icu.UCOL_STRENGTH), icu._icu.UCOL_PRIMARY),
        self.ae((0, 4), icu.primary_no_punc_find('pena"', 'peña'))
        self.ae((0, 13), icu.primary_no_punc_find("typographers", 'typographer’s'))
        self.ae((0, 7), icu.primary_no_punc_find('abcd', 'a\u00adb\u200cc\u200dd'))
        self.ae((0, 5), icu.primary_no_punc_find('abcd', 'ab cd'))
        # test find all
        m = []

        def a(p, l):
            return m.append((p, l))

        # 'a a\U0001f431a' was mojibake ('a aüê±a') in the previous revision;
        # the cat emoji occupies two UTF-16 code units, hence the (5, 1) match
        icu.primary_collator_without_punctuation().find_all('a', 'a a\U0001f431a', a)
        self.ae(m, [(0, 1), (2, 1), (5, 1)])
        # test find whole words
        c = icu.primary_collator_without_punctuation()
        self.ae(c.find('a', 'abc a bc'), (0, 1))
        self.ae(c.find('a', 'abc a bc', True), (4, 1))
        self.ae(c.find('pena', 'a peñaabc peña', True), (10, 4))

    def test_collation_order(self):
        'Testing collation ordering'
        for group in [
            (self.ae, ('Šaa', 'Smith', 'Solženicyn', 'Štepánek')),
            (self.ae, ('11', '011')),
            (self.ane, ('2', '1')),
            (self.ae, ('100 Smith', '0100 Smith')),
        ]:
            last = None
            assert_func = group[0]
            for x in group[1]:
                order, _ = icu.numeric_collator().collation_order(x)
                if last is not None:
                    assert_func(last, order, f'Order for {x} not correct: {last} != {order}')
                last = order

        self.ae(dict(icu.partition_by_first_letter(['A1', '', 'a1', '\U0001f431', '\U0001f431x'])),
                {' ':[''], 'A':['A1', 'a1'], '\U0001f431':['\U0001f431', '\U0001f431x']})

    def test_collation_order_for_partitioning(self):
        'Testing collation ordering for partitioning'
        for group in [
            (self.ae, ('Smith', 'Šaa', 'Solženicyn', 'Štepánek')),
            (self.ane, ('11', '011')),
            (self.ae, ('102 Smith', '100 Smith')),
            (self.ane, ('100 Smith', '0100 Smith')),
        ]:
            last = None
            assert_func = group[0]
            for x in group[1]:
                order, _ = icu.non_numeric_sort_collator().collation_order(x)
                if last is not None:
                    assert_func(last, order, f'Order for {x} not correct: {last} != {order}')
                last = order

        self.ae(dict(icu.partition_by_first_letter(['A1', '', 'a1', '\U0001f431', '\U0001f431x'])),
                {' ':[''], 'A':['A1', 'a1'], '\U0001f431':['\U0001f431', '\U0001f431x']})

    def test_roundtrip(self):
        ' Test roundtripping '
        for r in ('xxx\0\u2219\U0001f431xxx', '\0', '', 'simple'):
            self.ae(r, icu._icu.roundtrip(r))
        # Lone surrogates are replaced with U+FFFD
        self.ae(icu._icu.roundtrip('\ud8e81'), '\ufffd1')
        self.ae(icu._icu.roundtrip('\udc01\ud8e8'), '\ufffd\ufffd')
        for x, l in [('', 0), ('a', 1), ('\U0001f431', 1)]:
            self.ae(icu._icu.string_length(x), l)
        for x, l in [('', 0), ('a', 1), ('\U0001f431', 2)]:
            self.ae(icu._icu.utf16_length(x), l)
        self.ae(icu._icu.chr(0x1f431), '\U0001f431')
        self.ae(icu._icu.ord_string('abc'*100), tuple(map(ord, 'abc'*100)))
        self.ae(icu._icu.ord_string('\U0001f431'), (0x1f431,))

    def test_character_name(self):
        ' Test character naming '
        from calibre.utils.unicode_names import character_name_from_code
        for q, e in {
                '\U0001f431': 'CAT FACE'
        }.items():
            self.ae(icu.character_name(q), e)
            self.ae(character_name_from_code(icu.ord_string(q)[0]), e)

    def test_contractions(self):
        ' Test contractions '
        self.skipTest('Skipping as this depends too much on ICU version')
        c = icu._icu.Collator('cs')
        self.ae(icu.contractions(c), frozenset({'Z\u030c', 'z\u030c', 'Ch',
            'C\u030c', 'ch', 'cH', 'c\u030c', 's\u030c', 'r\u030c', 'CH',
            'S\u030c', 'R\u030c'}))

    def test_break_iterator(self):
        ' Test the break iterator '
        from calibre.spell.break_iterator import count_words, index_of, split_into_words_and_positions
        from calibre.spell.break_iterator import split_into_words as split
        for q in ('one two three', ' one two three', 'one\ntwo three ', ):
            self.ae(split(str(q)), ['one', 'two', 'three'], 'Failed to split: %r' % q)
        self.ae(split('I I\'m'), ['I', "I'm"])
        self.ae(split('out-of-the-box'), ['out-of-the-box'])
        self.ae(split('-one two-'), ['-one', 'two-'])
        self.ae(split('-one a-b-c-d e'), ['-one', 'a-b-c-d', 'e'])
        self.ae(split('-one -a-b-c-d- e'), ['-one', '-a-b-c-d-', 'e'])
        self.ae(split_into_words_and_positions('one \U0001f431 three'), [(0, 3), (6, 5)])
        self.ae(count_words('a b c d e f'), 6)
        for needle, haystack, pos in (
                ('word', 'a word b', 2),
                ('word', 'a word', 2),
                ('one-two', 'a one-two punch', 2),
                ('one-two', 'one-two punch', 0),
                ('one-two', 'one-two', 0),
                ('one', 'one-two one', 8),
                ('one-two', 'one-two-three one-two', 14),
                ('one', 'onet one', 5),
                ('two', 'one-two two', 8),
                ('two', 'two-one two', 8),
                ('-two', 'one-two -two', 8),
                ('-two', 'two', -1),
                ('i', 'i', 0),
                ('i', 'six i', 4),
                ('i', '', -1), ('', '', -1), ('', 'i', -1),
                ('i', 'six clicks', -1),
                ('i', '\U0001f431 i', 2),
                ('-a', 'b -a', 2),
                ('a-', 'a-b a- d', 4),
                ('-a-', 'b -a -a-', 5),
                ('-a-', '-a-', 0),
                ('-a-', 'a-', -1),
                ('-a-', '-a', -1),
                ('-a-', 'a', -1),
                ('a-', 'a-', 0),
                ('-a', '-a', 0),
                ('a-b-c-', 'a-b-c-d', -1),
                ('a-b-c-', 'a-b-c-.', 0),
                ('a-b-c-', 'a-b-c-d a-b-c- d', 8),
        ):
            fpos = index_of(needle, haystack)
            self.ae(pos, fpos, 'Failed to find index of %r in %r (%d != %d)' % (needle, haystack, pos, fpos))

    def test_remove_accents(self):
        for func in (icu.remove_accents_icu, icu.remove_accents_regex):
            for q, expected in {
                    'MännÄr': 'MannAr', 'Peña': 'Pena', 'Kátia': 'Katia',
                    'Málaga': 'Malaga', 'François': 'Francois', 'Phút Hơn': 'Phut Hon',
                    '中文':'中文'
            }.items():
                self.ae(expected, func(q))

    def test_split_into_sentences(self):
        from calibre.spell.break_iterator import split_into_sentences_for_tts
        for sentence, expected in {
            'hello.': [(0, 'hello.')],
            'hello. I love you. Another small sentence. Fini.': [(0, 'hello. I love you. Another small sentence.'), (43, 'Fini.')],
            'a very long sentence to be split into at least two smaller sentences': [
                (0, 'a very long sentence to be split into at least two'), (51, 'smaller sentences')],
            'hello\u2029i love you': [(0, 'hello'), (6, 'i love you')],
        }.items():
            self.ae(expected, list(split_into_sentences_for_tts(sentence, max_sentence_length=40)))
def find_tests():
    # Collect all TestICU tests into a unittest suite.
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestICU)
class TestRunner(unittest.main):

    # unittest.main subclass that always runs find_tests(), ignoring any
    # test names passed on the command line.
    def createTests(self):
        self.test = find_tests()
def run(verbosity=4):
    # Run the suite without calling sys.exit() on completion.
    TestRunner(verbosity=verbosity, exit=False)
def test_build():
    # Entry point used by the calibre build system: fail the build (exit
    # status 1) if any test fails.
    result = TestRunner(verbosity=0, buffer=True, catchbreak=True, failfast=True, argv=sys.argv[:1], exit=False).result
    if not result.wasSuccessful():
        raise SystemExit(1)
if __name__ == '__main__':
run(verbosity=4)
| 13,372 | Python | .py | 250 | 42.592 | 190 | 0.537538 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,141 | imghdr.py | kovidgoyal_calibre/src/calibre/utils/imghdr.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import os
from struct import error, unpack
from calibre.utils.speedups import ReadOnlyFileBuffer
from polyglot.builtins import string_or_bytes
""" Recognize image file formats and sizes based on their first few bytes."""
HSIZE = 120
def what(file, h=None):
    ' Recognize image headers '
    # ``file`` may be a path or a seekable stream; ``h`` may be pre-read
    # header bytes. When reading from a stream, its position is restored.
    if h is None:
        if isinstance(file, string_or_bytes):
            with open(file, 'rb') as f:
                h = f.read(HSIZE)
        else:
            location = file.tell()
            h = file.read(HSIZE)
            file.seek(location)
    # Detectors expect a memoryview (some call .tobytes() on slices)
    if isinstance(h, bytes):
        h = memoryview(h)

    for tf in tests:
        res = tf(h)
        if res:
            return res
    # There exist some jpeg files with no headers, only the starting two bits
    # If we cannot identify as anything else, identify as jpeg.
    if h[:2] == b'\xff\xd8':
        return 'jpeg'
    return None
def identify(src):
    ''' Recognize file format and sizes. Returns format, width, height. width
    and height will be -1 if not found and fmt will be None if the image is not
    recognized. '''
    # ``src`` may be a file path, raw bytes, or an open seekable stream.
    needs_close = False
    if isinstance(src, str):
        stream = open(src, 'rb')
        needs_close = True
    elif isinstance(src, bytes):
        stream = ReadOnlyFileBuffer(src)
    else:
        stream = src
    try:
        return _identify(stream)
    finally:
        # Only close streams we opened ourselves
        if needs_close:
            stream.close()
def _identify(stream):
    # Detect the format from the header bytes, then parse dimensions for the
    # formats we know how to measure (png, jpeg, gif, jpeg2000).
    width = height = -1
    pos = stream.tell()
    head = stream.read(HSIZE)
    stream.seek(pos)
    fmt = what(None, head)

    if fmt in {'jpeg', 'gif', 'png', 'jpeg2000'}:
        size = len(head)
        if fmt == 'png':
            # PNG: width/height are the first two fields of the IHDR chunk
            s = head[16:24] if size >= 24 and head[12:16] == b'IHDR' else head[8:16]
            try:
                width, height = unpack(b">LL", s)
            except error:
                return fmt, width, height
        elif fmt == 'jpeg':
            # JPEG: must walk the marker segments to find SOFn
            pos = stream.tell()
            try:
                height, width = jpeg_dimensions(stream)
            except Exception:
                return fmt, width, height
            finally:
                stream.seek(pos)
        elif fmt == 'gif':
            # GIF: little-endian logical screen size at offset 6
            try:
                width, height = unpack(b"<HH", head[6:10])
            except error:
                return fmt, width, height
        elif size >= 56 and fmt == 'jpeg2000':
            # JPEG2000: height/width at offset 48 of the header
            try:
                height, width = unpack(b'>LL', head[48:56])
            except error:
                return fmt, width, height
    return fmt, width, height
# ---------------------------------#
# Subroutines per image file type #
# ---------------------------------#
tests = []


def test(f):
    # Decorator: register ``f`` as a format detector, in definition order.
    tests.append(f)
    return f
@test
def jpeg(h):
    """Detect JPEG data in JFIF/Exif form. Mirrors the file(1) utility:
    some JPEGs carry ICC_PROFILE/8BIM data before the JFIF signature, so
    the first 32 bytes are also scanned (e.g.
    http://nationalpostnews.files.wordpress.com/2013/03/budget.jpeg?w=300&h=1571)."""
    if h[6:10] in (b'JFIF', b'Exif'):
        return 'jpeg'
    if h[:2] != b'\xff\xd8':
        return None
    head = h[:32].tobytes()
    if b'JFIF' in head or b'8BIM' in head:
        return 'jpeg'
def jpeg_dimensions(stream):
    # Walk the JPEG marker segments and return (height, width) from the first
    # SOFn segment. A marker is 0xff followed by a non-zero byte; see section
    # B.1.1.2 of https://www.w3.org/Graphics/JPEG/itu-t81.pdf
    stream.seek(2, os.SEEK_CUR)  # skip the SOI marker

    def read_exact(n):
        data = stream.read(n)
        if len(data) != n:
            raise ValueError('Truncated JPEG data')
        return data

    def next_byte():
        return read_exact(1)[0]

    current = None
    while True:
        # Scan forward to the next 0xff
        while current != 0xff:
            current = next_byte()
        # Consume fill bytes; the first non-0xff byte is the marker id
        marker = 0xff
        while marker == 0xff:
            marker = next_byte()
        if 0xc0 <= marker <= 0xcf and marker not in (0xc4, 0xcc):
            # SOFn: skip segment length (2 bytes) + sample precision (1 byte)
            stream.seek(3, os.SEEK_CUR)
            return unpack(b'>HH', read_exact(4))
        if 0xd8 <= marker <= 0xda:
            break  # SOI/EOI/SOS: no point continuing
        if marker == 0:
            return -1, -1  # Corrupted JPEG
        if marker == 0x01 or 0xd0 <= marker <= 0xd7:
            continue  # Standalone marker (TEM/RSTn): no payload to skip
        # Any other marker: skip its payload (length includes the 2 size bytes)
        seg_len = unpack(b'>H', read_exact(2))[0]
        stream.seek(seg_len - 2, os.SEEK_CUR)
    return -1, -1
@test
def png(h):
    # PNG files start with a fixed 8-byte signature.
    return 'png' if h[:8] == b"\211PNG\r\n\032\n" else None
@test
def gif(h):
    """GIF ('87 and '89 variants)"""
    return 'gif' if h[:6] in (b'GIF87a', b'GIF89a') else None
@test
def tiff(h):
    """TIFF (can be in Motorola or Intel byte order)"""
    # JPEG XR also begins with the 'II' byte-order mark; it is distinguished
    # by the 0x01bc magic in the next two bytes.
    if h[:2] in (b'MM', b'II'):
        if h[2:4] == b'\xbc\x01':
            return 'jxr'
        return 'tiff'
@test
def webp(h):
    # WebP is a RIFF container whose form type (at offset 8) is 'WEBP'.
    is_riff = h[:4] == b'RIFF'
    if is_riff and h[8:12] == b'WEBP':
        return 'webp'
    return None
@test
def rgb(h):
    """SGI image library"""
    # 2-byte magic 0x01da
    if h[:2] == b'\001\332':
        return 'rgb'
@test
def pbm(h):
    """PBM (portable bitmap): 'P1' (ASCII) or 'P4' (binary) magic.

    NOTE: the previous check compared ``h[0]`` (an int, since ``h`` is a
    memoryview or bytes) to ``b'P'``, which never matched under Python 3.
    Compare a 1-byte slice instead.
    """
    if len(h) >= 3 and \
            h[0:1] == b'P' and h[1] in b'14' and h[2] in b' \t\n\r':
        return 'pbm'
@test
def pgm(h):
    """PGM (portable graymap): 'P2' (ASCII) or 'P5' (binary) magic.

    NOTE: the previous check compared ``h[0]`` (an int, since ``h`` is a
    memoryview or bytes) to ``b'P'``, which never matched under Python 3.
    Compare a 1-byte slice instead.
    """
    if len(h) >= 3 and \
            h[0:1] == b'P' and h[1] in b'25' and h[2] in b' \t\n\r':
        return 'pgm'
@test
def ppm(h):
    """PPM (portable pixmap): 'P3' (ASCII) or 'P6' (binary) magic.

    NOTE: the previous check compared ``h[0]`` (an int, since ``h`` is a
    memoryview or bytes) to ``b'P'``, which never matched under Python 3.
    Compare a 1-byte slice instead.
    """
    if len(h) >= 3 and \
            h[0:1] == b'P' and h[1] in b'36' and h[2] in b' \t\n\r':
        return 'ppm'
@test
def rast(h):
    """Sun raster file"""
    # Big-endian magic 0x59a66a95
    return 'rast' if h[:4] == b'\x59\xA6\x6A\x95' else None
@test
def xbm(h):
    """X bitmap (X10 or X11)"""
    sig = b'#define '
    return 'xbm' if h[:len(sig)] == sig else None
@test
def bmp(h):
    # Windows/OS2 bitmap: 'BM' signature.
    return 'bmp' if h[:2] == b'BM' else None
@test
def emf(h):
    # Windows Enhanced Metafile: record type 1 plus the ' EMF' signature at
    # offset 40.
    if h[:4] == b'\x01\0\0\0' and h[40:44] == b' EMF':
        return 'emf'
@test
def jpeg2000(h):
    # JP2 signature box: 12 bytes 00 00 00 0C 'jP  ' 0D 0A 87 0A. Note the
    # two spaces after 'jP' — with a single space the literal is 11 bytes
    # and can never equal h[:12].
    if h[:12] == b'\x00\x00\x00\x0cjP  \r\n\x87\n':
        return 'jpeg2000'
@test
def svg(h):
    # Matches '<svg' at the very start, or an XML declaration ('<?xml')
    # with '<svg' anywhere in the sampled header. Requires ``h`` to be a
    # memoryview (uses .tobytes()).
    if h[:4] == b'<svg' or (h[:2] == b'<?' and h[2:5].tobytes().lower() == b'xml' and b'<svg' in h.tobytes()):
        return 'svg'
tests = tuple(tests)
| 6,415 | Python | .py | 211 | 23.085308 | 110 | 0.537661 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,142 | complete.py | kovidgoyal_calibre/src/calibre/utils/complete.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
BASH completion for calibre commands that are too complex for simple
completion.
'''
import glob
import os
import re
import shlex
import sys
prints = print
def split(src):
    # shlex.split raises ValueError on an unbalanced quote; retry with a
    # closing quote appended so partially typed command lines still split.
    try:
        return shlex.split(src)
    except ValueError:
        pass
    try:
        return shlex.split(src + '"')
    except ValueError:
        return shlex.split(src + "'")
def files_and_dirs(prefix, allowed_exts=()):
    """Yield shell-completion candidates matching ``prefix``.

    Directories are yielded with a trailing os.sep, files with a trailing
    space. Only files whose lower-cased extension (without the dot) is in
    ``allowed_exts`` are yielded; pass ``allowed_exts=None`` to allow all.

    The default was a mutable list; an empty tuple behaves identically and
    avoids the mutable-default-argument smell.
    """
    prefix = os.path.expanduser(prefix)
    for candidate in glob.iglob(prefix + '*'):
        _, ext = os.path.splitext(candidate)
        ext = ext.lower().replace('.', '')
        if os.path.isdir(candidate):
            yield candidate + os.sep
        elif allowed_exts is None or ext in allowed_exts:
            yield candidate + ' '
def get_opts_from_parser(parser, prefix):
    # Yield every long and short option string of ``parser`` (including those
    # inside option groups) that starts with ``prefix``, each with a trailing
    # space for bash completion.
    def matching(opt):
        for flag in opt._long_opts:
            if flag.startswith(prefix):
                yield flag
        for flag in opt._short_opts:
            if flag.startswith(prefix):
                yield flag

    for opt in parser.option_list:
        for flag in matching(opt):
            yield flag + ' '
    for group in parser.option_groups:
        for opt in group.option_list:
            for flag in matching(opt):
                yield flag + ' '
def send(ans):
    # Emit the completion candidates, one per line, backslash-escaping any
    # character bash treats specially. A trailing escaped space is converted
    # back to a plain trailing space so bash inserts the separator.
    pat = re.compile('([^0-9a-zA-Z_./-])')
    for x in sorted(set(ans)):
        x = pat.sub(lambda m : '\\'+m.group(1), x)
        if x.endswith('\\ '):
            x = x[:-2]+' '
        prints(x)
class EbookConvert:

    # Bash completion for the ebook-convert command line, driven by a
    # pre-built msgpack cache of input/output formats and per-conversion
    # options.

    def __init__(self, comp_line, pos):
        # ``comp_line``/``pos`` are bash's COMP_LINE and COMP_POINT.
        words = split(comp_line[:pos])
        char_before = comp_line[pos-1]
        # prefix is the partial word being completed, if the cursor is at
        # the end of it
        prefix = words[-1] if words[-1].endswith(char_before) else ''
        wc = len(words)
        if not prefix:
            wc += 1
        self.words = words
        self.prefix = prefix
        self.previous = words[-2 if prefix else -1]
        from calibre.utils.serialize import msgpack_loads
        self.cache = msgpack_loads(open(os.path.join(sys.resources_location,
            'ebook-convert-complete.calibre_msgpack'), 'rb').read(), use_list=False)
        self.complete(wc)

    def complete(self, wc):
        # ``wc``: 1-based index of the word being completed. Word 2 is the
        # input file, word 3 the output file, later words are options.
        if wc == 2:
            self.complete_input()
        elif wc == 3:
            self.complete_output()
        else:
            # Key the cache on the (input, output) extension pair
            q = list(self.words[1:3])
            q = [os.path.splitext(x)[0 if x.startswith('.') else 1].partition('.')[-1].lower() for x in q]
            if not q[1]:
                q[1] = 'oeb'
            q = tuple(q)
            if q in self.cache:
                ans = [x for x in self.cache[q] if x.startswith(self.prefix)]
            else:
                # Cache miss: build the option parser for this conversion
                from calibre.ebooks.conversion.cli import create_option_parser
                from calibre.utils.logging import Log
                log = Log()
                log.outputs = []
                ans = []
                if not self.prefix or self.prefix.startswith('-'):
                    try:
                        parser, _ = create_option_parser(self.words[:3], log)
                        ans += list(get_opts_from_parser(parser, self.prefix))
                    except:
                        pass
                if self.previous.startswith('-'):
                    ans += list(files_and_dirs(self.prefix, None))
            send(ans)

    def complete_input(self):
        # Input file candidates plus builtin recipe titles
        ans = list(files_and_dirs(self.prefix, self.cache['input_fmts']))
        ans += [t for t in self.cache['input_recipes'] if
                t.startswith(self.prefix)]
        send(ans)

    def complete_output(self):
        # Output file candidates plus bare '.ext' completions
        fmts = self.cache['output']
        ans = list(files_and_dirs(self.prefix, fmts))
        ans += ['.'+x+' ' for x in fmts if ('.'+x).startswith(self.prefix)]
        send(ans)
def main(args=sys.argv):
    # Completion entry point: dispatch on the command being completed.
    # NOTE(review): ``args`` is unused apart from its default — confirm it is
    # kept only for call-site compatibility.
    comp_line, pos = os.environ['COMP_LINE'], int(os.environ['COMP_POINT'])
    module = split(comp_line)[0].split(os.sep)[-1]
    if module == 'ebook-convert':
        EbookConvert(comp_line, pos)
    return 0
if __name__ == '__main__':
raise sys.exit(main())
| 4,066 | Python | .py | 114 | 26.27193 | 106 | 0.544436 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,143 | rapydscript.py | kovidgoyal_calibre/src/calibre/utils/rapydscript.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import json
import os
import re
import subprocess
import sys
from calibre import force_unicode
from calibre.constants import (
FAKE_HOST,
FAKE_PROTOCOL,
SPECIAL_TITLE_FOR_WEBENGINE_COMMS,
__appname__,
__version__,
builtin_colors_dark,
builtin_colors_light,
builtin_decorations,
dark_link_color,
)
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.filenames import atomic_rename
from calibre.utils.resources import get_path as P
from polyglot.builtins import as_bytes, as_unicode, exec_path
COMPILER_PATH = 'rapydscript/compiler.js.xz'
def abspath(x):
    # Canonical absolute path: make absolute first, then resolve symlinks.
    absolute = os.path.abspath(x)
    return os.path.realpath(absolute)
# Update RapydScript {{{
def update_rapydscript():
    # Rebuild the bundled RapydScript compiler: run the node exporter from a
    # sibling 'rapydscript' checkout and store the result xz-compressed at
    # COMPILER_PATH inside the calibre resources.
    import lzma
    d = os.path.dirname
    base = d(d(d(d(d(abspath(__file__))))))
    base = os.path.join(base, 'rapydscript')
    with TemporaryDirectory() as tdir:
        subprocess.check_call(['node', '--harmony', os.path.join(base, 'bin', 'web-repl-export'), tdir])
        with open(os.path.join(tdir, 'rapydscript.js'), 'rb') as f:
            raw = f.read()
    path = P(COMPILER_PATH, allow_user_override=False)
    with lzma.open(path, 'wb', format=lzma.FORMAT_XZ) as f:
        f.write(raw)
# }}}
# Compiler {{{
def to_dict(obj):
    # Convert any mapping-like object exposing keys()/values() (e.g. a JS
    # proxy object) into a plain dict.
    keys = list(obj.keys())
    values = list(obj.values())
    return dict(zip(keys, values))
def compiler():
    # Lazily create (and memoize on the function object) a QWebEnginePage
    # sandbox running the embedded RapydScript-to-JS compiler. The .pyj
    # standard library is exposed to it as an in-memory virtual file system,
    # with compiled output persisted to a JSON write cache on disk.
    import lzma
    ans = getattr(compiler, 'ans', None)
    if ans is not None:
        return ans
    from qt.core import QApplication, QEventLoop
    from qt.webengine import QWebEnginePage, QWebEngineScript

    from calibre import walk
    from calibre.gui2 import must_use_qt
    from calibre.utils.webengine import secure_webengine, setup_default_profile, setup_profile
    must_use_qt()
    setup_default_profile()

    with lzma.open(P(COMPILER_PATH, allow_user_override=False)) as lzf:
        compiler_script = lzf.read().decode('utf-8')

    base = base_dir()
    rapydscript_dir = os.path.join(base, 'src', 'pyj')
    cache_path = os.path.join(module_cache_dir(), 'embedded-compiler-write-cache.json')

    def create_vfs():
        # Snapshot every .pyj file of the standard library, keyed under
        # '__stdlib__/'.
        ans = {}
        for x in walk(rapydscript_dir):
            if x.endswith('.pyj'):
                r = os.path.relpath(x, rapydscript_dir).replace('\\', '/')
                with open(x, 'rb') as f:
                    ans['__stdlib__/' + r] = f.read().decode('utf-8')
        return ans

    def vfs_script():
        # JS bootstrap: wires the virtual file system plus the persisted
        # write cache into RapydScript, then creates the embedded compiler.
        try:
            with open(cache_path, 'rb') as f:
                write_cache = f.read().decode('utf-8')
        except Exception:
            write_cache = '{}'

        return '''
(function() {
"use strict";
var vfs = VFS;

function read_file_sync(name) {
    var ans = vfs[name];
    if (typeof ans === "string") return ans;
    ans = write_cache[name];
    if (typeof ans === "string") return ans;
    return null;
}

function write_file_sync(name, data) {
    write_cache[name] = data;
}

RapydScript.virtual_file_system = {
    'read_file_sync': read_file_sync,
    'write_file_sync': write_file_sync
};
window.compiler = RapydScript.create_embedded_compiler();
document.title = 'compiler initialized';
})();
'''.replace('VFS', json.dumps(create_vfs()) + ';\n' + 'window.write_cache = ' + write_cache, 1)

    def create_script(src, name):
        # Inject ``src`` into the page's application world on document ready.
        s = QWebEngineScript()
        s.setName(name)
        s.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)
        s.setWorldId(QWebEngineScript.ScriptWorldId.ApplicationWorld)
        s.setRunsOnSubFrames(True)
        s.setSourceCode(src)
        return s

    class Compiler(QWebEnginePage):

        def __init__(self):
            super().__init__()
            setup_profile(self.profile())
            self.errors = []
            secure_webengine(self)
            script = compiler_script
            script += '\n\n;;\n\n' + vfs_script()
            self.scripts().insert(create_script(script, 'rapydscript.js'))
            self.setHtml('<p>initialize')
            # Block until the injected script signals readiness via the title
            while self.title() != 'compiler initialized':
                self.spin_loop()

        def spin_loop(self):
            QApplication.instance().processEvents(QEventLoop.ProcessEventsFlag.ExcludeUserInputEvents)

        def javaScriptConsoleMessage(self, level, msg, line_num, source_id):
            if level == QWebEnginePage.JavaScriptConsoleMessageLevel.ErrorMessageLevel:
                self.errors.append(msg)
            else:
                print(f'{source_id}:{line_num}:{msg}')

        def __call__(self, src, options):
            # Compile ``src`` synchronously; persists the updated write
            # cache and returns the generated JavaScript.
            self.compiler_result = null = object()
            self.errors = []
            self.working = True
            options['basedir'] = '__stdlib__'
            options['write_name'] = True
            options['keep_docstrings'] = False
            src = 'var js = window.compiler.compile({}, {}); [js, window.write_cache]'.format(*map(json.dumps, (src, options)))
            self.runJavaScript(src, QWebEngineScript.ScriptWorldId.ApplicationWorld, self.compilation_done)
            while self.working:
                self.spin_loop()
            if self.compiler_result is null or self.compiler_result is None:
                raise CompileFailure('Failed to compile rapydscript code with error: ' + '\n'.join(self.errors))
            write_cache = self.compiler_result[1]
            with open(cache_path, 'wb') as f:
                f.write(as_bytes(json.dumps(write_cache)))
            return self.compiler_result[0]

        def eval(self, js):
            # Evaluate arbitrary JS synchronously in the application world.
            self.compiler_result = null = object()
            self.errors = []
            self.working = True
            self.runJavaScript(js, QWebEngineScript.ScriptWorldId.ApplicationWorld, self.compilation_done)
            while self.working:
                self.spin_loop()
            if self.compiler_result is null:
                raise CompileFailure('Failed to eval JS with error: ' + '\n'.join(self.errors))
            return self.compiler_result

        def compilation_done(self, js):
            self.working = False
            self.compiler_result = js

    compiler.ans = Compiler()
    return compiler.ans
class CompileFailure(ValueError):
    '''Raised when RapydScript compilation (or embedded JS evaluation) fails.'''
_cache_dir = None


def module_cache_dir():
    '''Return (creating it if necessary) the directory used to cache compiled
    pyj modules: <repo-root>/.build-cache/pyj. The result is memoized in the
    module-global ``_cache_dir``.'''
    global _cache_dir
    if _cache_dir is None:
        d = os.path.dirname
        # Four levels up from this file is the repository root.
        base = d(d(d(d(abspath(__file__)))))
        _cache_dir = os.path.join(base, '.build-cache', 'pyj')
        # exist_ok replaces the old errno.EEXIST check; unlike it, this still
        # raises if the path exists but is not a directory, which is desirable.
        os.makedirs(_cache_dir, exist_ok=True)
    return _cache_dir
def ok_to_import_webengine():
    '''Return True if it is safe to import QtWebEngine in this process:
    either no QApplication exists yet, or QtWebEngineCore is already loaded.'''
    from qt.core import QApplication
    return QApplication.instance() is None or 'PyQt6.QtWebEngineCore' in sys.modules
OUTPUT_SENTINEL = b'-----RS webengine compiler output starts here------'


def forked_compile():
    '''Entry point executed in a calibre-debug child process (see
    run_forked_compile): reads RapydScript source from stdin and compile
    options (JSON) from the last argv entry, then writes OUTPUT_SENTINEL
    followed by the compiled JS to stdout.'''
    c = compiler()
    raw = getattr(sys.stdin, 'buffer', sys.stdin).read().decode('utf-8')
    opts = json.loads(sys.argv[-1])
    compiled = c(raw, opts)
    out = getattr(sys.stdout, 'buffer', sys.stdout)
    out.write(OUTPUT_SENTINEL)
    out.write(as_bytes(compiled))
    out.close()
def run_forked_compile(data, options):
    '''Compile RapydScript source in a separate calibre-debug process (used
    when Qt/WebEngine cannot be initialized in this one). Returns the
    compiled JS as text; the child marks its real output with
    OUTPUT_SENTINEL so any stray prints before it are discarded.'''
    from calibre.debug import run_calibre_debug
    proc = run_calibre_debug(
        '-c', 'from calibre.utils.rapydscript import *; forked_compile()',
        json.dumps(options), stdin=subprocess.PIPE, stdout=subprocess.PIPE, headless=True)
    output = proc.communicate(as_bytes(data))[0]
    if proc.wait() != 0:
        raise SystemExit(proc.returncode)
    marker = output.find(OUTPUT_SENTINEL)
    return as_unicode(output[marker + len(OUTPUT_SENTINEL):])
def compile_pyj(
    data,
    filename='<stdin>',
    beautify=True,
    private_scope=True,
    libdir=None,
    omit_baselib=False,
    js_version=5,
):
    '''Compile RapydScript source using the embedded WebEngine-based
    compiler, falling back to a forked helper process when Qt cannot be
    used in the current process/thread. Returns the compiled JS as text.'''
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    options = {
        'beautify': beautify,
        'private_scope': private_scope,
        'keep_baselib': not omit_baselib,
        'filename': filename,
        'js_version': js_version,
    }
    if not ok_to_import_webengine():
        return run_forked_compile(data, options)
    try:
        return compiler()(data, options)
    except RuntimeError as err:
        if 'Cannot use Qt in non GUI thread' not in str(err):
            raise
        # Qt refuses to run here (wrong thread); compile out of process.
        return run_forked_compile(data, options)
has_external_compiler = None


def detect_external_compiler():
    '''Look for a system-wide rapydscript-ng executable of version >= 0.7.5.
    Returns its path when found and usable, False otherwise.'''
    from calibre.utils.filenames import find_executable_in_path
    exe = find_executable_in_path('rapydscript')
    try:
        out = subprocess.check_output([exe, '--version'])
    except Exception:
        out = b''
    if not out.startswith(b'rapydscript-ng '):
        return False
    try:
        version = tuple(map(int, out.partition(b' ')[-1].split(b'.')))
    except Exception:
        version = (0, 0, 0)
    return exe if version >= (0, 7, 5) else False
def compile_fast(
    data,
    filename=None,
    beautify=True,
    private_scope=True,
    libdir=None,
    omit_baselib=False,
    js_version=None,
):
    '''Compile RapydScript source, preferring an external rapydscript-ng
    executable (much faster than the embedded WebEngine compiler). Falls
    back to compile_pyj() when no suitable executable is found. Returns the
    compiled JS as text; raises CompileFailure on compiler errors.'''
    global has_external_compiler
    if has_external_compiler is None:
        has_external_compiler = detect_external_compiler()
    if not has_external_compiler:
        return compile_pyj(data, filename or '<stdin>', beautify, private_scope, libdir, omit_baselib, js_version or 6)
    cmd = [has_external_compiler, 'compile', '--cache-dir', module_cache_dir()]
    if libdir:
        cmd.extend(('--import-path', libdir))
    if not beautify:
        cmd.append('--uglify')
    if not private_scope:
        cmd.append('--bare')
    if omit_baselib:
        cmd.append('--omit-baselib')
    if js_version:
        cmd.append(f'--js-version={js_version}')
    if not isinstance(data, bytes):
        data = data.encode('utf-8')
    if filename:
        cmd.extend(('--filename-for-stdin', filename))
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    js, stderr = proc.communicate(data)
    if proc.wait() != 0:
        raise CompileFailure(force_unicode(stderr, 'utf-8'))
    return js.decode('utf-8')
def base_dir():
    '''Return the repository root: four directory levels above this file.'''
    path = os.path.abspath(__file__)
    for _ in range(4):
        path = os.path.dirname(path)
    return path
def atomic_write(base, name, content):
    '''Atomically write ``content`` (str or bytes) to ``base/name`` by
    writing to a side file and renaming it into place.'''
    dest = os.path.join(base, name)
    temp = dest + '.tmp'
    with open(temp, 'wb') as out:
        out.write(as_bytes(content))
    atomic_rename(temp, dest)
def run_rapydscript_tests():
    '''Compile src/pyj/test.pyj and execute it inside a sandboxed
    QWebEnginePage; exits the process with the integer returned by the
    test suite's window.main().'''
    from urllib.parse import parse_qs
    from qt.core import QApplication, QByteArray, QEventLoop, QUrl
    from qt.webengine import QWebEnginePage, QWebEngineProfile, QWebEngineScript, QWebEngineUrlRequestJob, QWebEngineUrlSchemeHandler
    from calibre.constants import FAKE_HOST, FAKE_PROTOCOL
    from calibre.gui2 import must_use_qt
    from calibre.gui2.viewer.web_view import send_reply
    from calibre.utils.webengine import create_script, insert_scripts, secure_webengine, setup_default_profile, setup_fake_protocol, setup_profile
    must_use_qt()
    setup_fake_protocol()
    setup_default_profile()
    base = base_dir()
    rapydscript_dir = os.path.join(base, 'src', 'pyj')
    fname = os.path.join(rapydscript_dir, 'test.pyj')
    with open(fname, 'rb') as f:
        # Compile the test suite to JS before spinning up WebEngine.
        js = compile_fast(f.read(), fname)

    class UrlSchemeHandler(QWebEngineUrlSchemeHandler):
        # Answers requests made over the fake protocol. Only GET requests to
        # FAKE_HOST carrying a query string are served; everything else is
        # failed and logged.

        def __init__(self, parent=None):
            QWebEngineUrlSchemeHandler.__init__(self, parent)
            self.allowed_hosts = (FAKE_HOST,)
            self.registered_data = {}

        def requestStarted(self, rq):
            if bytes(rq.requestMethod()) != b'GET':
                return self.fail_request(rq, QWebEngineUrlRequestJob.Error.RequestDenied)
            url = rq.requestUrl()
            host = url.host()
            if host not in self.allowed_hosts:
                return self.fail_request(rq)
            q = parse_qs(url.query())
            if not q:
                return self.fail_request(rq)
            # The reply's MIME type and body are taken directly from the
            # request's own query parameters (echo-style test server).
            mt = q.get('mime-type', ('text/plain',))[0]
            data = q.get('data', ('',))[0].encode('utf-8')
            send_reply(rq, mt, data)

        def fail_request(self, rq, fail_code=None):
            if fail_code is None:
                fail_code = QWebEngineUrlRequestJob.Error.UrlNotFound
            rq.fail(fail_code)
            print(f"Blocking FAKE_PROTOCOL request: {rq.requestUrl().toString()}", file=sys.stderr)

    class Tester(QWebEnginePage):
        # Hosts the compiled test script. When the page sets its title to
        # 'initialized', window.main() is invoked and its return value is
        # captured by self.callback.

        def __init__(self):
            profile = QWebEngineProfile(QApplication.instance())
            profile.setHttpUserAgent('calibre-tester')
            setup_profile(profile)
            insert_scripts(profile, create_script('test-rapydscript.js', js, on_subframes=False))
            url_handler = UrlSchemeHandler(profile)
            profile.installUrlSchemeHandler(QByteArray(FAKE_PROTOCOL.encode('ascii')), url_handler)
            QWebEnginePage.__init__(self, profile, None)
            self.titleChanged.connect(self.title_changed)
            secure_webengine(self)
            self.setHtml('<p>initialize', QUrl(f'{FAKE_PROTOCOL}://{FAKE_HOST}/index.html'))
            self.working = True

        def title_changed(self, title):
            if title == 'initialized':
                # Run main() once only; disconnect so later title changes are ignored.
                self.titleChanged.disconnect()
                self.runJavaScript('window.main()', QWebEngineScript.ScriptWorldId.ApplicationWorld, self.callback)

        def spin_loop(self):
            # Pump the Qt event loop until callback() has fired, then return
            # the value produced by window.main().
            while self.working:
                QApplication.instance().processEvents(QEventLoop.ProcessEventsFlag.ExcludeUserInputEvents)
            return self.result

        def callback(self, result):
            self.result = result
            self.working = False

        def javaScriptConsoleMessage(self, level, msg, line_num, source_id):
            # Mirror the page's console to our stdout/stderr.
            print(msg, file=sys.stdout if level == QWebEnginePage.JavaScriptConsoleMessageLevel.InfoMessageLevel else sys.stderr)

    tester = Tester()
    result = tester.spin_loop()
    raise SystemExit(int(result))
def set_data(src, **kw):
    '''Substitute build-time placeholders in compiled JS source.

    The well-known __NAME__ placeholders are replaced first (each at most
    once), then any extra placeholder -> value pairs given as keyword
    arguments. Returns the substituted source.'''
    builtin = (
        ('__SPECIAL_TITLE__', SPECIAL_TITLE_FOR_WEBENGINE_COMMS),
        ('__FAKE_PROTOCOL__', FAKE_PROTOCOL),
        ('__FAKE_HOST__', FAKE_HOST),
        ('__CALIBRE_VERSION__', __version__),
        ('__DARK_LINK_COLOR__', dark_link_color),
        ('__BUILTIN_COLORS_LIGHT__', json.dumps(builtin_colors_light)),
        ('__BUILTIN_COLORS_DARK__', json.dumps(builtin_colors_dark)),
        ('__BUILTIN_DECORATIONS__', json.dumps(builtin_decorations)),
    )
    for placeholder, value in builtin:
        src = src.replace(placeholder, value, 1)
    for placeholder, value in kw.items():
        src = src.replace(placeholder, value, 1)
    return src
def compile_editor():
    '''Build resources/editor.js from src/pyj/editor.pyj.'''
    root = base_dir()
    src_file = os.path.join(root, 'src', 'pyj', 'editor.pyj')
    with open(src_file, 'rb') as f:
        js = set_data(compile_fast(f.read(), src_file))
    atomic_write(os.path.join(root, 'resources'), 'editor.js', js)
def compile_viewer():
    '''Build resources/viewer.js and resources/viewer.html from
    src/pyj/viewer-main.pyj plus the generated icon markup and reset CSS.'''
    root = base_dir()
    # Run the icon generator script; its merge() produces the icon markup.
    gen_script = os.path.join(root, 'imgsrc', 'srv', 'generate.py')
    namespace = {'__file__': gen_script}
    exec_path(gen_script, namespace)
    icons = namespace['merge']()
    with open(os.path.join(root, 'resources', 'content-server', 'reset.css'), 'rb') as f:
        reset = f.read().decode('utf-8')
    html = f'<!DOCTYPE html>\n<html><head><style>{reset}</style></head><body>{icons}</body></html>'
    src_file = os.path.join(root, 'src', 'pyj', 'viewer-main.pyj')
    with open(src_file, 'rb') as f:
        js = set_data(compile_fast(f.read(), src_file))
    resources = os.path.join(root, 'resources')
    atomic_write(resources, 'viewer.js', js)
    atomic_write(resources, 'viewer.html', html)
def compile_srv():
    '''Build resources/content-server/index-generated.html: the content
    server UI page with its reset CSS, icon markup and compiled JS inlined.'''
    base = base_dir()
    iconf = os.path.join(base, 'imgsrc', 'srv', 'generate.py')
    # Execute the icon generator script; its merge() returns the icon markup.
    g = {'__file__': iconf}
    exec_path(iconf, g)
    icons = g['merge']().encode('utf-8')
    with open(os.path.join(base, 'resources', 'content-server', 'reset.css'), 'rb') as f:
        reset = f.read()
    rapydscript_dir = os.path.join(base, 'src', 'pyj')
    rb = os.path.join(base, 'src', 'calibre', 'srv', 'render_book.py')
    # Scrape RENDER_VERSION out of render_book.py so it can be injected into
    # the compiled JS as the __RENDER_VERSION__ placeholder.
    with open(rb, 'rb') as f:
        rv = str(int(re.search(br'^RENDER_VERSION\s+=\s+(\d+)', f.read(), re.M).group(1)))
    # MathJax version comes from the 'etag' field of the bundled manifest.
    mathjax_version = json.loads(P('mathjax/manifest.json', data=True, allow_user_override=False))['etag']
    # Note: 'base' is re-purposed below to mean the content-server resources dir.
    base = os.path.join(base, 'resources', 'content-server')
    fname = os.path.join(rapydscript_dir, 'srv.pyj')
    with open(fname, 'rb') as f:
        js = set_data(
            compile_fast(f.read(), fname),
            __RENDER_VERSION__=rv,
            __MATHJAX_VERSION__=mathjax_version
        ).encode('utf-8')
    # Inline everything into the index.html template; each placeholder is
    # replaced at most once.
    with open(os.path.join(base, 'index.html'), 'rb') as f:
        html = f.read().replace(b'RESET_STYLES', reset, 1).replace(b'ICONS', icons, 1).replace(b'MAIN_JS', js, 1)
    atomic_write(base, 'index-generated.html', html)
# }}}
# Translations {{{
def create_pot(source_files):
    '''Extract translatable strings from the given .pyj files using the
    embedded RapydScript gettext tooling and return the POT file contents
    as a single string.'''
    c = compiler()
    gettext_options = json.dumps({
        'package_name': __appname__,
        'package_version': __version__,
        'bugs_address': 'https://bugs.launchpad.net/calibre'
    })
    # Initialize the catalog and options inside the compiler's JS context.
    c.eval(f'window.catalog = {{}}; window.gettext_options = {gettext_options}; 1')
    for fname in source_files:
        with open(fname, 'rb') as f:
            code = f.read().decode('utf-8')
        # (removed a no-op `fname = fname` self-assignment that was here)
        c.eval('RapydScript.gettext_parse(window.catalog, {}, {}); 1'.format(*map(json.dumps, (code, fname))))
    buf = c.eval('ans = []; RapydScript.gettext_output(window.catalog, window.gettext_options, ans.push.bind(ans)); ans;')
    return ''.join(buf)
def msgfmt(po_data_as_string):
    '''Compile PO file data (given as a string) with the embedded
    RapydScript msgfmt implementation, with fuzzy entries excluded.'''
    options = json.dumps({'use_fuzzy': False})
    return compiler().eval(f'RapydScript.msgfmt({json.dumps(po_data_as_string)}, {options})')
# }}}
| 18,105 | Python | .py | 444 | 33.128378 | 146 | 0.625818 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,144 | smartypants.py | kovidgoyal_calibre/src/calibre/utils/smartypants.py | #!/usr/bin/env python
__author__ = "Chad Miller <smartypantspy@chad.org>, Kovid Goyal <kovid at kovidgoyal.net>"
__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom"
r"""
==============
smartypants.py
==============
----------------------------
SmartyPants ported to Python
----------------------------
Ported by `Chad Miller`_
Copyright (c) 2004, 2007 Chad Miller
original `SmartyPants`_ by `John Gruber`_
Copyright (c) 2003 John Gruber
Synopsis
========
A smart-quotes plugin for Pyblosxom_.
The original "SmartyPants" is a free web publishing plug-in for Movable Type,
Blosxom, and BBEdit that easily translates plain ASCII punctuation characters
into "smart" typographic punctuation HTML entities.
This software, *smartypants.py*, endeavours to be a functional port of
SmartyPants to Python, for use with Pyblosxom_.
Description
===========
SmartyPants can perform the following transformations:
- Straight quotes ( " and ' ) into "curly" quote HTML entities
- Backticks-style quotes (\`\`like this'') into "curly" quote HTML entities
- Dashes (``--`` and ``---``) into en- and em-dash entities
- Three consecutive dots (``...`` or ``. . .``) into an ellipsis entity
This means you can write, edit, and save your posts using plain old
ASCII straight quotes, plain dashes, and plain dots, but your published
posts (and final HTML output) will appear with smart quotes, em-dashes,
and proper ellipses.
SmartyPants does not modify characters within ``<pre>``, ``<code>``, ``<kbd>``,
``<math>`` or ``<script>`` tag blocks. Typically, these tags are used to
display text where smart quotes and other "smart punctuation" would not be
appropriate, such as source code or example markup.
Backslash Escapes
=================
If you need to use literal straight quotes (or plain hyphens and
periods), SmartyPants accepts the following backslash escape sequences
to force non-smart punctuation. It does so by transforming the escape
sequence into a decimal-encoded HTML entity:
(FIXME: table here.)
.. comment It sucks that there's a disconnect between the visual layout and table markup when special characters are involved.
.. comment ====== ===== =========
.. comment Escape Value Character
.. comment ====== ===== =========
.. comment \\\\\\\\ \ \\\\
.. comment \\\\" " "
.. comment \\\\' ' '
.. comment \\\\. . .
.. comment \\\\- - \-
.. comment \\\\` ` \`
.. comment ====== ===== =========
This is useful, for example, when you want to use straight quotes as
foot and inch marks: 6'2" tall; a 17" iMac.
Options
=======
For Pyblosxom users, the ``smartypants_attributes`` attribute is where you
specify configuration options.
Numeric values are the easiest way to configure SmartyPants' behavior:
"0"
Suppress all transformations. (Do nothing.)
"1"
Performs default SmartyPants transformations: quotes (including
\`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash)
is used to signify an em-dash; there is no support for en-dashes.
"2"
Same as smarty_pants="1", except that it uses the old-school typewriter
shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``"
(dash dash dash)
for em-dashes.
"3"
Same as smarty_pants="2", but inverts the shorthand for dashes:
"``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for
en-dashes.
"-1"
Stupefy mode. Reverses the SmartyPants transformation process, turning
the HTML entities produced by SmartyPants into their ASCII equivalents.
E.g. "&#8220;" is turned into a simple double-quote ("), "&#8212;" is
turned into two dashes, etc.
The following single-character attribute values can be combined to toggle
individual transformations from within the smarty_pants attribute. For
example, to educate normal quotes and em-dashes, but not ellipses or
\`\`backticks'' -style quotes:
``py['smartypants_attributes'] = "1"``
"q"
Educates normal quote characters: (") and (').
"b"
Educates \`\`backticks'' -style double quotes.
"B"
Educates \`\`backticks'' -style double quotes and \`single' quotes.
"d"
Educates em-dashes.
"D"
Educates em-dashes and en-dashes, using old-school typewriter shorthand:
(dash dash) for en-dashes, (dash dash dash) for em-dashes.
"i"
Educates em-dashes and en-dashes, using inverted old-school typewriter
shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes.
"e"
Educates ellipses.
"w"
Translates any instance of ``&quot;`` into a normal double-quote character.
This should be of no interest to most people, but of particular interest
to anyone who writes their posts using Dreamweaver, as Dreamweaver
inexplicably uses this entity to represent a literal double-quote
character. SmartyPants only educates normal quotes, not entities (because
ordinarily, entities are used for the explicit purpose of representing the
specific character they represent). The "w" option must be used in
conjunction with one (or both) of the other quote options ("q" or "b").
Thus, if you wish to apply all SmartyPants transformations (quotes, en-
and em-dashes, and ellipses) and also translate ``"`` entities into
regular quotes so SmartyPants can educate them, you should pass the
following to the smarty_pants attribute:
The ``smartypants_forbidden_flavours`` list contains pyblosxom flavours for
which no Smarty Pants rendering will occur.
Caveats
=======
Why You Might Not Want to Use Smart Quotes in Your Weblog
---------------------------------------------------------
For one thing, you might not care.
Most normal, mentally stable individuals do not take notice of proper
typographic punctuation. Many design and typography nerds, however, break
out in a nasty rash when they encounter, say, a restaurant sign that uses
a straight apostrophe to spell "Joe's".
If you're the sort of person who just doesn't care, you might well want to
continue not caring. Using straight quotes -- and sticking to the 7-bit
ASCII character set in general -- is certainly a simpler way to live.
Even if you *do* care about accurate typography, you still might want to
think twice before educating the quote characters in your weblog. One side
effect of publishing curly quote HTML entities is that it makes your
weblog a bit harder for others to quote from using copy-and-paste. What
happens is that when someone copies text from your blog, the copied text
contains the 8-bit curly quote characters (as well as the 8-bit characters
for em-dashes and ellipses, if you use these options). These characters
are not standard across different text encoding methods, which is why they
need to be encoded as HTML entities.
People copying text from your weblog, however, may not notice that you're
using curly quotes, and they'll go ahead and paste the unencoded 8-bit
characters copied from their browser into an email message or their own
weblog. When pasted as raw "smart quotes", these characters are likely to
get mangled beyond recognition.
That said, my own opinion is that any decent text editor or email client
makes it easy to stupefy smart quote characters into their 7-bit
equivalents, and I don't consider it my problem if you're using an
indecent text editor or email client.
Algorithmic Shortcomings
------------------------
One situation in which quotes will get curled the wrong way is when
apostrophes are used at the start of leading contractions. For example:
``'Twas the night before Christmas.``
In the case above, SmartyPants will turn the apostrophe into an opening
single-quote, when in fact it should be a closing one. I don't think
this problem can be solved in the general case -- every word processor
I've tried gets this wrong as well. In such cases, it's best to use the
proper HTML entity for closing single-quotes (``’``) by hand.
Bugs
====
To file bug reports or feature requests (other than topics listed in the
Caveats section above) please send email to: mailto:smartypantspy@chad.org
If the bug involves quotes being curled the wrong way, please send example
text to illustrate.
To Do list
----------
- Provide a function for use within templates to quote anything at all.
Version History
===============
1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400
- Fixed bug where blocks of precious unalterable text was instead
interpreted. Thanks to Le Roux and Dirk van Oosterbosch.
1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400
- Fix bogus magical quotation when there is no hint that the
user wants it, e.g., in "21st century". Thanks to Nathan Hamblen.
- Be smarter about quotes before terminating numbers in an en-dash'ed
range.
1.5_1.4: Thu, 10 Feb 2005 20:24:36 -0500
- Fix a date-processing bug, as reported by jacob childress.
- Begin a test-suite for ensuring correct output.
- Removed import of "string", since I didn't really need it.
(This was my first ever Python program. Sue me!)
1.5_1.3: Wed, 15 Sep 2004 18:25:58 -0400
- Abort processing if the flavour is in forbidden-list. Default of
[ "rss" ] (Idea of Wolfgang SCHNERRING.)
- Remove stray virgules from en-dashes. Patch by Wolfgang SCHNERRING.
1.5_1.2: Mon, 24 May 2004 08:14:54 -0400
- Some single quotes weren't replaced properly. Diff-tesuji played
by Benjamin GEIGER.
1.5_1.1: Sun, 14 Mar 2004 14:38:28 -0500
- Support upcoming pyblosxom 0.9 plugin verification feature.
1.5_1.0: Tue, 09 Mar 2004 08:08:35 -0500
- Initial release
Version Information
-------------------
Version numbers will track the SmartyPants_ version numbers, with the addition
of an underscore and the smartypants.py version on the end.
New versions will be available at `http://wiki.chad.org/SmartyPantsPy`_
.. _http://wiki.chad.org/SmartyPantsPy: http://wiki.chad.org/SmartyPantsPy
Authors
=======
`John Gruber`_ did all of the hard work of writing this software in Perl for
`Movable Type`_ and almost all of this useful documentation. `Chad Miller`_
ported it to Python to use with Pyblosxom_.
Additional Credits
==================
Portions of the SmartyPants original work are based on Brad Choate's nifty
MTRegex plug-in. `Brad Choate`_ also contributed a few bits of source code to
this plug-in. Brad Choate is a fine hacker indeed.
`Jeremy Hedley`_ and `Charles Wiltgen`_ deserve mention for exemplary beta
testing of the original SmartyPants.
`Rael Dornfest`_ ported SmartyPants to Blosxom.
.. _Brad Choate: http://bradchoate.com/
.. _Jeremy Hedley: http://antipixel.com/
.. _Charles Wiltgen: http://playbacktime.com/
.. _Rael Dornfest: http://raelity.org/
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(https://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _John Gruber: https://daringfireball.net/
.. _Chad Miller: http://web.chad.org/
.. _Pyblosxom: http://roughingit.subtlehints.net/pyblosxom
.. _SmartyPants: https://daringfireball.net/projects/smartypants/
.. _Movable Type: http://www.movabletype.org/
"""
import re
# style added by Kovid
# Matches opening or closing tags whose contents must never be educated
# (verbatim text): group 1 is '/' for a closing tag, group 2 is the tag name.
tags_to_skip_regex = re.compile(r"<(/)?(style|pre|code|kbd|script|math)[^>]*>", re.I)
# Matches XML-style self-closing tags (e.g. '<br/>'), which open no block.
self_closing_regex = re.compile(r'/\s*>$')
# internal functions below here
def parse_attr(attr):
    """
    Decode the SmartyPants options string *attr* into individual toggles.

    Returns a 5-tuple (do_dashes, do_backticks, do_quotes, do_ellipses,
    do_stupefy). The shortcut strings "1", "2", "3" and "-1" map to preset
    combinations; any other string is scanned character by character, with
    unknown characters silently ignored.
    """
    presets = {
        '1': (1, 1, 1, 1, 0),   # default educations; "--" is an em-dash
        '2': (2, 1, 1, 1, 0),   # old-school: "--" en-dash, "---" em-dash
        '3': (3, 1, 1, 1, 0),   # inverted old-school shorthand
        '-1': (0, 0, 0, 0, 1),  # stupefy mode: reverse the transformations
    }
    try:
        return presets[attr]
    except KeyError:
        pass
    flag_for = {
        'q': ('quotes', 1),
        'b': ('backticks', 1),
        'B': ('backticks', 2),
        'd': ('dashes', 1),
        'D': ('dashes', 2),
        'i': ('dashes', 3),
        'e': ('ellipses', 1),
    }
    state = {'dashes': 0, 'backticks': 0, 'quotes': 0, 'ellipses': 0}
    for ch in attr:
        if ch in flag_for:
            key, value = flag_for[ch]
            state[key] = value
    return state['dashes'], state['backticks'], state['quotes'], state['ellipses'], 0
def smartyPants(text, attr='1'):
    '''Educate quotes, dashes and ellipses in *text*, an HTML fragment.

    *attr* selects which transformations run (see parse_attr for the option
    string grammar). Text inside <style>/<pre>/<code>/<kbd>/<script>/<math>
    tags is passed through untouched. Returns the transformed string.
    '''
    # Parse attributes:
    # 0 : do nothing
    # 1 : set all
    # 2 : set all, using old school en- and em- dash shortcuts
    # 3 : set all, using inverted old school en and em- dash shortcuts
    #
    # q : quotes
    # b : backtick quotes (``double'' only)
    # B : backtick quotes (``double'' and `single')
    # d : dashes
    # D : old school dashes
    # i : inverted old school dashes
    # e : ellipses
    if attr == "0":
        # Do nothing.
        return text
    do_dashes, do_backticks, do_quotes, do_ellipses, do_stupefy = parse_attr(attr)
    # Map each option value to its transformation; unselected values fall
    # back to the identity function.
    dashes_func = {1: educateDashes, 2: educateDashesOldSchool, 3: educateDashesOldSchoolInverted}.get(do_dashes, lambda x: x)
    backticks_func = {1: educateBackticks, 2: lambda x: educateSingleBackticks(educateBackticks(x))}.get(do_backticks, lambda x: x)
    ellipses_func = {1: educateEllipses}.get(do_ellipses, lambda x: x)
    stupefy_func = {1: stupefyEntities}.get(do_stupefy, lambda x: x)
    skipped_tag_stack = []
    # NOTE(review): _tokenize is defined elsewhere in this module; from the
    # usage below it yields pairs whose first element is 'tag' for markup
    # tokens — presumably 'text' otherwise; confirm against its definition.
    tokens = _tokenize(text)
    result = []
    in_pre = False
    prev_token_last_char = ""
    # This is a cheat, used to get some context
    # for one-character tokens that consist of
    # just a quote char. What we do is remember
    # the last character of the previous text
    # token, to use as context to curl single-
    # character quote tokens correctly.
    for cur_token in tokens:
        if cur_token[0] == "tag":
            # Don't mess with quotes inside some tags. This does not handle self <closing/> tags!
            result.append(cur_token[1])
            skip_match = tags_to_skip_regex.match(cur_token[1])
            if skip_match is not None:
                is_self_closing = self_closing_regex.search(skip_match.group()) is not None
                if not is_self_closing:
                    if not skip_match.group(1):
                        # Opening tag of a protected element: push it and
                        # suspend educating until the matching close.
                        skipped_tag_stack.append(skip_match.group(2).lower())
                        in_pre = True
                    else:
                        if len(skipped_tag_stack) > 0:
                            if skip_match.group(2).lower() == skipped_tag_stack[-1]:
                                skipped_tag_stack.pop()
                            else:
                                pass
                                # This close doesn't match the open. This isn't XHTML. We should barf here.
                        if len(skipped_tag_stack) == 0:
                            in_pre = False
        else:
            t = cur_token[1]
            last_char = t[-1:]  # Remember last char of this token before processing.
            if not in_pre:
                t = processEscapes(t)
                # NOTE(review): this substitution is a no-op as written;
                # upstream SmartyPants replaces the '&quot;' entity with '"'
                # here, so this line looks like it was mangled by HTML-entity
                # decoding at some point — confirm against upstream.
                t = re.sub('"', '"', t)
                t = dashes_func(t)
                t = ellipses_func(t)
                # Note: backticks need to be processed before quotes.
                t = backticks_func(t)
                if do_quotes != 0:
                    if t == "'":
                        # Special case: single-character ' token
                        if re.match(r"\S", prev_token_last_char):
                            t = "’"
                        else:
                            t = "‘"
                    elif t == '"':
                        # Special case: single-character " token
                        if re.match(r"\S", prev_token_last_char):
                            t = "”"
                        else:
                            t = "“"
                    else:
                        # Normal case:
                        t = educateQuotes(t)
                t = stupefy_func(t)
            prev_token_last_char = last_char
            result.append(t)
    return "".join(result)
def educateQuotes(text):
    """
    Parameter:  String.
    Returns:    The string, with "educated" curly quote HTML entities.
    Example input:  "Isn't this fun?"
    Example output: “Isn’t this fun?”
    """
    punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
    # Special case if the very first character is a quote
    # followed by punctuation at a non-word-break. Close the quotes by brute force:
    # (fix: the lookahead used to be written '\\B', which in a raw f-string
    # yields a literal backslash followed by 'B' and therefore never matched;
    # '\B', the non-word-boundary assertion, is what the comment above intends.)
    text = re.sub(fr"""^'(?={punct_class}\B)""", r"""’""", text)
    text = re.sub(fr"""^"(?={punct_class}\B)""", r"""”""", text)
    # Special case for double sets of quotes, e.g.:
    #   <p>He said, "'Quoted' words in a larger quote."</p>
    text = re.sub(r""""'(?=\w)""", """“‘""", text)
    text = re.sub(r"""'"(?=\w)""", """‘“""", text)
    text = re.sub(r'''""(?=\w)''', """““""", text)
    text = re.sub(r"""''(?=\w)""", """‘‘""", text)
    text = re.sub(r'''\"\'''', """”’""", text)
    text = re.sub(r'''\'\"''', """’”""", text)
    text = re.sub(r'''""''', """””""", text)
    text = re.sub(r"""''""", """’’""", text)
    # Special case for decade abbreviations (the '80s --> ’80s):
    # See http://practicaltypography.com/apostrophes.html
    text = re.sub(r"""(\W|^)'(?=\d{2}s)""", r"""\1’""", text)
    # Measurements in feet and inches or longitude/latitude: 19' 43.5" --> 19′ 43.5″
    text = re.sub(r'''(\W|^)([-0-9.]+\s*)'(\s*[-0-9.]+)"''', r'\1\2′\3″', text)
    # Special case for Quotes at inside of other entities, e.g.:
    #   <p>A double quote--"within dashes"--would be nice.</p>
    text = re.sub(r"""(?<=\W)"(?=\w)""", r"""“""", text)
    text = re.sub(r"""(?<=\W)'(?=\w)""", r"""‘""", text)
    text = re.sub(r"""(?<=\w)"(?=\W)""", r"""”""", text)
    text = re.sub(r"""(?<=\w)'(?=\W)""", r"""’""", text)
    # The following are commented out as smartypants tokenizes text by
    # stripping out html tags. Therefore, there is no guarantee that the
    # start-of-line and end-of-line regex operators will match anything
    # meaningful
    # Special case for Quotes at end of line with a preceding space (may change just to end of line)
    # text = re.sub(r"""(?<=\s)"$""", r"""”""", text)
    # text = re.sub(r"""(?<=\s)'$""", r"""’""", text)
    # Special case for Quotes at beginning of line with a space - multiparagraph quoted text:
    # text = re.sub(r"""^"(?=\s)""", r"""“""", text)
    # text = re.sub(r"""^'(?=\s)""", r"""‘""", text)
    close_class = r"""[^\ \t\r\n\[\{\(\-]"""
    dec_dashes = r"""–|—"""
    # Get most opening single quotes:
    opening_single_quotes_regex = re.compile(r"""
            (
                \s          |   # a whitespace char, or
                \xa0        |   # a non-breaking space (was the &nbsp; entity), or
                --          |   # dashes, or
                &[mn]dash;  |   # named dash entities
                {}          |   # or dash characters
                &\#x201[34];    # or hex
            )
            '                 # the quote
            (?=\w)            # followed by a word character
            """.format(dec_dashes), re.VERBOSE)
    text = opening_single_quotes_regex.sub(r"""\1‘""", text)
    closing_single_quotes_regex = re.compile(r"""
            ({})
            '
            (?!\s | s\b | \d)
            """.format(close_class), re.VERBOSE)
    text = closing_single_quotes_regex.sub(r"""\1’""", text)
    closing_single_quotes_regex = re.compile(r"""
            ({})
            '
            (\s | s\b)
            """.format(close_class), re.VERBOSE)
    text = closing_single_quotes_regex.sub(r"""\1’\2""", text)
    # Any remaining single quotes should be opening ones:
    text = re.sub(r"""'""", r"""‘""", text)
    # Get most opening double quotes:
    opening_double_quotes_regex = re.compile(r"""
            (
                \s          |   # a whitespace char, or
                \xa0        |   # a non-breaking space (was the &nbsp; entity), or
                --          |   # dashes, or
                &[mn]dash;  |   # named dash entities
                {}          |   # or dash characters
                &\#x201[34];    # or hex
            )
            "                 # the quote
            (?=\w)            # followed by a word character
            """.format(dec_dashes), re.VERBOSE)
    text = opening_double_quotes_regex.sub(r"""\1“""", text)
    # Double closing quotes:
    closing_double_quotes_regex = re.compile(r"""
            #({})?   # character that indicates the quote should be closing
            "
            (?=\s)
            """.format(close_class), re.VERBOSE)
    text = closing_double_quotes_regex.sub(r"""”""", text)
    closing_double_quotes_regex = re.compile(r"""
            ({})   # character that indicates the quote should be closing
            "
            """.format(close_class), re.VERBOSE)
    text = closing_double_quotes_regex.sub(r"""\1”""", text)
    if text.endswith('-"'):
        # A string that endswith -" is sometimes used for dialogue
        text = text[:-1] + '”'
    # Any remaining quotes should be opening ones.
    text = re.sub(r'"', r"""“""", text)
    return text
def educateBackticks(text):
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
translated into HTML curly quote entities.
Example input: ``Isn't this fun?''
Example output: “Isn't this fun?”
"""
text = re.sub(r"""``""", r"""“""", text)
text = re.sub(r"""''""", r"""”""", text)
return text
def educateSingleBackticks(text):
    """
    Parameter:  String.
    Returns:    The string, with `backticks' -style single quotes
                translated into curly quote characters.
    Example input:  `Isn't this fun?'
    Example output: ‘Isn’t this fun?’
    """
    # Single literal characters: str.replace mirrors the regex substitutions,
    # applied in the same order (backtick first, then apostrophe).
    return text.replace('`', '‘').replace("'", '’')
def educateDashes(text):
    """
    Parameter:  String.
    Returns:    The string, with each instance of "--" translated to
                an em-dash, and "---" to an en-dash.
    """
    # Replace the longer run first so '---' is not consumed as '--' + '-'.
    text = text.replace('---', '–')  # U+2013 en dash
    return text.replace('--', '—')   # U+2014 em dash
def educateDashesOldSchool(text):
    """
    Parameter:  String.
    Returns:    The string, with each instance of "--" translated to
                an en-dash, and each "---" translated to an em-dash.
    """
    # Replace the longer run first so '---' is not consumed as '--' + '-'.
    text = text.replace('---', '—')  # U+2014 em dash
    return text.replace('--', '–')   # U+2013 en dash
def educateDashesOldSchoolInverted(text):
    """
    Parameter:  String.
    Returns:    The string, with each instance of "--" translated to
                an em-dash, and each "---" translated to
                an en-dash. Two reasons why: First, unlike the
                en- and em-dash syntax supported by
                EducateDashesOldSchool(), it's compatible with existing
                entries written before SmartyPants 1.1, back when "--" was
                only used for em-dashes.  Second, em-dashes are more
                common than en-dashes, and so it sort of makes sense that
                the shortcut should be shorter to type. (Thanks to Aaron
                Swartz for the idea.)
    """
    # Replace the longer run first so '---' is not consumed as '--' + '-'.
    text = text.replace('---', '–')  # U+2013 en dash
    return text.replace('--', '—')   # U+2014 em dash
def educateEllipses(text):
    """
    Parameter:  String.
    Returns:    The string, with each instance of "..." translated to
                an ellipsis character.
    Example input:  Huh...?
    Example output: Huh…?
    """
    # Handle the unspaced form first, then the spaced ". . ." variant,
    # mirroring the order of the original regex substitutions.
    return text.replace('...', '…').replace('. . .', '…')
def stupefyEntities(text):
    """
    Parameter:  String.
    Returns:    The string, with each SmartyPants character translated to
                its ASCII counterpart.
    Example input:  “Hello — world.”
    Example output: "Hello -- world."
    """
    # All source characters are distinct single characters, so a sequence of
    # literal replacements reproduces the original chain of substitutions.
    for fancy, plain in (
        ('–', '-'),    # en-dash
        ('—', '--'),   # em-dash
        ('‘', "'"),    # open single quote
        ('’', "'"),    # close single quote
        ('“', '"'),    # open double quote
        ('”', '"'),    # close double quote
        ('…', '...'),  # ellipsis
    ):
        text = text.replace(fancy, plain)
    return text
def processEscapes(text):
    r"""
    Parameter:  String.
    Returns:    The string, after processing the following backslash
                escape sequences. This is useful if you want to force a "dumb"
                quote or other character to appear.

                Escape  Value
                ------  -----
                \\      \
                \"      "
                \'      '
                \.      .
                \-      -
                \`      `
    """
    # Bug fix: the previous implementation used re.sub with raw-string
    # replacement literals that were corrupted (e.g. an unterminated
    # r-string for the single backslash replacement), which is invalid
    # Python and, even when repaired, a lone '\' replacement is rejected by
    # re.sub as a bad escape.  Plain str.replace avoids replacement-escape
    # processing entirely; every pattern here is a literal two-character
    # sequence.  '\\' must be handled first so that, e.g., the input '\\"'
    # collapses to '\"' and then to '"', matching the original sequential
    # substitution order.
    for escape_seq, char in (
        ('\\\\', '\\'),
        ('\\"', '"'),
        ("\\'", "'"),
        ('\\.', '.'),
        ('\\-', '-'),
        ('\\`', '`'),
    ):
        text = text.replace(escape_seq, char)
    return text
def _tokenize(html):
"""
Parameter: String containing HTML markup.
Returns: Reference to an array of the tokens comprising the input
string. Each token is either a tag (possibly with nested,
tags contained therein, such as <a href="<MTFoo>">, or a
run of text between tags. Each element of the array is a
two-element array; the first is either 'tag' or 'text';
the second is the actual value.
Based on the _tokenize() subroutine from Brad Choate's MTRegex plugin.
<http://www.bradchoate.com/past/mtregex.php>
"""
tokens = []
# depth = 6
# nested_tags = "|".join(['(?:<(?:[^<>]',] * depth) + (')*>)' * depth)
# match = r"""(?: <! ( -- .*? -- \s* )+ > ) | # comments
# (?: <\? .*? \?> ) | # directives
# %s # nested tags """ % (nested_tags,)
tag_soup = re.compile(r"""([^<]*)(<[^>]*>)""")
token_match = tag_soup.search(html)
previous_end = 0
while token_match is not None:
if token_match.group(1):
tokens.append(['text', token_match.group(1)])
tokens.append(['tag', token_match.group(2)])
previous_end = token_match.end()
token_match = tag_soup.search(html, token_match.end())
if previous_end < len(html):
tokens.append(['text', html[previous_end:]])
return tokens
def run_tests(return_tests=False):
    '''
    Self-tests for the smartyPants() processor.

    :param return_tests: when True, return the unittest suite instead of
                         running it (used by calibre's aggregate test runner).
    '''
    import unittest
    sp = smartyPants

    class TestSmartypantsAllAttributes(unittest.TestCase):
        # the default attribute is "1", which means "all".

        def test_dates(self):
            # Decade abbreviations must get a closing (right) single quote.
            self.assertEqual(sp("one two '60s"), "one two ’60s")
            self.assertEqual(sp("1440-80's"), "1440-80’s")
            self.assertEqual(sp("1440-'80s"), "1440-’80s")
            self.assertEqual(sp("1440---'80s"), "1440–’80s")
            self.assertEqual(sp("1960s"), "1960s")  # no effect.
            self.assertEqual(sp("1960's"), "1960’s")
            self.assertEqual(sp("one two '60s"), "one two ’60s")
            self.assertEqual(sp("'60s"), "’60s")

        def test_measurements(self):
            # Quotes after digits become prime marks (feet/inches).
            ae = self.assertEqual
            ae(sp("one two 1.1'2.2\""), "one two 1.1′2.2″")
            ae(sp("1' 2\""), "1′ 2″")

        def test_skip_tags(self):
            # Content inside <script>/<code> must pass through untouched.
            self.assertEqual(
                sp("""<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>"""),  # noqa
                """<script type="text/javascript">\n<!--\nvar href = "http://www.google.com";\nvar linktext = "google";\ndocument.write('<a href="' + href + '">' + linktext + "</a>");\n//-->\n</script>""")  # noqa
            self.assertEqual(
                sp("""<p>He said "Let's write some code." This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>"""),
                """<p>He said “Let’s write some code.” This code here <code>if True:\n\tprint "Okay"</code> is python code.</p>""")  # noqa
            self.assertEqual(
                sp('''<script/><p>It's ok</p>'''),
                '''<script/><p>It’s ok</p>''')

        def test_ordinal_numbers(self):
            self.assertEqual(sp("21st century"), "21st century")  # no effect.
            self.assertEqual(sp("3rd"), "3rd")  # no effect.

        def test_educated_quotes(self):
            self.assertEqual(sp('''"Isn't this fun?"'''), '''“Isn’t this fun?”''')
            self.assertEqual(sp("'abc'"), '‘abc’')

    tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestSmartypantsAllAttributes)
    if return_tests:
        return tests
    unittest.TextTestRunner(verbosity=4).run(tests)
if __name__ == "__main__":
run_tests()
| 33,189 | Python | .py | 685 | 41.233577 | 217 | 0.610033 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,145 | search_query_parser.py | kovidgoyal_calibre/src/calibre/utils/search_query_parser.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
A parser for search queries with a syntax very similar to that used by
the Google search engine.
For details on the search query syntax see :class:`SearchQueryParser`.
To use the parser, subclass :class:`SearchQueryParser` and implement the
methods :method:`SearchQueryParser.universal_set` and
:method:`SearchQueryParser.get_matches`. See for example :class:`Tester`.
If this module is run, it will perform a series of unit tests.
'''
import re
import weakref
from calibre import prints
from calibre.constants import preferred_encoding
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import sort_key
from calibre.utils.localization import _
from polyglot.binary import as_hex_unicode, from_hex_unicode
from polyglot.builtins import codepoint_to_chr
'''
This class manages access to the preference holding the saved search queries.
It exists to ensure that unicode is used throughout, and also to permit
adding other fields, such as whether the search is a 'favorite'
'''
class SavedSearchQueries:
    '''
    Manages access to the preference holding the saved search queries,
    keeping the in-memory mapping in sync with the backing database
    preference (when a database is available) and ensuring everything is
    stored as unicode.
    '''
    queries = {}
    opt_name = ''

    def __init__(self, db, _opt_name):
        self.opt_name = _opt_name
        if db is None:
            # No backing database: keep queries purely in memory.
            self.queries = {}
            self._db = lambda: None
        else:
            db = db.new_api
            # Hold the cache weakly so this object never keeps it alive.
            self._db = weakref.ref(db)
            self.queries = db.pref(self.opt_name, {})

    @property
    def db(self):
        # Dereference the weak reference; None once the database is gone.
        return self._db()

    def save_queries(self):
        # Persist the current mapping, if we still have a database.
        db = self.db
        if db is not None:
            db.set_pref(self.opt_name, self.queries)

    def force_unicode(self, x):
        return x if isinstance(x, str) else x.decode(preferred_encoding, 'replace')

    def add(self, name, value):
        key = self.force_unicode(name)
        self.queries[key] = self.force_unicode(value).strip()
        self.save_queries()

    def lookup(self, name):
        # Case-insensitive search over the stored names.
        wanted = self.force_unicode(name).lower()
        for existing, query in self.queries.items():
            if existing.lower() == wanted:
                return query
        return None

    def delete(self, name):
        self.queries.pop(self.force_unicode(name), False)
        self.save_queries()

    def rename(self, old_name, new_name):
        # Note: the lookup of the old name here is case-sensitive.
        old = self.force_unicode(old_name)
        new = self.force_unicode(new_name)
        self.queries[new] = self.queries.get(old, None)
        self.queries.pop(old, False)
        self.save_queries()

    def set_all(self, smap):
        self.queries = smap
        self.save_queries()

    def names(self):
        return sorted(self.queries.keys(), key=sort_key)
'''
Create a global instance of the saved searches. It is global so that the searches
are common across all instances of the parser (devices, library, etc).
'''
ss = SavedSearchQueries(None, None)


def set_saved_searches(db, opt_name):
    # Rebind the shared singleton so every consumer sees the new library.
    global ss
    ss = SavedSearchQueries(db, opt_name)


def saved_searches():
    # Plain read of the module-level singleton; no 'global' needed.
    return ss


def global_lookup_saved_search(name):
    # Default saved-search lookup used by SearchQueryParser.
    return ss.lookup(name)
'''
Parse a search expression into a series of potentially recursive operations.
Note that the interpreter wants binary operators, not n-ary ops. This is why we
recurse instead of iterating when building sequences of the same op.
The syntax is more than a bit twisted. In particular, the handling of colons
in the base token requires semantic analysis.
Also note that the query string is lowercased before analysis. This is OK because
calibre's searches are all case-insensitive.
Grammar:
prog ::= or_expression
or_expression ::= and_expression [ 'or' or_expression ]
and_expression ::= not_expression [ [ 'and' ] and_expression ]
not_expression ::= [ 'not' ] location_expression
location_expression ::= base_token | ( '(' or_expression ')' )
base_token ::= a sequence of letters and colons, perhaps quoted
'''
class Parser:
    '''
    Recursive-descent parser for calibre search expressions.

    tokenize() turns the expression into (type, value) tuples; parse()
    builds a nested list AST whose nodes are
    ['or'|'and'|'not'|'token', ...], with leaves of the form
    ['token', location, text].
    '''

    def __init__(self):
        self.current_token = 0
        self.tokens = None

    # Token type codes used in the (type, value) tuples produced by tokenize().
    OPCODE = 1
    WORD = 2
    QUOTED_WORD = 3
    EOF = 4
    # Map each escapable character (backslash, quote, parens) to a control
    # character placeholder so the lex scanner never sees escaped forms.
    REPLACEMENTS = tuple(('\\' + x, codepoint_to_chr(i + 1)) for i, x in enumerate('\\"()'))

    # the sep must be a printable character sequence that won't actually appear naturally
    docstring_sep = '□ༀ؆'  # Unicode white square, Tibetan Om, Arabic-Indic Cube Root

    # Had to translate named constants to numeric values
    lex_scanner = re.Scanner([
            (r'[()]', lambda x,t: (Parser.OPCODE, t)),
            (r'@.+?:[^")\s]+', lambda x,t: (Parser.WORD, str(t))),
            (r'[^"()\s]+', lambda x,t: (Parser.WORD, str(t))),
            (r'".*?((?<!\\)")', lambda x,t: (Parser.QUOTED_WORD, t[1:-1])),
            (r'\s+', None)
    ], flags=re.DOTALL)

    def token(self, advance=False):
        # Current token's value (None at EOF); optionally consume it.
        if self.is_eof():
            return None
        res = self.tokens[self.current_token][1]
        if advance:
            self.current_token += 1
        return res

    def lcase_token(self, advance=False):
        # Like token(), but lowercased -- used to match keywords.
        if self.is_eof():
            return None
        res = self.tokens[self.current_token][1]
        if advance:
            self.current_token += 1
        return icu_lower(res)

    def token_type(self):
        if self.is_eof():
            return self.EOF
        return self.tokens[self.current_token][0]

    def is_eof(self):
        return self.current_token >= len(self.tokens)

    def advance(self):
        self.current_token += 1

    def tokenize(self, expr):
        # convert docstrings to base64 to avoid all processing. Change the docstring
        # indicator to something unique with no characters special to the parser.
        expr = re.sub('(""")(..*?)(""")',
                lambda mo: self.docstring_sep + as_hex_unicode(mo.group(2)) + self.docstring_sep,
                expr, flags=re.DOTALL)

        # Strip out escaped backslashes, quotes and parens so that the
        # lex scanner doesn't get confused. We put them back later.
        for k, v in self.REPLACEMENTS:
            expr = expr.replace(k, v)

        tokens = self.lex_scanner.scan(expr)[0]

        def unescape(x):
            # recover the docstrings
            x = re.sub(f'({self.docstring_sep})(..*?)({self.docstring_sep})',
                    lambda mo: from_hex_unicode(mo.group(2)), x)
            # Put the escaped characters back (without their backslashes).
            for k, v in self.REPLACEMENTS:
                x = x.replace(v, k[1:])
            return x

        return [(tt, unescape(tv)) for tt, tv in tokens]

    def parse(self, expr, locations):
        '''
        Parse ``expr`` and return the AST. ``locations`` is the collection of
        valid lookup names used to decide whether word:value is a scoped token.
        Raises ParseException on trailing garbage or syntax errors.
        '''
        self.locations = locations
        self.tokens = self.tokenize(expr)
        self.current_token = 0
        prog = self.or_expression()
        if not self.is_eof():
            raise ParseException(_('Extra characters at end of search'))
        return prog

    def or_expression(self):
        # or_expression ::= and_expression [ 'or' or_expression ]
        lhs = self.and_expression()
        if self.lcase_token() == 'or':
            self.advance()
            return ['or', lhs, self.or_expression()]
        return lhs

    def and_expression(self):
        # and_expression ::= not_expression [ [ 'and' ] and_expression ]
        lhs = self.not_expression()
        if self.lcase_token() == 'and':
            self.advance()
            return ['and', lhs, self.and_expression()]

        # Account for the optional 'and'
        if ((self.token_type() in [self.WORD, self.QUOTED_WORD] or self.token() == '(') and self.lcase_token() != 'or'):
            return ['and', lhs, self.and_expression()]
        return lhs

    def not_expression(self):
        # not_expression ::= [ 'not' ] location_expression
        if self.lcase_token() == 'not':
            self.advance()
            return ['not', self.not_expression()]
        return self.location_expression()

    def location_expression(self):
        # location_expression ::= base_token | ( '(' or_expression ')' )
        if self.token_type() == self.OPCODE and self.token() == '(':
            self.advance()
            res = self.or_expression()
            if self.token_type() != self.OPCODE or self.token(advance=True) != ')':
                raise ParseException(_('missing )'))
            return res
        if self.token_type() not in (self.WORD, self.QUOTED_WORD):
            raise ParseException(_('Invalid syntax. Expected a lookup name or a word'))
        return self.base_token()

    def base_token(self):
        if self.token_type() == self.QUOTED_WORD:
            return ['token', 'all', self.token(advance=True)]

        words = self.token(advance=True).split(':')

        # The complexity here comes from having colon-separated search
        # values. That forces us to check that the first "word" in a colon-
        # separated group is a valid location. If not, then the token must
        # be reconstructed. We also have the problem that locations can be
        # followed by quoted strings that appear as the next token. and that
        # tokens can be a sequence of colons.

        # We have a location if there is more than one word and the first
        # word is in locations. This check could produce a "wrong" answer if
        # the search string is something like 'author: "foo"' because it
        # will be interpreted as 'author:"foo"'. I am choosing to accept the
        # possible error. The expression should be written '"author:" foo'
        if len(words) > 1 and words[0].lower() in self.locations:
            loc = words[0].lower()
            words = words[1:]
            if len(words) == 1 and self.token_type() == self.QUOTED_WORD:
                return ['token', loc, self.token(advance=True)]
            return ['token', icu_lower(loc), ':'.join(words)]

        return ['token', 'all', ':'.join(words)]
class ParseException(Exception):
    '''Raised when a search expression cannot be parsed.'''

    @property
    def msg(self):
        # The first positional argument, if any, is the human-readable message.
        return self.args[0] if self.args else ""
class SearchQueryParser:
    '''
    Parses a search query.

    A search query consists of tokens. The tokens can be combined using
    the `or`, `and` and `not` operators as well as grouped using parentheses.
    When no operator is specified between two tokens, `and` is assumed.

    Each token is a string of the form `location:query`. `location` is a string
    from :member:`DEFAULT_LOCATIONS`. It is optional. If it is omitted, it is assumed to
    be `all`. `query` is an arbitrary string that must not contain parentheses.
    If it contains whitespace, it should be quoted by enclosing it in `"` marks.

    Examples::

      * `Asimov` [search for the string "Asimov" in location `all`]
      * `comments:"This is a good book"` [search for "This is a good book" in `comments`]
      * `author:Asimov tag:unread` [search for books by Asimov that have been tagged as unread]
      * `author:Asimov or author:Hardy` [search for books by Asimov or Hardy]
      * `(author:Asimov or author:Hardy) and not tag:read` [search for unread books by Asimov or Hardy]
    '''

    @staticmethod
    def run_tests(parser, result, tests):
        # Simple smoke-test driver; returns the list of failed expressions.
        failed = []
        for test in tests:
            prints('\tTesting:', test[0], end=' ')
            res = parser.parseString(test[0])
            if list(res.get(result, None)) == test[1]:
                print('OK')
            else:
                print('FAILED:', 'Expected:', test[1], 'Got:', list(res.get(result, None)))
                failed.append(test[0])
        return failed

    def __init__(self, locations, test=False, optimize=False, lookup_saved_search=None, parse_cache=None):
        self.sqp_initialize(locations, test=test, optimize=optimize)
        self.parser = Parser()
        self.lookup_saved_search = global_lookup_saved_search if lookup_saved_search is None else lookup_saved_search
        self.sqp_parse_cache = parse_cache

    def sqp_change_locations(self, locations):
        # Cached parse trees were built against the old locations; drop them.
        self.sqp_initialize(locations, optimize=self.optimize)
        if self.sqp_parse_cache is not None:
            self.sqp_parse_cache.clear()

    def sqp_initialize(self, locations, test=False, optimize=False):
        self.locations = locations
        self._tests_failed = False
        self.optimize = optimize

    def get_queried_fields(self, query):
        '''
        Yield (location, text) pairs for every leaf token in ``query``,
        expanding saved searches as they are encountered.
        '''
        # empty the list of searches used for recursion testing
        self.searches_seen = set()
        tree = self._get_tree(query)
        yield from self._walk_expr(tree)

    def _walk_expr(self, tree):
        # Depth-first walk of the AST, yielding the leaf tokens.
        if tree[0] in ('or', 'and'):
            yield from self._walk_expr(tree[1])
            yield from self._walk_expr(tree[2])
        elif tree[0] == 'not':
            yield from self._walk_expr(tree[1])
        else:
            if tree[1] == 'search':
                # Expand a saved search in place, guarding against recursion.
                query, search_name_lower = self._check_saved_search_recursion(tree[2])
                yield from self._walk_expr(self._get_tree(query))
                self.searches_seen.discard(search_name_lower)
            else:
                yield tree[1], tree[2]

    def parse(self, query, candidates=None):
        # empty the list of searches used for recursion testing
        self.searches_seen = set()
        # NOTE(review): the candidates argument is ignored -- the search
        # always starts from universal_set(). Presumably intentional (the
        # parameter is kept for signature compatibility); confirm before
        # relying on passing candidates here.
        candidates = self.universal_set()
        return self._parse(query, candidates=candidates)

    def _get_tree(self, query):
        # Parse with an optional external cache of query -> AST.
        try:
            res = self.sqp_parse_cache.get(query, None)
        except AttributeError:
            res = None
        if res is not None:
            return res
        try:
            res = self.parser.parse(query, self.locations)
        except RuntimeError:
            raise ParseException(_('Failed to parse query, recursion limit reached: %s')%repr(query))
        if self.sqp_parse_cache is not None:
            self.sqp_parse_cache[query] = res
        return res

    # this parse is used internally because it doesn't clear the
    # recursive search test list.
    def _parse(self, query, candidates=None):
        tree = self._get_tree(query)
        if candidates is None:
            candidates = self.universal_set()
        t = self.evaluate(tree, candidates)
        return t

    def method(self, group_name):
        # Dispatch: AST node name -> evaluate_<name> bound method.
        return getattr(self, 'evaluate_'+group_name)

    def evaluate(self, parse_result, candidates):
        return self.method(parse_result[0])(parse_result[1:], candidates)

    def evaluate_and(self, argument, candidates):
        # RHS checks only those items matched by LHS
        # returns result of RHS check: RHmatches(LHmatches(c))
        #  return self.evaluate(argument[0]).intersection(self.evaluate(argument[1]))
        l = self.evaluate(argument[0], candidates)
        return l.intersection(self.evaluate(argument[1], l))

    def evaluate_or(self, argument, candidates):
        # RHS checks only those elements not matched by LHS
        # returns LHS union RHS: LHmatches(c) + RHmatches(c-LHmatches(c))
        #  return self.evaluate(argument[0]).union(self.evaluate(argument[1]))
        l = self.evaluate(argument[0], candidates)
        return l.union(self.evaluate(argument[1], candidates.difference(l)))

    def evaluate_not(self, argument, candidates):
        # unary op checks only candidates. Result: list of items matching
        # returns: c - matches(c)
        #  return self.universal_set().difference(self.evaluate(argument[0]))
        return candidates.difference(self.evaluate(argument[0], candidates))

    # def evaluate_parenthesis(self, argument, candidates):
    #     return self.evaluate(argument[0], candidates)

    def _check_saved_search_recursion(self, query):
        # Resolve a saved-search name to its query text, raising if the
        # saved search (directly or indirectly) references itself.
        if query.startswith('='):
            query = query[1:]
        search_name_lower = query.lower()
        if search_name_lower in self.searches_seen:
            raise ParseException(_('Recursive saved search: {0}').format(query))
        self.searches_seen.add(search_name_lower)
        query = self._get_saved_search_text(query)
        return (query, search_name_lower)

    def _get_saved_search_text(self, query):
        try:
            ss = self.lookup_saved_search(query)
            if ss is None:
                raise ParseException(_('Unknown saved search: {}').format(query))
            return ss
        except ParseException as e:
            raise e
        except: # convert all exceptions (e.g., missing key) to a parse error
            import traceback
            traceback.print_exc()
            raise ParseException(_('Unknown error in saved search: {0}').format(query))

    def evaluate_token(self, argument, candidates):
        location = argument[0]
        query = argument[1]
        if location.lower() == 'search':
            # Saved search: expand and evaluate its query text instead.
            query, search_name_lower = self._check_saved_search_recursion(query)
            result = self._parse(query, candidates)
            self.searches_seen.discard(search_name_lower)
            return result
        return self._get_matches(location, query, candidates)

    def _get_matches(self, location, query, candidates):
        # Pass candidates through only when optimization is enabled.
        if self.optimize:
            return self.get_matches(location, query, candidates=candidates)
        else:
            return self.get_matches(location, query)

    def get_matches(self, location, query, candidates=None):
        '''
        Should return the set of matches for :param:'location` and :param:`query`.

        The search must be performed over all entries if :param:`candidates` is
        None otherwise only over the items in candidates.

        :param:`location` is one of the items in :member:`SearchQueryParser.DEFAULT_LOCATIONS`.
        :param:`query` is a string literal.
        :return: None or a subset of the set returned by :meth:`universal_set`.
        '''
        return set()

    def universal_set(self):
        '''
        Should return the set of all matches.
        '''
        return set()
| 17,534 | Python | .py | 387 | 36.904393 | 120 | 0.631814 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,146 | browser.py | kovidgoyal_calibre/src/calibre/utils/browser.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2010, Kovid Goyal <kovid at kovidgoyal.net>
import copy
import ssl
from mechanize import Browser as B
from mechanize import HTTPSHandler
from polyglot import http_client
from polyglot.http_cookie import CookieJar
class ModernHTTPSHandler(HTTPSHandler):
    # SSL context used for every HTTPS connection opened by this handler.
    # Assigned externally (see Browser.__init__); None until then.
    ssl_context = None

    def https_open(self, req):
        # When a client certificate manager is configured, load the matching
        # key/certificate pair into the SSL context before connecting.
        if self.client_cert_manager is not None:
            key_file, cert_file = self.client_cert_manager.find_key_cert(
                req.get_full_url())
            if cert_file:
                self.ssl_context.load_cert_chain(cert_file, key_file)

        def conn_factory(hostport, **kw):
            # Inject our SSL context into the connection constructor.
            kw['context'] = self.ssl_context
            return http_client.HTTPSConnection(hostport, **kw)
        return self.do_open(conn_factory, req)
class Browser(B):
    '''
    A cloneable mechanize browser. Useful for multithreading. The idea is that
    each thread has a browser clone. Every clone uses the same thread safe
    cookie jar. All clones share the same browser configuration.

    Also adds support for fine-tuning SSL verification via an SSL context object.

    Configuration calls are recorded in _clone_actions so clone_browser()
    can replay them on a fresh instance.
    '''
    handler_classes = B.handler_classes.copy()
    handler_classes['https'] = ModernHTTPSHandler

    def __init__(self, *args, **kwargs):
        self._clone_actions = {}
        # ssl_context wins over verify_ssl; verify_ssl=False disables all
        # certificate checking (use only for trusted-but-misconfigured hosts).
        sc = kwargs.pop('ssl_context', None)
        if sc is None:
            sc = ssl.create_default_context() if kwargs.pop('verify_ssl', True) else ssl._create_unverified_context(cert_reqs=ssl.CERT_NONE)

        B.__init__(self, *args, **kwargs)
        self.set_cookiejar(CookieJar())
        self._ua_handlers['https'].ssl_context = sc

    @property
    def https_handler(self):
        return self._ua_handlers['https']

    def set_current_header(self, header, value=None):
        '''
        Set or replace the default request header ``header`` (matched
        case-insensitively). When ``value`` is falsy, remove the header
        instead.
        '''
        found = False
        q = header.lower()
        remove = []
        for i, (k, v) in enumerate(tuple(self.addheaders)):
            if k.lower() == q:
                if value:
                    self.addheaders[i] = (header, value)
                    found = True
                else:
                    remove.append(i)
        # Bug fix: append only when a real value was supplied. Previously,
        # asking to remove a header always appended (header, None), so
        # removal never actually removed the header.
        if not found and value:
            self.addheaders.append((header, value))
        if remove:
            for i in reversed(remove):
                del self.addheaders[i]

    def current_user_agent(self):
        # Return the current User-agent header value, or None if unset.
        for k, v in self.addheaders:
            if k.lower() == 'user-agent':
                return v

    def set_user_agent(self, newval):
        self.set_current_header('User-agent', newval)

    def set_handle_refresh(self, *args, **kwargs):
        B.set_handle_refresh(self, *args, **kwargs)
        self._clone_actions['set_handle_refresh'] = ('set_handle_refresh',
                args, kwargs)

    def set_cookiejar(self, *args, **kwargs):
        B.set_cookiejar(self, *args, **kwargs)
        self._clone_actions['set_cookiejar'] = ('set_cookiejar', args, kwargs)

    def set_cookie(self, name, value, domain, path='/'):
        return self.set_simple_cookie(name, value, domain, path=path)

    @property
    def cookiejar(self):
        # The jar recorded by the last set_cookiejar() call; shared by clones.
        return self._clone_actions['set_cookiejar'][1][0]

    def set_handle_redirect(self, *args, **kwargs):
        B.set_handle_redirect(self, *args, **kwargs)
        self._clone_actions['set_handle_redirect'] = ('set_handle_redirect',
                args, kwargs)

    def set_handle_equiv(self, *args, **kwargs):
        B.set_handle_equiv(self, *args, **kwargs)
        self._clone_actions['set_handle_equiv'] = ('set_handle_equiv',
                args, kwargs)

    def set_handle_gzip(self, *args, **kwargs):
        B.set_handle_gzip(self, *args, **kwargs)
        self._clone_actions['set_handle_gzip'] = ('set_handle_gzip',
                args, kwargs)

    def set_debug_redirects(self, *args, **kwargs):
        B.set_debug_redirects(self, *args, **kwargs)
        self._clone_actions['set_debug_redirects'] = ('set_debug_redirects',
                args, kwargs)

    def set_debug_responses(self, *args, **kwargs):
        B.set_debug_responses(self, *args, **kwargs)
        self._clone_actions['set_debug_responses'] = ('set_debug_responses',
                args, kwargs)

    def set_debug_http(self, *args, **kwargs):
        B.set_debug_http(self, *args, **kwargs)
        self._clone_actions['set_debug_http'] = ('set_debug_http',
                args, kwargs)

    def set_handle_robots(self, *args, **kwargs):
        B.set_handle_robots(self, *args, **kwargs)
        self._clone_actions['set_handle_robots'] = ('set_handle_robots',
                args, kwargs)

    def set_proxies(self, *args, **kwargs):
        B.set_proxies(self, *args, **kwargs)
        self._clone_actions['set_proxies'] = ('set_proxies', args, kwargs)

    def add_password(self, *args, **kwargs):
        B.add_password(self, *args, **kwargs)
        self._clone_actions['add_password'] = ('add_password', args, kwargs)

    def add_proxy_password(self, *args, **kwargs):
        B.add_proxy_password(self, *args, **kwargs)
        self._clone_actions['add_proxy_password'] = ('add_proxy_password', args, kwargs)

    def clone_browser(self):
        '''
        Create a new Browser sharing this one's SSL context, headers and
        recorded configuration (including the cookie jar).
        '''
        clone = self.__class__()
        clone.https_handler.ssl_context = self.https_handler.ssl_context
        clone.addheaders = copy.deepcopy(self.addheaders)
        # Replay every recorded configuration call on the clone.
        for func, args, kwargs in self._clone_actions.values():
            func = getattr(clone, func)
            func(*args, **kwargs)
        return clone
if __name__ == '__main__':
    # Ad-hoc sanity check: clone a configured browser and verify that the
    # clone shares the cookie jar and default headers with the original.
    from pprint import pprint

    from calibre import browser
    orig = browser()
    clone = orig.clone_browser()
    pprint(orig._ua_handlers)
    pprint(clone._ua_handlers)
    assert orig._ua_handlers.keys() == clone._ua_handlers.keys()
    assert orig._ua_handlers['_cookies'].cookiejar is \
            clone._ua_handlers['_cookies'].cookiejar
    assert orig.addheaders == clone.addheaders
| 5,903 | Python | .py | 130 | 36.553846 | 140 | 0.618898 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,147 | formatter_functions.py | kovidgoyal_calibre/src/calibre/utils/formatter_functions.py | #!/usr/bin/env python
'''
Created on 13 Jan 2011
@author: charles
'''
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import inspect
import numbers
import posixpath
import re
import traceback
from contextlib import suppress
from datetime import datetime, timedelta
from enum import Enum, auto
from functools import partial
from math import ceil, floor, modf, trunc
from lxml import html
from calibre import human_readable, prepare_string_for_xml, prints
from calibre.constants import DEBUG
from calibre.db.constants import DATA_DIR_NAME, DATA_FILE_PATTERN
from calibre.db.notes.exim import expand_note_resources, parse_html
from calibre.ebooks.metadata import title_sort
from calibre.ebooks.metadata.book.base import field_metadata
from calibre.utils.config import tweaks
from calibre.utils.date import UNDEFINED_DATE, format_date, now, parse_date
from calibre.utils.icu import capitalize, sort_key, strcmp
from calibre.utils.icu import lower as icu_lower
from calibre.utils.localization import _, calibre_langcode_to_name, canonicalize_lang
from calibre.utils.titlecase import titlecase
from polyglot.builtins import iteritems, itervalues
class StoredObjectType(Enum):
    # Kind of stored formatter object: a python function, or a template
    # saved as either a General Program Mode or a python template.
    # NOTE: member order is significant -- auto() assigns values in order.
    PythonFunction = auto()
    StoredGPMTemplate = auto()
    StoredPythonTemplate = auto()
class FormatterFunctions:
    '''
    Registry of template formatter functions: builtins plus user-defined
    functions loaded per-library. Lookups go through _functions, which maps
    every name and alias to its function instance.
    '''

    # Replacement body for a user function whose name collides with a
    # different implementation; evaluating it returns an error message.
    error_function_body = ('def evaluate(self, formatter, kwargs, mi, locals):\n'
                       '\treturn "' +
            _('Duplicate user function name {0}. '
              'Change the name or ensure that the functions are identical') + '"')

    def __init__(self):
        self._builtins = {}
        self._functions = {}
        # library uuid -> list of compiled user function classes
        self._functions_from_library = {}

    def register_builtin(self, func_class):
        if not isinstance(func_class, FormatterFunction):
            raise ValueError('Class %s is not an instance of FormatterFunction'%(
                                    func_class.__class__.__name__))
        name = func_class.name
        if name in self._functions:
            raise ValueError('Name %s already used'%name)
        self._builtins[name] = func_class
        self._functions[name] = func_class
        for a in func_class.aliases:
            self._functions[a] = func_class

    def _register_function(self, func_class, replace=False):
        if not isinstance(func_class, FormatterFunction):
            raise ValueError('Class %s is not an instance of FormatterFunction'%(
                                    func_class.__class__.__name__))
        name = func_class.name
        if not replace and name in self._functions:
            raise ValueError('Name %s already used'%name)
        self._functions[name] = func_class

    def register_functions(self, library_uuid, funcs):
        # Record the functions for this library, then rebuild the registry.
        self._functions_from_library[library_uuid] = funcs
        self._register_functions()

    def _register_functions(self):
        for compiled_funcs in itervalues(self._functions_from_library):
            for cls in compiled_funcs:
                f = self._functions.get(cls.name, None)
                replace = False
                if f is not None:
                    existing_body = f.program_text
                    new_body = cls.program_text
                    if new_body != existing_body:
                        # Change the body of the template function to one that will
                        # return an error message. Also change the arg count to
                        # -1 (variable) to avoid template compilation errors
                        if DEBUG:
                            print(f'attempt to replace formatter function {f.name} with a different body')
                        replace = True
                        func = [cls.name, '', -1, self.error_function_body.format(cls.name)]
                        cls = compile_user_function(*func)
                    else:
                        continue
                # NOTE(review): registers via the module singleton rather than
                # self; identical in practice since _ff is the only instance.
                formatter_functions()._register_function(cls, replace=replace)

    def unregister_functions(self, library_uuid):
        # Drop this library's functions by name, then rebuild from the rest.
        if library_uuid in self._functions_from_library:
            for cls in self._functions_from_library[library_uuid]:
                self._functions.pop(cls.name, None)
            self._functions_from_library.pop(library_uuid)
            self._register_functions()

    def get_builtins(self):
        return self._builtins

    def get_builtins_and_aliases(self):
        res = {}
        for f in itervalues(self._builtins):
            res[f.name] = f
            for a in f.aliases:
                res[a] = f
        return res

    def get_functions(self):
        return self._functions

    def reset_to_builtins(self):
        # Discard all user functions, keeping builtins and their aliases.
        self._functions = {}
        for n,c in self._builtins.items():
            self._functions[n] = c
            for a in c.aliases:
                self._functions[a] = c
_ff = FormatterFunctions()


def formatter_functions():
    # Accessor for the module-level singleton registry.
    return _ff
def only_in_gui_error(name):
    # Uniform error for template functions that require a GUI context.
    message = _('The function {} can be used only in the GUI').format(name)
    raise ValueError(message)
def get_database(mi, name):
    '''
    Return the LibraryDatabase reachable from the metadata object ``mi``.

    ``mi`` must carry a ``_proxy_metadata`` object holding a weak reference
    to the db cache (present when the metadata comes from the GUI). When any
    link in the chain is missing or dead:

    * if ``name`` is None, return None quietly (caller handles absence);
    * otherwise raise ValueError identifying the template function ``name``.
    '''
    proxy = mi.get('_proxy_metadata', None)
    if proxy is None:
        # No proxy at all: we are not running inside the GUI.
        if name is not None:
            only_in_gui_error(name)
        return None
    wr = proxy.get('_db', None)
    if wr is None:
        if name is not None:
            raise ValueError(_('In function {}: The database has been closed').format(name))
        return None
    cache = wr()
    if cache is None:
        if name is not None:
            raise ValueError(_('In function {}: The database has been closed').format(name))
        return None
    wr = getattr(cache, 'library_database_instance', None)
    if wr is None:
        if name is not None:
            # Bug fix: only_in_gui_error() requires the function name; the
            # previous call passed no argument, raising TypeError instead of
            # the intended ValueError.
            only_in_gui_error(name)
        return None
    db = wr()
    if db is None:
        if name is not None:
            raise ValueError(_('In function {}: The database has been closed').format(name))
        return None
    return db
class FormatterFunction:
    '''
    Base class for template formatter functions. Subclasses override the
    class attributes below and implement evaluate().
    '''

    doc = _('No documentation provided')
    name = 'no name provided'
    category = 'Unknown'
    arg_count = 0
    aliases = []
    object_type = StoredObjectType.PythonFunction

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # Subclasses must implement the actual computation.
        raise NotImplementedError()

    def eval_(self, formatter, kwargs, mi, locals, *args):
        # Evaluate, then coerce the result into a string for the formatter.
        value = self.evaluate(formatter, kwargs, mi, locals, *args)
        if isinstance(value, (bytes, str)):
            return value
        if isinstance(value, list):
            return ','.join(value)
        if isinstance(value, (numbers.Number, bool)):
            return str(value)
        # Any other type (including None) falls through, yielding None.

    def only_in_gui_error(self):
        only_in_gui_error(self.name)

    def get_database(self, mi):
        return get_database(mi, self.name)
class BuiltinFormatterFunction(FormatterFunction):
    # Base class for the functions shipped with calibre. Instantiating a
    # subclass registers it with the global FormatterFunctions registry.

    def __init__(self):
        formatter_functions().register_builtin(self)
        # Capture the source text of the subclass's evaluate() method so
        # the template editor can display it to the user.
        eval_func = inspect.getmembers(self.__class__,
                lambda x: inspect.isfunction(x) and x.__name__ == 'evaluate')
        try:
            # Strip one level (4 spaces) of class-body indentation.
            lines = [l[4:] for l in inspect.getsourcelines(eval_func[0][1])[0]]
        except:
            lines = []
        self.program_text = ''.join(lines)
class BuiltinStrcmp(BuiltinFormatterFunction):
    name = 'strcmp'
    arg_count = 5
    category = 'Relational'
    __doc__ = doc = _('strcmp(x, y, lt, eq, gt) -- does a case-insensitive comparison of x '
            'and y as strings. Returns lt if x < y. Returns eq if x == y. '
            'Otherwise returns gt. In many cases the lexical comparison operators '
            '(>, <, == etc) can replace this function.')

    def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):
        # Map the sign of the icu comparison onto the three result values.
        sign = strcmp(x, y)
        if sign == 0:
            return eq
        return lt if sign < 0 else gt
class BuiltinStrcmpcase(BuiltinFormatterFunction):
    name = 'strcmpcase'
    arg_count = 5
    category = 'Relational'
    __doc__ = doc = _('strcmpcase(x, y, lt, eq, gt) -- does a case-sensitive comparison of x '
            'and y as strings. Returns lt if x < y. Returns eq if x == y. '
            'Otherwise returns gt.\n'
            'Note: This is NOT the default behavior used by calibre, for example, in the '
            'lexical comparison operators (==, >, <, etc.). This function could '
            'cause unexpected results, preferably use strcmp() whenever possible.')

    def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):
        from calibre.utils.icu import case_sensitive_strcmp as case_strcmp
        # Same shape as strcmp(), but the comparison honors letter case.
        sign = case_strcmp(x, y)
        if sign == 0:
            return eq
        return lt if sign < 0 else gt
class BuiltinCmp(BuiltinFormatterFunction):
    name = 'cmp'
    category = 'Relational'
    arg_count = 5
    __doc__ = doc = _('cmp(x, y, lt, eq, gt) -- compares x and y after converting both to '
            'numbers. Returns lt if x < y. Returns eq if x == y. Otherwise returns gt. '
            'In many cases the numeric comparison operators '
            '(>#, <#, ==# etc) can replace this function.')

    def evaluate(self, formatter, kwargs, mi, locals, x, y, lt, eq, gt):
        # Empty values and the literal string 'None' are treated as zero.
        left = float(x if x and x != 'None' else 0)
        right = float(y if y and y != 'None' else 0)
        if left == right:
            return eq
        return lt if left < right else gt
class BuiltinFirstMatchingCmp(BuiltinFormatterFunction):
    name = 'first_matching_cmp'
    category = 'Relational'
    arg_count = -1
    __doc__ = doc = _('first_matching_cmp(val, [cmp1, result1,]+, else_result) -- '
            'compares "val < cmpN" in sequence, returning resultN for '
            'the first comparison that succeeds. Returns else_result '
            'if no comparison succeeds. Example: '
            'first_matching_cmp(10,5,"small",10,"middle",15,"large","giant") '
            'returns "large". The same example with a first value of 16 returns "giant".')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        if (len(args) % 2) != 0:
            raise ValueError(_('first_matching_cmp requires an even number of arguments'))

        def to_num(v):
            # Empty values and the literal string 'None' count as zero.
            return float(v if v and v != 'None' else 0)

        val = to_num(args[0])
        # Walk the (threshold, result) pairs in order.
        for pos in range(1, len(args) - 1, 2):
            if val < to_num(args[pos]):
                return args[pos + 1]
        return args[-1]
class BuiltinStrcat(BuiltinFormatterFunction):
    name = 'strcat'
    arg_count = -1
    category = 'String manipulation'
    __doc__ = doc = _('strcat(a [, b]*) -- can take any number of arguments. Returns the '
            'string formed by concatenating all the arguments')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # Join all arguments in order; no separator is inserted.
        return ''.join(args)
class BuiltinStrlen(BuiltinFormatterFunction):
    name = 'strlen'
    arg_count = 1
    category = 'String manipulation'
    __doc__ = doc = _('strlen(a) -- Returns the length of the string passed as '
            'the argument')

    def evaluate(self, formatter, kwargs, mi, locals, a):
        # Anything without a usable length yields -1 instead of raising.
        try:
            return len(a)
        except:
            return -1
class BuiltinAdd(BuiltinFormatterFunction):
    name = 'add'
    arg_count = -1
    category = 'Arithmetic'
    __doc__ = doc = _('add(x [, y]*) -- returns the sum of its arguments. '
            'Throws an exception if an argument is not a number. '
            'This function can often be '
            'replaced with the + operator.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # Empty values and 'None' count as zero; anything else must parse
        # as a float or a ValueError propagates to the template engine.
        total = 0
        for arg in args:
            total += float(arg if arg and arg != 'None' else 0)
        return str(total)
class BuiltinSubtract(BuiltinFormatterFunction):
    name = 'subtract'
    arg_count = 2
    category = 'Arithmetic'
    __doc__ = doc = _('subtract(x, y) -- returns x - y. Throws an exception if '
            'either x or y are not numbers. This function can often be '
            'replaced with the - operator.')

    def evaluate(self, formatter, kwargs, mi, locals, x, y):
        # Empty values and 'None' count as zero.
        minuend = float(x if x and x != 'None' else 0)
        subtrahend = float(y if y and y != 'None' else 0)
        return str(minuend - subtrahend)
class BuiltinMultiply(BuiltinFormatterFunction):
    name = 'multiply'
    arg_count = -1
    category = 'Arithmetic'
    __doc__ = doc = _('multiply(x [, y]*) -- returns the product of its arguments. '
            'Throws an exception if any argument is not a number. '
            'This function can often be replaced with the * operator.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # Empty values and 'None' count as zero (which zeroes the product).
        product = 1
        for arg in args:
            product *= float(arg if arg and arg != 'None' else 0)
        return str(product)
class BuiltinDivide(BuiltinFormatterFunction):
    name = 'divide'
    arg_count = 2
    category = 'Arithmetic'
    __doc__ = doc = _('divide(x, y) -- returns x / y. Throws an exception if '
            'either x or y are not numbers.'
            ' This function can often be replaced with the / operator.')

    def evaluate(self, formatter, kwargs, mi, locals, x, y):
        # Empty values and 'None' count as zero. A zero divisor raises
        # ZeroDivisionError, which the template engine reports.
        numerator = float(x if x and x != 'None' else 0)
        denominator = float(y if y and y != 'None' else 0)
        return str(numerator / denominator)
class BuiltinCeiling(BuiltinFormatterFunction):
    name = 'ceiling'
    arg_count = 1
    category = 'Arithmetic'
    __doc__ = doc = _('ceiling(x) -- returns the smallest integer greater '
            'than or equal to x. Throws an exception if x is '
            'not a number.')

    def evaluate(self, formatter, kwargs, mi, locals, x):
        # Empty values and 'None' count as zero.
        value = float(x if x and x != 'None' else 0)
        return str(int(ceil(value)))
class BuiltinFloor(BuiltinFormatterFunction):
    name = 'floor'
    arg_count = 1
    category = 'Arithmetic'
    __doc__ = doc = _('floor(x) -- returns the largest integer less '
            'than or equal to x. Throws an exception if x is '
            'not a number.')

    def evaluate(self, formatter, kwargs, mi, locals, x):
        # Empty values and 'None' count as zero.
        value = float(x if x and x != 'None' else 0)
        return str(int(floor(value)))
class BuiltinRound(BuiltinFormatterFunction):
    name = 'round'
    arg_count = 1
    category = 'Arithmetic'
    __doc__ = doc = _('round(x) -- returns the nearest integer to x. '
            'Throws an exception if x is not a number.')

    def evaluate(self, formatter, kwargs, mi, locals, x):
        # Empty values and 'None' count as zero. Uses Python's round(),
        # i.e. banker's rounding for exact .5 values.
        value = float(x if x and x != 'None' else 0)
        return str(int(round(value)))
class BuiltinMod(BuiltinFormatterFunction):
    name = 'mod'
    arg_count = 2
    category = 'Arithmetic'
    # FIX: the help text previously read 'mod(x)' although the function
    # takes two arguments.
    __doc__ = doc = _('mod(x, y) -- returns floor(remainder of x / y). '
            'Throws an exception if either x or y is not a number.')

    def evaluate(self, formatter, kwargs, mi, locals, x, y):
        # Empty values and 'None' count as zero.
        x = float(x if x and x != 'None' else 0)
        y = float(y if y and y != 'None' else 0)
        return str(int(x % y))
class BuiltinFractionalPart(BuiltinFormatterFunction):
    name = 'fractional_part'
    arg_count = 1
    category = 'Arithmetic'
    __doc__ = doc = _('fractional_part(x) -- returns the value after the decimal '
            'point. For example, fractional_part(3.14) returns 0.14. '
            'Throws an exception if x is not a number.')

    def evaluate(self, formatter, kwargs, mi, locals, x):
        # Empty values and 'None' count as zero. modf() returns
        # (fractional, integer) parts; keep only the fractional part.
        value = float(x if x and x != 'None' else 0)
        fractional = modf(value)[0]
        return str(fractional)
class BuiltinTemplate(BuiltinFormatterFunction):
    name = 'template'
    arg_count = 1
    category = 'Recursion'
    __doc__ = doc = _('template(x) -- evaluates x as a template. The evaluation is done '
            'in its own context, meaning that variables are not shared between '
            'the caller and the template evaluation. Because the { and } '
            'characters are special, you must use [[ for the { character and '
            ']] for the } character; they are converted automatically. '
            'For example, template(\'[[title_sort]]\') will evaluate the '
            'template {title_sort} and return its value. Note also that '
            'prefixes and suffixes (the `|prefix|suffix` syntax) cannot be '
            'used in the argument to this function when using template program mode.')

    def evaluate(self, formatter, kwargs, mi, locals, template):
        # [[ and ]] stand in for { and } in the argument.
        prog = template.replace('[[', '{').replace(']]', '}')
        # A brand-new formatter instance gives the sub-template its own
        # context, so no local variables leak between caller and callee.
        return formatter.__class__().safe_format(prog, kwargs, 'TEMPLATE', mi)
class BuiltinEval(BuiltinFormatterFunction):
    name = 'eval'
    arg_count = 1
    category = 'Recursion'
    __doc__ = doc = _('eval(template) -- evaluates the template, passing the local '
            'variables (those \'assign\'ed to) instead of the book metadata. '
            ' This permits using the template processor to construct complex '
            'results from local variables. Because the { and } '
            'characters are special, you must use [[ for the { character and '
            ']] for the } character; they are converted automatically. '
            'Note also that prefixes and suffixes (the `|prefix|suffix` syntax) '
            'cannot be used in the argument to this function when using '
            'template program mode.')

    def evaluate(self, formatter, kwargs, mi, locals, template):
        from calibre.utils.formatter import EvalFormatter
        # [[ and ]] stand in for { and }. The local-variable table, not the
        # book metadata, provides the values.
        prog = template.replace('[[', '{').replace(']]', '}')
        return EvalFormatter().safe_format(prog, locals, 'EVAL', None)
class BuiltinAssign(BuiltinFormatterFunction):
    name = 'assign'
    arg_count = 2
    category = 'Other'
    __doc__ = doc = _('assign(id, val) -- assigns val to id, then returns val. '
            'id must be an identifier, not an expression. '
            'This function can often be replaced with the = operator.')

    def evaluate(self, formatter, kwargs, mi, locals, target, value):
        # Bind the value in the local-variable table and pass it through.
        locals[target] = value
        return value
class BuiltinListSplit(BuiltinFormatterFunction):
    name = 'list_split'
    arg_count = 3
    category = 'List manipulation'
    __doc__ = doc = _('list_split(list_val, sep, id_prefix) -- splits the list_val '
            "into separate values using 'sep', then assigns the values "
            "to variables named 'id_prefix_N' where N is the position "
            "of the value in the list. The first item has position 0 (zero). "
            "The function returns the last element in the list. "
            "Example: split('one:two:foo', ':', 'var') is equivalent "
            "to var_0 = 'one'; var_1 = 'two'; var_2 = 'foo'.")

    def evaluate(self, formatter, kwargs, mi, locals, list_val, sep, id_prefix):
        # Assign each stripped item to id_prefix_N; remember the last one,
        # which is the function's return value.
        last = ''
        for pos, item in enumerate(piece.strip() for piece in list_val.split(sep)):
            last = locals[id_prefix + '_' + str(pos)] = item
        return last
class BuiltinPrint(BuiltinFormatterFunction):
    name = 'print'
    arg_count = -1
    category = 'Other'
    __doc__ = doc = _('print(a[, b]*) -- prints the arguments to standard output. '
            'Unless you start calibre from the command line (calibre-debug -g), '
            'the output will go to a black hole.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # Debugging aid: dump the raw argument tuple to stdout. The
        # template itself always receives the empty string.
        print(args)
        return ''
class BuiltinField(BuiltinFormatterFunction):
    name = 'field'
    arg_count = 1
    category = 'Get values from metadata'
    __doc__ = doc = _('field(lookup_name) -- returns the metadata field named by lookup_name')

    def evaluate(self, formatter, kwargs, mi, locals, name):
        # Delegate to the formatter so the field's normal formatting rules
        # (multiple-value joining etc.) are applied.
        return formatter.get_value(name, [], kwargs)
class BuiltinRawField(BuiltinFormatterFunction):
    name = 'raw_field'
    arg_count = -1
    category = 'Get values from metadata'
    __doc__ = doc = _('raw_field(lookup_name [, optional_default]) -- returns the '
            'metadata field named by lookup_name without applying any formatting. '
            'It evaluates and returns the optional second argument '
            "'default' if the field is undefined ('None').")

    def evaluate(self, formatter, kwargs, mi, locals, name, default=None):
        res = getattr(mi, name, None)
        if res is None and default is not None:
            # Field undefined and a fallback was supplied.
            return default
        if isinstance(res, list):
            # Multi-valued field: join with the column's configured
            # separator, or ', ' if the column metadata is unavailable.
            fm = mi.metadata_for_field(name)
            joiner = ', ' if fm is None else fm['is_multiple']['list_to_ui']
            return joiner.join(res)
        return str(res)
class BuiltinRawList(BuiltinFormatterFunction):
    name = 'raw_list'
    arg_count = 2
    category = 'Get values from metadata'
    __doc__ = doc = _('raw_list(lookup_name, separator) -- returns the metadata list '
            'named by lookup_name without applying any formatting or sorting and '
            'with items separated by separator.')

    def evaluate(self, formatter, kwargs, mi, locals, name, separator):
        # Only genuinely list-valued fields are joined; anything else is
        # reported as an error string.
        value = getattr(mi, name, None)
        if isinstance(value, list):
            return separator.join(value)
        return "%s is not a list" % name
class BuiltinSubstr(BuiltinFormatterFunction):
    name = 'substr'
    arg_count = 3
    category = 'String manipulation'
    __doc__ = doc = _('substr(str, start, end) -- returns the start\'th through the end\'th '
            'characters of str. The first character in str is the zero\'th '
            'character. If end is negative, then it indicates that many '
            'characters counting from the right. If end is zero, then it '
            'indicates the last character. For example, substr(\'12345\', 1, 0) '
            'returns \'2345\', and substr(\'12345\', 1, -1) returns \'234\'.')

    def evaluate(self, formatter, kwargs, mi, locals, str_, start_, end_):
        begin = int(start_)
        end = int(end_)
        if end == 0:
            # Zero means 'through the end of the string'.
            end = len(str_)
        return str_[begin:end]
class BuiltinLookup(BuiltinFormatterFunction):
    name = 'lookup'
    arg_count = -1
    category = 'Iterating over values'
    __doc__ = doc = _('lookup(val, [pattern, field,]+ else_field) -- '
            'like switch, except the arguments are field (metadata) names, not '
            'text. The value of the appropriate field will be fetched and used. '
            'Note that because composite columns are fields, you can use this '
            'function in one composite field to use the value of some other '
            'composite field. This is extremely useful when constructing '
            'variable save paths')

    def evaluate(self, formatter, kwargs, mi, locals, val, *args):
        if len(args) == 2:  # here for backwards compatibility
            # Legacy form: lookup(val, field_if_set, field_if_empty).
            if val:
                return formatter.vformat('{'+args[0].strip()+'}', [], kwargs)
            else:
                return formatter.vformat('{'+args[1].strip()+'}', [], kwargs)
        if (len(args) % 2) != 1:
            raise ValueError(_('lookup requires either 2 or an odd number of arguments'))
        # Walk the (pattern, field) pairs; the final lone argument is the
        # else_field, formatted when no pattern matches.
        i = 0
        while i < len(args):
            if i + 1 >= len(args):
                return formatter.vformat('{' + args[i].strip() + '}', [], kwargs)
            if re.search(args[i], val, flags=re.I):
                return formatter.vformat('{'+args[i+1].strip() + '}', [], kwargs)
            i += 2
class BuiltinTest(BuiltinFormatterFunction):
    name = 'test'
    arg_count = 3
    category = 'If-then-else'
    __doc__ = doc = _('test(val, text if not empty, text if empty) -- return `text if not '
            'empty` if val is not empty, otherwise return `text if empty`')

    def evaluate(self, formatter, kwargs, mi, locals, val, value_if_set, value_not_set):
        # Non-empty val selects the first text, empty val the second.
        return value_if_set if val else value_not_set
class BuiltinContains(BuiltinFormatterFunction):
    name = 'contains'
    arg_count = 4
    category = 'If-then-else'
    __doc__ = doc = _('contains(val, pattern, text if match, text if not match) -- checks '
            'if val contains matches for the regular expression `pattern`. '
            'Returns `text if match` if matches are found, otherwise it returns '
            '`text if no match`')

    def evaluate(self, formatter, kwargs, mi, locals,
                 val, test, value_if_present, value_if_not):
        # Case-insensitive search, as everywhere in the template language.
        found = re.search(test, val, flags=re.I)
        return value_if_present if found else value_if_not
class BuiltinSwitch(BuiltinFormatterFunction):
    name = 'switch'
    arg_count = -1
    category = 'Iterating over values'
    __doc__ = doc = _('switch(val, [pattern, value,]+ else_value) -- '
            'for each `pattern, value` pair, checks if `val` matches '
            'the regular expression `pattern` and if so, returns that '
            '`value`. If no pattern matches, then `else_value` is returned. '
            'You can have as many `pattern, value` pairs as you want')

    def evaluate(self, formatter, kwargs, mi, locals, val, *args):
        # args must hold (pattern, value) pairs plus a trailing else_value,
        # i.e. an odd count (even including val itself).
        if (len(args) % 2) != 1:
            raise ValueError(_('switch requires an even number of arguments'))
        for pos in range(0, len(args) - 1, 2):
            if re.search(args[pos], val, flags=re.I):
                return args[pos + 1]
        return args[-1]
class BuiltinSwitchIf(BuiltinFormatterFunction):
    name = 'switch_if'
    arg_count = -1
    category = 'Iterating over values'
    __doc__ = doc = _('switch_if([test_expression, value_expression,]+ else_expression) -- '
            'for each "test_expression, value_expression" pair, checks if test_expression '
            'is True (non-empty) and if so returns the result of value_expression. '
            'If no test_expression is True then the result of else_expression is returned. '
            'You can have as many "test_expression, value_expression" pairs as you want.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        if (len(args) % 2) != 1:
            raise ValueError(_('switch_if requires an odd number of arguments'))
        # The template compiler normally inlines this function; this body
        # exists for direct calls.
        for pos in range(0, len(args) - 1, 2):
            if args[pos]:
                return args[pos + 1]
        return args[-1]
class BuiltinStrcatMax(BuiltinFormatterFunction):
    name = 'strcat_max'
    arg_count = -1
    category = 'String manipulation'
    __doc__ = doc = _('strcat_max(max, string1 [, prefix2, string2]*) -- '
            'Returns a string formed by concatenating the arguments. The '
            'returned value is initialized to string1. `Prefix, string` '
            'pairs are added to the end of the value as long as the '
            'resulting string length is less than `max`. String1 is returned '
            'even if string1 is longer than max. You can pass as many '
            '`prefix, string` pairs as you wish.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        if len(args) < 2:
            raise ValueError(_('strcat_max requires 2 or more arguments'))
        if (len(args) % 2) != 0:
            raise ValueError(_('strcat_max requires an even number of arguments'))
        try:
            # Renamed from 'max' to avoid shadowing the builtin.
            max_len = int(args[0])
        except:
            raise ValueError(_('first argument to strcat_max must be an integer'))
        # Append (prefix, string) pairs while the result stays within
        # max_len; stop at the first pair that would exceed it.
        i = 2
        result = args[1]
        try:
            while i < len(args):
                if (len(result) + len(args[i]) + len(args[i+1])) > max_len:
                    break
                result = result + args[i] + args[i+1]
                i += 2
        except:
            pass
        return result.strip()
class BuiltinInList(BuiltinFormatterFunction):
    name = 'in_list'
    arg_count = -1
    category = 'List lookup'
    __doc__ = doc = _('in_list(val, separator, [ pattern, found_val, ]+ not_found_val) -- '
            'treating val as a list of items separated by separator, '
            'if the pattern matches any of the list values then return found_val.'
            'If the pattern matches no list value then return '
            'not_found_val. The pattern and found_value pairs can be repeated as '
            'many times as desired. The patterns are checked in order. The '
            'found_val for the first match is returned. '
            'Aliases: in_list(), list_contains()')
    aliases = ['list_contains']

    def evaluate(self, formatter, kwargs, mi, locals, val, sep, *args):
        if (len(args) % 2) != 1:
            raise ValueError(_('in_list requires an odd number of arguments'))
        items = [piece.strip() for piece in val.split(sep) if piece.strip()]
        # Check (pattern, found_val) pairs in order; the final lone
        # argument is the not_found_val.
        for pos in range(0, len(args) - 1, 2):
            pattern = args[pos]
            if any(re.search(pattern, item, flags=re.I) for item in items):
                return args[pos + 1]
        return args[-1]
class BuiltinStrInList(BuiltinFormatterFunction):
    name = 'str_in_list'
    arg_count = -1
    category = 'List lookup'
    __doc__ = doc = _('str_in_list(val, separator, [string, found_val, ]+ not_found_val) -- '
            'treating val as a list of items separated by separator, if the '
            'string matches any of the list values then return found_val.'
            'If the string matches no list value then return '
            'not_found_val. The comparison is exact match (not contains) and is '
            'case insensitive. The string and found_value pairs can be repeated as '
            'many times as desired. The patterns are checked in order. The '
            'found_val for the first match is returned.')

    def evaluate(self, formatter, kwargs, mi, locals, val, sep, *args):
        if (len(args) % 2) != 1:
            raise ValueError(_('str_in_list requires an odd number of arguments'))
        items = [piece.strip() for piece in val.split(sep) if piece.strip()]
        # Each 'string' argument may itself be a separator-delimited list;
        # any case-insensitive exact match wins. The final lone argument
        # is the not_found_val.
        for pos in range(0, len(args) - 1, 2):
            candidates = [piece.strip() for piece in args[pos].split(sep) if piece.strip()]
            for item in items:
                if any(strcmp(cand, item) == 0 for cand in candidates):
                    return args[pos + 1]
        return args[-1]
class BuiltinIdentifierInList(BuiltinFormatterFunction):
    name = 'identifier_in_list'
    arg_count = -1
    category = 'List lookup'
    __doc__ = doc = _('identifier_in_list(val, id_name [, found_val, not_found_val]) -- '
            'treat val as a list of identifiers separated by commas. An identifier '
            'has the format "id_name:value". The id_name parameter is the id_name '
            'text to search for, either "id_name" or "id_name:regexp". The first case '
            'matches if there is any identifier matching that id_name. The second '
            'case matches if id_name matches an identifier and the regexp '
            'matches the identifier\'s value. If found_val and not_found_val '
            'are provided then if there is a match then return found_val, otherwise '
            'return not_found_val. If found_val and not_found_val are not '
            'provided then if there is a match then return the identifier:value '
            'pair, otherwise the empty string.')

    def evaluate(self, formatter, kwargs, mi, locals, val, ident, *args):
        # Two calling conventions: no extra args (return the matching
        # identifier itself), or exactly (found_val, not_found_val).
        if len(args) == 0:
            fv_is_id = True
            nfv = ''
        elif len(args) == 2:
            fv_is_id = False
            fv = args[0]  # only bound in this branch; unused when fv_is_id
            nfv = args[1]
        else:
            raise ValueError(_("{} requires 2 or 4 arguments").format(self.name))
        l = [v.strip() for v in val.split(',') if v.strip()]
        # ident is either 'id_name' or 'id_name:regexp'.
        (id_, __, regexp) = ident.partition(':')
        if not id_:
            return nfv
        for candidate in l:
            i, __, v = candidate.partition(':')
            if v and i == id_:
                # id matches; an empty regexp matches unconditionally.
                if not regexp or re.search(regexp, v, flags=re.I):
                    return candidate if fv_is_id else fv
        return nfv
class BuiltinRe(BuiltinFormatterFunction):
    name = 're'
    arg_count = 3
    category = 'String manipulation'
    __doc__ = doc = _('re(val, pattern, replacement) -- return val after applying '
            'the regular expression. All instances of `pattern` are replaced '
            'with `replacement`. As in all of calibre, these are '
            'Python-compatible regular expressions')

    def evaluate(self, formatter, kwargs, mi, locals, val, pattern, replacement):
        # Case-insensitive, as everywhere in the template language.
        return re.sub(pattern, replacement, val, flags=re.I)
class BuiltinReGroup(BuiltinFormatterFunction):
    name = 're_group'
    arg_count = -1
    category = 'String manipulation'
    __doc__ = doc = _('re_group(val, pattern [, template_for_group]*) -- '
            'return a string made by applying the regular expression pattern '
            'to the val and replacing each matched instance with the string '
            'computed by replacing each matched group by the value returned '
            'by the corresponding template. The original matched value for the '
            'group is available as $. In template program mode, like for '
            'the template and the eval functions, you use [[ for { and ]] for }.'
            ' The following example in template program mode looks for series '
            'with more than one word and uppercases the first word: '
            "{series:'re_group($, \"(\\S* )(.*)\", \"[[$:uppercase()]]\", \"[[$]]\")'}")

    def evaluate(self, formatter, kwargs, mi, locals, val, pattern, *args):
        from calibre.utils.formatter import EvalFormatter

        def repl(mo):
            # Build the replacement for one match: run each group through
            # its corresponding template argument (if one was supplied),
            # otherwise keep the group text unchanged.
            res = ''
            if mo and mo.lastindex:
                for dex in range(0, mo.lastindex):
                    gv = mo.group(dex+1)
                    if gv is None:
                        # Group did not participate in the match.
                        continue
                    if len(args) > dex:
                        # [[ and ]] stand in for { and }; the group text is
                        # exposed to the template as the variable $.
                        template = args[dex].replace('[[', '{').replace(']]', '}')
                        res += EvalFormatter().safe_format(template, {'$': gv},
                                           'EVAL', None, strip_results=False)
                    else:
                        res += gv
            return res
        return re.sub(pattern, repl, val, flags=re.I)
class BuiltinSwapAroundComma(BuiltinFormatterFunction):
    name = 'swap_around_comma'
    arg_count = 1
    category = 'String manipulation'
    __doc__ = doc = _('swap_around_comma(val) -- given a value of the form '
            '"B, A", return "A B". This is most useful for converting names '
            'in LN, FN format to FN LN. If there is no comma, the function '
            'returns val unchanged')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Split on the first comma only (non-greedy first group); if there
        # is no comma the pattern does not match and val passes through.
        return re.sub(r'^(.*?),\s*(.*$)', r'\2 \1', val, flags=re.I).strip()
class BuiltinIfempty(BuiltinFormatterFunction):
    name = 'ifempty'
    arg_count = 2
    category = 'If-then-else'
    __doc__ = doc = _('ifempty(val, text if empty) -- return val if val is not empty, '
            'otherwise return `text if empty`')

    def evaluate(self, formatter, kwargs, mi, locals, val, value_if_empty):
        # Non-empty values pass through unchanged.
        return val if val else value_if_empty
class BuiltinShorten(BuiltinFormatterFunction):
    name = 'shorten'
    arg_count = 4
    category = 'String manipulation'
    __doc__ = doc = _('shorten(val, left chars, middle text, right chars) -- Return a '
            'shortened version of val, consisting of `left chars` '
            'characters from the beginning of val, followed by '
            '`middle text`, followed by `right chars` characters from '
            'the end of the string. `Left chars` and `right chars` must be '
            'integers. For example, assume the title of the book is '
            '`Ancient English Laws in the Times of Ivanhoe`, and you want '
            'it to fit in a space of at most 15 characters. If you use '
            '{title:shorten(9,-,5)}, the result will be `Ancient E-anhoe`. '
            'If the field\'s length is less than left chars + right chars + '
            'the length of `middle text`, then the field will be used '
            'intact. For example, the title `The Dome` would not be changed.')

    def evaluate(self, formatter, kwargs, mi, locals,
                 val, leading, center_string, trailing):
        # Negative counts are clamped to zero.
        keep_left = max(0, int(leading))
        keep_right = max(0, int(trailing))
        if len(val) <= keep_left + len(center_string) + keep_right:
            # Shortening would not save anything; return val intact.
            return val
        tail = val[-keep_right:] if keep_right else ''
        return val[0:keep_left] + center_string + tail
class BuiltinCount(BuiltinFormatterFunction):
    name = 'count'
    arg_count = 2
    category = 'List manipulation'
    aliases = ['list_count']
    __doc__ = doc = _('count(val, separator) -- interprets the value as a list of items '
            'separated by `separator`, returning the number of items in the '
            'list. Most lists use a comma as the separator, but authors '
            'uses an ampersand. Examples: {tags:count(,)}, {authors:count(&)}. '
            'Aliases: count(), list_count()')

    def evaluate(self, formatter, kwargs, mi, locals, val, sep):
        # Empty fragments (e.g. from trailing separators) are not counted.
        items = [piece for piece in val.split(sep) if piece]
        return str(len(items))
class BuiltinListCountMatching(BuiltinFormatterFunction):
    name = 'list_count_matching'
    arg_count = 3
    category = 'List manipulation'
    aliases = ['count_matching']
    __doc__ = doc = _('list_count_matching(list, pattern, separator) -- '
            "interprets 'list' as a list of items separated by 'separator', "
            'returning the number of items in the list that match the regular '
            "expression 'pattern'. Aliases: list_count_matching(), count_matching()")

    def evaluate(self, formatter, kwargs, mi, locals, list_, pattern, sep):
        # Count case-insensitive regex matches over the stripped,
        # non-empty items.
        items = (piece.strip() for piece in list_.split(sep) if piece.strip())
        matches = sum(1 for item in items if re.search(pattern, item, flags=re.I))
        return str(matches)
class BuiltinListitem(BuiltinFormatterFunction):
    name = 'list_item'
    arg_count = 3
    category = 'List lookup'
    __doc__ = doc = _('list_item(val, index, separator) -- interpret the value as a list of '
            'items separated by `separator`, returning the `index`th item. '
            'The first item is number zero. The last item can be returned '
            'using `list_item(-1,separator)`. If the item is not in the list, '
            'then the empty value is returned. The separator has the same '
            'meaning as in the count function.')

    def evaluate(self, formatter, kwargs, mi, locals, val, index, sep):
        if not val:
            return ''
        # A non-numeric index is a template error and propagates; an
        # out-of-range index silently yields the empty string.
        pos = int(index)
        items = val.split(sep)
        try:
            return items[pos].strip()
        except:
            return ''
class BuiltinSelect(BuiltinFormatterFunction):
    name = 'select'
    arg_count = 2
    category = 'List lookup'
    __doc__ = doc = _('select(val, key) -- interpret the value as a comma-separated list '
            'of items, with the items being "id:value". Find the pair with the '
            'id equal to key, and return the corresponding value. Returns the '
            'empty string if no match is found.'
            )

    def evaluate(self, formatter, kwargs, mi, locals, val, key):
        if not val:
            return ''
        # The first item whose 'id:' prefix matches the key wins.
        prefix = key + ':'
        for item in (piece.strip() for piece in val.split(',')):
            if item.startswith(prefix):
                return item[len(prefix):]
        return ''
class BuiltinApproximateFormats(BuiltinFormatterFunction):
    name = 'approximate_formats'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('approximate_formats() -- return a comma-separated '
            'list of formats that at one point were associated with the '
            'book. There is no guarantee that this list is correct, '
            'although it probably is. '
            'This function can be called in template program mode using '
            'the template "{:\'approximate_formats()\'}". '
            'Note that format names are always uppercase, as in EPUB. '
            'This function works only in the GUI. If you want to use these values '
            'in save-to-disk or send-to-device templates then you '
            'must make a custom "Column built from other columns", use '
            'the function in that column\'s template, and use that '
            'column\'s value in your save/send templates'
            )

    def evaluate(self, formatter, kwargs, mi, locals):
        # _proxy_metadata is only present when running inside the GUI;
        # otherwise raise the GUI-only error.
        if hasattr(mi, '_proxy_metadata'):
            fmt_data = mi._proxy_metadata.db_approx_formats
            if not fmt_data:
                return ''
            data = sorted(fmt_data)
            return ','.join(v.upper() for v in data)
        self.only_in_gui_error()
class BuiltinFormatsModtimes(BuiltinFormatterFunction):
    name = 'formats_modtimes'
    arg_count = 1
    category = 'Get values from metadata'
    __doc__ = doc = _('formats_modtimes(date_format) -- return a comma-separated '
            'list of colon-separated items representing modification times '
            'for the formats of a book. The date_format parameter '
            'specifies how the date is to be formatted. See the '
            'format_date function for details. You can use the select '
            'function to get the mod time for a specific '
            'format. Note that format names are always uppercase, '
            'as in EPUB.'
            )

    def evaluate(self, formatter, kwargs, mi, locals, fmt):
        fmt_data = mi.get('format_metadata', {})
        try:
            # Most recently modified format first.
            data = sorted(fmt_data.items(), key=lambda x:x[1]['mtime'], reverse=True)
            return ','.join(k.upper()+':'+format_date(v['mtime'], fmt)
                            for k,v in data)
        except:
            # Malformed metadata: degrade to the empty string.
            return ''
class BuiltinFormatsSizes(BuiltinFormatterFunction):
    name = 'formats_sizes'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('formats_sizes() -- return a comma-separated list of '
            'colon-separated items representing sizes in bytes '
            'of the formats of a book. You can use the select '
            'function to get the size for a specific '
            'format. Note that format names are always uppercase, '
            'as in EPUB.'
            )

    def evaluate(self, formatter, kwargs, mi, locals):
        fmt_data = mi.get('format_metadata', {})
        try:
            parts = (fmt.upper() + ':' + str(meta['size'])
                     for fmt, meta in iteritems(fmt_data))
            return ','.join(parts)
        except:
            # Malformed metadata: degrade to the empty string.
            return ''
class BuiltinFormatsPaths(BuiltinFormatterFunction):
    name = 'formats_paths'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('formats_paths() -- return a comma-separated list of '
            'colon-separated items representing full path to '
            'the formats of a book. You can use the select '
            'function to get the path for a specific '
            'format. Note that format names are always uppercase, '
            'as in EPUB.')

    def evaluate(self, formatter, kwargs, mi, locals):
        fmt_data = mi.get('format_metadata', {})
        try:
            parts = (fmt.upper() + ':' + str(meta['path'])
                     for fmt, meta in iteritems(fmt_data))
            return ','.join(parts)
        except:
            # Malformed metadata: degrade to the empty string.
            return ''
class BuiltinHumanReadable(BuiltinFormatterFunction):
    name = 'human_readable'
    arg_count = 1
    category = 'Formatting values'
    __doc__ = doc = _('human_readable(v) -- return a string '
            'representing the number v in KB, MB, GB, etc.'
            )

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Non-numeric input degrades to the empty string.
        try:
            return human_readable(round(float(val)))
        except:
            return ''
class BuiltinFormatNumber(BuiltinFormatterFunction):
    name = 'format_number'
    arg_count = 2
    category = 'Formatting values'
    __doc__ = doc = _('format_number(v, template) -- format the number v using '
            'a Python formatting template such as "{0:5.2f}" or '
            '"{0:,d}" or "${0:5,.2f}". The field_name part of the '
            'template must be a 0 (zero) (the "{0:" in the above examples). '
            'See the template language and Python documentation for more '
            'examples. You can leave off the leading "{0:" and trailing '
            '"}" if the template contains only a format. Returns the empty '
            'string if formatting fails.'
            )

    def evaluate(self, formatter, kwargs, mi, locals, val, template):
        if val == '' or val == 'None':
            return ''
        if '{' not in template:
            # Bare format spec: wrap it into a full format string.
            template = '{0:' + template + '}'
        try:
            v1 = float(val)
        except:
            # Not a number at all: formatting fails silently.
            return ''
        try:  # Try formatting the value as a float
            return template.format(v1)
        except:
            pass
        try:  # Try formatting the value as an int
            # Only use the int form when it is exactly equal to the float,
            # so integer-only format specs (e.g. 'd') work for whole numbers.
            v2 = trunc(v1)
            if v2 == v1:
                return template.format(v2)
        except:
            pass
        return ''
class BuiltinSublist(BuiltinFormatterFunction):
    name = 'sublist'
    arg_count = 4
    category = 'List manipulation'
    __doc__ = doc = _('sublist(val, start_index, end_index, separator) -- interpret the '
            'value as a list of items separated by `separator`, returning a '
            'new list made from the `start_index` to the `end_index` item. '
            'The first item is number zero. If an index is negative, then it '
            'counts from the end of the list. As a special case, an end_index '
            'of zero is assumed to be the length of the list. Examples using '
            'basic template mode and assuming that the tags column (which is '
            'comma-separated) contains "A, B, C": '
            '{tags:sublist(0,1,\\\\,)} returns "A". '
            '{tags:sublist(-1,0,\\\\,)} returns "C". '
            '{tags:sublist(0,-1,\\\\,)} returns "A, B".'
            )

    def evaluate(self, formatter, kwargs, mi, locals, val, start_index, end_index, sep):
        if not val:
            return ''
        begin = int(start_index)
        end = int(end_index)
        # Keep empty items so that counts match what the user expects
        items = [v.strip() for v in val.split(sep)]
        if sep == ',':
            sep = ', '
        try:
            # end == 0 is a special case meaning "through the end of the list"
            chosen = items[begin:] if end == 0 else items[begin:end]
            return sep.join(chosen)
        except:
            return ''
class BuiltinSubitems(BuiltinFormatterFunction):
    name = 'subitems'
    arg_count = 3
    category = 'List manipulation'
    __doc__ = doc = _('subitems(val, start_index, end_index) -- This function is used to '
            'break apart lists of items such as genres. It interprets the value '
            'as a comma-separated list of items, where each item is a period-'
            'separated list. Returns a new list made by first finding all the '
            'period-separated items, then for each such item extracting the '
            '`start_index` to the `end_index` components, then combining '
            'the results back together. The first component in a period-'
            'separated list has an index of zero. If an index is negative, '
            'then it counts from the end of the list. As a special case, an '
            'end_index of zero is assumed to be the length of the list. '
            'Example using basic template mode and assuming a #genre value of '
            '"A.B.C": {#genre:subitems(0,1)} returns "A". {#genre:subitems(0,2)} '
            'returns "A.B". {#genre:subitems(1,0)} returns "B.C". Assuming a #genre '
            'value of "A.B.C, D.E.F", {#genre:subitems(0,1)} returns "A, D". '
            '{#genre:subitems(0,2)} returns "A.B, D.E"')

    # Matches a period between two non-period, non-space characters
    period_pattern = re.compile(r'(?<=[^\.\s])\.(?=[^\.\s])', re.U)

    def evaluate(self, formatter, kwargs, mi, locals, val, start_index, end_index):
        if not val:
            return ''
        begin = int(start_index)
        end = int(end_index)
        has_periods = '.' in val
        results = set()
        for item in (v.strip() for v in val.split(',') if v.strip()):
            if has_periods and '.' in item:
                components = self.period_pattern.split(item)
            else:
                components = [item]
            try:
                # end == 0 means "through the end of the component list"
                chosen = components[begin:] if end == 0 else components[begin:end]
                joined = '.'.join(chosen).strip()
                if joined:
                    results.add(joined)
            except:
                pass
        return ', '.join(sorted(results, key=sort_key))
class BuiltinFormatDate(BuiltinFormatterFunction):
    name = 'format_date'
    arg_count = 2
    category = 'Formatting values'
    __doc__ = doc = _('format_date(val, format_string) -- format the value, '
            'which must be a date, using the format_string, returning a string. '
            'The formatting codes are: '
            'd    : the day as number without a leading zero (1 to 31) '
            'dd   : the day as number with a leading zero (01 to 31) '
            'ddd  : the abbreviated localized day name (e.g. "Mon" to "Sun"). '
            'dddd : the long localized day name (e.g. "Monday" to "Sunday"). '
            'M    : the month as number without a leading zero (1 to 12). '
            'MM   : the month as number with a leading zero (01 to 12) '
            'MMM  : the abbreviated localized month name (e.g. "Jan" to "Dec"). '
            'MMMM : the long localized month name (e.g. "January" to "December"). '
            'yy   : the year as two digit number (00 to 99). '
            'yyyy : the year as four digit number. '
            'h    : the hours without a leading 0 (0 to 11 or 0 to 23, depending on am/pm) '
            'hh   : the hours with a leading 0 (00 to 11 or 00 to 23, depending on am/pm) '
            'm    : the minutes without a leading 0 (0 to 59) '
            'mm   : the minutes with a leading 0 (00 to 59) '
            's    : the seconds without a leading 0 (0 to 59) '
            'ss   : the seconds with a leading 0 (00 to 59) '
            'ap   : use a 12-hour clock instead of a 24-hour clock, with "ap" replaced by the localized string for am or pm '
            'AP   : use a 12-hour clock instead of a 24-hour clock, with "AP" replaced by the localized string for AM or PM '
            'iso  : the date with time and timezone. Must be the only format present '
            'to_number: the date as a floating point number '
            'from_number[:fmt]: format the timestamp using fmt if present otherwise iso')

    def evaluate(self, formatter, kwargs, mi, locals, val, format_string):
        # Empty values and the literal string 'None' format as ''
        if not val or val == 'None':
            return ''
        try:
            if format_string == 'to_number':
                # Convert the parsed date to a POSIX timestamp (float)
                s = parse_date(val).timestamp()
            elif format_string.startswith('from_number'):
                # 'from_number[:fmt]': val is a timestamp. Slice off the
                # 'from_number:' prefix (12 chars); an empty remainder
                # falls back to 'iso'.
                val = datetime.fromtimestamp(float(val))
                f = format_string[12:]
                s = format_date(val, f if f else 'iso')
            else:
                s = format_date(parse_date(val), format_string)
            return s
        except:
            # Unparseable input is reported inline rather than raising
            s = 'BAD DATE'
        return s
class BuiltinFormatDateField(BuiltinFormatterFunction):
    name = 'format_date_field'
    arg_count = 2
    category = 'Formatting values'
    __doc__ = doc = _("format_date_field(field_name, format_string) -- format "
            "the value in the field 'field_name', which must be the lookup name "
            "of date field, either standard or custom. See 'format_date' for "
            "the formatting codes. This function is much faster than format_date "
            "and should be used when you are formatting the value in a field "
            "(column). It can't be used for computed dates or dates in string "
            "variables. Example: format_date_field('pubdate', 'yyyy.MM.dd')")

    def evaluate(self, formatter, kwargs, mi, locals, field, format_string):
        try:
            # Map a search term (e.g. 'pubdate') to its canonical field key
            field = field_metadata.search_term_to_field_key(field)
            if field not in mi.all_field_keys():
                raise ValueError(_("Function %s: Unknown field '%s'")%('format_date_field', field))
            val = mi.get(field, None)
            if mi.metadata_for_field(field)['datatype'] != 'datetime':
                raise ValueError(_("Function %s: field '%s' is not a date")%('format_date_field', field))
            if val is None:
                s = ''
            elif format_string == 'to_number':
                # POSIX timestamp of the field's datetime value
                s = val.timestamp()
            elif format_string.startswith('from_number'):
                # 'from_number[:fmt]': val is a timestamp; slice off the
                # 'from_number:' prefix (12 chars), defaulting to 'iso'
                val = datetime.fromtimestamp(float(val))
                f = format_string[12:]
                s = format_date(val, f if f else 'iso')
            else:
                s = format_date(val, format_string)
            return s
        except ValueError:
            # Validation errors carry user-visible messages; propagate as-is
            raise
        except Exception:
            # Unexpected failure: log the traceback, then propagate.
            # (The unreachable 'return s' that used to follow this
            # unconditional re-raise has been removed.)
            traceback.print_exc()
            raise
class BuiltinUppercase(BuiltinFormatterFunction):
    name = 'uppercase'
    arg_count = 1
    category = 'String case changes'
    __doc__ = doc = _('uppercase(val) -- return val in upper case')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Unicode-aware upper-casing via str.upper
        return val.upper()
class BuiltinLowercase(BuiltinFormatterFunction):
    name = 'lowercase'
    arg_count = 1
    category = 'String case changes'
    __doc__ = doc = _('lowercase(val) -- return val in lower case')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Unicode-aware lower-casing via str.lower
        return val.lower()
class BuiltinTitlecase(BuiltinFormatterFunction):
    name = 'titlecase'
    arg_count = 1
    category = 'String case changes'
    __doc__ = doc = _('titlecase(val) -- return val in title case')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Delegates to the project's titlecase helper (not str.title)
        return titlecase(val)
class BuiltinCapitalize(BuiltinFormatterFunction):
    name = 'capitalize'
    arg_count = 1
    category = 'String case changes'
    __doc__ = doc = _('capitalize(val) -- return val capitalized')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Delegates to the project's capitalize helper (not str.capitalize)
        return capitalize(val)
class BuiltinBooksize(BuiltinFormatterFunction):
    name = 'booksize'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('booksize() -- return value of the size field. '
            'This function works only in the GUI. If you want to use this value '
            'in save-to-disk or send-to-device templates then you '
            'must make a custom "Column built from other columns", use '
            'the function in that column\'s template, and use that '
            'column\'s value in your save/send templates')

    def evaluate(self, formatter, kwargs, mi, locals):
        # _proxy_metadata exists only when running inside the GUI
        if hasattr(mi, '_proxy_metadata'):
            try:
                size = mi._proxy_metadata.book_size
                if size is not None:
                    return str(size)
            except:
                pass
            return ''
        self.only_in_gui_error()
class BuiltinOndevice(BuiltinFormatterFunction):
    name = 'ondevice'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('ondevice() -- return Yes if ondevice is set, otherwise return '
            'the empty string. This function works only in the GUI. If you want to '
            'use this value in save-to-disk or send-to-device templates then you '
            'must make a custom "Column built from other columns", use '
            'the function in that column\'s template, and use that '
            'column\'s value in your save/send templates')

    def evaluate(self, formatter, kwargs, mi, locals):
        # _proxy_metadata exists only when running inside the GUI
        if hasattr(mi, '_proxy_metadata'):
            return _('Yes') if mi._proxy_metadata.ondevice_col else ''
        self.only_in_gui_error()
class BuiltinAnnotationCount(BuiltinFormatterFunction):
    name = 'annotation_count'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('annotation_count() -- return the total number of annotations '
            'of all types attached to the current book. '
            'This function works only in the GUI.')

    def evaluate(self, formatter, kwargs, mi, locals):
        # A zero count renders as the empty string, not '0'
        count = self.get_database(mi).new_api.annotation_count_for_book(mi.id)
        return str(count) if count != 0 else ''
class BuiltinIsMarked(BuiltinFormatterFunction):
    name = 'is_marked'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _("is_marked() -- check whether the book is 'marked' in "
            "calibre. If it is then return the value of the mark, "
            "either 'true' or the comma-separated list of named "
            "marks. Returns '' if the book is not marked.")

    def evaluate(self, formatter, kwargs, mi, locals):
        # get_marked returns a falsy value for unmarked books
        mark = self.get_database(mi).data.get_marked(mi.id)
        return mark or ''
class BuiltinSeriesSort(BuiltinFormatterFunction):
    name = 'series_sort'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('series_sort() -- return the series sort value')

    def evaluate(self, formatter, kwargs, mi, locals):
        if not mi.series:
            return ''
        # Use the book's first language, if any, for language-aware sorting
        langs = mi.languages
        return title_sort(mi.series, lang=langs[0] if langs else None)
class BuiltinHasCover(BuiltinFormatterFunction):
    name = 'has_cover'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('has_cover() -- return Yes if the book has a cover, '
            'otherwise return the empty string')

    def evaluate(self, formatter, kwargs, mi, locals):
        # Localized 'Yes' when a cover exists, otherwise ''
        return _('Yes') if mi.has_cover else ''
class BuiltinFirstNonEmpty(BuiltinFormatterFunction):
    name = 'first_non_empty'
    arg_count = -1
    category = 'Iterating over values'
    __doc__ = doc = _('first_non_empty(value [, value]*) -- '
            'returns the first value that is not empty. If all values are '
            'empty, then the empty string is returned. '
            'You can have as many values as you want.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # Return the first truthy argument, in order
        for candidate in args:
            if candidate:
                return candidate
        return ''
class BuiltinAnd(BuiltinFormatterFunction):
    name = 'and'
    arg_count = -1
    category = 'Boolean'
    __doc__ = doc = _('and(value [, value]*) -- '
            'returns the string "1" if all values are not empty, otherwise '
            'returns the empty string. This function works well with test or '
            'first_non_empty. You can have as many values as you want. In many '
            'cases the && operator can replace this function.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # '1' only when every argument is non-empty; all() short-circuits
        # at the first empty value exactly as the explicit loop did
        return '1' if all(args) else ''
class BuiltinOr(BuiltinFormatterFunction):
    name = 'or'
    arg_count = -1
    category = 'Boolean'
    __doc__ = doc = _('or(value [, value]*) -- '
            'returns the string "1" if any value is not empty, otherwise '
            'returns the empty string. This function works well with test or '
            'first_non_empty. You can have as many values as you want. In many '
            'cases the || operator can replace this function.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # '1' when at least one argument is non-empty; any() short-circuits
        # at the first non-empty value exactly as the explicit loop did
        return '1' if any(args) else ''
class BuiltinNot(BuiltinFormatterFunction):
    name = 'not'
    arg_count = 1
    category = 'Boolean'
    __doc__ = doc = _('not(value) -- '
            'returns the string "1" if the value is empty, otherwise '
            'returns the empty string. This function works well with test or '
            'first_non_empty. In many cases the ! operator can replace this '
            'function.')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # Invert truthiness: empty -> '1', non-empty -> ''
        return '1' if not val else ''
class BuiltinListJoin(BuiltinFormatterFunction):
    name = 'list_join'
    arg_count = -1
    category = 'List manipulation'
    __doc__ = doc = _("list_join(with_separator, list1, separator1 [, list2, separator2]*) -- "
                      "return a list made by joining the items in the source lists "
                      "(list1, etc) using with_separator between the items in the "
                      "result list. Items in each source list[123...] are separated "
                      "by the associated separator[123...]. A list can contain "
                      "zero values. It can be a field like publisher that is "
                      "single-valued, effectively a one-item list. Duplicates "
                      "are removed using a case-insensitive comparison. Items are "
                      "returned in the order they appear in the source lists. "
                      "If items on lists differ only in letter case then the last "
                      "is used. All separators can be more than one character.\n"
                      "Example:") + "\n\n" + (
                      "  program:\n"
                      "    list_join('#@#', $authors, '&', $tags, ',')\n\n") + _(
                      "You can use list_join on the results of previous "
                      "calls to list_join as follows:") + "\n" + (
                      "  program:\n\n"
                      "    a = list_join('#@#', $authors, '&', $tags, ',');\n"
                      "    b = list_join('#@#', a, '#@#', $#genre, ',', $#people, '&')\n\n") + _(
                      "You can use expressions to generate a list. For example, "
                      "assume you want items for authors and #genre, but "
                      "with the genre changed to the word 'Genre: ' followed by "
                      "the first letter of the genre, i.e. the genre 'Fiction' "
                      "becomes 'Genre: F'. The following will do that:") + "\n" + (
                      "  program:\n"
                      "    list_join('#@#', $authors, '&', list_re($#genre, ',', '^(.).*$', 'Genre: \\1'), ',')")

    def evaluate(self, formatter, kwargs, mi, locals, with_separator, *args):
        if len(args) % 2 != 0:
            raise ValueError(
                _("Invalid 'List, separator' pairs. Every list must have one "
                  "associated separator"))
        # dicts preserve insertion order (python 3.7+), so results come out
        # in source order; later occurrences win for case-only differences
        result = {}
        for src, sep in zip(args[::2], args[1::2]):
            for item in src.split(sep):
                item = item.strip()
                if item:
                    result[item.lower()] = item
        return with_separator.join(result.values())
class BuiltinListUnion(BuiltinFormatterFunction):
    name = 'list_union'
    arg_count = 3
    category = 'List manipulation'
    __doc__ = doc = _('list_union(list1, list2, separator) -- '
            'return a list made by merging the items in list1 and list2, '
            'removing duplicate items using a case-insensitive comparison. If '
            'items differ in case, the one in list1 is used. '
            'The items in list1 and list2 are separated by separator, as are '
            'the items in the returned list. Aliases: list_union(), merge_lists()')
    aliases = ['merge_lists']

    def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
        # Seed with list2, then overwrite with list1 so list1's casing wins;
        # insertion order keeps list2's items first, as before
        merged = {}
        for src in (list2, list1):
            for item in src.split(separator):
                item = item.strip()
                if item:
                    merged[icu_lower(item)] = item
        if separator == ',':
            separator = ', '
        return separator.join(merged.values())
class BuiltinRange(BuiltinFormatterFunction):
    name = 'range'
    arg_count = -1
    category = 'List manipulation'
    __doc__ = doc = _("range(start, stop, step, limit) -- "
                      "returns a list of numbers generated by looping over the "
                      "range specified by the parameters start, stop, and step, "
                      "with a maximum length of limit. The first value produced "
                      "is 'start'. Subsequent values next_v are "
                      "current_v+step. The loop continues while "
                      "next_v < stop assuming step is positive, otherwise "
                      "while next_v > stop. An empty list is produced if "
                      "start fails the test: start>=stop if step "
                      "is positive. The limit sets the maximum length of "
                      "the list and has a default of 1000. The parameters "
                      "start, step, and limit are optional. "
                      "Calling range() with one argument specifies stop. "
                      "Two arguments specify start and stop. Three arguments "
                      "specify start, stop, and step. Four "
                      "arguments specify start, stop, step and limit. "
                      "Examples: range(5) -> '0,1,2,3,4'. range(0,5) -> '0,1,2,3,4'. "
                      "range(-1,5) -> '-1,0,1,2,3,4'. range(1,5) -> '1,2,3,4'. "
                      "range(1,5,2) -> '1,3'. range(1,5,2,5) -> '1,3'. "
                      "range(1,5,2,1) -> error(limit exceeded).")

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        def to_int(arg):
            # Empty and 'None' arguments are treated as zero
            return int(arg) if arg and arg != 'None' else 0

        limit_val = 1000
        start_val = 0
        step_val = 1
        if len(args) == 1:
            stop_val = to_int(args[0])
        elif len(args) == 2:
            start_val = to_int(args[0])
            stop_val = to_int(args[1])
        elif len(args) >= 3:
            start_val = to_int(args[0])
            stop_val = to_int(args[1])
            step_val = to_int(args[2])
            if len(args) > 3:
                limit_val = to_int(args[3])
        r = range(start_val, stop_val, step_val)
        # Guard against accidentally huge results
        if len(r) > limit_val:
            raise ValueError(
                _("{0}: length ({1}) longer than limit ({2})").format(
                    'range', len(r), str(limit_val)))
        return ', '.join(str(v) for v in r)
class BuiltinListRemoveDuplicates(BuiltinFormatterFunction):
    name = 'list_remove_duplicates'
    arg_count = 2
    category = 'List manipulation'
    __doc__ = doc = _('list_remove_duplicates(list, separator) -- '
            'return a list made by removing duplicate items in the source list. '
            'If items differ only in case, the last of them is returned. '
            'The items in source list are separated by separator, as are '
            'the items in the returned list.')

    def evaluate(self, formatter, kwargs, mi, locals, list_, separator):
        # Case-insensitive dedup; insertion order preserved, last casing wins
        deduped = {}
        for item in list_.split(separator):
            item = item.strip()
            if item:
                deduped[icu_lower(item)] = item
        if separator == ',':
            separator = ', '
        return separator.join(deduped.values())
class BuiltinListDifference(BuiltinFormatterFunction):
    name = 'list_difference'
    arg_count = 3
    category = 'List manipulation'
    __doc__ = doc = _('list_difference(list1, list2, separator) -- '
            'return a list made by removing from list1 any item found in list2, '
            'using a case-insensitive comparison. The items in list1 and list2 '
            'are separated by separator, as are the items in the returned list.')

    def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
        # Build the case-folded exclusion set once, then filter list1,
        # dropping duplicates while preserving order
        excluded = {icu_lower(x.strip()) for x in list2.split(separator) if x.strip()}
        kept = []
        for item in (x.strip() for x in list1.split(separator) if x.strip()):
            if icu_lower(item) not in excluded and item not in kept:
                kept.append(item)
        return (', ' if separator == ',' else separator).join(kept)
class BuiltinListIntersection(BuiltinFormatterFunction):
    name = 'list_intersection'
    arg_count = 3
    category = 'List manipulation'
    __doc__ = doc = _('list_intersection(list1, list2, separator) -- '
            'return a list made by removing from list1 any item not found in list2, '
            'using a case-insensitive comparison. The items in list1 and list2 '
            'are separated by separator, as are the items in the returned list.')

    def evaluate(self, formatter, kwargs, mi, locals, list1, list2, separator):
        # Build the case-folded inclusion set once, then filter list1,
        # dropping duplicates while preserving order
        wanted = {icu_lower(x.strip()) for x in list2.split(separator) if x.strip()}
        kept = []
        for item in (x.strip() for x in list1.split(separator) if x.strip()):
            if icu_lower(item) in wanted and item not in kept:
                kept.append(item)
        return (', ' if separator == ',' else separator).join(kept)
class BuiltinListSort(BuiltinFormatterFunction):
    name = 'list_sort'
    arg_count = 3
    category = 'List manipulation'
    __doc__ = doc = _('list_sort(list, direction, separator) -- '
            'return list sorted using a case-insensitive sort. If direction is '
            'zero, the list is sorted ascending, otherwise descending. The list items '
            'are separated by separator, as are the items in the returned list.')

    def evaluate(self, formatter, kwargs, mi, locals, list1, direction, separator):
        items = [x.strip() for x in list1.split(separator) if x.strip()]
        # Any direction other than the string '0' sorts descending
        items.sort(key=sort_key, reverse=direction != "0")
        if separator == ',':
            separator = ', '
        return separator.join(items)
class BuiltinListEquals(BuiltinFormatterFunction):
    name = 'list_equals'
    arg_count = 6
    category = 'List manipulation'
    __doc__ = doc = _('list_equals(list1, sep1, list2, sep2, yes_val, no_val) -- '
            'return yes_val if list1 and list2 contain the same items, '
            'otherwise return no_val. The items are determined by splitting '
            'each list using the appropriate separator character (sep1 or '
            'sep2). The order of items in the lists is not relevant. '
            'The comparison is case insensitive.')

    def evaluate(self, formatter, kwargs, mi, locals, list1, sep1, list2, sep2, yes_val, no_val):
        def as_set(src, sep):
            # Case-folded set of stripped, non-empty items
            return {icu_lower(x.strip()) for x in src.split(sep) if x.strip()}
        return yes_val if as_set(list1, sep1) == as_set(list2, sep2) else no_val
class BuiltinListRe(BuiltinFormatterFunction):
    name = 'list_re'
    arg_count = 4
    category = 'List manipulation'
    __doc__ = doc = _('list_re(src_list, separator, include_re, opt_replace) -- '
            'Construct a list by first separating src_list into items using '
            'the separator character. For each item in the list, check if it '
            'matches include_re. If it does, then add it to the list to be '
            'returned. If opt_replace is not the empty string, then apply the '
            'replacement before adding the item to the returned list.')

    def evaluate(self, formatter, kwargs, mi, locals, src_list, separator, include_re, opt_replace):
        res = []
        for item in (x.strip() for x in src_list.split(separator) if x.strip()):
            # Case-insensitive inclusion test
            if re.search(include_re, item, flags=re.I) is None:
                continue
            if opt_replace:
                item = re.sub(include_re, opt_replace, item)
            # A replacement can itself contain separators, so split again
            for piece in (x.strip() for x in item.split(separator) if x.strip()):
                if piece not in res:
                    res.append(piece)
        return (', ' if separator == ',' else separator).join(res)
class BuiltinListReGroup(BuiltinFormatterFunction):
    name = 'list_re_group'
    arg_count = -1
    category = 'List manipulation'
    __doc__ = doc = _('list_re_group(src_list, separator, include_re, search_re [, group_template]+) -- '
            'Like list_re except replacements are not optional. It '
            'uses re_group(list_item, search_re, group_template, ...) when '
            'doing the replacements on the resulting list.')

    def evaluate(self, formatter, kwargs, mi, locals, src_list, separator, include_re,
                 search_re, *args):
        from calibre.utils.formatter import EvalFormatter

        # Split the source into stripped, non-empty items
        l = [l.strip() for l in src_list.split(separator) if l.strip()]
        res = []
        for item in l:
            def repl(mo):
                # Build the replacement by applying the per-group template
                # (args[dex]) to each captured group; groups with no
                # corresponding template are copied through unchanged.
                newval = ''
                if mo and mo.lastindex:
                    for dex in range(0, mo.lastindex):
                        gv = mo.group(dex+1)
                        if gv is None:
                            continue
                        if len(args) > dex:
                            # '[[' / ']]' escape '{' / '}' in the mini-template;
                            # '$' is bound to the group's text during evaluation
                            template = args[dex].replace('[[', '{').replace(']]', '}')
                            newval += EvalFormatter().safe_format(template, {'$': gv},
                                                  'EVAL', None, strip_results=False)
                        else:
                            newval += gv
                return newval
            # Case-insensitive inclusion test, then group-wise substitution
            if re.search(include_re, item, flags=re.I) is not None:
                item = re.sub(search_re, repl, item, flags=re.I)
            # The substitution result may contain separators, so re-split it
            for i in [t.strip() for t in item.split(separator) if t.strip()]:
                if i not in res:
                    res.append(i)
        if separator == ',':
            return ', '.join(res)
        return separator.join(res)
class BuiltinToday(BuiltinFormatterFunction):
    name = 'today'
    arg_count = 0
    category = 'Date functions'
    __doc__ = doc = _('today() -- '
            'return a date string for today. This value is designed for use in '
            'format_date or days_between, but can be manipulated like any '
            'other string. The date is in ISO format.')

    def evaluate(self, formatter, kwargs, mi, locals):
        # Render the current date/time in ISO format
        return format_date(now(), 'iso')
class BuiltinDaysBetween(BuiltinFormatterFunction):
    name = 'days_between'
    arg_count = 2
    category = 'Date functions'
    __doc__ = doc = _('days_between(date1, date2) -- '
            'return the number of days between date1 and date2. The number is '
            'positive if date1 is greater than date2, otherwise negative. If '
            'either date1 or date2 are not dates, the function returns the '
            'empty string.')

    def evaluate(self, formatter, kwargs, mi, locals, date1, date2):
        # Unparseable or undefined dates produce the empty string
        try:
            d1 = parse_date(date1)
            d2 = parse_date(date2)
        except:
            return ''
        if d1 == UNDEFINED_DATE or d2 == UNDEFINED_DATE:
            return ''
        delta = d1 - d2
        # Fractional days, rendered with one decimal place
        return '%.1f' % (delta.days + (delta.seconds / (24.0 * 60.0 * 60.0)))
class BuiltinDateArithmetic(BuiltinFormatterFunction):
    name = 'date_arithmetic'
    arg_count = -1
    category = 'Date functions'
    __doc__ = doc = _('date_arithmetic(date, calc_spec, fmt) -- '
            "Calculate a new date from 'date' using 'calc_spec'. Return the "
            "new date formatted according to optional 'fmt': if not supplied "
            "then the result will be in iso format. The calc_spec is a string "
            "formed by concatenating pairs of 'vW' (valueWhat) where 'v' is a "
            "possibly-negative number and W is one of the following letters: "
            "s: add 'v' seconds to 'date' "
            "m: add 'v' minutes to 'date' "
            "h: add 'v' hours to 'date' "
            "d: add 'v' days to 'date' "
            "w: add 'v' weeks to 'date' "
            "y: add 'v' years to 'date', where a year is 365 days. "
            "Example: '1s3d-1m' will add 1 second, add 3 days, and subtract 1 "
            "minute from 'date'.")

    # Maps each spec letter to a timedelta builder for that unit
    calc_ops = {
        's': lambda v: timedelta(seconds=v),
        'm': lambda v: timedelta(minutes=v),
        'h': lambda v: timedelta(hours=v),
        'd': lambda v: timedelta(days=v),
        'w': lambda v: timedelta(weeks=v),
        'y': lambda v: timedelta(days=v * 365),
    }

    def evaluate(self, formatter, kwargs, mi, locals, date, calc_spec, fmt=None):
        try:
            d = parse_date(date)
            if d == UNDEFINED_DATE:
                return ''
            # Consume the spec one 'vW' pair at a time from the front
            while calc_spec:
                mo = re.match(r'([-+\d]+)([smhdwy])', calc_spec)
                if mo is None:
                    raise ValueError(
                        _("{0}: invalid calculation specifier '{1}'").format(
                            'date_arithmetic', calc_spec))
                d += self.calc_ops[mo[2]](int(mo[1]))
                calc_spec = calc_spec[len(mo[0]):]
            return format_date(d, fmt if fmt else 'iso')
        except ValueError as e:
            # Validation errors carry user-visible messages; propagate as-is
            raise e
        except Exception as e:
            # Unexpected failures: log, then re-wrap as a template error
            traceback.print_exc()
            raise ValueError(_("{0}: error: {1}").format('date_arithmetic', str(e)))
class BuiltinLanguageStrings(BuiltinFormatterFunction):
    name = 'language_strings'
    arg_count = 2
    category = 'Get values from metadata'
    __doc__ = doc = _('language_strings(lang_codes, localize) -- '
            'return the strings for the language codes passed in lang_codes. '
            'If localize is zero, return the strings in English. If '
            'localize is not zero, return the strings in the language of '
            'the current locale. Lang_codes is a comma-separated list.')

    def evaluate(self, formatter, kwargs, mi, locals, lang_codes, localize):
        names = []
        for code in (c.strip() for c in lang_codes.split(',') if c.strip()):
            # Codes that cannot be resolved are silently skipped
            try:
                name = calibre_langcode_to_name(code, localize != '0')
                if name:
                    names.append(name)
            except:
                pass
        return ', '.join(names)
class BuiltinLanguageCodes(BuiltinFormatterFunction):
    name = 'language_codes'
    arg_count = 1
    category = 'Get values from metadata'
    __doc__ = doc = _('language_codes(lang_strings) -- '
            'return the language codes for the strings passed in lang_strings. '
            'The strings must be in the language of the current locale. '
            'Lang_strings is a comma-separated list.')

    def evaluate(self, formatter, kwargs, mi, locals, lang_strings):
        retval = []
        for c in [c.strip() for c in lang_strings.split(',') if c.strip()]:
            # Strings that cannot be resolved are silently skipped
            try:
                cv = canonicalize_lang(c)
                if cv:
                    # cv is already a canonical code; append it directly
                    # instead of redundantly re-canonicalizing it
                    retval.append(cv)
            except:
                pass
        return ', '.join(retval)
class BuiltinCurrentLibraryName(BuiltinFormatterFunction):
    name = 'current_library_name'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('current_library_name() -- '
            'return the last name on the path to the current calibre library. '
            'This function can be called in template program mode using the '
            'template "{:\'current_library_name()\'}".')

    def evaluate(self, formatter, kwargs, mi, locals):
        # Imported lazily at call time rather than at module import
        from calibre.library import current_library_name
        return current_library_name()
class BuiltinCurrentLibraryPath(BuiltinFormatterFunction):
    name = 'current_library_path'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('current_library_path() -- '
            'return the path to the current calibre library. This function can '
            'be called in template program mode using the template '
            '"{:\'current_library_path()\'}".')

    def evaluate(self, formatter, kwargs, mi, locals):
        # Imported lazily at call time rather than at module import
        from calibre.library import current_library_path
        return current_library_path()
class BuiltinFinishFormatting(BuiltinFormatterFunction):
    name = 'finish_formatting'
    arg_count = 4
    category = 'Formatting values'
    __doc__ = doc = _('finish_formatting(val, fmt, prefix, suffix) -- apply the '
                      'format, prefix, and suffix to a value in the same way as '
                      'done in a template like `{series_index:05.2f| - |- }`. For '
                      'example, the following program produces the same output '
                      'as the above template: '
                      'program: finish_formatting(field("series_index"), "05.2f", " - ", " - ")')

    def evaluate(self, formatter, kwargs, mi, locals_, val, fmt, prefix, suffix):
        # Empty values pass through untouched, as in template formatting
        if not val:
            return val
        formatted = formatter._do_format(val, fmt)
        return prefix + formatted + suffix
class BuiltinVirtualLibraries(BuiltinFormatterFunction):
    name = 'virtual_libraries'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('virtual_libraries() -- return a comma-separated list of '
            'Virtual libraries that contain this book. This function '
            'works only in the GUI. If you want to use these values '
            'in save-to-disk or send-to-device templates then you '
            'must make a custom "Column built from other columns", use '
            'the function in that column\'s template, and use that '
            'column\'s value in your save/send templates')

    def evaluate(self, formatter, kwargs, mi, locals_):
        db = self.get_database(mi)
        try:
            # Maps book id -> names of virtual libraries containing it
            a = db.data.get_virtual_libraries_for_books((mi.id,))
            return ', '.join(a[mi.id])
        except ValueError as v:
            # Surface the error text inline in the rendered template
            return str(v)
class BuiltinCurrentVirtualLibraryName(BuiltinFormatterFunction):
    name = 'current_virtual_library_name'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('current_virtual_library_name() -- '
            'return the name of the current virtual library if there is one, '
            'otherwise the empty string. Library name case is preserved. '
            'Example: "program: current_virtual_library_name()".')

    def evaluate(self, formatter, kwargs, mi, locals):
        # The base restriction name is the active virtual library's name
        return self.get_database(mi).data.get_base_restriction_name()
class BuiltinUserCategories(BuiltinFormatterFunction):
    name = 'user_categories'
    arg_count = 0
    category = 'Get values from metadata'
    __doc__ = doc = _('user_categories() -- return a comma-separated list of '
            'the user categories that contain this book. This function '
            'works only in the GUI. If you want to use these values '
            'in save-to-disk or send-to-device templates then you '
            'must make a custom "Column built from other columns", use '
            'the function in that column\'s template, and use that '
            'column\'s value in your save/send templates')

    def evaluate(self, formatter, kwargs, mi, locals_):
        # _proxy_metadata exists only when running inside the GUI
        if hasattr(mi, '_proxy_metadata'):
            names = [k for k, v in iteritems(mi._proxy_metadata.user_categories) if v]
            return ', '.join(sorted(names, key=sort_key))
        self.only_in_gui_error()
class BuiltinTransliterate(BuiltinFormatterFunction):
    name = 'transliterate'
    arg_count = 1
    category = 'String manipulation'
    __doc__ = doc = _('transliterate(a) -- Returns a string in a latin alphabet '
                      'formed by approximating the sound of the words in the '
                      'source string. For example, if the source is "{0}"'
                      ' the function returns "{1}".').format(
                          "Фёдор Миха́йлович Достоевский", 'Fiodor Mikhailovich Dostoievskii')

    def evaluate(self, formatter, kwargs, mi, locals, source):
        # Imported lazily; ascii_text does the actual transliteration
        from calibre.utils.filenames import ascii_text
        return ascii_text(source)
class BuiltinGetLink(BuiltinFormatterFunction):
    name = 'get_link'
    arg_count = 2
    category = 'Template database functions'
    __doc__ = doc = _("get_link(field_name, field_value) -- fetch the link for "
                      "field 'field_name' with value 'field_value'. If there is "
                      "no attached link, return ''. Example: "
                      "get_link('tags', 'Fiction') returns the link attached to "
                      "the tag 'Fiction'.")

    def evaluate(self, formatter, kwargs, mi, locals, field_name, field_value):
        db = self.get_database(mi).new_api
        try:
            # Resolve the item id first; no item means no link
            item_id = db.get_item_id(field_name, field_value, case_sensitive=True)
            if item_id is None:
                return ''
            link = db.link_for(field_name, item_id)
            return '' if link is None else link
        except Exception as e:
            # Log for debugging, then surface as a template error
            traceback.print_exc()
            raise ValueError(e)
class BuiltinAuthorLinks(BuiltinFormatterFunction):
    name = 'author_links'
    arg_count = 2
    category = 'Get values from metadata'
    __doc__ = doc = _('author_links(val_separator, pair_separator) -- returns '
                      'a string containing a list of authors and that author\'s '
                      'link values in the '
                      'form author1 val_separator author1link pair_separator '
                      'author2 val_separator author2link etc. An author is '
                      'separated from its link value by the val_separator string '
                      'with no added spaces. author:linkvalue pairs are separated '
                      'by the pair_separator string argument with no added spaces. '
                      'It is up to you to choose separator strings that do '
                      'not occur in author names or links. An author is '
                      'included even if the author link is empty.')

    def evaluate(self, formatter, kwargs, mi, locals, val_sep, pair_sep):
        # link_maps is only available on the GUI's proxy metadata.
        if hasattr(mi, '_proxy_metadata'):
            link_data = mi._proxy_metadata.link_maps
            if not link_data:
                return ''
            # Only the 'authors' link map is relevant here.
            link_data = link_data.get('authors')
            if not link_data:
                return ''
            # Authors are emitted in lexicographic (sort_key) order, not book order.
            names = sorted(link_data.keys(), key=sort_key)
            return pair_sep.join(n + val_sep + link_data[n] for n in names)
        self.only_in_gui_error()
class BuiltinAuthorSorts(BuiltinFormatterFunction):
    name = 'author_sorts'
    arg_count = 1
    category = 'Get values from metadata'
    __doc__ = doc = _('author_sorts(val_separator) -- returns a string '
                      'containing a list of author\'s sort values for the '
                      'authors of the book. The sort is the one in the author '
                      'metadata (different from the author_sort in books). The '
                      'returned list has the form author sort 1 val_separator '
                      'author sort 2 etc. The author sort values in this list '
                      'are in the same order as the authors of the book. If '
                      'you want spaces around val_separator then include them '
                      'in the separator string')

    def evaluate(self, formatter, kwargs, mi, locals, val_sep):
        # author_sort_map maps author name -> author sort value, in book order.
        sort_data = mi.author_sort_map
        if not sort_data:
            return ''
        names = [sort_data.get(n) for n in mi.authors if n.strip()]
        # Robustness fix: an author missing from the map yields None from
        # get(), which previously made str.join() raise TypeError. Skip such
        # entries instead of crashing the whole template.
        return val_sep.join(n for n in names if n is not None)
class BuiltinConnectedDeviceName(BuiltinFormatterFunction):
    name = 'connected_device_name'
    arg_count = 1
    category = 'Get values from metadata'
    __doc__ = doc = _("connected_device_name(storage_location) -- if a device is "
                      "connected then return the device name, otherwise return "
                      "the empty string. Each storage location on a device can "
                      "have a different name. The location names are 'main', "
                      "'carda' and 'cardb'. This function works only in the GUI.")

    def evaluate(self, formatter, kwargs, mi, locals, storage_location):
        # We can't use get_database() here because we need the device manager.
        # In other words, the function really does need the GUI
        with suppress(Exception):
            # Do the import here so that we don't entangle the GUI when using
            # command line functions
            from calibre.gui2.ui import get_gui
            info = get_gui().device_manager.get_current_device_information()
            if info is None:
                return ''
            try:
                if storage_location not in {'main', 'carda', 'cardb'}:
                    raise ValueError(
                        _('connected_device_name: invalid storage location "{}"'
                                    .format(storage_location)))
                info = info['info'][4]
                if storage_location not in info:
                    return ''
                return info[storage_location]['device_name']
            except Exception:
                traceback.print_exc()
                raise
        # NOTE(review): because the whole body is inside suppress(Exception),
        # the re-raised ValueError above is swallowed and we fall through to
        # this "GUI only" error even when running inside the GUI — confirm
        # whether that masking is intentional.
        self.only_in_gui_error()
class BuiltinConnectedDeviceUUID(BuiltinFormatterFunction):
    name = 'connected_device_uuid'
    arg_count = 1
    category = 'Get values from metadata'
    __doc__ = doc = _("connected_device_uuid(storage_location) -- if a device is "
                      "connected then return the device uuid (unique id), "
                      "otherwise return the empty string. Each storage location "
                      "on a device has a different uuid. The location names are "
                      "'main', 'carda' and 'cardb'. This function works only in "
                      "the GUI.")

    def evaluate(self, formatter, kwargs, mi, locals, storage_location):
        # We can't use get_database() here because we need the device manager.
        # In other words, the function really does need the GUI
        with suppress(Exception):
            # Do the import here so that we don't entangle the GUI when using
            # command line functions
            from calibre.gui2.ui import get_gui
            info = get_gui().device_manager.get_current_device_information()
            if info is None:
                return ''
            try:
                if storage_location not in {'main', 'carda', 'cardb'}:
                    # Bug fix: the message previously named connected_device_name
                    # (copy-paste from the sibling class).
                    raise ValueError(
                        _('connected_device_uuid: invalid storage location "{}"'
                                    .format(storage_location)))
                info = info['info'][4]
                if storage_location not in info:
                    return ''
                return info[storage_location]['device_store_uuid']
            except Exception:
                traceback.print_exc()
                raise
        self.only_in_gui_error()
class BuiltinCheckYesNo(BuiltinFormatterFunction):
    name = 'check_yes_no'
    arg_count = 4
    category = 'If-then-else'
    __doc__ = doc = _('check_yes_no(field_name, is_undefined, is_false, is_true) '
                      '-- checks the value of the yes/no field named by the '
                      'lookup key field_name for a value specified by the '
                      'parameters, returning "yes" if a match is found, otherwise '
                      'returning an empty string. Set the parameter is_undefined, '
                      'is_false, or is_true to 1 (the number) to check that '
                      'condition, otherwise set it to 0. Example: '
                      'check_yes_no("#bool", 1, 0, 1) returns "yes" if the '
                      'yes/no field "#bool" is either undefined (neither True '
                      'nor False) or True. More than one of is_undefined, '
                      'is_false, or is_true can be set to 1. This function '
                      'is usually used by the test() or is_empty() functions.')

    def evaluate(self, formatter, kwargs, mi, locals, field, is_undefined, is_false, is_true):
        # 'field' is a lookup name, not a value
        if field not in self.get_database(mi).field_metadata:
            raise ValueError(_("The column {} doesn't exist").format(field))
        res = getattr(mi, field, None)
        # None means the yes/no value is undefined (neither True nor False).
        # NOTE: the returned truthy value is 'Yes' (capitalized), although
        # the user documentation above says "yes".
        if res is None:
            if is_undefined == '1':
                return 'Yes'
            return ""
        if not isinstance(res, bool):
            raise ValueError(_('check_yes_no requires the field be a Yes/No custom column'))
        if is_false == '1' and not res:
            return 'Yes'
        if is_true == '1' and res:
            return 'Yes'
        return ""
class BuiltinRatingToStars(BuiltinFormatterFunction):
    name = 'rating_to_stars'
    arg_count = 2
    category = 'Formatting values'
    __doc__ = doc = _('rating_to_stars(value, use_half_stars) '
                      '-- Returns the rating as string of star characters. '
                      'The value is a number between 0 and 5. Set use_half_stars '
                      'to 1 if you want half star characters for custom ratings '
                      'columns that support non-integer ratings, for example 2.5.')

    def evaluate(self, formatter, kwargs, mi, locals, value, use_half_stars):
        if not value:
            return ''
        err_msg = _('The rating must be a number between 0 and 5')
        try:
            # Internal rating scale is 0-10 (half-star granularity), so the
            # user-visible 0-5 value is doubled.
            v = float(value) * 2
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError(err_msg)
        if v < 0 or v > 10:
            raise ValueError(err_msg)
        from calibre.ebooks.metadata import rating_to_stars
        return rating_to_stars(v, use_half_stars == '1')
class BuiltinSwapAroundArticles(BuiltinFormatterFunction):
    name = 'swap_around_articles'
    arg_count = 2
    category = 'String manipulation'
    __doc__ = doc = _('swap_around_articles(val, separator) '
                      '-- returns the val with articles moved to the end. '
                      'The value can be a list, in which case each member '
                      'of the list is processed. If the value is a list then '
                      'you must provide the list value separator. If no '
                      'separator is provided then the value is treated as '
                      'being a single value, not a list.')

    def evaluate(self, formatter, kwargs, mi, locals, val, separator):
        if not val:
            return ''
        # Single-value case: title_sort() moves leading articles to the end,
        # inserting a comma; that comma is converted to ';' so the result
        # cannot be confused with a list separator.
        if not separator:
            return title_sort(val).replace(',', ';')
        result = []
        try:
            for v in [x.strip() for x in val.split(separator)]:
                result.append(title_sort(v).replace(',', ';'))
        except Exception:
            # Bug fix: was a bare `except:`; keep the best-effort behavior
            # (log and return what was processed) but don't trap
            # KeyboardInterrupt/SystemExit.
            traceback.print_exc()
        return separator.join(sorted(result, key=sort_key))
class BuiltinArguments(BuiltinFormatterFunction):
    name = 'arguments'
    arg_count = -1
    category = 'Other'
    __doc__ = doc = _('arguments(id[=expression] [, id[=expression]]*) '
                      '-- Used in a stored template to retrieve the arguments '
                      'passed in the call. It both declares and initializes '
                      'local variables, effectively parameters. The variables '
                      'are positional; they get the value of the parameter given '
                      'in the call in the same position. If the corresponding '
                      'parameter is not provided in the call then arguments '
                      'assigns that variable the provided default value. If '
                      'there is no default value then the variable is set to '
                      'the empty string.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # The arguments function is implemented in-line in the formatter
        raise NotImplementedError()
class BuiltinGlobals(BuiltinFormatterFunction):
    name = 'globals'
    arg_count = -1
    category = 'Other'
    __doc__ = doc = _('globals(id[=expression] [, id[=expression]]*) '
                      '-- Retrieves "global variables" that can be passed into '
                      'the formatter. It both declares and initializes local '
                      'variables with the names of the global variables passed '
                      'in. If the corresponding variable is not provided in '
                      'the passed-in globals then it assigns that variable the '
                      'provided default value. If there is no default value '
                      'then the variable is set to the empty string.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # The globals function is implemented in-line in the formatter
        raise NotImplementedError()
class BuiltinSetGlobals(BuiltinFormatterFunction):
    name = 'set_globals'
    arg_count = -1
    # Consistency fix: sibling inline-implemented functions (arguments,
    # globals) use 'Other'; the lowercase 'other' created a duplicate
    # category grouping.
    category = 'Other'
    __doc__ = doc = _('set_globals(id[=expression] [, id[=expression]]*) '
                      '-- Sets "global variables" that can be passed into '
                      'the formatter. The globals are given the name of the id '
                      'passed in. The value of the id is used unless an '
                      'expression is provided.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # The set_globals function is implemented in-line in the formatter
        raise NotImplementedError()
class BuiltinFieldExists(BuiltinFormatterFunction):
    name = 'field_exists'
    arg_count = 1
    category = 'If-then-else'
    __doc__ = doc = _('field_exists(field_name) -- checks if a field '
                      '(column) named field_name exists, returning '
                      "'1' if so and '' if not.")

    def evaluate(self, formatter, kwargs, mi, locals, field_name):
        # Lookup names are case-insensitive, hence the lower() before testing
        # membership in the set of known field keys.
        exists = field_name.lower() in mi.all_field_keys()
        return '1' if exists else ''
class BuiltinCharacter(BuiltinFormatterFunction):
    name = 'character'
    arg_count = 1
    category = 'String manipulation'
    __doc__ = doc = _('character(character_name) -- returns the '
                      'character named by character_name. For example, '
                      r"character('newline') returns a newline character ('\n'). "
                      "The supported character names are 'newline', 'return', "
                      "'tab', and 'backslash'.")

    def evaluate(self, formatter, kwargs, mi, locals, character_name):
        # The character function is implemented in-line in the formatter
        # (the original comment here said "globals function" — copy-paste).
        raise NotImplementedError()
class BuiltinToHex(BuiltinFormatterFunction):
    name = 'to_hex'
    arg_count = 1
    category = 'String manipulation'
    __doc__ = doc = _('to_hex(val) -- returns the string encoded in hex. '
                      'This is useful when constructing calibre URLs.')

    def evaluate(self, formatter, kwargs, mi, locals, val):
        # UTF-8 encode first, then render each byte as two lowercase hex digits.
        encoded = val.encode()
        return encoded.hex()
class BuiltinUrlsFromIdentifiers(BuiltinFormatterFunction):
    name = 'urls_from_identifiers'
    arg_count = 2
    category = 'Formatting values'
    # Typo fix in the user-visible documentation: "The list not sorted" ->
    # "The list is not sorted".
    __doc__ = doc = _('urls_from_identifiers(identifiers, sort_results) -- given '
                      'a comma-separated list of identifiers, where an identifier '
                      'is a colon-separated pair of values (name:id_value), returns a '
                      'comma-separated list of HTML URLs generated from the '
                      'identifiers. The list is not sorted if sort_results is 0 '
                      '(character or number), otherwise it is sorted alphabetically '
                      'by the identifier name. The URLs are generated in the same way '
                      'as the built-in identifiers column when shown in Book details.')

    def evaluate(self, formatter, kwargs, mi, locals, identifiers, sort_results):
        from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
        try:
            # Parse "name:value" pairs, ignoring empty or malformed entries.
            v = {}
            for id_ in identifiers.split(','):
                if id_:
                    pair = id_.split(':', maxsplit=1)
                    if len(pair) == 2:
                        l = pair[0].strip()
                        r = pair[1].strip()
                        if l and r:
                            v[l] = r
            urls = urls_from_identifiers(v, sort_results=str(sort_results) != '0')
            p = prepare_string_for_xml
            a = partial(prepare_string_for_xml, attribute=True)
            links = [f'<a href="{a(url)}" title="{a(id_typ)}:{a(id_val)}">{p(name)}</a>'
                     for name, id_typ, id_val, url in urls]
            return ', '.join(links)
        except Exception as e:
            return str(e)
class BuiltinBookCount(BuiltinFormatterFunction):
    name = 'book_count'
    arg_count = 2
    category = 'Template database functions'
    __doc__ = doc = _('book_count(query, use_vl) -- returns the count of '
                      'books found by searching for query. If use_vl is '
                      '0 (zero) then virtual libraries are ignored. This '
                      'function can be used only in the GUI.')

    def evaluate(self, formatter, kwargs, mi, locals, query, use_vl):
        from calibre.db.fields import rendering_composite_name
        # Running searches while rendering a composite column can deadlock /
        # recurse, so it is forbidden unless explicitly enabled by a tweak.
        if (not tweaks.get('allow_template_database_functions_in_composites', False) and
                formatter.global_vars.get(rendering_composite_name, None)):
            raise ValueError(_('The book_count() function cannot be used in a composite column'))
        db = self.get_database(mi)
        try:
            ids = db.search_getting_ids(query, None, use_virtual_library=use_vl != '0')
            return len(ids)
        except Exception:
            # NOTE(review): on search failure this logs and implicitly
            # returns None (unlike book_values(), which raises ValueError) —
            # confirm whether that asymmetry is intentional.
            traceback.print_exc()
class BuiltinBookValues(BuiltinFormatterFunction):
    name = 'book_values'
    arg_count = 4
    category = 'Template database functions'
    __doc__ = doc = _('book_values(column, query, sep, use_vl) -- returns a list '
                      'of the values contained in the column "column", separated '
                      'by "sep", in the books found by searching for "query". '
                      'If use_vl is 0 (zero) then virtual libraries are ignored. '
                      'This function can be used only in the GUI.')

    def evaluate(self, formatter, kwargs, mi, locals, column, query, sep, use_vl):
        from calibre.db.fields import rendering_composite_name
        # Searching while rendering a composite column is forbidden unless
        # explicitly enabled by a tweak (see book_count()).
        if (not tweaks.get('allow_template_database_functions_in_composites', False) and
                formatter.global_vars.get(rendering_composite_name, None)):
            raise ValueError(_('The book_values() function cannot be used in a composite column'))
        db = self.get_database(mi)
        if column not in db.field_metadata:
            raise ValueError(_("The column {} doesn't exist").format(column))
        try:
            ids = db.search_getting_ids(query, None, use_virtual_library=use_vl != '0')
            # Collect the distinct values across all matched books. Multi-valued
            # fields (tags, authors, ...) contribute each of their members.
            s = set()
            for id_ in ids:
                f = db.new_api.get_proxy_metadata(id_).get(column, None)
                if isinstance(f, (tuple, list)):
                    s.update(f)
                elif f is not None:
                    s.add(str(f))
            # NOTE: the set is joined unsorted, so the order of values in the
            # result is unspecified.
            return sep.join(s)
        except Exception as e:
            raise ValueError(e)
class BuiltinHasExtraFiles(BuiltinFormatterFunction):
    name = 'has_extra_files'
    arg_count = -1
    category = 'Template database functions'
    __doc__ = doc = _("has_extra_files([pattern]) -- returns the count of extra "
                      "files, otherwise '' (the empty string). "
                      "If the optional parameter 'pattern' (a regular expression) "
                      "is supplied then the list is filtered to files that match "
                      "pattern before the files are counted. The pattern match is "
                      "case insensitive. "
                      'This function can be used only in the GUI.')

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # arg_count is -1 (variable); validate that at most one positional
        # argument (the optional pattern) was supplied.
        if len(args) > 1:
            raise ValueError(_('Incorrect number of arguments for function {0}').format('has_extra_files'))
        pattern = args[0] if len(args) == 1 else None
        db = self.get_database(mi).new_api
        try:
            # relpath is 'data/<name>'; strip the leading folder component.
            files = tuple(f.relpath.partition('/')[-1] for f in
                          db.list_extra_files(mi.id, use_cache=True, pattern=DATA_FILE_PATTERN))
            if pattern:
                r = re.compile(pattern, re.IGNORECASE)
                files = tuple(filter(r.search, files))
            # Per the contract: a count when there are files, '' when none.
            return len(files) if len(files) > 0 else ''
        except Exception as e:
            traceback.print_exc()
            raise ValueError(e)
class BuiltinExtraFileNames(BuiltinFormatterFunction):
    name = 'extra_file_names'
    arg_count = -1
    category = 'Template database functions'
    __doc__ = doc = _("extra_file_names(sep [, pattern]) -- returns a sep-separated "
                      "list of extra files in the book's '{}/' folder. If the "
                      "optional parameter 'pattern', a regular expression, is "
                      "supplied then the list is filtered to files that match pattern. "
                      "The pattern match is case insensitive. "
                      'This function can be used only in the GUI.').format(DATA_DIR_NAME)

    def evaluate(self, formatter, kwargs, mi, locals, sep, *args):
        if len(args) > 1:
            # Bug fix: the error message previously named 'has_extra_files'
            # (copy-paste from the sibling class).
            raise ValueError(_('Incorrect number of arguments for function {0}').format('extra_file_names'))
        pattern = args[0] if len(args) == 1 else None
        db = self.get_database(mi).new_api
        try:
            # relpath is 'data/<name>'; strip the leading folder component.
            files = tuple(f.relpath.partition('/')[-1] for f in
                          db.list_extra_files(mi.id, use_cache=True, pattern=DATA_FILE_PATTERN))
            if pattern:
                r = re.compile(pattern, re.IGNORECASE)
                files = tuple(filter(r.search, files))
            return sep.join(files)
        except Exception as e:
            traceback.print_exc()
            raise ValueError(e)
class BuiltinExtraFileSize(BuiltinFormatterFunction):
    name = 'extra_file_size'
    arg_count = 1
    category = 'Template database functions'
    # Fix: added the missing space before 'This function' (the concatenated
    # doc previously read "-1.This function").
    __doc__ = doc = _("extra_file_size(file_name) -- returns the size in bytes of "
                      "the extra file 'file_name' in the book's '{}/' folder if "
                      "it exists, otherwise -1. "
                      'This function can be used only in the GUI.').format(DATA_DIR_NAME)

    def evaluate(self, formatter, kwargs, mi, locals, file_name):
        db = self.get_database(mi).new_api
        try:
            # Extra files are stored under the data dir; build the relpath to match.
            q = posixpath.join(DATA_DIR_NAME, file_name)
            for f in db.list_extra_files(mi.id, use_cache=True, pattern=DATA_FILE_PATTERN):
                if f.relpath == q:
                    return str(f.stat_result.st_size)
            # Documented sentinel for "no such extra file".
            return str(-1)
        except Exception as e:
            traceback.print_exc()
            raise ValueError(e)
class BuiltinExtraFileModtime(BuiltinFormatterFunction):
    name = 'extra_file_modtime'
    arg_count = 2
    category = 'Template database functions'
    __doc__ = doc = _("extra_file_modtime(file_name, format_string) -- returns the "
                      "modification time of the extra file 'file_name' in the "
                      "book's '{}/' folder if it exists, otherwise -1.0. The "
                      "modtime is formatted according to 'format_string' "
                      "(see format_date()). If 'format_string' is empty, returns "
                      "the modtime as the floating point number of seconds since "
                      "the epoch. The epoch is OS dependent. "
                      "This function can be used only in the GUI.").format(DATA_DIR_NAME)

    def evaluate(self, formatter, kwargs, mi, locals, file_name, format_string):
        db = self.get_database(mi).new_api
        try:
            q = posixpath.join(DATA_DIR_NAME, file_name)
            for f in db.list_extra_files(mi.id, use_cache=True, pattern=DATA_FILE_PATTERN):
                if f.relpath == q:
                    val = f.stat_result.st_mtime
                    if format_string:
                        return format_date(datetime.fromtimestamp(val), format_string)
                    return str(val)
            # Bug fix: the documented "file not found" sentinel is -1.0, but
            # the code returned str(1.0) — a valid epoch timestamp.
            return str(-1.0)
        except Exception as e:
            traceback.print_exc()
            raise ValueError(e)
class BuiltinGetNote(BuiltinFormatterFunction):
    name = 'get_note'
    arg_count = 3
    category = 'Template database functions'
    __doc__ = doc = _("get_note(field_name, field_value, plain_text) -- fetch the "
                      "note for field 'field_name' with value 'field_value'. If "
                      "'plain_text' is empty, return the note's HTML including "
                      "images. If 'plain_text' is 1 (or '1'), return the "
                      "note's plain text. If the note doesn't exist, return the "
                      "empty string in both cases. Example: "
                      "get_note('tags', 'Fiction', '') returns the HTML of the "
                      "note attached to the tag 'Fiction'.")

    def evaluate(self, formatter, kwargs, mi, locals, field_name, field_value, plain_text):
        db = self.get_database(mi).new_api
        try:
            note = None
            item_id = db.get_item_id(field_name, field_value, case_sensitive=True)
            if item_id is not None:
                note = db.notes_data_for(field_name, item_id)
            if note is not None:
                if plain_text == '1':
                    # The first line of searchable_text is the item title;
                    # partition() drops it and keeps the note body.
                    note = note['searchable_text'].partition('\n')[2]
                else:
                    # Return the full HTML of the note, including all images
                    # as data: URLs. Reason: non-exported note html contains
                    # "calres://" URLs for images. These images won't render
                    # outside the context of the library where the note
                    # "lives". For example, they don't work in book jackets
                    # and book details from a different library. They also
                    # don't work in tooltips.

                    # This code depends on the note being wrapped in <body>
                    # tags by parse_html. The body is changed to a <div>.
                    # That means we often end up with <div><div> or some
                    # such, but that is OK
                    root = parse_html(note['doc'])
                    # There should be only one <body>
                    root = root.xpath('//body')[0]
                    # Change the body to a div
                    root.tag = 'div'
                    # Expand all the resources in the note
                    root = expand_note_resources(root, db.get_notes_resource)
                    note = html.tostring(root, encoding='unicode')
            return '' if note is None else note
        except Exception as e:
            traceback.print_exc()
            raise ValueError(e)
class BuiltinHasNote(BuiltinFormatterFunction):
    name = 'has_note'
    arg_count = 2
    category = 'Template database functions'
    __doc__ = doc = _("has_note(field_name, field_value) -- return '1' "
                      "if the value 'field_value' in the field 'field_name' "
                      "has an attached note, '' otherwise. Example: "
                      "has_note('tags', 'Fiction') returns '1' if the tag "
                      "'fiction' has an attached note, '' otherwise.")

    def evaluate(self, formatter, kwargs, mi, locals, field_name, field_value):
        db = self.get_database(mi).new_api
        note = None
        try:
            # Resolve the value to its item id (case sensitive), then see
            # whether that item carries note data.
            item_id = db.get_item_id(field_name, field_value, case_sensitive=True)
            if item_id is not None:
                note = db.notes_data_for(field_name, item_id)
        except Exception as exc:
            traceback.print_exc()
            raise ValueError(exc)
        return '' if note is None else '1'
class BuiltinIsDarkMode(BuiltinFormatterFunction):
    name = 'is_dark_mode'
    arg_count = 0
    # Consistency fix: sibling functions use the capitalized 'Other'; the
    # lowercase 'other' created a duplicate category grouping.
    category = 'Other'
    __doc__ = doc = _("is_dark_mode() -- Returns '1' if calibre is running "
                      "in dark mode, '' (the empty string) otherwise. This "
                      "function can be used in advanced color and icon rules "
                      "to choose different colors/icons according to the mode. "
                      "Example: {} ").format("if is_dark_mode() then 'dark.png' else 'light.png' fi")

    def evaluate(self, formatter, kwargs, mi, locals):
        try:
            # Import this here so that Qt isn't referenced unless this function is used.
            from calibre.gui2 import is_dark_theme
            return '1' if is_dark_theme() else ''
        except Exception:
            # NOTE(review): this calls the module-level only_in_gui_error()
            # helper (not self.only_in_gui_error() as siblings do) — presumably
            # defined earlier in this module; verify.
            only_in_gui_error('is_dark_mode')
class BuiltinFieldListCount(BuiltinFormatterFunction):
    name = 'list_count_field'
    # NOTE(review): the documented signature takes one parameter
    # (field_name) but arg_count is 0. Since the function is implemented
    # in-line in the formatter this may be unused — confirm against the
    # formatter's argument checking.
    arg_count = 0
    category = 'List manipulation'
    __doc__ = doc = _("list_count_field(field_name) -- returns the count of items "
                      "in the field with the lookup name 'field_name'. The field "
                      "must be multi-valued such as authors or tags, otherwise "
                      "the function raises an error. This function is much faster "
                      "than list_count() because it operates directly on calibre "
                      "data without converting it to a string first. "
                      "Example: {}").format("list_count_field('tags')")

    def evaluate(self, formatter, kwargs, mi, locals, *args):
        # The list_count_field function is implemented in-line in the formatter
        raise NotImplementedError()
# The canonical registry of all built-in template functions. Instances (not
# classes) are stored; each registers itself under its .name. Keep this list
# alphabetical by class name when adding new functions.
_formatter_builtins = [
    BuiltinAdd(), BuiltinAnd(), BuiltinApproximateFormats(), BuiltinArguments(),
    BuiltinAssign(),
    BuiltinAuthorLinks(), BuiltinAuthorSorts(), BuiltinBookCount(),
    BuiltinBookValues(), BuiltinBooksize(),
    BuiltinCapitalize(), BuiltinCharacter(), BuiltinCheckYesNo(), BuiltinCeiling(),
    BuiltinCmp(), BuiltinConnectedDeviceName(), BuiltinConnectedDeviceUUID(), BuiltinContains(),
    BuiltinCount(), BuiltinCurrentLibraryName(), BuiltinCurrentLibraryPath(),
    BuiltinCurrentVirtualLibraryName(), BuiltinDateArithmetic(),
    BuiltinDaysBetween(), BuiltinDivide(), BuiltinEval(),
    BuiltinExtraFileNames(), BuiltinExtraFileSize(), BuiltinExtraFileModtime(),
    BuiltinFieldListCount(), BuiltinFirstNonEmpty(), BuiltinField(), BuiltinFieldExists(),
    BuiltinFinishFormatting(), BuiltinFirstMatchingCmp(), BuiltinFloor(),
    BuiltinFormatDate(), BuiltinFormatDateField(), BuiltinFormatNumber(), BuiltinFormatsModtimes(),
    BuiltinFormatsPaths(), BuiltinFormatsSizes(), BuiltinFractionalPart(),
    BuiltinGetLink(),
    BuiltinGetNote(), BuiltinGlobals(), BuiltinHasCover(), BuiltinHasExtraFiles(),
    BuiltinHasNote(), BuiltinHumanReadable(), BuiltinIdentifierInList(),
    BuiltinIfempty(), BuiltinIsDarkMode(), BuiltinLanguageCodes(), BuiltinLanguageStrings(),
    BuiltinInList(), BuiltinIsMarked(), BuiltinListCountMatching(),
    BuiltinListDifference(), BuiltinListEquals(), BuiltinListIntersection(),
    BuiltinListitem(), BuiltinListJoin(), BuiltinListRe(),
    BuiltinListReGroup(), BuiltinListRemoveDuplicates(), BuiltinListSort(),
    BuiltinListSplit(), BuiltinListUnion(),BuiltinLookup(),
    BuiltinLowercase(), BuiltinMod(), BuiltinMultiply(), BuiltinNot(), BuiltinOndevice(),
    BuiltinOr(), BuiltinPrint(), BuiltinRatingToStars(), BuiltinRange(),
    BuiltinRawField(), BuiltinRawList(),
    BuiltinRe(), BuiltinReGroup(), BuiltinRound(), BuiltinSelect(), BuiltinSeriesSort(),
    BuiltinSetGlobals(), BuiltinShorten(), BuiltinStrcat(), BuiltinStrcatMax(),
    BuiltinStrcmp(), BuiltinStrcmpcase(), BuiltinStrInList(), BuiltinStrlen(), BuiltinSubitems(),
    BuiltinSublist(),BuiltinSubstr(), BuiltinSubtract(), BuiltinSwapAroundArticles(),
    BuiltinSwapAroundComma(), BuiltinSwitch(), BuiltinSwitchIf(),
    BuiltinTemplate(), BuiltinTest(), BuiltinTitlecase(), BuiltinToday(),
    BuiltinToHex(), BuiltinTransliterate(), BuiltinUppercase(), BuiltinUrlsFromIdentifiers(),
    BuiltinUserCategories(), BuiltinVirtualLibraries(), BuiltinAnnotationCount()
]
class FormatterUserFunction(FormatterFunction):
    """A user-defined template function or stored template.

    Instances are built by compile_user_function() from the four-element
    preference list [name, doc, arg_count, program_text].
    """

    def __init__(self, name, doc, arg_count, program_text, object_type):
        # object_type: a StoredObjectType discriminating python function /
        # GPM template / python template.
        self.object_type = object_type
        self.name = name
        self.doc = doc
        self.arg_count = arg_count
        # The original source text, kept so the function can be re-saved
        # and compared for changes.
        self.program_text = program_text
        self.cached_compiled_text = None
        # Keep this for external code compatibility. Set it to True if we have a
        # python template function, otherwise false. This might break something
        # if the code depends on stored templates being in GPM.
        self.is_python = True if object_type is StoredObjectType.PythonFunction else False

    def to_pref(self):
        # Inverse of the stored-preference format consumed by
        # compile_user_function().
        return [self.name, self.doc, self.arg_count, self.program_text]
# Matches the run of leading tab characters on a line; used by
# compile_user_function() to normalize tabs to spaces.
tabs = re.compile(r'^\t*')
def function_object_type(thing):
    """Classify *thing* as one of the StoredObjectType values.

    *thing* may be an already-compiled FormatterUserFunction, a stored
    preference list (whose 4th element is the program text), or the program
    text itself. Raises ValueError for unrecognizable program text.
    """
    if isinstance(thing, FormatterUserFunction):
        return thing.object_type
    text = thing[3] if isinstance(thing, list) else thing
    # The leading keyword of the source determines the object type.
    for prefix, object_type in (
            ('def', StoredObjectType.PythonFunction),
            ('program', StoredObjectType.StoredGPMTemplate),
            ('python', StoredObjectType.StoredPythonTemplate)):
        if text.startswith(prefix):
            return object_type
    raise ValueError('Unknown program type in formatter function pref')
def function_pref_name(pref):
    """Return the function name from a stored-function preference entry
    (the first element of the [name, doc, arg_count, text] list)."""
    name = pref[0]
    return name
def compile_user_function(name, doc, arg_count, eval_func):
    """Turn stored program text into a FormatterUserFunction instance.

    Stored templates (GPM or python) are wrapped directly. Python functions
    (source starting with 'def') are indented into a generated subclass of
    FormatterUserFunction and exec()d so the 'def evaluate...' text becomes
    the class's evaluate method.
    """
    typ = function_object_type(eval_func)
    if typ is not StoredObjectType.PythonFunction:
        # Stored templates need no compilation; the program text is
        # interpreted by the formatter at call time.
        return FormatterUserFunction(name, doc, arg_count, eval_func, typ)

    def replace_func(mo):
        # Each leading tab becomes four spaces so indentation is uniform.
        return mo.group().replace('\t', '    ')

    # Re-indent the user source by four spaces so it nests inside the
    # generated class body below.
    func = '    ' + '    \n'.join([tabs.sub(replace_func, line)
                                   for line in eval_func.splitlines()])
    prog = '''
from calibre.utils.formatter_functions import FormatterUserFunction
from calibre.utils.formatter_functions import formatter_functions
class UserFunction(FormatterUserFunction):
''' + func
    locals_ = {}
    if DEBUG and tweaks.get('enable_template_debug_printing', False):
        print(prog)
    # NOTE: exec of user-supplied source; user template functions are
    # trusted local configuration.
    exec(prog, locals_)
    cls = locals_['UserFunction'](name, doc, arg_count, eval_func, typ)
    return cls
def compile_user_template_functions(funcs):
    """Compile stored user template function prefs into a dict mapping
    function name -> compiled FormatterUserFunction.

    A function that fails to compile is logged and skipped so that one bad
    stored template cannot prevent the rest from loading.
    """
    compiled_funcs = {}
    for func in funcs:
        try:
            # Force a name conflict to test the logic
            # if func[0] == 'myFunc2':
            #     func[0] = 'myFunc3'

            # Compile the function so that the tab processing is done on the
            # source. This helps ensure that if the function already is defined
            # then white space differences don't cause them to compare differently
            cls = compile_user_function(*func)
            cls.object_type = function_object_type(func)
            compiled_funcs[cls.name] = cls
        except Exception:
            try:
                func_name = func[0]
            except Exception:
                func_name = 'Unknown'
            prints('**** Compilation errors in user template function "%s" ****' % func_name)
            traceback.print_exc(limit=10)
            # Bug fix: the closing message had mismatched quoting
            # ('in %s "****"'); quote the name to match the opening message.
            prints('**** End compilation errors in "%s" ****' % func_name)
    return compiled_funcs
def load_user_template_functions(library_uuid, funcs, precompiled_user_functions=None):
    """Register the user template functions for *library_uuid*, replacing any
    previously registered set. If *precompiled_user_functions* is supplied it
    is used as-is; otherwise *funcs* (stored prefs) are compiled first."""
    unload_user_template_functions(library_uuid)
    compiled_funcs = precompiled_user_functions or compile_user_template_functions(funcs)
    formatter_functions().register_functions(library_uuid, list(compiled_funcs.values()))
def unload_user_template_functions(library_uuid):
    # Remove all user template functions previously registered for this library.
    formatter_functions().unregister_functions(library_uuid)
| 121,472 | Python | .py | 2,410 | 39.219917 | 125 | 0.585434 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,148 | run_tests.py | kovidgoyal_calibre/src/calibre/utils/run_tests.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import functools
import importlib
import importlib.resources
import os
import unittest
from calibre.utils.monotonic import monotonic
# True when running under a CI system (CI=true in the environment).
is_ci = os.environ.get('CI', '').lower() == 'true'
def no_endl(f):
    """Wrap the bound method *f* so that, for the duration of each call, its
    owner's ``stream.writeln`` is replaced by ``stream.write`` — i.e. output
    produced inside the call does not end with a newline. The original
    ``writeln`` is always restored afterwards."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        owner = f.__self__
        saved_writeln = owner.stream.writeln
        owner.stream.writeln = owner.stream.write
        try:
            return f(*args, **kwargs)
        finally:
            owner.stream.writeln = saved_writeln
    return wrapper
class TestResult(unittest.TextTestResult):
    """TextTestResult that appends per-test wall clock times to the output
    line and, after a fully successful run, prints the slowest tests."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.start_time = {}
        # Wrap each outcome reporter so it does not emit a trailing newline;
        # stopTest() then finishes the line with the elapsed time.
        for x in ('Success', 'Error', 'Failure', 'Skip', 'ExpectedFailure', 'UnexpectedSuccess'):
            x = 'add' + x
            setattr(self, x, no_endl(getattr(self, x)))
        self.times = {}

    def startTest(self, test):
        # Record when the test started so stopTest() can compute elapsed time.
        self.start_time[test] = monotonic()
        return super().startTest(test)

    def stopTest(self, test):
        # Suppress the superclass's newline while it reports, then append the
        # elapsed time and terminate the line ourselves.
        orig = self.stream.writeln
        self.stream.writeln = self.stream.write
        super().stopTest(test)
        elapsed = monotonic()
        # If startTest was never seen for this test, elapsed becomes 0.
        elapsed -= self.start_time.get(test, elapsed)
        self.times[test] = elapsed
        self.stream.writeln = orig
        self.stream.writeln(' [%.1f s]' % elapsed)

    def stopTestRun(self):
        super().stopTestRun()
        if self.wasSuccessful():
            # Report up to the three slowest tests, slowest first.
            tests = sorted(self.times, key=self.times.get, reverse=True)
            slowest = [f'{t.id()} [{self.times[t]:.1f} s]' for t in tests[:3]]
            if len(slowest) > 1:
                self.stream.writeln('\nSlowest tests: %s' % ' '.join(slowest))
def find_tests_in_package(package, excludes=('main.py',)):
    """Collect unittest suites from every module inside *package*.

    File names in *excludes* (and their compiled '.pyc' twins) are skipped.
    Each module is imported exactly once even if both the .py and .pyc forms
    are present, and its tests are gathered with the default loader.
    """
    skip = set(excludes) | {name + 'c' for name in excludes}
    suites = []
    seen_stems = set()
    for entry in importlib.resources.files(package).iterdir():
        fname = entry.name
        if fname in skip or not (fname.endswith('.py') or fname.endswith('.pyc')):
            continue
        # De-duplicate foo.py / foo.pyc pairs by their stem.
        stem = fname.rpartition('.')[0]
        if stem in seen_stems:
            continue
        seen_stems.add(stem)
        module = importlib.import_module(package + '.' + fname.partition('.')[0])
        suites.append(unittest.defaultTestLoader.loadTestsFromModule(module))
    return unittest.TestSuite(suites)
def itertests(suite):
stack = [suite]
while stack:
suite = stack.pop()
for test in suite:
if isinstance(test, unittest.TestSuite):
stack.append(test)
continue
if test.__class__.__name__ == 'ModuleImportFailure':
raise Exception('Failed to import a test module: %s' % test)
yield test
def init_env():
    # Reset global calibre state (tweaks, field metadata, CSS serialization)
    # so tests run against a known-default environment.
    from calibre.ebooks.metadata.book.base import reset_field_metadata
    from calibre.ebooks.oeb.polish.utils import setup_css_parser_serialization
    from calibre.utils.config_base import reset_tweaks_to_default
    reset_tweaks_to_default()
    reset_field_metadata()
    setup_css_parser_serialization()
def filter_tests(suite, test_ok):
    """Return a new TestSuite containing only the tests of *suite* for which
    test_ok(test) is true, with duplicates removed (first occurrence kept)."""
    kept = unittest.TestSuite()
    seen = set()
    for candidate in itertests(suite):
        if test_ok(candidate) and candidate not in seen:
            kept.addTest(candidate)
            seen.add(candidate)
    return kept
def filter_tests_by_name(suite, *names):
    """Keep only tests whose method name is in *names*; the 'test_' prefix
    may be omitted from the supplied names."""
    wanted = {n if n.startswith('test_') else 'test_' + n for n in names}
    return filter_tests(suite, lambda test: test._testMethodName in wanted)
def remove_tests_by_name(suite, *names):
    """Drop tests whose method name is in *names*; the 'test_' prefix may be
    omitted from the supplied names."""
    unwanted = {n if n.startswith('test_') else 'test_' + n for n in names}
    return filter_tests(suite, lambda test: test._testMethodName not in unwanted)
def filter_tests_by_module(suite, *names):
    """Keep only tests defined in modules whose unqualified (trailing) name
    appears in *names*."""
    wanted = frozenset(names)

    def keep(test):
        module_tail = test.__class__.__module__.rpartition('.')[-1]
        return module_tail in wanted

    return filter_tests(suite, keep)
def run_tests(find_tests, verbosity=4):
    """Command line entry point: collect tests via *find_tests*, optionally
    narrow them to the name given on the command line, and run them.

    A name starting with '.' selects individual test methods; any other name
    selects a whole test module.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'name', nargs='?', default=None,
        help='The name of the test to run, for example: writing.WritingTest.many_many_basic or .many_many_basic for a shortcut')
    args = parser.parse_args()
    tests = find_tests()
    if args.name:
        if args.name.startswith('.'):
            tests = filter_tests_by_name(tests, args.name[1:])
        else:
            tests = filter_tests_by_module(tests, args.name)
        if not tests._tests:
            raise SystemExit('No test named %s found' % args.name)
    # run_cli is defined elsewhere in this module. Output buffering is turned
    # off when a single test was named so its prints remain visible.
    run_cli(tests, verbosity, buffer=not args.name)
class TestImports(unittest.TestCase):
    """Smoke test that imports every python module in the source tree."""

    def base_check(self, base, exclude_packages, exclude_modules):
        """Import every .py module under the directory *base*, skipping
        *exclude_modules* and anything inside *exclude_packages*; return the
        number of modules imported."""
        import importlib
        import_base = os.path.dirname(base)
        count = 0
        for root, dirs, files in os.walk(base):
            # Only descend into real packages (directories with __init__.py).
            for d in tuple(dirs):
                if not os.path.isfile(os.path.join(root, d, '__init__.py')):
                    dirs.remove(d)
            for fname in files:
                module_name, ext = os.path.splitext(fname)
                if ext != '.py':
                    continue
                # Translate the file path into a dotted module name relative
                # to the directory containing *base*.
                path = os.path.join(root, module_name)
                relpath = os.path.relpath(path, import_base).replace(os.sep, '/')
                full_module_name = '.'.join(relpath.split('/'))
                if full_module_name.endswith('.__init__'):
                    full_module_name = full_module_name.rpartition('.')[0]
                if full_module_name in exclude_modules or ('.' in full_module_name and full_module_name.rpartition('.')[0] in exclude_packages):
                    continue
                importlib.import_module(full_module_name)
                count += 1
        return count

    def test_import_of_all_python_modules(self):
        # Exclude modules that only import on specific platforms.
        from calibre.constants import isbsd, islinux, ismacos, iswindows
        exclude_packages = {'calibre.devices.mtp.unix.upstream'}
        exclude_modules = set()
        if not iswindows:
            exclude_modules |= {'calibre.utils.iphlpapi', 'calibre.utils.open_with.windows', 'calibre.devices.winusb'}
            exclude_packages |= {'calibre.utils.winreg', 'calibre.utils.windows'}
        if not ismacos:
            exclude_modules.add('calibre.utils.open_with.osx')
        if not islinux:
            exclude_modules |= {
                'calibre.linux', 'calibre.gui2.tts.speechd',
                'calibre.utils.linux_trash', 'calibre.utils.open_with.linux',
                'calibre.gui2.linux_file_dialogs',
            }
        if 'SKIP_SPEECH_TESTS' in os.environ:
            exclude_packages.add('calibre.gui2.tts')
        if not isbsd:
            exclude_modules.add('calibre.devices.usbms.hal')
        d = os.path.dirname
        # SRC is the src/ directory three levels above this file.
        SRC = d(d(d(os.path.abspath(__file__))))
        self.assertGreater(self.base_check(os.path.join(SRC, 'odf'), exclude_packages, exclude_modules), 10)
        base = os.path.join(SRC, 'calibre')
        self.assertGreater(self.base_check(base, exclude_packages, exclude_modules), 1000)
        # Sanity check a couple of bundled third-party imports as well.
        import calibre.web.feeds.feedparser as f
        del f
        from calibre.ebooks.markdown import Markdown
        del Markdown
def find_tests(which_tests=None, exclude_tests=None):
    '''
    Assemble the top-level calibre test suite.

    :param which_tests: optional collection of category names; when given,
        only the named categories are included
    :param exclude_tests: optional collection of category names to skip
    :return: a unittest.TestSuite of all selected tests
    '''
    from calibre.constants import iswindows
    ans = []
    a = ans.append
    def ok(x):
        # include a category only if it passes both the which/exclude filters
        return (not which_tests or x in which_tests) and (not exclude_tests or x not in exclude_tests)
    # NOTE: each branch below imports a local name (frequently also called
    # find_tests) that shadows the previous one; this shadowing is deliberate.
    if ok('build'):
        from calibre.test_build import find_tests
        a(find_tests(only_build=True))
    if ok('srv'):
        from calibre.srv.tests.main import find_tests
        a(find_tests())
    if ok('db'):
        from calibre.db.tests.main import find_tests
        a(find_tests())
    if ok('polish'):
        from calibre.ebooks.oeb.polish.tests.main import find_tests
        a(find_tests())
    if ok('opf'):
        from calibre.ebooks.metadata.opf2 import suite
        a(suite())
        from calibre.ebooks.metadata.opf3_test import suite
        a(suite())
    if ok('css'):
        from tinycss.tests.main import find_tests
        a(find_tests())
        from calibre.ebooks.oeb.normalize_css import test_normalization
        a(test_normalization(return_tests=True))
        from calibre.ebooks.css_transform_rules import test
        a(test(return_tests=True))
        from calibre.ebooks.html_transform_rules import test
        a(test(return_tests=True))
        from css_selectors.tests import find_tests
        a(find_tests())
    if ok('docx'):
        from calibre.ebooks.docx.fields import test_parse_fields
        a(test_parse_fields(return_tests=True))
        from calibre.ebooks.docx.writer.utils import test_convert_color
        a(test_convert_color(return_tests=True))
    if ok('cfi'):
        from calibre.ebooks.epub.cfi.tests import find_tests
        a(find_tests())
    if ok('matcher'):
        from calibre.utils.matcher import test
        a(test(return_tests=True))
    if ok('scraper'):
        from calibre.scraper.test_fetch_backend import find_tests
        a(find_tests())
    if ok('icu'):
        from calibre.utils.icu_test import find_tests
        a(find_tests())
    if ok('smartypants'):
        from calibre.utils.smartypants import run_tests
        a(run_tests(return_tests=True))
    if ok('ebooks'):
        from calibre.ebooks.metadata.rtf import find_tests
        a(find_tests())
        from calibre.ebooks.metadata.html import find_tests
        a(find_tests())
        from calibre.utils.xml_parse import find_tests
        a(find_tests())
        from calibre.gui2.viewer.annotations import find_tests
        a(find_tests())
        from calibre.ebooks.html_entities import find_tests
        a(find_tests())
        from calibre.spell.dictionary import find_tests
        a(find_tests())
    if ok('misc'):
        from calibre.ebooks.html.input import find_tests
        a(find_tests())
        from calibre.ebooks.metadata.test_author_sort import find_tests
        a(find_tests())
        from calibre.ebooks.metadata.tag_mapper import find_tests
        a(find_tests())
        from calibre.ebooks.metadata.author_mapper import find_tests
        a(find_tests())
        from calibre.utils.shared_file import find_tests
        a(find_tests())
        from calibre.utils.test_lock import find_tests
        a(find_tests())
        from calibre.utils.search_query_parser_test import find_tests
        a(find_tests())
        from calibre.utils.html2text import find_tests
        a(find_tests())
        from calibre.utils.shm import find_tests
        a(find_tests())
        from calibre.library.comments import find_tests
        a(find_tests())
        from calibre.ebooks.compression.palmdoc import find_tests
        a(find_tests())
        from calibre.gui2.viewer.convert_book import find_tests
        a(find_tests())
        from calibre.utils.hyphenation.test_hyphenation import find_tests
        a(find_tests())
        from calibre.live import find_tests
        a(find_tests())
        from calibre.utils.copy_files_test import find_tests
        a(find_tests())
        if iswindows:
            from calibre.utils.windows.wintest import find_tests
            a(find_tests())
        a(unittest.defaultTestLoader.loadTestsFromTestCase(TestImports))
    if ok('dbcli'):
        from calibre.db.cli.tests import find_tests
        a(find_tests())
    tests = unittest.TestSuite(ans)
    return tests
def run_test(test_name, verbosity=4, buffer=False):
    # Entry point used by: calibre-debug -t test_name
    # '@category' restricts to a whole category, '.name' selects a single
    # test method, 'all' runs everything, anything else selects by module.
    which_tests = (test_name[1:],) if test_name.startswith('@') else None
    tests = find_tests(which_tests)
    if test_name != 'all' and not test_name.startswith('@'):
        if test_name.startswith('.'):
            tests = filter_tests_by_module(tests, test_name[1:])
        else:
            tests = filter_tests_by_name(tests, test_name)
    if not tests._tests:
        raise SystemExit(f'No test named {test_name} found')
    run_cli(tests, verbosity, buffer=buffer)
def run_cli(suite, verbosity=4, buffer=True):
    # Run *suite* with a text runner and terminate the process with exit
    # code 0 on success, 1 on failure.
    r = unittest.TextTestRunner
    # NOTE(review): this assigns resultclass on the TextTestRunner *class*,
    # not an instance, so it affects any later runners in this process.
    # TestResult and is_ci are module globals defined earlier in this file.
    r.resultclass = unittest.TextTestResult if verbosity < 2 else TestResult
    init_env()
    result = r(verbosity=verbosity, buffer=buffer and not is_ci).run(suite)
    rc = 0 if result.wasSuccessful() else 1
    if is_ci:
        # for some reason interpreter shutdown hangs probably some non-daemonic
        # thread
        os._exit(rc)
    else:
        raise SystemExit(rc)
| 12,614 | Python | .py | 305 | 32.760656 | 144 | 0.627446 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,149 | file_type_icons.py | kovidgoyal_calibre/src/calibre/utils/file_type_icons.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
# Map of lower-case file extension -> file-type icon name. 'default' is the
# fallback for unknown extensions; several extensions intentionally share one
# icon (e.g. prc/azw/mobi/pobi all use 'mobi', html variants use 'html').
EXT_MAP = {
    'default' : 'unknown',
    'dir'     : 'dir',
    'zero'    : 'zero',
    'jpeg'    : 'jpeg',
    'jpg'     : 'jpeg',
    'gif'     : 'gif',
    'png'     : 'png',
    'bmp'     : 'bmp',
    'cbz'     : 'cbz',
    'cbr'     : 'cbr',
    'svg'     : 'svg',
    'html'    : 'html',
    'htmlz'   : 'html',
    'htm'     : 'html',
    'xhtml'   : 'html',
    'xhtm'    : 'html',
    'lit'     : 'lit',
    'lrf'     : 'lrf',
    'lrx'     : 'lrx',
    'pdf'     : 'pdf',
    'pdr'     : 'zero',
    'rar'     : 'rar',
    'zip'     : 'zip',
    'txt'     : 'txt',
    'text'    : 'txt',
    'prc'     : 'mobi',
    'azw'     : 'mobi',
    'mobi'    : 'mobi',
    'pobi'    : 'mobi',
    'mbp'     : 'zero',
    'azw1'    : 'tpz',
    'azw2'    : 'azw2',
    'azw3'    : 'azw3',
    'azw4'    : 'pdf',
    'tpz'     : 'tpz',
    'tan'     : 'zero',
    'epub'    : 'epub',
    'fb2'     : 'fb2',
    'rtf'     : 'rtf',
    'odt'     : 'odt',
    'snb'     : 'snb',
    'djv'     : 'djvu',
    'djvu'    : 'djvu',
    'xps'     : 'xps',
    'oxps'    : 'xps',
    'docx'    : 'docx',
    'opml'    : 'opml',
    }
| 1,221 | Python | .py | 51 | 19.137255 | 71 | 0.350515 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,150 | copy_files.py | kovidgoyal_calibre/src/calibre/utils/copy_files.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2023, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
import stat
import time
from collections import defaultdict
from contextlib import suppress
from typing import Callable, Dict, List, Set, Tuple, Union
from calibre.constants import filesystem_encoding, iswindows
from calibre.utils.filenames import make_long_path_useable, samefile, windows_hardlink
if iswindows:
from calibre_extensions import winutil
WINDOWS_SLEEP_FOR_RETRY_TIME = 2  # seconds
# Unique identity of a file on Windows, as returned by winutil.get_file_id().
# Presumably (volume serial, file index high, file index low) — confirm in winutil.
WindowsFileId = Tuple[int, int, int]
class UnixFileCopier:
    '''Batch file copier/renamer for Unix. Files are registered first, then
    copied or renamed in one pass; hardlinks are tried before real copies.'''
    def __init__(self, delete_all=False, allow_move=False):
        self.delete_all = delete_all
        self.allow_move = allow_move
        self.copy_map: Dict[str, str] = {}
    def register(self, path: str, dest: str) -> None:
        # Queue a src -> dest copy/rename; nothing happens until
        # copy_all()/rename_all() is called.
        self.copy_map[path] = dest
    def register_folder(self, path: str) -> None:
        # No folder locking is needed on Unix
        pass
    def __enter__(self) -> None:
        pass
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Source files are removed only when the with-block exited cleanly
        if self.delete_all and exc_val is None:
            self.delete_all_source_files()
    def rename_all(self) -> None:
        for src_path, dest_path in self.copy_map.items():
            os.replace(src_path, dest_path)
    def copy_all(self) -> None:
        for src_path, dest_path in self.copy_map.items():
            with suppress(OSError):
                # Try a hardlink first; on success the `continue` below skips
                # the fallback copy. If os.link raises OSError, suppress()
                # aborts this with-block and we fall through to copy2().
                os.link(src_path, dest_path, follow_symlinks=False)
                try:
                    shutil.copystat(src_path, dest_path, follow_symlinks=False)
                except OSError:
                    # Failure to copy metadata is not critical
                    import traceback
                    traceback.print_exc()
                continue
            with suppress(shutil.SameFileError):
                shutil.copy2(src_path, dest_path, follow_symlinks=False)
    def delete_all_source_files(self) -> None:
        for src_path in self.copy_map:
            with suppress(FileNotFoundError):
                os.unlink(src_path)
def windows_lock_path_and_callback(path: str, f: Callable) -> None:
    '''Hold an open Windows handle on *path* (file or folder) while calling
    f(), then close the handle even if f() raises.'''
    if os.path.isdir(path):
        open_flags = winutil.FILE_FLAG_BACKUP_SEMANTICS
    else:
        open_flags = winutil.FILE_FLAG_SEQUENTIAL_SCAN
    handle = winutil.create_file(make_long_path_useable(path), winutil.GENERIC_READ, 0, winutil.OPEN_EXISTING, open_flags)
    try:
        f()
    finally:
        handle.close()
class WindowsFileCopier:
    '''
    Locks all files before starting the copy, ensuring other processes cannot interfere
    '''
    def __init__(self, delete_all=False, allow_move=False):
        self.delete_all = delete_all
        self.allow_move = allow_move
        # path -> Windows file id, and file id -> all registered paths with
        # that id (hardlinks to the same file share a file id)
        self.path_to_fileid_map : Dict[str, WindowsFileId] = {}
        self.fileid_to_paths_map: Dict[WindowsFileId, Set[str]] = defaultdict(set)
        self.path_to_handle_map: Dict[str, 'winutil.Handle'] = {}
        self.folder_to_handle_map: Dict[str, 'winutil.Handle'] = {}
        self.folders: List[str] = []
        self.copy_map: Dict[str, str] = {}
    def register(self, path: str, dest: str) -> None:
        with suppress(OSError):
            # Ensure the file is not read-only
            winutil.set_file_attributes(make_long_path_useable(path), winutil.FILE_ATTRIBUTE_NORMAL)
        self.path_to_fileid_map[path] = winutil.get_file_id(make_long_path_useable(path))
        self.copy_map[path] = dest
    def register_folder(self, path: str) -> None:
        with suppress(OSError):
            # Ensure the folder is not read-only
            winutil.set_file_attributes(make_long_path_useable(path), winutil.FILE_ATTRIBUTE_NORMAL)
        self.path_to_fileid_map[path] = winutil.get_file_id(make_long_path_useable(path))
        self.folders.append(path)
    def _open_file(self, path: str, retry_on_sharing_violation: bool = True, is_folder: bool = False) -> 'winutil.Handle':
        # Open *path* for reading (and DELETE access when delete_all), so no
        # other process can modify it while the copy is in progress.
        flags = winutil.FILE_FLAG_BACKUP_SEMANTICS if is_folder else winutil.FILE_FLAG_SEQUENTIAL_SCAN
        access_flags = winutil.GENERIC_READ
        if self.delete_all:
            access_flags |= winutil.DELETE
        share_flags = winutil.FILE_SHARE_DELETE if self.allow_move else 0
        try:
            return winutil.create_file(make_long_path_useable(path), access_flags, share_flags, winutil.OPEN_EXISTING, flags)
        except OSError as e:
            if e.winerror == winutil.ERROR_SHARING_VIOLATION:
                # The file could be a hardlink to an already opened file,
                # in which case we use the same handle for both files
                fileid = self.path_to_fileid_map[path]
                for other in self.fileid_to_paths_map[fileid]:
                    if other in self.path_to_handle_map:
                        return self.path_to_handle_map[other]
                if retry_on_sharing_violation:
                    # one retry after a short sleep, in case the other opener
                    # releases the file quickly
                    time.sleep(WINDOWS_SLEEP_FOR_RETRY_TIME)
                    return self._open_file(path, False, is_folder)
            raise
    def open_all_handles(self) -> None:
        # Build the hardlink map first, then lock all files and folders
        for path, file_id in self.path_to_fileid_map.items():
            self.fileid_to_paths_map[file_id].add(path)
        for src in self.copy_map:
            self.path_to_handle_map[src] = self._open_file(src)
        for path in self.folders:
            self.folder_to_handle_map[path] = self._open_file(path, is_folder=True)
    def __enter__(self) -> None:
        try:
            self.open_all_handles()
        except OSError:
            # do not leak handles opened before the failure
            self.close_all_handles()
            raise
    def close_all_handles(self, delete_on_close: bool = False) -> None:
        while self.path_to_handle_map:
            path, h = next(iter(self.path_to_handle_map.items()))
            if delete_on_close:
                winutil.set_file_handle_delete_on_close(h, True)
            h.close()
            self.path_to_handle_map.pop(path)
        # folders are closed in reverse registration order so children are
        # handled before their parents
        while self.folder_to_handle_map:
            path, h = next(reversed(self.folder_to_handle_map.items()))
            if delete_on_close:
                try:
                    winutil.set_file_handle_delete_on_close(h, True)
                except OSError as err:
                    # Ignore dir not empty errors. Should never happen but we
                    # ignore it as the UNIX semantics are to not delete folders
                    # during __exit__ anyway and we dont want to leak the handle.
                    if err.winerror != winutil.ERROR_DIR_NOT_EMPTY:
                        raise
            h.close()
            self.folder_to_handle_map.pop(path)
    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Sources are deleted (via delete-on-close) only on a clean exit
        self.close_all_handles(delete_on_close=self.delete_all and exc_val is None)
    def copy_all(self) -> None:
        for src_path, dest_path in self.copy_map.items():
            with suppress(Exception):
                # hardlink first; on success `continue` skips the manual copy
                windows_hardlink(make_long_path_useable(src_path), make_long_path_useable(dest_path))
                shutil.copystat(make_long_path_useable(src_path), make_long_path_useable(dest_path), follow_symlinks=False)
                continue
            # fallback: stream the locked handle's contents into dest
            handle = self.path_to_handle_map[src_path]
            winutil.set_file_pointer(handle, 0, winutil.FILE_BEGIN)
            with open(make_long_path_useable(dest_path), 'wb') as f:
                sz = 1024 * 1024
                while True:
                    raw = winutil.read_file(handle, sz)
                    if not raw:
                        break
                    f.write(raw)
            shutil.copystat(make_long_path_useable(src_path), make_long_path_useable(dest_path), follow_symlinks=False)
    def rename_all(self) -> None:
        for src_path, dest_path in self.copy_map.items():
            winutil.move_file(make_long_path_useable(src_path), make_long_path_useable(dest_path))
def get_copier(delete_all=False, allow_move=False) -> Union[UnixFileCopier, WindowsFileCopier]:
    '''Return the platform-appropriate file copier instance.'''
    copier_class = WindowsFileCopier if iswindows else UnixFileCopier
    return copier_class(delete_all, allow_move)
def rename_files(src_to_dest_map: Dict[str, str]) -> None:
    ' Rename a bunch of files. On Windows all files are locked before renaming so no other process can interfere. '
    copier = get_copier(allow_move=True)
    for src, dest in src_to_dest_map.items():
        copier.register(src, dest)
    with copier:
        copier.rename_all()
def copy_files(src_to_dest_map: Dict[str, str], delete_source: bool = False) -> None:
    '''Copy the given src -> dest file mapping, skipping pairs that already
    refer to the same file; optionally delete sources after a clean copy.'''
    copier = get_copier(delete_source)
    for src, dest in src_to_dest_map.items():
        if samefile(src, dest):
            continue
        copier.register(src, dest)
    with copier:
        copier.copy_all()
def identity_transform(src_path: str, dest_path: str) -> str:
    '''Default destination-filename transform: keep *dest_path* unchanged.'''
    return dest_path
def register_folder_recursively(
    src: str, copier: Union[UnixFileCopier, WindowsFileCopier], dest_dir: str,
    transform_destination_filename: Callable[[str, str], str] = identity_transform,
    read_only: bool = False
) -> None:
    # Walk *src* and register every folder and file with *copier*, mapping it
    # to the mirrored path under *dest_dir*. When read_only is False the
    # destination folder skeleton is created up-front (with copied metadata)
    # and, on Unix, symlinks are re-created directly rather than registered.
    def dest_from_entry(dirpath: str, x: str) -> str:
        path = os.path.join(dirpath, x)
        rel = os.path.relpath(path, src)
        return os.path.join(dest_dir, rel)
    def raise_error(e: OSError) -> None:
        # make os.walk propagate errors instead of silently skipping entries
        raise e
    copier.register_folder(src)
    for (dirpath, dirnames, filenames) in os.walk(src, onerror=raise_error):
        for d in dirnames:
            path = os.path.join(dirpath, d)
            dest = dest_from_entry(dirpath, d)
            if not read_only:
                os.makedirs(make_long_path_useable(dest), exist_ok=True)
                shutil.copystat(make_long_path_useable(path), make_long_path_useable(dest), follow_symlinks=False)
            copier.register_folder(path)
        for f in filenames:
            path = os.path.join(dirpath, f)
            dest = dest_from_entry(dirpath, f)
            dest = transform_destination_filename(path, dest)
            if not iswindows and not read_only:
                s = os.stat(path, follow_symlinks=False)
                if stat.S_ISLNK(s.st_mode):
                    # preserve the symlink itself instead of copying its target
                    link_dest = os.readlink(path)
                    os.symlink(link_dest, dest)
                    continue
            copier.register(path, dest)
def windows_check_if_files_in_use(src_folder: str) -> None:
    '''Try to lock every file under src_folder and release immediately;
    raises OSError if some other process has one of them open.'''
    probe = get_copier()
    register_folder_recursively(src_folder, probe, os.getcwd(), read_only=True)
    with probe:
        pass
def copy_tree(
    src: str, dest: str,
    transform_destination_filename: Callable[[str, str], str] = identity_transform,
    delete_source: bool = False
) -> None:
    '''
    Copy all files in the tree over. On Windows locks all files before starting the copy to ensure that
    other processes cannot interfere once the copy starts. Uses hardlinks, falling back to actual file copies
    only if hardlinking fails.
    '''
    if iswindows:
        # normalize byte paths to text on Windows
        if isinstance(src, bytes):
            src = src.decode(filesystem_encoding)
        if isinstance(dest, bytes):
            dest = dest.decode(filesystem_encoding)
    dest = os.path.abspath(dest)
    os.makedirs(dest, exist_ok=True)
    if samefile(src, dest):
        raise ValueError(f'Cannot copy tree if the source and destination are the same: {src!r} == {dest!r}')
    dest_dir = dest
    copier = get_copier(delete_source)
    register_folder_recursively(src, copier, dest_dir, transform_destination_filename)
    with copier:
        copier.copy_all()
    # copier only deletes the registered files on clean exit; the now-empty
    # source folder tree is removed here
    if delete_source and os.path.exists(make_long_path_useable(src)):
        try:
            shutil.rmtree(make_long_path_useable(src))
        except FileNotFoundError:
            # some kind of delayed folder removal on handle close on Windows? Or exists() is succeeding but
            # rmdir() is failing? Or something deleted the
            # folder between the call to exists() and rmtree(). Windows is full
            # of nanny programs that keep users safe from "themselves".
            pass
        except OSError:
            if iswindows:
                # retry once, some other process may have had a transient lock
                time.sleep(WINDOWS_SLEEP_FOR_RETRY_TIME)
                shutil.rmtree(make_long_path_useable(src))
            else:
                raise
| 12,046 | Python | .py | 249 | 38.281124 | 125 | 0.62666 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,151 | test_lock.py | kovidgoyal_calibre/src/calibre/utils/test_lock.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from threading import Thread
from calibre.constants import cache_dir, iswindows
from calibre.utils.lock import ExclusiveFile, create_single_instance_mutex, unix_open
from calibre.utils.tdir_in_cache import clean_tdirs_in, is_tdir_locked, retry_lock_tdir, tdir_in_cache, tdirs_in, unlock_file
from polyglot.builtins import iteritems, native_string_type
def FastFailEF(name):
    '''An ExclusiveFile that polls quickly and gives up almost immediately,
    so tests that expect lock contention fail fast.'''
    return ExclusiveFile(name, sleep_time=0.01, timeout=0.05)
class Other(Thread):
    # Helper thread that attempts to acquire the 'testsp' exclusive file and
    # records whether it succeeded in self.locked.
    daemon = True
    locked = None  # None until run() completes, then True/False
    def run(self):
        try:
            with FastFailEF('testsp'):
                self.locked = True
        except OSError:
            # timed out waiting for the lock
            self.locked = False
def run_worker(mod, func, **kw):
    # Launch a calibre worker child process that runs mod:func, selected via
    # the CALIBRE_SIMPLE_WORKER environment variable. Extra keyword args are
    # forwarded to subprocess.Popen.
    try:
        # in a development environment, run the worker from the setup dir
        exe = [sys.executable, os.path.join(sys.setup_dir, 'run-calibre-worker.py')]
    except AttributeError:
        # installed calibre: use the bundled calibre-parallel executable
        exe = [
            os.path.join(
                os.path.dirname(os.path.abspath(sys.executable)),
                'calibre-parallel' + ('.exe' if iswindows else '')
            )
        ]
    env = kw.get('env', os.environ.copy())
    env['CALIBRE_SIMPLE_WORKER'] = mod + ':' + func
    if iswindows:
        kw['creationflags'] = subprocess.CREATE_NO_WINDOW
        # NOTE(review): kw['env'] appears to be attached only on Windows here;
        # on other platforms the modified env reaches the child only when the
        # caller passed env= (mutated in place above) — verify this is intended.
        kw['env'] = {native_string_type(k): native_string_type(v)
                     for k, v in iteritems(env)} # windows needs bytes in env
    return subprocess.Popen(exe, **kw)
class IPCLockTest(unittest.TestCase):
    '''Tests for inter-process locking: ExclusiveFile, the single-instance
    mutex and tdir-in-cache locking, using real child worker processes.'''
    def setUp(self):
        # run each test in a private temp dir that also serves as cache_dir
        self.cwd = os.getcwd()
        self.tdir = tempfile.mkdtemp()
        os.chdir(self.tdir)
        self.original_cache_dir = cache_dir()
        cache_dir.ans = self.tdir
    def tearDown(self):
        cache_dir.ans = self.original_cache_dir
        os.chdir(self.cwd)
        # retry removal: child processes may briefly keep files open
        for i in range(100):
            try:
                shutil.rmtree(self.tdir)
                break
            except OSError:
                time.sleep(0.1)
    def test_exclusive_file_same_process(self):
        fname = 'testsp'
        with ExclusiveFile(fname):
            # a second acquisition in this process must fail fast
            ef = FastFailEF(fname)
            self.assertRaises(EnvironmentError, ef.__enter__)
            # and so must an acquisition from another thread
            t = Other()
            t.start(), t.join()
            self.assertIs(t.locked, False)
        if not iswindows:
            import fcntl
            # the lock file descriptor must be close-on-exec
            with unix_open(fname) as f:
                self.assertEqual(
                    1, fcntl.fcntl(f.fileno(), fcntl.F_GETFD) & fcntl.FD_CLOEXEC
                )
    def run_other_ef_op(self, clean_exit):
        # Acquire the lock in a child process, verify this process cannot get
        # it, then release it either cleanly or by killing the child.
        child = run_worker('calibre.utils.test_lock', 'other1')
        try:
            while child.poll() is None:
                if os.path.exists('ready'):
                    break
                time.sleep(0.01)
            self.assertIsNone(child.poll(), 'child died without creating ready dir')
            ef = FastFailEF('test')
            self.assertRaises(EnvironmentError, ef.__enter__)
            if clean_exit:
                os.mkdir('quit')
            else:
                child.kill()
            self.assertIsNotNone(child.wait())
            # lock must be obtainable again after the child is gone
            with ExclusiveFile('test', timeout=3):
                pass
        finally:
            if child.poll() is None:
                child.kill()
            child.wait()
    def test_exclusive_file_other_process_clean(self):
        self.run_other_ef_op(True)
    def test_exclusive_file_other_process_kill(self):
        self.run_other_ef_op(False)
    def test_single_instance(self):
        release_mutex = create_single_instance_mutex('test')
        # while held, children must fail to acquire (other2 exits 0 then)
        for i in range(5):
            child = run_worker('calibre.utils.test_lock', 'other2')
            self.assertEqual(child.wait(), 0)
        release_mutex()
        # once released, children succeed (other2 exits 1 then)
        for i in range(5):
            child = run_worker('calibre.utils.test_lock', 'other2')
            self.assertEqual(child.wait(), 1)
        child = run_worker('calibre.utils.test_lock', 'other3')
        while not os.path.exists('ready'):
            time.sleep(0.01)
        child.kill()
        child.wait()
        # mutex must be reacquirable after the holder was killed
        release_mutex = create_single_instance_mutex('test')
        self.assertIsNotNone(release_mutex)
        release_mutex()
    def test_tdir_in_cache_dir(self):
        child = run_worker('calibre.utils.test_lock', 'other4')
        tdirs = []
        while not tdirs:
            time.sleep(0.05)
            gl = retry_lock_tdir('t', sleep=0.05)
            try:
                tdirs = list(tdirs_in('t'))
            finally:
                unlock_file(gl)
        self.assertTrue(is_tdir_locked(tdirs[0]))
        c2 = run_worker('calibre.utils.test_lock', 'other5')
        self.assertEqual(c2.wait(), 0)
        self.assertTrue(is_tdir_locked(tdirs[0]))
        child.kill(), child.wait()
        # after the owner dies the tdir remains but is unlocked ...
        self.assertTrue(os.path.exists(tdirs[0]))
        self.assertFalse(is_tdir_locked(tdirs[0]))
        # ... and clean_tdirs_in removes it, leaving only the lock file
        clean_tdirs_in('t')
        self.assertFalse(os.path.exists(tdirs[0]))
        self.assertEqual(os.listdir('t'), ['tdir-lock'])
def other1():
    '''Worker: hold the 'test' exclusive file, signal readiness via a
    'ready' dir and keep holding until a 'quit' dir appears.'''
    with ExclusiveFile('test'):
        os.mkdir('ready')
        while not os.path.exists('quit'):
            time.sleep(0.02)
def other2():
    '''Worker: exit 0 when the single-instance mutex is already held by
    someone else, 1 when this process managed to acquire (and release) it.'''
    release_mutex = create_single_instance_mutex('test')
    if release_mutex is None:
        raise SystemExit(0)
    release_mutex()
    raise SystemExit(1)
def other3():
    '''Worker: acquire the single-instance mutex, signal readiness and then
    sleep so the parent can kill this process while the mutex is held.'''
    release = create_single_instance_mutex('test')
    try:
        os.mkdir('ready')
        time.sleep(30)
    finally:
        if release is not None:
            release()
def other4():
    '''Worker: create a locked tdir under the cache dir and keep it locked
    by sleeping until killed.'''
    cache_dir.ans = os.getcwd()
    tdir_in_cache('t')
    time.sleep(30)
def other5():
    '''Worker: exit 1 unless a fresh tdir can be created in the cache dir.'''
    cache_dir.ans = os.getcwd()
    created = tdir_in_cache('t')
    if not os.path.isdir(created):
        raise SystemExit(1)
def find_tests():
    '''Return this module's IPC lock tests as a suite.'''
    return unittest.defaultTestLoader.loadTestsFromTestCase(IPCLockTest)
def run_tests():
    '''Run this module's tests via calibre's standard test runner.'''
    from calibre.utils.run_tests import run_tests as runner
    runner(find_tests)
# Allow running this module's tests directly from the command line
if __name__ == '__main__':
    run_tests()
| 6,089 | Python | .py | 171 | 26.719298 | 125 | 0.593506 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,152 | speedups.py | kovidgoyal_calibre/src/calibre/utils/speedups.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import os
class ReadOnlyFileBuffer:
''' A zero copy implementation of a file like object. Uses memoryviews for efficiency. '''
def __init__(self, raw: bytes, name: str = ''):
self.sz, self.mv = len(raw), (raw if isinstance(raw, memoryview) else memoryview(raw))
self.pos = 0
self.name: str = name
def tell(self):
return self.pos
def read(self, n: int | None = None) -> memoryview:
if n is None:
ans = self.mv[self.pos:]
self.pos = self.sz
return ans
ans = self.mv[self.pos:self.pos+n]
self.pos = min(self.pos + n, self.sz)
return ans
def seek(self, pos, whence=os.SEEK_SET):
if whence == os.SEEK_SET:
self.pos = pos
elif whence == os.SEEK_END:
self.pos = self.sz + pos
else:
self.pos += pos
self.pos = max(0, min(self.pos, self.sz))
return self.pos
def seekable(self):
return True
def getvalue(self):
return self.mv
def close(self):
pass
def svg_path_to_painter_path(d):
    '''
    Convert a tiny SVG 1.2 path into a QPainterPath.

    :param d: The value of the d attribute of an SVG <path> tag
    '''
    from qt.core import QPainterPath
    cmd = last_cmd = b''
    path = QPainterPath()
    moveto_abs, moveto_rel = b'M', b'm'
    closepath1, closepath2 = b'Z', b'z'
    lineto_abs, lineto_rel = b'L', b'l'
    hline_abs, hline_rel = b'H', b'h'
    vline_abs, vline_rel = b'V', b'v'
    curveto_abs, curveto_rel = b'C', b'c'
    smoothcurveto_abs, smoothcurveto_rel = b'S', b's'
    quadcurveto_abs, quadcurveto_rel = b'Q', b'q'
    smoothquadcurveto_abs, smoothquadcurveto_rel = b'T', b't'
    # Store the last parsed values
    # x/y = end position
    # x1/y1 and x2/y2 = bezier control points
    x = y = x1 = y1 = x2 = y2 = 0
    if isinstance(d, str):
        d = d.encode('ascii')
    d = d.replace(b',', b' ').replace(b'\n', b' ')
    end = len(d)
    pos = [0]  # single-element list so nested closures can mutate the cursor
    def read_byte():
        p = pos[0]
        pos[0] += 1
        return d[p:p+1]
    def parse_float():
        # NOTE(review): accepts only '-', '.' and digits, so exponent notation
        # (e.g. 1e-5) is not supported — presumably fine for the tiny SVG
        # subset this handles; confirm if inputs ever use exponents.
        chars = []
        while pos[0] < end:
            c = read_byte()
            if c == b' ' and not chars:
                continue
            if c in b'-.0123456789':
                chars.append(c)
            else:
                break
        if not chars:
            raise ValueError('Premature end of input while expecting a number')
        return float(b''.join(chars))
    def parse_floats(num, x_offset=0, y_offset=0):
        # yield *num* floats, offsetting alternate values by x/y (used to
        # turn relative coordinates into absolute ones)
        for i in range(num):
            val = parse_float()
            yield val + (x_offset if i % 2 == 0 else y_offset)
    repeated_command = None
    while pos[0] < end:
        last_cmd = cmd
        cmd = read_byte() if repeated_command is None else repeated_command
        repeated_command = None
        if cmd == b' ':
            continue
        if cmd == moveto_abs:
            x, y = parse_float(), parse_float()
            path.moveTo(x, y)
        elif cmd == moveto_rel:
            x += parse_float()
            y += parse_float()
            path.moveTo(x, y)
        elif cmd == closepath1 or cmd == closepath2:
            path.closeSubpath()
        elif cmd == lineto_abs:
            x, y = parse_floats(2)
            path.lineTo(x, y)
        elif cmd == lineto_rel:
            x += parse_float()
            y += parse_float()
            path.lineTo(x, y)
        elif cmd == hline_abs:
            x = parse_float()
            path.lineTo(x, y)
        elif cmd == hline_rel:
            x += parse_float()
            path.lineTo(x, y)
        elif cmd == vline_abs:
            y = parse_float()
            path.lineTo(x, y)
        elif cmd == vline_rel:
            y += parse_float()
            path.lineTo(x, y)
        elif cmd == curveto_abs:
            x1, y1, x2, y2, x, y = parse_floats(6)
            path.cubicTo(x1, y1, x2, y2, x, y)
        elif cmd == curveto_rel:
            x1, y1, x2, y2, x, y = parse_floats(6, x, y)
            path.cubicTo(x1, y1, x2, y2, x, y)
        elif cmd == smoothcurveto_abs:
            # first control point is the reflection of the previous cubic's
            # second control point, per the SVG spec
            if last_cmd == curveto_abs or last_cmd == curveto_rel or last_cmd == smoothcurveto_abs or last_cmd == smoothcurveto_rel:
                x1 = 2 * x - x2
                y1 = 2 * y - y2
            else:
                x1, y1 = x, y
            x2, y2, x, y = parse_floats(4)
            path.cubicTo(x1, y1, x2, y2, x, y)
        elif cmd == smoothcurveto_rel:
            if last_cmd == curveto_abs or last_cmd == curveto_rel or last_cmd == smoothcurveto_abs or last_cmd == smoothcurveto_rel:
                x1 = 2 * x - x2
                y1 = 2 * y - y2
            else:
                x1, y1 = x, y
            x2, y2, x, y = parse_floats(4, x, y)
            path.cubicTo(x1, y1, x2, y2, x, y)
        elif cmd == quadcurveto_abs:
            x1, y1, x, y = parse_floats(4)
            path.quadTo(x1, y1, x, y)
        elif cmd == quadcurveto_rel:
            x1, y1, x, y = parse_floats(4, x, y)
            path.quadTo(x1, y1, x, y)
        elif cmd == smoothquadcurveto_abs:
            # control point is the reflection of the previous quadratic's
            if last_cmd in (quadcurveto_abs, quadcurveto_rel, smoothquadcurveto_abs, smoothquadcurveto_rel):
                x1 = 2 * x - x1
                y1 = 2 * y - y1
            else:
                x1, y1 = x, y
            x, y = parse_floats(2)
            path.quadTo(x1, y1, x, y)
        elif cmd == smoothquadcurveto_rel:
            if last_cmd in (quadcurveto_abs, quadcurveto_rel, smoothquadcurveto_abs, smoothquadcurveto_rel):
                x1 = 2 * x - x1
                y1 = 2 * y - y1
            else:
                x1, y1 = x, y
            x, y = parse_floats(2, x, y)
            path.quadTo(x1, y1, x, y)
        elif cmd in b'-.0123456789':
            # A new number begins
            # In this case, multiple parameters tuples are specified for the last command
            # We rewind to reparse data correctly
            pos[0] -= 1
            # Handle extra parameters
            if last_cmd == moveto_abs:
                repeated_command = cmd = lineto_abs
            elif last_cmd == moveto_rel:
                repeated_command = cmd = lineto_rel
            elif last_cmd in (closepath1, closepath2):
                raise ValueError('Extra parameters after close path command')
            elif last_cmd in (
                lineto_abs, lineto_rel, hline_abs, hline_rel, vline_abs,
                vline_rel, curveto_abs, curveto_rel,smoothcurveto_abs,
                smoothcurveto_rel, quadcurveto_abs, quadcurveto_rel,
                smoothquadcurveto_abs, smoothquadcurveto_rel
            ):
                repeated_command = cmd = last_cmd
        else:
            raise ValueError('Unknown path command: %s' % cmd)
    return path
| 6,925 | Python | .py | 182 | 27.467033 | 132 | 0.522399 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,153 | copy_files_test.py | kovidgoyal_calibre/src/calibre/utils/copy_files_test.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2023, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
import tempfile
import time
import unittest
from contextlib import closing
from calibre import walk
from calibre.constants import iswindows
from .copy_files import copy_tree, rename_files
from .filenames import nlinks_file
if iswindows:
from calibre_extensions import winutil
class TestCopyFiles(unittest.TestCase):
    '''Tests for copy_tree()/rename_files(): hardlinking, symlink
    preservation, filename transforms, source deletion and the Windows
    locked-file failure paths.'''
    ae = unittest.TestCase.assertEqual
    def setUp(self):
        # layout: tdir/base, tdir/src/one, tdir/src/sub/a (+ src/link on Unix)
        self.tdir = t = tempfile.mkdtemp()
        def wf(*parts):
            d = os.path.join(t, *parts)
            os.makedirs(os.path.dirname(d), exist_ok=True)
            with open(d, 'w') as f:
                f.write(' '.join(parts))
        wf('base'), wf('src/one'), wf('src/sub/a')
        if not iswindows:
            os.symlink('sub/a', os.path.join(t, 'src/link'))
    def tearDown(self):
        if self.tdir:
            try:
                shutil.rmtree(self.tdir)
            except OSError:
                # Windows may briefly hold handles open; retry once
                time.sleep(1)
                shutil.rmtree(self.tdir)
            self.tdir = ''
    def s(self, *path):
        # absolute path inside the source tree
        return os.path.abspath(os.path.join(self.tdir, 'src', *path))
    def d(self, *path):
        # absolute path inside the destination tree
        return os.path.abspath(os.path.join(self.tdir, 'dest', *path))
    def file_data_eq(self, path):
        with open(self.s(path)) as src, open(self.d(path)) as dest:
            self.ae(src.read(), dest.read())
    def reset(self):
        self.tearDown()
        self.setUp()
    def test_renaming_of_files(self):
        for name in 'one two'.split():
            with open(os.path.join(self.tdir, name), 'w') as f:
                f.write(name)
        renames = {os.path.join(self.tdir, k): os.path.join(self.tdir, v) for k, v in {'one': 'One', 'two': 'three'}.items()}
        rename_files(renames)
        contents = set(os.listdir(self.tdir)) - {'base', 'src'}
        self.ae(contents, {'One', 'three'})
    def test_copying_of_trees(self):
        src, dest = self.s(), self.d()
        copy_tree(src, dest)
        eq = self.file_data_eq
        eq('one')
        eq('sub/a')
        if not iswindows:
            eq('link')
            self.ae(os.readlink(self.d('link')), 'sub/a')
            # copies should be hardlinks to the originals
            self.ae(nlinks_file(self.s('one')), 2)
        self.ae(set(os.listdir(self.tdir)), {'src', 'dest', 'base'})
        self.reset()
        src, dest = self.s(), self.d()
        copy_tree(src, dest, delete_source=True)
        self.ae(set(os.listdir(self.tdir)), {'dest', 'base'})
        self.ae(nlinks_file(self.d('one')), 1)
        self.assertFalse(os.path.exists(src))
        def transform_destination_filename(src, dest):
            return dest + '.extra'
        self.reset()
        src, dest = self.s(), self.d()
        copy_tree(src, dest, transform_destination_filename=transform_destination_filename)
        with open(self.d('sub/a.extra')) as d:
            self.ae(d.read(), 'src/sub/a')
        if not iswindows:
            self.ae(os.readlink(self.d('link.extra')), 'sub/a')
        self.reset()
        src, dest = self.s(), self.d()
        if iswindows:
            # verify that a locked file/folder aborts the copy without
            # deleting any source files
            os.mkdir(self.s('lockdir'))
            open(self.s('lockdir/lockfile'), 'w').close()
            before = frozenset(walk(src))
            with open(self.s('lockdir/lockfile')) as locked:
                locked
                self.assertRaises(IOError, copy_tree, src, dest, delete_source=True)
                self.ae(set(os.listdir(self.d())), {'sub', 'lockdir'})
                self.assertFalse(tuple(walk(self.d())))
                self.ae(before, frozenset(walk(src)), 'Source files were deleted despite there being an error')
            shutil.rmtree(dest)
            os.mkdir(dest)
            h = winutil.create_file(
                self.s('lockdir'), winutil.GENERIC_READ|winutil.GENERIC_WRITE|winutil.DELETE,
                winutil.FILE_SHARE_READ|winutil.FILE_SHARE_WRITE|winutil.FILE_SHARE_DELETE, winutil.OPEN_EXISTING,
                winutil.FILE_FLAG_BACKUP_SEMANTICS)
            with closing(h):
                self.assertRaises(IOError, copy_tree, src, dest, delete_source=True)
                self.ae(set(os.listdir(self.d())), {'sub', 'lockdir'})
                self.assertFalse(tuple(walk(self.d())))
                self.ae(before, frozenset(walk(src)), 'Source files were deleted despite there being an error')
def find_tests():
    '''Return the copy_files tests as a suite for calibre's test runner.'''
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestCopyFiles)
| 4,470 | Python | .py | 103 | 33.737864 | 125 | 0.589832 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,154 | logging.py | kovidgoyal_calibre/src/calibre/utils/logging.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
# A simplified logging system
import io
import sys
import traceback
from contextlib import suppress
from functools import partial
from threading import Lock
from calibre.prints import prints
from polyglot.builtins import as_unicode
# Numeric log levels, in increasing order of severity; Log.filter_level
# suppresses messages below the configured level.
DEBUG = 0
INFO = 1
WARN = 2
ERROR = 3
class Stream:
    '''Base class wrapping an output stream; all writing goes through
    calibre's prints() so unicode is handled uniformly.'''

    def __init__(self, stream=None):
        # Default to an in-memory buffer when no stream is supplied.
        self.stream = io.StringIO() if stream is None else stream
        self.encoding = getattr(self.stream, 'encoding', None) or 'utf-8'

    def write(self, text):
        self._prints(text, end='')

    def flush(self):
        # Don't fail if we were logging to a pipe and it got closed
        with suppress(BrokenPipeError):
            self.stream.flush()

    def prints(self, level, *args, **kwargs):
        # Base implementation ignores the level; subclasses use it for color.
        self._prints(*args, **kwargs)

    def _prints(self, *args, **kwargs):
        prints(*args, file=self.stream, **kwargs)
# Sentinel default meaning "bind to sys.stdout at construction time"; lets
# streams distinguish an explicitly passed stream from the default.
stdout_sentinel = object()
class ANSIStream(Stream):
    '''Stream that colors each message by log level using ANSI escapes.'''

    def __init__(self, stream=stdout_sentinel):
        Stream.__init__(self, sys.stdout if stream is stdout_sentinel else stream)
        # Terminal color per level; None means "leave uncolored".
        self.color = {DEBUG: 'green', INFO: None, WARN: 'yellow', ERROR: 'red'}

    def prints(self, level, *args, **kwargs):
        from calibre.utils.terminal import ColoredStream
        with ColoredStream(self.stream, self.color[level]):
            self._prints(*args, **kwargs)
class FileStream(Stream):
    '''Stream wrapper for plain files: no coloring or markup, level ignored.'''

    def __init__(self, stream=None):
        Stream.__init__(self, stream)

    def prints(self, level, *args, **kwargs):
        self._prints(*args, **kwargs)
class HTMLStream(Stream):
    '''Stream that wraps every message in a colored HTML <span>.'''

    # Opening span tag per log level; ``normal`` closes it.
    color = {
        DEBUG: '<span style="color:green">',
        INFO: '<span>',
        WARN: '<span style="color:blue">',
        ERROR: '<span style="color:red">'
    }
    normal = '</span>'

    def __init__(self, stream=stdout_sentinel):
        Stream.__init__(self, sys.stdout if stream is stdout_sentinel else stream)

    def prints(self, level, *args, **kwargs):
        # Open the span for this level, emit the message, close the span.
        self._prints(self.color[level], end='')
        self._prints(*args, **kwargs)
        self._prints(self.normal, end='')
class UnicodeHTMLStream(HTMLStream):
    '''HTMLStream variant that accumulates output in memory instead of
    writing to a stream: parallel lists of HTML fragments (``data``) and
    plain text (``plain_text``).'''

    def __init__(self):
        # Deliberately does not call HTMLStream.__init__: there is no
        # underlying stream, only the in-memory buffers set up by clear().
        self.clear()

    def flush(self):
        pass

    def prints(self, level, *args, **kwargs):
        col = self.color[level]
        # Only emit a new <span> when the level (and hence color) changes,
        # closing the previous span first.
        if col != self.last_col:
            if self.data:
                self.data.append(self.normal)
            self.data.append(col)
            self.last_col = col
        sep = kwargs.get('sep', ' ')
        end = kwargs.get('end', '\n')
        for arg in args:
            arg = as_unicode(arg)
            self.data.append(arg+sep)
            self.plain_text.append(arg+sep)
        self.data.append(end)
        self.plain_text.append(end)

    def clear(self):
        # Reset both buffers and the color state.
        self.data = []
        self.plain_text = []
        self.last_col = self.color[INFO]

    @property
    def html(self):
        # Close the trailing span only if something was written.
        end = self.normal if self.data else ''
        return ''.join(self.data) + end

    def dump(self):
        # Serializable snapshot of the full state; see load()/append_dump().
        return [self.data, self.plain_text, self.last_col]

    def load(self, dump):
        self.data, self.plain_text, self.last_col = dump

    def append_dump(self, dump):
        d, p, lc = dump
        self.data.extend(d)
        self.plain_text.extend(p)
        self.last_col = lc
class Log:
    '''Simple multi-output logger. Messages below ``filter_level`` are
    dropped; everything else is fanned out to each stream in ``outputs``.'''

    # Mirror the module-level constants so callers can use Log.DEBUG etc.
    DEBUG = DEBUG
    INFO = INFO
    WARN = WARN
    ERROR = ERROR

    def __init__(self, level=INFO):
        self.filter_level = level
        default_output = ANSIStream()
        self.outputs = [default_output]
        # Level-specific conveniences; all of them flush after printing.
        self.debug = partial(self.print_with_flush, DEBUG)
        self.info = partial(self.print_with_flush, INFO)
        self.warn = self.warning = partial(self.print_with_flush, WARN)
        self.error = partial(self.print_with_flush, ERROR)

    def prints(self, level, *args, **kwargs):
        # Like print_with_flush() but without the flush.
        if level < self.filter_level:
            return
        for output in self.outputs:
            output.prints(level, *args, **kwargs)

    def print_with_flush(self, level, *args, **kwargs):
        if level < self.filter_level:
            return
        for output in self.outputs:
            output.prints(level, *args, **kwargs)
        self.flush()

    def exception(self, *args, **kwargs):
        # Log the message at ERROR and the current traceback at DEBUG.
        limit = kwargs.pop('limit', None)
        self.print_with_flush(ERROR, *args, **kwargs)
        self.print_with_flush(DEBUG, traceback.format_exc(limit))

    def __call__(self, *args, **kwargs):
        self.info(*args, **kwargs)

    def __enter__(self):
        # Using the log as a context manager silences it: the filter is
        # raised above ERROR for the duration of the with block.
        self.orig_filter_level = self.filter_level
        self.filter_level = self.ERROR + 100

    def __exit__(self, *args):
        self.filter_level = self.orig_filter_level

    def flush(self):
        for o in self.outputs:
            if hasattr(o, 'flush'):
                o.flush()

    def close(self):
        for o in self.outputs:
            if hasattr(o, 'close'):
                o.close()
class DevNull(Log):
    '''A logger that discards everything: ERROR filter level and an empty
    output list, so even errors go nowhere.'''

    def __init__(self):
        Log.__init__(self, level=Log.ERROR)
        self.outputs = []
class ThreadSafeLog(Log):
    '''Log subclass safe for concurrent use from multiple threads.'''

    # Level used for the traceback emitted by exception().
    exception_traceback_level = Log.DEBUG

    def __init__(self, level=Log.INFO):
        Log.__init__(self, level=level)
        self._lock = Lock()

    def prints(self, *args, **kwargs):
        with self._lock:
            Log.prints(self, *args, **kwargs)

    def print_with_flush(self, *args, **kwargs):
        with self._lock:
            Log.print_with_flush(self, *args, **kwargs)

    def exception(self, *args, **kwargs):
        limit = kwargs.pop('limit', None)
        with self._lock:
            # Call the base class directly: going through
            # self.print_with_flush would try to re-acquire the
            # non-reentrant lock and deadlock.
            Log.print_with_flush(self, ERROR, *args, **kwargs)
            Log.print_with_flush(self, self.exception_traceback_level, traceback.format_exc(limit))
class ThreadSafeWrapper(Log):
    '''Thread-safe Log mirroring an existing Log's level and outputs.

    Note: only a shallow copy of ``other_log.outputs`` is taken, so the
    underlying stream objects are shared with the wrapped log.
    '''

    def __init__(self, other_log):
        Log.__init__(self, level=other_log.filter_level)
        self.outputs = list(other_log.outputs)
        self._lock = Lock()

    def prints(self, *args, **kwargs):
        with self._lock:
            Log.prints(self, *args, **kwargs)

    def print_with_flush(self, *args, **kwargs):
        with self._lock:
            Log.print_with_flush(self, *args, **kwargs)
class GUILog(ThreadSafeLog):
    '''
    Logs in HTML and plain text as unicode. Ideal for display in a GUI context.
    '''

    def __init__(self):
        ThreadSafeLog.__init__(self, level=self.DEBUG)
        self.outputs = [UnicodeHTMLStream()]

    def clear(self):
        sink = self.outputs[0]
        sink.clear()

    @property
    def html(self):
        sink = self.outputs[0]
        return sink.html

    @property
    def plain_text(self):
        sink = self.outputs[0]
        return ''.join(sink.plain_text)

    def dump(self):
        sink = self.outputs[0]
        return sink.dump()

    def load(self, dump):
        sink = self.outputs[0]
        return sink.load(dump)

    def append_dump(self, dump):
        sink = self.outputs[0]
        return sink.append_dump(dump)
# Shared module-level logger (writes to stdout via ANSIStream).
default_log = Log()
| 7,071 | Python | .py | 201 | 27.328358 | 99 | 0.596938 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,155 | mem.py | kovidgoyal_calibre/src/calibre/utils/mem.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Measure memory usage of the current process.
The key function is memory() which returns the current memory usage in MB.
You can pass a number to memory and it will be subtracted from the returned
value.
'''
import gc
import os
def get_memory():
    'Return memory usage in bytes'
    # See https://pythonhosted.org/psutil/#psutil.Process.memory_info
    import psutil
    proc = psutil.Process(os.getpid())
    return proc.memory_info().rss
def memory(since=0.0):
    'Return memory used in MB. The value of since is subtracted from the used memory'
    mb = get_memory() / float(1024 ** 2)
    return mb - since
def gc_histogram():
    """Return per-class counts of objects currently tracked by the garbage
    collector, as a dict mapping type -> count."""
    result = {}
    for o in gc.get_objects():
        t = type(o)
        # dict.get collapses the original two-step lookup into one.
        result[t] = result.get(t, 0) + 1
    return result
def diff_hists(h1, h2):
    """Print the per-type count changes between two gc_histogram() results.

    Only types whose counts differ are printed; increases are shown with an
    explicit '+' (decreases already carry '-').
    """
    for k, old in h1.items():
        # Use .get instead of inserting missing keys: the original mutated
        # the caller's h2 dict as a side effect.
        new = h2.get(k, 0)
        if old == new:
            continue
        delta = new - old
        # Replaces the legacy "cond and a or b" hack with a real conditional.
        sign = '+' if delta > 0 else ''
        print("%s: %d -> %d (%s%d)" % (k, old, new, sign, delta))
| 1,295 | Python | .py | 38 | 29.157895 | 85 | 0.628617 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,156 | html2text.py | kovidgoyal_calibre/src/calibre/utils/html2text.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
from calibre.utils.localization import _
def html2text(html, single_line_break=True, default_image_alt=''):
    '''Convert HTML (str or bytes) to Markdown-ish plain text using the
    html2text package, configured for calibre's conventions (no line
    wrapping, '*' emphasis).'''
    import re
    from html2text import HTML2Text
    if isinstance(html, bytes):
        # Decode with charset sniffing and entity resolution.
        from calibre.ebooks.chardet import xml_to_unicode
        html = xml_to_unicode(html, strip_encoding_pats=True, resolve_entities=True)[0]
    # replace <u> tags with <span> as <u> becomes emphasis in html2text
    html = re.sub(
        r'<\s*(?P<solidus>/?)\s*[uU]\b(?P<rest>[^>]*)>',
        r'<\g<solidus>span\g<rest>>', html)
    h2t = HTML2Text()
    h2t.default_image_alt = default_image_alt or _('Unnamed image')
    h2t.body_width = 0  # disable line wrapping
    h2t.single_line_break = single_line_break
    h2t.emphasis_mark = '*'
    return h2t.handle(html)
def find_tests():
    '''Return a unittest suite pinning html2text()'s expected output.'''
    import unittest

    class TestH2T(unittest.TestCase):

        def test_html2text_behavior(self):
            # Maps input HTML to the exact expected text output.
            for src, expected in {
                '<u>test</U>': 'test\n',
                '<i>test</i>': '*test*\n',
                '<a href="http://else.where/other">other</a>': '[other](http://else.where/other)\n',
                '<img src="test.jpeg">': '\n',
                '<a href="#t">test</a> <span id="t">dest</span>': 'test dest\n',
                '<>a': '<>a\n',
                '<p>a<p>b': 'a\nb\n',
            }.items():
                self.assertEqual(html2text(src), expected)

    return unittest.defaultTestLoader.loadTestsFromTestCase(TestH2T)
| 1,570 | Python | .py | 34 | 37.352941 | 100 | 0.593054 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,157 | https.py | kovidgoyal_calibre/src/calibre/utils/https.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import ssl
from contextlib import closing
from calibre import get_proxies
from calibre.utils.resources import get_path as P
from polyglot import http_client
from polyglot.urllib import urlsplit
class HTTPError(ValueError):
    '''Raised when a server replies with an HTTP status that is neither OK
    nor a handled redirect. Carries the offending url and status code.'''

    def __init__(self, url, code):
        reason = http_client.responses.get(code, None)
        msg = '%s returned an unsupported http response code: %d (%s)' % (
            url, code, reason)
        ValueError.__init__(self, msg)
        self.code = code
        self.url = url
class HTTPSConnection(http_client.HTTPSConnection):
    '''HTTPSConnection with an ssl context built from an optional CA bundle
    (``cert_file``); with no bundle, certificate verification is disabled.'''

    def __init__(self, *args, **kwargs):
        cafile = kwargs.pop('cert_file', None)
        if cafile is None:
            # No CA bundle: encrypt but do not verify the server cert.
            kwargs['context'] = ssl._create_unverified_context()
        else:
            kwargs['context'] = ssl.create_default_context(cafile=cafile)
        if kwargs.pop('disable_x509_strict_checking', False):
            # python 3.13 forces VERIFY_X509_STRICT which breaks with the
            # private certificate used for downloads from code.calibre-ebook.com
            kwargs['context'].verify_flags &= ~ssl.VERIFY_X509_STRICT
        else:
            kwargs['context'].verify_flags |= ssl.VERIFY_X509_STRICT
        http_client.HTTPSConnection.__init__(self, *args, **kwargs)
def get_https_resource_securely(
    url, cacerts='calibre-ebook-root-CA.crt', timeout=60, max_redirects=5, ssl_version=None, headers=None, get_response=False):
    '''
    Download the resource pointed to by url using https securely (verify server
    certificate). Ensures that redirects, if any, are also downloaded
    securely. Needs a CA certificates bundle (in PEM format) to verify the
    server's certificates.

    You can pass cacerts=None to download using SSL but without verifying the server certificate.
    '''
    # Strict X.509 checking is only relaxed for calibre's own root CA.
    disable_x509_strict_checking = cacerts == 'calibre-ebook-root-CA.crt'
    cert_file = None
    if cacerts is not None:
        cert_file = P(cacerts, allow_user_override=False)
    p = urlsplit(url)
    if p.scheme != 'https':
        raise ValueError(f'URL {url} scheme must be https, not {p.scheme!r}')

    # If an https (or http) proxy is configured, connect to it instead and
    # tunnel through with CONNECT.
    hostname, port = p.hostname, p.port
    proxies = get_proxies()
    has_proxy = False
    for q in ('https', 'http'):
        if q in proxies:
            try:
                # Split "host:port" from the right so IPv6-ish hosts survive.
                h, po = proxies[q].rpartition(':')[::2]
                po = int(po)
                if h:
                    hostname, port, has_proxy = h, po, True
                    break
            except Exception:
                # Invalid proxy, ignore
                pass

    c = HTTPSConnection(hostname, port, cert_file=cert_file, timeout=timeout, disable_x509_strict_checking=disable_x509_strict_checking)
    if has_proxy:
        c.set_tunnel(p.hostname, p.port)

    with closing(c):
        c.connect()  # This is needed for proxy connections
        path = p.path or '/'
        if p.query:
            path += '?' + p.query
        c.request('GET', path, headers=headers or {})
        response = c.getresponse()
        if response.status in (http_client.MOVED_PERMANENTLY, http_client.FOUND, http_client.SEE_OTHER):
            if max_redirects <= 0:
                raise ValueError('Too many redirects, giving up')
            newurl = response.getheader('Location', None)
            if newurl is None:
                raise ValueError('%s returned a redirect response with no Location header' % url)
            # NOTE(review): a relative Location header would fail the scheme
            # check on the recursive call — confirm servers always send
            # absolute URLs here.
            return get_https_resource_securely(
                newurl, cacerts=cacerts, timeout=timeout, max_redirects=max_redirects-1, get_response=get_response)
        if response.status != http_client.OK:
            raise HTTPError(url, response.status)
        if get_response:
            return response
        return response.read()
if __name__ == '__main__':
    # Manual smoke test: fetch the calibre latest-version file over pinned TLS.
    print(get_https_resource_securely('https://code.calibre-ebook.com/latest'))
| 3,947 | Python | .py | 85 | 37.694118 | 136 | 0.637877 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,158 | formatter.py | kovidgoyal_calibre/src/calibre/utils/formatter.py | '''
Created on 23 Sep 2010
@author: charles
'''
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import numbers
import re
import string
import traceback
from collections import OrderedDict
from functools import partial
from math import modf
from sys import exc_info
from calibre import prints
from calibre.constants import DEBUG
from calibre.ebooks.metadata.book.base import field_metadata
from calibre.utils.config import tweaks
from calibre.utils.formatter_functions import StoredObjectType, formatter_functions, function_object_type, get_database
from calibre.utils.icu import strcmp
from calibre.utils.localization import _
from polyglot.builtins import error_message
class Node:
    '''Base class for all template-language AST nodes.

    Each subclass sets ``node_type`` to one of the NODE_* constants below so
    the interpreter can dispatch on an integer instead of isinstance().
    '''
    NODE_RVALUE = 1
    NODE_IF = 2
    NODE_ASSIGN = 3
    NODE_FUNC = 4
    NODE_COMPARE_STRING = 5
    NODE_COMPARE_NUMERIC = 6
    NODE_CONSTANT = 7
    NODE_FIELD = 8
    NODE_RAW_FIELD = 9
    NODE_CALL_STORED_TEMPLATE = 10
    NODE_ARGUMENTS = 11
    NODE_FIRST_NON_EMPTY = 12
    NODE_FOR = 13
    NODE_GLOBALS = 14
    NODE_SET_GLOBALS = 15
    NODE_CONTAINS = 16
    NODE_BINARY_LOGOP = 17
    NODE_UNARY_LOGOP = 18
    NODE_BINARY_ARITHOP = 19
    NODE_UNARY_ARITHOP = 20
    NODE_PRINT = 21
    NODE_BREAK = 22
    NODE_CONTINUE = 23
    NODE_RETURN = 24
    NODE_CHARACTER = 25
    NODE_STRCAT = 26
    NODE_BINARY_STRINGOP = 27
    NODE_LOCAL_FUNCTION_DEFINE = 28
    NODE_LOCAL_FUNCTION_CALL = 29
    NODE_RANGE = 30
    NODE_SWITCH = 31
    NODE_SWITCH_IF = 32
    NODE_LIST_COUNT_FIELD = 33

    def __init__(self, line_number, name):
        # Template-source line this node was parsed from (for diagnostics).
        self.my_line_number = line_number
        # Human-readable description, presumably shown in error/debug
        # output — usage is outside this chunk.
        self.my_node_name = name

    @property
    def node_name(self):
        return self.my_node_name

    @property
    def line_number(self):
        return self.my_line_number
class IfNode(Node):
    '''AST node for ``if ... then ... [elif ...] [else ...] fi``.'''

    def __init__(self, line_number, condition, then_part, else_part):
        super().__init__(line_number, 'if ...')
        self.node_type = self.NODE_IF
        self.condition = condition
        self.then_part = then_part
        self.else_part = else_part
class ForNode(Node):
    '''AST node for iterating a list-valued field expression.'''

    def __init__(self, line_number, variable, list_field_expr, separator, block):
        super().__init__(line_number, 'for ...:')
        self.node_type = self.NODE_FOR
        self.variable = variable
        self.list_field_expr = list_field_expr
        self.separator = separator
        self.block = block
class RangeNode(Node):
    '''AST node for ``for x in range(start, stop, step, limit)``.'''

    def __init__(self, line_number, variable, start_expr, stop_expr, step_expr, limit_expr, block):
        super().__init__(line_number, 'for ...:')
        self.node_type = self.NODE_RANGE
        self.variable = variable
        self.start_expr = start_expr
        self.stop_expr = stop_expr
        self.step_expr = step_expr
        self.limit_expr = limit_expr
        self.block = block
class BreakNode(Node):
    '''AST node for the ``break`` statement.'''

    def __init__(self, line_number):
        super().__init__(line_number, 'break')
        self.node_type = self.NODE_BREAK
class ContinueNode(Node):
    '''AST node for the ``continue`` statement.'''

    def __init__(self, line_number):
        super().__init__(line_number, 'continue')
        self.node_type = self.NODE_CONTINUE
class ReturnNode(Node):
    '''AST node for ``return expr``.'''

    def __init__(self, line_number, expr):
        super().__init__(line_number, 'return')
        self.node_type = self.NODE_RETURN
        self.expr = expr
class AssignNode(Node):
    '''AST node for assigning ``right`` to the variable named ``left``.'''

    def __init__(self, line_number, left, right):
        super().__init__(line_number, f'assign to {left}')
        self.node_type = self.NODE_ASSIGN
        self.left = left
        self.right = right
class FunctionNode(Node):
    '''AST node for a call to a built-in template function.'''

    def __init__(self, line_number, function_name, expression_list):
        super().__init__(line_number, f'{function_name}()')
        self.node_type = self.NODE_FUNC
        self.name = function_name
        self.expression_list = expression_list
class StoredTemplateCallNode(Node):
    '''AST node for a call to a stored (saved) template.'''

    def __init__(self, line_number, name, function, expression_list):
        super().__init__(line_number, f'call template: {name}()')
        self.node_type = self.NODE_CALL_STORED_TEMPLATE
        self.name = name
        self.function = function  # instance of the definition class
        self.expression_list = expression_list
class LocalFunctionDefineNode(Node):
    '''AST node for a local function definition (``def name(args): ... fed``).'''

    def __init__(self, line_number, function_name, argument_list, block):
        # Add the missing ': ' separator so the node name reads like the
        # other call/define nodes, e.g. 'call template: foo()'. The original
        # produced 'define local functionfoo()'.
        Node.__init__(self, line_number, 'define local function: ' + function_name + '()')
        self.node_type = self.NODE_LOCAL_FUNCTION_DEFINE
        self.name = function_name
        self.argument_list = argument_list
        self.block = block

    def attributes_to_tuple(self):
        # Compact tuple of the definition's salient attributes; consumers of
        # this tuple are outside this chunk.
        return (self.line_number, self.argument_list, self.block)
class LocalFunctionCallNode(Node):
    '''AST node for a call to a locally defined function.'''

    def __init__(self, line_number, name, arguments):
        super().__init__(line_number, f'call local function: {name}()')
        self.node_type = self.NODE_LOCAL_FUNCTION_CALL
        self.name = name
        self.arguments = arguments
class ArgumentsNode(Node):
    '''AST node for the arguments() template function.'''

    def __init__(self, line_number, expression_list):
        super().__init__(line_number, 'arguments()')
        self.node_type = self.NODE_ARGUMENTS
        self.expression_list = expression_list
class GlobalsNode(Node):
    '''AST node for the globals() template function.'''

    def __init__(self, line_number, expression_list):
        super().__init__(line_number, 'globals()')
        self.node_type = self.NODE_GLOBALS
        self.expression_list = expression_list
class SetGlobalsNode(Node):
    '''AST node for the set_globals() template function.'''

    def __init__(self, line_number, expression_list):
        super().__init__(line_number, 'set_globals()')
        self.node_type = self.NODE_SET_GLOBALS
        self.expression_list = expression_list
class StringCompareNode(Node):
    '''AST node for a string comparison infix operator.'''

    def __init__(self, line_number, operator, left, right):
        # Fix typo: was 'comparision: ', inconsistent with the correctly
        # spelled NumericCompareNode ('comparison: ').
        Node.__init__(self, line_number, 'comparison: ' + operator)
        self.node_type = self.NODE_COMPARE_STRING
        self.operator = operator
        self.left = left
        self.right = right
class StringBinaryNode(Node):
    '''AST node for a binary string operator (e.g. '&' concatenation).'''

    def __init__(self, line_number, operator, left, right):
        super().__init__(line_number, f'binary operator: {operator}')
        self.node_type = self.NODE_BINARY_STRINGOP
        self.operator = operator
        self.left = left
        self.right = right
class NumericCompareNode(Node):
    '''AST node for a numeric comparison infix operator.'''

    def __init__(self, line_number, operator, left, right):
        super().__init__(line_number, f'comparison: {operator}')
        self.node_type = self.NODE_COMPARE_NUMERIC
        self.operator = operator
        self.left = left
        self.right = right
class LogopBinaryNode(Node):
    '''AST node for a binary logical operator ('and' / 'or').'''

    def __init__(self, line_number, operator, left, right):
        super().__init__(line_number, f'binary operator: {operator}')
        self.node_type = self.NODE_BINARY_LOGOP
        self.operator = operator
        self.left = left
        self.right = right
class LogopUnaryNode(Node):
    '''AST node for the unary logical operator ('not').'''

    def __init__(self, line_number, operator, expr):
        super().__init__(line_number, f'unary operator: {operator}')
        self.node_type = self.NODE_UNARY_LOGOP
        self.operator = operator
        self.expr = expr
class NumericBinaryNode(Node):
    '''AST node for a binary arithmetic operator (+, -, *, /).'''

    def __init__(self, line_number, operator, left, right):
        super().__init__(line_number, f'binary operator: {operator}')
        self.node_type = self.NODE_BINARY_ARITHOP
        self.operator = operator
        self.left = left
        self.right = right
class NumericUnaryNode(Node):
    '''AST node for a unary arithmetic operator (+x, -x).'''

    def __init__(self, line_number, operator, expr):
        super().__init__(line_number, f'unary operator: {operator}')
        self.node_type = self.NODE_UNARY_ARITHOP
        self.operator = operator
        self.expr = expr
class ConstantNode(Node):
    '''AST node for a literal (string/number) constant.'''

    def __init__(self, line_number, value):
        super().__init__(line_number, f'constant: {value}')
        self.node_type = self.NODE_CONSTANT
        self.value = value
class VariableNode(Node):
    '''AST node for a reference to a template variable.'''

    def __init__(self, line_number, name):
        super().__init__(line_number, f'variable: {name}')
        self.node_type = self.NODE_RVALUE
        self.name = name
class FieldNode(Node):
    '''AST node for field(expr): formatted metadata field lookup.'''

    def __init__(self, line_number, expression):
        super().__init__(line_number, 'field()')
        self.node_type = self.NODE_FIELD
        self.expression = expression
class RawFieldNode(Node):
    '''AST node for raw_field(expr [, default]): unformatted field lookup.'''

    def __init__(self, line_number, expression, default=None):
        super().__init__(line_number, 'raw_field()')
        self.node_type = self.NODE_RAW_FIELD
        self.expression = expression
        self.default = default
class FirstNonEmptyNode(Node):
    '''AST node for the first_non_empty() template function.'''

    def __init__(self, line_number, expression_list):
        super().__init__(line_number, 'first_non_empty()')
        self.node_type = self.NODE_FIRST_NON_EMPTY
        self.expression_list = expression_list
class SwitchNode(Node):
    '''AST node for the switch() template function.'''

    def __init__(self, line_number, expression_list):
        # Fix copy/paste error: this node was mislabelled 'first_non_empty()',
        # which made diagnostics mention the wrong function.
        Node.__init__(self, line_number, 'switch()')
        self.node_type = self.NODE_SWITCH
        self.expression_list = expression_list
class SwitchIfNode(Node):
    '''AST node for the switch_if() template function.'''

    def __init__(self, line_number, expression_list):
        super().__init__(line_number, 'switch_if()')
        self.node_type = self.NODE_SWITCH_IF
        self.expression_list = expression_list
class ContainsNode(Node):
    '''AST node for contains(value, pattern, found_val, not_found_val).'''

    def __init__(self, line_number, arguments):
        super().__init__(line_number, 'contains()')
        self.node_type = self.NODE_CONTAINS
        # The parser guarantees exactly four argument expressions.
        self.value_expression = arguments[0]
        self.test_expression = arguments[1]
        self.match_expression = arguments[2]
        self.not_match_expression = arguments[3]
class PrintNode(Node):
    '''AST node for the print() debugging function.'''

    def __init__(self, line_number, arguments):
        super().__init__(line_number, 'print')
        self.node_type = self.NODE_PRINT
        self.arguments = arguments
class CharacterNode(Node):
    '''AST node for the character() template function.'''

    def __init__(self, line_number, expression):
        super().__init__(line_number, 'character()')
        self.node_type = self.NODE_CHARACTER
        self.expression = expression
class StrcatNode(Node):
    '''AST node for the strcat() template function.'''

    def __init__(self, line_number, expression_list):
        super().__init__(line_number, 'strcat()')
        self.node_type = self.NODE_STRCAT
        self.expression_list = expression_list
class ListCountFieldNode(Node):
    '''AST node for the list_count_field() template function.'''

    def __init__(self, line_number, expression):
        super().__init__(line_number, 'list_count_field()')
        self.node_type = self.NODE_LIST_COUNT_FIELD
        self.expression = expression
class _Parser:
LEX_OP = 1
LEX_ID = 2
LEX_CONST = 3
LEX_EOF = 4
LEX_STRING_INFIX = 5
LEX_NUMERIC_INFIX = 6
LEX_KEYWORD = 7
LEX_NEWLINE = 8
    def error(self, message):
        '''Raise a ValueError for *message*, annotated with the most recently
        consumed token and, when known, the line number.'''
        ln = None
        try:
            # lex_pos has already advanced, so lex_pos-1 is the last token.
            tval = "'" + self.prog[self.lex_pos-1][1] + "'"
        except Exception:
            tval = _('Unknown')
        if self.lex_pos > 0 and self.lex_pos < self.prog_len:
            location = tval
            ln = self.line_number
        else:
            location = _('the end of the program')
        if ln:
            raise ValueError(_('{0}: {1} near {2} on line {3}').format(
                'Formatter', message, location, ln))
        else:
            raise ValueError(_('{0}: {1} near {2}').format(
                'Formatter', message, location))
    def check_eol(self):
        '''Skip any newline tokens at the current position, bumping the line
        counter for each one.'''
        # NOTE(review): this compares the whole token against LEX_NEWLINE, so
        # newline tokens are apparently stored as the bare constant rather
        # than a (type, text) tuple — confirm against the lexer.
        while self.lex_pos < len(self.prog) and self.prog[self.lex_pos] == self.LEX_NEWLINE:
            self.line_number += 1
            self.consume()
def token(self):
self.check_eol()
try:
token = self.prog[self.lex_pos][1]
self.lex_pos += 1
return token
except:
return None
def consume(self):
self.lex_pos += 1
def token_op_is(self, op):
self.check_eol()
try:
token = self.prog[self.lex_pos]
return token[1] == op and token[0] == self.LEX_OP
except:
return False
def token_op_is_string_infix_compare(self):
self.check_eol()
try:
return self.prog[self.lex_pos][0] == self.LEX_STRING_INFIX
except:
return False
def token_op_is_numeric_infix_compare(self):
self.check_eol()
try:
return self.prog[self.lex_pos][0] == self.LEX_NUMERIC_INFIX
except:
return False
    def token_is_newline(self):
        # NOTE(review): like check_eol(), this compares the whole token to
        # LEX_NEWLINE — newline tokens seem to be bare constants; confirm
        # against the lexer.
        return self.lex_pos < len(self.prog) and self.prog[self.lex_pos] == self.LEX_NEWLINE
def token_is_id(self):
self.check_eol()
try:
return self.prog[self.lex_pos][0] == self.LEX_ID
except:
return False
def token_is(self, candidate):
self.check_eol()
try:
token = self.prog[self.lex_pos]
return token[1] == candidate and token[0] == self.LEX_KEYWORD
except:
return False
def token_is_keyword(self):
self.check_eol()
try:
return self.prog[self.lex_pos][0] == self.LEX_KEYWORD
except:
return False
def token_is_constant(self):
self.check_eol()
try:
return self.prog[self.lex_pos][0] == self.LEX_CONST
except:
return False
def token_is_eof(self):
self.check_eol()
try:
return self.prog[self.lex_pos][0] == self.LEX_EOF
except:
return True
def token_text(self):
self.check_eol()
try:
return self.prog[self.lex_pos][1]
except:
return _("'End of program'")
def program(self, parent, funcs, prog):
self.line_number = 1
self.lex_pos = 0
self.parent = parent
self.funcs = funcs
self.func_names = frozenset(set(self.funcs.keys()))
self.prog = prog[0]
self.prog_len = len(self.prog)
self.local_functions = set()
if prog[1] != '':
self.error(_("Failed to scan program. Invalid input '{0}'").format(prog[1]))
tree = self.expression_list()
if not self.token_is_eof():
self.error(_("Expected end of program, found '{0}'").format(self.token_text()))
return tree
def expression_list(self):
expr_list = []
while True:
while self.token_is_newline():
self.line_number += 1
self.consume()
if self.token_is_eof():
break
expr_list.append(self.top_expr())
if self.token_op_is(';'):
self.consume()
else:
break
return expr_list
    def if_expression(self):
        '''Parse ``if cond then ... [elif ...] [else ...] fi``. The leading
        'if'/'elif' keyword is still the current token on entry.'''
        self.consume()
        line_number = self.line_number
        condition = self.top_expr()
        if not self.token_is('then'):
            self.error(_("{0} statement: expected '{1}', "
                         "found '{2}'").format('if', 'then', self.token_text()))
        self.consume()
        then_part = self.expression_list()
        if self.token_is('elif'):
            # 'elif' nests as an IfNode in the else position; the recursive
            # call consumes everything up to and including the final 'fi'.
            return IfNode(line_number, condition, then_part, [self.if_expression(),])
        if self.token_is('else'):
            self.consume()
            else_part = self.expression_list()
        else:
            else_part = None
        if not self.token_is('fi'):
            self.error(_("{0} statement: expected '{1}', "
                         "found '{2}'").format('if', 'fi', self.token_text()))
        self.consume()
        return IfNode(line_number, condition, then_part, else_part)
    def for_expression(self):
        '''Parse ``for var in <list-expr|range(...)> [separator expr]: ... rof``,
        producing a ForNode (list form) or RangeNode (range form).'''
        line_number = self.line_number
        self.consume()
        if not self.token_is_id():
            self.error(_("'{0}' statement: expected an identifier").format('for'))
        variable = self.token()
        if not self.token_is('in'):
            self.error(_("{0} statement: expected '{1}', "
                         "found '{2}'").format('for', 'in', self.token_text()))
        self.consume()
        if self.token_text() == 'range':
            is_list = False
            self.consume()
            if not self.token_op_is('('):
                self.error(_("{0} statement: expected '(', "
                             "found '{1}'").format('for', self.token_text()))
            self.consume()
            # range() defaults: start 0, step 1, no iteration limit. With one
            # argument the parsed expression is the stop value; each extra
            # comma shifts meanings (start, stop[, step[, limit]]).
            start_expr = ConstantNode(line_number, '0')
            step_expr = ConstantNode(line_number, '1')
            limit_expr = None
            stop_expr = self.top_expr()
            if self.token_op_is(','):
                self.consume()
                start_expr = stop_expr
                stop_expr = self.top_expr()
                if self.token_op_is(','):
                    self.consume()
                    step_expr = self.top_expr()
                    if self.token_op_is(','):
                        self.consume()
                        limit_expr = self.top_expr()
            if not self.token_op_is(')'):
                self.error(_("{0} statement: expected ')', "
                             "found '{1}'").format('for', self.token_text()))
            self.consume()
        else:
            is_list = True
            list_expr = self.top_expr()
        # Optional 'separator expr' — only meaningful for the list form.
        if self.token_is('separator'):
            self.consume()
            separator = self.expr()
        else:
            separator = None
        if not self.token_op_is(':'):
            self.error(_("{0} statement: expected '{1}', "
                         "found '{2}'").format('for', ':', self.token_text()))
        self.consume()
        block = self.expression_list()
        if not self.token_is('rof'):
            self.error(_("{0} statement: expected '{1}', "
                         "found '{2}'").format('for', 'rof', self.token_text()))
        self.consume()
        if is_list:
            return ForNode(line_number, variable, list_expr, separator, block)
        return RangeNode(line_number, variable, start_expr, stop_expr, step_expr, limit_expr, block)
    def define_function_expression(self):
        '''Parse ``def name(arg[=default], ...): ... fed`` and register the
        name in self.local_functions so later calls resolve.'''
        self.consume()
        line_number = self.line_number
        if not self.token_is_id():
            self.error(_("'{0}' statement: expected a function name identifier").format('def'))
        function_name = self.token()
        if function_name in self.local_functions:
            self.error(_("Function name '{0}' is already defined").format(function_name))
        if not self.token_op_is('('):
            self.error(_("'{0}' statement: expected a '('").format('def'))
        self.consume()
        arguments = []
        while not self.token_op_is(')'):
            a = self.top_expr()
            if a.node_type not in (Node.NODE_ASSIGN, Node.NODE_RVALUE):
                self.error(_("Parameters to a function must be "
                             "variables or assignments"))
            if a.node_type == Node.NODE_RVALUE:
                # Bare parameter: normalize to an assignment with an empty
                # string as its default value.
                a = AssignNode(line_number, a.name, ConstantNode(self.line_number, ''))
            arguments.append(a)
            if not self.token_op_is(','):
                break
            self.consume()
        t = self.token()
        if t != ')':
            self.error(_("'{0}' statement: expected a ')' at end of argument list").format('def'))
        if not self.token_op_is(':'):
            self.error(_("'{0}' statement: missing ':'").format('def'))
        self.consume()
        block = self.expression_list()
        if not self.token_is('fed'):
            self.error(_("'{0}' statement: missing the closing '{1}'").format('def', 'fed'))
        self.consume()
        # Register only after a fully successful parse.
        self.local_functions.add(function_name)
        return LocalFunctionDefineNode(line_number, function_name, arguments, block)
def local_call_expression(self, name, arguments):
return LocalFunctionCallNode(self.line_number, name, arguments)
    def call_expression(self, name, arguments):
        '''Build a call node for the stored template ``name``, compiling the
        template's source on first use and caching the result.'''
        compiled_func = self.funcs[name].cached_compiled_text
        if compiled_func is None:
            text = self.funcs[name].program_text
            if function_object_type(text) is StoredObjectType.StoredGPMTemplate:
                # GPM source: strip the 'program:' prefix and parse it with a
                # fresh parser sharing this one's function table.
                text = text[len('program:'):]
                compiled_func = _Parser().program(self.parent, self.funcs,
                                                  self.parent.lex_scanner.scan(text))
            elif function_object_type(text) is StoredObjectType.StoredPythonTemplate:
                text = text[len('python:'):]
                compiled_func = self.parent.compile_python_template(text)
            else:
                self.error(_("A stored template must begin with '{0}' or {1}").format('program:', 'python:'))
            # Cache so subsequent calls skip recompilation.
            self.funcs[name].cached_compiled_text = compiled_func
        return StoredTemplateCallNode(self.line_number, name, self.funcs[name], arguments)
def top_expr(self):
return self.or_expr()
def or_expr(self):
left = self.and_expr()
while self.token_op_is('||'):
self.consume()
right = self.and_expr()
left = LogopBinaryNode(self.line_number, 'or', left, right)
return left
def and_expr(self):
left = self.not_expr()
while self.token_op_is('&&'):
self.consume()
right = self.not_expr()
left = LogopBinaryNode(self.line_number, 'and', left, right)
return left
def not_expr(self):
if self.token_op_is('!'):
self.consume()
return LogopUnaryNode(self.line_number, 'not', self.not_expr())
return self.string_binary_expr()
def string_binary_expr(self):
left = self.compare_expr()
while self.token_op_is('&'):
operator = self.token()
right = self.compare_expr()
left = StringBinaryNode(self.line_number, operator, left, right)
return left
    def compare_expr(self):
        '''Parse an optional (non-chaining) comparison between two
        arithmetic expressions; 'in'/'inlist'/'inlist_field' compare as
        strings, the numeric infix operators as numbers.'''
        left = self.add_subtract_expr()
        if (self.token_op_is_string_infix_compare() or
                self.token_is('in') or self.token_is('inlist') or self.token_is('inlist_field')):
            operator = self.token()
            return StringCompareNode(self.line_number, operator, left, self.add_subtract_expr())
        if self.token_op_is_numeric_infix_compare():
            operator = self.token()
            return NumericCompareNode(self.line_number, operator, left, self.add_subtract_expr())
        return left
def add_subtract_expr(self):
left = self.times_divide_expr()
while self.token_op_is('+') or self.token_op_is('-'):
operator = self.token()
right = self.times_divide_expr()
left = NumericBinaryNode(self.line_number, operator, left, right)
return left
def times_divide_expr(self):
left = self.unary_plus_minus_expr()
while self.token_op_is('*') or self.token_op_is('/'):
operator = self.token()
right = self.unary_plus_minus_expr()
left = NumericBinaryNode(self.line_number, operator, left, right)
return left
def unary_plus_minus_expr(self):
if self.token_op_is('+'):
self.consume()
return NumericUnaryNode(self.line_number, '+', self.unary_plus_minus_expr())
if self.token_op_is('-'):
self.consume()
return NumericUnaryNode(self.line_number, '-', self.unary_plus_minus_expr())
return self.expr()
keyword_nodes = {
'if': (lambda self:None, if_expression),
'for': (lambda self:None, for_expression),
'break': (lambda self: self.consume(), lambda self: BreakNode(self.line_number)),
'continue': (lambda self: self.consume(), lambda self: ContinueNode(self.line_number)),
'return': (lambda self: self.consume(), lambda self: ReturnNode(self.line_number, self.top_expr())),
'def': (lambda self: None, define_function_expression),
}
# {inlined_function_name: tuple(constraint on number of length, node builder) }
inlined_function_nodes = {
'field': (lambda args: len(args) == 1,
lambda ln, args: FieldNode(ln, args[0])),
'raw_field': (lambda args: len(args) == 1,
lambda ln, args: RawFieldNode(ln, *args)),
'test': (lambda args: len(args) == 3,
lambda ln, args: IfNode(ln, args[0], (args[1],), (args[2],))),
'first_non_empty': (lambda args: len(args) >= 1,
lambda ln, args: FirstNonEmptyNode(ln, args)),
'switch': (lambda args: len(args) >= 3 and (len(args) %2) == 0,
lambda ln, args: SwitchNode(ln, args)),
'switch_if': (lambda args: len(args) > 0 and (len(args) %2) == 1,
lambda ln, args: SwitchIfNode(ln, args)),
'assign': (lambda args: len(args) == 2 and len(args[0]) == 1 and args[0][0].node_type == Node.NODE_RVALUE,
lambda ln, args: AssignNode(ln, args[0][0].name, args[1])),
'contains': (lambda args: len(args) == 4,
lambda ln, args: ContainsNode(ln, args)),
'character': (lambda args: len(args) == 1,
lambda ln, args: CharacterNode(ln, args[0])),
'print': (lambda _: True,
lambda ln, args: PrintNode(ln, args)),
'strcat': (lambda _: True,
lambda ln, args: StrcatNode(ln, args)),
'list_count_field': (lambda args: len(args) == 1,
lambda ln, args: ListCountFieldNode(ln, args[0]))
}
    def expr(self):
        '''
        Parse one primary expression: a parenthesized expression list, a
        keyword construct (if/for/def/break/continue/return), an identifier
        or $-prefixed field reference, an assignment, a function/template
        call, or a constant. Returns the corresponding AST node.
        '''
        if self.token_op_is('('):
            self.consume()
            rv = self.expression_list()
            if not self.token_op_is(')'):
                self.error(_("Expected '{0}', found '{1}'").format(')', self.token_text()))
            self.consume()
            return rv
        # Check if we have a keyword-type expression
        if self.token_is_keyword():
            t = self.token_text()
            kw_tuple = self.keyword_nodes.get(t, None)
            if kw_tuple:
                # These are keywords, so there can't be ambiguity between these,
                # ids, and functions.
                kw_tuple[0](self)
                return kw_tuple[1](self)
        # Not a keyword. Check if we have an id reference or a function call
        if self.token_is_id():
            # We have an identifier. Check if it is a shorthand field reference
            line_number = self.line_number
            id_ = self.token()
            if len(id_) > 1 and id_[0] == '$':
                # $name is field(name); $$name is raw_field(name)
                if id_[1] == '$':
                    return RawFieldNode(line_number, ConstantNode(self.line_number, id_[2:]))
                return FieldNode(line_number, ConstantNode(self.line_number, id_[1:]))
            # Do we have a function call?
            if not self.token_op_is('('):
                # Nope. We must have an lvalue (identifier) or an assignment
                if self.token_op_is('='):
                    # classic assignment statement
                    self.consume()
                    return AssignNode(line_number, id_, self.top_expr())
                return VariableNode(line_number, id_)
            # We have a function.
            # Check if it is a known one. We do this here so error reporting is
            # better, as it can identify the tokens near the problem.
            id_ = id_.strip()
            if id_ not in self.func_names and id_ not in self.local_functions:
                self.error(_('Unknown function {0}').format(id_))
            # Eat the opening paren, parse the argument list, then eat the closing paren
            self.consume()
            arguments = list()
            while not self.token_op_is(')'):
                # parse an argument expression (recursive call)
                arguments.append(self.expression_list())
                if not self.token_op_is(','):
                    break
                self.consume()
            t = self.token()
            if t != ')':
                self.error(_("Expected a '{0}' for function call, "
                             "found '{1}'").format(')', t))
            # Check for an inlined function
            function_tuple = self.inlined_function_nodes.get(id_, None)
            if function_tuple and function_tuple[0](arguments):
                return function_tuple[1](line_number, arguments)
            # More complicated special cases
            if id_ == 'arguments' or id_ == 'globals' or id_ == 'set_globals':
                # Arguments must be bare variables or 'name=default' assignments
                new_args = []
                for arg_list in arguments:
                    arg = arg_list[0]
                    if arg.node_type not in (Node.NODE_ASSIGN, Node.NODE_RVALUE):
                        self.error(_("Parameters to '{0}' must be "
                                     "variables or assignments").format(id_))
                    if arg.node_type == Node.NODE_RVALUE:
                        # A bare variable defaults to the empty string
                        arg = AssignNode(line_number, arg.name, ConstantNode(self.line_number, ''))
                    new_args.append(arg)
                if id_ == 'arguments':
                    return ArgumentsNode(line_number, new_args)
                if id_ == 'set_globals':
                    return SetGlobalsNode(line_number, new_args)
                return GlobalsNode(line_number, new_args)
            # Check for calling a local function template
            if id_ in self.local_functions:
                return self.local_call_expression(id_, arguments)
            # Check for calling a stored template
            if id_ in self.func_names and self.funcs[id_].object_type is not StoredObjectType.PythonFunction:
                return self.call_expression(id_, arguments)
            # We must have a reference to a formatter function. Check if
            # the right number of arguments were supplied
            cls = self.funcs[id_]
            if cls.arg_count != -1 and len(arguments) != cls.arg_count:
                self.error(_('Incorrect number of arguments for function {0}').format(id_))
            return FunctionNode(line_number, id_, arguments)
        elif self.token_is_constant():
            # String or number
            return ConstantNode(self.line_number, self.token())
        else:
            # Who knows what?
            self.error(_("Expected an expression, found '{0}'").format(self.token_text()))
class ExecutionBase(Exception):
    '''
    Base class for the exceptions used to implement break/continue/return
    control flow in the template interpreter. Carries the value computed so
    far so the construct that catches the exception can use it as its result.
    '''

    def __init__(self, name):
        msg = _('{0} outside of for loop').format(name) if name else ''
        super().__init__(msg)
        self.value = ''

    def set_value(self, v):
        self.value = v

    def get_value(self):
        return self.value
class ContinueExecuted(ExecutionBase):
    # Raised when 'continue' executes inside a template 'for' loop.
    def __init__(self):
        super().__init__('continue')
class BreakExecuted(ExecutionBase):
    # Raised when 'break' executes inside a template 'for' loop.
    def __init__(self):
        super().__init__('break')
class ReturnExecuted(ExecutionBase):
    # Raised by 'return'; carries the returned value (see ExecutionBase).
    def __init__(self):
        super().__init__('return')
class StopException(Exception):
    # Raised to abort template evaluation entirely.
    def __init__(self):
        super().__init__('Template evaluation stopped')
class PythonTemplateContext:
    '''
    Holds the data passed to Python templates as their context argument.
    Attributes are created dynamically through set_values(); a core set
    (db, arguments, globals, formatter, funcs) is pre-declared so it is
    always present.
    '''

    def __init__(self):
        # Pre-declare the attributes we already know must exist.
        object.__init__(self)
        self.db = None
        self.arguments = None
        self.globals = None
        self.formatter = None
        self.funcs = None
        self.attrs_set = {'db', 'arguments', 'globals', 'funcs'}

    def set_values(self, **kwargs):
        # Create/set attributes from the named parameters. Doing it this way
        # avoids changing this method's signature when attributes are added
        # in the future. A user who depends on an attribute the context
        # creator didn't supply gets an AttributeError.
        for name, value in kwargs.items():
            self.attrs_set.add(name)
            setattr(self, name, value)

    @property
    def attributes(self):
        # Sorted list of the attribute names present in this context.
        return sorted(self.attrs_set)

    def __str__(self):
        # One 'name:value' pair per line, sorted by attribute name.
        return '\n'.join(f'{name}:{getattr(self, name, None)}'
                         for name in sorted(self.attrs_set))
class FormatterFuncsCaller:
    '''
    Provides a convenient solution to call functions loaded in the
    TemplateFormatter. The functions are called using their name as an attribute
    of this class, with an underscore at the end if the name conflicts with a
    Python keyword. If the name contains a character that is illegal in an
    attribute name (like .:-), use getattr(). Example: context.funcs.list_re_group()
    '''
    def __init__(self, formatter):
        if not isinstance(formatter, TemplateFormatter):
            raise TypeError(f'{formatter} is not an instance of TemplateFormatter')
        self.__formatter__ = formatter
    def __getattribute__(self, name):
        # Resolve an attribute access to a callable wrapping the named
        # formatter function, or raise AttributeError.
        if name.startswith('__') and name.endswith('__'):  # return internal special attribute
            try:
                return object.__getattribute__(self, name)
            except Exception:
                pass
        formatter = self.__formatter__
        func_name = ''
        if name.endswith('_') and name[:-1] in formatter.funcs:  # give the priority to the backup name
            func_name = name[:-1]
        elif name in formatter.funcs:
            func_name = name
        if func_name:
            def call(*args, **kargs):
                # Coerce all arguments to strings (None becomes '') since
                # the template language operates on strings only.
                def n(d):
                    return '' if d is None else str(d)
                args = tuple(n(a) for a in args)
                try:
                    if kargs:
                        raise ValueError(_('Keyword arguments are not allowed'))
                    # special function
                    if func_name == 'arguments':
                        raise ValueError(_("Don't call {0}. Instead use {1}").format('arguments()', 'context.arguments'))
                    if func_name == 'globals':
                        raise ValueError(_("Don't call {0}. Instead use {1}").format('globals()', 'context.globals'))
                    if func_name == 'set_globals':
                        raise ValueError(_("Don't call {0}. Instead use {1}").format('set_globals()', "context.globals['name'] = val"))
                    if func_name == 'character':
                        if _Parser.inlined_function_nodes['character'][0](args):
                            rslt = _Interpreter.characters.get(args[0])
                            if rslt is None:
                                raise ValueError(_("Invalid character name '{0}'").format(args[0]))
                        else:
                            raise ValueError(_('Incorrect number of arguments'))
                    else:
                        # built-in/user template functions and Stored GPM/Python templates
                        func = formatter.funcs[func_name]
                        if func.object_type == StoredObjectType.PythonFunction:
                            rslt = func.evaluate(formatter, formatter.kwargs, formatter.book, formatter.locals, *args)
                        else:
                            rslt = formatter._eval_sfm_call(func_name, args, formatter.global_vars)
                except Exception as e:
                    # Change the error message to return the name used in the template
                    e = e.__class__(_('Error in function {0} :: {1}').format(
                        name,
                        re.sub(r'\w+\.evaluate\(\)\s*', '', str(e), 1)))  # remove UserFunction.evaluate() | Builtin*.evaluate()
                    e.is_internal = True
                    raise e
                return rslt
            return call
        e = AttributeError(_("No function named {!r} exists").format(name))
        e.is_internal = True
        raise e
    def __dir__(self):
        # Expose both the plain and underscore-suffixed function names for
        # interactive completion.
        return list(set(object.__dir__(self) +
                        list(self.__formatter__.funcs.keys()) +
                        [f+'_' for f in self.__formatter__.funcs.keys()]))
class _Interpreter:
    def error(self, message, line_number):
        '''Raise a ValueError tagged with the template line number.'''
        m = _('Interpreter: {0} - line number {1}').format(message, line_number)
        raise ValueError(m)
    def program(self, funcs, parent, prog, val, is_call=False, args=None,
                global_vars=None, break_reporter=None):
        '''
        Entry point: evaluate a parsed template 'prog'. 'parent' is the
        TemplateFormatter; 'val' initializes the '$' local variable. When
        'is_call' is True, 'prog' is a stored-template function definition
        invoked with 'args'. 'break_reporter', if supplied, is called at
        each node for the template debugger.
        '''
        self.parent = parent
        self.parent_kwargs = parent.kwargs
        self.parent_book = parent.book
        self.funcs = funcs
        self.locals = {'$':val}
        self.local_functions = dict()
        self.override_line_number = None
        self.global_vars = global_vars if isinstance(global_vars, dict) else {}
        if break_reporter:
            self.break_reporter = self.call_break_reporter
            self.real_break_reporter = break_reporter
        else:
            self.break_reporter = None
        try:
            if is_call:
                # prog is an instance of the function definition class
                ret = self.do_node_stored_template_call(StoredTemplateCallNode(1, prog.name, prog, None), args=args)
            else:
                ret = self.expression_list(prog)
        except ReturnExecuted as e:
            # 'return' at top level simply produces the returned value
            ret = e.get_value()
        return ret
    def call_break_reporter(self, txt, val, line_number):
        # Forward to the real reporter, substituting the caller's line number
        # while evaluation is inside a stored/local function.
        self.real_break_reporter(txt, val, self.locals,
                self.override_line_number if self.override_line_number
                else line_number)
    def expression_list(self, prog):
        '''Evaluate a list of expressions; the result is the last value.
        break/continue exceptions capture the value computed so far.'''
        val = ''
        try:
            for p in prog:
                val = self.expr(p)
        except (BreakExecuted, ContinueExecuted) as e:
            e.set_value(val)
            raise e
        return val
    def do_node_if(self, prog):
        '''Evaluate if/then/elif/else; returns '' when no branch is taken.'''
        line_number = prog.line_number
        test_part = self.expr(prog.condition)
        if self.break_reporter:
            self.break_reporter("'if': condition value", test_part, line_number)
        if test_part:
            v = self.expression_list(prog.then_part)
            if self.break_reporter:
                self.break_reporter("'if': then-block value", v, line_number)
            return v
        elif prog.else_part:
            v = self.expression_list(prog.else_part)
            if self.break_reporter:
                self.break_reporter("'if': else-block value", v, line_number)
            return v
        return ''
    def do_node_for(self, prog):
        '''
        Evaluate 'for var in list_expr ...'. The list expression names either
        a book attribute (used directly if it is already a list) or yields a
        separator-delimited string that is split into items. The block runs
        once per item with the loop variable bound in self.locals; the result
        is the value of the last block iteration.
        '''
        line_number = prog.line_number
        try:
            separator = ',' if prog.separator is None else self.expr(prog.separator)
            v = prog.variable
            f = self.expr(prog.list_field_expr)
            # Use the book attribute when 'f' names one, else treat the
            # expression's value itself as the list source.
            res = getattr(self.parent_book, f, f)
            if res is not None:
                if isinstance(res, str):
                    res = [r.strip() for r in res.split(separator) if r.strip()]
                ret = ''
                if self.break_reporter:
                    self.break_reporter("'for' list value", separator.join(res), line_number)
                try:
                    for x in res:
                        try:
                            self.locals[v] = x
                            ret = self.expression_list(prog.block)
                        except ContinueExecuted as e:
                            ret = e.get_value()
                except BreakExecuted as e:
                    ret = e.get_value()
                if (self.break_reporter):
                    self.break_reporter("'for' block value", ret, line_number)
            elif self.break_reporter:
                # Shouldn't get here
                self.break_reporter("'for' list value", '', line_number)
                ret = ''
            return ret
        except (StopException, ValueError, ReturnExecuted) as e:
            raise e
        except Exception as e:
            self.error(_("Unhandled exception '{0}'").format(e), line_number)
    def do_node_range(self, prog):
        '''
        Evaluate the range form of 'for': iterate over range(start, stop,
        step) with the loop variable bound to the stringified index. The
        'limit' clause (default 1000) caps the range length to protect
        against runaway loops.
        '''
        line_number = prog.line_number
        try:
            try:
                start_val = int(self.float_deal_with_none(self.expr(prog.start_expr)))
            except ValueError:
                self.error(_("{0}: {1} must be an integer").format('for', 'start'), line_number)
            try:
                stop_val = int(self.float_deal_with_none(self.expr(prog.stop_expr)))
            except ValueError:
                self.error(_("{0}: {1} must be an integer").format('for', 'stop'), line_number)
            try:
                step_val = int(self.float_deal_with_none(self.expr(prog.step_expr)))
            except ValueError:
                self.error(_("{0}: {1} must be an integer").format('for', 'step'), line_number)
            try:
                limit_val = (1000 if prog.limit_expr is None else
                             int(self.float_deal_with_none(self.expr(prog.limit_expr))))
            except ValueError:
                self.error(_("{0}: {1} must be an integer").format('for', 'limit'), line_number)
            var = prog.variable
            if (self.break_reporter):
                self.break_reporter("'for': start value", str(start_val), line_number)
                self.break_reporter("'for': stop value", str(stop_val), line_number)
                self.break_reporter("'for': step value", str(step_val), line_number)
                self.break_reporter("'for': limit value", str(limit_val), line_number)
            ret = ''
            try:
                range_gen = range(start_val, stop_val, step_val)
                if len(range_gen) > limit_val:
                    self.error(
                        _("{0}: the range length ({1}) is larger than the limit ({2})").format(
                            'for', str(len(range_gen)), str(limit_val)), line_number)
                for x in (str(x) for x in range_gen):
                    try:
                        if (self.break_reporter):
                            self.break_reporter(f"'for': assign to loop index '{var}'", x, line_number)
                        self.locals[var] = x
                        ret = self.expression_list(prog.block)
                    except ContinueExecuted as e:
                        ret = e.get_value()
            except BreakExecuted as e:
                ret = e.get_value()
            if (self.break_reporter):
                self.break_reporter("'for' block value", ret, line_number)
            return ret
        except (StopException, ValueError) as e:
            raise e
        except Exception as e:
            self.error(_("Unhandled exception '{0}'").format(e), line_number)
    def do_node_rvalue(self, prog):
        '''Return the value of a local variable; error if it is undefined.'''
        try:
            if (self.break_reporter):
                self.break_reporter(prog.node_name, self.locals[prog.name], prog.line_number)
            return self.locals[prog.name]
        except:
            self.error(_("Unknown identifier '{0}'").format(prog.name), prog.line_number)
    def do_node_func(self, prog):
        '''Evaluate a formatter-function call: evaluate the arguments, then
        dispatch to the registered function implementation.'''
        args = list()
        for arg in prog.expression_list:
            # evaluate the expression (recursive call)
            args.append(self.expr(arg))
        # Evaluate the function.
        id_ = prog.name.strip()
        cls = self.funcs[id_]
        res = cls.eval_(self.parent, self.parent_kwargs,
                        self.parent_book, self.locals, *args)
        if (self.break_reporter):
            self.break_reporter(prog.node_name, res, prog.line_number)
        return res
    def do_node_stored_template_call(self, prog, args=None):
        '''
        Call a stored (GPM or Python) template. Arguments are exposed to the
        callee as locals named '*arg_N'; the caller's locals and local
        functions are saved and restored around the call.
        '''
        if (self.break_reporter):
            self.break_reporter(prog.node_name, _('before evaluating arguments'), prog.line_number)
        if args is None:
            args = []
            for arg in prog.expression_list:
                # evaluate the expression (recursive call)
                args.append(self.expr(arg))
        saved_locals = self.locals
        saved_local_functions = self.local_functions
        self.locals = {}
        self.local_functions = {}
        for dex, v in enumerate(args):
            self.locals['*arg_'+ str(dex)] = v
        if (self.break_reporter):
            self.break_reporter(prog.node_name, _('after evaluating arguments'), prog.line_number)
            saved_line_number = self.override_line_number
            # Report line numbers relative to the outermost caller
            self.override_line_number = (self.override_line_number if self.override_line_number
                                         else prog.line_number)
        else:
            saved_line_number = None
        try:
            if function_object_type(prog.function.program_text) is StoredObjectType.StoredGPMTemplate:
                val = self.expression_list(prog.function.cached_compiled_text)
            else:
                val = self.parent._run_python_template(prog.function.cached_compiled_text, args)
        except ReturnExecuted as e:
            val = e.get_value()
        self.override_line_number = saved_line_number
        self.locals = saved_locals
        self.local_functions = saved_local_functions
        if (self.break_reporter):
            self.break_reporter(prog.node_name + _(' returned value'), val, prog.line_number)
        return val
    def do_node_local_function_define(self, prog):
        '''Record a 'def'-ed local function so later calls can find it.'''
        if (self.break_reporter):
            self.break_reporter(prog.node_name, '', prog.line_number)
        self.local_functions[prog.name] = prog
        return ''
    def do_node_local_function_call(self, prog):
        '''
        Call a local 'def' function. Positional arguments are bound to the
        declared parameters; missing arguments take the parameters' declared
        default expressions. The caller's locals are saved and restored.
        '''
        if (self.break_reporter):
            self.break_reporter(prog.node_name, _('before evaluating arguments'), prog.line_number)
        line_number, argument_list, block = self.local_functions[prog.name].attributes_to_tuple()
        if len(prog.arguments) > len(argument_list):
            self.error(_("Function {0}: argument count mismatch -- "
                         "{1} given, at most {2} required").format(prog.name,
                                                                   len(prog.arguments),
                                                                   len(argument_list)),
                       prog.line_number)
        new_locals = dict()
        for i,arg in enumerate(argument_list):
            if len(prog.arguments) > i:
                new_locals[arg.left] = self.expr(prog.arguments[i])
            else:
                # use the declared default value
                new_locals[arg.left] = self.expr(arg.right)
        saved_locals = self.locals
        self.locals = new_locals
        if (self.break_reporter):
            self.break_reporter(prog.node_name, _('after evaluating arguments'), prog.line_number)
            saved_line_number = self.override_line_number
            # Report line numbers relative to the outermost caller
            self.override_line_number = (self.override_line_number if self.override_line_number
                                         else line_number)
        else:
            saved_line_number = None
        try:
            val = self.expr(block)
        except ReturnExecuted as e:
            val = e.get_value()
        finally:
            self.locals = saved_locals
            self.override_line_number = saved_line_number
        if (self.break_reporter):
            self.break_reporter(prog.node_name + _(' returned value'), val, prog.line_number)
        return val
    def do_node_arguments(self, prog):
        '''Bind declared parameter names to the '*arg_N' values supplied by
        the caller, falling back to each parameter's default expression.'''
        for dex, arg in enumerate(prog.expression_list):
            self.locals[arg.left] = self.locals.get('*arg_'+ str(dex), self.expr(arg.right))
        if (self.break_reporter):
            self.break_reporter(prog.node_name, '', prog.line_number)
        return ''
    def do_node_globals(self, prog):
        '''Copy named global variables into locals (defaults apply when the
        global is missing); returns the last value copied.'''
        res = ''
        for arg in prog.expression_list:
            res = self.locals[arg.left] = self.global_vars.get(arg.left, self.expr(arg.right))
        if (self.break_reporter):
            self.break_reporter(prog.node_name, res, prog.line_number)
        return res
    def do_node_set_globals(self, prog):
        '''Copy named local variables into the globals dict (defaults apply
        when the local is missing); returns the last value copied.'''
        res = ''
        for arg in prog.expression_list:
            res = self.global_vars[arg.left] = self.locals.get(arg.left, self.expr(arg.right))
        if (self.break_reporter):
            self.break_reporter(prog.node_name, res, prog.line_number)
        return res
    def do_node_constant(self, prog):
        '''Return a literal string/number constant.'''
        if (self.break_reporter):
            self.break_reporter(prog.node_name, prog.value, prog.line_number)
        return prog.value
    def do_node_field(self, prog):
        '''Implement field(name): return the formatted value of a metadata
        field via the parent formatter's get_value().'''
        try:
            name = self.expr(prog.expression)
            try:
                res = self.parent.get_value(name, [], self.parent_kwargs)
                if (self.break_reporter):
                    self.break_reporter(prog.node_name, res, prog.line_number)
                return res
            except StopException:
                raise
            except:
                self.error(_("Unknown field '{0}'").format(name), prog.line_number)
        except (StopException, ValueError):
            raise
        except:
            self.error(_("Unknown field '{0}'").format('internal parse error'),
                       prog.line_number)
    def do_node_raw_field(self, prog):
        '''
        Implement raw_field(name [, default]): return the unformatted book
        attribute. The default expression is used only when the field is
        missing/None; list values are joined using the field's list_to_ui
        separator when metadata for the field is available.
        '''
        try:
            name = self.expr(prog.expression)
            name = field_metadata.search_term_to_field_key(name)
            res = getattr(self.parent_book, name, None)
            if res is None and prog.default is not None:
                res = self.expr(prog.default)
                if (self.break_reporter):
                    self.break_reporter(prog.node_name, res, prog.line_number)
                return res
            if res is not None:
                if isinstance(res, list):
                    fm = self.parent_book.metadata_for_field(name)
                    if fm is None:
                        res = ', '.join(res)
                    else:
                        res = fm['is_multiple']['list_to_ui'].join(res)
                else:
                    res = str(res)
            else:
                res = str(res)  # Should be the string "None"
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Unknown field '{0}'").format('internal parse error'),
                       prog.line_number)
    def do_node_assign(self, prog):
        '''Evaluate the right side and store it in the named local; the
        assignment's value is the assigned value.'''
        t = self.expr(prog.right)
        self.locals[prog.left] = t
        if (self.break_reporter):
            self.break_reporter(prog.node_name, t, prog.line_number)
        return t
    def do_node_first_non_empty(self, prog):
        '''Return the first argument that evaluates to a non-empty value,
        or '' if all are empty. Later arguments are not evaluated.'''
        for expr in prog.expression_list:
            v = self.expr(expr)
            if v:
                if self.break_reporter:
                    self.break_reporter(prog.node_name, v, prog.line_number)
                return v
        if (self.break_reporter):
            self.break_reporter(prog.node_name, '', prog.line_number)
        return ''
    def do_node_switch(self, prog):
        '''Implement switch(val, pat, res, ..., default): return the result
        for the first regex pattern (case-insensitive) matching val.'''
        val = self.expr(prog.expression_list[0])
        for i in range(1, len(prog.expression_list)-1, 2):
            v = self.expr(prog.expression_list[i])
            if re.search(v, val, flags=re.I):
                res = self.expr(prog.expression_list[i+1])
                if self.break_reporter:
                    self.break_reporter(prog.node_name, res, prog.line_number)
                return res
        # no pattern matched: use the trailing default expression
        res = self.expr(prog.expression_list[-1])
        if (self.break_reporter):
            self.break_reporter(prog.node_name, res, prog.line_number)
        return res
    def do_node_switch_if(self, prog):
        '''Implement switch_if(test, val, ..., default): return the value for
        the first truthy test expression.'''
        for i in range(0, len(prog.expression_list)-1, 2):
            tst = self.expr(prog.expression_list[i])
            if self.break_reporter:
                self.break_reporter("switch_if(): test expr", tst, prog.line_number)
            if tst:
                res = self.expr(prog.expression_list[i+1])
                if self.break_reporter:
                    self.break_reporter("switch_if(): value expr", res, prog.line_number)
                return res
        # no test succeeded: use the trailing default expression
        res = self.expr(prog.expression_list[-1])
        if (self.break_reporter):
            self.break_reporter("switch_if(): default expr", res, prog.line_number)
        return res
    def do_node_strcat(self, prog):
        '''Concatenate the string values of all arguments.'''
        res = ''.join([self.expr(expr) for expr in prog.expression_list])
        if self.break_reporter:
            self.break_reporter(prog.node_name, res, prog.line_number)
        return res
    def do_node_list_count_field(self, prog):
        '''Return the element count (as a string) of a list-valued book
        field; error if the field is missing or not list-like.'''
        name = field_metadata.search_term_to_field_key(self.expr(prog.expression))
        res = getattr(self.parent_book, name, None)
        if res is None or not isinstance(res, (list, tuple, set, dict)):
            self.error(_("Field '{0}' is either not a field or not a list").format(name), prog.line_number)
        ans = str(len(res))
        if self.break_reporter:
            self.break_reporter(prog.node_name, ans, prog.line_number)
        return ans
    def do_node_break(self, prog):
        '''Implement 'break' by raising BreakExecuted (caught by the loop).'''
        if (self.break_reporter):
            self.break_reporter(prog.node_name, '', prog.line_number)
        raise BreakExecuted()
    def do_node_continue(self, prog):
        '''Implement 'continue' by raising ContinueExecuted (caught by the loop).'''
        if (self.break_reporter):
            self.break_reporter(prog.node_name, '', prog.line_number)
        raise ContinueExecuted()
    def do_node_return(self, prog):
        '''Implement 'return': raise ReturnExecuted carrying the value.'''
        v = self.expr(prog.expr)
        if (self.break_reporter):
            self.break_reporter(prog.node_name, v, prog.line_number)
        e = ReturnExecuted()
        e.set_value(v)
        raise e
    def do_node_contains(self, prog):
        '''Implement contains(val, pattern, found_val, not_found_val) using a
        case-insensitive regex search.'''
        v = self.expr(prog.value_expression)
        t = self.expr(prog.test_expression)
        if re.search(t, v, flags=re.I):
            res = self.expr(prog.match_expression)
        else:
            res = self.expr(prog.not_match_expression)
        if (self.break_reporter):
            self.break_reporter(prog.node_name, res, prog.line_number)
        return res
    # String comparison operators. Comparisons use locale-aware strcmp();
    # 'in' is a case-insensitive regex search; 'inlist' matches the pattern
    # against each item of a comma-separated list.
    INFIX_STRING_COMPARE_OPS = {
        "==": lambda x, y: strcmp(x, y) == 0,
        "!=": lambda x, y: strcmp(x, y) != 0,
        "<": lambda x, y: strcmp(x, y) < 0,
        "<=": lambda x, y: strcmp(x, y) <= 0,
        ">": lambda x, y: strcmp(x, y) > 0,
        ">=": lambda x, y: strcmp(x, y) >= 0,
        "in": lambda x, y: re.search(x, y, flags=re.I),
        "inlist": lambda x, y: list(filter(partial(re.search, x, flags=re.I),
                                           [v.strip() for v in y.split(',') if v.strip()]))
    }
    def do_inlist_field(self, left, right, prog):
        '''Implement 'pat inlist_field field': return '1' if the pattern
        matches any item of the list-valued book field, else ''. For
        identifiers (a dict) items are matched as 'key:value'.'''
        res = getattr(self.parent_book, right, None)
        if res is None or not isinstance(res, (list, tuple, set, dict)):
            self.error(_("Field '{0}' is either not a field or not a list").format(right), prog.line_number)
        pat = re.compile(left, flags=re.I)
        if isinstance(res, dict):  # identifiers
            for k,v in res.items():
                if re.search(pat, f'{k}:{v}'):
                    return '1'
        else:
            for v in res:
                if re.search(pat, v):
                    return '1'
        return ''
    def do_node_string_infix(self, prog):
        '''Evaluate a string comparison; returns '1' for true, '' for false.'''
        try:
            left = self.expr(prog.left)
            right = self.expr(prog.right)
            try:
                res = '1' if self.INFIX_STRING_COMPARE_OPS[prog.operator](left, right) else ''
            except KeyError:
                # 'inlist_field' is not in the table because it needs 'prog'
                if prog.operator == 'inlist_field':
                    res = self.do_inlist_field(left, right, prog)
                else:
                    raise
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Error during string comparison: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
    # Numeric comparison operators (the '#'-suffixed forms).
    INFIX_NUMERIC_COMPARE_OPS = {
        "==#": lambda x, y: x == y,
        "!=#": lambda x, y: x != y,
        "<#": lambda x, y: x < y,
        "<=#": lambda x, y: x <= y,
        ">#": lambda x, y: x > y,
        ">=#": lambda x, y: x >= y,
    }
    def float_deal_with_none(self, v):
        # Undefined values and the string 'None' are assumed to be zero.
        # The reason for string 'None': raw_field returns it for undefined values
        return float(v if v and v != 'None' else 0)
    def do_node_numeric_infix(self, prog):
        '''Evaluate a numeric comparison; returns '1' for true, '' for false.
        Operands are coerced with float_deal_with_none().'''
        try:
            left = self.float_deal_with_none(self.expr(prog.left))
            right = self.float_deal_with_none(self.expr(prog.right))
            res = '1' if self.INFIX_NUMERIC_COMPARE_OPS[prog.operator](left, right) else ''
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Value used in comparison is not a number: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
    # Binary logical operators. The lambdas receive the unevaluated operand
    # nodes so that 'and'/'or' short-circuit like Python's operators.
    LOGICAL_BINARY_OPS = {
        'and': lambda self, x, y: self.expr(x) and self.expr(y),
        'or': lambda self, x, y: self.expr(x) or self.expr(y),
    }
    def do_node_logop(self, prog):
        '''Evaluate '&&'/'||'; returns '1' for true, '' for false.'''
        try:
            res = ('1' if self.LOGICAL_BINARY_OPS[prog.operator](self, prog.left, prog.right) else '')
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Error during operator evaluation: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
    # Unary logical operators.
    LOGICAL_UNARY_OPS = {
        'not': lambda x: not x,
    }
    def do_node_logop_unary(self, prog):
        '''Evaluate '!'; returns '1' for true, '' for false.'''
        try:
            expr = self.expr(prog.expr)
            res = ('1' if self.LOGICAL_UNARY_OPS[prog.operator](expr) else '')
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Error during operator evaluation: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
    # Binary arithmetic operators; operands are floats.
    ARITHMETIC_BINARY_OPS = {
        '+': lambda x, y: x + y,
        '-': lambda x, y: x - y,
        '*': lambda x, y: x * y,
        '/': lambda x, y: x / y,
    }
    def do_node_binary_arithop(self, prog):
        '''Evaluate a binary arithmetic expression. Operands are coerced with
        float_deal_with_none(); whole-number results are rendered without a
        decimal point (via modf).'''
        try:
            answer = self.ARITHMETIC_BINARY_OPS[prog.operator](
                self.float_deal_with_none(self.expr(prog.left)),
                self.float_deal_with_none(self.expr(prog.right)))
            res = str(answer if modf(answer)[0] != 0 else int(answer))
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Error during operator evaluation: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
    # Unary arithmetic operators.
    ARITHMETIC_UNARY_OPS = {
        '+': lambda x: x,
        '-': lambda x: -x,
    }
    def do_node_unary_arithop(self, prog):
        '''Evaluate unary +/-. Note: unlike the binary case the operand goes
        through plain float(), so an empty value raises here.'''
        try:
            expr = self.ARITHMETIC_UNARY_OPS[prog.operator](float(self.expr(prog.expr)))
            res = str(expr if modf(expr)[0] != 0 else int(expr))
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Error during operator evaluation: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
    def do_node_stringops(self, prog):
        '''Evaluate the string concatenation operator: left + right.'''
        try:
            res = self.expr(prog.left) + self.expr(prog.right)
            if (self.break_reporter):
                self.break_reporter(prog.node_name, res, prog.line_number)
            return res
        except (StopException, ValueError) as e:
            raise e
        except:
            self.error(_("Error during operator evaluation: "
                         "operator '{0}'").format(prog.operator), prog.line_number)
characters = {
'return': '\r',
'newline': '\n',
'tab': '\t',
'backslash': '\\',
}
def do_node_character(self, prog):
try:
key = self.expr(prog.expression)
ret = self.characters.get(key, None)
if ret is None:
self.error(_("Function {0}: invalid character name '{1}")
.format('character', key), prog.line_number)
if (self.break_reporter):
self.break_reporter(prog.node_name, ret, prog.line_number)
except (StopException, ValueError) as e:
raise e
return ret
    def do_node_print(self, prog):
        '''Implement print(...): evaluate all arguments, write the resulting
        Python list (its repr) to standard output, and return the first
        value, or '' if there are no arguments.'''
        res = []
        for arg in prog.arguments:
            res.append(self.expr(arg))
        print(res)
        return res[0] if res else ''
    # Dispatch table mapping AST node types to their evaluation methods.
    NODE_OPS = {
        Node.NODE_IF:             do_node_if,
        Node.NODE_ASSIGN:         do_node_assign,
        Node.NODE_CONSTANT:       do_node_constant,
        Node.NODE_RVALUE:         do_node_rvalue,
        Node.NODE_FUNC:           do_node_func,
        Node.NODE_FIELD:          do_node_field,
        Node.NODE_RAW_FIELD:      do_node_raw_field,
        Node.NODE_COMPARE_STRING: do_node_string_infix,
        Node.NODE_COMPARE_NUMERIC:do_node_numeric_infix,
        Node.NODE_ARGUMENTS:      do_node_arguments,
        Node.NODE_CALL_STORED_TEMPLATE: do_node_stored_template_call,
        Node.NODE_FIRST_NON_EMPTY:do_node_first_non_empty,
        Node.NODE_SWITCH:         do_node_switch,
        Node.NODE_SWITCH_IF:      do_node_switch_if,
        Node.NODE_FOR:            do_node_for,
        Node.NODE_RANGE:          do_node_range,
        Node.NODE_GLOBALS:        do_node_globals,
        Node.NODE_SET_GLOBALS:    do_node_set_globals,
        Node.NODE_CONTAINS:       do_node_contains,
        Node.NODE_BINARY_LOGOP:   do_node_logop,
        Node.NODE_UNARY_LOGOP:    do_node_logop_unary,
        Node.NODE_BINARY_ARITHOP: do_node_binary_arithop,
        Node.NODE_UNARY_ARITHOP:  do_node_unary_arithop,
        Node.NODE_PRINT:          do_node_print,
        Node.NODE_BREAK:          do_node_break,
        Node.NODE_CONTINUE:       do_node_continue,
        Node.NODE_RETURN:         do_node_return,
        Node.NODE_CHARACTER:      do_node_character,
        Node.NODE_STRCAT:         do_node_strcat,
        Node.NODE_BINARY_STRINGOP: do_node_stringops,
        Node.NODE_LOCAL_FUNCTION_DEFINE: do_node_local_function_define,
        Node.NODE_LOCAL_FUNCTION_CALL: do_node_local_function_call,
        Node.NODE_LIST_COUNT_FIELD: do_node_list_count_field,
        }
    def expr(self, prog):
        '''Evaluate one AST node (or a list of nodes) by dispatching through
        NODE_OPS; unexpected exceptions become interpreter errors.'''
        try:
            if isinstance(prog, list):
                return self.expression_list(prog)
            return self.NODE_OPS[prog.node_type](self, prog)
        except (ValueError, ExecutionBase, StopException) as e:
            raise e
        except Exception as e:
            if (DEBUG):
                traceback.print_exc()
            self.error(_("Internal error evaluating an expression: '{0}'").format(str(e)),
                       prog.line_number)
class TemplateFormatter(string.Formatter):
'''
Provides a format function that substitutes '' for any missing value
'''
_validation_string = 'This Is Some Text THAT SHOULD be LONG Enough.%^&*'
# Dict to do recursion detection. It is up to the individual get_value
# method to use it. It is cleared when starting to format a template
composite_values = {}
    def __init__(self):
        '''Initialize per-formatter state; book/kwargs/etc. are filled in
        when a template is actually evaluated.'''
        string.Formatter.__init__(self)
        self.book = None
        self.kwargs = None
        self.strip_results = True
        self.column_name = None
        self.template_cache = None
        self.global_vars = {}
        self.locals = {}
        self.funcs = formatter_functions().get_functions()
        self._interpreters = []
        self._template_parser = None
        self.recursion_stack = []
        self.recursion_level = -1
        self._caller = None
        self.python_context_object = None
    def _do_format(self, val, fmt):
        '''Apply a str.format() format spec to a string value, converting it
        to int/float first when the spec's type character requires it.'''
        if not fmt or not val:
            return val
        if val == self._validation_string:
            # validating a template: substitute a value that formats cleanly
            val = '0'
        typ = fmt[-1]
        if typ == 's':
            pass
        elif 'bcdoxXn'.find(typ) >= 0:
            # NOTE(review): 'n' appears in both type sets and is handled
            # here (as an integer) because this branch is tested first.
            try:
                val = int(val)
            except Exception:
                raise ValueError(
                    _('format: type {0} requires an integer value, got {1}').format(typ, val))
        elif 'eEfFgGn%'.find(typ) >= 0:
            try:
                val = float(val)
            except:
                raise ValueError(
                    _('format: type {0} requires a decimal (float) value, got {1}').format(typ, val))
        return str(('{0:'+fmt+'}').format(val))
    def _explode_format_string(self, fmt):
        '''Split a '{field:...|prefix|suffix}' style format into its
        (format, prefix, suffix) parts; on no match or error return the
        whole string as the format with empty prefix/suffix.'''
        try:
            matches = self.format_string_re.match(fmt)
            if matches is None or matches.lastindex != 3:
                return fmt, '', ''
            return matches.groups()
        except:
            if DEBUG:
                traceback.print_exc()
            return fmt, '', ''
    # format|prefix|suffix splitter used by _explode_format_string()
    format_string_re = re.compile(r'^(.*)\|([^\|]*)\|(.*)$', re.DOTALL)
    compress_spaces = re.compile(r'\s+')
    backslash_comma_to_comma = re.compile(r'\\,')
    # Splits a function argument list on unescaped commas up to ')'
    arg_parser = re.Scanner([
                (r',', lambda x,t: ''),
                (r'.*?((?<!\\),)', lambda x,t: t[:-1]),
                (r'.*?\)', lambda x,t: t[:-1]),
        ])
# ################# Template language lexical analyzer ######################
lex_scanner = re.Scanner([
(r'(==#|!=#|<=#|<#|>=#|>#)', lambda x,t: (_Parser.LEX_NUMERIC_INFIX, t)), # noqa
(r'(==|!=|<=|<|>=|>)', lambda x,t: (_Parser.LEX_STRING_INFIX, t)), # noqa
(r'(if|then|else|elif|fi)\b',lambda x,t: (_Parser.LEX_KEYWORD, t)), # noqa
(r'(for|in|rof|separator)\b',lambda x,t: (_Parser.LEX_KEYWORD, t)), # noqa
(r'(separator|limit)\b', lambda x,t: (_Parser.LEX_KEYWORD, t)), # noqa
(r'(def|fed|continue)\b', lambda x,t: (_Parser.LEX_KEYWORD, t)), # noqa
(r'(return|inlist|break)\b', lambda x,t: (_Parser.LEX_KEYWORD, t)), # noqa
(r'(inlist_field)\b', lambda x,t: (_Parser.LEX_KEYWORD, t)), # noqa
(r'(\|\||&&|!|{|})', lambda x,t: (_Parser.LEX_OP, t)), # noqa
(r'[(),=;:\+\-*/&]', lambda x,t: (_Parser.LEX_OP, t)), # noqa
(r'-?[\d\.]+', lambda x,t: (_Parser.LEX_CONST, t)), # noqa
(r'\$\$?#?\w+', lambda x,t: (_Parser.LEX_ID, t)), # noqa
(r'\$', lambda x,t: (_Parser.LEX_ID, t)), # noqa
(r'\w+', lambda x,t: (_Parser.LEX_ID, t)), # noqa
(r'".*?((?<!\\)")', lambda x,t: (_Parser.LEX_CONST, t[1:-1])), # noqa
(r'\'.*?((?<!\\)\')', lambda x,t: (_Parser.LEX_CONST, t[1:-1])), # noqa
(r'\n#.*?(?:(?=\n)|$)', lambda x,t: _Parser.LEX_NEWLINE), # noqa
(r'\s', lambda x,t: _Parser.LEX_NEWLINE if t == '\n' else None), # noqa
], flags=re.DOTALL)
    def _eval_program(self, val, prog, column_name, global_vars, break_reporter):
        '''Parse (with per-column caching) and run a GPM program, returning
        its result string.'''
        if column_name is not None and self.template_cache is not None:
            tree = self.template_cache.get(column_name, None)
            if not tree:
                tree = self.gpm_parser.program(self, self.funcs, self.lex_scanner.scan(prog))
                self.template_cache[column_name] = tree
        else:
            tree = self.gpm_parser.program(self, self.funcs, self.lex_scanner.scan(prog))
        return self.gpm_interpreter.program(self.funcs, self, tree, val,
                                            global_vars=global_vars, break_reporter=break_reporter)
    def _eval_sfm_call(self, template_name, args, global_vars):
        '''Call a stored template (GPM or Python) by name, compiling and
        caching its program text on first use.'''
        func = self.funcs[template_name]
        compiled_text = func.cached_compiled_text
        if func.object_type is StoredObjectType.StoredGPMTemplate:
            if compiled_text is None:
                # strip the leading 'program:' before parsing
                compiled_text = self.gpm_parser.program(self, self.funcs,
                           self.lex_scanner.scan(func.program_text[len('program:'):]))
                func.cached_compiled_text = compiled_text
            return self.gpm_interpreter.program(self.funcs, self, func, None,
                                                is_call=True, args=args,
                                                global_vars=global_vars)
        elif function_object_type(func) is StoredObjectType.StoredPythonTemplate:
            if compiled_text is None:
                # strip the leading 'python:' before compiling
                compiled_text = self.compile_python_template(func.program_text[len('python:'):])
                func.cached_compiled_text = compiled_text
            return self._run_python_template(compiled_text, args)
    def _eval_python_template(self, template, column_name):
        '''Compile and run a 'python:' template.

        Compiled functions are cached per column under a '::python'-suffixed
        key so they share the template_cache with GPM parse trees without
        colliding.
        '''
        if column_name is not None and self.template_cache is not None:
            func = self.template_cache.get(column_name + '::python', None)
            if not func:
                func = self.compile_python_template(template)
                self.template_cache[column_name + '::python'] = func
        else:
            func = self.compile_python_template(template)
        return self._run_python_template(func, arguments=None)
    def _run_python_template(self, compiled_template, arguments):
        '''Invoke a compiled python template, translating any failure into a
        ValueError that reports the function name and line number inside the
        template source. Returns the (string) template result.'''
        try:
            self.python_context_object.set_values(
                db=get_database(self.book, get_database(self.book, None)),
                # NOTE(review): the nested get_database call presumably
                # supplies the library name/path for the lookup -- confirm
                # against get_database's signature.
                globals=self.global_vars,
                arguments=arguments,
                formatter=self,
                funcs=self._caller)
            rslt = compiled_template(self.book, self.python_context_object)
        except StopException:
            # 'return'/stop from inside the template: propagate unchanged.
            raise
        except Exception as e:
            stack = traceback.extract_tb(exc_info()[2])
            ss = stack[-1]
            if getattr(e, 'is_internal', False):
                # Exception raised by FormatterFuncsCaller
                # get the line inside the current template instead of the FormatterFuncsCaller
                for s in reversed(stack):
                    if s.filename == '<string>':
                        # templates are exec'd from a string, hence '<string>'
                        ss = s
                        break
            raise ValueError(_('Error in function {0} on line {1} : {2} - {3}').format(
                ss.name, ss.lineno, type(e).__name__, str(e)))
        if not isinstance(rslt, str):
            raise ValueError(_('The Python template returned a non-string value: {!r}').format(rslt))
        return rslt
    def compile_python_template(self, template):
        '''Compile python template source and return its evaluate() function.

        Leading tabs are converted to spaces so mixed indentation from user
        input does not raise TabError. The template must define a function
        named ``evaluate``; a missing definition or a syntax error is
        reported as ValueError.
        '''
        def replace_func(mo):
            # Expand each tab in the leading whitespace run.
            return mo.group().replace('\t', ' ')

        prog ='\n'.join([re.sub(r'^\t*', replace_func, line)
                         for line in template.splitlines()])
        locals_ = {}
        if DEBUG and tweaks.get('enable_template_debug_printing', False):
            print(prog)
        try:
            # locals_ serves as the exec globals, so 'evaluate' lands in it.
            exec(prog, locals_)
            func = locals_['evaluate']
            return func
        except SyntaxError as e:
            raise ValueError(
                _('Syntax error on line {0} column {1}: text {2}').format(e.lineno, e.offset, e.text))
        except KeyError:
            raise ValueError(_("The {0} function is not defined in the template").format('evaluate'))
# ################# Override parent classes methods #####################
def get_value(self, key, args, kwargs):
raise Exception('get_value must be implemented in the subclass')
    def format_field(self, val, fmt):
        '''Apply the format specification ``fmt`` (display format, optional
        function call or inline GPM program, and |prefix|suffix| conditional
        text) to the field value ``val``; returns the formatted string.'''
        # ensure we are dealing with a string.
        if isinstance(val, numbers.Number):
            if val:
                val = str(val)
            else:
                # zero renders as empty so conditional text is suppressed
                val = ''
        # Handle conditional text
        fmt, prefix, suffix = self._explode_format_string(fmt)

        # Handle functions
        # First see if we have a functional-style expression
        if fmt.startswith('\''):
            p = 0
        else:
            p = fmt.find(':\'')
            if p >= 0:
                p += 1
        if p >= 0 and fmt[-1] == '\'':
            # Inline GPM program quoted in the format: run it on the value.
            val = self._eval_program(val, fmt[p+1:-1], None, self.global_vars, None)
            colon = fmt[0:p].find(':')
            if colon < 0:
                dispfmt = ''
            else:
                dispfmt = fmt[0:colon]
        else:
            # check for old-style function references
            p = fmt.find('(')
            dispfmt = fmt
            if p >= 0 and fmt[-1] == ')':
                colon = fmt[0:p].find(':')
                if colon < 0:
                    dispfmt = ''
                    colon = 0
                else:
                    dispfmt = fmt[0:colon]
                    colon += 1
                fname = fmt[colon:p].strip()
                if fname in self.funcs:
                    func = self.funcs[fname]
                    if func.arg_count == 2:
                        # only one arg expected. Don't bother to scan. Avoids need
                        # for escaping characters
                        args = [fmt[p+1:-1]]
                    else:
                        args = self.arg_parser.scan(fmt[p+1:])[0]
                        args = [self.backslash_comma_to_comma.sub(',', a) for a in args]
                    if func.object_type is not StoredObjectType.PythonFunction:
                        # Stored template: the field value becomes $ (arg 0).
                        args.insert(0, val)
                        val = self._eval_sfm_call(fname, args, self.global_vars)
                    else:
                        # Builtin/python function: validate the arg count
                        # (arg_count includes the implicit value argument).
                        if (func.arg_count == 1 and (len(args) != 1 or args[0])) or \
                                (func.arg_count > 1 and func.arg_count != len(args)+1):
                            raise ValueError(
                                _('Incorrect number of arguments for function {0}').format(fname))
                        if func.arg_count == 1:
                            val = func.eval_(self, self.kwargs, self.book, self.locals, val)
                            if self.strip_results:
                                val = val.strip()
                        else:
                            val = func.eval_(self, self.kwargs, self.book, self.locals, val, *args)
                            if self.strip_results:
                                val = val.strip()
                else:
                    return _('%s: unknown function')%fname
        if val:
            # Apply the remaining printf/Qt-style display format, if any.
            val = self._do_format(val, dispfmt)
        if not val:
            return ''
        return prefix + val + suffix
    def evaluate(self, fmt, args, kwargs, global_vars, break_reporter=None):
        '''Dispatch a template to the correct engine by its prefix:
        'program:' -> GPM, 'python:' -> python template, anything else ->
        classic single-function mode via string.Formatter.vformat().'''
        if fmt.startswith('program:'):
            ans = self._eval_program(kwargs.get('$', None), fmt[8:],
                                     self.column_name, global_vars, break_reporter)
        elif fmt.startswith('python:'):
            ans = self._eval_python_template(fmt[7:], self.column_name)
        else:
            ans = self.vformat(fmt, args, kwargs)
            if self.strip_results:
                # collapse runs of whitespace produced by template text
                ans = self.compress_spaces.sub(' ', ans)
        if self.strip_results:
            ans = ans.strip(' ')
        return ans
    # It is possible for a template to indirectly invoke other templates by
    # doing field references of composite columns. If this happens then the
    # reference can use different parameters when calling safe_format(). Because
    # the parameters are saved as instance variables they can possibly affect
    # the 'calling' template. To avoid this problem, save the current formatter
    # state when recursion is detected. Save state at level zero to be sure that
    # all class instance variables are restored to their base settings.

    def save_state(self):
        '''Snapshot the per-evaluation instance state and bump the recursion
        counter; the returned tuple is passed to restore_state().'''
        self.recursion_level += 1
        # Order must match the unpacking in restore_state().
        return (
            (self.strip_results,
             self.column_name,
             self.template_cache,
             self.kwargs,
             self.book,
             self.global_vars,
             self.funcs,
             self.locals,
             self._caller,
             self.python_context_object))
    def restore_state(self, state):
        '''Undo save_state(): decrement the recursion counter and restore the
        saved per-evaluation instance state. Raises ValueError if called with
        a state that was never saved.'''
        self.recursion_level -= 1
        if state is None:
            raise ValueError(_('Formatter state restored before saved'))
        # Order must match the tuple built in save_state().
        (self.strip_results,
         self.column_name,
         self.template_cache,
         self.kwargs,
         self.book,
         self.global_vars,
         self.funcs,
         self.locals,
         self._caller,
         self.python_context_object) = state
    # Allocate an interpreter if the formatter encounters a GPM or TPM template.
    # We need to allocate additional interpreters if there is composite recursion
    # so that the templates are evaluated by separate instances. It is OK to
    # reuse already-allocated interpreters because their state is initialized on
    # call. As a side effect, no interpreter is instantiated if no TPM/GPM
    # template is encountered.
    @property
    def gpm_interpreter(self):
        # One interpreter per active recursion level, allocated lazily.
        while len(self._interpreters) <= self.recursion_level:
            self._interpreters.append(_Interpreter())
        return self._interpreters[self.recursion_level]
    # Allocate a parser if needed. Parsers cannot recurse so one is sufficient.
    @property
    def gpm_parser(self):
        # Lazily created singleton parser shared by all evaluations.
        if self._template_parser is None:
            self._template_parser = _Parser()
        return self._template_parser
    # ######### a formatter that throws exceptions ############

    def unsafe_format(self, fmt, kwargs, book, strip_results=True, global_vars=None,
                      python_context_object=None):
        '''Evaluate template ``fmt`` against ``book``, letting any evaluation
        error propagate to the caller (contrast safe_format()).'''
        state = self.save_state()
        try:
            self._caller = FormatterFuncsCaller(self)
            self.strip_results = strip_results
            # No per-column parse-tree caching in this entry point.
            self.column_name = self.template_cache = None
            self.kwargs = kwargs
            self.book = book
            self.composite_values = {}
            self.locals = {}
            self.global_vars = global_vars if isinstance(global_vars, dict) else {}
            if isinstance(python_context_object, PythonTemplateContext):
                self.python_context_object = python_context_object
            else:
                self.python_context_object = PythonTemplateContext()
            return self.evaluate(fmt, [], kwargs, self.global_vars)
        finally:
            # Always pop the saved state, even on error.
            self.restore_state(state)
    # ######### a formatter guaranteed not to throw an exception ############

    def safe_format(self, fmt, kwargs, error_value, book,
                    column_name=None, template_cache=None,
                    strip_results=True, template_functions=None,
                    global_vars=None, break_reporter=None,
                    python_context_object=None):
        '''Evaluate template ``fmt`` against ``book``, never raising: any
        error is folded into the returned string, prefixed by
        ``error_value``.'''
        state = self.save_state()
        if self.recursion_level == 0:
            # Initialize the composite values dict if this is the base-level
            # call. Recursive calls will use the same dict.
            self.composite_values = {}
        try:
            self._caller = FormatterFuncsCaller(self)
            self.strip_results = strip_results
            self.column_name = column_name
            self.template_cache = template_cache
            self.kwargs = kwargs
            self.book = book
            self.global_vars = global_vars if isinstance(global_vars, dict) else {}
            if isinstance(python_context_object, PythonTemplateContext):
                self.python_context_object = python_context_object
            else:
                self.python_context_object = PythonTemplateContext()
            if template_functions:
                self.funcs = template_functions
            else:
                self.funcs = formatter_functions().get_functions()
            self.locals = {}
            try:
                ans = self.evaluate(fmt, [], kwargs, self.global_vars, break_reporter=break_reporter)
            except StopException as e:
                # Deliberate early termination from inside the template.
                ans = error_message(e)
            except Exception as e:
                if DEBUG:
                    if tweaks.get('show_stack_traces_in_formatter', True):
                        traceback.print_exc()
                    if column_name:
                        prints('Error evaluating column named:', column_name)
                ans = error_value + ' ' + error_message(e)
            return ans
        finally:
            self.restore_state(state)
class ValidateFormatter(TemplateFormatter):
    '''
    Provides a formatter that substitutes the validation string for every value
    '''

    def get_value(self, key, args, kwargs):
        # Every field lookup yields the same placeholder, so validate()
        # exercises only template syntax, never real metadata.
        return self._validation_string

    def validate(self, x):
        from calibre.ebooks.metadata.book.base import Metadata
        # safe_format never raises: problems appear in the returned string
        # prefixed with 'VALIDATE ERROR'.
        return self.safe_format(x, {}, 'VALIDATE ERROR', Metadata(''))


validation_formatter = ValidateFormatter()
class EvalFormatter(TemplateFormatter):
    '''
    A template formatter whose field references are resolved against a plain
    dict instead of a Metadata (mi) instance.
    '''

    def get_value(self, key, args, kwargs):
        # An empty key renders as empty text; otherwise do a
        # case-insensitive lookup in the supplied dict.
        if not key:
            return ''
        key = key.lower()
        return kwargs.get(key, _('No such variable {0}').format(key))


# DEPRECATED. This is not thread safe. Do not use.
eval_formatter = EvalFormatter()
| 82,497 | Python | .py | 1,808 | 33.746128 | 135 | 0.553924 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,159 | mreplace.py | kovidgoyal_calibre/src/calibre/utils/mreplace.py | # multiple replace from dictionary : http://code.activestate.com/recipes/81330/
__license__ = 'GPL v3'
__copyright__ = '2010, sengian <sengian1 @ gmail.com>'
__docformat__ = 'restructuredtext en'
import re
try:
from collections import UserDict
except ImportError:
from UserDict import UserDict
class MReplace(UserDict):
    '''Perform many string (or bytes) replacements in a single regex pass.

    The mapping's keys are the substrings to search for and the values their
    replacements. Pass case_sensitive=False to match keys regardless of case.
    '''

    def __init__(self, data=None, case_sensitive=True):
        UserDict.__init__(self, data)
        self.re = None      # pattern source of the currently compiled regex
        self.regex = None   # compiled alternation of all keys, or None
        self.case_sensitive = case_sensitive
        self.compile_regex()

    def compile_regex(self):
        '''(Re)build the alternation regex from the current keys.'''
        if len(self.data) > 0:
            # Longest keys first so a longer match wins over its prefixes.
            keys = sorted(self.data, key=len, reverse=True)
            if isinstance(keys[0], bytes):
                tmp = b"(%s)" % b"|".join(map(re.escape, keys))
            else:
                tmp = "(%s)" % "|".join(map(re.escape, keys))
            if self.re != tmp:
                self.re = tmp
                if self.case_sensitive:
                    self.regex = re.compile(self.re)
                else:
                    self.regex = re.compile(self.re, re.I)

    def __call__(self, mo):
        # Replacement callable passed to re.sub() in mreplace().
        key = mo.group(0)
        try:
            return self[key]
        except KeyError:
            if self.case_sensitive:
                raise
            # Case-insensitive match: the matched text can differ in case
            # from the stored key, which used to raise KeyError here. Fall
            # back to a case-folded lookup.
            lmap = {k.lower(): v for k, v in self.data.items()}
            return lmap[key.lower()]

    def mreplace(self, text):
        '''Return text with all replacements applied (no-op when empty).'''
        if len(self.data) < 1 or self.re is None:
            return text
        return self.regex.sub(self, text)
| 1,364 | Python | .py | 36 | 28.75 | 79 | 0.572403 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,160 | text2int.py | kovidgoyal_calibre/src/calibre/utils/text2int.py | #!/usr/bin/env python
__author__ = "stackoverflow community"
__docformat__ = 'restructuredtext en'
"""
Takes english numeric words and converts them to integers.
Returns False if the word isn't a number.
implementation courtesy of the stackoverflow community:
http://stackoverflow.com/questions/493174/is-there-a-way-to-convert-number-words-to-integers-python
"""
import re
# word -> (scale, increment) table, built lazily on first use
numwords = {}


def text2int(textnum):
    '''Convert an English number phrase (e.g. "one hundred and five") to an
    int. Ordinals ("first", "twentieth") are understood. Returns False when
    any word is not a recognized number word.'''
    if not numwords:
        units = ["zero", "one", "two", "three", "four", "five", "six",
                 "seven", "eight", "nine", "ten", "eleven", "twelve",
                 "thirteen", "fourteen", "fifteen", "sixteen", "seventeen",
                 "eighteen", "nineteen"]
        tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty",
                "seventy", "eighty", "ninety"]
        scales = ["hundred", "thousand", "million", "billion", "trillion",
                  'quadrillion', 'quintillion', 'sexillion', 'septillion',
                  'octillion', 'nonillion', 'decillion']
        numwords["and"] = (1, 0)
        numwords.update((word, (1, i)) for i, word in enumerate(units))
        numwords.update((word, (1, i * 10)) for i, word in enumerate(tens))
        # 'hundred' scales by 100, each later scale word by a power of 1000
        numwords.update((word, (10 ** (i * 3 or 2), 0)) for i, word in enumerate(scales))

    ordinal_words = {'first':1, 'second':2, 'third':3, 'fifth':5,
                     'eighth':8, 'ninth':9, 'twelfth':12}
    ordinal_endings = [('ieth', 'y'), ('th', '')]

    total = current = 0
    for word in re.split(r"[\s-]+", textnum):
        if word in ordinal_words:
            scale, increment = 1, ordinal_words[word]
        else:
            # Normalize regular ordinals: 'twentieth' -> 'twenty', 'fourth' -> 'four'
            for suffix, replacement in ordinal_endings:
                if word.endswith(suffix):
                    word = word[:-len(suffix)] + replacement
            if word not in numwords:
                # raise Exception("Illegal word: " + word)
                return False
            scale, increment = numwords[word]

        if scale > 1:
            # A bare scale word ('hundred') implies a leading one.
            current = max(1, current)
        current = current * scale + increment
        if scale > 100:
            # 'thousand' and above close out the current group.
            total += current
            current = 0

    return total + current
| 2,229 | Python | .py | 52 | 33.461538 | 99 | 0.561111 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,161 | date.py | kovidgoyal_calibre/src/calibre/utils/date.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from datetime import MAXYEAR, MINYEAR, datetime, timedelta
from datetime import time as dtime
from functools import partial
from calibre import strftime
from calibre.constants import ismacos, iswindows, preferred_encoding
from calibre.utils.iso8601 import UNDEFINED_DATE, local_tz, utc_tz
from calibre.utils.localization import lcdata
from polyglot.builtins import native_string_type
_utc_tz = utc_tz
_local_tz = local_tz
# When parsing ambiguous dates that could be either dd-MM Or MM-dd use the
# user's locale preferences
if iswindows:
    # Ask Windows for the user's short date format and see whether the day
    # token precedes the month token.
    import ctypes
    LOCALE_SSHORTDATE, LOCALE_USER_DEFAULT = 0x1f, 0
    buf = ctypes.create_string_buffer(b'\0', 255)
    try:
        ctypes.windll.kernel32.GetLocaleInfoA(LOCALE_USER_DEFAULT, LOCALE_SSHORTDATE, buf, 255)
        parse_date_day_first = buf.value.index(b'd') < buf.value.index(b'M')
    except:
        parse_date_day_first = False
    del ctypes, LOCALE_SSHORTDATE, buf, LOCALE_USER_DEFAULT
elif ismacos:
    # macOS: the native extension reports the system date format pattern.
    try:
        from calibre_extensions.usbobserver import date_format
        date_fmt = date_format()
        parse_date_day_first = date_fmt.index('d') < date_fmt.index('M')
    except:
        parse_date_day_first = False
else:
    # Other Unix: inspect the C locale's D_FMT strftime template. Day may
    # appear as %d/%a/%A and month as %m/%b/%B; compare whichever occurs.
    try:
        def first_index(raw, queries):
            for q in queries:
                try:
                    return raw.index(native_string_type(q))
                except ValueError:
                    pass
            return -1

        import locale
        raw = locale.nl_langinfo(locale.D_FMT)
        parse_date_day_first = first_index(raw, ('%d', '%a', '%A')) < first_index(raw, ('%m', '%b', '%B'))
        del raw, first_index
    except:
        parse_date_day_first = False

# Sentinel dates used throughout calibre.
DEFAULT_DATE = datetime(2000,1,1, tzinfo=utc_tz)
EPOCH = datetime(1970, 1, 1, tzinfo=_utc_tz)
def is_date_undefined(qt_or_dt):
    '''Return True if the given datetime / QDate / QDateTime (or None)
    represents calibre's "undefined date" sentinel or precedes it.'''
    d = qt_or_dt
    if d is None:
        return True
    if hasattr(d, 'toString'):
        # Qt object: QDateTime has .date(), QDate does not.
        if hasattr(d, 'date'):
            d = d.date()
        try:
            d = datetime(d.year(), d.month(), d.day(), tzinfo=utc_tz)
        except ValueError:
            return True  # Undefined QDate
    # Anything earlier than the sentinel also counts as undefined.
    return d.year < UNDEFINED_DATE.year or (
            d.year == UNDEFINED_DATE.year and
            d.month == UNDEFINED_DATE.month and
            d.day == UNDEFINED_DATE.day)
# cached regex, compiled on first use
_iso_pat = None


def iso_pat():
    '''Return the (lazily compiled, cached) regex that matches numeric
    yyyy-mm-dd style date prefixes with -, . or / as separator.'''
    global _iso_pat
    pat = _iso_pat
    if pat is None:
        pat = _iso_pat = re.compile(r'\d{4}[/.-]\d{1,2}[/.-]\d{1,2}')
    return pat
def parse_date(date_string, assume_utc=False, as_utc=True, default=None):
    '''
    Parse a date/time string into a timezone aware datetime object. The timezone
    is always either UTC or the local timezone.

    :param assume_utc: If True and date_string does not specify a timezone,
    assume UTC, otherwise assume local timezone.

    :param as_utc: If True, return a UTC datetime

    :param default: Missing fields are filled in from default. If None, the
    current month and year are used.
    '''
    from dateutil.parser import parse
    if not date_string:
        return UNDEFINED_DATE
    if isinstance(date_string, bytes):
        date_string = date_string.decode(preferred_encoding, 'replace')
    if default is None:
        func = utcnow if assume_utc else now
        # day=15 keeps month/year stable under later timezone shifts
        default = func().replace(day=15, hour=0, minute=0, second=0, microsecond=0,
                tzinfo=_utc_tz if assume_utc else _local_tz)
    if iso_pat().match(date_string) is not None:
        # ISO-ish yyyy-mm-dd: unambiguous, ignore the locale day-first pref.
        dt = parse(date_string, default=default)
    else:
        dt = parse(date_string, default=default, dayfirst=parse_date_day_first)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=_utc_tz if assume_utc else _local_tz)
    return dt.astimezone(_utc_tz if as_utc else _local_tz)
def fix_only_date(val):
n = val + timedelta(days=1)
if n.month > val.month:
val = val.replace(day=val.day-1)
if val.day == 1:
val = val.replace(day=2)
return val
def parse_only_date(raw, assume_utc=True, as_utc=True):
    '''
    Parse a date string that contains no time information in a manner that
    guarantees that the month and year are always correct in all timezones, and
    the day is at most one day wrong.
    '''
    f = utcnow if assume_utc else now
    # day=15 keeps the month stable if the string omits the day entirely.
    default = f().replace(hour=0, minute=0, second=0, microsecond=0,
            day=15)
    return fix_only_date(parse_date(raw, default=default, assume_utc=assume_utc, as_utc=as_utc))
def strptime(val, fmt, assume_utc=False, as_utc=True):
    '''Like datetime.strptime() but always returns a timezone aware value
    (naive results are given the assumed zone, then converted).'''
    dt = datetime.strptime(val, fmt)
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=_utc_tz if assume_utc else _local_tz)
    return dt.astimezone(_utc_tz if as_utc else _local_tz)


def dt_factory(time_t, assume_utc=False, as_utc=True):
    '''Build a timezone aware datetime from the first six fields of a
    time.struct_time-like sequence (year..second).'''
    dt = datetime(*(time_t[0:6]))
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=_utc_tz if assume_utc else _local_tz)
    return dt.astimezone(_utc_tz if as_utc else _local_tz)
def safeyear(x):
    '''Clamp x to the year range datetime supports (MINYEAR..MAXYEAR).'''
    return max(MINYEAR, min(x, MAXYEAR))
def qt_to_dt(qdate_or_qdatetime, as_utc=True):
    '''Convert a QDate or QDateTime to a timezone aware python datetime
    (UTC or local). Undefined Qt dates map to UNDEFINED_DATE.'''
    from qt.core import QDateTime, Qt
    o = qdate_or_qdatetime
    if o is None or is_date_undefined(qdate_or_qdatetime):
        return UNDEFINED_DATE
    if hasattr(o, 'toUTC'):  # QDateTime
        def c(o: QDateTime, tz=utc_tz):
            d, t = o.date(), o.time()
            try:
                return datetime(safeyear(d.year()), d.month(), d.day(), t.hour(), t.minute(), t.second(), t.msec()*1000, tz)
            except ValueError:
                # day out of range after clamping the year: fall back to day 1
                return datetime(safeyear(d.year()), d.month(), 1, t.hour(), t.minute(), t.second(), t.msec()*1000, tz)
        # DST causes differences in how python and Qt convert automatically from local to UTC, so convert explicitly ourselves
        # QDateTime::toUTC() and datetime.astimezone(utc_tz) give
        # different results for datetimes in the local_tz when DST is involved. Sigh.
        spec = o.timeSpec()
        if spec == Qt.TimeSpec.LocalTime:
            ans = c(o, local_tz)
        elif spec == Qt.TimeSpec.UTC:
            ans = c(o, utc_tz)
        else:
            # Other specs (offset/timezone): let Qt normalize to UTC first.
            ans = c(o.toUTC(), utc_tz)
        return ans.astimezone(utc_tz if as_utc else local_tz)

    # QDate: no time component; treat midnight local time.
    try:
        dt = datetime(safeyear(o.year()), o.month(), o.day()).replace(tzinfo=_local_tz)
    except ValueError:
        dt = datetime(safeyear(o.year()), o.month(), 1).replace(tzinfo=_local_tz)
    return dt.astimezone(_utc_tz if as_utc else _local_tz)
def qt_from_dt(d: datetime, assume_utc=False):
    '''Convert a python datetime to a QDateTime in local time (suitable for
    display/editing). Undefined dates map to UNDEFINED_QDATETIME.'''
    from qt.core import QDate, QDateTime, QTime
    if is_date_undefined(d):
        from calibre.gui2 import UNDEFINED_QDATETIME
        return UNDEFINED_QDATETIME
    if d.tzinfo is None:
        d = d.replace(tzinfo=utc_tz if assume_utc else local_tz)
    d = d.astimezone(local_tz)
    # not setting a time zone means this QDateTime has timeSpec() ==
    # LocalTime which is what we want for display/editing.
    ans = QDateTime(QDate(d.year, d.month, d.day), QTime(d.hour, d.minute, d.second, int(d.microsecond / 1000)))
    return ans
def fromtimestamp(ctime, as_utc=True):
    '''Unix timestamp -> timezone aware datetime (UTC or local).'''
    return datetime.fromtimestamp(ctime, _utc_tz if as_utc else _local_tz)


def fromordinal(day, as_utc=True):
    '''Proleptic Gregorian ordinal -> timezone aware datetime.'''
    return datetime.fromordinal(day).replace(
            tzinfo=_utc_tz if as_utc else _local_tz)


def isoformat(date_time, assume_utc=False, as_utc=True, sep='T'):
    '''Render date_time in ISO-8601 form, converting to UTC or local time
    first; naive values are assumed UTC/local per assume_utc.'''
    if not hasattr(date_time, 'tzinfo'):
        # A date object: no timezone handling possible or needed.
        return str(date_time.isoformat())
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=_utc_tz if assume_utc else
                _local_tz)
    date_time = date_time.astimezone(_utc_tz if as_utc else _local_tz)
    # native_string_type(sep) because isoformat barfs with unicode sep on python 2.x
    return str(date_time.isoformat(native_string_type(sep)))


def internal_iso_format_string():
    '''Qt date format string matching isoformat() output (to seconds).'''
    return 'yyyy-MM-ddThh:mm:ss'
def w3cdtf(date_time, assume_utc=False):
    '''Return date_time as a W3C-DTF timestamp string (yyyy-mm-ddThh:mm:ssZ).

    Naive datetimes are assumed to be in UTC when assume_utc is True,
    otherwise in local time, before conversion to UTC.
    '''
    if hasattr(date_time, 'tzinfo'):
        if date_time.tzinfo is None:
            date_time = date_time.replace(tzinfo=_utc_tz if assume_utc else
                    _local_tz)
        # The output format hard-codes a trailing 'Z', so the value must be
        # in UTC. The previous code tested the module-level *function*
        # as_utc (always truthy) instead of a real flag, which happened to
        # always select UTC -- make that explicit.
        date_time = date_time.astimezone(_utc_tz)
    return str(date_time.strftime('%Y-%m-%dT%H:%M:%SZ'))
def as_local_time(date_time, assume_utc=True):
    '''Convert date_time to local time; naive values are first assumed to
    be UTC (assume_utc) or local. Plain dates pass through unchanged.'''
    if not hasattr(date_time, 'tzinfo'):
        return date_time
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=_utc_tz if assume_utc else
                _local_tz)
    return date_time.astimezone(_local_tz)


def dt_as_local(dt):
    '''Convert an aware datetime to local time (fast path when already local).'''
    if dt.tzinfo is local_tz:
        return dt
    return dt.astimezone(local_tz)


def as_utc(date_time, assume_utc=True):
    '''Convert date_time to UTC; naive values are first assumed to be UTC
    (assume_utc) or local. Plain dates pass through unchanged.'''
    if not hasattr(date_time, 'tzinfo'):
        return date_time
    if date_time.tzinfo is None:
        date_time = date_time.replace(tzinfo=_utc_tz if assume_utc else
                _local_tz)
    return date_time.astimezone(_utc_tz)


def now():
    '''Current time as an aware datetime in the local timezone.'''
    return datetime.now(_local_tz)


def utcnow():
    '''Current time as an aware datetime in UTC.'''
    return datetime.now(_utc_tz)
def utcfromtimestamp(stamp):
    '''Unix timestamp -> aware UTC datetime, robust against out-of-range
    values (falls back to EPOCH arithmetic, then to utcnow()).'''
    try:
        return datetime.fromtimestamp(stamp, _utc_tz)
    except Exception:
        # Raised if stamp is out of range for the platforms gmtime function
        # For example, this happens with negative values on windows
        try:
            return EPOCH + timedelta(seconds=stamp)
        except Exception:
            # datetime can only represent years between 1 and 9999
            import traceback
            traceback.print_exc()
            return utcnow()


def timestampfromdt(dt, assume_utc=True):
    '''Aware (or naive, see assume_utc) datetime -> Unix timestamp (float).'''
    return (as_utc(dt, assume_utc=assume_utc) - EPOCH).total_seconds()
# Format date functions {{{

def fd_format_hour(dt, ampm, hr):
    '''Format dt.hour for the 'h'/'hh' codes; 12-hour clock when ampm is
    true, zero-padded when the code is two characters long.'''
    h = dt.hour
    if ampm:
        # 12-hour clock: hours run 12, 1..11. The old `h % 12` rendered
        # both midnight and noon as '0'; map 0 -> 12 instead.
        h = h % 12 or 12
    if len(hr) == 1:
        return '%d'%h
    return '%02d'%h
def fd_format_minute(dt, ampm, min):
    '''Format dt.minute for the 'm'/'mm' codes (ampm is unused).'''
    return ('%d' if len(min) == 1 else '%02d') % dt.minute


def fd_format_second(dt, ampm, sec):
    '''Format dt.second for the 's'/'ss' codes (ampm is unused).'''
    return ('%d' if len(sec) == 1 else '%02d') % dt.second
def fd_format_ampm(dt, ampm, ap):
    '''Render the locale AM/PM marker: uppercase for the 'AP' code,
    lowercase for 'ap'.'''
    res = strftime('%p', t=dt.timetuple())
    if ap == 'AP':
        return res
    return res.lower()
def fd_format_day(dt, ampm, dy):
    '''Format the day: numeric for 'd'/'dd', locale weekday name
    (abbreviated/full) for 'ddd'/'dddd'.'''
    n = len(dy)
    if n < 3:
        return ('%d' if n == 1 else '%02d') % dt.day
    # lcdata weeks start on Sunday while Python's weekday() starts Monday,
    # hence the +1 rotation.
    key = 'abday' if n == 3 else 'day'
    return lcdata[key][(dt.weekday() + 1) % 7]
def fd_format_month(dt, ampm, mo):
    '''Format the month: numeric for 'M'/'MM', locale month name
    (abbreviated/full) for 'MMM'/'MMMM'.'''
    n = len(mo)
    if n < 3:
        return ('%d' if n == 1 else '%02d') % dt.month
    key = 'abmon' if n == 3 else 'mon'
    return lcdata[key][dt.month - 1]
def fd_format_year(dt, ampm, yr):
    '''Format the year: two digits for 'yy', four digits otherwise.'''
    return '%02d' % (dt.year % 100) if len(yr) == 2 else '%04d' % dt.year
# Dispatch table mapping the first character of a format code to its
# formatter function.
fd_function_index = {
    'd': fd_format_day,
    'M': fd_format_month,
    'y': fd_format_year,
    'h': fd_format_hour,
    'm': fd_format_minute,
    's': fd_format_second,
    'a': fd_format_ampm,
    'A': fd_format_ampm,
}


def fd_repl_func(dt, ampm, mo):
    '''re.sub() callback for format_date(): replace one matched format code
    with its rendered value.'''
    s = mo.group(0)
    if not s:
        return ''
    return fd_function_index[s[0]](dt, ampm, s)
def format_date(dt, format, assume_utc=False, as_utc=False):
    ''' Return a date formatted as a string using a subset of Qt's formatting codes '''
    if not format:
        format = 'dd MMM yyyy'
    if not isinstance(dt, datetime):
        # plain date: promote to midnight so time codes still work
        dt = datetime.combine(dt, dtime())
    if hasattr(dt, 'tzinfo'):
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=_utc_tz if assume_utc else
                    _local_tz)
        dt = dt.astimezone(_utc_tz if as_utc else _local_tz)
    if format == 'iso':
        return isoformat(dt, assume_utc=assume_utc, as_utc=as_utc)
    if dt == UNDEFINED_DATE:
        return ''
    # 12-hour rendering is triggered by the presence of an ap/AP code.
    repl_func = partial(fd_repl_func, dt, 'ap' in format.lower())
    return re.sub(
        '(s{1,2})|(m{1,2})|(h{1,2})|(ap)|(AP)|(d{1,4}|M{1,4}|(?:yyyy|yy))',
        repl_func, format)
# }}}
# }}}
# Clean date functions {{{
# Each cd_has_* records the corresponding field of dt in the tt dict and
# returns '' (they are used as re.sub callbacks whose output is discarded).

def cd_has_hour(tt, dt):
    tt['hour'] = dt.hour
    return ''


def cd_has_minute(tt, dt):
    tt['min'] = dt.minute
    return ''


def cd_has_second(tt, dt):
    tt['sec'] = dt.second
    return ''


def cd_has_day(tt, dt):
    tt['day'] = dt.day
    return ''


def cd_has_month(tt, dt):
    tt['mon'] = dt.month
    return ''


def cd_has_year(tt, dt):
    tt['year'] = dt.year
    return ''


# First character of a format code -> recorder function.
cd_function_index = {
    'd': cd_has_day,
    'M': cd_has_month,
    'y': cd_has_year,
    'h': cd_has_hour,
    'm': cd_has_minute,
    's': cd_has_second
}


def cd_repl_func(tt, dt, match_object):
    '''re.sub() callback for clean_date_for_sort(): note which fields
    appear in the format string.'''
    s = match_object.group(0)
    if not s:
        return ''
    return cd_function_index[s[0]](tt, dt)
def clean_date_for_sort(dt, fmt=None):
    ''' Return dt with fields not in shown in format set to a default '''
    if not fmt:
        fmt = 'yyMd'
    if not isinstance(dt, datetime):
        dt = datetime.combine(dt, dtime())
    if hasattr(dt, 'tzinfo'):
        if dt.tzinfo is None:
            dt = dt.replace(tzinfo=_local_tz)
        dt = as_local_time(dt)
    if fmt == 'iso':
        fmt = 'yyMdhms'
    # Start with all fields at the UNDEFINED_DATE defaults; the re.sub scan
    # below overwrites those whose codes are present in fmt.
    tt = {'year':UNDEFINED_DATE.year, 'mon':UNDEFINED_DATE.month,
          'day':UNDEFINED_DATE.day, 'hour':UNDEFINED_DATE.hour,
          'min':UNDEFINED_DATE.minute, 'sec':UNDEFINED_DATE.second}
    repl_func = partial(cd_repl_func, tt, dt)
    # The substitution result is discarded; only tt's side effects matter.
    re.sub('(s{1,2})|(m{1,2})|(h{1,2})|(d{1,4}|M{1,4}|(?:yyyy|yy))', repl_func, fmt)
    return dt.replace(year=tt['year'], month=tt['mon'], day=tt['day'], hour=tt['hour'],
                      minute=tt['min'], second=tt['sec'], microsecond=0)
# }}}
# }}}
def replace_months(datestr, clang):
    '''Replace a French ('fr') or German ('de') month name in datestr with
    its English abbreviation so dateutil can parse it. Other language codes
    return datestr unchanged. Only the first pattern that changes the
    string is applied.'''
    frtoen = {
        '[jJ]anvier': 'jan',
        '[fF].vrier': 'feb',
        '[mM]ars': 'mar',
        '[aA]vril': 'apr',
        '[mM]ai': 'may',
        '[jJ]uin': 'jun',
        '[jJ]uillet': 'jul',
        '[aA]o.t': 'aug',
        '[sS]eptembre': 'sep',
        '[Oo]ctobre': 'oct',
        '[nN]ovembre': 'nov',
        '[dD].cembre': 'dec'}
    detoen = {
        '[jJ]anuar': 'jan',
        '[fF]ebruar': 'feb',
        '[mM].rz': 'mar',
        '[aA]pril': 'apr',
        '[mM]ai': 'may',
        '[jJ]uni': 'jun',
        '[jJ]uli': 'jul',
        '[aA]ugust': 'aug',
        '[sS]eptember': 'sep',
        '[Oo]ktober': 'oct',
        '[nN]ovember': 'nov',
        '[dD]ezember': 'dec'}

    if clang == 'fr':
        table = frtoen
    elif clang == 'de':
        table = detoen
    else:
        return datestr

    for pattern, abbrev in table.items():
        candidate = re.sub(pattern, abbrev, datestr)
        if candidate != datestr:
            return candidate
    return datestr
| 14,819 | Python | .py | 395 | 30.683544 | 126 | 0.613277 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,162 | unsmarten.py | kovidgoyal_calibre/src/calibre/utils/unsmarten.py | __license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from calibre.utils.mreplace import MReplace
# Map of "smart" typographic characters to their plain-ASCII equivalents.
# Keys appear both as raw unicode characters and (where the source listed
# them) as HTML-entity spellings so either form is replaced.
_mreplace = MReplace({
    '–': '--',
    '–': '--',
    '–': '--',
    '—': '---',
    '—': '---',
    '—': '---',
    '…': '...',
    '…': '...',
    '…': '...',
    '“': '"',
    '”': '"',
    '„': '"',
    '″': '"',
    '“': '"',
    '”': '"',
    '„': '"',
    '″': '"',
    '“':'"',
    '”':'"',
    '„':'"',
    '″':'"',
    '‘':"'",
    '’':"'",
    '′':"'",
    '‘':"'",
    '’':"'",
    '′':"'",
    '‘':"'",
    '’':"'",
    '′':"'",
})

# Public API: unsmarten_text(text) -> text with smart punctuation replaced.
unsmarten_text = _mreplace.mreplace
| 913 | Python | .py | 37 | 16.567568 | 60 | 0.298945 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,163 | __init__.py | kovidgoyal_calibre/src/calibre/utils/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Miscelleaneous utilities.
'''
from time import time
from polyglot.builtins import as_bytes
def join_with_timeout(q, timeout=2):
    ''' Join the queue q with a specified timeout. Blocks until all tasks on
    the queue are done or times out with a runtime error. '''
    deadline = time() + timeout
    q.all_tasks_done.acquire()
    try:
        while q.unfinished_tasks:
            remaining = deadline - time()
            if remaining <= 0.0:
                raise RuntimeError('Waiting for queue to clear timed out')
            q.all_tasks_done.wait(remaining)
    finally:
        q.all_tasks_done.release()
def unpickle_binary_string(data):
    '''Extract the byte payload from a pickle-protocol-2 dump of a python2
    str (BINSTRING or SHORT_BINSTRING opcode). Returns None when data is
    not in that format.'''
    import struct
    PROTO, SHORT_BINSTRING, BINSTRING = b'\x80', b'U', b'T'
    if not data.startswith(PROTO + b'\x02'):
        return None
    opcode = data[2:3]
    if opcode == BINSTRING:
        # 4-byte little-endian length follows the opcode.
        length, = struct.unpack_from('<i', data, 3)
        start = 3 + struct.calcsize('<i')
    elif opcode == SHORT_BINSTRING:
        # Single-byte length follows the opcode.
        length = ord(data[3:4])
        start = 4
    else:
        return None
    return data[start:start + length]
def pickle_binary_string(data):
    '''Serialize data as a pickle-protocol-2 BINSTRING record (the python2
    str pickle format understood by unpickle_binary_string).'''
    import struct
    PROTO, STOP, BINSTRING = b'\x80', b'.', b'T'
    # Inline equivalent of polyglot.builtins.as_bytes:
    if isinstance(data, str):
        data = data.encode('utf-8')
    elif isinstance(data, bytearray):
        data = bytes(data)
    elif isinstance(data, memoryview):
        data = data.tobytes()
    elif not isinstance(data, bytes):
        data = str(data).encode('utf-8')
    return PROTO + b'\x02' + BINSTRING + struct.pack(b'<i', len(data)) + data + STOP
| 1,681 | Python | .py | 45 | 30.6 | 84 | 0.631385 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,164 | tdir_in_cache.py | kovidgoyal_calibre/src/calibre/utils/tdir_in_cache.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
import atexit
import errno
import os
import tempfile
import time
from calibre.constants import cache_dir, iswindows
from calibre.ptempfile import remove_dir
from calibre.utils.monotonic import monotonic
TDIR_LOCK = 'tdir-lock'
if iswindows:
    from calibre.utils.lock import windows_open

    def lock_tdir(path):
        # Holding the open handle on the lock file is the lock itself on
        # windows (files cannot be deleted/reopened while open).
        return windows_open(os.path.join(path, TDIR_LOCK))

    def unlock_file(fobj):
        fobj.close()

    def remove_tdir(path, lock_file):
        lock_file.close()
        remove_dir(path)

    def is_tdir_locked(path):
        # If the lock file can be opened, nobody holds it.
        try:
            with windows_open(os.path.join(path, TDIR_LOCK)):
                pass
        except OSError:
            return True
        return False
else:
    import fcntl

    from calibre.utils.ipc import eintr_retry_call

    def lock_tdir(path):
        # POSIX: an exclusive, non-blocking fcntl lock on the lock file.
        lf = os.path.join(path, TDIR_LOCK)
        f = open(lf, 'w')
        eintr_retry_call(fcntl.lockf, f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        return f

    def unlock_file(fobj):
        from calibre.utils.ipc import eintr_retry_call
        eintr_retry_call(fcntl.lockf, fobj.fileno(), fcntl.LOCK_UN)
        fobj.close()

    def remove_tdir(path, lock_file):
        lock_file.close()
        remove_dir(path)

    def is_tdir_locked(path):
        # Probe by trying to take (then immediately release) the lock.
        lf = os.path.join(path, TDIR_LOCK)
        f = open(lf, 'w')
        try:
            eintr_retry_call(fcntl.lockf, f.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            eintr_retry_call(fcntl.lockf, f.fileno(), fcntl.LOCK_UN)
            return False
        except OSError:
            return True
        finally:
            f.close()
def tdirs_in(b):
    '''Yield the full paths of the immediate subdirectories of b. A missing
    base directory yields nothing; other listing errors propagate.'''
    try:
        entries = os.listdir(b)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        entries = ()
    for name in entries:
        candidate = os.path.join(b, name)
        if os.path.isdir(candidate):
            yield candidate
def clean_tdirs_in(b):
    # Remove any stale tdirs left by previous program crashes
    # (a tdir whose lock file is not held belongs to a dead process).
    for q in tdirs_in(b):
        if not is_tdir_locked(q):
            remove_dir(q)
def retry_lock_tdir(path, timeout=30, sleep=0.1):
    '''Repeatedly attempt lock_tdir(path) until it succeeds; once timeout
    seconds have elapsed the last locking error is re-raised.'''
    deadline = monotonic() + timeout
    while True:
        try:
            return lock_tdir(path)
        except Exception:
            if monotonic() > deadline:
                raise
            time.sleep(sleep)
def tdir_in_cache(base):
    ''' Create a temp dir inside cache_dir/base. The created dir is robust
    against application crashes. i.e. it will be cleaned up the next time the
    application starts, even if it was left behind by a previous crash. '''
    b = os.path.join(os.path.realpath(cache_dir()), base)
    try:
        os.makedirs(b)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    # Serialize scanning/creation across processes via the base-dir lock.
    global_lock = retry_lock_tdir(b)
    try:
        if b not in tdir_in_cache.scanned:
            # Only sweep stale dirs once per base dir per process.
            tdir_in_cache.scanned.add(b)
            try:
                clean_tdirs_in(b)
            except Exception:
                import traceback
                traceback.print_exc()
        tdir = tempfile.mkdtemp(dir=b)
        # Hold a lock on the new tdir for the process lifetime so other
        # processes see it as live; remove it (and the lock) at exit.
        lock_data = lock_tdir(tdir)
        atexit.register(remove_tdir, tdir, lock_data)
        # Hand out a subdirectory so the lock file itself stays out of the
        # caller's way.
        tdir = os.path.join(tdir, 'a')
        os.mkdir(tdir)
        return tdir
    finally:
        unlock_file(global_lock)


# Per-process record of base dirs already swept for stale tdirs.
tdir_in_cache.scanned = set()
27,165 | mdns.py | kovidgoyal_calibre/src/calibre/utils/mdns.py | __license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import atexit
import socket
import time
from collections import defaultdict
from threading import Thread
from calibre import force_unicode
from calibre.utils.filenames import ascii_text
_server = None
_all_ip_addresses = {}
class AllIpAddressesGetter(Thread):
    '''Background thread that snapshots the IPv4 configuration of every
    network interface into the module-level _all_ip_addresses dict.'''

    def get_all_ips(self):
        ''' Return a mapping of interface names to the configuration of the
        interface, which includes the ip address, netmask and broadcast addresses
        '''
        import netifaces
        all_ips = defaultdict(list)
        if hasattr(netifaces, 'AF_INET'):
            for x in netifaces.interfaces():
                try:
                    for c in netifaces.ifaddresses(x).get(netifaces.AF_INET, []):
                        all_ips[x].append(c)
                except ValueError:
                    from calibre import prints
                    prints('Failed to get IP addresses for interface', x)
                    import traceback
                    traceback.print_exc()
        return dict(all_ips)

    def run(self):
        global _all_ip_addresses
        # print 'sleeping'
        # time.sleep(15)
        # print 'slept'
        _all_ip_addresses = self.get_all_ips()
_ip_address_getter_thread = None
def get_all_ips(reinitialize=False):
    '''Return the most recently collected interface -> IPv4 configuration
    map, (re)starting the background collection thread when necessary. May
    return an empty dict until the worker thread has finished.'''
    global _all_ip_addresses, _ip_address_getter_thread
    worker = _ip_address_getter_thread
    if not worker or (reinitialize and not worker.is_alive()):
        _all_ip_addresses = {}
        worker = AllIpAddressesGetter()
        worker.daemon = True
        _ip_address_getter_thread = worker
        worker.start()
    return _all_ip_addresses
def _get_external_ip():
    'Get IP address of interface used to connect to the outside world'
    try:
        ipaddr = socket.gethostbyname(socket.gethostname())
    except Exception:
        ipaddr = '127.0.0.1'
    if ipaddr.startswith('127.'):
        # The hostname resolved to loopback, so probe for the outgoing
        # interface: connect() on a UDP socket sends no packets, but makes
        # the kernel choose a source address. The first two probe targets are
        # TEST-NET addresses (RFC 5737); the hostname is a last resort.
        for addr in ('192.0.2.0', '198.51.100.0', 'google.com'):
            try:
                # Use a context manager so the socket is always closed (the
                # original leaked the file descriptor). Also catch Exception
                # instead of a bare except so KeyboardInterrupt still works.
                with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
                    s.connect((addr, 0))
                    ipaddr = s.getsockname()[0]
                if not ipaddr.startswith('127.'):
                    break
            except Exception:
                # No route or DNS failure; wait briefly and try the next one
                time.sleep(0.3)
    return ipaddr
_ext_ip = None
def verify_ip_address(addr: str) -> str:
    '''Return *addr* if it parses as a concrete IPv4 or IPv6 address,
    otherwise the empty string. The wildcard addresses 0.0.0.0 and :: are
    rejected; IPv4 addresses must be full dotted quads.'''
    if addr in ('0.0.0.0', '::'):
        return ''
    try:
        socket.inet_pton(socket.AF_INET6, addr)
    except Exception:
        pass
    else:
        return addr
    try:
        socket.inet_pton(socket.AF_INET, addr)
    except Exception:
        return ''
    return addr if len(addr.split('.')) == 4 else ''
def get_external_ip():
    '''Return (and cache in a module global) the best guess for this
    machine's externally visible IP address, preferring the source address
    of the default route.'''
    global _ext_ip
    if _ext_ip is None:
        from calibre.utils.ip_routing import get_default_route_src_address
        try:
            _ext_ip = get_default_route_src_address() or _get_external_ip()
        except Exception:
            # Route inspection can fail (e.g. unsupported platform); fall
            # back to hostname/UDP-probe based detection
            _ext_ip = _get_external_ip()
    return _ext_ip
def start_server():
    '''Return the global Zeroconf instance, creating it on first use and
    registering its shutdown via atexit.'''
    global _server
    if _server is None:
        from zeroconf import Zeroconf
        try:
            _server = Zeroconf()
        except Exception:
            # Creation can fail transiently (e.g. network not yet up); retry
            # once after a short delay
            time.sleep(1)
            _server = Zeroconf()
        atexit.register(stop_server)
    return _server
def inet_aton(addr):
    '''Convert a textual IPv6 or IPv4 address to its packed binary form.
    IPv6 is tried first; on failure the address is parsed as IPv4, whose
    errors propagate to the caller.'''
    try:
        return socket.inet_pton(socket.AF_INET6, addr)
    except Exception:
        # Not valid IPv6 -- fall back to IPv4. Catch Exception instead of the
        # original bare except so KeyboardInterrupt/SystemExit are not eaten.
        return socket.inet_pton(socket.AF_INET, addr)
def create_service(desc, service_type, port, properties, add_hostname, use_ip_address=None):
    '''Build a zeroconf ServiceInfo advertising *service_type* on *port*.

    :param desc: Human readable description, used as the service name
    :param properties: Optional mapping placed in the TXT record
    :param add_hostname: If True, append " (on <hostname> port <port>)" to desc
    :param use_ip_address: Advertise this IP instead of the auto-detected one
    '''
    port = int(port)
    try:
        hostname = ascii_text(force_unicode(socket.gethostname())).partition('.')[0]
    except Exception:
        hostname = 'Unknown'
    if add_hostname:
        # Bare excepts replaced with Exception: these guards exist for
        # pathological hostname/formatting failures only
        try:
            desc += ' (on %s port %d)'%(hostname, port)
        except Exception:
            try:
                desc += ' (on %s)'%hostname
            except Exception:
                pass
    local_ip = use_ip_address if use_ip_address else get_external_ip()
    if not local_ip:
        raise ValueError('Failed to determine local IP address to advertise via BonJour')
    service_type = service_type+'.local.'
    service_name = desc + '.' + service_type
    server_name = hostname+'.local.'
    from zeroconf import ServiceInfo
    return ServiceInfo(
        service_type, service_name,
        addresses=[inet_aton(local_ip),],
        port=port,
        properties=properties,
        server=server_name)
def publish(desc, service_type, port, properties=None, add_hostname=True, use_ip_address=None, strict=True):
    '''
    Publish a service.

    :param desc: Description of service
    :param service_type: Name and type of service. For example _stanza._tcp
    :param port: Port the service listens on
    :param properties: An optional dictionary whose keys and values will be put
                       into the TXT record.
    :param add_hostname: If True, the local hostname and port are appended to desc
    :param use_ip_address: Advertise this IP address instead of the detected one
    :param strict: Passed through to Zeroconf service registration
    :return: The registered ServiceInfo, usable with unpublish()
    '''
    server = start_server()
    service = create_service(desc, service_type, port, properties, add_hostname,
                             use_ip_address)
    server.register_service(service, strict=strict)
    return service
def unpublish(desc, service_type, port, properties=None, add_hostname=True, wait_for_stop=True):
    '''
    Unpublish a service.

    The parameters must be the same as used in the corresponding call to publish
    '''
    server = start_server()
    service = create_service(desc, service_type, port, properties, add_hostname)
    num_services = len(server.registry.async_get_service_infos())
    server.unregister_service(service)
    if num_services < 2:
        # That was the last registered service, shut the Zeroconf machinery
        # down completely
        stop_server(wait_for_stop=wait_for_stop)
def stop_server(wait_for_stop=True):
    '''Shut down the global Zeroconf instance, if any. Closing happens on a
    daemon thread; *wait_for_stop* may be True (wait indefinitely), a timeout
    in seconds, or falsy to return immediately.'''
    global _server
    srv, _server = _server, None
    if srv is None:
        return
    closer = Thread(target=srv.close)
    closer.daemon = True
    closer.start()
    if wait_for_stop:
        timeout = None if wait_for_stop is True else wait_for_stop
        closer.join(timeout)
| 6,239 | Python | .py | 174 | 27.224138 | 108 | 0.603883 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,166 | localization.py | kovidgoyal_calibre/src/calibre/utils/localization.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
import io
import locale
import os
import re
from gettext import GNUTranslations, NullTranslations
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems
# Cache for available_translations(), computed once per process
_available_translations = None


def available_translations():
    '''Return the list of language codes for which calibre ships a usable
    translation, i.e. those whose completion statistics exceed 10%.'''
    global _available_translations
    if _available_translations is None:
        stats = P('localization/stats.calibre_msgpack', allow_user_override=False)
        if os.path.exists(stats):
            from calibre.utils.serialize import msgpack_loads
            with open(stats, 'rb') as f:
                stats = msgpack_loads(f.read())
        else:
            stats = {}
        # Only offer translations that are at least 10% complete
        _available_translations = [x for x in stats if stats[x] > 0.1]
    return _available_translations
default_envvars_for_langcode = ('LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES', 'LANG')


def getlangcode_from_envvars(envvars=default_envvars_for_langcode):
    '''Return the language code from the first set locale environment
    variable, checked in POSIX priority order. LANGUAGE may contain a colon
    separated priority list, of which only the first entry is used. Returns
    None for the "C" locale (or when nothing is set).'''
    localename = 'C'
    for var in envvars:
        value = os.environ.get(var)
        if value:
            localename = value.split(':')[0] if var == 'LANGUAGE' else value
            break
    return locale._parse_localename(localename)[0]
def get_system_locale():
    '''Return the operating system locale as a string like "en_US", or None
    when it cannot be determined. Native APIs are used on Windows and macOS,
    environment variables elsewhere (and as fallback).'''
    from calibre.constants import ismacos, iswindows
    lang = None
    if iswindows:
        try:
            from calibre.constants import get_windows_user_locale_name
            lang = get_windows_user_locale_name()
            lang = lang.strip()
            if not lang:
                lang = None
        except:
            pass # Windows XP does not have the GetUserDefaultLocaleName fn
    elif ismacos:
        from calibre_extensions.usbobserver import user_locale
        try:
            lang = user_locale() or None
        except Exception:
            # Fallback to environment vars if something bad happened
            import traceback
            traceback.print_exc()
    if lang is None:
        try:
            lang = getlangcode_from_envvars()
            # lang is None in two cases: either the environment variable is not
            # set or it's "C". Stop looking for a language in the latter case.
            if lang is None:
                for var in default_envvars_for_langcode:
                    if os.environ.get(var) == 'C':
                        lang = 'en_US'
                        break
        except:
            pass  # This happens on Ubuntu apparently
    if lang is None and 'LANG' in os.environ:  # Needed for OS X
        try:
            lang = os.environ['LANG']
        except:
            pass
    if lang:
        # Normalize to ll_CC form: dashes become underscores and any trailing
        # encoding/modifier components are dropped
        lang = lang.replace('-', '_')
        lang = '_'.join(lang.split('_')[:2])
    return lang
def sanitize_lang(lang):
    '''Normalize a locale string to a bare "ll" / "lll" / "ll_CC" language
    code, mapping bare zh to zh_CN and falling back to English when nothing
    usable remains.'''
    if lang:
        m = re.match('[a-z]{2,3}(_[A-Z]{2}){0,1}', lang)
        if m is not None:
            lang = m.group()
    if lang == 'zh':
        lang = 'zh_CN'
    return lang or 'en'
def get_lang():
    'Try to figure out what language to display the interface in'
    from calibre.utils.config_base import prefs
    lang = prefs['language']
    # The CALIBRE_OVERRIDE_LANG environment variable trumps both the saved
    # preference and the system locale (useful for testing translations)
    lang = os.environ.get('CALIBRE_OVERRIDE_LANG', lang)
    if lang:
        return lang
    try:
        lang = get_system_locale()
    except:
        import traceback
        traceback.print_exc()
        lang = None
    return sanitize_lang(lang)
def is_rtl():
    'True iff the current UI language is written right-to-left'
    code = get_lang()[:2].lower()
    return code in ('he', 'ar')
def get_lc_messages_path(lang):
    '''Return the language code under which translated message catalogs are
    stored for *lang* (exact match preferred, then the bare language part),
    or None when no translation is available.'''
    if not zf_exists():
        return None
    available = available_translations()
    if lang in available:
        return lang
    base = lang.split('_')[0].lower()
    if base in available:
        return base
    return None
def zf_exists():
    # True iff the zip archive of compiled translation catalogs is present
    return os.path.exists(P('localization/locales.zip',
                allow_user_override=False))


# Translators for ISO 639 language names and ISO 3166 country names in the
# current UI language, populated by set_translators()
_lang_trans = _country_trans = None
def get_all_translators():
    '''Yield (language code, GNUTranslations) pairs for every translation
    bundled in locales.zip.'''
    from zipfile import ZipFile
    with ZipFile(P('localization/locales.zip', allow_user_override=False), 'r') as zf:
        for lang in available_translations():
            mpath = get_lc_messages_path(lang)
            if mpath is not None:
                buf = io.BytesIO(zf.read(mpath + '/messages.mo'))
                yield lang, GNUTranslations(buf)
def get_single_translator(mpath, which='messages'):
    '''Load and return the GNUTranslations catalog named *which* for the
    language directory *mpath* from the bundled locales.zip. Raises
    ValueError (with size and SHA1 diagnostics) if the catalog is corrupt.'''
    from zipfile import ZipFile
    path = f'{mpath}/{which}.mo'
    with ZipFile(P('localization/locales.zip', allow_user_override=False), 'r') as zf:
        data = zf.read(path)
    try:
        return GNUTranslations(io.BytesIO(data))
    except Exception as e:
        import hashlib
        import traceback
        traceback.print_exc()
        sig = hashlib.sha1(data).hexdigest()
        raise ValueError('Failed to load translations for: {} (size: {} and signature: {}) with error: {}'.format(
            path, len(data), sig, e))
def get_iso639_translator(lang):
    '''Return a translator for ISO 639 language names in *lang*, or None
    when no such translation is available.'''
    code = sanitize_lang(lang)
    mpath = get_lc_messages_path(code) if code else None
    return get_single_translator(mpath, 'iso639') if mpath else None
def get_translator(bcp_47_code):
    '''Return (found, effective language code, translator) for a BCP 47
    language code, falling back first to the bare language, then to the
    current UI language, and finally to English.'''
    parts = bcp_47_code.replace('-', '_').split('_')[:2]
    parts[0] = lang_as_iso639_1(parts[0].lower()) or 'en'
    if len(parts) > 1:
        parts[1] = parts[1].upper()
    lang = '_'.join(parts)
    # Map bare pt/zh to their most common concrete variants
    lang = {'pt':'pt_BR', 'zh':'zh_CN'}.get(lang, lang)
    available = available_translations()
    found = True
    if lang == 'en' or lang.startswith('en_'):
        return found, lang, NullTranslations()
    if lang not in available:
        lang = {'pt':'pt_BR', 'zh':'zh_CN'}.get(parts[0], parts[0])
        if lang not in available:
            lang = get_lang()
            if lang not in available:
                lang = 'en'
            found = False
    if lang == 'en':
        return True, lang, NullTranslations()
    return found, lang, get_single_translator(lang)
# Fallback (US English) locale conventions for date/number formatting, used
# when no lcdata is bundled for the UI language (see translator_for_lang and
# set_translators, which replace this with localized data when available)
lcdata = {
    'abday': ('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'),
    'abmon': ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'),
    'd_fmt': '%m/%d/%Y',
    'd_t_fmt': '%a %d %b %Y %r %Z',
    'day': ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'),
    'mon': ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'),
    'noexpr': '^[nN].*',
    'radixchar': '.',
    't_fmt': '%r',
    't_fmt_ampm': '%I:%M:%S %p',
    'thousep': ',',
    'yesexpr': '^[yY].*'
}
def load_po(path):
    '''Compile the .po file at *path* to .mo format in memory, returning a
    BytesIO of the result, or None if compilation fails.'''
    from calibre.translations.msgfmt import make
    out = io.BytesIO()
    try:
        make(path, out)
    except Exception:
        print(('Failed to compile translations file: %s, ignoring') % path)
        return None
    return io.BytesIO(out.getvalue())
def translator_for_lang(lang):
    '''Build the full set of translation resources for *lang*. Returns a dict
    with keys: translator (never None, falls back to NullTranslations),
    iso639_translator, iso3166_translator (either may be None) and lcdata
    (locale formatting data, or None).'''
    t = buf = iso639 = iso3166 = lcdata = None
    # A .po file given via CALIBRE_TEST_TRANSLATION overrides everything
    if 'CALIBRE_TEST_TRANSLATION' in os.environ:
        buf = load_po(os.path.expanduser(os.environ['CALIBRE_TEST_TRANSLATION']))
    mpath = get_lc_messages_path(lang)
    # A loose <code>.po next to the cwd also overrides the bundled catalog
    if buf is None and mpath and os.access(mpath + '.po', os.R_OK):
        buf = load_po(mpath + '.po')
    if mpath is not None:
        from zipfile import ZipFile
        with ZipFile(P('localization/locales.zip',
            allow_user_override=False), 'r') as zf:
            if buf is None:
                buf = io.BytesIO(zf.read(mpath + '/messages.mo'))
            if mpath == 'nds':
                # nds has no catalogs of its own for the data below; the
                # German ones are used instead
                mpath = 'de'
            isof = mpath + '/iso639.mo'
            try:
                iso639 = io.BytesIO(zf.read(isof))
            except:
                pass  # No iso639 translations for this lang
            isof = mpath + '/iso3166.mo'
            try:
                iso3166 = io.BytesIO(zf.read(isof))
            except:
                pass  # No iso3166 translations for this lang
            if buf is not None:
                from calibre.utils.serialize import msgpack_loads
                try:
                    lcdata = msgpack_loads(zf.read(mpath + '/lcdata.calibre_msgpack'))
                except:
                    pass  # No lcdata
    if buf is not None:
        try:
            t = GNUTranslations(buf)
        except Exception:
            import traceback
            traceback.print_exc()
            t = None
        # Chain the ISO 639/3166 catalogs as fallbacks of the main one
        if iso639 is not None:
            try:
                iso639 = GNUTranslations(iso639)
            except Exception:
                iso639 = None
            else:
                if t is not None:
                    t.add_fallback(iso639)
        if iso3166 is not None:
            try:
                iso3166 = GNUTranslations(iso3166)
            except Exception:
                iso3166 = None
            else:
                if t is not None:
                    t.add_fallback(iso3166)
    if t is None:
        t = NullTranslations()
    return {'translator': t, 'iso639_translator': iso639, 'iso3166_translator': iso3166, 'lcdata': lcdata}
# The currently active translator; replaced by set_translators()
default_translator = NullTranslations()


def _(x: str) -> str:
    'Translate *x* using the currently active translator'
    return default_translator.gettext(x)


def __(x: str) -> str:
    'Mark *x* for translation extraction without translating it at runtime'
    return x


def ngettext(singular: str, plural: str, n: int) -> str:
    'Plural-aware translation using the currently active translator'
    return default_translator.ngettext(singular, plural, n)


def pgettext(context: str, msg: str) -> str:
    'Context-disambiguated translation using the currently active translator'
    return default_translator.pgettext(context, msg)
def set_translators():
    '''Install the translator for the current UI language as the process-wide
    default, along with the companion ISO 639/3166 catalogs and locale data.'''
    global _lang_trans, _country_trans, lcdata, default_translator
    # To test different translations invoke as
    # CALIBRE_OVERRIDE_LANG=de_DE.utf8 program
    lang = get_lang()
    if lang:
        q = translator_for_lang(lang)
        default_translator = q['translator']
        _lang_trans = q['iso639_translator']
        _country_trans = q['iso3166_translator']
        if q['lcdata']:
            lcdata = q['lcdata']
    else:
        default_translator = NullTranslations()
    try:
        set_translators.lang = default_translator.info().get('language')
    except Exception:
        pass
    default_translator.install(names=('ngettext',))
    # Now that we have installed a translator, we have to retranslate the help
    # for the global prefs object as it was instantiated in get_lang(), before
    # the translator was installed.
    from calibre.utils.config_base import prefs
    prefs.retranslate_help()


# Language code of the installed translator, set by set_translators()
set_translators.lang = None
# Cached ISO 639 data, loaded lazily by _load_iso639()
_iso639 = None

# Language names for codes not covered (or not named suitably) by the ISO 639
# data. Keys are canonical; _lcase_map below allows case insensitive lookup.
_extra_lang_codes = {
        'pt_BR' : _('Brazilian Portuguese'),
        'zh_CN' : _('Simplified Chinese'),
        'zh_TW' : _('Traditional Chinese'),
        'bn_IN' : _('Indian Bengali'),
        'bn_BD' : _('Bangladeshi Bengali'),
        'en' : _('English'),
        'und' : _('Unknown')
}

if False:
    # Extra strings needed for Qt
    # This block is never executed; it exists only so these strings are
    # picked up by the translation template extractor.

    # NOTE: Ante Meridian (i.e. like 10:00 AM)
    _('AM')
    # NOTE: Post Meridian (i.e. like 10:00 PM)
    _('PM')
    # NOTE: Ante Meridian (i.e. like 10:00 am)
    _('am')
    # NOTE: Post Meridian (i.e. like 10:00 pm)
    _('pm')
    _('&Copy')
    _('Select All')
    _('Copy Link')
    _('&Select All')
    _('Copy &Link Location')
    _('&Undo')
    _('&Redo')
    _('Cu&t')
    _('&Paste')
    _('Paste and Match Style')
    _('Directions')
    _('Left to Right')
    _('Right to Left')
    _('Fonts')
    _('&Step up')
    _('Step &down')
    _('Close without Saving')
    _('Close Tab')
    _('Ukraine')

# Case insensitive lookup of the canonical form of the extra lang codes
_lcase_map = {}
for k in _extra_lang_codes:
    _lcase_map[k.lower()] = k
def _load_iso639():
    '''Load (and cache in a module global) the ISO 639 language code lookup
    tables bundled with calibre.'''
    global _iso639
    if _iso639 is None:
        ip = P('localization/iso639.calibre_msgpack', allow_user_override=False, data=True)
        from calibre.utils.serialize import msgpack_loads
        _iso639 = msgpack_loads(ip)
        if 'by_3' not in _iso639:
            # Older data files only ship the terminology code table
            _iso639['by_3'] = _iso639['by_3t']
    return _iso639
def load_iso3166():
    '''Load (and memoize on the function object) the ISO 3166 country code
    data bundled with calibre.'''
    ans = getattr(load_iso3166, 'ans', None)
    if ans is None:
        from calibre.utils.serialize import msgpack_loads
        ans = load_iso3166.ans = msgpack_loads(P('localization/iso3166.calibre_msgpack', allow_user_override=False, data=True))
    return ans
def get_iso_language(lang_trans, lang):
    '''Map a raw language code to its English ISO 639 name and translate it
    with the *lang_trans* callable; unrecognized codes pass through as-is.'''
    iso639 = _load_iso639()
    code = lang.split('_')[0].lower()
    name = lang
    if len(code) == 2:
        name = iso639['by_2'].get(code, name)
    elif len(code) == 3:
        name = iso639['by_3'].get(code, name)
    return lang_trans(name)
def get_language(lang, gettext_func=None):
    '''Return the language name for code *lang*, translated into the current
    UI language (or via *gettext_func* when supplied).'''
    translate = gettext_func or _
    lang = _lcase_map.get(lang, lang)
    if lang in _extra_lang_codes:
        # The translator was not active when _extra_lang_codes was defined, so
        # re-translate
        return translate(_extra_lang_codes[lang])
    if gettext_func is None:
        # Prefer the dedicated ISO 639 catalog when one is loaded
        gettext_func = getattr(_lang_trans, 'gettext', translate)
    return get_iso_language(gettext_func, lang)
def calibre_langcode_to_name(lc, localize=True):
    '''Return the (optionally localized) language name for a 3 letter
    ISO 639 code, or the code itself when it is unknown.'''
    iso639 = _load_iso639()
    translate = _ if localize else lambda x: x
    try:
        return translate(iso639['by_3'][lc])
    except Exception:
        # Catch Exception instead of the original bare except so that
        # KeyboardInterrupt/SystemExit are not swallowed; unknown codes
        # simply fall through to returning the code unchanged
        pass
    return lc
def countrycode_to_name(cc, localize=True):
    '''Return the (optionally localized) country name for a 2 or 3 letter
    ISO 3166 code, or the code itself when it is unknown.'''
    iso3166 = load_iso3166()
    q = cc.upper()
    if len(q) == 3:
        # Convert alpha-3 to the alpha-2 code that keys the names table
        q = iso3166['three_map'].get(q, q)
    try:
        name = iso3166['names'][q]
    except Exception:
        if q == 'UK':
            # NOTE(review): UK is mapped to Ukraine here, although ISO 3166
            # reserves UK for the United Kingdom -- presumably deliberate for
            # some upstream data source; confirm before relying on it
            name = 'Ukraine'
        else:
            return cc
    translate = _ if localize else lambda x: x
    try:
        return translate(name)
    except Exception:
        return name
def canonicalize_lang(raw):
    '''Convert a freeform language specification (2/3 letter code or English
    name, possibly with a country/encoding suffix) into a canonical 3 letter
    ISO 639 code, or None when it cannot be recognized.'''
    if not raw:
        return None
    if not isinstance(raw, str):
        raw = raw.decode('utf-8', 'ignore')
    raw = raw.lower().strip()
    if not raw:
        return None
    # Keep only the bare language part: en-US / en_US -> en
    raw = raw.replace('_', '-').partition('-')[0].strip()
    if not raw:
        return None
    iso639 = _load_iso639()
    if len(raw) == 2:
        code = iso639['2to3'].get(raw)
        if code is not None:
            return code
    elif len(raw) == 3 and raw in iso639['by_3']:
        return raw
    # Fall back to matching by English language name
    return iso639['name_map'].get(raw, None)
# Cache for lang_map(), built once per process
_lang_map = None


def lang_map():
    ' Return mapping of ISO 639 3 letter codes to localized language names '
    iso639 = _load_iso639()
    translate = _
    global _lang_map
    if _lang_map is None:
        # Built once; assumes the installed translator does not change later
        _lang_map = {k:translate(v) for k, v in iteritems(iso639['by_3'])}
    return _lang_map


def lang_map_for_ui():
    # Same as lang_map() minus the special purpose codes that make no sense
    # in a language selection UI (no linguistic content, uncoded, multiple)
    ans = getattr(lang_map_for_ui, 'ans', None)
    if ans is None:
        ans = lang_map().copy()
        for x in ('zxx', 'mis', 'mul'):
            ans.pop(x, None)
        lang_map_for_ui.ans = ans
    return ans


def reverse_lang_map_for_ui():
    # Inverse of lang_map_for_ui(): localized name -> 3 letter code
    ans = getattr(reverse_lang_map_for_ui, 'ans', None)
    if ans is None:
        ans = reverse_lang_map_for_ui.ans = {v: k for k, v in lang_map_for_ui().items()}
    return ans
def langnames_to_langcodes(names):
    '''
    Given a list of localized language names return a mapping of the names to 3
    letter ISO 639 language codes. If a name is not recognized, it is mapped to
    None.
    '''
    remaining = set(names)
    ans = {}
    for code, english_name in iteritems(_load_iso639()['by_3']):
        if not remaining:
            break
        localized = _(english_name)
        if localized in remaining:
            remaining.discard(localized)
            ans[localized] = code
    # Anything left over was not recognized
    ans.update(dict.fromkeys(remaining))
    return ans
def lang_as_iso639_1(name_or_code):
    '''Return the 2 letter ISO 639-1 code for *name_or_code* (a code or
    language name), or None when it cannot be determined.'''
    code = canonicalize_lang(name_or_code)
    if code is None:
        return None
    return _load_iso639()['3to2'].get(code, None)
# Lazily constructed Unihandecoder instance, see get_udc()
_udc = None


def get_udc():
    '''Return the global Unihandecoder used to transliterate Unicode text to
    ASCII, creating it for the current UI language on first use.'''
    global _udc
    if _udc is None:
        from calibre.ebooks.unihandecode import Unihandecoder
        _udc = Unihandecoder(lang=get_lang())
    return _udc


def user_manual_stats():
    '''Return the mapping of language code to User Manual translation
    completeness, cached on the function object.'''
    stats = getattr(user_manual_stats, 'stats', None)
    if stats is None:
        import json
        try:
            stats = json.loads(P('user-manual-translation-stats.json', allow_user_override=False, data=True))
        except OSError:
            stats = {}
        user_manual_stats.stats = stats
    return stats
def lang_code_for_user_manual():
    '''Return the language code to use for User Manual links, or the empty
    string when the English manual should be used (either the UI language is
    English or its manual translation is less than 30% complete).'''
    lc = lang_as_iso639_1(get_lang())
    if lc == 'en' or user_manual_stats().get(lc, 0) < 0.3:
        return ''
    return lc
def localize_user_manual_link(url):
    '''Rewrite *url* to point at the translated User Manual when a
    sufficiently complete translation exists for the current UI language.'''
    lc = lang_code_for_user_manual()
    if not lc:
        return url
    from polyglot.urllib import urlparse, urlunparse
    parsed = urlparse(url)
    new_path = re.sub(r'/generated/[a-z]+/', '/generated/%s/' % lc, parsed.path or '')
    parts = list(parsed)
    parts[2] = f'/{lc}{new_path}'
    return urlunparse(parts)
def website_languages():
    '''Return the frozenset of language codes the calibre website is
    translated into, cached on the function object.'''
    stats = getattr(website_languages, 'stats', None)
    if stats is None:
        try:
            stats = frozenset(P('localization/website-languages.txt', allow_user_override=False, data=True).decode('utf-8').split())
        except OSError:
            stats = frozenset()
        website_languages.stats = stats
    return stats
def localize_website_link(url):
    '''Rewrite *url* to the translated version of the calibre website, when
    one exists for the current UI language.'''
    lc = lang_as_iso639_1(get_lang())
    if lc == 'en' or lc not in website_languages():
        return url
    from polyglot.urllib import urlparse, urlunparse
    parsed = urlparse(url)
    parts = list(parsed)
    parts[2] = f'/{lc}{parsed.path}'
    return urlunparse(parts)
| 17,518 | Python | .py | 505 | 27.005941 | 134 | 0.588844 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,167 | search_query_parser_test.py | kovidgoyal_calibre/src/calibre/utils/search_query_parser_test.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
import operator
import unittest
from calibre.utils.search_query_parser import Parser, SearchQueryParser
class Tester(SearchQueryParser):
texts = {
1: ['Eugenie Grandet', 'Honor\xe9 de Balzac', 'manybooks.net', 'lrf'],
2: ['Fanny Hill', 'John Cleland', 'manybooks.net', 'lrf'],
3: ['Persuasion', 'Jane Austen', 'manybooks.net', 'lrf'],
4: ['Psmith, Journalist', 'P. G. Wodehouse', 'Some Publisher', 'lrf'],
5: ['The Complete Works of William Shakespeare',
'William Shakespeare',
'manybooks.net',
'lrf'],
6: ['The History of England, Volume I',
'David Hume',
'manybooks.net',
'lrf'],
7: ['Someone Comes to Town, Someone Leaves Town',
'Cory Doctorow',
'Tor Books',
'lrf'],
8: ['Stalky and Co.', 'Rudyard Kipling', 'manybooks.net', 'lrf'],
9: ['A Game of Thrones', 'George R. R. Martin', None, 'lrf,rar'],
10: ['A Clash of Kings', 'George R. R. Martin', None, 'lrf,rar'],
11: ['A Storm of Swords', 'George R. R. Martin', None, 'lrf,rar'],
12: ['Biggles - Pioneer Air Fighter', 'W. E. Johns', None, 'lrf,rtf'],
13: ['Biggles of the Camel Squadron',
'W. E. Johns',
'London:Thames, (1977)',
'lrf,rtf'],
14: ['A Feast for Crows', 'George R. R. Martin', None, 'lrf,rar'],
15: ['Cryptonomicon', 'Neal Stephenson', None, 'lrf,rar'],
16: ['Quicksilver', 'Neal Stephenson', None, 'lrf,zip'],
17: ['The Comedies of William Shakespeare',
'William Shakespeare',
None,
'lrf'],
18: ['The Histories of William Shakespeare',
'William Shakespeare',
None,
'lrf'],
19: ['The Tragedies of William Shakespeare',
'William Shakespeare',
None,
'lrf'],
20: ['An Ideal Husband', 'Oscar Wilde', 'manybooks.net', 'lrf'],
21: ['Flight of the Nighthawks', 'Raymond E. Feist', None, 'lrf,rar'],
22: ['Into a Dark Realm', 'Raymond E. Feist', None, 'lrf,rar'],
23: ['The Sundering', 'Walter Jon Williams', None, 'lrf,rar'],
24: ['The Praxis', 'Walter Jon Williams', None, 'lrf,rar'],
25: ['Conventions of War', 'Walter Jon Williams', None, 'lrf,rar'],
26: ['Banewreaker', 'Jacqueline Carey', None, 'lrf,rar'],
27: ['Godslayer', 'Jacqueline Carey', None, 'lrf,rar'],
28: ["Kushiel's Scion", 'Jacqueline Carey', None, 'lrf,rar'],
29: ['Underworld', 'Don DeLillo', None, 'lrf,rar'],
30: ['Genghis Khan and The Making of the Modern World',
'Jack Weatherford Orc',
'Three Rivers Press',
'lrf,zip'],
31: ['The Best and the Brightest',
'David Halberstam',
'Modern Library',
'lrf,zip'],
32: ['The Killer Angels', 'Michael Shaara', None, 'html,lrf'],
33: ['Band Of Brothers', 'Stephen E Ambrose', None, 'lrf,txt'],
34: ['The Gates of Rome', 'Conn Iggulden', None, 'lrf,rar'],
35: ['The Death of Kings', 'Conn Iggulden', 'Bantam Dell', 'lit,lrf'],
36: ['The Field of Swords', 'Conn Iggulden', None, 'lrf,rar'],
37: ['Masterman Ready', 'Marryat, Captain Frederick', None, 'lrf'],
38: ['With the Lightnings',
'David Drake',
'Baen Publishing Enterprises',
'lit,lrf'],
39: ['Lt. Leary, Commanding',
'David Drake',
'Baen Publishing Enterprises',
'lit,lrf'],
40: ['The Far Side of The Stars',
'David Drake',
'Baen Publishing Enterprises',
'lrf,rar'],
41: ['The Way to Glory',
'David Drake',
'Baen Publishing Enterprises',
'lrf,rar'],
42: ['Some Golden Harbor', 'David Drake', 'Baen Books', 'lrf,rar'],
43: ['Harry Potter And The Half-Blood Prince',
'J. K. Rowling',
None,
'lrf,rar'],
44: ['Harry Potter and the Order of the Phoenix',
'J. K. Rowling',
None,
'lrf,rtf'],
45: ['The Stars at War', 'David Weber , Steve White', None, 'lrf,rtf'],
46: ['The Stars at War II',
'Steve White',
'Baen Publishing Enterprises',
'lrf,rar'],
47: ['Exodus', 'Steve White,Shirley Meier', 'Baen Books', 'lrf,rar'],
48: ['Harry Potter and the Goblet of Fire',
'J. K. Rowling',
None,
'lrf,rar'],
49: ['Harry Potter and the Prisoner of Azkaban',
'J. K. Rowling',
None,
'lrf,rtf'],
50: ['Harry Potter and the Chamber of Secrets',
'J. K. Rowling',
None,
'lit,lrf'],
51: ['Harry Potter and the Deathly Hallows',
'J.K. Rowling',
None,
'lit,lrf,pdf'],
52: ["His Majesty's Dragon", 'Naomi Novik', None, 'lrf,rar'],
53: ['Throne of Jade', 'Naomi Novik', 'Del Rey', 'lit,lrf'],
54: ['Black Powder War', 'Naomi Novik', 'Del Rey', 'lrf,rar'],
55: ['War and Peace', 'Leo Tolstoy', 'gutenberg.org', 'lrf,txt'],
56: ['Anna Karenina', 'Leo Tolstoy', 'gutenberg.org', 'lrf,txt'],
57: ['A Shorter History of Rome',
'Eugene Lawrence,Sir William Smith',
'gutenberg.org',
'lrf,zip'],
58: ['The Name of the Rose', 'Umberto Eco', None, 'lrf,rar'],
71: ["Wind Rider's Oath", 'David Weber', 'Baen', 'lrf'],
74: ['Rally Cry', 'William R Forstchen', None, 'htm,lrf'],
86: ['Empire of Ivory', 'Naomi Novik', None, 'lrf,rar'],
87: ["Renegade's Magic", 'Robin Hobb', None, 'lrf,rar'],
89: ['Master and commander',
"Patrick O'Brian",
'Fontana,\n1971',
'lit,lrf'],
91: ['A Companion to Wolves',
'Sarah Monette,Elizabeth Beär',
None,
'lrf,rar'],
92: ['The Lions of al-Rassan', 'Guy Gavriel Kay', 'Eos', 'lit,lrf'],
93: ['Gardens of the Moon', 'Steven Erikson', 'Tor Fantasy', 'lit,lrf'],
95: ['The Master and Margarita',
'Mikhail Bulgakov',
'N.Y. : Knopf, 1992.',
'lrf,rtf'],
120: ['Deadhouse Gates',
'Steven Erikson',
'London : Bantam Books, 2001.',
'lit,lrf'],
121: ['Memories of Ice', 'Steven Erikson', 'Bantam Books', 'lit,lrf'],
123: ['House of Chains', 'Steven Erikson', 'Bantam Books', 'lit,lrf'],
125: ['Midnight Tides', 'Steven Erikson', 'Bantam Books', 'lit,lrf'],
126: ['The Bonehunters', 'Steven Erikson', 'Bantam Press', 'lit,lrf'],
129: ['Guns, germs, and steel: the fates of human societies',
'Jared Diamond',
'New York : W.W. Norton, c1997.',
'lit,lrf'],
136: ['Wildcards', 'George R. R. Martin', None, 'html,lrf'],
138: ['Off Armageddon Reef', 'David Weber', 'Tor Books', 'lit,lrf'],
144: ['Atonement',
'Ian McEwan',
'New York : Nan A. Talese/Doubleday, 2002.',
'lrf,rar'],
146: ['1632', 'Eric Flint', 'Baen Books', 'lit,lrf'],
147: ['1633', 'David Weber,Eric Flint,Dru Blair', 'Baen', 'lit,lrf'],
148: ['1634: The Baltic War',
'David Weber,Eric Flint',
'Baen',
'lit,lrf'],
150: ['The Dragonbone Chair', 'Tad Williams', 'DAW Trade', 'lrf,rtf'],
152: ['The Little Book That Beats the Market',
'Joel Greenblatt',
'Wiley',
'epub,lrf'],
153: ['Pride of Carthage', 'David Anthony Durham', 'Anchor', 'lit,lrf'],
154: ['Stone of farewell',
'Tad Williams',
'New York : DAW Books, 1990.',
'lrf,txt'],
166: ['American Gods', 'Neil Gaiman', 'HarperTorch', 'lit,lrf'],
176: ['Pillars of the Earth',
'Ken Follett',
'New American Library',
'lit,lrf'],
182: ['The Eye of the world',
'Robert Jordan',
'New York : T. Doherty Associates, c1990.',
'lit,lrf'],
188: ['The Great Hunt', 'Robert Jordan', 'ATOM', 'lrf,zip'],
189: ['The Dragon Reborn', 'Robert Jordan', None, 'lit,lrf'],
190: ['The Shadow Rising', 'Robert Jordan', None, 'lit,lrf'],
191: ['The Fires of Heaven',
'Robert Jordan',
'Time Warner Books Uk',
'lit,lrf'],
216: ['Lord of chaos',
'Robert Jordan',
'New York : TOR, c1994.',
'lit,lrf'],
217: ['A Crown of Swords', 'Robert Jordan', None, 'lit,lrf'],
236: ['The Path of Daggers', 'Robert Jordan', None, 'lit,lrf'],
238: ['The Client',
'John Grisham',
'New York : Island, 1994, c1993.',
'lit,lrf'],
240: ["Winter's Heart", 'Robert Jordan', None, 'lit,lrf'],
242: ['In the Beginning was the Command Line',
'Neal Stephenson',
None,
'lrf,txt'],
249: ['Crossroads of Twilight', 'Robert Jordan', None, 'lit,lrf'],
251: ['Caves of Steel', 'Isaac Asimov', 'Del Rey', 'lrf,zip'],
253: ["Hunter's Run",
'George R. R. Martin,Gardner Dozois,Daniel Abraham',
'Eos',
'lrf,rar'],
257: ['Knife of Dreams', 'Robert Jordan', None, 'lit,lrf'],
258: ['Saturday',
'Ian McEwan',
'London : Jonathan Cape, 2005.',
'lrf,txt'],
259: ['My name is Red',
'Orhan Pamuk; translated from the Turkish by Erda\u011f G\xf6knar',
'New York : Alfred A. Knopf, 2001.',
'lit,lrf'],
265: ['Harbinger', 'David Mack', 'Star Trek', 'lit,lrf'],
267: ['Summon the Thunder',
'Dayton Ward,Kevin Dilmore',
'Pocket Books',
'lit,lrf'],
268: ['Shalimar the Clown',
'Salman Rushdie',
'New York : Random House, 2005.',
'lit,lrf'],
269: ['Reap the Whirlwind', 'David Mack', 'Star Trek', 'lit,lrf'],
272: ['Mistborn', 'Brandon Sanderson', 'Tor Fantasy', 'lrf,rar'],
273: ['The Thousandfold Thought',
'R. Scott Bakker',
'Overlook TP',
'lrf,rtf'],
276: ['Elantris',
'Brandon Sanderson',
'New York : Tor, 2005.',
'lrf,rar'],
291: ['Sundiver',
'David Brin',
'New York : Bantam Books, 1995.',
'lit,lrf'],
299: ['Imperium', 'Robert Harris', 'Arrow', 'lrf,rar'],
300: ['Startide Rising', 'David Brin', 'Bantam', 'htm,lrf'],
301: ['The Uplift War', 'David Brin', 'Spectra', 'lit,lrf'],
304: ['Brightness Reef', 'David Brin', 'Orbit', 'lrf,rar'],
305: ["Infinity's Shore", 'David Brin', 'Spectra', 'txt'],
306: ["Heaven's Reach", 'David Brin', 'Spectra', 'lrf,rar'],
325: ["Foundation's Triumph", 'David Brin', 'Easton Press', 'lit,lrf'],
327: ['I am Charlotte Simmons', 'Tom Wolfe', 'Vintage', 'htm,lrf'],
335: ['The Currents of Space', 'Isaac Asimov', None, 'lit,lrf'],
340: ['The Other Boleyn Girl',
'Philippa Gregory',
'Touchstone',
'lit,lrf'],
341: ["Old Man's War", 'John Scalzi', 'Tor', 'htm,lrf'],
342: ['The Ghost Brigades',
'John Scalzi',
'Tor Science Fiction',
'html,lrf'],
343: ['The Last Colony', 'John S"calzi', 'Tor Books', 'html,lrf'],
344: ['Gossip Girl', 'Cecily von Ziegesar', 'Warner Books', 'lrf,rtf'],
347: ['Little Brother', 'Cory Doctorow', 'Tor Teen', 'lrf'],
348: ['The Reality Dysfunction',
'Peter F. Hamilton',
'Pan MacMillan',
'lit,lrf'],
353: ['A Thousand Splendid Suns',
'Khaled Hosseini',
'Center Point Large Print',
'lit,lrf'],
354: ['Amsterdam', 'Ian McEwan', 'Anchor', 'lrf,txt'],
355: ['The Neutronium Alchemist',
'Peter F. Hamilton',
'Aspect',
'lit,lrf'],
356: ['The Naked God', 'Peter F. Hamilton', 'Aspect', 'lit,lrf'],
421: ['A Shadow in Summer', 'Daniel Abraham', 'Tor Fantasy', 'lrf,rar'],
427: ['Lonesome Dove', 'Larry M\\cMurtry', None, 'lit,lrf'],
440: ['Ghost', 'John Ringo', 'Baen', 'lit,lrf'],
441: ['Kildar', 'John Ringo', 'Baen', 'lit,lrf'],
443: ['Hidden Empire ', 'Kevin J. Anderson', 'Aspect', 'lrf,rar'],
444: ['The Gun Seller',
'Hugh Laurie',
'Washington Square Press',
'lrf,rar']
}
tests = {
'Dysfunction' : {348},
'title:Dysfunction' : {348},
'Title:Dysfunction' : {348},
'title:Dysfunction OR author:Laurie': {348, 444},
'(tag:txt or tag:pdf)': {33, 258, 354, 305, 242, 51, 55, 56, 154},
'(tag:txt OR tag:pdf) and author:Tolstoy': {55, 56},
'Tolstoy txt': {55, 56},
'Hamilton Amsterdam' : set(),
'Beär' : {91},
'dysfunc or tolstoy': {348, 55, 56},
'tag:txt AND NOT tolstoy': {33, 258, 354, 305, 242, 154},
'not tag:lrf' : {305},
'london:thames': {13},
'publisher:london:thames': {13},
'"(1977)"': {13},
'jack weatherford orc': {30},
'S\\"calzi': {343},
'author:S\\"calzi': {343},
'"S\\"calzi"': {343},
'M\\\\cMurtry': {427},
'author:Tolstoy (tag:txt OR tag:pdf)': {55, 56},
}
fields = {'title':0, 'author':1, 'publisher':2, 'tag':3}
_universal_set = set(texts.keys())
    def universal_set(self):
        # The set of all book ids in the canned test data
        return self._universal_set
    def get_matches(self, location, query, candidates=None):
        '''Return the set of book ids whose *location* field contains *query*
        (case insensitively), optionally restricted to *candidates*.'''
        location = location.lower()
        if location in self.fields.keys():
            getter = operator.itemgetter(self.fields[location])
        elif location == 'all':
            # Search the concatenation of all fields (None fields skipped)
            def getter(y):
                return ''.join(x if x else '' for x in y)
        else:
            # Unknown locations never match anything
            def getter(x):
                return ''
        if not query:
            return set()
        query = query.lower()
        # The getattr(..., 'lower', ...) guard handles None field values
        if candidates:
            return {key for key, val in self.texts.items()
                if key in candidates and query and query
                in getattr(getter(val), 'lower', lambda : '')()}
        else:
            return {key for key, val in self.texts.items()
                if query and query in getattr(getter(val), 'lower', lambda : '')()}
    def run_tests(self, ae):
        # ae is an assertEqual-style callable supplied by the TestCase
        for query in self.tests.keys():
            res = self.parse(query)
            ae(self.tests[query], res, f'Failed for query: {query}')
class TestSQP(unittest.TestCase):
    '''Unit tests for the search query parser and its tokenizer.'''

    def do_test(self, optimize=False):
        # Run every canned query from Tester.tests against the test data
        tester = Tester(['authors', 'author', 'series', 'formats', 'format',
            'publisher', 'rating', 'tags', 'tag', 'comments', 'comment', 'cover',
            'isbn', 'ondevice', 'pubdate', 'size', 'date', 'title', '#read',
            'all', 'search'], test=True, optimize=optimize)
        tester.run_tests(self.assertEqual)

    def test_sqp_optimized(self):
        self.do_test(True)

    def test_sqp_unoptimized(self):
        self.do_test(False)

    def test_sqp_tokenizer(self):
        p = Parser()

        def tokens(*a):
            # Build the expected token list from (kind, text) pairs, where
            # kind is one of 'O' (opcode), 'W' (word), 'Q' (quoted word)
            ans = []
            for i in range(0, len(a), 2):
                ans.append(({'O': Parser.OPCODE, 'W': Parser.WORD, 'Q': Parser.QUOTED_WORD}[a[i]], a[i+1]))
            return ans

        def t(query, *a):
            self.assertEqual(tokens(*a), p.tokenize(query))

        t('xxx', 'W', 'xxx')
        t('"a \\" () b"', 'Q', 'a " () b')
        t('"a“b"', 'Q', 'a“b')
        t('"a”b"', 'Q', 'a”b')
        # docstring tests
        t(r'"""a\1b"""', 'W', r'a\1b')
        t(r'("""a\1b""" AND """c""" OR d)',
            'O', '(', 'W', r'a\1b', 'W', 'AND', 'W', 'c', 'W', 'OR', 'W', 'd', 'O', ')')
        t(r'template:="""a\1b"""', 'W', r'template:=a\1b')
        t('template:="""a\nb"""', 'W', 'template:=a\nb')
        t(r'template:"""=a\1b"""', 'W', r'template:=a\1b')
        t(r'template:"""program: return ("\"1\"")#@#n:1"""', 'W',
            r'template:program: return ("\"1\"")#@#n:1')
def find_tests():
    # Return a TestSuite containing all the tests defined in this module
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestSQP)
class TestRunner(unittest.main):
    ''' unittest.main driver that always runs the full parser test suite. '''

    def createTests(self):
        # Ignore any command-line test selection; run everything
        self.test = find_tests()
def run(verbosity=4):
    # Run the test suite; exit=False so this is usable from a live session
    TestRunner(verbosity=verbosity, exit=False)
if __name__ == '__main__':
    # Allow executing this module directly to run the tests
    run()
| 15,195 | Python | .py | 387 | 33.023256 | 107 | 0.572048 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,168 | cleantext.py | kovidgoyal_calibre/src/calibre/utils/cleantext.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2010, Kovid Goyal <kovid at kovidgoyal.net>
import re
from calibre.constants import preferred_encoding
from calibre_extensions.speedup import clean_xml_chars as _ncxc
from polyglot.builtins import codepoint_to_chr
from polyglot.html_entities import name2codepoint
def native_clean_xml_chars(x):
    ''' Remove XML-invalid code points using the fast C implementation. '''
    # The C extension operates on str only, so decode bytes first using the
    # user's preferred encoding
    text = x.decode(preferred_encoding) if isinstance(x, bytes) else x
    return _ncxc(text)
def ascii_pat(for_binary=False):
    ''' Return a compiled regex matching the ASCII control characters that
    clean_ascii_chars() strips: all C0 controls except tab, newline and
    carriage return, plus DEL (127). The compiled pattern is cached on the
    function object, one each for text and binary use. '''
    attr = 'binary' if for_binary else 'text'
    pat = getattr(ascii_pat, attr, None)
    if pat is None:
        bad = (set(range(32)) - {9, 10, 13}) | {127}
        src = '|'.join(map(codepoint_to_chr, bad))
        if for_binary:
            src = src.encode('ascii')
        pat = re.compile(src)
        setattr(ascii_pat, attr, pat)
    return pat
def clean_ascii_chars(txt, charlist=None):
    r'''
    Remove ASCII control chars.
    This is all control chars except \t, \n and \r (plus DEL), unless an
    explicit iterable of code points to remove is passed in charlist.
    Accepts either str or bytes and returns the same type.
    '''
    is_binary = isinstance(txt, bytes)
    empty = b'' if is_binary else ''
    if not txt:
        return empty
    if charlist is None:
        pat = ascii_pat(is_binary)
    else:
        custom = '|'.join(map(codepoint_to_chr, charlist))
        if is_binary:
            custom = custom.encode('utf-8')
        # Bug fix: the original called .sub() directly on the pattern
        # *string*, which raises AttributeError; it must be compiled first
        pat = re.compile(custom)
    return pat.sub(empty, txt)
def allowed(x):
    # Predicate: is the single character x permitted in XML output?
    # Accepts tab/newline/CR, printable BMP characters below the surrogate
    # range (DEL, 0x7f, is deliberately rejected) and supplementary planes.
    # NOTE(review): the endpoints 0xd7ff, 0xe000, 0xfffd, 0x10000 and
    # 0x10ffff are themselves excluded here — confirm this matches the
    # native C implementation before changing.
    cp = ord(x)
    if cp in (9, 10, 13):
        return True
    if cp == 127:
        return False
    return 31 < cp < 0xd7ff or 0xe000 < cp < 0xfffd or 0x10000 < cp < 0x10ffff


def py_clean_xml_chars(unicode_string):
    # Pure-python fallback: keep only code points valid in XML
    return ''.join(c for c in unicode_string if allowed(c))
# Prefer the fast C implementation; the "or" fallback is vestigial here since
# native_clean_xml_chars is a function object and therefore always truthy
clean_xml_chars = native_clean_xml_chars or py_clean_xml_chars
def test_clean_xml_chars():
    # Sanity check: control chars and lone surrogates are removed while valid
    # BMP and supplementary-plane characters are kept
    raw = 'asd\x02a\U00010437x\ud801b\udffe\ud802'
    if native_clean_xml_chars(raw) != 'asda\U00010437xb':
        raise ValueError('Failed to XML clean: %r' % raw)
# Fredrik Lundh: http://effbot.org/zone/re-sub.htm#unescape-html
# Replaces HTML or XML character references and entities in a text string
# with the characters they represent.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string.
def unescape(text, rm=False, rchar=''):
    ''' Replace HTML/XML entities and character references in text.

    A successfully decoded numeric reference is always substituted. For
    everything else — named entities (even recognized ones) and malformed
    numeric references — the replacement is rchar when rm is True, otherwise
    the decoded (or original) text is used. '''
    def fixup(m, rm=rm, rchar=rchar):
        token = m.group(0)
        if token.startswith('&#'):
            # numeric character reference: decimal &#nnn; or hex &#xhhh;
            try:
                if token.startswith('&#x'):
                    return codepoint_to_chr(int(token[3:-1], 16))
                return codepoint_to_chr(int(token[2:-1]))
            except ValueError:
                pass
        else:
            # named entity: decode when known, keep the raw text otherwise
            try:
                token = codepoint_to_chr(name2codepoint[token[1:-1]])
            except KeyError:
                pass
        if rm:
            return rchar  # replace by char
        return token  # leave as is
    return re.sub("&#?\\w+;", fixup, text)
| 2,816 | Python | .py | 76 | 29.776316 | 116 | 0.613461 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,169 | ip_routing.py | kovidgoyal_calibre/src/calibre/utils/ip_routing.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import re
import subprocess
from calibre.constants import ismacos, iswindows
def get_address_of_default_gateway(family='AF_INET'):
    ''' Return the IP address of the default gateway for the given address
    family (a netifaces attribute name such as 'AF_INET'), always as str. '''
    import netifaces
    af = getattr(netifaces, family)
    addr = netifaces.gateways()['default'][af][0]
    # netifaces can hand back bytes on some platforms
    return addr.decode('ascii') if isinstance(addr, bytes) else addr
def get_addresses_for_interface(name, family='AF_INET'):
    ''' Yield the non point-to-point addresses (as str) bound to the named
    network interface for the given address family. '''
    import netifaces
    af = getattr(netifaces, family)
    for entry in netifaces.ifaddresses(name)[af]:
        if not entry.get('broadcast'):
            continue  # no broadcast address => a point-to-point link
        addr = entry.get('addr')
        if addr:
            yield addr.decode('ascii') if isinstance(addr, bytes) else addr
if iswindows:
    def get_default_route_src_address_external():
        ''' Find the interface for the default route by parsing the output
        of the "route print" command. '''
        # Use -6 for IPv6 addresses
        raw = subprocess.check_output('route -4 print 0.0.0.0'.split(), creationflags=subprocess.DETACHED_PROCESS).decode('utf-8', 'replace')
        in_table = False
        default_gateway = get_address_of_default_gateway()
        for line in raw.splitlines():
            parts = line.strip().split()
            if in_table:
                # Route rows have 5 or 6 columns depending on whether the
                # "Network Destination" column is present. Skip lines that
                # match neither shape (e.g. the ===== separator) instead of
                # comparing an unbound or stale gateway value, which could
                # raise NameError or return the wrong interface.
                if len(parts) == 6:
                    network, destination, netmask, gateway, interface, metric = parts
                elif len(parts) == 5:
                    destination, netmask, gateway, interface, metric = parts
                else:
                    continue
                if gateway == default_gateway:
                    return interface
            else:
                if parts == 'Network Destination Netmask Gateway Interface Metric'.split():
                    in_table = True

    def get_default_route_src_address_api():
        ''' Use the iphlpapi routing table to find the source address for the
        default route. '''
        from calibre.utils.iphlpapi import routes
        for route in routes():
            if route.interface and route.destination == '0.0.0.0':
                for addr in get_addresses_for_interface(route.interface):
                    return addr

    get_default_route_src_address = get_default_route_src_address_api
elif ismacos:
    def get_default_route_src_address():
        ''' Ask the BSD route command for the default route's interface and
        return that interface's first usable address. '''
        # Use -inet6 for IPv6
        raw = subprocess.check_output('route -n get -inet default'.split()).decode('utf-8')
        m = re.search(r'^\s*interface:\s*(\S+)\s*$', raw, flags=re.MULTILINE)
        if m is not None:
            interface = m.group(1)
            for addr in get_addresses_for_interface(interface):
                return addr
else:
    def get_default_route_src_address():
        ''' Parse /proc/net/route for the default route (destination
        00000000) and return the first address on its interface. '''
        # Use /proc/net/ipv6_route for IPv6 addresses
        with open('/proc/net/route', 'rb') as f:
            raw = f.read().decode('utf-8')
        for line in raw.splitlines():
            parts = line.split()
            if len(parts) > 1 and parts[1] == '00000000':
                for addr in get_addresses_for_interface(parts[0]):
                    return addr
if __name__ == '__main__':
    # Quick manual check of the platform-specific implementation
    print(get_default_route_src_address())
| 2,972 | Python | .py | 66 | 34.80303 | 141 | 0.602631 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,170 | filenames.py | kovidgoyal_calibre/src/calibre/utils/filenames.py | '''
Make strings safe for use as ASCII filenames, while trying to preserve as much
meaning as possible.
'''
import errno
import os
import shutil
import time
from contextlib import closing, suppress
from math import ceil
from calibre import force_unicode, isbytestring, prints, sanitize_file_name
from calibre.constants import filesystem_encoding, ismacos, iswindows, preferred_encoding
from calibre.utils.localization import _, get_udc
from polyglot.builtins import iteritems, itervalues
def ascii_text(orig):
    ''' Transliterate orig to a plain ASCII approximation using the
    unicode->ascii map, falling back to lossy re-encoding on failure.
    Always returns str. '''
    udc = get_udc()
    try:
        ans = udc.decode(orig)
    except Exception:
        # The transliterator choked; degrade gracefully with replacement chars
        if isinstance(orig, str):
            orig = orig.encode('ascii', 'replace')
        ans = orig.decode(preferred_encoding, 'replace')
    if isinstance(ans, bytes):
        ans = ans.decode('ascii', 'replace')
    return ans
def ascii_filename(orig, substitute='_'):
    ''' ASCII-only, filesystem-safe version of orig. Control characters are
    replaced by substitute and '?' by '_'. '''
    if isinstance(substitute, bytes):
        substitute = substitute.decode(filesystem_encoding)
    text = ascii_text(orig).replace('?', '_')
    cleaned = ''.join(ch if ord(ch) >= 32 else substitute for ch in text)
    return sanitize_file_name(cleaned, substitute=substitute)
def shorten_component(s, by_what):
    ''' Shorten s by roughly by_what characters by dropping characters from
    its middle, keeping equal-length head and tail. Strings too short to
    shrink are returned unchanged. '''
    n = len(s)
    if n < by_what:
        return s
    keep = (n - by_what) // 2
    if keep <= 0:
        return s
    return s[:keep] + s[-keep:]
def limit_component(x, limit=254):
    ''' Shorten the path component x until its encoded length does not
    exceed limit. Windows and macOS measure filename length in UTF-16,
    linux in (assumed UTF-8) bytes. '''
    encoding_for_length = 'utf-16' if iswindows or ismacos else 'utf-8'

    def encoded_length():
        data = x if isinstance(x, bytes) else x.encode(encoding_for_length)
        return len(data)

    while encoded_length() > limit:
        excess = encoded_length() - limit
        x = shorten_component(x, max(2, excess // 2))
    return x
def shorten_components_to(length, components, more_to_take=0, last_has_extension=True):
    ''' Shorten the path components so that when joined with os.sep the
    total length does not exceed length. Each component is shortened in
    proportion to its share of the total length. If last_has_extension,
    the extension of the final component is preserved. Recurses with a
    larger reduction target if the first pass was not enough. '''
    components = [limit_component(cx) for cx in components]
    filepath = os.sep.join(components)
    extra = len(filepath) - (length - more_to_take)
    if extra < 1:
        return components
    deltas = []
    # Each component gives up characters proportional to its length
    for x in components:
        pct = len(x)/float(len(filepath))
        deltas.append(int(ceil(pct*extra)))
    ans = []

    for i, x in enumerate(components):
        delta = deltas[i]
        if delta > len(x):
            # Cannot shorten by that much; keep one char of the last
            # component, drop intermediate components entirely
            r = x[0] if x is components[-1] else ''
        else:
            if last_has_extension and x is components[-1]:
                b, e = os.path.splitext(x)
                if e == '.':
                    e = ''
                r = shorten_component(b, delta)+e
                if r.startswith('.'):
                    r = x[0]+r
            else:
                r = shorten_component(x, delta)
            r = r.strip()
            if not r:
                # Never emit an empty component
                r = x.strip()[0] if x.strip() else 'x'
        ans.append(r)
    if len(os.sep.join(ans)) > length:
        # Still too long (rounding); retry demanding a bigger reduction
        return shorten_components_to(length, components, more_to_take+2)
    return ans
def find_executable_in_path(name, path=None):
    ''' Search path (defaults to the PATH environment variable) for an
    executable called name, trying windows executable extensions when
    appropriate. Returns the absolute path or None. '''
    if path is None:
        path = os.environ.get('PATH', '')
    exts = '.exe .cmd .bat'.split() if iswindows and not name.endswith('.exe') else ('',)
    for folder in path.split(os.pathsep):
        for ext in exts:
            candidate = os.path.abspath(os.path.join(folder, name)) + ext
            if os.access(candidate, os.X_OK):
                return candidate
def is_case_sensitive(path):
    '''
    Return True if the filesystem containing the existing directory path is
    case sensitive. You must have permission to create and delete files in
    this directory. On windows the answer is always False.
    '''
    if iswindows:
        return False
    f1 = os.path.join(path, 'calibre_test_case_sensitivity.txt')
    f2 = os.path.join(path, 'calibre_TesT_CaSe_sensitiVitY.Txt')
    with suppress(OSError):
        os.remove(f1)
    open(f1, 'w').close()
    # On a case-insensitive filesystem the differently-cased name resolves
    # to the same file, so it will appear to exist
    ans = not os.path.exists(f2)
    os.remove(f1)
    return ans
def case_ignoring_open_file(path, mode='r'):
    '''
    Open an existing file case insensitively, even on case sensitive file systems
    '''
    # Fast path: the exact path exists
    try:
        return open(path, mode)
    except FileNotFoundError as err:
        original_err = err

    def next_component(final_path, components):
        # Recursively resolve one path component at a time, matching
        # case-insensitively against the directory listing. components is a
        # reversed stack; pop() yields the next component to resolve.
        if not components:
            return final_path
        component = components.pop()
        cl = component.lower()
        try:
            matches = {x for x in os.listdir(final_path) if x.lower() == cl}
        except OSError:
            # Report the original failure, not the internal listdir error
            raise original_err from None
        # Several entries can match case-insensitively; try each in turn
        for x in matches:
            current = os.path.join(final_path, x)
            try:
                return next_component(current, list(components))
            except Exception:
                continue
        raise original_err

    if isbytestring(path):
        path = path.decode(filesystem_encoding)
    if path.endswith(os.sep):
        path = path[:-1]
    if not path:
        raise ValueError('Path must not point to root')
    components = path.split(os.sep)
    if len(components) <= 1:
        raise ValueError(f'Invalid path: {path}')
    # Start from the drive root on windows, filesystem root elsewhere
    final_path = (components[0].upper() + os.sep) if iswindows else '/'
    # Reverse so pop() walks the path left to right; drop the root component
    components = list(reversed(components))[:-1]
    return open(next_component(final_path, components), mode)
def case_preserving_open_file(path, mode='wb', mkdir_mode=0o777):
    '''
    Open the file pointed to by path with the specified mode. If any
    directories in path do not exist, they are created. Returns the
    opened file object and the path to the opened file object. This path is
    guaranteed to have the same case as the on disk path. For case insensitive
    filesystems, the returned path may be different from the passed in path.
    The returned path is always unicode and always an absolute path.

    If mode is None, then this function assumes that path points to a directory
    and return the path to the directory as the file object.

    mkdir_mode specifies the mode with which any missing directories in path
    are created.
    '''
    if isbytestring(path):
        path = path.decode(filesystem_encoding)
    path = os.path.abspath(path)

    sep = force_unicode(os.sep, 'ascii')

    if path.endswith(sep):
        path = path[:-1]
    if not path:
        raise ValueError('Path must not point to root')

    components = path.split(sep)
    if not components:
        raise ValueError('Invalid path: %r'%path)

    cpath = sep
    if iswindows:
        # Always upper case the drive letter and add a trailing slash so that
        # the first os.listdir works correctly
        cpath = components[0].upper() + sep

    bdir = path if mode is None else os.path.dirname(path)
    if not os.path.exists(bdir):
        os.makedirs(bdir, mkdir_mode)

    # Walk all the directories in path, putting the on disk case version of
    # the directory into cpath
    dirs = components[1:] if mode is None else components[1:-1]
    for comp in dirs:
        cdir = os.path.join(cpath, comp)
        cl = comp.lower()
        try:
            candidates = [c for c in os.listdir(cpath) if c.lower() == cl]
        except OSError:
            # Don't have permission to do the listdir, assume the case is
            # correct as we have no way to check it. (Narrowed from a bare
            # except, which also swallowed KeyboardInterrupt/SystemExit.)
            pass
        else:
            if len(candidates) == 1:
                cdir = os.path.join(cpath, candidates[0])
            # else: We are on a case sensitive file system so cdir must already
            # be correct
        cpath = cdir

    if mode is None:
        ans = fpath = cpath
    else:
        fname = components[-1]
        ans = open(os.path.join(cpath, fname), mode)
        # Ensure file and all its metadata is written to disk so that subsequent
        # listdir() has file name in it. I don't know if this is actually
        # necessary, but given the diversity of platforms, best to be safe.
        ans.flush()
        os.fsync(ans.fileno())

        cl = fname.lower()
        try:
            candidates = [c for c in os.listdir(cpath) if c.lower() == cl]
        except OSError:
            # The containing directory, somehow disappeared?
            candidates = []
        if len(candidates) == 1:
            fpath = os.path.join(cpath, candidates[0])
        else:
            # We are on a case sensitive filesystem
            fpath = os.path.join(cpath, fname)
    return ans, fpath
def windows_get_fileid(path):
    ''' The fileid uniquely identifies actual file contents (it is the same
    for all hardlinks to a file). Similar to inode number on linux. Returns
    None if the id could not be read. '''
    from calibre_extensions.winutil import get_file_id
    if isbytestring(path):
        path = path.decode(filesystem_encoding)
    try:
        return get_file_id(path)
    except OSError:
        return None
def samefile_windows(src, dst):
    ''' True when src and dst are textually the same path (case-insensitive)
    or share the same on-disk fileid. '''
    def norm(p):
        return os.path.normcase(os.path.abspath(p))

    if norm(src) == norm(dst):
        return True
    fid_a, fid_b = windows_get_fileid(src), windows_get_fileid(dst)
    if fid_a is None and fid_b is None:
        # Neither id could be read, so nothing meaningful to compare
        return False
    return fid_a == fid_b
def samefile(src, dst):
    '''
    Check if two paths point to the same actual file on the filesystem.
    Handles symlinks, case insensitivity, mapped drives, etc.

    Returns True iff both paths exist and point to the same file on disk.

    Note: On windows will return True if the two string are identical (up to
    case) even if the file does not exist, because the fileid lookup there is
    not fully reliable.
    '''
    if iswindows:
        return samefile_windows(src, dst)

    if hasattr(os.path, 'samefile'):
        # Unix
        try:
            return os.path.samefile(src, dst)
        except OSError:
            return False

    # All other platforms: check for same pathname.
    return (os.path.normcase(os.path.abspath(src)) ==
            os.path.normcase(os.path.abspath(dst)))
def windows_get_size(path):
    ''' On windows file sizes are only accurately stored in the actual file,
    not in the directory entry (which could be out of date). So we open the
    file, and get the actual size. '''
    from calibre_extensions import winutil
    if isbytestring(path):
        path = path.decode(filesystem_encoding)
    # Open with full sharing so other processes are not blocked
    with closing(winutil.create_file(
        path, 0, winutil.FILE_SHARE_READ | winutil.FILE_SHARE_WRITE | winutil.FILE_SHARE_DELETE,
        winutil.OPEN_EXISTING, 0)
    ) as h:
        return winutil.get_file_size(h)
def windows_hardlink(src, dest):
    ''' Create dest as a hardlink to src, then wait (with retries) for the
    link's reported size to match the source, raising OSError if it never
    does. Needed because network filesystems update metadata lazily. '''
    from calibre_extensions import winutil
    winutil.create_hard_link(dest, src)
    src_size = os.path.getsize(src)
    # We open and close dest, to ensure its directory entry is updated
    # see http://blogs.msdn.com/b/oldnewthing/archive/2011/12/26/10251026.aspx
    for i in range(10):
        # If we are on a network filesystem, we have to wait for some indeterminate time, since
        # network file systems are the best thing since sliced bread
        try:
            if windows_get_size(dest) == src_size:
                return
        except OSError:
            pass
        time.sleep(0.3)

    sz = windows_get_size(dest)
    if sz != src_size:
        msg = f'Creating hardlink from {src} to {dest} failed: %s'
        raise OSError(msg % ('hardlink size: %d not the same as source size' % sz))
def windows_fast_hardlink(src, dest):
    ''' Create dest as a hardlink to src and verify the link immediately by
    comparing sizes (no retry loop, unlike windows_hardlink). '''
    from calibre_extensions import winutil
    winutil.create_hard_link(dest, src)
    ssz, dsz = windows_get_size(src), windows_get_size(dest)
    if ssz != dsz:
        msg = f'Creating hardlink from {src} to {dest} failed: %s'
        raise OSError(msg % ('hardlink size: %d not the same as source size: %s' % (dsz, ssz)))
def windows_nlinks(path):
    ' Number of hardlinks pointing at path (windows implementation). '
    from calibre_extensions import winutil
    upath = path.decode(filesystem_encoding) if isbytestring(path) else path
    return winutil.nlinks(upath)
class WindowsAtomicFolderMove:

    '''
    Move all the files inside a specified folder in an atomic fashion,
    preventing any other process from locking a file while the operation is
    incomplete. Raises an IOError if another process has locked a file before
    the operation starts. Note that this only operates on the files in the
    folder, not any sub-folders.
    '''

    def __init__(self, path):
        # Open (and thereby share-lock) every file in path. self.handle_map
        # maps normalized absolute file path -> open winutil handle; hardlinked
        # files share a single handle.
        from collections import defaultdict

        from calibre_extensions import winutil
        self.handle_map = {}

        if isbytestring(path):
            path = path.decode(filesystem_encoding)

        if not os.path.exists(path):
            return

        names = os.listdir(path)
        # fileids let us detect hardlinks among the files in the folder
        name_to_fileid = {x:windows_get_fileid(os.path.join(path, x)) for x in names}
        fileid_to_names = defaultdict(set)
        for name, fileid in iteritems(name_to_fileid):
            fileid_to_names[fileid].add(name)

        for x in names:
            f = os.path.normcase(os.path.abspath(os.path.join(path, x)))
            if not os.path.isfile(f):
                continue
            with suppress(OSError):
                # Ensure the file is not read-only
                winutil.set_file_attributes(f, winutil.FILE_ATTRIBUTE_NORMAL)
            try:
                h = winutil.create_file(f, winutil.GENERIC_READ,
                        winutil.FILE_SHARE_DELETE,
                        winutil.OPEN_EXISTING, winutil.FILE_FLAG_SEQUENTIAL_SCAN)
            except OSError as e:
                if e.winerror == winutil.ERROR_SHARING_VIOLATION:
                    # The file could be a hardlink to an already opened file,
                    # in which case we use the same handle for both files
                    fileid = name_to_fileid[x]
                    found = False
                    if fileid is not None:
                        for other in fileid_to_names[fileid]:
                            other = os.path.normcase(os.path.abspath(os.path.join(path, other)))
                            if other in self.handle_map:
                                self.handle_map[f] = self.handle_map[other]
                                found = True
                                break
                    if found:
                        continue
                # Could not lock this file: release everything acquired so far
                self.close_handles()
                if e.winerror == winutil.ERROR_SHARING_VIOLATION:
                    err = IOError(errno.EACCES,
                            _('File is open in another process'))
                    err.filename = f
                    raise err
                prints('CreateFile failed for: %r' % f)
                raise
            except:
                self.close_handles()
                prints('CreateFile failed for: %r' % f)
                raise
            self.handle_map[f] = h

    def copy_path_to(self, path, dest):
        ''' Copy the locked file path to dest, preferring a hardlink and
        falling back to reading through the held handle. '''
        from calibre_extensions import winutil
        handle = None
        for p, h in self.handle_map.items():
            if samefile_windows(path, p):
                handle = h
                break
        if handle is None:
            if os.path.exists(path):
                raise ValueError('The file %r did not exist when this move'
                        ' operation was started'%path)
            else:
                raise ValueError('The file %r does not exist'%path)

        with suppress(OSError):
            windows_hardlink(path, dest)
            return

        # Hardlink failed; copy the bytes via the already-open handle
        winutil.set_file_pointer(handle, 0, winutil.FILE_BEGIN)
        with open(dest, 'wb') as f:
            sz = 1024 * 1024
            while True:
                raw = winutil.read_file(handle, sz)
                if not raw:
                    break
                f.write(raw)

    def release_file(self, path):
        ' Release the lock on the file pointed to by path. Will also release the lock on any hardlinks to path '
        key = None
        for p, h in iteritems(self.handle_map):
            if samefile_windows(path, p):
                key = (p, h)
                break
        if key is not None:
            key[1].close()
            # Drop every map entry that shares this handle (hardlinks)
            remove = [f for f, h in iteritems(self.handle_map) if h is key[1]]
            for x in remove:
                self.handle_map.pop(x)

    def close_handles(self):
        # Close all held handles, releasing every lock
        for h in itervalues(self.handle_map):
            h.close()
        self.handle_map = {}

    def delete_originals(self):
        # Delete all the locked source files, then release the handles
        from calibre_extensions import winutil
        for path in self.handle_map:
            winutil.delete_file(path)
        self.close_handles()
def hardlink_file(src, dest):
    ' Create dest as a hardlink to src, handling long paths on windows. '
    src = make_long_path_useable(src)
    dest = make_long_path_useable(dest)
    if iswindows:
        windows_hardlink(src, dest)
    else:
        os.link(src, dest)
def nlinks_file(path):
    ' Return number of hardlinks to the file '
    return windows_nlinks(path) if iswindows else os.stat(path).st_nlink
if iswindows:
    from calibre_extensions.winutil import move_file

    def rename_file(a, b):
        # move_file requires str arguments; decode bytes with the OS
        # filesystem encoding first
        if isinstance(a, bytes):
            a = os.fsdecode(a)
        if isinstance(b, bytes):
            b = os.fsdecode(b)
        move_file(a, b)
def retry_on_fail(func, *args, count=10, sleep_time=0.2):
    ''' Call func(*args), retrying up to count times on OSError with
    sleep_time seconds between attempts; the final failure is re-raised. '''
    for attempts_left in range(count, 0, -1):
        try:
            func(*args)
            return
        except OSError:
            if attempts_left <= 1:
                raise
            # Try the operation repeatedly in case something like a virus
            # scanner has opened one of the files (I love windows)
            time.sleep(sleep_time)
def atomic_rename(oldpath, newpath):
    '''Replace the file newpath with the file oldpath. Can fail if the files
    are on different volumes. If succeeds, guaranteed to be atomic. newpath may
    or may not exist. If it exists, it is replaced. '''
    if not iswindows:
        os.rename(oldpath, newpath)
        return
    # On windows a transient lock (e.g. a virus scanner) can make the
    # rename fail, so retry it
    retry_on_fail(rename_file, oldpath, newpath)
def remove_dir_if_empty(path, ignore_metadata_caches=False):
    ''' Remove a directory if it is empty or contains only the folder metadata
    caches from different OSes. To delete the folder if it contains only
    metadata caches, set ignore_metadata_caches to True.'''
    try:
        os.rmdir(path)
    except OSError as e:
        try:
            entries = os.listdir(path)
        except FileNotFoundError:  # something deleted path out from under us
            return
        if e.errno == errno.ENOTEMPTY or len(entries) > 0:
            # Some linux systems appear to raise an EPERM instead of an
            # ENOTEMPTY, see https://bugs.launchpad.net/bugs/1240797
            if ignore_metadata_caches:
                try:
                    found = False
                    for x in entries:
                        # OS-generated folder metadata caches
                        if x.lower() in {'.ds_store', 'thumbs.db'}:
                            found = True
                            x = os.path.join(path, x)
                            with suppress(FileNotFoundError):
                                if os.path.isdir(x):
                                    import shutil
                                    shutil.rmtree(x)
                                else:
                                    os.remove(x)
                except Exception:  # We could get an error, if, for example, windows has locked Thumbs.db
                    found = False
                if found:
                    # Caches removed; the directory may be empty now
                    remove_dir_if_empty(path)
                return
        raise
# Re-exported for convenience, so callers need not import os.path themselves
expanduser = os.path.expanduser
def format_permissions(st_mode):
    ''' Render st_mode as an "ls -l" style string, e.g. '-rw-r--r--'. '''
    import stat
    # File-type letter: regular/dir/block/char/fifo/symlink/socket
    for func, letter in (x.split(':') for x in 'REG:- DIR:d BLK:b CHR:c FIFO:p LNK:l SOCK:s'.split()):
        if getattr(stat, 'S_IS' + func)(st_mode):
            break
    else:
        letter = '?'
    rwx = ('---', '--x', '-w-', '-wx', 'r--', 'r-x', 'rw-', 'rwx')
    ans = [letter] + list(rwx[(st_mode >> 6) & 7]) + list(rwx[(st_mode >> 3) & 7]) + list(rwx[(st_mode & 7)])
    if st_mode & stat.S_ISUID:
        ans[3] = 's' if (st_mode & stat.S_IXUSR) else 'S'
    if st_mode & stat.S_ISGID:
        # 'l' (mandatory locking, System V convention) when setgid is set
        # without group execute
        ans[6] = 's' if (st_mode & stat.S_IXGRP) else 'l'
    if st_mode & stat.S_ISVTX:
        # The sticky bit occupies the *others* execute slot, so it must be
        # tested against S_IXOTH (the original mistakenly tested S_IXUSR)
        ans[9] = 't' if (st_mode & stat.S_IXOTH) else 'T'
    return ''.join(ans)
def copyfile(src, dest):
    ''' Copy src to dest, best-effort copying file metadata (permissions,
    timestamps) as well; failure to copy metadata is ignored. '''
    shutil.copyfile(src, dest)
    with suppress(Exception):
        shutil.copystat(src, dest)
def get_hardlink_function(src, dest):
    ''' Return the function to use for hardlinking src to dest, or None when
    a hardlink is not possible (windows only: different drives or a
    filesystem without hardlink support). '''
    if not iswindows:
        return os.link
    from calibre_extensions import winutil
    # Strip the \\?\ long-path prefix before inspecting drive letters
    if src.startswith(long_path_prefix):
        src = src[len(long_path_prefix):]
    if dest.startswith(long_path_prefix):
        dest = dest[len(long_path_prefix):]
    root = dest[0] + ':\\'
    same_drive = src[0].lower() == dest[0].lower()
    if same_drive and winutil.supports_hardlinks(root):
        return windows_fast_hardlink
    return None
def copyfile_using_links(path, dest, dest_is_dir=True, filecopyfunc=copyfile):
    ''' Copy path to dest, hardlinking when possible and falling back to
    filecopyfunc. When dest_is_dir, the file keeps its basename inside
    dest. '''
    src = os.path.abspath(path)
    dst = os.path.abspath(dest)
    if dest_is_dir:
        dst = os.path.join(dst, os.path.basename(src))
    linker = get_hardlink_function(src, dst)
    src, dst = make_long_path_useable(src), make_long_path_useable(dst)
    try:
        linker(src, dst)
    except Exception:
        filecopyfunc(src, dst)
def copytree_using_links(path, dest, dest_is_parent=True, filecopyfunc=copyfile):
    ''' Recursively copy the tree rooted at path into dest, hardlinking files
    when possible and falling back to filecopyfunc. When dest_is_parent,
    the tree keeps its basename inside dest. Existing directories are
    reused. '''
    src_root = os.path.abspath(path)
    dst_root = os.path.abspath(dest)
    if dest_is_parent:
        dst_root = os.path.join(dst_root, os.path.basename(src_root))
    linker = get_hardlink_function(src_root, dst_root)
    try:
        os.makedirs(dst_root)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    for dirpath, dirnames, filenames in os.walk(src_root):
        base = os.path.relpath(dirpath, src_root)
        dest_base = os.path.join(dst_root, base)
        for dname in dirnames:
            try:
                os.mkdir(os.path.join(dest_base, dname))
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        for fname in filenames:
            src = os.path.join(dirpath, fname)
            df = os.path.join(dest_base, fname)
            try:
                # linker may be None (cross-drive on windows); the resulting
                # TypeError also falls back to a plain copy, as before
                linker(src, df)
            except Exception:
                filecopyfunc(src, df)
# Re-exported for convenience
rmtree = shutil.rmtree
if iswindows:
    # Prefix that tells the windows API to skip MAX_PATH length checks
    long_path_prefix = '\\\\?\\'

    def make_long_path_useable(path, threshold=200):
        # Add the \\?\ prefix to long absolute paths so win32 APIs accept them
        if len(path) > threshold and os.path.isabs(path) and not path.startswith(long_path_prefix):
            path = long_path_prefix + os.path.normpath(path)
        return path

    def is_fat_filesystem(path):
        # True if the drive containing path uses a FAT family filesystem
        try:
            from calibre_extensions.winutil import filesystem_type_name
        except ImportError:
            return False
        if not path:
            return False
        drive = os.path.abspath(path)[0].upper()
        try:
            tn = filesystem_type_name(f'{drive}:\\')
        except OSError:
            return False
        # Values I have seen: FAT32, exFAT, NTFS
        return tn.upper().startswith('FAT')

    def get_long_path_name(path):
        # Resolve a possibly 8.3-shortened path to its full long form
        from calibre_extensions.winutil import get_long_path_name
        lpath = path
        if os.path.isabs(lpath) and not lpath.startswith(long_path_prefix):
            lpath = long_path_prefix + lpath
        try:
            return get_long_path_name(lpath)
        except FileNotFoundError:
            return path
        except OSError as e:
            if e.winerror == 123:  # ERR_INVALID_NAME
                return path
            raise
else:
    # Non-windows platforms need no long-path handling
    def make_long_path_useable(path, threshold=200):
        return path

    def get_long_path_name(path):
        return path

    def is_fat_filesystem(path):
        # TODO: Implement for Linux and macOS
        return False
| 23,829 | Python | .py | 586 | 31.194539 | 112 | 0.60345 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,171 | shared_file.py | kovidgoyal_calibre/src/calibre/utils/shared_file.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import sys
from calibre.constants import iswindows
from polyglot.builtins import reraise
'''
This module defines a share_open() function which is a replacement for
python's builtin open() function.
This replacement, opens 'shareable' files on all platforms. That is files that
can be read from and written to and deleted at the same time by multiple
processes. All file handles are non-inheritable, as in Python 3, but unlike,
Python 2. Non-inheritance is atomic.
Caveats on windows: On windows sharing is co-operative, i.e. it only works if
all processes involved open the file with share_open(). Also while you can
delete a file that is open, you cannot open a new file with the same filename
until all open file handles are closed. You also cannot delete the containing
directory until all file handles are closed. To get around this, rename the
file before deleting it.
'''
if iswindows:
    import msvcrt
    from numbers import Integral

    from calibre_extensions import winutil

    # Map the posix-style os.O_* flags onto the win32 CreateFile parameters
    _ACCESS_MASK = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
    _ACCESS_MAP = {
        os.O_RDONLY : winutil.GENERIC_READ,
        os.O_WRONLY : winutil.GENERIC_WRITE,
        os.O_RDWR : winutil.GENERIC_READ | winutil.GENERIC_WRITE
    }

    _CREATE_MASK = os.O_CREAT | os.O_EXCL | os.O_TRUNC
    _CREATE_MAP = {
        0 : winutil.OPEN_EXISTING,
        os.O_EXCL : winutil.OPEN_EXISTING,
        os.O_CREAT : winutil.OPEN_ALWAYS,
        os.O_CREAT | os.O_EXCL : winutil.CREATE_NEW,
        os.O_CREAT | os.O_TRUNC | os.O_EXCL : winutil.CREATE_NEW,
        os.O_TRUNC : winutil.TRUNCATE_EXISTING,
        os.O_TRUNC | os.O_EXCL : winutil.TRUNCATE_EXISTING,
        os.O_CREAT | os.O_TRUNC : winutil.CREATE_ALWAYS
    }

    def os_open(path, flags, mode=0o777, share_flags=winutil.FILE_SHARE_VALID_FLAGS):
        '''
        Replacement for os.open() allowing moving or unlinking before closing
        '''
        if not isinstance(flags, Integral):
            raise TypeError('flags must be an integer')
        if not isinstance(mode, Integral):
            raise TypeError('mode must be an integer')

        if share_flags & ~winutil.FILE_SHARE_VALID_FLAGS:
            raise ValueError('bad share_flags: %r' % share_flags)

        access_flags = _ACCESS_MAP[flags & _ACCESS_MASK]
        create_flags = _CREATE_MAP[flags & _CREATE_MASK]
        attrib_flags = winutil.FILE_ATTRIBUTE_NORMAL
        if flags & os.O_CREAT and mode & ~0o444 == 0:
            # Creating with no write bits requested => a read-only file
            attrib_flags = winutil.FILE_ATTRIBUTE_READONLY

        if flags & os.O_TEMPORARY:
            share_flags |= winutil.FILE_SHARE_DELETE
            attrib_flags |= winutil.FILE_FLAG_DELETE_ON_CLOSE
            access_flags |= winutil.DELETE

        if flags & os.O_SHORT_LIVED:
            attrib_flags |= winutil.FILE_ATTRIBUTE_TEMPORARY

        if flags & os.O_SEQUENTIAL:
            attrib_flags |= winutil.FILE_FLAG_SEQUENTIAL_SCAN
        if flags & os.O_RANDOM:
            attrib_flags |= winutil.FILE_FLAG_RANDOM_ACCESS

        h = winutil.create_file(
            path, access_flags, share_flags, create_flags, attrib_flags)
        # Wrap the win32 handle in a CRT file descriptor; O_NOINHERIT keeps
        # it from leaking into child processes (non-inheritance is atomic)
        ans = msvcrt.open_osfhandle(int(h), flags | os.O_NOINHERIT)
        h.detach()
        return ans

    def share_open(*a, **kw):
        kw['opener'] = os_open
        return open(*a, **kw)
else:
    # On unix, plain open() already allows shared read/write/delete
    share_open = open
def raise_winerror(x):
    # Windows error translation is not implemented in this code path;
    # re-raise as NotImplementedError while preserving the active traceback
    reraise(NotImplementedError, None, sys.exc_info()[2])
def find_tests():
    ''' Return a unittest suite exercising share_open() semantics. '''
    import unittest

    from calibre.ptempfile import TemporaryDirectory

    class SharedFileTest(unittest.TestCase):

        def test_shared_file(self):
            eq = self.assertEqual

            with TemporaryDirectory() as tdir:
                fname = os.path.join(tdir, 'test.txt')
                with share_open(fname, 'wb') as f:
                    f.write(b'a' * 20 * 1024)
                    eq(fname, f.name)
                f = share_open(fname, 'rb')
                close = [f]
                try:
                    eq(f.read(1), b'a')
                    # Reading must keep working even after the file is
                    # renamed/deleted while open
                    if iswindows:
                        os.rename(fname, fname+'.moved')
                        os.remove(fname+'.moved')
                    else:
                        os.remove(fname)
                    eq(f.read(1), b'a')
                    # A second writer may recreate the name concurrently
                    f2 = share_open(fname, 'w+b')
                    close.append(f2)
                    f2.write(b'b' * 10 * 1024)
                    f2.seek(0)
                    eq(f.read(10000), b'a'*10000)
                    eq(f2.read(100), b'b' * 100)
                    f3 = share_open(fname, 'rb')
                    close.append(f3)
                    eq(f3.read(100), b'b' * 100)
                finally:
                    for f in close:
                        f.close()

    return unittest.defaultTestLoader.loadTestsFromTestCase(SharedFileTest)
def run_tests():
    # Convenience entry point: run this module's tests with the standard
    # calibre test runner
    from calibre.utils.run_tests import run_tests
    run_tests(find_tests)
| 5,186 | Python | .py | 116 | 34.741379 | 85 | 0.587535 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,172 | icu.py | kovidgoyal_calibre/src/calibre/utils/icu.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
# Setup code {{{
import codecs
import sys
import threading
from calibre.utils.config_base import prefs, tweaks
from calibre_extensions import icu as _icu
from polyglot.builtins import cmp
# Locale used for collation; lazily initialized by collator()
_locale = None
# Bare reference keeps the polyglot cmp import "used" — presumably it is
# re-exported from this module for compatibility; confirm before removing
cmp

# Empty text/bytes sentinels
_none = ''
_none2 = b''
# NOTE(review): purpose of _cmap is not visible in this chunk — verify usage
_cmap = {}

icu_unicode_version = _icu.unicode_version
# Supported unicode normalization modes, mapped to their ICU implementations
_nmodes = {m:getattr(_icu, m) for m in ('NFC', 'NFD', 'NFKC', 'NFKD')}
# Ensure that the python internal filesystem and default encodings are not ASCII
def is_ascii(name):
    ''' Return True if the codec named ``name`` is the ASCII codec, or if the
    codec cannot be looked up at all (in which case we err on the side of
    assuming it needs to be replaced). '''
    try:
        # Bug fix: CodecInfo.name is a str; the original compared it to the
        # bytes literal b'ascii', which is never equal in python 3, so ASCII
        # encodings were never detected here
        return codecs.lookup(name).name == 'ascii'
    except (TypeError, LookupError):
        return True
# If the process default/filesystem encodings are ASCII, widen them to UTF-8
# inside the ICU extension so non-ASCII text round-trips correctly
try:
    if is_ascii(sys.getdefaultencoding()):
        _icu.set_default_encoding(b'utf-8')
except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
    import traceback
    traceback.print_exc()

try:
    if is_ascii(sys.getfilesystemencoding()):
        _icu.set_filesystem_encoding(b'utf-8')
except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
    import traceback
    traceback.print_exc()
del is_ascii
# Per-thread cache of configured Collator objects, keyed by configuration tuple
thread_local_collator_cache = threading.local()
def collator(strength=None, numeric=None, ignore_alternate_chars=None, upper_first=None):
    ''' Return a per-thread cached ICU Collator for the current sorting
    locale, configured with the given attributes (None means leave the ICU
    default). The fully-default collator is created directly; configured
    variants are clones of it. '''
    global _locale
    if _locale is None:
        # Lazily determine the sorting locale, preferring the tweak
        if tweaks['locale_for_sorting']:
            _locale = tweaks['locale_for_sorting']
        else:
            from calibre.utils.localization import get_lang
            _locale = get_lang()
    key = strength, numeric, ignore_alternate_chars, upper_first
    try:
        ans = thread_local_collator_cache.cache.get(key)
    except AttributeError:
        # First use on this thread: create the cache dict
        thread_local_collator_cache.cache = {}
        ans = None
    if ans is not None:
        return ans
    if all(x is None for x in key):
        try:
            ans = _icu.Collator(_locale)
        except Exception as e:
            # Bad/unsupported locale: fall back to English permanently
            print(f'Failed to load collator for locale: {_locale!r} with error {e!r}, using English', file=sys.stderr)
            _locale = 'en'
            ans = _icu.Collator(_locale)
    else:
        # Configure a clone of the default collator
        ans = collator().clone()
        if strength is not None:
            ans.strength = strength
        if numeric is not None:
            ans.numeric = numeric
        if upper_first is not None:
            ans.upper_first = upper_first
        if ignore_alternate_chars is not None:
            ans.set_attribute(_icu.UCOL_ALTERNATE_HANDLING, _icu.UCOL_SHIFTED if ignore_alternate_chars else _icu.UCOL_NON_IGNORABLE)

    thread_local_collator_cache.cache[key] = ans
    return ans
def change_locale(locale=None):
    ''' Change the locale used for collation (None re-detects it lazily) and
    invalidate this thread's cached collators. '''
    global _locale
    _locale = locale
    cache = getattr(thread_local_collator_cache, 'cache', None)
    if cache is not None:
        cache.clear()
# Convenience factories: each returns a (cached) collator configured for a
# specific comparison policy


def primary_collator():
    'Ignores case differences and accented chars'
    return collator(strength=_icu.UCOL_PRIMARY)


def primary_collator_without_punctuation():
    'Ignores space and punctuation and case differences and accented chars'
    return collator(strength=_icu.UCOL_PRIMARY, ignore_alternate_chars=True)


def sort_collator():
    'Ignores case differences and recognizes numbers in strings (if the tweak is set)'
    return collator(strength=_icu.UCOL_SECONDARY, numeric=prefs['numeric_collation'])


def non_numeric_sort_collator():
    'Ignores case differences only'
    return collator(strength=_icu.UCOL_SECONDARY, numeric=False)


def numeric_collator():
    'Uses natural sorting for numbers inside strings so something2 will sort before something10'
    return collator(strength=_icu.UCOL_SECONDARY, numeric=True)


def case_sensitive_collator():
    'Always sorts upper case letter before lower case'
    return collator(numeric=prefs['numeric_collation'], upper_first=True)
def make_sort_key_func(collator_function, func_name='sort_key'):
    '''Build a single-argument sort-key function.

    The named method is looked up lazily, on first use, on the object
    returned by *collator_function*. If the method rejects the argument with
    a TypeError, bytes input is retried after decoding with the default
    encoding (returned as-is if undecodable); anything else yields ``b''``.
    '''
    bound = None

    def sort_key(a):
        nonlocal bound
        if bound is None:
            # Lazy binding: the collator is only created on first use.
            bound = getattr(collator_function(), func_name)
        try:
            return bound(a)
        except TypeError:
            pass
        if not isinstance(a, bytes):
            return b''
        try:
            decoded = a.decode(sys.getdefaultencoding())
        except ValueError:
            # Undecodable bytes: fall back to the raw value.
            return a
        return bound(decoded)

    return sort_key
def make_two_arg_func(collator_function, func_name='strcmp'):
    '''Build a two-argument string function (compare/find/contains style).

    The named method is looked up lazily on the object returned by
    *collator_function*. On a TypeError, bytes arguments are decoded with
    the default encoding (falling back to ``cmp`` if decoding fails) and
    ``None`` arguments are coerced to the empty string before retrying.
    '''
    bound = None

    def two_args(a, b):
        nonlocal bound
        if bound is None:
            # Lazy binding: the collator is only created on first use.
            bound = getattr(collator_function(), func_name)
        try:
            return bound(a, b)
        except TypeError:
            pass
        if isinstance(a, bytes):
            try:
                a = a.decode(sys.getdefaultencoding())
            except Exception:
                return cmp(a, b)
        elif a is None:
            a = ''
        if isinstance(b, bytes):
            try:
                b = b.decode(sys.getdefaultencoding())
            except Exception:
                return cmp(a, b)
        elif b is None:
            b = ''
        return bound(a, b)

    return two_args
def make_change_case_func(which, name):
    '''Build a locale-aware case-changing function wrapping
    ``_icu.change_case`` for the given *which* mode; the returned function
    is renamed to *name*.'''
    def change_case(x):
        try:
            try:
                return _icu.change_case(x, which, _locale)
            except NotImplementedError:
                # _locale has not been resolved yet; building a collator
                # initialises it, then the call is retried.
                collator()
                return _icu.change_case(x, which, _locale)
        except TypeError:
            if not isinstance(x, bytes):
                raise
            try:
                x = x.decode(sys.getdefaultencoding())
            except ValueError:
                # Undecodable bytes are returned unchanged.
                return x
            return _icu.change_case(x, which, _locale)
    change_case.__name__ = name
    return change_case
# }}}
# ################ The string functions ########################################
# Single-argument sort-key functions built from the collator factories above.
sort_key = make_sort_key_func(sort_collator)
numeric_sort_key = make_sort_key_func(numeric_collator)
primary_sort_key = make_sort_key_func(primary_collator)
case_sensitive_sort_key = make_sort_key_func(case_sensitive_collator)
# collation_order(s) yields a 2-tuple (see partition_by_first_letter below).
collation_order = make_sort_key_func(sort_collator, 'collation_order')
collation_order_for_partitioning = make_sort_key_func(non_numeric_sort_collator, 'collation_order')
# Two-argument strcmp-style comparison functions.
strcmp = make_two_arg_func(sort_collator)
case_sensitive_strcmp = make_two_arg_func(case_sensitive_collator)
primary_strcmp = make_two_arg_func(primary_collator)
# Locale-aware case changing via ICU.
upper = make_change_case_func(_icu.UPPER_CASE, 'upper')
lower = make_change_case_func(_icu.LOWER_CASE, 'lower')
title_case = make_change_case_func(_icu.TITLE_CASE, 'title_case')
def capitalize(x):
    '''Return *x* with its first character upper-cased and the rest
    lower-cased, using the locale-aware ICU case functions above.'''
    try:
        return upper(x[0]) + lower(x[1:])
    except (IndexError, TypeError, AttributeError):
        # Empty strings and non-indexable values are returned unchanged.
        return x
# Case swapping comes straight from the C extension.
swapcase = swap_case = _icu.swap_case
# Collation-aware substring search.
find = make_two_arg_func(collator, 'find')
primary_find = make_two_arg_func(primary_collator, 'find')
primary_no_punc_find = make_two_arg_func(primary_collator_without_punctuation, 'find')
# Collation-aware membership tests.
contains = make_two_arg_func(collator, 'contains')
primary_contains = make_two_arg_func(primary_collator, 'contains')
primary_no_punc_contains = make_two_arg_func(primary_collator_without_punctuation, 'contains')
# Collation-aware prefix tests.
startswith = make_two_arg_func(collator, 'startswith')
primary_startswith = make_two_arg_func(primary_collator, 'startswith')
safe_chr = _icu.chr
ord_string = _icu.ord_string
def character_name(string):
    '''Return the Unicode character name for *string* (converted via
    ``str``), or None if no name is available or the lookup fails.'''
    name = None
    try:
        name = _icu.character_name(str(string)) or None
    except (TypeError, ValueError, KeyError):
        pass
    return name
def character_name_from_code(code):
    '''Return the Unicode character name for the codepoint *code*, or the
    empty string if no name is available or the lookup fails.'''
    name = ''
    try:
        name = _icu.character_name_from_code(code) or ''
    except (TypeError, ValueError, KeyError):
        pass
    return name
def normalize(text, mode='NFC'):
    '''Normalize *text* to the given Unicode normalization form using ICU.'''
    # This is very slightly slower than using unicodedata.normalize, so stick
    # with that unless you have very good reasons not to. Also, its speed
    # decreases on wide python builds, where conversion to/from ICU's string
    # representation is slower.
    return _icu.normalize(_nmodes[mode], str(text))
def contractions(col=None):
    '''Return the frozenset of contraction sequences for *col* (defaults to
    the module collator), caching the answer per-collator in _cmap.'''
    global _cmap
    col = col or collator()
    cached = _cmap.get(col, None)
    if cached is None:
        # Drop falsy entries and freeze so the result is safely shareable.
        cached = frozenset(x for x in col.contractions() if x)
        _cmap[col] = cached
    return cached
def partition_by_first_letter(items, reverse=False, key=lambda x:x):
    '''Group *items* (sorted by collation order of ``key(item)``) into an
    OrderedDict keyed by "equal" first letters.

    Equality of first letters is detected by watching for changes in ICU's
    collation ordinal for the first collation unit, rather than comparing
    the characters directly.
    '''
    from collections import OrderedDict
    ans = OrderedDict()
    prev_letter, prev_ord = ' ', 0
    for item in sorted(items, key=lambda x: sort_key(key(x)), reverse=reverse):
        c = upper(key(item) or ' ')
        ordnum, ordlen = collation_order(c)
        if ordnum != prev_ord:
            # Collation ordinal changed: start a new partition letter.
            prev_letter, prev_ord = c[0:1], ordnum
        ans.setdefault(prev_letter, []).append(item)
    return ans
# Return the number of unicode codepoints in a string (plain len works
# because Python 3 strings are codepoint sequences).
string_length = len
# Return the number of UTF-16 code units needed to encode a string.
utf16_length = _icu.utf16_length
def remove_accents_icu(txt: str) -> str:
    '''Strip combining (nonspacing) marks from *txt* using an ICU
    transliterator, which is created once and cached on the function.'''
    trans = getattr(remove_accents_icu, 'transliterator', None)
    if trans is None:
        trans = _icu.Transliterator('remove_accents', '''\
:: NFD (NFC);
:: [:Nonspacing Mark:] Remove;
:: NFC (NFD);
''')
        remove_accents_icu.transliterator = trans
    return trans.transliterate(txt)
def remove_accents_regex(txt: str) -> str:
    '''Strip combining (nonspacing) marks from *txt* by decomposing to NFKD,
    removing ``\\p{Mn}`` matches and recomposing to NFKC.

    The compiled pattern and the normalize function are cached as function
    attributes on first use.
    '''
    pat = getattr(remove_accents_regex, 'pat', None)
    if pat is None:
        import unicodedata
        import regex
        pat = regex.compile(r'\p{Mn}', flags=regex.UNICODE)
        remove_accents_regex.pat = pat
        remove_accents_regex.normalize = unicodedata.normalize
    nrm = remove_accents_regex.normalize
    return nrm('NFKC', pat.sub('', nrm('NFKD', txt)))
# Public alias: the regex implementation is used as it is more robust and
# faster than the ICU transliterator variant above.
remove_accents = remove_accents_regex
################################################################################
if __name__ == '__main__':
    # Running this module directly executes the ICU test suite.
    from calibre.utils.icu_test import run
    run(verbosity=4)
| 10,041 | Python | .py | 261 | 31.570881 | 133 | 0.645653 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,173 | ipython.py | kovidgoyal_calibre/src/calibre/utils/ipython.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import sys
from calibre.constants import cache_dir, get_version, iswindows
from polyglot.builtins import exec_path
# IPython state (config, history) is kept inside the calibre cache directory.
ipydir = os.path.join(cache_dir(), 'ipython')
BANNER = ('Welcome to the interactive calibre shell!\n')
def setup_pyreadline():
    '''Configure pyreadline (the Windows readline emulation) for the shell.

    Writes a pyreadline configuration file and a history file under the
    calibre cache directory, points pyreadline at them and enables tab
    completion. Prints a message and does nothing further if
    pyreadline/readline cannot be imported.
    '''
    # Everything between the triple quotes is the literal content of the
    # pyreadline configuration file; %r is substituted with the history file
    # path below. Lines starting with # inside the string are pyreadline
    # config comments, not Python comments.
    config = '''
#Bind keys for exit (keys only work on empty lines
#disable_readline(True) #Disable pyreadline completely.
debug_output("off") #"on" saves log info to./pyreadline_debug_log.txt
#"on_nologfile" only enables print warning messages
bind_exit_key("Control-d")
bind_exit_key("Control-z")
#Commands for moving
bind_key("Home", "beginning_of_line")
bind_key("End", "end_of_line")
bind_key("Left", "backward_char")
bind_key("Control-b", "backward_char")
bind_key("Right", "forward_char")
bind_key("Control-f", "forward_char")
bind_key("Alt-f", "forward_word")
bind_key("Alt-b", "backward_word")
bind_key("Clear", "clear_screen")
bind_key("Control-l", "clear_screen")
bind_key("Control-a", "beginning_of_line")
bind_key("Control-e", "end_of_line")
#bind_key("Control-l", "redraw_current_line")
#Commands for Manipulating the History
bind_key("Return", "accept_line")
bind_key("Control-p", "previous_history")
bind_key("Control-n", "next_history")
bind_key("Up", "history_search_backward")
bind_key("Down", "history_search_forward")
bind_key("Alt-<", "beginning_of_history")
bind_key("Alt->", "end_of_history")
bind_key("Control-r", "reverse_search_history")
bind_key("Control-s", "forward_search_history")
bind_key("Alt-p", "non_incremental_reverse_search_history")
bind_key("Alt-n", "non_incremental_forward_search_history")
bind_key("Control-z", "undo")
bind_key("Control-_", "undo")
#Commands for Changing Text
bind_key("Delete", "delete_char")
bind_key("Control-d", "delete_char")
bind_key("BackSpace", "backward_delete_char")
#bind_key("Control-Shift-v", "quoted_insert")
bind_key("Control-space", "self_insert")
bind_key("Control-BackSpace", "backward_delete_word")
#Killing and Yanking
bind_key("Control-k", "kill_line")
bind_key("Control-shift-k", "kill_whole_line")
bind_key("Escape", "kill_whole_line")
bind_key("Meta-d", "kill_word")
bind_key("Control-w", "unix_word_rubout")
#bind_key("Control-Delete", "forward_kill_word")
#Copy paste
bind_key("Shift-Right", "forward_char_extend_selection")
bind_key("Shift-Left", "backward_char_extend_selection")
bind_key("Shift-Control-Right", "forward_word_extend_selection")
bind_key("Shift-Control-Left", "backward_word_extend_selection")
bind_key("Control-m", "set_mark")
bind_key("Control-Shift-x", "copy_selection_to_clipboard")
#bind_key("Control-c", "copy_selection_to_clipboard") #Needs allow_ctrl_c(True) below to be uncommented
bind_key("Control-q", "copy_region_to_clipboard")
bind_key('Control-Shift-v', "paste_mulitline_code")
bind_key("Control-x", "cut_selection_to_clipboard")
bind_key("Control-v", "paste")
bind_key("Control-y", "yank")
bind_key("Alt-v", "ipython_paste")
#Unbinding keys:
#un_bind_key("Home")
#Other
bell_style("none") #modes: none, audible, visible(not implemented)
show_all_if_ambiguous("on")
mark_directories("on")
completer_delims(" \t\n\"\\'`@$><=;|&{(?")
complete_filesystem("on")
debug_output("off")
#allow_ctrl_c(True) #(Allows use of ctrl-c as copy key, still propagate keyboardinterrupt when not waiting for input)
history_filename(%r)
history_length(2000) #value of -1 means no limit
#set_mode("vi") #will cause following bind_keys to bind to vi mode as well as activate vi mode
#ctrl_c_tap_time_interval(0.3)
'''
    try:
        import pyreadline.rlmain
        if not os.path.exists(ipydir):
            os.makedirs(ipydir)
        conf = os.path.join(ipydir, 'pyreadline.txt')
        hist = os.path.join(ipydir, 'history.txt')
        # Inject the history file path into the config template.
        config = config % hist
        with open(conf, 'wb') as f:
            f.write(config.encode('utf-8'))
        pyreadline.rlmain.config_path = conf
        import atexit
        import readline
        import pyreadline.unicode_helper  # noqa
        # Normally the codepage for pyreadline is set to be sys.stdout.encoding
        # if you need to change this uncomment the following line
        # pyreadline.unicode_helper.pyreadline_codepage="utf8"
    except ImportError:
        print("Module readline not available.")
    else:
        # import tab completion functionality
        import rlcompleter
        # Override completer from rlcompleter to disable automatic ( on callable
        completer_obj = rlcompleter.Completer()
        def nop(val, word):
            return word
        completer_obj._callable_postfix = nop
        readline.set_completer(completer_obj.complete)
        # activate tab completion
        readline.parse_and_bind("tab: complete")
        readline.read_history_file()
        atexit.register(readline.write_history_file)
        # Drop local module references; readline keeps its own state.
        del readline, rlcompleter, atexit
class Exit:
    '''Object bound to ``exit`` in the shell namespace: evaluating, printing
    or calling it terminates the process with exit status 0.'''

    def __repr__(self):
        raise SystemExit(0)

    def __call__(self):
        raise SystemExit(0)

    # Printing the object behaves exactly like repr()-ing it.
    __str__ = __repr__
class Helper:
    '''Stand-in for the builtin ``help`` inside the embedded console:
    delegates calls to :mod:`pydoc` and advertises itself via repr.'''

    def __call__(self, *args, **kwds):
        import pydoc
        return pydoc.help(*args, **kwds)

    def __repr__(self):
        return ("Type help() for interactive help, "
                "or help(object) for help about object.")
def simple_repl(user_ns=None):
    '''Run a plain ``code.InteractiveConsole`` REPL (fallback when IPython
    is not installed).

    :param user_ns: optional dict of names to expose in the interactive
        namespace; it is modified in place when supplied.

    Note: the default used to be a mutable ``{}``, which this function then
    mutated, leaking ``exit``/``help`` etc. across calls; it is now ``None``
    (backward-compatible, the body already treated falsy values as empty).
    '''
    if iswindows:
        setup_pyreadline()
    else:
        try:
            import rlcompleter  # noqa
            import readline  # noqa
            readline.parse_and_bind("tab: complete")
        except ImportError:
            # readline is optional; the console still works without it.
            pass

    user_ns = user_ns or {}
    import sys, re  # noqa
    # Expose the common modules in the shell namespace without clobbering
    # any caller-supplied bindings.
    for x in ('os', 'sys', 're'):
        user_ns[x] = user_ns.get(x, globals().get(x, locals().get(x)))
    user_ns['exit'] = Exit()
    user_ns['help'] = Helper()
    from code import InteractiveConsole
    console = InteractiveConsole(user_ns)
    console.interact(BANNER + 'Use exit to quit')
def ipython(user_ns=None):
    '''Launch an embedded IPython shell with calibre branding, falling back
    to :func:`simple_repl` when IPython is not importable.

    :param user_ns: optional dict of names for the interactive namespace.
    '''
    os.environ['IPYTHONDIR'] = ipydir
    have_ipython = True
    try:
        from IPython.terminal.embed import InteractiveShellEmbed
        from IPython.terminal.prompts import Prompts, Token
        from traitlets.config.loader import Config
    except ImportError:
        have_ipython = False
    if not have_ipython:
        return simple_repl(user_ns=user_ns)
    class CustomPrompt(Prompts):
        # Prompt looks like: calibre[<version>]>
        def in_prompt_tokens(self, cli=None):
            return [
                (Token.Prompt, 'calibre['),
                (Token.PromptNum, get_version()),
                (Token.Prompt, ']> '),
            ]
        def out_prompt_tokens(self):
            return []
    # NOTE(review): defns is built but never passed to the shell below —
    # possibly it was intended as the user_ns; verify before relying on it.
    defns = {'os':os, 're':re, 'sys':sys}
    defns.update(user_ns or {})
    c = Config()
    # Honour the user's own IPython configuration file if present.
    user_conf = os.path.expanduser('~/.ipython/profile_default/ipython_config.py')
    if os.path.exists(user_conf):
        exec_path(user_conf, {'get_config': lambda: c})
    c.TerminalInteractiveShell.prompts_class = CustomPrompt
    c.InteractiveShellApp.exec_lines = [
        'from __future__ import division, absolute_import, unicode_literals, print_function',
    ]
    c.TerminalInteractiveShell.confirm_exit = False
    c.TerminalInteractiveShell.banner1 = BANNER
    c.BaseIPythonApplication.ipython_dir = ipydir
    c.InteractiveShell.separate_in = ''
    c.InteractiveShell.separate_out = ''
    c.InteractiveShell.separate_out2 = ''
    ipshell = InteractiveShellEmbed.instance(config=c, user_ns=user_ns)
    ipshell()
| 8,109 | Python | .py | 192 | 37.15625 | 118 | 0.631652 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,174 | recycle_bin.py | kovidgoyal_calibre/src/calibre/utils/recycle_bin.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import shutil
import sys
import time
from calibre import isbytestring
from calibre.constants import filesystem_encoding, islinux, ismacos, iswindows
# Platform dispatch: ``recycle`` is bound to a send-to-recycle-bin
# implementation for the current OS, or left as None if unsupported.
recycle = None
if iswindows:
    from threading import Lock
    from calibre.utils.ipc import eintr_retry_call
    from calibre_extensions import winutil
    # Long-lived worker process handle and a lock serialising access to it.
    recycler = None
    rlock = Lock()
    def start_recycler():
        '''Start the long-lived recycler worker process, if not already running.'''
        global recycler
        if recycler is None:
            from calibre.utils.ipc.simple_worker import start_pipe_worker
            recycler = start_pipe_worker('from calibre.utils.recycle_bin import recycler_main; recycler_main()')
    def recycle_path(path):
        # Actual Windows recycle-bin call, runs inside the worker process.
        winutil.move_to_trash(str(path))
    def recycler_main():
        '''Worker-process main loop: read one path per line from stdin,
        recycle it, and answer OK/KO on stdout.'''
        stdin = getattr(sys.stdin, 'buffer', sys.stdin)
        stdout = getattr(sys.stdout, 'buffer', sys.stdout)
        while True:
            path = eintr_retry_call(stdin.readline)
            if not path:
                break
            try:
                path = path.decode('utf-8').rstrip()
            except (ValueError, TypeError):
                break
            try:
                recycle_path(path)
            except:
                # Deliberately broad: the worker must never die on a failed
                # recycle; it reports KO and keeps serving requests.
                eintr_retry_call(stdout.write, b'KO\n')
                stdout.flush()
                try:
                    import traceback
                    traceback.print_exc()  # goes to stderr, which is the same as for parent process
                except Exception:
                    pass  # Ignore failures to write the traceback, since GUI processes on windows have no stderr
            else:
                eintr_retry_call(stdout.write, b'OK\n')
                stdout.flush()
    def delegate_recycle(path):
        '''Send *path* to the worker process and wait for its OK/KO reply.'''
        if '\n' in path:
            # Newline is the request delimiter, so it cannot appear in paths.
            raise ValueError('Cannot recycle paths that have newlines in them (%r)' % path)
        with rlock:
            start_recycler()
            recycler.stdin.write(path.encode('utf-8'))
            recycler.stdin.write(b'\n')
            recycler.stdin.flush()
            # Theoretically this could be made non-blocking using a
            # thread+queue, however the original implementation was blocking,
            # so I am leaving it as blocking.
            result = eintr_retry_call(recycler.stdout.readline)
        if result.rstrip() != b'OK':
            raise RuntimeError('recycler failed to recycle: %r' % path)
    def recycle(path):
        # We have to run the delete to recycle bin in a separate process as the
        # morons who wrote SHFileOperation designed it to spin the event loop
        # even when no UI is created. And there is no other way to send files
        # to the recycle bin on windows. Le Sigh. So we do it in a worker
        # process. Unfortunately, if the worker process exits immediately after
        # deleting to recycle bin, winblows does not update the recycle bin
        # icon. Le Double Sigh. So we use a long lived worker process, that is
        # started on first recycle, and sticks around to handle subsequent
        # recycles.
        if isinstance(path, bytes):
            path = path.decode(filesystem_encoding)
        path = os.path.abspath(path)  # Windows does not like recycling relative paths
        return delegate_recycle(path)
elif ismacos:
    from calibre_extensions.cocoa import send2trash
    def osx_recycle(path):
        # macOS: native trash support via the cocoa extension.
        if isbytestring(path):
            path = path.decode(filesystem_encoding)
        send2trash(path)
    recycle = osx_recycle
elif islinux:
    from calibre.utils.linux_trash import send2trash
    def fdo_recycle(path):
        # Linux: freedesktop.org trash specification implementation.
        if isbytestring(path):
            path = path.decode(filesystem_encoding)
        path = os.path.abspath(path)
        send2trash(path)
    recycle = fdo_recycle
# True iff a recycle-bin implementation exists for this platform.
can_recycle = callable(recycle)
def nuke_recycle():
    '''Globally disable recycling; delete_file/delete_tree become permanent.'''
    global can_recycle
    can_recycle = False
def restore_recyle():
    # NOTE: the misspelled name ("recyle") is part of the public API and is
    # kept for backwards compatibility with existing callers.
    '''Re-enable recycling if the platform supports it.'''
    global can_recycle
    can_recycle = callable(recycle)
def delete_file(path, permanent=False):
    '''Delete a single file.

    :param path: path of the file to delete
    :param permanent: when False and a recycle-bin implementation is
        available, first try to send the file to the recycle bin, falling
        back to permanent deletion if recycling fails.
    '''
    if not permanent and can_recycle:
        try:
            recycle(path)
            return
        except Exception:
            # Recycling is best-effort: log the failure and fall through to
            # a permanent delete. (Previously a bare ``except:`` which also
            # swallowed KeyboardInterrupt/SystemExit and then deleted the
            # file permanently.)
            import traceback
            traceback.print_exc()
    os.remove(path)
def delete_tree(path, permanent=False):
    '''Recursively delete the directory *path*.

    :param permanent: when False and a recycle-bin implementation is
        available, first try to send the tree to the recycle bin, falling
        back to a permanent ``shutil.rmtree`` if recycling fails.
    '''
    if permanent:
        try:
            # For completely mysterious reasons, sometimes a file is left open
            # leading to access errors. If we get an exception, wait and hope
            # that whatever has the file (Antivirus, DropBox?) lets go of it.
            shutil.rmtree(path)
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C is not swallowed.
            import traceback
            traceback.print_exc()
            time.sleep(1)
            shutil.rmtree(path)
    else:
        if can_recycle:
            try:
                recycle(path)
                return
            except Exception:
                import traceback
                traceback.print_exc()
        # Recycling unavailable or failed: delete permanently.
        delete_tree(path, permanent=True)
| 5,017 | Python | .py | 128 | 29.601563 | 113 | 0.620761 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,175 | inotify.py | kovidgoyal_calibre/src/calibre/utils/inotify.py | #!/usr/bin/env python
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import errno
import os
import select
import sys
class INotifyError(Exception):
    '''Raised when the inotify wrapper cannot be set up (non-Linux platform
    or too-old libc/python).'''
    pass
class NoSuchDir(ValueError):
    '''Raised when the directory to be watched does not exist, is not a
    directory, or is not accessible.'''
    pass
class BaseDirChanged(ValueError):
    '''Raised when the watched base directory itself is moved or deleted.'''
    pass
class DirTooLarge(ValueError):
    '''Raised when a directory tree has more subdirectories than the
    kernel's inotify watch limit allows.'''

    def __init__(self, bdir):
        super().__init__(f'The directory {bdir} is too large to monitor. Try increasing the value in /proc/sys/fs/inotify/max_user_watches')
# Cached tuple of ctypes wrappers (init1, add_watch, rm_watch, read).
_inotify = None
def load_inotify():  # {{{
    '''Initialize and cache the inotify ctypes wrappers.

    Returns ``(inotify_init1, inotify_add_watch, inotify_rm_watch, read)``
    bound from libc. Raises INotifyError on unsupported platforms or when
    libc lacks the inotify entry points.
    '''
    global _inotify
    if _inotify is None:
        if hasattr(sys, 'getwindowsversion'):
            # On windows abort before loading the C library. Windows has
            # multiple, incompatible C runtimes, and we have no way of knowing
            # if the one chosen by ctypes is compatible with the currently
            # loaded one.
            raise INotifyError('INotify not available on windows')
        if sys.platform == 'darwin':
            raise INotifyError('INotify not available on OS X')
        import ctypes
        if not hasattr(ctypes, 'c_ssize_t'):
            raise INotifyError('You need python >= 2.7 to use inotify')
        # use_errno=True is required so errno from the syscalls is preserved.
        libc = ctypes.CDLL(None, use_errno=True)
        for function in ("inotify_add_watch", "inotify_init1", "inotify_rm_watch"):
            if not hasattr(libc, function):
                raise INotifyError('libc is too old')
        # inotify_init1()
        prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, use_errno=True)
        init1 = prototype(('inotify_init1', libc), ((1, "flags", 0),))
        # inotify_add_watch()
        prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint32, use_errno=True)
        add_watch = prototype(('inotify_add_watch', libc), (
            (1, "fd"), (1, "pathname"), (1, "mask")), use_errno=True)
        # inotify_rm_watch()
        prototype = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int, use_errno=True)
        rm_watch = prototype(('inotify_rm_watch', libc), (
            (1, "fd"), (1, "wd")), use_errno=True)
        # read()
        prototype = ctypes.CFUNCTYPE(ctypes.c_ssize_t, ctypes.c_int, ctypes.c_void_p, ctypes.c_size_t, use_errno=True)
        read = prototype(('read', libc), (
            (1, "fd"), (1, "buf"), (1, "count")), use_errno=True)
        _inotify = (init1, add_watch, rm_watch, read)
    return _inotify
# }}}
class INotify:
    '''Thin ctypes-based wrapper around the Linux inotify API.

    Subclasses implement :meth:`process_event`; :meth:`read` drains the
    inotify fd and dispatches one call per event.
    '''
    # See <sys/inotify.h> for the flags defined below
    # Supported events suitable for MASK parameter of INOTIFY_ADD_WATCH.
    ACCESS = 0x00000001 # File was accessed.
    MODIFY = 0x00000002 # File was modified.
    ATTRIB = 0x00000004 # Metadata changed.
    CLOSE_WRITE = 0x00000008 # Writtable file was closed.
    CLOSE_NOWRITE = 0x00000010 # Unwrittable file closed.
    OPEN = 0x00000020 # File was opened.
    MOVED_FROM = 0x00000040 # File was moved from X.
    MOVED_TO = 0x00000080 # File was moved to Y.
    CREATE = 0x00000100 # Subfile was created.
    DELETE = 0x00000200 # Subfile was deleted.
    DELETE_SELF = 0x00000400 # Self was deleted.
    MOVE_SELF = 0x00000800 # Self was moved.
    # Events sent by the kernel.
    UNMOUNT = 0x00002000 # Backing fs was unmounted.
    Q_OVERFLOW = 0x00004000 # Event queued overflowed.
    IGNORED = 0x00008000 # File was ignored.
    # Helper events.
    CLOSE = (CLOSE_WRITE | CLOSE_NOWRITE) # Close.
    MOVE = (MOVED_FROM | MOVED_TO) # Moves.
    # Special flags.
    ONLYDIR = 0x01000000 # Only watch the path if it is a directory.
    DONT_FOLLOW = 0x02000000 # Do not follow a sym link.
    EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects.
    MASK_ADD = 0x20000000 # Add to the mask of an already existing watch.
    ISDIR = 0x40000000 # Event occurred against dir.
    ONESHOT = 0x80000000 # Only send event once.
    # All events which a program can wait on.
    ALL_EVENTS = (ACCESS | MODIFY | ATTRIB | CLOSE_WRITE | CLOSE_NOWRITE |
                  OPEN | MOVED_FROM | MOVED_TO | CREATE | DELETE |
                  DELETE_SELF | MOVE_SELF)
    # See <bits/inotify.h>
    CLOEXEC = 0x80000
    NONBLOCK = 0x800
    def __init__(self, cloexec=True, nonblock=True):
        '''Create the inotify fd via inotify_init1() with the given flags.'''
        import ctypes
        import struct
        self._init1, self._add_watch, self._rm_watch, self._read = load_inotify()
        flags = 0
        if cloexec:
            flags |= self.CLOEXEC
        if nonblock:
            flags |= self.NONBLOCK
        self._inotify_fd = self._init1(flags)
        if self._inotify_fd == -1:
            raise INotifyError(os.strerror(ctypes.get_errno()))
        # Read buffer for raw inotify event records.
        self._buf = ctypes.create_string_buffer(5120)
        self.fenc = sys.getfilesystemencoding() or 'utf-8'
        # Fixed-size event header: wd, mask, cookie, name length.
        self.hdr = struct.Struct(b'iIII')
        if self.fenc == 'ascii':
            self.fenc = 'utf-8'
        # We keep a reference to os to prevent it from being deleted
        # during interpreter shutdown, which would lead to errors in the
        # __del__ method
        self.os = os
    def handle_error(self):
        '''Raise OSError for the current ctypes errno, with a hint for ENOSPC.'''
        import ctypes
        eno = ctypes.get_errno()
        extra = ''
        if eno == errno.ENOSPC:
            extra = 'You may need to increase the inotify limits on your system, via /proc/sys/inotify/max_user_*'
        raise OSError(eno, self.os.strerror(eno) + extra)
    def __del__(self):
        # This method can be called during interpreter shutdown, which means we
        # must do the absolute minimum here. Note that there could be running
        # daemon threads that are trying to call other methods on this object.
        try:
            self.os.close(self._inotify_fd)
        except (AttributeError, TypeError):
            pass
    def close(self):
        '''Close the inotify fd and drop the ctypes wrappers.'''
        if hasattr(self, '_inotify_fd'):
            self.os.close(self._inotify_fd)
            del self.os
            del self._add_watch
            del self._rm_watch
            del self._inotify_fd
    def __enter__(self):
        return self
    def __exit__(self, *args):
        self.close()
    def read(self, get_name=True):
        '''Drain all pending events from the inotify fd and dispatch each
        one to :meth:`process_event`.

        :param get_name: when False, the per-event name is not decoded and
            ``None`` is passed to process_event instead.
        '''
        import ctypes
        buf = []
        while True:
            num = self._read(self._inotify_fd, self._buf, len(self._buf))
            if num == 0:
                break
            if num < 0:
                en = ctypes.get_errno()
                if en == errno.EAGAIN:
                    break  # No more data
                if en == errno.EINTR:
                    continue  # Interrupted, try again
                raise OSError(en, self.os.strerror(en))
            buf.append(self._buf.raw[:num])
        raw = b''.join(buf)
        pos = 0
        lraw = len(raw)
        # Walk the variable-length event records: fixed header followed by a
        # NUL-padded name of name_len bytes.
        while lraw - pos >= self.hdr.size:
            wd, mask, cookie, name_len = self.hdr.unpack_from(raw, pos)
            pos += self.hdr.size
            name = None
            if get_name:
                name = raw[pos:pos+name_len].rstrip(b'\0').decode(self.fenc)
            pos += name_len
            self.process_event(wd, mask, cookie, name)
    def process_event(self, *args):
        # Subclass responsibility.
        raise NotImplementedError()
    def wait(self, timeout=None):
        'Return True iff there are events waiting to be read. Blocks if timeout is None. Polls if timeout is 0.'
        return len((select.select([self._inotify_fd], [], []) if timeout is None else select.select([self._inotify_fd], [], [], timeout))[0]) > 0
def realpath(path):
    '''Resolve symlinks in *path* and return the absolute, canonical form.'''
    resolved = os.path.realpath(path)
    return os.path.abspath(resolved)
class INotifyTreeWatcher(INotify):
    '''Watch a directory tree recursively and accumulate the set of
    modified paths; calling the instance returns-and-resets that set.'''
    is_dummy = False
    def __init__(self, basedir, ignore_event=None):
        '''
        :param basedir: root directory to watch (resolved via realpath)
        :param ignore_event: optional callable ``(path, name) -> bool``;
            returning True suppresses recording of that event.
        '''
        super().__init__()
        self.basedir = realpath(basedir)
        self.watch_tree()
        self.modified = set()
        self.ignore_event = (lambda path, name: False) if ignore_event is None else ignore_event
    def watch_tree(self):
        '''(Re)create watches for the whole tree under basedir.'''
        self.watched_dirs = {}
        self.watched_rmap = {}
        try:
            self.add_watches(self.basedir)
        except OSError as e:
            # NOTE(review): OSErrors other than ENOSPC are silently
            # swallowed here — presumably deliberate best-effort; verify.
            if e.errno == errno.ENOSPC:
                raise DirTooLarge(self.basedir)
    def add_watches(self, base, top_level=True):
        ''' Add watches for this directory and all its descendant directories,
        recursively. '''
        base = realpath(base)
        # There may exist a link which leads to an endless
        # add_watches loop or to maximum recursion depth exceeded
        if not top_level and base in self.watched_dirs:
            return
        try:
            is_dir = self.add_watch(base)
        except OSError as e:
            if e.errno == errno.ENOENT:
                # The entry could have been deleted between listdir() and
                # add_watch().
                if top_level:
                    raise NoSuchDir(f'The dir {base} does not exist')
                return
            if e.errno == errno.EACCES:
                # We silently ignore entries for which we dont have permission,
                # unless they are the top level dir
                if top_level:
                    raise NoSuchDir(f'You do not have permission to monitor {base}')
                return
            raise
        else:
            if is_dir:
                try:
                    files = os.listdir(base)
                except OSError as e:
                    if e.errno in (errno.ENOTDIR, errno.ENOENT):
                        # The dir was deleted/replaced between the add_watch()
                        # and listdir()
                        if top_level:
                            raise NoSuchDir(f'The dir {base} does not exist')
                        return
                    raise
                for x in files:
                    self.add_watches(os.path.join(base, x), top_level=False)
            elif top_level:
                # The top level dir is a file, not good.
                raise NoSuchDir(f'The dir {base} does not exist')
    def add_watch(self, path):
        '''Add an inotify watch on *path*.

        Returns True if a watch was added, False if path is not a directory
        (ENOTDIR); raises OSError for other failures.
        '''
        import ctypes
        bpath = path if isinstance(path, bytes) else path.encode(self.fenc)
        wd = self._add_watch(self._inotify_fd, ctypes.c_char_p(bpath),
                # Ignore symlinks and watch only directories
                self.DONT_FOLLOW | self.ONLYDIR |
                self.MODIFY | self.CREATE | self.DELETE |
                self.MOVE_SELF | self.MOVED_FROM | self.MOVED_TO |
                self.ATTRIB | self.DELETE_SELF)
        if wd == -1:
            eno = ctypes.get_errno()
            if eno == errno.ENOTDIR:
                return False
            raise OSError(eno, f'Failed to add watch for: {path}: {self.os.strerror(eno)}')
        self.watched_dirs[path] = wd
        self.watched_rmap[wd] = path
        return True
    def process_event(self, wd, mask, cookie, name):
        '''Record one inotify event into self.modified and maintain watches.'''
        if wd == -1 and (mask & self.Q_OVERFLOW):
            # We missed some INOTIFY events, so we dont
            # know the state of any tracked dirs.
            self.watch_tree()
            # None marks "everything may have changed".
            self.modified.add(None)
            return
        path = self.watched_rmap.get(wd, None)
        if path is not None:
            if not self.ignore_event(path, name):
                self.modified.add(os.path.join(path, name or ''))
            if mask & self.CREATE:
                # A new sub-directory might have been created, monitor it.
                try:
                    self.add_watch(os.path.join(path, name))
                except OSError as e:
                    if e.errno == errno.ENOENT:
                        # Deleted before add_watch()
                        pass
                    elif e.errno == errno.ENOSPC:
                        raise DirTooLarge(self.basedir)
                    else:
                        raise
            if (mask & self.DELETE_SELF or mask & self.MOVE_SELF) and path == self.basedir:
                raise BaseDirChanged('The directory %s was moved/deleted' % path)
    def __call__(self):
        '''Drain pending events and return (then reset) the modified set.'''
        self.read()
        ret = self.modified
        self.modified = set()
        return ret
if __name__ == '__main__':
    # Simple manual test: monitor the directory given as the last argument
    # and print changed paths until interrupted with Ctrl-C.
    w = INotifyTreeWatcher(sys.argv[-1])
    w()
    print('Monitoring', sys.argv[-1], 'press Ctrl-C to stop')
    try:
        while w.wait():
            modified = w()
            for path in modified:
                print(path or sys.argv[-1], 'changed')
        raise SystemExit('inotify flaked out')
    except KeyboardInterrupt:
        pass
| 12,540 | Python | .py | 286 | 33.251748 | 149 | 0.574906 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,176 | bibtex.py | kovidgoyal_calibre/src/calibre/utils/bibtex.py | """ Collection of python utility-methodes commonly used by other
bibliograph packages.
From http://pypi.python.org/pypi/bibliograph.core/
from Tom Gross <itconsense@gmail.com>
Adapted for calibre use
Zope Public License (ZPL) Version 2.1
A copyright notice accompanies this license document that
identifies the copyright holders.
This license has been certified as open source. It has also
been designated as GPL compatible by the Free Software
Foundation (FSF).
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the
following conditions are met:
1. Redistributions in source code must retain the
accompanying copyright notice, this list of conditions,
and the following disclaimer.
2. Redistributions in binary form must reproduce the accompanying
copyright notice, this list of conditions, and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
3. Names of the copyright holders must not be used to
endorse or promote products derived from this software
without prior written permission from the copyright
holders.
4. The right to distribute this software or to use it for
any purpose does not give you the right to use
Servicemarks (sm) or Trademarks (tm) of the copyright
holders. Use of them is covered by separate agreement
with the copyright holders.
5. If any files are modified, you must cause the modified
files to carry prominent notices stating that you changed
the files and the date of any change.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS''
AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
NO EVENT SHALL THE COPYRIGHT HOLDERS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
"""
# Module metadata: maintainer contact, and the docstring markup
# convention (reStructuredText) used in this file.
__author__ = 'sengian <sengian1 at gmail.com>'
__docformat__ = 'restructuredtext en'
# NOTE(review): re/string are presumably used by conversion helpers
# later in this module (not visible in this chunk) — confirm before
# removing. MReplace is calibre's multi-pattern string replacer; it is
# presumably driven by the utf8enc2latex_mapping table defined below.
import re
import string
from calibre.utils.mreplace import MReplace
utf8enc2latex_mapping = { # {{{
# This is a mapping of Unicode characters to LaTeX equivalents.
# The information has been extracted from
# <http://www.w3.org/2003/entities/xml/unicode.xml>, written by
# David Carlisle and Sebastian Rahtz.
#
# The extraction has been done by the "create_unimap.py" script
# located at <http://docutils.sf.net/tools/dev/create_unimap.py>.
# Fix some encoding problem between cp1252 and latin1
# from http://www.microsoft.com/typography/unicode/1252.htm
'\x80': '{\\texteuro}', # EURO SIGN
'\x82': '{,}', # SINGLE LOW-9 QUOTATION MARK
'\x83': '$f$', # LATIN SMALL LETTER F WITH HOOK
'\x84': '{,,}', # DOUBLE LOW-9 QUOTATION MARK
'\x85': '{\\ldots}', # HORIZONTAL ELLIPSIS
'\x86': '{\\textdagger}', # DAGGER
'\x87': '{\\textdaggerdbl}', # DOUBLE DAGGER
'\x88': '{\textasciicircum}', # MODIFIER LETTER CIRCUMFLEX ACCENT
'\x89': '{\\textperthousand}', # PER MILLE SIGN
'\x8A': '{\\v{S}}', # LATIN CAPITAL LETTER S WITH CARON
'\x8B': '{\\guilsinglleft}', # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\x8C': '{\\OE}', # LATIN CAPITAL LIGATURE OE
'\x8E': '{\\v{Z}}', # LATIN CAPITAL LETTER Z WITH CARON
'\x91': '{`}', # LEFT SINGLE QUOTATION MARK
'\x92': "{'}", # RIGHT SINGLE QUOTATION MARK
'\x93': '{\\textquotedblleft}', # LEFT DOUBLE QUOTATION MARK
'\x94': '{\\textquotedblright}', # RIGHT DOUBLE QUOTATION MARK
'\x95': '{\\textbullet}', # BULLET
'\x96': '{\\textendash}', # EN DASH
'\x97': '{\\textemdash}', # EM DASH
'\x98': '{\\texttildelow}', # SMALL TILDE
'\x99': '{\\texttrademark}', # TRADE MARK SIGN
'\x9A': '{\\v{s}}', # LATIN SMALL LETTER S WITH CARON
'\x9B': '{\\guilsinglright}', # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\x9C': '{\\oe}', # LATIN SMALL LIGATURE OE
'\x9E': '{\\v{z}}', # LATIN SMALL LETTER Z WITH CARON
'\x9F': '{\\"{Y}}', # LATIN CAPITAL LETTER Y WITH DIAERESIS
'\xa1': '{\\textexclamdown}',
'\xa2': '{\\textcent}',
'\xa3': '{\\textsterling}',
'\xa4': '{\\textcurrency}',
'\xa5': '{\\textyen}',
'\xa6': '{\\textbrokenbar}',
'\xa7': '{\\textsection}',
'\xa8': '{\\textasciidieresis}',
'\xa9': '{\\textcopyright}',
'\xaa': '{\\textordfeminine}',
'\xab': '{\\guillemotleft}',
'\xad': '$\\-$',
'\xae': '{\\textregistered}',
'\xaf': '{\\textasciimacron}',
'\xb0': '{\\textdegree}',
'\xb6': '{\\textparagraph}',
'\xba': '{\\textordmasculine}',
'\xbb': '{\\guillemotright}',
'\xbc': '{\\textonequarter}',
'\xbd': '{\\textonehalf}',
'\xbe': '{\\textthreequarters}',
'\xbf': '{\\textquestiondown}',
'\xc6': '{\\AE}',
'\xd0': '{\\DH}',
'\xd7': '{\\texttimes}',
'\xd8': '{\\O}',
'\xde': '{\\TH}',
'\xdf': '{\\ss}',
'\xe6': '{\\ae}',
'\xf0': '{\\dh}',
'\xf8': '{\\o}',
'\xfe': '{\\th}',
'\u0100': '{\\={A}}',
'\u0101': '{\\={a}}',
'\u0108': '{\\^{C}}',
'\u0109': '{\\^{c}}',
'\u010a': '{\\.{C}}',
'\u010b': '{\\.{c}}',
'\u0110': '{\\DJ}',
'\u0111': '{\\dj}',
'\u0112': '{\\={E}}',
'\u0113': '{\\={e}}',
'\u0114': '{\\u{E}}',
'\u0115': '{\\u{e}}',
'\u0116': '{\\.{E}}',
'\u0117': '{\\.{e}}',
'\u011c': '{\\^{G}}',
'\u011d': '{\\^{g}}',
'\u0120': '{\\.{G}}',
'\u0121': '{\\.{g}}',
'\u0122': '{\\c{G}}',
'\u0123': '{\\c{g}}',
'\u0124': '{\\^{H}}',
'\u0125': '{\\^{h}}',
'\u0126': '{{\\fontencoding{LELA}\\selectfont\\char40}}',
'\u0127': '$\\Elzxh$',
'\u0128': '{\\升�К�����к醢辈恭����苘��苘辇�К�����к醢辈岌����苘禁升�К�����к醢辈猝����苘禁苘辇�К�����к醢辈悃����苘觖升�К�����к醢辈洄����苘觖苘辇�К�����к醢辈濮����苘臌升�К�����к醢辈妲����苘臌辇�К�����к醢背抱����苘辇К�����к醢背钵����墒�К�����к醢背厂����殛�К�����к醢背揣����苘摞数�К�����к醢背掸����苘摞苘挲�К�����к醢背锭����苘沱她�К�����к醢背阀����苘沱臊�К�����к醢背抚�����苘骘铘孱泔溟铉�膛塘�苘箦戾泗骘铘苘汨狎贡��К�����к醢背猝����苘沱听�К�����к醢背悃����苘沱忑�К�����к醢背妲�����苘骘铘孱泔溟铉�膛塘�苘箦戾泗骘铘苘汨狎舶饼�К�����к醢贝哀�����苘骘铘孱泔溟铉�膛塘�苘箦戾泗骘铘苘汨狎舶昌�К�����к醢贝抱����苘听К�����к醢贝钵����苘忑К�����к醢贝掸����苘沱锡�К�����к醢贝锭����苘沱铨�К�����к醢贝恭��Ⅺь��������к醢贝岌����苘吻�К�����к醢贝猝����苘铉�К�����к醢贝悃����苘禁淆�К�����к醢贝洄����苘禁稞�К�����к醢贝濮����苘觖淆�К�����к醢贝妲����苘觖稞�К�����к醢钡钵����苘吓�К�����к醢钡厂����苘镥�К�����к醢钡锭����苘沱引�К�����к醢钡阀����苘沱螨�К�����к醢钡悃����苘摞育�К�����к醢钡洄����苘摞簖�К�����к醢倍锭�����苘骘铘孱泔溟铉�膛塘�苘箦戾泗骘铘苘汨狎捶��К�����к醢倍阀�����苘骘铘孱泔溟铉�膛塘�苘箦戾泗骘铘苘汨狎冻��К�����к醢倍抚����苘��正�К�����к醢倍恭����苘��觚�К�����к醢倍岌����苘禁正�К�����к醢倍猝����苘禁觚�К�����к醢倍悃����苘觖正�К�����к醢倍洄����苘觖觚�К�����к醢狈钵����苘臌正�К�����к醢狈厂����苘臌觚�К�����к醢狈揣����苘摞���К�����к醢狈掸����苘摞鼾�К�����к醢狈锭����苘摞冽�К�����к醢狈阀����苘摞���К�����к醢惫掸����苘翦�翳鲮殓�К�����к醢惫濮����苘翦�纛蜢彗�К�����к醢贬岌��Г苘弭瑜К�����к醢扁岌�����苘骘铘孱泔溟铉�膛塘�苘箦戾泗骘铘苘汨狎惫谍�К�����к醢便钵����苘翦�翡秕忪屦轲妪К�����к醢辨掸��Ⅺ苘��琮��������к醢驳哀��Г苘澎�趄钺ぇ������к醢驳钵��Г苘澎�趄铙幛К�����к醢驳揣��Г苘澎�镳孱铯К�����к醢驳锭��Г苘澎�螋熹ぇ������к醢驳抚�����苘骘铘孱泔溟铉�膛尚�苘箦戾泗骘铘苘汨狎侗��К�����к醢驳恭��Г苘澎�筱梓幛К�����к醢驳猝��Г苘鲠蝈痼殪镱ぇ������к醢捕抱����琮К�����к醢捕厂��Г苘澎�痃犴磲ぇ������к醢捕揣��Г苘澎�疴玑恧К�����к醢捕掸��Г苘澎�趄铊ぇ������к醢捕悃��Г苘澎�怍潇ぇ������к醢捕洄��Г苘澎�螋祆ぇ������к醢捕妲��Г苘澎�趄铐ぇ������к醢卜哀��Г苘澎�趄铐祢ぇ������к醢卜抱��Г苘澎�祠祉颏К�����к醢卜钵����苘澎�祠祛�К�����к醢卜厂��Г苘澎�螋祛ぇ������к醢卜阀��Г苘澎�沆镯彗ぇ������к醢卜抚����苘翦�麴栝�К�����к醢卜恭��Г苘澎�趄铗ぇ������к醢卜岌��Г苘澎�趄铗欷К�����к醢卜猝��Г苘澎�螋趄铗ぇ������к醢卜悃��Г苘澎�蜢ぇ������к醢卜洄��Г苘澎�螋祢ぇ������к醢卜濮��Г苘澎�骅颏К�����к醢卜妲�����苘骘铘孱泔溟铉�膛尚�苘箦戾泗骘铘苘汨狎舶昌�К�����к醢哺钵��Г苘澎�螋祗ぇ������к醢哺厂��Г苘澎�弩瑜К�����к醢哺阀��Г苘澎�趄铘ぇ������к醢哺抚��Г苘澎�螋祠ぇ������к醢哺岌��Г苘澎�瘐痼殪ぇ������к醢哺猝��Г苘澎�痼泸訾К�����к醢哺悃��Г苘澎�轭鲻ぇ������к醢哺洄��Г苘澎�轭鲼ぇ������к醢哺濮��Г苘澎�趄铢ぇ������к醢补哀��Г苘澎�螋禚ぇ������к醢补钵��Г苘澎��镧瑜К�����к醢补揣��Г苘澎�珈篝ぇ������к醢补掸��Г苘澎�蝈珈篝ぇ������к醢补锭��Г苘澎�轭珈篝ぇ������к醢补濮����苘翦�趑躜铍�К�����к醢册揣��
Г苘澎�澌镧瑜К�����к醢册阀��Г苘澎�翦箬ぇ������к醢测悃��Ⅺ���������к醢层阀����苘翦�翎筱殚汜蝻铨К�����к醢层抚��Г苘澎�鲥螋螭К�����к醢层悃��Г苘澎�鲥螋椁К�����к醢蹭哀��Г苘澎�祉螂ぇ������к醢蹭抱��Г苘澎�桁眚毪К�����к醢蹭钵��Г苘澎�筲蜩颏К�����к醢蹭厂��Г苘澎�筲扈颏К�����к醢蹭揣��Г苘澎�蜥轶ぇ������к醢蹭掸��Г苘澎�祜鳏К�����к醢蹭抚����苘翦�翎筱殚怛弼妪К�����к醢蹭恭����苘翦�麴弪轱溷孱翦蝈潺К�����к醢蹭岌����苘螓��К�����к醢蹭猝����苘臌��К�����к醢蹭悃����苘翦�趑殪溴祜鼾К�����к醢插掸����苘麸铄�档��К�����к醢插锭����苘麸铄�创��К�����к醢插阀����苘麸铄�吵��К�����к醢插抚����苘麸铄�膊��К�����к醢插恭����苘麸铄�北��К�����к醢嘲哀����苘帻К�����к醢嘲抱��Ⅺ苘���������к醢嘲钵����苘摭К�����к醢嘲厂����苘',
'\u0304': '{\\=}',
'\u0306': '{\\u}',
'\u0307': '{\\.}',
'\u0308': '{\\"}',
'\u030a': '{\\r}',
'\u030b': '{\\H}',
'\u030c': '{\\v}',
'\u030f': '{\\cyrchar\\C}',
'\u0311': '{{\\fontencoding{LECO}\\selectfont\\char177}}',
'\u0318': '{{\\fontencoding{LECO}\\selectfont\\char184}}',
'\u0319': '{{\\fontencoding{LECO}\\selectfont\\char185}}',
'\u0321': '$\\Elzpalh$',
'\u0322': '{\\Elzrh}',
'\u0327': '{\\c}',
'\u0328': '{\\k}',
'\u032a': '$\\Elzsbbrg$',
'\u032b': '{{\\fontencoding{LECO}\\selectfont\\char203}}',
'\u032f': '{{\\fontencoding{LECO}\\selectfont\\char207}}',
'\u0335': '{\\Elzxl}',
'\u0336': '{\\Elzbar}',
'\u0337': '{{\\fontencoding{LECO}\\selectfont\\char215}}',
'\u0338': '{{\\fontencoding{LECO}\\selectfont\\char216}}',
'\u033a': '{{\\fontencoding{LECO}\\selectfont\\char218}}',
'\u033b': '{{\\fontencoding{LECO}\\selectfont\\char219}}',
'\u033c': '{{\\fontencoding{LECO}\\selectfont\\char220}}',
'\u033d': '{{\\fontencoding{LECO}\\selectfont\\char221}}',
'\u0361': '{{\\fontencoding{LECO}\\selectfont\\char225}}',
'\u0386': "{\\'{A}}",
'\u0388': "{\\'{E}}",
'\u0389': "{\\'{H}}",
'\u038a': "{\\'{}{I}}",
'\u038c': "{\\'{}O}",
'\u038e': "$\\mathrm{'Y}$",
'\u038f': "$\\mathrm{'\\Omega}$",
'\u0390': '$\\acute{\\ddot{\\iota}}$',
'\u0391': '$\\Alpha$',
'\u0392': '$\\Beta$',
'\u0393': '$\\Gamma$',
'\u0394': '$\\Delta$',
'\u0395': '$\\Epsilon$',
'\u0396': '$\\Zeta$',
'\u0397': '$\\Eta$',
'\u0398': '$\\Theta$',
'\u0399': '$\\Iota$',
'\u039a': '$\\Kappa$',
'\u039b': '$\\Lambda$',
'\u039c': '$M$',
'\u039d': '$N$',
'\u039e': '$\\Xi$',
'\u039f': '$O$',
'\u03a0': '$\\Pi$',
'\u03a1': '$\\Rho$',
'\u03a3': '$\\Sigma$',
'\u03a4': '$\\Tau$',
'\u03a5': '$\\Upsilon$',
'\u03a6': '$\\Phi$',
'\u03a8': '$\\Psi$',
'\u03a9': '$\\Omega$',
'\u03aa': '$\\mathrm{\\ddot{I}}$',
'\u03ab': '$\\mathrm{\\ddot{Y}}$',
'\u03ac': "{\\'{$\\alpha$}}",
'\u03ad': '$\\acute{\\epsilon}$',
'\u03ae': '$\\acute{\\eta}$',
'\u03af': '$\\acute{\\iota}$',
'\u03b0': '$\\acute{\\ddot{\\upsilon}}$',
'\u03b1': '$\\alpha$',
'\u03b2': '$\\beta$',
'\u03b3': '$\\gamma$',
'\u03b4': '$\\delta$',
'\u03b5': '$\\epsilon$',
'\u03b6': '$\\zeta$',
'\u03b7': '$\\eta$',
'\u03b8': '{\\texttheta}',
'\u03b9': '$\\iota$',
'\u03ba': '$\\kappa$',
'\u03bb': '$\\lambda$',
'\u03bc': '$\\mu$',
'\u03be': '$\\xi$',
'\u03bf': '$o$',
'\u03c0': '$\\pi$',
'\u03c1': '$\\rho$',
'\u03c2': '$\\varsigma$',
'\u03c3': '$\\sigma$',
'\u03c4': '$\\tau$',
'\u03c5': '$\\upsilon$',
'\u03c6': '$\\varphi$',
'\u03c7': '$\\chi$',
'\u03c8': '$\\psi$',
'\u03c9': '$\\omega$',
'\u03ca': '$\\ddot{\\iota}$',
'\u03cb': '$\\ddot{\\upsilon}$',
'\u03cc': "{\\'{o}}",
'\u03cd': '$\\acute{\\upsilon}$',
'\u03ce': '$\\acute{\\omega}$',
'\u03d0': '{\\Pisymbol{ppi022}{87}}',
'\u03d1': '{\\textvartheta}',
'\u03d2': '$\\Upsilon$',
'\u03d5': '$\\phi$',
'\u03d6': '$\\varpi$',
'\u03da': '$\\Stigma$',
'\u03dc': '$\\Digamma$',
'\u03dd': '$\\digamma$',
'\u03de': '$\\Koppa$',
'\u03e0': '$\\Sampi$',
'\u03f0': '$\\varkappa$',
'\u03f1': '$\\varrho$',
'\u03f4': '{\\textTheta}',
'\u03f6': '$\\backepsilon$',
'\u0403': "{\\cyrchar{\\'\\CYRG}}",
'\u040c': "{\\cyrchar{\\'\\CYRK}}",
'\u0453': "{\\cyrchar{\\'\\cyrg}}",
'\u045c': "{\\cyrchar{\\'\\cyrk}}",
'\u0460': '{\\cyrchar\\CYROMEGA}',
'\u0461': '{\\cyrchar\\cyromega}',
'\u0464': '{\\cyrchar\\CYRIOTE}',
'\u0465': '{\\cyrchar\\cyriote}',
'\u0466': '{\\cyrchar\\CYRLYUS}',
'\u0467': '{\\cyrchar\\cyrlyus}',
'\u0468': '{\\cyrchar\\CYRIOTLYUS}',
'\u0469': '{\\cyrchar\\cyriotlyus}',
'\u046c': '{\\cyrchar\\CYRIOTBYUS}',
'\u046d': '{\\cyrchar\\cyriotbyus}',
'\u046e': '{\\cyrchar\\CYRKSI}',
'\u046f': '{\\cyrchar\\cyrksi}',
'\u0470': '{\\cyrchar\\CYRPSI}',
'\u0471': '{\\cyrchar\\cyrpsi}',
'\u0478': '{\\cyrchar\\CYRUK}',
'\u0479': '{\\cyrchar\\cyruk}',
'\u047a': '{\\cyrchar\\CYROMEGARND}',
'\u047b': '{\\cyrchar\\cyromegarnd}',
'\u047c': '{\\cyrchar\\CYROMEGATITLO}',
'\u047d': '{\\cyrchar\\cyromegatitlo}',
'\u047e': '{\\cyrchar\\CYROT}',
'\u047f': '{\\cyrchar\\cyrot}',
'\u0480': '{\\cyrchar\\CYRKOPPA}',
'\u0481': '{\\cyrchar\\cyrkoppa}',
'\u0482': '{\\cyrchar\\cyrthousands}',
'\u0488': '{\\cyrchar\\cyrhundredthousands}',
'\u0489': '{\\cyrchar\\cyrmillions}',
'\u2002': '{\\hspace{0.6em}}',
'\u2003': '{\\hspace{1em}}',
'\u2004': '{\\hspace{0.33em}}',
'\u2005': '{\\hspace{0.25em}}',
'\u2006': '{\\hspace{0.166em}}',
'\u2007': '{\\hphantom{0}}',
'\u2008': '{\\hphantom{,}}',
'\u2009': '{\\hspace{0.167em}}',
'\u200a': '$\\mkern1mu$',
'\u2010': '{-}',
'\u2013': '{\\textendash}',
'\u2014': '{\\textemdash}',
'\u2015': '{\\rule{1em}{1pt}}',
'\u201b': '$\\Elzreapos$',
'\u201c': '{\\textquotedblleft}',
'\u201d': '{\\textquotedblright}',
'\u2020': '{\\textdagger}',
'\u2021': '{\\textdaggerdbl}',
'\u2022': '{\\textbullet}',
'\u2024': '{.}',
'\u2025': '{..}',
'\u2030': '{\\textperthousand}',
'\u2031': '{\\textpertenthousand}',
'\u2033': "${''}$",
'\u2034': "${'''}$",
'\u2035': '$\\backprime$',
'\u2039': '{\\guilsinglleft}',
'\u203a': '{\\guilsinglright}',
'\u2057': "$''''$",
'\u205f': '{\\mkern4mu}',
'\u2060': '{\\nolinebreak}',
'\u20a7': '{\\ensuremath{\\Elzpes}}',
'\u20ac': '{\\texteuro}',
'\u20db': '$\\dddot$',
'\u20dc': '$\\ddddot$',
'\u2102': '$\\mathbb{C}$',
'\u210a': '{\\mathscr{g}}',
'\u210b': '$\\mathscr{H}$',
'\u210c': '$\\mathfrak{H}$',
'\u210d': '$\\mathbb{H}$',
'\u210f': '$\\hslash$',
'\u2110': '$\\mathscr{I}$',
'\u2111': '$\\mathfrak{I}$',
'\u2112': '$\\mathscr{L}$',
'\u2113': '$\\mathscr{l}$',
'\u2115': '$\\mathbb{N}$',
'\u2118': '$\\wp$',
'\u2119': '$\\mathbb{P}$',
'\u211a': '$\\mathbb{Q}$',
'\u211b': '$\\mathscr{R}$',
'\u211c': '$\\mathfrak{R}$',
'\u211d': '$\\mathbb{R}$',
'\u2122': '{\\texttrademark}',
'\u2124': '$\\mathbb{Z}$',
'\u2128': '$\\mathfrak{Z}$',
'\u2129': '$\\ElsevierGlyph{2129}$',
'\u212b': '{\\AA}',
'\u212c': '$\\mathscr{B}$',
'\u212d': '$\\mathfrak{C}$',
'\u212f': '$\\mathscr{e}$',
'\u2130': '$\\mathscr{E}$',
'\u2131': '$\\mathscr{F}$',
'\u2133': '$\\mathscr{M}$',
'\u2134': '$\\mathscr{o}$',
'\u2135': '$\\aleph$',
'\u2136': '$\\beth$',
'\u2137': '$\\gimel$',
'\u2138': '$\\daleth$',
'\u2153': '$\\textfrac{1}{3}$',
'\u2154': '$\\textfrac{2}{3}$',
'\u2155': '$\\textfrac{1}{5}$',
'\u2156': '$\\textfrac{2}{5}$',
'\u2157': '$\\textfrac{3}{5}$',
'\u2158': '$\\textfrac{4}{5}$',
'\u2159': '$\\textfrac{1}{6}$',
'\u215a': '$\\textfrac{5}{6}$',
'\u215b': '$\\textfrac{1}{8}$',
'\u215c': '$\\textfrac{3}{8}$',
'\u215d': '$\\textfrac{5}{8}$',
'\u215e': '$\\textfrac{7}{8}$',
'\u2194': '$\\leftrightarrow$',
'\u2195': '$\\updownarrow$',
'\u2196': '$\\nwarrow$',
'\u2197': '$\\nearrow$',
'\u2198': '$\\searrow$',
'\u2199': '$\\swarrow$',
'\u219a': '$\\nleftarrow$',
'\u219b': '$\\nrightarrow$',
'\u219c': '$\\arrowwaveright$',
'\u219d': '$\\arrowwaveright$',
'\u219e': '$\\twoheadleftarrow$',
'\u21a0': '$\\twoheadrightarrow$',
'\u21a2': '$\\leftarrowtail$',
'\u21a3': '$\\rightarrowtail$',
'\u21a6': '$\\mapsto$',
'\u21a9': '$\\hookleftarrow$',
'\u21aa': '$\\hookrightarrow$',
'\u21ab': '$\\looparrowleft$',
'\u21ac': '$\\looparrowright$',
'\u21ad': '$\\leftrightsquigarrow$',
'\u21ae': '$\\nleftrightarrow$',
'\u21b0': '$\\Lsh$',
'\u21b1': '$\\Rsh$',
'\u21b3': '$\\ElsevierGlyph{21B3}$',
'\u21b6': '$\\curvearrowleft$',
'\u21b7': '$\\curvearrowright$',
'\u21ba': '$\\circlearrowleft$',
'\u21bb': '$\\circlearrowright$',
'\u21bc': '$\\leftharpoonup$',
'\u21bd': '$\\leftharpoondown$',
'\u21be': '$\\upharpoonright$',
'\u21bf': '$\\upharpoonleft$',
'\u21c0': '$\\rightharpoonup$',
'\u21c1': '$\\rightharpoondown$',
'\u21c2': '$\\downharpoonright$',
'\u21c3': '$\\downharpoonleft$',
'\u21c4': '$\\rightleftarrows$',
'\u21c5': '$\\dblarrowupdown$',
'\u21c6': '$\\leftrightarrows$',
'\u21c7': '$\\leftleftarrows$',
'\u21c8': '$\\upuparrows$',
'\u21c9': '$\\rightrightarrows$',
'\u21ca': '$\\downdownarrows$',
'\u21cb': '$\\leftrightharpoons$',
'\u21cc': '$\\rightleftharpoons$',
'\u21cd': '$\\nLeftarrow$',
'\u21ce': '$\\nLeftrightarrow$',
'\u21cf': '$\\nRightarrow$',
'\u21d0': '$\\Leftarrow$',
'\u21d1': '$\\Uparrow$',
'\u21d2': '$\\Rightarrow$',
'\u21d3': '$\\Downarrow$',
'\u21d4': '$\\Leftrightarrow$',
'\u21d5': '$\\Updownarrow$',
'\u21da': '$\\Lleftarrow$',
'\u21db': '$\\Rrightarrow$',
'\u21dd': '$\\rightsquigarrow$',
'\u21f5': '$\\DownArrowUpArrow$',
'\u2200': '$\\forall$',
'\u2201': '$\\complement$',
'\u2202': '$\\partial$',
'\u2203': '$\\exists$',
'\u2204': '$\\nexists$',
'\u2205': '$\\varnothing$',
'\u2207': '$\\nabla$',
'\u2208': '$\\in$',
'\u2209': '$\\not\\in$',
'\u220b': '$\\ni$',
'\u220c': '$\\not\\ni$',
'\u220f': '$\\prod$',
'\u2210': '$\\coprod$',
'\u2211': '$\\sum$',
'\u2213': '$\\mp$',
'\u2214': '$\\dotplus$',
'\u2216': '$\\setminus$',
'\u2217': '${_\\ast}$',
'\u2218': '$\\circ$',
'\u2219': '$\\bullet$',
'\u221a': '$\\surd$',
'\u221d': '$\\propto$',
'\u221e': '$\\infty$',
'\u221f': '$\\rightangle$',
'\u2220': '$\\angle$',
'\u2221': '$\\measuredangle$',
'\u2222': '$\\sphericalangle$',
'\u2223': '$\\mid$',
'\u2224': '$\\nmid$',
'\u2225': '$\\parallel$',
'\u2226': '$\\nparallel$',
'\u2227': '$\\wedge$',
'\u2228': '$\\vee$',
'\u2229': '$\\cap$',
'\u222a': '$\\cup$',
'\u222b': '$\\int$',
'\u222c': '$\\int\\!\\int$',
'\u222d': '$\\int\\!\\int\\!\\int$',
'\u222e': '$\\oint$',
'\u222f': '$\\surfintegral$',
'\u2230': '$\\volintegral$',
'\u2231': '$\\clwintegral$',
'\u2232': '$\\ElsevierGlyph{2232}$',
'\u2233': '$\\ElsevierGlyph{2233}$',
'\u2234': '$\\therefore$',
'\u2235': '$\\because$',
'\u2237': '$\\Colon$',
'\u2238': '$\\ElsevierGlyph{2238}$',
'\u223a': '$\\mathbin{{:}\\!\\!{-}\\!\\!{:}}$',
'\u223b': '$\\homothetic$',
'\u223c': '$\\sim$',
'\u223d': '$\\backsim$',
'\u223e': '$\\lazysinv$',
'\u2240': '$\\wr$',
'\u2241': '$\\not\\sim$',
'\u2242': '$\\ElsevierGlyph{2242}$',
'\u2243': '$\\simeq$',
'\u2244': '$\\not\\simeq$',
'\u2245': '$\\cong$',
'\u2246': '$\\approxnotequal$',
'\u2247': '$\\not\\cong$',
'\u2248': '$\\approx$',
'\u2249': '$\\not\\approx$',
'\u224a': '$\\approxeq$',
'\u224b': '$\\tildetrpl$',
'\u224c': '$\\allequal$',
'\u224d': '$\\asymp$',
'\u224e': '$\\Bumpeq$',
'\u224f': '$\\bumpeq$',
'\u2250': '$\\doteq$',
'\u2251': '$\\doteqdot$',
'\u2252': '$\\fallingdotseq$',
'\u2253': '$\\risingdotseq$',
'\u2254': '{:=}',
'\u2255': '$=:$',
'\u2256': '$\\eqcirc$',
'\u2257': '$\\circeq$',
'\u2259': '$\\estimates$',
'\u225a': '$\\ElsevierGlyph{225A}$',
'\u225b': '$\\starequal$',
'\u225c': '$\\triangleq$',
'\u225f': '$\\ElsevierGlyph{225F}$',
'\u2260': '$\\not =$',
'\u2262': '$\\not\\equiv$',
'\u2264': '$\\leq$',
'\u2265': '$\\geq$',
'\u2266': '$\\leqq$',
'\u2267': '$\\geqq$',
'\u2268': '$\\lneqq$',
'\u2269': '$\\gneqq$',
'\u226a': '$\\ll$',
'\u226b': '$\\gg$',
'\u226c': '$\\between$',
'\u226d': '$\\not\\kern-0.3em\\times$',
'\u226e': '$\\not<$',
'\u226f': '$\\not>$',
'\u2270': '$\\not\\leq$',
'\u2271': '$\\not\\geq$',
'\u2272': '$\\lessequivlnt$',
'\u2273': '$\\greaterequivlnt$',
'\u2274': '$\\ElsevierGlyph{2274}$',
'\u2275': '$\\ElsevierGlyph{2275}$',
'\u2276': '$\\lessgtr$',
'\u2277': '$\\gtrless$',
'\u2278': '$\\notlessgreater$',
'\u2279': '$\\notgreaterless$',
'\u227a': '$\\prec$',
'\u227b': '$\\succ$',
'\u227c': '$\\preccurlyeq$',
'\u227d': '$\\succcurlyeq$',
'\u227e': '$\\precapprox$',
'\u227f': '$\\succapprox$',
'\u2280': '$\\not\\prec$',
'\u2281': '$\\not\\succ$',
'\u2282': '$\\subset$',
'\u2283': '$\\supset$',
'\u2284': '$\\not\\subset$',
'\u2285': '$\\not\\supset$',
'\u2286': '$\\subseteq$',
'\u2287': '$\\supseteq$',
'\u2288': '$\\not\\subseteq$',
'\u2289': '$\\not\\supseteq$',
'\u228a': '$\\subsetneq$',
'\u228b': '$\\supsetneq$',
'\u228e': '$\\uplus$',
'\u228f': '$\\sqsubset$',
'\u2290': '$\\sqsupset$',
'\u2291': '$\\sqsubseteq$',
'\u2292': '$\\sqsupseteq$',
'\u2293': '$\\sqcap$',
'\u2294': '$\\sqcup$',
'\u2295': '$\\oplus$',
'\u2296': '$\\ominus$',
'\u2297': '$\\otimes$',
'\u2298': '$\\oslash$',
'\u2299': '$\\odot$',
'\u229a': '$\\circledcirc$',
'\u229b': '$\\circledast$',
'\u229d': '$\\circleddash$',
'\u229e': '$\\boxplus$',
'\u229f': '$\\boxminus$',
'\u22a0': '$\\boxtimes$',
'\u22a1': '$\\boxdot$',
'\u22a2': '$\\vdash$',
'\u22a3': '$\\dashv$',
'\u22a4': '$\\top$',
'\u22a5': '$\\perp$',
'\u22a7': '$\\truestate$',
'\u22a8': '$\\forcesextra$',
'\u22a9': '$\\Vdash$',
'\u22aa': '$\\Vvdash$',
'\u22ab': '$\\VDash$',
'\u22ac': '$\\nvdash$',
'\u22ad': '$\\nvDash$',
'\u22ae': '$\\nVdash$',
'\u22af': '$\\nVDash$',
'\u22b2': '$\\vartriangleleft$',
'\u22b3': '$\\vartriangleright$',
'\u22b4': '$\\trianglelefteq$',
'\u22b5': '$\\trianglerighteq$',
'\u22b6': '$\\original$',
'\u22b7': '$\\image$',
'\u22b8': '$\\multimap$',
'\u22b9': '$\\hermitconjmatrix$',
'\u22ba': '$\\intercal$',
'\u22bb': '$\\veebar$',
'\u22be': '$\\rightanglearc$',
'\u22c0': '$\\ElsevierGlyph{22C0}$',
'\u22c1': '$\\ElsevierGlyph{22C1}$',
'\u22c2': '$\\bigcap$',
'\u22c3': '$\\bigcup$',
'\u22c4': '$\\diamond$',
'\u22c5': '$\\cdot$',
'\u22c6': '$\\star$',
'\u22c7': '$\\divideontimes$',
'\u22c8': '$\\bowtie$',
'\u22c9': '$\\ltimes$',
'\u22ca': '$\\rtimes$',
'\u22cb': '$\\leftthreetimes$',
'\u22cc': '$\\rightthreetimes$',
'\u22cd': '$\\backsimeq$',
'\u22ce': '$\\curlyvee$',
'\u22cf': '$\\curlywedge$',
'\u22d0': '$\\Subset$',
'\u22d1': '$\\Supset$',
'\u22d2': '$\\Cap$',
'\u22d3': '$\\Cup$',
'\u22d4': '$\\pitchfork$',
'\u22d6': '$\\lessdot$',
'\u22d7': '$\\gtrdot$',
'\u22d8': '$\\verymuchless$',
'\u22d9': '$\\verymuchgreater$',
'\u22da': '$\\lesseqgtr$',
'\u22db': '$\\gtreqless$',
'\u22de': '$\\curlyeqprec$',
'\u22df': '$\\curlyeqsucc$',
'\u22e2': '$\\not\\sqsubseteq$',
'\u22e3': '$\\not\\sqsupseteq$',
'\u22e5': '$\\Elzsqspne$',
'\u22e6': '$\\lnsim$',
'\u22e7': '$\\gnsim$',
'\u22e8': '$\\precedesnotsimilar$',
'\u22e9': '$\\succnsim$',
'\u22ea': '$\\ntriangleleft$',
'\u22eb': '$\\ntriangleright$',
'\u22ec': '$\\ntrianglelefteq$',
'\u22ed': '$\\ntrianglerighteq$',
'\u22ee': '$\\vdots$',
'\u22ef': '$\\cdots$',
'\u22f0': '$\\upslopeellipsis$',
'\u22f1': '$\\downslopeellipsis$',
'\u2305': '{\\barwedge}',
'\u2306': '$\\perspcorrespond$',
'\u2308': '$\\lceil$',
'\u2309': '$\\rceil$',
'\u230a': '$\\lfloor$',
'\u230b': '$\\rfloor$',
'\u2315': '$\\recorder$',
'\u2316': '$\\mathchar"2208$',
'\u231c': '$\\ulcorner$',
'\u231d': '$\\urcorner$',
'\u231e': '$\\llcorner$',
'\u231f': '$\\lrcorner$',
'\u2322': '$\\frown$',
'\u2323': '$\\smile$',
'\u233d': '$\\ElsevierGlyph{E838}$',
'\u23a3': '$\\Elzdlcorn$',
'\u23b0': '$\\lmoustache$',
'\u23b1': '$\\rmoustache$',
'\u2423': '{\\textvisiblespace}',
'\u2460': '{\\ding{172}}',
'\u2461': '{\\ding{173}}',
'\u2462': '{\\ding{174}}',
'\u2463': '{\\ding{175}}',
'\u2464': '{\\ding{176}}',
'\u2465': '{\\ding{177}}',
'\u2466': '{\\ding{178}}',
'\u2467': '{\\ding{179}}',
'\u2468': '{\\ding{180}}',
'\u2469': '{\\ding{181}}',
'\u24c8': '$\\circledS$',
'\u2506': '$\\Elzdshfnc$',
'\u2519': '$\\Elzsqfnw$',
'\u2571': '$\\diagup$',
'\u25a0': '{\\ding{110}}',
'\u25a1': '$\\square$',
'\u25aa': '$\\blacksquare$',
'\u25ad': '$\\fbox{~}$',
'\u25af': '$\\Elzvrecto$',
'\u25b1': '$\\ElsevierGlyph{E381}$',
'\u25b2': '{\\ding{115}}',
'\u25b3': '$\\bigtriangleup$',
'\u25b4': '$\\blacktriangle$',
'\u25b5': '$\\vartriangle$',
'\u25b8': '$\\blacktriangleright$',
'\u25b9': '$\\triangleright$',
'\u25bc': '{\\ding{116}}',
'\u25bd': '$\\bigtriangledown$',
'\u25be': '$\\blacktriangledown$',
'\u25bf': '$\\triangledown$',
'\u25c2': '$\\blacktriangleleft$',
'\u25c3': '$\\triangleleft$',
'\u25c6': '{\\ding{117}}',
'\u25ca': '$\\lozenge$',
'\u25cb': '$\\bigcirc$',
'\u25cf': '{\\ding{108}}',
'\u25d0': '$\\Elzcirfl$',
'\u25d1': '$\\Elzcirfr$',
'\u25d2': '$\\Elzcirfb$',
'\u25d7': '{\\ding{119}}',
'\u25d8': '$\\Elzrvbull$',
'\u25e7': '$\\Elzsqfl$',
'\u25e8': '$\\Elzsqfr$',
'\u25ea': '$\\Elzsqfse$',
'\u2605': '{\\ding{72}}',
'\u2606': '{\\ding{73}}',
'\u260e': '{\\ding{37}}',
'\u261b': '{\\ding{42}}',
'\u261e': '{\\ding{43}}',
'\u263e': '{\\rightmoon}',
'\u263f': '{\\mercury}',
'\u2640': '{\\venus}',
'\u2642': '{\\male}',
'\u2643': '{\\jupiter}',
'\u2644': '{\\saturn}',
'\u2645': '{\\uranus}',
'\u2646': '{\\neptune}',
'\u2647': '{\\pluto}',
'\u2648': '{\\aries}',
'\u2649': '{\\taurus}',
'\u264a': '{\\gemini}',
'\u264b': '{\\cancer}',
'\u264c': '{\\leo}',
'\u264d': '{\\virgo}',
'\u264e': '{\\libra}',
'\u264f': '{\\scorpio}',
'\u2650': '{\\sagittarius}',
'\u2651': '{\\capricornus}',
'\u2652': '{\\aquarius}',
'\u2653': '{\\pisces}',
'\u2660': '{\\ding{171}}',
'\u2662': '$\\diamond$',
'\u2663': '{\\ding{168}}',
'\u2665': '{\\ding{170}}',
'\u2666': '{\\ding{169}}',
'\u2669': '{\\quarternote}',
'\u266d': '$\\flat$',
'\u266e': '$\\natural$',
'\u266f': '$\\sharp$',
'\u2701': '{\\ding{33}}',
'\u2702': '{\\ding{34}}',
'\u2703': '{\\ding{35}}',
'\u2704': '{\\ding{36}}',
'\u2706': '{\\ding{38}}',
'\u2707': '{\\ding{39}}',
'\u2708': '{\\ding{40}}',
'\u2709': '{\\ding{41}}',
'\u270c': '{\\ding{44}}',
'\u270d': '{\\ding{45}}',
'\u270e': '{\\ding{46}}',
'\u270f': '{\\ding{47}}',
'\u2710': '{\\ding{48}}',
'\u2711': '{\\ding{49}}',
'\u2712': '{\\ding{50}}',
'\u2713': '{\\ding{51}}',
'\u2714': '{\\ding{52}}',
'\u2715': '{\\ding{53}}',
'\u2716': '{\\ding{54}}',
'\u2717': '{\\ding{55}}',
'\u2718': '{\\ding{56}}',
'\u2719': '{\\ding{57}}',
'\u271a': '{\\ding{58}}',
'\u271b': '{\\ding{59}}',
'\u271c': '{\\ding{60}}',
'\u271d': '{\\ding{61}}',
'\u271e': '{\\ding{62}}',
'\u271f': '{\\ding{63}}',
'\u2720': '{\\ding{64}}',
'\u2721': '{\\ding{65}}',
'\u2722': '{\\ding{66}}',
'\u2723': '{\\ding{67}}',
'\u2724': '{\\ding{68}}',
'\u2725': '{\\ding{69}}',
'\u2726': '{\\ding{70}}',
'\u2727': '{\\ding{71}}',
'\u2729': '{\\ding{73}}',
'\u272a': '{\\ding{74}}',
'\u272b': '{\\ding{75}}',
'\u272c': '{\\ding{76}}',
'\u272d': '{\\ding{77}}',
'\u272e': '{\\ding{78}}',
'\u272f': '{\\ding{79}}',
'\u2730': '{\\ding{80}}',
'\u2731': '{\\ding{81}}',
'\u2732': '{\\ding{82}}',
'\u2733': '{\\ding{83}}',
'\u2734': '{\\ding{84}}',
'\u2735': '{\\ding{85}}',
'\u2736': '{\\ding{86}}',
'\u2737': '{\\ding{87}}',
'\u2738': '{\\ding{88}}',
'\u2739': '{\\ding{89}}',
'\u273a': '{\\ding{90}}',
'\u273b': '{\\ding{91}}',
'\u273c': '{\\ding{92}}',
'\u273d': '{\\ding{93}}',
'\u273e': '{\\ding{94}}',
'\u273f': '{\\ding{95}}',
'\u2740': '{\\ding{96}}',
'\u2741': '{\\ding{97}}',
'\u2742': '{\\ding{98}}',
'\u2743': '{\\ding{99}}',
'\u2744': '{\\ding{100}}',
'\u2745': '{\\ding{101}}',
'\u2746': '{\\ding{102}}',
'\u2747': '{\\ding{103}}',
'\u2748': '{\\ding{104}}',
'\u2749': '{\\ding{105}}',
'\u274a': '{\\ding{106}}',
'\u274b': '{\\ding{107}}',
'\u274d': '{\\ding{109}}',
'\u274f': '{\\ding{111}}',
'\u2750': '{\\ding{112}}',
'\u2751': '{\\ding{113}}',
'\u2752': '{\\ding{114}}',
'\u2756': '{\\ding{118}}',
'\u2758': '{\\ding{120}}',
'\u2759': '{\\ding{121}}',
'\u275a': '{\\ding{122}}',
'\u275b': '{\\ding{123}}',
'\u275c': '{\\ding{124}}',
'\u275d': '{\\ding{125}}',
'\u275e': '{\\ding{126}}',
'\u2761': '{\\ding{161}}',
'\u2762': '{\\ding{162}}',
'\u2763': '{\\ding{163}}',
'\u2764': '{\\ding{164}}',
'\u2765': '{\\ding{165}}',
'\u2766': '{\\ding{166}}',
'\u2767': '{\\ding{167}}',
'\u2776': '{\\ding{182}}',
'\u2777': '{\\ding{183}}',
'\u2778': '{\\ding{184}}',
'\u2779': '{\\ding{185}}',
'\u277a': '{\\ding{186}}',
'\u277b': '{\\ding{187}}',
'\u277c': '{\\ding{188}}',
'\u277d': '{\\ding{189}}',
'\u277e': '{\\ding{190}}',
'\u277f': '{\\ding{191}}',
'\u2780': '{\\ding{192}}',
'\u2781': '{\\ding{193}}',
'\u2782': '{\\ding{194}}',
'\u2783': '{\\ding{195}}',
'\u2784': '{\\ding{196}}',
'\u2785': '{\\ding{197}}',
'\u2786': '{\\ding{198}}',
'\u2787': '{\\ding{199}}',
'\u2788': '{\\ding{200}}',
'\u2789': '{\\ding{201}}',
'\u278a': '{\\ding{202}}',
'\u278b': '{\\ding{203}}',
'\u278c': '{\\ding{204}}',
'\u278d': '{\\ding{205}}',
'\u278e': '{\\ding{206}}',
'\u278f': '{\\ding{207}}',
'\u2790': '{\\ding{208}}',
'\u2791': '{\\ding{209}}',
'\u2792': '{\\ding{210}}',
'\u2793': '{\\ding{211}}',
'\u2794': '{\\ding{212}}',
'\u2798': '{\\ding{216}}',
'\u2799': '{\\ding{217}}',
'\u279a': '{\\ding{218}}',
'\u279b': '{\\ding{219}}',
'\u279c': '{\\ding{220}}',
'\u279d': '{\\ding{221}}',
'\u279e': '{\\ding{222}}',
'\u279f': '{\\ding{223}}',
'\u27a0': '{\\ding{224}}',
'\u27a1': '{\\ding{225}}',
'\u27a2': '{\\ding{226}}',
'\u27a3': '{\\ding{227}}',
'\u27a4': '{\\ding{228}}',
'\u27a5': '{\\ding{229}}',
'\u27a6': '{\\ding{230}}',
'\u27a7': '{\\ding{231}}',
'\u27a8': '{\\ding{232}}',
'\u27a9': '{\\ding{233}}',
'\u27aa': '{\\ding{234}}',
'\u27ab': '{\\ding{235}}',
'\u27ac': '{\\ding{236}}',
'\u27ad': '{\\ding{237}}',
'\u27ae': '{\\ding{238}}',
'\u27af': '{\\ding{239}}',
'\u27b1': '{\\ding{241}}',
'\u27b2': '{\\ding{242}}',
'\u27b3': '{\\ding{243}}',
'\u27b4': '{\\ding{244}}',
'\u27b5': '{\\ding{245}}',
'\u27b6': '{\\ding{246}}',
'\u27b7': '{\\ding{247}}',
'\u27b8': '{\\ding{248}}',
'\u27b9': '{\\ding{249}}',
'\u27ba': '{\\ding{250}}',
'\u27bb': '{\\ding{251}}',
'\u27bc': '{\\ding{252}}',
'\u27bd': '{\\ding{253}}',
'\u27be': '{\\ding{254}}',
'\u27f5': '$\\longleftarrow$',
'\u27f6': '$\\longrightarrow$',
'\u27f7': '$\\longleftrightarrow$',
'\u27f8': '$\\Longleftarrow$',
'\u27f9': '$\\Longrightarrow$',
'\u27fa': '$\\Longleftrightarrow$',
'\u27fc': '$\\longmapsto$',
'\u27ff': '$\\sim\\joinrel\\leadsto$',
'\u2905': '$\\ElsevierGlyph{E212}$',
'\u2912': '$\\UpArrowBar$',
'\u2913': '$\\DownArrowBar$',
'\u2923': '$\\ElsevierGlyph{E20C}$',
'\u2924': '$\\ElsevierGlyph{E20D}$',
'\u2925': '$\\ElsevierGlyph{E20B}$',
'\u2926': '$\\ElsevierGlyph{E20A}$',
'\u2927': '$\\ElsevierGlyph{E211}$',
'\u2928': '$\\ElsevierGlyph{E20E}$',
'\u2929': '$\\ElsevierGlyph{E20F}$',
'\u292a': '$\\ElsevierGlyph{E210}$',
'\u2933': '$\\ElsevierGlyph{E21C}$',
'\u2936': '$\\ElsevierGlyph{E21A}$',
'\u2937': '$\\ElsevierGlyph{E219}$',
'\u2940': '$\\Elolarr$',
'\u2941': '$\\Elorarr$',
'\u2942': '$\\ElzRlarr$',
'\u2944': '$\\ElzrLarr$',
'\u2947': '$\\Elzrarrx$',
'\u294e': '$\\LeftRightVector$',
'\u294f': '$\\RightUpDownVector$',
'\u2950': '$\\DownLeftRightVector$',
'\u2951': '$\\LeftUpDownVector$',
'\u2952': '$\\LeftVectorBar$',
'\u2953': '$\\RightVectorBar$',
'\u2954': '$\\RightUpVectorBar$',
'\u2955': '$\\RightDownVectorBar$',
'\u2956': '$\\DownLeftVectorBar$',
'\u2957': '$\\DownRightVectorBar$',
'\u2958': '$\\LeftUpVectorBar$',
'\u2959': '$\\LeftDownVectorBar$',
'\u295a': '$\\LeftTeeVector$',
'\u295b': '$\\RightTeeVector$',
'\u295c': '$\\RightUpTeeVector$',
'\u295d': '$\\RightDownTeeVector$',
'\u295e': '$\\DownLeftTeeVector$',
'\u295f': '$\\DownRightTeeVector$',
'\u2960': '$\\LeftUpTeeVector$',
'\u2961': '$\\LeftDownTeeVector$',
'\u296e': '$\\UpEquilibrium$',
'\u296f': '$\\ReverseUpEquilibrium$',
'\u2970': '$\\RoundImplies$',
'\u297c': '$\\ElsevierGlyph{E214}$',
'\u297d': '$\\ElsevierGlyph{E215}$',
'\u2980': '$\\Elztfnc$',
'\u2985': '$\\ElsevierGlyph{3018}$',
'\u2986': '$\\Elroang$',
'\u2993': '$<\\kern-0.58em($',
'\u2994': '$\\ElsevierGlyph{E291}$',
'\u2999': '$\\Elzddfnc$',
'\u299c': '$\\Angle$',
'\u29a0': '$\\Elzlpargt$',
'\u29b5': '$\\ElsevierGlyph{E260}$',
'\u29b6': '$\\ElsevierGlyph{E61B}$',
'\u29ca': '$\\ElzLap$',
'\u29cb': '$\\Elzdefas$',
'\u29cf': '$\\LeftTriangleBar$',
'\u29d0': '$\\RightTriangleBar$',
'\u29dc': '$\\ElsevierGlyph{E372}$',
'\u29eb': '$\\blacklozenge$',
'\u29f4': '$\\RuleDelayed$',
'\u2a04': '$\\Elxuplus$',
'\u2a05': '$\\ElzThr$',
'\u2a06': '$\\Elxsqcup$',
'\u2a07': '$\\ElzInf$',
'\u2a08': '$\\ElzSup$',
'\u2a0d': '$\\ElzCint$',
'\u2a0f': '$\\clockoint$',
'\u2a10': '$\\ElsevierGlyph{E395}$',
'\u2a16': '$\\sqrint$',
'\u2a25': '$\\ElsevierGlyph{E25A}$',
'\u2a2a': '$\\ElsevierGlyph{E25B}$',
'\u2a2d': '$\\ElsevierGlyph{E25C}$',
'\u2a2e': '$\\ElsevierGlyph{E25D}$',
'\u2a2f': '$\\ElzTimes$',
'\u2a34': '$\\ElsevierGlyph{E25E}$',
'\u2a35': '$\\ElsevierGlyph{E25E}$',
'\u2a3c': '$\\ElsevierGlyph{E259}$',
'\u2a3f': '$\\amalg$',
'\u2a53': '$\\ElzAnd$',
'\u2a54': '$\\ElzOr$',
'\u2a55': '$\\ElsevierGlyph{E36E}$',
'\u2a56': '$\\ElOr$',
'\u2a5e': '$\\perspcorrespond$',
'\u2a5f': '$\\Elzminhat$',
'\u2a63': '$\\ElsevierGlyph{225A}$',
'\u2a6e': '$\\stackrel{*}{=}$',
'\u2a75': '$\\Equal$',
'\u2a7d': '$\\leqslant$',
'\u2a7e': '$\\geqslant$',
'\u2a85': '$\\lessapprox$',
'\u2a86': '$\\gtrapprox$',
'\u2a87': '$\\lneq$',
'\u2a88': '$\\gneq$',
'\u2a89': '$\\lnapprox$',
'\u2a8a': '$\\gnapprox$',
'\u2a8b': '$\\lesseqqgtr$',
'\u2a8c': '$\\gtreqqless$',
'\u2a95': '$\\eqslantless$',
'\u2a96': '$\\eqslantgtr$',
'\u2a9d': '$\\Pisymbol{ppi020}{117}$',
'\u2a9e': '$\\Pisymbol{ppi020}{105}$',
'\u2aa1': '$\\NestedLessLess$',
'\u2aa2': '$\\NestedGreaterGreater$',
'\u2aaf': '$\\preceq$',
'\u2ab0': '$\\succeq$',
'\u2ab5': '$\\precneqq$',
'\u2ab6': '$\\succneqq$',
'\u2ab7': '$\\precapprox$',
'\u2ab8': '$\\succapprox$',
'\u2ab9': '$\\precnapprox$',
'\u2aba': '$\\succnapprox$',
'\u2ac5': '$\\subseteqq$',
'\u2ac6': '$\\supseteqq$',
'\u2acb': '$\\subsetneqq$',
'\u2acc': '$\\supsetneqq$',
'\u2aeb': '$\\ElsevierGlyph{E30D}$',
'\u2af6': '$\\Elztdcol$',
'\u2afd': '${{/}\\!\\!{/}}$',
'\u300a': '$\\ElsevierGlyph{300A}$',
'\u300b': '$\\ElsevierGlyph{300B}$',
'\u3018': '$\\ElsevierGlyph{3018}$',
'\u3019': '$\\ElsevierGlyph{3019}$',
'\u301a': '$\\openbracketleft$',
'\u301b': '$\\openbracketright$',
'\ufb00': '{ff}',
'\ufb01': '{fi}',
'\ufb02': '{fl}',
'\ufb03': '{ffi}',
'\ufb04': '{ffl}',
'\U0001d400': '$\\mathbf{A}$',
'\U0001d401': '$\\mathbf{B}$',
'\U0001d402': '$\\mathbf{C}$',
'\U0001d403': '$\\mathbf{D}$',
'\U0001d404': '$\\mathbf{E}$',
'\U0001d405': '$\\mathbf{F}$',
'\U0001d406': '$\\mathbf{G}$',
'\U0001d407': '$\\mathbf{H}$',
'\U0001d408': '$\\mathbf{I}$',
'\U0001d409': '$\\mathbf{J}$',
'\U0001d40a': '$\\mathbf{K}$',
'\U0001d40b': '$\\mathbf{L}$',
'\U0001d40c': '$\\mathbf{M}$',
'\U0001d40d': '$\\mathbf{N}$',
'\U0001d40e': '$\\mathbf{O}$',
'\U0001d40f': '$\\mathbf{P}$',
'\U0001d410': '$\\mathbf{Q}$',
'\U0001d411': '$\\mathbf{R}$',
'\U0001d412': '$\\mathbf{S}$',
'\U0001d413': '$\\mathbf{T}$',
'\U0001d414': '$\\mathbf{U}$',
'\U0001d415': '$\\mathbf{V}$',
'\U0001d416': '$\\mathbf{W}$',
'\U0001d417': '$\\mathbf{X}$',
'\U0001d418': '$\\mathbf{Y}$',
'\U0001d419': '$\\mathbf{Z}$',
'\U0001d41a': '$\\mathbf{a}$',
'\U0001d41b': '$\\mathbf{b}$',
'\U0001d41c': '$\\mathbf{c}$',
'\U0001d41d': '$\\mathbf{d}$',
'\U0001d41e': '$\\mathbf{e}$',
'\U0001d41f': '$\\mathbf{f}$',
'\U0001d420': '$\\mathbf{g}$',
'\U0001d421': '$\\mathbf{h}$',
'\U0001d422': '$\\mathbf{i}$',
'\U0001d423': '$\\mathbf{j}$',
'\U0001d424': '$\\mathbf{k}$',
'\U0001d425': '$\\mathbf{l}$',
'\U0001d426': '$\\mathbf{m}$',
'\U0001d427': '$\\mathbf{n}$',
'\U0001d428': '$\\mathbf{o}$',
'\U0001d429': '$\\mathbf{p}$',
'\U0001d42a': '$\\mathbf{q}$',
'\U0001d42b': '$\\mathbf{r}$',
'\U0001d42c': '$\\mathbf{s}$',
'\U0001d42d': '$\\mathbf{t}$',
'\U0001d42e': '$\\mathbf{u}$',
'\U0001d42f': '$\\mathbf{v}$',
'\U0001d430': '$\\mathbf{w}$',
'\U0001d431': '$\\mathbf{x}$',
'\U0001d432': '$\\mathbf{y}$',
'\U0001d433': '$\\mathbf{z}$',
'\U0001d434': '$\\mathsl{A}$',
'\U0001d435': '$\\mathsl{B}$',
'\U0001d436': '$\\mathsl{C}$',
'\U0001d437': '$\\mathsl{D}$',
'\U0001d438': '$\\mathsl{E}$',
'\U0001d439': '$\\mathsl{F}$',
'\U0001d43a': '$\\mathsl{G}$',
'\U0001d43b': '$\\mathsl{H}$',
'\U0001d43c': '$\\mathsl{I}$',
'\U0001d43d': '$\\mathsl{J}$',
'\U0001d43e': '$\\mathsl{K}$',
'\U0001d43f': '$\\mathsl{L}$',
'\U0001d440': '$\\mathsl{M}$',
'\U0001d441': '$\\mathsl{N}$',
'\U0001d442': '$\\mathsl{O}$',
'\U0001d443': '$\\mathsl{P}$',
'\U0001d444': '$\\mathsl{Q}$',
'\U0001d445': '$\\mathsl{R}$',
'\U0001d446': '$\\mathsl{S}$',
'\U0001d447': '$\\mathsl{T}$',
'\U0001d448': '$\\mathsl{U}$',
'\U0001d449': '$\\mathsl{V}$',
'\U0001d44a': '$\\mathsl{W}$',
'\U0001d44b': '$\\mathsl{X}$',
'\U0001d44c': '$\\mathsl{Y}$',
'\U0001d44d': '$\\mathsl{Z}$',
'\U0001d44e': '$\\mathsl{a}$',
'\U0001d44f': '$\\mathsl{b}$',
'\U0001d450': '$\\mathsl{c}$',
'\U0001d451': '$\\mathsl{d}$',
'\U0001d452': '$\\mathsl{e}$',
'\U0001d453': '$\\mathsl{f}$',
'\U0001d454': '$\\mathsl{g}$',
'\U0001d456': '$\\mathsl{i}$',
'\U0001d457': '$\\mathsl{j}$',
'\U0001d458': '$\\mathsl{k}$',
'\U0001d459': '$\\mathsl{l}$',
'\U0001d45a': '$\\mathsl{m}$',
'\U0001d45b': '$\\mathsl{n}$',
'\U0001d45c': '$\\mathsl{o}$',
'\U0001d45d': '$\\mathsl{p}$',
'\U0001d45e': '$\\mathsl{q}$',
'\U0001d45f': '$\\mathsl{r}$',
'\U0001d460': '$\\mathsl{s}$',
'\U0001d461': '$\\mathsl{t}$',
'\U0001d462': '$\\mathsl{u}$',
'\U0001d463': '$\\mathsl{v}$',
'\U0001d464': '$\\mathsl{w}$',
'\U0001d465': '$\\mathsl{x}$',
'\U0001d466': '$\\mathsl{y}$',
'\U0001d467': '$\\mathsl{z}$',
'\U0001d468': '$\\mathbit{A}$',
'\U0001d469': '$\\mathbit{B}$',
'\U0001d46a': '$\\mathbit{C}$',
'\U0001d46b': '$\\mathbit{D}$',
'\U0001d46c': '$\\mathbit{E}$',
'\U0001d46d': '$\\mathbit{F}$',
'\U0001d46e': '$\\mathbit{G}$',
'\U0001d46f': '$\\mathbit{H}$',
'\U0001d470': '$\\mathbit{I}$',
'\U0001d471': '$\\mathbit{J}$',
'\U0001d472': '$\\mathbit{K}$',
'\U0001d473': '$\\mathbit{L}$',
'\U0001d474': '$\\mathbit{M}$',
'\U0001d475': '$\\mathbit{N}$',
'\U0001d476': '$\\mathbit{O}$',
'\U0001d477': '$\\mathbit{P}$',
'\U0001d478': '$\\mathbit{Q}$',
'\U0001d479': '$\\mathbit{R}$',
'\U0001d47a': '$\\mathbit{S}$',
'\U0001d47b': '$\\mathbit{T}$',
'\U0001d47c': '$\\mathbit{U}$',
'\U0001d47d': '$\\mathbit{V}$',
'\U0001d47e': '$\\mathbit{W}$',
'\U0001d47f': '$\\mathbit{X}$',
'\U0001d480': '$\\mathbit{Y}$',
'\U0001d481': '$\\mathbit{Z}$',
'\U0001d482': '$\\mathbit{a}$',
'\U0001d483': '$\\mathbit{b}$',
'\U0001d484': '$\\mathbit{c}$',
'\U0001d485': '$\\mathbit{d}$',
'\U0001d486': '$\\mathbit{e}$',
'\U0001d487': '$\\mathbit{f}$',
'\U0001d488': '$\\mathbit{g}$',
'\U0001d489': '$\\mathbit{h}$',
'\U0001d48a': '$\\mathbit{i}$',
'\U0001d48b': '$\\mathbit{j}$',
'\U0001d48c': '$\\mathbit{k}$',
'\U0001d48d': '$\\mathbit{l}$',
'\U0001d48e': '$\\mathbit{m}$',
'\U0001d48f': '$\\mathbit{n}$',
'\U0001d490': '$\\mathbit{o}$',
'\U0001d491': '$\\mathbit{p}$',
'\U0001d492': '$\\mathbit{q}$',
'\U0001d493': '$\\mathbit{r}$',
'\U0001d494': '$\\mathbit{s}$',
'\U0001d495': '$\\mathbit{t}$',
'\U0001d496': '$\\mathbit{u}$',
'\U0001d497': '$\\mathbit{v}$',
'\U0001d498': '$\\mathbit{w}$',
'\U0001d499': '$\\mathbit{x}$',
'\U0001d49a': '$\\mathbit{y}$',
'\U0001d49b': '$\\mathbit{z}$',
'\U0001d49c': '$\\mathscr{A}$',
'\U0001d49e': '$\\mathscr{C}$',
'\U0001d49f': '$\\mathscr{D}$',
'\U0001d4a2': '$\\mathscr{G}$',
'\U0001d4a5': '$\\mathscr{J}$',
'\U0001d4a6': '$\\mathscr{K}$',
'\U0001d4a9': '$\\mathscr{N}$',
'\U0001d4aa': '$\\mathscr{O}$',
'\U0001d4ab': '$\\mathscr{P}$',
'\U0001d4ac': '$\\mathscr{Q}$',
'\U0001d4ae': '$\\mathscr{S}$',
'\U0001d4af': '$\\mathscr{T}$',
'\U0001d4b0': '$\\mathscr{U}$',
'\U0001d4b1': '$\\mathscr{V}$',
'\U0001d4b2': '$\\mathscr{W}$',
'\U0001d4b3': '$\\mathscr{X}$',
'\U0001d4b4': '$\\mathscr{Y}$',
'\U0001d4b5': '$\\mathscr{Z}$',
'\U0001d4b6': '$\\mathscr{a}$',
'\U0001d4b7': '$\\mathscr{b}$',
'\U0001d4b8': '$\\mathscr{c}$',
'\U0001d4b9': '$\\mathscr{d}$',
'\U0001d4bb': '$\\mathscr{f}$',
'\U0001d4bd': '$\\mathscr{h}$',
'\U0001d4be': '$\\mathscr{i}$',
'\U0001d4bf': '$\\mathscr{j}$',
'\U0001d4c0': '$\\mathscr{k}$',
'\U0001d4c1': '$\\mathscr{l}$',
'\U0001d4c2': '$\\mathscr{m}$',
'\U0001d4c3': '$\\mathscr{n}$',
'\U0001d4c5': '$\\mathscr{p}$',
'\U0001d4c6': '$\\mathscr{q}$',
'\U0001d4c7': '$\\mathscr{r}$',
'\U0001d4c8': '$\\mathscr{s}$',
'\U0001d4c9': '$\\mathscr{t}$',
'\U0001d4ca': '$\\mathscr{u}$',
'\U0001d4cb': '$\\mathscr{v}$',
'\U0001d4cc': '$\\mathscr{w}$',
'\U0001d4cd': '$\\mathscr{x}$',
'\U0001d4ce': '$\\mathscr{y}$',
'\U0001d4cf': '$\\mathscr{z}$',
'\U0001d4d0': '$\\mathmit{A}$',
'\U0001d4d1': '$\\mathmit{B}$',
'\U0001d4d2': '$\\mathmit{C}$',
'\U0001d4d3': '$\\mathmit{D}$',
'\U0001d4d4': '$\\mathmit{E}$',
'\U0001d4d5': '$\\mathmit{F}$',
'\U0001d4d6': '$\\mathmit{G}$',
'\U0001d4d7': '$\\mathmit{H}$',
'\U0001d4d8': '$\\mathmit{I}$',
'\U0001d4d9': '$\\mathmit{J}$',
'\U0001d4da': '$\\mathmit{K}$',
'\U0001d4db': '$\\mathmit{L}$',
'\U0001d4dc': '$\\mathmit{M}$',
'\U0001d4dd': '$\\mathmit{N}$',
'\U0001d4de': '$\\mathmit{O}$',
'\U0001d4df': '$\\mathmit{P}$',
'\U0001d4e0': '$\\mathmit{Q}$',
'\U0001d4e1': '$\\mathmit{R}$',
'\U0001d4e2': '$\\mathmit{S}$',
'\U0001d4e3': '$\\mathmit{T}$',
'\U0001d4e4': '$\\mathmit{U}$',
'\U0001d4e5': '$\\mathmit{V}$',
'\U0001d4e6': '$\\mathmit{W}$',
'\U0001d4e7': '$\\mathmit{X}$',
'\U0001d4e8': '$\\mathmit{Y}$',
'\U0001d4e9': '$\\mathmit{Z}$',
'\U0001d4ea': '$\\mathmit{a}$',
'\U0001d4eb': '$\\mathmit{b}$',
'\U0001d4ec': '$\\mathmit{c}$',
'\U0001d4ed': '$\\mathmit{d}$',
'\U0001d4ee': '$\\mathmit{e}$',
'\U0001d4ef': '$\\mathmit{f}$',
'\U0001d4f0': '$\\mathmit{g}$',
'\U0001d4f1': '$\\mathmit{h}$',
'\U0001d4f2': '$\\mathmit{i}$',
'\U0001d4f3': '$\\mathmit{j}$',
'\U0001d4f4': '$\\mathmit{k}$',
'\U0001d4f5': '$\\mathmit{l}$',
'\U0001d4f6': '$\\mathmit{m}$',
'\U0001d4f7': '$\\mathmit{n}$',
'\U0001d4f8': '$\\mathmit{o}$',
'\U0001d4f9': '$\\mathmit{p}$',
'\U0001d4fa': '$\\mathmit{q}$',
'\U0001d4fb': '$\\mathmit{r}$',
'\U0001d4fc': '$\\mathmit{s}$',
'\U0001d4fd': '$\\mathmit{t}$',
'\U0001d4fe': '$\\mathmit{u}$',
'\U0001d4ff': '$\\mathmit{v}$',
'\U0001d500': '$\\mathmit{w}$',
'\U0001d501': '$\\mathmit{x}$',
'\U0001d502': '$\\mathmit{y}$',
'\U0001d503': '$\\mathmit{z}$',
'\U0001d504': '$\\mathfrak{A}$',
'\U0001d505': '$\\mathfrak{B}$',
'\U0001d507': '$\\mathfrak{D}$',
'\U0001d508': '$\\mathfrak{E}$',
'\U0001d509': '$\\mathfrak{F}$',
'\U0001d50a': '$\\mathfrak{G}$',
'\U0001d50d': '$\\mathfrak{J}$',
'\U0001d50e': '$\\mathfrak{K}$',
'\U0001d50f': '$\\mathfrak{L}$',
'\U0001d510': '$\\mathfrak{M}$',
'\U0001d511': '$\\mathfrak{N}$',
'\U0001d512': '$\\mathfrak{O}$',
'\U0001d513': '$\\mathfrak{P}$',
'\U0001d514': '$\\mathfrak{Q}$',
'\U0001d516': '$\\mathfrak{S}$',
'\U0001d517': '$\\mathfrak{T}$',
'\U0001d518': '$\\mathfrak{U}$',
'\U0001d519': '$\\mathfrak{V}$',
'\U0001d51a': '$\\mathfrak{W}$',
'\U0001d51b': '$\\mathfrak{X}$',
'\U0001d51c': '$\\mathfrak{Y}$',
'\U0001d51e': '$\\mathfrak{a}$',
'\U0001d51f': '$\\mathfrak{b}$',
'\U0001d520': '$\\mathfrak{c}$',
'\U0001d521': '$\\mathfrak{d}$',
'\U0001d522': '$\\mathfrak{e}$',
'\U0001d523': '$\\mathfrak{f}$',
'\U0001d524': '$\\mathfrak{g}$',
'\U0001d525': '$\\mathfrak{h}$',
'\U0001d526': '$\\mathfrak{i}$',
'\U0001d527': '$\\mathfrak{j}$',
'\U0001d528': '$\\mathfrak{k}$',
'\U0001d529': '$\\mathfrak{l}$',
'\U0001d52a': '$\\mathfrak{m}$',
'\U0001d52b': '$\\mathfrak{n}$',
'\U0001d52c': '$\\mathfrak{o}$',
'\U0001d52d': '$\\mathfrak{p}$',
'\U0001d52e': '$\\mathfrak{q}$',
'\U0001d52f': '$\\mathfrak{r}$',
'\U0001d530': '$\\mathfrak{s}$',
'\U0001d531': '$\\mathfrak{t}$',
'\U0001d532': '$\\mathfrak{u}$',
'\U0001d533': '$\\mathfrak{v}$',
'\U0001d534': '$\\mathfrak{w}$',
'\U0001d535': '$\\mathfrak{x}$',
'\U0001d536': '$\\mathfrak{y}$',
'\U0001d537': '$\\mathfrak{z}$',
'\U0001d538': '$\\mathbb{A}$',
'\U0001d539': '$\\mathbb{B}$',
'\U0001d53b': '$\\mathbb{D}$',
'\U0001d53c': '$\\mathbb{E}$',
'\U0001d53d': '$\\mathbb{F}$',
'\U0001d53e': '$\\mathbb{G}$',
'\U0001d540': '$\\mathbb{I}$',
'\U0001d541': '$\\mathbb{J}$',
'\U0001d542': '$\\mathbb{K}$',
'\U0001d543': '$\\mathbb{L}$',
'\U0001d544': '$\\mathbb{M}$',
'\U0001d546': '$\\mathbb{O}$',
'\U0001d54a': '$\\mathbb{S}$',
'\U0001d54b': '$\\mathbb{T}$',
'\U0001d54c': '$\\mathbb{U}$',
'\U0001d54d': '$\\mathbb{V}$',
'\U0001d54e': '$\\mathbb{W}$',
'\U0001d54f': '$\\mathbb{X}$',
'\U0001d550': '$\\mathbb{Y}$',
'\U0001d552': '$\\mathbb{a}$',
'\U0001d553': '$\\mathbb{b}$',
'\U0001d554': '$\\mathbb{c}$',
'\U0001d555': '$\\mathbb{d}$',
'\U0001d556': '$\\mathbb{e}$',
'\U0001d557': '$\\mathbb{f}$',
'\U0001d558': '$\\mathbb{g}$',
'\U0001d559': '$\\mathbb{h}$',
'\U0001d55a': '$\\mathbb{i}$',
'\U0001d55b': '$\\mathbb{j}$',
'\U0001d55c': '$\\mathbb{k}$',
'\U0001d55d': '$\\mathbb{l}$',
'\U0001d55e': '$\\mathbb{m}$',
'\U0001d55f': '$\\mathbb{n}$',
'\U0001d560': '$\\mathbb{o}$',
'\U0001d561': '$\\mathbb{p}$',
'\U0001d562': '$\\mathbb{q}$',
'\U0001d563': '$\\mathbb{r}$',
'\U0001d564': '$\\mathbb{s}$',
'\U0001d565': '$\\mathbb{t}$',
'\U0001d566': '$\\mathbb{u}$',
'\U0001d567': '$\\mathbb{v}$',
'\U0001d568': '$\\mathbb{w}$',
'\U0001d569': '$\\mathbb{x}$',
'\U0001d56a': '$\\mathbb{y}$',
'\U0001d56b': '$\\mathbb{z}$',
'\U0001d56c': '$\\mathslbb{A}$',
'\U0001d56d': '$\\mathslbb{B}$',
'\U0001d56e': '$\\mathslbb{C}$',
'\U0001d56f': '$\\mathslbb{D}$',
'\U0001d570': '$\\mathslbb{E}$',
'\U0001d571': '$\\mathslbb{F}$',
'\U0001d572': '$\\mathslbb{G}$',
'\U0001d573': '$\\mathslbb{H}$',
'\U0001d574': '$\\mathslbb{I}$',
'\U0001d575': '$\\mathslbb{J}$',
'\U0001d576': '$\\mathslbb{K}$',
'\U0001d577': '$\\mathslbb{L}$',
'\U0001d578': '$\\mathslbb{M}$',
'\U0001d579': '$\\mathslbb{N}$',
'\U0001d57a': '$\\mathslbb{O}$',
'\U0001d57b': '$\\mathslbb{P}$',
'\U0001d57c': '$\\mathslbb{Q}$',
'\U0001d57d': '$\\mathslbb{R}$',
'\U0001d57e': '$\\mathslbb{S}$',
'\U0001d57f': '$\\mathslbb{T}$',
'\U0001d580': '$\\mathslbb{U}$',
'\U0001d581': '$\\mathslbb{V}$',
'\U0001d582': '$\\mathslbb{W}$',
'\U0001d583': '$\\mathslbb{X}$',
'\U0001d584': '$\\mathslbb{Y}$',
'\U0001d585': '$\\mathslbb{Z}$',
'\U0001d586': '$\\mathslbb{a}$',
'\U0001d587': '$\\mathslbb{b}$',
'\U0001d588': '$\\mathslbb{c}$',
'\U0001d589': '$\\mathslbb{d}$',
'\U0001d58a': '$\\mathslbb{e}$',
'\U0001d58b': '$\\mathslbb{f}$',
'\U0001d58c': '$\\mathslbb{g}$',
'\U0001d58d': '$\\mathslbb{h}$',
'\U0001d58e': '$\\mathslbb{i}$',
'\U0001d58f': '$\\mathslbb{j}$',
'\U0001d590': '$\\mathslbb{k}$',
'\U0001d591': '$\\mathslbb{l}$',
'\U0001d592': '$\\mathslbb{m}$',
'\U0001d593': '$\\mathslbb{n}$',
'\U0001d594': '$\\mathslbb{o}$',
'\U0001d595': '$\\mathslbb{p}$',
'\U0001d596': '$\\mathslbb{q}$',
'\U0001d597': '$\\mathslbb{r}$',
'\U0001d598': '$\\mathslbb{s}$',
'\U0001d599': '$\\mathslbb{t}$',
'\U0001d59a': '$\\mathslbb{u}$',
'\U0001d59b': '$\\mathslbb{v}$',
'\U0001d59c': '$\\mathslbb{w}$',
'\U0001d59d': '$\\mathslbb{x}$',
'\U0001d59e': '$\\mathslbb{y}$',
'\U0001d59f': '$\\mathslbb{z}$',
'\U0001d5a0': '$\\mathsf{A}$',
'\U0001d5a1': '$\\mathsf{B}$',
'\U0001d5a2': '$\\mathsf{C}$',
'\U0001d5a3': '$\\mathsf{D}$',
'\U0001d5a4': '$\\mathsf{E}$',
'\U0001d5a5': '$\\mathsf{F}$',
'\U0001d5a6': '$\\mathsf{G}$',
'\U0001d5a7': '$\\mathsf{H}$',
'\U0001d5a8': '$\\mathsf{I}$',
'\U0001d5a9': '$\\mathsf{J}$',
'\U0001d5aa': '$\\mathsf{K}$',
'\U0001d5ab': '$\\mathsf{L}$',
'\U0001d5ac': '$\\mathsf{M}$',
'\U0001d5ad': '$\\mathsf{N}$',
'\U0001d5ae': '$\\mathsf{O}$',
'\U0001d5af': '$\\mathsf{P}$',
'\U0001d5b0': '$\\mathsf{Q}$',
'\U0001d5b1': '$\\mathsf{R}$',
'\U0001d5b2': '$\\mathsf{S}$',
'\U0001d5b3': '$\\mathsf{T}$',
'\U0001d5b4': '$\\mathsf{U}$',
'\U0001d5b5': '$\\mathsf{V}$',
'\U0001d5b6': '$\\mathsf{W}$',
'\U0001d5b7': '$\\mathsf{X}$',
'\U0001d5b8': '$\\mathsf{Y}$',
'\U0001d5b9': '$\\mathsf{Z}$',
'\U0001d5ba': '$\\mathsf{a}$',
'\U0001d5bb': '$\\mathsf{b}$',
'\U0001d5bc': '$\\mathsf{c}$',
'\U0001d5bd': '$\\mathsf{d}$',
'\U0001d5be': '$\\mathsf{e}$',
'\U0001d5bf': '$\\mathsf{f}$',
'\U0001d5c0': '$\\mathsf{g}$',
'\U0001d5c1': '$\\mathsf{h}$',
'\U0001d5c2': '$\\mathsf{i}$',
'\U0001d5c3': '$\\mathsf{j}$',
'\U0001d5c4': '$\\mathsf{k}$',
'\U0001d5c5': '$\\mathsf{l}$',
'\U0001d5c6': '$\\mathsf{m}$',
'\U0001d5c7': '$\\mathsf{n}$',
'\U0001d5c8': '$\\mathsf{o}$',
'\U0001d5c9': '$\\mathsf{p}$',
'\U0001d5ca': '$\\mathsf{q}$',
'\U0001d5cb': '$\\mathsf{r}$',
'\U0001d5cc': '$\\mathsf{s}$',
'\U0001d5cd': '$\\mathsf{t}$',
'\U0001d5ce': '$\\mathsf{u}$',
'\U0001d5cf': '$\\mathsf{v}$',
'\U0001d5d0': '$\\mathsf{w}$',
'\U0001d5d1': '$\\mathsf{x}$',
'\U0001d5d2': '$\\mathsf{y}$',
'\U0001d5d3': '$\\mathsf{z}$',
'\U0001d5d4': '$\\mathsfbf{A}$',
'\U0001d5d5': '$\\mathsfbf{B}$',
'\U0001d5d6': '$\\mathsfbf{C}$',
'\U0001d5d7': '$\\mathsfbf{D}$',
'\U0001d5d8': '$\\mathsfbf{E}$',
'\U0001d5d9': '$\\mathsfbf{F}$',
'\U0001d5da': '$\\mathsfbf{G}$',
'\U0001d5db': '$\\mathsfbf{H}$',
'\U0001d5dc': '$\\mathsfbf{I}$',
'\U0001d5dd': '$\\mathsfbf{J}$',
'\U0001d5de': '$\\mathsfbf{K}$',
'\U0001d5df': '$\\mathsfbf{L}$',
'\U0001d5e0': '$\\mathsfbf{M}$',
'\U0001d5e1': '$\\mathsfbf{N}$',
'\U0001d5e2': '$\\mathsfbf{O}$',
'\U0001d5e3': '$\\mathsfbf{P}$',
'\U0001d5e4': '$\\mathsfbf{Q}$',
'\U0001d5e5': '$\\mathsfbf{R}$',
'\U0001d5e6': '$\\mathsfbf{S}$',
'\U0001d5e7': '$\\mathsfbf{T}$',
'\U0001d5e8': '$\\mathsfbf{U}$',
'\U0001d5e9': '$\\mathsfbf{V}$',
'\U0001d5ea': '$\\mathsfbf{W}$',
'\U0001d5eb': '$\\mathsfbf{X}$',
'\U0001d5ec': '$\\mathsfbf{Y}$',
'\U0001d5ed': '$\\mathsfbf{Z}$',
'\U0001d5ee': '$\\mathsfbf{a}$',
'\U0001d5ef': '$\\mathsfbf{b}$',
'\U0001d5f0': '$\\mathsfbf{c}$',
'\U0001d5f1': '$\\mathsfbf{d}$',
'\U0001d5f2': '$\\mathsfbf{e}$',
'\U0001d5f3': '$\\mathsfbf{f}$',
'\U0001d5f4': '$\\mathsfbf{g}$',
'\U0001d5f5': '$\\mathsfbf{h}$',
'\U0001d5f6': '$\\mathsfbf{i}$',
'\U0001d5f7': '$\\mathsfbf{j}$',
'\U0001d5f8': '$\\mathsfbf{k}$',
'\U0001d5f9': '$\\mathsfbf{l}$',
'\U0001d5fa': '$\\mathsfbf{m}$',
'\U0001d5fb': '$\\mathsfbf{n}$',
'\U0001d5fc': '$\\mathsfbf{o}$',
'\U0001d5fd': '$\\mathsfbf{p}$',
'\U0001d5fe': '$\\mathsfbf{q}$',
'\U0001d5ff': '$\\mathsfbf{r}$',
'\U0001d600': '$\\mathsfbf{s}$',
'\U0001d601': '$\\mathsfbf{t}$',
'\U0001d602': '$\\mathsfbf{u}$',
'\U0001d603': '$\\mathsfbf{v}$',
'\U0001d604': '$\\mathsfbf{w}$',
'\U0001d605': '$\\mathsfbf{x}$',
'\U0001d606': '$\\mathsfbf{y}$',
'\U0001d607': '$\\mathsfbf{z}$',
'\U0001d608': '$\\mathsfsl{A}$',
'\U0001d609': '$\\mathsfsl{B}$',
'\U0001d60a': '$\\mathsfsl{C}$',
'\U0001d60b': '$\\mathsfsl{D}$',
'\U0001d60c': '$\\mathsfsl{E}$',
'\U0001d60d': '$\\mathsfsl{F}$',
'\U0001d60e': '$\\mathsfsl{G}$',
'\U0001d60f': '$\\mathsfsl{H}$',
'\U0001d610': '$\\mathsfsl{I}$',
'\U0001d611': '$\\mathsfsl{J}$',
'\U0001d612': '$\\mathsfsl{K}$',
'\U0001d613': '$\\mathsfsl{L}$',
'\U0001d614': '$\\mathsfsl{M}$',
'\U0001d615': '$\\mathsfsl{N}$',
'\U0001d616': '$\\mathsfsl{O}$',
'\U0001d617': '$\\mathsfsl{P}$',
'\U0001d618': '$\\mathsfsl{Q}$',
'\U0001d619': '$\\mathsfsl{R}$',
'\U0001d61a': '$\\mathsfsl{S}$',
'\U0001d61b': '$\\mathsfsl{T}$',
'\U0001d61c': '$\\mathsfsl{U}$',
'\U0001d61d': '$\\mathsfsl{V}$',
'\U0001d61e': '$\\mathsfsl{W}$',
'\U0001d61f': '$\\mathsfsl{X}$',
'\U0001d620': '$\\mathsfsl{Y}$',
'\U0001d621': '$\\mathsfsl{Z}$',
'\U0001d622': '$\\mathsfsl{a}$',
'\U0001d623': '$\\mathsfsl{b}$',
'\U0001d624': '$\\mathsfsl{c}$',
'\U0001d625': '$\\mathsfsl{d}$',
'\U0001d626': '$\\mathsfsl{e}$',
'\U0001d627': '$\\mathsfsl{f}$',
'\U0001d628': '$\\mathsfsl{g}$',
'\U0001d629': '$\\mathsfsl{h}$',
'\U0001d62a': '$\\mathsfsl{i}$',
'\U0001d62b': '$\\mathsfsl{j}$',
'\U0001d62c': '$\\mathsfsl{k}$',
'\U0001d62d': '$\\mathsfsl{l}$',
'\U0001d62e': '$\\mathsfsl{m}$',
'\U0001d62f': '$\\mathsfsl{n}$',
'\U0001d630': '$\\mathsfsl{o}$',
'\U0001d631': '$\\mathsfsl{p}$',
'\U0001d632': '$\\mathsfsl{q}$',
'\U0001d633': '$\\mathsfsl{r}$',
'\U0001d634': '$\\mathsfsl{s}$',
'\U0001d635': '$\\mathsfsl{t}$',
'\U0001d636': '$\\mathsfsl{u}$',
'\U0001d637': '$\\mathsfsl{v}$',
'\U0001d638': '$\\mathsfsl{w}$',
'\U0001d639': '$\\mathsfsl{x}$',
'\U0001d63a': '$\\mathsfsl{y}$',
'\U0001d63b': '$\\mathsfsl{z}$',
'\U0001d63c': '$\\mathsfbfsl{A}$',
'\U0001d63d': '$\\mathsfbfsl{B}$',
'\U0001d63e': '$\\mathsfbfsl{C}$',
'\U0001d63f': '$\\mathsfbfsl{D}$',
'\U0001d640': '$\\mathsfbfsl{E}$',
'\U0001d641': '$\\mathsfbfsl{F}$',
'\U0001d642': '$\\mathsfbfsl{G}$',
'\U0001d643': '$\\mathsfbfsl{H}$',
'\U0001d644': '$\\mathsfbfsl{I}$',
'\U0001d645': '$\\mathsfbfsl{J}$',
'\U0001d646': '$\\mathsfbfsl{K}$',
'\U0001d647': '$\\mathsfbfsl{L}$',
'\U0001d648': '$\\mathsfbfsl{M}$',
'\U0001d649': '$\\mathsfbfsl{N}$',
'\U0001d64a': '$\\mathsfbfsl{O}$',
'\U0001d64b': '$\\mathsfbfsl{P}$',
'\U0001d64c': '$\\mathsfbfsl{Q}$',
'\U0001d64d': '$\\mathsfbfsl{R}$',
'\U0001d64e': '$\\mathsfbfsl{S}$',
'\U0001d64f': '$\\mathsfbfsl{T}$',
'\U0001d650': '$\\mathsfbfsl{U}$',
'\U0001d651': '$\\mathsfbfsl{V}$',
'\U0001d652': '$\\mathsfbfsl{W}$',
'\U0001d653': '$\\mathsfbfsl{X}$',
'\U0001d654': '$\\mathsfbfsl{Y}$',
'\U0001d655': '$\\mathsfbfsl{Z}$',
'\U0001d656': '$\\mathsfbfsl{a}$',
'\U0001d657': '$\\mathsfbfsl{b}$',
'\U0001d658': '$\\mathsfbfsl{c}$',
'\U0001d659': '$\\mathsfbfsl{d}$',
'\U0001d65a': '$\\mathsfbfsl{e}$',
'\U0001d65b': '$\\mathsfbfsl{f}$',
'\U0001d65c': '$\\mathsfbfsl{g}$',
'\U0001d65d': '$\\mathsfbfsl{h}$',
'\U0001d65e': '$\\mathsfbfsl{i}$',
'\U0001d65f': '$\\mathsfbfsl{j}$',
'\U0001d660': '$\\mathsfbfsl{k}$',
'\U0001d661': '$\\mathsfbfsl{l}$',
'\U0001d662': '$\\mathsfbfsl{m}$',
'\U0001d663': '$\\mathsfbfsl{n}$',
'\U0001d664': '$\\mathsfbfsl{o}$',
'\U0001d665': '$\\mathsfbfsl{p}$',
'\U0001d666': '$\\mathsfbfsl{q}$',
'\U0001d667': '$\\mathsfbfsl{r}$',
'\U0001d668': '$\\mathsfbfsl{s}$',
'\U0001d669': '$\\mathsfbfsl{t}$',
'\U0001d66a': '$\\mathsfbfsl{u}$',
'\U0001d66b': '$\\mathsfbfsl{v}$',
'\U0001d66c': '$\\mathsfbfsl{w}$',
'\U0001d66d': '$\\mathsfbfsl{x}$',
'\U0001d66e': '$\\mathsfbfsl{y}$',
'\U0001d66f': '$\\mathsfbfsl{z}$',
'\U0001d670': '$\\mathtt{A}$',
'\U0001d671': '$\\mathtt{B}$',
'\U0001d672': '$\\mathtt{C}$',
'\U0001d673': '$\\mathtt{D}$',
'\U0001d674': '$\\mathtt{E}$',
'\U0001d675': '$\\mathtt{F}$',
'\U0001d676': '$\\mathtt{G}$',
'\U0001d677': '$\\mathtt{H}$',
'\U0001d678': '$\\mathtt{I}$',
'\U0001d679': '$\\mathtt{J}$',
'\U0001d67a': '$\\mathtt{K}$',
'\U0001d67b': '$\\mathtt{L}$',
'\U0001d67c': '$\\mathtt{M}$',
'\U0001d67d': '$\\mathtt{N}$',
'\U0001d67e': '$\\mathtt{O}$',
'\U0001d67f': '$\\mathtt{P}$',
'\U0001d680': '$\\mathtt{Q}$',
'\U0001d681': '$\\mathtt{R}$',
'\U0001d682': '$\\mathtt{S}$',
'\U0001d683': '$\\mathtt{T}$',
'\U0001d684': '$\\mathtt{U}$',
'\U0001d685': '$\\mathtt{V}$',
'\U0001d686': '$\\mathtt{W}$',
'\U0001d687': '$\\mathtt{X}$',
'\U0001d688': '$\\mathtt{Y}$',
'\U0001d689': '$\\mathtt{Z}$',
'\U0001d68a': '$\\mathtt{a}$',
'\U0001d68b': '$\\mathtt{b}$',
'\U0001d68c': '$\\mathtt{c}$',
'\U0001d68d': '$\\mathtt{d}$',
'\U0001d68e': '$\\mathtt{e}$',
'\U0001d68f': '$\\mathtt{f}$',
'\U0001d690': '$\\mathtt{g}$',
'\U0001d691': '$\\mathtt{h}$',
'\U0001d692': '$\\mathtt{i}$',
'\U0001d693': '$\\mathtt{j}$',
'\U0001d694': '$\\mathtt{k}$',
'\U0001d695': '$\\mathtt{l}$',
'\U0001d696': '$\\mathtt{m}$',
'\U0001d697': '$\\mathtt{n}$',
'\U0001d698': '$\\mathtt{o}$',
'\U0001d699': '$\\mathtt{p}$',
'\U0001d69a': '$\\mathtt{q}$',
'\U0001d69b': '$\\mathtt{r}$',
'\U0001d69c': '$\\mathtt{s}$',
'\U0001d69d': '$\\mathtt{t}$',
'\U0001d69e': '$\\mathtt{u}$',
'\U0001d69f': '$\\mathtt{v}$',
'\U0001d6a0': '$\\mathtt{w}$',
'\U0001d6a1': '$\\mathtt{x}$',
'\U0001d6a2': '$\\mathtt{y}$',
'\U0001d6a3': '$\\mathtt{z}$',
'\U0001d6a8': '$\\mathbf{\\Alpha}$',
'\U0001d6a9': '$\\mathbf{\\Beta}$',
'\U0001d6aa': '$\\mathbf{\\Gamma}$',
'\U0001d6ab': '$\\mathbf{\\Delta}$',
'\U0001d6ac': '$\\mathbf{\\Epsilon}$',
'\U0001d6ad': '$\\mathbf{\\Zeta}$',
'\U0001d6ae': '$\\mathbf{\\Eta}$',
'\U0001d6af': '$\\mathbf{\\Theta}$',
'\U0001d6b0': '$\\mathbf{\\Iota}$',
'\U0001d6b1': '$\\mathbf{\\Kappa}$',
'\U0001d6b2': '$\\mathbf{\\Lambda}$',
'\U0001d6b3': '$M$',
'\U0001d6b4': '$N$',
'\U0001d6b5': '$\\mathbf{\\Xi}$',
'\U0001d6b6': '$O$',
'\U0001d6b7': '$\\mathbf{\\Pi}$',
'\U0001d6b8': '$\\mathbf{\\Rho}$',
'\U0001d6b9': '{\\mathbf{\\vartheta}}',
'\U0001d6ba': '$\\mathbf{\\Sigma}$',
'\U0001d6bb': '$\\mathbf{\\Tau}$',
'\U0001d6bc': '$\\mathbf{\\Upsilon}$',
'\U0001d6bd': '$\\mathbf{\\Phi}$',
'\U0001d6be': '$\\mathbf{\\Chi}$',
'\U0001d6bf': '$\\mathbf{\\Psi}$',
'\U0001d6c0': '$\\mathbf{\\Omega}$',
'\U0001d6c1': '$\\mathbf{\\nabla}$',
'\U0001d6c2': '$\\mathbf{\\Alpha}$',
'\U0001d6c3': '$\\mathbf{\\Beta}$',
'\U0001d6c4': '$\\mathbf{\\Gamma}$',
'\U0001d6c5': '$\\mathbf{\\Delta}$',
'\U0001d6c6': '$\\mathbf{\\Epsilon}$',
'\U0001d6c7': '$\\mathbf{\\Zeta}$',
'\U0001d6c8': '$\\mathbf{\\Eta}$',
'\U0001d6c9': '$\\mathbf{\\theta}$',
'\U0001d6ca': '$\\mathbf{\\Iota}$',
'\U0001d6cb': '$\\mathbf{\\Kappa}$',
'\U0001d6cc': '$\\mathbf{\\Lambda}$',
'\U0001d6cd': '$M$',
'\U0001d6ce': '$N$',
'\U0001d6cf': '$\\mathbf{\\Xi}$',
'\U0001d6d0': '$O$',
'\U0001d6d1': '$\\mathbf{\\Pi}$',
'\U0001d6d2': '$\\mathbf{\\Rho}$',
'\U0001d6d3': '$\\mathbf{\\varsigma}$',
'\U0001d6d4': '$\\mathbf{\\Sigma}$',
'\U0001d6d5': '$\\mathbf{\\Tau}$',
'\U0001d6d6': '$\\mathbf{\\Upsilon}$',
'\U0001d6d7': '$\\mathbf{\\Phi}$',
'\U0001d6d8': '$\\mathbf{\\Chi}$',
'\U0001d6d9': '$\\mathbf{\\Psi}$',
'\U0001d6da': '$\\mathbf{\\Omega}$',
'\U0001d6db': '$\\partial$',
'\U0001d6dc': '$\\in$',
'\U0001d6dd': '{\\mathbf{\\vartheta}}',
'\U0001d6de': '{\\mathbf{\\varkappa}}',
'\U0001d6df': '{\\mathbf{\\phi}}',
'\U0001d6e0': '{\\mathbf{\\varrho}}',
'\U0001d6e1': '{\\mathbf{\\varpi}}',
'\U0001d6e2': '$\\mathsl{\\Alpha}$',
'\U0001d6e3': '$\\mathsl{\\Beta}$',
'\U0001d6e4': '$\\mathsl{\\Gamma}$',
'\U0001d6e5': '$\\mathsl{\\Delta}$',
'\U0001d6e6': '$\\mathsl{\\Epsilon}$',
'\U0001d6e7': '$\\mathsl{\\Zeta}$',
'\U0001d6e8': '$\\mathsl{\\Eta}$',
'\U0001d6e9': '$\\mathsl{\\Theta}$',
'\U0001d6ea': '$\\mathsl{\\Iota}$',
'\U0001d6eb': '$\\mathsl{\\Kappa}$',
'\U0001d6ec': '$\\mathsl{\\Lambda}$',
'\U0001d6ed': '$M$',
'\U0001d6ee': '$N$',
'\U0001d6ef': '$\\mathsl{\\Xi}$',
'\U0001d6f0': '$O$',
'\U0001d6f1': '$\\mathsl{\\Pi}$',
'\U0001d6f2': '$\\mathsl{\\Rho}$',
'\U0001d6f3': '{\\mathsl{\\vartheta}}',
'\U0001d6f4': '$\\mathsl{\\Sigma}$',
'\U0001d6f5': '$\\mathsl{\\Tau}$',
'\U0001d6f6': '$\\mathsl{\\Upsilon}$',
'\U0001d6f7': '$\\mathsl{\\Phi}$',
'\U0001d6f8': '$\\mathsl{\\Chi}$',
'\U0001d6f9': '$\\mathsl{\\Psi}$',
'\U0001d6fa': '$\\mathsl{\\Omega}$',
'\U0001d6fb': '$\\mathsl{\\nabla}$',
'\U0001d6fc': '$\\mathsl{\\Alpha}$',
'\U0001d6fd': '$\\mathsl{\\Beta}$',
'\U0001d6fe': '$\\mathsl{\\Gamma}$',
'\U0001d6ff': '$\\mathsl{\\Delta}$',
'\U0001d700': '$\\mathsl{\\Epsilon}$',
'\U0001d701': '$\\mathsl{\\Zeta}$',
'\U0001d702': '$\\mathsl{\\Eta}$',
'\U0001d703': '$\\mathsl{\\Theta}$',
'\U0001d704': '$\\mathsl{\\Iota}$',
'\U0001d705': '$\\mathsl{\\Kappa}$',
'\U0001d706': '$\\mathsl{\\Lambda}$',
'\U0001d707': '$M$',
'\U0001d708': '$N$',
'\U0001d709': '$\\mathsl{\\Xi}$',
'\U0001d70a': '$O$',
'\U0001d70b': '$\\mathsl{\\Pi}$',
'\U0001d70c': '$\\mathsl{\\Rho}$',
'\U0001d70d': '$\\mathsl{\\varsigma}$',
'\U0001d70e': '$\\mathsl{\\Sigma}$',
'\U0001d70f': '$\\mathsl{\\Tau}$',
'\U0001d710': '$\\mathsl{\\Upsilon}$',
'\U0001d711': '$\\mathsl{\\Phi}$',
'\U0001d712': '$\\mathsl{\\Chi}$',
'\U0001d713': '$\\mathsl{\\Psi}$',
'\U0001d714': '$\\mathsl{\\Omega}$',
'\U0001d715': '$\\partial$',
'\U0001d716': '$\\in$',
'\U0001d717': '{\\mathsl{\\vartheta}}',
'\U0001d718': '{\\mathsl{\\varkappa}}',
'\U0001d719': '{\\mathsl{\\phi}}',
'\U0001d71a': '{\\mathsl{\\varrho}}',
'\U0001d71b': '{\\mathsl{\\varpi}}',
'\U0001d71c': '$\\mathbit{\\Alpha}$',
'\U0001d71d': '$\\mathbit{\\Beta}$',
'\U0001d71e': '$\\mathbit{\\Gamma}$',
'\U0001d71f': '$\\mathbit{\\Delta}$',
'\U0001d720': '$\\mathbit{\\Epsilon}$',
'\U0001d721': '$\\mathbit{\\Zeta}$',
'\U0001d722': '$\\mathbit{\\Eta}$',
'\U0001d723': '$\\mathbit{\\Theta}$',
'\U0001d724': '$\\mathbit{\\Iota}$',
'\U0001d725': '$\\mathbit{\\Kappa}$',
'\U0001d726': '$\\mathbit{\\Lambda}$',
'\U0001d727': '$M$',
'\U0001d728': '$N$',
'\U0001d729': '$\\mathbit{\\Xi}$',
'\U0001d72a': '$O$',
'\U0001d72b': '$\\mathbit{\\Pi}$',
'\U0001d72c': '$\\mathbit{\\Rho}$',
'\U0001d72d': '{\\mathbit{O}}',
'\U0001d72e': '$\\mathbit{\\Sigma}$',
'\U0001d72f': '$\\mathbit{\\Tau}$',
'\U0001d730': '$\\mathbit{\\Upsilon}$',
'\U0001d731': '$\\mathbit{\\Phi}$',
'\U0001d732': '$\\mathbit{\\Chi}$',
'\U0001d733': '$\\mathbit{\\Psi}$',
'\U0001d734': '$\\mathbit{\\Omega}$',
'\U0001d735': '$\\mathbit{\\nabla}$',
'\U0001d736': '$\\mathbit{\\Alpha}$',
'\U0001d737': '$\\mathbit{\\Beta}$',
'\U0001d738': '$\\mathbit{\\Gamma}$',
'\U0001d739': '$\\mathbit{\\Delta}$',
'\U0001d73a': '$\\mathbit{\\Epsilon}$',
'\U0001d73b': '$\\mathbit{\\Zeta}$',
'\U0001d73c': '$\\mathbit{\\Eta}$',
'\U0001d73d': '$\\mathbit{\\Theta}$',
'\U0001d73e': '$\\mathbit{\\Iota}$',
'\U0001d73f': '$\\mathbit{\\Kappa}$',
'\U0001d740': '$\\mathbit{\\Lambda}$',
'\U0001d741': '$M$',
'\U0001d742': '$N$',
'\U0001d743': '$\\mathbit{\\Xi}$',
'\U0001d744': '$O$',
'\U0001d745': '$\\mathbit{\\Pi}$',
'\U0001d746': '$\\mathbit{\\Rho}$',
'\U0001d747': '$\\mathbit{\\varsigma}$',
'\U0001d748': '$\\mathbit{\\Sigma}$',
'\U0001d749': '$\\mathbit{\\Tau}$',
'\U0001d74a': '$\\mathbit{\\Upsilon}$',
'\U0001d74b': '$\\mathbit{\\Phi}$',
'\U0001d74c': '$\\mathbit{\\Chi}$',
'\U0001d74d': '$\\mathbit{\\Psi}$',
'\U0001d74e': '$\\mathbit{\\Omega}$',
'\U0001d74f': '$\\partial$',
'\U0001d750': '$\\in$',
'\U0001d751': '{\\mathbit{\\vartheta}}',
'\U0001d752': '{\\mathbit{\\varkappa}}',
'\U0001d753': '{\\mathbit{\\phi}}',
'\U0001d754': '{\\mathbit{\\varrho}}',
'\U0001d755': '{\\mathbit{\\varpi}}',
'\U0001d756': '$\\mathsfbf{\\Alpha}$',
'\U0001d757': '$\\mathsfbf{\\Beta}$',
'\U0001d758': '$\\mathsfbf{\\Gamma}$',
'\U0001d759': '$\\mathsfbf{\\Delta}$',
'\U0001d75a': '$\\mathsfbf{\\Epsilon}$',
'\U0001d75b': '$\\mathsfbf{\\Zeta}$',
'\U0001d75c': '$\\mathsfbf{\\Eta}$',
'\U0001d75d': '$\\mathsfbf{\\Theta}$',
'\U0001d75e': '$\\mathsfbf{\\Iota}$',
'\U0001d75f': '$\\mathsfbf{\\Kappa}$',
'\U0001d760': '$\\mathsfbf{\\Lambda}$',
'\U0001d761': '$M$',
'\U0001d762': '$N$',
'\U0001d763': '$\\mathsfbf{\\Xi}$',
'\U0001d764': '$O$',
'\U0001d765': '$\\mathsfbf{\\Pi}$',
'\U0001d766': '$\\mathsfbf{\\Rho}$',
'\U0001d767': '{\\mathsfbf{\\vartheta}}',
'\U0001d768': '$\\mathsfbf{\\Sigma}$',
'\U0001d769': '$\\mathsfbf{\\Tau}$',
'\U0001d76a': '$\\mathsfbf{\\Upsilon}$',
'\U0001d76b': '$\\mathsfbf{\\Phi}$',
'\U0001d76c': '$\\mathsfbf{\\Chi}$',
'\U0001d76d': '$\\mathsfbf{\\Psi}$',
'\U0001d76e': '$\\mathsfbf{\\Omega}$',
'\U0001d76f': '$\\mathsfbf{\\nabla}$',
'\U0001d770': '$\\mathsfbf{\\Alpha}$',
'\U0001d771': '$\\mathsfbf{\\Beta}$',
'\U0001d772': '$\\mathsfbf{\\Gamma}$',
'\U0001d773': '$\\mathsfbf{\\Delta}$',
'\U0001d774': '$\\mathsfbf{\\Epsilon}$',
'\U0001d775': '$\\mathsfbf{\\Zeta}$',
'\U0001d776': '$\\mathsfbf{\\Eta}$',
'\U0001d777': '$\\mathsfbf{\\Theta}$',
'\U0001d778': '$\\mathsfbf{\\Iota}$',
'\U0001d779': '$\\mathsfbf{\\Kappa}$',
'\U0001d77a': '$\\mathsfbf{\\Lambda}$',
'\U0001d77b': '$M$',
'\U0001d77c': '$N$',
'\U0001d77d': '$\\mathsfbf{\\Xi}$',
'\U0001d77e': '$O$',
'\U0001d77f': '$\\mathsfbf{\\Pi}$',
'\U0001d780': '$\\mathsfbf{\\Rho}$',
'\U0001d781': '$\\mathsfbf{\\varsigma}$',
'\U0001d782': '$\\mathsfbf{\\Sigma}$',
'\U0001d783': '$\\mathsfbf{\\Tau}$',
'\U0001d784': '$\\mathsfbf{\\Upsilon}$',
'\U0001d785': '$\\mathsfbf{\\Phi}$',
'\U0001d786': '$\\mathsfbf{\\Chi}$',
'\U0001d787': '$\\mathsfbf{\\Psi}$',
'\U0001d788': '$\\mathsfbf{\\Omega}$',
'\U0001d789': '$\\partial$',
'\U0001d78a': '$\\in$',
'\U0001d78b': '{\\mathsfbf{\\vartheta}}',
'\U0001d78c': '{\\mathsfbf{\\varkappa}}',
'\U0001d78d': '{\\mathsfbf{\\phi}}',
'\U0001d78e': '{\\mathsfbf{\\varrho}}',
'\U0001d78f': '{\\mathsfbf{\\varpi}}',
'\U0001d790': '$\\mathsfbfsl{\\Alpha}$',
'\U0001d791': '$\\mathsfbfsl{\\Beta}$',
'\U0001d792': '$\\mathsfbfsl{\\Gamma}$',
'\U0001d793': '$\\mathsfbfsl{\\Delta}$',
'\U0001d794': '$\\mathsfbfsl{\\Epsilon}$',
'\U0001d795': '$\\mathsfbfsl{\\Zeta}$',
'\U0001d796': '$\\mathsfbfsl{\\Eta}$',
'\U0001d797': '$\\mathsfbfsl{\\vartheta}$',
'\U0001d798': '$\\mathsfbfsl{\\Iota}$',
'\U0001d799': '$\\mathsfbfsl{\\Kappa}$',
'\U0001d79a': '$\\mathsfbfsl{\\Lambda}$',
'\U0001d79b': '$M$',
'\U0001d79c': '$N$',
'\U0001d79d': '$\\mathsfbfsl{\\Xi}$',
'\U0001d79e': '$O$',
'\U0001d79f': '$\\mathsfbfsl{\\Pi}$',
'\U0001d7a0': '$\\mathsfbfsl{\\Rho}$',
'\U0001d7a1': '{\\mathsfbfsl{\\vartheta}}',
'\U0001d7a2': '$\\mathsfbfsl{\\Sigma}$',
'\U0001d7a3': '$\\mathsfbfsl{\\Tau}$',
'\U0001d7a4': '$\\mathsfbfsl{\\Upsilon}$',
'\U0001d7a5': '$\\mathsfbfsl{\\Phi}$',
'\U0001d7a6': '$\\mathsfbfsl{\\Chi}$',
'\U0001d7a7': '$\\mathsfbfsl{\\Psi}$',
'\U0001d7a8': '$\\mathsfbfsl{\\Omega}$',
'\U0001d7a9': '$\\mathsfbfsl{\\nabla}$',
'\U0001d7aa': '$\\mathsfbfsl{\\Alpha}$',
'\U0001d7ab': '$\\mathsfbfsl{\\Beta}$',
'\U0001d7ac': '$\\mathsfbfsl{\\Gamma}$',
'\U0001d7ad': '$\\mathsfbfsl{\\Delta}$',
'\U0001d7ae': '$\\mathsfbfsl{\\Epsilon}$',
'\U0001d7af': '$\\mathsfbfsl{\\Zeta}$',
'\U0001d7b0': '$\\mathsfbfsl{\\Eta}$',
'\U0001d7b1': '$\\mathsfbfsl{\\vartheta}$',
'\U0001d7b2': '$\\mathsfbfsl{\\Iota}$',
'\U0001d7b3': '$\\mathsfbfsl{\\Kappa}$',
'\U0001d7b4': '$\\mathsfbfsl{\\Lambda}$',
'\U0001d7b5': '$M$',
'\U0001d7b6': '$N$',
'\U0001d7b7': '$\\mathsfbfsl{\\Xi}$',
'\U0001d7b8': '$O$',
'\U0001d7b9': '$\\mathsfbfsl{\\Pi}$',
'\U0001d7ba': '$\\mathsfbfsl{\\Rho}$',
'\U0001d7bb': '$\\mathsfbfsl{\\varsigma}$',
'\U0001d7bc': '$\\mathsfbfsl{\\Sigma}$',
'\U0001d7bd': '$\\mathsfbfsl{\\Tau}$',
'\U0001d7be': '$\\mathsfbfsl{\\Upsilon}$',
'\U0001d7bf': '$\\mathsfbfsl{\\Phi}$',
'\U0001d7c0': '$\\mathsfbfsl{\\Chi}$',
'\U0001d7c1': '$\\mathsfbfsl{\\Psi}$',
'\U0001d7c2': '$\\mathsfbfsl{\\Omega}$',
'\U0001d7c3': '$\\partial$',
'\U0001d7c4': '$\\in$',
'\U0001d7c5': '{\\mathsfbfsl{\\vartheta}}',
'\U0001d7c6': '{\\mathsfbfsl{\\varkappa}}',
'\U0001d7c7': '{\\mathsfbfsl{\\phi}}',
'\U0001d7c8': '{\\mathsfbfsl{\\varrho}}',
'\U0001d7c9': '{\\mathsfbfsl{\\varpi}}',
'\U0001d7ce': '$\\mathbf{0}$',
'\U0001d7cf': '$\\mathbf{1}$',
'\U0001d7d0': '$\\mathbf{2}$',
'\U0001d7d1': '$\\mathbf{3}$',
'\U0001d7d2': '$\\mathbf{4}$',
'\U0001d7d3': '$\\mathbf{5}$',
'\U0001d7d4': '$\\mathbf{6}$',
'\U0001d7d5': '$\\mathbf{7}$',
'\U0001d7d6': '$\\mathbf{8}$',
'\U0001d7d7': '$\\mathbf{9}$',
'\U0001d7d8': '$\\mathbb{0}$',
'\U0001d7d9': '$\\mathbb{1}$',
'\U0001d7da': '$\\mathbb{2}$',
'\U0001d7db': '$\\mathbb{3}$',
'\U0001d7dc': '$\\mathbb{4}$',
'\U0001d7dd': '$\\mathbb{5}$',
'\U0001d7de': '$\\mathbb{6}$',
'\U0001d7df': '$\\mathbb{7}$',
'\U0001d7e0': '$\\mathbb{8}$',
'\U0001d7e1': '$\\mathbb{9}$',
'\U0001d7e2': '$\\mathsf{0}$',
'\U0001d7e3': '$\\mathsf{1}$',
'\U0001d7e4': '$\\mathsf{2}$',
'\U0001d7e5': '$\\mathsf{3}$',
'\U0001d7e6': '$\\mathsf{4}$',
'\U0001d7e7': '$\\mathsf{5}$',
'\U0001d7e8': '$\\mathsf{6}$',
'\U0001d7e9': '$\\mathsf{7}$',
'\U0001d7ea': '$\\mathsf{8}$',
'\U0001d7eb': '$\\mathsf{9}$',
'\U0001d7ec': '$\\mathsfbf{0}$',
'\U0001d7ed': '$\\mathsfbf{1}$',
'\U0001d7ee': '$\\mathsfbf{2}$',
'\U0001d7ef': '$\\mathsfbf{3}$',
'\U0001d7f0': '$\\mathsfbf{4}$',
'\U0001d7f1': '$\\mathsfbf{5}$',
'\U0001d7f2': '$\\mathsfbf{6}$',
'\U0001d7f3': '$\\mathsfbf{7}$',
'\U0001d7f4': '$\\mathsfbf{8}$',
'\U0001d7f5': '$\\mathsfbf{9}$',
'\U0001d7f6': '$\\mathtt{0}$',
'\U0001d7f7': '$\\mathtt{1}$',
'\U0001d7f8': '$\\mathtt{2}$',
'\U0001d7f9': '$\\mathtt{3}$',
'\U0001d7fa': '$\\mathtt{4}$',
'\U0001d7fb': '$\\mathtt{5}$',
'\U0001d7fc': '$\\mathtt{6}$',
'\U0001d7fd': '$\\mathtt{7}$',
'\U0001d7fe': '$\\mathtt{8}$',
'\U0001d7ff': '$\\mathtt{9}$',
# Items from simple list
'\u0106': "{\\a\\'C}",
'\u0408': '{\\CYRJE}',
'\u2191': '{\\textuparrow}',
'\u0493': '{\\cyrghcrs}',
'\u2116': '{\\textnumero}',
'\u0418': '{\\CYRI}',
'\u04a3': '{\\cyrndsc}',
'\u2126': '{\\textohm}',
'\u0428': '{\\CYRSH}',
'\u04b3': '{\\cyrhdsc}',
'\u0438': '{\\cyri}',
'\u03bd': '{$\\nu$}',
'\u04c3': '{\\CYRKHK}',
'\u0448': '{\\cyrsh}',
'\xcb': '{\\"E}',
'\u0458': '{\\cyrje}',
'\xdb': '{\\^U}',
'\xeb': '{\\"e}',
'\xfb': '{\\^u}',
'\u0413': '{\\CYRG}',
'\u0498': '{\\CYRZDSC}',
'\xa0': '{�}',
'\u0423': '{\\CYRU}',
'\u04a8': '{\\CYRABHHA}',
'\u0433': '{\\cyrg}',
'\u04b8': '{\\CYRCHVCRS}',
'\u203b': '{\\textreferencemark}',
'\u211e': '{\\textrecipe}',
'\xc0': '{\\`A}',
'\u0443': '{\\cyru}',
'\u04c8': '{\\cyrnhk}',
'\u0151': '{\\H o}',
'\u04d8': '{\\CYRSCHWA}',
'\u0161': '{\\v s}',
'\xe0': '{\\`a}',
'\u0463': '{\\cyryat}',
'\u04e8': '{\\CYROTLD}',
'\u0171': '{\\H u}',
'\u0473': '{\\cyrfita}',
'\u20ab': '{\\textdong}',
'\u2103': '{\\textcelsius}',
'\u040e': '{\\CYRUSHRT}',
'\u2212': '{\\textminus}',
'\u2016': '{\\textbardbl}',
'\u0499': '{\\cyrzdsc}',
'\u041e': '{\\CYRO}',
'\u2120': '{\\textservicemark}',
'\u03a7': '{$\\chi$}',
'\u2026': '{\\textellipsis}',
'\u04a9': '{\\cyrabhha}',
'\u042e': '{\\CYRYU}',
'\xb1': '{\\textpm}',
'\u0130': '{\\.I}',
'\u04b9': '{\\cyrchvcrs}',
'\u043e': '{\\cyro}',
'\xc1': "{\\'A}",
'\u044e': '{\\cyryu}',
'\xd1': '{\\�N}',
'\u0150': '{\\H O}',
'\u04d9': '{\\cyrschwa}',
'\u02dd': '{\\textacutedbl}',
'\u045e': '{\\cyrushrt}',
'\xe1': "{\\'a}",
'\u0160': '{\\v S}',
'\u04e9': '{\\cyrotld}',
'\u266a': '{\\textmusicalnote}',
'\xf1': '{\\�n}',
'\u0170': '{\\H U}',
'\u0409': '{\\CYRLJE}',
'\u048e': '{\\CYRRTICK}',
'\u2190': '{\\leftarrow}',
'\u0419': '{\\CYRISHRT}',
'\u011b': '{\\v e}',
'\u049e': '{\\CYRKHCRS}',
'\u0429': '{\\CYRSHCH}',
'\u04ae': '{\\CYRY}',
'\u0439': '{\\cyrishrt}',
'\u04be': '{\\CYRABHCHDSC}',
'\u0449': '{\\cyrshch}',
'\u04ce': '{\\cyrmdsc}',
'\xd6': '{\\"O}',
'\u0459': '{\\cyrlje}',
'\u015b': "{\\'s}",
'\u20a6': '{\\textnaira}',
'\xf6': '{\\"o}',
'\u017b': '{\\.Z}',
'\u0102': '{\\u A}',
'\u0404': '{\\CYRIE}',
'\u200c': '{\\textcompwordmark}',
'\u048f': '{\\cyrrtick}',
'\u0414': '{\\CYRD}',
'\u049f': '{\\cyrkhcrs}',
'\u0424': '{\\CYRF}',
'\u04af': '{\\cyry}',
'\u0434': '{\\cyrd}',
'\xb7': '{\\textperiodcentered}',
'\u04bf': '{\\cyrabhchdsc}',
'\u0444': '{\\cyrf}',
'\xc7': '{\\c C}',
'\u0454': '{\\cyrie}',
'\u0162': '{\\c T}',
'\xe7': '{\\c c}',
'\u0474': '{\\CYRIZH}',
'\xf7': '{\\textdiv}',
'\u010d': '{\\v c}',
'\u040f': '{\\CYRDZHE}',
'\u0192': '{\\textflorin}',
'\u0494': '{\\CYRGHK}',
'\u041f': '{\\CYRP}',
'\u04a4': '{\\CYRNG}',
'\xac': '{\\textlnot}',
'\u042f': '{\\CYRYA}',
'\u04b4': '{\\CYRTETSE}',
'\u013d': '{\\v L}',
'\u043f': '{\\cyrp}',
'\u04c4': '{\\cyrkhk}',
'\xcc': '{\\`I}',
'\u044f': '{\\cyrya}',
'\u2422': '{\\textblank}',
'\u04d4': '{\\CYRAE}',
'\xdc': '{\\"U}',
'\u045f': '{\\cyrdzhe}',
'\xec': '{\\`i}',
'\u017d': '{\\v Z}',
'\xfc': '{\\"u}',
'\u040a': '{\\CYRNJE}',
'\u010c': '{\\v C}',
'\u0495': '{\\cyrghk}',
'\u041a': '{\\CYRK}',
'\u04a5': '{\\cyrng}',
'\u042a': '{\\CYRHRDSN}',
'\u2032': '{$\\prime$}',
'\u04b5': '{\\cyrtetse}',
'\u043a': '{\\cyrk}',
'\u04c5': '{\\CYRLDSC}',
'\u044a': '{\\cyrhrdsn}',
'\xcd': "{\\'I}",
'\u2052': '{\\textdiscount}',
'\u04d5': '{\\cyrae}',
'\u045a': '{\\cyrnje}',
'\xdd': "{\\'Y}",
'\u046a': '{\\CYRBYUS}',
'\xed': "{\\'\\i}",
'\u25ef': '{\\textbigcircle}',
'\xfd': "{\\'y}",
'\u017c': '{\\.z}',
'\u0405': '{\\CYRDZE}',
'\u0107': "{\\'c}",
'\u0415': '{\\CYRE}',
'\u2117': '{\\textcircledP}',
'\u049a': '{\\CYRKDSC}',
'\u0425': '{\\CYRH}',
'\u2127': '{\\textmho}',
'\u04aa': '{\\CYRSDSC}',
'\xb2': '{\\texttwosuperior}',
'\u0435': '{\\cyre}',
'\u04ba': '{\\CYRSHHA}',
'\u203d': '{\\textinterrobang}',
'\xc2': '{\\^A}',
'\u0445': '{\\cyrh}',
'\u0147': '{\\v N}',
'\u02c6': '{\\textasciicircum}',
'\xd2': '{\\`O}',
'\u0455': '{\\cyrdze}',
'\u2261': '{$\\equiv$}',
'\xe2': '{\\^a}',
'\xf2': '{\\`o}',
'\u0475': '{\\cyrizh}',
'\u010e': '{\\v D}',
'\u0410': '{\\CYRA}',
'\u2018': '{\\textquoteleft}',
'\u049b': '{\\cyrkdsc}',
'\u011e': '{\\u G}',
'\u0420': '{\\CYRR}',
'\u04ab': '{\\cyrsdsc}',
'\u232a': '{\\textrangle}',
'\u212e': '{\\textestimated}',
'\u0430': '{\\cyra}',
'\xb3': '{\\textthreesuperior}',
'\u04bb': '{\\cyrshha}',
'\u013e': '{\\v l}',
'\u0440': '{\\cyrr}',
'\xc3': '{\\�A}',
'\u04cb': '{\\CYRCHLDSC}',
'\xd3': "{\\'O}",
'\u015e': '{\\c S}',
'\xe3': '{\\�a}',
'\u016e': '{\\r U}',
'\xf3': "{\\'o}",
'\u017e': '{\\v z}',
'\u040b': '{\\CYRTSHE}',
'\u0490': '{\\CYRGUP}',
'\u0119': '{\\k e}',
'\u041b': '{\\CYRL}',
'\u04a0': '{\\CYRKBEAK}',
'\u042b': '{\\CYRERY}',
'\u04b0': '{\\CYRYHCRS}',
'\u0e37': '{\\textbaht}',
'\u0139': "{\\'L}",
'\xb8': '{\\c\\ }',
'\u043b': '{\\cyrl}',
'\u04c0': '{\\CYRpalochka}',
'\xc8': '{\\`E}',
'\u044b': '{\\cyrery}',
'\u0159': '{\\v r}',
'\u045b': '{\\cyrtshe}',
'\u04e0': '{\\CYRABHDZE}',
'\u25e6': '{\\textopenbullet}',
'\xe8': '{\\`e}',
'\u046b': '{\\cyrbyus}',
'\u0179': "{\\'Z}",
'\u0406': '{\\CYRII}',
'\u0491': '{\\cyrgup}',
'\u2193': '{\\textdownarrow}',
'\u2192': '{\\textrightarrow}',
'\u0416': '{\\CYRZH}',
'\u0118': '{\\k E}',
'\u201e': '{\\quotedblbase}',
'\u04a1': '{\\cyrkbeak}',
'\u0426': '{\\CYRC}',
'\u04b1': '{\\cyryhcrs}',
'\u0436': '{\\cyrzh}',
'\xb9': '{\\textonesuperior}',
'\u0446': '{\\cyrc}',
'\xc9': "{\\'E}",
'\u0148': '{\\v n}',
'\u204e': '{\\textasteriskcentered}',
'\u0456': '{\\cyrii}',
'\xd9': '{\\`U}',
'\u0158': '{\\v R}',
'\u04e1': '{\\cyrabhdze}',
'\xe9': "{\\'e}",
'\xf9': '{\\`u}',
'\u0178': '{\\"Y}',
'\u0401': '{\\CYRYO}',
'\u0103': '{\\u a}',
'\u0411': '{\\CYRB}',
'\u0496': '{\\CYRZHDSC}',
'\u2019': '{\\textquoteright}',
'\u0421': '{\\CYRS}',
'\u04a6': '{\\CYRPHK}',
'\u0431': '{\\cyrb}',
'\u04b6': '{\\CYRCHRDSC}',
'\u0441': '{\\cyrs}',
'\u0143': "{\\'N}",
'\u04c6': '{\\cyrldsc}',
'\u02ca': '{\\textasciitilde}',
'\xce': '{\\^I}',
'\u0451': '{\\cyryo}',
'\u0163': '{\\c t}',
'\xee': '{\\^\\i}',
'\u0497': '{\\cyrzhdsc}',
'\u011a': '{\\v E}',
'\u041c': '{\\CYRM}',
'\u04a7': '{\\cyrphk}',
'\u042c': '{\\CYRSFTSN}',
'\u04b7': '{\\cyrchrdsc}',
'\u013a': "{\\'l}",
'\u043c': '{\\cyrm}',
'\u2044': '{\\textfractionsolidus}',
'\u04c7': '{\\CYRNHK}',
'\u044c': '{\\cyrsftsn}',
'\xcf': '{\\"I}',
'\u015a': "{\\'S}",
'\xef': '{\\"\\i}',
'\u017a': "{\\'z}",
'\xff': '{\\"y}',
'\u0105': '{\\k a}',
'\u0407': '{\\CYRYI}',
'\u048c': '{\\CYRSEMISFTSN}',
'\u0417': '{\\CYRZ}',
'\u049c': '{\\CYRKVCRS}',
'\u0427': '{\\CYRCH}',
'\u2329': '{\\textlangle}',
'\u04ac': '{\\CYRTDSC}',
'\u0437': '{\\cyrz}',
'\u04bc': '{\\CYRABHCH}',
'\u20a1': '{\\textcolonmonetary}',
'\xc4': '{\\"A}',
'\u0447': '{\\cyrch}',
'\u04cc': '{\\cyrchldsc}',
'\u0155': "{\\'r}",
'\xd4': '{\\^O}',
'\u0457': '{\\cyryi}',
'\u0165': '{\\v t}',
'\xe4': '{\\"a}',
'\u20a9': '{\\textwon}',
'\xf4': '{\\^o}',
'\u0402': '{\\CYRDJE}',
'\u0104': '{\\k A}',
'\u048d': '{\\cyrsemisftsn}',
'\u0412': '{\\CYRV}',
'\u201a': '{\\quotesinglbase}',
'\u049d': '{\\cyrkvcrs}',
'\u20b1': '{\\textpeso}',
'\u0422': '{\\CYRT}',
'\u04ad': '{\\cyrtdsc}',
'\u0432': '{\\cyrv}',
'\xb5': '{\\textmu}',
'\u04bd': '{\\cyrabhch}',
'\u0442': '{\\cyrt}',
'\xc5': '{\\r A}',
'\u0144': "{\\'n}",
'\u04cd': '{\\CYRMDSC}',
'\u0452': '{\\cyrdje}',
'\xd5': '{\\�O}',
'\u0154': "{\\'R}",
'\u0462': '{\\CYRYAT}',
'\xe5': '{\\r a}',
'\u0164': '{\\v T}',
'\u0472': '{\\CYRFITA}',
'\xf5': '{\\�o}',
'\u010f': '{\\v d}',
'\u0492': '{\\CYRGHCRS}',
'\u041d': '{\\CYRN}',
'\u011f': '{\\u g}',
'\u04a2': '{\\CYRNDSC}',
'\u042d': '{\\CYREREV}',
'\u04b2': '{\\CYRHDSC}',
'\u043d': '{\\cyrn}',
'\xca': '{\\^E}',
'\u044d': '{\\cyrerev}',
'\xda': "{\\'U}",
'\u015f': '{\\c s}',
'\u20a4': '{\\textlira}',
'\xea': '{\\^e}',
'\u016f': '{\\r u}',
'\xfa': "{\\'u}"
# Items to add at a latter date (check first)
# '\u0000': r'{$\alpha$}',
# '\u0000': r'{$\beta$}',
# '\u0000': r'{$\gamma$}',
# '\u0000': r'{$\delta$}',
# '\u0000': r'{$\epsilon$}',
# '\u0000': r'{$\varepsilon$}',
# '\u0000': r'{$\zeta$}',
# '\u0000': r'{$\eta$}',
# '\u0000': r'{$\theta$}',
# '\u0000': r'{$\vartheta$}',
# '\u0000': r'{$\iota$}',
# '\u0000': r'{$\kappa$}',
# '\u0000': r'{$\lambda$}',
# '\u0000': r'{$\mu$}',
# '\u0000': r'{$\xi$}',
# '\u0000': r'{$\pi$}',
# '\u0000': r'{$\varpi$}',
# '\u0000': r'{$\rho$}',
# '\u0000': r'{$\varrho$}',
# '\u0000': r'{$\sigma$}',
# '\u0000': r'{$\varsigma$}',
# '\u0000': r'{$\tau$}',
# '\u0000': r'{$\upsilon$}',
# '\u0000': r'{$\phi$}',
# '\u0000': r'{$\varphi$}',
# '\u0000': r'{$\psi$}',
# '\u0000': r'{$\omega$}',
# '\u0000': r'{$\Gamma$}',
# '\u0000': r'{$\Delta$}',
# '\u0000': r'{$\Theta$}',
# '\u0000': r'{$\Lambda$}',
# '\u0000': r'{$\Xi$}',
# '\u0000': r'{$\Pi$}',
# '\u0000': r'{$\Sigma$}',
# '\u0000': r'{$\Upsilon$}',
# '\u0000': r'{$\Phi$}',
# '\u0000': r'{$\Psi$}',
# '\u0000': r'{$\Omega$}',
}
# }}}
# Plain text replacements applied by BibTeX.resolveEntities() before the
# unicode -> LaTeX pass: TeX dashes for em/en dashes, braced straight quote.
entity_mapping = {
    '—':'{---}',
    '–':'{--}',
    '"':'{"}',
}
class BibTeX:

    '''
    Helpers to turn unicode metadata strings into BibTeX entries, either as
    pure ASCII (via the utf8enc2latex_mapping table) or as raw UTF-8.
    '''

    def __init__(self):
        self.rep_utf8 = MReplace(utf8enc2latex_mapping)
        self.rep_ent = MReplace(entity_mapping)
        # Set default conversion to ASCII BibTeX
        self.ascii_bibtex = True
        # This substitution is based on the description of cite key restrictions at
        # http://bibdesk.sourceforge.net/manual/BibDesk%20Help_2.html
        # BUGFIX: the character class contained a U+FFFD replacement character
        # where the tilde belongs (see the matching '# TILDE' fix below).
        self.invalid_cit = re.compile('[ "@\',\\#}{~%&$^]')
        self.upper = re.compile('[' +
            string.ascii_uppercase + ']')
        self.escape = re.compile('[#&%_]')

    def ValidateCitationKey(self, text):
        """
        Removes characters not allowed in BibTeX keys
        """
        return self.invalid_cit.sub('', text)

    def braceUppercase(self, text):
        """
        Convert uppercase letters to bibtex encoded uppercase
        """
        return self.upper.sub(lambda m: '{%s}' % m.group(), text)

    def resolveEntities(self, text):
        # Apply the simple entity_mapping replacements (dashes, quotes)
        return self.rep_ent.mreplace(text)

    def resolveUnicode(self, text):
        # UTF-8 text as entry: map each unicode char to its LaTeX form, then
        # merge adjacent inline-math spans produced by the mapping
        text = self.rep_utf8.mreplace(text)
        return text.replace('$}{$', '')

    def escapeSpecialCharacters(self, text):
        """
        Latex escaping some (not all) special characters
        """
        text = text.replace('\\', '\\\\')
        # BUGFIX: both the search char and the replacement contained U+FFFD
        # where the tilde belongs -- the original comment says TILDE.
        text = text.replace('~', '{\\char`\\~}')  # TILDE
        return self.escape.sub(lambda m: '\\%s' % m.group(), text)

    # Calibre functions: Option to go to official ASCII Bibtex or unofficial UTF-8
    def utf8ToBibtex(self, text):
        """
        Go from an unicode entry to ASCII Bibtex format without encoding
        """
        if len(text) == 0:
            return ''
        text = self.resolveEntities(text)
        text = self.escapeSpecialCharacters(text)
        if self.ascii_bibtex:
            text = self.resolveUnicode(text)
        return text

    def bibtex_author_format(self, item):
        """
        Format authors for Bibtex compliance (get a list as input)
        """
        return self.utf8ToBibtex(' and '.join([author for author in item]))

    def stripUnmatchedSyntax(self, text, open_character, close_character):
        """
        Strips unmatched BibTeX syntax
        """
        stack = []
        assert len(open_character) == 1 and len(close_character) == 1
        remove = []
        for i, ch in enumerate(text):
            if ch == open_character:
                stack.append(i)
            elif ch == close_character:
                try:
                    stack.pop()
                except IndexError:
                    # Remove unmatched closing char
                    remove.append(i)
        # Remove unmatched opening chars
        remove.extend(stack)
        if remove:
            text = list(text)
            for i in sorted(remove, reverse=True):
                text.pop(i)
            text = ''.join(text)
        return text
| 88,714 | Python | .py | 2,475 | 30.146667 | 2,497 | 0.476981 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,177 | smtp.py | kovidgoyal_calibre/src/calibre/utils/smtp.py | __license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
This module implements a simple commandline SMTP client that supports:
* Delivery via an SMTP relay with SSL or TLS
* Background delivery with failures being saved in a maildir mailbox
'''
import encodings.idna as idna
import os
import socket
import sys
import traceback
from calibre import isbytestring
from calibre.constants import iswindows
from calibre.utils.localization import _
from polyglot.builtins import as_unicode, native_string_type
def decode_fqdn(fqdn):
    # socket.getfqdn() can hand back bytes; normalise to str. On Windows the
    # ANSI codepage (mbcs) is used, UTF-8 elsewhere; an undecodable value
    # degrades to the empty string.
    if not isinstance(fqdn, bytes):
        return fqdn
    codec = 'mbcs' if iswindows else 'utf-8'
    try:
        return fqdn.decode(codec)
    except Exception:
        return ''
def sanitize_hostname(hostname):
    # Hostnames used in EHLO must not contain '..'; degrade each run to '_'.
    return '_'.join(hostname.split('..'))
def safe_localhost():
    '''
    Return a value safe to use as the EHLO/HELO argument: the IDNA-encoded
    FQDN when one can be determined, otherwise a domain literal built from
    this machine's IP address (e.g. ``[127.0.0.1]``).
    '''
    # RFC 2821 says we should use the fqdn in the EHLO/HELO verb, and
    # if that can't be calculated, that we should use a domain literal
    # instead (essentially an encoded IP address like [A.B.C.D]).
    try:
        fqdn = decode_fqdn(socket.getfqdn())
    except UnicodeDecodeError:
        if not iswindows:
            raise
        from calibre_extensions.winutil import get_computer_name
        fqdn = get_computer_name()
    if '.' in fqdn and fqdn != '.':
        # Some mail servers have problems with non-ascii local hostnames, see
        # https://bugs.launchpad.net/bugs/1256549
        try:
            local_hostname = as_unicode(idna.ToASCII(fqdn))
        except Exception:
            local_hostname = 'localhost.localdomain'
    else:
        # We can't find an fqdn hostname, so use a domain literal
        addr = '127.0.0.1'
        try:
            addr = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            pass
        local_hostname = '[%s]' % addr
    return local_hostname
def get_msgid_domain(from_):
    '''Domain to use in generated Message-Id headers: taken from the From
    address, falling back to this machine's name when it cannot be parsed.'''
    from email.utils import parseaddr
    domain = ''
    try:
        # The address part of the From header, then everything after the '@'
        address = parseaddr(from_)[1]
        domain = address.partition('@')[2].strip()
        # parseaddr can let a trailing '>' through for malformed input
        domain = domain.rstrip('>').strip()
    except Exception:
        domain = ''
    return domain or safe_localhost()
def create_mail(from_, to, subject, text=None, attachment_data=None,
                attachment_type=None, attachment_name=None):
    '''
    Build an EmailMessage with the given headers, an optional plain text body
    and an optional single attachment. At least one of *text* and
    *attachment_data* must be supplied.
    '''
    assert text or attachment_data
    import uuid
    from email.message import EmailMessage
    from email.utils import formatdate
    outer = EmailMessage()
    outer['From'] = from_
    outer['To'] = to
    outer['Subject'] = subject
    outer['Date'] = formatdate(localtime=True)
    outer['Message-Id'] = f"<{uuid.uuid4()}@{get_msgid_domain(from_)}>"
    outer.preamble = 'You will not see this in a MIME-aware mail reader.\n'
    if text is not None:
        if isbytestring(text):
            text = text.decode('utf-8', 'replace')
        outer.set_content(text)
    if attachment_data is not None:
        assert attachment_data and attachment_name
        try:
            maintype, subtype = attachment_type.split('/', 1)
        except Exception:
            # No/unparseable MIME type supplied: use a generic binary type
            maintype, subtype = 'application', 'octet-stream'
        if isinstance(attachment_data, str):
            attachment_data = attachment_data.encode('utf-8')
        outer.add_attachment(attachment_data, maintype=maintype, subtype=subtype, filename=attachment_name)
    return outer
def get_mx(host, verbose=0):
    '''Return the MX exchange host names for *host*, sorted by preference
    (lowest preference value, i.e. most preferred, first).'''
    import dns.resolver
    if verbose:
        print('Find mail exchanger for', host)
    # NOTE(review): dns.resolver.query() is deprecated in dnspython >= 2.0 in
    # favour of resolve() -- confirm which dnspython version is bundled.
    answers = list(dns.resolver.query(host, 'MX'))
    answers.sort(key=lambda x: int(getattr(x, 'preference', sys.maxsize)))
    return [str(x.exchange) for x in answers if hasattr(x, 'exchange')]
def sendmail_direct(from_, to, msg, timeout, localhost, verbose,
        debug_output=None):
    '''
    Deliver *msg* to a single recipient by connecting directly (port 25) to
    one of the MX hosts of the recipient's domain, tried in preference order.
    Raises OSError if every MX host fails.
    '''
    from email.message import Message

    import polyglot.smtplib as smtplib
    hosts = get_mx(to.split('@')[-1].strip(), verbose)
    timeout=None # Non blocking sockets sometimes don't work
    kwargs = dict(timeout=timeout, local_hostname=sanitize_hostname(localhost or safe_localhost()))
    if debug_output is not None:
        kwargs['debug_to'] = debug_output
    s = smtplib.SMTP(**kwargs)
    s.set_debuglevel(verbose)
    if not hosts:
        raise ValueError('No mail server found for address: %s'%to)
    last_error = last_traceback = None
    for host in hosts:
        try:
            s.connect(host, 25)
            if isinstance(msg, Message):
                s.send_message(msg, from_, [to])
            else:
                s.sendmail(from_, [to], msg)
            # Success: quit cleanly and return the server's final response
            return s.quit()
        except Exception as e:
            last_error, last_traceback = e, traceback.format_exc()
    if last_error is not None:
        print(last_traceback)
        raise OSError('Failed to send mail: '+repr(last_error))
def get_smtp_class(use_ssl=False, debuglevel=0):
    '''Return an SMTP (or SMTP_SSL) subclass whose debuglevel is already set
    as a class attribute, so it is in effect during connect().'''
    # We need this as in python 3.7 we have to pass the hostname
    # in the constructor, because of https://bugs.python.org/issue36094
    # which means the constructor calls connect(),
    # but there is no way to set debuglevel before connect() is called
    import polyglot.smtplib as smtplib
    cls = smtplib.SMTP_SSL if use_ssl else smtplib.SMTP
    bases = (cls,)
    return type(native_string_type('SMTP'), bases, {native_string_type('debuglevel'): debuglevel})
def sendmail(msg, from_, to, localhost=None, verbose=0, timeout=None,
             relay=None, username=None, password=None, encryption='TLS',
             port=-1, debug_output=None, verify_server_cert=False, cafile=None):
    '''
    Send *msg* to the addresses in *to*. Without a *relay* delivery is
    attempted directly via the recipient's MX host; with a relay the given
    credentials/encryption are used. Returns the response from quit()
    (None if quit itself failed).
    '''
    from email.message import Message
    if relay is None:
        # NOTE(review): this returns inside the loop, so in direct mode only
        # the first recipient is ever delivered to -- confirm intended.
        for x in to:
            return sendmail_direct(from_, x, msg, timeout, localhost, verbose)
    timeout = None  # Non-blocking sockets sometimes don't work
    port = int(port)
    if port < 0:
        # Default ports: 465 for implicit SSL, 25 otherwise
        port = 25 if encryption != 'SSL' else 465
    kwargs = dict(host=relay, port=port, timeout=timeout, local_hostname=sanitize_hostname(localhost or safe_localhost()))
    if debug_output is not None:
        kwargs['debug_to'] = debug_output
    cls = get_smtp_class(use_ssl=encryption == 'SSL', debuglevel=verbose)
    s = cls(**kwargs)
    if encryption == 'TLS':
        context = None
        if verify_server_cert:
            import ssl
            context = ssl.create_default_context(cafile=cafile)
        s.starttls(context=context)
        s.ehlo()
    if username is not None and password is not None:
        s.login(username, password)
    ret = None
    try:
        if isinstance(msg, Message):
            s.send_message(msg, from_, to)
        else:
            s.sendmail(from_, to, msg)
    finally:
        try:
            ret = s.quit()
        except:
            pass  # Ignore so as to not hide original error
    return ret
def option_parser():
    '''Build the command line option parser for calibre-smtp.'''
    try:
        from calibre.utils.config import OptionParser
        OptionParser
    except ImportError:
        from optparse import OptionParser
    parser = OptionParser(_('''\
%prog [options] [from to text]
Send mail using the SMTP protocol. %prog has two modes of operation. In the
compose mode you specify from to and text and these are used to build and
send an email message. In the filter mode, %prog reads a complete email
message from STDIN and sends it.
text is the body of the email message.
If text is not specified, a complete email message is read from STDIN.
from is the email address of the sender and to is the email address
of the recipient. When a complete email is read from STDIN, from and to
are only used in the SMTP negotiation, the message headers are not modified.
'''))
    c=parser.add_option_group('COMPOSE MAIL',
        _('Options to compose an email. Ignored if text is not specified')).add_option
    c('-a', '--attachment', help=_('File to attach to the email'))
    c('-s', '--subject', help=_('Subject of the email'))
    parser.add_option('-l', '--localhost',
        help=_('Host name of localhost. Used when connecting '
        'to SMTP server.'))
    r=parser.add_option_group('SMTP RELAY',
        _('Options to use an SMTP relay server to send mail. '
        'calibre will try to send the email directly unless --relay is '
        'specified.')).add_option
    r('-r', '--relay', help=_('An SMTP relay server to use to send mail.'))
    r('-p', '--port', default=-1,
      help=_('Port to connect to on relay server. Default is to use 465 if '
        'encryption method is SSL and 25 otherwise.'))
    r('-u', '--username', help=_('Username for relay'))
    # NOTE(review): '-p' is also used above for --port. calibre's own
    # OptionParser resolves the conflict (the later definition wins, so '-p'
    # means --password and the port is only reachable via '--port'), but the
    # plain optparse fallback would raise OptionConflictError -- confirm.
    r('-p', '--password', help=_('Password for relay'))
    r('-e', '--encryption-method', default='TLS',
      choices=['TLS', 'SSL', 'NONE'],
      help=_('Encryption method to use when connecting to relay. Choices are '
        'TLS, SSL and NONE. Default is TLS. WARNING: Choosing NONE is highly insecure'))
    # NOTE(review): this flag has no action='store_true'; with plain optparse
    # it would be a 'store' option requiring a value -- confirm intended.
    r('--dont-verify-server-certificate', help=_(
        'Do not verify the server certificate when connecting using TLS. This used'
        ' to be the default behavior in calibre versions before 3.27. If you are using'
        ' a relay with a self-signed or otherwise invalid certificate, you can use this option to restore'
        ' the pre 3.27 behavior'))
    r('--cafile', help=_(
        'Path to a file of concatenated CA certificates in PEM format, used to verify the'
        ' server certificate when using TLS. By default, the system CA certificates are used.'))
    parser.add_option('-o', '--outbox', help=_('Path to maildir folder to store '
        'failed email messages in.'))
    parser.add_option('-f', '--fork', default=False, action='store_true',
        help=_('Fork and deliver message in background. '
        'If you use this option, you should also use --outbox '
        'to handle delivery failures.'))
    parser.add_option('-t', '--timeout', help=_('Timeout for connection'))
    parser.add_option('-v', '--verbose', default=0, action='count',
        help=_('Be more verbose'))
    return parser
def extract_email_address(raw):
    '''Return only the address part of a header value like "Name <a@b.c>".'''
    from email.utils import parseaddr
    _, address = parseaddr(raw)
    return address
def compose_mail(from_, to, text, subject=None, attachment=None,
        attachment_name=None):
    '''
    Build an email message with an optional attachment.

    *attachment* may be a filesystem path or a file-like object with a
    read() method; its MIME type is guessed from its (file) name. When no
    subject is given, 'no subject' is used.
    '''
    attachment_type = attachment_data = None
    if attachment is not None:
        try:
            from calibre import guess_type
            guess_type
        except ImportError:
            from mimetypes import guess_type
        if hasattr(attachment, 'read'):
            attachment_data = attachment.read()
        else:
            # BUGFIX: previously open(attachment, 'rb').read() leaked the
            # file handle; use a context manager so it is always closed.
            with open(attachment, 'rb') as f:
                attachment_data = f.read()
        attachment_type = guess_type(getattr(attachment, 'name', attachment))[0]
        if attachment_name is None:
            attachment_name = os.path.basename(getattr(attachment,
                'name', attachment))
    subject = subject if subject else 'no subject'
    return create_mail(from_, to, subject, text=text,
            attachment_data=attachment_data, attachment_type=attachment_type,
            attachment_name=attachment_name)
def main(args=sys.argv):
    '''
    Command line entry point: compose a message from the arguments (or read a
    complete message from STDIN), then deliver it, optionally saving failed
    messages to a maildir outbox and optionally forking to deliver in the
    background.
    '''
    parser = option_parser()
    opts, args = parser.parse_args(args)
    if len(args) > 1:
        # Compose mode: from, to and body text on the command line
        if len(args) < 4:
            print('You must specify the from address, to address and body text'
                ' on the command line')
            return 1
        msg = compose_mail(args[1], args[2], args[3], subject=opts.subject,
            attachment=opts.attachment)
        from_, to = args[1:3]
        eto = [extract_email_address(x.strip()) for x in to.split(',')]
        efrom = extract_email_address(from_)
    else:
        # Filter mode: a complete RFC 2822 message on STDIN; envelope
        # addresses come from its To/Cc/Bcc and From headers
        from email import message_from_bytes
        from email.utils import getaddresses
        msg = message_from_bytes(sys.stdin.buffer.read())
        tos = msg.get_all('to', [])
        ccs = msg.get_all('cc', []) + msg.get_all('bcc', [])
        eto = [x[1] for x in getaddresses(tos + ccs) if x[1]]
        if not eto:
            raise ValueError('Email from STDIN does not specify any recipients')
        efrom = getaddresses(msg.get_all('from', []))
        if not efrom:
            raise ValueError('Email from STDIN does not specify a sender')
        efrom = efrom[0][1]
    outbox = None
    if opts.outbox is not None:
        # BUGFIX: the expanded absolute path was previously computed and then
        # discarded (Maildir was opened on the raw opts.outbox); honour '~'
        # and relative paths when opening the maildir.
        outbox_path = os.path.abspath(os.path.expanduser(opts.outbox))
        from mailbox import Maildir
        outbox = Maildir(outbox_path, factory=None)
    if opts.fork:
        # Parent returns immediately; the child performs the delivery
        if os.fork() != 0:
            return 0
    try:
        sendmail(msg, efrom, eto, localhost=opts.localhost, verbose=opts.verbose,
                timeout=opts.timeout, relay=opts.relay, username=opts.username,
                password=opts.password, port=opts.port,
                encryption=opts.encryption_method, verify_server_cert=not opts.dont_verify_server_certificate, cafile=opts.cafile)
    except:
        if outbox is not None:
            outbox.add(msg)
            outbox.close()
            print('Delivery failed. Message saved to', opts.outbox)
        raise
    return 0
def config(defaults=None):
    '''Return the config object storing email delivery settings. When
    *defaults* is given, that string is parsed instead of the saved config.'''
    from calibre.utils.config import Config, StringConfig
    desc = _('Control email delivery')
    c = Config('smtp',desc) if defaults is None else StringConfig(defaults,desc)
    c.add_opt('from_')
    c.add_opt('accounts', default={})
    c.add_opt('subjects', default={})
    c.add_opt('aliases', default={})
    c.add_opt('tags', default={})
    c.add_opt('relay_host')
    c.add_opt('relay_port', default=25)
    c.add_opt('relay_username')
    c.add_opt('relay_password')
    c.add_opt('encryption', default='TLS', choices=['TLS', 'SSL'])
    return c
if __name__ == '__main__':
sys.exit(main())
| 13,932 | Python | .py | 314 | 36.719745 | 127 | 0.645252 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,178 | serialize.py | kovidgoyal_calibre/src/calibre/utils/serialize.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
# MIME type used when transporting msgpack encoded payloads
MSGPACK_MIME = 'application/x-msgpack'
# Marker key identifying dicts that are JSON encodings of special types
CANARY = 'jPoAv3zOyHvQ5JFNYg4hJ9'
def encoded(typ, data, ExtType):
    # Wrap *data* with the type code *typ*: a canary-marked dict for JSON,
    # or a msgpack ExtType (whose payload is itself msgpack encoded).
    if ExtType is None:
        return {CANARY: typ, 'v': data}
    return ExtType(typ, msgpack_dumps(data))
def create_encoder(for_json=False):
    '''Return a default-handler suitable for json.dumps()/msgpack.packb()
    that serializes datetimes, sets and calibre's serializable objects.'''
    from datetime import datetime
    ExtType = None
    if not for_json:
        import msgpack
        ExtType = msgpack.ExtType

    def encoder(obj):
        # Type codes 0-4 must match the positions in the `decoders` tuple
        if isinstance(obj, datetime):
            return encoded(0, str(obj.isoformat()), ExtType)
        if isinstance(obj, (set, frozenset)):
            return encoded(1, tuple(obj), ExtType)
        if getattr(obj, '__calibre_serializable__', False):
            from calibre.db.categories import Tag
            from calibre.ebooks.metadata.book.base import Metadata
            from calibre.library.field_metadata import FieldMetadata, fm_as_dict
            if isinstance(obj, Metadata):
                from calibre.ebooks.metadata.book.serialize import metadata_as_dict
                return encoded(
                    2, metadata_as_dict(obj, encode_cover_data=for_json), ExtType
                )
            elif isinstance(obj, FieldMetadata):
                return encoded(3, fm_as_dict(obj), ExtType)
            elif isinstance(obj, Tag):
                return encoded(4, obj.as_dict(), ExtType)
        if for_json and isinstance(obj, bytes):
            return obj.decode('utf-8')
        raise TypeError(f'Cannot serialize objects of type {type(obj)}')

    return encoder
def msgpack_dumps(obj):
    '''Serialize *obj* to msgpack bytes, handling calibre's special types.'''
    import msgpack
    return msgpack.packb(obj, default=create_encoder(), use_bin_type=True)
def json_dumps(data, **kw):
    '''Serialize *data* to UTF-8 encoded JSON bytes, handling calibre's
    special types (datetimes, sets, Metadata, ...).'''
    import json
    kw['default'] = create_encoder(for_json=True)
    kw['ensure_ascii'] = False
    ans = json.dumps(data, **kw)
    if not isinstance(ans, bytes):
        ans = ans.encode('utf-8')
    return ans
def decode_metadata(x, for_json):
    # Inverse of the Metadata encoding (type code 2). For JSON the cover
    # bytes were base64 encoded and must be decoded back.
    from calibre.ebooks.metadata.book.serialize import metadata_from_dict
    from polyglot.binary import from_base64_bytes
    obj = metadata_from_dict(x)
    if for_json and obj.cover_data and obj.cover_data[1]:
        obj.cover_data = obj.cover_data[0], from_base64_bytes(obj.cover_data[1])
    return obj
def decode_field_metadata(x, for_json):
    # Inverse of the FieldMetadata encoding (type code 3). *for_json* is
    # unused but kept so all entries in `decoders` share one signature.
    from calibre.library.field_metadata import fm_from_dict
    return fm_from_dict(x)
def decode_category_tag(x, for_json):
    # Inverse of the Tag encoding (type code 4); *for_json* is unused
    from calibre.db.categories import Tag
    return Tag.from_dict(x)
def decode_datetime(x, fj):
    # Inverse of the datetime encoding (type code 0): ISO-8601 text
    from calibre.utils.iso8601 import parse_iso8601
    return parse_iso8601(x, assume_utc=True)
# Decoder for each type code produced by encoded(); the tuple index IS the code
decoders = (
    decode_datetime,
    lambda x, fj: set(x),
    decode_metadata, decode_field_metadata, decode_category_tag
)
def json_decoder(obj):
    # object_hook for json.loads(): dicts carrying the canary key are
    # encodings of special types; everything else passes through unchanged.
    typ = obj.get(CANARY)
    if typ is None:
        return obj
    return decoders[typ](obj['v'], True)
def msgpack_decoder(code, data):
    # ext_hook for msgpack.unpackb(): the ExtType payload is itself msgpack
    return decoders[code](msgpack_loads(data), False)
def msgpack_loads(dump, use_list=True):
    '''Deserialize msgpack bytes produced by msgpack_dumps().'''
    # use_list controls whether msgpack arrays are unpacked as lists or tuples
    import msgpack
    return msgpack.unpackb(dump, ext_hook=msgpack_decoder, raw=False, use_list=use_list, strict_map_key=False)
def json_loads(data):
    '''Deserialize JSON produced by json_dumps(), restoring special types.'''
    import json
    return json.loads(data, object_hook=json_decoder)
def pickle_dumps(data):
    '''Pickle *data* using the highest protocol this Python supports.'''
    import pickle
    return pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
def pickle_loads(dump):
    '''Unpickle *dump*. WARNING: unpickling can execute arbitrary code; only
    use on trusted, locally produced data.'''
    from pickle import loads
    return loads(dump, encoding='utf-8')
| 3,542 | Python | .py | 88 | 33.590909 | 110 | 0.682164 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,179 | seven_zip.py | kovidgoyal_calibre/src/calibre/utils/seven_zip.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2021, Kovid Goyal <kovid at kovidgoyal.net>
import os
import re
from calibre.constants import iswindows
def open_archive(path_or_stream, mode='r'):
    '''Open a 7z archive (file path or seekable stream) for reading/writing.'''
    from py7zr import SevenZipFile
    return SevenZipFile(path_or_stream, mode=mode)
def names(path_or_stream):
    # All entry names in the archive, as a tuple
    with open_archive(path_or_stream) as zf:
        return tuple(zf.getnames())
def extract_member(path_or_stream, match=None, name=None):
    '''
    Return ``(name, data)`` for the first archive member whose name equals
    *name* or matches *match* (default: common image extensions). Returns
    None implicitly when nothing matches.
    '''
    if iswindows and name is not None:
        name = name.replace(os.sep, '/')
    if match is None:
        match = re.compile(r'\.(jpg|jpeg|gif|png)\s*$', re.I)

    def is_match(fname):
        # A member matches if it equals *name* OR satisfies the regex
        if iswindows:
            fname = fname.replace(os.sep, '/')
        return (name is not None and fname == name) or \
            (match is not None and match.search(fname) is not None)
    with open_archive(path_or_stream) as ar:
        all_names = list(filter(is_match, ar.getnames()))
        if all_names:
            return all_names[0] , ar.read(all_names[:1])[all_names[0]].read()
def extract_cover_image(stream):
    # Return (name, data) of the best candidate cover image in the archive,
    # or None. Entries are scanned in calibre's archive sort order.
    pos = stream.tell()
    from calibre.libunzip import name_ok, sort_key
    all_names = sorted(names(stream), key=sort_key)
    stream.seek(pos)  # names() consumed the stream; rewind before re-reading
    for name in all_names:
        if name_ok(name):
            return extract_member(stream, name=name, match=None)
def extract(path_or_stream, location):
    # Extract the entire archive into the directory *location*
    with open_archive(path_or_stream) as f:
        f.extract(location)
# Test {{{
def test_basic():
    '''Round-trip smoke test: write files (unicode names included) into a 7z
    archive, then verify the name list and the read-back data.'''
    from tempfile import TemporaryDirectory

    from calibre import CurrentDir
    tdata = {
        '1/sub-one': b'sub-one\n',
        '2/sub-two.txt': b'sub-two\n',
        'F\xfc\xdfe.txt': b'unicode\n',
        'max-compressed': b'max\n',
        'one.txt': b'one\n',
        'symlink': b'2/sub-two.txt',
        'uncompressed': b'uncompressed\n',
        '\u8bf6\u6bd4\u5c41.txt': b'chinese unicode\n'}

    def do_test():
        # Create the fixture files, archive them, then verify the archive
        for name, data in tdata.items():
            if '/' in name:
                os.makedirs(os.path.dirname(name), exist_ok=True)
            with open(name, 'wb') as f:
                f.write(data)
        with open_archive(os.path.join('a.7z'), mode='w') as zf:
            for name in tdata:
                zf.write(name)
        with open_archive(os.path.join('a.7z')) as zf:
            if set(zf.getnames()) != set(tdata):
                raise ValueError('names not equal')
            read_data = {name:af.read() for name, af in zf.readall().items()}
            if read_data != tdata:
                raise ValueError('data not equal')
        for name in tdata:
            if name not in '1 2 symlink'.split():
                with open(os.path.join(tdir, name), 'rb') as s:
                    if s.read() != tdata[name]:
                        raise ValueError('Did not extract %s properly' % name)
    with TemporaryDirectory('test-7z') as tdir, CurrentDir(tdir):
        do_test()
if __name__ == '__main__':
test_basic()
| 2,980 | Python | .py | 73 | 32.260274 | 78 | 0.596461 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,180 | xml_parse.py | kovidgoyal_calibre/src/calibre/utils/xml_parse.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
from lxml import etree
# resolving of SYSTEM entities is turned off as entities can cause
# reads of local files, for example:
# <!DOCTYPE foo [ <!ENTITY passwd SYSTEM "file:///etc/passwd" >]>
fs = etree.fromstring
class Resolver(etree.Resolver):
    # Resolve every external entity/DTD reference to an empty string,
    # preventing local file or network reads via crafted SYSTEM entities.
    def resolve(self, url, id, context):
        return self.resolve_string('', context)
def create_parser(recover, encoding=None):
    # no_network blocks remote fetches; the Resolver blanks out everything
    # else (e.g. file:// SYSTEM entities)
    parser = etree.XMLParser(recover=recover, no_network=True, encoding=encoding)
    parser.resolvers.add(Resolver())
    return parser
def safe_xml_fromstring(string_or_bytes, recover=True):
    '''Parse XML without resolving external entities, making it safe for
    untrusted input. With recover=True malformed markup is parsed
    best-effort before falling back to a strict parse.'''
    ans = fs(string_or_bytes, parser=create_parser(recover))
    if ans is None and recover:
        # this happens on windows where if string_or_bytes is unicode and
        # contains non-BMP chars lxml chokes
        if not isinstance(string_or_bytes, bytes):
            string_or_bytes = string_or_bytes.encode('utf-8')
        ans = fs(string_or_bytes, parser=create_parser(True, encoding='utf-8'))
        if ans is not None:
            return ans
        ans = fs(string_or_bytes, parser=create_parser(False))
    return ans
def unsafe_xml_fromstring(string_or_bytes):
    # DANGER: resolve_entities=True means crafted input can read local files
    # via SYSTEM entities. Only use on fully trusted XML.
    parser = etree.XMLParser(resolve_entities=True)
    return fs(string_or_bytes, parser=parser)
def find_tests():
    '''Return the unittest suite verifying that safe_xml_fromstring() blocks
    external entity resolution while unsafe_xml_fromstring() permits it.'''
    import os
    import tempfile
    import unittest

    from calibre.constants import iswindows

    class TestXMLParse(unittest.TestCase):

        def setUp(self):
            # A real on-disk file an entity could try to read
            with tempfile.NamedTemporaryFile(delete=False) as tf:
                tf.write(b'external')
                self.temp_file = os.path.abspath(tf.name)
            if iswindows:
                from calibre_extensions.winutil import get_long_path_name
                self.temp_file = get_long_path_name(self.temp_file)

        def tearDown(self):
            os.remove(self.temp_file)

        def test_safe_xml_fromstring(self):
            templ = '''<!DOCTYPE foo [ <!ENTITY e {id} "{val}" > ]><r>&e;</r>'''
            external = 'file:///' + self.temp_file.replace(os.sep, '/')

            def t(tid, val, expected, safe=True):
                raw = templ.format(id=tid, val=val)
                err = None
                try:
                    root = safe_xml_fromstring(raw) if safe else unsafe_xml_fromstring(raw)
                except Exception as e:
                    err = str(e)
                    root = None
                got = getattr(root, 'text', object())
                self.assertEqual(got, expected, f'Unexpected result parsing: {raw!r}, got: {got!r} expected: {expected!r} with XML parser error: {err}')

            # The unsafe parser really does read the local file...
            t('SYSTEM', external, 'external', safe=False)
            # ...while the safe parser resolves SYSTEM/PUBLIC entities to nothing
            for eid, val, expected in (
                ('', 'normal entity', 'normal entity'),
                ('', external, external),
                ('SYSTEM', external, None),
                ('SYSTEM', 'http://example.com', None),
                ('PUBLIC', external, None),
                ('PUBLIC', 'http://example.com', None),
            ):
                t(eid, val, expected)

        def test_lxml_unicode_parsing(self):
            from calibre.ebooks.chardet import xml_to_unicode
            with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'unicode-test.opf'), 'rb') as f:
                raw = f.read()
            text = xml_to_unicode(raw, strip_encoding_pats=True, resolve_entities=True, assume_utf8=True)[0]
            self.assertIsNotNone(safe_xml_fromstring(text))
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestXMLParse)
if __name__ == '__main__':
from calibre.utils.run_tests import run_tests
run_tests(find_tests)
| 3,758 | Python | .py | 77 | 38.402597 | 152 | 0.611826 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,181 | short_uuid.py | kovidgoyal_calibre/src/calibre/utils/short_uuid.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
'''
Generate UUID encoded using a user specified alphabet.
'''
import math
import string
import uuid as _uuid
def num_to_string(number, alphabet, alphabet_len, pad_to_length=None):
    '''Encode a non-negative integer as a string of symbols from *alphabet*,
    least-significant digit first. When *pad_to_length* is given and the
    result is shorter, it is padded (on the most-significant side) with
    alphabet[0]. Negative numbers are treated as zero.'''
    digits = []
    remaining = max(0, number)
    while remaining:
        remaining, idx = divmod(remaining, alphabet_len)
        digits.append(alphabet[idx])
    if pad_to_length is not None and pad_to_length > len(digits):
        padding = alphabet[0] * (pad_to_length - len(digits))
        digits.append(padding)
    return ''.join(digits)
def string_to_num(string, alphabet_map, alphabet_len):
    '''Inverse of num_to_string(): interpret *string* as base-*alphabet_len*
    digits, least-significant first, using *alphabet_map* (symbol -> value).'''
    total = 0
    for position, char in enumerate(string):
        total += alphabet_map[char] * alphabet_len ** position
    return total
class ShortUUID:
    '''Generate and decode UUIDs encoded with a caller supplied alphabet.'''

    def __init__(self, alphabet=None):
        # Zero and one are omitted from the default alphabet since they are
        # easily confused with the letters O and I in some fonts; dropping
        # them does not change uuid_pad_len.
        source = alphabet or (string.digits + string.ascii_letters)[2:]
        self.alphabet = tuple(sorted(str(source)))
        self.alphabet_len = len(self.alphabet)
        self.alphabet_map = {symbol: idx for idx, symbol in enumerate(self.alphabet)}
        # Number of symbols needed to represent any 128-bit UUID value.
        self.uuid_pad_len = int(math.ceil(math.log(1 << 128, self.alphabet_len)))

    def uuid4(self, pad_to_length=None):
        'Random (version 4) UUID encoded with this alphabet.'
        pad = self.uuid_pad_len if pad_to_length is None else pad_to_length
        return num_to_string(_uuid.uuid4().int, self.alphabet, self.alphabet_len, pad)

    def uuid5(self, namespace, name, pad_to_length=None):
        'Deterministic (version 5) UUID for namespace/name, encoded with this alphabet.'
        pad = self.uuid_pad_len if pad_to_length is None else pad_to_length
        return num_to_string(_uuid.uuid5(namespace, name).int, self.alphabet, self.alphabet_len, pad)

    def decode(self, encoded):
        'Recover the uuid.UUID object that *encoded* represents.'
        return _uuid.UUID(int=string_to_num(encoded, self.alphabet_map, self.alphabet_len))
# Module-level convenience instance using the default alphabet, so callers
# can simply use short_uuid.uuid4() / uuid5() / decode() without constructing
# their own ShortUUID.
_global_instance = ShortUUID()
uuid4 = _global_instance.uuid4
uuid5 = _global_instance.uuid5
decode = _global_instance.decode
| 2,029 | Python | .py | 45 | 39.311111 | 111 | 0.681564 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,182 | shm.py | kovidgoyal_calibre/src/calibre/utils/shm.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2022, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import mmap
import os
import secrets
import stat
import struct
from typing import Optional, Union
from calibre.constants import ismacos, iswindows
SHM_NAME_MAX = 30 if ismacos else 254
if iswindows:
import _winapi
else:
import _posixshmem
def make_filename(prefix: str) -> str:
    'Create a random filename for the shared memory object.'
    # POSIX systems (FreeBSD in particular) require SHM names to start with /
    if not iswindows and not prefix.startswith('/'):
        prefix = '/' + prefix
    plen = len(prefix.encode('utf-8'))
    # Aim for a largish random suffix (up to 64 hex chars) to make double
    # unlink safe, while respecting the platform limit on SHM name length.
    available = min(plen + 64, SHM_NAME_MAX) - plen
    if available < 2:
        raise OSError(errno.ENAMETOOLONG, f'SHM filename prefix {prefix} is too long')
    return prefix + secrets.token_hex(available // 2)
class SharedMemory:
    '''
    Create or access randomly named shared memory. To create call with empty name and specific size.
    To access call with name only.
    WARNING: The actual size of the shared memory may be larger than the requested size.
    '''
    # Instance state; -1/''/None mark "closed" so close() is idempotent.
    _fd: int = -1
    _name: str = ''
    _mmap: Optional[mmap.mmap] = None
    _size: int = 0
    # Length prefix used by write_data_with_size()/read_data_with_size():
    # a network byte order unsigned 32-bit integer.
    size_fmt = '!I'
    num_bytes_for_size = struct.calcsize(size_fmt)
    def __init__(
        self, name: str = '', size: int = 0, readonly: bool = False,
        mode: int = stat.S_IREAD | stat.S_IWRITE,
        prefix: str = 'calibre-'
    ):
        if size < 0:
            raise TypeError("'size' must be a non-negative integer")
        if size and name:
            raise TypeError('Cannot specify both name and size')
        if not name:
            # Creating: fail if a segment with the generated name exists.
            flags = os.O_CREAT | os.O_EXCL
            if not size:
                raise TypeError("'size' must be > 0")
        else:
            flags = 0
        flags |= os.O_RDONLY if readonly else os.O_RDWR
        access = mmap.ACCESS_READ if readonly else mmap.ACCESS_WRITE
        create = not name
        # Retry with fresh random names until an unused one is found.
        tries = 30
        while not name and tries > 0:
            tries -= 1
            q = make_filename(prefix)
            if iswindows:
                h_map = _winapi.CreateFileMapping(
                    _winapi.INVALID_HANDLE_VALUE,
                    _winapi.NULL,
                    _winapi.PAGE_READONLY if readonly else _winapi.PAGE_READWRITE,
                    (size >> 32) & 0xFFFFFFFF,
                    size & 0xFFFFFFFF,
                    q
                )
                try:
                    # CreateFileMapping succeeds even when the mapping already
                    # exists; detect that via GetLastError and retry.
                    last_error_code = _winapi.GetLastError()
                    if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
                        continue
                    self._mmap = mmap.mmap(-1, size, tagname=q, access=access)
                    name = q
                finally:
                    _winapi.CloseHandle(h_map)
            else:
                try:
                    self._fd = _posixshmem.shm_open(q, flags, mode=mode)
                    name = q
                except FileExistsError:
                    continue
        if tries <= 0:
            raise OSError(f'Failed to create a uniquely named SHM file, try shortening the prefix from: {prefix}')
        self._name = name
        if not create and iswindows:
            # Attaching on Windows: map an existing named mapping read-only
            # and query its actual size.
            h_map = _winapi.OpenFileMapping(
                _winapi.FILE_MAP_READ,
                False,
                name
            )
            try:
                p_buf = _winapi.MapViewOfFile(
                    h_map,
                    _winapi.FILE_MAP_READ,
                    0,
                    0,
                    0
                )
            finally:
                _winapi.CloseHandle(h_map)
            size = _winapi.VirtualQuerySize(p_buf)
            self._mmap = mmap.mmap(-1, size, tagname=name)
        if not iswindows:
            if not create:
                self._fd = _posixshmem.shm_open(name, flags, mode)
            try:
                if flags & os.O_CREAT and size:
                    # Size the newly created segment; attaching readers use
                    # whatever size fstat() reports.
                    os.ftruncate(self._fd, size)
                self.stats = os.fstat(self._fd)
                size = self.stats.st_size
                self._mmap = mmap.mmap(self._fd, size, access=access)
            except OSError:
                # Don't leak the segment if mapping it failed.
                self.unlink()
                raise
        self._size = size
    @property
    def memory_address(self) -> int:
        # NOTE(review): PyObject_AsReadBuffer is deprecated and removed in
        # Python 3.12 — confirm callers of this property on newer Pythons.
        import ctypes
        obj = ctypes.py_object(self.mmap)
        address = ctypes.c_void_p()
        length = ctypes.c_ssize_t()
        ctypes.pythonapi.PyObject_AsReadBuffer(obj, ctypes.byref(address), ctypes.byref(length))
        return address.value
    def read(self, sz: int = 0) -> bytes:
        # sz <= 0 means "read up to the full segment size" from the current
        # position.
        if sz <= 0:
            sz = self.size
        return self.mmap.read(sz)
    def write(self, data: bytes) -> None:
        self.mmap.write(data)
    def tell(self) -> int:
        return self.mmap.tell()
    def seek(self, pos: int, whence: int = os.SEEK_SET) -> None:
        self.mmap.seek(pos, whence)
    def flush(self) -> None:
        self.mmap.flush()
    def write_data_with_size(self, data: Union[str, bytes]) -> None:
        # Write a length-prefixed record; str is encoded as UTF-8.
        if isinstance(data, str):
            data = data.encode('utf-8')
        sz = struct.pack(self.size_fmt, len(data))
        self.write(sz)
        self.write(data)
    def read_data_with_size(self) -> bytes:
        # Inverse of write_data_with_size(); returns the raw bytes.
        sz = struct.unpack(self.size_fmt, self.read(self.num_bytes_for_size))[0]
        return self.read(sz)
    def __del__(self) -> None:
        try:
            self.close()
        except OSError:
            pass
    def __enter__(self) -> 'SharedMemory':
        return self
    def __exit__(self, *a: object) -> None:
        self.close()
    @property
    def size(self) -> int:
        return self._size
    @property
    def name(self) -> str:
        return self._name
    @property
    def mmap(self) -> mmap.mmap:
        ans = self._mmap
        if ans is None:
            raise RuntimeError('Cannot access the mmap of a closed shared memory object')
        return ans
    def fileno(self) -> int:
        return self._fd
    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.name!r}, size={self.size})'
    def close(self) -> None:
        """Closes access to the shared memory from this instance but does
        not destroy the shared memory block."""
        if self._mmap is not None:
            self._mmap.close()
            self._mmap = None
        if self._fd > -1:
            os.close(self._fd)
            self._fd = -1
        # NOTE(review): close() also unlinks, so the first instance to close
        # destroys the (POSIX) segment name for everyone — confirm intended.
        self.unlink()
    def unlink(self) -> None:
        """Requests that the underlying shared memory block be destroyed.
        In order to ensure proper cleanup of resources, unlink should be
        called once (and only once) across all processes which have access
        to the shared memory block."""
        if self._name:
            if not iswindows:
                # On Windows the mapping disappears with its last handle, so
                # only POSIX needs an explicit unlink.
                try:
                    _posixshmem.shm_unlink(self._name)
                except FileNotFoundError:
                    pass
            self._name = ''
def find_tests():
    '''Return a unittest suite exercising SharedMemory create/attach/read
    behavior and name cleanup on close.'''
    import unittest
    class TestSHM(unittest.TestCase):
        def test_shm(self):
            with SharedMemory(size=64) as writer:
                payload = b'test'
                writer.write_data_with_size(payload)
                # Position is just past the length prefix plus the payload.
                self.assertEqual(writer.tell(), writer.num_bytes_for_size + len(payload))
                writer.flush()
                with SharedMemory(writer.name, readonly=True) as reader:
                    self.assertEqual(reader.read_data_with_size(), payload)
                    writer.write(b'ABCD')
                    writer.flush()
                    self.assertEqual(reader.read(4), b'ABCD')
                self.assertTrue(writer.name)
            # Closing the creator unlinks the segment, clearing its name.
            self.assertFalse(writer.name)
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestSHM)
| 7,938 | Python | .py | 214 | 26.219626 | 114 | 0.547678 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,183 | zipfile.py | kovidgoyal_calibre/src/calibre/utils/zipfile.py | """
Read and write ZIP files. Modified by Kovid Goyal to support replacing files in
a zip archive, detecting filename encoding, updating zip files, etc.
"""
import binascii
import io
import os
import re
import shutil
import stat
import struct
import sys
import time
from contextlib import closing
from calibre import sanitize_file_name
from calibre.constants import filesystem_encoding
from calibre.ebooks.chardet import detect
from calibre.ptempfile import SpooledTemporaryFile
from polyglot.builtins import as_bytes, string_or_bytes
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
"ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile"]
def decode_zip_internal_file_name(fname, flags):
    '''Decode a raw member name from a ZIP record: bit 0x800 of the general
    purpose flags marks a UTF-8 name, otherwise the ZIP-spec default CP437
    applies. Undecodable bytes are replaced instead of raising.'''
    if flags & 0x800:
        return fname.decode('utf-8', 'replace')
    return fname.decode('cp437', 'replace')
class BadZipfile(Exception):
    'Raised when a file is not a valid ZIP archive or its records are corrupt.'
    pass
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile, the zipfile requires ZIP64 extensions
    and those extensions are disabled.
    """
error = BadZipfile      # The exception raised by this module
# Thresholds beyond which the ZIP64 extension records become necessary.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = (1 << 16) - 1
ZIP_MAX_COMMENT = (1 << 16) - 1
# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported
# For a list see: http://www.winzip.com/wz54.htm
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
# http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = b"PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)
# Indices into the unpacked end-of-central-directory tuple.
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = b"PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = b"PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = b"PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = b"PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def decode_arcname(name):
    '''Best-effort decode of an archive member name to str: try UTF-8 first,
    then whatever encoding chardet guesses, and finally UTF-8 with
    replacement characters so this never raises.'''
    if isinstance(name, str):
        return name
    try:
        return name.decode('utf-8')
    except Exception:
        pass
    guessed = detect(name)['encoding']
    try:
        return name.decode(guessed)
    except Exception:
        return name.decode('utf-8', 'replace')
# Added by Kovid to reset timestamp to default if it overflows the DOS
# limits
def fixtimevar(val):
    '''Clamp a DOS date/time field to 0 when it falls outside the unsigned
    16-bit range the ZIP header format can store.'''
    return val if 0 <= val <= 0xffff else 0
def _check_zipfile(fp):
    '''True if fp contains a readable end-of-central-directory record, i.e.
    it looks like a ZIP archive.'''
    try:
        endrec = _EndRecData(fp)
    except OSError:
        return False
    return bool(endrec)
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.
    The filename argument may be a path or a file-like object; any OSError
    while probing simply yields False.
    """
    try:
        if hasattr(filename, 'read'):
            return _check_zipfile(filename)
        with open(filename, 'rb') as fp:
            return _check_zipfile(fp)
    except OSError:
        return False
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec
    *offset* is the (negative) position of the plain end-of-central-directory
    record relative to the end of the file; endrec is returned unchanged when
    no ZIP64 structures are present.
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except OSError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec
    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No ZIP64 locator signature: plain (non-ZIP64) archive.
        return endrec
    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")
    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
        dircount, dircount2, dirsize, diroffset = \
        struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec
    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.
    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""
    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()
    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except OSError:
        # File too small to even hold an end-of-central-directory record.
        return None
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == b"\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)
        # Append a blank comment and record start offset
        endrec.append(b"")
        endrec.append(filesize - sizeEndCentDir)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)
    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment. Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long. It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        comment = data[start+sizeEndCentDir:]
        # check that comment length is correct
        # Kovid: Added == 0 check as some zip files apparently dont set this
        if endrec[_ECD_COMMENT_SIZE] == 0 or endrec[_ECD_COMMENT_SIZE] == len(comment):
            # Append the archive comment and start offset
            endrec.append(comment)
            endrec.append(maxCommentStart + start)
            # Try to read the "Zip64 end of central directory" structure
            return _EndRecData64(fpin, maxCommentStart + start - filesize,
                                 endrec)
    # Unable to find a valid end of central directory structure
    return
class ZipInfo :
    """Class with attributes describing each file in the ZIP archive."""
    __slots__ = (
        'orig_filename',
        'filename',
        'date_time',
        'compress_type',
        'comment',
        'extra',
        'create_system',
        'create_version',
        'extract_version',
        'reserved',
        'flag_bits',
        'volume',
        'internal_attr',
        'external_attr',
        'header_offset',
        'CRC',
        'compress_size',
        'file_size',
        '_raw_time',
        'file_offset',
    )
    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.orig_filename = filename   # Original file name in archive
        # Terminate the file name at the first null byte. Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(b'\0' if isinstance(filename, bytes) else '\0')
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != '/':
            os_sep, sep = os.sep, '/'
            if isinstance(filename, bytes):
                os_sep, sep = as_bytes(os_sep), b'/'
            if os_sep in filename:
                filename = filename.replace(os_sep, sep)
        self.filename = filename        # Normalized file name
        self.date_time = date_time      # year, month, day, hour, min, sec
        # Standard values:
        self.compress_type = ZIP_STORED  # Type of compression for the file
        self.comment = b""              # Comment for each file
        self.extra = b""                # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0      # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3      # System which created ZIP archive
        self.create_version = 20        # Version which created ZIP archive
        self.extract_version = 20       # Version needed to extract archive
        self.reserved = 0               # Must be zero
        self.flag_bits = 0              # ZIP flag bits
        self.volume = 0                 # Volume number of file header
        self.internal_attr = 0          # Internal attributes
        self.external_attr = 0          # External file attributes
        self.file_offset = 0
        # Other attributes are set by class ZipFile:
        # header_offset         Byte offset to the file header
        # CRC                   CRC-32 of the uncompressed file
        # compress_size         Size of the compressed file
        # file_size             Size of the uncompressed file
    def FileHeader(self):
        """Return the per-file header as a string."""
        # Pack modification time into the DOS 16-bit date/time fields.
        dt = self.date_time
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size
        extra = self.extra
        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                    1, struct.calcsize(fmt)-4, file_size, compress_size)
            # 0xffffffff in the fixed fields signals "see the ZIP64 extra".
            file_size = 0xffffffff
            compress_size = 0xffffffff
            self.extract_version = max(45, self.extract_version)
            self.create_version = max(45, self.extract_version)
        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, flag_bits,
                 self.compress_type, fixtimevar(dostime), fixtimevar(dosdate), CRC,
                 compress_size, file_size,
                 len(filename), len(extra))
        return header + filename + extra
    def _encodeFilenameFlags(self):
        # str names are stored as UTF-8 with the 0x800 flag set; bytes names
        # are stored verbatim.
        if isinstance(self.filename, str):
            return self.filename.encode('utf-8'), self.flag_bits | 0x800
        else:
            return self.filename, self.flag_bits
    def _decodeExtra(self):
        # Try to decode the extra field.
        extra = self.extra
        unpack = struct.unpack
        while extra:
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                # Header id 1 is the ZIP64 record; it carries 0-3 64-bit
                # counts, present only for fields that overflowed 32 bits.
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError("Corrupt extra field %s"%(ln,))
                idx = 0
                # ZIP64 extension (large files and/or large archives)
                if self.file_size in (0xffffffffffffffff, 0xffffffff):
                    self.file_size = counts[idx]
                    idx += 1
                if self.compress_size == 0xFFFFFFFF:
                    self.compress_size = counts[idx]
                    idx += 1
                if self.header_offset == 0xffffffff:
                    self.header_offset = counts[idx]
                    idx+=1
            extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ch) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32(((self.key1 >> 24) & 255), self.key2)
def __call__(self, c):
"""Decrypt a single byte."""
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
self._UpdateKeys(c)
return c
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
    Is returned by ZipFile.open().
    """
    # Max size supported by decompressor.
    # BUGFIX: this was ``1 << 31 - 1`` which, due to operator precedence,
    # is actually 1 << 30; the intended value is 2**31 - 1.
    MAX_N = (1 << 31) - 1
    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096
    # Search for universal newlines or line chunks.
    PATTERN = re.compile(br'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
    def __init__(self, fileobj, mode, zipinfo, decrypter=None):
        '''
        :param fileobj: file-like object positioned at the start of this
            member's (possibly compressed/encrypted) data
        :param mode: file mode; 'U' enables universal newline translation
        :param zipinfo: ZipInfo-like object describing the member
        :param decrypter: optional callable decrypting one byte at a time
        '''
        self._fileobj = fileobj
        self._decrypter = decrypter
        self._orig_pos = fileobj.tell()
        self._compress_type = zipinfo.compress_type
        self._compress_size = zipinfo.compress_size
        self._compress_left = zipinfo.compress_size
        if self._compress_type == ZIP_DEFLATED:
            # Negative wbits: raw deflate stream (no zlib header/trailer)
            self._decompressor = zlib.decompressobj(-15)
        self._unconsumed = b''
        self._readbuffer = b''
        self._offset = 0
        self._universal = 'U' in mode
        self.newlines = None
        # Adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information.
        if self._decrypter is not None:
            self._compress_left -= 12
        self.mode = mode
        self.name = zipinfo.filename
        if hasattr(zipinfo, 'CRC'):
            self._expected_crc = zipinfo.CRC
            self._running_crc = crc32(b'') & 0xffffffff
        else:
            # No reference CRC available, so integrity cannot be verified.
            self._expected_crc = None
    def readline(self, limit=-1):
        """Read and return a line from the stream.
        If limit is specified, at most limit bytes will be read.
        """
        if not self._universal and limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find(b'\n', self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset: i]
                self._offset = i
                return line
        if not self._universal:
            return io.BufferedIOBase.readline(self, limit)
        line = b''
        while limit < 0 or len(line) < limit:
            readahead = self.peek(2)
            if not readahead:
                return line
            #
            # Search for universal newlines or line chunks.
            #
            # The pattern returns either a line chunk or a newline, but not
            # both. Combined with peek(2), we are assured that the sequence
            # '\r\n' is always retrieved completely and never split into
            # separate newlines - '\r', '\n' due to coincidental readaheads.
            #
            match = self.PATTERN.search(readahead)
            newline = match.group('newline')
            if newline is not None:
                if self.newlines is None:
                    self.newlines = []
                if newline not in self.newlines:
                    self.newlines.append(newline)
                self._offset += len(newline)
                return line + b'\n'
            chunk = match.group('chunk')
            if limit >= 0:
                chunk = chunk[: limit - len(line)]
            self._offset += len(chunk)
            line += chunk
        return line
    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            # Rewind: keep the freshly read bytes in the buffer.
            self._offset -= len(chunk)
        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]
    def readable(self):
        return True
    def read(self, n=-1):
        """Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached..
        """
        buf = b''
        if n is None:
            n = -1
        while True:
            if n < 0:
                data = self.read1(n)
            elif n > len(buf):
                data = self.read1(n - len(buf))
            else:
                return buf
            if len(data) == 0:
                return buf
            buf += data
    def _update_crc(self, newdata, eof):
        # Update the CRC using the given data.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
        # Check the CRC if we're at the end of the file
        if eof and self._running_crc != self._expected_crc:
            raise BadZipfile("Bad CRC-32 for file %r" % self.name)
    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""
        # Simplify algorithm (branching) by transforming negative n to large n.
        # BUGFIX: check for None *before* comparing, since None < 0 raises
        # TypeError on Python 3.
        if n is None or n < 0:
            n = self.MAX_N
        # Bytes available in read buffer.
        len_readbuffer = len(self._readbuffer) - self._offset
        # Read from file.
        if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
            nbytes = n - len_readbuffer - len(self._unconsumed)
            nbytes = max(nbytes, self.MIN_READ_SIZE)
            nbytes = min(nbytes, self._compress_left)
            data = self._fileobj.read(nbytes)
            self._compress_left -= len(data)
            if data and self._decrypter is not None:
                # BUGFIX: the previous code wrapped this in b''.join(...),
                # which fails on Python 3 because iterating bytes yields
                # ints; bytes(bytearray(...)) is the decrypted stream.
                data = bytes(bytearray(map(self._decrypter, bytearray(data))))
            if self._compress_type == ZIP_STORED:
                self._update_crc(data, eof=(self._compress_left==0))
                self._readbuffer = self._readbuffer[self._offset:] + data
                self._offset = 0
            else:
                # Prepare deflated bytes for decompression.
                self._unconsumed += data
        # Handle unconsumed data.
        if (len(self._unconsumed) > 0 and n > len_readbuffer and
                self._compress_type == ZIP_DEFLATED):
            data = self._decompressor.decompress(
                self._unconsumed,
                max(n - len_readbuffer, self.MIN_READ_SIZE)
            )
            self._unconsumed = self._decompressor.unconsumed_tail
            eof = len(self._unconsumed) == 0 and self._compress_left == 0
            if eof:
                data += self._decompressor.flush()
            self._update_crc(data, eof=eof)
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0
        # Read from buffer.
        data = self._readbuffer[self._offset: self._offset + n]
        self._offset += len(data)
        return data
    def read_raw(self):
        '''Return the member's raw (still compressed/encrypted) bytes without
        disturbing the current read position.'''
        pos = self._fileobj.tell()
        self._fileobj.seek(self._orig_pos)
        bytes_to_read = self._compress_size
        if self._decrypter is not None:
            # Skip the 12-byte encryption header already consumed by open().
            bytes_to_read -= 12
        raw = b''
        if bytes_to_read > 0:
            raw = self._fileobj.read(bytes_to_read)
        self._fileobj.seek(pos)
        return raw
class ZipFile:
""" Class with methods to open, read, write, close, list and update zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
    def __init__(self, file, mode="r", compression=ZIP_DEFLATED, allowZip64=True):
        """Open the ZIP file with mode read "r", write "w" or append "a".
        *file* may be a path or a file-like object; when a path is given the
        file is opened and closed by this instance. In append mode a missing
        or non-zip file is transparently treated as a new archive."""
        if mode not in ("r", "w", "a"):
            raise RuntimeError('ZipFile() requires mode "r", "w", or "a" not %s'%mode)
        if compression == ZIP_STORED:
            pass
        elif compression == ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError(
                    "Compression requires the (missing) zlib module")
        else:
            raise RuntimeError("The compression method %s is not supported" % compression)
        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}  # Find file info given name
        self.filelist = []  # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        # key is mode with any 'b' stripped, reduced to its first character.
        self.mode = key = mode.replace('b', '')[0]
        self.pwd = None
        self.comment = b''
        # Check if we were passed a file-like object
        if isinstance(file, string_or_bytes):
            self._filePassed = 0
            self.filename = file
            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
            try:
                self.fp = open(file, modeDict[mode])
            except OSError:
                # Appending to a non-existent file: fall back to write mode.
                if mode == 'a':
                    mode = key = 'w'
                    self.fp = open(file, modeDict[mode])
                else:
                    raise
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)
        if key == 'r':
            self._GetContents()
        elif key == 'w':
            # set the modified flag so central directory gets written
            # even if no files are added to the archive
            self._didModify = True
        elif key == 'a':
            try:
                # See if file is a zip file
                self._RealGetContents()
                self._calculate_file_offsets()
                # seek to start of directory and overwrite
                self.fp.seek(self.start_dir, 0)
            except BadZipfile:
                # file is not a zip file, just append
                self.fp.seek(0, 2)
                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
        else:
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise RuntimeError('Mode must be "r", "w" or "a"')
    def __enter__(self):
        # Context-manager support: ``with ZipFile(...) as zf: ...``
        return self
    def __exit__(self, type, value, traceback):
        # Always close on exit, even when an exception is propagating.
        self.close()
def _GetContents(self):
"""Read the directory, making sure we close the file if the format
is bad."""
try:
self._RealGetContents()
except BadZipfile:
if not self._filePassed:
self.fp.close()
self.fp = None
raise
    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except OSError:
            raise BadZipfile("File is not a zip file")
        if not endrec:
            raise BadZipfile("File is not a zip file")
        if self.debug > 1:
            print(endrec)
        size_cd = endrec[_ECD_SIZE]  # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]  # offset of central directory
        self.comment = endrec[_ECD_COMMENT]  # archive comment
        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
        if self.debug > 2:
            inferred = concat + offset_cd
            print("given, inferred, offset", offset_cd, inferred, concat)
        # self.start_dir: Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        # Slurp the whole central directory and parse it from memory.
        data = fp.read(size_cd)
        fp = io.BytesIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if centdir[0:4] != stringCentralDir:
                raise BadZipfile("Bad magic number for central directory")
            centdir = struct.unpack(structCentralDir, centdir)
            if self.debug > 2:
                print(centdir)
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            flags = centdir[5]
            filename = decode_zip_internal_file_name(filename, flags)
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
                x.flag_bits, x.compress_type, t, d,
                x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ((d>>9)+1980, (d>>5)&0xF, d&0x1F,
                           t>>11, (t>>5)&0x3F, (t&0x1F) * 2)
            # _decodeExtra() may overwrite the 32-bit sizes/offset with
            # ZIP64 values from the extra field.
            x._decodeExtra()
            x.header_offset = x.header_offset + concat
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x
            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] +
                     centdir[_CD_EXTRA_FIELD_LENGTH] +
                     centdir[_CD_COMMENT_LENGTH])
        if self.debug > 2:
            print("total", total)
    def _calculate_file_offsets(self):
        """Compute and store ``file_offset`` (start of the compressed data)
        for every member by reading each member's local file header.

        Raises BadZipfile if a local header has a bad magic number and
        RuntimeError if a local header's filename disagrees with the
        central directory.
        """
        for zip_info in self.filelist:
            self.fp.seek(zip_info.header_offset, 0)
            fheader = self.fp.read(30)
            if fheader[0:4] != stringFileHeader:
                raise BadZipfile("Bad magic number for file header")
            fheader = struct.unpack(structFileHeader, fheader)
            # file_offset is computed here, since the extra field for
            # the central directory and for the local file header
            # refer to different fields, and they can have different
            # lengths
            file_offset = (zip_info.header_offset + 30 +
                           fheader[_FH_FILENAME_LENGTH] +
                           fheader[_FH_EXTRA_FIELD_LENGTH])
            fname = self.fp.read(fheader[_FH_FILENAME_LENGTH])
            fname = decode_zip_internal_file_name(fname, zip_info.flag_bits)
            if fname != zip_info.orig_filename:
                raise RuntimeError(
                    'File name in directory "{}" and header "{}" differ.'.format(
                        zip_info.orig_filename, fname))
            zip_info.file_offset = file_offset
def replace(self, filename, arcname=None, compress_type=None):
"""Delete arcname, and put the bytes from filename into the
archive under the name arcname."""
deleteName = arcname
if deleteName is None:
deleteName = filename
self.delete(deleteName)
self.write(filename, arcname, compress_type)
def replacestr(self, zinfo, byts):
"""Delete zinfo.filename, and write a new file into the archive. The
contents is the string 'bytes'."""
self.delete(zinfo.filename)
self.writestr(zinfo, byts)
    def delete(self, name):
        """Delete the file from the archive. If it appears multiple
        times only the first instance will be deleted.

        The member's local header and compressed data are physically
        removed by shifting the remainder of the archive down and
        truncating, so this rewrites the underlying file in place.
        Requires ``file_offset`` to have been computed (see
        :meth:`_calculate_file_offsets`).

        NOTE(review): the entry is removed from ``self.filelist`` but not
        from ``self.NameToInfo``; ``getinfo(name)`` will still return the
        stale ZipInfo until something re-adds the name — confirm callers
        (e.g. :meth:`replace`) always re-write the name afterwards.
        """
        for i in range(0, len(self.filelist)):
            if self.filelist[i].filename == name:
                if self.debug:
                    print("Removing", name)
                deleted_offset = self.filelist[i].header_offset
                # Span of local header + file data for this member
                deleted_size = (self.filelist[i].file_offset - self.filelist[i].header_offset) + self.filelist[i].compress_size
                # Size of this member's central directory record
                zinfo_size = struct.calcsize(structCentralDir) + len(self.filelist[i].filename) + len(self.filelist[i].extra)
                # Remove the file's data from the archive.
                current_offset = self.fp.tell()
                self.fp.seek(0, 2)
                archive_size = self.fp.tell()
                self.fp.seek(deleted_offset + deleted_size)
                buf = self.fp.read()
                self.fp.seek(deleted_offset)
                self.fp.write(buf)
                self.fp.truncate(archive_size - deleted_size - zinfo_size)
                # Restore the previous file position, adjusted for the
                # bytes that were just removed before it.
                if current_offset > deleted_offset + deleted_size:
                    current_offset -= deleted_size
                elif current_offset > deleted_offset:
                    current_offset = deleted_offset
                self.fp.seek(current_offset, 0)
                # Remove file from central directory.
                del self.filelist[i]
                # Adjust the remaining offsets in the central directory.
                for j in range(i, len(self.filelist)):
                    if self.filelist[j].header_offset > deleted_offset:
                        self.filelist[j].header_offset -= deleted_size
                    if self.filelist[j].file_offset > deleted_offset:
                        self.filelist[j].file_offset -= deleted_size
                self._didModify = True
                return
        if self.debug:
            print(name, "not in archive")
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
def printdir(self):
"""Print a table of contents for the zip file."""
print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"))
for zinfo in self.filelist:
date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size))
def testzip(self):
"""Read all the files and check the CRC."""
chunk_size = 2 ** 20
for zinfo in self.filelist:
try:
# Read by chunks, to avoid an OverflowError or a
# MemoryError with very large embedded files.
f = self.open(zinfo.filename, "r")
while f.read(chunk_size): # Check CRC-32
pass
except BadZipfile:
return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
def setpassword(self, pwd):
"""Set default password for encrypted files."""
self.pwd = pwd
def read(self, name, pwd=None):
"""Return file bytes (as a string) for name."""
return self.open(name, "r", pwd).read()
def read_raw(self, name, mode="r", pwd=None):
"""Return the raw bytes in the zipfile corresponding to name."""
zef = self.open(name, mode=mode, pwd=pwd)
return zef.read_raw()
    def open(self, name, mode="r", pwd=None):
        """Return file-like object for 'name'.

        *name* may be a member name or a ZipInfo instance. *pwd* is the
        password for encrypted members; when omitted the archive-wide
        password set via :meth:`setpassword` is used. Raises BadZipfile
        on a corrupt local header and RuntimeError on a bad password.
        """
        if mode not in ("r", "U", "rU"):
            raise RuntimeError('open() requires mode "r", "U", or "rU"')
        if not self.fp:
            raise RuntimeError(
                "Attempt to read ZIP archive that was already closed")
        # Only open a new file for instances where we were not
        # given a file object in the constructor
        if self._filePassed:
            zef_file = self.fp
        else:
            zef_file = open(self.filename, 'rb')
        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        else:
            # Get info object for name
            zinfo = self.getinfo(name)
        zef_file.seek(zinfo.header_offset, 0)
        # Skip the file header:
        fheader = zef_file.read(sizeFileHeader)
        if fheader[0:4] != stringFileHeader:
            raise BadZipfile("Bad magic number for file header")
        fheader = struct.unpack(structFileHeader, fheader)
        fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
        fname = decode_zip_internal_file_name(fname, zinfo.flag_bits)
        if fheader[_FH_EXTRA_FIELD_LENGTH]:
            zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
        if fname != zinfo.orig_filename:
            # A mismatch is tolerated (warning only): the central
            # directory name is treated as authoritative.
            print(('WARNING: Header (%r) and directory (%r) filenames do not'
                ' match inside ZipFile')%(fname, zinfo.orig_filename))
            print('Using directory filename %r'%zinfo.orig_filename)
            # raise BadZipfile, \
            #        'File name in directory "%r" and header "%r" differ.' % (
            #    zinfo.orig_filename, fname)
        # check for encrypted flag & handle password
        is_encrypted = zinfo.flag_bits & 0x1
        zd = None
        if is_encrypted:
            if not pwd:
                pwd = self.pwd
            if not pwd:
                raise RuntimeError(("File %s is encrypted, "
                        "password required for extraction") % name)
            zd = _ZipDecrypter(pwd)
            # The first 12 bytes in the cypher stream is an encryption header
            #  used to strengthen the algorithm. The first 11 bytes are
            #  completely random, while the 12th contains the MSB of the CRC,
            #  or the MSB of the file time depending on the header type
            #  and is used to check the correctness of the password.
            byts = zef_file.read(12)
            h = list(map(zd, bytearray(byts[0:12])))
            if zinfo.flag_bits & 0x8:
                # compare against the file type from extended local headers
                check_byte = (zinfo._raw_time >> 8) & 0xff
            else:
                # compare against the CRC otherwise
                check_byte = (zinfo.CRC >> 24) & 0xff
            if h[11] != check_byte:
                raise RuntimeError("Bad password for file", name)
        return ZipExtFile(zef_file, mode, zinfo, zd)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
# Kovid: Extract longer names first, just in case the zip file has
# an entry for a directory without a trailing slash
members.sort(key=len, reverse=True)
for zipinfo in members:
self.extract(zipinfo, path, pwd)
    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
        file on the path targetpath.

        The member name is sanitized (drive letters, absolute paths and
        '..'/'.' components are stripped) before joining to targetpath;
        several fallbacks added by Kovid retry with a sanitized file name
        when the OS rejects the original one. Returns the final path.
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        # Strip trailing path separator, unless it represents the root.
        if (targetpath[-1:] in (os.path.sep, os.path.altsep) and
                len(os.path.splitdrive(targetpath)[1]) > 1):
            targetpath = targetpath[:-1]
        base_target = targetpath  # Added by Kovid
        # Sanitize path, changing absolute paths to relative paths
        # and removing .. and . (changed by Kovid)
        fname = member.filename.replace(os.sep, '/')
        fname = os.path.splitdrive(fname)[1]
        fname = '/'.join(x for x in fname.split('/') if x not in {'', os.path.curdir, os.path.pardir})
        if not fname:
            raise BadZipfile('The member %r has an invalid name'%member.filename)
        targetpath = os.path.normpath(os.path.join(base_target, fname))
        # Added by Kovid as normpath fails to convert forward slashes for UNC
        # paths, i.e. paths of the form \\?\C:\some/path
        if os.sep != '/':
            targetpath = targetpath.replace('/', os.sep)
        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            try:
                os.makedirs(upperdirs)
            except:  # Added by Kovid
                # Retry with a sanitized name if directory creation failed
                targetpath = os.path.join(base_target,
                        sanitize_file_name(fname))
                upperdirs = os.path.dirname(targetpath)
                if upperdirs and not os.path.exists(upperdirs):
                    os.makedirs(upperdirs)
        if member.filename[-1] == '/':
            # Directory entry: just create the directory, no data to write
            if not os.path.isdir(targetpath):
                try:
                    os.mkdir(targetpath)
                except Exception:  # Added by Kovid
                    targetpath = os.path.join(base_target, sanitize_file_name(fname))
                    os.mkdir(targetpath)
            return targetpath
        if not os.path.exists(targetpath):
            # Kovid: Could be a previously automatically created directory
            # in which case it is ignored
            with closing(self.open(member, pwd=pwd)) as source:
                try:
                    with open(targetpath, 'wb') as target:
                        shutil.copyfileobj(source, target)
                except:
                    # Try sanitizing the file name to remove invalid characters
                    components = list(os.path.split(targetpath))
                    components[-1] = sanitize_file_name(components[-1])
                    targetpath = os.sep.join(components)
                    with open(targetpath, 'wb') as target:
                        shutil.copyfileobj(source, target)
        # Kovid: Try to preserve the timestamps in the ZIP file
        try:
            mtime = time.localtime()
            mtime = time.mktime(member.date_time + (0, 0) + (mtime.tm_isdst,))
            os.utime(targetpath, (mtime, mtime))
        except:
            pass
        return targetpath
    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive.

        Validates the archive mode, that the requested compression
        method is supported/available, and (unless ZIP64 is allowed)
        that the file size and header offset fit in 32 bits. Duplicate
        names only produce a debug warning, they are not an error.
        """
        if zinfo.filename in self.NameToInfo:
            if self.debug:  # Warning for duplicate names
                print("Duplicate name:", zinfo.filename)
        if self.mode not in ("w", "a"):
            raise RuntimeError('write() requires mode "w" or "a"')
        if not self.fp:
            raise RuntimeError(
                "Attempt to write ZIP archive that was already closed")
        if zinfo.compress_type == ZIP_DEFLATED and not zlib:
            raise RuntimeError(
                "Compression requires the (missing) zlib module")
        if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
            raise RuntimeError(
                "The compression method %s is not supported" % zinfo.compress_type)
        if zinfo.file_size > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
        if zinfo.header_offset > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
"""Put the bytes from filename into the archive under the name
arcname."""
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
st = os.stat(filename)
isdir = stat.S_ISDIR(st.st_mode)
mtime = time.localtime(st.st_mtime)
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
arcname = filename
arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
while arcname[0] in (os.sep, os.altsep):
arcname = arcname[1:]
if not isinstance(arcname, str):
arcname = arcname.decode(filesystem_encoding)
if isdir and not arcname.endswith('/'):
arcname += '/'
zinfo = ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
if isdir:
zinfo.compress_type = ZIP_STORED
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = st.st_size
zinfo.flag_bits = 0x00
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
if isdir:
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.CRC = 0
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
self.fp.write(zinfo.FileHeader())
return
with open(filename, "rb") as fp:
# Must overwrite CRC and sizes with correct data later
zinfo.CRC = CRC = 0
zinfo.compress_size = compress_size = 0
zinfo.file_size = file_size = 0
self.fp.write(zinfo.FileHeader())
if zinfo.compress_type == ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
while 1:
buf = fp.read(1024 * 8)
if not buf:
break
file_size = file_size + len(buf)
CRC = crc32(buf, CRC) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size = compress_size + len(buf)
self.fp.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size = compress_size + len(buf)
self.fp.write(buf)
zinfo.compress_size = compress_size
else:
zinfo.compress_size = file_size
zinfo.CRC = CRC
zinfo.file_size = file_size
# Seek backwards and write CRC and file sizes
position = self.fp.tell() # Preserve current position in file
self.fp.seek(zinfo.header_offset + 14, 0)
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.fp.seek(position, 0)
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
    def writestr(self, zinfo_or_arcname, byts, permissions=0o600,
            compression=ZIP_DEFLATED, raw_bytes=False):
        """Write a file into the archive. The contents is the string
        'byts'. 'zinfo_or_arcname' is either a ZipInfo instance or
        the name of the file in the archive.

        When *raw_bytes* is True, *byts* must already be the stored
        (compressed) byte stream and *zinfo_or_arcname* must be a
        ZipInfo carrying the matching CRC and sizes; the data is copied
        through unchanged.
        """
        assert not raw_bytes or (raw_bytes and
                isinstance(zinfo_or_arcname, ZipInfo))
        if not isinstance(byts, bytes):
            byts = byts.encode('utf-8')
        if not isinstance(zinfo_or_arcname, ZipInfo):
            if not isinstance(zinfo_or_arcname, str):
                zinfo_or_arcname = zinfo_or_arcname.decode(filesystem_encoding)
            zinfo = ZipInfo(filename=zinfo_or_arcname,
                            date_time=time.localtime(time.time())[:6])
            zinfo.compress_type = compression
            zinfo.external_attr = permissions << 16
        else:
            zinfo = zinfo_or_arcname
        if not self.fp:
            raise RuntimeError(
                "Attempt to write to ZIP archive that was already closed")
        if not raw_bytes:
            zinfo.file_size = len(byts)  # Uncompressed size
        zinfo.header_offset = self.fp.tell()  # Start of header bytes
        self._writecheck(zinfo)
        self._didModify = True
        if not raw_bytes:
            # Compress (if requested) and compute CRC here; raw_bytes
            # entries already carry correct CRC/sizes in zinfo.
            zinfo.CRC = crc32(byts) & 0xffffffff  # CRC-32 checksum
            if zinfo.compress_type == ZIP_DEFLATED:
                co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                        zlib.DEFLATED, -15)
                byts = co.compress(byts) + co.flush()
                zinfo.compress_size = len(byts)  # Compressed size
            else:
                zinfo.compress_size = zinfo.file_size
        # Re-read the offset in case _writecheck moved the file position
        zinfo.header_offset = self.fp.tell()  # Start of header bytes
        self.fp.write(zinfo.FileHeader())
        self.fp.write(byts)
        self.fp.flush()
        if zinfo.flag_bits & 0x08:
            # Write CRC and file sizes after the file data
            self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                    zinfo.file_size))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
def add_dir(self, path, prefix='', simple_filter=lambda x:False):
'''
Add a directory recursively to the zip file with an optional prefix.
'''
if prefix:
self.writestr(prefix+'/', b'', 0o755)
fp = (prefix + ('/' if prefix else '')).replace('//', '/')
for f in os.listdir(path):
if simple_filter(f): # Added by Kovid
continue
arcname = fp + f
f = os.path.join(path, f)
if os.path.isdir(f):
self.add_dir(f, prefix=arcname, simple_filter=simple_filter)
else:
self.write(f, arcname)
def __del__(self):
"""Call the "close()" method in case the user forgot."""
self.close()
    def close(self):
        """Close the file, and for mode "w" and "a" write the ending
        records.

        Writing the ending records means emitting one central directory
        entry per member (with ZIP64 extra fields where any size/offset
        exceeds 32 bits), then the end-of-central-directory record(s).
        Safe to call more than once; only closes the underlying file if
        this instance opened it.
        """
        if self.fp is None:
            return
        if self.mode in ("w", "a") and self._didModify:  # write ending records
            count = 0
            pos1 = self.fp.tell()
            for zinfo in self.filelist:  # write central directory
                count = count + 1
                dt = zinfo.date_time
                # Pack the timestamp into DOS date/time format
                dosdate = fixtimevar((dt[0] - 1980) << 9 | dt[1] << 5 | dt[2])
                dostime = fixtimevar(dt[3] << 11 | dt[4] << 5 | (dt[5] // 2))
                extra = []
                if zinfo.file_size > ZIP64_LIMIT \
                        or zinfo.compress_size > ZIP64_LIMIT:
                    # Real sizes go in the ZIP64 extra field; the record
                    # itself carries the 0xffffffff sentinel.
                    extra.append(zinfo.file_size)
                    extra.append(zinfo.compress_size)
                    file_size = 0xffffffff
                    compress_size = 0xffffffff
                else:
                    file_size = zinfo.file_size
                    compress_size = zinfo.compress_size
                if zinfo.header_offset > ZIP64_LIMIT:
                    extra.append(zinfo.header_offset)
                    header_offset = 0xffffffff
                else:
                    header_offset = zinfo.header_offset
                extra_data = zinfo.extra
                if extra:
                    # Append a ZIP64 field to the extra's
                    extra_data = struct.pack(
                            '<HH' + 'Q'*len(extra),
                            1, 8*len(extra), *extra) + extra_data
                    # ZIP64 requires at least version 4.5 of the spec
                    extract_version = max(45, zinfo.extract_version)
                    create_version = max(45, zinfo.create_version)
                else:
                    extract_version = zinfo.extract_version
                    create_version = zinfo.create_version
                try:
                    filename, flag_bits = zinfo._encodeFilenameFlags()
                    centdir = struct.pack(structCentralDir,
                        stringCentralDir, create_version,
                        zinfo.create_system, extract_version, zinfo.reserved,
                        flag_bits, zinfo.compress_type, dostime, dosdate,
                        zinfo.CRC, compress_size, file_size,
                        len(filename), len(extra_data), len(zinfo.comment),
                        0, zinfo.internal_attr, zinfo.external_attr,
                        header_offset)
                except DeprecationWarning:
                    # Dump the offending values for debugging, then re-raise
                    print((structCentralDir,
                        stringCentralDir, create_version,
                        zinfo.create_system, extract_version, zinfo.reserved,
                        zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                        zinfo.CRC, compress_size, file_size,
                        len(zinfo.filename), len(extra_data), len(zinfo.comment),
                        0, zinfo.internal_attr, zinfo.external_attr,
                        header_offset), file=sys.stderr)
                    raise
                self.fp.write(centdir)
                self.fp.write(filename)
                self.fp.write(extra_data)
                self.fp.write(zinfo.comment)
            pos2 = self.fp.tell()
            # Write end-of-zip-archive record
            centDirCount = count
            centDirSize = pos2 - pos1
            centDirOffset = pos1
            if (centDirCount >= ZIP_FILECOUNT_LIMIT or
                centDirOffset > ZIP64_LIMIT or
                centDirSize > ZIP64_LIMIT):
                # Need to write the ZIP64 end-of-archive records
                zip64endrec = struct.pack(
                        structEndArchive64, stringEndArchive64,
                        44, 45, 45, 0, 0, centDirCount, centDirCount,
                        centDirSize, centDirOffset)
                self.fp.write(zip64endrec)
                zip64locrec = struct.pack(
                        structEndArchive64Locator,
                        stringEndArchive64Locator, 0, pos2, 1)
                self.fp.write(zip64locrec)
                # The classic EOCD record then carries capped values
                centDirCount = min(centDirCount, 0xFFFF)
                centDirSize = min(centDirSize, 0xFFFFFFFF)
                centDirOffset = min(centDirOffset, 0xFFFFFFFF)
            # check for valid comment length
            if len(self.comment) >= ZIP_MAX_COMMENT:
                if self.debug > 0:
                    msg = 'Archive comment is too long; truncating to %d bytes' \
                          % ZIP_MAX_COMMENT
                    print(msg)
                self.comment = self.comment[:ZIP_MAX_COMMENT]
            endrec = struct.pack(structEndArchive, stringEndArchive,
                                 0, 0, centDirCount, centDirCount,
                                 centDirSize, centDirOffset, len(self.comment))
            self.fp.write(endrec)
            self.fp.write(self.comment)
            self.fp.flush()
        if not self._filePassed:
            self.fp.close()
        self.fp = None
def safe_replace(zipstream, name, datastream, extra_replacements={},
        add_missing=False):
    '''
    Replace a file in a zip file in a safe manner. This proceeds by extracting
    and re-creating the zipfile. This is necessary because :method:`ZipFile.replace`
    sometimes created corrupted zip files.

    :param zipstream: Stream from a zip file
    :param name: The name of the file to replace
    :param datastream: The data to replace the file with.
    :param extra_replacements: Extra replacements. Mapping of name to file-like
                               objects
    :param add_missing: If a replacement does not exist in the zip file, it is
                        added. Use with care as currently parent directories
                        are not created.
    '''
    z = ZipFile(zipstream, 'r')
    replacements = {name:datastream}
    replacements.update(extra_replacements)
    names = frozenset(replacements.keys())
    found = set()

    def rbytes(name):
        # Accept either raw bytes or a file-like object as replacement data
        r = replacements[name]
        if not isinstance(r, bytes):
            r = r.read()
        return r

    # Rebuild the whole archive into a temporary, then copy it back over
    # the original stream in one pass.
    with SpooledTemporaryFile(max_size=100*1024*1024) as temp:
        ztemp = ZipFile(temp, 'w')
        for obj in z.infolist():
            if isinstance(obj.filename, str):
                obj.flag_bits |= 0x16  # Set isUTF-8 bit
                # NOTE(review): the zip spec's UTF-8 flag is bit 11
                # (0x800); 0x16 sets bits 1, 2 and 4 instead — confirm
                # this value is intentional.
            if obj.filename in names:
                ztemp.writestr(obj, rbytes(obj.filename))
                found.add(obj.filename)
            else:
                # Copy unchanged members through without recompressing
                ztemp.writestr(obj, z.read_raw(obj), raw_bytes=True)
        if add_missing:
            for name in names - found:
                ztemp.writestr(name, rbytes(name))
        ztemp.close()
        z.close()
        temp.seek(0)
        zipstream.seek(0)
        zipstream.truncate()
        shutil.copyfileobj(temp, zipstream)
        zipstream.flush()
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def writepy(self, pathname, basename=""):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive.  If pathname is a plain
        directory, listdir *.py and enter all modules.  Else, pathname
        must be a Python *.py file and the module will be put into the
        archive.  Added modules are always module.pyo or module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        """
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = f"{basename}/{name}"
                else:
                    basename = name
                if self.debug:
                    print("Adding package in", pathname, "as", basename)
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print("Adding", arcname)
                self.write(fname, arcname)
                dirlist = os.listdir(pathname)
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    ext = os.path.splitext(filename)[-1]
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename)  # Recursive call
                    elif ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print("Adding files from directory", pathname)
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    ext = os.path.splitext(filename)[-1]
                    if ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print("Adding", arcname)
                        self.write(fname, arcname)
        else:
            # Single module file: must be a .py source file
            if pathname[-3:] != ".py":
                raise RuntimeError(
                    'Files added with writepy() must end with ".py"')
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print("Adding file", arcname)
            self.write(fname, arcname)

    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary.  For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        Prefers an up-to-date .pyo, otherwise (re)compiles to .pyc.
        """
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        if os.path.isfile(file_pyo) and \
           os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
            fname = file_pyo  # Use .pyo file
        elif not os.path.isfile(file_pyc) or \
             os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
            import py_compile
            if self.debug:
                print("Compiling", file_py)
            try:
                py_compile.compile(file_py, file_pyc, None, True)
            except py_compile.PyCompileError as err:
                # Report compile failures but still reference the .pyc
                print(err.msg)
            fname = file_pyc
        else:
            fname = file_pyc
        archivename = os.path.split(fname)[1]
        if basename:
            archivename = f"{basename}/{archivename}"
        return (fname, archivename)
def extractall(source, dest):
    '''Extract every member of the zip archive *source* into the
    directory *dest*.'''
    with ZipFile(source) as archive:
        archive.extractall(dest)
def main(args=None):
    """Minimal command line interface: list (-l), test (-t), extract
    (-e) or create (-c) a zip file. *args* defaults to sys.argv[1:]."""
    import textwrap
    USAGE=textwrap.dedent("""\
        Usage:
            zipfile.py -l zipfile.zip # Show listing of a zipfile
            zipfile.py -t zipfile.zip # Test if a zipfile is valid
            zipfile.py -e zipfile.zip target # Extract zipfile into target dir
            zipfile.py -c zipfile.zip src ... # Create zipfile from sources
        """)
    if args is None:
        args = sys.argv[1:]

    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
        print(USAGE)
        sys.exit(1)

    if args[0] == '-l':
        if len(args) != 2:
            print(USAGE)
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        zf.printdir()
        zf.close()

    elif args[0] == '-t':
        if len(args) != 2:
            print(USAGE)
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        badfile = zf.testzip()
        if badfile:
            print(f"The following enclosed file is corrupted: {badfile!r}")
        print("Done testing")

    elif args[0] == '-e':
        if len(args) != 3:
            print(USAGE)
            sys.exit(1)

        zf = ZipFile(args[1], 'r')
        out = args[2]
        # NOTE(review): member names are joined to the target directory
        # without sanitization; an archive containing '..' components
        # could write outside *out* (zip-slip) — confirm inputs are
        # trusted, or route through ZipFile.extractall() which sanitizes.
        for path in zf.namelist():
            if path.startswith('./'):
                tgt = os.path.join(out, path[2:])
            else:
                tgt = os.path.join(out, path)

            tgtdir = os.path.dirname(tgt)
            if not os.path.exists(tgtdir):
                os.makedirs(tgtdir)
            with open(tgt, 'wb') as fp:
                fp.write(zf.read(path))
        zf.close()

    elif args[0] == '-c':
        if len(args) < 3:
            print(USAGE)
            sys.exit(1)

        def addToZip(zf, path, zippath):
            # Recursively add files; directories are walked, other
            # filesystem objects are ignored.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                for nm in os.listdir(path):
                    addToZip(zf,
                            os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore

        zf = ZipFile(args[1], 'w', allowZip64=True)
        for src in args[2:]:
            addToZip(zf, src, os.path.basename(src))

        zf.close()
if __name__ == "__main__":
    # Allow this module to be used as a minimal command line zip tool.
    main()
| 65,303 | Python | .py | 1,475 | 32.901695 | 129 | 0.573394 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,184 | webengine.py | kovidgoyal_calibre/src/calibre/utils/webengine.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2021, Kovid Goyal <kovid at kovidgoyal.net>
import json
import os
from qt.core import QBuffer, QIODevice, QObject, pyqtSignal, sip
from qt.webengine import QWebEngineProfile, QWebEngineScript, QWebEngineSettings, QWebEngineUrlScheme
from calibre.constants import FAKE_PROTOCOL, SPECIAL_TITLE_FOR_WEBENGINE_COMMS, cache_dir
def setup_fake_protocol():
    '''Register calibre's internal URL scheme with QtWebEngine as a
    secure, host-style scheme. Safe to call more than once.'''
    scheme_name = FAKE_PROTOCOL.encode('ascii')
    if QWebEngineUrlScheme.schemeByName(scheme_name).name():
        return  # already registered
    scheme = QWebEngineUrlScheme(scheme_name)
    scheme.setSyntax(QWebEngineUrlScheme.Syntax.Host)
    scheme.setFlags(QWebEngineUrlScheme.Flag.SecureScheme)
    QWebEngineUrlScheme.registerScheme(scheme)
def setup_profile(profile):
    '''Point *profile*'s cache and persistent storage directories into
    calibre's cache directory and return the profile.'''
    # Qt uses persistent storage path to store cached GPU data even for OTR profiles
    base = os.path.abspath(os.path.join(cache_dir(), 'qwe', profile.storageName() or 'dp'))
    cache_path = os.path.join(base, 'c')
    storage_path = os.path.join(base, 'sp')
    if profile.cachePath() != cache_path:
        profile.setCachePath(cache_path)
    if profile.persistentStoragePath() != storage_path:
        profile.setPersistentStoragePath(storage_path)
    return profile
def setup_default_profile():
    '''Configure and return the application-wide default profile.'''
    profile = QWebEngineProfile.defaultProfile()
    return setup_profile(profile)
def send_reply(rq, mime_type, data):
    """Answer the QWebEngineUrlRequestJob *rq* with *data* (bytes) served
    as *mime_type*. Does nothing if Qt has already deleted the request."""
    if sip.isdeleted(rq):
        return
    # make the buf a child of rq so that it is automatically deleted when
    # rq is deleted
    buf = QBuffer(parent=rq)
    buf.open(QIODevice.OpenModeFlag.WriteOnly)
    # we have to copy data into buf as it will be garbage
    # collected by python
    buf.write(data)
    buf.seek(0)
    buf.close()
    rq.reply(mime_type.encode('ascii'), buf)
def secure_webengine(view_or_page_or_settings, for_viewer=False):
    '''Harden a QWebEngine view/page/settings object: disable plugins,
    clipboard access, popup windows and local file reads from
    javascript; unless *for_viewer* is True also disable javascript
    itself, unknown URL schemes and audio. Returns the settings
    object.'''
    obj = view_or_page_or_settings
    settings = obj.settings() if hasattr(obj, 'settings') else obj
    set_attr = settings.setAttribute
    set_attr(QWebEngineSettings.WebAttribute.PluginsEnabled, False)
    if not for_viewer:
        set_attr(QWebEngineSettings.WebAttribute.JavascriptEnabled, False)
        settings.setUnknownUrlSchemePolicy(QWebEngineSettings.UnknownUrlSchemePolicy.DisallowUnknownUrlSchemes)
        if hasattr(obj, 'setAudioMuted'):
            obj.setAudioMuted(True)
    for attr in (
        QWebEngineSettings.WebAttribute.JavascriptCanOpenWindows,
        QWebEngineSettings.WebAttribute.JavascriptCanAccessClipboard,
        # ensure javascript cannot read from local files
        QWebEngineSettings.WebAttribute.LocalContentCanAccessFileUrls,
        QWebEngineSettings.WebAttribute.AllowWindowActivationFromJavaScript,
    ):
        set_attr(attr, False)
    return settings
def insert_scripts(profile, *scripts):
    '''Install *scripts* into *profile*'s script collection, first
    removing any previously installed scripts with the same names.'''
    collection = profile.scripts()
    for script in scripts:
        for stale in collection.find(script.name()):
            collection.remove(stale)
    for script in scripts:
        collection.insert(script)
def create_script(
    name, src, world=QWebEngineScript.ScriptWorldId.ApplicationWorld,
    injection_point=QWebEngineScript.InjectionPoint.DocumentReady,
    on_subframes=True
):
    '''Build a named QWebEngineScript from the javascript source *src*
    (str or utf-8 bytes), to run in *world* at *injection_point*.'''
    if isinstance(src, bytes):
        src = src.decode('utf-8')
    script = QWebEngineScript()
    script.setName(name)
    script.setSourceCode(src)
    script.setWorldId(world)
    script.setInjectionPoint(injection_point)
    script.setRunsOnSubFrames(on_subframes)
    return script
# js-to-python signals on a Bridge subclass are declared as ordinary Qt
# signals; this alias just documents the direction of communication.
from_js = pyqtSignal
class to_js(str):
    '''Placeholder for a python-to-js call declared on a Bridge subclass.

    Instances are replaced by :class:`to_js_bound` once the javascript
    side reports ready; calling one before that only prints a warning.
    '''

    def __call__(self, *a):
        print('WARNING: Calling {}() before the javascript bridge is ready'.format(self.name))

    emit = __call__
class to_js_bound(QObject):
    # A live python-to-js call: invoking it forwards name + arguments to
    # the javascript side's python_comm._from_python() in the
    # application script world. Parented to the bridge for Qt lifetime
    # management.

    def __init__(self, bridge, name):
        QObject.__init__(self, bridge)
        self.name = name

    def __call__(self, *args):
        # The guard keeps this a no-op until the page has installed
        # window.python_comm
        self.parent().page.runJavaScript('if (window.python_comm) python_comm._from_python({}, {})'.format(
            json.dumps(self.name), json.dumps(args)), QWebEngineScript.ScriptWorldId.ApplicationWorld)
    emit = __call__
class Bridge(QObject):
    """Two-way communication channel between python and javascript
    running in a QWebEnginePage.

    Subclasses declare js-to-python signals as ``from_js`` (pyqtSignal)
    class attributes and python-to-js calls as ``to_js`` attributes.
    Messages from javascript arrive via page title changes, which
    trigger a poll; ``bridge_ready`` is emitted once the javascript
    side has registered.
    """

    bridge_ready = pyqtSignal()

    def __init__(self, page):
        QObject.__init__(self, page)
        # JSON list of the names of all declared js-to-python signals
        self._signals = json.dumps(tuple({k for k, v in self.__class__.__dict__.items() if isinstance(v, pyqtSignal)}))
        self._signals_registered = False
        # javascript signals readiness/messages by changing the title
        page.titleChanged.connect(self._title_changed)
        for k, v in self.__class__.__dict__.items():
            if isinstance(v, to_js):
                v.name = k

    @property
    def page(self):
        # The bridge is parented to its page
        return self.parent()

    @property
    def ready(self):
        # True once the javascript side has completed registration
        return self._signals_registered

    def _title_changed(self, title):
        if title.startswith(SPECIAL_TITLE_FOR_WEBENGINE_COMMS):
            self._poll_for_messages()

    def _register_signals(self):
        # Bind the to_js placeholders to live callables and tell the
        # javascript side which signal names python accepts
        self._signals_registered = True
        for k, v in self.__class__.__dict__.items():
            if isinstance(v, to_js):
                setattr(self, k, to_js_bound(self, k))
        self.page.runJavaScript('python_comm._register_signals(' + self._signals + ')', QWebEngineScript.ScriptWorldId.ApplicationWorld)
        self.bridge_ready.emit()

    def _poll_for_messages(self):
        self.page.runJavaScript('python_comm._poll()', QWebEngineScript.ScriptWorldId.ApplicationWorld, self._dispatch_messages)

    def _dispatch_messages(self, messages):
        # Deliver each queued javascript message to the matching signal
        try:
            for msg in messages:
                if isinstance(msg, dict):
                    mt = msg.get('type')
                    if mt == 'signal':
                        signal = getattr(self, msg['name'], None)
                        if signal is None:
                            print('WARNING: No js-to-python signal named: ' + msg['name'])
                        else:
                            args = msg['args']
                            if args:
                                signal.emit(*args)
                            else:
                                signal.emit()
                    elif mt == 'qt-ready':
                        self._register_signals()
        except Exception:
            if messages:
                import traceback
                traceback.print_exc()
| 6,095 | Python | .py | 138 | 35.565217 | 136 | 0.65777 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,185 | iso8601.py | kovidgoyal_calibre/src/calibre/utils/iso8601.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from datetime import datetime, timedelta, timezone
from calibre_extensions import speedup
utc_tz = timezone.utc
# Time zone of the local machine, captured once at import time
local_tz = datetime.now().astimezone().tzinfo
# Sentinel returned by parse_iso8601() for an empty/missing date string
UNDEFINED_DATE = datetime(101,1,1, tzinfo=utc_tz)
def parse_iso8601(date_string, assume_utc=False, as_utc=True, require_aware=False):
    """Parse *date_string* (ISO 8601, via the C speedup extension) into a
    timezone-aware datetime.

    Naive timestamps are interpreted as UTC when *assume_utc* is True,
    otherwise as local time; if *require_aware* is True they raise
    ValueError instead. The result is converted to UTC when *as_utc* is
    True, otherwise to local time. An empty *date_string* yields
    UNDEFINED_DATE.
    """
    if not date_string:
        return UNDEFINED_DATE
    dt, aware, tzseconds = speedup.parse_iso8601(date_string)
    tz = utc_tz if assume_utc else local_tz
    if aware:  # timezone was specified
        if tzseconds == 0:
            tz = utc_tz
        else:
            sign = '-' if tzseconds < 0 else '+'
            description = "%s%02d:%02d" % (sign, abs(tzseconds) // 3600, (abs(tzseconds) % 3600) // 60)
            tz = timezone(timedelta(seconds=tzseconds), description)
    elif require_aware:
        raise ValueError(f'{date_string} does not specify a time zone')
    dt = dt.replace(tzinfo=tz)
    if as_utc and tz is utc_tz:
        # Already in the requested zone, skip the conversion
        return dt
    return dt.astimezone(utc_tz if as_utc else local_tz)
if __name__ == '__main__':
    # Quick manual check: parse the date string given on the command line
    import sys
    print(parse_iso8601(sys.argv[-1]))
| 1,196 | Python | .py | 28 | 36.857143 | 103 | 0.668103 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,186 | matcher.py | kovidgoyal_calibre/src/calibre/utils/matcher.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import atexit
import os
import sys
from collections import OrderedDict
from itertools import islice
from math import ceil
from operator import itemgetter
from threading import Lock, Thread
from unicodedata import normalize
from calibre import as_unicode
from calibre import detect_ncpus as cpu_count
from calibre.constants import filesystem_encoding
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import primary_collator, primary_find, primary_sort_key
from calibre.utils.icu import upper as icu_upper
from polyglot.builtins import iteritems, itervalues
from polyglot.queue import Queue
# Default character classes used to weight matches (see
# calc_score_for_char): a match right after a level1 char scores highest
# (0.9), then level2 (0.8), then level3 (0.7).
DEFAULT_LEVEL1 = '/'
DEFAULT_LEVEL2 = '-_ 0123456789'
DEFAULT_LEVEL3 = '.'
class PluginFailed(RuntimeError):
    'Raised when the native C scorer cannot be constructed (see default_scorer).'
class Worker(Thread):
    '''Daemon thread that pulls (index, scorer, query) jobs from a request
    queue and pushes (ok, payload) tuples onto a result queue.'''

    daemon = True

    def __init__(self, requests, results):
        Thread.__init__(self)
        self.requests = requests
        self.results = results
        # Wake any blocked worker at interpreter exit so it can terminate
        atexit.register(lambda: requests.put(None))

    def run(self):
        while True:
            job = self.requests.get()
            if job is None:  # sentinel: time to shut down
                break
            try:
                idx, scorer, query = job
                payload = (idx, scorer(query))
            except Exception as err:
                self.results.put((False, as_unicode(err)))
            else:
                self.results.put((True, payload))
# Global pool of Worker threads shared by all Matcher instances; access is
# serialized by wlock.
wlock = Lock()
workers = []
def split(tasks, pool_size):
    '''
    Partition ``tasks`` into at most ``pool_size`` consecutive chunks.

    Each chunk is a list of ``(i, x)`` pairs, where ``x`` is an element of
    the original list and ``i`` is its index in that list.
    '''
    chunk_size = int(ceil(len(tasks) / pool_size))
    chunks = []
    consumed = 0
    remaining = tasks
    while remaining:
        head, remaining = remaining[:chunk_size], remaining[chunk_size:]
        chunks.append([(consumed + off, item) for off, item in enumerate(head)])
        consumed += len(head)
    return chunks
def default_scorer(*args, **kwargs):
    # Prefer the fast C scorer; fall back to the pure python implementation
    # when the native extension cannot be used.
    try:
        return CScorer(*args, **kwargs)
    except PluginFailed:
        return PyScorer(*args, **kwargs)
class Matcher:
    '''Scores a fixed set of items against arbitrary queries, distributing
    the scoring work over the module-level pool of Worker threads.'''

    def __init__(
        self,
        items,
        level1=DEFAULT_LEVEL1,
        level2=DEFAULT_LEVEL2,
        level3=DEFAULT_LEVEL3,
        scorer=None
    ):
        # Lazily create the shared worker pool, one thread per CPU.
        with wlock:
            if not workers:
                requests, results = Queue(), Queue()
                w = [Worker(requests, results) for i in range(max(1, cpu_count()))]
                [x.start() for x in w]
                workers.extend(w)
        # Normalize to NFC and drop falsy entries so all scoring sees
        # canonical unicode strings.
        items = map(lambda x: normalize('NFC', str(x)), filter(None, items))
        self.items = items = tuple(items)
        # One task (and one scorer) per worker; task_maps translate a
        # task-local index back into an index in self.items.
        tasks = split(items, len(workers))
        self.task_maps = [{j: i for j, (i, _) in enumerate(task)} for task in tasks]
        scorer = scorer or default_scorer
        self.scorers = [
            scorer(tuple(map(itemgetter(1), task_items))) for task_items in tasks
        ]
        self.sort_keys = None

    def __call__(self, query, limit=None):
        # Returns an OrderedDict mapping matched items to their match
        # positions, best score first; at most `limit` entries when given.
        query = normalize('NFC', str(query))
        with wlock:
            # All workers share one request queue, so putting via
            # workers[0] fans the jobs out across the whole pool.
            for i, scorer in enumerate(self.scorers):
                workers[0].requests.put((i, scorer, query))
            # NOTE(review): sort_keys is computed here but not consulted
            # below; presumably kept for external consumers — confirm.
            if self.sort_keys is None:
                self.sort_keys = {
                    i: primary_sort_key(x)
                    for i, x in enumerate(self.items)
                }
            num = len(self.task_maps)
            scores, positions = {}, {}
            error = None
            # Collect exactly one result (or error) per task.
            while num > 0:
                ok, x = workers[0].results.get()
                num -= 1
                if ok:
                    task_num, vals = x
                    task_map = self.task_maps[task_num]
                    for i, (score, pos) in enumerate(vals):
                        item = task_map[i]
                        scores[item] = score
                        positions[item] = pos
                else:
                    error = x
            if error is not None:
                raise Exception('Failed to score items: %s' % error)
            # Sort by descending score (score negated for ascending sort).
            items = sorted(((-scores[i], item, positions[i])
                            for i, item in enumerate(self.items)),
                           key=itemgetter(0))
            if limit is not None:
                del items[limit:]
            # filter(itemgetter(0), ...) drops zero-score (non-matching)
            # entries; the negated score itself is discarded via x[1:].
            return OrderedDict(x[1:] for x in filter(itemgetter(0), items))
def get_items_from_dir(basedir, acceptq=lambda x: True):
    '''Yield relative, forward-slash separated paths of all files under
    *basedir* for which *acceptq* returns a true value.'''
    if isinstance(basedir, bytes):
        basedir = basedir.decode(filesystem_encoding)
    needs_sep_fix = os.sep != '/'
    for dirpath, dirnames, filenames in os.walk(basedir):
        for fname in filenames:
            full = os.path.join(dirpath, fname)
            if not acceptq(full):
                continue
            rel = os.path.relpath(full, basedir)
            yield rel.replace(os.sep, '/') if needs_sep_fix else rel
class FilesystemMatcher(Matcher):
    '''Matcher over all files found (recursively) under a base directory.'''

    def __init__(self, basedir, *args, **kwargs):
        super().__init__(get_items_from_dir(basedir), *args, **kwargs)
# Python implementation of the scoring algorithm {{{
def calc_score_for_char(ctx, prev, current, distance):
    '''Return the score contribution of a matched character, weighted by
    the kind of character that immediately precedes it (see the level1/2/3
    character classes) and by the gap since the previous match.'''
    base = ctx.max_score_per_char
    if prev in ctx.level1:
        weight = 0.9
    elif prev in ctx.level2 or (
        icu_lower(prev) == prev and icu_upper(current) == current
    ):
        # After a level2 separator, or at a lower->UPPER camelCase boundary
        weight = 0.8
    elif prev in ctx.level3:
        weight = 0.7
    else:
        # Plain character: penalize matches that are far apart
        weight = (1.0 / distance) * 0.75
    return base * weight
def process_item(ctx, haystack, needle):
    # Score haystack against needle, returning (score, positions): the best
    # achievable score and, for each needle character, the haystack index
    # it matched at (-1 when unmatched). A score of 0 means no full match.
    # non-recursive implementation using a stack
    stack = [(0, 0, 0, 0, [-1] * len(needle))]
    final_score, final_positions = stack[0][-2:]
    push, pop = stack.append, stack.pop
    while stack:
        hidx, nidx, last_idx, score, positions = pop()
        key = (hidx, nidx, last_idx)
        # memoize on (haystack pos, needle pos, last matched pos)
        mem = ctx.memory.get(key, None)
        if mem is None:
            for i in range(nidx, len(needle)):
                n = needle[i]
                if (len(haystack) - hidx < len(needle) - i):
                    # Not enough haystack left to match the rest of needle
                    score = 0
                    break
                pos = primary_find(n, haystack[hidx:])[0]
                if pos == -1:
                    score = 0
                    break
                pos += hidx
                distance = pos - last_idx
                score_for_char = ctx.max_score_per_char if distance <= 1 else calc_score_for_char(
                    ctx, haystack[pos - 1], haystack[pos], distance
                )
                hidx = pos + 1
                # Also explore skipping this occurrence: retry the same
                # needle char further along the haystack.
                push((hidx, i, last_idx, score, list(positions)))
                last_idx = positions[i] = pos
                score += score_for_char
            ctx.memory[key] = (score, positions)
        else:
            score, positions = mem
        if score > final_score:
            final_score = score
            final_positions = positions
    return final_score, final_positions
class PyScorer:
    '''Pure python fallback scorer, used when the C extension is not
    available; mirrors the behaviour of CScorer.'''

    __slots__ = (
        'level1', 'level2', 'level3', 'max_score_per_char', 'items', 'memory'
    )

    def __init__(
        self,
        items,
        level1=DEFAULT_LEVEL1,
        level2=DEFAULT_LEVEL2,
        level3=DEFAULT_LEVEL3
    ):
        self.level1 = level1
        self.level2 = level2
        self.level3 = level3
        self.max_score_per_char = 0
        self.items = items

    def __call__(self, needle):
        # Yield one (score, positions) result per stored item.
        for item in self.items:
            self.max_score_per_char = (1.0 / len(item) + 1.0 / len(needle)) / 2.0
            self.memory = {}  # fresh memoization cache for every item
            yield process_item(self, item, needle)
# }}}
class CScorer:
    '''Scorer backed by the native matcher extension.'''

    def __init__(
        self,
        items,
        level1=DEFAULT_LEVEL1,
        level2=DEFAULT_LEVEL2,
        level3=DEFAULT_LEVEL3
    ):
        from calibre_extensions.matcher import Matcher
        self.m = Matcher(
            items,
            primary_collator().capsule,
            str(level1), str(level2), str(level3)
        )

    def __call__(self, query):
        # Yield one (score, positions) pair per stored item.
        scores, positions = self.m.calculate_scores(query)
        for result in zip(scores, positions):
            yield result
def test(return_tests=False):
    # Unit tests for the C scorer. With return_tests=True the test suite
    # is returned instead of being run immediately.
    is_sanitized = 'libasan' in os.environ.get('LD_PRELOAD', '')
    import unittest

    class Test(unittest.TestCase):

        @unittest.skipIf(is_sanitized, 'Sanitizer enabled can\'t check for leaks')
        def test_mem_leaks(self):
            import gc
            from calibre.utils.mem import get_memory as memory
            m = Matcher(['a'], scorer=CScorer)
            m('a')

            def doit(c):
                m = Matcher([
                    c + 'im/one.gif',
                    c + 'im/two.gif',
                    c + 'text/one.html',
                ],
                    scorer=CScorer)
                m('one')
            # Memory growth should scale sub-linearly with iterations if
            # nothing leaks.
            start = memory()
            for i in range(10):
                doit(str(i))
            gc.collect()
            used10 = memory() - start
            start = memory()
            for i in range(100):
                doit(str(i))
            gc.collect()
            used100 = memory() - start
            if used100 > 0 and used10 > 0:
                self.assertLessEqual(used100, 2 * used10)

        def test_non_bmp(self):
            # Characters outside the Basic Multilingual Plane must still
            # produce correct match positions.
            raw = '_\U0001f431-'
            m = Matcher([raw], scorer=CScorer)
            positions = next(itervalues(m(raw)))
            self.assertEqual(
                positions, (0, 1, 2)
            )

    if return_tests:
        return unittest.TestLoader().loadTestsFromTestCase(Test)

    class TestRunner(unittest.main):

        def createTests(self):
            tl = unittest.TestLoader()
            self.test = tl.loadTestsFromTestCase(Test)

    TestRunner(verbosity=4)
def get_char(string, pos):
    # Return the character of string at index pos (python3 strings index
    # by code point, so this is unicode-safe).
    return string[pos]
def input_unicode(prompt):
    '''Prompt on stdin and return the response as str, decoding bytes with
    the stdin encoding if the platform returns them.'''
    response = input(prompt)
    if isinstance(response, bytes):
        response = response.decode(sys.stdin.encoding)
    return response
def main(basedir=None, query=None):
    # Interactive harness: repeatedly prompt for queries and show the ten
    # best matching files under basedir, with matched characters
    # highlighted in red.
    from calibre import prints
    from calibre.utils.terminal import ColoredStream
    if basedir is None:
        try:
            basedir = input_unicode('Enter directory to scan [%s]: ' % os.getcwd()
                                    ).strip() or os.getcwd()
        except (EOFError, KeyboardInterrupt):
            return
    m = FilesystemMatcher(basedir)
    emph = ColoredStream(sys.stdout, fg='red', bold=True)
    while True:
        if query is None:
            try:
                query = input_unicode('Enter query: ')
            except (EOFError, KeyboardInterrupt):
                break
            if not query:
                break
        for path, positions in islice(iteritems(m(query)), 0, 10):
            positions = list(positions)
            p = 0
            # Print unmatched spans normally, matched characters emphasized
            while positions:
                pos = positions.pop(0)
                if pos == -1:
                    continue
                prints(path[p:pos], end='')
                ch = get_char(path, pos)
                with emph:
                    prints(ch, end='')
                p = pos + len(ch)
            prints(path[p:])
        query = None
if __name__ == '__main__':
    # Interactive command line test harness
    # main(basedir='/t', query='ns')
    # test()
    main()
| 11,225 | Python | .py | 312 | 25.676282 | 98 | 0.54776 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,187 | certgen.py | kovidgoyal_calibre/src/calibre/utils/certgen.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import socket
from calibre_extensions import certgen
def create_key_pair(size=2048):
    # Generate a new RSA key pair of the given bit size.
    return certgen.create_rsa_keypair(size)
def create_cert_request(
    key_pair, common_name,
    country='IN', state='Maharashtra', locality='Mumbai', organization=None,
    organizational_unit=None, email_address=None, alt_names=(), basic_constraints=None,
    digital_key_usage=None, ext_key_usage=None,
):
    # Build an X.509 certificate request for key_pair with the given
    # subject fields, alternate names and extensions (semantics of the
    # extension strings are defined by the certgen C extension).
    return certgen.create_rsa_cert_req(
        key_pair, tuple(alt_names), common_name,
        country, state, locality, organization, organizational_unit, email_address,
        basic_constraints, digital_key_usage, ext_key_usage,
    )
def create_cert(req, ca_cert, ca_keypair, expire=365, not_before=0):
    # Sign the request with the given CA certificate and key; expire and
    # not_before are passed through to the certgen extension.
    return certgen.create_rsa_cert(req, ca_cert, ca_keypair, not_before, expire)
def create_ca_cert(req, ca_keypair, expire=365, not_before=0):
    # Self-sign the request (no issuing CA) to produce a CA certificate.
    return certgen.create_rsa_cert(req, None, ca_keypair, not_before, expire)
def serialize_cert(cert):
    # Serialize a certificate to its textual representation.
    return certgen.serialize_cert(cert)
def serialize_key(key_pair, password=None):
    # Serialize the private key, optionally encrypting it with password.
    return certgen.serialize_rsa_key(key_pair, password)
def cert_info(cert):
    # Human readable description of a certificate.
    return certgen.cert_info(cert)
def create_server_cert(
    domain_or_ip, ca_cert_file=None, server_cert_file=None, server_key_file=None,
    expire=365, ca_key_file=None, ca_name='Dummy Certificate Authority', key_size=2048,
    country='IN', state='Maharashtra', locality='Mumbai', organization=None,
    organizational_unit=None, email_address=None, alt_names=(), encrypt_key_with_password=None,
):
    '''Create a self-signed CA and a server certificate for domain_or_ip
    signed by that CA, optionally writing the serialized forms to the
    given files (paths or file-like objects).

    Returns (cacert, cakey, cert, pkey).
    '''
    # Decide whether the subject is an IP address (IPv4 or IPv6) so the
    # subjectAltName uses the correct IP:/DNS: prefix.
    is_ip = False
    try:
        socket.inet_pton(socket.AF_INET, domain_or_ip)
        is_ip = True
    except Exception:
        try:
            # Fixed: this previously called socket.inet_aton(AF_INET6, ...)
            # which always raises (inet_aton takes a single argument), so
            # IPv6 addresses were never detected. inet_pton is the correct
            # API for address-family-aware parsing.
            socket.inet_pton(socket.AF_INET6, domain_or_ip)
            is_ip = True
        except Exception:
            pass
    if not alt_names:
        prefix = 'IP' if is_ip else 'DNS'
        alt_names = (f'{prefix}:{domain_or_ip}',)
    # Create the Certificate Authority
    cakey = create_key_pair(key_size)
    careq = create_cert_request(
        cakey, ca_name, basic_constraints='critical,CA:TRUE', digital_key_usage='critical,keyCertSign,cRLSign',
        ext_key_usage='critical,serverAuth,clientAuth')
    cacert = create_ca_cert(careq, cakey)
    # Create the server certificate issued by the newly created CA
    pkey = create_key_pair(key_size)
    req = create_cert_request(
        pkey, domain_or_ip, country, state, locality, organization, organizational_unit, email_address, alt_names,
        digital_key_usage='critical,keyEncipherment,digitalSignature', ext_key_usage='critical,serverAuth,clientAuth')
    cert = create_cert(req, cacert, cakey, expire=expire)

    def export(dest, obj, func, *args):
        # Serialize obj with func and write it to dest (path or file-like
        # object); no-op when dest is None.
        if dest is not None:
            data = func(obj, *args)
            if isinstance(data, str):
                data = data.encode('utf-8')
            if hasattr(dest, 'write'):
                dest.write(data)
            else:
                with open(dest, 'wb') as f:
                    f.write(data)
    export(ca_cert_file, cacert, serialize_cert)
    export(server_cert_file, cert, serialize_cert)
    export(server_key_file, pkey, serialize_key, encrypt_key_with_password)
    export(ca_key_file, cakey, serialize_key, encrypt_key_with_password)
    return cacert, cakey, cert, pkey
def develop():
    # Manual smoke test: generate a CA + server certificate, print their
    # details and verify the signature chain.
    cacert, cakey, cert, pkey = create_server_cert('test.me', alt_names=['DNS:moose.cat', 'DNS:huge.bat'])
    print("CA Certificate")
    print(cert_info(cacert))
    print(), print(), print()
    print('Server Certificate')
    print(cert_info(cert))
    certgen.verify_cert(cacert, cacert)
    certgen.verify_cert(cacert, cert)
if __name__ == '__main__':
    # Run the smoke test when executed directly.
    develop()
27,188 | localunzip.py | kovidgoyal_calibre/src/calibre/utils/localunzip.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Try to read invalid zip files with missing or damaged central directories.
These are apparently produced in large numbers by the fruitcakes over at B&N.
Tries to only use the local headers to extract data from the damaged zip file.
'''
import os
import shutil
import sys
import zlib
from collections import OrderedDict, namedtuple
from struct import calcsize, pack, unpack
from calibre.ptempfile import SpooledTemporaryFile
from polyglot.builtins import itervalues
HEADER_SIG = 0x04034b50  # local file header signature (b'PK\x03\x04' little-endian)
HEADER_BYTE_SIG = pack(b'<L', HEADER_SIG)
local_header_fmt = b'<L5HL2L2H'  # struct layout of the fixed part of a local header
local_header_sz = calcsize(local_header_fmt)
ZIP_STORED, ZIP_DEFLATED = 0, 8  # the only compression methods supported here
DATA_DESCRIPTOR_SIG = pack(b'<L', 0x08074b50)
# Parsed local file header; filename/extra are filled in after the fixed
# part has been unpacked.
LocalHeader = namedtuple('LocalHeader',
    'signature min_version flags compression_method mod_time mod_date '
    'crc32 compressed_size uncompressed_size filename_length extra_length '
    'filename extra')
# Windows reserves certain device names that cannot be used as filenames.
# Fixed: the original tuple was missing commas after 'NUL' and 'COM9', so
# implicit string concatenation produced 'NULCOM0' and 'COM9LPT0' and the
# names NUL, COM0, COM9 and LPT0 were never detected as reserved.
windows_reserved_filenames = frozenset((
    'CON', 'PRN', 'AUX', 'CLOCK$', 'NUL',
    'COM0', 'COM1', 'COM2', 'COM3', 'COM4',
    'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
    'LPT0', 'LPT1', 'LPT2', 'LPT3', 'LPT4',
    'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'))
if hasattr(sys, 'getwindowsversion'):
    def is_reserved_filename(x):
        # Only the part before the first period matters: 'con.txt' is
        # reserved, and the check is case-insensitive.
        base = x.partition('.')[0].upper()
        return base in windows_reserved_filenames
else:
    def is_reserved_filename(x):
        # Non-Windows platforms have no reserved filenames.
        return False
def decode_arcname(name):
    '''Decode a zip archive member name to str.

    Bytes names are first tried as UTF-8; on failure the encoding is
    guessed with calibre's chardet wrapper, and as a last resort UTF-8
    with replacement characters is used, so bytes input never raises.
    str input is returned unchanged.
    '''
    if isinstance(name, bytes):
        try:
            return name.decode('utf-8')
        except UnicodeDecodeError:
            pass
        # Imported only when actually needed: the original imported this
        # up-front, so even valid UTF-8 names required calibre at runtime.
        from calibre.ebooks.chardet import detect
        try:
            # detect() may yield None/unknown encodings; any failure
            # (UnicodeDecodeError, LookupError, TypeError) falls through
            # to the lossy fallback. The original used bare except here.
            return name.decode(detect(name)['encoding'])
        except Exception:
            return name.decode('utf-8', 'replace')
    return name
def find_local_header(f):
    '''Scan forward (up to 50KB) for the next local file header signature.

    On success the stream is left positioned just past the fixed-size
    header and the parsed LocalHeader (filename/extra still None) is
    returned. Otherwise the stream position is restored and None is
    returned.
    '''
    start = f.tell()
    window = f.read(50*1024)
    sig_at = window.find(HEADER_BYTE_SIG)
    if sig_at == -1:
        f.seek(start)
        return None
    f.seek(start + sig_at)
    fixed = f.read(local_header_sz)
    if len(fixed) == local_header_sz:
        header = LocalHeader(*(unpack(local_header_fmt, fixed) + (None, None)))
        if header.signature == HEADER_SIG:
            return header
    f.seek(start)
    return None
def find_data_descriptor(f):
    # Scan forward for a data descriptor record (written after the file
    # data when sizes/CRC were unknown at header time). The stream
    # position is always restored before returning.
    pos = f.tell()
    DD = namedtuple('DataDescriptor', 'crc32 compressed_size uncompressed_size')
    raw = b'a'*16  # dummy so the loop condition passes on the first pass
    try:
        while len(raw) >= 16:
            raw = f.read(50*1024)
            idx = raw.find(DATA_DESCRIPTOR_SIG)
            if idx != -1:
                # Position just past the signature and read the three
                # little-endian fields.
                f.seek(f.tell() - len(raw) + idx + len(DATA_DESCRIPTOR_SIG))
                return DD(*unpack(b'<LLL', f.read(12)))
            # Rewind to handle the case of the signature being cut off
            # by the 50K boundary
            f.seek(f.tell()-len(DATA_DESCRIPTOR_SIG))
        raise ValueError('Failed to find data descriptor signature. '
                         'Data descriptors without signatures are not '
                         'supported.')
    finally:
        f.seek(pos)
def read_local_file_header(f):
    # Read and parse the local file header at the current stream position.
    # Returns a complete LocalHeader (with filename/extra, and with
    # crc32/sizes taken from the data descriptor when one is used), None
    # if no parseable header is found, or raises ValueError for
    # encrypted/unsupported entries.
    pos = f.tell()
    raw = f.read(local_header_sz)
    if len(raw) != local_header_sz:
        f.seek(pos)
        return
    header = LocalHeader(*(unpack(local_header_fmt, raw) + (None, None)))
    if header.signature != HEADER_SIG:
        # Not positioned at a header: scan forward for the next one
        # (tolerates damaged zips).
        f.seek(pos)
        header = find_local_header(f)
        if header is None:
            return
    if header.min_version > 20:
        raise ValueError('This ZIP file uses unsupported features')
    if header.flags & 0b1:
        raise ValueError('This ZIP file is encrypted')
    if header.flags & (1 << 13):
        raise ValueError('This ZIP file uses masking, unsupported.')
    if header.compression_method not in {ZIP_STORED, ZIP_DEFLATED}:
        raise ValueError('This ZIP file uses an unsupported compression method')
    has_data_descriptors = header.flags & (1 << 3)
    fname = extra = None
    if header.filename_length > 0:
        fname = f.read(header.filename_length)
        if len(fname) != header.filename_length:
            return
        try:
            fname = fname.decode('ascii')
        except UnicodeDecodeError:
            # Flag bit 11 marks the filename as UTF-8 encoded
            if header.flags & (1 << 11):
                try:
                    fname = fname.decode('utf-8')
                except UnicodeDecodeError:
                    pass
        # Fall back to charset detection and normalize path separators
        fname = decode_arcname(fname).replace('\\', '/')
    if header.extra_length > 0:
        extra = f.read(header.extra_length)
        if len(extra) != header.extra_length:
            return
    if has_data_descriptors:
        # Sizes/CRC were not known when the header was written; take them
        # from the descriptor that follows the file data.
        desc = find_data_descriptor(f)
        header = header._replace(crc32=desc.crc32,
                                 compressed_size=desc.compressed_size,
                                 uncompressed_size=desc.uncompressed_size)
    return LocalHeader(*(
        header[:-2] + (fname, extra)
    ))
def read_compressed_data(f, header):
    '''Read and return the (possibly compressed) payload for *header*.'''
    return f.read(header.compressed_size)
def copy_stored_file(src, size, dest):
    '''Copy exactly *size* bytes from *src* to *dest* in chunks of at most
    20KB. Raises ValueError if *src* runs out of data early.'''
    chunk = min(size, 20*1024)
    remaining = size
    while remaining > 0:
        data = src.read(min(remaining, chunk))
        if not data:
            raise ValueError('Premature end of file')
        dest.write(data)
        remaining -= len(data)
def copy_compressed_file(src, size, dest):
    '''Inflate *size* bytes of raw deflate data from *src* into *dest*.

    Output per input chunk is capped (at most ~100 rounds of 200KB) so a
    zip bomb raises ValueError instead of exhausting memory.
    '''
    inflater = zlib.decompressobj(-15)  # raw deflate stream, no zlib header
    chunk = min(size, 20*1024)
    consumed = 0
    while consumed < size:
        data = src.read(min(size - consumed, chunk))
        if not data and consumed < size:
            raise ValueError('Invalid ZIP file, local header is damaged')
        consumed += len(data)
        dest.write(inflater.decompress(data, 200*1024))
        rounds = 0
        while inflater.unconsumed_tail:
            rounds += 1
            dest.write(inflater.decompress(inflater.unconsumed_tail, 200*1024))
            if rounds > 100:
                raise ValueError('This ZIP file contains a ZIP bomb in %s'%
                        os.path.basename(dest.name))
def _extractall(f, path=None, file_info=None):
    # Walk the stream header by header (no central directory needed),
    # extracting entries under path (when given) and/or recording
    # (data offset, header) pairs in file_info keyed by archive name.
    found = False
    while True:
        header = read_local_file_header(f)
        if not header:
            break
        has_data_descriptors = header.flags & (1 << 3)
        # Bytes to skip past the file data (+16 for a trailing descriptor)
        seekval = header.compressed_size + (16 if has_data_descriptors else 0)
        found = True
        # Sanitize path changing absolute to relative paths and removing .. and
        # .
        fname = header.filename.replace(os.sep, '/')
        fname = os.path.splitdrive(fname)[1]
        parts = [x for x in fname.split('/') if x not in {'', os.path.pardir, os.path.curdir}]
        if not parts:
            continue
        if header.uncompressed_size == 0:
            # Directory
            f.seek(f.tell()+seekval)
            if path is not None:
                bdir = os.path.join(path, *parts)
                if not os.path.exists(bdir):
                    os.makedirs(bdir)
            continue
        # File
        if file_info is not None:
            file_info[header.filename] = (f.tell(), header)
        if path is not None:
            bdir = os.path.join(path, *(parts[:-1]))
            if not os.path.exists(bdir):
                os.makedirs(bdir)
            dest = os.path.join(path, *parts)
            try:
                df = open(dest, 'wb')
            except OSError:
                # On Windows, opening reserved device names fails; give a
                # clearer error in that case.
                if is_reserved_filename(os.path.basename(dest)):
                    raise ValueError('This ZIP file contains a file with a reserved filename'
                            ' that cannot be processed on Windows: {}'.format(os.path.basename(dest)))
                raise
            with df:
                if header.compression_method == ZIP_STORED:
                    copy_stored_file(f, header.compressed_size, df)
                else:
                    copy_compressed_file(f, header.compressed_size, df)
        else:
            # Not extracting: just skip over the file data
            f.seek(f.tell()+seekval)
    if not found:
        raise ValueError('Not a ZIP file')
def extractall(path_or_stream, path=None):
    '''Extract all members of the zip (a path or an open binary stream)
    into *path*, defaulting to the current directory. The stream position
    is restored afterwards; streams opened here are closed.'''
    stream = path_or_stream
    opened_here = not hasattr(stream, 'read')
    if opened_here:
        stream = open(stream, 'rb')
    if path is None:
        path = os.getcwd()
    saved_pos = stream.tell()
    try:
        _extractall(stream, path)
    finally:
        stream.seek(saved_pos)
        if opened_here:
            stream.close()
class LocalZipFile:
    '''Minimal zipfile.ZipFile replacement that reads only local file
    headers, for damaged zip files that are missing or have corrupted
    central directories.'''

    def __init__(self, stream):
        # filename -> (data offset, LocalHeader), built by scanning the
        # whole stream once (no data is extracted here).
        self.file_info = OrderedDict()
        _extractall(stream, file_info=self.file_info)
        self.stream = stream

    def _get_file_info(self, name):
        # Lookup helper that raises a friendly error for unknown members.
        fi = self.file_info.get(name)
        if fi is None:
            raise ValueError('This ZIP container has no file named: %s'%name)
        return fi

    def open(self, name, spool_size=5*1024*1024):
        # Return a temporary (spooled) file with the decompressed data of
        # the named member, positioned at its start.
        if isinstance(name, LocalHeader):
            name = name.filename
        offset, header = self._get_file_info(name)
        self.stream.seek(offset)
        dest = SpooledTemporaryFile(max_size=spool_size)
        if header.compression_method == ZIP_STORED:
            copy_stored_file(self.stream, header.compressed_size, dest)
        else:
            copy_compressed_file(self.stream, header.compressed_size, dest)
        dest.seek(0)
        return dest

    def getinfo(self, name):
        # Return the LocalHeader for the named member.
        offset, header = self._get_file_info(name)
        return header

    def read(self, name, spool_size=5*1024*1024):
        # Decompressed contents of the named member as bytes.
        with self.open(name, spool_size=spool_size) as f:
            return f.read()

    def extractall(self, path=None):
        self.stream.seek(0)
        _extractall(self.stream, path=(path or os.getcwd()))

    def close(self):
        # Present for API compatibility; the stream is owned by the caller.
        pass

    def safe_replace(self, name, datastream, extra_replacements={},
            add_missing=False):
        # Rewrite the container in place (via a temporary spool),
        # replacing the named member and any members in
        # extra_replacements with new data. With add_missing=True,
        # replacements for members not already present are appended.
        from calibre.utils.zipfile import ZipFile, ZipInfo
        replacements = {name:datastream}
        replacements.update(extra_replacements)
        names = frozenset(list(replacements.keys()))
        found = set()

        def rbytes(name):
            # Replacement payload as bytes (file-like objects are read)
            r = replacements[name]
            if not isinstance(r, bytes):
                r = r.read()
            return r

        with SpooledTemporaryFile(max_size=100*1024*1024) as temp:
            ztemp = ZipFile(temp, 'w')
            for offset, header in itervalues(self.file_info):
                if header.filename in names:
                    # Preserve the member's original compression method
                    zi = ZipInfo(header.filename)
                    zi.compress_type = header.compression_method
                    ztemp.writestr(zi, rbytes(header.filename))
                    found.add(header.filename)
                else:
                    ztemp.writestr(header.filename, self.read(header.filename,
                        spool_size=0))
            if add_missing:
                for name in names - found:
                    ztemp.writestr(name, rbytes(name))
            ztemp.close()
            # Copy the rebuilt zip back over the original stream
            zipstream = self.stream
            temp.seek(0)
            zipstream.seek(0)
            zipstream.truncate()
            shutil.copyfileobj(temp, zipstream)
            zipstream.flush()
if __name__ == '__main__':
    # Extract the zip file named on the command line into the current
    # directory.
    extractall(sys.argv[-1])
27,189 | titlecase.py | kovidgoyal_calibre/src/calibre/utils/titlecase.py | #!/usr/bin/env python
"""
Original Perl version by: John Gruber https://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
Modifications to make it work with non-ascii chars by Kovid Goyal
License: http://www.opensource.org/licenses/mit-license.php
"""
import re
from calibre.utils.icu import capitalize
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import upper as icu_upper
__all__ = ['titlecase']
__version__ = '0.5'
# Alternation of "small" words that are normally left lowercase in titles.
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\\.?|via|vs\\.?'
# Punctuation character class used by several patterns below.
PUNCT = r"""!"#$%&'‘’()*+,\-‒–—―./:;?@[\\\]_`{|}~"""
SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-z][.][a-z]', re.I)  # e.g. 'example.com'
UC_ELSEWHERE = re.compile(r'[%s]*?[a-zA-Z]+[A-Z]+?' % PUNCT)  # e.g. 'iTunes'
# First word character, skipping any leading punctuation
CAPFIRST = re.compile(str(r"^[%s]*?(\w)" % PUNCT), flags=re.UNICODE)
SMALL_FIRST = re.compile(fr'^([{PUNCT}]*)({SMALL})\b', re.I|re.U)  # small word at start
SMALL_LAST = re.compile(fr'\b({SMALL})[{PUNCT}]?$', re.I|re.U)  # small word at end
SMALL_AFTER_NUM = re.compile(r'(\d+\s+)(a|an|the)\b', re.I|re.U)
SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL)  # small word starting a sub-phrase
APOS_SECOND = re.compile(r"^[dol]{1}['‘]{1}[a-z]+$", re.I)  # e.g. "d'Artagnan"
UC_INITIALS = re.compile(r"^(?:[A-Z]{1}\.{1}|[A-Z]{1}\.{1}[A-Z]{1})+$")  # e.g. 'J.R.R.'
# Cached language code for lang(), populated lazily.
_lang = None
def lang():
    # Return the interface language code, lower-cased and cached in the
    # module-level _lang.
    global _lang
    if _lang is None:
        from calibre.utils.localization import get_lang
        _lang = get_lang().lower()
    return _lang
def titlecase(text):
    """
    Titlecases input text

    This filter changes all words to Title Caps, and attempts to be clever
    about *un*capitalizing SMALL words like a/an/the in the input.

    The list of "SMALL words" which are not capped comes from
    the New York Times Manual of Style, plus 'vs' and 'v'.

    """
    # An all-caps input is assumed to be shouting and is lowercased first
    # (except initialisms like J.R.R.).
    all_caps = icu_upper(text) == text

    pat = re.compile(r'(\s+)')
    line = []
    for word in pat.split(text):
        if not word:
            continue
        if pat.match(word) is not None:
            # Preserve whitespace runs verbatim
            line.append(word)
            continue
        if all_caps:
            if UC_INITIALS.match(word):
                line.append(word)
                continue
            else:
                word = icu_lower(word)

        if APOS_SECOND.match(word):
            # Words like d'Artagnan: capitalize the first letter and the
            # letter following the apostrophe.
            word = word.replace(word[0], icu_upper(word[0]), 1)
            word = word[:2] + icu_upper(word[2]) + word[3:]
            line.append(word)
            continue
        if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):
            # Leave e.g. example.com or iTunes untouched
            line.append(word)
            continue
        if SMALL_WORDS.match(word):
            line.append(icu_lower(word))
            continue
        # Capitalize the first letter of each hyphenated part
        hyphenated = []
        for item in word.split('-'):
            hyphenated.append(CAPFIRST.sub(lambda m: icu_upper(m.group(0)), item))
        line.append("-".join(hyphenated))

    result = "".join(line)
    # Small words are still capitalized at the start/end of the title,
    # after a number and after sub-phrase punctuation.
    result = SMALL_FIRST.sub(lambda m: '{}{}'.format(
        m.group(1),
        capitalize(m.group(2))
    ), result)
    result = SMALL_AFTER_NUM.sub(lambda m: '{}{}'.format(m.group(1),
        capitalize(m.group(2))
    ), result)
    result = SMALL_LAST.sub(lambda m: capitalize(m.group(0)), result)
    result = SUBPHRASE.sub(lambda m: '{}{}'.format(
        m.group(1),
        capitalize(m.group(2))
    ), result)
    return result
| 3,273 | Python | .py | 84 | 32.309524 | 82 | 0.594286 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,190 | linux_trash.py | kovidgoyal_calibre/src/calibre/utils/linux_trash.py | #!/usr/bin/env python
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# This is a reimplementation of plat_other.py with reference to the
# freedesktop.org trash specification:
# [1] http://www.freedesktop.org/wiki/Specifications/trash-spec
# [2] http://www.ramendik.ru/docs/trashspec.html
# See also:
# [3] http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
#
# For external volumes this implementation will raise an exception if it can't
# find or create the user's trash directory.
import os
import os.path as op
import shutil
import stat
from datetime import datetime
from polyglot.urllib import quote
FILES_DIR = 'files'  # trash subdirectory holding the trashed files
INFO_DIR = 'info'  # trash subdirectory holding .trashinfo metadata
INFO_SUFFIX = '.trashinfo'
# Default of ~/.local/share [3]
XDG_DATA_HOME = op.expanduser(os.environ.get('XDG_DATA_HOME', '~/.local/share'))
HOMETRASH = op.join(XDG_DATA_HOME, 'Trash')
uid = os.getuid()
TOPDIR_TRASH = '.Trash'  # shared per-volume trash directory [2]
TOPDIR_FALLBACK = '.Trash-%s'%uid  # per-user fallback trash directory [2]
def uniquote(raw):
    '''Percent-encode *raw* (str is UTF-8 encoded first) and return str.'''
    data = raw.encode('utf-8') if isinstance(raw, str) else raw
    return str(quote(data))
def is_parent(parent, path):
    '''Return True if *path* is *parent* itself or lies inside it.

    Both arguments are resolved with realpath first so symlinks cannot
    fool the check.
    '''
    path = op.realpath(path)  # In case it's a symlink
    parent = op.realpath(parent)
    if path == parent:
        return True
    # Fixed: a plain path.startswith(parent) wrongly reported e.g.
    # /foo/barbaz as being inside /foo/bar; require a separator at the
    # boundary instead.
    return path.startswith(parent.rstrip(op.sep) + op.sep)
def format_date(date):
    '''Format *date* as the ISO-8601 style timestamp used in .trashinfo files.'''
    return '{:%Y-%m-%dT%H:%M:%S}'.format(date)
def info_for(src, topdir):
    '''Build the contents of a .trashinfo file for *src*.

    Paths under *topdir* are stored relative to it; all other paths are
    stored absolute, as required by the trash spec [2].
    '''
    if topdir is not None and is_parent(topdir, src):
        stored = op.relpath(src, topdir)
    else:
        # ...it MUST not include a ".."" directory, and for files not
        # "under" that directory, absolute pathnames must be used. [2]
        stored = op.abspath(src)
    return (
        '[Trash Info]\n'
        'Path=' + uniquote(stored) + '\n'
        'DeletionDate=' + format_date(datetime.now()) + '\n'
    )
def check_create(dir):
    '''Ensure *dir* exists, creating it (and parents) with mode 0700 [3].'''
    if op.exists(dir):
        return
    os.makedirs(dir, 0o700)
def trash_move(src, dst, topdir=None):
    # Move src into the trash directory dst, writing the matching
    # .trashinfo metadata file. A " N" numeric suffix is appended to the
    # name if it (or its info file) already exists in the trash.
    filename = op.basename(src)
    filespath = op.join(dst, FILES_DIR)
    infopath = op.join(dst, INFO_DIR)
    base_name, ext = op.splitext(filename)
    counter = 0
    destname = filename
    while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
        counter += 1
        destname = f'{base_name} {counter}{ext}'
    check_create(filespath)
    check_create(infopath)
    shutil.move(src, op.join(filespath, destname))
    # Write the metadata last, after the move has succeeded
    with open(op.join(infopath, destname + INFO_SUFFIX), 'wb') as f:
        data = info_for(src, topdir)
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        f.write(data)
def find_mount_point(path):
    '''Walk up from *path* (symlinks resolved) to the nearest mount point.

    "/" is itself a mount point, so the loop always terminates even if
    something is wrong with the intermediate directories.
    '''
    path = op.realpath(path)  # Required to avoid infinite loop
    while not op.ismount(path):
        path, _tail = op.split(path)
    return path
def find_ext_volume_global_trash(volume_root):
    # from [2] Trash directories (1) check for a .Trash dir with the right
    # permissions set.
    trash_dir = op.join(volume_root, TOPDIR_TRASH)
    if not op.exists(trash_dir):
        return None
    mode = os.lstat(trash_dir).st_mode
    # vol/.Trash must be a directory, cannot be a symlink, and must have the
    # sticky bit set.
    if not op.isdir(trash_dir) or op.islink(trash_dir) or not (mode & stat.S_ISVTX):
        return None
    # Per-user subdirectory inside the shared trash dir
    trash_dir = op.join(trash_dir, str(uid))
    try:
        check_create(trash_dir)
    except OSError:
        return None
    return trash_dir
def find_ext_volume_fallback_trash(volume_root):
    '''Create and return the per-user .Trash-$uid directory on the volume.

    from [2] Trash directories (1) create a .Trash-$uid dir. Any OSError
    from directory creation is deliberately allowed to propagate out of
    send2trash.
    '''
    trash_dir = op.join(volume_root, TOPDIR_FALLBACK)
    check_create(trash_dir)
    return trash_dir
def find_ext_volume_trash(volume_root):
    '''Return the volume's shared trash dir, or the per-user fallback.'''
    return (find_ext_volume_global_trash(volume_root) or
            find_ext_volume_fallback_trash(volume_root))
# Pull this out so it's easy to stub (to avoid stubbing lstat itself)
def get_dev(path):
    # Device number of path; lstat so symlinks themselves are examined.
    return os.lstat(path).st_dev
def send2trash(path):
    # Move path to the freedesktop.org trash: the home trash when path is
    # on the same device, otherwise a per-volume trash directory. Raises
    # OSError when path is missing/unwritable or no trash dir is usable.
    if not op.exists(path):
        raise OSError("File not found: %s" % path)
    # ...should check whether the user has the necessary permissions to delete
    # it, before starting the trashing operation itself. [2]
    if not os.access(path, os.W_OK):
        raise OSError("Permission denied: %s" % path)
    # if the file to be trashed is on the same device as HOMETRASH we
    # want to move it there.
    path_dev = get_dev(path)
    # If XDG_DATA_HOME or HOMETRASH do not yet exist we need to stat the
    # home directory, and these paths will be created further on if needed.
    trash_dev = get_dev(op.expanduser('~'))
    if path_dev == trash_dev:
        topdir = XDG_DATA_HOME
        dest_trash = HOMETRASH
    else:
        topdir = find_mount_point(path)
        trash_dev = get_dev(topdir)
        if trash_dev != path_dev:
            raise OSError("Couldn't find mount point for %s" % path)
        dest_trash = find_ext_volume_trash(topdir)
    trash_move(path, dest_trash, topdir)
| 5,427 | Python | .py | 134 | 35.634328 | 106 | 0.68387 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,191 | threadpool.py | kovidgoyal_calibre/src/calibre/utils/threadpool.py | """Easy to use object-oriented thread pool framework.
A thread pool is an object that maintains a pool of worker threads to perform
time consuming operations in parallel. It assigns jobs to the threads
by putting them in a work request queue, where they are picked up by the
next available thread. This then performs the requested operation in the
background and puts the results in a another queue.
The thread pool object can then collect the results from all threads from
this queue as soon as they become available or after all threads have
finished their work. It's also possible, to define callbacks to handle
each result as it comes in.
The basic concept and some code was taken from the book "Python in a Nutshell"
by Alex Martelli, copyright 2003, ISBN 0-596-00188-6, from section 14.5
"Threaded Program Architecture". I wrapped the main program logic in the
ThreadPool class, added the WorkRequest class and the callback system and
tweaked the code here and there. Kudos also to Florent Aide for the exception
handling mechanism.
Basic usage:
>>> pool = TreadPool(poolsize)
>>> requests = makeRequests(some_callable, list_of_args, callback)
>>> [pool.putRequest(req) for req in requests]
>>> pool.wait()
See the end of the module code for a brief, annotated usage example.
Website : http://chrisarndt.de/en/software/python/threadpool/
"""
__all__ = [
'makeRequests',
'NoResultsPending',
'NoWorkersAvailable',
'ThreadPool',
'WorkRequest',
'WorkerThread'
]
__author__ = "Christopher Arndt"
__version__ = "1.2.3"
__revision__ = "$Revision: 1.5 $"
__date__ = "$Date: 2006/06/23 12:32:25 $"
__license__ = 'Python license'
# standard library modules
import threading
from polyglot import queue
# exceptions
class NoResultsPending(Exception):
    """All work requests have been processed."""


class NoWorkersAvailable(Exception):
    """No worker threads available to process remaining requests."""
# classes
class WorkerThread(threading.Thread):
    """Background thread connected to the requests/results queues.

    A worker thread sits in the background and picks up work requests from
    one queue and puts the results in another until it is dismissed.
    """

    def __init__(self, requestsQueue, resultsQueue, **kwds):
        """Set up thread in daemonic mode and start it immediately.

        requestsQueue and resultsQueue are instances of queue.Queue passed
        by the ThreadPool class when it creates a new worker thread.
        """
        kwds['daemon'] = True
        threading.Thread.__init__(self, **kwds)
        self.workRequestQueue = requestsQueue
        self.resultQueue = resultsQueue
        self._dismissed = threading.Event()
        self.start()

    def run(self):
        """Repeatedly process the job queue until told to exit."""
        # is_set() replaces the deprecated camelCase isSet() alias.
        while not self._dismissed.is_set():
            # thread blocks here, if queue empty
            request = self.workRequestQueue.get()
            if self._dismissed.is_set():
                # if told to exit, return the work request we just picked up
                self.workRequestQueue.put(request)
                break  # and exit
            try:
                self.resultQueue.put(
                    (request, request.callable(*request.args, **request.kwds))
                )
            except Exception:
                # Fixed: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Report the traceback as
                # the result instead of killing the worker.
                request.exception = True
                import traceback
                self.resultQueue.put((request, traceback.format_exc()))

    def dismiss(self):
        """Sets a flag to tell the thread to exit when done with current job.
        """
        self._dismissed.set()
class WorkRequest:
    """One unit of work destined for the request queue.

    For the common case of building many requests for the same callable
    with varying arguments, see the module-level makeRequests() helper.
    """

    def __init__(self, callable, args=None, kwds=None, requestID=None,
            callback=None, exc_callback=None):
        """Bundle a callable with its arguments and result/error callbacks.

        callback, if given, is invoked as callback(request, result) when the
        result is collected from the result queue; exc_callback is invoked
        as exc_callback(request, exception_details) when the callable
        raised. Extra data for the callbacks can simply be attached to the
        request object.

        requestID must be hashable (it keys a dict inside ThreadPool) and
        defaults to id(self).
        """
        if requestID is not None:
            try:
                hash(requestID)
            except TypeError:
                raise TypeError("requestID must be hashable.")
            self.requestID = requestID
        else:
            self.requestID = id(self)
        self.exception = False
        self.callable = callable
        self.callback = callback
        self.exc_callback = exc_callback
        self.args = args or []
        self.kwds = kwds or {}
class ThreadPool:
    """A thread pool, distributing work requests and collecting results.

    See the module docstring for more information.
    """

    def __init__(self, num_workers, q_size=0):
        """Set up the thread pool and start num_workers worker threads.

        num_workers is the number of worker threads to start initially.
        If q_size > 0 the size of the work request queue is limited and
        the thread pool blocks when the queue is full and it tries to put
        more work requests in it (see putRequest method).
        """
        self.requestsQueue = queue.Queue(q_size)
        self.resultsQueue = queue.Queue()
        self.workers = []
        self.workRequests = {}
        self.createWorkers(num_workers)

    def createWorkers(self, num_workers):
        """Add num_workers worker threads to the pool."""
        for i in range(num_workers):
            self.workers.append(WorkerThread(self.requestsQueue,
                self.resultsQueue))

    def dismissWorkers(self, num_workers):
        """Tell num_workers worker threads to quit after their current task.
        """
        for i in range(min(num_workers, len(self.workers))):
            worker = self.workers.pop()
            worker.dismiss()

    def putRequest(self, request, block=True, timeout=0):
        """Put work request into work queue and save its id for later."""
        assert isinstance(request, WorkRequest)
        self.requestsQueue.put(request, block, timeout)
        self.workRequests[request.requestID] = request

    def poll(self, block=False):
        """Process any new results in the queue.

        Raises NoResultsPending once every submitted request has been
        processed, and NoWorkersAvailable when blocking with no workers
        left to service the remaining requests.
        """
        while True:
            # still results pending?
            if not self.workRequests:
                raise NoResultsPending
            # are there still workers to process remaining requests?
            elif block and not self.workers:
                raise NoWorkersAvailable
            try:
                # get back next results
                request, result = self.resultsQueue.get(block=block)
                # has an exception occurred?
                if request.exception and request.exc_callback:
                    request.exc_callback(request, result)
                # hand results to callback, if any. Do not call the normal
                # callback when the request failed and an exc_callback
                # already handled it.
                if request.callback and not \
                       (request.exception and request.exc_callback):
                    request.callback(request, result)
                del self.workRequests[request.requestID]
            except queue.Empty:
                break

    def wait(self, sleep=0):
        """Wait for results, blocking until all have arrived.

        sleep is the pause (in seconds) between polls of the result queue.
        """
        # Local import: the module does not import time at top level (it is
        # only imported inside the __main__ demo), so without this, calling
        # wait() from library code raised NameError.
        import time
        while 1:
            try:
                self.poll(True)
                time.sleep(sleep)
            except NoResultsPending:
                break
# helper functions
def makeRequests(callable, args_list, callback=None, exc_callback=None):
    """Build one WorkRequest per item in args_list for the same callable.

    Each item in args_list is either a 2-tuple of
    (positional_arguments, keyword_arguments) or a single non-tuple value
    that becomes the sole positional argument.

    See the WorkRequest docstring for the meaning of callback and
    exc_callback.
    """
    requests = []
    for item in args_list:
        if isinstance(item, tuple):
            args, kwds = item[0], item[1]
        else:
            args, kwds = [item], None
        requests.append(
            WorkRequest(callable, args, kwds, callback=callback,
                exc_callback=exc_callback)
        )
    return requests
################
# USAGE EXAMPLE
################
if __name__ == '__main__':
    # Self-test / demo: build a pool, feed it work, then poll for results
    # while the main thread keeps doing other things.
    import random
    import time
    # the work the threads will have to do (rather trivial in our example)
    def do_something(data):
        time.sleep(random.randint(1,5))
        result = round(random.random() * data, 5)
        # just to show off, we throw an exception once in a while
        if result > 3:
            raise RuntimeError("Something extraordinary happened!")
        return result
    # this will be called each time a result is available
    def print_result(request, result):
        print(f"**Result: {result} from request #{request.requestID}")
    # this will be called when an exception occurs within a thread
    def handle_exception(request, exc_info):
        print("Exception occurred in request #%s: %s" %
            (request.requestID, exc_info[1]))
    # assemble the arguments for each job to a list...
    data = [random.randint(1,10) for i in range(20)]
    # ... and build a WorkRequest object for each item in data
    requests = makeRequests(do_something, data, print_result, handle_exception)
    # or the other form of args_lists accepted by makeRequests: ((,), {})
    data = [((random.randint(1,10),), {}) for i in range(20)]
    requests.extend(
        makeRequests(do_something, data, print_result, handle_exception)
    )
    # we create a pool of 3 worker threads
    main = ThreadPool(3)
    # then we put the work requests in the queue...
    for req in requests:
        main.putRequest(req)
        print("Work request #%s added." % req.requestID)
    # or shorter:
    # [main.putRequest(req) for req in requests]
    # ...and wait for the results to arrive in the result queue
    # by using ThreadPool.wait(). This would block until results for
    # all work requests have arrived:
    # main.wait()
    # instead we can poll for results while doing something else:
    i = 0
    while 1:
        try:
            main.poll()
            print("Main thread working...")
            time.sleep(0.5)
            if i == 10:
                # demonstrate growing the pool at runtime
                print("Adding 3 more worker threads...")
                main.createWorkers(3)
            i += 1
        except KeyboardInterrupt:
            print("Interrupted!")
            break
        except NoResultsPending:
            print("All results collected.")
            break
| 11,785 | Python | .py | 269 | 35.553903 | 79 | 0.656037 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,192 | random_ua.py | kovidgoyal_calibre/src/calibre/utils/random_ua.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
import json
import random
from calibre.utils.resources import get_path as P
def user_agent_data():
    # Cache the parsed JSON on the function object so the resource file is
    # read and decoded only once per process.
    cached = getattr(user_agent_data, 'ans', None)
    if cached is None:
        raw = P('user-agent-data.json', data=True, allow_user_override=False)
        cached = user_agent_data.ans = json.loads(raw)
    return cached
def common_english_words():
    # Lazily load and cache the list of common English words shipped as a
    # calibre resource, one word per line.
    cached = getattr(common_english_words, 'ans', None)
    if cached is None:
        text = P('common-english-words.txt', data=True).decode('utf-8')
        cached = common_english_words.ans = tuple(line.strip() for line in text.splitlines())
    return cached
def common_user_agents():
    # The list of frequently seen browser user-agent strings.
    data = user_agent_data()
    return data['common_user_agents']
def common_chrome_user_agents():
    # Yield only the Chrome entries from the common user-agent list.
    for ua in user_agent_data()['common_user_agents']:
        if 'Chrome/' in ua:
            yield ua
def choose_randomly_by_popularity(ua_list):
    # Weight the random choice by real-world popularity when popularity
    # data is available; otherwise choose uniformly.
    popularity = user_agents_popularity_map()
    if popularity:
        weights = tuple(popularity[ua] for ua in ua_list)
    else:
        weights = None
    return random.choices(ua_list, weights=weights)[0]
def random_common_chrome_user_agent():
    # Pick a Chrome user agent, biased towards the most popular ones.
    candidates = tuple(common_chrome_user_agents())
    return choose_randomly_by_popularity(candidates)
def user_agents_popularity_map():
    # Mapping of user-agent string -> popularity weight (may be empty).
    data = user_agent_data()
    return data.get('user_agents_popularity', {})
def random_desktop_platform():
    # A random platform fragment suitable for building desktop UA strings.
    platforms = user_agent_data()['desktop_platforms']
    return random.choice(platforms)
def accept_header_for_ua(ua):
    # See https://developer.mozilla.org/en-US/docs/Web/HTTP/Content_negotiation/List_of_default_Accept_values
    firefox = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8'
    chrome = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
    return firefox if 'Firefox/' in ua else chrome
def common_english_word_ua():
    # Build a fake "word1/word2" UA token from two distinct common words.
    words = common_english_words()
    first = random.choice(words)
    second = first
    while second == first:
        second = random.choice(words)
    return f'{first}/{second}'
| 1,970 | Python | .py | 46 | 37.76087 | 136 | 0.691741 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,193 | draw.py | kovidgoyal_calibre/src/calibre/utils/magick/draw.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from calibre import fit_image
from calibre.utils.img import add_borders_to_image as abti
from calibre.utils.img import image_to_data
from calibre.utils.img import save_cover_data_to as _save_cover_data_to
from calibre.utils.imghdr import identify as _identify
from calibre.utils.magick import Image, create_canvas
def _data_to_image(data):
    # Accept either raw image bytes or an already constructed Image object.
    if isinstance(data, Image):
        return data
    img = Image()
    img.load(data)
    return img
def minify_image(data, minify_to=(1200, 1600), preserve_aspect_ratio=True):
    '''
    Shrink the image to fit within the specified size; if it already fits,
    it is returned unchanged.

    :param data: Image data as bytestring or Image object
    :param minify_to: A tuple (width, height) giving the target size
    :param preserve_aspect_ratio: whether to preserve the original aspect ratio
    '''
    img = _data_to_image(data)
    width, height = img.size
    max_width, max_height = minify_to
    if width <= max_width and height <= max_height:
        return img
    if preserve_aspect_ratio:
        _scaled, max_width, max_height = fit_image(width, height, max_width, max_height)
    img.size = (max_width, max_height)
    return img
def save_cover_data_to(data, path, bgcolor='#ffffff', resize_to=None,
        return_data=False, compression_quality=90, minify_to=None,
        grayscale=False):
    '''
    Saves image in data to path, in the format specified by the path
    extension. Removes any transparency. If there is no transparency and no
    resize and the input and output image formats are the same, no changes are
    made.

    :param data: Image data as bytestring or Image object
    :param compression_quality: The quality of the image after compression.
        Number between 1 and 100. 1 means highest compression, 100 means no
        compression (lossless).
    :param bgcolor: The color for transparent pixels. Must be specified in hex.
    :param resize_to: A tuple (width, height) or None for no resizing
    :param minify_to: A tuple (width, height) to specify maximum target size.
        The image will be resized to fit into this target size. If None the
        value from the tweak is used.
    :param grayscale: If True, the image is grayscaled
    :param return_data: If True, the encoded image data is returned instead
        of being written to path
    '''
    # The output format is taken from the path extension even when only
    # returning data.
    fmt = os.path.splitext(path)[1]
    if return_data:
        path = None
    if isinstance(data, Image):
        data = data.img
    return _save_cover_data_to(
        data, path, bgcolor=bgcolor, resize_to=resize_to, compression_quality=compression_quality, minify_to=minify_to, grayscale=grayscale, data_fmt=fmt)
def thumbnail(data, width=120, height=120, bgcolor='#ffffff', fmt='jpg',
        preserve_aspect_ratio=True, compression_quality=70):
    # Generate a thumbnail of the image in data, composed onto a canvas of
    # bgcolor, returning (width, height, image_data). Note that the fmt
    # parameter is accepted for backwards compatibility but not used by the
    # encoding step.
    img = Image()
    img.load(data)
    owidth, oheight = img.size
    width = owidth if width is None else width
    height = oheight if height is None else height
    if preserve_aspect_ratio:
        scaled, nwidth, nheight = fit_image(owidth, oheight, width, height)
    else:
        nwidth, nheight = width, height
        scaled = owidth > width or oheight > height
    if scaled:
        img.size = (nwidth, nheight)
    canvas = create_canvas(img.size[0], img.size[1], bgcolor)
    canvas.compose(img)
    data = image_to_data(canvas.img, compression_quality=compression_quality)
    return (canvas.size[0], canvas.size[1], data)
def identify_data(data):
    '''
    Identify the image in data. Returns a 3-tuple
    (width, height, format)
    or raises an Exception if data is not an image.
    '''
    fmt, w, h = _identify(data)
    return w, h, fmt
def identify(path):
    '''
    Identify the image at path. Returns a 3-tuple
    (width, height, format)
    or raises an Exception.
    '''
    with open(path, 'rb') as src:
        fmt, w, h = _identify(src)
    return w, h, fmt
def add_borders_to_image(img_data, left=0, top=0, right=0, bottom=0,
        border_color='#ffffff', fmt='jpg'):
    # Pad the image with a solid colored border and re-encode it in fmt.
    bordered = abti(img_data, left=left, top=top, right=right,
                    bottom=bottom, border_color=border_color)
    return image_to_data(bordered, fmt=fmt)
| 4,273 | Python | .py | 104 | 35.673077 | 154 | 0.693105 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,194 | legacy.py | kovidgoyal_calibre/src/calibre/utils/magick/legacy.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import os
from io import BytesIO
from calibre.utils.img import (
add_borders_to_image,
clone_image,
create_canvas,
despeckle_image,
flip_image,
gaussian_blur_image,
gaussian_sharpen_image,
grayscale_image,
image_and_format_from_data,
image_has_transparent_pixels,
image_to_data,
null_image,
overlay_image,
quantize_image,
remove_borders_from_image,
resize_image,
rotate_image,
set_image_opacity,
texture_image,
)
from calibre.utils.imghdr import identify
class PixelWand:
    # Minimal stand-in for the old ImageMagick PixelWand: it only carries a
    # fill color, used by the legacy Image API below.
    def __init__(self):
        self.color = '#ffffff'
class Image:
    '''Drop-in replacement for the old ImageMagick based Image class,
    implemented on top of Qt via the helpers in calibre.utils.img. The
    legacy API (method names, ImageMagick-style type/colorspace strings)
    is preserved; several ImageMagick-only parameters are accepted but
    ignored.'''

    def __init__(self):
        self.read_format = None   # format the current data was decoded from
        self.write_format = None  # explicit output format, overrides read_format
        self.img = null_image()

    @property
    def clone(self):
        ' A deep copy of this Image, including its format settings. '
        ans = Image()
        ans.img = clone_image(self.img)
        ans.read_format = self.read_format
        ans.write_format = self.write_format
        return ans

    def open(self, path_or_file):
        ' Load image data from a file path or an open binary file-like object. '
        if hasattr(path_or_file, 'read'):
            self.load(path_or_file.read())
        else:
            with open(path_or_file, 'rb') as f:
                self.load(f.read())

    def load(self, data):
        ' Decode the image in the bytestring data, remembering its format. '
        if not data:
            raise ValueError('No image data present')
        self.img, self.read_format = image_and_format_from_data(data)
    read = load

    def from_qimage(self, img):
        ' Replace the current image with a copy of the given QImage. '
        self.img = clone_image(img)

    def to_qimage(self):
        ' Return a copy of the underlying QImage. '
        return clone_image(self.img)

    @property
    def type(self):
        ' Legacy ImageMagick type name: PaletteType for indexed images. '
        if len(self.img.colorTable()) > 0:
            return 'PaletteType'
        return 'TrueColorType'

    @type.setter
    def type(self, t):
        # Only grayscale/palette conversions are supported; other type
        # names are silently ignored, matching the permissive legacy API.
        if t == 'GrayscaleType':
            self.img = grayscale_image(self.img)
        elif t == 'PaletteType':
            self.img = quantize_image(self.img)

    @property
    def format(self):
        ' The output format, falling back to the format the data was read in. '
        return self.write_format or self.read_format

    @format.setter
    def format(self, val):
        self.write_format = val

    @property
    def colorspace(self):
        return 'RGBColorspace'

    @colorspace.setter
    def colorspace(self, val):
        raise NotImplementedError('Changing image colorspace is not supported')

    @property
    def size(self):
        ' (width, height) of the image; assigning a new size resizes it. '
        return self.img.width(), self.img.height()

    @size.setter
    def size(self, val):
        w, h = val[:2]
        self.img = resize_image(self.img, w, h)

    def save(self, path, format=None):
        ' Write the image to path, deducing the format from the extension if not given. '
        if format is None:
            ext = os.path.splitext(path)[1]
            if len(ext) < 2:
                raise ValueError('No format specified')
            format = ext[1:]
        format = format.upper()
        with open(path, 'wb') as f:
            f.write(self.export(format))

    def compose(self, img, left=0, top=0, operation='OverCompositeOp'):
        ''' Draw img over this image at (left, top). The operation parameter
        is accepted for compatibility but ignored (always over-composite). '''
        bounds = self.size
        if left < 0 or top < 0 or left >= bounds[0] or top >= bounds[1]:
            raise ValueError('left and/or top out of bounds')
        self.img = overlay_image(img.img, self.img, left=left, top=top)

    def rotate(self, background_pixel_wand, degrees):
        ' Rotate by degrees; the background wand is ignored. '
        self.img = rotate_image(self.img, degrees)

    def quantize(self, number_colors, colorspace='RGBColorspace', treedepth=0, dither=True, measure_error=False):
        ' Reduce to at most number_colors colors; only number_colors and dither are honored. '
        self.img = quantize_image(self.img, max_colors=number_colors, dither=dither)

    def identify(self, data):
        ' Return (width, height, fmt) for the image in data. '
        fmt, width, height = identify(data)
        return width, height, fmt

    def remove_border(self, fuzz=None):
        ''' Trim solid borders from the image. fuzz, if given, must be in
        the range 0-255; out of range values are treated as None. '''
        # The previous check, `fuzz is not None and fuzz < 0 or fuzz > 255`,
        # was mis-parenthesized: with the default fuzz=None it evaluated
        # `None > 255` and raised TypeError on every call.
        if fuzz is not None and (fuzz < 0 or fuzz > 255):
            fuzz = None
        self.img = remove_borders_from_image(self.img, fuzz)
    trim = remove_border

    def add_border(self, pixel_wand, dx, dy):
        ' Add a border of dx horizontal and dy vertical pixels in the wand color. '
        self.img = add_borders_to_image(self.img, left=dx, top=dy, right=dx, bottom=dy, border_color=pixel_wand.color)

    def blur(self, radius=-1, sigma=3.0):
        self.img = gaussian_blur_image(self.img, radius, sigma)

    def copy(self, img):
        ' Replace this image with a copy of img (another legacy Image). '
        self.img = clone_image(img.img)

    def create_canvas(self, width, height, background_pixel_wand):
        self.img = create_canvas(width, height, background_pixel_wand)

    def despeckle(self):
        self.img = despeckle_image(self.img)

    def export(self, fmt='JPEG'):
        ' Return the image encoded in fmt as a bytestring. '
        if fmt.lower() == 'gif':
            # Qt cannot write GIF; go via Pillow using an intermediate PNG
            data = image_to_data(self.img, fmt='PNG', png_compression_level=0)
            from PIL import Image
            i = Image.open(BytesIO(data))
            buf = BytesIO()
            i.save(buf, 'gif')
            return buf.getvalue()
        return image_to_data(self.img, fmt=fmt)

    def flip(self, vertical=True):
        self.img = flip_image(self.img, horizontal=not vertical, vertical=vertical)

    def has_transparent_pixels(self):
        return image_has_transparent_pixels(self.img)

    def set_border_color(self, *args, **kw):
        pass  # no-op

    def set_compression_quality(self, *args, **kw):
        pass  # no-op

    def set_opacity(self, alpha=0.5):
        self.img = set_image_opacity(self.img, alpha)

    def set_page(self, *args, **kw):
        pass  # no-op

    def sharpen(self, radius=0, sigma=3):
        self.img = gaussian_sharpen_image(self.img, radius, sigma)

    def texture(self, img):
        self.img = texture_image(self.img, img.img)

    def thumbnail(self, width, height):
        self.img = resize_image(self.img, width, height)
| 5,480 | Python | .py | 149 | 29.147651 | 118 | 0.623983 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,195 | __init__.py | kovidgoyal_calibre/src/calibre/utils/magick/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.utils.magick.legacy import Image, PixelWand
if False:
PixelWand
def create_canvas(width, height, bgcolor='#ffffff'):
    ' Return a new Image consisting of a blank canvas of the given size and background color. '
    img = Image()
    img.create_canvas(int(width), int(height), str(bgcolor))
    return img
| 387 | Python | .py | 11 | 32.181818 | 63 | 0.718919 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,196 | lib.py | kovidgoyal_calibre/src/calibre/utils/winreg/lib.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import ctypes
import ctypes.wintypes as types
import datetime
import numbers
import struct
from calibre_extensions import winutil
try:
import winreg
except ImportError:
import _winreg as winreg
# Binding to C library {{{
advapi32 = ctypes.windll.advapi32
HKEY = types.HKEY
PHKEY = ctypes.POINTER(HKEY)
DWORD = types.DWORD
BYTE = types.BYTE
LONG = types.LONG
ULONG = types.ULONG
LPDWORD = ctypes.POINTER(DWORD)
LPBYTE = ctypes.POINTER(BYTE)
LPCWSTR = types.LPCWSTR
LPWSTR = types.LPWSTR
LPCVOID = types.LPCVOID
HKEY_CURRENT_USER = HKCU = HKEY(ULONG(winreg.HKEY_CURRENT_USER).value)
HKEY_CLASSES_ROOT = HKCR = HKEY(ULONG(winreg.HKEY_CLASSES_ROOT).value)
HKEY_LOCAL_MACHINE = HKLM = HKEY(ULONG(winreg.HKEY_LOCAL_MACHINE).value)
KEY_READ = winreg.KEY_READ
KEY_ALL_ACCESS = winreg.KEY_ALL_ACCESS
RRF_RT_ANY = 0x0000ffff
RRF_NOEXPAND = 0x10000000
RRF_ZEROONFAILURE = 0x20000000
class FILETIME(ctypes.Structure):
    # Win32 FILETIME: a 64-bit count of 100ns intervals since 1601-01-01,
    # stored as low/high 32-bit halves.
    _fields_ = [("dwLowDateTime", DWORD), ("dwHighDateTime", DWORD)]
def default_errcheck(result, func, args):
    # Standard ctypes errcheck: any non-success status code becomes a
    # WinError exception.
    success = getattr(winutil, 'ERROR_SUCCESS', 0)  # winutil becomes None during interpreter shutdown
    if result != success:
        raise ctypes.WinError(result)
    return args
null = object()  # sentinel: distinguishes "no default supplied" from None


class a:
    # Describes one argument of a wrapped Win32 function, producing the
    # paramflags entry ctypes expects: (direction, name[, default]), where
    # direction 1 means input and 2 means output.
    def __init__(self, name, typ, default=null, in_arg=True):
        self.typ = typ
        direction = 1 if in_arg else 2
        if default is null:
            self.spec = (direction, name)
        else:
            self.spec = (direction, name, default)
def cwrap(name, restype, *args, **kw):
    # Build a ctypes foreign function prototype for the named Win32 API.
    # args are `a` instances describing each parameter; kw may override the
    # source DLL ('lib', default advapi32) and the error checker
    # ('errcheck', default default_errcheck). Output parameters (direction
    # 2) are returned to the Python caller instead of being passed in.
    params = (restype,) + tuple(x.typ for x in args)
    paramflags = tuple(x.spec for x in args)
    func = ctypes.WINFUNCTYPE(*params)((name, kw.get('lib', advapi32)), paramflags)
    func.errcheck = kw.get('errcheck', default_errcheck)
    return func
# Prototypes for the registry key open/create/close APIs. The PHKEY result
# parameters are outputs, so the wrapped functions return the new handle
# (and, for create, the disposition flag) directly.
RegOpenKey = cwrap(
    'RegOpenKeyExW', LONG, a('key', HKEY), a('sub_key', LPCWSTR), a('options', DWORD, 0), a('access', ULONG, KEY_READ), a('result', PHKEY, in_arg=False))
RegCreateKey = cwrap(
    'RegCreateKeyExW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, ''), a('reserved', DWORD, 0), a('cls', LPWSTR, None), a('options', DWORD, 0),
    a('access', ULONG, KEY_ALL_ACCESS), a('security', ctypes.c_void_p, 0), a('result', PHKEY, in_arg=False), a('disposition', LPDWORD, in_arg=False))
RegCloseKey = cwrap('RegCloseKey', LONG, a('key', HKEY))
def enum_value_errcheck(result, func, args):
    # errcheck for enumeration APIs: translate the Win32 status codes that
    # have special meaning while iterating (buffer growth and end of data).
    if result == winutil.ERROR_SUCCESS:
        return args
    if result == winutil.ERROR_NO_MORE_ITEMS:
        raise StopIteration()
    if result == winutil.ERROR_MORE_DATA:
        raise ValueError('buffer too small')
    raise ctypes.WinError(result)
# Enumerates the values of a key by index; raises StopIteration (via
# enum_value_errcheck) once the index runs past the last value.
RegEnumValue = cwrap(
    'RegEnumValueW', LONG, a('key', HKEY), a('index', DWORD), a('value_name', LPWSTR), a('value_name_size', LPDWORD), a('reserved', LPDWORD),
    a('value_type', LPDWORD), a('data', LPBYTE), a('data_size', LPDWORD), errcheck=enum_value_errcheck)
def last_error_errcheck(result, func, args):
    # errcheck for APIs that signal failure via a zero return value and
    # report details through GetLastError().
    if not result:
        raise ctypes.WinError()
    return args
# ExpandEnvironmentStringsW returns the number of characters written (zero
# on failure), hence the GetLastError-based errcheck. It lives in kernel32,
# not the default advapi32.
ExpandEnvironmentStrings = cwrap(
    'ExpandEnvironmentStringsW', DWORD, a('src', LPCWSTR), a('dest', LPWSTR), a('size', DWORD), errcheck=last_error_errcheck, lib=ctypes.windll.kernel32)
def expand_environment_strings(src):
    # Expand %VAR% references in src, using a buffer sized at the 32KB
    # maximum for Windows environment data.
    dest = ctypes.create_unicode_buffer(32 * 1024)
    ExpandEnvironmentStrings(src, dest, len(dest))
    return dest.value
def convert_to_registry_data(value, has_expansions=False):
    '''Convert a Python value into (buffer, registry_type, size) suitable
    for RegSetKeyValue.

    Strings become REG_SZ (or REG_EXPAND_SZ when has_expansions), lists and
    tuples become REG_MULTI_SZ, integers become REG_DWORD (or REG_QWORD
    when too large for 32 bits), bytes become REG_BINARY and None becomes
    REG_NONE.
    '''
    if value is None:
        return None, winreg.REG_NONE, 0
    if isinstance(value, str):
        # Check for str only: the old isinstance(value, (str, bytes)) test
        # crashed create_unicode_buffer() on bytes input and made the
        # REG_BINARY branch below unreachable.
        buf = ctypes.create_unicode_buffer(value)
        return buf, (winreg.REG_EXPAND_SZ if has_expansions else winreg.REG_SZ), len(buf) * 2
    if isinstance(value, (list, tuple)):
        # REG_MULTI_SZ is a sequence of NUL-terminated strings with an
        # extra terminating NUL at the end.
        buf = ctypes.create_unicode_buffer('\0'.join(map(str, value)) + '\0\0')
        return buf, winreg.REG_MULTI_SZ, len(buf) * 2
    if isinstance(value, numbers.Integral):
        try:
            raw, dtype = struct.pack('L', value), winreg.REG_DWORD
        except struct.error:
            # Too big for a DWORD, store as a 64-bit QWORD. The old code
            # read `raw = struct.pack('Q', value), winutil.REG_QWORD`,
            # which bound a tuple to raw and left dtype unset.
            raw, dtype = struct.pack('Q', value), winutil.REG_QWORD
        buf = ctypes.create_string_buffer(raw)
        return buf, dtype, len(buf)
    if isinstance(value, bytes):
        buf = ctypes.create_string_buffer(value)
        return buf, winreg.REG_BINARY, len(buf)
    raise ValueError('Unknown data type: %r' % value)
def convert_registry_data(raw, size, dtype):
    # Convert a raw registry value buffer into the corresponding Python
    # object. raw is a ctypes buffer, size the number of valid bytes in it
    # and dtype one of the winreg REG_* type constants.
    if dtype == winreg.REG_NONE:
        return None
    if dtype == winreg.REG_BINARY:
        return ctypes.string_at(raw, size)
    if dtype in (winreg.REG_SZ, winreg.REG_EXPAND_SZ, winreg.REG_MULTI_SZ):
        # Registry strings are UTF-16, so size is in bytes: chars = size // 2
        ans = ctypes.wstring_at(raw, size // 2).rstrip('\0')
        if dtype == winreg.REG_MULTI_SZ:
            # MULTI_SZ is NUL separated strings
            ans = tuple(ans.split('\0'))
        elif dtype == winreg.REG_EXPAND_SZ:
            ans = expand_environment_strings(ans)
        return ans
    if dtype == winreg.REG_DWORD:
        if size == 0:
            return 0
        return ctypes.cast(raw, LPDWORD).contents.value
    if dtype == winutil.REG_QWORD:
        if size == 0:
            return 0
        return ctypes.cast(raw, ctypes.POINTER(ctypes.c_uint64)).contents.value
    raise ValueError('Unsupported data type: %r' % dtype)
try:
    # RegSetKeyValueW only exists on Windows Vista and newer, so failing to
    # bind it here means the OS is too old to run calibre at all.
    RegSetKeyValue = cwrap(
        'RegSetKeyValueW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), a('name', LPCWSTR, None),
        a('dtype', DWORD, winreg.REG_SZ), a('data', LPCVOID, None), a('size', DWORD))
except Exception:
    raise RuntimeError('calibre requires Windows Vista or newer to run, the last version of calibre'
                       ' that could run on Windows XP is version 1.48, available from: http://download.calibre-ebook.com/')
def delete_value_errcheck(result, func, args):
    # errcheck for the deletion APIs: deleting something that does not
    # exist is treated as success.
    if result == winutil.ERROR_FILE_NOT_FOUND:
        return args
    if result:
        raise ctypes.WinError(result)
    return args
# Deletion APIs share delete_value_errcheck so that deleting a non-existent
# value or tree is silently ignored.
RegDeleteKeyValue = cwrap(
    'RegDeleteKeyValueW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), a('name', LPCWSTR, None), errcheck=delete_value_errcheck)
RegDeleteTree = cwrap(
    'RegDeleteTreeW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), errcheck=delete_value_errcheck)
# Enumerates sub-key names by index; the last-write FILETIME is an output
# parameter, so it is returned to the caller.
RegEnumKeyEx = cwrap(
    'RegEnumKeyExW', LONG, a('key', HKEY), a('index', DWORD), a('name', LPWSTR), a('name_size', LPDWORD), a('reserved', LPDWORD, None),
    a('cls', LPWSTR, None), a('cls_size', LPDWORD, None), a('last_write_time', ctypes.POINTER(FILETIME), in_arg=False),
    errcheck=enum_value_errcheck)
def get_value_errcheck(result, func, args):
    # errcheck for RegGetValue/RegLoadMUIString: map the status codes the
    # Key methods handle specially (buffer growth, missing value).
    if result == winutil.ERROR_SUCCESS:
        return args
    if result == winutil.ERROR_FILE_NOT_FOUND:
        raise KeyError('No such value found')
    if result == winutil.ERROR_MORE_DATA:
        raise ValueError('buffer too small')
    raise ctypes.WinError(result)
# RegGetValueW reads a value (optionally from a sub-key) without a separate
# open; the RRF_* flags control type filtering and string expansion.
RegGetValue = cwrap(
    'RegGetValueW', LONG, a('key', HKEY), a('sub_key', LPCWSTR, None), a('value_name', LPCWSTR, None), a('flags', DWORD, RRF_RT_ANY),
    a('data_type', LPDWORD, 0), a('data', ctypes.c_void_p, 0), a('size', LPDWORD, 0), errcheck=get_value_errcheck
)
# RegLoadMUIStringW loads a localized (MUI) string value, resolving
# "@dll,-id" style references.
RegLoadMUIString = cwrap(
    'RegLoadMUIStringW', LONG, a('key', HKEY), a('value_name', LPCWSTR, None), a('data', LPWSTR, None), a('buf_size', DWORD, 0),
    a('size', LPDWORD, 0), a('flags', DWORD, 0), a('directory', LPCWSTR, None), errcheck=get_value_errcheck
)
def filetime_to_datettime(ft):
    # A FILETIME counts 100ns intervals since 1601-01-01, split across two
    # 32-bit halves; recombine and convert to a naive datetime.
    hundred_ns = (ft.dwHighDateTime << 32) | ft.dwLowDateTime
    epoch = datetime.datetime(1601, 1, 1, 0, 0, 0)
    return epoch + datetime.timedelta(microseconds=hundred_ns / 10)
# }}}
class Key:
    ''' Wrapper around a Win32 registry key handle. Either creates a key at
    create_at or opens an existing key at open_at (both relative to root),
    or holds an empty handle. Usable as a context manager so the handle is
    closed deterministically. '''

    def __init__(self, create_at=None, open_at=None, root=HKEY_CURRENT_USER, open_mode=KEY_READ):
        # root may be another Key instance; unwrap to its raw handle
        root = getattr(root, 'hkey', root)
        self.was_created = False
        if create_at is not None:
            self.hkey, self.was_created = RegCreateKey(root, create_at)
        elif open_at is not None:
            self.hkey = RegOpenKey(root, open_at, 0, open_mode)
        else:
            self.hkey = HKEY()

    def get(self, value_name=None, default=None, sub_key=None):
        ''' Read a value (the default value when value_name is None),
        returning default if it does not exist. '''
        data_buf = ctypes.create_string_buffer(1024)
        len_data_buf = DWORD(len(data_buf))
        data_type = DWORD(0)
        while True:
            len_data_buf.value = len(data_buf)
            try:
                RegGetValue(self.hkey, sub_key, value_name, RRF_RT_ANY | RRF_NOEXPAND | RRF_ZEROONFAILURE,
                        ctypes.byref(data_type), data_buf, len_data_buf)
                break
            except ValueError:
                # buffer too small: double it and retry
                data_buf = ctypes.create_string_buffer(2 * len(data_buf))
            except KeyError:
                return default
        return convert_registry_data(data_buf, len_data_buf.value, data_type.value)

    def get_mui_string(self, value_name=None, default=None, directory=None, fallback=True):
        ''' Read a localized (MUI) string value. When fallback is True and
        the value is not a MUI reference, fall back to a plain get(). '''
        data_buf = ctypes.create_unicode_buffer(1024)
        len_data_buf = DWORD(len(data_buf))
        size = DWORD(0)
        while True:
            len_data_buf.value = len(data_buf)
            try:
                RegLoadMUIString(self.hkey, value_name, data_buf, 2 * len(data_buf), ctypes.byref(size), 0, directory)
                break
            except ValueError:
                # buffer too small: RegLoadMUIString reports the needed
                # size in bytes, our buffer is sized in characters
                data_buf = ctypes.create_unicode_buffer(max(2 * len(data_buf), size.value // 2))
            except KeyError:
                return default
            except OSError as err:
                if fallback and err.winerror in (winutil.ERROR_BAD_COMMAND, winutil.ERROR_INVALID_DATA):
                    return self.get(value_name=value_name, default=default)
                raise
        return data_buf.value

    def iterkeynames(self, get_last_write_times=False):
        ' Iterate over the names of all keys in this key '
        name_buf = ctypes.create_unicode_buffer(1024)
        lname_buf = DWORD(len(name_buf))
        i = 0
        while True:
            lname_buf.value = len(name_buf)
            try:
                file_time = RegEnumKeyEx(self.hkey, i, name_buf, ctypes.byref(lname_buf))
            except ValueError:
                # key names are limited to 255 chars, so our 1024 char
                # buffer can never be too small
                raise RuntimeError('Enumerating keys failed with buffer too small, which should never happen')
            except StopIteration:
                break
            if get_last_write_times:
                yield name_buf.value[:lname_buf.value], filetime_to_datettime(file_time)
            else:
                yield name_buf.value[:lname_buf.value]
            i += 1

    def delete_value(self, name=None, sub_key=None):
        ' Delete the named value from this key. If name is None the default value is deleted. If name does not exist, not error is reported. '
        RegDeleteKeyValue(self.hkey, sub_key, name)

    def delete_tree(self, sub_key=None):
        ''' Delete this all children of this key. Note that a key is not
        actually deleted till the last handle to it is closed. Also if you
        specify a sub_key, then the sub-key is deleted completely. If sub_key
        does not exist, no error is reported.'''
        RegDeleteTree(self.hkey, sub_key)

    def set(self, name=None, value=None, sub_key=None, has_expansions=False):
        ''' Set a value for this key (with optional sub-key). If name is None,
        the Default value is set. value can be an integer, a string, bytes or a list
        of strings. If you want to use expansions, set has_expansions=True. '''
        value, dtype, size = convert_to_registry_data(value, has_expansions=has_expansions)
        RegSetKeyValue(self.hkey, sub_key, name, dtype, value, size)

    def set_default_value(self, sub_key=None, value=None, has_expansions=False):
        self.set(sub_key=sub_key, value=value, has_expansions=has_expansions)

    def sub_key(self, path, allow_create=True, open_mode=KEY_READ):
        ' Create (or open) a sub-key at the specified relative path. When opening instead of creating, use open_mode '
        if allow_create:
            return Key(create_at=path, root=self.hkey)
        # Fix: open_mode was previously ignored when opening, so the
        # documented parameter had no effect.
        return Key(open_at=path, root=self.hkey, open_mode=open_mode)

    def itervalues(self, get_data=False, sub_key=None):
        '''Iterate over all values in this key (or optionally the specified
        sub-key. If get_data is True also return the data for every value,
        otherwise, just the name.'''
        key = self.hkey
        if sub_key is not None:
            try:
                key = RegOpenKey(key, sub_key)
            except OSError:
                return
        try:
            # value names are limited to 16383 chars + terminating NUL
            name_buf = ctypes.create_unicode_buffer(16385)
            lname_buf = DWORD(len(name_buf))
            if get_data:
                data_buf = (BYTE * 1024)()
                ldata_buf = DWORD(len(data_buf))
                vtype = DWORD(0)
            i = 0
            while True:
                lname_buf.value = len(name_buf)
                if get_data:
                    ldata_buf.value = len(data_buf)
                    try:
                        RegEnumValue(
                            key, i, name_buf, ctypes.byref(lname_buf), None, ctypes.byref(vtype), data_buf, ctypes.byref(ldata_buf))
                    except ValueError:
                        # data buffer too small: grow to the reported size
                        # and retry the same index
                        data_buf = (BYTE * ldata_buf.value)()
                        continue
                    except StopIteration:
                        break
                    data = convert_registry_data(data_buf, ldata_buf.value, vtype.value)
                    yield name_buf.value[:lname_buf.value], data
                else:
                    try:
                        RegEnumValue(
                            key, i, name_buf, ctypes.byref(lname_buf), None, None, None, None)
                    except StopIteration:
                        break
                    yield name_buf.value[:lname_buf.value]
                i += 1
        finally:
            if sub_key is not None:
                RegCloseKey(key)
    values = itervalues

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def __bool__(self):
        # Fix: this was named __nonzero__ (the Python 2 protocol), which
        # Python 3 never calls, so truth-testing a Key always returned True.
        return bool(self.hkey)
    __nonzero__ = __bool__

    def close(self):
        if not getattr(self, 'hkey', None):
            return
        if RegCloseKey is None or HKEY is None:
            return  # globals become None during exit
        RegCloseKey(self.hkey)
        self.hkey = HKEY()

    def __del__(self):
        self.close()
if __name__ == '__main__':
    # Ad-hoc smoke test (must be run on Windows): exercises value listing,
    # unicode round-tripping, expansion, sub-key creation and tree deletion.
    from pprint import pprint
    k = Key(open_at=r'Software\RegisteredApplications', root=HKLM)
    pprint(tuple(k.values(get_data=True)))
    k = Key(r'Software\calibre\winregtest')
    k.set('Moose.Cat.1')
    k.set('unicode test', 'fällen粗楷体简a\U0001f471')
    k.set('none test')
    k.set_default_value(r'other\key', '%PATH%', has_expansions=True)
    pprint(tuple(k.values(get_data=True)))
    pprint(k.get('unicode test'))
    k.set_default_value(r'delete\me\please', 'xxx')
    pprint(tuple(k.iterkeynames(get_last_write_times=True)))
    k.delete_tree('delete')
    pprint(tuple(k.iterkeynames(get_last_write_times=True)))
| 15,032 | Python | .py | 322 | 38.10559 | 153 | 0.628579 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,197 | default_programs.py | kovidgoyal_calibre/src/calibre/utils/winreg/default_programs.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import sys
import time
import traceback
from threading import Thread
from calibre import guess_type, prints
from calibre.constants import DEBUG, __version__, isfrozen, isportable
from calibre.utils.localization import _
from calibre.utils.lock import singleinstance
from calibre.utils.winreg.lib import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, Key
from calibre_extensions import winutil
from polyglot.builtins import iteritems, itervalues
# See https://msdn.microsoft.com/en-us/library/windows/desktop/cc144154(v=vs.85).aspx
def default_programs():
    '''Metadata for the calibre executables registered with Windows Default
    Programs. Maps executable basename to the registry names/descriptions
    used by :func:`register` and :func:`unregister`.'''
    return {
        'calibre.exe': {
            'icon_id':'main_icon',
            'description': _('The main calibre program, used to manage your collection of e-books'),
            'capability_name': 'calibre64bit',
            'name': 'calibre 64-bit',
            'assoc_name': 'calibre64bit',
        },
        'ebook-edit.exe': {
            'icon_id':'editor_icon',
            'description': _('The calibre E-book editor. It can be used to edit common e-book formats.'),
            'capability_name': 'Editor64bit',
            'name': 'calibre Editor 64-bit',
            'assoc_name': 'calibreEditor64bit',
        },
        'ebook-viewer.exe': {
            'icon_id':'viewer_icon',
            'description': _('The calibre E-book viewer. It can view most known e-book formats.'),
            'capability_name': 'Viewer64bit',
            'name': 'calibre Viewer 64-bit',
            'assoc_name': 'calibreViewer64bit',
        },
    }
def extensions(basename):
    '''Return the set of file extensions (lowercase, without the leading dot)
    that the given calibre executable can open, or None for an unknown
    executable name. Imports are done lazily as they are expensive.'''
    ans = None
    if basename == 'calibre.exe':
        from calibre.ebooks import BOOK_EXTENSIONS
        # We remove rar and zip as they interfere with 7-zip associations
        # https://www.mobileread.com/forums/showthread.php?t=256459
        ans = set(BOOK_EXTENSIONS) - {'rar', 'zip'}
    elif basename == 'ebook-viewer.exe':
        from calibre.customize.ui import all_input_formats
        ans = set(all_input_formats())
    elif basename == 'ebook-edit.exe':
        from calibre.ebooks.oeb.polish.import_book import IMPORTABLE
        from calibre.ebooks.oeb.polish.main import SUPPORTED
        ans = SUPPORTED | IMPORTABLE
    return ans
class NotAllowed(ValueError):
    '''Raised when registering file associations is not permitted for this
    kind of calibre install (non-frozen, portable, old Windows, or
    explicitly disabled).'''
def check_allowed():
    '''Raise :class:`NotAllowed` unless this install is permitted to register
    file associations with Windows.'''
    # Predicates are lazy callables so that later checks (such as
    # sys.getwindowsversion()) are only evaluated if earlier ones pass,
    # matching the short-circuit behavior of the original if-chain.
    checks = (
        (lambda: not isfrozen, 'Not allowed to create associations for non-frozen installs'),
        (lambda: isportable, 'Not allowed to create associations for portable installs'),
        (lambda: sys.getwindowsversion()[:2] < (6, 2), 'Not allowed to create associations for windows versions older than Windows 8'),
        (lambda: 'CALIBRE_NO_DEFAULT_PROGRAMS' in os.environ, 'Disabled by the CALIBRE_NO_DEFAULT_PROGRAMS environment variable'),
    )
    for check_failed, msg in checks:
        if check_failed():
            raise NotAllowed(msg)
def create_prog_id(ext, prog_id, ext_map, exe):
    '''Create the ProgID registry entries for one extension/executable pair
    under HKCU\\Software\\Classes.

    :param ext: File extension without the leading dot.
    :param prog_id: The ProgID (see :func:`progid_name`).
    :param ext_map: Unused here; kept for signature parity with callers.
    :param exe: Absolute path to the handling executable.
    '''
    with Key(r'Software\Classes\%s' % prog_id) as key:
        type_name = _('%s Document') % ext.upper()
        key.set(value=type_name)
        key.set('FriendlyTypeName', type_name)
        key.set('PerceivedType', 'Document')
        key.set(sub_key='DefaultIcon', value=exe+',0')
        key.set_default_value(r'shell\open\command', '"%s" "%%1"' % exe)
        # contrary to the msdn docs, this key prevents calibre programs
        # from appearing in the initial open with list, see
        # https://www.mobileread.com/forums/showthread.php?t=313668
        # key.set('AllowSilentDefaultTakeOver')
    # Advertise the ProgID as a candidate handler for the extension.
    with Key(r'Software\Classes\.%s\OpenWithProgIDs' % ext) as key:
        key.set(prog_id)
def progid_name(assoc_name, ext):
    '''Return the Windows ProgID used to associate *ext* (extension without
    leading dot) with the application named *assoc_name*.'''
    return '.'.join((assoc_name, 'AssocFile', ext.upper()))
def cap_path(data):
    '''Registry path (relative to the hive root) of the Default Programs
    Capabilities key for the program described by *data*.'''
    return 'Software\\calibre\\{}\\Capabilities'.format(data['capability_name'])
def register():
    '''Register all calibre programs with Windows Default Programs by writing
    their Capabilities, file/MIME associations and ProgIDs to HKCU, then
    notify the shell that associations changed.'''
    base = os.path.dirname(sys.executable)
    for program, data in iteritems(default_programs()):
        data = data.copy()
        exe = os.path.join(base, program)
        capabilities_path = cap_path(data)
        # Map extension -> MIME type, dropping extensions with no known type.
        ext_map = {ext.lower():guess_type('file.' + ext.lower())[0] for ext in extensions(program)}
        ext_map = {ext:mt for ext, mt in iteritems(ext_map) if mt}
        prog_id_map = {ext:progid_name(data['assoc_name'], ext) for ext in ext_map}
        with Key(capabilities_path) as key:
            for k, v in iteritems({'ApplicationDescription':'description', 'ApplicationName':'name'}):
                key.set(k, data[v])
            key.set('ApplicationIcon', '%s,0' % exe)
            key.set_default_value(r'shell\open\command', '"%s" "%%1"' % exe)
            with Key('FileAssociations', root=key) as fak, Key('MimeAssociations', root=key) as mak:
                # previous_associations = set(fak.values())
                for ext, prog_id in iteritems(prog_id_map):
                    mt = ext_map[ext]
                    fak.set('.' + ext, prog_id)
                    mak.set(mt, prog_id)
        for ext, prog_id in iteritems(prog_id_map):
            create_prog_id(ext, prog_id, ext_map, exe)
        # Point RegisteredApplications at the Capabilities key so the program
        # shows up in the Default Programs control panel.
        with Key(r'Software\RegisteredApplications') as key:
            key.set(data['name'], capabilities_path)
    winutil.notify_associations_changed()
def unregister():
    '''Remove everything :func:`register` created: the RegisteredApplications
    entries, the Capabilities trees and the per-extension ProgIDs.'''
    for program, data in iteritems(default_programs()):
        # Parent of the Capabilities key, i.e. Software\calibre\<name>
        capabilities_path = cap_path(data).rpartition('\\')[0]
        ext_map = {ext.lower():guess_type('file.' + ext.lower())[0] for ext in extensions(program)}
        ext_map = {ext:mt for ext, mt in iteritems(ext_map) if mt}
        prog_id_map = {ext:progid_name(data['assoc_name'], ext) for ext in ext_map}
        with Key(r'Software\RegisteredApplications') as key:
            key.delete_value(data['name'])
        # Split into (grandparent path, leaf name) to delete the whole subtree.
        parent, sk = capabilities_path.rpartition('\\')[0::2]
        with Key(parent) as key:
            key.delete_tree(sk)
        for ext, prog_id in iteritems(prog_id_map):
            with Key(r'Software\Classes\.%s\OpenWithProgIDs' % ext) as key:
                key.delete_value(prog_id)
            with Key(r'Software\Classes') as key:
                key.delete_tree(prog_id)
class Register(Thread):
    '''Background thread that registers calibre with Windows Default Programs
    once per calibre version. Usable as a context manager so the caller can
    bound how long it waits for registration at shutdown.'''
    daemon = True
    def __init__(self, prefs):
        # prefs: a dict-like preferences object used to remember the last
        # calibre version for which registration was performed.
        Thread.__init__(self, name='RegisterDP')
        self.prefs = prefs
        self.start()
    def run(self):
        try:
            self.do_register()
        except Exception:
            # Registration is best-effort; never propagate out of the thread.
            traceback.print_exc()
    def do_register(self):
        try:
            check_allowed()
        except NotAllowed:
            return
        # Only one calibre process at a time should touch the registry.
        if singleinstance('register_default_programs'):
            # Skip the (slow) registration if already done for this version.
            if self.prefs.get('windows_register_default_programs', None) != __version__:
                self.prefs['windows_register_default_programs'] = __version__
                if DEBUG:
                    st = time.monotonic()
                    prints('Registering with default programs...')
                register()
                if DEBUG:
                    prints('Registered with default programs in %.1f seconds' % (time.monotonic() - st))
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # Give the thread some time to finish in case the user quit the
        # application very quickly
        self.join(4.0)
def get_prog_id_map(base, key_path):
    '''Read a Default Programs Capabilities key.

    :param base: Root hive (HKEY_CURRENT_USER or HKEY_LOCAL_MACHINE).
    :param key_path: Path of the Capabilities key under that hive.
    :return: (application description, {extension -> ProgID}); description is
        None and the map empty if the key is missing or has no description.
    '''
    desc, ans = None, {}
    try:
        k = Key(open_at=key_path, root=base)
    except OSError as err:
        if err.winerror == winutil.ERROR_FILE_NOT_FOUND:
            return desc, ans
        raise
    with k:
        desc = k.get_mui_string('ApplicationDescription')
        if desc is None:
            return desc, ans
        for ext, prog_id in k.values(sub_key='FileAssociations', get_data=True):
            # Value names are stored as '.ext'; strip the dot and lowercase.
            ans[ext[1:].lower()] = prog_id
    return desc, ans
def get_open_data(base, prog_id):
    '''Return (open command line, icon resource, friendly name) for a ProgID,
    or (None, None, None) if the ProgID does not exist under *base*.'''
    try:
        k = Key(open_at=r'Software\Classes\%s' % prog_id, root=base)
    except OSError as err:
        if err.winerror == winutil.ERROR_FILE_NOT_FOUND:
            return None, None, None
        # Bugfix: previously other OSErrors fell through to ``with k:`` and
        # crashed with UnboundLocalError, masking the real registry error.
        raise
    with k:
        cmd = k.get(sub_key=r'shell\open\command')
        if cmd:
            parts = cmd.split()
            # Strip a trailing /dde marker, substituting the standard "%1"
            # argument placeholder if the command does not already have one.
            if parts[-1] == '/dde' and '%1' not in cmd:
                cmd = ' '.join(parts[:-1]) + ' "%1"'
        return cmd, k.get(sub_key='DefaultIcon'), k.get_mui_string('FriendlyTypeName') or k.get()
def split_commandline(commandline):
    '''Split a Windows command line into its argv list.

    An empty or whitespace-only command line yields [] because
    CommandLineToArgvW returns the path to the current executable when
    called with an empty string.'''
    stripped = commandline.strip()
    if not stripped:
        return []
    return list(winutil.parse_cmdline(commandline))
def friendly_app_name(prog_id=None, exe=None):
    '''Best-effort lookup of a program's user-friendly name via the Windows
    API. Returns None (after printing the traceback) if the lookup fails.'''
    try:
        ans = winutil.friendly_name(prog_id, exe)
    except Exception:
        traceback.print_exc()
        return None
    return ans
def find_programs(extensions):
    '''Find installed programs claiming to handle any of the given file
    extensions (without leading dots).

    :return: A list of dicts with the keys ``name``, ``cmdline`` and
        ``icon_resource``, de-duplicated by ProgID and command line.
    '''
    extensions = frozenset(extensions)
    ans = []
    seen_prog_ids, seen_cmdlines = set(), set()
    # Search for programs registered using Default Programs that claim they are
    # capable of handling the specified extensions.
    for base in (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE):
        try:
            k = Key(open_at=r'Software\RegisteredApplications', root=base)
        except OSError as err:
            if err.winerror == winutil.ERROR_FILE_NOT_FOUND:
                continue
            raise
        with k:
            for name, key_path in k.values(get_data=True):
                try:
                    app_desc, prog_id_map = get_prog_id_map(base, key_path)
                except Exception:
                    # A single malformed registration must not abort the scan.
                    traceback.print_exc()
                    continue
                for ext in extensions:
                    prog_id = prog_id_map.get(ext)
                    if prog_id is not None and prog_id not in seen_prog_ids:
                        seen_prog_ids.add(prog_id)
                        cmdline, icon_resource, friendly_name = get_open_data(base, prog_id)
                        if cmdline and cmdline not in seen_cmdlines:
                            seen_cmdlines.add(cmdline)
                            ans.append({'name':app_desc, 'cmdline':cmdline, 'icon_resource':icon_resource})
    # Now look for programs that only register with Windows Explorer instead of
    # Default Programs (for example, FoxIt PDF reader)
    for ext in extensions:
        try:
            k = Key(open_at=r'Software\Microsoft\Windows\CurrentVersion\Explorer\FileExts\.%s\OpenWithProgIDs' % ext, root=HKEY_CURRENT_USER)
        except OSError as err:
            if err.winerror == winutil.ERROR_FILE_NOT_FOUND:
                continue
            # Bugfix: previously other OSErrors fell through, leaving ``k``
            # unbound and crashing with UnboundLocalError below.
            raise
        with k:
            for prog_id in itervalues(k):
                if prog_id and prog_id not in seen_prog_ids:
                    seen_prog_ids.add(prog_id)
                    # NOTE(review): ``base`` here is left over from the loop
                    # above (its last value); presumably this was meant to be
                    # HKEY_CURRENT_USER -- confirm before changing behavior.
                    cmdline, icon_resource, friendly_name = get_open_data(base, prog_id)
                    if cmdline and cmdline not in seen_cmdlines:
                        seen_cmdlines.add(cmdline)
                        exe_name = None
                        exe = split_commandline(cmdline)
                        if exe:
                            exe_name = friendly_app_name(prog_id) or os.path.splitext(os.path.basename(exe[0]))[0]
                        name = exe_name or friendly_name
                        if name:
                            ans.append({'name':name, 'cmdline':cmdline, 'icon_resource':icon_resource})
    return ans
if __name__ == '__main__':
    # Manual smoke test: list programs claiming to handle .docx files.
    from pprint import pprint
    pprint(find_programs('docx'.split()))
| 11,318 | Python | .py | 245 | 36.208163 | 141 | 0.610219 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,198 | dde.py | kovidgoyal_calibre/src/calibre/utils/winreg/dde.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
from ctypes import POINTER, WINFUNCTYPE, byref, c_char_p, c_ulong, c_void_p, windll
from ctypes.wintypes import BOOL, DWORD, LPCWSTR, UINT
from polyglot.builtins import itervalues
HCONV = c_void_p # = DECLARE_HANDLE(HCONV)
HDDEDATA = c_void_p # = DECLARE_HANDLE(HDDEDATA)
HSZ = c_void_p # = DECLARE_HANDLE(HSZ)
LPBYTE = c_char_p # POINTER(BYTE)
LPDWORD = POINTER(DWORD)
LPSTR = c_char_p
ULONG_PTR = c_ulong
DML_ERRORS = {
'ADVACKTIMEOUT': (0x4000, 'A request for a synchronous advise transaction has timed out.'),
'BUSY': (0x4001, 'The response to the transaction caused the DDE_FBUSY flag to be set.'),
'DATAACKTIMEOUT': (0x4002, 'A request for a synchronous data transaction has timed out.'),
'DLL_NOT_INITIALIZED': (0x4003, 'A DDEML function was called without first calling the DdeInitialize function, or an invalid instance identifier was passed to a DDEML function.'), # noqa
'DLL_USAGE': (0x4004, 'An application initialized as APPCLASS_MONITOR has attempted to perform a DDE transaction, or an application initialized as APPCMD_CLIENTONLY has attempted to perform server transactions.'), # noqa
'EXECACKTIMEOUT': (0x4005, 'A request for a synchronous execute transaction has timed out.'),
'INVALIDPARAMETER': (0x4006, 'An invalid transaction identifier was passed to a DDEML function. Once the application has returned from an XTYP_XACT_COMPLETE callback, the transaction identifier for that callback function is no longer valid. A parameter failed to be validated by the DDEML. Some of the possible causes follow: The application used a data handle initialized with a different item name handle than was required by the transaction. The application used a data handle that was initialized with a different clipboard data format than was required by the transaction. The application used a client-side conversation handle with a server-side function or vice versa. The application used a freed data handle or string handle. More than one instance of the application used the same object.'), # noqa
'LOW_MEMORY': (0x4007, 'A DDEML application has created a prolonged race condition (in which the server application outruns the client), causing large amounts of memory to be consumed.'), # noqa
'MEMORY_ERROR': (0x4008, 'A memory allocation has failed.'),
'NO_CONV_ESTABLISHED': (0x400a, 'A client\'s attempt to establish a conversation has failed.'),
'NOTPROCESSED': (0x4009, 'A transaction has failed.'),
'POKEACKTIMEOUT': (0x400b, 'A request for a synchronous poke transaction has timed out.'),
'POSTMSG_FAILED': (0x400c, 'An internal call to the PostMessage function has failed.'),
'REENTRANCY': (0x400d, 'An application instance with a synchronous transaction already in progress attempted to initiate another synchronous transaction, or the DdeEnableCallback function was called from within a DDEML callback function.'), # noqa
'SERVER_DIED': (0x400e, 'A server-side transaction was attempted on a conversation terminated by the client, or the server terminated before completing a transaction.'), # noqa
'SYS_ERROR': (0x400f, 'An internal error has occurred in the DDEML.'),
'UNADVACKTIMEOUT': (0x4010, 'A request to end an advise transaction has timed out.'),
'UNFOUND_QUEUE_ID': (0x4011, 'An invalid transaction identifier was passed to a DDEML function. Once the application has returned from an XTYP_XACT_COMPLETE callback, the transaction identifier for that callback function is no longer valid.'), # noqa
}
DML_ERROR_TEXT = {code:text for (code, text) in itervalues(DML_ERRORS)}
user32 = windll.user32
DDECALLBACK = WINFUNCTYPE(HDDEDATA, UINT, UINT, HCONV, HSZ, HSZ, HDDEDATA, ULONG_PTR, ULONG_PTR)
APPCMD_CLIENTONLY = 0x10
CP_WINUNICODE = 1200
# See windows/ddeml.h for declaration of struct CONVCONTEXT
PCONVCONTEXT = c_void_p
XCLASS_FLAGS = 0x4000
XTYP_EXECUTE = (0x0050 | XCLASS_FLAGS)
class DDEError(ValueError):
    '''Raised when a DDEML (Dynamic Data Exchange) API call fails.'''
def init_errcheck(result, func, args):
    '''ctypes errcheck for DdeInitializeW: a zero return means success,
    anything else is a DDEML error code.'''
    if result == 0:
        return args
    raise DDEError('Failed to initialize DDE client with return code: %x' % result)
def no_errcheck(result, func, args):
    # ctypes errcheck that performs no checking; used for calls whose return
    # value is inspected by the caller instead.
    return args
def dde_error(instance):
    '''Raise a DDEError describing the last DDEML error on *instance*.'''
    errcode = GetLastError(instance)
    # Bugfix: the lookup must use DML_ERROR_TEXT (keyed by numeric code);
    # DML_ERRORS is keyed by error *name*, so the .get() always missed and
    # a hit would have passed a (code, text) tuple to DDEError.
    raise DDEError(DML_ERROR_TEXT.get(errcode, 'Unknown DDE error code: %x' % errcode))
def default_errcheck(result, func, args):
    '''ctypes errcheck for DDEML calls: a zero integer return or a NULL
    handle (.value is None) signals failure, in which case the last DDE
    error for the instance (assumed to be the first argument) is raised.'''
    failed = isinstance(result, numbers.Integral) and result == 0
    if not failed:
        failed = getattr(result, 'value', False) is None
    if failed:
        dde_error(args[0])
    return args
# Sentinel distinguishing "no default supplied" from an explicit default of None.
null = object()
class a:
    '''Describes one argument of a ctypes function prototype.

    ``typ`` is the ctypes type; ``spec`` is the paramflags tuple expected by
    WINFUNCTYPE: (direction, name[, default]) where direction 1 = input
    parameter and 2 = output parameter.'''
    def __init__(self, name, typ, default=null, in_arg=True):
        self.typ = typ
        direction = 1 if in_arg else 2
        if default is null:
            self.spec = (direction, name)
        else:
            self.spec = (direction, name, default)
def cwrap(name, restype, *args, **kw):
    '''Build a ctypes prototype for the Windows API function *name*.

    :param restype: ctypes return type.
    :param args: :class:`a` argument descriptors, in call order.
    :param kw: ``lib`` (defaults to user32) and ``errcheck`` (defaults to
        :func:`default_errcheck`).
    '''
    params=(restype,) + tuple(x.typ for x in args)
    paramflags=tuple(x.spec for x in args)
    func=WINFUNCTYPE(*params)((name, kw.get('lib', user32)), paramflags)
    func.errcheck=kw.get('errcheck', default_errcheck)
    return func
# Prototypes for the DDEML functions used below. Paramflags defaults (e.g.
# codepage, timeout) mean most call sites only pass the varying arguments.
GetLastError = cwrap('DdeGetLastError', UINT, a('instance', DWORD), errcheck=no_errcheck)
Initialize = cwrap('DdeInitializeW', UINT, a('instance_p', LPDWORD), a('callback', DDECALLBACK), a('command', DWORD),
                   a('reserved', DWORD, 0), errcheck=init_errcheck)
CreateStringHandle = cwrap('DdeCreateStringHandleW', HSZ, a('instance', DWORD), a('string', LPCWSTR), a('codepage', UINT, CP_WINUNICODE))
Connect = cwrap('DdeConnect', HCONV, a('instance', DWORD), a('service', HSZ), a('topic', HSZ), a('context', PCONVCONTEXT))
FreeStringHandle = cwrap('DdeFreeStringHandle', BOOL, a('instance', DWORD), a('handle', HSZ), errcheck=no_errcheck)
ClientTransaction = cwrap('DdeClientTransaction', HDDEDATA, a('data', LPBYTE), a('size', DWORD), a('conversation', HCONV), a('item', HSZ),
                          a('fmt', UINT, 0), a('type', UINT, XTYP_EXECUTE), a('timeout', DWORD, 5000), a('result', LPDWORD, LPDWORD()), errcheck=no_errcheck)
FreeDataHandle = cwrap('DdeFreeDataHandle', BOOL, a('data', HDDEDATA), errcheck=no_errcheck)
Disconnect = cwrap('DdeDisconnect', BOOL, a('conversation', HCONV), errcheck=no_errcheck)
Uninitialize = cwrap('DdeUninitialize', BOOL, a('instance', DWORD), errcheck=no_errcheck)
def send_dde_command(service, topic, command):
    '''Send a single DDE XTYP_EXECUTE command to the given service/topic.

    :param command: The execute string, as bytes. A str is accepted and
        encoded as UTF-8, since c_char_p requires bytes on Python 3
        (previously passing str raised a TypeError).
    :raises DDEError: if the transaction fails.
    '''
    if isinstance(command, str):
        # NOTE(review): assumes the target application accepts UTF-8 encoded
        # execute data; pass bytes explicitly to use a different encoding.
        command = command.encode('utf-8')
    instance = DWORD(0)
    def cb(*args):
        pass  # ignore callbacks
    callback = DDECALLBACK(cb)
    Initialize(byref(instance), callback, APPCMD_CLIENTONLY, 0)
    hservice = CreateStringHandle(instance, service)
    htopic = CreateStringHandle(instance, topic)
    conversation = Connect(instance, hservice, htopic, PCONVCONTEXT())
    # String handles are no longer needed once the conversation exists.
    FreeStringHandle(instance, hservice)
    FreeStringHandle(instance, htopic)
    data = c_char_p(command)
    sz = DWORD(len(command) + 1)  # include the trailing NUL byte
    res = ClientTransaction(data, sz, conversation, HSZ())
    if res == 0:
        dde_error(instance)
    FreeDataHandle(res)
    Disconnect(conversation)
    Uninitialize(instance)
if __name__ == '__main__':
    # Manual smoke test: ask a running WinWord instance to open a document.
    send_dde_command('WinWord', 'System', '[REM_DDE_Direct][FileOpen("C:/cygwin64/home/kovid/demo.docx")]')
| 7,311 | Python | .py | 104 | 65.721154 | 818 | 0.726104 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,199 | pool.py | kovidgoyal_calibre/src/calibre/utils/ipc/pool.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import sys
from collections import namedtuple
from multiprocessing.connection import Pipe
from threading import Thread
from calibre import as_unicode, detect_ncpus, prints
from calibre.constants import DEBUG, iswindows
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils import join_with_timeout
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.serialize import pickle_dumps, pickle_loads
from polyglot.builtins import iteritems, itervalues
from polyglot.queue import Queue
# A job submitted to the pool; args/kwargs are passed to func from module.
Job = namedtuple('Job', 'id module func args kwargs')
# Outcome of running a job: value on success, err+traceback on failure.
Result = namedtuple('Result', 'value err traceback')
# Result as reported back by a worker; is_terminal_failure means the worker
# process itself died or the IPC channel broke.
WorkerResult = namedtuple('WorkerResult', 'id result is_terminal_failure worker')
# Describes an unrecoverable pool failure; job_id may be None.
TerminalFailure = namedtuple('TerminalFailure', 'message tb job_id')
# Marker telling workers to load common data from the named file instead of
# receiving it over the pipe (used when the data exceeds MAX_SIZE).
File = namedtuple('File', 'name')
MAX_SIZE = 30 * 1024 * 1024  # max size of data to send over the connection (old versions of windows cannot handle arbitrary data lengths)
worker_kwargs = {'stdout':None}
get_stdout_from_child = False
if iswindows:
    # The windows console cannot show output from child processes
    # created with CREATE_NO_WINDOW, so the stdout/stderr file handles
    # the child process inherits will be broken. Similarly, windows GUI apps
    # have no usable stdout/stderr file handles. In both these cases, redirect
    # the child's stdout/stderr to NUL. If we are running in calibre-debug -g,
    # then redirect to PIPE and read from PIPE and print to our stdout.
    # Note that when running via the "Restart in debug mode" action, stdout is
    # not a console (its already redirected to a log file), so no redirection
    # is required.
    if getattr(sys, 'gui_app', False) or getattr(sys.stdout, 'isatty', lambda : False)():
        if DEBUG:
            # We are running in a windows console with calibre-debug -g
            import subprocess
            get_stdout_from_child = True
            worker_kwargs['stdout'] = subprocess.PIPE
            worker_kwargs['stderr'] = subprocess.STDOUT
        else:
            from calibre.utils.ipc.launch import windows_null_file
            worker_kwargs['stdout'] = worker_kwargs['stderr'] = windows_null_file
def get_stdout(process):
    '''Pump *process*'s piped stdout to our own stdout, one byte at a time,
    until the process exits or the pipe breaks. Run in a daemon thread.'''
    import time
    while process.poll() is None:
        try:
            raw = process.stdout.read(1)
            if raw:
                try:
                    sys.stdout.buffer.write(raw)
                except OSError:
                    # Our own stdout may be gone (e.g. console closed).
                    pass
            else:
                # Nothing available; avoid a busy loop.
                time.sleep(0.1)
        except (EOFError, OSError):
            break
def start_worker(code, pass_fds, name=''):
    '''Launch a pool worker process running *code*, inheriting the given file
    descriptors. On Windows debug runs, also start a thread relaying the
    worker's stdout to ours (see get_stdout_from_child above).'''
    from calibre.utils.ipc.simple_worker import start_pipe_worker
    if name:
        name = '-' + name
    p = start_pipe_worker(code, pass_fds=pass_fds, **worker_kwargs)
    if get_stdout_from_child:
        t = Thread(target=get_stdout, name='PoolWorkerGetStdout' + name, args=(p,))
        t.daemon = True
        t.start()
    return p
class Failure(Exception):
    '''Raised when the pool has suffered a terminal failure.

    Wraps a TerminalFailure, exposing its message, traceback text and the id
    of the job that triggered it (if any).'''
    def __init__(self, tf):
        super().__init__(tf.message)
        self.failure_message = tf.message
        self.details = tf.tb
        self.job_id = tf.job_id
class Worker:
    '''Client-side handle to one worker process: sends pickled jobs down the
    pipe and receives results on a per-job background thread.'''
    def __init__(self, p, conn, events, name):
        # p: the subprocess; conn: our end of the pipe; events: the pool's
        # event queue where WorkerResults are reported.
        self.process, self.conn = p, conn
        self.events = events
        self.name = name or ''
    def __call__(self, job):
        # Send the job (None means "exit") and, for real jobs, start a
        # thread waiting for the result.
        eintr_retry_call(self.conn.send_bytes, pickle_dumps(job))
        if job is not None:
            self.job_id = job.id
            t = Thread(target=self.recv, name='PoolWorker-'+self.name)
            t.daemon = True
            t.start()
    def recv(self):
        # Wait for the result of the current job; any error receiving it is
        # treated as a terminal failure of this worker.
        try:
            result = pickle_loads(eintr_retry_call(self.conn.recv_bytes))
            wr = WorkerResult(self.job_id, result, False, self)
        except Exception as err:
            import traceback
            result = Result(None, as_unicode(err), traceback.format_exc())
            wr = WorkerResult(self.job_id, result, True, self)
        self.events.put(wr)
    def set_common_data(self, data):
        # data is already pickled; forward it verbatim.
        eintr_retry_call(self.conn.send_bytes, data)
class Pool(Thread):
    '''A process pool that runs jobs in worker processes.

    The pool itself is a thread consuming an event queue (self.events) onto
    which jobs, worker results and common-data updates are all pushed;
    completed jobs appear on self.results as WorkerResult tuples. A failure
    to communicate with a worker is "terminal": the pool shuts down and all
    further calls raise :class:`Failure`.'''
    daemon = True
    def __init__(self, max_workers=None, name=None):
        Thread.__init__(self, name=name)
        self.max_workers = max_workers or detect_ncpus()
        self.available_workers = []
        self.busy_workers = {}
        self.pending_jobs = []
        self.events = Queue()
        self.results = Queue()
        # tracker mirrors outstanding jobs so wait_for_tasks can join() it.
        self.tracker = Queue()
        self.terminal_failure = None
        self.common_data = pickle_dumps(None)
        self.shutting_down = False
        self.start()
    def set_common_data(self, data=None):
        ''' Set some data that will be passed to all subsequent jobs without
        needing to be transmitted every time. You must call this method before
        queueing any jobs, otherwise the behavior is undefined. You can call it
        after all jobs are done, then it will be used for the new round of
        jobs. Can raise the :class:`Failure` exception if data could not be
        sent to workers.'''
        if self.failed:
            raise Failure(self.terminal_failure)
        self.events.put(data)
    def __call__(self, job_id, module, func, *args, **kwargs):
        '''
        Schedule a job. The job will be run in a worker process, with the
        result placed in self.results. If a terminal failure has occurred
        previously, this method will raise the :class:`Failure` exception.
        :param job_id: A unique id for the job. The result will have this id.
        :param module: Either a fully qualified python module name or python
                       source code which will be executed as a module.
                       Source code is detected by the presence of newlines in module.
        :param func: Name of the function from ``module`` that will be
                     executed. ``args`` and ``kwargs`` will be passed to the function.
        '''
        if self.failed:
            raise Failure(self.terminal_failure)
        job = Job(job_id, module, func, args, kwargs)
        self.tracker.put(None)
        self.events.put(job)
    def wait_for_tasks(self, timeout=None):
        ''' Wait for all queued jobs to be completed, if timeout is not None,
        will raise a RuntimeError if jobs are not completed in the specified
        time. Will raise a :class:`Failure` exception if a terminal failure has
        occurred previously. '''
        if self.failed:
            raise Failure(self.terminal_failure)
        if timeout is None:
            self.tracker.join()
        else:
            join_with_timeout(self.tracker, timeout)
    def shutdown(self, wait_time=0.1):
        ''' Shutdown this pool, terminating all worker process. The pool cannot
        be used after a shutdown. '''
        self.shutting_down = True
        self.events.put(None)
        self.shutdown_workers(wait_time=wait_time)
    def create_worker(self):
        # Spawn a worker process running worker_main() connected to us by a
        # Pipe; our end of the pipe (a) is closed after passing its fd.
        a, b = Pipe()
        with a:
            cmd = 'from {0} import run_main, {1}; run_main({2!r}, {1})'.format(
                self.__class__.__module__, 'worker_main', a.fileno())
            p = start_worker(cmd, (a.fileno(),))
        sys.stdout.flush()
        p.stdin.close()
        w = Worker(p, b, self.events, self.name)
        # Forward any previously set common data to the new worker.
        if self.common_data != pickle_dumps(None):
            w.set_common_data(self.common_data)
        return w
    def start_worker(self):
        # Returns False (and triggers terminal_error) if the worker could
        # not be started; returns None on success.
        try:
            w = self.create_worker()
            if not self.shutting_down:
                self.available_workers.append(w)
        except Exception:
            import traceback
            self.terminal_failure = TerminalFailure('Failed to start worker process', traceback.format_exc(), None)
            self.terminal_error()
            return False
    def run(self):
        # Main event loop: consume events until shutdown or terminal error.
        if self.start_worker() is False:
            return
        while True:
            event = self.events.get()
            if event is None or self.shutting_down:
                break
            if self.handle_event(event) is False:
                break
    def handle_event(self, event):
        # Dispatch one event: a Job to run, a WorkerResult to record, or
        # anything else is treated as new common data. Returns False to stop
        # the event loop.
        if isinstance(event, Job):
            job = event
            if not self.available_workers:
                if len(self.busy_workers) >= self.max_workers:
                    # At capacity: queue until a worker frees up.
                    self.pending_jobs.append(job)
                    return
                if self.start_worker() is False:
                    return False
            return self.run_job(job)
        elif isinstance(event, WorkerResult):
            worker_result = event
            self.busy_workers.pop(worker_result.worker, None)
            self.available_workers.append(worker_result.worker)
            self.tracker.task_done()
            if worker_result.is_terminal_failure:
                self.terminal_failure = TerminalFailure('Worker process crashed while executing job', worker_result.result.traceback, worker_result.id)
                self.terminal_error()
                return False
            self.results.put(worker_result)
        else:
            self.common_data = pickle_dumps(event)
            if len(self.common_data) > MAX_SIZE:
                # Too big for the pipe: spool to a temp file and send a File
                # marker instead; workers read the file themselves.
                self.cd_file = PersistentTemporaryFile('pool_common_data')
                with self.cd_file as f:
                    f.write(self.common_data)
                self.common_data = pickle_dumps(File(f.name))
            for worker in self.available_workers:
                try:
                    worker.set_common_data(self.common_data)
                except Exception:
                    import traceback
                    self.terminal_failure = TerminalFailure('Worker process crashed while sending common data', traceback.format_exc(), None)
                    self.terminal_error()
                    return False
        # Drain pending jobs onto any now-free workers.
        while self.pending_jobs and self.available_workers:
            if self.run_job(self.pending_jobs.pop()) is False:
                return False
    def run_job(self, job):
        worker = self.available_workers.pop()
        try:
            worker(job)
        except Exception:
            import traceback
            self.terminal_failure = TerminalFailure('Worker process crashed while sending job', traceback.format_exc(), job.id)
            self.terminal_error()
            return False
        self.busy_workers[worker] = job
    @property
    def failed(self):
        # True once a terminal failure has been recorded.
        return self.terminal_failure is not None
    def terminal_error(self):
        # Report a terminal-failure result for every in-flight and pending
        # job (so waiters are released), then shut the pool down.
        if self.shutting_down:
            return
        for worker, job in iteritems(self.busy_workers):
            self.results.put(WorkerResult(job.id, Result(None, None, None), True, worker))
            self.tracker.task_done()
        while self.pending_jobs:
            job = self.pending_jobs.pop()
            self.results.put(WorkerResult(job.id, Result(None, None, None), True, None))
            self.tracker.task_done()
        self.shutdown()
    def shutdown_workers(self, wait_time=0.1):
        # Terminate busy workers, politely ask idle ones to exit (by sending
        # a None job), wait briefly, then kill anything still alive.
        self.worker_data = self.common_data = None
        for worker in self.busy_workers:
            if worker.process.poll() is None:
                try:
                    worker.process.terminate()
                except OSError:
                    pass # If the process has already been killed
        workers = [w.process for w in self.available_workers + list(self.busy_workers)]
        aw = list(self.available_workers)
        def join():
            for w in aw:
                try:
                    w(None)
                except Exception:
                    pass
            for w in workers:
                try:
                    w.wait()
                except Exception:
                    pass
        reaper = Thread(target=join, name='ReapPoolWorkers')
        reaper.daemon = True
        reaper.start()
        reaper.join(wait_time)
        for w in self.available_workers + list(self.busy_workers):
            try:
                w.conn.close()
            except Exception:
                pass
        for w in workers:
            if w.poll() is None:
                try:
                    w.kill()
                except OSError:
                    pass
        del self.available_workers[:]
        self.busy_workers.clear()
        # Remove the spooled common-data file, if one was created.
        if hasattr(self, 'cd_file'):
            try:
                os.remove(self.cd_file.name)
            except OSError:
                pass
pass
def worker_main(conn):
    '''Entry point of a pool worker process.

    Loops reading pickled messages from *conn*: a Job to execute, a File
    marker or arbitrary object to install as common data, or None/EOF to
    exit. Results are pickled back over the same connection. Returns the
    process exit code (0 normal, 1 on IPC failure).'''
    from importlib import import_module
    common_data = None
    while True:
        try:
            job = pickle_loads(eintr_retry_call(conn.recv_bytes))
        except EOFError:
            break
        except KeyboardInterrupt:
            break
        except Exception:
            prints('recv() failed in worker, terminating worker', file=sys.stderr)
            import traceback
            traceback.print_exc()
            return 1
        if job is None:
            break
        if not isinstance(job, Job):
            # Not a job: this is new common data, possibly spooled to a file.
            if isinstance(job, File):
                with open(job.name, 'rb') as f:
                    common_data = f.read()
                common_data = pickle_loads(common_data)
            else:
                common_data = job
            continue
        try:
            if '\n' in job.module:
                # module contains source code rather than a module name.
                import_module('calibre.customize.ui')  # Load plugins
                from calibre.utils.ipc.simple_worker import compile_code
                mod = compile_code(job.module)
                func = mod[job.func]
            else:
                func = getattr(import_module(job.module), job.func)
            if common_data is not None:
                job.kwargs['common_data'] = common_data
            result = func(*job.args, **job.kwargs)
            result = Result(result, None, None)
        except Exception as err:
            import traceback
            result = Result(None, as_unicode(err), traceback.format_exc())
        try:
            eintr_retry_call(conn.send_bytes, pickle_dumps(result))
        except EOFError:
            break
        except Exception:
            prints('send() failed in worker, terminating worker', file=sys.stderr)
            import traceback
            traceback.print_exc()
            return 1
    return 0
def run_main(client_fd, func):
    '''Wrap the inherited pipe fd in a Connection object and run *func* with
    it, exiting the process with func's return value. This is the code the
    pool injects into each worker process (see Pool.create_worker).'''
    if iswindows:
        from multiprocessing.connection import PipeConnection as Connection
    else:
        from multiprocessing.connection import Connection
    with Connection(client_fd) as conn:
        raise SystemExit(func(conn))
def test_write():
    # Helper for manual testing of worker stdout forwarding: prints from
    # inside a worker process.
    print('Printing to stdout in worker')
def test():
    '''Self test of the pool: normal execution, common data (small and
    larger than MAX_SIZE), job exceptions, worker crashes and shutdown with
    busy workers. Exits with SystemExit on any failure.'''
    def get_results(pool, ignore_fail=False):
        # NOTE: reads the enclosing ``p``; the ``pool`` parameter is
        # effectively unused.
        ans = {}
        while not p.results.empty():
            r = p.results.get()
            if not ignore_fail and r.is_terminal_failure:
                print(r.result.err)
                print(r.result.traceback)
                raise SystemExit(1)
            ans[r.id] = r.result
        return ans
    # Test normal execution
    p = Pool(name='Test')
    expected_results = {}
    for i in range(1000):
        p(i, 'def x(i):\n return 2*i', 'x', i)
        expected_results[i] = 2 * i
    p.wait_for_tasks(30)
    results = {k:v.value for k, v in iteritems(get_results(p))}
    if results != expected_results:
        raise SystemExit(f'{expected_results!r} != {results!r}')
    p.shutdown(), p.join()
    # Test common_data
    p = Pool(name='Test')
    expected_results = {}
    p.start_worker()
    p.set_common_data(7)
    for i in range(1000):
        p(i, 'def x(i, common_data=None):\n return common_data + i', 'x', i)
        expected_results[i] = 7 + i
    p.wait_for_tasks(30)
    results = {k:v.value for k, v in iteritems(get_results(p))}
    if results != expected_results:
        raise SystemExit(f'{expected_results!r} != {results!r}')
    p.shutdown(), p.join()
    # Test large common data
    p = Pool(name='Test')
    data = b'a' * (4 * MAX_SIZE)
    p.set_common_data(data)
    p(0, 'def x(i, common_data=None):\n return len(common_data)', 'x', 0)
    p.wait_for_tasks(30)
    results = get_results(p)
    if len(data) != results[0].value:
        raise SystemExit('Common data was not returned correctly')
    p.shutdown(), p.join()
    # Test exceptions in jobs
    p = Pool(name='Test')
    for i in range(1000):
        p(i, 'def x(i):\n return 1/0', 'x', i)
    p.wait_for_tasks(30)
    c = 0
    for r in itervalues(get_results(p)):
        c += 1
        if not r.traceback or 'ZeroDivisionError' not in r.traceback:
            raise SystemExit('Unexpected result: %s' % r)
    if c != 1000:
        raise SystemExit('Incorrect number of results')
    p.shutdown(), p.join()
    # Test worker crash
    p = Pool(name='Test')
    for i in range(1000):
        try:
            p(i, 'def x(i):\n os._exit(1)', 'x', i)
        except Failure:
            break
    try:
        p.wait_for_tasks(30)
    except Failure:
        pass
    results = get_results(p, ignore_fail=True)
    if not p.failed:
        raise SystemExit('No expected terminal failure')
    p.shutdown(), p.join()
    # Test shutting down with busy workers
    p = Pool(name='Test')
    for i in range(1000):
        p(i, 'import time;\ndef x(i):\n time.sleep(10000)', 'x', i)
    p.shutdown(), p.join()
    print('Tests all passed!')
| 17,384 | Python | .py | 430 | 30.432558 | 151 | 0.595845 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |