id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27,500 | meta.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/meta.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import collections
import os
import regex
from calibre import isbytestring
from calibre.constants import filesystem_encoding
from calibre.customize.ui import get_file_type_metadata, set_file_type_metadata
from calibre.ebooks.metadata import MetaInformation, string_to_authors
from calibre.ebooks.metadata.opf2 import OPF
from calibre.utils.config import prefs
# The priorities for loading metadata from different file types
# Higher values should be used to update metadata from lower values
METADATA_PRIORITIES = collections.defaultdict(int, {
    ext: priority for priority, ext in enumerate((
        'html', 'htm', 'xhtml', 'xhtm',
        'rtf', 'fb2', 'pdf', 'prc', 'odt',
        'epub', 'lit', 'lrx', 'lrf', 'mobi',
        'azw', 'azw3', 'azw1', 'rb', 'imp', 'snb'
    ), start=1)
})
def path_to_ext(path):
    # Lower-cased file extension of *path*, without the leading dot
    extension = os.path.splitext(path)[1]
    return extension[1:].lower()
def metadata_from_formats(formats, force_read_metadata=False, pattern=None):
    '''
    Merge metadata from the given list of format file paths into a single
    metadata object. Falls back to filename-based metadata for the first
    format if reading the files fails for any reason.
    '''
    try:
        return _metadata_from_formats(formats, force_read_metadata, pattern)
    except Exception:
        # Was a bare except:, which also swallowed SystemExit/KeyboardInterrupt.
        # next(iter(...)) avoids copying the whole iterable just to get one item.
        mi = metadata_from_filename(next(iter(formats)), pat=pattern)
        if not mi.authors:
            mi.authors = [_('Unknown')]
        return mi
def _metadata_from_formats(formats, force_read_metadata=False, pattern=None):
    # Merge metadata from every format file into one MetaInformation object.
    # Formats are processed in increasing priority order (METADATA_PRIORITIES)
    # so later, higher-priority formats win via smart_update().
    mi = MetaInformation(None, None)
    formats.sort(key=lambda x: METADATA_PRIORITIES[path_to_ext(x)])
    extensions = list(map(path_to_ext, formats))
    if 'opf' in extensions:
        # A sidecar OPF that yields a title short-circuits everything else
        opf = formats[extensions.index('opf')]
        mi2 = opf_metadata(opf)
        if mi2 is not None and mi2.title:
            return mi2
    for path, ext in zip(formats, extensions):
        with open(path, 'rb') as stream:
            try:
                newmi = get_metadata(stream, stream_type=ext,
                                     use_libprs_metadata=True,
                                     force_read_metadata=force_read_metadata,
                                     pattern=pattern)
                mi.smart_update(newmi)
            except Exception:
                # Best effort: a broken format must not prevent reading the rest
                continue
        if getattr(mi, 'application_id', None) is not None:
            # Metadata was written by calibre itself; trust it and stop early
            return mi
    if not mi.title:
        mi.title = _('Unknown')
    if not mi.authors:
        mi.authors = [_('Unknown')]
    return mi
def get_metadata(stream, stream_type='lrf', use_libprs_metadata=False,
                 force_read_metadata=False, pattern=None):
    '''
    Read metadata from an open file-like object, restoring the stream
    position afterwards so callers can keep using the stream.
    '''
    saved_pos = stream.tell() if hasattr(stream, 'tell') else 0
    try:
        return _get_metadata(stream, stream_type, use_libprs_metadata,
                             force_read_metadata, pattern)
    finally:
        if hasattr(stream, 'seek'):
            stream.seek(saved_pos)
def _get_metadata(stream, stream_type, use_libprs_metadata,
                  force_read_metadata=False, pattern=None):
    '''
    Compute metadata for *stream*, combining (in increasing priority):
    filename-derived metadata, file-internal metadata and a sidecar OPF.
    '''
    if stream_type:
        stream_type = stream_type.lower()
    # Normalize related extensions onto the canonical reader type.
    # BUG FIX: the tuple previously contained 'html' twice and omitted 'htm',
    # so .htm files were never normalized to the HTML reader.
    if stream_type in ('html', 'htm', 'xhtml', 'xhtm', 'xml'):
        stream_type = 'html'
    if stream_type in ('mobi', 'prc', 'azw'):
        stream_type = 'mobi'
    if stream_type in ('odt', 'ods', 'odp', 'odg', 'odf'):
        stream_type = 'odt'

    # Look for a sidecar OPF file next to the stream on disk
    opf = None
    if hasattr(stream, 'name'):
        c = os.path.splitext(stream.name)[0]+'.opf'
        if os.access(c, os.R_OK):
            opf = opf_metadata(os.path.abspath(c))

    if use_libprs_metadata and getattr(opf, 'application_id', None) is not None:
        # The OPF was written by calibre itself; it is authoritative
        return opf

    name = os.path.basename(getattr(stream, 'name', ''))
    # The fallback pattern matches the default filename format produced by calibre
    base = metadata_from_filename(name, pat=pattern, fallback_pat=regex.compile(
        r'^(?P<title>.+) - (?P<author>[^-]+)$', flags=regex.UNICODE | regex.VERSION1 | regex.FULLCASE))
    if not base.authors:
        base.authors = [_('Unknown')]
    if not base.title:
        base.title = _('Unknown')
    mi = MetaInformation(None, None)
    if force_read_metadata or prefs['read_file_metadata']:
        mi = get_file_type_metadata(stream, stream_type)
    base.smart_update(mi)
    if opf is not None:
        base.smart_update(opf)
    return base
def set_metadata(stream, mi, stream_type='lrf', report_error=None):
    '''Write metadata *mi* to *stream* via the file-type plugin system.'''
    stream_type = stream_type.lower() if stream_type else stream_type
    set_file_type_metadata(stream, mi, stream_type, report_error=report_error)
def metadata_from_filename(name, pat=None, fallback_pat=None):
    '''
    Extract metadata from a filename using the user-configured filename
    pattern (or *pat*/*fallback_pat* when supplied). Recognized named groups:
    title, author, series, series_index, isbn, publisher, published, comments.
    '''
    if isbytestring(name):
        name = name.decode(filesystem_encoding, 'replace')
    name = name.rpartition('.')[0]  # strip the file extension
    mi = MetaInformation(None, None)
    if pat is None:
        try:
            pat = regex.compile(prefs.get('filename_pattern'), flags=regex.UNICODE | regex.VERSION1 | regex.FULLCASE)
        except Exception:
            try:
                # Retry with VERSION0 semantics for patterns invalid under VERSION1
                pat = regex.compile(prefs.get('filename_pattern'), flags=regex.UNICODE | regex.VERSION0 | regex.FULLCASE)
            except Exception:
                # Last resort: the default "title - author" pattern
                pat = regex.compile('(?P<title>.+) - (?P<author>[^_]+)', flags=regex.UNICODE | regex.VERSION0 | regex.FULLCASE)
    name = name.replace('_', ' ')
    match = pat.search(name)
    if match is None and fallback_pat is not None:
        match = fallback_pat.search(name)
    if match is not None:
        # Each group is optional: IndexError means the pattern has no such group
        try:
            mi.title = match.group('title')
        except IndexError:
            pass
        try:
            au = match.group('author')
            aus = string_to_authors(au)
            if aus:
                mi.authors = aus
                if prefs['swap_author_names'] and mi.authors:
                    def swap(a):
                        # 'Last, First' -> 'First Last' (comma wins over whitespace)
                        if ',' in a:
                            parts = a.split(',', 1)
                        else:
                            parts = a.split(None, 1)
                        if len(parts) > 1:
                            t = parts[-1]
                            parts = parts[:-1]
                            parts.insert(0, t)
                        return ' '.join(parts)
                    mi.authors = [swap(x) for x in mi.authors]
        except (IndexError, ValueError):
            pass
        try:
            mi.series = match.group('series')
        except IndexError:
            pass
        try:
            si = match.group('series_index')
            mi.series_index = float(si)
        except (IndexError, ValueError, TypeError):
            pass
        try:
            si = match.group('isbn')
            mi.isbn = si
        except (IndexError, ValueError):
            pass
        try:
            publisher = match.group('publisher')
            mi.publisher = publisher
        except (IndexError, ValueError):
            pass
        try:
            pubdate = match.group('published')
            if pubdate:
                from calibre.utils.date import parse_only_date
                mi.pubdate = parse_only_date(pubdate)
        except:
            pass
        try:
            comments = match.group('comments')
            mi.comments = comments
        except (IndexError, ValueError):
            pass
    if mi.is_null('title'):
        mi.title = name  # no title group matched: use the whole (cleaned) filename
    return mi
def opf_metadata(opfpath):
    '''
    Read metadata from an OPF file (a path or an open stream).

    Returns a book metadata object only when the OPF was written by calibre
    (it has an application_id), otherwise None. Cover data is loaded from
    disk when the referenced cover file is readable.
    '''
    if hasattr(opfpath, 'read'):
        f = opfpath
        opfpath = getattr(f, 'name', os.getcwd())
        close_stream = False  # caller owns the stream
    else:
        f = open(opfpath, 'rb')
        close_stream = True
    try:
        opf = OPF(f, os.path.dirname(opfpath))
        if opf.application_id is not None:
            mi = opf.to_book_metadata()
            if hasattr(opf, 'cover') and opf.cover:
                cpath = os.path.join(os.path.dirname(opfpath), opf.cover)
                if os.access(cpath, os.R_OK):
                    fmt = cpath.rpartition('.')[-1]
                    # Use a distinct name so the OPF stream is not shadowed
                    with open(cpath, 'rb') as cf:
                        data = cf.read()
                    mi.cover_data = (fmt, data)
            return mi
    except Exception:
        import traceback
        traceback.print_exc()
    finally:
        if close_stream:
            f.close()  # BUG FIX: the file handle was previously leaked
def forked_read_metadata(original_path, tdir):
    # Worker-process entry point: read metadata for a single book file and
    # leave the results as files inside *tdir* (size.txt, cover.jpg,
    # metadata.opf, and optionally file_changed_by_plugins) for the parent
    # process to pick up.
    from calibre.ebooks.metadata.opf2 import metadata_to_opf
    from calibre.ebooks.metadata.worker import run_import_plugins
    from calibre.utils.filenames import make_long_path_useable
    path = run_import_plugins((original_path,), os.getpid(), tdir)[0]
    if path != original_path:
        # An import plugin rewrote the file; record the new path for the parent
        with open(os.path.join(tdir, 'file_changed_by_plugins'), 'w') as f:
            f.write(os.path.abspath(path))
    with open(make_long_path_useable(path), 'rb') as f:
        fmt = os.path.splitext(path)[1][1:].lower()
        f.seek(0, 2)
        sz = f.tell()  # file size, recorded via seek-to-end
        with open(os.path.join(tdir, 'size.txt'), 'wb') as s:
            s.write(str(sz).encode('ascii'))
        f.seek(0)
        mi = get_metadata(f, fmt)
    if mi.cover_data and mi.cover_data[1]:
        # Externalize the cover so the OPF stays small; mark it by filename
        with open(os.path.join(tdir, 'cover.jpg'), 'wb') as f:
            f.write(mi.cover_data[1])
        mi.cover_data = (None, None)
        mi.cover = 'cover.jpg'
    opf = metadata_to_opf(mi, default_lang='und')
    with open(os.path.join(tdir, 'metadata.opf'), 'wb') as f:
        f.write(opf)
| 9,065 | Python | .py | 225 | 30.368889 | 127 | 0.577997 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,501 | xmp.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/xmp.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import copy
import json
import re
import sys
from collections import defaultdict
from itertools import repeat
from lxml import etree
from lxml.builder import ElementMaker
from calibre import prints
from calibre.ebooks.metadata import check_doi, check_isbn, string_to_authors
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import dump_dict
from calibre.utils.date import isoformat, now, parse_date
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import iteritems, string_or_bytes
# Matches an XML declaration so it can be stripped before handing str text to
# lxml (which refuses encoding declarations in unicode input)
_xml_declaration = re.compile(r'<\?xml[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>', re.IGNORECASE)

# Namespace prefix -> URI map for every namespace read or written in XMP
# packets. The calibre* entries are calibre-specific extension namespaces.
NS_MAP = {
    'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'pdf': 'http://ns.adobe.com/pdf/1.3/',
    'pdfx': 'http://ns.adobe.com/pdfx/1.3/',
    'xmp': 'http://ns.adobe.com/xap/1.0/',
    'xmpidq': 'http://ns.adobe.com/xmp/Identifier/qual/1.0/',
    'xmpMM': 'http://ns.adobe.com/xap/1.0/mm/',
    'xmpRights': 'http://ns.adobe.com/xap/1.0/rights/',
    'xmpBJ': 'http://ns.adobe.com/xap/1.0/bj/',
    'xmpTPg': 'http://ns.adobe.com/xap/1.0/t/pg/',
    'xmpDM': 'http://ns.adobe.com/xmp/1.0/DynamicMedia/',
    'prism': 'http://prismstandard.org/namespaces/basic/2.0/',
    'crossmark': 'http://crossref.org/crossmark/1.0/',
    'xml': 'http://www.w3.org/XML/1998/namespace',
    'x': 'adobe:ns:meta/',
    'calibre': 'http://calibre-ebook.com/xmp-namespace',
    'calibreSI': 'http://calibre-ebook.com/xmp-namespace-series-index',
    'calibreCC': 'http://calibre-ebook.com/xmp-namespace-custom-columns',
}

# Identifier schemes that are also looked up in the prism/pdfx namespaces
KNOWN_ID_SCHEMES = {'isbn', 'url', 'doi'}
def expand(name):
    '''Convert a prefixed name like dc:title into Clark notation {uri}title.'''
    prefix, _, local = name.partition(':')
    return '{%s}%s' % (NS_MAP[prefix], local)
# Cache of compiled XPath objects, keyed by expression text
xpath_cache = {}


def XPath(expr):
    '''Compile *expr* against NS_MAP, memoizing the compiled object.'''
    try:
        return xpath_cache[expr]
    except KeyError:
        compiled = etree.XPath(expr, namespaces=NS_MAP)
        xpath_cache[expr] = compiled
        return compiled
def parse_xmp_packet(raw_bytes):
    # Parse an XMP packet, sniffing its encoding from the BOM carried in the
    # xpacket begin="..." attribute (per the Adobe XMP packet-wrapper spec).
    raw_bytes = raw_bytes.strip()
    enc = None
    pat = r'''<?xpacket\s+[^>]*?begin\s*=\s*['"]([^'"]*)['"]'''
    encodings = ('8', '16-le', '16-be', '32-le', '32-be')
    header = raw_bytes[:1024]
    # Map the BOM, as it appears when encoded in each candidate encoding,
    # back to the encoding's name; an empty begin attribute means UTF-8
    emap = {'\ufeff'.encode('utf-'+x):'utf-'+x for x in encodings}
    emap[b''] = 'utf-8'
    for q in encodings:
        # Search for the xpacket PI encoded in each candidate encoding
        m = re.search(pat.encode('utf-'+q), header)
        if m is not None:
            enc = emap.get(m.group(1), enc)
            break
    if enc is None:
        # No xpacket wrapper found; let lxml figure out the encoding itself
        return safe_xml_fromstring(raw_bytes)
    raw = _xml_declaration.sub('', raw_bytes.decode(enc)) # lxml barfs if encoding declaration present in unicode string
    return safe_xml_fromstring(raw)
def serialize_xmp_packet(root, encoding='utf-8'):
    # Serialize *root* wrapped in the standard xpacket processing
    # instructions. The begin attribute carries the BOM in the chosen
    # encoding so readers can detect it (see parse_xmp_packet).
    root.tail = '\n' + '\n'.join(repeat(' '*100, 30))  # Adobe spec recommends inserting padding at the end of the packet
    raw_bytes = etree.tostring(root, encoding=encoding, pretty_print=True, with_tail=True, method='xml')
    return b'<?xpacket begin="%s" id="W5M0MpCehiHzreSzNTczkc9d"?>\n%s\n<?xpacket end="w"?>' % ('\ufeff'.encode(encoding), raw_bytes)
def read_simple_property(elem):
    '''Value of a simple property: element text, else the rdf:resource attribute.'''
    if elem is None:
        return None
    return elem.text if elem.text else elem.get(expand('rdf:resource'), '')
def read_lang_alt(parent):
    '''First rdf:li of a language-alternate array, preferring x-default.'''
    for xp in ('descendant::rdf:li[@xml:lang="x-default"]',
               'descendant::rdf:li'):
        found = XPath(xp)(parent)
        if found:
            return found[0]
def read_sequence(parent):
    '''Yield the simple-property value of every rdf:li under *parent*.'''
    yield from map(read_simple_property, XPath('descendant::rdf:li')(parent))
def uniq(vals, kmap=lambda x:x):
    ''' Remove all duplicates from vals, while preserving order. kmap must be a
    callable that returns a hashable value for every item in vals '''
    vals = vals or ()
    seen = set()
    result = []
    for item in vals:
        key = kmap(item)
        if key not in seen:
            seen.add(key)
            result.append(item)
    return tuple(result)
def multiple_sequences(expr, root):
    '''All values from every sequence matching *expr*: distinct, non-null,
    order preserved.'''
    collected = []
    for node in XPath(expr)(root):
        collected.extend(read_sequence(node))
    return [x for x in uniq(collected) if x]
def first_alt(expr, root):
    '''Value of the first element matching *expr* that contains a non-empty
    language-alternate array.'''
    for node in XPath(expr)(root):
        value = read_simple_property(read_lang_alt(node))
        if value:
            return value
def first_simple(expr, root):
    '''Value of the first non-empty simple property matching *expr*.'''
    for node in XPath(expr)(root):
        value = read_simple_property(node)
        if value:
            return value
def first_sequence(expr, root):
    '''First item of the first sequence matching *expr*.'''
    for node in XPath(expr)(root):
        for value in read_sequence(node):
            return value
def read_series(root):
    # Extract (series, series_index) from the calibre:series structure.
    # Returns (None, None) when no usable series name is found.
    for item in XPath('//calibre:series')(root):
        val = XPath('descendant::rdf:value')(item)
        if val:
            series = val[0].text
            if series and series.strip():
                series_index = 1.0
                # Take the first parseable index; default to 1.0.
                # for/else: break on success, keep looking on parse failure.
                for si in XPath('descendant::calibreSI:series_index')(item):
                    try:
                        series_index = float(si.text)
                    except (TypeError, ValueError):
                        continue
                    else:
                        break
                return series, series_index
    return None, None
def read_user_metadata(mi, root):
    '''
    Read calibre custom-column metadata (calibre:custom_metadata) into *mi*.
    Only the first occurrence of each '#field' name is used; unparseable
    entries are logged and skipped.
    '''
    from calibre.ebooks.metadata.book.json_codec import decode_is_multiple
    from calibre.utils.config import from_json
    fields = set()
    for item in XPath('//calibre:custom_metadata')(root):
        for li in XPath('./rdf:Bag/rdf:li')(item):
            name = XPath('descendant::calibreCC:name')(li)
            if name:
                name = name[0].text
                # Guard against empty <name> elements whose text is None
                if name and name.startswith('#') and name not in fields:
                    val = XPath('descendant::rdf:value')(li)
                    if val:
                        fm = val[0].text
                        try:
                            fm = json.loads(fm, object_hook=from_json)
                            decode_is_multiple(fm)
                            mi.set_user_metadata(name, fm)
                            fields.add(name)
                        except Exception:
                            # Was a bare except:, which also swallowed
                            # KeyboardInterrupt/SystemExit
                            prints('Failed to read user metadata:', name)
                            import traceback
                            traceback.print_exc()
def read_xmp_identifers(parent):
    ''' For example:
    <rdf:li rdf:parseType="Resource"><xmpidq:Scheme>URL</xmp:idq><rdf:value>http://foo.com</rdf:value></rdf:li>
    or the longer form:
    <rdf:li><rdf:Description><xmpidq:Scheme>URL</xmp:idq><rdf:value>http://foo.com</rdf:value></rdf:Description></rdf:li>
    '''
    for li in XPath('./rdf:Bag/rdf:li')(parent):
        # An entry is "structured" either via rdf:parseType="Resource" (short
        # form) or via a single rdf:Description child (long form)
        is_resource = li.attrib.get(expand('rdf:parseType'), None) == 'Resource'
        is_resource = is_resource or (len(li) == 1 and li[0].tag == expand('rdf:Description'))
        if not is_resource:
            # Plain-text identifier with no scheme.
            # NOTE(review): control falls through and may also yield a
            # descendant rdf:value for the same li — confirm the double
            # yield is intended (a `continue` here may have been meant).
            yield None, li.text or ''
        value = XPath('descendant::rdf:value')(li)
        if not value:
            continue
        value = value[0].text or ''
        scheme = XPath('descendant::xmpidq:Scheme')(li)
        if not scheme:
            yield None, value
        else:
            yield scheme[0].text or '', value
def safe_parse_date(raw):
    '''Parse *raw* as a date; None for empty input or on parse failure.'''
    if not raw:
        return None
    try:
        return parse_date(raw)
    except Exception:
        return None
def more_recent(one, two):
    '''The later of two values, tolerating None and incomparable pairs.'''
    if one is None or two is None:
        # Whichever is not None (or None when both are)
        return two if one is None else one
    try:
        return max(one, two)
    except Exception:
        # Incomparable values: arbitrarily prefer the first
        return one
def metadata_from_xmp_packet(raw_bytes):
    '''
    Parse an XMP packet and convert it into a calibre Metadata object,
    reading Dublin Core, Adobe xmp/pdf and calibre-specific properties.

    Raises ValueError for the corrupted packets produced by some Nitro PDF
    versions.
    '''
    root = parse_xmp_packet(raw_bytes)
    mi = Metadata(_('Unknown'))
    title = first_alt('//dc:title', root)
    if title:
        if title.startswith(r'\376\377'):
            # corrupted XMP packet generated by Nitro PDF. See
            # https://bugs.launchpad.net/calibre/+bug/1541981
            raise ValueError('Corrupted XMP metadata packet detected, probably generated by Nitro PDF')
        mi.title = title
    authors = multiple_sequences('//dc:creator', root)
    if authors:
        # Each dc:creator entry may itself contain several author names
        mi.authors = [au for aus in authors for au in string_to_authors(aus)]
    tags = multiple_sequences('//dc:subject', root) or multiple_sequences('//pdf:Keywords', root)
    if tags:
        mi.tags = tags
    comments = first_alt('//dc:description', root)
    if comments:
        mi.comments = comments
    publishers = multiple_sequences('//dc:publisher', root)
    if publishers:
        mi.publisher = publishers[0]
    try:
        pubdate = parse_date(first_sequence('//dc:date', root) or first_simple('//xmp:CreateDate', root), assume_utc=False)
    except Exception:
        # Was a bare except:; unparseable dates are simply ignored
        pass
    else:
        mi.pubdate = pubdate
    bkp = first_simple('//xmp:CreatorTool', root)
    if bkp:
        mi.book_producer = bkp
    # Metadata date: the more recent of xmp:MetadataDate and xmp:ModifyDate
    md = safe_parse_date(first_simple('//xmp:MetadataDate', root))
    mod = safe_parse_date(first_simple('//xmp:ModifyDate', root))
    fd = more_recent(md, mod)
    if fd is not None:
        mi.metadata_date = fd
    rating = first_simple('//calibre:rating', root)
    if rating is not None:
        try:
            rating = float(rating)
            if 0 <= rating <= 10:
                mi.rating = rating
        except (ValueError, TypeError):
            pass
    series, series_index = read_series(root)
    if series:
        mi.series, mi.series_index = series, series_index
    for x in ('title_sort', 'author_sort'):
        for elem in XPath('//calibre:' + x)(root):
            val = read_simple_property(elem)
            if val:
                setattr(mi, x, val)
                break
    for x in ('link_maps', 'user_categories'):
        val = first_simple('//calibre:'+x, root)
        if val:
            try:
                setattr(mi, x, json.loads(val))
            except Exception:
                pass
        elif x == 'link_maps':
            # Migrate the legacy author_link_map into link_maps['authors']
            val = first_simple('//calibre:author_link_map', root)
            if val:
                try:
                    setattr(mi, x, {'authors': json.loads(val)})
                except Exception:
                    pass
    languages = multiple_sequences('//dc:language', root)
    if languages:
        languages = list(filter(None, map(canonicalize_lang, languages)))
        if languages:
            mi.languages = languages
    identifiers = {}
    for xmpid in XPath('//xmp:Identifier')(root):
        for scheme, value in read_xmp_identifers(xmpid):
            if scheme and value:
                identifiers[scheme.lower()] = value
    # Fall back to the prism/pdfx namespaces for well-known schemes
    for namespace in ('prism', 'pdfx'):
        for scheme in KNOWN_ID_SCHEMES:
            if scheme not in identifiers:
                val = first_simple(f'//{namespace}:{scheme}', root)
                scheme = scheme.lower()
                if scheme == 'isbn':
                    val = check_isbn(val)
                elif scheme == 'doi':
                    val = check_doi(val)
                if val:
                    identifiers[scheme] = val
    # Check Dublin Core for recognizable identifier types
    for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}):
        if scheme not in identifiers:
            val = check_func(first_simple('//dc:identifier', root))
            if val:
                # BUG FIX: was identifiers['doi'] = val, which stored ISBNs
                # found in dc:identifier under the 'doi' key
                identifiers[scheme] = val
    if identifiers:
        mi.set_identifiers(identifiers)
    read_user_metadata(mi, root)
    return mi
def consolidate_metadata(info_mi, info):
    ''' When both the PDF Info dict and XMP metadata are present, prefer the xmp
    metadata unless the Info ModDate is newer than the XMP MetadataDate. This
    is the algorithm recommended by the PDF spec. '''
    try:
        raw = info['xmp_metadata'].rstrip()
        if not raw:
            return info_mi
        xmp_mi = metadata_from_xmp_packet(raw)
    except Exception:
        # Unparseable XMP: fall back entirely to the Info-dict metadata
        import traceback
        traceback.print_exc()
        return info_mi
    # Remember the Info-dict values before smart_update overwrites them
    info_title, info_authors, info_tags = info_mi.title or _('Unknown'), list(info_mi.authors or ()), list(info_mi.tags or ())
    info_mi.smart_update(xmp_mi, replace_metadata=True)
    prefer_info = False
    if 'ModDate' in info and hasattr(xmp_mi, 'metadata_date'):
        try:
            info_date = parse_date(info['ModDate'])
        except Exception:
            pass
        else:
            # Info wins only when it is strictly newer than the XMP packet
            prefer_info = info_date > xmp_mi.metadata_date
    if prefer_info:
        info_mi.title, info_mi.authors, info_mi.tags = info_title, info_authors, info_tags
    else:
        # We'll use the xmp tags/authors but fallback to the info ones if the
        # xmp does not have tags/authors. smart_update() should have taken care of
        # the rest
        info_mi.authors, info_mi.tags = (info_authors if xmp_mi.is_null('authors') else xmp_mi.authors), xmp_mi.tags or info_tags
    return info_mi
def nsmap(*args):
    '''Subset of NS_MAP restricted to the given prefixes.'''
    return dict((prefix, NS_MAP[prefix]) for prefix in args)
def create_simple_property(parent, tag, value):
    '''Append a simple text property <tag>value</tag> under *parent*.'''
    node = parent.makeelement(expand(tag))
    node.text = value
    parent.append(node)
def create_alt_property(parent, tag, value):
    '''Append a language-alternate (rdf:Alt) property with an x-default entry.'''
    prop = parent.makeelement(expand(tag))
    parent.append(prop)
    alt = prop.makeelement(expand('rdf:Alt'))
    prop.append(alt)
    entry = alt.makeelement(expand('rdf:li'))
    entry.set(expand('xml:lang'), 'x-default')
    entry.text = value
    alt.append(entry)
def create_sequence_property(parent, tag, val, ordered=True):
    '''Append an rdf:Seq (or rdf:Bag when unordered) property holding *val*.'''
    container_tag = 'rdf:Seq' if ordered else 'rdf:Bag'
    prop = parent.makeelement(expand(tag))
    parent.append(prop)
    container = prop.makeelement(expand(container_tag))
    prop.append(container)
    for entry in val:
        item = container.makeelement(expand('rdf:li'))
        item.text = entry
        container.append(item)
def create_identifiers(xmp, identifiers):
    '''Write all identifiers as an rdf:Bag of scheme/value Resource entries
    under xmp:Identifier.'''
    container = xmp.makeelement(expand('xmp:Identifier'))
    xmp.append(container)
    bag = container.makeelement(expand('rdf:Bag'))
    container.append(bag)
    for scheme, value in iteritems(identifiers):
        entry = bag.makeelement(expand('rdf:li'))
        entry.set(expand('rdf:parseType'), 'Resource')
        bag.append(entry)
        scheme_el = entry.makeelement(expand('xmpidq:Scheme'))
        scheme_el.text = scheme
        entry.append(scheme_el)
        value_el = entry.makeelement(expand('rdf:value'))
        entry.append(value_el)
        value_el.text = value
def create_series(calibre, series, series_index):
    '''Write calibre:series as a Resource holding the series name and index.'''
    node = calibre.makeelement(expand('calibre:series'))
    node.set(expand('rdf:parseType'), 'Resource')
    calibre.append(node)
    name_el = node.makeelement(expand('rdf:value'))
    node.append(name_el)
    name_el.text = series
    try:
        series_index = float(series_index)
    except (TypeError, ValueError):
        series_index = 1.0  # default when the index is missing or invalid
    idx_el = node.makeelement(expand('calibreSI:series_index'))
    idx_el.text = '%.2f' % series_index
    node.append(idx_el)
def create_user_metadata(calibre, all_user_metadata):
    '''
    Serialize calibre custom-column field metadata into an rdf:Bag under
    calibre:custom_metadata. Fields that fail to serialize are logged and
    skipped.
    '''
    from calibre.ebooks.metadata.book.json_codec import encode_is_multiple, object_to_unicode
    from calibre.utils.config import to_json

    s = calibre.makeelement(expand('calibre:custom_metadata'))
    calibre.append(s)
    bag = s.makeelement(expand('rdf:Bag'))
    s.append(bag)
    for name, fm in iteritems(all_user_metadata):
        try:
            fm = copy.copy(fm)
            encode_is_multiple(fm)
            fm = object_to_unicode(fm)
            fm = json.dumps(fm, default=to_json, ensure_ascii=False)
        except Exception:
            # Was a bare except:, which also swallowed KeyboardInterrupt
            prints('Failed to write user metadata:', name)
            import traceback
            traceback.print_exc()
            continue
        li = bag.makeelement(expand('rdf:li'))
        li.set(expand('rdf:parseType'), 'Resource')
        bag.append(li)
        n = li.makeelement(expand('calibreCC:name'))
        li.append(n)
        n.text = name
        val = li.makeelement(expand('rdf:value'))
        val.text = fm
        li.append(val)
def metadata_to_xmp_packet(mi):
    # Serialize a calibre Metadata object into a fresh XMP packet, with a
    # separate rdf:Description block per namespace family (dc, xmp,
    # prism/pdfx mirrors, calibre extensions).
    A = ElementMaker(namespace=NS_MAP['x'], nsmap=nsmap('x'))
    R = ElementMaker(namespace=NS_MAP['rdf'], nsmap=nsmap('rdf'))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]
    # Dublin Core block
    dc = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('dc'))
    dc.set(expand('rdf:about'), '')
    rdf.append(dc)
    for prop, tag in iteritems({'title':'dc:title', 'comments':'dc:description'}):
        val = mi.get(prop) or ''
        create_alt_property(dc, tag, val)
    for prop, (tag, ordered) in iteritems({
        'authors':('dc:creator', True), 'tags':('dc:subject', False), 'publisher':('dc:publisher', False),
    }):
        val = mi.get(prop) or ()
        if isinstance(val, string_or_bytes):
            val = [val]
        create_sequence_property(dc, tag, val, ordered)
    if not mi.is_null('pubdate'):
        create_sequence_property(dc, 'dc:date', [isoformat(mi.pubdate, as_utc=False)])  # Adobe spec recommends local time
    if not mi.is_null('languages'):
        # Prefer 2-letter ISO 639-1 codes, falling back to canonical lang codes
        langs = list(filter(None, map(lambda x:lang_as_iso639_1(x) or canonicalize_lang(x), mi.languages)))
        if langs:
            create_sequence_property(dc, 'dc:language', langs, ordered=False)
    # Adobe xmp block (identifiers + metadata date)
    xmp = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('xmp', 'xmpidq'))
    xmp.set(expand('rdf:about'), '')
    rdf.append(xmp)
    extra_ids = {}
    # Empty prism/pdfx blocks; isbn/doi are mirrored into them below
    for x in ('prism', 'pdfx'):
        p = extra_ids[x] = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap(x))
        p.set(expand('rdf:about'), '')
        rdf.append(p)
    identifiers = mi.get_identifiers()
    if identifiers:
        create_identifiers(xmp, identifiers)
        for scheme, val in iteritems(identifiers):
            if scheme in {'isbn', 'doi'}:
                for prefix, parent in iteritems(extra_ids):
                    ie = parent.makeelement(expand('%s:%s'%(prefix, scheme)))
                    ie.text = val
                    parent.append(ie)
    d = xmp.makeelement(expand('xmp:MetadataDate'))
    d.text = isoformat(now(), as_utc=False)
    xmp.append(d)
    # calibre extensions block
    calibre = rdf.makeelement(expand('rdf:Description'), nsmap=nsmap('calibre', 'calibreSI', 'calibreCC'))
    calibre.set(expand('rdf:about'), '')
    rdf.append(calibre)
    if not mi.is_null('rating'):
        try:
            r = float(mi.rating)
        except (TypeError, ValueError):
            pass
        else:
            create_simple_property(calibre, 'calibre:rating', '%g' % r)
    if not mi.is_null('series'):
        create_series(calibre, mi.series, mi.series_index)
    if not mi.is_null('timestamp'):
        create_simple_property(calibre, 'calibre:timestamp', isoformat(mi.timestamp, as_utc=False))
    for x in ('link_maps', 'user_categories'):
        val = getattr(mi, x, None)
        if val:
            create_simple_property(calibre, 'calibre:'+x, dump_dict(val))
    for x in ('title_sort', 'author_sort'):
        if not mi.is_null(x):
            create_simple_property(calibre, 'calibre:'+x, getattr(mi, x))
    all_user_metadata = mi.get_all_user_metadata(True)
    if all_user_metadata:
        create_user_metadata(calibre, all_user_metadata)
    return serialize_xmp_packet(root)
def find_used_namespaces(elem):
    '''Set of namespace URIs used by *elem* and all its descendants
    (None stands for names without a namespace).'''
    def ns_of(qname):
        return qname.partition('}')[0][1:] if '}' in qname else None
    found = {ns_of(name) for name in list(elem.attrib) + [elem.tag]}
    for child in elem.iterchildren(etree.Element):
        found |= find_used_namespaces(child)
    return found
def find_preferred_prefix(namespace, elems):
    # Return the prefix the source documents themselves declare for
    # *namespace*, searching depth-first. Returns None when not found.
    # NOTE(review): the recursive call sits after the loop, so only the
    # children of the element left in `elem` after the loop are searched,
    # and an empty *elems* makes `elem` unbound (NameError) — confirm
    # callers always pass non-empty element lists.
    for elem in elems:
        ans = {v:k for k, v in iteritems(elem.nsmap)}.get(namespace, None)
        if ans is not None:
            return ans
    return find_preferred_prefix(namespace, elem.iterchildren(etree.Element))
def find_nsmap(elems):
    # Build a prefix -> namespace map covering every namespace used by
    # *elems*. Known namespaces get their canonical NS_MAP prefix; unknown
    # ones reuse the prefix declared in the source document when available,
    # otherwise a generated 'nsN' prefix.
    used_namespaces = set()
    for elem in elems:
        used_namespaces |= find_used_namespaces(elem)
    ans = {}
    # These are declared on the packet root / handled specially, never here
    used_namespaces -= {NS_MAP['xml'], NS_MAP['x'], None, NS_MAP['rdf']}
    rmap = {v:k for k, v in iteritems(NS_MAP)}
    i = 0
    for ns in used_namespaces:
        if ns in rmap:
            ans[rmap[ns]] = ns
        else:
            pp = find_preferred_prefix(ns, elems)
            if pp and pp not in ans:
                ans[pp] = ns
            else:
                i += 1
                ans['ns%d' % i] = ns
    return ans
def clone_into(parent, elem):
    ' Clone the element, assuming that all namespace declarations are present in parent '
    duplicate = parent.makeelement(elem.tag)
    parent.append(duplicate)
    # Copy text/tail only when they carry real content, not pretty-print whitespace
    for attr in ('text', 'tail'):
        content = getattr(elem, attr)
        if content and not content.isspace():
            setattr(duplicate, attr, content)
    duplicate.attrib.update(elem.attrib)
    for child in elem.iterchildren(etree.Element):
        clone_into(duplicate, child)
def merge_xmp_packet(old, new):
    ''' Merge metadata present in the old packet that is not present in the new
    one into the new one. Assumes the new packet was generated by
    metadata_to_xmp_packet() '''
    old, new = parse_xmp_packet(old), parse_xmp_packet(new)
    # As per the adobe spec all metadata items have to be present inside top-level rdf:Description containers
    item_xpath = XPath('//rdf:RDF/rdf:Description/*')

    # First remove all data fields that metadata_to_xmp_packet() knowns about,
    # since either they will have been set or if not present, imply they have
    # been cleared
    defined_tags = {expand(prefix + ':' + scheme) for prefix in ('prism', 'pdfx') for scheme in KNOWN_ID_SCHEMES}
    defined_tags |= {expand('dc:' + x) for x in ('identifier', 'title', 'creator', 'date', 'description', 'language', 'publisher', 'subject')}
    defined_tags |= {expand('xmp:' + x) for x in ('MetadataDate', 'Identifier')}
    # For redundancy also remove all fields explicitly set in the new packet
    defined_tags |= {x.tag for x in item_xpath(new)}
    calibrens = '{%s}' % NS_MAP['calibre']
    for elem in item_xpath(old):
        # calibre-namespaced items are always regenerated, so drop them too
        if elem.tag in defined_tags or (elem.tag and elem.tag.startswith(calibrens)):
            elem.getparent().remove(elem)

    # Group all items into groups based on their namespaces
    groups = defaultdict(list)
    for item in item_xpath(new):
        ns = item.nsmap[item.prefix]
        groups[ns].append(item)

    for item in item_xpath(old):
        ns = item.nsmap[item.prefix]
        groups[ns].append(item)

    # Rebuild a fresh packet: one rdf:Description per namespace, emitting
    # dc, then xmp, then calibre, then everything else sorted by URI
    A = ElementMaker(namespace=NS_MAP['x'], nsmap=nsmap('x'))
    R = ElementMaker(namespace=NS_MAP['rdf'], nsmap=nsmap('rdf'))
    root = A.xmpmeta(R.RDF)
    rdf = root[0]

    for namespace in sorted(groups, key=lambda x:{NS_MAP['dc']:'a', NS_MAP['xmp']:'b', NS_MAP['calibre']:'c'}.get(x, 'z'+x)):
        items = groups[namespace]
        desc = rdf.makeelement(expand('rdf:Description'), nsmap=find_nsmap(items))
        desc.set(expand('rdf:about'), '')
        rdf.append(desc)
        for item in items:
            clone_into(desc, item)

    return serialize_xmp_packet(root)
if __name__ == '__main__':
    # Manual round-trip check: read the XMP packet from a PDF given on the
    # command line, convert it to Metadata and back, then print the merge
    # of the original and regenerated packets.
    from calibre.utils.podofo import get_xmp_metadata
    xmp_packet = get_xmp_metadata(sys.argv[-1])
    mi = metadata_from_xmp_packet(xmp_packet)
    np = metadata_to_xmp_packet(mi)
    print(merge_xmp_packet(xmp_packet, np))
| 23,574 | Python | .py | 562 | 33.959075 | 142 | 0.616267 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,502 | opf2.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/opf2.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
lxml based OPF parser.
'''
import copy
import functools
import glob
import io
import json
import os
import re
import sys
import uuid
from contextlib import suppress
from lxml import etree
from calibre import guess_type, prints
from calibre.constants import __appname__, __version__, filesystem_encoding
from calibre.ebooks import escape_xpath_attr
from calibre.ebooks.metadata import MetaInformation, check_isbn, string_to_authors
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.metadata.utils import parse_opf
from calibre.ebooks.metadata.utils import pretty_print_opf as _pretty_print
from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars
from calibre.utils.config import tweaks
from calibre.utils.date import isoformat, parse_date
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import upper as icu_upper
from calibre.utils.localization import canonicalize_lang, get_lang
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.builtins import iteritems
from polyglot.urllib import unquote, urlparse
# Module-wide flag: when True, OPF serialization pretty-prints its output
pretty_print_opf = False


class PrettyPrint:
    '''Context manager that enables OPF pretty-printing for its duration.'''

    def __enter__(self):
        global pretty_print_opf
        pretty_print_opf = True

    def __exit__(self, *args):
        global pretty_print_opf
        pretty_print_opf = False


# Shared instance; the only state is the module-level flag above
pretty_print = PrettyPrint()
class Resource: # {{{

    '''
    Represents a resource (usually a file on the filesystem or a URL pointing
    to the web. Such resources are commonly referred to in OPF files.

    They have the interface:

    :member:`path`
    :member:`mime_type`

    :method:`href`

    '''

    def __init__(self, href_or_path, basedir=os.getcwd(), is_path=True):
        # NOTE: the basedir default is evaluated once at import time; callers
        # that depend on the current directory should pass it explicitly.
        self.orig = href_or_path
        self._href = None
        self._basedir = basedir
        self.path = None
        self.fragment = ''
        try:
            self.mime_type = guess_type(href_or_path)[0]
        except Exception:
            # Was a bare except:
            self.mime_type = None
        if self.mime_type is None:
            self.mime_type = 'application/octet-stream'
        if is_path:
            path = href_or_path
            if not os.path.isabs(path):
                path = os.path.abspath(os.path.join(basedir, path))
            if isinstance(path, bytes):
                path = path.decode(filesystem_encoding)
            self.path = path
        else:
            url = urlparse(href_or_path)
            if url[0] not in ('', 'file'):
                # A remote URL, not a local file reference
                self._href = href_or_path
            else:
                pc = url[2]
                if isinstance(pc, str):
                    pc = pc.encode('utf-8')
                # BUG FIX: percent-encoded hrefs were not decoded (the code
                # just encoded and immediately decoded, while the unquote
                # import went unused); restore the unquoting step
                pc = unquote(pc).decode('utf-8')
                self.path = os.path.abspath(os.path.join(basedir, pc.replace('/', os.sep)))
                self.fragment = url[-1]

    def href(self, basedir=None):
        '''
        Return a URL pointing to this resource. If it is a file on the filesystem
        the URL is relative to `basedir`.

        `basedir`: If None, the basedir of this resource is used (see :method:`set_basedir`).
        If this resource has no basedir, then the current working directory is used as the basedir.
        '''
        if basedir is None:
            basedir = self._basedir if self._basedir else os.getcwd()
        if self.path is None:
            return self._href
        frag = ('#' + self.fragment) if self.fragment else ''
        if self.path == basedir:
            return frag
        try:
            rpath = os.path.relpath(self.path, basedir)
        except ValueError:  # On windows path and basedir could be on different drives
            rpath = self.path
        if isinstance(rpath, bytes):
            rpath = rpath.decode(filesystem_encoding)
        return rpath.replace(os.sep, '/')+frag

    def set_basedir(self, path):
        self._basedir = path

    def basedir(self):
        return self._basedir

    def __repr__(self):
        return 'Resource(%s, %s)'%(repr(self.path), repr(self.href()))

# }}}
class ResourceCollection: # {{{
    # Ordered collection of Resource objects with list-like behavior.

    def __init__(self):
        self._resources = []

    def __iter__(self):
        yield from self._resources

    def __len__(self):
        return len(self._resources)

    def __getitem__(self, index):
        return self._resources[index]

    def __bool__(self):
        return len(self._resources) > 0

    def __str__(self):
        resources = map(repr, self)
        return '[%s]'%', '.join(resources)

    __unicode__ = __str__

    def __repr__(self):
        return str(self)

    def append(self, resource):
        # Only Resource instances (or subclasses) may be stored
        if not isinstance(resource, Resource):
            raise ValueError('Can only append objects of type Resource')
        self._resources.append(resource)

    def remove(self, resource):
        self._resources.remove(resource)

    def replace(self, start, end, items):
        'Same as list[start:end] = items'
        self._resources[start:end] = items

    @staticmethod
    def from_directory_contents(top, topdown=True):
        # NOTE(review): this looks broken/dead — os.walk yields
        # (dirpath, dirnames, filenames) so spec[1] is a list (os.path.join
        # would fail), and Resource.from_path is not defined in this file.
        # Confirm whether any caller still uses it.
        collection = ResourceCollection()
        for spec in os.walk(top, topdown=topdown):
            path = os.path.abspath(os.path.join(spec[0], spec[1]))
            res = Resource.from_path(path)
            res.set_basedir(top)
            collection.append(res)
        return collection

    def set_basedir(self, path):
        for res in self:
            res.set_basedir(path)

# }}}
class ManifestItem(Resource): # {{{
    '''A single manifest entry: a resource plus its declared media type.'''

    @staticmethod
    def from_opf_manifest_item(item, basedir):
        # Returns None when the <item> has no href
        href = item.get('href', None)
        if not href:
            return None
        res = ManifestItem(href, basedir=basedir, is_path=True)
        declared = item.get('media-type', '').strip()
        if declared:
            res.mime_type = declared
        return res

    @property
    def media_type(self):
        # media_type is simply an alias for mime_type
        return self.mime_type

    @media_type.setter
    def media_type(self, val):
        self.mime_type = val

    def __unicode__representation__(self):
        return '<item id="%s" href="%s" media-type="%s" />'%(self.id, self.href(), self.media_type)

    __str__ = __unicode__representation__

    def __repr__(self):
        return str(self)

    def __getitem__(self, index):
        # Tuple-like access: (href, media_type)
        if index == 0:
            return self.href()
        elif index == 1:
            return self.media_type
        raise IndexError('%d out of bounds.'%index)
# }}}
class Manifest(ResourceCollection): # {{{
    '''Collection of ManifestItem objects with automatic "idN" id generation.'''

    def __init__(self):
        ResourceCollection.__init__(self)
        self.next_id = 1

    def append_from_opf_manifest_item(self, item, dir):
        self.append(ManifestItem.from_opf_manifest_item(item, dir))
        # Keep the declared id if present, else synthesize one
        id = item.get('id', '') or 'id%d'%self.next_id
        self[-1].id = id
        self.next_id += 1

    @staticmethod
    def from_opf_manifest_element(items, dir):
        m = Manifest()
        for item in items:
            try:
                m.append_from_opf_manifest_item(item, dir)
            except ValueError:
                # Items without an href produce None and are skipped
                continue
        return m

    @staticmethod
    def from_paths(entries):
        '''
        `entries`: List of (path, mime-type) If mime-type is None it is autodetected
        '''
        m = Manifest()
        for path, mt in entries:
            m.add_item(path, mt)
        return m

    def add_item(self, path, mime_type=None):
        '''Add a single file to the manifest, returning its assigned id.'''
        entry = ManifestItem(path, is_path=True)
        if mime_type:
            entry.mime_type = mime_type
        entry.id = 'id%d'%self.next_id
        self.next_id += 1
        self.append(entry)
        return entry.id

    def item(self, id):
        # First item with a matching id, else None
        for candidate in self:
            if candidate.id == id:
                return candidate

    def id_for_path(self, path):
        # Compare normalized absolute paths
        path = os.path.normpath(os.path.abspath(path))
        for candidate in self:
            if candidate.path and os.path.normpath(candidate.path) == path:
                return candidate.id

    def path_for_id(self, id):
        for candidate in self:
            if candidate.id == id:
                return candidate.path

    def type_for_id(self, id):
        for candidate in self:
            if candidate.id == id:
                return candidate.mime_type
# }}}
class Spine(ResourceCollection): # {{{
    '''Ordered reading sequence of manifest items.'''

    class Item(Resource):

        def __init__(self, idfunc, *args, **kwargs):
            Resource.__init__(self, *args, **kwargs)
            self.is_linear = True
            # idfunc is invoked immediately with the resolved path
            self.id = idfunc(self.path)
            self.idref = None

        def __repr__(self):
            return 'Spine.Item(path=%r, id=%s, is_linear=%s)' % \
                    (self.path, self.id, self.is_linear)

    @staticmethod
    def from_opf_spine_element(itemrefs, manifest):
        s = Spine(manifest)
        seen = set()
        path_map = {i.id:i.path for i in s.manifest}
        for itemref in itemrefs:
            idref = itemref.get('idref', None)
            if idref is not None:
                path = path_map.get(idref)
                if path and path not in seen:
                    # Bind idref as a default argument so the callable does not
                    # depend on the loop variable (defensive; it is called
                    # immediately inside Item.__init__)
                    r = Spine.Item(lambda x, idref=idref: idref, path, is_path=True)
                    r.is_linear = itemref.get('linear', 'yes') == 'yes'
                    r.idref = idref
                    s.append(r)
                    seen.add(path)
        return s

    @staticmethod
    def from_paths(paths, manifest):
        s = Spine(manifest)
        for path in paths:
            try:
                s.append(Spine.Item(s.manifest.id_for_path, path, is_path=True))
            except Exception:
                continue
        return s

    def __init__(self, manifest):
        ResourceCollection.__init__(self)
        self.manifest = manifest

    def replace(self, start, end, ids):
        '''
        Replace the items between start (inclusive) and end (not inclusive) with
        the items identified by ids. ids can be a list of any length.
        '''
        items = []
        for id in ids:
            path = self.manifest.path_for_id(id)
            if path is None:
                # Bug fix: the id was never interpolated into the message
                raise ValueError('id %s not in manifest'%id)
            items.append(Spine.Item(lambda x, id=id: id, path, is_path=True))
        # Bug fix: the old code called the unbound base-class method without
        # passing self, so `start` was treated as the instance
        ResourceCollection.replace(self, start, end, items)

    def linear_items(self):
        for r in self:
            if r.is_linear:
                yield r.path

    def nonlinear_items(self):
        for r in self:
            if not r.is_linear:
                yield r.path

    def items(self):
        for i in self:
            yield i.path
# }}}
class Guide(ResourceCollection): # {{{
    '''Collection of <guide> references from an OPF package.'''

    class Reference(Resource):

        @staticmethod
        def from_opf_resource_item(ref, basedir):
            title, href, type = ref.get('title', ''), ref.get('href'), ref.get('type')
            res = Guide.Reference(href, basedir, is_path=True)
            res.title = title
            res.type = type
            return res

        def __repr__(self):
            ans = '<reference type="%s" href="%s" '%(self.type, self.href())
            if self.title:
                ans += 'title="%s" '%self.title
            return ans + '/>'

    @staticmethod
    def from_opf_guide(references, base_dir=None):
        '''Build a Guide from <reference> elements; hrefs resolve against `base_dir`.'''
        # Bug fix: the default was os.getcwd() evaluated once at import time;
        # resolve the current directory at call time instead.
        if base_dir is None:
            base_dir = os.getcwd()
        coll = Guide()
        for ref in references:
            try:
                ref = Guide.Reference.from_opf_resource_item(ref, base_dir)
                coll.append(ref)
            except Exception:
                continue
        return coll

    def set_cover(self, path):
        '''Point every known cover reference type at `path`.'''
        # Drop all existing cover references first
        for i in tuple(self):
            if 'cover' in i.type.lower():
                self.remove(i)
        for typ in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
            self.append(Guide.Reference(path, is_path=True))
            self[-1].type = typ
            self[-1].title = ''
# }}}
class MetadataField:
    '''
    Descriptor mapping an OPF metadata element to a python attribute.
    `name`: element name; `is_dc`: element lives in the dc: namespace,
    otherwise it is a calibre:<name> <meta> element; `formatter`: callable
    applied to the raw text (a failure yields None); `none_is`: value
    returned when nothing usable is found; `renderer`: serializes values
    on write.
    '''

    def __init__(self, name, is_dc=True, formatter=None, none_is=None,
            renderer=lambda x: str(x)):
        self.name = name
        self.is_dc = is_dc
        self.formatter = formatter
        self.none_is = none_is
        self.renderer = renderer

    def __real_get__(self, obj, type=None):
        ans = obj.get_metadata_element(self.name)
        if ans is None:
            return None
        ans = obj.get_text(ans)
        if ans is None:
            return ans
        if self.formatter is not None:
            try:
                ans = self.formatter(ans)
            except Exception:
                # Bug fix: was a bare except, which also swallowed
                # SystemExit/KeyboardInterrupt
                return None
        if hasattr(ans, 'strip'):
            ans = ans.strip()
        return ans

    def __get__(self, obj, type=None):
        ans = self.__real_get__(obj, type)
        if ans is None:
            ans = self.none_is
        return ans

    def __set__(self, obj, val):
        elem = obj.get_metadata_element(self.name)
        if val is None:
            # Setting None removes the element entirely
            if elem is not None:
                elem.getparent().remove(elem)
            return
        if elem is None:
            elem = obj.create_metadata_element(self.name, is_dc=self.is_dc)
        obj.set_text(elem, self.renderer(val))
class LinkMapsField:
    '''
    Descriptor for the calibre:link_maps metadata element, with read
    fallback to the legacy calibre:author_link_map element.
    '''

    def __get__(self, obj, type=None):
        # Try the current element first, then the legacy one (which stores
        # only the authors map and must be wrapped)
        for elem_name, wrap in (('link_maps', False), ('author_link_map', True)):
            elem = obj.get_metadata_element(elem_name)
            if elem is None:
                continue
            raw = obj.get_text(elem)
            if not raw:
                continue
            with suppress(Exception):
                data = json.loads(raw)
                return {'authors': data} if wrap else data
        return {}

    def __set__(self, obj, val):
        # The legacy element is always removed on write
        legacy = obj.get_metadata_element('author_link_map')
        if legacy is not None:
            legacy.getparent().remove(legacy)
        elem = obj.get_metadata_element('link_maps')
        if not val:
            if elem is not None:
                elem.getparent().remove(elem)
            return
        if elem is None:
            elem = obj.create_metadata_element('link_maps', is_dc=False)
        obj.set_text(elem, dump_dict(val))
class TitleSortField(MetadataField):
    '''
    title_sort may live either in a calibre:title_sort <meta> element or in
    a file-as attribute on the <dc:title> element; reads check both, writes
    use the <meta> element and strip any file-as attributes.
    '''

    def __get__(self, obj, type=None):
        val = self.__real_get__(obj, type)
        if val is None:
            # Fall back to the (last) file-as attribute on a title element
            for match in obj.title_path(obj.metadata):
                fa = match.get('{%s}file-as'%obj.NAMESPACES['opf'], None) or \
                        match.get('file-as', None)
                if fa:
                    val = fa
        return self.none_is if not val else val.strip()

    def __set__(self, obj, val):
        MetadataField.__set__(self, obj, val)
        # Remove any stale file-as attributes from the title elements
        for match in obj.title_path(obj.metadata):
            for attr in [a for a in match.attrib if a.endswith('file-as')]:
                del match.attrib[attr]
def serialize_user_metadata(metadata_elem, all_user_metadata, tail='\n'+(' '*8)):
    # Append one <meta name="calibre:user_metadata:<field>"> element per custom
    # column to metadata_elem. Serialization failures are logged and skipped so
    # one bad field cannot prevent the OPF from being written.
    from calibre.ebooks.metadata.book.json_codec import encode_is_multiple, object_to_unicode
    from calibre.utils.config import to_json
    for name, fm in all_user_metadata.items():
        try:
            fm = copy.copy(fm)
            # Composite columns are computed; do not persist the cached value
            # unless the column explicitly asks for it
            if (fm.get('datatype', 'text') == 'composite' and
                    not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
                fm['#value#'] = ''
            encode_is_multiple(fm)
            fm = object_to_unicode(fm)
            fm = json.dumps(fm, default=to_json, ensure_ascii=False)
        except:
            prints('Failed to write user metadata:', name)
            import traceback
            traceback.print_exc()
            continue
        meta = metadata_elem.makeelement('meta')
        meta.set('name', 'calibre:user_metadata:'+name)
        meta.set('content', fm)
        meta.tail = tail
        metadata_elem.append(meta)
def serialize_annotations(metadata_elem, annotations, tail='\n'+(' '*8)):
    '''
    Append one <meta name="calibre:annotation"> element per annotation dict
    in `annotations` to `metadata_elem`, JSON-encoding each annotation.
    '''
    for item in annotations:
        # json.dumps always returns str on Python 3, so the old
        # bytes-decode branch was dead code and has been removed
        data = json.dumps(item, ensure_ascii=False)
        meta = metadata_elem.makeelement('meta')
        meta.set('name', 'calibre:annotation')
        meta.set('content', data)
        meta.tail = tail
        metadata_elem.append(meta)
def dump_dict(cats):
    '''Serialize the mapping `cats` (or {} if falsy) to a JSON string,
    skipping keys that are not JSON-serializable.'''
    from calibre.ebooks.metadata.book.json_codec import object_to_unicode
    return json.dumps(object_to_unicode(cats or {}), ensure_ascii=False,
            skipkeys=True)
class OPF: # {{{
    # Reads and manipulates OPF 2.0 package documents (and reads most fields
    # from OPF 3 packages as well).
    MIMETYPE = 'application/oebps-package+xml'
    NAMESPACES = {
        None: "http://www.idpf.org/2007/opf",
        'dc': "http://purl.org/dc/elements/1.1/",
        'opf': "http://www.idpf.org/2007/opf",
    }
    META = '{%s}meta' % NAMESPACES['opf']
    # XPath prefix map: drop the default (None) namespace, add the EXSLT
    # regular-expressions extension used by the expressions below
    xpn = NAMESPACES.copy()
    xpn.pop(None)
    xpn['re'] = 'http://exslt.org/regular-expressions'
    XPath = functools.partial(etree.XPath, namespaces=xpn)
    CONTENT = XPath('self::*[re:match(name(), "meta$", "i")]/@content')
    TEXT = XPath('string()')
    # Pre-compiled, case-insensitive XPath expressions for locating the
    # various metadata elements regardless of namespace prefixes
    metadata_path = XPath('descendant::*[re:match(name(), "metadata", "i")]')
    metadata_elem_path = XPath(
        'descendant::*[re:match(name(), concat($name, "$"), "i") or (re:match(name(), "meta$", "i") '
        'and re:match(@name, concat("^calibre:", $name, "$"), "i"))]')
    title_path = XPath('descendant::*[re:match(name(), "title", "i")]')
    authors_path = XPath('descendant::*[re:match(name(), "creator", "i") and (@role="aut" or @opf:role="aut" or (not(@role) and not(@opf:role)))]')
    editors_path = XPath('descendant::*[re:match(name(), "creator", "i") and (@role="edt" or @opf:role="edt")]')
    bkp_path = XPath('descendant::*[re:match(name(), "contributor", "i") and (@role="bkp" or @opf:role="bkp")]')
    tags_path = XPath('descendant::*[re:match(name(), "subject", "i")]')
    isbn_path = XPath('descendant::*[re:match(name(), "identifier", "i") and '
            '(re:match(@scheme, "isbn", "i") or re:match(@opf:scheme, "isbn", "i"))]')
    pubdate_path = XPath('descendant::*[re:match(name(), "date", "i")]')
    raster_cover_path = XPath('descendant::*[re:match(name(), "meta", "i") and '
            're:match(@name, "cover", "i") and @content]')
    guide_cover_path = XPath('descendant::*[local-name()="guide"]/*[local-name()="reference" and re:match(@type, "cover", "i")]/@href')
    identifier_path = XPath('descendant::*[re:match(name(), "identifier", "i")]')
    application_id_path = XPath('descendant::*[re:match(name(), "identifier", "i") and '
            '(re:match(@opf:scheme, "calibre|libprs500", "i") or re:match(@scheme, "calibre|libprs500", "i"))]')
    uuid_id_path = XPath('descendant::*[re:match(name(), "identifier", "i") and '
            '(re:match(@opf:scheme, "uuid", "i") or re:match(@scheme, "uuid", "i"))]')
    languages_path = XPath('descendant::*[local-name()="language"]')
    manifest_path = XPath('descendant::*[re:match(name(), "manifest", "i")]/*[re:match(name(), "item", "i")]')
    manifest_ppath = XPath('descendant::*[re:match(name(), "manifest", "i")]')
    spine_path = XPath('descendant::*[re:match(name(), "spine", "i")]/*[re:match(name(), "itemref", "i")]')
    guide_path = XPath('descendant::*[re:match(name(), "guide", "i")]/*[re:match(name(), "reference", "i")]')
    # Simple metadata fields exposed as descriptors (see MetadataField)
    publisher = MetadataField('publisher')
    comments = MetadataField('description')
    category = MetadataField('type')
    rights = MetadataField('rights')
    series = MetadataField('series', is_dc=False)
    # The default series_index depends on a user-configurable tweak
    if tweaks['use_series_auto_increment_tweak_when_importing']:
        series_index = MetadataField('series_index', is_dc=False,
                formatter=float, none_is=None)
    else:
        series_index = MetadataField('series_index', is_dc=False,
                formatter=float, none_is=1)
    title_sort = TitleSortField('title_sort', is_dc=False)
    rating = MetadataField('rating', is_dc=False, formatter=float)
    publication_type = MetadataField('publication_type', is_dc=False)
    timestamp = MetadataField('timestamp', is_dc=False,
            formatter=parse_date, renderer=isoformat)
    user_categories = MetadataField('user_categories', is_dc=False,
            formatter=json.loads,
            renderer=dump_dict)
    link_maps = LinkMapsField()
def __init__(self, stream, basedir=os.getcwd(), unquote_urls=True,
populate_spine=True, try_to_guess_cover=False, preparsed_opf=None, read_toc=True):
self.try_to_guess_cover = try_to_guess_cover
self.basedir = self.base_dir = basedir
self.path_to_html_toc = self.html_toc_fragment = None
self.root = parse_opf(stream) if preparsed_opf is None else preparsed_opf
try:
self.package_version = float(self.root.get('version', None))
except (AttributeError, TypeError, ValueError):
self.package_version = 0
self.metadata = self.metadata_path(self.root)
if not self.metadata:
self.metadata = [self.root.makeelement('{http://www.idpf.org/2007/opf}metadata')]
self.root.insert(0, self.metadata[0])
self.metadata[0].tail = '\n'
self.metadata = self.metadata[0]
if unquote_urls:
self.unquote_urls()
self.manifest = Manifest()
m = self.manifest_path(self.root)
if m:
self.manifest = Manifest.from_opf_manifest_element(m, basedir)
self.spine = None
s = self.spine_path(self.root)
if populate_spine and s:
self.spine = Spine.from_opf_spine_element(s, self.manifest)
self.guide = None
guide = self.guide_path(self.root)
self.guide = Guide.from_opf_guide(guide, basedir) if guide else None
self.cover_data = (None, None)
if read_toc:
self.find_toc()
else:
self.toc = None
self.read_user_metadata()
    def read_user_metadata(self):
        # Parse all calibre:user_metadata:* meta elements into
        # self._user_metadata_ (custom-column definitions keyed by #name).
        # Individual failures are logged and skipped.
        self._user_metadata_ = {}
        temp = Metadata('x', ['x'])
        from calibre.ebooks.metadata.book.json_codec import decode_is_multiple
        from calibre.utils.config import from_json
        elems = self.root.xpath('//*[name() = "meta" and starts-with(@name,'
                '"calibre:user_metadata:") and @content]')
        for elem in elems:
            name = elem.get('name')
            # Strip the "calibre:user_metadata:" prefix (two colon-groups)
            name = ':'.join(name.split(':')[2:])
            if not name or not name.startswith('#'):
                continue
            fm = elem.get('content')
            try:
                fm = json.loads(fm, object_hook=from_json)
                decode_is_multiple(fm)
                temp.set_user_metadata(name, fm)
            except:
                prints('Failed to read user metadata:', name)
                import traceback
                traceback.print_exc()
                continue
        self._user_metadata_ = temp.get_all_user_metadata(True)
    def to_book_metadata(self):
        # Convert this OPF into a MetaInformation/Metadata object.
        if self.package_version >= 3.0:
            from calibre.ebooks.metadata.opf3 import read_metadata
            return read_metadata(self.root)
        # avoid deepcopy of non-metadata items
        # MetaInformation(self) copies attributes from self, so temporarily
        # null the heavyweight collections and restore them afterwards
        manifest, spine, guide, toc = self.manifest, self.spine, self.guide, self.toc
        self.manifest = self.spine = self.guide = self.toc = None
        try:
            ans = MetaInformation(self)
        finally:
            self.manifest, self.spine, self.guide, self.toc = manifest, spine, guide, toc
        for n, v in self._user_metadata_.items():
            ans.set_user_metadata(n, v)
        ans.set_identifiers(self.get_identifiers())
        ans.link_maps = self.link_maps
        ans.cover = self.cover # needed because we nuke the guide while creating ans
        return ans
def read_annotations(self):
for elem in self.root.xpath('//*[name() = "meta" and @name = "calibre:annotation" and @content]'):
try:
yield json.loads(elem.get('content'))
except Exception:
pass
def write_user_metadata(self):
elems = self.root.xpath('//*[name() = "meta" and starts-with(@name,'
'"calibre:user_metadata:") and @content]')
for elem in elems:
elem.getparent().remove(elem)
serialize_user_metadata(self.metadata,
self._user_metadata_)
    def find_toc(self):
        # Locate and parse the table of contents, setting self.toc.
        # Lookup order: spine@toc attribute, guide reference of type "toc",
        # any manifest item whose href contains "toc". Best-effort: any
        # failure leaves self.toc as None.
        self.toc = None
        try:
            spine = self.XPath('descendant::*[re:match(name(), "spine", "i")]')(self.root)
            toc = None
            if spine:
                spine = spine[0]
                toc = spine.get('toc', None)
            if toc is None and self.guide:
                for item in self.guide:
                    if item.type and item.type.lower() == 'toc':
                        toc = item.path
            if toc is None:
                for item in self.manifest:
                    if 'toc' in item.href().lower():
                        toc = item.path
            if toc is None:
                return
            self.toc = TOC(base_path=self.base_dir)
            # NCX is detected from the manifest media type (dtbncx) or from
            # the conventional ncx/ncxtoc id values
            is_ncx = getattr(self, 'manifest', None) is not None and \
                self.manifest.type_for_id(toc) is not None and \
                'dtbncx' in self.manifest.type_for_id(toc)
            if is_ncx or toc.lower() in ('ncx', 'ncxtoc'):
                path = self.manifest.path_for_id(toc)
                if path:
                    self.toc.read_ncx_toc(path)
                else:
                    # Fall back to any *.ncx file next to the OPF
                    f = glob.glob(os.path.join(self.base_dir, '*.ncx'))
                    if f:
                        self.toc.read_ncx_toc(f[0])
            else:
                # HTML TOC; split off any fragment identifier
                self.path_to_html_toc, self.html_toc_fragment = \
                    toc.partition('#')[0], toc.partition('#')[-1]
                if not os.access(self.path_to_html_toc, os.R_OK) or \
                        not os.path.isfile(self.path_to_html_toc):
                    self.path_to_html_toc = None
                self.toc.read_html_toc(toc)
        except:
            pass
def get_text(self, elem):
return ''.join(self.CONTENT(elem) or self.TEXT(elem))
def set_text(self, elem, content):
if elem.tag == self.META:
elem.attrib['content'] = content
else:
elem.text = content
    def itermanifest(self):
        # All <item> elements in the <manifest> section
        return self.manifest_path(self.root)
def create_manifest_item(self, href, media_type, append=False):
ids = {i.get('id', None) for i in self.itermanifest()}
manifest_id = 'id1'
c = 1
while manifest_id in ids:
c += 1
manifest_id = 'id%d'%c
if not media_type:
media_type = 'application/xhtml+xml'
ans = etree.Element('{%s}item'%self.NAMESPACES['opf'],
attrib={'id':manifest_id, 'href':href, 'media-type':media_type})
ans.tail = '\n\t\t'
if append:
manifest = self.manifest_ppath(self.root)[0]
manifest.append(ans)
return ans
def replace_manifest_item(self, item, items):
items = [self.create_manifest_item(*i) for i in items]
for i, item2 in enumerate(items):
item2.set('id', item.get('id')+'.%d'%(i+1))
manifest = item.getparent()
index = manifest.index(item)
manifest[index:index+1] = items
return [i.get('id') for i in items]
    def iterspine(self):
        # All <itemref> elements in the <spine> section
        return self.spine_path(self.root)
def spine_items(self):
for item in self.iterspine():
idref = item.get('idref', '')
for x in self.itermanifest():
if x.get('id', None) == idref:
yield x.get('href', '')
def first_spine_item(self):
items = self.iterspine()
if not items:
return None
idref = items[0].get('idref', '')
for x in self.itermanifest():
if x.get('id', None) == idref:
return x.get('href', None)
def create_spine_item(self, idref):
ans = etree.Element('{%s}itemref'%self.NAMESPACES['opf'], idref=idref)
ans.tail = '\n\t\t'
return ans
    def replace_spine_items_by_idref(self, idref, new_idrefs):
        # Replace every itemref pointing at `idref` with itemrefs for `new_idrefs`.
        # NOTE(review): if `idref` occurs more than once in the spine, the same
        # element objects are inserted at each position; lxml moves elements
        # rather than copying, so only the last occurrence would keep them —
        # verify callers only pass unique idrefs.
        items = list(map(self.create_spine_item, new_idrefs))
        spine = self.XPath('/opf:package/*[re:match(name(), "spine", "i")]')(self.root)[0]
        old = [i for i in self.iterspine() if i.get('idref', None) == idref]
        for x in old:
            i = spine.index(x)
            spine[i:i+1] = items
    def create_guide_element(self):
        # Append an empty <guide> section to the package root and return it
        e = etree.SubElement(self.root, '{%s}guide'%self.NAMESPACES['opf'])
        e.text = '\n '
        e.tail = '\n'
        return e
def remove_guide(self):
self.guide = None
for g in self.root.xpath('./*[re:match(name(), "guide", "i")]', namespaces={'re':'http://exslt.org/regular-expressions'}):
self.root.remove(g)
def create_guide_item(self, type, title, href):
e = etree.Element('{%s}reference'%self.NAMESPACES['opf'],
type=type, title=title, href=href)
e.tail='\n'
return e
    def add_guide_item(self, type, title, href):
        # Append a reference to the first <guide> section.
        # NOTE(review): raises IndexError if the package has no <guide>;
        # callers appear to call create_guide_element() first — verify.
        g = self.root.xpath('./*[re:match(name(), "guide", "i")]', namespaces={'re':'http://exslt.org/regular-expressions'})[0]
        g.append(self.create_guide_item(type, title, href))
    def iterguide(self):
        # All <reference> elements in the <guide> section
        return self.guide_path(self.root)
def unquote_urls(self):
def get_href(item):
raw = unquote(item.get('href', ''))
if not isinstance(raw, str):
raw = raw.decode('utf-8')
return raw
for item in self.itermanifest():
item.set('href', get_href(item))
for item in self.iterguide():
item.set('href', get_href(item))
@property
def title(self):
# TODO: Add support for EPUB 3 refinements
for elem in self.title_path(self.metadata):
title = self.get_text(elem)
if title and title.strip():
return re.sub(r'\s+', ' ', title.strip())
@title.setter
def title(self, val):
val = (val or '').strip()
titles = self.title_path(self.metadata)
if self.package_version < 3:
# EPUB 3 allows multiple title elements containing sub-titles,
# series and other things. We all loooove EPUB 3.
for title in titles:
title.getparent().remove(title)
titles = ()
if val:
title = titles[0] if titles else self.create_metadata_element('title')
title.text = re.sub(r'\s+', ' ', str(val))
@property
def authors(self):
ans = []
for elem in self.authors_path(self.metadata):
ans.extend(string_to_authors(self.get_text(elem)))
if not ans:
for elem in self.editors_path(self.metadata):
ans.extend(string_to_authors(self.get_text(elem)))
return ans
@authors.setter
def authors(self, val):
remove = list(self.authors_path(self.metadata)) or list(self.editors_path(self.metadata))
for elem in remove:
elem.getparent().remove(elem)
# Ensure new author element is at the top of the list
# for broken implementations that always use the first
# <dc:creator> element with no attention to the role
for author in reversed(val):
elem = self.metadata.makeelement('{%s}creator'%
self.NAMESPACES['dc'], nsmap=self.NAMESPACES)
elem.tail = '\n'
self.metadata.insert(0, elem)
elem.set('{%s}role'%self.NAMESPACES['opf'], 'aut')
self.set_text(elem, author.strip())
@property
def author_sort(self):
matches = self.authors_path(self.metadata) or self.editors_path(self.metadata)
if matches:
for match in matches:
ans = match.get('{%s}file-as'%self.NAMESPACES['opf']) or match.get('file-as')
if ans:
return ans
@author_sort.setter
def author_sort(self, val):
matches = self.authors_path(self.metadata) or self.editors_path(self.metadata)
if matches:
for key in matches[0].attrib:
if key.endswith('file-as'):
matches[0].attrib.pop(key)
matches[0].set('{%s}file-as'%self.NAMESPACES['opf'], str(val))
@property
def tags(self):
ans = []
for tag in self.tags_path(self.metadata):
text = self.get_text(tag)
if text and text.strip():
ans.extend([x.strip() for x in text.split(',')])
return ans
@tags.setter
def tags(self, val):
for tag in list(self.tags_path(self.metadata)):
tag.getparent().remove(tag)
for tag in val:
elem = self.create_metadata_element('subject')
self.set_text(elem, str(tag))
@property
def pubdate(self):
ans = None
for match in self.pubdate_path(self.metadata):
try:
val = parse_date(etree.tostring(match, encoding='unicode',
method='text', with_tail=False).strip())
except:
continue
if ans is None or val < ans:
ans = val
return ans
    @pubdate.setter
    def pubdate(self, val):
        # Write `val` into the earliest existing date element; unparsable date
        # elements are removed, and all date elements are removed when val is
        # falsy. A new element is created if none survive.
        least_val = least_elem = None
        for match in self.pubdate_path(self.metadata):
            try:
                cval = parse_date(etree.tostring(match, encoding='unicode',
                    method='text', with_tail=False).strip())
            except:
                match.getparent().remove(match)
            else:
                if not val:
                    match.getparent().remove(match)
                if least_val is None or cval < least_val:
                    least_val, least_elem = cval, match
        if val:
            if least_val is None:
                least_elem = self.create_metadata_element('date')
            # Clear attributes (e.g. opf:event) so the value is unambiguous
            least_elem.attrib.clear()
            least_elem.text = isoformat(val)
@property
def isbn(self):
for match in self.isbn_path(self.metadata):
return self.get_text(match) or None
    @isbn.setter
    def isbn(self, val):
        # Write or remove the ISBN identifier. The element referenced by the
        # package unique-identifier attribute must never be deleted, so when
        # clearing, it is converted to a uuid identifier instead.
        uuid_id = None
        for attr in self.root.attrib:
            if attr.endswith('unique-identifier'):
                uuid_id = self.root.attrib[attr]
                break
        matches = self.isbn_path(self.metadata)
        if not val:
            for x in matches:
                xid = x.get('id', None)
                is_package_identifier = uuid_id is not None and uuid_id == xid
                if is_package_identifier:
                    # Keep the element but repurpose it as a uuid identifier
                    self.set_text(x, str(uuid.uuid4()))
                    for attr in x.attrib:
                        if attr.endswith('scheme'):
                            x.attrib[attr] = 'uuid'
                else:
                    x.getparent().remove(x)
            return
        if not matches:
            attrib = {'{%s}scheme'%self.NAMESPACES['opf']: 'ISBN'}
            matches = [self.create_metadata_element('identifier',
                attrib=attrib)]
        self.set_text(matches[0], str(val))
    def get_identifiers(self):
        # Return a {scheme: value} dict of all identifiers, excluding the
        # internal calibre/uuid schemes. Scheme-less identifiers are kept as
        # ISBN candidates and validated at the end.
        identifiers = {}
        schemeless = []
        for x in self.XPath(
                'descendant::*[local-name() = "identifier" and text()]')(
                        self.metadata):
            found_scheme = False
            for attr, val in iteritems(x.attrib):
                if attr.endswith('scheme'):
                    typ = icu_lower(val)
                    val = etree.tostring(x, with_tail=False, encoding='unicode',
                            method='text').strip()
                    if val and typ not in ('calibre', 'uuid'):
                        # Normalize urn:isbn: prefixed values
                        if typ == 'isbn' and val.lower().startswith('urn:isbn:'):
                            val = val[len('urn:isbn:'):]
                        identifiers[typ] = val
                    found_scheme = True
                    break
            if not found_scheme:
                val = etree.tostring(x, with_tail=False, encoding='unicode',
                        method='text').strip()
                if val.lower().startswith('urn:isbn:'):
                    val = check_isbn(val.split(':')[-1])
                    if val is not None:
                        identifiers['isbn'] = val
                else:
                    schemeless.append(val)
        # Use the first scheme-less value that validates as an ISBN
        if schemeless and 'isbn' not in identifiers:
            for val in schemeless:
                if check_isbn(val, simple_sanitize=True) is not None:
                    identifiers['isbn'] = check_isbn(val)
                    break
        return identifiers
    def set_identifiers(self, identifiers):
        # Replace the identifiers in the OPF with `identifiers` ({scheme:
        # value}). The package unique-identifier element is updated in place
        # when its scheme matches; calibre/uuid identifiers are preserved.
        identifiers = identifiers.copy()
        uuid_id = None
        for attr in self.root.attrib:
            if attr.endswith('unique-identifier'):
                uuid_id = self.root.attrib[attr]
                break
        for x in self.XPath(
                'descendant::*[local-name() = "identifier"]')(
                        self.metadata):
            xid = x.get('id', None)
            is_package_identifier = uuid_id is not None and uuid_id == xid
            typ = {val.lower() for attr, val in iteritems(x.attrib) if attr.endswith('scheme')}
            if is_package_identifier:
                # NOTE(review): typ is built from a set, so if an element
                # carried more than one scheme attribute the chosen one is
                # arbitrary — verify single-scheme elements are the norm.
                typ = tuple(typ)
                if typ and typ[0] in identifiers:
                    self.set_text(x, identifiers.pop(typ[0]))
                continue
            if typ and not (typ & {'calibre', 'uuid'}):
                x.getparent().remove(x)
        for typ, val in iteritems(identifiers):
            attrib = {'{%s}scheme'%self.NAMESPACES['opf']: typ.upper()}
            self.set_text(self.create_metadata_element(
                'identifier', attrib=attrib), str(val))
@property
def application_id(self):
for match in self.application_id_path(self.metadata):
return self.get_text(match) or None
    @application_id.setter
    def application_id(self, val):
        # Replace all calibre identifiers with a single new one. If one of the
        # removed elements was the package unique-identifier, reuse its id so
        # the package attribute still resolves.
        removed_ids = set()
        for x in tuple(self.application_id_path(self.metadata)):
            removed_ids.add(x.get('id', None))
            x.getparent().remove(x)
        uuid_id = None
        for attr in self.root.attrib:
            if attr.endswith('unique-identifier'):
                uuid_id = self.root.attrib[attr]
                break
        attrib = {'{%s}scheme'%self.NAMESPACES['opf']: 'calibre'}
        if uuid_id and uuid_id in removed_ids:
            attrib['id'] = uuid_id
        self.set_text(self.create_metadata_element(
            'identifier', attrib=attrib), str(val))
@property
def uuid(self):
for match in self.uuid_id_path(self.metadata):
return self.get_text(match) or None
@uuid.setter
def uuid(self, val):
matches = self.uuid_id_path(self.metadata)
if not matches:
attrib = {'{%s}scheme'%self.NAMESPACES['opf']: 'uuid'}
matches = [self.create_metadata_element('identifier',
attrib=attrib)]
self.set_text(matches[0], str(val))
@property
def language(self):
ans = self.languages
if ans:
return ans[0]
    @language.setter
    def language(self, val):
        # Setting the single language replaces the whole languages list
        self.languages = [val]
@property
def languages(self):
ans = []
for match in self.languages_path(self.metadata):
t = self.get_text(match)
if t and t.strip():
l = canonicalize_lang(t.strip())
if l:
ans.append(l)
return ans
@languages.setter
def languages(self, val):
matches = self.languages_path(self.metadata)
for x in matches:
x.getparent().remove(x)
num_done = 0
for lang in val:
l = self.create_metadata_element('language')
self.set_text(l, str(lang))
num_done += 1
if num_done == 0:
l = self.create_metadata_element('language')
self.set_text(l, 'und')
@property
def raw_languages(self):
for match in self.languages_path(self.metadata):
t = self.get_text(match)
if t and t.strip():
yield t.strip()
@property
def book_producer(self):
for match in self.bkp_path(self.metadata):
return self.get_text(match) or None
@book_producer.setter
def book_producer(self, val):
matches = self.bkp_path(self.metadata)
if not matches:
matches = [self.create_metadata_element('contributor')]
matches[0].set('{%s}role'%self.NAMESPACES['opf'], 'bkp')
self.set_text(matches[0], str(val))
    def identifier_iter(self):
        # Yield every identifier element in the metadata section
        yield from self.identifier_path(self.metadata)
@property
def raw_unique_identifier(self):
uuid_elem = None
for attr in self.root.attrib:
if attr.endswith('unique-identifier'):
uuid_elem = self.root.attrib[attr]
break
if uuid_elem:
matches = self.root.xpath('//*[@id=%s]'%escape_xpath_attr(uuid_elem))
if matches:
for m in matches:
raw = m.text
if raw:
return raw
@property
def unique_identifier(self):
raw = self.raw_unique_identifier
if raw:
return raw.rpartition(':')[-1]
@property
def page_progression_direction(self):
spine = self.XPath('descendant::*[re:match(name(), "spine", "i")][1]')(self.root)
if spine:
for k, v in iteritems(spine[0].attrib):
if k == 'page-progression-direction' or k.endswith('}page-progression-direction'):
return v
@property
def primary_writing_mode(self):
for m in self.XPath('//*[local-name()="meta" and @name="primary-writing-mode" and @content]')(self.root):
return m.get('content')
@property
def epub3_raster_cover(self):
for item in self.itermanifest():
props = set((item.get('properties') or '').lower().split())
if 'cover-image' in props:
mt = item.get('media-type', '')
if mt and 'xml' not in mt and 'html' not in mt:
return item.get('href', None)
    @property
    def raster_cover(self):
        # href of the cover image named by a <meta name="cover"> element.
        # The content attribute is matched first against manifest ids, then
        # against hrefs (some producers store the href directly). Falls back
        # to the EPUB 3 cover-image property when no meta element exists.
        covers = self.raster_cover_path(self.metadata)
        if covers:
            cover_id = covers[0].get('content')
            for item in self.itermanifest():
                if item.get('id', None) == cover_id:
                    mt = item.get('media-type', '')
                    if mt and 'xml' not in mt and 'html' not in mt:
                        return item.get('href', None)
            for item in self.itermanifest():
                if item.get('href', None) == cover_id:
                    mt = item.get('media-type', '')
                    if mt and 'xml' not in mt and 'html' not in mt:
                        return item.get('href', None)
        elif self.package_version >= 3.0:
            return self.epub3_raster_cover
@property
def guide_raster_cover(self):
covers = self.guide_cover_path(self.root)
if covers:
mt_map = {i.get('href'):i for i in self.itermanifest()}
for href in covers:
if href:
i = mt_map.get(href)
if i is not None:
iid, mt = i.get('id'), i.get('media-type')
if iid and mt and mt.lower() in {'image/png', 'image/jpeg', 'image/jpg', 'image/gif'}:
return i
@property
def epub3_nav(self):
if self.package_version >= 3.0:
for item in self.itermanifest():
props = (item.get('properties') or '').lower().split()
if 'nav' in props:
mt = item.get('media-type') or ''
if 'html' in mt.lower():
mid = item.get('id')
if mid:
path = self.manifest.path_for_id(mid)
if path and os.path.exists(path):
return path
@property
def cover(self):
if self.guide is not None:
for t in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
for item in self.guide:
if item.type and item.type.lower() == t:
return item.path
    @cover.setter
    def cover(self, path):
        # Point the guide cover references at `path`, creating a <guide>
        # section if needed, and ensure the cover file is in the manifest.
        if self.guide is not None:
            self.guide.set_cover(path)
            # Remove stale cover references from the raw XML; the guide
            # object now holds the fresh ones
            for item in list(self.iterguide()):
                if 'cover' in item.get('type', ''):
                    item.getparent().remove(item)
        else:
            g = self.create_guide_element()
            self.guide = Guide()
            self.guide.set_cover(path)
            etree.SubElement(g, 'opf:reference', nsmap=self.NAMESPACES,
                attrib={'type':'cover', 'href':self.guide[-1].href()})
        id = self.manifest.id_for_path(self.cover)
        if id is None:
            # Cover not yet in the manifest: add an item per cover reference.
            # NOTE(review): item.type.lower() will raise if type is None —
            # verify set_cover() always assigns a type, as it appears to.
            for t in ('cover', 'other.ms-coverimage-standard', 'other.ms-coverimage'):
                for item in self.guide:
                    if item.type.lower() == t:
                        self.create_manifest_item(item.href(), guess_type(path)[0])
def get_metadata_element(self, name):
matches = self.metadata_elem_path(self.metadata, name=name)
if matches:
return matches[-1]
def create_metadata_element(self, name, attrib=None, is_dc=True):
if is_dc:
name = '{{{}}}{}'.format(self.NAMESPACES['dc'], name)
else:
attrib = attrib or {}
attrib['name'] = 'calibre:' + name
name = '{{{}}}{}'.format(self.NAMESPACES['opf'], 'meta')
nsmap = dict(self.NAMESPACES)
del nsmap['opf']
elem = etree.SubElement(self.metadata, name, attrib=attrib,
nsmap=nsmap)
elem.tail = '\n'
return elem
    def render(self, encoding='utf-8'):
        # Serialize the package back to bytes, applying device-specific
        # workarounds first. Returns the XML with a declaration prepended if
        # lxml did not emit one.
        for meta in self.raster_cover_path(self.metadata):
            # Ensure that the name attribute occurs before the content
            # attribute. Needed for Nooks.
            a = meta.attrib
            c = a.get('content', None)
            if c is not None:
                del a['content']
                a['content'] = c
        # The PocketBook requires calibre:series_index to come after
        # calibre:series or it fails to read series info
        # We swap attributes instead of elements, as that avoids namespace
        # re-declarations
        smap = {}
        for child in self.metadata.xpath('./*[@name="calibre:series" or @name="calibre:series_index"]'):
            smap[child.get('name')] = (child, self.metadata.index(child))
        if len(smap) == 2 and smap['calibre:series'][1] > smap['calibre:series_index'][1]:
            s, si = smap['calibre:series'][0], smap['calibre:series_index'][0]

            def swap(attr):
                t = s.get(attr, '')
                s.set(attr, si.get(attr, '')), si.set(attr, t)
            swap('name'), swap('content')
        self.write_user_metadata()
        if pretty_print_opf:
            _pretty_print(self.root)
        raw = etree.tostring(self.root, encoding=encoding, pretty_print=True)
        if not raw.lstrip().startswith(b'<?xml '):
            raw = ('<?xml version="1.0" encoding="%s"?>\n'%encoding.upper()).encode('ascii') + raw
        return raw
    def smart_update(self, mi, replace_metadata=False, apply_null=False):
        '''Merge the metadata in *mi* into this OPF.

        Null values in *mi* are skipped, unless apply_null is True for the
        fields that support clearing. Custom (user) fields are merged via a
        temporary book Metadata object and written back at the end.
        '''
        for attr in ('title', 'authors', 'author_sort', 'title_sort',
                     'publisher', 'series', 'series_index', 'rating',
                     'isbn', 'tags', 'category', 'comments', 'book_producer',
                     'pubdate', 'user_categories', 'link_maps'):
            val = getattr(mi, attr, None)
            if attr == 'rating' and val:
                val = float(val)
            # Ratings below 0.1 are treated as unset
            is_null = val is None or val in ((), [], (None, None), {}) or (attr == 'rating' and (not val or val < 0.1))
            if is_null:
                if apply_null and attr in {'series', 'tags', 'isbn', 'comments', 'publisher', 'rating'}:
                    setattr(self, attr, ([] if attr == 'tags' else None))
            else:
                setattr(self, attr, val)
        langs = getattr(mi, 'languages', [])
        if langs == ['und']:
            # 'und' (undetermined) carries no information
            langs = []
        if apply_null or langs:
            self.languages = langs or []
        temp = self.to_book_metadata()
        temp.remove_stale_user_metadata(mi)
        temp.smart_update(mi, replace_metadata=replace_metadata)
        if not replace_metadata and callable(getattr(temp, 'custom_field_keys', None)):
            # We have to replace non-null fields regardless of the value of
            # replace_metadata to match the behavior of the builtin fields
            # above.
            for x in temp.custom_field_keys():
                meta = temp.get_user_metadata(x, make_copy=True)
                if meta is None:
                    continue
                if meta['datatype'] == 'text' and meta['is_multiple']:
                    val = mi.get(x, [])
                    if val or apply_null:
                        temp.set(x, val)
                elif meta['datatype'] in {'int', 'float', 'bool'}:
                    # These types can legitimately be falsy (0, 0.0, False),
                    # so distinguish "absent" with a sentinel.
                    missing = object()
                    val = mi.get(x, missing)
                    if val is missing:
                        if apply_null:
                            temp.set(x, None)
                    elif apply_null or val is not None:
                        temp.set(x, val)
                elif apply_null and mi.is_null(x) and not temp.is_null(x):
                    temp.set(x, None)
        self._user_metadata_ = temp.get_all_user_metadata(True)
# }}}
class OPFCreator(Metadata):
    # Serializes a Metadata object (plus manifest/spine/guide/toc) as an
    # OPF 2.0 package document.
    def __init__(self, base_path, other):
        '''
        Initialize.
        @param base_path: An absolute path to the folder in which this OPF file
        will eventually be. This is used by the L{create_manifest} method
        to convert paths to files into relative paths.
        '''
        Metadata.__init__(self, title='', other=other)
        self.base_path = os.path.abspath(base_path)
        self.page_progression_direction = None
        self.primary_writing_mode = None
        if self.application_id is None:
            self.application_id = str(uuid.uuid4())
        if not isinstance(self.toc, TOC):
            self.toc = None
        if not self.authors:
            self.authors = [_('Unknown')]
        if self.guide is None:
            self.guide = Guide()
        if self.cover:
            self.guide.set_cover(self.cover)
    def create_manifest(self, entries):
        '''
        Create <manifest>
        `entries`: List of (path, mime-type) If mime-type is None it is autodetected
        '''
        # Make all paths absolute relative to base_path before building
        entries = list(map(lambda x: x if os.path.isabs(x[0]) else
                      (os.path.abspath(os.path.join(self.base_path, x[0])), x[1]),
                      entries))
        self.manifest = Manifest.from_paths(entries)
        self.manifest.set_basedir(self.base_path)
    def create_manifest_from_files_in(self, files_and_dirs,
            exclude=lambda x:False):
        '''Build the manifest from files and (recursively walked) folders.'''
        entries = []
        def dodir(dir):
            # Recursively collect all non-excluded files under *dir*
            for spec in os.walk(dir):
                root, files = spec[0], spec[-1]
                for name in files:
                    path = os.path.join(root, name)
                    if os.path.isfile(path) and not exclude(path):
                        entries.append((path, None))
        for i in files_and_dirs:
            if os.path.isdir(i):
                dodir(i)
            else:
                entries.append((i, None))
        self.create_manifest(entries)
    def create_spine(self, entries):
        '''
        Create the <spine> element. Must first call :method:`create_manifest`.
        `entries`: List of paths
        '''
        entries = list(map(lambda x: x if os.path.isabs(x) else
                      os.path.abspath(os.path.join(self.base_path, x)), entries))
        self.spine = Spine.from_paths(entries, self.manifest)
    def set_toc(self, toc):
        '''
        Set the toc. You must call :method:`create_spine` before calling this
        method.
        :param toc: A :class:`TOC` object
        '''
        self.toc = toc
    def create_guide(self, guide_element):
        '''Build the guide from an existing OPF <guide> element.'''
        self.guide = Guide.from_opf_guide(guide_element, self.base_path)
        self.guide.set_basedir(self.base_path)
    def render(self, opf_stream=sys.stdout, ncx_stream=None,
            ncx_manifest_entry=None, encoding=None, process_guide=None):
        '''Write the OPF to opf_stream (and the NCX toc to ncx_stream if given).

        If ncx_manifest_entry is supplied, the manifest entry with id 'ncx'
        is replaced to point at it. process_guide, if given, is called with
        the ElementMaker and the <guide> element before serialization.
        '''
        if encoding is None:
            encoding = 'utf-8'
        toc = getattr(self, 'toc', None)
        if self.manifest:
            self.manifest.set_basedir(self.base_path)
            if ncx_manifest_entry is not None and toc is not None:
                if not os.path.isabs(ncx_manifest_entry):
                    ncx_manifest_entry = os.path.join(self.base_path, ncx_manifest_entry)
                # Replace any existing ncx manifest item with the new one
                remove = [i for i in self.manifest if i.id == 'ncx']
                for item in remove:
                    self.manifest.remove(item)
                self.manifest.append(ManifestItem(ncx_manifest_entry, self.base_path))
                self.manifest[-1].id = 'ncx'
                self.manifest[-1].mime_type = 'application/x-dtbncx+xml'
        if self.guide is None:
            self.guide = Guide()
        if self.cover:
            cover = self.cover
            if not os.path.isabs(cover):
                cover = os.path.abspath(os.path.join(self.base_path, cover))
            self.guide.set_cover(cover)
        self.guide.set_basedir(self.base_path)
        # Actual rendering
        from lxml.builder import ElementMaker
        from calibre.ebooks.oeb.base import CALIBRE_NS, DC11_NS, OPF2_NS
        # Use a placeholder namespace so that lxml does not prefix every
        # element; it is textually swapped back to OPF2_NS at the end.
        DNS = OPF2_NS+'___xx___'
        E = ElementMaker(namespace=DNS, nsmap={None:DNS})
        M = ElementMaker(namespace=DNS,
                nsmap={'dc':DC11_NS, 'calibre':CALIBRE_NS, 'opf':OPF2_NS})
        DC = ElementMaker(namespace=DC11_NS)
        def DC_ELEM(tag, text, dc_attrs={}, opf_attrs={}):
            # Build a dc: element, attaching opf: namespaced attributes
            if text:
                elem = getattr(DC, tag)(clean_ascii_chars(text), **dc_attrs)
            else:
                elem = getattr(DC, tag)(**dc_attrs)
            for k, v in opf_attrs.items():
                elem.set('{%s}%s'%(OPF2_NS, k), v)
            return elem
        def CAL_ELEM(name, content):
            # calibre-specific metadata goes into <meta name= content=>
            return M.meta(name=name, content=content)
        metadata = M.metadata()
        a = metadata.append
        role = {}
        a(DC_ELEM('title', self.title if self.title else _('Unknown'),
            opf_attrs=role))
        for i, author in enumerate(self.authors):
            fa = {'role':'aut'}
            # file-as only makes sense once, on the first author
            if i == 0 and self.author_sort:
                fa['file-as'] = self.author_sort
            a(DC_ELEM('creator', author, opf_attrs=fa))
        a(DC_ELEM('contributor', '%s (%s) [%s]'%(__appname__, __version__,
            'https://calibre-ebook.com'), opf_attrs={'role':'bkp',
            'file-as':__appname__}))
        a(DC_ELEM('identifier', str(self.application_id),
            opf_attrs={'scheme':__appname__},
            dc_attrs={'id':__appname__+'_id'}))
        if getattr(self, 'pubdate', None) is not None:
            a(DC_ELEM('date', self.pubdate.isoformat()))
        langs = self.languages
        if not langs or langs == ['und']:
            # Fall back to the interface language
            langs = [get_lang().replace('_', '-').partition('-')[0]]
        for lang in langs:
            a(DC_ELEM('language', lang))
        if self.comments:
            a(DC_ELEM('description', self.comments))
        if self.publisher:
            a(DC_ELEM('publisher', self.publisher))
        for key, val in iteritems(self.get_identifiers()):
            a(DC_ELEM('identifier', val, opf_attrs={'scheme':icu_upper(key)}))
        if self.rights:
            a(DC_ELEM('rights', self.rights))
        if self.tags:
            for tag in self.tags:
                a(DC_ELEM('subject', tag))
        if self.series:
            a(CAL_ELEM('calibre:series', self.series))
            if self.series_index is not None:
                a(CAL_ELEM('calibre:series_index', self.format_series_index()))
        if self.title_sort:
            a(CAL_ELEM('calibre:title_sort', self.title_sort))
        if self.rating is not None:
            a(CAL_ELEM('calibre:rating', str(self.rating)))
        if self.timestamp is not None:
            a(CAL_ELEM('calibre:timestamp', self.timestamp.isoformat()))
        if self.publication_type is not None:
            a(CAL_ELEM('calibre:publication_type', self.publication_type))
        if self.user_categories:
            from calibre.ebooks.metadata.book.json_codec import object_to_unicode
            a(CAL_ELEM('calibre:user_categories',
                       json.dumps(object_to_unicode(self.user_categories))))
        if self.primary_writing_mode:
            a(M.meta(name='primary-writing-mode', content=self.primary_writing_mode))
        manifest = E.manifest()
        if self.manifest is not None:
            for ref in self.manifest:
                href = ref.href()
                if isinstance(href, bytes):
                    href = href.decode('utf-8')
                item = E.item(id=str(ref.id), href=href)
                item.set('media-type', ref.mime_type)
                manifest.append(item)
        spine = E.spine()
        if self.toc is not None:
            spine.set('toc', 'ncx')
        if self.page_progression_direction is not None:
            spine.set('page-progression-direction', self.page_progression_direction)
        if self.spine is not None:
            for ref in self.spine:
                if ref.id is not None:
                    spine.append(E.itemref(idref=ref.id))
        guide = E.guide()
        if self.guide is not None:
            for ref in self.guide:
                href = ref.href()
                if isinstance(href, bytes):
                    href = href.decode('utf-8')
                item = E.reference(type=ref.type, href=href)
                if ref.title:
                    item.set('title', ref.title)
                guide.append(item)
        if process_guide is not None:
            process_guide(E, guide)
        serialize_user_metadata(metadata, self.get_all_user_metadata(False))
        root = E.package(
                metadata,
                manifest,
                spine,
                guide
        )
        root.set('unique-identifier', __appname__+'_id')
        root.set('version', '2.0')
        raw = etree.tostring(root, pretty_print=True, xml_declaration=True,
                encoding=encoding)
        # Swap the placeholder namespace back to the real OPF namespace
        raw = raw.replace(DNS.encode('utf-8'), OPF2_NS.encode('utf-8'))
        opf_stream.write(raw)
        opf_stream.flush()
        if toc is not None and ncx_stream is not None:
            toc.render(ncx_stream, self.application_id)
            ncx_stream.flush()
def metadata_to_opf(mi, as_string=True, default_lang=None):
    '''Serialize the Metadata object *mi* as an OPF 2.0 document.

    Returns the document as UTF-8 encoded bytes if as_string is True,
    otherwise as an lxml element tree root.
    '''
    import textwrap
    from lxml import etree
    from calibre.ebooks.oeb.base import DC, OPF
    # Ensure the identifiers/producer/language fields are always populated
    if not mi.application_id:
        mi.application_id = str(uuid.uuid4())
    if not mi.uuid:
        mi.uuid = str(uuid.uuid4())
    if not mi.book_producer:
        mi.book_producer = __appname__ + ' (%s) '%__version__ + \
            '[https://calibre-ebook.com]'
    if not mi.languages:
        lang = (get_lang().replace('_', '-').partition('-')[0] if default_lang
                is None else default_lang)
        mi.languages = [lang]
    root = safe_xml_fromstring(textwrap.dedent(
    '''
    <package xmlns="http://www.idpf.org/2007/opf" unique-identifier="uuid_id" version="2.0">
        <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
            <dc:identifier opf:scheme="%(a)s" id="%(a)s_id">%(id)s</dc:identifier>
            <dc:identifier opf:scheme="uuid" id="uuid_id">%(uuid)s</dc:identifier>
        </metadata>
        <guide/>
    </package>
    '''%dict(a=__appname__, id=mi.application_id, uuid=mi.uuid)))
    metadata = root[0]
    guide = root[1]
    metadata[0].tail = '\n'+(' '*8)
    def factory(tag, text=None, sort=None, role=None, scheme=None, name=None,
            content=None):
        # Create and append one metadata element, retrying with cleaned
        # attribute/text values if they contain invalid XML characters.
        attrib = {}
        if sort:
            attrib[OPF('file-as')] = sort
        if role:
            attrib[OPF('role')] = role
        if scheme:
            attrib[OPF('scheme')] = scheme
        if name:
            attrib['name'] = name
        if content:
            attrib['content'] = content
        try:
            elem = metadata.makeelement(tag, attrib=attrib)
        except ValueError:
            elem = metadata.makeelement(tag, attrib={k:clean_xml_chars(v) for k, v in iteritems(attrib)})
        elem.tail = '\n'+(' '*8)
        if text:
            try:
                elem.text = text.strip()
            except ValueError:
                elem.text = clean_ascii_chars(text.strip())
        metadata.append(elem)
    factory(DC('title'), mi.title)
    for au in mi.authors:
        factory(DC('creator'), au, mi.author_sort, 'aut')
    factory(DC('contributor'), mi.book_producer, __appname__, 'bkp')
    if hasattr(mi.pubdate, 'isoformat'):
        factory(DC('date'), isoformat(mi.pubdate))
    if hasattr(mi, 'category') and mi.category:
        factory(DC('type'), mi.category)
    if mi.comments:
        factory(DC('description'), clean_ascii_chars(mi.comments))
    if mi.publisher:
        factory(DC('publisher'), mi.publisher)
    for key, val in iteritems(mi.get_identifiers()):
        factory(DC('identifier'), val, scheme=icu_upper(key))
    if mi.rights:
        factory(DC('rights'), mi.rights)
    for lang in mi.languages:
        if not lang or lang.lower() == 'und':
            # 'und' (undetermined) carries no information
            continue
        factory(DC('language'), lang)
    if mi.tags:
        for tag in mi.tags:
            factory(DC('subject'), tag)
    def meta(n, c):
        # calibre-specific fields go into <meta name="calibre:..." content=...>
        return factory('meta', name='calibre:' + n, content=c)
    if not mi.is_null('link_maps'):
        meta('link_maps', dump_dict(mi.link_maps))
    if mi.series:
        meta('series', mi.series)
    if mi.series_index is not None:
        meta('series_index', mi.format_series_index())
    if mi.rating is not None:
        meta('rating', str(mi.rating))
    if hasattr(mi.timestamp, 'isoformat'):
        meta('timestamp', isoformat(mi.timestamp))
    if mi.publication_type:
        meta('publication_type', mi.publication_type)
    if mi.title_sort:
        meta('title_sort', mi.title_sort)
    if mi.user_categories:
        meta('user_categories', dump_dict(mi.user_categories))
    serialize_user_metadata(metadata, mi.get_all_user_metadata(False))
    all_annotations = getattr(mi, 'all_annotations', None)
    if all_annotations:
        serialize_annotations(metadata, all_annotations)
    metadata[-1].tail = '\n' +(' '*4)
    if mi.cover:
        if not isinstance(mi.cover, str):
            mi.cover = mi.cover.decode(filesystem_encoding)
        guide.text = '\n'+(' '*8)
        r = guide.makeelement(OPF('reference'),
                attrib={'type':'cover', 'title':_('Cover'), 'href':mi.cover})
        r.tail = '\n' +(' '*4)
        guide.append(r)
    if pretty_print_opf:
        _pretty_print(root)
    return etree.tostring(root, pretty_print=True, encoding='utf-8',
            xml_declaration=True) if as_string else root
def test_m2o():
    '''Round-trip smoke test: Metadata -> OPF -> Metadata, printing failures.'''
    from calibre.utils.date import now as nowf
    mi = MetaInformation('test & title', ['a"1', "a'2"])
    mi.title_sort = 'a\'"b'
    mi.author_sort = 'author sort'
    mi.pubdate = nowf()
    mi.language = 'en'
    mi.comments = 'what a fun book\n\n'
    mi.publisher = 'publisher'
    mi.set_identifiers({'isbn':'booo', 'dummy':'dummy'})
    mi.tags = ['a', 'b']
    mi.series = 's"c\'l&<>'
    mi.series_index = 3.34
    mi.rating = 3
    mi.timestamp = nowf()
    mi.publication_type = 'ooooo'
    mi.rights = 'yes'
    mi.cover = os.path.abspath('asd.jpg')
    opf = metadata_to_opf(mi)
    print(opf)
    newmi = MetaInformation(OPF(io.BytesIO(opf)))
    # Compare every round-tripped attribute with the original
    for attr in ('author_sort', 'title_sort', 'comments',
                    'publisher', 'series', 'series_index', 'rating',
                    'isbn', 'tags', 'cover_data', 'application_id',
                    'language', 'cover',
                    'book_producer', 'timestamp',
                    'pubdate', 'rights', 'publication_type'):
        o, n = getattr(mi, attr), getattr(newmi, attr)
        if o != n and o.strip() != n.strip():
            print('FAILED:', attr, getattr(mi, attr), '!=', getattr(newmi, attr))
    if mi.get_identifiers() != newmi.get_identifiers():
        print('FAILED:', 'identifiers', mi.get_identifiers(), end=' ')
        print('!=', newmi.get_identifiers())
def suite():
    '''Return a unittest suite exercising OPF reading, writing and creation.'''
    import unittest
    class OPFTest(unittest.TestCase):
        def setUp(self):
            # A small OPF 2.0 document covering dc: and calibre: fields
            self.stream = io.BytesIO(
                b'''\
<?xml version="1.0"  encoding="UTF-8"?>
<package version="2.0" xmlns="http://www.idpf.org/2007/opf" >
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
    <dc:title opf:file-as="Wow">A Cool &amp; &copy; &#223; Title</dc:title>
    <creator opf:role="aut" file-as="Monkey">Monkey Kitchen</creator>
    <creator opf:role="aut">Next</creator>
    <dc:subject>One</dc:subject><dc:subject>Two</dc:subject>
    <dc:identifier scheme="ISBN">123456789</dc:identifier>
    <dc:identifier scheme="dummy">dummy</dc:identifier>
    <meta name="calibre:series" content="A one book series" />
    <meta name="calibre:rating" content="4"/>
    <meta name="calibre:publication_type" content="test"/>
    <meta name="calibre:series_index" content="2.5" />
</metadata>
<manifest>
    <item id="1" href="a%20%7E%20b" media-type="text/txt" />
</manifest>
</package>
'''
            )
            self.opf = OPF(self.stream, os.getcwd())
        def testReading(self, opf=None):
            if opf is None:
                opf = self.opf
            self.assertEqual(opf.title, 'A Cool & \xa9 \xdf Title')
            self.assertEqual(opf.authors, 'Monkey Kitchen,Next'.split(','))
            self.assertEqual(opf.author_sort, 'Monkey')
            self.assertEqual(opf.title_sort, 'Wow')
            self.assertEqual(opf.tags, ['One', 'Two'])
            self.assertEqual(opf.isbn, '123456789')
            self.assertEqual(opf.series, 'A one book series')
            self.assertEqual(opf.series_index, 2.5)
            self.assertEqual(opf.rating, 4)
            self.assertEqual(opf.publication_type, 'test')
            # hrefs are percent-decoded on read
            self.assertEqual(list(opf.itermanifest())[0].get('href'), 'a ~ b')
            self.assertEqual(opf.get_identifiers(), {'isbn':'123456789',
                'dummy':'dummy'})
        def testWriting(self):
            # Each attribute must read back exactly what was set
            for test in [('title', 'New & Title'), ('authors', ['One', 'Two']),
                         ('author_sort', "Kitchen"), ('tags', ['Three']),
                         ('isbn', 'a'), ('rating', 3), ('series_index', 1),
                         ('title_sort', 'ts')]:
                setattr(self.opf, *test)
                attr, val = test
                self.assertEqual(getattr(self.opf, attr), val)
            self.opf.render()
        def testCreator(self):
            # OPFCreator output must be readable back by OPF
            opf = OPFCreator(os.getcwd(), self.opf)
            buf = io.BytesIO()
            opf.render(buf)
            raw = buf.getvalue()
            self.testReading(opf=OPF(io.BytesIO(raw), os.getcwd()))
        def testSmartUpdate(self):
            # Updating an OPF from itself must be a no-op
            self.opf.smart_update(MetaInformation(self.opf))
            self.testReading()
    return unittest.TestLoader().loadTestsFromTestCase(OPFTest)
def test():
    '''Run the OPF unit test suite with verbose output.'''
    import unittest
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
def test_user_metadata():
    '''Check user (custom column) metadata survives both OPF serializers.'''
    mi = Metadata('Test title', ['test author1', 'test author2'])
    um = {
        '#myseries': {'#value#': 'test series\xe4', 'datatype':'text',
                      'is_multiple': None, 'name': 'My Series'},
        '#myseries_index': {'#value#': 2.45, 'datatype': 'float',
                            'is_multiple': None},
        '#mytags': {'#value#':['t1','t2','t3'], 'datatype':'text',
                    'is_multiple': '|', 'name': 'My Tags'}
    }
    mi.set_all_user_metadata(um)
    raw = metadata_to_opf(mi)
    opfc = OPFCreator(os.getcwd(), other=mi)
    out = io.BytesIO()
    opfc.render(out)
    raw2 = out.getvalue()
    f = io.BytesIO(raw)
    opf = OPF(f)
    f2 = io.BytesIO(raw2)
    opf2 = OPF(f2)
    # Both serialization paths must preserve the custom fields exactly
    assert um == opf._user_metadata_
    assert um == opf2._user_metadata_
    print(opf.render())
if __name__ == '__main__':
    # Manual test entry point: round-trip check plus the unittest suite
    # test_user_metadata()
    test_m2o()
    test()
| 70,073 | Python | .py | 1,653 | 31.222021 | 150 | 0.550428 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,503 | worker.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/worker.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
from calibre.customize.ui import run_plugins_on_import
from calibre.ebooks.metadata.meta import metadata_from_formats
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.utils.filenames import samefile
from calibre.utils.icu import lower as icu_lower
def serialize_metadata_for(paths, tdir, group_id):
    '''Read combined metadata from *paths* and serialize it as OPF.

    Any embedded cover image is written to ``<tdir>/<group_id>.cdata``.
    Returns (mi, opf_bytes, has_cover).
    '''
    mi = metadata_from_formats(paths)
    mi.cover = None
    cdata = mi.cover_data[-1] if mi.cover_data else None
    mi.cover_data = (None, None)
    if not mi.application_id:
        # Placeholder so the serialized OPF always has an application id
        mi.application_id = '__calibre_dummy__'
    opf = metadata_to_opf(mi, default_lang='und')
    has_cover = False
    if cdata:
        cover_path = os.path.join(tdir, '%s.cdata' % group_id)
        with open(cover_path, 'wb') as f:
            f.write(cdata)
        has_cover = True
    return mi, opf, has_cover
def read_metadata_bulk(get_opf, get_cover, paths):
    '''Read combined metadata from *paths*.

    Returns a dict with 'opf' (serialized OPF, when get_opf) and 'cdata'
    (raw cover bytes, when get_cover); unrequested entries stay None.
    '''
    mi = metadata_from_formats(paths)
    mi.cover = None
    cdata = mi.cover_data[-1] if mi.cover_data else None
    mi.cover_data = (None, None)
    if not mi.application_id:
        mi.application_id = '__calibre_dummy__'
    result = {'opf': None, 'cdata': None}
    if get_opf:
        result['opf'] = metadata_to_opf(mi, default_lang='und')
    if get_cover:
        result['cdata'] = cdata
    return result
def run_import_plugins(paths, group_id, tdir):
    '''Run the registered import plugins on every readable path.

    A file produced by a plugin is moved into ``<tdir>/<group_id>/`` under
    the original file's base name (with the plugin's extension) so that
    metadata read from the filename is not broken. Returns the final paths.
    '''
    usable = []
    for path in paths:
        if not os.access(path, os.R_OK):
            continue
        try:
            processed = run_plugins_on_import(path)
        except Exception:
            processed = None
            import traceback
            traceback.print_exc()
        if processed and os.access(processed, os.R_OK) and not samefile(processed, path):
            # Ensure that the filename is preserved so that
            # reading metadata from filename is not broken
            base = os.path.splitext(os.path.basename(path))[0]
            new_ext = os.path.splitext(processed)[1]
            dest = os.path.join(tdir, str(group_id), base + new_ext)
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            try:
                os.replace(processed, dest)
            except OSError:
                # Rename can fail across filesystems; fall back to copying
                shutil.copyfile(processed, dest)
            path = dest
        usable.append(path)
    return usable
def has_book(mi, data_for_has_book):
    '''Return True if mi.title (stripped, icu-lowercased) is in *data_for_has_book*.

    *data_for_has_book* is expected to contain already-lowercased titles.
    The original returned the falsy title itself ('' or None) when the title
    was unset; normalize to a real bool so callers always get True/False
    (truthiness is unchanged, so this is backward compatible).
    '''
    return bool(mi.title and icu_lower(mi.title.strip()) in data_for_has_book)
def read_metadata(paths, group_id, tdir, common_data=None):
    '''Run import plugins on *paths*, then serialize the combined metadata.

    Returns (paths, opf, has_cover, duplicate_info). duplicate_info is only
    computed when *common_data* is a set of existing lowercased titles.
    '''
    processed = run_import_plugins(paths, group_id, tdir)
    mi, opf, has_cover = serialize_metadata_for(processed, tdir, group_id)
    duplicate_info = None
    if isinstance(common_data, (set, frozenset)):
        duplicate_info = has_book(mi, common_data)
    return processed, opf, has_cover, duplicate_info
| 2,809 | Python | .py | 73 | 31.39726 | 72 | 0.640602 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,504 | ereader.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/ereader.py | '''
Read meta information from eReader pdb files.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
import struct
from calibre.ebooks.metadata import MetaInformation, authors_to_string
from calibre.ebooks.pdb.ereader.reader132 import HeaderRecord
from calibre.ebooks.pdb.header import PdbHeaderBuilder, PdbHeaderReader
def get_cover(pheader, eheader):
    '''Return ('png', data) for the image record named cover.png, if any.

    Each image record stores its name in bytes 4..36 (NUL padded) and the
    raw image data from byte 62 on. data is None when no cover is found.
    '''
    cover_data = None
    for idx in range(eheader.image_count):
        record = pheader.section_data(eheader.image_data_offset + idx)
        name = record[4:36].strip(b'\x00')
        if name == b'cover.png':
            cover_data = record[62:]
            break
    return ('png', cover_data)
def get_metadata(stream, extract_cover=True):
    """
    Return metadata as a L{MetaInfo} object
    """
    mi = MetaInformation(None, [_('Unknown')])
    stream.seek(0)
    pheader = PdbHeaderReader(stream)
    # Only Dropbook produced 132 byte record0 files are supported
    if len(pheader.section_data(0)) == 132:
        hr = HeaderRecord(pheader.section_data(0))
        if hr.compression in (2, 10) and hr.has_metadata == 1:
            try:
                # Metadata record: NUL separated cp1252 fields
                # (title, author, ?, publisher, isbn)
                mdata = pheader.section_data(hr.metadata_offset)
                mdata = mdata.decode('cp1252', 'replace').split('\x00')
                # Strip any characters outside a small safe ASCII whitelist
                mi.title = re.sub(r'[^a-zA-Z0-9 \._=\+\-!\?,\'\"]', '', mdata[0])
                mi.authors = [re.sub(r'[^a-zA-Z0-9 \._=\+\-!\?,\'\"]', '', mdata[1])]
                mi.publisher = re.sub(r'[^a-zA-Z0-9 \._=\+\-!\?,\'\"]', '', mdata[3])
                mi.isbn = re.sub(r'[^a-zA-Z0-9 \._=\+\-!\?,\'\"]', '', mdata[4])
            except Exception:
                # Malformed metadata record: keep whatever was parsed so far
                pass
            if extract_cover:
                mi.cover_data = get_cover(pheader, hr)
    if not mi.title:
        mi.title = pheader.title if pheader.title else _('Unknown')
    return mi
def set_metadata(stream, mi):
    '''Merge the metadata in *mi* into the eReader pdb file in *stream*.

    Only Dropbook produced files (132 byte record0, compression 2 or 10) are
    supported; anything else is left untouched.
    '''
    pheader = PdbHeaderReader(stream)
    # Only Dropbook produced 132 byte record0 files are supported.
    # BUG FIX: the original compared the record data itself (bytes) to the
    # integer 132, which is never equal, so this function always returned
    # without doing anything. Compare the record length, as get_metadata does.
    if len(pheader.section_data(0)) != 132:
        return
    sections = [pheader.section_data(x) for x in range(0, pheader.section_count())]
    hr = HeaderRecord(sections[0])
    if hr.compression not in (2, 10):
        return
    # record0 must be mutable so the offset patches below can be done in place
    sections[0] = bytearray(sections[0])
    # Create a metadata record for the file if one does not already exist
    if not hr.has_metadata:
        sections += [b'', b'MeTaInFo\x00']
        last_data = len(sections) - 1
        # Shift any record offsets that pointed past the old end of data
        for i in range(0, 132, 2):
            val, = struct.unpack('>H', sections[0][i:i + 2])
            if val >= hr.last_data_offset:
                sections[0][i:i + 2] = struct.pack('>H', last_data)
        sections[0][24:26] = struct.pack('>H', 1)  # Set has metadata
        sections[0][44:46] = struct.pack('>H', last_data - 1)  # Set location of metadata
        sections[0][52:54] = struct.pack('>H', last_data)  # Ensure last data offset is updated
    # Merge the metadata into the file
    file_mi = get_metadata(stream, False)
    file_mi.smart_update(mi)
    sections[hr.metadata_offset] = ('{}\x00{}\x00{}\x00{}\x00{}\x00'.format(
        file_mi.title, authors_to_string(file_mi.authors), '', file_mi.publisher, file_mi.isbn)).encode('cp1252', 'replace')
    # Rebuild the PDB wrapper because the offsets have changed due to the
    # new metadata.
    pheader_builder = PdbHeaderBuilder(pheader.ident, pheader.title)
    stream.seek(0)
    stream.truncate(0)
    pheader_builder.build_header([len(x) for x in sections], stream)
    # Write the data back to the file
    for item in sections:
        stream.write(item)
27,505 | archive.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/archive.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from contextlib import closing
from calibre.customize import FileTypePlugin
from calibre.utils.localization import canonicalize_lang
def is_comic(list_of_names):
    '''Return True if every extension in *list_of_names* is a comic page image.

    Names without an extension and Windows thumbs.db files are ignored.
    '''
    allowed = {'jpg', 'jpeg', 'png'}
    seen = set()
    for name in list_of_names:
        if '.' not in name:
            continue
        if name.lower().rpartition('/')[-1] == 'thumbs.db':
            continue
        seen.add(name.rpartition('.')[-1].lower())
    return not (seen - allowed)
def archive_type(stream):
    '''Sniff the first four bytes of *stream* for a ZIP or RAR signature.

    Returns 'zip', 'rar' or None. The stream position is restored when the
    stream is seekable.
    '''
    from calibre.utils.zipfile import stringFileHeader
    try:
        start = stream.tell()
    except:
        start = 0
    magic = stream.read(4)
    kind = None
    if magic == stringFileHeader:
        kind = 'zip'
    elif magic.startswith(b'Rar'):
        kind = 'rar'
    try:
        stream.seek(start)
    except Exception:
        pass
    return kind
class KPFExtract(FileTypePlugin):
    # Import-time plugin: replaces a Kindle Create KPF file with the DOCX
    # source document embedded inside it.
    name = 'KPF Extract'
    author = 'Kovid Goyal'
    description = _('Extract the source DOCX file from Amazon Kindle Create KPF files.'
            ' Note this will not contain any edits made in the Kindle Create program itself.')
    file_types = {'kpf'}
    supported_platforms = ['windows', 'osx', 'linux']
    on_import = True
    def run(self, archive):
        '''Return the path of the extracted DOCX or *archive* unchanged when
        the KPF contains no DOCX member.'''
        from calibre.utils.zipfile import ZipFile
        with ZipFile(archive, 'r') as zf:
            fnames = zf.namelist()
            candidates = [x for x in fnames if x.lower().endswith('.docx')]
            if not candidates:
                return archive
            of = self.temporary_file('_kpf_extract.docx')
            with closing(of):
                # Extract the first (normally only) DOCX member
                of.write(zf.read(candidates[0]))
        return of.name
class RAR:
    '''Minimal ZipFile-like adapter over a RAR archive path.'''
    def __init__(self, archive):
        self.archive = archive
    def close(self):
        # Nothing to release: extraction happens per call
        pass
    def namelist(self):
        from calibre.utils.unrar import names
        return [n for n in names(self.archive)]
    def read(self, fname):
        from calibre.utils.unrar import extract_member
        # extract_member returns (name, data); only the data is wanted
        return extract_member(self.archive, match=None, name=fname)[1]
class SevenZip:
    '''Minimal ZipFile-like adapter over py7zr's SevenZipFile.'''
    def __init__(self, archive):
        from py7zr import SevenZipFile
        self.zf = SevenZipFile(archive, 'r')
    def namelist(self):
        names = self.zf.getnames()
        return list(names)
    def close(self):
        self.zf.close()
    def read(self, fname):
        # py7zr returns {name: file-like}; extract just the requested member
        extracted = self.zf.read((fname,))
        return extracted[fname].read()
class ArchiveExtract(FileTypePlugin):
    # Import-time plugin: replaces a zip/rar/7z archive with the single e-book
    # it contains, or re-labels an image-only archive as a cbz/cbr/cb7 comic.
    name = 'Archive Extract'
    author = 'Kovid Goyal'
    description = _('Extract common e-book formats from archive files '
        '(ZIP/RAR/7z). Also try to autodetect if they are actually '
        'CBZ/CBR/CB7 files.')
    file_types = {'zip', 'rar', '7z'}
    supported_platforms = ['windows', 'osx', 'linux']
    on_import = True
    def run(self, archive):
        '''Return the path to the extracted/converted file, or *archive*
        unchanged when nothing usable is found.'''
        import shutil
        q = archive.lower()
        # Pick the matching reader and the comic extension for this container
        if q.endswith('.rar'):
            comic_ext = 'cbr'
            zf = RAR(archive)
        elif q.endswith('.7z'):
            comic_ext = 'cb7'
            zf = SevenZip(archive)
        else:
            from calibre.utils.zipfile import ZipFile
            zf = ZipFile(archive, 'r')
            comic_ext = 'cbz'
        def fname_ok(fname):
            # Filter out metadata droppings and macOS resource forks
            bn = os.path.basename(fname).lower()
            if bn == 'thumbs.db':
                return False
            if '.' not in bn:
                return False
            if bn.rpartition('.')[-1] in {'diz', 'nfo'}:
                return False
            if '__MACOSX' in fname.split('/'):
                return False
            return True
        with closing(zf):
            fnames = zf.namelist()
            fnames = list(filter(fname_ok, fnames))
            if is_comic(fnames):
                # Image-only archive: keep the bytes, just change extension
                of = self.temporary_file('_archive_extract.'+comic_ext)
                with closing(of), open(archive, 'rb') as f:
                    shutil.copyfileobj(f, of)
                return of.name
            # Only archives containing exactly one candidate file are handled
            if len(fnames) > 1 or not fnames:
                return archive
            fname = fnames[0]
            ext = os.path.splitext(fname)[1][1:]
            if ext.lower() not in {
                    'lit', 'epub', 'mobi', 'prc', 'rtf', 'pdf', 'mp3', 'pdb',
                    'azw', 'azw1', 'azw3', 'fb2', 'docx', 'doc', 'odt'}:
                return archive
            of = self.temporary_file('_archive_extract.'+ext)
            with closing(of):
                of.write(zf.read(fname))
            return of.name
def get_comic_book_info(d, mi, series_index='volume'):
    '''Populate the Metadata object *mi* from a ComicBookInfo dict *d*.

    See http://code.google.com/p/comicbookinfo/wiki/Example
    *series_index* selects which key ('volume' or 'issue') supplies
    mi.series_index; the other key is used as a fallback.
    '''
    series = d.get('series', '')
    if series.strip():
        mi.series = series
        si = d.get(series_index, None)
        if si is None:
            # Fall back to the other numbering key
            si = d.get('issue' if series_index == 'volume' else 'volume', None)
        if si is not None:
            try:
                mi.series_index = float(si)
            except Exception:
                mi.series_index = 1
    if d.get('language', None):
        # BUG FIX: the spec key is 'language' (as tested just above); the
        # original read the non-existent 'lang' key here, so the language
        # was never actually set.
        lang = canonicalize_lang(d.get('language'))
        if lang:
            mi.languages = [lang]
    if d.get('rating', -1) > -1:
        mi.rating = d['rating']
    for x in ('title', 'publisher'):
        y = d.get(x, '').strip()
        if y:
            setattr(mi, x, y)
    tags = d.get('tags', [])
    if tags:
        mi.tags = tags
    authors = []
    for credit in d.get('credits', []):
        if credit.get('role', '') in ('Writer', 'Artist', 'Cartoonist',
                                      'Creator'):
            x = credit.get('person', '')
            if x:
                # Names are stored as 'Last, First'
                x = ' '.join(reversed(x.split(', ')))
                authors.append(x)
    if authors:
        mi.authors = authors
    comments = d.get('comments', '')
    if comments and comments.strip():
        mi.comments = comments.strip()
    pubm, puby = d.get('publicationMonth', None), d.get('publicationYear', None)
    if puby is not None:
        from datetime import date
        from calibre.utils.date import parse_only_date
        try:
            # Default to mid-month/mid-year when the month is missing
            dt = date(puby, 6 if pubm is None else pubm, 15)
            dt = parse_only_date(str(dt))
            mi.pubdate = dt
        except Exception:
            pass
def parse_comic_comment(comment, series_index='volume'):
    '''Build a MetaInformation object from a ComicBookInfo JSON blob.

    See http://code.google.com/p/comicbookinfo/wiki/Example
    '''
    import json
    from calibre.ebooks.metadata import MetaInformation
    mi = MetaInformation(None, None)
    decoded = json.loads(comment)
    if isinstance(decoded, dict):
        for key in decoded:
            # The metadata lives under a versioned 'ComicBookInfo/...' key
            if key.startswith('ComicBookInfo'):
                get_comic_book_info(decoded[key], mi, series_index=series_index)
                break
    return mi
def get_comic_metadata(stream, stream_type, series_index='volume'):
    '''Read ComicBookInfo metadata from the archive comment of a cbz/cbr.'''
    comment = None
    if stream_type == 'cbz':
        from calibre.utils.zipfile import ZipFile
        comment = ZipFile(stream).comment
    elif stream_type == 'cbr':
        from calibre.utils.unrar import comment as get_comment
        comment = get_comment(stream)
    # An empty JSON object yields a blank MetaInformation
    return parse_comic_comment(comment or b'{}', series_index=series_index)
def get_comic_images(path, tdir, first=1, last=0):  # first and last use 1 based indexing
    '''Extract comic pages [first, last] (inclusive, 1-based) from the zip or
    rar archive at *path* into *tdir*, named as zero padded page numbers.
    last <= 0 means 'through the final page'. Returns the page count.'''
    from functools import partial
    with open(path, 'rb') as f:
        fmt = archive_type(f)
    if fmt not in ('zip', 'rar'):
        return 0
    items = {}
    if fmt == 'rar':
        from calibre.utils.unrar import headers
        for h in headers(path):
            # NOTE(review): this stores a zero-arg lambda that late-binds over
            # h (each value would yield a getter for the *last* header), while
            # the zip branch stores the getter directly. Apparently only the
            # keys are consumed by find_pages() — confirm before relying on
            # the values.
            items[h['filename']] = lambda : partial(h.get, 'file_time', 0)
    else:
        from zipfile import ZipFile
        with ZipFile(path) as zf:
            for i in zf.infolist():
                items[i.filename] = partial(getattr, i, 'date_time')
    from calibre.ebooks.comic.input import find_pages
    pages = find_pages(items)
    if last <= 0:
        last = len(pages)
    pages = pages[first-1:last]
    def make_filename(num, ext):
        # Zero padded so lexical order matches page order
        return f'{num:08d}{ext}'
    if fmt == 'rar':
        all_pages = {p:i+first for i, p in enumerate(pages)}
        from calibre.utils.unrar import extract_members
        current = None
        def callback(x):
            # extract_members streams: a dict announces a new member (return
            # True to extract it), bytes are data for the current member.
            nonlocal current
            if isinstance(x, dict):
                if current is not None:
                    current.close()
                fname = x['filename']
                if fname in all_pages:
                    ext = os.path.splitext(fname)[1]
                    num = all_pages[fname]
                    current = open(os.path.join(tdir, make_filename(num, ext)), 'wb')
                    return True
                return False
            if isinstance(x, bytes):
                current.write(x)
        extract_members(path, callback)
        if current is not None:
            current.close()
    else:
        import shutil
        with ZipFile(path) as zf:
            for i, name in enumerate(pages):
                num = i + first
                ext = os.path.splitext(name)[1]
                with open(os.path.join(tdir, make_filename(num, ext)), 'wb') as dest, zf.open(name) as src:
                    shutil.copyfileobj(src, dest)
    return len(pages)
| 9,205 | Python | .py | 247 | 27.684211 | 107 | 0.563706 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,506 | extz.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/extz.py | __license__ = 'GPL v3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
'''
Read meta information from extZ (TXTZ, HTMLZ...) files.
'''
import io
import os
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.zipfile import ZipFile, safe_replace
def get_metadata(stream, extract_cover=True):
    '''
    Return metadata as a L{MetaInfo} object
    '''
    mi = MetaInformation(_('Unknown'), [_('Unknown')])
    stream.seek(0)
    try:
        with ZipFile(stream) as zf:
            opf_name = get_first_opf_name(zf)
            with zf.open(opf_name) as opf_stream:
                opf = OPF(opf_stream)
            mi = opf.to_book_metadata()
            if extract_cover:
                # Try the standard locations first, then progressively more
                # obscure fallbacks (each for/else runs when no break fired).
                cover_href = opf.raster_cover or opf.guide_raster_cover
                if not cover_href:
                    for meta in opf.metadata.xpath('//*[local-name()="meta" and @name="cover"]'):
                        val = meta.get('content')
                        if val.rpartition('.')[2].lower() in {'jpeg', 'jpg', 'png'}:
                            cover_href = val
                            break
                    else:
                        for val in opf.guide_cover_path(opf.root):  # this is needed because the cover is not in the manifest
                            if val.rpartition('.')[2].lower() in {'jpeg', 'jpg', 'png'}:
                                cover_href = val
                                break
                        else:
                            # txtz files use a special element for cover
                            for cpath in opf.root.xpath('//cover-relpath-from-base'):
                                if cpath.text:
                                    cover_href = cpath.text
                                    break
                if cover_href:
                    try:
                        mi.cover_data = (os.path.splitext(cover_href)[1], zf.read(cover_href))
                    except Exception:
                        # Missing/corrupt cover entry: return metadata anyway
                        pass
    except Exception:
        return mi
    return mi
def set_metadata(stream, mi):
    '''
    Write the metadata in ``mi`` into the OPF inside the extZ (TXTZ/HTMLZ)
    archive ``stream``, optionally replacing the cover image as well.
    '''
    replacements = {}
    # Get the OPF in the archive.
    with ZipFile(stream) as zf:
        opf_path = get_first_opf_name(zf)
        opf_stream = io.BytesIO(zf.read(opf_path))
        opf = OPF(opf_stream)
    # Cover: prefer the raw cover data on mi, falling back to reading the
    # file named by mi.cover.
    new_cdata = None
    try:
        new_cdata = mi.cover_data[1]
        if not new_cdata:
            raise Exception('no cover')
    except Exception:
        try:
            with open(mi.cover, 'rb') as f:
                new_cdata = f.read()
        except Exception:
            pass
    # Previously cpath was left unbound when there was no new cover and the
    # cleanup below relied on a silently swallowed NameError.
    cpath = None
    if new_cdata:
        cpath = opf.raster_cover
        if not cpath:
            cpath = 'cover.jpg'
        new_cover = _write_new_cover(new_cdata, cpath)
        replacements[cpath] = open(new_cover.name, 'rb')
        mi.cover = cpath
    # Update the metadata.
    opf.smart_update(mi, replace_metadata=True)
    newopf = io.BytesIO(opf.render())
    safe_replace(stream, opf_path, newopf, extra_replacements=replacements, add_missing=True)
    # Cleanup temporary files.
    try:
        if cpath is not None:
            replacements[cpath].close()
            os.remove(replacements[cpath].name)
    except Exception:
        pass
def get_first_opf_name(zf):
    # Only OPF files at the archive root are considered; the alphabetically
    # first one is returned so the choice is deterministic.
    candidates = [n for n in zf.namelist() if n.endswith('.opf') and '/' not in n]
    if not candidates:
        raise Exception('No OPF found')
    return min(candidates)
def _write_new_cover(new_cdata, cpath):
    # Write the raw cover bytes to a persistent temporary file whose suffix
    # matches cpath's extension, and return the (closed) temp file object.
    from calibre.utils.img import save_cover_data_to
    new_cover = PersistentTemporaryFile(suffix=os.path.splitext(cpath)[1])
    # Close first: save_cover_data_to writes by name, not via the handle.
    new_cover.close()
    save_cover_data_to(new_cdata, new_cover.name)
    return new_cover
| 3,848 | Python | .py | 103 | 26.31068 | 125 | 0.546917 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,507 | haodoo.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/haodoo.py | '''
Read meta information from Haodoo.net pdb files.
'''
__license__ = 'GPL v3'
__copyright__ = '2012, Kan-Ru Chen <kanru@kanru.info>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.pdb.haodoo.reader import Reader
from calibre.ebooks.pdb.header import PdbHeaderReader
def get_metadata(stream, extract_cover=True):
    '''
    Return metadata as a L{MetaInfo} object
    '''
    stream.seek(0)
    header = PdbHeaderReader(stream)
    return Reader(header, stream, None, None).get_metadata()
| 532 | Python | .py | 16 | 30.125 | 54 | 0.723529 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,508 | cli.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/cli.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
ebook-meta
'''
import os
import sys
import unicodedata
from calibre import prints
from calibre.customize.ui import force_identifiers, metadata_readers, metadata_writers
from calibre.ebooks.lrf.meta import LRFMetaFile
from calibre.ebooks.metadata import MetaInformation, authors_to_sort_string, string_to_authors, title_sort
from calibre.ebooks.metadata.meta import get_metadata, set_metadata
from calibre.utils.config import StringConfig
from calibre.utils.date import parse_date
from polyglot.builtins import iteritems
USAGE=_('%prog ebook_file [options]\n') + \
_('''
Read/Write metadata from/to e-book files.
Supported formats for reading metadata: {0}
Supported formats for writing metadata: {1}
Different file types support different kinds of metadata. If you try to set
some metadata on a file type that does not support it, the metadata will be
silently ignored.
''')
def config():
    # Declare every command line option ebook-meta understands; the returned
    # StringConfig is also used by do_set_metadata()/main() to enumerate
    # which options were supplied.
    c = StringConfig('')
    c.add_opt('title', ['-t', '--title'],
              help=_('Set the title.'))
    c.add_opt('authors', ['-a', '--authors'],
              help=_('Set the authors. Multiple authors should be separated '
                     'by the & character. Author names should be in the order '
                     'Firstname Lastname.'))
    c.add_opt('title_sort', ['--title-sort'],
              help=_('The version of the title to be used for sorting. '
                     'If unspecified, and the title is specified, it will '
                     'be auto-generated from the title.'))
    c.add_opt('author_sort', ['--author-sort'],
              help=_('String to be used when sorting by author. '
                     'If unspecified, and the author(s) are specified, it will '
                     'be auto-generated from the author(s).'))
    c.add_opt('cover', ['--cover'],
              help=_('Set the cover to the specified file.'))
    c.add_opt('comments', ['-c', '--comments'],
              help=_('Set the e-book description.'))
    c.add_opt('publisher', ['-p', '--publisher'],
              help=_('Set the e-book publisher.'))
    c.add_opt('category', ['--category'],
              help=_('Set the book category.'))
    c.add_opt('series', ['-s', '--series'],
              help=_('Set the series this e-book belongs to.'))
    c.add_opt('series_index', ['-i', '--index'],
              help=_('Set the index of the book in this series.'))
    c.add_opt('rating', ['-r', '--rating'],
              help=_('Set the rating. Should be a number between 1 and 5.'))
    c.add_opt('isbn', ['--isbn'],
              help=_('Set the ISBN of the book.'))
    c.add_opt('identifiers', ['--identifier'], action='append',
              help=_('Set the identifiers for the book, can be specified multiple times.'
                     ' For example: --identifier uri:https://acme.com --identifier isbn:12345'
                     ' To remove an identifier, specify no value, --identifier isbn:'
                     ' Note that for EPUB files, an identifier marked as the package identifier cannot be removed.'))
    c.add_opt('tags', ['--tags'],
              help=_('Set the tags for the book. Should be a comma separated list.'))
    c.add_opt('book_producer', ['-k', '--book-producer'],
              help=_('Set the book producer.'))
    c.add_opt('language', ['-l', '--language'],
              help=_('Set the language.'))
    c.add_opt('pubdate', ['-d', '--date'],
              help=_('Set the published date.'))
    c.add_opt('get_cover', ['--get-cover'],
              help=_('Get the cover from the e-book and save it at as the '
                     'specified file.'))
    c.add_opt('to_opf', ['--to-opf'],
              help=_('Specify the name of an OPF file. The metadata will '
                     'be written to the OPF file.'))
    c.add_opt('from_opf', ['--from-opf'],
              help=_('Read metadata from the specified OPF file and use it to '
                     'set metadata in the e-book. Metadata specified on the '
                     'command line will override metadata read from the OPF file'))
    c.add_opt('lrf_bookid', ['--lrf-bookid'],
              help=_('Set the BookID in LRF files'))
    return c
def filetypes():
    # Union of every file type any metadata reader plugin can handle.
    ans = set()
    for reader in metadata_readers():
        ans.update(reader.file_types)
    return ans
def option_parser():
    # Build the option parser, filling the usage string with the readable
    # and writable file type lists.
    writers = set()
    for writer in metadata_writers():
        writers.update(writer.file_types)
    readable = ', '.join(sorted(filetypes()))
    writable = ', '.join(sorted(writers))
    return config().option_parser(USAGE.format(readable, writable))
def normalize(x):
    # NFC-compose command line arguments so that e.g. 'e' plus a combining
    # accent compares equal to the precomposed character.
    return unicodedata.normalize('NFC', x)
def do_set_metadata(opts, mi, stream, stream_type):
    '''
    Apply the metadata given on the command line (and optionally from an OPF
    file) on top of ``mi`` and write the result into ``stream``.
    '''
    mi = MetaInformation(mi)
    for x in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, x, None)
    from_opf = getattr(opts, 'from_opf', None)
    if from_opf is not None:
        from calibre.ebooks.metadata.opf2 import OPF
        # Use a context manager: previously the file handle was leaked.
        with open(from_opf, 'rb') as opf_file:
            opf_mi = OPF(opf_file).to_book_metadata()
        mi.smart_update(opf_mi)
    # Copy every simple option straight onto mi; the fields below need
    # special handling and are skipped here.
    for pref in config().option_set.preferences:
        if pref.name in ('to_opf', 'from_opf', 'authors', 'title_sort',
                         'author_sort', 'get_cover', 'cover', 'tags',
                         'lrf_bookid', 'identifiers'):
            continue
        val = getattr(opts, pref.name, None)
        if val is not None:
            setattr(mi, pref.name, val)
    if getattr(opts, 'authors', None) is not None:
        mi.authors = string_to_authors(opts.authors)
        mi.author_sort = authors_to_sort_string(mi.authors)
    if getattr(opts, 'author_sort', None) is not None:
        mi.author_sort = opts.author_sort
    if getattr(opts, 'title_sort', None) is not None:
        mi.title_sort = opts.title_sort
    elif getattr(opts, 'title', None) is not None:
        mi.title_sort = title_sort(opts.title)
    if getattr(opts, 'tags', None) is not None:
        mi.tags = [t.strip() for t in opts.tags.split(',')]
    if getattr(opts, 'series', None) is not None:
        mi.series = opts.series.strip()
    if getattr(opts, 'series_index', None) is not None:
        mi.series_index = float(opts.series_index.strip())
    if getattr(opts, 'pubdate', None) is not None:
        mi.pubdate = parse_date(opts.pubdate, assume_utc=False, as_utc=False)
    if getattr(opts, 'identifiers', None):
        # Each --identifier is scheme:value; an empty value removes the
        # identifier. New values are merged over the existing ones.
        val = {k.strip():v.strip() for k, v in (x.partition(':')[0::2] for x in opts.identifiers)}
        if val:
            orig = mi.get_identifiers()
            orig.update(val)
            val = {k:v for k, v in iteritems(orig) if k and v}
        mi.set_identifiers(val)
    if getattr(opts, 'cover', None) is not None:
        ext = os.path.splitext(opts.cover)[1].replace('.', '').upper()
        # Use a context manager: previously the file handle was leaked.
        with open(opts.cover, 'rb') as cover_file:
            mi.cover_data = (ext, cover_file.read())
    with force_identifiers:
        set_metadata(stream, mi, stream_type)
def main(args=sys.argv):
    # Entry point for the ebook-meta command line tool. Returns a process
    # exit code (0 on success, 1 when no file was given).
    parser = option_parser()
    opts, args = parser.parse_args(list(map(normalize, args)))
    if len(args) < 2:
        parser.print_help()
        prints(_('No file specified'), file=sys.stderr)
        return 1
    path = args[1]
    stream_type = os.path.splitext(path)[1].replace('.', '').lower()
    # Any metadata-setting option (other than the read-only to_opf/get_cover)
    # means we must open the file for writing below.
    trying_to_set = False
    for pref in config().option_set.preferences:
        if pref.name in ('to_opf', 'get_cover'):
            continue
        if getattr(opts, pref.name) is not None:
            trying_to_set = True
            break
    with open(path, 'rb') as stream:
        mi = get_metadata(stream, stream_type, force_read_metadata=True)
    if trying_to_set:
        prints(_('Original metadata')+'::')
    metadata = str(mi)
    if trying_to_set:
        # Indent the metadata dump when it is shown under a heading.
        metadata = '\t'+'\n\t'.join(metadata.split('\n'))
    prints(metadata)
    if trying_to_set:
        with open(path, 'r+b') as stream:
            do_set_metadata(opts, mi, stream, stream_type)
            stream.seek(0)
            stream.flush()
            lrf = None
            if stream_type == 'lrf':
                if opts.lrf_bookid is not None:
                    lrf = LRFMetaFile(stream)
                    lrf.book_id = opts.lrf_bookid
            # Re-read to show the metadata as it now exists on disk.
            mi = get_metadata(stream, stream_type, force_read_metadata=True)
        prints('\n' + _('Changed metadata') + '::')
        metadata = str(mi)
        metadata = '\t'+'\n\t'.join(metadata.split('\n'))
        prints(metadata)
        if lrf is not None:
            prints('\tBookID:', lrf.book_id)
    if opts.to_opf is not None:
        from calibre.ebooks.metadata.opf2 import OPFCreator
        opf = OPFCreator(os.getcwd(), mi)
        with open(opts.to_opf, 'wb') as f:
            opf.render(f)
        prints(_('OPF created in'), opts.to_opf)
    if opts.get_cover is not None:
        if mi.cover_data and mi.cover_data[1]:
            with open(opts.get_cover, 'wb') as f:
                f.write(mi.cover_data[1])
                prints(_('Cover saved to'), f.name)
        else:
            prints(_('No cover found'), file=sys.stderr)
    return 0
if __name__ == '__main__':
sys.exit(main())
| 9,186 | Python | .py | 200 | 37.11 | 117 | 0.590843 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,509 | lrx.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/lrx.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Read metadata from LRX files
'''
import struct
from zlib import decompress
from calibre.ebooks.metadata import MetaInformation, string_to_authors
from calibre.utils.xml_parse import safe_xml_fromstring
def _read(f, at, amount):
    # Seek to absolute offset ``at`` and read ``amount`` bytes.
    f.seek(at)
    return f.read(amount)
def word_be(buf):
    # Big-endian unsigned 32-bit integer from a 4-byte buffer.
    (value,) = struct.unpack('>L', buf)
    return value
def word_le(buf):
    # Little-endian unsigned 32-bit integer from a 4-byte buffer.
    (value,) = struct.unpack('<L', buf)
    return value
def short_le(buf):
    # Little-endian unsigned 16-bit integer from a 2-byte buffer.
    (value,) = struct.unpack('<H', buf)
    return value
def short_be(buf):
    # Big-endian unsigned 16-bit integer from a 2-byte buffer.
    (value,) = struct.unpack('>H', buf)
    return value
def get_metadata(f):
    # Parse the binary LRX container: walk the MP4-style box structure to
    # find the 'bbeb' box, locate the zlib-compressed XML metadata section,
    # and extract title/author/publisher/tags/language from it.
    def read(at, amount):
        return _read(f, at, amount)
    f.seek(0)
    buf = f.read(12)
    if buf[4:] == b'ftypLRX2':
        # Each box starts with a 4-byte big-endian size followed by its type;
        # skip boxes until the 'bbeb' (BroadBand eBook) box is found.
        offset = 0
        while True:
            offset += word_be(buf[:4])
            try:
                buf = read(offset, 8)
            except:
                raise ValueError('Not a valid LRX file')
            if buf[4:] == b'bbeb':
                break
        offset += 8
        buf = read(offset, 16)
        if buf[:8].decode('utf-16-le') != 'LRF\x00':
            raise ValueError('Not a valid LRX file')
        lrf_version = word_le(buf[8:12])
        # 0x4c is the fixed offset from the LRF signature to the compressed
        # metadata size field.
        offset += 0x4c
        compressed_size = short_le(read(offset, 2))
        offset += 2
        if lrf_version >= 800:
            # Newer LRF versions insert 6 extra header bytes here.
            offset += 6
            compressed_size -= 4
        uncompressed_size = word_le(read(offset, 4))
        info = decompress(f.read(compressed_size))
        if len(info) != uncompressed_size:
            raise ValueError('LRX file has malformed metadata section')
        root = safe_xml_fromstring(info)
        bi = root.find('BookInfo')
        title = bi.find('Title')
        # The 'reading' attribute holds the sortable form of the name.
        title_sort = title.get('reading', None)
        title = title.text
        author = bi.find('Author')
        author_sort = author.get('reading', None)
        mi = MetaInformation(title, string_to_authors(author.text))
        mi.title_sort, mi.author_sort = title_sort, author_sort
        author = author.text
        publisher = bi.find('Publisher')
        mi.publisher = getattr(publisher, 'text', None)
        mi.tags = [x.text for x in bi.findall('Category')]
        mi.language = root.find('DocInfo').find('Language').text
        return mi
    elif buf[4:8] == b'LRX':
        raise ValueError('Librie LRX format not supported')
    else:
        raise ValueError('Not a LRX file')
| 2,486 | Python | .py | 71 | 27.619718 | 71 | 0.598248 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,510 | fb2.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/fb2.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Roman Mukhin <ramses_ru at hotmail.com>, '\
'2008, Anatoly Shipitsin <norguhtar at gmail.com>'
'''Read meta information from fb2 files'''
import os
import random
from functools import partial
from string import ascii_letters, digits
from lxml import etree
from calibre import force_unicode, guess_all_extensions, guess_type, prints, strftime
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata import MetaInformation, check_isbn
from calibre.utils.date import parse_only_date
from calibre.utils.img import save_cover_data_to
from calibre.utils.imghdr import identify
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.binary import as_base64_unicode
NAMESPACES = {
'fb2' : 'http://www.gribuser.ru/xml/fictionbook/2.0',
'fb21' : 'http://www.gribuser.ru/xml/fictionbook/2.1',
'xlink' : 'http://www.w3.org/1999/xlink'
}
tostring = partial(etree.tostring, method='text', encoding='unicode')
def XLINK(tag):
    # Clark-notation name for an attribute in the xlink namespace.
    return '{{{}}}{}'.format(NAMESPACES['xlink'], tag)
class Context:
    '''Namespace-aware helpers for reading and mutating an FB2 element tree.'''
    def __init__(self, root):
        # Use the document's own default namespace when available, falling
        # back to the FB2 2.0 namespace.
        try:
            self.fb_ns = root.nsmap[root.prefix] or NAMESPACES['fb2']
        except Exception:
            self.fb_ns = NAMESPACES['fb2']
        self.namespaces = {
            'fb': self.fb_ns,
            'fb2': self.fb_ns,
            'xlink': NAMESPACES['xlink']
        }
    def XPath(self, *args):
        # Compile an XPath expression bound to this document's namespaces.
        return etree.XPath(*args, namespaces=self.namespaces)
    def get_or_create(self, parent, tag, attribs=None, at_start=True):
        # Return the first fb:``tag`` child of ``parent`` matching all
        # ``attribs``, creating it when absent. The default for ``attribs``
        # was a shared mutable dict; use None as the default instead.
        attribs = {} if attribs is None else attribs
        xpathstr='./fb:'+tag
        for n, v in attribs.items():
            xpathstr += f'[@{n}="{v}"]'
        ans = self.XPath(xpathstr)(parent)
        if ans:
            ans = ans[0]
        else:
            ans = self.create_tag(parent, tag, attribs, at_start)
        return ans
    def create_tag(self, parent, tag, attribs=None, at_start=True):
        # Create a namespaced fb:``tag`` child of ``parent``; ``at_start``
        # selects prepend vs append.
        ans = parent.makeelement(f'{{{self.fb_ns}}}{tag}')
        if attribs:
            ans.attrib.update(attribs)
        if at_start:
            parent.insert(0, ans)
        else:
            parent.append(ans)
        return ans
    def clear_meta_tags(self, doc, tag):
        # Remove every fb:``tag`` element from all three metadata sections.
        for parent in ('title-info', 'src-title-info', 'publish-info'):
            for x in self.XPath('//fb:%s/fb:%s'%(parent, tag))(doc):
                x.getparent().remove(x)
    def text2fb2(self, parent, text):
        # Convert plain text into FB2 markup: a <p> per non-blank line and
        # an <empty-line/> for each blank line.
        lines = text.split('\n')
        for line in lines:
            line = line.strip()
            if line:
                p = self.create_tag(parent, 'p', at_start=False)
                p.text = line
            else:
                self.create_tag(parent, 'empty-line', at_start=False)
def get_fb2_data(stream):
    '''
    Return (raw FB2 XML bytes, zip member name). The member name is None
    when ``stream`` is a bare .fb2 file rather than a zip archive.
    '''
    from calibre.utils.zipfile import BadZipfile, ZipFile
    pos = stream.tell()
    try:
        zf = ZipFile(stream)
    except BadZipfile:
        # Not a zip: rewind and treat the stream as raw FB2 XML.
        stream.seek(pos)
        ans = stream.read()
        zip_file_name = None
    else:
        # Close the archive when done (ZipFile does not close a caller
        # supplied stream). Previously neither the ZipFile nor the opened
        # member handle was ever closed.
        with zf:
            names = zf.namelist()
            # Prefer .fb2 members, otherwise fall back to whatever is there.
            names = [x for x in names if x.lower().endswith('.fb2')] or names
            zip_file_name = names[0]
            ans = zf.read(zip_file_name)
    return ans, zip_file_name
def get_metadata(stream):
    ''' Return fb2 metadata as a L{MetaInformation} object '''
    root = _get_fbroot(get_fb2_data(stream)[0])
    ctx = Context(root)
    book_title = _parse_book_title(root, ctx)
    authors = _parse_authors(root, ctx) or [_('Unknown')]
    if book_title:
        book_title = str(book_title)
    else:
        # fallback for book_title: derive it from the stream's file name
        book_title = force_unicode(os.path.splitext(
            os.path.basename(getattr(stream, 'name',
                _('Unknown'))))[0])
    mi = MetaInformation(book_title, authors)
    # Each field is parsed independently; a failure in one parser must not
    # prevent the remaining fields from being read.
    for parse in (_parse_cover, _parse_comments, _parse_tags, _parse_series,
                  _parse_isbn, _parse_publisher, _parse_pubdate,
                  _parse_language):
        try:
            parse(root, mi, ctx)
        except:
            pass
    return mi
def _parse_authors(root, ctx):
    # Authors are taken from a single section only, to stay consistent:
    # <title-info> preferred, then <src-title-info>, then <document-info>.
    authors = []
    last = None
    for section in ('title-info', 'src-title-info', 'document-info'):
        for elem in ctx.XPath('//fb:%s/fb:author'%section)(root):
            last = _parse_author(elem, ctx)
            if last:
                authors.append(last)
        if last:
            break
    # if nothing was found anywhere, report an unknown author
    return authors if authors else [_('Unknown')]
def _parse_author(elm_author, ctx):
    """Return the display name of one <author> element as a string,
    assembled from first/middle/last name with a nickname fallback."""
    xp_templ = 'normalize-space(fb:%s/text())'
    author = ctx.XPath(xp_templ % 'first-name')(elm_author)
    lname = ctx.XPath(xp_templ % 'last-name')(elm_author)
    mname = ctx.XPath(xp_templ % 'middle-name')(elm_author)
    if mname:
        author = (author + ' ' + mname).strip()
    if lname:
        author = (author + ' ' + lname).strip()
    # fallback to nickname
    if not author:
        nname = ctx.XPath(xp_templ % 'nickname')(elm_author)
        if nname:
            author = nname
    return str(author)
def _parse_book_title(root, ctx):
    # <title-info> has priority (and is mandatory in fb2); <publish-info>
    # and <src-title-info> serve as fallbacks. The order matters and
    # deliberately differs from the fb2 document order.
    xp_ti = '//fb:title-info/fb:book-title/text()'
    xp_pi = '//fb:publish-info/fb:book-title/text()'
    xp_si = '//fb:src-title-info/fb:book-title/text()'
    query = f'normalize-space({xp_ti}|{xp_pi}|{xp_si})'
    return ctx.XPath(query)(root)
def _parse_cover(root, mi, ctx):
    # Pick the cover image id from <title-info> (or <src-title-info>);
    # a missing or unreadable cover is silently ignored.
    imgid = ctx.XPath('substring-after(string(//fb:coverpage/fb:image/@xlink:href), "#")')(root)
    if not imgid:
        return
    try:
        _parse_cover_data(root, imgid, mi, ctx)
    except:
        pass
def _parse_cover_data(root, imgid, mi, ctx):
    # Decode the base64 <binary id=imgid> element into mi.cover_data when
    # its declared (or guessed) mime type is an image type.
    from calibre.ebooks.fb2 import base64_decode
    elm_binary = ctx.XPath('//fb:binary[@id="%s"]'%imgid)(root)
    if elm_binary:
        mimetype = elm_binary[0].get('content-type', 'image/jpeg')
        mime_extensions = guess_all_extensions(mimetype)
        if not mime_extensions and mimetype.startswith('image/'):
            # The declared mime type is unknown; try guessing from the id,
            # which often carries a file-name-like extension.
            mimetype_fromid = guess_type(imgid)[0]
            if mimetype_fromid and mimetype_fromid.startswith('image/'):
                mime_extensions = guess_all_extensions(mimetype_fromid)
        if mime_extensions:
            pic_data = elm_binary[0].text
            if pic_data:
                cdata = base64_decode(pic_data.strip())
                # Detect the real format from the decoded bytes rather than
                # trusting the declared mime type.
                fmt = identify(cdata)[0]
                mi.cover_data = (fmt, cdata)
        else:
            prints(f"WARNING: Unsupported coverpage mime-type '{mimetype}' (id=#{imgid})")
def _parse_tags(root, mi, ctx):
    # Genres are read from one section only for consistency: <title-info>
    # preferred, <src-title-info> as fallback.
    for section in ('title-info', 'src-title-info'):
        genres = ctx.XPath('//fb:%s/fb:genre/text()' % section)(root)
        if genres:
            mi.tags = [str(g) for g in genres]
            break
def _parse_series(root, mi, ctx):
    # calibre supports only 1 series: use the 1-st one
    # pick up sequence but only from 1 section in preferred order
    # except <src-title-info>
    xp_ti = '//fb:title-info/fb:sequence[1]'
    xp_pi = '//fb:publish-info/fb:sequence[1]'
    elms_sequence = ctx.XPath(f'{xp_ti}|{xp_pi}')(root)
    if elms_sequence:
        mi.series = elms_sequence[0].get('name', None)
        if mi.series:
            # Coerce the 'number' attribute to a float, keeping at most two
            # components; any malformed/missing value is silently ignored.
            try:
                mi.series_index = float('.'.join(elms_sequence[0].get('number', None).split()[:2]))
            except Exception:
                pass
def _parse_isbn(root, mi, ctx):
    isbn = ctx.XPath('normalize-space(//fb:publish-info/fb:isbn/text())')(root)
    if isbn:
        # Some people cram several comma-separated ISBNs into this
        # single-value field; keep only the first one.
        isbn = isbn.partition(',')[0]
        if check_isbn(isbn):
            mi.isbn = isbn
def _parse_comments(root, mi, ctx):
    # The annotation is read from a single section: <title-info> preferred,
    # <src-title-info> as fallback.
    for section in ('title-info', 'src-title-info'):
        found = ctx.XPath('//fb:%s/fb:annotation' % section)(root)
        if found:
            mi.comments = tostring(found[0])
            # TODO: tags i18n, xslt?
            break
def _parse_publisher(root, mi, ctx):
    # Publisher name comes from <publish-info> only.
    name = ctx.XPath('string(//fb:publish-info/fb:publisher/text())')(root)
    if name:
        mi.publisher = name
def _parse_pubdate(root, mi, ctx):
    # XPath number() yields NaN for a missing or non-numeric <year>; NaN is
    # not an integer, so the guard below also filters out absent values.
    year = ctx.XPath('number(//fb:publish-info/fb:year/text())')(root)
    if float.is_integer(year):
        # only year is available, so use 2nd of June
        mi.pubdate = parse_only_date(str(int(year)))
def _parse_language(root, mi, ctx):
    # Single language code from <title-info>; mirrored into both fields.
    lang = ctx.XPath('string(//fb:title-info/fb:lang/text())')(root)
    if lang:
        mi.language = lang
        mi.languages = [lang]
def _get_fbroot(raw):
    # Decode to unicode (dropping any encoding declaration), parse, and
    # repair missing namespaces before handing the tree to callers.
    unicode_raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]
    return ensure_namespace(safe_xml_fromstring(unicode_raw))
def _set_title(title_info, mi, ctx):
    # Replace any existing <book-title> elements with mi.title.
    if mi.is_null('title'):
        return
    ctx.clear_meta_tags(title_info, 'book-title')
    ctx.get_or_create(title_info, 'book-title').text = mi.title
def _set_comments(title_info, mi, ctx):
    # Replace the <annotation> with mi.comments converted from HTML to
    # plain text and re-marked-up as FB2 paragraphs.
    if mi.is_null('comments'):
        return
    from calibre.utils.html2text import html2text
    ctx.clear_meta_tags(title_info, 'annotation')
    annotation = ctx.get_or_create(title_info, 'annotation')
    ctx.text2fb2(annotation, html2text(mi.comments))
def _set_authors(title_info, mi, ctx):
    # Replace all <author> elements with ones built from mi.authors. Each
    # display name is split on whitespace into first/middle/last parts; a
    # single-word name is stored as a <nickname>.
    if not mi.is_null('authors'):
        ctx.clear_meta_tags(title_info, 'author')
        # reversed(): create_tag prepends, so iterating backwards keeps the
        # original author order in the document.
        for author in reversed(mi.authors):
            author_parts = author.split()
            if not author_parts:
                continue
            atag = ctx.create_tag(title_info, 'author')
            if len(author_parts) == 1:
                ctx.create_tag(atag, 'nickname').text = author
            else:
                ctx.create_tag(atag, 'first-name').text = author_parts[0]
                author_parts = author_parts[1:]
                # With three or more words, the second becomes a middle name
                # and everything remaining joins into the last name.
                if len(author_parts) > 1:
                    ctx.create_tag(atag, 'middle-name', at_start=False).text = author_parts[0]
                    author_parts = author_parts[1:]
                if author_parts:
                    ctx.create_tag(atag, 'last-name', at_start=False).text = ' '.join(author_parts)
def _set_publisher(publish_info, mi, ctx):
    # Replace any existing <publisher> elements with mi.publisher.
    if not mi.is_null('publisher'):
        ctx.clear_meta_tags(publish_info, 'publisher')
        ctx.create_tag(publish_info, 'publisher').text = mi.publisher
def _set_pubdate(publish_info, mi, ctx):
    # Replace any existing <year> elements with the publication year.
    if not mi.is_null('pubdate'):
        ctx.clear_meta_tags(publish_info, 'year')
        ctx.create_tag(publish_info, 'year').text = strftime('%Y', mi.pubdate)
def _set_tags(title_info, mi, ctx):
    # Replace all <genre> elements with one element per calibre tag.
    if mi.is_null('tags'):
        return
    ctx.clear_meta_tags(title_info, 'genre')
    for name in mi.tags:
        ctx.create_tag(title_info, 'genre').text = name
def _set_series(title_info, mi, ctx):
    '''Write mi.series / mi.series_index into a <sequence> element.'''
    if mi.is_null('series'):
        return
    ctx.clear_meta_tags(title_info, 'sequence')
    seq = ctx.get_or_create(title_info, 'sequence')
    seq.set('name', mi.series)
    try:
        seq.set('number', '%g'%mi.series_index)
    except (TypeError, ValueError):
        # series_index may be None or non-numeric; previously a bare
        # except: also swallowed unrelated errors.
        seq.set('number', '1')
def _rnd_name(size=8, chars=ascii_letters + digits):
return ''.join(random.choice(chars) for x in range(size))
def _rnd_pic_file_name(prefix='calibre_cover_', size=32, ext='jpg'):
    # e.g. calibre_cover_<32 random alphanumerics>.jpg
    return f'{prefix}{_rnd_name(size=size)}.{ext}'
def _encode_into_jpeg(data):
    # Re-encode arbitrary cover image data (via save_cover_data_to) and
    # return it base64-encoded for embedding in a <binary> element.
    data = save_cover_data_to(data)
    return as_base64_unicode(data)
def _set_cover(title_info, mi, ctx):
    # Store mi.cover_data as the coverpage: reuse an existing xlink:href
    # target when present, otherwise invent a random file name, then write
    # the JPEG-encoded image into the matching <binary> element.
    if not mi.is_null('cover_data') and mi.cover_data[1]:
        coverpage = ctx.get_or_create(title_info, 'coverpage')
        cim_tag = ctx.get_or_create(coverpage, 'image')
        if XLINK('href') in cim_tag.attrib:
            # Strip the leading '#' from the internal reference.
            cim_filename = cim_tag.attrib[XLINK('href')][1:]
        else:
            cim_filename = _rnd_pic_file_name('cover')
            cim_tag.attrib[XLINK('href')] = '#' + cim_filename
        fb2_root = cim_tag.getroottree().getroot()
        cim_binary = ctx.get_or_create(fb2_root, 'binary', attribs={'id': cim_filename}, at_start=False)
        cim_binary.attrib['content-type'] = 'image/jpeg'
        cim_binary.text = _encode_into_jpeg(mi.cover_data[1])
def set_metadata(stream, mi, apply_null=False, update_timestamp=False):
    # Rewrite the FB2 (or FB2-in-zip) in ``stream`` with the metadata from
    # ``mi``, preserving the original indentation of the title-info block.
    stream.seek(0)
    raw, zip_file_name = get_fb2_data(stream)
    root = _get_fbroot(raw)
    ctx = Context(root)
    desc = ctx.get_or_create(root, 'description')
    ti = ctx.get_or_create(desc, 'title-info')
    pi = ctx.get_or_create(desc, 'publish-info')
    # Remember the whitespace text node so re-inserted children keep the
    # original indentation.
    indent = ti.text
    _set_comments(ti, mi, ctx)
    _set_series(ti, mi, ctx)
    _set_tags(ti, mi, ctx)
    _set_authors(ti, mi, ctx)
    _set_title(ti, mi, ctx)
    _set_publisher(pi, mi, ctx)
    _set_pubdate(pi, mi, ctx)
    _set_cover(ti, mi, ctx)
    for child in ti:
        child.tail = indent
    # Apparently there exists FB2 reading software that chokes on the use of
    # single quotes in xml declaration. Sigh. See
    # https://www.mobileread.com/forums/showthread.php?p=2273184#post2273184
    raw = b'<?xml version="1.0" encoding="UTF-8"?>\n'
    raw += etree.tostring(root, method='xml', encoding='utf-8', xml_declaration=False)
    stream.seek(0)
    stream.truncate()
    if zip_file_name:
        # The input was a zip archive: write the result back as one.
        from calibre.utils.zipfile import ZipFile
        with ZipFile(stream, 'w') as zf:
            zf.writestr(zip_file_name, raw)
    else:
        stream.write(raw)
def ensure_namespace(doc):
    # Workaround for broken FB2 files produced by convertonlinefree.com. See
    # https://bugs.launchpad.net/bugs/1404701
    # Such files carry <description>/<body> elements with an empty xmlns;
    # detect the bare tags and strip the bogus declarations textually.
    has_bare_tags = any(
        '{' not in el.tag
        for tag in ('description', 'body')
        for el in doc.findall(tag)
    )
    if has_bare_tags:
        import re
        raw = etree.tostring(doc, encoding='unicode')
        raw = re.sub(r'''<(description|body)\s+xmlns=['"]['"]>''', r'<\1>', raw)
        doc = safe_xml_fromstring(raw)
    return doc
| 15,214 | Python | .py | 379 | 32.44591 | 126 | 0.610746 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,511 | opf3.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/opf3.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import json
import re
from collections import defaultdict, namedtuple
from contextlib import suppress
from functools import wraps
from operator import attrgetter
from lxml import etree
from calibre import prints
from calibre.ebooks.metadata import authors_to_string, check_isbn, fmt_sidx, string_to_authors
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.json_codec import decode_is_multiple, encode_is_multiple, object_to_unicode
from calibre.ebooks.metadata.utils import create_manifest_item, ensure_unique, normalize_languages, parse_opf, pretty_print_opf
from calibre.ebooks.oeb.base import DC, OPF, OPF2_NSMAP
from calibre.utils.config import from_json, to_json
from calibre.utils.date import fix_only_date, is_date_undefined, isoformat, utcnow, w3cdtf
from calibre.utils.date import parse_date as parse_date_
from calibre.utils.iso8601 import parse_iso8601
from calibre.utils.localization import canonicalize_lang
from polyglot.builtins import iteritems
# Utils {{{
_xpath_cache = {}
_re_cache = {}
def uniq(vals):
    '''Return the elements of vals with duplicates removed, keeping the
    first occurrence of each and preserving order. A falsy vals yields [].'''
    seen = set()
    out = []
    for v in (vals or ()):
        if v not in seen:
            seen.add(v)
            out.append(v)
    return out
def dump_dict(cats):
    # Serialize a (possibly None) mapping to JSON, keeping non-ASCII
    # characters literal and skipping unserializable keys.
    return json.dumps(object_to_unicode(cats or {}), ensure_ascii=False, skipkeys=True)
def XPath(x):
    # Compile-once cache of XPath expressions, keyed by the expression text.
    if x not in _xpath_cache:
        _xpath_cache[x] = etree.XPath(x, namespaces=OPF2_NSMAP)
    return _xpath_cache[x]
def regex(r, flags=0):
    # Compile-once cache of regular expressions, keyed by (pattern, flags).
    key = (r, flags)
    if key not in _re_cache:
        _re_cache[key] = re.compile(r, flags)
    return _re_cache[key]
def remove_refines(e, refines):
    # Detach every <meta refines="#id"> element targeting e, then drop the
    # refines-map entry for e's id.
    eid = e.get('id')
    for meta in refines[eid]:
        meta.getparent().remove(meta)
    refines.pop(eid, None)
def remove_element(e, refines):
    # Remove e from its parent along with all refines metadata targeting it.
    remove_refines(e, refines)
    e.getparent().remove(e)
def properties_for_id(item_id, refines):
    # Map property name -> stripped text for every refine targeting item_id;
    # later refines for the same property win. Falsy ids yield {}.
    if not item_id:
        return {}
    ans = {}
    for elem in refines[item_id]:
        prop = elem.get('property')
        if not prop:
            continue
        text = (elem.text or '').strip()
        if text:
            ans[prop] = text
    return ans
def properties_for_id_with_scheme(item_id, prefixes, refines):
    # Like properties_for_id(), but keeps every value and resolves the
    # scheme attribute: returns property -> list of
    # (scheme namespace or None, scheme name, value) triples.
    ans = defaultdict(list)
    if item_id:
        for elem in refines[item_id]:
            key = elem.get('property')
            if key:
                val = (elem.text or '').strip()
                if val:
                    scheme = elem.get('scheme') or None
                    scheme_ns = None
                    if scheme is not None:
                        # A prefixed scheme (p:r) is expanded through the
                        # document's declared prefixes when known.
                        p, r = scheme.partition(':')[::2]
                        if p and r:
                            ns = prefixes.get(p)
                            if ns:
                                scheme_ns = ns
                                scheme = r
                    ans[key].append((scheme_ns, scheme, val))
    return ans
def getroot(elem):
    # Walk getparent() links until the top of the tree is reached.
    parent = elem.getparent()
    while parent is not None:
        elem, parent = parent, parent.getparent()
    return elem
def ensure_id(elem):
    # Return elem's id attribute, generating and assigning a new one that is
    # unique across the whole document when elem has no id yet.
    root = getroot(elem)
    eid = elem.get('id')
    if not eid:
        eid = ensure_unique('id', frozenset(XPath('//*/@id')(root)))
        elem.set('id', eid)
    return eid
def normalize_whitespace(text):
    # Collapse internal whitespace runs to single spaces and trim the ends;
    # falsy input (None, '') is returned unchanged.
    return re.sub(r'\s+', ' ', text).strip() if text else text
def simple_text(f):
    # Decorator: whitespace-normalize the wrapped function's return value.
    @wraps(f)
    def wrapper(*args, **kw):
        return normalize_whitespace(f(*args, **kw))
    return wrapper
def items_with_property(root, q, prefixes=None):
    # Yield manifest items whose properties attribute contains q, comparing
    # with all prefixes expanded.
    if prefixes is None:
        prefixes = read_prefixes(root)
    # NOTE(review): the query is expanded against known_prefixes while item
    # properties use the document's prefixes — presumably intentional
    # (callers pass well-known vocabulary terms); confirm before changing.
    q = expand_prefix(q, known_prefixes).lower()
    for item in XPath("./opf:manifest/opf:item[@properties]")(root):
        for prop in (item.get('properties') or '').lower().split():
            prop = expand_prefix(prop, prefixes)
            if prop == q:
                yield item
                break
# }}}
# Prefixes {{{
# http://www.idpf.org/epub/vocab/package/pfx/
reserved_prefixes = {
'dcterms': 'http://purl.org/dc/terms/',
'epubsc': 'http://idpf.org/epub/vocab/sc/#',
'marc': 'http://id.loc.gov/vocabulary/',
'media': 'http://www.idpf.org/epub/vocab/overlays/#',
'onix': 'http://www.editeur.org/ONIX/book/codelists/current.html#',
'rendition':'http://www.idpf.org/vocab/rendition/#',
'schema': 'http://schema.org/',
'xsd': 'http://www.w3.org/2001/XMLSchema#',
}
CALIBRE_PREFIX = 'https://calibre-ebook.com'
known_prefixes = reserved_prefixes.copy()
known_prefixes['calibre'] = CALIBRE_PREFIX
def parse_prefixes(x):
    # Parse an EPUB package prefix attribute ("name: uri name: uri ...")
    # into a {name: uri} mapping.
    ans = {}
    for m in re.finditer(r'(\S+): \s*(\S+)', x):
        ans[m.group(1)] = m.group(2)
    return ans
def read_prefixes(root):
    # Document-declared prefixes layered over the spec-reserved ones.
    return {**reserved_prefixes, **parse_prefixes(root.get('prefix') or '')}
def expand_prefix(raw, prefixes):
    # Replace every "prefix:rest" occurrence with "uri:rest" when the prefix
    # is known; unknown prefixes are left as-is.
    def replace(m):
        return prefixes.get(m.group(1), m.group(1)) + ':' + m.group(2)
    return regex(r'(\S+)\s*:\s*(\S+)').sub(replace, raw or '')
def ensure_prefix(root, prefixes, prefix, value=None):
    # Guarantee that the package element declares the given prefix, then
    # rewrite the prefix attribute, omitting declarations that merely repeat
    # a reserved prefix.
    if prefixes is None:
        prefixes = read_prefixes(root)
    prefixes[prefix] = value or reserved_prefixes[prefix]
    prefixes = {k:v for k, v in iteritems(prefixes) if reserved_prefixes.get(k) != v}
    if prefixes:
        root.set('prefix', ' '.join(f'{k}: {v}' for k, v in iteritems(prefixes)))
    else:
        root.attrib.pop('prefix', None)
# }}}
# Refines {{{
def read_refines(root):
    # Map element id -> list of <meta refines="#id"> elements in document
    # order; only fragment ('#...') references are considered.
    ans = defaultdict(list)
    for meta in XPath('./opf:metadata/opf:meta[@refines]')(root):
        target = meta.get('refines') or ''
        if target.startswith('#'):
            ans[target[1:]].append(meta)
    return ans
def refdef(prop, val, scheme=None):
    # Convenience (property, value, scheme) triple for set_refines().
    return prop, val, scheme
def set_refines(elem, existing_refines, *new_refines):
    # Replace all refines metadata for elem with the given (prop, val,
    # scheme) triples, inserting the new <meta> elements right after elem.
    eid = ensure_id(elem)
    remove_refines(elem, existing_refines)
    # reversed(): each insert goes at index+1, so iterating backwards keeps
    # the caller's order in the document.
    for ref in reversed(new_refines):
        prop, val, scheme = ref
        r = elem.makeelement(OPF('meta'))
        r.set('refines', '#' + eid), r.set('property', prop)
        r.text = val.strip()
        if scheme:
            r.set('scheme', scheme)
        p = elem.getparent()
        p.insert(p.index(elem)+1, r)
# }}}
# Identifiers {{{
def parse_identifier(ident, val, refines):
    # Resolve a dc:identifier element plus its text into a normalized
    # (scheme, value) pair, or (None, None) when no usable scheme exists.
    idid = ident.get('id')
    # NOTE(review): this local is computed but never used below — the scheme
    # refines are deliberately ignored (see comment further down).
    refines = refines[idid]
    scheme = None
    lval = val.lower()
    def finalize(scheme, val):
        # Normalize the scheme: drop bare URLs, collapse isbn variants and
        # validate ISBN values.
        if not scheme or not val:
            return None, None
        scheme = scheme.lower()
        if scheme in ('http', 'https'):
            return None, None
        if scheme.startswith('isbn'):
            scheme = 'isbn'
        if scheme == 'isbn':
            val = val.split(':')[-1]
            val = check_isbn(val)
            if val is None:
                return None, None
        return scheme, val
    # Try the OPF 2 style opf:scheme attribute, which will be present, for
    # example, in EPUB 3 files that have had their metadata set by an
    # application that only understands EPUB 2.
    scheme = ident.get(OPF('scheme'))
    if scheme and not lval.startswith('urn:'):
        return finalize(scheme, val)
    # Technically, we should be looking for refines that define the scheme, but
    # the IDioticPF created such a bad spec that they got their own
    # examples wrong, so I cannot be bothered doing this.
    # http://www.idpf.org/epub/301/spec/epub-publications-errata/
    # Parse the value for the scheme
    if lval.startswith('urn:'):
        val = val[4:]
    prefix, rest = val.partition(':')[::2]
    return finalize(prefix, rest)
def read_identifiers(root, prefixes, refines):
    # Collect scheme -> [values] from every non-empty dc:identifier element.
    ans = defaultdict(list)
    for ident in XPath('./opf:metadata/dc:identifier')(root):
        raw = (ident.text or '').strip()
        if not raw:
            continue
        scheme, val = parse_identifier(ident, raw, refines)
        if scheme and val:
            ans[scheme].append(val)
    return ans
def set_identifiers(root, prefixes, refines, new_identifiers, force_identifiers=False):
    '''Write the scheme->value map *new_identifiers* into the OPF. Existing
    identifiers are kept unless they are unparseable, their scheme is being
    replaced, or *force_identifiers* is True. The package identifier (the one
    named by unique-identifier) is never removed.'''
    uid = root.get('unique-identifier')
    package_identifier = None
    for ident in XPath('./opf:metadata/dc:identifier')(root):
        if uid is not None and uid == ident.get('id'):
            package_identifier = ident
            continue
        val = (ident.text or '').strip()
        if not val:
            # Empty identifiers are junk, drop them outright
            ident.getparent().remove(ident)
            continue
        scheme, val = parse_identifier(ident, val, refines)
        if not scheme or not val or force_identifiers or scheme in new_identifiers:
            remove_element(ident, refines)
            continue
    metadata = XPath('./opf:metadata')(root)[0]
    for scheme, val in iteritems(new_identifiers):
        ident = metadata.makeelement(DC('identifier'))
        ident.text = f'{scheme}:{val}'
        if package_identifier is None:
            metadata.append(ident)
        else:
            # Insert before the package identifier so it stays last
            p = package_identifier.getparent()
            p.insert(p.index(package_identifier), ident)
def identifier_writer(name):
    '''Return a writer function that replaces all dc:identifier elements of
    scheme *name* with a single new value (or just removes them when the new
    value is falsy). The package identifier is never removed.'''
    def writer(root, prefixes, refines, ival=None):
        uid = root.get('unique-identifier')
        package_identifier = None
        for ident in XPath('./opf:metadata/dc:identifier')(root):
            is_package_id = uid is not None and uid == ident.get('id')
            if is_package_id:
                package_identifier = ident
            val = (ident.text or '').strip()
            # Match both name:value text and OPF 2 style opf:scheme attributes
            if (val.startswith(name + ':') or ident.get(OPF('scheme')) == name) and not is_package_id:
                remove_element(ident, refines)
        metadata = XPath('./opf:metadata')(root)[0]
        if ival:
            ident = metadata.makeelement(DC('identifier'))
            ident.text = f'{name}:{ival}'
            if package_identifier is None:
                metadata.append(ident)
            else:
                # Keep the package identifier after any identifiers we add
                p = package_identifier.getparent()
                p.insert(p.index(package_identifier), ident)
    return writer
set_application_id = identifier_writer('calibre')
set_uuid = identifier_writer('uuid')
# }}}
# Title {{{
def find_main_title(root, refines, remove_blanks=False):
    '''Return the dc:title element holding the main title: the first one
    refined with title-type == "main", falling back to the first non-blank
    title. Optionally removes blank title elements along the way.'''
    first_title = main_title = None
    for title in XPath('./opf:metadata/dc:title')(root):
        if not title.text or not title.text.strip():
            if remove_blanks:
                remove_element(title, refines)
            continue
        if first_title is None:
            first_title = title
        props = properties_for_id(title.get('id'), refines)
        if props.get('title-type') == 'main':
            main_title = title
            break
    else:
        # No explicit main title found: fall back to the first non-blank one
        main_title = first_title
    return main_title
def find_subtitle(root, refines):
    'Return the first non-blank dc:title element refined as a subtitle, if any.'
    for candidate in XPath('./opf:metadata/dc:title')(root):
        text = candidate.text
        if not text or not text.strip():
            continue
        title_type = properties_for_id(candidate.get('id'), refines).get('title-type') or ''
        if 'subtitle' in title_type or 'sub-title' in title_type:
            return candidate
@simple_text
def read_title(root, prefixes, refines):
    '''Return the book title; any distinct subtitle is appended after a colon.'''
    main_title = find_main_title(root, refines)
    if main_title is None:
        return None
    ans = main_title.text.strip()
    st = find_subtitle(root, refines)
    # Guard against the subtitle element being the main title itself
    if st is not None and st is not main_title:
        ans += ': ' + st.text.strip()
    return ans
@simple_text
def read_title_sort(root, prefixes, refines):
    '''Return the title sort string: an EPUB 3 file-as refine on the main
    title, falling back to the legacy calibre:title_sort meta element.'''
    main_title = find_main_title(root, refines)
    if main_title is not None:
        fa = properties_for_id(main_title.get('id'), refines).get('file-as')
        if fa:
            return fa
    # Look for OPF 2.0 style title_sort
    for m in XPath('./opf:metadata/opf:meta[@name="calibre:title_sort"]')(root):
        ans = m.get('content')
        if ans:
            return ans
def set_title(root, prefixes, refines, title, title_sort=None):
    '''Set the main title (and optional file-as sort value), removing any
    subtitle, blank titles and legacy calibre:title_sort meta elements.'''
    main_title = find_main_title(root, refines, remove_blanks=True)
    st = find_subtitle(root, refines)
    if st is not None:
        remove_element(st, refines)
    if main_title is None:
        m = XPath('./opf:metadata')(root)[0]
        main_title = m.makeelement(DC('title'))
        # The main title must be the first child of <metadata>
        m.insert(0, main_title)
    main_title.text = title or None
    ts = [refdef('file-as', title_sort)] if title_sort else ()
    set_refines(main_title, refines, refdef('title-type', 'main'), *ts)
    for m in XPath('./opf:metadata/opf:meta[@name="calibre:title_sort"]')(root):
        remove_element(m, refines)
# }}}
# Languages {{{
def read_languages(root, prefixes, refines):
    'Return the canonicalized list of dc:language codes, dropping "und" and duplicates.'
    langs = []
    for node in XPath('./opf:metadata/dc:language')(root):
        code = canonicalize_lang((node.text or '').strip())
        if code and code != 'und' and code not in langs:
            langs.append(code)
    return uniq(langs)
def set_languages(root, prefixes, refines, languages):
    '''Replace all dc:language elements. The existing values are merged with
    *languages* via normalize_languages(); "und" entries are dropped unless
    nothing else remains.'''
    opf_languages = []
    for lang in XPath('./opf:metadata/dc:language')(root):
        remove_element(lang, refines)
        val = (lang.text or '').strip()
        if val:
            opf_languages.append(val)
    languages = list(filter(lambda x: x and x != 'und', normalize_languages(opf_languages, languages)))
    if not languages:
        # EPUB spec says dc:language is required
        languages = ['und']
    metadata = XPath('./opf:metadata')(root)[0]
    for lang in uniq(languages):
        l = metadata.makeelement(DC('language'))
        l.text = lang
        metadata.append(l)
# }}}
# Creator/Contributor {{{
# Creator record: seq is the display-seq refine used to order creators (default 0)
Author = namedtuple('Author', 'name sort seq', defaults=(0,))
def is_relators_role(props, q):
    '''Return True if any role refine in *props* matches the MARC relator
    code *q*; the scheme must be absent or be marc:relators.'''
    for role in props.get('role'):
        if role:
            scheme_ns, scheme, role = role
            if role.lower() == q and (scheme_ns is None or (scheme_ns, scheme) == (reserved_prefixes['marc'], 'relators')):
                return True
    return False
def read_authors(root, prefixes, refines):
    '''Return the list of Author records for this book, ordered by their
    display-seq. Creators with an explicit "aut" role (EPUB 3 refine or OPF 2
    opf:role) win; creators with no role at all are the fallback; if only
    editors ("edt") are present, they are used as a last resort.'''
    roled_authors, unroled_authors = [], []
    editors_map = {}
    def author(item, props, val):
        # Build an Author from the element text plus file-as / display-seq refines
        aus = None
        file_as = props.get('file-as')
        if file_as:
            aus = file_as[0][-1]
        else:
            aus = item.get(OPF('file-as')) or None
        seq = 0
        ds = props.get('display-seq')
        # ds may be None or malformed; keep the default seq of 0 in that case
        with suppress(Exception):
            seq = int(ds[0][-1])
        return Author(normalize_whitespace(val), normalize_whitespace(aus), seq)
    for item in XPath('./opf:metadata/dc:creator')(root):
        val = (item.text or '').strip()
        if val:
            props = properties_for_id_with_scheme(item.get('id'), prefixes, refines)
            role = props.get('role')
            opf_role = item.get(OPF('role'))
            if role:
                if is_relators_role(props, 'aut'):
                    roled_authors.append(author(item, props, val))
                if is_relators_role(props, 'edt'):
                    # See https://bugs.launchpad.net/calibre/+bug/1950579
                    a = author(item, props, val)
                    editors_map[a.name] = a
            elif opf_role:
                if opf_role.lower() == 'aut':
                    roled_authors.append(author(item, props, val))
            else:
                unroled_authors.append(author(item, props, val))
    if roled_authors or unroled_authors:
        ans = uniq(roled_authors or unroled_authors)
    else:
        ans = uniq(editors_map.values())
    ans.sort(key=attrgetter('seq'))
    return ans
def set_authors(root, prefixes, refines, authors):
    '''Replace all author/editor dc:creator elements with the given Author
    records, writing EPUB 3 marc:relators role refines and file-as sorts.'''
    ensure_prefix(root, prefixes, 'marc')
    removals = []
    # Remove existing "aut" creators; if there are none, remove "edt" ones
    # instead (they were used as the author fallback by read_authors())
    for role in ('aut', 'edt'):
        for item in XPath('./opf:metadata/dc:creator')(root):
            props = properties_for_id_with_scheme(item.get('id'), prefixes, refines)
            opf_role = item.get(OPF('role'))
            if (opf_role and opf_role.lower() != role) or (props.get('role') and not is_relators_role(props, role)):
                continue
            removals.append(item)
        if removals:
            break
    for item in removals:
        remove_element(item, refines)
    metadata = XPath('./opf:metadata')(root)[0]
    for author in authors:
        if author.name:
            a = metadata.makeelement(DC('creator'))
            aid = ensure_id(a)
            a.text = author.name
            metadata.append(a)
            m = metadata.makeelement(OPF('meta'), attrib={'refines':'#'+aid, 'property':'role', 'scheme':'marc:relators'})
            m.text = 'aut'
            metadata.append(m)
            if author.sort:
                m = metadata.makeelement(OPF('meta'), attrib={'refines':'#'+aid, 'property':'file-as'})
                m.text = author.sort
                metadata.append(m)
def read_book_producers(root, prefixes, refines):
    '''Return dc:contributor values that carry the MARC "bkp" (book producer)
    role, via either an EPUB 3 role refine or an OPF 2 opf:role attribute.'''
    ans = []
    for item in XPath('./opf:metadata/dc:contributor')(root):
        val = (item.text or '').strip()
        if val:
            props = properties_for_id_with_scheme(item.get('id'), prefixes, refines)
            role = props.get('role')
            opf_role = item.get(OPF('role'))
            # An EPUB 3 role refine takes precedence over the OPF 2 attribute
            if role:
                if is_relators_role(props, 'bkp'):
                    ans.append(normalize_whitespace(val))
            elif opf_role and opf_role.lower() == 'bkp':
                ans.append(normalize_whitespace(val))
    return ans
def set_book_producers(root, prefixes, refines, producers):
    '''Replace all "bkp"-role dc:contributor elements with *producers*,
    writing marc:relators role refines for each new entry.'''
    for item in XPath('./opf:metadata/dc:contributor')(root):
        props = properties_for_id_with_scheme(item.get('id'), prefixes, refines)
        opf_role = item.get(OPF('role'))
        # Leave contributors with a different role untouched
        if (opf_role and opf_role.lower() != 'bkp') or (props.get('role') and not is_relators_role(props, 'bkp')):
            continue
        remove_element(item, refines)
    metadata = XPath('./opf:metadata')(root)[0]
    for bkp in producers:
        if bkp:
            a = metadata.makeelement(DC('contributor'))
            aid = ensure_id(a)
            a.text = bkp
            metadata.append(a)
            m = metadata.makeelement(OPF('meta'), attrib={'refines':'#'+aid, 'property':'role', 'scheme':'marc:relators'})
            m.text = 'bkp'
            metadata.append(m)
# }}}
# Dates {{{
def parse_date(raw, is_w3cdtf=False):
    '''Parse a date string, assuming UTC. When the string carries no time
    component the result is marked as a date-only value via fix_only_date().
    :param is_w3cdtf: use the strict ISO 8601 parser instead of the fuzzy one.'''
    raw = raw.strip()
    if is_w3cdtf:
        ans = parse_iso8601(raw, assume_utc=True)
        if 'T' not in raw and ' ' not in raw:
            ans = fix_only_date(ans)
    else:
        ans = parse_date_(raw, assume_utc=True)
        # The fuzzy parser fills in a midnight time for bare dates
        if ' ' not in raw and 'T' not in raw and (ans.hour, ans.minute, ans.second) == (0, 0, 0):
            ans = fix_only_date(ans)
    return ans
def read_pubdate(root, prefixes, refines):
    'Return the first parseable dc:date as the publication date, or None.'
    for node in XPath('./opf:metadata/dc:date')(root):
        raw = (node.text or '').strip()
        if not raw:
            continue
        try:
            return parse_date(raw)
        except Exception:
            pass
def set_pubdate(root, prefixes, refines, val):
    '''Replace all dc:date elements with a single one holding *val* in ISO
    format; when *val* is undefined the elements are just removed.'''
    for date in XPath('./opf:metadata/dc:date')(root):
        remove_element(date, refines)
    if not is_date_undefined(val):
        val = isoformat(val)
        m = XPath('./opf:metadata')(root)[0]
        d = m.makeelement(DC('date'))
        d.text = val
        m.append(d)
def read_timestamp(root, prefixes, refines):
    '''Return the calibre timestamp: an EPUB 3 calibre:timestamp property
    meta, falling back to the legacy calibre:timestamp name/content meta.'''
    pq = '%s:timestamp' % CALIBRE_PREFIX
    sq = '%s:w3cdtf' % reserved_prefixes['dcterms']
    for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
        val = (meta.text or '').strip()
        if val:
            prop = expand_prefix(meta.get('property'), prefixes)
            if prop.lower() == pq:
                # Strict ISO parsing only if the scheme says dcterms:W3CDTF
                scheme = expand_prefix(meta.get('scheme'), prefixes).lower()
                try:
                    return parse_date(val, is_w3cdtf=scheme == sq)
                except Exception:
                    continue
    # Legacy OPF 2 style element; calibre always wrote W3CDTF there
    for meta in XPath('./opf:metadata/opf:meta[@name="calibre:timestamp"]')(root):
        val = meta.get('content')
        if val:
            try:
                return parse_date(val, is_w3cdtf=True)
            except Exception:
                continue
def create_timestamp(root, prefixes, m, val):
    '''Append a calibre:timestamp meta (W3CDTF scheme) holding *val* to the
    metadata element *m*; a no-op when *val* is undefined.'''
    if not is_date_undefined(val):
        # Both the calibre and dcterms prefixes must be declared on <package>
        ensure_prefix(root, prefixes, 'calibre', CALIBRE_PREFIX)
        ensure_prefix(root, prefixes, 'dcterms')
        val = w3cdtf(val)
        d = m.makeelement(OPF('meta'), attrib={'property':'calibre:timestamp', 'scheme':'dcterms:W3CDTF'})
        d.text = val
        m.append(d)
def set_timestamp(root, prefixes, refines, val):
    '''Replace any existing calibre timestamp metas (both EPUB 3 property and
    legacy name styles) with a fresh one holding *val*.'''
    pq = '%s:timestamp' % CALIBRE_PREFIX
    for meta in XPath('./opf:metadata/opf:meta')(root):
        prop = expand_prefix(meta.get('property'), prefixes)
        if prop.lower() == pq or meta.get('name') == 'calibre:timestamp':
            remove_element(meta, refines)
    create_timestamp(root, prefixes, XPath('./opf:metadata')(root)[0], val)
def read_last_modified(root, prefixes, refines):
    '''Return the first parseable dcterms:modified value, or None.'''
    pq = '%s:modified' % reserved_prefixes['dcterms']
    sq = '%s:w3cdtf' % reserved_prefixes['dcterms']
    for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
        val = (meta.text or '').strip()
        if val:
            prop = expand_prefix(meta.get('property'), prefixes)
            if prop.lower() == pq:
                # Strict ISO parsing only when the scheme is dcterms:W3CDTF
                scheme = expand_prefix(meta.get('scheme'), prefixes).lower()
                try:
                    return parse_date(val, is_w3cdtf=scheme == sq)
                except Exception:
                    continue
def set_last_modified(root, prefixes, refines, val=None):
    '''Set dcterms:modified to *val* (default: now). Reuses an existing
    un-refined dcterms:modified meta if present, otherwise creates one.'''
    pq = '%s:modified' % reserved_prefixes['dcterms']
    val = w3cdtf(val or utcnow())
    for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
        prop = expand_prefix(meta.get('property'), prefixes)
        if prop.lower() == pq:
            iid = meta.get('id')
            # Only reuse an element that is not itself refined by others
            if not iid or not refines[iid]:
                break
    else:
        # No suitable existing element: create a fresh dcterms:modified meta
        ensure_prefix(root, prefixes, 'dcterms')
        m = XPath('./opf:metadata')(root)[0]
        meta = m.makeelement(OPF('meta'), attrib={'property':'dcterms:modified', 'scheme':'dcterms:W3CDTF'})
        m.append(meta)
    meta.text = val
# }}}
# Comments {{{
def read_comments(root, prefixes, refines):
    'Concatenate all non-empty dc:description elements, newline separated.'
    pieces = []
    for node in XPath('./opf:metadata/dc:description')(root):
        if node.text:
            pieces.append('\n' + node.text.strip())
    return ''.join(pieces).strip()
def set_comments(root, prefixes, refines, val):
    '''Replace all dc:description elements with a single one holding the
    stripped *val*; removes them entirely when *val* is empty.'''
    for dc in XPath('./opf:metadata/dc:description')(root):
        remove_element(dc, refines)
    m = XPath('./opf:metadata')(root)[0]
    if val:
        val = val.strip()
        if val:
            c = m.makeelement(DC('description'))
            c.text = val
            m.append(c)
# }}}
# Publisher {{{
@simple_text
def read_publisher(root, prefixes, refines):
    '''Return the first non-empty dc:publisher value, or None.'''
    for dc in XPath('./opf:metadata/dc:publisher')(root):
        if dc.text:
            return dc.text
def set_publisher(root, prefixes, refines, val):
    '''Replace all dc:publisher elements with a single normalized one;
    removes them entirely when *val* is empty.'''
    for dc in XPath('./opf:metadata/dc:publisher')(root):
        remove_element(dc, refines)
    m = XPath('./opf:metadata')(root)[0]
    if val:
        val = val.strip()
        if val:
            c = m.makeelement(DC('publisher'))
            c.text = normalize_whitespace(val)
            m.append(c)
# }}}
# Tags {{{
def read_tags(root, prefixes, refines):
    'Read dc:subject elements, splitting comma separated values into individual tags.'
    collected = []
    for node in XPath('./opf:metadata/dc:subject')(root):
        if node.text:
            for piece in node.text.split(','):
                collected.append(normalize_whitespace(piece))
    return uniq([tag for tag in collected if tag])
def set_tags(root, prefixes, refines, val):
    '''Replace all dc:subject elements, one element per unique tag in *val*.'''
    for dc in XPath('./opf:metadata/dc:subject')(root):
        remove_element(dc, refines)
    m = XPath('./opf:metadata')(root)[0]
    if val:
        val = uniq(list(filter(None, val)))
        for x in val:
            c = m.makeelement(DC('subject'))
            c.text = normalize_whitespace(x)
            # normalize_whitespace can produce an empty string; skip those
            if c.text:
                m.append(c)
# }}}
# Rating {{{
def read_rating(root, prefixes, refines):
    '''Return the rating as a float: EPUB 3 calibre:rating property meta
    first, falling back to the legacy calibre:rating name/content meta.'''
    pq = '%s:rating' % CALIBRE_PREFIX
    for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
        val = (meta.text or '').strip()
        if val:
            prop = expand_prefix(meta.get('property'), prefixes)
            if prop.lower() == pq:
                try:
                    return float(val)
                except Exception:
                    continue
    for meta in XPath('./opf:metadata/opf:meta[@name="calibre:rating"]')(root):
        val = meta.get('content')
        if val:
            try:
                return float(val)
            except Exception:
                continue
def create_rating(root, prefixes, val):
    '''Append a calibre:rating property meta holding the string *val*.'''
    ensure_prefix(root, prefixes, 'calibre', CALIBRE_PREFIX)
    m = XPath('./opf:metadata')(root)[0]
    d = m.makeelement(OPF('meta'), attrib={'property':'calibre:rating'})
    d.text = val
    m.append(d)
def set_rating(root, prefixes, refines, val):
    '''Replace any existing rating metas (both legacy name and EPUB 3
    property styles) with a fresh one when *val* is truthy.'''
    pq = '%s:rating' % CALIBRE_PREFIX
    for meta in XPath('./opf:metadata/opf:meta[@name="calibre:rating"]')(root):
        remove_element(meta, refines)
    for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
        prop = expand_prefix(meta.get('property'), prefixes)
        if prop.lower() == pq:
            remove_element(meta, refines)
    if val:
        # Two significant digits, e.g. 4.5 -> '4.5', 5.0 -> '5'
        create_rating(root, prefixes, '%.2g' % float(val))
# }}}
# Series {{{
def read_series(root, prefixes, refines):
    '''Return a (series_name, series_index) tuple. EPUB 3 belongs-to-collection
    metas with collection-type == "series" take precedence; legacy
    calibre:series / calibre:series_index metas are the fallback. When no
    series is found returns (None, 1.0).'''
    series_index = 1.0
    for meta in XPath('./opf:metadata/opf:meta[@property="belongs-to-collection" and @id]')(root):
        val = (meta.text or '').strip()
        if val:
            props = properties_for_id(meta.get('id'), refines)
            if props.get('collection-type') == 'series':
                try:
                    series_index = float(props.get('group-position').strip())
                except Exception:
                    pass
                return normalize_whitespace(val), series_index
    for si in XPath('./opf:metadata/opf:meta[@name="calibre:series_index"]/@content')(root):
        try:
            series_index = float(si)
            break
        except Exception:
            # Was a bare "except:", which also swallowed KeyboardInterrupt
            # and SystemExit; narrowed to match the rest of this module.
            pass
    for s in XPath('./opf:metadata/opf:meta[@name="calibre:series"]/@content')(root):
        s = normalize_whitespace(s)
        if s:
            return s, series_index
    return None, series_index
def create_series(root, refines, series, series_index):
    '''Append an EPUB 3 belongs-to-collection meta for *series*, refined with
    collection-type "series" and the group-position *series_index*.'''
    m = XPath('./opf:metadata')(root)[0]
    d = m.makeelement(OPF('meta'), attrib={'property':'belongs-to-collection'})
    d.text = series
    m.append(d)
    set_refines(d, refines, refdef('collection-type', 'series'), refdef('group-position', series_index))
def set_series(root, prefixes, refines, series, series_index):
    '''Replace both legacy calibre series metas and EPUB 3
    belongs-to-collection metas with a fresh series entry.'''
    for meta in XPath('./opf:metadata/opf:meta[@name="calibre:series" or @name="calibre:series_index"]')(root):
        remove_element(meta, refines)
    for meta in XPath('./opf:metadata/opf:meta[@property="belongs-to-collection"]')(root):
        remove_element(meta, refines)
    if series:
        # fmt_sidx() renders the index the way calibre displays it (e.g. 1, 1.5)
        create_series(root, refines, series, fmt_sidx(series_index))
# }}}
# User metadata {{{
def dict_reader(name, load=json.loads, try2=True):
    '''Return a reader that loads a dict serialized into a calibre:*name*
    meta. Tries the EPUB 3 property style first and, when *try2* is True,
    falls back to the legacy name/content style. *load* deserializes the raw
    string and the result is only accepted if it is a dict.'''
    pq = f'{CALIBRE_PREFIX}:{name}'
    def reader(root, prefixes, refines):
        for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
            val = (meta.text or '').strip()
            if val:
                prop = expand_prefix(meta.get('property'), prefixes)
                if prop.lower() == pq:
                    try:
                        ans = load(val)
                        if isinstance(ans, dict):
                            return ans
                    except Exception:
                        continue
        if try2:
            for meta in XPath('./opf:metadata/opf:meta[@name="calibre:%s"]' % name)(root):
                val = meta.get('content')
                if val:
                    try:
                        ans = load(val)
                        if isinstance(ans, dict):
                            return ans
                    except Exception:
                        continue
    return reader
read_user_categories = dict_reader('user_categories')
_read_link_maps = dict_reader('link_maps')
_read_author_link_map = dict_reader('author_link_map')
def read_link_maps(root, prefixes, refines):
    '''Return the link_maps dict, upgrading a legacy author_link_map (with
    empty values dropped) into the {'authors': ...} shape when needed.'''
    ans = _read_link_maps(root, prefixes, refines)
    if ans is not None:
        return ans
    ans = _read_author_link_map(root, prefixes, refines)
    if ans:
        ans = {k: v for k, v in ans.items() if v}
        if ans:
            return {'authors': ans}
def dict_writer(name, serialize=dump_dict, remove2=True, extra_remove=''):
    '''Return a writer that stores a dict as a calibre:*name* property meta.
    Removes the legacy name/content meta (when *remove2*), any meta named
    calibre:*extra_remove*, and existing property metas before writing.'''
    pq = f'{CALIBRE_PREFIX}:{name}'
    def writer(root, prefixes, refines, val):
        if remove2:
            for meta in XPath('./opf:metadata/opf:meta[@name="calibre:%s"]' % name)(root):
                remove_element(meta, refines)
        if extra_remove:
            for meta in XPath('./opf:metadata/opf:meta[@name="calibre:%s"]' % extra_remove)(root):
                remove_element(meta, refines)
        for meta in XPath('./opf:metadata/opf:meta[@property]')(root):
            prop = expand_prefix(meta.get('property'), prefixes)
            if prop.lower() == pq:
                remove_element(meta, refines)
        if val:
            ensure_prefix(root, prefixes, 'calibre', CALIBRE_PREFIX)
            m = XPath('./opf:metadata')(root)[0]
            d = m.makeelement(OPF('meta'), attrib={'property':'calibre:%s' % name})
            d.text = serialize(val)
            m.append(d)
    return writer
set_user_categories = dict_writer('user_categories')
set_link_maps = dict_writer('link_maps', extra_remove='author_link_map')
def deserialize_user_metadata(val):
    '''Deserialize the JSON blob of custom column field metadata, decoding
    the is_multiple flags back into their runtime form.'''
    val = json.loads(val, object_hook=from_json)
    ans = {}
    for name, fm in iteritems(val):
        decode_is_multiple(fm)
        ans[name] = fm
    return ans
read_user_metadata3 = dict_reader('user_metadata', load=deserialize_user_metadata, try2=False)
def read_user_metadata2(root, remove_tags=False):
    '''Read legacy per-column calibre:user_metadata:#name metas into a dict
    mapping column lookup name to its field metadata. Optionally removes the
    meta elements as they are read.'''
    ans = {}
    for meta in XPath('./opf:metadata/opf:meta[starts-with(@name, "calibre:user_metadata:")]')(root):
        name = meta.get('name')
        # Strip the "calibre:user_metadata:" prefix, keeping any later colons
        name = ':'.join(name.split(':')[2:])
        if not name or not name.startswith('#'):
            continue
        fm = meta.get('content')
        if remove_tags:
            meta.getparent().remove(meta)
        try:
            fm = json.loads(fm, object_hook=from_json)
            decode_is_multiple(fm)
            ans[name] = fm
        except Exception:
            # Best-effort: a single corrupt column must not lose the rest
            prints('Failed to read user metadata:', name)
            import traceback
            traceback.print_exc()
            continue
    return ans
def read_user_metadata(root, prefixes, refines):
    '''Read custom column metadata, preferring the consolidated EPUB 3 style
    meta over the legacy per-column metas.'''
    return read_user_metadata3(root, prefixes, refines) or read_user_metadata2(root)
def serialize_user_metadata(val):
    # Stable (sorted, indented) JSON so repeated writes produce identical OPFs
    return json.dumps(object_to_unicode(val), ensure_ascii=False, default=to_json, indent=2, sort_keys=True)
set_user_metadata3 = dict_writer('user_metadata', serialize=serialize_user_metadata, remove2=False)
def set_user_metadata(root, prefixes, refines, val):
    '''Write custom column metadata, removing any legacy per-column metas and
    storing everything in the consolidated calibre:user_metadata meta.'''
    for meta in XPath('./opf:metadata/opf:meta[starts-with(@name, "calibre:user_metadata:")]')(root):
        remove_element(meta, refines)
    if val:
        nval = {}
        for name, fm in val.items():
            fm = fm.copy()
            # Optionally omit computed composite values from the stored OPF
            if (fm.get('datatype', 'text') == 'composite' and
                    not fm.get('display', {}).get('composite_store_template_value_in_opf', True)):
                fm['#value#'] = ''
            encode_is_multiple(fm)
            nval[name] = fm
        set_user_metadata3(root, prefixes, refines, nval)
# }}}
# Covers {{{
def read_raster_cover(root, prefixes, refines):
    '''Return the href of the raster cover image: a manifest item carrying
    the cover-image property, falling back to the legacy meta name="cover"
    pointer. XML/HTML media types are rejected (not raster images).'''
    def get_href(item):
        mt = item.get('media-type')
        if mt and 'xml' not in mt and 'html' not in mt:
            href = item.get('href')
            if href:
                return href
    for item in items_with_property(root, 'cover-image', prefixes):
        href = get_href(item)
        if href:
            return href
    # Legacy OPF 2 style: meta name="cover" content points at a manifest id
    for item_id in XPath('./opf:metadata/opf:meta[@name="cover"]/@content')(root):
        for item in XPath('./opf:manifest/opf:item[@id and @href and @media-type]')(root):
            if item.get('id') == item_id:
                href = get_href(item)
                if href:
                    return href
def set_unique_property(property_name, root, prefixes, href):
    '''Ensure exactly one manifest item (the one with *href*) carries
    *property_name* in its properties attribute. Returns True if any item
    was modified.'''
    changed = False
    # First strip the property from every item that currently has it
    for item in items_with_property(root, property_name, prefixes):
        prop = normalize_whitespace(item.get('properties').replace(property_name, ''))
        changed = True
        if prop:
            item.set('properties', prop)
        else:
            del item.attrib['properties']
    # Then add it to the target item
    for item in XPath('./opf:manifest/opf:item')(root):
        if item.get('href') == href:
            changed = True
            item.set('properties', normalize_whitespace((item.get('properties') or '') + f' {property_name}'))
    return changed
def ensure_is_only_raster_cover(root, prefixes, refines, raster_cover_item_href):
    '''Make the manifest item at *raster_cover_item_href* the sole cover:
    removes legacy meta name="cover" pointers and moves the cover-image
    property onto that item alone.'''
    for item in XPath('./opf:metadata/opf:meta[@name="cover"]')(root):
        remove_element(item, refines)
    set_unique_property('cover-image', root, prefixes, raster_cover_item_href)
# }}}
# Reading/setting Metadata objects {{{
def first_spine_item(root, prefixes, refines):
    'Return the href of the manifest item referenced by the first spine itemref.'
    for idref in XPath('./opf:spine/opf:itemref/@idref')(root):
        for manifest_item in XPath('./opf:manifest/opf:item')(root):
            if manifest_item.get('id') == idref:
                return manifest_item.get('href') or None
def set_last_modified_in_opf(root):
    'Stamp the OPF with a current dcterms:modified timestamp.'
    set_last_modified(root, read_prefixes(root), read_refines(root))
def read_metadata(root, ver=None, return_extra_data=False):
    '''Build a Metadata object from a parsed OPF *root*. When
    *return_extra_data* is True, returns a 4-tuple of
    (Metadata, ver, raster cover href, first spine item href) instead.'''
    ans = Metadata(_('Unknown'), [_('Unknown')])
    prefixes, refines = read_prefixes(root), read_refines(root)
    identifiers = read_identifiers(root, prefixes, refines)
    ids = {}
    for key, vals in iteritems(identifiers):
        # calibre/uuid identifiers are stored on dedicated attributes
        if key == 'calibre':
            ans.application_id = vals[0]
        elif key == 'uuid':
            ans.uuid = vals[0]
        else:
            ids[key] = vals[0]
    ans.set_identifiers(ids)
    ans.title = read_title(root, prefixes, refines) or ans.title
    ans.title_sort = read_title_sort(root, prefixes, refines) or ans.title_sort
    ans.languages = read_languages(root, prefixes, refines) or ans.languages
    auts, aus = [], []
    for a in read_authors(root, prefixes, refines):
        auts.append(a.name), aus.append(a.sort)
    ans.authors = auts or ans.authors
    ans.author_sort = authors_to_string(aus) or ans.author_sort
    bkp = read_book_producers(root, prefixes, refines)
    if bkp:
        if bkp[0]:
            ans.book_producer = bkp[0]
    pd = read_pubdate(root, prefixes, refines)
    if not is_date_undefined(pd):
        ans.pubdate = pd
    ts = read_timestamp(root, prefixes, refines)
    if not is_date_undefined(ts):
        ans.timestamp = ts
    lm = read_last_modified(root, prefixes, refines)
    if not is_date_undefined(lm):
        ans.last_modified = lm
    ans.comments = read_comments(root, prefixes, refines) or ans.comments
    ans.publisher = read_publisher(root, prefixes, refines) or ans.publisher
    ans.tags = read_tags(root, prefixes, refines) or ans.tags
    ans.rating = read_rating(root, prefixes, refines) or ans.rating
    s, si = read_series(root, prefixes, refines)
    if s:
        ans.series, ans.series_index = s, si
    ans.link_maps = read_link_maps(root, prefixes, refines) or ans.link_maps
    ans.user_categories = read_user_categories(root, prefixes, refines) or ans.user_categories
    for name, fm in iteritems(read_user_metadata(root, prefixes, refines) or {}):
        try:
            ans.set_user_metadata(name, fm)
        except Exception:
            # A broken custom column must not prevent reading the rest
            import traceback
            traceback.print_exc()
    if return_extra_data:
        ans = ans, ver, read_raster_cover(root, prefixes, refines), first_spine_item(root, prefixes, refines)
    return ans
def get_metadata(stream):
    'Parse the OPF in *stream* and return its metadata as a Metadata object.'
    return read_metadata(parse_opf(stream))
def apply_metadata(root, mi, cover_prefix='', cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    '''Apply the metadata in *mi* to the parsed OPF *root* in place.
    :param apply_null: also clear fields that are null in *mi*
    :param update_timestamp: write mi.timestamp as calibre:timestamp
    :param force_identifiers: replace all identifiers, not just matching schemes
    :param add_missing_cover: create a cover manifest entry when none exists
    Returns the raster cover href (possibly newly created), or None.'''
    prefixes, refines = read_prefixes(root), read_refines(root)
    current_mi = read_metadata(root)
    # ok(field) decides whether a field from mi should be written
    if apply_null:
        def ok(x):
            return True
    else:
        def ok(x):
            return not mi.is_null(x)
    if ok('identifiers'):
        set_identifiers(root, prefixes, refines, mi.identifiers, force_identifiers=force_identifiers)
    if ok('title'):
        set_title(root, prefixes, refines, mi.title, mi.title_sort)
    if ok('languages'):
        set_languages(root, prefixes, refines, mi.languages)
    if ok('book_producer'):
        set_book_producers(root, prefixes, refines, (mi.book_producer,))
    # Pair up authors with their sort values positionally
    aus = string_to_authors(mi.author_sort or '')
    authors = []
    for i, aut in enumerate(mi.authors):
        authors.append(Author(aut, aus[i] if i < len(aus) else None))
    if authors or apply_null:
        set_authors(root, prefixes, refines, authors)
    if ok('pubdate'):
        set_pubdate(root, prefixes, refines, mi.pubdate)
    if update_timestamp and mi.timestamp is not None:
        set_timestamp(root, prefixes, refines, mi.timestamp)
    if ok('comments'):
        set_comments(root, prefixes, refines, mi.comments)
    if ok('publisher'):
        set_publisher(root, prefixes, refines, mi.publisher)
    if ok('tags'):
        set_tags(root, prefixes, refines, mi.tags)
    if ok('rating') and mi.rating is not None and float(mi.rating) > 0.1:
        set_rating(root, prefixes, refines, mi.rating)
    if ok('series'):
        sidx = mi.series_index if isinstance(mi.series_index, (int, float)) else 1.0
        set_series(root, prefixes, refines, mi.series, sidx)
    if ok('link_maps'):
        set_link_maps(root, prefixes, refines, getattr(mi, 'link_maps', None))
    if ok('user_categories'):
        set_user_categories(root, prefixes, refines, getattr(mi, 'user_categories', None))
    # We ignore apply_null for the next two to match the behavior with opf2.py
    if mi.application_id:
        set_application_id(root, prefixes, refines, mi.application_id)
    if mi.uuid:
        set_uuid(root, prefixes, refines, mi.uuid)
    # Merge custom column metadata, respecting apply_null per datatype
    current_mi.remove_stale_user_metadata(mi)
    new_user_metadata, current_user_metadata = mi.get_all_user_metadata(True), current_mi.get_all_user_metadata(True)
    missing = object()
    for key in tuple(new_user_metadata):
        meta = new_user_metadata.get(key)
        if meta is None:
            if apply_null:
                new_user_metadata[key] = None
            continue
        dt = meta.get('datatype')
        if dt == 'text' and meta.get('is_multiple'):
            val = mi.get(key, [])
            if val or apply_null:
                current_user_metadata[key] = meta
        elif dt in {'int', 'float', 'bool'}:
            # missing sentinel distinguishes "absent" from a legitimate None
            val = mi.get(key, missing)
            if val is missing:
                if apply_null:
                    current_user_metadata[key] = meta
            elif apply_null or val is not None:
                current_user_metadata[key] = meta
        elif apply_null or not mi.is_null(key):
            current_user_metadata[key] = meta
    set_user_metadata(root, prefixes, refines, current_user_metadata)
    raster_cover = read_raster_cover(root, prefixes, refines)
    if not raster_cover and cover_data and add_missing_cover:
        if cover_prefix and not cover_prefix.endswith('/'):
            cover_prefix += '/'
        name = cover_prefix + 'cover.jpg'
        i = create_manifest_item(root, name, 'cover')
        if i is not None:
            ensure_is_only_raster_cover(root, prefixes, refines, name)
            raster_cover = name
    pretty_print_opf(root)
    return raster_cover
def set_metadata(stream, mi, cover_prefix='', cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    '''Parse the OPF in *stream* and apply the metadata from *mi* to it.
    See apply_metadata() for the meaning of the keyword arguments. Returns
    the raster cover href (possibly newly created), or None.'''
    root = parse_opf(stream)
    # Bug fix: add_missing_cover was accepted here but never forwarded, so
    # callers passing add_missing_cover=False still got a cover manifest
    # entry created by apply_metadata()'s default of True.
    return apply_metadata(
        root, mi, cover_prefix=cover_prefix, cover_data=cover_data,
        apply_null=apply_null, update_timestamp=update_timestamp,
        force_identifiers=force_identifiers, add_missing_cover=add_missing_cover)
# }}}
if __name__ == '__main__':
    # Quick CLI for debugging: print the metadata of the OPF file given as
    # the last command line argument.
    import sys
    print(get_metadata(open(sys.argv[-1], 'rb')))
| 40,461 | Python | .py | 977 | 32.895599 | 154 | 0.602383 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,512 | toc.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/toc.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid at kovidgoyal.net>'
import functools
import glob
import os
import re
from collections import Counter
from lxml import etree
from lxml.builder import ElementMaker
from calibre.constants import __appname__, __version__
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.xml_parse import safe_xml_fromstring
from polyglot.urllib import unquote, urlparse
NCX_NS = "http://www.daisy.org/z3986/2005/ncx/"
CALIBRE_NS = "http://calibre.kovidgoyal.net/2009/metadata"
NSMAP = {None: NCX_NS, 'calibre':CALIBRE_NS}
E = ElementMaker(namespace=NCX_NS, nsmap=NSMAP)
C = ElementMaker(namespace=CALIBRE_NS, nsmap=NSMAP)
def parse_html_toc(data):
from html5_parser import parse
from lxml import etree
from calibre.utils.cleantext import clean_xml_chars
if isinstance(data, bytes):
data = xml_to_unicode(data, strip_encoding_pats=True, resolve_entities=True)[0]
root = parse(clean_xml_chars(data), maybe_xhtml=True, keep_doctype=False, sanitize_names=True)
for a in root.xpath('//*[@href and local-name()="a"]'):
purl = urlparse(unquote(a.get('href')))
href, fragment = purl[2], purl[5]
if not fragment:
fragment = None
else:
fragment = fragment.strip()
href = href.strip()
txt = etree.tostring(a, method='text', encoding='unicode')
yield href, fragment, txt
class TOC(list):
def __init__(self, href=None, fragment=None, text=None, parent=None,
play_order=0, base_path=os.getcwd(), type='unknown', author=None,
description=None, toc_thumbnail=None):
self.href = href
self.fragment = fragment
if not self.fragment:
self.fragment = None
self.text = text
self.parent = parent
self.base_path = base_path
self.play_order = play_order
self.type = type
self.author = author
self.description = description
self.toc_thumbnail = toc_thumbnail
def __str__(self):
lines = ['TOC: %s#%s %s'%(self.href, self.fragment, self.text)]
for child in self:
c = str(child).splitlines()
for l in c:
lines.append('\t'+l)
return '\n'.join(lines)
def count(self, type):
return len([i for i in self.flat() if i.type == type])
def purge(self, types, max=0):
remove = []
for entry in self.flat():
if entry.type in types:
remove.append(entry)
remove = remove[max:]
for entry in remove:
if entry.parent is None:
continue
entry.parent.remove(entry)
return remove
def remove(self, entry):
list.remove(self, entry)
entry.parent = None
def add_item(self, href, fragment, text, play_order=None, type='unknown',
author=None, description=None, toc_thumbnail=None):
if play_order is None:
play_order = (self[-1].play_order if len(self) else self.play_order) + 1
self.append(TOC(href=href, fragment=fragment, text=text, parent=self,
base_path=self.base_path, play_order=play_order,
type=type, author=author, description=description, toc_thumbnail=toc_thumbnail))
return self[-1]
def top_level_items(self):
for item in self:
if item.text is not None:
yield item
def depth(self):
depth = 1
for obj in self:
c = obj.depth()
if c > depth - 1:
depth = c + 1
return depth
def flat(self):
'Depth first iteration over the tree rooted at self'
yield self
for obj in self:
yield from obj.flat()
@property
def abspath(self):
'Return the file this toc entry points to as a absolute path to a file on the system.'
if self.href is None:
return None
path = self.href.replace('/', os.sep)
if not os.path.isabs(path):
path = os.path.join(self.base_path, path)
return path
def read_from_opf(self, opfreader):
toc = opfreader.soup.find('spine', toc=True)
if toc is not None:
toc = toc['toc']
if toc is None:
try:
toc = opfreader.soup.find('guide').find('reference', attrs={'type':'toc'})['href']
except:
for item in opfreader.manifest:
if 'toc' in item.href().lower():
toc = item.href()
break
if toc is not None:
if toc.lower() not in ('ncx', 'ncxtoc'):
toc = urlparse(unquote(toc))[2]
toc = toc.replace('/', os.sep)
if not os.path.isabs(toc):
toc = os.path.join(self.base_path, toc)
try:
if not os.path.exists(toc):
bn = os.path.basename(toc)
bn = bn.replace('_top.htm', '_toc.htm') # Bug in BAEN OPF files
toc = os.path.join(os.path.dirname(toc), bn)
self.read_html_toc(toc)
except:
print('WARNING: Could not read Table of Contents. Continuing anyway.')
else:
path = opfreader.manifest.item(toc.lower())
path = getattr(path, 'path', path)
if path and os.access(path, os.R_OK):
try:
self.read_ncx_toc(path)
except Exception as err:
print('WARNING: Invalid NCX file:', err)
return
cwd = os.path.abspath(self.base_path)
m = glob.glob(os.path.join(cwd, '*.ncx'))
if m:
toc = m[0]
self.read_ncx_toc(toc)
    def read_ncx_toc(self, toc, root=None):
        '''
        Populate this TOC from the NCX file at path *toc*.

        :param toc: Path to the NCX file; its directory becomes self.base_path
        :param root: Optional pre-parsed lxml root; when None the file is
                     read and parsed here
        '''
        self.base_path = os.path.dirname(toc)
        if root is None:
            with open(toc, 'rb') as f:
                raw = xml_to_unicode(f.read(), assume_utf8=True,
                        strip_encoding_pats=True)[0]
            root = safe_xml_fromstring(raw)
        # All element matching is done case-insensitively on local-name via
        # the EXSLT regular expressions extension
        xpn = {'re': 'http://exslt.org/regular-expressions'}
        XPath = functools.partial(etree.XPath, namespaces=xpn)

        def get_attr(node, default=None, attr='playorder'):
            # Return the value of the first attribute whose (lowercased) name
            # ends with *attr*, ignoring namespace prefixes
            for name, val in node.attrib.items():
                if name and val and name.lower().endswith(attr):
                    return val
            return default

        nl_path = XPath('./*[re:match(local-name(), "navlabel$", "i")]')
        txt_path = XPath('./*[re:match(local-name(), "text$", "i")]')
        content_path = XPath('./*[re:match(local-name(), "content$", "i")]')
        np_path = XPath('./*[re:match(local-name(), "navpoint$", "i")]')

        def process_navpoint(np, dest):
            # Convert the navPoint element np into a child of dest, then
            # recurse into its nested navPoints
            try:
                play_order = int(get_attr(np, 1))
            except:
                play_order = 1
            href = fragment = text = None
            nd = dest
            nl = nl_path(np)
            if nl:
                nl = nl[0]
                text = ''
                for txt in txt_path(nl):
                    text += etree.tostring(txt, method='text',
                            encoding='unicode', with_tail=False)
                content = content_path(np)
                if content and text:
                    content = content[0]
                    # if get_attr(content, attr='src'):
                    purl = urlparse(content.get('src'))
                    href, fragment = unquote(purl[2]), unquote(purl[5])
                    nd = dest.add_item(href, fragment, text)
                    nd.play_order = play_order

            for c in np_path(np):
                process_navpoint(c, nd)

        nm = XPath('//*[re:match(local-name(), "navmap$", "i")]')(root)
        if not nm:
            raise ValueError('NCX files must have a <navmap> element.')
        nm = nm[0]

        for child in np_path(nm):
            process_navpoint(child, self)
def read_html_toc(self, toc):
self.base_path = os.path.dirname(toc)
with open(toc, 'rb') as f:
parsed_toc = parse_html_toc(f.read())
for href, fragment, txt in parsed_toc:
add = True
for i in self.flat():
if i.href == href and i.fragment == fragment:
add = False
break
if add:
self.add_item(href, fragment, txt)
    def render(self, stream, uid):
        '''
        Serialize this TOC as an NCX document to the binary file-like
        *stream*. *uid* becomes the dtb:uid meta of the generated NCX.
        '''
        root = E.ncx(
            E.head(
                E.meta(name='dtb:uid', content=str(uid)),
                E.meta(name='dtb:depth', content=str(self.depth())),
                E.meta(name='dtb:generator', content='%s (%s)'%(__appname__,
                    __version__)),
                E.meta(name='dtb:totalPageCount', content='0'),
                E.meta(name='dtb:maxPageNumber', content='0'),
            ),
            E.docTitle(E.text('Table of Contents')),
        )
        navmap = E.navMap()
        root.append(navmap)
        root.set('{http://www.w3.org/XML/1998/namespace}lang', 'en')
        c = Counter()  # counts emitted navPoints to generate unique ids

        def navpoint(parent, np):
            # Append a navPoint element for entry np to parent, then recurse
            # into np's children
            text = np.text
            if not text:
                text = ''
            c[1] += 1
            item_id = 'num_%d'%c[1]
            text = clean_xml_chars(text)
            elem = E.navPoint(
                E.navLabel(E.text(re.sub(r'\s+', ' ', text))),
                E.content(src=str(np.href)+(('#' + str(np.fragment))
                    if np.fragment else '')),
                id=item_id,
                playOrder=str(np.play_order)
            )
            au = getattr(np, 'author', None)
            if au:
                au = re.sub(r'\s+', ' ', au)
                elem.append(C.meta(au, name='author'))
            desc = getattr(np, 'description', None)
            if desc:
                desc = re.sub(r'\s+', ' ', desc)
                try:
                    elem.append(C.meta(desc, name='description'))
                except ValueError:
                    # desc contained characters invalid in XML: clean and retry
                    elem.append(C.meta(clean_xml_chars(desc), name='description'))
            idx = getattr(np, 'toc_thumbnail', None)
            if idx:
                elem.append(C.meta(idx, name='toc_thumbnail'))
            parent.append(elem)
            for np2 in np:
                navpoint(elem, np2)

        for np in self:
            navpoint(navmap, np)
        raw = etree.tostring(root, encoding='utf-8', xml_declaration=True,
                pretty_print=True)
        stream.write(raw)
| 10,875 | Python | .py | 261 | 29.137931 | 104 | 0.523022 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,513 | rar.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/rar.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Read metadata from RAR archives
'''
import os
from io import BytesIO
from calibre.utils.unrar import extract_member, names
def get_metadata(stream):
    '''Read metadata from the first recognized ebook found inside a RAR
    archive; comic archives are delegated to the CBR reader.'''
    from calibre.ebooks.metadata.archive import is_comic
    from calibre.ebooks.metadata.meta import get_metadata
    known_types = frozenset((
        'lit', 'opf', 'prc', 'mobi', 'fb2', 'epub', 'rb', 'imp',
        'pdf', 'lrf', 'azw', 'azw1', 'azw3'))
    file_names = list(names(stream))
    if is_comic(file_names):
        return get_metadata(stream, 'cbr')
    for member in file_names:
        ext = os.path.splitext(member)[1].lower()
        if ext:
            ext = ext[1:]  # drop the leading dot
        if ext in known_types:
            name, data = extract_member(stream, match=None, name=member)
            buf = BytesIO(data)
            buf.name = os.path.basename(name)
            mi = get_metadata(buf, ext)
            mi.timestamp = None
            return mi
    raise ValueError('No ebook found in RAR archive')
| 1,194 | Python | .py | 30 | 30.866667 | 74 | 0.592561 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,514 | tag_mapper.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/tag_mapper.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
from collections import deque
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import upper as icu_upper
from polyglot.builtins import as_unicode
def compile_pat(pat):
    '''Compile *pat* with the case-insensitive, word-aware, Unicode flags
    used throughout tag matching.'''
    import regex
    flags = (regex.VERSION1 | regex.WORD | regex.FULLCASE |
             regex.IGNORECASE | regex.UNICODE)
    return regex.compile(pat, flags=flags)
def matcher(rule):
    '''Return a predicate taking a lower-cased tag and reporting whether
    *rule* applies to it, based on the rule's match_type and query.'''
    import unicodedata

    def norm(x):
        # NFC-normalize, tolerating None and undecodable input
        return unicodedata.normalize('NFC', as_unicode(x or '', errors='replace'))

    mt = rule['match_type']
    query = rule['query']
    if mt in ('one_of', 'not_one_of'):
        wanted = frozenset(icu_lower(norm(t.strip())) for t in query.split(','))
        if mt == 'one_of':
            return lambda x: x in wanted
        return lambda x: x not in wanted
    if mt in ('matches', 'not_matches'):
        pat = compile_pat(norm(query))
        if mt == 'matches':
            return lambda x: pat.match(x) is not None
        return lambda x: pat.match(x) is None
    if mt == 'has':
        needle = icu_lower(norm(query))
        return lambda x: needle in x
    # Unknown match type: never matches
    return lambda x: False
def apply_rules(tag, rules):
    '''
    Apply the (rule, matcher) pairs in *rules* to *tag*, returning the list
    of resulting tags. Replacement/split results are re-queued so later
    rules see them too; maxiter bounds that re-processing to avoid loops.
    '''
    ans = []
    tags = deque()
    tags.append(tag)
    maxiter = 20
    while tags and maxiter > 0:
        tag = tags.popleft()
        ltag = icu_lower(tag)
        maxiter -= 1
        for rule, matches in rules:
            if matches(ltag):
                ac = rule['action']
                if ac == 'remove':
                    break
                if ac == 'keep':
                    ans.append(tag)
                    break
                if ac == 'replace':
                    if 'matches' in rule['match_type']:
                        # regex replace; plain replace otherwise
                        tag = compile_pat(rule['query']).sub(rule['replace'], tag)
                    else:
                        tag = rule['replace']
                    if ',' in tag:
                        # Replacement produced multiple tags: keep the one
                        # identical to the original (once), re-queue the rest
                        replacement_tags = []
                        self_added = False
                        for rtag in (x.strip() for x in tag.split(',')):
                            if icu_lower(rtag) == ltag:
                                if not self_added:
                                    ans.append(rtag)
                                    self_added = True
                            else:
                                replacement_tags.append(rtag)
                        tags.extendleft(reversed(replacement_tags))
                    else:
                        if icu_lower(tag) == ltag:
                            # Case change or self replacement
                            ans.append(tag)
                            break
                        # New tag: re-queue so remaining rules apply to it
                        tags.appendleft(tag)
                    break
                if ac == 'capitalize':
                    ans.append(tag.capitalize())
                    break
                if ac == 'titlecase':
                    from calibre.utils.titlecase import titlecase
                    ans.append(titlecase(tag))
                    break
                if ac == 'lower':
                    ans.append(icu_lower(tag))
                    break
                if ac == 'upper':
                    ans.append(icu_upper(tag))
                    break
                if ac == 'split':
                    stags = list(filter(None, (x.strip() for x in tag.split(rule['replace']))))
                    if stags:
                        if stags[0] == tag:
                            # Split was a no-op
                            ans.append(tag)
                        else:
                            # Re-queue the pieces for further rule processing
                            tags.extendleft(reversed(stags))
                    break
        else:  # no rule matched, default keep
            ans.append(tag)
    # Anything still queued when maxiter ran out is kept as-is
    ans.extend(tags)
    return ans
def uniq(vals, kmap=icu_lower):
    ''' Remove all duplicates from vals, while preserving order. kmap must be a
    callable that returns a hashable value for every item in vals '''
    ans = []
    seen = set()
    for item in (vals or ()):
        key = kmap(item)
        if key not in seen:
            seen.add(key)
            ans.append(item)
    return ans
def map_tags(tags, rules=()):
    '''Apply *rules* to every tag in *tags*, returning the de-duplicated,
    order-preserving result. With no tags or no rules this is a no-op.'''
    if not tags:
        return []
    if not rules:
        return list(tags)
    compiled = [(r, matcher(r)) for r in rules]
    mapped = []
    for tag in tags:
        mapped.extend(apply_rules(tag, compiled))
    return uniq([t for t in mapped if t])
def find_tests():
    '''Return a unittest suite exercising map_tags() with every rule action.'''
    import unittest

    class TestTagMapper(unittest.TestCase):

        def test_tag_mapper(self):

            def rule(action, query, replace=None, match_type='one_of'):
                # Build a rule dict of the shape expected by matcher()
                ans = {'action':action, 'query': query, 'match_type':match_type}
                if replace is not None:
                    ans['replace'] = replace
                return ans

            def run(rules, tags, expected):
                # Accept a single rule or a list; comma-separated strings
                # are split into tag lists for convenience
                if isinstance(rules, dict):
                    rules = [rules]
                if isinstance(tags, str):
                    tags = [x.strip() for x in tags.split(',')]
                if isinstance(expected, str):
                    expected = [x.strip() for x in expected.split(',')]
                ans = map_tags(tags, rules)
                self.assertEqual(ans, expected)

            run(rule('capitalize', 't1,t2'), 't1,x1', 'T1,x1')
            run(rule('titlecase', 'some tag'), 'some tag,x1', 'Some Tag,x1')
            run(rule('upper', 'ta,t2'), 'ta,x1', 'TA,x1')
            run(rule('lower', 'ta,x1'), 'TA,X1', 'ta,x1')
            run(rule('replace', 't1', 't2'), 't1,x1', 't2,x1')
            run(rule('replace', '(.)1', r'\g<1>2', 'matches'), 't1,x1', 't2,x2')
            run(rule('replace', '(.)1', r'\g<1>2,3', 'matches'), 't1,x1', 't2,3,x2')
            run(rule('replace', 't1', 't2, t3'), 't1,x1', 't2,t3,x1')
            run([rule('replace', 't1', 't2,t3'), rule('remove', 't2')], 't1,x1', 't3,x1')
            run(rule('replace', 't1', 't1'), 't1,x1', 't1,x1')
            run([rule('replace', 't1', 't2'), rule('replace', 't2', 't1')], 't1,t2', 't1,t2')
            run(rule('replace', 'a', 'A'), 'a,b', 'A,b')
            run(rule('replace', 'a,b', 'A,B'), 'a,b', 'A,B')
            run(rule('replace', 'L', 'T', 'has'), 'L', 'T')
            run(rule('split', '/', '/', 'has'), 'a/b/c,d', 'a,b,c,d')
            run(rule('split', '/', '/', 'has'), '/,d', 'd')
            run(rule('split', '/', '/', 'has'), '/a/', 'a')
            run(rule('split', 'a,b', '/'), 'a,b', 'a,b')
            run(rule('split', 'a b', ' ', 'has'), 'a b', 'a,b')
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestTagMapper)
if __name__ == '__main__':
    # Run this module's unit tests when executed directly
    from calibre.utils.run_tests import run_cli
    run_cli(find_tests())
| 6,758 | Python | .py | 155 | 30.103226 | 97 | 0.480225 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,515 | opf_2_to_3.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/opf_2_to_3.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2018, Kovid Goyal <kovid at kovidgoyal.net>
from lxml import etree
from calibre.ebooks.metadata.opf3 import (
DC,
OPF,
XPath,
create_rating,
create_series,
create_timestamp,
encode_is_multiple,
ensure_id,
normalize_whitespace,
parse_date,
read_prefixes,
read_refines,
read_user_metadata2,
refdef,
remove_element,
set_last_modified,
set_refines,
set_user_metadata3,
)
from calibre.ebooks.metadata.utils import parse_opf, pretty_print_opf
from polyglot.builtins import itervalues
class Data:
    # Simple mutable namespace carrying the parsed prefixes/refines between
    # the upgrade_* functions below
    pass
def upgrade_identifiers(root, data):
    # Convert EPUB 2 identifiers (opf:scheme attribute or urn:scheme:value
    # text) into the EPUB 3 "scheme:value" text convention, then strip every
    # attribute except id (only id is valid on dc:identifier in EPUB 3).
    for ident in XPath('./opf:metadata/dc:identifier')(root):
        val = (ident.text or '').strip()
        lval = val.lower()
        scheme = ident.attrib.pop(OPF('scheme'), None)
        if lval.startswith('urn:'):
            prefix, rest = val[4:].partition(':')[::2]
            if prefix and rest:
                scheme, val = prefix, rest
        if scheme and val:
            ident.text = f'{scheme}:{val}'
        for attr in tuple(ident.attrib):
            if attr != 'id':
                del ident.attrib[attr]
def upgrade_title(root, data):
    # Remove empty dc:title elements, mark the first non-empty one as the
    # main title, and fold the legacy calibre:title_sort meta into a
    # file-as refine on it.
    first_title = None
    for title in XPath('./opf:metadata/dc:title')(root):
        if not title.text or not title.text.strip():
            remove_element(title, data.refines)
            continue
        if first_title is None:
            first_title = title

    title_sort = None
    for m in XPath('./opf:metadata/opf:meta[@name="calibre:title_sort"]')(root):
        ans = m.get('content')
        if ans:
            title_sort = ans
        remove_element(m, data.refines)

    if first_title is not None:
        ts = [refdef('file-as', title_sort)] if title_sort else ()
        set_refines(first_title, data.refines, refdef('title-type', 'main'), *ts)
def upgrade_languages(root, data):
    '''Strip attributes from existing dc:language elements, or insert an
    "und" (undetermined) language if none exist at all.'''
    existing = XPath('./opf:metadata/dc:language')(root)
    if existing:
        for el in existing:
            el.attrib.clear()
        return
    # EPUB spec says dc:language is required
    metadata = XPath('./opf:metadata')(root)[0]
    lang_el = metadata.makeelement(DC('language'))
    lang_el.text = 'und'
    metadata.append(lang_el)
def upgrade_authors(root, data):
    # Move the EPUB 2 opf:role / opf:file-as attributes on dc:creator and
    # dc:contributor into EPUB 3 refines meta elements (roles use the
    # marc:relators scheme).
    for which in 'creator', 'contributor':
        for elem in XPath('./opf:metadata/dc:' + which)(root):
            role = elem.attrib.pop(OPF('role'), None)
            sort = elem.attrib.pop(OPF('file-as'), None)
            if role or sort:
                aid = ensure_id(elem)
                metadata = elem.getparent()
                if role:
                    m = metadata.makeelement(OPF('meta'), attrib={'refines':'#'+aid, 'property':'role', 'scheme':'marc:relators'})
                    m.text = role
                    metadata.append(m)
                if sort:
                    m = metadata.makeelement(OPF('meta'), attrib={'refines':'#'+aid, 'property':'file-as'})
                    m.text = sort
                    metadata.append(m)
def upgrade_timestamp(root, data):
    # Replace the legacy calibre:timestamp meta with an EPUB 3 style
    # timestamp element — but only when its value parses as a W3C-DTF date.
    for meta in XPath('./opf:metadata/opf:meta[@name="calibre:timestamp"]')(root):
        m = meta.getparent()
        remove_element(meta, data.refines)
        val = meta.get('content')
        if val:
            try:
                val = parse_date(val, is_w3cdtf=True)
            except Exception:
                # Unparseable timestamps are dropped silently
                pass
            else:
                create_timestamp(root, data.prefixes, m, val)
def upgrade_date(root, data):
    '''Keep only the first non-empty dc:date element (EPUB 3 allows at most
    one); empty and surplus ones are removed.'''
    kept_one = False
    for date in XPath('./opf:metadata/dc:date')(root):
        if not date.text:
            remove_element(date, data.refines)
        elif kept_one:
            # only one dc:date allowed
            remove_element(date, data.refines)
        else:
            kept_one = True
def upgrade_rating(root, data):
    '''Replace legacy calibre:rating meta elements with an EPUB 3 rating;
    the last one seen wins.'''
    val = None
    for el in XPath('./opf:metadata/opf:meta[@name="calibre:rating"]')(root):
        remove_element(el, data.refines)
        val = el.get('content')
    if val is not None:
        create_rating(root, data.prefixes, val)
def upgrade_series(root, data):
    '''Replace legacy calibre:series / calibre:series_index meta elements
    with EPUB 3 series metadata (index defaults to "1.0").'''
    name, index = None, '1.0'
    for el in XPath('./opf:metadata/opf:meta[@name="calibre:series"]')(root):
        remove_element(el, data.refines)
        name = el.get('content')
    for el in XPath('./opf:metadata/opf:meta[@name="calibre:series_index"]')(root):
        remove_element(el, data.refines)
        index = el.get('content')
    if name:
        create_series(root, data.refines, name, index)
def upgrade_custom(root, data):
    '''Re-encode calibre custom column metadata from the EPUB 2 storage
    format to the EPUB 3 one.'''
    user_metadata = read_user_metadata2(root, remove_tags=True)
    if user_metadata:
        for fm in user_metadata.values():
            encode_is_multiple(fm)
        set_user_metadata3(root, data.prefixes, data.refines, user_metadata)
def upgrade_meta(root, data):
    # Translate legacy rendition-related <meta name="..." content="...">
    # entries into EPUB 3 property-style meta elements, mapping the old
    # boolean/lock values onto the EPUB 3 vocabulary.
    for meta in XPath('./opf:metadata/opf:meta[@name]')(root):
        name, content = meta.get('name'), meta.get('content') or ''
        if name.startswith('rendition:'):
            name = name.partition(':')[-1]
        prop = None
        if name in ('orientation', 'layout', 'spread'):
            prop = 'rendition:' + name
        elif name == 'fixed-layout':
            prop = 'rendition:layout'
            content = {'true': 'pre-paginated'}.get(content.lower(), 'reflowable')
        elif name == 'orientation-lock':
            prop = 'rendition:orientation'
            content = {'portrait': 'portrait', 'landscape': 'landscape'}.get(content.lower(), 'auto')
        if prop:
            del meta.attrib['name']
            del meta.attrib['content']
            meta.set('property', prop)
            meta.text = content
def upgrade_cover(root, data):
    # Add the EPUB 3 cover-image property to the manifest item referenced by
    # the legacy <meta name="cover">. The legacy meta itself is deliberately
    # kept (see comment below).
    for item in XPath('./opf:metadata/opf:meta[@name="cover"]')(root):
        # Google Play Books does not recognize covers unless the old style
        # <meta name="cover"> is present, so leave it in
        # remove_element(item, data.refines)
        item_id = item.get('content')
        for item in XPath('./opf:manifest/opf:item[@id and @href and @media-type]')(root):
            if item.get('id') == item_id:
                mt = (item.get('media-type') or '').lower()
                # Only raster images qualify as cover-image, not (X)HTML pages
                if mt and 'xml' not in mt and 'html' not in mt:
                    item.set('properties', normalize_whitespace((item.get('properties') or '') + ' cover-image'))
def remove_invalid_attrs_in_dc_metadata(root, data):
    '''Strip every attribute except id from all Dublin Core elements, as
    EPUB 3 forbids the EPUB 2 opf:* attributes there.'''
    dc_ns = DC('')[1:-1]
    for el in XPath('//*[namespace-uri() = "{}"]'.format(dc_ns))(root):
        doomed = [a for a in el.attrib if a != 'id']
        for a in doomed:
            del el.attrib[a]
def upgrade_metadata(root):
    '''
    Upgrade the metadata section of the parsed OPF tree *root* from EPUB 2
    to EPUB 3 conventions, in place. The upgrade steps run in a fixed
    order, finishing with a last-modified stamp and pretty-printing.
    '''
    data = Data()
    data.prefixes = read_prefixes(root)
    data.refines = read_refines(root)
    upgrade_identifiers(root, data)
    upgrade_title(root, data)
    upgrade_languages(root, data)
    upgrade_authors(root, data)
    upgrade_timestamp(root, data)
    upgrade_date(root, data)
    upgrade_rating(root, data)
    upgrade_series(root, data)
    upgrade_custom(root, data)
    upgrade_meta(root, data)
    upgrade_cover(root, data)
    remove_invalid_attrs_in_dc_metadata(root, data)
    set_last_modified(root, data.prefixes, data.refines)
    pretty_print_opf(root)
if __name__ == '__main__':
    # Upgrade the OPF file named on the command line and print the result
    import sys
    root = parse_opf(open(sys.argv[-1], 'rb'))
    upgrade_metadata(root)
    print(etree.tostring(root))
| 7,428 | Python | .py | 190 | 30.547368 | 130 | 0.596389 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,516 | opf.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/opf.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from lxml import etree
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.opf2 import OPF, pretty_print
from calibre.ebooks.metadata.opf3 import apply_metadata, read_metadata
from calibre.ebooks.metadata.utils import create_manifest_item, normalize_languages, parse_opf, parse_opf_version
from polyglot.builtins import iteritems
class DummyFile:
    '''Minimal read-only file-like wrapper around a bytes/str payload.'''

    def __init__(self, raw):
        # Keep the payload on the instance so read() can hand it back
        self.raw = raw

    def read(self):
        '''Return the wrapped payload unchanged.'''
        return self.raw
def get_metadata2(root, ver):
    # Read metadata from an EPUB 2 style OPF tree; returns
    # (book metadata, version, raster cover href, first spine item)
    opf = OPF(None, preparsed_opf=root, read_toc=False)
    return opf.to_book_metadata(), ver, opf.raster_cover, opf.first_spine_item()
def get_metadata3(root, ver):
    # Read metadata from an EPUB 3 style OPF tree (same return shape as
    # get_metadata2, via return_extra_data)
    return read_metadata(root, ver=ver, return_extra_data=True)
def get_metadata_from_parsed(root):
    '''Read metadata from a parsed OPF tree, dispatching on its version.'''
    ver = parse_opf_version(root.get('version'))
    if ver.major < 3:
        return get_metadata2(root, ver)
    return get_metadata3(root, ver)
def get_metadata(stream):
    '''Read metadata from an OPF file supplied as raw bytes or a stream.'''
    if isinstance(stream, bytes):
        stream = DummyFile(stream)
    parsed = parse_opf(stream)
    return get_metadata_from_parsed(parsed)
def set_metadata_opf2(root, cover_prefix, mi, opf_version,
        cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    '''
    Write metadata from *mi* into the EPUB 2 style OPF tree *root*.
    Returns (rendered OPF bytes, raster cover href or None).
    '''
    mi = MetaInformation(mi)
    # These structural fields must not be smart-updated into the OPF
    for x in ('guide', 'toc', 'manifest', 'spine'):
        setattr(mi, x, None)
    opf = OPF(None, preparsed_opf=root, read_toc=False)
    if mi.languages:
        mi.languages = normalize_languages(list(opf.raw_languages) or [], mi.languages)

    opf.smart_update(mi, apply_null=apply_null)
    if getattr(mi, 'uuid', None):
        opf.application_id = mi.uuid
    if apply_null or force_identifiers:
        opf.set_identifiers(mi.get_identifiers())
    else:
        # Merge: incoming identifiers win, existing ones are preserved
        orig = opf.get_identifiers()
        orig.update(mi.get_identifiers())
        opf.set_identifiers({k:v for k, v in iteritems(orig) if k and v})
    if update_timestamp and mi.timestamp is not None:
        opf.timestamp = mi.timestamp
    raster_cover = opf.raster_cover
    if raster_cover is None and cover_data is not None and add_missing_cover:
        # No raster cover in the manifest: reuse the guide's cover entry if
        # present, otherwise create a new manifest item for cover.jpg
        guide_raster_cover = opf.guide_raster_cover
        i = None
        if guide_raster_cover is not None:
            i = guide_raster_cover
            raster_cover = i.get('href')
        else:
            if cover_prefix and not cover_prefix.endswith('/'):
                cover_prefix += '/'
            name = cover_prefix + 'cover.jpg'
            i = create_manifest_item(opf.root, name, 'cover')
            if i is not None:
                raster_cover = name
        if i is not None:
            if opf_version.major < 3:
                # Replace any existing <meta name="cover"> with one pointing
                # at the new manifest item
                [x.getparent().remove(x) for x in opf.root.xpath('//*[local-name()="meta" and @name="cover"]')]
                m = opf.create_metadata_element('meta', is_dc=False)
                m.set('name', 'cover'), m.set('content', i.get('id'))
            else:
                # EPUB 3: move the cover-image property onto the new item
                for x in opf.root.xpath('//*[local-name()="item" and contains(@properties, "cover-image")]'):
                    x.set('properties', x.get('properties').replace('cover-image', '').strip())
                i.set('properties', 'cover-image')

    with pretty_print:
        return opf.render(), raster_cover
def set_metadata_opf3(root, cover_prefix, mi, opf_version,
        cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    # EPUB 3 path: delegate to apply_metadata. opf_version is accepted only
    # for signature parity with set_metadata_opf2 and is not used here.
    raster_cover = apply_metadata(
        root, mi, cover_prefix=cover_prefix, cover_data=cover_data,
        apply_null=apply_null, update_timestamp=update_timestamp,
        force_identifiers=force_identifiers, add_missing_cover=add_missing_cover)
    return etree.tostring(root, encoding='utf-8'), raster_cover
def set_metadata(stream, mi, cover_prefix='', cover_data=None, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    '''Write metadata into an OPF file (bytes or stream), dispatching on its
    version. Returns (opf bytes, parsed version, raster cover href).'''
    if isinstance(stream, bytes):
        stream = DummyFile(stream)
    root = parse_opf(stream)
    ver = parse_opf_version(root.get('version'))
    writer = set_metadata_opf2 if ver.major < 3 else set_metadata_opf3
    opfbytes, raster_cover = writer(
        root, cover_prefix, mi, ver, cover_data=cover_data,
        apply_null=apply_null, update_timestamp=update_timestamp,
        force_identifiers=force_identifiers, add_missing_cover=add_missing_cover)
    return opfbytes, ver, raster_cover
| 4,508 | Python | .py | 89 | 42.786517 | 154 | 0.665833 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,517 | pml.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/pml.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Read meta information from TXT files
'''
import glob
import os
import re
from calibre import prepare_string_for_xml
from calibre.ebooks.metadata import MetaInformation
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.zipfile import ZipFile
def get_metadata(stream, extract_cover=True):
    """ Return metadata as a L{MetaInfo} object """
    mi = MetaInformation(_('Unknown'), [_('Unknown')])
    stream.seek(0)

    def clean_field(raw):
        # PML metadata values are cp1252-encoded bytes; decode, XML-escape
        # and strip control characters
        return re.sub('[\x00-\x1f]', '', prepare_string_for_xml(raw.strip().decode('cp1252', 'replace')))

    pml = b''
    if stream.name.endswith('.pmlz'):
        # PMLZ: unzip to a temp dir and concatenate all contained .pml files
        with TemporaryDirectory('_unpmlz') as tdir:
            zf = ZipFile(stream)
            zf.extractall(tdir)

            pmls = glob.glob(os.path.join(tdir, '*.pml'))
            for p in pmls:
                # 'rb': we only read; the old 'r+b' needlessly required
                # write access to the extracted file
                with open(p, 'rb') as p_stream:
                    pml += p_stream.read()
            if extract_cover:
                mi.cover_data = get_cover(os.path.splitext(os.path.basename(stream.name))[0], tdir, True)
    else:
        pml = stream.read()
        if extract_cover:
            mi.cover_data = get_cover(os.path.splitext(os.path.basename(stream.name))[0], os.path.abspath(os.path.dirname(stream.name)))

    # Metadata lives in \v ... \v comment blocks as NAME="value" pairs
    for comment in re.findall(br'(?ms)\\v.*?\\v', pml):
        m = re.search(br'TITLE="(.*?)"', comment)
        if m:
            mi.title = clean_field(m.group(1))
        m = re.search(br'AUTHOR="(.*?)"', comment)
        if m:
            # Drop the placeholder author before appending real ones
            if mi.authors == [_('Unknown')]:
                mi.authors = []
            mi.authors.append(clean_field(m.group(1)))
        m = re.search(br'PUBLISHER="(.*?)"', comment)
        if m:
            mi.publisher = clean_field(m.group(1))
        m = re.search(br'COPYRIGHT="(.*?)"', comment)
        if m:
            mi.rights = clean_field(m.group(1))
        m = re.search(br'ISBN="(.*?)"', comment)
        if m:
            mi.isbn = clean_field(m.group(1))

    return mi
def get_cover(name, tdir, top_level=False):
    '''Search the usual PML locations under *tdir* for cover.png and return
    ('png', data) — data is None when no cover file exists.'''
    candidates = []
    if top_level:
        candidates.append(os.path.join(tdir, 'cover.png'))
    candidates.append(os.path.join(tdir, name + '_img', 'cover.png'))
    candidates.append(os.path.join(tdir, 'images', 'cover.png'))
    cover_data = None
    for path in candidates:
        if os.path.exists(path):
            with open(path, 'rb') as cstream:
                cover_data = cstream.read()
            break
    return ('png', cover_data)
| 3,016 | Python | .py | 63 | 39.984127 | 152 | 0.592719 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,518 | utils.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/utils.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
from collections import namedtuple
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.oeb.base import OPF
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.spell import parse_lang_code
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.localization import lang_as_iso639_1
from calibre.utils.xml_parse import safe_xml_fromstring
OPFVersion = namedtuple('OPFVersion', 'major minor patch')


def parse_opf_version(raw):
    '''Parse an OPF version string like "2.0" or "3.0.1" into an OPFVersion.

    Missing/unparseable input falls back to (2, 0, 0); a parseable major
    component with unparseable remainder falls back to (major, 0, 0).
    Extra components beyond three are dropped; missing ones are zero-filled.
    '''
    parts = (raw or '').split('.')
    try:
        major = int(parts[0])
    except Exception:
        # No usable version: assume EPUB 2
        return OPFVersion(2, 0, 0)
    try:
        # Reuse the already-split parts instead of re-splitting raw
        v = [int(p) for p in parts]
    except Exception:
        v = [major, 0, 0]
    v = (v + [0, 0, 0])[:3]  # normalize to exactly three components
    return OPFVersion(*v)
def parse_opf(stream_or_path):
    '''Parse an OPF file, given as a path or an open binary stream, into an
    lxml root element; raises ValueError for empty or non-OPF input.'''
    if hasattr(stream_or_path, 'read'):
        raw = stream_or_path.read()
        src_name = getattr(stream_or_path, 'name', 'stream')
    else:
        with open(stream_or_path, 'rb') as f:
            raw = f.read()
            src_name = f.name
    if not raw:
        raise ValueError('Empty file: '+src_name)
    raw = xml_to_unicode(raw, strip_encoding_pats=True, resolve_entities=True, assume_utf8=True)[0]
    # Discard any junk before the first XML tag
    raw = raw[raw.find('<'):]
    root = safe_xml_fromstring(clean_xml_chars(raw))
    if root is None:
        raise ValueError('Not an OPF file')
    return root
def normalize_languages(opf_languages, mi_languages):
    ' Preserve original country codes and use 2-letter lang codes where possible '
    def parse(x):
        # Invalid language codes become None and are filtered out below
        try:
            return parse_lang_code(x)
        except ValueError:
            return None
    opf_languages = filter(None, map(parse, opf_languages))
    # Remember which country code each language used in the original OPF
    cc_map = {c.langcode:c.countrycode for c in opf_languages}
    mi_languages = filter(None, map(parse, mi_languages))

    def norm(x):
        lc = x.langcode
        # Keep the incoming country code, falling back to the OPF's one
        cc = x.countrycode or cc_map.get(lc, None)
        lc = lang_as_iso639_1(lc) or lc
        if cc:
            lc += '-' + cc
        return lc
    return list(map(norm, mi_languages))
def ensure_unique(template, existing):
    '''Return *template* unchanged if it is not in *existing*, otherwise
    append -1, -2, ... before the extension until the result is unique.'''
    stem, ext = template.rpartition('.')[::2]
    if stem and ext:
        ext = '.' + ext
    else:
        # No usable extension (or a leading-dot name): suffix goes at the end
        stem, ext = template, ''
    candidate = template
    counter = 0
    while candidate in existing:
        counter += 1
        candidate = '%s-%d%s' % (stem, counter, ext)
    return candidate
def create_manifest_item(root, href_template, id_template, media_type=None):
    # Add an <item> to the OPF <manifest> with href/id made unique against
    # every existing href/id in the tree. Returns the new element, or None
    # (implicitly) when there is no <manifest>.
    all_ids = frozenset(root.xpath('//*/@id'))
    all_hrefs = frozenset(root.xpath('//*/@href'))
    href = ensure_unique(href_template, all_hrefs)
    item_id = ensure_unique(id_template, all_ids)
    manifest = root.find(OPF('manifest'))
    if manifest is not None:
        i = manifest.makeelement(OPF('item'))
        i.set('href', href), i.set('id', item_id)
        # media-type is guessed from the template name when not supplied
        i.set('media-type', media_type or guess_type(href_template))
        manifest.append(i)
        return i
def pretty_print_opf(root):
    # Re-indent the OPF tree in place for human readability
    from calibre.ebooks.oeb.polish.pretty import pretty_opf, pretty_xml_tree
    pretty_opf(root)
    pretty_xml_tree(root)
| 3,167 | Python | .py | 89 | 29.640449 | 106 | 0.645003 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,519 | zip.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/zip.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from calibre import CurrentDir
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.zipfile import ZipFile
def get_metadata(stream):
    '''
    Read metadata from the first recognized ebook found inside a ZIP
    archive; comic archives are delegated to the CBZ reader.
    '''
    from calibre.ebooks.metadata.archive import is_comic
    from calibre.ebooks.metadata.meta import get_metadata
    stream_type = None
    zf = ZipFile(stream, 'r')
    names = zf.namelist()
    if is_comic(names):
        # Is probably a comic
        return get_metadata(stream, 'cbz')
    for f in names:
        stream_type = os.path.splitext(f)[1].lower()
        if stream_type:
            stream_type = stream_type[1:]  # drop the leading dot
            if stream_type in ('lit', 'opf', 'prc', 'mobi', 'fb2', 'epub',
                               'rb', 'imp', 'pdf', 'lrf', 'azw', 'azw1', 'azw3'):
                with TemporaryDirectory() as tdir:
                    with CurrentDir(tdir):
                        path = zf.extract(f)
                        mi = get_metadata(open(path,'rb'), stream_type)
                        if stream_type == 'opf' and mi.application_id is None:
                            try:
                                # zip archive opf files without an application_id were assumed not to have a cover
                                # reparse the opf and if cover exists read its data from zip archive for the metadata
                                nmi = zip_opf_metadata(path, zf)
                                nmi.timestamp = None
                                return nmi
                            except:
                                pass
                        mi.timestamp = None
                        return mi
    raise ValueError('No ebook found in ZIP archive (%s)' % os.path.basename(getattr(stream, 'name', '') or '<stream>'))
def zip_opf_metadata(opfpath, zf):
    '''
    Parse the OPF at *opfpath* (path or open stream) and, if it names a
    cover, read the cover bytes from the ZIP archive *zf*.
    '''
    from calibre.ebooks.metadata.opf2 import OPF
    if hasattr(opfpath, 'read'):
        f = opfpath
        opfpath = getattr(f, 'name', os.getcwd())
    else:
        f = open(opfpath, 'rb')
    opf = OPF(f, os.path.dirname(opfpath))
    mi = opf.to_book_metadata()
    # This is broken, in that it only works for
    # when both the OPF file and the cover file are in the root of the
    # zip file and the cover is an actual raster image, but I don't care
    # enough to make it more robust
    if getattr(mi, 'cover', None):
        covername = os.path.basename(mi.cover)
        mi.cover = None
        names = zf.namelist()
        if covername in names:
            fmt = covername.rpartition('.')[-1]
            data = zf.read(covername)
            mi.cover_data = (fmt, data)
    return mi
| 2,667 | Python | .py | 59 | 32.915254 | 120 | 0.555171 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,520 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
"""
Provides abstraction for metadata reading.writing from a variety of ebook formats.
"""
import os
import re
import sys
from contextlib import suppress
from calibre import force_unicode, guess_type, prints, relpath
from calibre.utils.config_base import tweaks
from polyglot.builtins import as_unicode, iteritems
from polyglot.urllib import quote, unquote, urlparse
# Compile the user-configurable regex used to split author strings; fall
# back to the default pattern when the tweak value is invalid.
try:
    _author_pat = re.compile(tweaks['authors_split_regex'])
except Exception:
    prints('Author split regexp:', tweaks['authors_split_regex'],
            'is invalid, using default')
    _author_pat = re.compile(r'(?i),?\s+(and|with)\s+')
def string_to_authors(raw):
    '''Split an author string on the configured separators; a literal
    ampersand may be escaped as &&.'''
    if not raw:
        return []
    # Protect escaped ampersands before normalizing separators to '&'
    protected = raw.replace('&&', '\uffff')
    normalized = _author_pat.sub('&', protected)
    parts = (p.strip().replace('\uffff', '&') for p in normalized.split('&'))
    return [p for p in parts if p]
def authors_to_string(authors):
    '''Join author names with " & ", escaping literal ampersands as &&;
    None yields the empty string.'''
    if authors is None:
        return ''
    return ' & '.join(a.replace('&', '&&') for a in authors if a)
def remove_bracketed_text(src, brackets=None):
    '''Return *src* with all text inside (possibly nested) bracket pairs
    removed; unbalanced closing brackets are kept out of the result too.'''
    if brackets is None:
        brackets = {'(': ')', '[': ']', '{': '}'}
    from collections import Counter
    src = force_unicode(src)
    closer_to_opener = {close: open_ for open_, close in brackets.items()}
    open_counts = Counter()
    depth = 0
    out = []
    for ch in src:
        if ch in brackets:
            open_counts[ch] += 1
            depth += 1
        elif ch in closer_to_opener:
            opener = closer_to_opener[ch]
            # Only a closer with a matching open bracket reduces the depth;
            # stray closers are silently dropped
            if open_counts[opener] > 0:
                open_counts[opener] -= 1
                depth -= 1
        elif depth < 1:
            out.append(ch)
    return ''.join(out)
def author_to_author_sort(
    author,
    method=None,
    copywords=None,
    use_surname_prefixes=None,
    surname_prefixes=None,
    name_prefixes=None,
    name_suffixes=None
):
    '''
    Compute the sort form of an author name ("John Smith" -> "Smith, John").
    Every behavioral knob defaults to the corresponding tweak when the
    argument is None. Returns the name unchanged for the 'copy' method,
    single-token names, names containing a copyword, already-comma'd names
    (method 'comma'), and all-prefix/all-suffix token sequences.
    '''
    if not author:
        return ''
    if method is None:
        method = tweaks['author_sort_copy_method']
    if method == 'copy':
        return author
    # Bracketed text (e.g. parenthesized notes) is ignored for analysis
    sauthor = remove_bracketed_text(author).strip()
    if method == 'comma' and ',' in sauthor:
        return author
    tokens = sauthor.split()
    if len(tokens) < 2:
        return author
    ltoks = frozenset(x.lower() for x in tokens)
    copy_words = frozenset(x.lower() for x in (tweaks['author_name_copywords'] if copywords is None else copywords))
    if ltoks.intersection(copy_words):
        # Likely a corporate/collective name: leave untouched
        return author
    author_use_surname_prefixes = tweaks['author_use_surname_prefixes'] if use_surname_prefixes is None else use_surname_prefixes
    if author_use_surname_prefixes:
        author_surname_prefixes = frozenset(x.lower() for x in (tweaks['author_surname_prefixes'] if surname_prefixes is None else surname_prefixes))
        if len(tokens) == 2 and tokens[0].lower() in author_surname_prefixes:
            # e.g. "Van Gogh": already surname-first in effect
            return author
    prefixes = {force_unicode(y).lower() for y in (tweaks['author_name_prefixes'] if name_prefixes is None else name_prefixes)}
    prefixes |= {y+'.' for y in prefixes}

    # first: index of the first token that is not an honorific prefix
    for first in range(len(tokens)):
        if tokens[first].lower() not in prefixes:
            break
    else:
        return author

    suffixes = {force_unicode(y).lower() for y in (tweaks['author_name_suffixes'] if name_suffixes is None else name_suffixes)}
    suffixes |= {y+'.' for y in suffixes}

    # last: index of the last token that is not a generational suffix
    for last in range(len(tokens) - 1, first - 1, -1):
        if tokens[last].lower() not in suffixes:
            break
    else:
        return author

    suffix = ' '.join(tokens[last + 1:])

    if author_use_surname_prefixes:
        # Fold a surname prefix into the surname token ("van Gogh")
        if last > first and tokens[last - 1].lower() in author_surname_prefixes:
            tokens[last - 1] += ' ' + tokens[last]
            last -= 1

    # Surname first, then the given-name tokens, then any suffix
    atokens = tokens[last:last + 1] + tokens[first:last]
    num_toks = len(atokens)
    if suffix:
        atokens.append(suffix)

    if method != 'nocomma' and num_toks > 1:
        atokens[0] += ','

    return ' '.join(atokens)
def authors_to_sort_string(authors):
    '''Join the sort form of each author name with " & ".'''
    return ' & '.join(author_to_author_sort(a) for a in authors)
_title_pats = {}  # per-language cache of compiled leading-article patterns


def get_title_sort_pat(lang=None):
    '''
    Return a compiled, cached regex matching the leading articles for
    *lang* (or the configured/interface default language when None), as
    configured by the per_language_title_sort_articles tweak.
    '''
    ans = _title_pats.get(lang, None)
    if ans is not None:
        return ans
    q = lang
    from calibre.utils.localization import canonicalize_lang, get_lang
    if lang is None:
        q = tweaks['default_language_for_title_sort']
        if q is None:
            q = get_lang()
    q = canonicalize_lang(q) if q else q
    data = tweaks['per_language_title_sort_articles']
    try:
        ans = data.get(q, None)
    except AttributeError:
        ans = None  # invalid tweak value
    try:
        # Fall back to the English article list for unknown languages
        ans = frozenset(ans) if ans is not None else frozenset(data['eng'])
    except Exception:
        ans = frozenset((r'A\s+', r'The\s+', r'An\s+'))
    if ans:
        ans = '|'.join(ans)
        ans = '^(%s)'%ans
        try:
            ans = re.compile(ans, re.IGNORECASE)
        except:
            # Broken user-supplied pattern: use the default English articles
            ans = re.compile(r'^(A|The|An)\s+', re.IGNORECASE)
    else:
        ans = re.compile('^$')  # matches only the empty string
    _title_pats[lang] = ans
    return ans
quote_pairs = {
    # https://en.wikipedia.org/wiki/Quotation_mark
    # Maps an opening quote character to the closing character(s) that may
    # balance it; used by title_sort() to strip enclosing quotes.
    '"': ('"',),
    "'": ("'",),
    '“': ('”','“'),
    '”': ('”','”'),
    '„': ('”','“'),
    '‚': ('’','‘'),
    '’': ('’','‘'),
    '‘': ('’','‘'),
    '‹': ('›',),
    '›': ('‹',),
    '《': ('》',),
    '〈': ('〉',),
    '»': ('«', '»'),
    '«': ('«', '»'),
    '「': ('」',),
    '『': ('』',),
}
def title_sort(title, order=None, lang=None):
    '''
    Return the sort form of title: surrounding quotation marks are stripped
    and any leading article for the language lang is moved to the end
    ("The Title" -> "Title, The").

    :param order: title_series_sorting tweak value; 'strictly_alphabetic'
                  disables article moving. None means read the tweak.
    :param lang: language whose articles to strip; None uses the default
    '''
    if order is None:
        order = tweaks['title_series_sorting']
    title = title.strip()
    if order == 'strictly_alphabetic':
        return title
    if title and title[0] in quote_pairs:
        q = title[0]
        title = title[1:]
        if title and title[-1] in quote_pairs[q]:
            title = title[:-1]
    match = get_title_sort_pat(lang).search(title)
    if match:
        try:
            prep = match.group(1)
        except IndexError:
            # The empty-language pattern '^$' has no group 1
            pass
        else:
            if prep:
                # Move the leading article to the end
                title = title[len(prep):] + ', ' + prep
    # Fix: guard against an empty title here (e.g. the input was just a pair
    # of quotes); previously title[0] raised IndexError on the empty string.
    if title and title[0] in quote_pairs:
        q = title[0]
        title = title[1:]
        if title and title[-1] in quote_pairs[q]:
            title = title[:-1]
    return title.strip()
# (value, numeral) pairs ordered from largest to smallest so that a simple
# greedy algorithm produces correct Roman numerals.
coding = list(zip(
    [1000,900,500,400,100,90,50,40,10,9,5,4,1],
    ["M","CM","D","CD","C","XC","L","XL","X","IX","V","IV","I"]
))


def roman(num):
    '''
    Return num rendered as an upper-case Roman numeral string. Values outside
    1..3999, or non-integral values, are returned unchanged via str().
    '''
    if num <= 0 or num >= 4000 or int(num) != num:
        return str(num)
    remainder = int(num)
    pieces = []
    for value, numeral in coding:
        count, remainder = divmod(remainder, value)
        if count:
            pieces.append(numeral * count)
    return ''.join(pieces)
def fmt_sidx(i, fmt='%.2f', use_roman=False):
    '''
    Format a series index for display. None/'' is treated as 1; non-numeric
    values are returned via str(). Whole numbers are shown without a decimal
    part (optionally as Roman numerals); fractional values use fmt with
    trailing zeros stripped.
    '''
    if i is None or i == '':
        i = 1
    try:
        value = float(i)
    except Exception:
        return str(i)
    if value == int(value):
        whole = int(value)
        return roman(whole) if use_roman else '%d' % whole
    formatted = fmt % value
    if '.' in formatted:
        formatted = formatted.rstrip('0')
    return formatted
class Resource:
    '''
    Represents a resource (usually a file on the filesystem or a URL pointing
    to the web. Such resources are commonly referred to in OPF files.
    They have the interface:
    :member:`path`
    :member:`mime_type`
    :method:`href`
    '''

    def __init__(self, href_or_path, basedir=os.getcwd(), is_path=True):
        # NOTE(review): the basedir default is evaluated once at class
        # definition time, so it is the cwd at import time — confirm this is
        # intended before changing.
        self._href = None
        self._basedir = basedir
        self.path = None
        self.fragment = ''
        try:
            self.mime_type = guess_type(href_or_path)[0]
        except:
            self.mime_type = None
        if self.mime_type is None:
            self.mime_type = 'application/octet-stream'
        if is_path:
            # Treat href_or_path as a filesystem path, made absolute
            # relative to basedir
            path = href_or_path
            if not os.path.isabs(path):
                path = os.path.abspath(os.path.join(basedir, path))
            if isinstance(path, bytes):
                path = path.decode(sys.getfilesystemencoding())
            self.path = path
        else:
            # Treat href_or_path as a URL; only scheme-less and file:// URLs
            # are mapped to local paths, everything else is kept as an href
            url = urlparse(href_or_path)
            if url[0] not in ('', 'file'):
                self._href = href_or_path
            else:
                pc = url[2]  # the path component of the URL
                if isinstance(pc, str):
                    pc = pc.encode('utf-8')
                pc = unquote(pc).decode('utf-8')
                self.path = os.path.abspath(os.path.join(basedir, pc.replace('/', os.sep)))
                self.fragment = unquote(url[-1])

    def href(self, basedir=None):
        '''
        Return a URL pointing to this resource. If it is a file on the filesystem
        the URL is relative to `basedir`.
        `basedir`: If None, the basedir of this resource is used (see :method:`set_basedir`).
        If this resource has no basedir, then the current working directory is used as the basedir.
        '''
        if basedir is None:
            if self._basedir:
                basedir = self._basedir
            else:
                basedir = os.getcwd()
        if self.path is None:
            # Not a local file, return the remote URL unchanged
            return self._href
        f = self.fragment.encode('utf-8') if isinstance(self.fragment, str) else self.fragment
        frag = '#'+as_unicode(quote(f)) if self.fragment else ''
        if self.path == basedir:
            # The resource *is* the basedir, only the fragment remains
            return ''+frag
        try:
            rpath = relpath(self.path, basedir)
        except OSError: # On windows path and basedir could be on different drives
            rpath = self.path
        if isinstance(rpath, str):
            rpath = rpath.encode('utf-8')
        return as_unicode(quote(rpath.replace(os.sep, '/')))+frag

    def set_basedir(self, path):
        # Set the directory that href() computes relative URLs against
        self._basedir = path

    def basedir(self):
        # Return the directory that href() computes relative URLs against
        return self._basedir

    def __repr__(self):
        return 'Resource(%s, %s)'%(repr(self.path), repr(self.href()))
class ResourceCollection:
    '''
    An ordered collection of :class:`Resource` objects with list-like access
    semantics (iteration, len, indexing, truthiness).
    '''

    def __init__(self):
        self._resources = []

    def __iter__(self):
        yield from self._resources

    def __len__(self):
        return len(self._resources)

    def __getitem__(self, index):
        return self._resources[index]

    def __bool__(self):
        return len(self._resources) > 0

    def __str__(self):
        resources = map(repr, self)
        return '[%s]'%', '.join(resources)

    def __repr__(self):
        return str(self)

    def append(self, resource):
        '''Append resource, which must be a :class:`Resource` instance.'''
        if not isinstance(resource, Resource):
            raise ValueError('Can only append objects of type Resource')
        self._resources.append(resource)

    def remove(self, resource):
        self._resources.remove(resource)

    def replace(self, start, end, items):
        'Same as list[start:end] = items'
        self._resources[start:end] = items

    @staticmethod
    def from_directory_contents(top, topdown=True):
        '''
        Return a collection containing a Resource for every file under the
        directory top (recursively).
        '''
        collection = ResourceCollection()
        for dirpath, dirnames, filenames in os.walk(top, topdown=topdown):
            # Fix: the old code joined dirpath with os.walk's *list* of
            # directory names (always a TypeError) and called the
            # nonexistent Resource.from_path(). Walk the individual files
            # instead and construct Resources directly.
            for fname in filenames:
                path = os.path.abspath(os.path.join(dirpath, fname))
                res = Resource(path, basedir=top)
                res.set_basedir(top)
                collection.append(res)
        return collection

    def set_basedir(self, path):
        '''Set the basedir on every contained resource.'''
        for res in self:
            res.set_basedir(path)
def MetaInformation(title, authors=(_('Unknown'),)):
    ''' Convenient encapsulation of book metadata, needed for compatibility
    @param title: title or ``_('Unknown')`` or a MetaInformation object
    @param authors: List of strings or []
    '''
    from calibre.ebooks.metadata.book.base import Metadata
    other = None
    if hasattr(title, 'title') and hasattr(title, 'authors'):
        # title is itself a metadata object, copy from it
        other = title
        title, authors = other.title, other.authors
    return Metadata(title, authors, other=other)
def check_digit_for_isbn10(isbn):
    ''' Compute the check digit (the 10th character) of a 10 digit ISBN. '''
    total = sum((i + 1) * int(isbn[i]) for i in range(9))
    remainder = total % 11
    if remainder == 10:
        return 'X'
    return str(remainder)


def check_digit_for_isbn13(isbn):
    ''' Compute the check digit (the 13th character) of a 13 digit ISBN. '''
    weighted = sum((1 if i % 2 == 0 else 3) * int(isbn[i]) for i in range(12))
    return str((10 - weighted % 10) % 10)


def check_isbn10(isbn):
    ''' Return True iff isbn is a checksum-valid 10 digit ISBN. '''
    with suppress(Exception):
        return isbn[9] == check_digit_for_isbn10(isbn)
    return False


def check_isbn13(isbn):
    ''' Return True iff isbn is a checksum-valid 13 digit ISBN. '''
    with suppress(Exception):
        return isbn[12] == check_digit_for_isbn13(isbn)
    return False


def check_isbn(isbn, simple_sanitize=False):
    '''
    Sanitize isbn and validate its checksum. Returns the sanitized 10 or 13
    character ISBN if it is valid, otherwise None.
    '''
    if not isbn:
        return None
    if simple_sanitize:
        isbn = isbn.upper().replace('-', '').strip().replace(' ', '')
    else:
        isbn = re.sub(r'[^0-9X]', '', isbn.upper())
    length = len(isbn)
    if length not in (10, 13):
        return None
    # A single repeated digit is never a real ISBN
    if re.match(r'(\d)\1{9,12}$', isbn) is not None:
        return None
    if length == 10 and check_isbn10(isbn):
        return isbn
    if length == 13 and check_isbn13(isbn):
        return isbn
    return None


def normalize_isbn(isbn):
    ''' Convert a valid 10 digit ISBN into its 13 digit form; anything else is returned unchanged. '''
    if not isbn:
        return isbn
    sanitized = check_isbn(isbn)
    if sanitized is None:
        return isbn
    if len(sanitized) == 10:
        sanitized = '978' + sanitized[:9]
        sanitized += check_digit_for_isbn13(sanitized)
    return sanitized
def check_issn(issn):
    '''
    Validate an ISSN. Returns the sanitized 8-character form if the check
    digit is correct, otherwise None.
    '''
    if not issn:
        return None
    issn = re.sub(r'[^0-9X]', '', issn.upper())
    try:
        digits = tuple(map(int, issn[:7]))
        products = [(8 - i) * d for i, d in enumerate(digits)]
        # Fix: the check digit is (11 - sum mod 11) mod 11. Without the
        # outer mod, a weighted sum divisible by 11 produced check == 11 and
        # valid ISSNs whose check digit is 0 were rejected.
        check = (11 - sum(products) % 11) % 11
        if (check == 10 and issn[7] == 'X') or check == int(issn[7]):
            return issn
    except Exception:
        # Too short / malformed input, fall through to None
        pass
    return None
def format_isbn(isbn):
    ''' Return isbn formatted with hyphens if it validates, otherwise return it unchanged. '''
    valid = check_isbn(isbn)
    if not valid:
        return isbn
    if len(valid) == 10:
        parts = (valid[:2], valid[2:6], valid[6:9], valid[9])
    else:
        parts = (valid[:3], valid[3:5], valid[5:9], valid[9:12], valid[12])
    return '-'.join(parts)
def check_doi(doi):
    'Check if something that looks like a DOI is present anywhere in the string'
    if not doi:
        return None
    found = re.search(r'10\.\d{4}/\S+', doi)
    return found.group() if found is not None else None
def rating_to_stars(value, allow_half_stars=False, star='★', half='⯨'):
    '''
    Render a rating on a 0-10 scale as star characters (two rating points
    per star), optionally with a trailing half star for odd values.
    '''
    rating = min(int(value or 0), 10)
    if rating < 0:
        rating = 0
    full, rem = divmod(rating, 2)
    result = star * full
    if allow_half_stars and rem:
        result += half
    return result
| 14,369 | Python | .py | 412 | 27.213592 | 149 | 0.575602 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,521 | html.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/html.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Try to read metadata from an HTML file.
'''
import re
import unittest
from collections import defaultdict
from html5_parser import parse
from lxml.etree import Comment
from calibre import isbytestring, replace_entities
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata import authors_to_string, string_to_authors
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.date import is_date_undefined, parse_date
from polyglot.builtins import iteritems
def get_metadata(stream):
    ''' Read all data from stream and extract metadata from the HTML it contains. '''
    raw = stream.read()
    return get_metadata_(raw)
# Metadata field name -> the attribute used in calibre style HTML comments,
# e.g. <!-- TITLE="..." -->
COMMENT_NAMES = {
    'title': 'TITLE',
    'authors': 'AUTHOR',
    'publisher': 'PUBLISHER',
    'isbn': 'ISBN',
    'languages': 'LANGUAGE',
    'pubdate': 'PUBDATE',
    'timestamp': 'TIMESTAMP',
    'series': 'SERIES',
    'series_index': 'SERIESNUMBER',
    'rating': 'RATING',
    'comments': 'COMMENTS',
    'tags': 'TAGS',
}

# Metadata field name -> the recognized <meta name="..."> values for that
# field (matched case-insensitively)
META_NAMES = {
    'title' : ('dc.title', 'dcterms.title', 'title'),
    'authors': ('author', 'dc.creator.aut', 'dcterms.creator.aut', 'dc.creator'),
    'publisher': ('publisher', 'dc.publisher', 'dcterms.publisher'),
    'isbn': ('isbn',),
    'languages': ('dc.language', 'dcterms.language'),
    'pubdate': ('pubdate', 'date of publication', 'dc.date.published', 'dc.date.publication', 'dc.date.issued', 'dcterms.issued'),
    'timestamp': ('timestamp', 'date of creation', 'dc.date.created', 'dc.date.creation', 'dcterms.created'),
    'series': ('series',),
    'series_index': ('seriesnumber', 'series_index', 'series.index'),
    'rating': ('rating',),
    'comments': ('comments', 'dc.description'),
    'tags': ('tags',),
}

# Reverse maps: comment attribute / meta tag name -> metadata field name
rmap_comment = {v:k for k, v in iteritems(COMMENT_NAMES)}
rmap_meta = {v:k for k, l in iteritems(META_NAMES) for v in l}

# Extract an HTML attribute value, supports both single and double quotes and
# single quotes inside double quotes and vice versa.
attr_pat = r'''(?:(?P<sq>')|(?P<dq>"))(?P<content>(?(sq)[^']+|[^"]+))(?(sq)'|")'''
def handle_comment(data, comment_tags):
    '''
    Scan the text of one HTML comment for NAME="value" pairs and append any
    recognized values to comment_tags (a defaultdict of field -> list).
    '''
    # Compile the pattern once and cache it on the function itself
    pat = getattr(handle_comment, 'pat', None)
    if pat is None:
        pat = handle_comment.pat = re.compile(r'(?P<name>\S+)\s*=\s*%s' % attr_pat)
    for m in pat.finditer(data):
        field = rmap_comment.get(m.group('name'))
        if field:
            comment_tags[field].append(replace_entities(m.group('content')))
def parse_metadata(src):
    '''
    Parse the HTML in src and collect the raw metadata it declares.

    Returns (comment_tags, meta_tags, meta_tag_ids, title): the first two map
    calibre field names to lists of values found in calibre style HTML
    comments and <meta> tags respectively, meta_tag_ids maps identifier
    schemes to lists of values, and title is the first non-empty <title>.
    '''
    root = parse(src)
    comment_tags = defaultdict(list)
    meta_tags = defaultdict(list)
    meta_tag_ids = defaultdict(list)
    title = ''
    # dc/dcterms identifier meta names, with or without a scheme suffix
    identifier_pat = re.compile(r'(?:dc|dcterms)[.:]identifier(?:\.|$)', flags=re.IGNORECASE)
    id_pat2 = re.compile(r'(?:dc|dcterms)[.:]identifier$', flags=re.IGNORECASE)
    for comment in root.iterdescendants(tag=Comment):
        if comment.text:
            handle_comment(comment.text, comment_tags)
    for q in root.iterdescendants(tag='title'):
        if q.text:
            title = q.text
            break
    for meta in root.iterdescendants(tag='meta'):
        name, content = meta.get('name'), meta.get('content')
        if not name or not content:
            continue
        if identifier_pat.match(name) is not None:
            # The identifier scheme comes either from the scheme attribute
            # (dc.identifier) or from the name suffix (dc.identifier.<scheme>),
            # but never both
            scheme = None
            if id_pat2.match(name) is not None:
                scheme = meta.get('scheme')
            else:
                elements = re.split(r'[.:]', name)
                if len(elements) == 3 and not meta.get('scheme'):
                    scheme = elements[2].strip()
            if scheme:
                meta_tag_ids[scheme.lower()].append(content)
        else:
            # Ordinary metadata field; try the name as-is, then with ':'
            # normalized to '.'
            x = name.lower()
            field = None
            try:
                field = rmap_meta[x]
            except KeyError:
                try:
                    field = rmap_meta[x.replace(':', '.')]
                except KeyError:
                    pass
            if field:
                meta_tags[field].append(content)
    return comment_tags, meta_tags, meta_tag_ids, title
def get_metadata_(src, encoding=None):
    '''
    Build a Metadata object from the HTML document src. Values from calibre
    style comments take precedence over <meta> tags.

    :param src: HTML as str or bytes
    :param encoding: encoding to decode bytes with; autodetected when None
    '''
    # Meta data definitions as in
    # https://www.mobileread.com/forums/showpost.php?p=712544&postcount=9
    if isbytestring(src):
        if not encoding:
            src = xml_to_unicode(src)[0]
        else:
            src = src.decode(encoding, 'replace')
    src = src[:150000]  # Searching shouldn't take too long
    comment_tags, meta_tags, meta_tag_ids, title_tag = parse_metadata(src)

    def get_all(field):
        # All non-blank values for field, comments winning over meta tags
        ans = comment_tags.get(field, meta_tags.get(field, None))
        if ans:
            ans = [x.strip() for x in ans if x.strip()]
        if not ans:
            ans = None
        return ans

    def get(field):
        ans = get_all(field)
        if ans:
            ans = ans[0]
        return ans

    # Title
    title = get('title') or title_tag.strip() or _('Unknown')

    # Author
    authors = authors_to_string(get_all('authors')) or _('Unknown')

    # Create MetaInformation with Title and Author
    mi = Metadata(title, string_to_authors(authors))

    # Single-value text fields
    for field in ('publisher', 'isbn'):
        val = get(field)
        if val:
            setattr(mi, field, val)

    # Multi-value text fields
    for field in ('languages',):
        val = get_all(field)
        if val:
            setattr(mi, field, val)

    # HTML fields
    for field in ('comments',):
        val = get(field)
        if val:
            # Fix: escape HTML special characters since the value is stored
            # as HTML; the previous chain had been garbled into identity
            # replacements.
            setattr(mi, field, val.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))

    # Date fields
    for field in ('pubdate', 'timestamp'):
        try:
            val = parse_date(get(field))
        except Exception:
            pass
        else:
            if not is_date_undefined(val):
                setattr(mi, field, val)

    # SERIES
    series = get('series')
    if series:
        # A trailing "[n]" in the series name carries the series index
        pat = re.compile(r'\[([.0-9]+)\]$')
        match = pat.search(series)
        series_index = None
        if match is not None:
            try:
                series_index = float(match.group(1))
            except Exception:
                pass
            series = series.replace(match.group(), '').strip()
        mi.series = series
        if series_index is None:
            series_index = get('series_index')
            try:
                series_index = float(series_index)
            except Exception:
                pass
        if series_index is not None:
            mi.series_index = series_index

    # RATING
    rating = get('rating')
    if rating:
        try:
            mi.rating = float(rating)
            if mi.rating < 0:
                mi.rating = 0
            if mi.rating > 10:
                mi.rating = 0
        except Exception:
            pass

    # TAGS
    tags = get_all('tags')
    if tags:
        tags = [x.strip() for s in tags for x in s.split(',') if x.strip()]
        if tags:
            mi.tags = tags

    # IDENTIFIERS
    for (k,v) in iteritems(meta_tag_ids):
        v = [x.strip() for x in v if x.strip()]
        if v:
            mi.set_identifier(k, v[0])

    return mi
class MetadataHtmlTest(unittest.TestCase):
    ''' Unit tests for HTML metadata extraction (title tag, meta tags, calibre comments). '''

    def compare_metadata(self, meta_a, meta_b):
        # Compare every metadata field this module can set
        for attr in (
            'title', 'authors', 'publisher', 'isbn', 'languages', 'pubdate', 'timestamp', 'series',
            'series_index', 'rating', 'comments', 'tags', 'identifiers'
        ):
            self.assertEqual(getattr(meta_a, attr), getattr(meta_b, attr))

    def get_stream(self, test):
        '''
        Build an HTML fixture containing progressively more metadata sources:
        title tag only, then meta tags, then calibre style comments.
        '''
        from io import BytesIO
        raw = b'''\
<html>
    <head>
'''
        if test in {'title', 'meta_single', 'meta_multi', 'comment_single', 'comment_multi'}:
            # NOTE(review): the stray '}' in the fixture below appears in the
            # original; the HTML parser tolerates it
            raw += b'''\
}
    <title>A Title Tag &amp; Title Ⓒ</title>
'''
        if test in {'meta_single', 'meta_multi', 'comment_single', 'comment_multi'}:
            raw += b'''\
        <meta name="dc:title" content="A Meta Tag &amp; Title Ⓒ" />
        <meta name="dcterms.creator.aut" content="George Washington" />
        <meta name="dc.publisher" content="Publisher A" />
        <meta name="isbn" content="1234567890" />
        <meta name="dc.language" content="English" />
        <meta name="dc.date.published" content="2019-01-01" />
        <meta name="dcterms.created" content="2018-01-01" />
        <meta name="series" content="Meta Series" />
        <meta name="seriesnumber" content="1" />
        <meta name="rating" content="" />
        <meta name="dc.description" content="" />
        <meta name="tags" content="tag a, tag b" />
        <meta name="dc.identifier.url" content="" />
        <meta name="dc.identifier" scheme="" content="invalid" />
        <meta name="dc.identifier." content="still invalid" />
        <meta name="dc.identifier.conflicting" scheme="schemes" content="are also invalid" />
        <meta name="dc.identifier.custom.subid" content="invalid too" />
'''
        if test in {'meta_multi', 'comment_single', 'comment_multi'}:
            raw += b'''\
        <meta name="title" content="A Different Meta Tag &amp; Title Ⓒ" />
        <meta name="author" content="John Adams with Thomas Jefferson" />
        <meta name="publisher" content="Publisher B" />
        <meta name="isbn" content="2345678901" />
        <meta name="dcterms.language" content="Spanish" />
        <meta name="date of publication" content="2017-01-01" />
        <meta name="timestamp" content="2016-01-01" />
        <meta name="series" content="Another Meta Series" />
        <meta name="series.index" content="2" />
        <meta name="rating" content="8" />
        <meta name="comments" content="meta "comments" ♥ HTML &amp;" />
        <meta name="tags" content="tag c" />
        <meta name="dc.identifier.url" content="http://google.com/search?q=calibre" />
'''
        if test in {'comment_single', 'comment_multi'}:
            raw += b'''\
        <!-- TITLE="A Comment Tag &amp; Title Ⓒ" -->
        <!-- AUTHOR="James Madison and James Monroe" -->
        <!-- PUBLISHER="Publisher C" -->
        <!-- ISBN="3456789012" -->
        <!-- LANGUAGE="French" -->
        <!-- PUBDATE="2015-01-01" -->
        <!-- TIMESTAMP="2014-01-01" -->
        <!-- SERIES="Comment Series" -->
        <!-- SERIESNUMBER="3" -->
        <!-- RATING="20" -->
        <!-- COMMENTS="comment "comments" ♥ HTML -- too &amp;" -->
        <!-- TAGS="tag d" -->
'''
        if test in {'comment_multi'}:
            raw += b'''\
        <!-- TITLE="Another Comment Tag &amp; Title Ⓒ" -->
        <!-- AUTHOR="John Quincy Adams" -->
        <!-- PUBLISHER="Publisher D" -->
        <!-- ISBN="4567890123" -->
        <!-- LANGUAGE="Japanese" -->
        <!-- PUBDATE="2013-01-01" -->
        <!-- TIMESTAMP="2012-01-01" -->
        <!-- SERIES="Comment Series 2" -->
        <!-- SERIESNUMBER="4" -->
        <!-- RATING="1" -->
        <!-- COMMENTS="comment "comments" ♥ HTML -- too &amp; for sure" -->
        <!-- TAGS="tag e, tag f" -->
'''
        raw += b'''\
    </head>
    <body>
    </body>
</html>
'''
        return BytesIO(raw)

    def test_input_title(self):
        # Only a <title> tag: everything else stays at defaults
        stream_meta = get_metadata(self.get_stream('title'))
        canon_meta = Metadata('A Title Tag & Title Ⓒ', [_('Unknown')])
        self.compare_metadata(stream_meta, canon_meta)

    def test_input_meta_single(self):
        # One set of meta tags; empty rating/description are ignored
        stream_meta = get_metadata(self.get_stream('meta_single'))
        canon_meta = Metadata('A Meta Tag & Title Ⓒ', ['George Washington'])
        canon_meta.publisher = 'Publisher A'
        canon_meta.languages = ['English']
        canon_meta.pubdate = parse_date('2019-01-01')
        canon_meta.timestamp = parse_date('2018-01-01')
        canon_meta.series = 'Meta Series'
        canon_meta.series_index = float(1)
        # canon_meta.rating = float(0)
        # canon_meta.comments = ''
        canon_meta.tags = ['tag a', 'tag b']
        canon_meta.set_identifiers({'isbn': '1234567890'})
        self.compare_metadata(stream_meta, canon_meta)

    def test_input_meta_multi(self):
        # Duplicate meta tags: single-value fields keep the first value,
        # multi-value fields accumulate
        stream_meta = get_metadata(self.get_stream('meta_multi'))
        canon_meta = Metadata('A Meta Tag & Title Ⓒ', ['George Washington', 'John Adams', 'Thomas Jefferson'])
        canon_meta.publisher = 'Publisher A'
        canon_meta.languages = ['English', 'Spanish']
        canon_meta.pubdate = parse_date('2019-01-01')
        canon_meta.timestamp = parse_date('2018-01-01')
        canon_meta.series = 'Meta Series'
        canon_meta.series_index = float(1)
        canon_meta.rating = float(8)
        canon_meta.comments = 'meta "comments" ♥ HTML &amp;'
        canon_meta.tags = ['tag a', 'tag b', 'tag c']
        canon_meta.set_identifiers({'isbn': '1234567890', 'url': 'http://google.com/search?q=calibre'})
        self.compare_metadata(stream_meta, canon_meta)

    def test_input_comment_single(self):
        # Comment tags override meta tags; out-of-range rating becomes 0
        stream_meta = get_metadata(self.get_stream('comment_single'))
        canon_meta = Metadata('A Comment Tag & Title Ⓒ', ['James Madison', 'James Monroe'])
        canon_meta.publisher = 'Publisher C'
        canon_meta.languages = ['French']
        canon_meta.pubdate = parse_date('2015-01-01')
        canon_meta.timestamp = parse_date('2014-01-01')
        canon_meta.series = 'Comment Series'
        canon_meta.series_index = float(3)
        canon_meta.rating = float(0)
        canon_meta.comments = 'comment "comments" ♥ HTML -- too &amp;'
        canon_meta.tags = ['tag d']
        canon_meta.set_identifiers({'isbn': '3456789012', 'url': 'http://google.com/search?q=calibre'})
        self.compare_metadata(stream_meta, canon_meta)

    def test_input_comment_multi(self):
        # Duplicate comment tags behave like duplicate meta tags
        stream_meta = get_metadata(self.get_stream('comment_multi'))
        canon_meta = Metadata('A Comment Tag & Title Ⓒ', ['James Madison', 'James Monroe', 'John Quincy Adams'])
        canon_meta.publisher = 'Publisher C'
        canon_meta.languages = ['French', 'Japanese']
        canon_meta.pubdate = parse_date('2015-01-01')
        canon_meta.timestamp = parse_date('2014-01-01')
        canon_meta.series = 'Comment Series'
        canon_meta.series_index = float(3)
        canon_meta.rating = float(0)
        canon_meta.comments = 'comment "comments" ♥ HTML -- too &amp;'
        canon_meta.tags = ['tag d', 'tag e', 'tag f']
        canon_meta.set_identifiers({'isbn': '3456789012', 'url': 'http://google.com/search?q=calibre'})
        self.compare_metadata(stream_meta, canon_meta)
def find_tests():
    ''' Return the unit tests defined in this module as a TestSuite. '''
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(MetadataHtmlTest)
| 14,888 | Python | .py | 356 | 33.508427 | 145 | 0.581633 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,522 | imp.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/imp.py | __license__ = 'GPL v3'
__copyright__ = '2008, Ashish Kulkarni <kulkarni.ashish@gmail.com>'
'''Read meta information from IMP files'''
import sys
from calibre.ebooks.metadata import MetaInformation, string_to_authors
# IMP file signatures (format versions 1 and 2)
MAGIC = (b'\x00\x01BOOKDOUG', b'\x00\x02BOOKDOUG')


def get_metadata(stream):
    """ Return metadata as a L{MetaInfo} object """
    title = 'Unknown'
    mi = MetaInformation(title, ['Unknown'])
    stream.seek(0)
    try:
        if stream.read(10) not in MAGIC:
            print('Couldn\'t read IMP header from file', file=sys.stderr)
            return mi

        def cString(skip=0):
            # Read a NUL terminated string from the stream, skipping `skip`
            # terminated strings before returning the next one.
            result = b''
            while 1:
                data = stream.read(1)
                if data == b'\x00':
                    if not skip:
                        return result.decode('utf-8')
                    skip -= 1
                    result, data = b'', b''
                result += data

        stream.read(38)  # skip past some uninteresting headers
        cString()
        category, title, author = cString(), cString(1), cString(2)

        if title:
            mi.title = title
        if author:
            mi.authors = string_to_authors(author)
            mi.author = author
        if category:
            mi.category = category
    except Exception as err:
        msg = 'Couldn\'t read metadata from imp: %s with error %s'%(mi.title, str(err))
        # Fix: print the message text directly; printing msg.encode('utf8')
        # under Python 3 wrote the bytes repr (b'...') to stderr.
        print(msg, file=sys.stderr)
    return mi
| 1,464 | Python | .py | 39 | 27.717949 | 87 | 0.562456 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,523 | kdl.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/kdl.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
import socket
from mechanize import URLError
from calibre import browser
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata.book.base import Metadata
from polyglot.builtins import codepoint_to_chr
from polyglot.urllib import parse_qs, quote_plus
# Kent District Library "What's Next" search URL; {0} is the author's last
# name and {1} is the URL-quoted title.
URL = \
    "http://ww2.kdl.org/libcat/WhatsNext.asp?AuthorLastName={0}&AuthorFirstName=&SeriesName=&BookTitle={1}&CategoryID=0&cmdSearch=Search&Search=1&grouping="

# Leading quote characters (ASCII and typographic) stripped from titles
_ignore_starts = '\'"'+''.join(codepoint_to_chr(x) for x in list(range(0x2018, 0x201e))+[0x2032, 0x2033])
def get_series(title, authors, timeout=60):
    '''
    Look up series information for the book (title, authors) using the Kent
    District Library "What's Next" service. Returns a Metadata object with
    series/series_index filled in when found.

    :param timeout: network timeout in seconds
    :raises Exception: when the KDL server times out
    '''
    mi = Metadata(title, authors)
    if title and title[0] in _ignore_starts:
        title = title[1:]
    title = re.sub(r'^(A|The|An)\s+', '', title).strip()
    if not title:
        return mi
    if isinstance(title, str):
        title = title.encode('utf-8')
    title = quote_plus(title)

    # The service searches by the author's last name only
    author = authors[0].strip()
    if not author:
        return mi
    if ',' in author:
        author = author.split(',')[0]
    else:
        author = author.split()[-1]

    url = URL.format(author, title)
    br = browser()
    try:
        raw = br.open_novisit(url, timeout=timeout).read()
    except URLError as e:
        if isinstance(e.reason, socket.timeout):
            raise Exception('KDL Server busy, try again later')
        raise
    # Fix: raw is bytes at this point; comparing against a str pattern raised
    # TypeError under Python 3, so use a bytes literal for the quick check.
    if b'see the full results' not in raw:
        return mi
    raw = xml_to_unicode(raw)[0]
    soup = BeautifulSoup(raw)
    searcharea = soup.find('div', attrs={'class':'searcharea'})
    if searcharea is None:
        return mi
    ss = searcharea.find('div', attrs={'class':'seriessearch'})
    if ss is None:
        return mi
    a = ss.find('a', href=True)
    if a is None:
        return mi
    # The series name is passed as a query parameter of the result link
    href = a['href'].partition('?')[-1]
    data = parse_qs(href)
    series = data.get('SeriesName', [])
    if not series:
        return mi
    series = series[0]
    series = re.sub(r' series$', '', series).strip()
    if series:
        mi.series = series
    # The series index follows the link as text like "3. ..."
    ns = ss.nextSibling
    if ns.contents:
        raw = str(ns.contents[0])
        raw = raw.partition('.')[0].strip()
        try:
            mi.series_index = int(raw)
        except Exception:
            pass
    return mi
if __name__ == '__main__':
    # Manual test CLI: python kdl.py <title> <author>
    import sys
    print(get_series(sys.argv[-2], [sys.argv[-1]]))
| 2,543 | Python | .py | 75 | 28.346667 | 152 | 0.639772 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,524 | author_mapper.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/author_mapper.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>
import re
from collections import deque
from calibre.utils.icu import capitalize, lower, upper
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import upper as icu_upper
def cap_author_token(token):
    '''
    Capitalize a single token of an author name: name particles (von, de,
    ...) stay lower case, runs of initials like J.K. are normalized to
    J. K., Scottish Mc/Mac prefixes keep the following letter capitalized,
    and the letter after a hyphen or apostrophe is capitalized.
    '''
    lt = lower(token)
    if lt in ('von', 'de', 'el', 'van', 'le'):
        return lt
    # no digits no spez. characters
    if re.match(r'([^\d\W]\.){2,}$', lt, re.UNICODE) is not None:
        # Normalize tokens of the form J.K. to J. K.
        parts = token.split('.')
        return '. '.join(map(capitalize, parts)).strip()
    scots_name = None
    for x in ('mc', 'mac'):
        # Detect Mc/Mac prefixes when the next letter is already upper case
        # or the token is entirely lower case
        if (token.lower().startswith(x) and len(token) > len(x) and
                (
                    token[len(x)] == upper(token[len(x)]) or
                    lt == token
                )):
            scots_name = len(x)
            break
    ans = capitalize(token)
    if scots_name is not None:
        # Re-capitalize the letter after the Mc/Mac prefix, e.g. McDonald
        ans = ans[:scots_name] + upper(ans[scots_name]) + ans[scots_name+1:]
    for x in ('-', "'"):
        # Capitalize the letter following the first hyphen/apostrophe
        idx = ans.find(x)
        if idx > -1 and len(ans) > idx+2:
            ans = ans[:idx+1] + upper(ans[idx+1]) + ans[idx+2:]
    return ans
def compile_pat(pat):
    ''' Compile an author mapper rule pattern, case-insensitively, with word-aware Unicode semantics. '''
    import regex
    flags = regex.VERSION1 | regex.WORD | regex.FULLCASE | regex.IGNORECASE | regex.UNICODE
    return regex.compile(pat, flags=flags)
def matcher(rule):
    '''
    Build a predicate from a single rule dict. The predicate is applied to
    lower-cased author names and returns True when the rule matches.
    '''
    mt = rule['match_type']
    if mt in ('one_of', 'not_one_of'):
        names = {icu_lower(part.strip()) for part in rule['query'].split('&')}
        if mt == 'one_of':
            return lambda x: x in names
        return lambda x: x not in names
    if mt in ('matches', 'not_matches'):
        pat = compile_pat(rule['query'])
        if mt == 'matches':
            return lambda x: pat.match(x) is not None
        return lambda x: pat.match(x) is None
    if mt == 'has':
        needle = icu_lower(rule['query'])
        return lambda x: needle in x
    # Unknown match type: never matches
    return lambda x: False
def apply_rules(author, rules):
    '''
    Apply the compiled author mapping rules to a single author name and
    return the resulting list of author names. Replacement rules may expand
    one author into several ("a & b"); those expansions are queued and
    re-processed, bounded by a fixed iteration limit to avoid rule cycles.

    :param rules: sequence of (rule_dict, matcher_predicate) pairs as
                  produced by compile_rules()
    '''
    ans = []
    authors = deque()
    authors.append(author)
    maxiter = 20  # guard against rules that keep rewriting each other
    while authors and maxiter > 0:
        author = authors.popleft()
        lauthor = icu_lower(author)
        maxiter -= 1
        for rule, matches in rules:
            ac = rule['action']
            if matches(lauthor):
                if ac == 'replace':
                    if 'matches' in rule['match_type']:
                        # Regex rule: substitute within the name
                        author = compile_pat(rule['query']).sub(rule['replace'], author)
                    else:
                        author = rule['replace']
                    if '&' in author:
                        # Replacement expands into multiple authors: keep a
                        # self-replacement as final, queue the rest for
                        # further rule processing (preserving order)
                        replacement_authors = []
                        self_added = False
                        for rauthor in (x.strip() for x in author.split('&')):
                            if icu_lower(rauthor) == lauthor:
                                if not self_added:
                                    ans.append(rauthor)
                                    self_added = True
                            else:
                                replacement_authors.append(rauthor)
                        authors.extendleft(reversed(replacement_authors))
                    else:
                        if icu_lower(author) == lauthor:
                            # Case change or self replacement
                            ans.append(author)
                            break
                        # New name: re-process it against all rules
                        authors.appendleft(author)
                    break
                if ac == 'capitalize':
                    ans.append(' '.join(map(cap_author_token, author.split())))
                    break
                if ac == 'lower':
                    ans.append(icu_lower(author))
                    break
                if ac == 'upper':
                    ans.append(icu_upper(author))
                    break
        else:  # no rule matched, default keep
            ans.append(author)
    # Anything still queued when the iteration limit is hit is kept as-is
    ans.extend(authors)
    return ans
def uniq(vals, kmap=icu_lower):
    ''' Remove all duplicates from vals, while preserving order. kmap must be a
    callable that returns a hashable value for every item in vals '''
    vals = vals or ()
    seen = set()
    result = []
    for item in vals:
        key = kmap(item)
        if key not in seen:
            seen.add(key)
            result.append(item)
    return result
def compile_rules(rules):
    ''' Pair each rule dict with its compiled matcher predicate. '''
    return tuple((rule, matcher(rule)) for rule in rules)
def map_authors(authors, rules=()):
    '''
    Apply compiled author mapping rules to every name in authors and return
    the de-duplicated result list (empty entries removed).
    '''
    if not authors:
        return []
    if not rules:
        return list(authors)
    mapped = []
    for author in authors:
        mapped.extend(apply_rules(author, rules))
    return uniq([x for x in mapped if x])
def find_tests():
    ''' Return the unit tests for the author mapper as a TestSuite. '''
    import unittest

    class TestAuthorMapper(unittest.TestCase):

        def test_author_mapper(self):

            def rule(action, query, replace=None, match_type='one_of'):
                # Build a rule dict in the shape compile_rules() expects
                ans = {'action':action, 'query': query, 'match_type':match_type}
                if replace is not None:
                    ans['replace'] = replace
                return ans

            def run(rules, authors, expected):
                # Accept '&'-separated strings for brevity
                if isinstance(rules, dict):
                    rules = [rules]
                if isinstance(authors, str):
                    authors = [x.strip() for x in authors.split('&')]
                if isinstance(expected, str):
                    expected = [x.strip() for x in expected.split('&')]
                ans = map_authors(authors, compile_rules(rules))
                self.assertEqual(ans, expected)

            run(rule('capitalize', 't1&t2'), 't1&x1', 'T1&x1')
            run(rule('upper', 'ta&t2'), 'ta&x1', 'TA&x1')
            run(rule('lower', 'ta&x1'), 'TA&X1', 'ta&x1')
            run(rule('replace', 't1', 't2'), 't1&x1', 't2&x1')
            run(rule('replace', '(.)1', r'\g<1>2', 'matches'), 't1&x1', 't2&x2')
            run(rule('replace', '(.)1', r'\g<1>2&3', 'matches'), 't1&x1', 't2&3&x2')
            run(rule('replace', 't1', 't2 & t3'), 't1&x1', 't2&t3&x1')
            run(rule('replace', 't1', 't1'), 't1&x1', 't1&x1')
            run([rule('replace', 't1', 't2'), rule('replace', 't2', 't1')], 't1&t2', 't1&t2')
            run(rule('replace', 'a', 'A'), 'a&b', 'A&b')
            run(rule('replace', 'a&b', 'A&B'), 'a&b', 'A&B')
            run(rule('replace', 'L', 'T', 'has'), 'L', 'T')

    return unittest.defaultTestLoader.loadTestsFromTestCase(TestAuthorMapper)
if __name__ == '__main__':
    # Run the author mapper self tests when executed as a script
    from calibre.utils.run_tests import run_cli
    run_cli(find_tests())
| 6,668 | Python | .py | 156 | 30.846154 | 97 | 0.516975 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,525 | opf3_test.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/opf3_test.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import unittest
from collections import defaultdict
from io import BytesIO
from calibre.ebooks.metadata.book import ALL_METADATA_FIELDS
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ebooks.metadata.opf3 import (
CALIBRE_PREFIX,
Author,
XPath,
apply_metadata,
ensure_is_only_raster_cover,
ensure_prefix,
expand_prefix,
parse_prefixes,
read_authors,
read_book_producers,
read_comments,
read_identifiers,
read_languages,
read_last_modified,
read_link_maps,
read_metadata,
read_prefixes,
read_pubdate,
read_publisher,
read_raster_cover,
read_rating,
read_refines,
read_series,
read_tags,
read_timestamp,
read_title,
read_title_sort,
read_user_categories,
read_user_metadata,
reserved_prefixes,
set_application_id,
set_authors,
set_book_producers,
set_comments,
set_identifiers,
set_languages,
set_link_maps,
set_pubdate,
set_publisher,
set_rating,
set_series,
set_tags,
set_timestamp,
set_title,
set_user_categories,
set_user_metadata,
)
# This import is needed to prevent a test from running slowly
from calibre.ebooks.oeb.polish.pretty import pretty_opf, pretty_xml_tree # noqa
from calibre.utils.xml_parse import safe_xml_fromstring
# Reference the reader/writer pairs so linters do not flag the imports as unused
read_user_categories, set_user_categories, read_link_maps, set_link_maps

# Minimal OPF 3 package document; {metadata} and {manifest} are interpolated
# by TestOPF3.get_opf()
TEMPLATE = '''<package xmlns="http://www.idpf.org/2007/opf" version="3.0" prefix="calibre: %s" unique-identifier="uid"><metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">{metadata}</metadata><manifest>{manifest}</manifest></package>''' % CALIBRE_PREFIX # noqa

# Empty refines map used when a test does not care about <meta refines=...>
default_refines = defaultdict(list)
class TestOPF3(unittest.TestCase):
    # Short alias for assertEqual, used throughout these tests
    ae = unittest.TestCase.assertEqual

    def get_opf(self, metadata='', manifest=''):
        ''' Build an OPF 3 package DOM with the given metadata and manifest XML fragments. '''
        return safe_xml_fromstring(TEMPLATE.format(metadata=metadata, manifest=manifest))
    def test_prefix_parsing(self):  # {{{
        ''' Vocabulary prefix parsing, expansion and ensure_prefix() behavior. '''
        self.ae(parse_prefixes('foaf: http://xmlns.com/foaf/spec/\n dbp: http://dbpedia.org/ontology/'),
                {'foaf':'http://xmlns.com/foaf/spec/', 'dbp': 'http://dbpedia.org/ontology/'})
        # Reserved prefixes expand only when used in prefix position
        for raw, expanded in (
                ('onix:xxx', reserved_prefixes['onix'] + ':xxx'),
                ('xxx:onix', 'xxx:onix'),
                ('xxx', 'xxx'),
        ):
            self.ae(expand_prefix(raw, reserved_prefixes.copy()), expanded)
        root = self.get_opf()
        ensure_prefix(root, read_prefixes(root), 'calibre', 'https://calibre-ebook.com')
        # Reserved prefixes need not be declared on the package element
        ensure_prefix(root, read_prefixes(root), 'marc', reserved_prefixes['marc'])
        self.ae(parse_prefixes(root.get('prefix')), {'calibre': 'https://calibre-ebook.com'})
    # }}}
    def test_identifiers(self):  # {{{
        ''' Reading/writing dc:identifier elements and the calibre application id. '''
        def idt(val, scheme=None, iid=''):
            # Build a dc:identifier element, optionally with an opf:scheme
            return '<dc:identifier id="{id}" {scheme}>{val}</dc:identifier>'.format(scheme=('opf:scheme="%s"'%scheme if scheme else ''), val=val, id=iid)

        def ri(root):
            return dict(read_identifiers(root, read_prefixes(root), default_refines))
        for m, result in (
            (idt('abc', 'ISBN'), {}),
            (idt('isbn:9780230739581'), {'isbn':['9780230739581']}),
            (idt('urn:isbn:9780230739581'), {'isbn':['9780230739581']}),
            (idt('9780230739581', 'ISBN'), {'isbn':['9780230739581']}),
            (idt('isbn:9780230739581', 'ISBN'), {'isbn':['9780230739581']}),
            (idt('key:val'), {'key':['val']}),
            (idt('url:http://x'), {'url':['http://x']}),
            (idt('a:1')+idt('a:2'), {'a':['1', '2']}),
        ):
            self.ae(result, ri(self.get_opf(m)))
        root = self.get_opf(metadata=idt('a:1')+idt('a:2')+idt('calibre:x')+idt('uuid:y'))
        mi = read_metadata(root)
        self.ae(mi.application_id, 'x')
        set_application_id(root, {}, default_refines, 'y')
        mi = read_metadata(root)
        self.ae(mi.application_id, 'y')
        # Merging keeps existing identifiers unless force_identifiers is used
        root = self.get_opf(metadata=idt('i:1', iid='uid') + idt('r:1') + idt('o:1'))
        set_identifiers(root, read_prefixes(root), default_refines, {'i':'2', 'o':'2'})
        self.ae({'i':['2', '1'], 'r':['1'], 'o':['2']}, ri(root))
        # The package unique-identifier element must survive
        self.ae(1, len(XPath('//dc:identifier[@id="uid"]')(root)))
        root = self.get_opf(metadata=idt('i:1', iid='uid') + idt('r:1') + idt('o:1'))
        set_identifiers(root, read_prefixes(root), default_refines, {'i':'2', 'o':'2'}, force_identifiers=True)
        self.ae({'i':['2', '1'], 'o':['2']}, ri(root))
        root = self.get_opf(metadata=idt('i:1', iid='uid') + idt('r:1') + idt('o:1'))
        set_application_id(root, {}, default_refines, 'y')
        mi = read_metadata(root)
        self.ae(mi.application_id, 'y')
    # }}}
def test_title(self): # {{{
def rt(root):
return read_title(root, read_prefixes(root), read_refines(root))
def st(root, title, title_sort=None):
set_title(root, read_prefixes(root), read_refines(root), title, title_sort)
return rt(root)
root = self.get_opf('''<dc:title/><dc:title id='t'>xxx</dc:title>''')
self.ae(rt(root), 'xxx')
self.ae(st(root, 'abc', 'cba'), 'abc')
self.ae(read_title_sort(root, read_prefixes(root), read_refines(root)), 'cba')
root = self.get_opf('''<dc:title>yyy</dc:title><dc:title id='t'>x xx
</dc:title><meta refines='#t' property='title-type'>main</meta><meta name="calibre:title_sort" content="sorted"/>''')
self.ae(rt(root), 'x xx')
self.ae(read_title_sort(root, read_prefixes(root), read_refines(root)), 'sorted')
self.ae(st(root, 'abc'), 'abc')
# }}}
def test_languages(self): # {{{
def rl(root):
return read_languages(root, read_prefixes(root), read_refines(root))
def st(root, languages):
set_languages(root, read_prefixes(root), read_refines(root), languages)
return rl(root)
root = self.get_opf('''<dc:language>en-US</dc:language><dc:language>fr</dc:language>''')
self.ae(['eng', 'fra'], rl(root))
self.ae(st(root, ['de', 'de', 'es']), ['deu', 'spa'])
self.ae(st(root, []), [])
# }}}
    def test_authors(self):  # {{{
        '''Reading and writing of dc:creator author entries.'''
        def rl(root):
            return read_authors(root, read_prefixes(root), read_refines(root))

        def st(root, authors):
            set_authors(root, read_prefixes(root), read_refines(root), authors)
            return rl(root)
        root = self.get_opf('''<dc:creator>a b</dc:creator>''')
        self.ae([Author('a b', None)], rl(root))
        # When a creator is explicitly refined with role=aut, only such
        # creators are treated as authors
        for scheme in ('scheme="marc:relators"', ''):
            root = self.get_opf('''<dc:creator>a b</dc:creator><dc:creator id="1">c d</dc:creator>'''
                    '''<meta refines="#1" property="role" %s>aut</meta>''' % scheme)
            self.ae([Author('c d', None)], rl(root))
        # The legacy opf:role attribute is also recognized
        root = self.get_opf('''<dc:creator>a b</dc:creator><dc:creator opf:role="aut">c d</dc:creator>''')
        self.ae([Author('c d', None)], rl(root))
        # Sort values come from either opf:file-as or a refines meta
        root = self.get_opf('''<dc:creator opf:file-as="b, a">a b</dc:creator><dc:creator id="1">c d</dc:creator>
            <meta refines="#1" property="file-as">d, c</meta>''')
        self.ae([Author('a b', 'b, a'), Author('c d', 'd, c')], rl(root))
        authors = [Author('x y', 'y, x'), Author('u i', None)]
        self.ae(authors, st(root, authors))
        # Writing authors declares the calibre prefix on the package element
        self.ae(root.get('prefix'), 'calibre: %s' % CALIBRE_PREFIX)
        root = self.get_opf('''<dc:creator>a b</dc:creator><dc:creator opf:role="aut">c d</dc:creator>''')
        self.ae([Author('c d', None)], rl(root))
        self.ae(authors, st(root, authors))
        # A creator with multiple role refinements including aut still counts
        root = self.get_opf('''<dc:creator id="1">a b</dc:creator>'''
                '''<meta refines="#1" property="role">aut</meta>'''
                '''<meta refines="#1" property="role">cow</meta>''')
        self.ae([Author('a b', None)], rl(root))
    # }}}
def test_book_producer(self): # {{{
def rl(root):
return read_book_producers(root, read_prefixes(root), read_refines(root))
def st(root, producers):
set_book_producers(root, read_prefixes(root), read_refines(root), producers)
return rl(root)
for scheme in ('scheme="marc:relators"', ''):
root = self.get_opf('''<dc:contributor>a b</dc:contributor><dc:contributor id="1">c d</dc:contributor>'''
'''<meta refines="#1" property="role" %s>bkp</meta>''' % scheme)
self.ae(['c d'], rl(root))
root = self.get_opf('''<dc:contributor>a b</dc:contributor><dc:contributor opf:role="bkp">c d</dc:contributor>''')
self.ae(['c d'], rl(root))
self.ae(['12'], st(root, ['12']))
# }}}
    def test_dates(self):  # {{{
        '''Reading and writing of the publication date and calibre timestamp.'''
        from calibre.utils.date import utcnow

        def rl(root):
            p, r = read_prefixes(root), read_refines(root)
            return read_pubdate(root, p, r), read_timestamp(root, p, r)

        def st(root, pd, ts):
            p, r = read_prefixes(root), read_refines(root)
            set_pubdate(root, p, r, pd)
            set_timestamp(root, p, r, ts)
            return rl(root)

        def ae(root, y1=None, y2=None):
            # Assert only on the year (or None-ness) of (pubdate, timestamp)
            x1, x2 = rl(root)
            for x, y in ((x1, y1), (x2, y2)):
                if y is None:
                    self.assertIsNone(x)
                else:
                    self.ae(y, getattr(x, 'year', None))
        root = self.get_opf('''<dc:date>1999-3-2</dc:date><meta property="calibre:timestamp" scheme="dcterms:W3CDTF">2001</meta>''')
        ae(root, 1999, 2001)
        n = utcnow()
        q = n.replace(microsecond=0)
        # The written timestamp round-trips with microseconds stripped
        self.ae(st(root, n, n), (n, q))
        # The legacy calibre:timestamp meta name is also recognized
        root = self.get_opf('''<dc:date>1999-3-2</dc:date><meta name="calibre:timestamp" content="2001-1-1"/>''')
        ae(root, 1999, 2001)
        root = self.get_opf('''<meta property="dcterms:modified">2003</meta>''')
        self.ae(read_last_modified(root, read_prefixes(root), read_refines(root)).year, 2003)
    # }}}
def test_comments(self): # {{{
def rt(root):
return read_comments(root, read_prefixes(root), read_refines(root))
def st(root, val):
set_comments(root, read_prefixes(root), read_refines(root), val)
return rt(root)
root = self.get_opf('''<dc:description><span>one</span></dc:description><dc:description> xxx</dc:description>''')
self.ae('<span>one</span>\nxxx', rt(root))
self.ae('<a>p</a>', st(root, '<a>p</a> '))
# }}}
def test_publisher(self): # {{{
def rt(root):
return read_publisher(root, read_prefixes(root), read_refines(root))
def st(root, val):
set_publisher(root, read_prefixes(root), read_refines(root), val)
return rt(root)
root = self.get_opf('''<dc:publisher> one </dc:publisher><dc:publisher> xxx</dc:publisher>''')
self.ae('one', rt(root))
self.ae('<a>p</a>', st(root, '<a>p</a> '))
# }}}
def test_raster_cover(self): # {{{
def rt(root):
return read_raster_cover(root, read_prefixes(root), read_refines(root))
root = self.get_opf('<meta name="cover" content="cover"/>', '<item id="cover" media-type="image/jpeg" href="x.jpg"/>')
self.ae('x.jpg', rt(root))
root = self.get_opf('<meta name="cover" content="cover"/>',
'<item id="cover" media-type="image/jpeg" href="x.jpg"/><item media-type="image/jpeg" href="y.jpg" properties="cover-image"/>')
self.ae('y.jpg', rt(root))
ensure_is_only_raster_cover(root, read_prefixes(root), read_refines(root), 'x.jpg')
self.ae('x.jpg', rt(root))
self.ae(['x.jpg'], root.xpath('//*[@properties="cover-image"]/@href'))
self.assertFalse(root.xpath('//*[@name]'))
# }}}
def test_tags(self): # {{{
def rt(root):
return read_tags(root, read_prefixes(root), read_refines(root))
def st(root, val):
set_tags(root, read_prefixes(root), read_refines(root), val)
return rt(root)
root = self.get_opf('''<dc:subject> one, two </dc:subject><dc:subject> xxx</dc:subject>''')
self.ae('one,two,xxx'.split(','), rt(root))
self.ae('1,2,3'.split(','), st(root, '1,2,3'.split(',')))
# }}}
def test_rating(self): # {{{
def rt(root):
return read_rating(root, read_prefixes(root), read_refines(root))
def st(root, val):
set_rating(root, read_prefixes(root), read_refines(root), val)
return rt(root)
root = self.get_opf('''<meta name="calibre:rating" content="3"/>''')
self.ae(3, rt(root))
root = self.get_opf('''<meta name="calibre:rating" content="3"/><meta property="calibre:rating">5</meta>''')
self.ae(5, rt(root))
self.ae(1, st(root,1))
# }}}
def test_series(self): # {{{
def rt(root):
return read_series(root, read_prefixes(root), read_refines(root))
def st(root, val, i):
set_series(root, read_prefixes(root), read_refines(root), val, i)
return rt(root)
root = self.get_opf('''<meta name="calibre:series" content="xxx"/><meta name="calibre:series_index" content="5"/>''')
self.ae(('xxx', 5), rt(root))
root = self.get_opf('''<meta name="calibre:series" content="xxx"/><meta name="calibre:series_index" content="5"/>'''
'<meta property="belongs-to-collection" id="c02">yyy</meta><meta refines="#c02" property="collection-type">series</meta>'
'<meta refines="#c02" property="group-position">2.1</meta>')
self.ae(('yyy', 2.1), rt(root))
self.ae(('zzz', 3.3), st(root, 'zzz', 3.3))
# }}}
    def test_user_metadata(self):  # {{{
        '''Reading and writing of calibre user (custom column) metadata.'''
        def rt(root, name):
            # Dispatch to the module level read_{name} function by name
            f = globals()['read_' + name]
            return f(root, read_prefixes(root), read_refines(root))

        def st(root, name, val):
            f = globals()['set_' + name]
            f(root, read_prefixes(root), read_refines(root), val)
            return rt(root, name)
        for name in 'link_maps user_categories'.split():
            root = self.get_opf('''<meta name="calibre:%s" content='{"1":1}'/>''' % name)
            self.ae({'1':1}, rt(root, name))
            # The property-style element overrides the legacy meta name
            root = self.get_opf(f'''<meta name="calibre:{name}" content='{{"1":1}}'/><meta property="calibre:{name}">{{"2":2}}</meta>''')
            self.ae({'2':2}, rt(root, name))
            # Keys are serialized via JSON, so integer keys become strings
            self.ae({'3':3}, st(root, name, {3:3}))

        def ru(root):
            return read_user_metadata(root, read_prefixes(root), read_refines(root))

        def su(root, val):
            set_user_metadata(root, read_prefixes(root), read_refines(root), val)
            return ru(root)
        root = self.get_opf('''<meta name="calibre:user_metadata:#a" content='{"1":1}'/>''')
        self.ae({'#a': {'1': 1, 'is_multiple': dict()}}, ru(root))
        # A single property-style user_metadata element replaces the per-column
        # legacy metas
        root = self.get_opf('''<meta name="calibre:user_metadata:#a" content='{"1":1}'/>'''
                '''<meta property="calibre:user_metadata">{"#b":{"2":2}}</meta>''')
        self.ae({'#b': {'2': 2, 'is_multiple': dict()}}, ru(root))
        self.ae({'#c': {'3': 3, 'is_multiple': {}, 'is_multiple2': dict()}}, su(root, {'#c':{'3':3}}))
    # }}}
    def test_against_opf2(self):  # {{{
        '''Round-trip a full OPF 2 document through the OPF 3 reader/writer
        and check the result agrees with the legacy OPF 2 parser.'''
        # opf2 {{{
        raw = '''<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="uuid_id" version="2.0">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
<dc:identifier opf:scheme="calibre" id="calibre_id">1698</dc:identifier>
<dc:identifier opf:scheme="uuid" id="uuid_id">27106d11-0721-44bc-bcdd-2840f31aaec0</dc:identifier>
<dc:title>DOCX Demo</dc:title>
<dc:creator opf:file-as="Goyal, Kovid" opf:role="aut">Kovid Goyal</dc:creator>
<dc:contributor opf:file-as="calibre" opf:role="bkp">calibre (2.57.1) [http://calibre-ebook.com]</dc:contributor>
<dc:date>2016-02-17T10:53:08+00:00</dc:date>
<dc:description>Demonstration of DOCX support in calibre</dc:description>
<dc:publisher>Kovid Goyal</dc:publisher>
<dc:identifier opf:scheme="K">xxx</dc:identifier>
<dc:language>eng</dc:language>
<dc:subject>calibre</dc:subject>
<dc:subject>conversion</dc:subject>
<dc:subject>docs</dc:subject>
<dc:subject>ebook</dc:subject>
<meta content="{&quot;Kovid Goyal&quot;: &quot;https://kovidgoyal.net&quot;}" name="calibre:author_link_map"/>
<meta content="Demos" name="calibre:series"/>
<meta content="1" name="calibre:series_index"/>
<meta content="10" name="calibre:rating"/>
<meta content="2015-12-11T16:28:36+00:00" name="calibre:timestamp"/>
<meta content="DOCX Demo" name="calibre:title_sort"/>
<meta content="{&quot;crew.crow&quot;: [], &quot;crew.moose&quot;: [], &quot;crew&quot;: []}" name="calibre:user_categories"/>
<meta name="calibre:user_metadata:#number" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;Number&quot;,
&quot;rec_index&quot;: 29, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 12, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;number_format&quot;: null}, &quot;search_terms&quot;:
[&quot;#number&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;int&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;: 31, &quot;is_custom&quot;:
true, &quot;label&quot;: &quot;number&quot;, &quot;table&quot;:
&quot;custom_column_12&quot;, &quot;is_multiple&quot;: null,
&quot;is_category&quot;: false}"/>
<meta name="calibre:user_metadata:#genre" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;Genre&quot;,
&quot;rec_index&quot;: 26, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 9, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;use_decorations&quot;: 0}, &quot;search_terms&quot;:
[&quot;#genre&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;text&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;: &quot;Demos&quot;,
&quot;is_custom&quot;: true, &quot;label&quot;: &quot;genre&quot;,
&quot;table&quot;: &quot;custom_column_9&quot;,
&quot;is_multiple&quot;: null, &quot;is_category&quot;: true}"/>
<meta name="calibre:user_metadata:#commetns"
content="{&quot;kind&quot;: &quot;field&quot;, &quot;column&quot;:
&quot;value&quot;, &quot;is_csp&quot;: false, &quot;name&quot;:
&quot;My Comments&quot;, &quot;rec_index&quot;: 23,
&quot;#extra#&quot;: null, &quot;colnum&quot;: 13,
&quot;is_multiple2&quot;: {}, &quot;category_sort&quot;:
&quot;value&quot;, &quot;display&quot;: {}, &quot;search_terms&quot;:
[&quot;#commetns&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;comments&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;:
&quot;&lt;div&gt;&lt;b&gt;&lt;i&gt;Testing&lt;/i&gt;&lt;/b&gt; extra
&lt;font
color=\\&quot;#aa0000\\&quot;&gt;comments&lt;/font&gt;&lt;/div&gt;&quot;,
&quot;is_custom&quot;: true, &quot;label&quot;: &quot;commetns&quot;,
&quot;table&quot;: &quot;custom_column_13&quot;,
&quot;is_multiple&quot;: null, &quot;is_category&quot;: false}"/>
<meta name="calibre:user_metadata:#formats" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;Formats&quot;,
&quot;rec_index&quot;: 25, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 4, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;composite_template&quot;: &quot;{formats}&quot;,
&quot;contains_html&quot;: false, &quot;use_decorations&quot;: 0,
&quot;composite_sort&quot;: &quot;text&quot;,
&quot;make_category&quot;: false}, &quot;search_terms&quot;:
[&quot;#formats&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;composite&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;: &quot;AZW3, DOCX, EPUB&quot;,
&quot;is_custom&quot;: true, &quot;label&quot;: &quot;formats&quot;,
&quot;table&quot;: &quot;custom_column_4&quot;,
&quot;is_multiple&quot;: null, &quot;is_category&quot;: false}"/>
<meta name="calibre:user_metadata:#rating" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;My Rating&quot;,
&quot;rec_index&quot;: 30, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 1, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;: {},
&quot;search_terms&quot;: [&quot;#rating&quot;],
&quot;is_editable&quot;: true, &quot;datatype&quot;:
&quot;rating&quot;, &quot;link_column&quot;: &quot;value&quot;,
&quot;#value#&quot;: 10, &quot;is_custom&quot;: true,
&quot;label&quot;: &quot;rating&quot;, &quot;table&quot;:
&quot;custom_column_1&quot;, &quot;is_multiple&quot;: null,
&quot;is_category&quot;: true}"/>
<meta name="calibre:user_metadata:#series" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;My Series2&quot;,
&quot;rec_index&quot;: 31, &quot;#extra#&quot;: 1.0,
&quot;colnum&quot;: 5, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;: {},
&quot;search_terms&quot;: [&quot;#series&quot;],
&quot;is_editable&quot;: true, &quot;datatype&quot;:
&quot;series&quot;, &quot;link_column&quot;: &quot;value&quot;,
&quot;#value#&quot;: &quot;s&quot;, &quot;is_custom&quot;: true,
&quot;label&quot;: &quot;series&quot;, &quot;table&quot;:
&quot;custom_column_5&quot;, &quot;is_multiple&quot;: null,
&quot;is_category&quot;: true}"/>
<meta name="calibre:user_metadata:#tags" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;My Tags&quot;,
&quot;rec_index&quot;: 33, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 11, &quot;is_multiple2&quot;:
{&quot;ui_to_list&quot;: &quot;,&quot;, &quot;cache_to_list&quot;:
&quot;|&quot;, &quot;list_to_ui&quot;: &quot;, &quot;},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;is_names&quot;: false, &quot;description&quot;: &quot;A tag like
column for me&quot;}, &quot;search_terms&quot;: [&quot;#tags&quot;],
&quot;is_editable&quot;: true, &quot;datatype&quot;: &quot;text&quot;,
&quot;link_column&quot;: &quot;value&quot;, &quot;#value#&quot;:
[&quot;t1&quot;, &quot;t2&quot;], &quot;is_custom&quot;: true,
&quot;label&quot;: &quot;tags&quot;, &quot;table&quot;:
&quot;custom_column_11&quot;, &quot;is_multiple&quot;: &quot;|&quot;,
&quot;is_category&quot;: true}"/>
<meta name="calibre:user_metadata:#yesno" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;Yes/No&quot;,
&quot;rec_index&quot;: 34, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 7, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;: {},
&quot;search_terms&quot;: [&quot;#yesno&quot;],
&quot;is_editable&quot;: true, &quot;datatype&quot;: &quot;bool&quot;,
&quot;link_column&quot;: &quot;value&quot;, &quot;#value#&quot;: false,
&quot;is_custom&quot;: true, &quot;label&quot;: &quot;yesno&quot;,
&quot;table&quot;: &quot;custom_column_7&quot;,
&quot;is_multiple&quot;: null, &quot;is_category&quot;: false}"/>
<meta name="calibre:user_metadata:#myenum" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;My Enum&quot;,
&quot;rec_index&quot;: 28, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 6, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;enum_colors&quot;: [], &quot;enum_values&quot;:
[&quot;One&quot;, &quot;Two&quot;, &quot;Three&quot;],
&quot;use_decorations&quot;: 0}, &quot;search_terms&quot;:
[&quot;#myenum&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;enumeration&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;: &quot;Two&quot;,
&quot;is_custom&quot;: true, &quot;label&quot;: &quot;myenum&quot;,
&quot;table&quot;: &quot;custom_column_6&quot;,
&quot;is_multiple&quot;: null, &quot;is_category&quot;: true}"/>
<meta name="calibre:user_metadata:#isbn" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;ISBN&quot;,
&quot;rec_index&quot;: 27, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 3, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;composite_template&quot;:
&quot;{identifiers:select(isbn)}&quot;, &quot;contains_html&quot;:
false, &quot;use_decorations&quot;: 0, &quot;composite_sort&quot;:
&quot;text&quot;, &quot;make_category&quot;: false},
&quot;search_terms&quot;: [&quot;#isbn&quot;], &quot;is_editable&quot;:
true, &quot;datatype&quot;: &quot;composite&quot;,
&quot;link_column&quot;: &quot;value&quot;, &quot;#value#&quot;:
&quot;&quot;, &quot;is_custom&quot;: true, &quot;label&quot;:
&quot;isbn&quot;, &quot;table&quot;: &quot;custom_column_3&quot;,
&quot;is_multiple&quot;: null, &quot;is_category&quot;: false}"/>
<meta name="calibre:user_metadata:#authors" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;My Authors&quot;,
&quot;rec_index&quot;: 22, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 10, &quot;is_multiple2&quot;:
{&quot;ui_to_list&quot;: &quot;&amp;&quot;, &quot;cache_to_list&quot;:
&quot;|&quot;, &quot;list_to_ui&quot;: &quot; &amp; &quot;},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;is_names&quot;: true}, &quot;search_terms&quot;:
[&quot;#authors&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;text&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;: [&quot;calibre, Kovid
Goyal&quot;], &quot;is_custom&quot;: true, &quot;label&quot;:
&quot;authors&quot;, &quot;table&quot;: &quot;custom_column_10&quot;,
&quot;is_multiple&quot;: &quot;|&quot;, &quot;is_category&quot;:
true}"/>
<meta name="calibre:user_metadata:#date" content="{&quot;kind&quot;:
&quot;field&quot;, &quot;column&quot;: &quot;value&quot;,
&quot;is_csp&quot;: false, &quot;name&quot;: &quot;My Date&quot;,
&quot;rec_index&quot;: 24, &quot;#extra#&quot;: null,
&quot;colnum&quot;: 2, &quot;is_multiple2&quot;: {},
&quot;category_sort&quot;: &quot;value&quot;, &quot;display&quot;:
{&quot;date_format&quot;: &quot;dd-MM-yyyy&quot;,
&quot;description&quot;: &quot;&quot;}, &quot;search_terms&quot;:
[&quot;#date&quot;], &quot;is_editable&quot;: true,
&quot;datatype&quot;: &quot;datetime&quot;, &quot;link_column&quot;:
&quot;value&quot;, &quot;#value#&quot;: {&quot;__value__&quot;:
&quot;2016-02-17T10:54:15+00:00&quot;, &quot;__class__&quot;:
&quot;datetime.datetime&quot;}, &quot;is_custom&quot;: true,
&quot;label&quot;: &quot;date&quot;, &quot;table&quot;:
&quot;custom_column_2&quot;, &quot;is_multiple&quot;: null,
&quot;is_category&quot;: false}"/>
</metadata><manifest><item href="start.html" media-type="text/html" id="m1"/></manifest><spine><itemref idref="m1"/></spine>
</package>'''  # }}}

        def compare_metadata(mi2, mi3):
            # Custom column metadata must agree exactly
            self.ae(mi2.get_all_user_metadata(False), mi3.get_all_user_metadata(False))
            for field in ALL_METADATA_FIELDS:
                # (substring membership test, deliberately skips manifest/spine)
                if field not in 'manifest spine':
                    v2, v3 = getattr(mi2, field, None), getattr(mi3, field, None)
                    self.ae(v2, v3, f'{field}: {v2!r} != {v3!r}')
        # Parse with the legacy OPF 2 parser
        mi2 = OPF(BytesIO(raw.encode('utf-8'))).to_book_metadata()
        # Parse the same document with the OPF 3 reader
        root = safe_xml_fromstring(raw)
        root.set('version', '3.0')
        mi3, _, raster_cover, first_spine_item = read_metadata(root, return_extra_data=True)
        self.assertIsNone(raster_cover)
        self.ae('start.html', first_spine_item)
        compare_metadata(mi2, mi3)
        # Writing metadata back and re-reading must round-trip
        apply_metadata(root, mi3, force_identifiers=True)
        nmi = read_metadata(root)
        compare_metadata(mi3, nmi)
        mi3.tags = []
        mi3.set('#tags', [])
        mi3.set('#number', 0)
        mi3.set('#commetns', '')
        apply_metadata(root, mi3, update_timestamp=True)
        # All legacy name="" metas should have been removed
        self.assertFalse(root.xpath('//*/@name'))
        nmi = read_metadata(root)
        # Without apply_null, empty values do not overwrite existing ones
        self.assertEqual(mi2.tags, nmi.tags)
        self.assertEqual(mi2.get('#tags'), nmi.get('#tags'))
        self.assertEqual(mi2.get('#commetns'), nmi.get('#commetns'))
        self.assertEqual(0, nmi.get('#number'))
        # With apply_null, empty values do clear existing ones
        apply_metadata(root, mi3, apply_null=True)
        nmi = read_metadata(root)
        self.assertFalse(nmi.tags)
        self.assertFalse(nmi.get('#tags'))
        self.assertFalse(nmi.get('#commetns'))
        # Cover handling on apply_metadata
        self.assertIsNone(apply_metadata(root, mi3, cover_data=b'x', cover_prefix='xxx', add_missing_cover=False))
        self.ae('xxx/cover.jpg', apply_metadata(root, mi3, cover_data=b'x', cover_prefix='xxx'))
    # }}}
# Run tests {{{

def suite():
    # Collect every test defined on TestOPF3 into a single suite
    return unittest.TestLoader().loadTestsFromTestCase(TestOPF3)


class TestRunner(unittest.main):
    # unittest.main subclass that always runs suite() instead of
    # performing test discovery

    def createTests(self):
        self.test = suite()


def run(verbosity=4):
    # Entry point used to run this module's tests programmatically
    TestRunner(verbosity=verbosity, exit=False)


if __name__ == '__main__':
    run(verbosity=4)
# }}}
| 31,370 | Python | .py | 552 | 47.581522 | 304 | 0.589578 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,526 | rb.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/rb.py | __license__ = 'GPL v3'
__copyright__ = '2008, Ashish Kulkarni <kulkarni.ashish@gmail.com>'
'''Read meta information from RB files'''
import struct
import sys
from calibre import prints
from calibre.ebooks.metadata import MetaInformation, string_to_authors
MAGIC = b'\xb0\x0c\xb0\x0c\x02\x00NUVO\x00\x00\x00\x00'
def get_metadata(stream):
    '''
    Read metadata from an eBookwise/Rocket eBook (.rb) file.

    :param stream: A seekable binary file-like object; it is rewound
        before reading.
    :return: A ``MetaInformation`` object. If the RB header or the INFO
        section cannot be located, a default object with title/author
        'Unknown' is returned. Errors while parsing the INFO section are
        logged to stderr and re-raised.
    '''
    title = 'Unknown'
    mi = MetaInformation(title, ['Unknown'])
    stream.seek(0)
    try:
        if not stream.read(14) == MAGIC:
            print('Couldn\'t read RB header from file', file=sys.stderr)
            return mi
        stream.read(10)  # skip the remainder of the fixed header

        def read_i32():
            # All integers in the RB format are little-endian 32-bit
            return struct.unpack('<I', stream.read(4))[0]

        stream.seek(read_i32())  # jump to the table of contents

        toc_count = read_i32()

        # Scan the TOC for the INFO section (flag == 2)
        for _ in range(toc_count):
            stream.read(32)  # skip per-entry data preceding length/offset/flag
            length, offset, flag = read_i32(), read_i32(), read_i32()
            if flag == 2:
                break
        else:
            print('Couldn\'t find INFO from RB file', file=sys.stderr)
            return mi

        stream.seek(offset)
        info = stream.read(length).decode('utf-8', 'replace').splitlines()
        for line in info:
            if '=' not in line:
                continue
            # Split on the first '=' only, so titles/authors that
            # themselves contain '=' are not truncated (plain split()
            # would raise ValueError on such lines)
            key, value = line.split('=', 1)
            if key.strip() == 'TITLE':
                mi.title = value.strip()
            elif key.strip() == 'AUTHOR':
                mi.authors = string_to_authors(value)
    except Exception as err:
        msg = 'Couldn\'t read metadata from rb: %s with error %s'%(mi.title, str(err))
        prints(msg, file=sys.stderr)
        raise
    return mi
| 1,673 | Python | .py | 45 | 28.422222 | 86 | 0.578135 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,527 | mobi.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/mobi.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
import io
import numbers
import os
from struct import pack, unpack
from calibre.ebooks import normalize
from calibre.ebooks.mobi import MAX_THUMB_DIMEN, MobiError
from calibre.ebooks.mobi.langcodes import iana2mobi
from calibre.ebooks.mobi.utils import rescale_image
from calibre.utils.date import now as nowf
from calibre.utils.imghdr import what
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import codepoint_to_chr
'''
Retrieve and modify in-place Mobipocket book metadata.
'''
def is_image(ss):
    # True iff ss is not None and its first 200 bytes are recognized as a
    # known image format by the imghdr-style sniffer
    return ss is not None and what(None, ss[:200]) is not None
class StreamSlicer:
    '''
    A mutable window onto a byte range of a seekable stream.

    Supports ``len()``, integer and slice reads/writes (offsets relative
    to ``start``), wholesale replacement of the window via :meth:`update`
    and truncation of the underlying stream.
    '''

    def __init__(self, stream, start=0, stop=None):
        self._stream = stream
        self.start = start
        if stop is None:
            # Discover the stream length by seeking to its end
            stream.seek(0, 2)
            stop = stream.tell()
        self.stop = stop
        self._len = stop - start

    def __len__(self):
        return self._len

    def __getitem__(self, key):
        if isinstance(key, numbers.Integral):
            self._stream.seek(self.start + key)
            return self._stream.read(1)
        if isinstance(key, slice):
            lo, hi, step = key.indices(self._len)
            if step < 0:
                lo, hi = hi, lo
            count = hi - lo
            if count <= 0:
                return b""
            self._stream.seek(self.start + lo)
            chunk = self._stream.read(count)
            return chunk if step == 1 else chunk[::step]
        raise TypeError("stream indices must be integers")

    def __setitem__(self, key, value):
        if isinstance(key, numbers.Integral):
            if len(value) != 1:
                raise ValueError("key and value lengths must match")
            self._stream.seek(self.start + key)
            return self._stream.write(value)
        if isinstance(key, slice):
            lo, hi, step = key.indices(self._len)
            if step < 0:
                lo, hi = hi, lo
            count = hi - lo
            if step != 1:
                value = value[::step]
            if len(value) != count:
                raise ValueError("key and value lengths must match")
            self._stream.seek(self.start + lo)
            return self._stream.write(value)
        raise TypeError("stream indices must be integers")

    def update(self, data_blocks):
        # Replace everything from self.start onwards with the given blocks
        self._stream.seek(self.start)
        self._stream.truncate()
        for block in data_blocks:
            self._stream.write(block)

    def truncate(self, value):
        self._stream.truncate(value)
class MetadataUpdater:
DRM_KEY_SIZE = 48
    def __init__(self, stream):
        '''
        Parse the PalmDB and MOBI headers from ``stream`` and cache the
        information needed to rewrite metadata in place.
        '''
        self.stream = stream
        data = self.data = StreamSlicer(stream)
        self.type = data[60:68]

        if self.type != b"BOOKMOBI":
            # Not a MOBI book: leave the object only partially initialized.
            # update() checks self.type and raises for such files.
            return

        self.nrecs, = unpack('>H', data[76:78])
        record0 = self.record0 = self.record(0)
        mobi_header_length, = unpack('>I', record0[0x14:0x18])
        if not mobi_header_length:
            raise MobiError("Non-standard file format.  Try 'Convert E-Books' with MOBI as Input and Output formats.")

        self.encryption_type, = unpack('>H', record0[12:14])
        codepage, = unpack('>I', record0[28:32])
        self.codec = 'utf-8' if codepage == 65001 else 'cp1252'

        image_base, = unpack('>I', record0[108:112])
        flags, = self.flags, = unpack('>I', record0[128:132])
        # Bit 0x40 of the flags word indicates the presence of an EXTH block
        have_exth = self.have_exth = (flags & 0x40) != 0
        self.cover_record = self.thumbnail_record = None
        self.timestamp = None
        self.pdbrecords = self.get_pdbrecords()

        self.drm_block = None
        if self.encryption_type != 0:
            if self.have_exth:
                self.drm_block = self.fetchDRMdata()
            else:
                raise MobiError('Unable to set metadata on DRM file without EXTH header')

        self.original_exth_records = {}
        if not have_exth:
            # Synthesize an empty EXTH block so metadata can be written
            self.create_exth()
            self.have_exth = True
        # Fetch timestamp, cover_record, thumbnail_record
        self.fetchEXTHFields()
def fetchDRMdata(self):
''' Fetch the DRM keys '''
drm_offset = int(unpack('>I', self.record0[0xa8:0xac])[0])
self.drm_key_count = int(unpack('>I', self.record0[0xac:0xb0])[0])
drm_keys = b''
for x in range(self.drm_key_count):
base_addr = drm_offset + (x * self.DRM_KEY_SIZE)
drm_keys += self.record0[base_addr:base_addr + self.DRM_KEY_SIZE]
return drm_keys
    def fetchEXTHFields(self):
        '''
        Parse the EXTH block in record 0, caching every record in
        self.original_exth_records and noting the timestamp, cover and
        thumbnail records where present.
        '''
        stream = self.stream
        record0 = self.record0

        # 20:24 = mobiHeaderLength, 16=PDBHeader size
        exth_off = unpack('>I', record0[20:24])[0] + 16 + record0.start
        image_base, = unpack('>I', record0[108:112])

        # Fetch EXTH block
        exth = self.exth = StreamSlicer(stream, exth_off, record0.stop)
        nitems, = unpack('>I', exth[8:12])
        pos = 12
        # Store any EXTH fields not specifiable in GUI
        for i in range(nitems):
            id, size = unpack('>II', exth[pos:pos + 8])
            content = exth[pos + 8: pos + size]
            pos += size
            self.original_exth_records[id] = content

            if id == 106:
                # EXTH 106: publication/modification timestamp
                self.timestamp = content
            elif id == 201:
                # EXTH 201: cover record index, relative to the first image
                # record; 0xffffffff means no cover
                rindex, = self.cover_rindex, = unpack('>I', content)
                if rindex != 0xffffffff:
                    self.cover_record = self.record(rindex + image_base)
            elif id == 202:
                # EXTH 202: thumbnail record index, same base as the cover
                rindex, = self.thumbnail_rindex, = unpack('>I', content)
                if rindex > 0 and rindex != 0xffffffff:
                    self.thumbnail_record = self.record(rindex + image_base)
    def patch(self, off, new_record0):
        '''
        Replace record 0 with ``new_record0`` (which may have a different
        length), shifting all later records and rewriting the PDB record
        offset table to match. Note that ``off`` is not used by this
        implementation; record 0 is always the record replaced.
        '''
        # Save the current size of each record
        record_sizes = [len(new_record0)]
        for i in range(1,self.nrecs-1):
            record_sizes.append(self.pdbrecords[i+1][0]-self.pdbrecords[i][0])
        # And the last one
        record_sizes.append(self.data.stop - self.pdbrecords[self.nrecs-1][0])

        # pdbrecord[0] is the offset of record0.  It will not change
        # record1 offset will be offset of record0 + len(new_record0)
        updated_pdbrecords = [self.pdbrecords[0][0]]
        record0_offset = self.pdbrecords[0][0]
        updated_offset = record0_offset + len(new_record0)

        for i in range(1,self.nrecs-1):
            updated_pdbrecords.append(updated_offset)
            updated_offset += record_sizes[i]
        # Update the last pdbrecord
        updated_pdbrecords.append(updated_offset)

        # Read in current records 1 to last
        data_blocks = [new_record0]
        for i in range(1,self.nrecs):
            data_blocks.append(self.data[self.pdbrecords[i][0]:self.pdbrecords[i][0] + record_sizes[i]])

        # Rewrite the stream
        self.record0.update(data_blocks)

        # Rewrite the pdbrecords
        self.update_pdbrecords(updated_pdbrecords)

        # Truncate if necessary
        if (updated_pdbrecords[-1] + record_sizes[-1]) < self.data.stop:
            self.data.truncate(updated_pdbrecords[-1] + record_sizes[-1])
        else:
            self.data.stop = updated_pdbrecords[-1] + record_sizes[-1]
def patchSection(self, section, new):
off = self.pdbrecords[section][0]
self.patch(off, new)
    def create_exth(self, new_title=None, exth=None):
        '''
        Rewrite record 0 so that it contains the given EXTH block (or an
        empty one) and optionally a new title, updating the header
        offsets/lengths and the PDB record table to match.
        '''
        # Add an EXTH block to record 0, rewrite the stream
        if isinstance(new_title, str):
            new_title = new_title.encode(self.codec, 'replace')

        # Fetch the existing title
        title_offset, = unpack('>L', self.record0[0x54:0x58])
        title_length, = unpack('>L', self.record0[0x58:0x5c])
        title_in_file, = unpack('%ds' % (title_length), self.record0[title_offset:title_offset + title_length])

        # Adjust length to accommodate PrimaryINDX if necessary
        mobi_header_length, = unpack('>L', self.record0[0x14:0x18])
        if mobi_header_length == 0xe4:
            # Patch mobi_header_length to 0xE8
            self.record0[0x17] = b"\xe8"
            # No primary index record
            self.record0[0xf4:0xf8] = pack('>L', 0xFFFFFFFF)
            mobi_header_length = 0xe8

        # Set EXTH flag (0x40)
        self.record0[0x80:0x84] = pack('>L', self.flags|0x40)

        if not exth:
            # Construct an empty EXTH block
            pad = b'\0' * 4
            exth = [b'EXTH', pack('>II', 12, 0), pad]
            exth = b''.join(exth)

        # Update drm_offset(0xa8), title_offset(0x54); with DRM the key
        # blocks sit between the EXTH block and the title
        if self.encryption_type != 0:
            self.record0[0xa8:0xac] = pack('>L', 0x10 + mobi_header_length + len(exth))
            self.record0[0xb0:0xb4] = pack('>L', len(self.drm_block))
            self.record0[0x54:0x58] = pack('>L', 0x10 + mobi_header_length + len(exth) + len(self.drm_block))
        else:
            self.record0[0x54:0x58] = pack('>L', 0x10 + mobi_header_length + len(exth))

        if new_title:
            self.record0[0x58:0x5c] = pack('>L', len(new_title))

        # Create an updated Record0: header, EXTH, (DRM), title
        new_record0 = io.BytesIO()
        new_record0.write(self.record0[:0x10 + mobi_header_length])
        new_record0.write(exth)
        if self.encryption_type != 0:
            new_record0.write(self.drm_block)
        new_record0.write(new_title if new_title else title_in_file)

        # Pad to a 4-byte boundary
        trail = len(new_record0.getvalue()) % 4
        pad = b'\0' * (4 - trail)  # Always pad w/ at least 1 byte
        new_record0.write(pad)
        # Trailing scratch space after record 0
        new_record0.write(b'\0'*(1024*8))

        # Rebuild the stream, update the pdbrecords pointers
        self.patchSection(0,new_record0.getvalue())

        # Update record0
        self.record0 = self.record(0)
def hexdump(self, src, length=16):
# Diagnostic
FILTER=''.join([(len(repr(codepoint_to_chr(x)))==3) and codepoint_to_chr(x) or '.' for x in range(256)])
N=0
result=''
while src:
s,src = src[:length],src[length:]
hexa = ' '.join(["%02X"%ord(x) for x in s])
s = s.translate(FILTER)
result += "%04X %-*s %s\n" % (N, length*3, hexa, s)
N+=length
print(result)
def get_pdbrecords(self):
pdbrecords = []
for i in range(self.nrecs):
offset, a1,a2,a3,a4 = unpack('>LBBBB', self.data[78+i*8:78+i*8+8])
flags, val = a1, a2<<16|a3<<8|a4
pdbrecords.append([offset, flags, val])
return pdbrecords
def update_pdbrecords(self, updated_pdbrecords):
for (i, pdbrecord) in enumerate(updated_pdbrecords):
self.data[78+i*8:78+i*8 + 4] = pack('>L',pdbrecord)
# Refresh local copy
self.pdbrecords = self.get_pdbrecords()
def dump_pdbrecords(self):
# Diagnostic
print("MetadataUpdater.dump_pdbrecords()")
print("%10s %10s %10s" % ("offset","flags","val"))
for i in range(len(self.pdbrecords)):
pdbrecord = self.pdbrecords[i]
print(f"{pdbrecord[0]:10X} {pdbrecord[1]:10X} {pdbrecord[2]:10X}")
def record(self, n):
if n >= self.nrecs:
raise ValueError('non-existent record %r' % n)
offoff = 78 + (8 * n)
start, = unpack('>I', self.data[offoff + 0:offoff + 4])
stop = None
if n < (self.nrecs - 1):
stop, = unpack('>I', self.data[offoff + 8:offoff + 12])
return StreamSlicer(self.stream, start, stop)
def update(self, mi, asin=None):
mi.title = normalize(mi.title)
def update_exth_record(rec):
recs.append(rec)
if rec[0] in self.original_exth_records:
self.original_exth_records.pop(rec[0])
if self.type != b"BOOKMOBI":
raise MobiError("Setting metadata only supported for MOBI files of type 'BOOK'.\n"
"\tThis is a %r file of type %r" % (self.type[0:4], self.type[4:8]))
recs = []
added_501 = False
try:
from calibre.ebooks.conversion.config import load_defaults
prefs = load_defaults('mobi_output')
pas = prefs.get('prefer_author_sort', False)
kindle_pdoc = prefs.get('personal_doc', None)
share_not_sync = prefs.get('share_not_sync', False)
except:
pas = False
kindle_pdoc = None
share_not_sync = False
if mi.author_sort and pas:
# We want an EXTH field per author...
authors = mi.author_sort.split(' & ')
for author in authors:
update_exth_record((100, normalize(author).encode(self.codec, 'replace')))
elif mi.authors:
authors = mi.authors
for author in authors:
update_exth_record((100, normalize(author).encode(self.codec, 'replace')))
if mi.publisher:
update_exth_record((101, normalize(mi.publisher).encode(self.codec, 'replace')))
if mi.comments:
# Strip user annotations
a_offset = mi.comments.find('<div class="user_annotations">')
ad_offset = mi.comments.find('<hr class="annotations_divider" />')
if a_offset >= 0:
mi.comments = mi.comments[:a_offset]
if ad_offset >= 0:
mi.comments = mi.comments[:ad_offset]
update_exth_record((103, normalize(mi.comments).encode(self.codec, 'replace')))
if mi.isbn:
update_exth_record((104, mi.isbn.encode(self.codec, 'replace')))
if mi.tags:
# FIXME: Keep a single subject per EXTH field?
subjects = '; '.join(mi.tags)
update_exth_record((105, normalize(subjects).encode(self.codec, 'replace')))
if kindle_pdoc and kindle_pdoc in mi.tags:
added_501 = True
update_exth_record((501, b'PDOC'))
if mi.pubdate:
update_exth_record((106, str(mi.pubdate).encode(self.codec, 'replace')))
elif mi.timestamp:
update_exth_record((106, str(mi.timestamp).encode(self.codec, 'replace')))
elif self.timestamp:
update_exth_record((106, self.timestamp))
else:
update_exth_record((106, nowf().isoformat().encode(self.codec, 'replace')))
if self.cover_record is not None:
update_exth_record((201, pack('>I', self.cover_rindex)))
update_exth_record((203, pack('>I', 0)))
if self.thumbnail_record is not None:
update_exth_record((202, pack('>I', self.thumbnail_rindex)))
# Add a 113 record if not present to allow Amazon syncing
if (113 not in self.original_exth_records and
self.original_exth_records.get(501, None) == b'EBOK' and
not added_501 and not share_not_sync):
from uuid import uuid4
update_exth_record((113, str(uuid4()).encode(self.codec)))
if asin is not None:
update_exth_record((113, asin.encode(self.codec)))
update_exth_record((501, b'EBOK'))
update_exth_record((504, asin.encode(self.codec)))
# Add a 112 record with actual UUID
if getattr(mi, 'uuid', None):
update_exth_record((112,
("calibre:%s" % mi.uuid).encode(self.codec, 'replace')))
if 503 in self.original_exth_records:
update_exth_record((503, mi.title.encode(self.codec, 'replace')))
# Update book producer
if getattr(mi, 'book_producer', False):
update_exth_record((108, mi.book_producer.encode(self.codec, 'replace')))
# Set langcode in EXTH header
if not mi.is_null('language'):
lang = canonicalize_lang(mi.language)
lang = lang_as_iso639_1(lang) or lang
if lang:
update_exth_record((524, lang.encode(self.codec, 'replace')))
# Include remaining original EXTH fields
for id in sorted(self.original_exth_records):
recs.append((id, self.original_exth_records[id]))
recs = sorted(recs, key=lambda x:(x[0],x[0]))
exth = io.BytesIO()
for code, data in recs:
exth.write(pack('>II', code, len(data) + 8))
exth.write(data)
exth = exth.getvalue()
trail = len(exth) % 4
pad = b'\0' * (4 - trail) # Always pad w/ at least 1 byte
exth = [b'EXTH', pack('>II', len(exth) + 12, len(recs)), exth, pad]
exth = b''.join(exth)
if getattr(self, 'exth', None) is None:
raise MobiError('No existing EXTH record. Cannot update metadata.')
if not mi.is_null('language'):
self.record0[92:96] = iana2mobi(mi.language)
self.create_exth(exth=exth, new_title=mi.title)
# Fetch updated timestamp, cover_record, thumbnail_record
self.fetchEXTHFields()
if mi.cover_data[1] or mi.cover:
try:
data = mi.cover_data[1]
if not data:
with open(mi.cover, 'rb') as f:
data = f.read()
except:
pass
else:
if is_image(self.cover_record):
size = len(self.cover_record)
cover = rescale_image(data, size)
if len(cover) <= size:
cover += b'\0' * (size - len(cover))
self.cover_record[:] = cover
if is_image(self.thumbnail_record):
size = len(self.thumbnail_record)
thumbnail = rescale_image(data, size, dimen=MAX_THUMB_DIMEN)
if len(thumbnail) <= size:
thumbnail += b'\0' * (size - len(thumbnail))
self.thumbnail_record[:] = thumbnail
return
def set_metadata(stream, mi):
    '''Update the metadata stored in the MOBI file open as ``stream``
    in-place, by rewriting its EXTH records.'''
    updater = MetadataUpdater(stream)
    updater.update(mi)
def get_metadata(stream):
    '''Return a MetaInformation object read from the MOBI file open as stream.

    Topaz (TPZ) files are delegated to the topaz reader. EXTH metadata is
    preferred when present; small books without EXTH metadata are actually
    converted to look for embedded metadata. A cover image is extracted
    when one can be located.
    '''
    from calibre import CurrentDir
    from calibre.ebooks.metadata import MetaInformation
    from calibre.ebooks.mobi.reader.headers import MetadataHeader
    from calibre.ebooks.mobi.reader.mobi6 import MobiReader
    from calibre.ptempfile import TemporaryDirectory
    from calibre.utils.img import save_cover_data_to
    stream.seek(0)
    # Sniff the first three bytes to detect Topaz files
    try:
        raw = stream.read(3)
    except Exception:
        raw = b''
    stream.seek(0)
    if raw == b'TPZ':
        from calibre.ebooks.metadata.topaz import get_metadata
        return get_metadata(stream)
    from calibre.utils.logging import Log
    log = Log()
    # Fall back to the file name as the title; stream may have no .name
    try:
        mi = MetaInformation(os.path.basename(stream.name), [_('Unknown')])
    except:
        mi = MetaInformation(_('Unknown'), [_('Unknown')])
    mh = MetadataHeader(stream, log)
    if mh.title and mh.title != _('Unknown'):
        mi.title = mh.title
    if mh.exth is not None:
        # EXTH metadata present: prefer it wholesale
        if mh.exth.mi is not None:
            mi = mh.exth.mi
    else:
        # No EXTH record. As a last resort, convert small books and look
        # for metadata embedded in the content itself.
        size = 1024**3  # assume "too big" when stream size cannot be determined
        if hasattr(stream, 'seek') and hasattr(stream, 'tell'):
            pos = stream.tell()
            stream.seek(0, 2)
            size = stream.tell()
            stream.seek(pos)
        if size < 4*1024*1024:
            with TemporaryDirectory('_mobi_meta_reader') as tdir:
                with CurrentDir(tdir):
                    mr = MobiReader(stream, log)
                    parse_cache = {}
                    mr.extract_content(tdir, parse_cache)
                    if mr.embedded_mi is not None:
                        mi = mr.embedded_mi
    # Locate the cover image: explicit EXTH cover offset if available,
    # otherwise the first image record
    if hasattr(mh.exth, 'cover_offset'):
        cover_index = mh.first_image_index + mh.exth.cover_offset
        data = mh.section_data(int(cover_index))
    else:
        try:
            data = mh.section_data(mh.first_image_index)
        except Exception:
            data = b''
    # NOTE(review): `what` is presumably calibre.utils.imghdr.what imported
    # at module top -- confirm against the file's import block
    if data and what(None, data) in {'jpg', 'jpeg', 'gif', 'png', 'bmp', 'webp'}:
        try:
            mi.cover_data = ('jpg', save_cover_data_to(data))
        except Exception:
            log.exception('Failed to read MOBI cover')
    return mi
| 20,450 | Python | .py | 461 | 33.650759 | 118 | 0.57696 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,528 | odt.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/odt.py | #!/usr/bin/env python
#
# Copyright (C) 2006 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
import io
import json
import os
import re
from lxml.etree import fromstring, tostring
from odf.draw import Frame as odFrame
from odf.draw import Image as odImage
from odf.namespaces import DCNS, METANS, OFFICENS
from odf.opendocument import load as odLoad
from calibre.ebooks.metadata import MetaInformation, authors_to_string, check_isbn, string_to_authors
from calibre.utils.date import isoformat, parse_date
from calibre.utils.imghdr import identify
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from calibre.utils.zipfile import ZipFile, safe_replace
from polyglot.builtins import as_unicode
# Map of friendly field name -> (XML namespace, local tag name) for the
# metadata elements that can appear in an ODF meta.xml file.
fields = {
    'title': (DCNS, 'title'),
    'description': (DCNS, 'description'),
    'subject': (DCNS, 'subject'),
    'creator': (DCNS, 'creator'),
    'date': (DCNS, 'date'),
    'language': (DCNS, 'language'),
    'generator': (METANS, 'generator'),
    'initial-creator': (METANS, 'initial-creator'),
    'keyword': (METANS, 'keyword'),
    'keywords': (METANS, 'keywords'),
    'editing-duration': (METANS, 'editing-duration'),
    'editing-cycles': (METANS, 'editing-cycles'),
    'printed-by': (METANS, 'printed-by'),
    'print-date': (METANS, 'print-date'),
    'creation-date': (METANS, 'creation-date'),
    'user-defined': (METANS, 'user-defined'),
    # 'template': (METANS, 'template'),
}
def uniq(vals):
    ''' Remove all duplicates from vals, while preserving order. '''
    # dict preserves insertion order (guaranteed since Python 3.7), so this
    # keeps the first occurrence of every value -- same behavior as the old
    # seen-set loop that relied on a side effect inside a comprehension,
    # but simpler and implemented entirely in C.
    return list(dict.fromkeys(vals or ()))
def get_metadata(stream, extract_cover=True):
    '''Return a MetaInformation object read from an ODT/ODF document.

    :param stream: file-like object containing the zip-based ODF document
    :param extract_cover: if True, also load the cover image data into
        ``mi.cover_data``
    '''
    whitespace = re.compile(r'\s+')
    def normalize(s):
        # Collapse internal runs of whitespace into single spaces
        return whitespace.sub(' ', s).strip()
    with ZipFile(stream) as zf:
        meta = zf.read('meta.xml')
        root = fromstring(meta)
        def find(field):
            # Text of the first matching metadata element, or None
            ns, tag = fields[field]
            ans = root.xpath(f'//ns0:{tag}', namespaces={'ns0': ns})
            if ans:
                return normalize(tostring(ans[0], method='text', encoding='unicode', with_tail=False)).strip()
        def find_all(field):
            # Yield the text of every matching metadata element
            ns, tag = fields[field]
            for x in root.xpath(f'//ns0:{tag}', namespaces={'ns0': ns}):
                yield normalize(tostring(x, method='text', encoding='unicode', with_tail=False)).strip()
        mi = MetaInformation(None, [])
        title = find('title')
        if title:
            mi.title = title
        creator = find('initial-creator') or find('creator')
        if creator:
            mi.authors = string_to_authors(creator)
        desc = find('description')
        if desc:
            mi.comments = desc
        lang = find('language')
        if lang and canonicalize_lang(lang):
            mi.languages = [canonicalize_lang(lang)]
        # Both meta:keyword and meta:keywords elements may carry
        # comma-separated tag lists
        keywords = []
        for q in ('keyword', 'keywords'):
            for kw in find_all(q):
                keywords += [x.strip() for x in kw.split(',') if x.strip()]
        mi.tags = uniq(keywords)
        # Collect meta:user-defined name/value pairs; calibre uses these to
        # round-trip OPF metadata through ODF documents (opf.* keys below)
        data = {}
        for tag in root.xpath('//ns0:user-defined', namespaces={'ns0': fields['user-defined'][0]}):
            name = (tag.get('{%s}name' % METANS) or '').lower()
            vtype = tag.get('{%s}value-type' % METANS) or 'string'
            val = tag.text
            if name and val:
                if vtype == 'boolean':
                    val = val == 'true'
                data[name] = val
        opfmeta = False  # we need this later for the cover
        opfnocover = False
        if data.get('opf.metadata'):
            # custom metadata contains OPF information
            opfmeta = True
            if data.get('opf.titlesort', ''):
                mi.title_sort = data['opf.titlesort']
            if data.get('opf.authors', ''):
                mi.authors = string_to_authors(data['opf.authors'])
            if data.get('opf.authorsort', ''):
                mi.author_sort = data['opf.authorsort']
            if data.get('opf.isbn', ''):
                isbn = check_isbn(data['opf.isbn'])
                if isbn is not None:
                    mi.isbn = isbn
            if data.get('opf.publisher', ''):
                mi.publisher = data['opf.publisher']
            if data.get('opf.pubdate', ''):
                mi.pubdate = parse_date(data['opf.pubdate'], assume_utc=True)
            if data.get('opf.identifiers'):
                try:
                    mi.identifiers = json.loads(data['opf.identifiers'])
                except Exception:
                    pass
            if data.get('opf.rating'):
                try:
                    # Clamp the rating to the valid 0-10 range
                    mi.rating = max(0, min(float(data['opf.rating']), 10))
                except Exception:
                    pass
            if data.get('opf.series', ''):
                mi.series = data['opf.series']
                if data.get('opf.seriesindex', ''):
                    try:
                        mi.series_index = float(data['opf.seriesindex'])
                    except Exception:
                        mi.series_index = 1.0
            if data.get('opf.language', ''):
                cl = canonicalize_lang(data['opf.language'])
                if cl:
                    mi.languages = [cl]
            opfnocover = data.get('opf.nocover', False)
        if not opfnocover:
            try:
                read_cover(stream, zf, mi, opfmeta, extract_cover)
            except Exception:
                pass  # Do not let an error reading the cover prevent reading other data
    return mi
def set_metadata(stream, mi):
    '''Write the metadata in ``mi`` into the ODT file open as ``stream``.

    The existing meta.xml inside the zip container is rewritten in place.
    '''
    with ZipFile(stream) as zf:
        # zf.read() avoids leaving the member file handle from zf.open() open
        raw = _set_metadata(zf.read('meta.xml'), mi)
    # The original code did stream.seek(os.SEEK_SET), passing the *whence*
    # constant as the offset; it only worked because os.SEEK_SET == 0.
    # Seek explicitly to the start of the stream before replacing.
    stream.seek(0, os.SEEK_SET)
    safe_replace(stream, 'meta.xml', io.BytesIO(raw))
def _set_metadata(raw, mi):
    '''Return the bytes of a meta.xml document (parsed from ``raw``) with
    the metadata fields from ``mi`` applied.

    Standard fields go into dc:/meta: elements; calibre-specific fields are
    stored as meta:user-defined elements with opf.* names.
    '''
    root = fromstring(raw)
    namespaces = {'office': OFFICENS, 'meta': METANS, 'dc': DCNS}
    nsrmap = {v: k for k, v in namespaces.items()}
    def xpath(expr, parent=root):
        return parent.xpath(expr, namespaces=namespaces)
    def remove(*tag_names):
        # Remove all existing occurrences of the named standard fields
        for tag_name in tag_names:
            ns = fields[tag_name][0]
            tag_name = f'{nsrmap[ns]}:{tag_name}'
            for x in xpath('descendant::' + tag_name, meta):
                x.getparent().remove(x)
    def add(tag, val=None):
        # Append a new standard field element with the given text
        ans = meta.makeelement('{%s}%s' % fields[tag])
        ans.text = val
        meta.append(ans)
        return ans
    def remove_user_metadata(*names):
        # Remove meta:user-defined elements whose name matches (case-insensitive)
        for x in xpath('//meta:user-defined'):
            q = (x.get('{%s}name' % METANS) or '').lower()
            if q in names:
                x.getparent().remove(x)
    def add_um(name, val, vtype='string'):
        ans = add('user-defined', val)
        ans.set('{%s}value-type' % METANS, vtype)
        ans.set('{%s}name' % METANS, name)
    def add_user_metadata(name, val):
        # On the first user-metadata write per call, (re)create the
        # opf.metadata sentinel so readers know opf.* fields are present.
        # The flag lives on the closure's function object, so it resets on
        # every _set_metadata() invocation.
        if not hasattr(add_user_metadata, 'sentinel_added'):
            add_user_metadata.sentinel_added = True
            remove_user_metadata('opf.metadata')
            add_um('opf.metadata', 'true', 'boolean')
        val_type = 'string'
        if hasattr(val, 'strftime'):
            # datetime-like values are stored as ISO dates (UTC, date part only)
            val = isoformat(val, as_utc=True).split('T')[0]
            val_type = 'date'
        add_um(name, val, val_type)
    meta = xpath('//office:meta')[0]
    if not mi.is_null('title'):
        remove('title')
        add('title', mi.title)
    if not mi.is_null('title_sort'):
        remove_user_metadata('opf.titlesort')
        add_user_metadata('opf.titlesort', mi.title_sort)
    if not mi.is_null('authors'):
        remove('initial-creator', 'creator')
        val = authors_to_string(mi.authors)
        add('initial-creator', val), add('creator', val)
        remove_user_metadata('opf.authors')
        add_user_metadata('opf.authors', val)
    if not mi.is_null('author_sort'):
        remove_user_metadata('opf.authorsort')
        add_user_metadata('opf.authorsort', mi.author_sort)
    if not mi.is_null('comments'):
        remove('description')
        add('description', mi.comments)
    if not mi.is_null('tags'):
        remove('keyword')
        add('keyword', ', '.join(mi.tags))
    if not mi.is_null('languages'):
        lang = lang_as_iso639_1(mi.languages[0])
        if lang:
            remove('language')
            add('language', lang)
    if not mi.is_null('pubdate'):
        remove_user_metadata('opf.pubdate')
        add_user_metadata('opf.pubdate', mi.pubdate)
    if not mi.is_null('publisher'):
        remove_user_metadata('opf.publisher')
        add_user_metadata('opf.publisher', mi.publisher)
    if not mi.is_null('series'):
        remove_user_metadata('opf.series', 'opf.seriesindex')
        add_user_metadata('opf.series', mi.series)
        add_user_metadata('opf.seriesindex', f'{mi.series_index}')
    if not mi.is_null('identifiers'):
        remove_user_metadata('opf.identifiers')
        add_user_metadata('opf.identifiers', as_unicode(json.dumps(mi.identifiers)))
    if not mi.is_null('rating'):
        remove_user_metadata('opf.rating')
        add_user_metadata('opf.rating', '%.2g' % mi.rating)
    return tostring(root, encoding='utf-8', pretty_print=True)
def read_cover(stream, zin, mi, opfmeta, extract_cover):
    '''Locate (and optionally load) the cover image of an ODF document.

    :param stream: the ODF document, used for parsing the drawing frames
    :param zin: an open ZipFile over the same document, used to read images
    :param mi: MetaInformation object updated with cover/odf_cover_frame
    :param opfmeta: True if the document carries calibre opf.* metadata
    :param extract_cover: if True, also load the image bytes into mi.cover_data
    '''
    # search for an draw:image in a draw:frame with the name 'opf.cover'
    # if opf.metadata prop is false, just use the first image that
    # has a proper size (borrowed from docx)
    otext = odLoad(stream)
    cover_href = None
    cover_data = None
    cover_frame = None
    imgnum = 0
    for frm in otext.topnode.getElementsByType(odFrame):
        img = frm.getElementsByType(odImage)
        if len(img) == 0:
            continue
        i_href = img[0].getAttribute('href')
        try:
            raw = zin.read(i_href)
        except KeyError:
            # href points outside the zip (e.g. a linked image)
            continue
        try:
            fmt, width, height = identify(raw)
        except Exception:
            continue
        imgnum += 1
        if opfmeta and frm.getAttribute('name').lower() == 'opf.cover':
            # Explicitly designated cover frame wins
            cover_href = i_href
            cover_data = (fmt, raw)
            cover_frame = frm.getAttribute('name')  # could have upper case
            break
        if cover_href is None and imgnum == 1 and 0.8 <= height/width <= 1.8 and height*width >= 12000:
            # Pick the first image as the cover if it is of a suitable size
            cover_href = i_href
            cover_data = (fmt, raw)
            if not opfmeta:
                # No opf.cover frame can exist, so stop looking
                break
    if cover_href is not None:
        mi.cover = cover_href
        mi.odf_cover_frame = cover_frame
        if extract_cover:
            if not cover_data:
                raw = zin.read(cover_href)
                try:
                    fmt = identify(raw)[0]
                except Exception:
                    pass
                else:
                    cover_data = (fmt, raw)
            mi.cover_data = cover_data
| 11,756 | Python | .py | 282 | 32.287234 | 110 | 0.576698 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,529 | pdb.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/pdb.py | '''
Read meta information from pdb files.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import re
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.ereader import get_metadata as get_eReader
from calibre.ebooks.metadata.haodoo import get_metadata as get_Haodoo
from calibre.ebooks.metadata.plucker import get_metadata as get_plucker
from calibre.ebooks.pdb.header import PdbHeaderReader
# Dispatch table mapping the PDB "ident" (type/creator code from the PDB
# header) to the metadata reader for that sub-format.
MREADER = {
    'PNPdPPrs' : get_eReader,
    'PNRdPPrs' : get_eReader,
    'DataPlkr' : get_plucker,
    'BOOKMTIT' : get_Haodoo,
    'BOOKMTIU' : get_Haodoo,
}
from calibre.ebooks.metadata.ereader import set_metadata as set_eReader
# Only the eReader variants support writing metadata back.
MWRITER = {
    'PNPdPPrs' : set_eReader,
    'PNRdPPrs' : set_eReader,
}
def get_metadata(stream, extract_cover=True):
    """
    Return metadata as a L{MetaInfo} object
    """
    header = PdbHeaderReader(stream)
    reader = MREADER.get(header.ident, None)
    if reader is not None:
        # A dedicated reader exists for this PDB sub-format
        return reader(stream, extract_cover)
    # Unknown variant: fall back to the name stored in the PDB header
    title = header.title
    if isinstance(title, bytes):
        title = title.decode('utf-8', 'replace')
    return MetaInformation(title, [_('Unknown')])
def set_metadata(stream, mi):
    '''Write the metadata in ``mi`` into the PDB file open as ``stream``.'''
    stream.seek(0)
    header = PdbHeaderReader(stream)
    writer = MWRITER.get(header.ident, None)
    if writer:
        # Delegate format-specific metadata to the dedicated writer
        writer(stream, mi)
    # Always update the 31-byte PDB name field with a sanitized,
    # NUL-padded/terminated version of the title.
    stream.seek(0)
    sanitized = re.sub('[^-A-Za-z0-9 ]+', '_', mi.title)
    stream.write(sanitized.ljust(31, '\x00')[:31].encode('ascii', 'replace') + b'\x00')
| 1,595 | Python | .py | 44 | 32.022727 | 118 | 0.705806 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,530 | xisbn.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/xisbn.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json
import re
import threading
from calibre import browser
class xISBN:

    '''
    This class is used to find the ISBN numbers of "related" editions of a
    book, given its ISBN. Useful when querying services for metadata by ISBN,
    in case they do not have the ISBN for the particular edition.

    Note: the backing OCLC xISBN web service was decommissioned in 2018, so
    fetch_data() always returns an empty list and every lookup yields no
    associated ISBNs. The class is kept for API compatibility.
    '''

    QUERY = 'http://xisbn.worldcat.org/webservices/xid/isbn/%s?method=getEditions&format=json&fl=form,year,lang,ed'

    def __init__(self):
        self.lock = threading.RLock()  # guards _data/_map: get_data may be called from多 threads
        self._data = []  # cached record lists, indexed by id
        self._map = {}   # purified ISBN -> index into _data
        self.isbn_pat = re.compile(r'[^0-9X]', re.IGNORECASE)

    def purify(self, isbn):
        '''Strip everything except digits and the X check digit from isbn.'''
        return self.isbn_pat.sub('', isbn.upper())

    def fetch_data(self, isbn):
        # The xisbn service has been de-commissioned, see
        # https://www.oclc.org/developer/news/2018/xid-decommission.en.html
        # The now-unreachable network/JSON parsing code that used to follow
        # this return has been removed.
        return []

    def isbns_in_data(self, data):
        # Yield every ISBN mentioned in the fetched records
        for rec in data:
            yield from rec.get('isbn', [])

    def get_data(self, isbn):
        '''Return the (cached) list of edition records for isbn.'''
        isbn = self.purify(isbn)
        with self.lock:
            if isbn not in self._map:
                try:
                    data = self.fetch_data(isbn)
                except Exception:
                    import traceback
                    traceback.print_exc()
                    data = []
                # Cache the result and index it under every ISBN it mentions,
                # so related editions share one cache entry
                id_ = len(self._data)
                self._data.append(data)
                for i in self.isbns_in_data(data):
                    self._map[i] = id_
                self._map[isbn] = id_
            return self._data[self._map[isbn]]

    def get_associated_isbns(self, isbn):
        '''Return the set of all ISBNs of editions related to isbn.'''
        data = self.get_data(isbn)
        ans = set()
        for rec in data:
            for i in rec.get('isbn', []):
                ans.add(i)
        return ans

    def get_isbn_pool(self, isbn):
        '''Return (isbns, min_year): the frozenset of all related ISBNs and
        the earliest publication year found (None if unknown).'''
        data = self.get_data(isbn)
        raw = tuple(x.get('isbn') for x in data if 'isbn' in x)
        isbns = []
        for x in raw:
            isbns += x
        isbns = frozenset(isbns)
        # Use None directly as "no year found" instead of the old magic
        # 100000 sentinel, which would have mishandled an (absurd) year
        # of exactly 100000
        min_year = None
        for x in data:
            try:
                year = int(x['year'])
            except (KeyError, ValueError, TypeError):
                continue
            if min_year is None or year < min_year:
                min_year = year
        return isbns, min_year
# Module-level singleton used by the rest of calibre
xisbn = xISBN()

if __name__ == '__main__':
    import pprint
    import sys
    isbn = sys.argv[-1]
    # pprint.pprint() prints and returns None, so wrapping it in print()
    # (as the old code did) emitted a spurious trailing "None" line.
    pprint.pprint(xisbn.get_data(isbn))
    print()
    print(xisbn.get_associated_isbns(isbn))
| 3,147 | Python | .py | 92 | 24.478261 | 115 | 0.532279 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,531 | docx.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/docx.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from io import BytesIO
from calibre.ebooks.docx.container import DOCX
from calibre.ebooks.docx.writer.container import update_doc_props, xml2str
from calibre.utils.imghdr import identify
from calibre.utils.xml_parse import safe_xml_fromstring
def get_cover(docx):
    '''Return (fmt, data) for the first embedded image that looks like a
    cover (roughly portrait aspect ratio and reasonably large), or None.'''
    document = docx.document
    get_attr = docx.namespace.get
    find_images = docx.namespace.XPath(
        '//*[name()="w:drawing" or name()="w:pict"]/descendant::*[(name()="a:blip" and @r:embed) or (name()="v:imagedata" and @r:id)][1]')
    relationships = docx.document_relationships[0]
    for img in find_images(document):
        rid = get_attr(img, 'r:embed') or get_attr(img, 'r:id')
        if rid not in relationships:
            continue
        try:
            data = docx.read(relationships[rid])
            fmt, width, height = identify(data)
        except Exception:
            continue
        if width < 0 or height < 0:
            continue  # dimensions could not be determined
        aspect = height / width
        if 0.8 <= aspect <= 1.8 and height * width >= 160000:
            return (fmt, data)
def get_metadata(stream):
    '''Read metadata (and a candidate cover image) from a DOCX file.'''
    container = DOCX(stream, extract=False)
    mi = container.metadata
    try:
        cover = get_cover(container)
    except Exception:
        # A broken image must not prevent reading the other metadata
        cover = None
        import traceback
        traceback.print_exc()
    container.close()
    stream.seek(0)
    if cover is not None:
        mi.cover_data = cover
    return mi
def set_metadata(stream, mi):
    '''Write metadata from ``mi`` into the DOCX file open as ``stream``.

    Updates the core document properties part and, when an extended (app)
    properties part exists, replaces its Company element with the publisher.
    '''
    from calibre.utils.zipfile import safe_replace
    c = DOCX(stream, extract=False)
    dp_name, ap_name = c.get_document_properties_names()
    dp_raw = c.read(dp_name)
    try:
        ap_raw = c.read(ap_name)
    except Exception:
        ap_raw = None  # the extended properties part is optional
    cp = safe_xml_fromstring(dp_raw)
    update_doc_props(cp, mi, c.namespace)
    replacements = {}
    if ap_raw is not None:
        ap = safe_xml_fromstring(ap_raw)
        comp = ap.makeelement('{%s}Company' % c.namespace.namespaces['ep'])
        # Remove any existing Company elements before appending the new one
        for child in tuple(ap):
            if child.tag == comp.tag:
                ap.remove(child)
        comp.text = mi.publisher
        ap.append(comp)
        replacements[ap_name] = BytesIO(xml2str(ap))
    stream.seek(0)
    safe_replace(stream, dp_name, BytesIO(xml2str(cp)), extra_replacements=replacements)
if __name__ == '__main__':
    # Quick manual check: print the metadata of the DOCX file given as the
    # last command line argument.
    import sys
    with open(sys.argv[-1], 'rb') as stream:
        print(get_metadata(stream))
| 2,448 | Python | .py | 68 | 28.926471 | 138 | 0.618504 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,532 | test_author_sort.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/test_author_sort.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2021, Kovid Goyal <kovid at kovidgoyal.net>
import unittest
from calibre.ebooks.metadata import author_to_author_sort, remove_bracketed_text
from calibre.utils.config_base import Tweak, tweaks
class TestRemoveBracketedText(unittest.TestCase):
    '''Tests for remove_bracketed_text: [], () and {} spans (including
    malformed ones) are stripped; <> is left alone.'''

    def check(self, text, expected):
        # Shared assertion helper
        self.assertEqual(remove_bracketed_text(text), expected)

    def test_brackets(self):
        self.check('a[b]c(d)e{f}g<h>i', 'aceg<h>i')

    def test_nested(self):
        self.check('a[[b]c(d)e{f}]g(h(i)j[k]l{m})n{{{o}}}p', 'agnp')

    def test_mismatched(self):
        self.check('a[b(c]d)e', 'ae')
        self.check('a{b(c}d)e', 'ae')

    def test_extra_closed(self):
        self.check('a]b}c)d', 'abcd')
        self.check('a[b]c]d(e)f{g)h}i}j)k]l', 'acdfijkl')

    def test_unclosed(self):
        self.check('a]b[c', 'ab')
        self.check('a(b[c]d{e}f', 'a')
        self.check('a{b}c{d[e]f(g)h', 'ac')
class TestAuthorToAuthorSort(unittest.TestCase):
    '''Tests for author_to_author_sort under each of its four methods
    (invert, copy, comma, nocomma).'''

    def check_all_methods(self, name, invert=None, comma=None,
                          nocomma=None, copy=None):
        '''Assert the result of every sort method for ``name``.

        Unspecified expectations cascade: invert defaults to name, comma
        to invert, nocomma to comma, and copy to name.
        '''
        methods = ('invert', 'copy', 'comma', 'nocomma')
        if invert is None:
            invert = name
        if comma is None:
            comma = invert
        if nocomma is None:
            nocomma = comma
        if copy is None:
            copy = name
        results = (invert, copy, comma, nocomma)
        for method, result in zip(methods, results):
            self.assertEqual(author_to_author_sort(name, method), result)

    def test_single(self):
        self.check_all_methods('Aristotle')

    def test_all_prefix(self):
        self.check_all_methods('Mr. Dr Prof.')

    def test_all_suffix(self):
        self.check_all_methods('Senior Inc')

    def test_copywords(self):
        # A quoted word is not a copyword, so the name is still inverted
        self.check_all_methods('Don "Team" Smith',
                               invert='Smith, Don "Team"',
                               nocomma='Smith Don "Team"')
        self.check_all_methods('Don Team Smith')

    def test_national(self):
        # Behavior depends on whether 'National' is in the copywords tweak,
        # so test both states, temporarily toggling the tweak
        c = tweaks['author_name_copywords']
        try:
            # Assume that 'author_name_copywords' is a common sequence type
            i = c.index('National')
        except ValueError:
            # If "National" not found, check first without, then temporarily add
            self.check_all_methods('National Lampoon',
                                   invert='Lampoon, National',
                                   nocomma='Lampoon National')
            t = type(c)
            with Tweak('author_name_copywords', c + t(['National'])):
                self.check_all_methods('National Lampoon')
        else:
            # If "National" found, check with, then temporarily remove
            self.check_all_methods('National Lampoon')
            with Tweak('author_name_copywords', c[:i] + c[i + 1:]):
                self.check_all_methods('National Lampoon',
                                       invert='Lampoon, National',
                                       nocomma='Lampoon National')

    def test_method(self):
        self.check_all_methods('Jane Doe',
                               invert='Doe, Jane',
                               nocomma='Doe Jane')

    def test_invalid_methos(self):
        # Invalid string defaults to invert
        name = 'Jane, Q. van Doe[ed] Jr.'
        self.assertEqual(author_to_author_sort(name, 'invert'),
                         author_to_author_sort(name, '__unknown__!(*T^U$'))

    def test_prefix_suffix(self):
        self.check_all_methods('Mrs. Jane Q. Doe III',
                               invert='Doe, Jane Q. III',
                               nocomma='Doe Jane Q. III')

    def test_surname_prefix(self):
        # 'Da'/'Van' are treated as part of the surname only when the
        # author_use_surname_prefixes tweak is enabled
        with Tweak('author_use_surname_prefixes', True):
            self.check_all_methods('Leonardo Da Vinci',
                                   invert='Da Vinci, Leonardo',
                                   nocomma='Da Vinci Leonardo')
            self.check_all_methods('Van Gogh')
            self.check_all_methods('Van')
        with Tweak('author_use_surname_prefixes', False):
            self.check_all_methods('Leonardo Da Vinci',
                                   invert='Vinci, Leonardo Da',
                                   nocomma='Vinci Leonardo Da')
            self.check_all_methods('Van Gogh',
                                   invert='Gogh, Van',
                                   nocomma='Gogh Van')

    def test_comma(self):
        # A name already containing a comma is left alone by 'comma'
        self.check_all_methods('James Wesley, Rawles',
                               invert='Rawles, James Wesley,',
                               comma='James Wesley, Rawles',
                               nocomma='Rawles James Wesley,')

    def test_brackets(self):
        # Bracketed text is stripped before sorting
        self.check_all_methods('Seventh Author [7]',
                               invert='Author, Seventh',
                               nocomma='Author Seventh')
        self.check_all_methods('John [x]von Neumann (III)',
                               invert='Neumann, John von',
                               nocomma='Neumann John von')

    def test_falsy(self):
        # Empty/None/empty-list inputs produce empty sort strings
        self.check_all_methods('')
        self.check_all_methods(None, '', '', '', '')
        self.check_all_methods([], '', '', '', '')
def find_tests():
    '''Return a suite containing all the test cases in this module.'''
    loader = unittest.defaultTestLoader
    suite = loader.loadTestsFromTestCase(TestRemoveBracketedText)
    suite.addTests(loader.loadTestsFromTestCase(TestAuthorToAuthorSort))
    return suite
| 5,706 | Python | .py | 113 | 36.106195 | 97 | 0.552083 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,533 | rtf.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/rtf.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2008, Kovid Goyal <kovid at kovidgoyal.net>
"""
Edit metadata in RTF files.
"""
import codecs
import re
from calibre import force_unicode
from calibre.ebooks.metadata import MetaInformation
from polyglot.builtins import codepoint_to_chr, int_to_byte, string_or_bytes
# Patterns that extract individual fields from the RTF \info block. The
# negative lookbehind (?<!\\) ensures the match ends at a real (not
# backslash-escaped) closing brace.
title_pat = re.compile(br'\{\\info.*?\{\\title(.*?)(?<!\\)\}', re.DOTALL)
author_pat = re.compile(br'\{\\info.*?\{\\author(.*?)(?<!\\)\}', re.DOTALL)
comment_pat = re.compile(br'\{\\info.*?\{\\subject(.*?)(?<!\\)\}', re.DOTALL)
tags_pat = re.compile(br'\{\\info.*?\{\\category(.*?)(?<!\\)\}', re.DOTALL)
publisher_pat = re.compile(br'\{\\info.*?\{\\manager(.*?)(?<!\\)\}', re.DOTALL)
def get_document_info(stream):
    """
    Extract the \\info block from an RTF file.
    Return the info block as a string and the position in the file at which it
    starts.
    @param stream: File like object pointing to the RTF file.
    """
    block_size = 4096
    stream.seek(0)
    found, block = False, b""
    while not found:
        # Carry the last 6 bytes of the previous block so an r'{\info'
        # marker straddling a block boundary is still found
        prefix = block[-6:]
        block = prefix + stream.read(block_size)
        actual_block_size = len(block) - len(prefix)
        if len(block) == len(prefix):
            break  # EOF without finding an info block
        idx = block.find(br'{\info')
        if idx >= 0:
            found = True
            # Position of the info block in the file (account for the prefix)
            pos = stream.tell() - actual_block_size + idx - len(prefix)
            stream.seek(pos)
        else:
            if block.find(br'\sect') > -1:
                # Document body started; no info block will follow
                break
    if not found:
        return None, 0
    data, count, = [], 0
    pos = stream.tell()
    # Read byte by byte, tracking brace nesting until the info block closes.
    # NOTE(review): assumes a well-formed RTF file; if EOF is hit before the
    # block closes, this loop would not terminate -- confirm callers only
    # pass complete documents.
    while True:
        ch = stream.read(1)
        if ch == b'\\':
            # Escaped character: consume it so escaped braces do not
            # affect the nesting count
            data.append(ch + stream.read(1))
            continue
        if ch == b'{':
            count += 1
        elif ch == b'}':
            count -= 1
        data.append(ch)
        if count == 0:
            break  # matched the closing brace of the info block
    return b''.join(data), pos
def detect_codepage(stream):
    '''Return the Python codec name declared via \\ansicpg in the first
    512 bytes of the RTF stream, or None when absent or unknown.'''
    head = stream.read(512)
    m = re.search(br'\\ansicpg(\d+)', head)
    if m is None:
        return None
    cp = m.group(1)
    if cp == b'0':
        # Codepage 0 means "system default"; assume Windows-1252
        cp = b'1252'
    try:
        codec = (b'cp' + cp).decode('ascii')
        codecs.lookup(codec)  # raises LookupError for unknown codepages
    except Exception:
        return None
    return codec
def encode(unistr):
    '''Escape non-ASCII characters as RTF \\uNNNN? sequences.'''
    if not isinstance(unistr, str):
        unistr = force_unicode(unistr)
    out = []
    for ch in unistr:
        out.append(ch if ord(ch) < 128 else f'\\u{ord(ch)}?')
    return ''.join(out)
def decode(raw, codec):
    '''Expand RTF character escapes in ``raw`` to a unicode string.

    See https://en.wikipedia.org/wiki/Rich_Text_Format#Character_encoding
    '''
    def codepage(match):
        # \'hh -- a hex byte in the document codepage
        try:
            return int_to_byte(int(match.group(1), 16)).decode(codec)
        except ValueError:
            return '?'

    def uni(match):
        # \uNNNN? -- a decimal unicode codepoint followed by a fallback char
        try:
            return codepoint_to_chr(int(match.group(1)))
        except Exception:
            return '?'

    if isinstance(raw, bytes):
        raw = raw.decode('ascii', 'replace')
    if codec is not None:
        raw = re.sub(r"\\'([a-fA-F0-9]{2})", codepage, raw)
    return re.sub(r'\\u([0-9]{3,5}).', uni, raw)
def get_metadata(stream):
    """
    Return metadata as a L{MetaInfo} object
    """
    stream.seek(0)
    if stream.read(5) != br'{\rtf':
        return MetaInformation(_('Unknown'))
    block = get_document_info(stream)[0]
    if not block:
        return MetaInformation(_('Unknown'))
    stream.seek(0)
    cpg = detect_codepage(stream)
    stream.seek(0)

    def field(pat):
        # Decoded value of the field matched by pat, or None when absent
        m = pat.search(block)
        if m is None:
            return None
        return decode(m.group(1).strip(), cpg)

    title = field(title_pat)
    if title is None:
        title = _('Unknown')
    mi = MetaInformation(title)

    author = field(author_pat)
    if author:
        mi.authors = [x.strip() for x in author.split(',')]
    comment = field(comment_pat)
    if comment is not None:
        mi.comments = comment
    tags = field(tags_pat)
    if tags is not None:
        mi.tags = [t for t in (x.strip() for x in tags.split(',')) if t]
    publisher = field(publisher_pat)
    if publisher is not None:
        mi.publisher = publisher
    return mi
def create_metadata(stream, options):
    '''Insert a brand new \\info metadata block into the RTF document in
    stream, built from the attributes of ``options``.'''
    md = [r'{\info']
    if options.title:
        md.append(r'{\title %s}' % (encode(options.title),))
    if options.authors:
        au = options.authors
        if not isinstance(au, string_or_bytes):
            au = ', '.join(au)
        md.append(r'{\author %s}' % (encode(au),))
    # Some callers provide .comment, others .comments
    comp = options.comment if hasattr(options, 'comment') else options.comments
    if comp:
        md.append(r'{\subject %s}' % (encode(comp),))
    if options.publisher:
        md.append(r'{\manager %s}' % (encode(options.publisher),))
    if options.tags:
        md.append(r'{\category %s}' % (encode(', '.join(options.tags)),))
    if len(md) > 1:
        md.append('}')
    # Splice the info block in right after the 6-byte {\rtf1 header
    stream.seek(0)
    src = stream.read()
    ans = src[:6] + ''.join(md).encode('ascii') + src[6:]
    stream.seek(0)
    stream.write(ans)
def set_metadata(stream, options):
    '''
    Modify/add RTF metadata in stream
    @param options: Object with metadata attributes title, author, comment, category
    '''
    def add_metadata_item(src, name, val):
        # Insert a new {\name val} group just before the info block's
        # closing brace. r'{\ '[:-1] is a way to spell the literal '{\'
        # without ending a raw string in a backslash.
        index = src.rindex('}')
        return src[:index] + r'{\ '[:-1] + name + ' ' + val + '}}'
    src, pos = get_document_info(stream)
    if src is None:
        # No existing \info block: create one from scratch
        create_metadata(stream, options)
    else:
        src = src.decode('ascii')
        olen = len(src)
        # Template regex; 'name' is substituted with the real field name below
        base_pat = r'\{\\name(.*?)(?<!\\)\}'
        def replace_or_create(src, name, val):
            # Replace an existing {\name ...} group, or append a new one.
            # Backslashes in the replacement must be doubled because they
            # are special in re.subn replacement strings.
            val = encode(val)
            pat = re.compile(base_pat.replace('name', name), re.DOTALL)
            src, num = pat.subn(r'{\\' + name.replace('\\', r'\\') + ' ' + val.replace('\\', r'\\') + '}', src)
            if num == 0:
                src = add_metadata_item(src, name, val)
            return src
        if options.title is not None:
            src = replace_or_create(src, 'title', options.title)
        if options.comments is not None:
            src = replace_or_create(src, 'subject', options.comments)
        if options.authors is not None:
            src = replace_or_create(src, 'author', ', '.join(options.authors))
        if options.tags is not None:
            src = replace_or_create(src, 'category', ', '.join(options.tags))
        if options.publisher is not None:
            src = replace_or_create(src, 'manager', options.publisher)
        # Rewrite the file from the start of the old info block onwards
        stream.seek(pos + olen)
        after = stream.read()
        stream.seek(pos)
        stream.truncate()
        stream.write(src.encode('ascii'))
        stream.write(after)
def find_tests():
    '''Return a unittest suite that round-trips RTF metadata through
    set_metadata()/get_metadata().'''
    import unittest
    from io import BytesIO
    from calibre.ebooks.metadata.book.base import Metadata
    class Test(unittest.TestCase):
        def test_rtf_metadata(self):
            # Minimal RTF document with a cp1252 codepage declaration
            stream = BytesIO(br'{\rtf1\ansi\ansicpg1252}')
            m = Metadata('Test ø̄title', ['Author One', 'Author БTwo'])
            m.tags = 'tag1 見tag2'.split()
            m.comments = '<p>some ⊹comments</p>'
            m.publisher = 'publiSher'
            set_metadata(stream, m)
            stream.seek(0)
            o = get_metadata(stream)
            # Non-ASCII characters must survive the \u escaping round-trip
            for attr in 'title authors publisher comments tags'.split():
                self.assertEqual(getattr(m, attr), getattr(o, attr))
    return unittest.defaultTestLoader.loadTestsFromTestCase(Test)
| 7,904 | Python | .py | 214 | 29.11215 | 111 | 0.578996 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,534 | snb.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/snb.py | '''Read meta information from SNB files'''
__license__ = 'GPL v3'
__copyright__ = '2010, Li Fanxi <lifanxi@freemindworld.com>'
import io
import os
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.snb.snbfile import SNBFile
from calibre.utils.xml_parse import safe_xml_fromstring
def get_metadata(stream, extract_cover=True):
    """ Return metadata as a L{MetaInfo} object """
    mi = MetaInformation(_('Unknown'), [_('Unknown')])
    snbFile = SNBFile()
    try:
        if not hasattr(stream, 'write'):
            # Raw bytes were passed in; wrap them as a file-like object
            snbFile.Parse(io.BytesIO(stream), True)
        else:
            stream.seek(0)
            snbFile.Parse(stream, True)
        # Metadata lives in the embedded snbf/book.snbf XML document
        meta = snbFile.GetFileStream('snbf/book.snbf')
        if meta is not None:
            meta = safe_xml_fromstring(meta)
            mi.title = meta.find('.//head/name').text
            mi.authors = [meta.find('.//head/author').text]
            # SNB stores languages like zh_CN; normalize to zh-cn
            mi.language = meta.find('.//head/language').text.lower().replace('_', '-')
            mi.publisher = meta.find('.//head/publisher').text
            if extract_cover:
                cover = meta.find('.//head/cover')
                if cover is not None and cover.text is not None:
                    root, ext = os.path.splitext(cover.text)
                    if ext == '.jpeg':
                        ext = '.jpg'
                    # cover_data expects a 3-letter format identifier
                    mi.cover_data = (ext[-3:], snbFile.GetFileStream('snbc/images/' + cover.text))
    except Exception:
        import traceback
        traceback.print_exc()
    return mi
| 1,542 | Python | .py | 36 | 33.277778 | 98 | 0.592369 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,535 | search_internet.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/search_internet.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from polyglot.builtins import iteritems
from polyglot.urllib import quote, quote_plus
# Templates for author-level searches on various websites, keyed by a short
# source id. The {author} placeholder is filled in (URL-quoted) by url_for().
AUTHOR_SEARCHES = {
    'goodreads':
    'https://www.goodreads.com/book/author/{author}',
    'wikipedia':
    'https://en.wikipedia.org/w/index.php?search={author}',
    'google':
    'https://www.google.com/search?tbm=bks&q=inauthor:%22{author}%22',
    'amzn':
    'https://www.amazon.com/gp/search/ref=sr_adv_b/?search-alias=stripbooks&unfiltered=1&field-author={author}&sort=relevanceexprank'
}

# Templates for per-book searches; these take both {author} and {title}
# placeholders.
BOOK_SEARCHES = {
    'goodreads':
    'https://www.goodreads.com/search?q={author}+{title}&search%5Bsource%5D=goodreads&search_type=books&tab=books',
    'google':
    'https://www.google.com/search?tbm=bks&q=inauthor:%22{author}%22+intitle:%22{title}%22',
    'gws':
    'https://www.google.com/search?q=inauthor:%22{author}%22+intitle:%22{title}%22',
    'amzn':
    'https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Dstripbooks&field-keywords={author}+{title}',
    'gimg':
    'https://www.google.com/images?q=%22{author}%22+%22{title}%22',
}

# Human readable (translated) display names for the source ids above.
NAMES = {
    'goodreads': _('Goodreads'),
    'google': _('Google Books'),
    'wikipedia': _('Wikipedia'),
    'gws': _('Google web search'),
    'amzn': _('Amazon'),
    'gimg': _('Google Images'),
}

DEFAULT_AUTHOR_SOURCE = 'goodreads'
assert DEFAULT_AUTHOR_SOURCE in AUTHOR_SEARCHES

# Convenience aliases: look up a display name, enumerate available sources.
name_for = NAMES.get
all_book_searches = BOOK_SEARCHES.__iter__
all_author_searches = AUTHOR_SEARCHES.__iter__
def qquote(val, use_plus=True):
    '''URL-quote *val* (str or bytes) and return the result as str.

    Spaces become ``+`` when *use_plus* is true, ``%20`` otherwise. Text is
    encoded as UTF-8 (with replacement) before quoting.
    '''
    if isinstance(val, bytes):
        raw = val
    else:
        raw = val.encode('utf-8', 'replace')
    quoter = quote_plus if use_plus else quote
    ans = quoter(raw)
    return ans.decode('utf-8') if isinstance(ans, bytes) else ans
def specialised_quote(template, val):
    # goodreads URLs get percent-encoded spaces instead of '+'; every other
    # template uses the regular plus-quoting.
    use_plus = 'goodreads.com' not in template
    return qquote(val, use_plus)
def url_for(template, data):
    '''Fill *template*'s placeholders with URL-quoted values from *data*.'''
    quoted = {key: specialised_quote(template, value) for key, value in iteritems(data)}
    return template.format(**quoted)
def url_for_author_search(key, **kw):
    '''Build the author-search URL for the source identified by *key*.'''
    template = AUTHOR_SEARCHES[key]
    return url_for(template, kw)
def url_for_book_search(key, **kw):
    '''Build the book-search URL for the source identified by *key*.'''
    template = BOOK_SEARCHES[key]
    return url_for(template, kw)
| 2,178 | Python | .py | 54 | 36.351852 | 133 | 0.687708 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,536 | epub.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/epub.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''Read meta information from epub files'''
import io
import os
import posixpath
from contextlib import closing, suppress
from calibre import CurrentDir
from calibre.ebooks.metadata.opf import get_metadata as get_metadata_from_opf
from calibre.ebooks.metadata.opf import set_metadata as set_metadata_opf
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.imghdr import what as what_image_type
from calibre.utils.localunzip import LocalZipFile
from calibre.utils.xml_parse import safe_xml_fromstring
from calibre.utils.zipfile import BadZipfile, ZipFile, safe_replace
class EPubException(Exception):
    # Base class for all EPUB processing errors raised by this module.
    pass


class OCFException(EPubException):
    # Error in the OCF (Open Container Format) layer of an EPUB.
    pass


class ContainerException(OCFException):
    # Error while parsing META-INF/container.xml.
    pass
class Container(dict):
    '''Mapping of media-type -> full-path, parsed from META-INF/container.xml.'''

    def __init__(self, stream=None, file_exists=None):
        '''
        :param stream: file-like object holding the container.xml markup;
            when falsy the mapping is left empty
        :param file_exists: optional predicate used to skip rootfile entries
            whose target file is not actually present in the archive
        '''
        if not stream:
            return
        root = safe_xml_fromstring(stream.read())
        if root.get('version', None) != '1.0':
            raise EPubException("unsupported version of OCF")
        rootfiles = root.xpath('./*[local-name()="rootfiles"]')
        if not rootfiles:
            raise EPubException("<rootfiles/> element missing")
        for entry in rootfiles[0].xpath('./*[local-name()="rootfile"]'):
            media_type = entry.get('media-type')
            full_path = entry.get('full-path')
            if not media_type or not full_path:
                raise EPubException("<rootfile/> element malformed")
            # Some Kobo epubs have multiple rootfile entries, but only one
            # exists. Ignore the ones that don't exist.
            if file_exists and not file_exists(full_path):
                continue
            self[media_type] = full_path
class OCF:
    # Well-known constants from the OCF (Open Container Format) specification.
    MIMETYPE = 'application/epub+zip'
    CONTAINER_PATH = 'META-INF/container.xml'
    ENCRYPTION_PATH = 'META-INF/encryption.xml'

    def __init__(self):
        # Abstract: concrete subclasses (see OCFReader) must be used instead.
        raise NotImplementedError('Abstract base class')
class Encryption:
    '''Parsed view of META-INF/encryption.xml: maps item URIs to the
    algorithm applied to them.'''

    # Font obfuscation algorithms are not treated as real encryption by
    # is_encrypted().
    OBFUSCATION_ALGORITHMS = frozenset(['http://ns.adobe.com/pdf/enc#RC',
            'http://www.idpf.org/2008/embedding'])

    def __init__(self, raw):
        self.root = safe_xml_fromstring(raw) if raw else None
        self.entries = {}
        if self.root is None:
            return
        for method in self.root.xpath('descendant::*[contains(name(), "EncryptionMethod")]'):
            algorithm = method.get('Algorithm', '')
            refs = method.getparent().xpath('descendant::*[contains(name(), "CipherReference")]')
            if not refs:
                continue
            uri = refs[0].get('URI', '')
            if uri and algorithm:
                self.entries[uri] = algorithm

    def is_encrypted(self, uri):
        '''Return True if *uri* is encrypted with something other than a
        known font-obfuscation algorithm.'''
        algo = self.entries.get(uri, None)
        return algo is not None and algo not in self.OBFUSCATION_ALGORITHMS
class OCFReader(OCF):
    '''Read-only access to an OCF container. Concrete subclasses supply
    open()/exists() (and optionally read_bytes()) for a particular storage
    backend (zip archive, extracted directory).'''

    def __init__(self):
        # The mimetype member is informational; warn but never fail on a bad
        # or missing declaration.
        try:
            mimetype = self.read_bytes('mimetype').decode('utf-8').rstrip()
            if mimetype != OCF.MIMETYPE:
                print('WARNING: Invalid mimetype declaration', mimetype)
        except:
            print('WARNING: Epub doesn\'t contain a valid mimetype declaration')
        try:
            with closing(self.open(OCF.CONTAINER_PATH)) as f:
                self.container = Container(f, self.exists)
        except KeyError:
            raise EPubException("missing OCF container.xml file")
        self.opf_path = self.container[OPF.MIMETYPE]
        if not self.opf_path:
            raise EPubException("missing OPF package file entry in container")
        # Lazily populated by the opf/encryption_meta properties below.
        self._opf_cached = self._encryption_meta_cached = None

    @property
    def opf(self):
        # Parse the OPF package document on first access and cache it.
        if self._opf_cached is None:
            try:
                with closing(self.open(self.opf_path)) as f:
                    self._opf_cached = OPF(f, self.root, populate_spine=False)
            except KeyError:
                raise EPubException("missing OPF package file")
        return self._opf_cached

    @property
    def encryption_meta(self):
        # Parse META-INF/encryption.xml on first access; any failure is
        # treated as "no encryption information".
        if self._encryption_meta_cached is None:
            try:
                self._encryption_meta_cached = Encryption(self.read_bytes(self.ENCRYPTION_PATH))
            except Exception:
                self._encryption_meta_cached = Encryption(None)
        return self._encryption_meta_cached

    def read_bytes(self, name):
        # Return the raw bytes of the named container member.
        return self.open(name).read()

    def exists(self, path):
        # Default existence check: try to open the member.
        try:
            self.open(path).close()
            return True
        except OSError:
            return False
class OCFZipReader(OCFReader):
    '''OCFReader backed by a zip archive (either a ZipFile or the pure
    python LocalZipFile fallback).'''

    def __init__(self, stream, mode='r', root=None):
        if isinstance(stream, (LocalZipFile, ZipFile)):
            self.archive = stream
        else:
            try:
                self.archive = ZipFile(stream, mode=mode)
            except BadZipfile:
                raise EPubException("not a ZIP .epub OCF container")
        if root is None:
            # Derive a base directory from the stream's file name, falling
            # back to the current working directory.
            stream_name = getattr(stream, 'name', False)
            if stream_name:
                root = os.path.abspath(os.path.dirname(stream_name))
            else:
                root = os.getcwd()
        self.root = root
        super().__init__()

    def open(self, name):
        # LocalZipFile hands out its own file-like objects; plain ZipFile
        # member data is wrapped in an in-memory buffer.
        if isinstance(self.archive, LocalZipFile):
            return self.archive.open(name)
        return io.BytesIO(self.archive.read(name))

    def read_bytes(self, name):
        return self.archive.read(name)

    def exists(self, path):
        try:
            self.archive.getinfo(path)
        except KeyError:
            return False
        return True
def get_zip_reader(stream, root=None):
    '''Open *stream* as an OCFZipReader, falling back to the pure python
    LocalZipFile implementation when ZipFile cannot handle the archive.'''
    try:
        archive = ZipFile(stream, mode='r')
    except Exception:
        stream.seek(0)
        archive = LocalZipFile(stream)
    return OCFZipReader(archive, root=root)
class OCFDirReader(OCFReader):
    '''OCFReader backed by an EPUB that has been extracted to a directory.'''

    def __init__(self, path):
        self.root = path
        super().__init__()

    def open(self, path):
        full = os.path.join(self.root, path)
        return open(full, 'rb')

    def read_bytes(self, path):
        with self.open(path) as src:
            return src.read()
def render_cover(cpage, zf, reader=None):
    '''Produce JPEG cover data by rendering *cpage* (a path inside the zip
    archive *zf*), or return None when it cannot be rendered (no page,
    encrypted content, or the page is missing after extraction).'''
    from calibre.ebooks import render_html_svg_workaround
    from calibre.utils.logging import default_log
    if not cpage:
        return
    if reader is not None and reader.encryption_meta.is_encrypted(cpage):
        # Encrypted cover page: nothing useful can be rendered.
        return

    with TemporaryDirectory('_epub_meta') as tdir:
        with CurrentDir(tdir):
            # extractall() relies on the cwd being the temp dir set above.
            zf.extractall()
            cpage = os.path.join(tdir, cpage)
            if not os.path.exists(cpage):
                return

            with suppress(Exception):
                # In the case of manga, the first spine item may be an image
                # already, so treat it as a raster cover.
                file_format = what_image_type(cpage)
                if file_format == "jpeg":
                    # Only JPEG is allowed since elsewhere we assume raster covers
                    # are JPEG. In principle we could convert other image formats
                    # but this is already an out-of-spec case that happens to
                    # arise in books from some stores.
                    with open(cpage, "rb") as source:
                        return source.read()

            return render_html_svg_workaround(cpage, default_log, root=tdir)
def get_cover(raster_cover, first_spine_item, reader):
    '''Return raw cover data, preferring the declared raster cover and
    falling back to rendering the first spine item.'''
    archive = reader.archive
    if raster_cover:
        if reader.encryption_meta.is_encrypted(raster_cover):
            return
        try:
            return reader.read_bytes(raster_cover)
        except Exception:
            # Unreadable raster cover: fall through to rendering the spine item
            pass
    return render_cover(first_spine_item, archive, reader=reader)
def get_metadata(stream, extract_cover=True):
    """ Return metadata as a :class:`Metadata` object """
    stream.seek(0)
    reader = get_zip_reader(stream)
    opfbytes = reader.read_bytes(reader.opf_path)
    mi, ver, raster_cover, first_spine_item = get_metadata_from_opf(opfbytes)
    if extract_cover:
        # Paths in the OPF are relative to the OPF's own directory.
        base = posixpath.dirname(reader.opf_path)

        def resolve(relpath):
            return posixpath.normpath(posixpath.join(base, relpath))

        if raster_cover:
            raster_cover = resolve(raster_cover)
        if first_spine_item:
            first_spine_item = resolve(first_spine_item)
        try:
            cdata = get_cover(raster_cover, first_spine_item, reader)
            if cdata is not None:
                mi.cover_data = ('jpg', cdata)
        except Exception:
            import traceback
            traceback.print_exc()
    mi.timestamp = None
    return mi
def get_quick_metadata(stream):
    '''Like get_metadata(), but skips cover extraction for speed.'''
    return get_metadata(stream, extract_cover=False)
def serialize_cover_data(new_cdata, cpath):
    '''Re-encode raw cover data into the image format implied by the file
    extension of *cpath*.'''
    from calibre.utils.img import save_cover_data_to
    fmt = os.path.splitext(cpath)[1][1:]
    return save_cover_data_to(new_cdata, data_fmt=fmt)
def set_metadata(stream, mi, apply_null=False, update_timestamp=False, force_identifiers=False, add_missing_cover=True):
    '''Write the metadata in *mi* into the EPUB open in *stream*, updating
    the OPF in place and, when possible, replacing the cover image too.

    :param apply_null: when True, null fields in mi overwrite existing values
    :param update_timestamp: when True, refresh the timestamp in the OPF
    :param force_identifiers: when True, replace identifiers instead of merging
    :param add_missing_cover: when True, add a cover entry if the OPF lacks one
    '''
    stream.seek(0)
    reader = get_zip_reader(stream, root=os.getcwd())
    new_cdata = None
    try:
        new_cdata = mi.cover_data[1]
        if not new_cdata:
            raise Exception('no cover')
    except Exception:
        # No in-memory cover data: fall back to the file path in mi.cover.
        try:
            with open(mi.cover, 'rb') as f:
                new_cdata = f.read()
        except Exception:
            pass
    opfbytes, ver, raster_cover = set_metadata_opf(
        reader.read_bytes(reader.opf_path), mi, cover_prefix=posixpath.dirname(reader.opf_path),
        cover_data=new_cdata, apply_null=apply_null, update_timestamp=update_timestamp,
        force_identifiers=force_identifiers, add_missing_cover=add_missing_cover)
    cpath = None
    replacements = {}
    if new_cdata and raster_cover:
        # Replace the existing raster cover inside the zip, but only when it
        # is unencrypted and a format we can serialize to.
        try:
            cpath = posixpath.join(posixpath.dirname(reader.opf_path),
                    raster_cover)
            cover_replacable = not reader.encryption_meta.is_encrypted(cpath) and \
                    os.path.splitext(cpath)[1].lower() in ('.png', '.jpg', '.jpeg')
            if cover_replacable:
                replacements[cpath] = serialize_cover_data(new_cdata, cpath)
        except Exception:
            import traceback
            traceback.print_exc()

    if isinstance(reader.archive, LocalZipFile):
        reader.archive.safe_replace(reader.container[OPF.MIMETYPE], opfbytes,
            extra_replacements=replacements, add_missing=True)
    else:
        safe_replace(stream, reader.container[OPF.MIMETYPE], opfbytes,
            extra_replacements=replacements, add_missing=True)
    try:
        # The serialized replacement cover is a named file-like object
        # (it exposes .name and .close()); clean it up after use.
        if cpath is not None:
            replacements[cpath].close()
            os.remove(replacements[cpath].name)
    except Exception:
        pass
| 10,787 | Python | .py | 256 | 32.546875 | 120 | 0.624044 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,537 | edelweiss.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/edelweiss.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
import time
from threading import Thread
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
from calibre import as_unicode, random_user_agent
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import Source
def clean_html(raw):
    '''Decode raw HTML bytes to unicode and strip ASCII control characters.'''
    from calibre.ebooks.chardet import xml_to_unicode
    from calibre.utils.cleantext import clean_ascii_chars
    decoded = xml_to_unicode(raw, strip_encoding_pats=True,
                             resolve_entities=True, assume_utf8=True)[0]
    return clean_ascii_chars(decoded)
def parse_html(raw):
    '''Clean raw HTML and parse it into an lxml tree.'''
    cleaned = clean_html(raw)
    from html5_parser import parse
    return parse(cleaned)
def astext(node):
    '''Return the text content of an lxml node, stripped of surrounding
    whitespace (tail text excluded).'''
    from lxml import etree
    text = etree.tostring(node, method='text', encoding='unicode',
                          with_tail=False)
    return text.strip()
class Worker(Thread):  # {{{
    '''Fetch and parse the Edelweiss details page for a single SKU in the
    background, putting the resulting Metadata object on result_queue.'''

    def __init__(self, basic_data, relevance, result_queue, br, timeout, log, plugin):
        Thread.__init__(self)
        self.daemon = True
        # basic_data is the dict produced by get_basic_data() for this SKU.
        self.basic_data = basic_data
        self.br, self.log, self.timeout = br, log, timeout
        self.result_queue, self.plugin, self.sku = result_queue, plugin, self.basic_data['sku']
        self.relevance = relevance

    def run(self):
        # Fetch the "enhanced" product detail fragment for this SKU.
        url = ('https://www.edelweiss.plus/GetTreelineControl.aspx?controlName=/uc/product/two_Enhanced.ascx&'
                'sku={0}&idPrefix=content_1_{0}&mode=0'.format(self.sku))
        try:
            raw = self.br.open_novisit(url, timeout=self.timeout).read()
        except:
            self.log.exception('Failed to load comments page: %r'%url)
            return

        try:
            mi = self.parse(raw)
            mi.source_relevance = self.relevance
            self.plugin.clean_downloaded_metadata(mi)
            self.result_queue.put(mi)
        except:
            self.log.exception('Failed to parse details for sku: %s'%self.sku)

    def parse(self, raw):
        '''Combine the pre-scraped basic_data with comments parsed from the
        details page *raw* into a Metadata object.'''
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import UNDEFINED_DATE
        root = parse_html(raw)
        mi = Metadata(self.basic_data['title'], self.basic_data['authors'])

        # Identifiers
        if self.basic_data['isbns']:
            mi.isbn = self.basic_data['isbns'][0]
        mi.set_identifier('edelweiss', self.sku)

        # Tags (a leading '&' marker is stripped from tag names)
        if self.basic_data['tags']:
            mi.tags = self.basic_data['tags']
            mi.tags = [t[1:].strip() if t.startswith('&') else t for t in mi.tags]

        # Publisher
        mi.publisher = self.basic_data['publisher']

        # Pubdate
        if self.basic_data['pubdate'] and self.basic_data['pubdate'].year != UNDEFINED_DATE:
            mi.pubdate = self.basic_data['pubdate']

        # Rating
        if self.basic_data['rating']:
            mi.rating = self.basic_data['rating']

        # Comments: concatenate the summary, contributor bio and
        # quotes/reviews sections, when present.
        comments = ''
        for cid in ('summary', 'contributorbio', 'quotes_reviews'):
            cid = 'desc_{}{}-content'.format(cid, self.sku)
            div = root.xpath('//*[@id="{}"]'.format(cid))
            if div:
                comments += self.render_comments(div[0])
        if comments:
            mi.comments = comments

        mi.has_cover = self.plugin.cached_identifier_to_cover_url(self.sku) is not None
        return mi

    def render_comments(self, desc):
        '''Serialize a details <div> to sanitized HTML suitable for the
        comments field (links neutralized, attributes stripped).'''
        from lxml import etree

        from calibre.library.comments import sanitize_comments_html
        for c in desc.xpath('descendant::noscript'):
            c.getparent().remove(c)
        # Neutralize links: keep the text, drop the hyperlink.
        for a in desc.xpath('descendant::a[@href]'):
            del a.attrib['href']
            a.tag = 'span'
        desc = etree.tostring(desc, method='html', encoding='unicode').strip()

        # remove all attributes from tags
        desc = re.sub(r'<([a-zA-Z0-9]+)\s[^>]+>', r'<\1>', desc)
        # Collapse whitespace
        # desc = re.sub('\n+', '\n', desc)
        # desc = re.sub(' +', ' ', desc)
        # Remove comments
        desc = re.sub(r'(?s)<!--.*?-->', '', desc)
        return sanitize_comments_html(desc)
# }}}
def get_basic_data(browser, log, *skus):
    '''Yield one dict of basic metadata (title, authors, isbns, tags,
    pubdate, format, rating, cover URL) per SKU, scraped from the Edelweiss
    multi-title list view in a single request.'''
    from mechanize import Request

    from calibre.utils.date import parse_only_date
    zeroes = ','.join('0' for sku in skus)
    data = {
        'skus': ','.join(skus),
        'drc': zeroes,
        'startPosition': '0',
        'sequence': '1',
        'selected': zeroes,
        'itemID': '0',
        'orderID': '0',
        'mailingID': '',
        'tContentWidth': '926',
        'originalOrder': ','.join(type('')(i) for i in range(len(skus))),
        'selectedOrderID': '0',
        'selectedSortColumn': '0',
        'listType': '1',
        'resultType': '32',
        'blockView': '1',
    }
    items_data_url = 'https://www.edelweiss.plus/GetTreelineControl.aspx?controlName=/uc/listviews/ListView_Title_Multi.ascx'
    req = Request(items_data_url, data)
    response = browser.open_novisit(req)
    raw = response.read()
    root = parse_html(raw)
    for item in root.xpath('//div[@data-priority]'):
        row = item.getparent().getparent()
        sku = item.get('id').split('-')[-1]
        # Prefer longer identifiers (ISBN-13 over ISBN-10).
        isbns = [x.strip() for x in row.xpath('descendant::*[contains(@class, "pev_sku")]/text()')[0].split(',') if check_isbn(x.strip())]
        isbns.sort(key=len, reverse=True)
        try:
            tags = [x.strip() for x in astext(row.xpath('descendant::*[contains(@class, "pev_categories")]')[0]).split('/')]
        except IndexError:
            tags = []
        rating = 0
        # The community rating is rendered as a filled bar; derive a 0-1
        # fraction from its width vs max-width.
        for bar in row.xpath('descendant::*[contains(@class, "bgdColorCommunity")]/@style'):
            m = re.search(r'width: (\d+)px;.*max-width: (\d+)px', bar)
            if m is not None:
                rating = float(m.group(1)) / float(m.group(2))
                break
        try:
            pubdate = parse_only_date(astext(row.xpath('descendant::*[contains(@class, "pev_shipDate")]')[0]
                ).split(':')[-1].split(u'\xa0')[-1].strip(), assume_utc=True)
        except Exception:
            log.exception('Error parsing published date')
            pubdate = None
        authors = []
        for x in [x.strip() for x in row.xpath('descendant::*[contains(@class, "pev_contributor")]/@title')]:
            authors.extend(a.strip() for a in x.split(','))
        entry = {
            'sku': sku,
            'cover': row.xpath('descendant::img/@src')[0].split('?')[0],
            'publisher': astext(row.xpath('descendant::*[contains(@class, "headerPublisher")]')[0]),
            'title': astext(row.xpath('descendant::*[@id="title_{}"]'.format(sku))[0]),
            'authors': authors,
            'isbns': isbns,
            'tags': tags,
            'pubdate': pubdate,
            'format': ' '.join(row.xpath('descendant::*[contains(@class, "pev_format")]/text()')).strip(),
            'rating': rating,
        }
        # Relative cover src means there is no usable cover image.
        if entry['cover'].startswith('/'):
            entry['cover'] = None
        yield entry
class Edelweiss(Source):
    '''Metadata source plugin for the Edelweiss publisher catalog.'''

    name = 'Edelweiss'
    version = (2, 0, 1)
    minimum_calibre_version = (3, 6, 0)
    description = _('Downloads metadata and covers from Edelweiss - A catalog updated by book publishers')

    capabilities = frozenset(['identify', 'cover'])
    touched_fields = frozenset([
        'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',
        'identifier:isbn', 'identifier:edelweiss', 'rating'])
    supports_gzip_transfer_encoding = True
    has_html_comments = True

    @property
    def user_agent(self):
        # Pass in an index to random_user_agent() to test with a particular
        # user agent
        return random_user_agent(allow_ie=False)

    def _get_book_url(self, sku):
        # Human-visible product page for a SKU (None when sku is falsy).
        if sku:
            return 'https://www.edelweiss.plus/#sku={}&page=1'.format(sku)

    def get_book_url(self, identifiers):  # {{{
        sku = identifiers.get('edelweiss', None)
        if sku:
            return 'edelweiss', sku, self._get_book_url(sku)
    # }}}

    def get_cached_cover_url(self, identifiers):  # {{{
        # Resolve a SKU (directly or via a cached ISBN mapping) to a cover URL.
        sku = identifiers.get('edelweiss', None)
        if not sku:
            isbn = identifiers.get('isbn', None)
            if isbn is not None:
                sku = self.cached_isbn_to_identifier(isbn)
        return self.cached_identifier_to_cover_url(sku)
    # }}}

    def create_query(self, log, title=None, authors=None, identifiers={}):
        '''Build a keyword-search URL from ISBN or title/author tokens;
        returns None when there is nothing to search for.'''
        try:
            from urllib.parse import urlencode
        except ImportError:
            from urllib import urlencode
        import time
        BASE_URL = ('https://www.edelweiss.plus/GetTreelineControl.aspx?'
        'controlName=/uc/listviews/controls/ListView_data.ascx&itemID=0&resultType=32&dashboardType=8&itemType=1&dataType=products&keywordSearch&')
        keywords = []
        isbn = check_isbn(identifiers.get('isbn', None))
        if isbn is not None:
            keywords.append(isbn)
        elif title:
            title_tokens = list(self.get_title_tokens(title))
            if title_tokens:
                keywords.extend(title_tokens)
            author_tokens = self.get_author_tokens(authors, only_first_author=True)
            if author_tokens:
                keywords.extend(author_tokens)
        if not keywords:
            return None
        params = {
            'q': (' '.join(keywords)).encode('utf-8'),
            '_': type('')(int(time.time()))
        }
        return BASE_URL+urlencode(params)
    # }}}

    def identify(self, log, result_queue, abort, title=None, authors=None,  # {{{
            identifiers={}, timeout=30):
        import json

        br = self.browser
        br.addheaders = [
            ('Referer', 'https://www.edelweiss.plus/'),
            ('X-Requested-With', 'XMLHttpRequest'),
            ('Cache-Control', 'no-cache'),
            ('Pragma', 'no-cache'),
        ]
        if 'edelweiss' in identifiers:
            items = [identifiers['edelweiss']]
        else:
            log.error('Currently Edelweiss returns random books for search queries')
            return
            # NOTE(review): everything below in this branch is intentionally
            # unreachable (dead code kept for when keyword search is usable
            # again) — re-enable by removing the early return above.
            query = self.create_query(log, title=title, authors=authors,
                    identifiers=identifiers)
            if not query:
                log.error('Insufficient metadata to construct query')
                return
            log('Using query URL:', query)
            try:
                raw = br.open(query, timeout=timeout).read().decode('utf-8')
            except Exception as e:
                log.exception('Failed to make identify query: %r'%query)
                return as_unicode(e)
            items = re.search(r'window[.]items\s*=\s*(.+?);', raw)
            if items is None:
                log.error('Failed to get list of matching items')
                log.debug('Response text:')
                log.debug(raw)
                return
            items = json.loads(items.group(1))
            if (not items and identifiers and title and authors and
                    not abort.is_set()):
                return self.identify(log, result_queue, abort, title=title,
                        authors=authors, timeout=timeout)

        if not items:
            return

        workers = []
        items = items[:5]
        for i, item in enumerate(get_basic_data(self.browser, log, *items)):
            sku = item['sku']
            for isbn in item['isbns']:
                self.cache_isbn_to_identifier(isbn, sku)
            if item['cover']:
                self.cache_identifier_to_cover_url(sku, item['cover'])
            fmt = item['format'].lower()
            if 'audio' in fmt or 'mp3' in fmt:
                continue  # Audio-book, ignore
            workers.append(Worker(item, i, result_queue, br.clone_browser(), timeout, log, self))

        if not workers:
            return

        for w in workers:
            w.start()
            # Don't send all requests at the same time
            time.sleep(0.1)

        while not abort.is_set():
            a_worker_is_alive = False
            for w in workers:
                w.join(0.2)
                if abort.is_set():
                    break
                if w.is_alive():
                    a_worker_is_alive = True
            if not a_worker_is_alive:
                break
    # }}}

    def download_cover(self, log, result_queue, abort,  # {{{
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        # Use a cached cover URL when available, otherwise run identify()
        # first to populate the cache.
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info('No cached cover found, running identify')
            rq = Queue()
            self.identify(log, rq, abort, title=title, authors=authors,
                    identifiers=identifiers)
            if abort.is_set():
                return
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            results.sort(key=self.identify_results_keygen(
                title=title, authors=authors, identifiers=identifiers))
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info('No cover found')
            return

        if abort.is_set():
            return
        br = self.browser
        log('Downloading cover from:', cached_url)
        try:
            cdata = br.open_novisit(cached_url, timeout=timeout).read()
            result_queue.put((self, cdata))
        except:
            log.exception('Failed to download cover from:', cached_url)
    # }}}
if __name__ == '__main__':
    # Self tests for the plugin; these run against the live Edelweiss site.
    from calibre.ebooks.metadata.sources.test import authors_test, comments_test, pubdate_test, test_identify_plugin, title_test
    tests = [
        (  # A title and author search
            {'title': 'The Husband\'s Secret', 'authors':['Liane Moriarty']},
            [title_test('The Husband\'s Secret', exact=True),
             authors_test(['Liane Moriarty'])]
        ),

        (  # An isbn present in edelweiss
            {'identifiers':{'isbn': '9780312621360'}, },
            [title_test('Flame: A Sky Chasers Novel', exact=True),
             authors_test(['Amy Kathleen Ryan'])]
        ),

        # Multiple authors and two part title and no general description
        ({'identifiers':{'edelweiss':'0321180607'}},
        [title_test(
            "XQuery From the Experts: A Guide to the W3C XML Query Language"
            , exact=True), authors_test([
                'Howard Katz', 'Don Chamberlin', 'Denise Draper', 'Mary Fernandez',
                'Michael Kay', 'Jonathan Robie', 'Michael Rys', 'Jerome Simeon',
                'Jim Tivy', 'Philip Wadler']), pubdate_test(2003, 8, 22),
            comments_test('Jérôme Siméon'), lambda mi: bool(mi.comments and 'No title summary' not in mi.comments)
        ]),
    ]
    # Adjust start/stop to run a subset of the tests.
    start, stop = 0, len(tests)

    tests = tests[start:stop]
    test_identify_plugin(Edelweiss.name, tests)
| 15,453 | Python | .py | 356 | 32.761236 | 147 | 0.569063 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,538 | openlibrary.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/openlibrary.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.metadata.sources.base import Source
class OpenLibrary(Source):
    '''Cover-only metadata source that queries covers.openlibrary.org by ISBN.'''

    name = 'Open Library'
    version = (1, 0, 1)
    minimum_calibre_version = (2, 80, 0)
    description = _('Downloads covers from The Open Library')

    capabilities = frozenset(['cover'])

    # %s is replaced by the ISBN; default=false makes the server 404 instead
    # of serving a placeholder image when no cover exists.
    OPENLIBRARY = 'https://covers.openlibrary.org/b/isbn/%s-L.jpg?default=false'

    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        return  # site is currently down and timing out leading to slow metadata retrieval
        # NOTE(review): the code below is intentionally unreachable; restore
        # it by removing the early return above once the site is reliable.
        if 'isbn' not in identifiers:
            return
        isbn = identifiers['isbn']
        br = self.browser
        try:
            ans = br.open_novisit(self.OPENLIBRARY%isbn, timeout=timeout).read()
            result_queue.put((self, ans))
        except Exception as e:
            # A 404 means "no cover for this ISBN", anything else is logged
            # as a real failure.
            if callable(getattr(e, 'getcode', None)) and e.getcode() == 404:
                log.error('No cover for ISBN: %r found'%isbn)
            else:
                log.exception('Failed to download cover for ISBN:', isbn)
| 1,394 | Python | .py | 29 | 40.586207 | 90 | 0.652911 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,539 | worker.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/worker.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
# License: GPLv3 Copyright: 2012, Kovid Goyal <kovid at kovidgoyal.net>
import os
from collections import Counter
from functools import wraps
from io import BytesIO
from threading import Event, Thread
from calibre.customize.ui import metadata_plugins
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import OPF, metadata_to_opf
from calibre.ebooks.metadata.sources.base import dump_caches, load_caches
from calibre.ebooks.metadata.sources.covers import download_cover, run_download
from calibre.ebooks.metadata.sources.identify import identify, msprefs
from calibre.ebooks.metadata.sources.update import patch_plugins
from calibre.utils.date import as_utc
from calibre.utils.logging import GUILog
from polyglot.builtins import iteritems
from polyglot.queue import Empty, Queue
def merge_result(oldmi, newmi, ensure_fields=None):
    '''Blank out fields of *newmi* that the user wants ignored or that are
    unchanged from *oldmi*, so a later set_metadata does minimal work.
    Returns *newmi*, mutated in place.'''
    blank = Metadata(_('Unknown'))
    for field in msprefs['ignore_fields']:
        # Identifier-style fields and explicitly requested fields are kept.
        if ':' in field or (ensure_fields and field in ensure_fields):
            continue
        setattr(newmi, field, getattr(blank, field))
    touched = set()
    for plugin in metadata_plugins(['identify']):
        touched |= plugin.touched_fields

    def is_equal(x, y):
        # Normalize timezone-aware datetimes to UTC before comparing.
        if hasattr(x, 'tzinfo'):
            x = as_utc(x)
        if hasattr(y, 'tzinfo'):
            y = as_utc(y)
        return x == y

    for field in touched:
        # Optimize so that set_metadata does not have to do extra work later
        if field.startswith('identifier:') or field in ('series', 'series_index'):
            continue
        if not newmi.is_null(field) and is_equal(getattr(newmi, field), getattr(oldmi, field)):
            setattr(newmi, field, getattr(blank, field))

    if (newmi.series, newmi.series_index) == (oldmi.series, oldmi.series_index):
        newmi.series = None
        newmi.series_index = 1

    return newmi
def shutdown_webengine_workers(func):
    '''Decorator: run *func*, then always shut down any WebEngine scraper
    worker processes that may have been started while it ran.'''
    @wraps(func)
    def wrapper(*args, **kwargs):
        from calibre.scraper.simple import cleanup_overseers
        try:
            return func(*args, **kwargs)
        finally:
            # cleanup_overseers() returns the callable that performs the
            # actual shutdown; invoke it unconditionally.
            cleanup_overseers()()
    return wrapper
@shutdown_webengine_workers
def main(do_identify, covers, metadata, ensure_fields, tdir):
    '''Bulk metadata/cover download worker entry point.

    For each (book_id, serialized OPF) in *metadata*, optionally run
    identification and/or cover download, writing per-book results into
    *tdir* as <id>.mi, <id>.cover and <id>.log files.

    Returns (failed_ids, failed_covers, all_failed).
    '''
    failed_ids = set()
    failed_covers = set()
    all_failed = True
    log = GUILog()
    patch_plugins()

    for book_id, mi in iteritems(metadata):
        # mi arrives as serialized OPF bytes; deserialize to a Metadata object.
        mi = OPF(BytesIO(mi), basedir=tdir,
                populate_spine=False).to_book_metadata()
        title, authors, identifiers = mi.title, mi.authors, mi.identifiers
        cdata = None
        log.clear()

        if do_identify:
            results = []
            try:
                results = identify(log, Event(), title=title, authors=authors,
                    identifiers=identifiers)
            except:
                pass
            if results:
                all_failed = False
                mi = merge_result(mi, results[0], ensure_fields=ensure_fields)
                identifiers = mi.identifiers
                if not mi.is_null('rating'):
                    # set_metadata expects a rating out of 10
                    mi.rating *= 2
                with open(os.path.join(tdir, '%d.mi'%book_id), 'wb') as f:
                    f.write(metadata_to_opf(mi, default_lang='und'))
            else:
                log.error('Failed to download metadata for', title)
                failed_ids.add(book_id)

        if covers:
            cdata = download_cover(log, title=title, authors=authors,
                    identifiers=identifiers)
            if cdata is None:
                failed_covers.add(book_id)
            else:
                with open(os.path.join(tdir, '%d.cover'%book_id), 'wb') as f:
                    f.write(cdata[-1])
                all_failed = False

        # Always persist the per-book log for display in the GUI.
        with open(os.path.join(tdir, '%d.log'%book_id), 'wb') as f:
            f.write(log.plain_text.encode('utf-8'))
    return failed_ids, failed_covers, all_failed
@shutdown_webengine_workers
def single_identify(title, authors, identifiers):
    '''Identify a single book; returns serialized OPFs for all results,
    their cached-cover flags, the dumped plugin caches and the log dump.'''
    log = GUILog()
    patch_plugins()
    results = identify(log, Event(), title=title, authors=authors,
                       identifiers=identifiers)
    opfs = [metadata_to_opf(r) for r in results]
    cover_flags = [r.has_cached_cover_url for r in results]
    return opfs, cover_flags, dump_caches(), log.dump()
@shutdown_webengine_workers
def single_covers(title, authors, identifiers, caches, tdir):
    '''Download all candidate covers for one book on a background thread,
    writing each image into *tdir* and returning the log dump.'''
    patch_plugins()
    load_caches(caches)
    log = GUILog()
    results = Queue()
    worker = Thread(target=run_download, args=(log, results, Event()),
            kwargs=dict(title=title, authors=authors, identifiers=identifiers))
    worker.daemon = True
    worker.start()
    counts = Counter()
    while worker.is_alive():
        try:
            plugin, width, height, fmt, data = results.get(True, 1)
        except Empty:
            # Timed out waiting; loop again so we notice when the
            # download thread finishes.
            continue
        name = plugin.name
        if plugin.can_get_multiple_covers:
            # Disambiguate multiple covers from the same plugin.
            name += '{%d}'%counts[plugin.name]
            counts[plugin.name] += 1
        name = '%s,,%s,,%s,,%s.cover'%(name, width, height, fmt)
        with open(os.path.join(tdir, name), 'wb') as f:
            f.write(data)
        # The empty .done directory marks the cover file as fully written.
        os.mkdir(os.path.join(tdir, name+'.done'))
    return log.dump()
| 5,330 | Python | .py | 131 | 31.748092 | 83 | 0.615756 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,540 | test.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/test.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import sys
import tempfile
import time
from threading import Event
from calibre import prints, sanitize_file_name
from calibre.customize.ui import all_metadata_plugins
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import get_cached_cover_urls
from calibre.ebooks.metadata.sources.prefs import msprefs
from calibre.utils.logging import ANSIStream, ThreadSafeLog
from polyglot.queue import Empty, Queue
def isbn_test(isbn):
    '''Return a test callable that checks mi.isbn matches *isbn* after
    ISBN normalization.'''
    isbn_ = check_isbn(isbn)

    def test(mi):
        misbn = check_isbn(mi.isbn)
        if misbn and misbn == isbn_:
            return True
        prints('ISBN test failed. Expected: \'%s\' found \'%s\''%(isbn_, misbn))
        return False

    return test
def title_test(title, exact=False):
    '''Return a test callable that checks mi.title against *title*,
    case-insensitively: equality when *exact*, substring otherwise.'''
    title = title.lower()

    def test(mi):
        mt = mi.title.lower()
        matched = (mt == title) if exact else (title in mt)
        if matched:
            return True
        prints('Title test failed. Expected: \'%s\' found \'%s\''%(title, mt))
        return False

    return test
def authors_test(authors, subset=False):
    '''Return a test callable that checks mi.authors equals (or is a
    superset of, when *subset*) the given author list, ignoring case.'''
    authors = {x.lower() for x in authors}

    def test(mi):
        au = {x.lower() for x in mi.authors}
        if msprefs['swap_author_names']:
            # Undo the "Last, First" swap so the comparison still works when
            # the swap_author_names preference is active.
            def revert_to_fn_ln(a):
                if ',' not in a:
                    return a
                ln, fn = a.split(',', 1)
                return ' '.join([fn, ln])

            au = {revert_to_fn_ln(x) for x in au}

        if subset and authors.issubset(au):
            return True
        if au == authors:
            return True
        prints('Author test failed. Expected: \'%s\' found \'%s\''%(authors, au))
        return False

    return test
def tags_test(tags):
    '''Return a test callable that checks mi.tags equals *tags*, ignoring
    case and order.'''
    tags = {x.lower() for x in tags}

    def test(mi):
        found = {x.lower() for x in mi.tags}
        if found == tags:
            return True
        prints('Tags test failed. Expected: \'%s\' found \'%s\''%(tags, found))
        return False

    return test
def series_test(series, series_index):
    '''Return a test callable that checks mi.series (case-insensitively)
    and mi.series_index.'''
    series = series.lower()

    def test(mi):
        ms = mi.series.lower() if mi.series else ''
        if ms == series and series_index == mi.series_index:
            return True
        if mi.series:
            prints('Series test failed. Expected: \'%s [%d]\' found \'%s[%d]\''%
                    (series, series_index, ms, mi.series_index))
        else:
            prints('Series test failed. Expected: \'%s [%d]\' found no series'%
                    (series, series_index))
        return False

    return test
def comments_test(sentinel):
    '''Return a predicate checking that *sentinel* occurs (case-insensitively) in the comments.'''
    def test(mi):
        haystack = mi.comments.lower() if mi.comments else ''
        if sentinel and sentinel.lower() in haystack:
            return True
        prints('comments test failed. %s not in comments'%sentinel)
        return False
    return test
def pubdate_test(year, month, day):
    '''Return a predicate checking a Metadata object's publication date (exact Y/M/D match).'''
    def test(mi):
        d = mi.pubdate
        return d is not None and (d.year, d.month, d.day) == (year, month, day)
    return test
def init_test(tdir_name):
    '''Set up common test fixtures: a temp directory path, an abort Event
    and a debug-level thread-safe log that writes to stderr.'''
    # NOTE(review): tdir_name is currently unused -- the system temp dir is
    # returned as-is rather than a subdirectory named after the test.
    tdir = tempfile.gettempdir()
    abort = Event()
    log = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
    log.outputs = [ANSIStream(sys.stderr)]
    return tdir, abort, log
def dump_log(lf):
    '''Print the contents of the UTF-8 encoded log file *lf*.'''
    # Use a context manager so the file handle is closed promptly;
    # the original leaked the handle returned by open().
    with open(lf, 'rb') as f:
        prints(f.read().decode('utf-8'))
def test_identify(tests): # {{{
    '''
    Run the full (multi-plugin) identify pipeline against a list of tests,
    exiting with status 1 on the first failure.

    :param tests: List of 2-tuples. Each two tuple is of the form (args,
                  test_funcs). args is a dict of keyword arguments to pass to
                  the identify method. test_funcs are callables that accept a
                  Metadata object and return True iff the object passes the
                  test.
    '''
    from calibre.ebooks.metadata.sources.identify import identify
    tdir, abort, log = init_test('Full Identify')
    # Wall-clock time per query, for the summary printed at the end
    times = []
    for kwargs, test_funcs in tests:
        log('')
        log('#'*80)
        log('### Running test with:', kwargs)
        log('#'*80)
        prints('Running test with:', kwargs)
        args = (log, abort)
        start_time = time.time()
        results = identify(*args, **kwargs)
        total_time = time.time() - start_time
        times.append(total_time)
        if not results:
            prints('identify failed to find any results')
            break
        prints('Found', len(results), 'matches:', end=' ')
        prints('Smaller relevance means better match')
        for i, mi in enumerate(results):
            prints('*'*30, 'Relevance:', i, '*'*30)
            if mi.rating:
                # presumably scales the source's 0-5 rating to calibre's
                # internal 0-10 scale for display -- TODO confirm
                mi.rating *= 2
            prints(mi)
            prints('\nCached cover URLs :',
                    [x[0].name for x in get_cached_cover_urls(mi)])
            prints('*'*75, '\n\n')
        # Keep only the results that pass every supplied test function
        possibles = []
        for mi in results:
            test_failed = False
            for tfunc in test_funcs:
                if not tfunc(mi):
                    test_failed = True
                    break
            if not test_failed:
                possibles.append(mi)
        if not possibles:
            prints('ERROR: No results that passed all tests were found')
            log.close()
            raise SystemExit(1)
        # The most relevant (first) result must itself be one that passed
        if results[0] is not possibles[0]:
            prints('Most relevant result failed the tests')
            raise SystemExit(1)
        log('\n\n')
    prints('Average time per query', sum(times)/len(times))
# }}}
def test_identify_plugin(name, tests, modify_plugin=lambda plugin:None, # {{{
        fail_missing_meta=True):
    '''
    Run the identify (and optionally cover download) capability of a single
    named plugin against a list of tests, exiting with status 1 on failure.

    :param name: Plugin name
    :param tests: List of 2-tuples. Each two tuple is of the form (args,
                  test_funcs). args is a dict of keyword arguments to pass to
                  the identify method. test_funcs are callables that accept a
                  Metadata object and return True iff the object passes the
                  test.
    '''
    plugin = None
    for x in all_metadata_plugins():
        if x.name == name and 'identify' in x.capabilities:
            plugin = x
            break
    # NOTE(review): if no plugin matches `name`, plugin stays None and the
    # attribute accesses below raise AttributeError -- confirm intended.
    modify_plugin(plugin)
    prints('Testing the identify function of', plugin.name)
    prints('Using extra headers:', plugin.browser.addheaders)
    tdir, abort, log = init_test(plugin.name)
    times = []
    for kwargs, test_funcs in tests:
        log('')
        log('#'*80)
        log('### Running test with:', kwargs)
        log('#'*80)
        prints('Running test with:', kwargs)
        rq = Queue()
        args = (log, rq, abort)
        start_time = time.time()
        # Flag the plugin as under test for the duration of the identify call
        plugin.running_a_test = True
        try:
            err = plugin.identify(*args, **kwargs)
        finally:
            plugin.running_a_test = False
        total_time = time.time() - start_time
        times.append(total_time)
        if err is not None:
            prints('identify returned an error for args', args)
            prints(err)
            break
        # Drain every result the plugin put on the queue
        results = []
        while True:
            try:
                results.append(rq.get_nowait())
            except Empty:
                break
        prints('Found', len(results), 'matches:', end=' ')
        prints('Smaller relevance means better match')
        # Order results by the plugin's own relevance keygen
        results.sort(key=plugin.identify_results_keygen(
            title=kwargs.get('title', None), authors=kwargs.get('authors',
                None), identifiers=kwargs.get('identifiers', {})))
        for i, mi in enumerate(results):
            prints('*'*30, 'Relevance:', i, '*'*30)
            if mi.rating:
                # presumably scales the 0-5 source rating to calibre's
                # internal 0-10 scale -- TODO confirm
                mi.rating *= 2
            prints(mi)
            prints('\nCached cover URL :',
                    plugin.get_cached_cover_url(mi.identifiers))
            prints('*'*75, '\n\n')
        # Keep only the results that pass every supplied test function
        possibles = []
        for mi in results:
            test_failed = False
            for tfunc in test_funcs:
                if not tfunc(mi):
                    test_failed = True
                    break
            if not test_failed:
                possibles.append(mi)
        if not possibles:
            prints('ERROR: No results that passed all tests were found')
            raise SystemExit(1)
        # All standard fields must be present unless fail_missing_meta is False
        good = [x for x in possibles if plugin.test_fields(x) is
                None]
        if not good:
            prints('Failed to find', plugin.test_fields(possibles[0]))
            if fail_missing_meta:
                raise SystemExit(1)
        # The most relevant (first) result must itself be one that passed
        if results[0] is not possibles[0]:
            prints('Most relevant result failed the tests')
            raise SystemExit(1)
        if 'cover' in plugin.capabilities:
            # Also exercise the cover download capability for the best result
            rq = Queue()
            mi = results[0]
            plugin.download_cover(log, rq, abort, title=mi.title,
                    authors=mi.authors, identifiers=mi.identifiers)
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            if not results and fail_missing_meta:
                prints('Cover download failed')
                raise SystemExit(1)
            elif results:
                cdata = results[0]
                cover = os.path.join(tdir, plugin.name.replace(' ',
                    '')+'-%s-cover.jpg'%sanitize_file_name(mi.title.replace(' ',
                        '_')))
                with open(cover, 'wb') as f:
                    f.write(cdata[-1])
                prints('Cover downloaded to:', cover)
                # Reject tiny images as they are almost certainly placeholders
                if len(cdata[-1]) < 10240:
                    prints('Downloaded cover too small')
                    raise SystemExit(1)
    prints('Average time per query', sum(times)/len(times))
# }}}
| 10,081 | Python | .py | 265 | 27.615094 | 82 | 0.555897 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,541 | covers.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/covers.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import time
from io import StringIO
from threading import Event, Thread
from calibre.customize.ui import metadata_plugins
from calibre.ebooks.metadata.sources.base import create_log
from calibre.ebooks.metadata.sources.prefs import msprefs
from calibre.utils.img import image_from_data, image_to_data, remove_borders_from_image, save_cover_data_to
from calibre.utils.imghdr import identify
from polyglot.queue import Empty, Queue
class Worker(Thread):

    '''
    Download a cover from a single metadata source plugin in its own thread.

    The plugin itself puts any downloaded cover data onto the shared result
    queue ``rq``; this class only supervises the call and records timing.
    '''

    def __init__(self, plugin, abort, title, authors, identifiers, timeout, rq, get_best_cover=False):
        Thread.__init__(self)
        self.daemon = True
        self.plugin = plugin
        self.abort = abort
        self.get_best_cover = get_best_cover
        # Each worker logs into its own buffer; callers dump the buffers
        # into the main log once all workers are done.
        self.buf = StringIO()
        self.log = create_log(self.buf)
        self.title, self.authors, self.identifiers = (title, authors,
                identifiers)
        self.timeout, self.rq = timeout, rq
        # Set to the elapsed wall-clock time once run() completes; None
        # means the download was aborted before it started or never ran.
        self.time_spent = None

    def run(self):
        start_time = time.time()
        if not self.abort.is_set():
            try:
                if self.plugin.can_get_multiple_covers:
                    self.plugin.download_cover(self.log, self.rq, self.abort,
                            title=self.title, authors=self.authors, get_best_cover=self.get_best_cover,
                            identifiers=self.identifiers, timeout=self.timeout)
                else:
                    self.plugin.download_cover(self.log, self.rq, self.abort,
                            title=self.title, authors=self.authors,
                            identifiers=self.identifiers, timeout=self.timeout)
            except Exception:
                # Catch Exception rather than using a bare except so that
                # SystemExit/KeyboardInterrupt are not swallowed.
                self.log.exception('Failed to download cover from',
                        self.plugin.name)
        self.time_spent = time.time() - start_time
def is_worker_alive(workers):
    '''Return True iff at least one of the Worker threads in *workers* is still running.'''
    # any() short-circuits on the first live worker, same as the original loop
    return any(w.is_alive() for w in workers)
def process_result(log, result):
    '''Validate a raw (plugin, cover_data) item taken off a Worker's queue.

    Returns a (plugin, width, height, fmt, data) tuple, or None if the image
    data is unreadable or too small to be a useful cover.
    '''
    plugin, data = result
    try:
        if getattr(plugin, 'auto_trim_covers', False):
            # Optionally strip borders from the cover image
            img = image_from_data(data)
            nimg = remove_borders_from_image(img)
            if nimg is not img:
                data = image_to_data(nimg)
        fmt, width, height = identify(data)
        if width < 0 or height < 0:
            raise ValueError('Could not read cover image dimensions')
        if width < 50 or height < 50:
            raise ValueError('Image too small')
        data = save_cover_data_to(data)
    except Exception:
        log.exception('Invalid cover from', plugin.name)
        return None
    return (plugin, width, height, fmt, data)
def run_download(log, results, abort,
        title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
    '''
    Run the cover download, putting results into the queue :param:`results`.

    Each result is a tuple of the form:

        (plugin, width, height, fmt, bytes)

    '''
    # _('Unknown') placeholders carry no information; drop them
    if title == _('Unknown'):
        title = None
    if authors == [_('Unknown')]:
        authors = None
    plugins = [p for p in metadata_plugins(['cover']) if p.is_configured()]
    rq = Queue()
    workers = [Worker(p, abort, title, authors, identifiers, timeout, rq, get_best_cover=get_best_cover) for p
            in plugins]
    for w in workers:
        w.start()
    first_result_at = None
    wait_time = msprefs['wait_after_first_cover_result']
    found_results = {}
    start_time = time.time()  # Use a global timeout to workaround misbehaving plugins that hang
    while time.time() - start_time < 301:
        time.sleep(0.1)
        try:
            x = rq.get_nowait()
            result = process_result(log, x)
            if result is not None:
                results.put(result)
                found_results[result[0]] = result
            # BUGFIX: this test was "is not None", which could never be true
            # (first_result_at starts as None and was never set), so the
            # wait_after_first_cover_result preference was effectively dead.
            # Record the arrival time of the first result so we stop waiting
            # for stragglers once wait_time has elapsed.
            if first_result_at is None:
                first_result_at = time.time()
        except Empty:
            pass
        if not is_worker_alive(workers):
            break
        if first_result_at is not None and time.time() - first_result_at > wait_time:
            log('Not waiting for any more results')
            abort.set()
        if abort.is_set():
            break
    # Drain any results that arrived between the last poll and now
    while True:
        try:
            x = rq.get_nowait()
            result = process_result(log, x)
            if result is not None:
                results.put(result)
                found_results[result[0]] = result
        except Empty:
            break
    # Dump each worker's private log buffer into the main log
    for w in workers:
        wlog = w.buf.getvalue().strip()
        log('\n'+'*'*30, w.plugin.name, 'Covers', '*'*30)
        log('Request extra headers:', w.plugin.browser.addheaders)
        if w.plugin in found_results:
            result = found_results[w.plugin]
            log('Downloaded cover:', '%dx%d'%(result[1], result[2]))
        else:
            log('Failed to download valid cover')
        if w.time_spent is None:
            log('Download aborted')
        else:
            log('Took', w.time_spent, 'seconds')
        if wlog:
            log(wlog)
        log('\n'+'*'*80)
def download_cover(log,
        title=None, authors=None, identifiers={}, timeout=30):
    '''
    Synchronous cover download. Returns the "best" cover as per user
    prefs/cover resolution.

    Returned cover is a tuple: (plugin, width, height, fmt, data)

    Returns None if no cover is found.
    '''
    rq = Queue()
    abort = Event()
    run_download(log, rq, abort, title=title, authors=authors,
            identifiers=identifiers, timeout=timeout, get_best_cover=True)
    candidates = []
    while True:
        try:
            candidates.append(rq.get_nowait())
        except Empty:
            break
    priorities = msprefs['cover_priorities']

    def sort_key(candidate):
        plugin, width, height, fmt, data = candidate
        # Lower priority number wins; among equal priorities prefer larger
        # covers (bigger area gives a smaller 1/area value).
        return (priorities.get(plugin.name, 1), 1/(width*height))

    candidates.sort(key=sort_key)
    if not candidates:
        return None
    return candidates[0]
| 6,210 | Python | .py | 156 | 30.596154 | 110 | 0.603623 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,542 | cli.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/cli.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys
from io import BytesIO
from threading import Event
from calibre import prints
from calibre.customize.ui import all_metadata_plugins
from calibre.ebooks.metadata import string_to_authors
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.ebooks.metadata.sources.base import create_log
from calibre.ebooks.metadata.sources.covers import download_cover
from calibre.ebooks.metadata.sources.identify import identify
from calibre.ebooks.metadata.sources.update import patch_plugins
from calibre.utils.config import OptionParser
from calibre.utils.img import save_cover_data_to
def option_parser():
    '''Build the command line option parser for the metadata fetch tool.'''
    parser = OptionParser(_('''\
%prog [options]
Fetch book metadata from online sources. You must specify at least one
of title, authors or ISBN.
'''
    ))
    parser.add_option('-t', '--title', help=_('Book title'))
    parser.add_option('-a', '--authors', help=_('Book author(s)'))
    parser.add_option('-i', '--isbn', help=_('Book ISBN'))
    # Repeatable option; each value is a colon separated scheme:value pair
    parser.add_option('-I', '--identifier', action='append', default=[], help=_(
        'Identifiers such as ASIN/Goodreads id etc. Can be specified multiple times for multiple identifiers.'
        ' For example: ') + '--identifier asin:B0082BAJA0')
    parser.add_option('-v', '--verbose', default=False, action='store_true',
                      help=_('Print the log to the console (stderr)'))
    parser.add_option('-o', '--opf', help=_('Output the metadata in OPF format instead of human readable text.'), action='store_true', default=False)
    parser.add_option('-c', '--cover',
                      help=_('Specify a filename. The cover, if available, will be saved to it. Without this option, no cover will be downloaded.'))
    parser.add_option('-d', '--timeout', default='30',
                      help=_('Timeout in seconds. Default is 30'))
    # Repeatable option restricting which metadata source plugins run
    parser.add_option('-p', '--allowed-plugin', action='append', default=[],
                      help=_('Specify the name of a metadata download plugin to use.'
                             ' By default, all metadata plugins will be used.'
                             ' Can be specified multiple times for multiple plugins.'
                             ' All plugin names: {}').format(', '.join(p.name for p in all_metadata_plugins())))
    return parser
def main(args=sys.argv):
    '''Command line driver: identify a book, optionally download its cover,
    and print the metadata (human readable or OPF). Returns 0 on success.'''
    parser = option_parser()
    opts, args = parser.parse_args(args)
    # All log output is buffered; it is only printed with --verbose or on failure
    buf = BytesIO()
    log = create_log(buf)
    abort = Event()
    patch_plugins()
    authors = []
    if opts.authors:
        authors = string_to_authors(opts.authors)
    identifiers = {}
    # Each --identifier value must be of the form scheme:value
    for idspec in opts.identifier:
        k, v = idspec.partition(':')[::2]
        if not k or not v:
            raise SystemExit('Not a valid identifier: {}'.format(idspec))
        identifiers[k] = v
    if opts.isbn:
        identifiers['isbn'] = opts.isbn
    allowed_plugins = frozenset(opts.allowed_plugin)
    results = identify(log, abort, title=opts.title, authors=authors,
            identifiers=identifiers, timeout=int(opts.timeout),
            allowed_plugins=allowed_plugins or None)
    if not results:
        prints(buf.getvalue(), file=sys.stderr)
        prints('No results found', file=sys.stderr)
        raise SystemExit(1)
    # Use only the most relevant match
    result = results[0]
    cf = None
    if opts.cover and results:
        cover = download_cover(log, title=opts.title, authors=authors,
                identifiers=result.identifiers, timeout=int(opts.timeout))
        if cover is None:
            if not opts.opf:
                prints('No cover found', file=sys.stderr)
        else:
            # cover is (plugin, width, height, fmt, data); save the raw data
            save_cover_data_to(cover[-1], opts.cover)
            result.cover = cf = opts.cover
    if opts.verbose:
        prints(buf.getvalue(), file=sys.stderr)
    if opts.opf:
        getattr(sys.stdout, 'buffer', sys.stdout).write(metadata_to_opf(result))
        print()
    else:
        prints(str(result))
    if not opts.opf and opts.cover:
        prints('Cover :', cf)
    return 0
# Script entry point: exit with main()'s return code
if __name__ == '__main__':
    sys.exit(main())
| 4,184 | Python | .py | 93 | 38.365591 | 149 | 0.65635 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,543 | amazon.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/amazon.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
# License: GPLv3 Copyright: 2011, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import re
import socket
import string
import time
from functools import partial
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
from threading import Thread
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from mechanize import HTTPError
from calibre import as_unicode, browser, random_user_agent, xml_replace_entities
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.sources.base import Option, Source, fixauthors, fixcase
from calibre.ebooks.oeb.base import urlquote
from calibre.utils.icu import lower as icu_lower
from calibre.utils.localization import canonicalize_lang
from calibre.utils.random_ua import accept_header_for_ua
def sort_matches_preferring_kindle_editions(matches):
    '''Stable-sort a list of Amazon product URLs in place so that Kindle
    editions (ASINs starting with "B") come first; returns the list.'''
    original_order = {url: pos for pos, url in enumerate(matches)}

    def sort_key(url):
        pos = original_order[url]
        segments = url.split('/')
        try:
            dp = segments.index('dp')
        except ValueError:
            # No /dp/ component: not a recognizable product URL
            return 1, pos
        if dp + 1 < len(segments) and segments[dp + 1].startswith('B'):
            # Kindle ASINs start with "B"; sort these ahead
            return 0, pos
        return 1, pos

    matches.sort(key=sort_key)
    return matches
def iri_quote_plus(url):
    '''URL-quote *url*, returning text with spaces encoded as "+".'''
    quoted = urlquote(url)
    if isinstance(quoted, bytes):
        quoted = quoted.decode('utf-8')
    return quoted.replace('%20', '+')
def user_agent_is_ok(ua):
    '''Return True unless *ua* looks like a mobile browser user agent.'''
    return all(marker not in ua for marker in ('Mobile/', 'Mobile '))
class CaptchaError(Exception):
    '''Raised when Amazon serves a CAPTCHA page instead of the requested content.'''
    pass
class SearchFailed(ValueError):
    '''Signals a failed Amazon search (raised by callers elsewhere in this module).'''
    pass
class UrlNotFound(ValueError):
    '''Raised when fetching a URL returns HTTP 404.'''

    def __init__(self, url):
        msg = 'The URL {} was not found (HTTP 404)'.format(url)
        ValueError.__init__(self, msg)
class UrlTimedOut(ValueError):
    '''Raised when fetching a URL times out.'''

    def __init__(self, url):
        msg = 'Timed out fetching {} try again later'.format(url)
        ValueError.__init__(self, msg)
def parse_html(raw):
    '''Parse raw HTML into an lxml tree, preferring html5-parser and
    falling back to html5lib when it is unavailable.'''
    try:
        from html5_parser import parse
    except ImportError:
        # Old versions of calibre
        import html5lib
        return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)
    else:
        return parse(raw)
def parse_details_page(url, log, timeout, browser, domain):
    '''Fetch an Amazon book details page (using a search-engine cache when
    available) and parse it.

    Returns a (raw_bytes, lxml_root, css_selector) tuple. Raises ValueError,
    UrlNotFound or UrlTimedOut on failure.
    '''
    from lxml.html import tostring

    from calibre.ebooks.chardet import xml_to_unicode
    from calibre.utils.cleantext import clean_ascii_chars
    try:
        from calibre.ebooks.metadata.sources.update import search_engines_module
        get_data_for_cached_url = search_engines_module().get_data_for_cached_url
    except Exception:
        # No cache support available; always fetch live
        def get_data_for_cached_url(*a):
            return None
    raw = get_data_for_cached_url(url)
    if raw:
        log('Using cached details for url:', url)
    else:
        log('Downloading details from:', url)
        try:
            raw = browser.open_novisit(url, timeout=timeout).read().strip()
        except Exception as e:
            # Map the various network failure modes to specific exceptions
            if callable(getattr(e, 'getcode', None)) and e.getcode() == 404:
                log.error('URL not found: %r' % url)
                raise UrlNotFound(url)
            attr = getattr(e, 'args', [None])
            attr = attr if attr else [None]
            if isinstance(attr[0], socket.timeout):
                msg = 'Details page timed out. Try again later.'
                log.error(msg)
                raise UrlTimedOut(url)
            else:
                msg = 'Failed to make details query: %r' % url
                log.exception(msg)
                raise ValueError('Could not make details query for {}'.format(url))
    # Keep the original bytes for callers that need the raw page
    oraw = raw
    if 'amazon.com.br' in url:
        # amazon.com.br serves utf-8 but has an incorrect latin1 <meta> tag
        raw = raw.decode('utf-8')
    raw = xml_to_unicode(raw, strip_encoding_pats=True,
                         resolve_entities=True)[0]
    if '<title>404 - ' in raw:
        raise ValueError('Got a 404 page for: %r' % url)
    if '>Could not find the requested document in the cache.<' in raw:
        raise ValueError('No cached entry for %s found' % url)
    try:
        root = parse_html(clean_ascii_chars(raw))
    except Exception:
        msg = 'Failed to parse amazon details page: %r' % url
        log.exception(msg)
        raise ValueError(msg)
    if domain == 'jp':
        # amazon.co.jp hides some titles behind a "black curtain" interstitial;
        # follow the redirect link and re-parse the real page
        for a in root.xpath('//a[@href]'):
            if ('black-curtain-redirect.html' in a.get('href')) or ('/black-curtain/save-eligibility/black-curtain' in a.get('href')):
                url = a.get('href')
                if url:
                    if url.startswith('/'):
                        url = 'https://amazon.co.jp' + a.get('href')
                    log('Black curtain redirect found, following')
                    return parse_details_page(url, log, timeout, browser, domain)
    errmsg = root.xpath('//*[@id="errorMessage"]')
    if errmsg:
        msg = 'Failed to parse amazon details page: %r' % url
        msg += tostring(errmsg, method='text', encoding='unicode').strip()
        log.error(msg)
        raise ValueError(msg)
    from css_selectors import Select
    selector = Select(root)
    return oraw, root, selector
def parse_asin(root, log, url):
    '''Extract the ASIN from the canonical <link> of an Amazon details page.

    Returns None when no canonical link is present or parsing fails.
    '''
    try:
        for node in root.xpath('//link[@rel="canonical" and @href]'):
            # The ASIN is the last path component of the canonical URL
            return node.get('href').rpartition('/')[-1]
    except Exception:
        log.exception('Error parsing ASIN for url: %r' % url)
class Worker(Thread): # Get details {{{
'''
Get book details from amazons book page in a separate thread
'''
def __init__(self, url, result_queue, browser, log, relevance, domain,
plugin, timeout=20, testing=False, preparsed_root=None,
cover_url_processor=None, filter_result=None):
Thread.__init__(self)
self.cover_url_processor = cover_url_processor
self.preparsed_root = preparsed_root
self.daemon = True
self.testing = testing
self.url, self.result_queue = url, result_queue
self.log, self.timeout = log, timeout
self.filter_result = filter_result or (lambda x, log: True)
self.relevance, self.plugin = relevance, plugin
self.browser = browser
self.cover_url = self.amazon_id = self.isbn = None
self.domain = domain
from lxml.html import tostring
self.tostring = tostring
months = { # {{{
'de': {
1: ['jän', 'januar'],
2: ['februar'],
3: ['märz'],
5: ['mai'],
6: ['juni'],
7: ['juli'],
10: ['okt', 'oktober'],
12: ['dez', 'dezember']
},
'it': {
1: ['gennaio', 'enn'],
2: ['febbraio', 'febbr'],
3: ['marzo'],
4: ['aprile'],
5: ['maggio', 'magg'],
6: ['giugno'],
7: ['luglio'],
8: ['agosto', 'ag'],
9: ['settembre', 'sett'],
10: ['ottobre', 'ott'],
11: ['novembre'],
12: ['dicembre', 'dic'],
},
'fr': {
1: ['janv'],
2: ['févr'],
3: ['mars'],
4: ['avril'],
5: ['mai'],
6: ['juin'],
7: ['juil'],
8: ['août'],
9: ['sept'],
10: ['oct', 'octobre'],
11: ['nov', 'novembre'],
12: ['déc', 'décembre'],
},
'br': {
1: ['janeiro'],
2: ['fevereiro'],
3: ['março'],
4: ['abril'],
5: ['maio'],
6: ['junho'],
7: ['julho'],
8: ['agosto'],
9: ['setembro'],
10: ['outubro'],
11: ['novembro'],
12: ['dezembro'],
},
'es': {
1: ['enero'],
2: ['febrero'],
3: ['marzo'],
4: ['abril'],
5: ['mayo'],
6: ['junio'],
7: ['julio'],
8: ['agosto'],
9: ['septiembre', 'setiembre'],
10: ['octubre'],
11: ['noviembre'],
12: ['diciembre'],
},
'se': {
1: ['januari'],
2: ['februari'],
3: ['mars'],
4: ['april'],
5: ['maj'],
6: ['juni'],
7: ['juli'],
8: ['augusti'],
9: ['september'],
10: ['oktober'],
11: ['november'],
12: ['december'],
},
'jp': {
1: ['1月'],
2: ['2月'],
3: ['3月'],
4: ['4月'],
5: ['5月'],
6: ['6月'],
7: ['7月'],
8: ['8月'],
9: ['9月'],
10: ['10月'],
11: ['11月'],
12: ['12月'],
},
'nl': {
1: ['januari'], 2: ['februari'], 3: ['maart'], 5: ['mei'], 6: ['juni'], 7: ['juli'], 8: ['augustus'], 10: ['oktober'],
}
} # }}}
self.english_months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
self.months = months.get(self.domain, {})
self.pd_xpath = '''
//h2[text()="Product Details" or \
text()="Produktinformation" or \
text()="Dettagli prodotto" or \
text()="Product details" or \
text()="Détails sur le produit" or \
text()="Detalles del producto" or \
text()="Detalhes do produto" or \
text()="Productgegevens" or \
text()="基本信息" or \
starts-with(text(), "登録情報")]/../div[@class="content"]
'''
# Editor: is for Spanish
self.publisher_xpath = '''
descendant::*[starts-with(text(), "Publisher:") or \
starts-with(text(), "Verlag:") or \
starts-with(text(), "Editore:") or \
starts-with(text(), "Editeur") or \
starts-with(text(), "Editor:") or \
starts-with(text(), "Editora:") or \
starts-with(text(), "Uitgever:") or \
starts-with(text(), "Utgivare:") or \
starts-with(text(), "出版社:")]
'''
self.pubdate_xpath = '''
descendant::*[starts-with(text(), "Publication Date:") or \
starts-with(text(), "Audible.com Release Date:")]
'''
self.publisher_names = {'Publisher', 'Uitgever', 'Verlag', 'Utgivare', 'Herausgeber',
'Editore', 'Editeur', 'Éditeur', 'Editor', 'Editora', '出版社'}
self.language_xpath = '''
descendant::*[
starts-with(text(), "Language:") \
or text() = "Language" \
or text() = "Sprache:" \
or text() = "Lingua:" \
or text() = "Idioma:" \
or starts-with(text(), "Langue") \
or starts-with(text(), "言語") \
or starts-with(text(), "Språk") \
or starts-with(text(), "语种")
]
'''
self.language_names = {'Language', 'Sprache', 'Språk',
'Lingua', 'Idioma', 'Langue', '言語', 'Taal', '语种'}
self.tags_xpath = '''
descendant::h2[
text() = "Look for Similar Items by Category" or
text() = "Ähnliche Artikel finden" or
text() = "Buscar productos similares por categoría" or
text() = "Ricerca articoli simili per categoria" or
text() = "Rechercher des articles similaires par rubrique" or
text() = "Procure por items similares por categoria" or
text() = "関連商品を探す"
]/../descendant::ul/li
'''
self.ratings_pat = re.compile(
r'([0-9.,]+) ?(out of|von|van|su|étoiles sur|つ星のうち|de un máximo de|de|av) '
r'([\d\.]+)( (stars|Sternen|stelle|estrellas|estrelas|sterren|stjärnor)){0,1}'
)
self.ratings_pat_cn = re.compile('([0-9.]+) 颗星,最多 5 颗星')
self.ratings_pat_jp = re.compile(r'\d+つ星のうち([\d\.]+)')
lm = {
'eng': ('English', 'Englisch', 'Engels', 'Engelska'),
'fra': ('French', 'Français'),
'ita': ('Italian', 'Italiano'),
'deu': ('German', 'Deutsch'),
'spa': ('Spanish', 'Espa\xf1ol', 'Espaniol'),
'jpn': ('Japanese', '日本語'),
'por': ('Portuguese', 'Português'),
'nld': ('Dutch', 'Nederlands',),
'chs': ('Chinese', '中文', '简体中文'),
'swe': ('Swedish', 'Svenska'),
}
self.lang_map = {}
for code, names in lm.items():
for name in names:
self.lang_map[name] = code
self.series_pat = re.compile(
r'''
\|\s* # Prefix
(Series)\s*:\s* # Series declaration
(?P<series>.+?)\s+ # The series name
\((Book)\s* # Book declaration
(?P<index>[0-9.]+) # Series index
\s*\)
''', re.X)
    def delocalize_datestr(self, raw):
        # Translate a localized date string into one using English month
        # abbreviations, so it can be parsed downstream.
        if self.domain == 'cn':
            # Chinese dates use 年/月/日 markers instead of month names
            return raw.replace('年', '-').replace('月', '-').replace('日', '')
        if not self.months:
            # No translation table for this domain; assume English already
            return raw
        ans = raw.lower()
        for i, vals in self.months.items():
            for x in vals:
                ans = ans.replace(x, self.english_months[i])
        # Drop the Spanish/Portuguese "1 de enero de 2020" style connector
        ans = ans.replace(' de ', ' ')
        return ans
def run(self):
try:
self.get_details()
except:
self.log.exception('get_details failed for url: %r' % self.url)
    def get_details(self):
        # Fetch the details page (unless a pre-parsed copy was supplied),
        # build a CSS selector over it and hand off to parse_details().
        if self.preparsed_root is None:
            raw, root, selector = parse_details_page(
                self.url, self.log, self.timeout, self.browser, self.domain)
        else:
            raw, root, selector = self.preparsed_root
        from css_selectors import Select
        self.selector = Select(root)
        self.parse_details(raw, root)
    def parse_details(self, raw, root):
        '''Extract all available metadata from a parsed details page and,
        if it passes the result filter, put the Metadata object on the
        result queue.'''
        asin = parse_asin(root, self.log, self.url)
        if not asin and root.xpath('//form[@action="/errors/validateCaptcha"]'):
            raise CaptchaError(
                'Amazon returned a CAPTCHA page, probably because you downloaded too many books. Wait for some time and try again.')
        if self.testing:
            # Save the raw page so failing tests can be debugged
            import tempfile
            import uuid
            with tempfile.NamedTemporaryFile(prefix=(asin or type('')(uuid.uuid4())) + '_',
                    suffix='.html', delete=False) as f:
                f.write(raw)
            print('Downloaded HTML for', asin, 'saved in', f.name)
        try:
            title = self.parse_title(root)
        except:
            self.log.exception('Error parsing title for url: %r' % self.url)
            title = None
        try:
            authors = self.parse_authors(root)
        except:
            self.log.exception('Error parsing authors for url: %r' % self.url)
            authors = []
        # Title, authors and ASIN are the minimum needed for a useful result
        if not title or not authors or not asin:
            self.log.error(
                'Could not find title/authors/asin for %r' % self.url)
            self.log.error('ASIN: %r Title: %r Authors: %r' % (asin, title,
                authors))
            return
        mi = Metadata(title, authors)
        idtype = 'amazon' if self.domain == 'com' else 'amazon_' + self.domain
        mi.set_identifier(idtype, asin)
        self.amazon_id = asin
        # All remaining fields are optional; failures are logged, not fatal
        try:
            mi.rating = self.parse_rating(root)
        except:
            self.log.exception('Error parsing ratings for url: %r' % self.url)
        try:
            mi.comments = self.parse_comments(root, raw)
        except:
            self.log.exception('Error parsing comments for url: %r' % self.url)
        try:
            series, series_index = self.parse_series(root)
            if series:
                mi.series, mi.series_index = series, series_index
            elif self.testing:
                mi.series, mi.series_index = 'Dummy series for testing', 1
        except:
            self.log.exception('Error parsing series for url: %r' % self.url)
        try:
            mi.tags = self.parse_tags(root)
        except:
            self.log.exception('Error parsing tags for url: %r' % self.url)
        try:
            self.cover_url = self.parse_cover(root, raw)
        except:
            self.log.exception('Error parsing cover for url: %r' % self.url)
        # Relative cover URLs need resolving against the site base
        if self.cover_url_processor is not None and self.cover_url and self.cover_url.startswith('/'):
            self.cover_url = self.cover_url_processor(self.cover_url)
        mi.has_cover = bool(self.cover_url)
        # Amazon serves several different product-details layouts; probe each
        # known container in turn, newest first
        detail_bullets = root.xpath('//*[@data-feature-name="detailBullets"]')
        non_hero = tuple(self.selector(
            'div#bookDetails_container_div div#nonHeroSection')) or tuple(self.selector(
                '#productDetails_techSpec_sections'))
        feature_and_detail_bullets = root.xpath('//*[@data-feature-name="featureBulletsAndDetailBullets"]')
        if detail_bullets:
            self.parse_detail_bullets(root, mi, detail_bullets[0])
        elif non_hero:
            try:
                self.parse_new_details(root, mi, non_hero[0])
            except:
                self.log.exception(
                    'Failed to parse new-style book details section')
        elif feature_and_detail_bullets:
            self.parse_detail_bullets(root, mi, feature_and_detail_bullets[0], ul_selector='ul')
        else:
            # Legacy layout: a "Product Details" heading followed by content
            pd = root.xpath(self.pd_xpath)
            if pd:
                pd = pd[0]
                try:
                    isbn = self.parse_isbn(pd)
                    if isbn:
                        self.isbn = mi.isbn = isbn
                except:
                    self.log.exception(
                        'Error parsing ISBN for url: %r' % self.url)
                try:
                    mi.publisher = self.parse_publisher(pd)
                except:
                    self.log.exception(
                        'Error parsing publisher for url: %r' % self.url)
                try:
                    mi.pubdate = self.parse_pubdate(pd)
                except:
                    self.log.exception(
                        'Error parsing publish date for url: %r' % self.url)
                try:
                    lang = self.parse_language(pd)
                    if lang:
                        mi.language = lang
                except:
                    self.log.exception(
                        'Error parsing language for url: %r' % self.url)
            else:
                self.log.warning(
                    'Failed to find product description for url: %r' % self.url)
        mi.source_relevance = self.relevance
        # Cache lookups so subsequent queries can skip the network
        if self.amazon_id:
            if self.isbn:
                self.plugin.cache_isbn_to_identifier(self.isbn, self.amazon_id)
            if self.cover_url:
                self.plugin.cache_identifier_to_cover_url(self.amazon_id,
                        self.cover_url)
        self.plugin.clean_downloaded_metadata(mi)
        if self.filter_result(mi, self.log):
            self.result_queue.put(mi)
    def totext(self, elem, only_printable=False):
        # Serialize an lxml element to its stripped text content, optionally
        # dropping non-printable characters.
        res = self.tostring(elem, encoding='unicode', method='text')
        if only_printable:
            try:
                filtered_characters = list(s for s in res if s.isprintable())
            except AttributeError:
                # Python 2: str has no isprintable(); fall back to string.printable
                filtered_characters = list(s for s in res if s in string.printable)
            res = ''.join(filtered_characters)
        return res.strip()
    def parse_title(self, root):
        # Try the various title markup variants Amazon has used over time.
        def sanitize_title(title):
            # Strip parenthesized/bracketed qualifiers unless the title
            # itself starts with a bracket
            ans = title.strip()
            if not ans.startswith('['):
                ans = re.sub(r'[(\[].*[)\]]', '', title).strip()
            return ans
        h1 = root.xpath('//h1[@id="title"]')
        if h1:
            h1 = h1[0]
            # Remove secondary text (e.g. edition/format info) inside the h1
            for child in h1.xpath('./*[contains(@class, "a-color-secondary")]'):
                h1.remove(child)
            return sanitize_title(self.totext(h1))
        # audiobooks
        elem = root.xpath('//*[@id="productTitle"]')
        if elem:
            return sanitize_title(self.totext(elem[0]))
        tdiv = root.xpath('//h1[contains(@class, "parseasinTitle")]')
        if not tdiv:
            span = root.xpath('//*[@id="ebooksTitle"]')
            if span:
                return sanitize_title(self.totext(span[0]))
            h1 = root.xpath('//h1[@data-feature-name="title"]')
            if h1:
                return sanitize_title(self.totext(h1[0]))
            raise ValueError('No title block found')
        tdiv = tdiv[0]
        actual_title = tdiv.xpath('descendant::*[@id="btAsinTitle"]')
        if actual_title:
            title = self.tostring(actual_title[0], encoding='unicode',
                    method='text').strip()
        else:
            title = self.tostring(tdiv, encoding='unicode',
                    method='text').strip()
        return sanitize_title(title)
    def parse_authors(self, root):
        # Probe the known CSS selectors for author bylines, newest first;
        # the first selector with any matches wins.
        for sel in (
                '#byline .author .contributorNameID',
                '#byline .author a.a-link-normal',
                '#bylineInfo .author .contributorNameID',
                '#bylineInfo .author a.a-link-normal',
                '#bylineInfo #bylineContributor',
                '#bylineInfo #contributorLink',
        ):
            matches = tuple(self.selector(sel))
            if matches:
                authors = [self.totext(x) for x in matches]
                return [a for a in authors if a]
        # Fall back to the legacy parseasinTitle layout
        x = '//h1[contains(@class, "parseasinTitle")]/following-sibling::span/*[(name()="a" and @href) or (name()="span" and @class="contributorNameTrigger")]'
        aname = root.xpath(x)
        if not aname:
            aname = root.xpath('''
            //h1[contains(@class, "parseasinTitle")]/following-sibling::*[(name()="a" and @href) or (name()="span" and @class="contributorNameTrigger")]
            ''')
        for x in aname:
            # Drop trailing text (e.g. "(Author)" role annotations)
            x.tail = ''
        authors = [self.tostring(x, encoding='unicode', method='text').strip() for x
                   in aname]
        authors = [a for a in authors if a]
        return authors
    def parse_rating(self, root):
        '''Return the book's average rating on a 0-5 scale as a float, or
        None (implicitly) when no rating is found.'''
        for x in root.xpath('//div[@id="cpsims-feature" or @id="purchase-sims-feature" or @id="rhf"]'):
            # Remove the similar books section as it can cause spurious
            # ratings matches
            x.getparent().remove(x)
        rating_paths = (
            '//div[@data-feature-name="averageCustomerReviews" or @id="averageCustomerReviews"]',
            '//div[@class="jumpBar"]/descendant::span[contains(@class,"asinReviewsSummary")]',
            '//div[@class="buying"]/descendant::span[contains(@class,"asinReviewsSummary")]',
            '//span[@class="crAvgStars"]/descendant::span[contains(@class,"asinReviewsSummary")]'
        )
        ratings = None
        for p in rating_paths:
            ratings = root.xpath(p)
            if ratings:
                break
        def parse_ratings_text(text):
            # Normalize "x out of y stars" style text to a 0-5 scale;
            # returns None (implicitly) when the pattern does not match
            try:
                m = self.ratings_pat.match(text)
                return float(m.group(1).replace(',', '.')) / float(m.group(3)) * 5
            except Exception:
                pass
        if ratings:
            ratings = ratings[0]
            for elem in ratings.xpath('descendant::*[@title]'):
                t = elem.get('title').strip()
                # Chinese and Japanese storefronts use their own phrasing
                if self.domain == 'cn':
                    m = self.ratings_pat_cn.match(t)
                    if m is not None:
                        return float(m.group(1))
                elif self.domain == 'jp':
                    m = self.ratings_pat_jp.match(t)
                    if m is not None:
                        return float(m.group(1))
                else:
                    ans = parse_ratings_text(t)
                    if ans is not None:
                        return ans
            for elem in ratings.xpath('descendant::span[@class="a-icon-alt"]'):
                t = self.tostring(
                    elem, encoding='unicode', method='text', with_tail=False).strip()
                ans = parse_ratings_text(t)
                if ans is not None:
                    return ans
        else:
            # found in kindle book pages on amazon.com
            for x in root.xpath('//a[@id="acrCustomerReviewLink"]'):
                spans = x.xpath('./span')
                if spans:
                    txt = self.tostring(spans[0], method='text', encoding='unicode', with_tail=False).strip()
                    try:
                        return float(txt.replace(',', '.'))
                    except Exception:
                        pass
    def _render_comments(self, desc):
        '''Convert a description element into sanitized comments HTML (str).

        Strips noscript/expander chrome, neutralizes links and search-engine
        highlighting, then normalizes the serialized HTML with a series of
        regex passes before handing off to sanitize_comments_html.
        '''
        from calibre.library.comments import sanitize_comments_html
        for c in desc.xpath('descendant::noscript'):
            c.getparent().remove(c)
        # Remove "see all"/expander UI elements that are not part of the text
        for c in desc.xpath('descendant::*[@class="seeAll" or'
                            ' @class="emptyClear" or @id="collapsePS" or'
                            ' @id="expandPS"]'):
            c.getparent().remove(c)
        for b in desc.xpath('descendant::b[@style]'):
            # Bing highlights search results
            s = b.get('style', '')
            if 'color' in s:
                b.tag = 'span'
                del b.attrib['style']
        # Neutralize links: keep the text, drop the navigation
        for a in desc.xpath('descendant::a[@href]'):
            del a.attrib['href']
            a.tag = 'span'
        # Map Amazon's styling spans to plain semantic tags
        for a in desc.xpath('descendant::span[@class="a-text-italic"]'):
            a.tag = 'i'
        for a in desc.xpath('descendant::span[@class="a-text-bold"]'):
            a.tag = 'b'
        desc = self.tostring(desc, method='html', encoding='unicode').strip()
        desc = xml_replace_entities(desc, 'utf-8')
        # Encoding bug in Amazon data U+fffd (replacement char)
        # in some examples it is present in place of '
        desc = desc.replace('\ufffd', "'")
        # remove all attributes from tags
        desc = re.sub(r'<([a-zA-Z0-9]+)\s[^>]+>', r'<\1>', desc)
        # Collapse whitespace
        # desc = re.sub('\n+', '\n', desc)
        # desc = re.sub(' +', ' ', desc)
        # Remove the notice about text referring to out of print editions
        desc = re.sub(r'(?s)<em>--This text ref.*?</em>', '', desc)
        # Remove comments
        desc = re.sub(r'(?s)<!--.*?-->', '', desc)
        return sanitize_comments_html(desc)
    def parse_comments(self, root, raw):
        '''Return the book description as sanitized HTML (may be '').

        Tries, in order: the tabbed "drengr" description widgets, the
        noscript-wrapped book description, legacy ps-content markup, and the
        expander-content fallback; then appends the editorial product
        description, which may be URL-encoded inside a script tag in *raw*
        (the raw page bytes).
        '''
        try:
            from urllib.parse import unquote
        except ImportError:
            from urllib import unquote
        ans = ''
        # Newer tabbed description markup (mobile or desktop variant)
        ovr = tuple(self.selector('#drengr_MobileTabbedDescriptionOverviewContent_feature_div')) or tuple(
            self.selector('#drengr_DesktopTabbedDescriptionOverviewContent_feature_div'))
        if ovr:
            ovr = ovr[0]
            ovr.tag = 'div'
            ans = self._render_comments(ovr)
            ovr = tuple(self.selector('#drengr_MobileTabbedDescriptionEditorialsContent_feature_div')) or tuple(
                self.selector('#drengr_DesktopTabbedDescriptionEditorialsContent_feature_div'))
            if ovr:
                ovr = ovr[0]
                ovr.tag = 'div'
                ans += self._render_comments(ovr)
        else:
            ns = tuple(self.selector('#bookDescription_feature_div noscript'))
            if ns:
                ns = ns[0]
                if len(ns) == 0 and ns.text:
                    import html5lib
                    # html5lib parsed noscript as CDATA
                    ns = html5lib.parseFragment(
                        '<div>%s</div>' % (ns.text), treebuilder='lxml', namespaceHTMLElements=False)[0]
                else:
                    ns.tag = 'div'
                ans = self._render_comments(ns)
            else:
                # Legacy page layout
                desc = root.xpath('//div[@id="ps-content"]/div[@class="content"]')
                if desc:
                    ans = self._render_comments(desc[0])
                else:
                    ns = tuple(self.selector('#bookDescription_feature_div .a-expander-content'))
                    if ns:
                        ans = self._render_comments(ns[0])
            # audiobooks
            if not ans:
                elem = root.xpath('//*[@id="audible_desktopTabbedDescriptionOverviewContent_feature_div"]')
                if elem:
                    ans = self._render_comments(elem[0])
        # Append the editorial "product description" section, if present
        desc = root.xpath(
            '//div[@id="productDescription"]/*[@class="content"]')
        if desc:
            ans += self._render_comments(desc[0])
        else:
            # Idiot chickens from amazon strike again. This data is now stored
            # in a JS variable inside a script tag URL encoded.
            m = re.search(br'var\s+iframeContent\s*=\s*"([^"]+)"', raw)
            if m is not None:
                try:
                    text = unquote(m.group(1)).decode('utf-8')
                    nr = parse_html(text)
                    desc = nr.xpath(
                        '//div[@id="productDescription"]/*[@class="content"]')
                    if desc:
                        ans += self._render_comments(desc[0])
                except Exception as e:
                    self.log.warn(
                        'Parsing of obfuscated product description failed with error: %s' % as_unicode(e))
            else:
                desc = root.xpath('//div[@id="productDescription_fullView"]')
                if desc:
                    ans += self._render_comments(desc[0])
        return ans
    def parse_series(self, root):
        '''Return (series_name, series_index) or (None, None).

        Walks a sequence of page-layout-specific fallbacks (rpi attribute
        block, seriesTitle widget, seriesBulletWidget, aboutEbooksSection,
        reviewFeatureGroup, legacy ps-content) and finally strips
        "... Series" decorations from the found name.
        '''
        ans = (None, None)
        # This is found on kindle pages for books on amazon.com
        series = root.xpath('//*[@id="rpi-attribute-book_details-series"]')
        if series:
            spans = series[0].xpath('descendant::span')
            if spans:
                texts = [self.tostring(x, encoding='unicode', method='text', with_tail=False).strip() for x in spans]
                texts = list(filter(None, texts))
                if len(texts) == 2:
                    idxinfo, series = texts
                    m = re.search(r'[0-9.]+', idxinfo.strip())
                    if m is not None:
                        ans = series, float(m.group())
                        return ans
        # This is found on the paperback/hardback pages for books on amazon.com
        series = root.xpath('//div[@data-feature-name="seriesTitle"]')
        if series:
            series = series[0]
            spans = series.xpath('./span')
            if spans:
                raw = self.tostring(
                    spans[0], encoding='unicode', method='text', with_tail=False).strip()
                m = re.search(r'\s+([0-9.]+)$', raw.strip())
                if m is not None:
                    series_index = float(m.group(1))
                    s = series.xpath('./a[@id="series-page-link"]')
                    if s:
                        series = self.tostring(
                            s[0], encoding='unicode', method='text', with_tail=False).strip()
                        if series:
                            ans = (series, series_index)
        else:
            series = root.xpath('//div[@id="seriesBulletWidget_feature_div"]')
            if series:
                a = series[0].xpath('descendant::a')
                if a:
                    raw = self.tostring(a[0], encoding='unicode', method='text', with_tail=False)
                    # jp uses 巻/冊 counters; other domains use localized "Book N of M: Series"
                    if self.domain == 'jp':
                        m = re.search(r'(?P<index>[0-9.]+)\s*(?:巻|冊)\s*\(全\s*([0-9.]+)\s*(?:巻|冊)\):\s*(?P<series>.+)', raw.strip())
                    else:
                        m = re.search(r'(?:Book|Libro|Buch)\s+(?P<index>[0-9.]+)\s+(?:of|de|von)\s+([0-9.]+)\s*:\s*(?P<series>.+)', raw.strip())
                    if m is not None:
                        ans = (m.group('series').strip(), float(m.group('index')))
        # This is found on Kindle edition pages on amazon.com
        if ans == (None, None):
            for span in root.xpath('//div[@id="aboutEbooksSection"]//li/span'):
                text = (span.text or '').strip()
                m = re.match(r'Book\s+([0-9.]+)', text)
                if m is not None:
                    series_index = float(m.group(1))
                    a = span.xpath('./a[@href]')
                    if a:
                        series = self.tostring(
                            a[0], encoding='unicode', method='text', with_tail=False).strip()
                        if series:
                            ans = (series, series_index)
        # This is found on newer Kindle edition pages on amazon.com
        if ans == (None, None):
            for b in root.xpath('//div[@id="reviewFeatureGroup"]/span/b'):
                text = (b.text or '').strip()
                m = re.match(r'Book\s+([0-9.]+)', text)
                if m is not None:
                    series_index = float(m.group(1))
                    a = b.getparent().xpath('./a[@href]')
                    if a:
                        series = self.tostring(
                            a[0], encoding='unicode', method='text', with_tail=False).partition('(')[0].strip()
                        if series:
                            ans = series, series_index
        if ans == (None, None):
            # Legacy layout: series info embedded in the buying block text
            desc = root.xpath('//div[@id="ps-content"]/div[@class="buying"]')
            if desc:
                raw = self.tostring(desc[0], method='text', encoding='unicode')
                raw = re.sub(r'\s+', ' ', raw)
                match = self.series_pat.search(raw)
                if match is not None:
                    s, i = match.group('series'), float(match.group('index'))
                    if s:
                        ans = (s, i)
        if ans[0]:
            # Strip trailing "Series" decorations from the series name
            ans = (re.sub(r'\s+Series$', '', ans[0]).strip(), ans[1])
            ans = (re.sub(r'\(.+?\s+Series\)$', '', ans[0]).strip(), ans[1])
        return ans
def parse_tags(self, root):
ans = []
exclude_tokens = {'kindle', 'a-z'}
exclude = {'special features', 'by authors',
'authors & illustrators', 'books', 'new; used & rental textbooks'}
seen = set()
for li in root.xpath(self.tags_xpath):
for i, a in enumerate(li.iterdescendants('a')):
if i > 0:
# we ignore the first category since it is almost always
# too broad
raw = (a.text or '').strip().replace(',', ';')
lraw = icu_lower(raw)
tokens = frozenset(lraw.split())
if raw and lraw not in exclude and not tokens.intersection(exclude_tokens) and lraw not in seen:
ans.append(raw)
seen.add(lraw)
return ans
    def parse_cover(self, root, raw=b""):
        '''Return the best cover image URL found on the page, or None.

        Tries, in order: the hiRes URL in gallery javascript, the
        imageGalleryData JSON blob, the imageSrc javascript variable, and
        finally the various <img> tags used by different page generations
        (including the data-a-dynamic-image JSON attribute, from which the
        widest variant is chosen). *raw* is the raw page bytes, consulted
        for a largeImage URL when the img src is a loading placeholder.
        '''
        # Look for the image URL in javascript, using the first image in the
        # image gallery as the cover
        import json
        imgpat = re.compile(r'"hiRes":"(.+?)","thumb"')
        for script in root.xpath('//script'):
            m = imgpat.search(script.text or '')
            if m is not None:
                return m.group(1)
        imgpat = re.compile(r"""'imageGalleryData'\s*:\s*(\[\s*{.+])""")
        for script in root.xpath('//script'):
            m = imgpat.search(script.text or '')
            if m is not None:
                try:
                    return json.loads(m.group(1))[0]['mainUrl']
                except Exception:
                    continue
        def clean_img_src(src):
            # Strip Amazon's size-variant suffix from the image basename;
            # returns None when the URL does not look like a product image
            parts = src.split('/')
            if len(parts) > 3:
                bn = parts[-1]
                sparts = bn.split('_')
                if len(sparts) > 2:
                    bn = re.sub(r'\.\.jpg$', '.jpg', (sparts[0] + sparts[-1]))
                return ('/'.join(parts[:-1])) + '/' + bn
        imgpat2 = re.compile(r'var imageSrc = "([^"]+)"')
        for script in root.xpath('//script'):
            m = imgpat2.search(script.text or '')
            if m is not None:
                src = m.group(1)
                url = clean_img_src(src)
                if url:
                    return url
        imgs = root.xpath(
            '//img[(@id="prodImage" or @id="original-main-image" or @id="main-image" or @id="main-image-nonjs") and @src]')
        if not imgs:
            imgs = (
                root.xpath('//div[@class="main-image-inner-wrapper"]/img[@src]') or
                root.xpath('//div[@id="main-image-container" or @id="ebooks-main-image-container"]//img[@src]') or
                root.xpath(
                    '//div[@id="mainImageContainer"]//img[@data-a-dynamic-image]')
            )
            for img in imgs:
                try:
                    idata = json.loads(img.get('data-a-dynamic-image'))
                except Exception:
                    # NOTE(review): rebinding imgs here does not stop this loop,
                    # but does empty the fallback src loop below
                    imgs = ()
                else:
                    # Pick the widest variant listed in the JSON mapping
                    mwidth = 0
                    try:
                        url = None
                        for iurl, (width, height) in idata.items():
                            if width > mwidth:
                                mwidth = width
                                url = iurl
                        return url
                    except Exception:
                        pass
        for img in imgs:
            src = img.get('src')
            if 'data:' in src:
                continue
            if 'loading-' in src:
                # Placeholder image: look up the real URL in the raw page bytes
                js_img = re.search(br'"largeImage":"(https?://[^"]+)",', raw)
                if js_img:
                    src = js_img.group(1).decode('utf-8')
            if ('/no-image-avail' not in src and 'loading-' not in src and '/no-img-sm' not in src):
                self.log('Found image: %s' % src)
                url = clean_img_src(src)
                if url:
                    return url
def parse_detail_bullets(self, root, mi, container, ul_selector='.detail-bullet-list'):
try:
ul = next(self.selector(ul_selector, root=container))
except StopIteration:
return
for span in self.selector('.a-list-item', root=ul):
cells = span.xpath('./span')
if len(cells) >= 2:
self.parse_detail_cells(mi, cells[0], cells[1])
def parse_new_details(self, root, mi, non_hero):
table = non_hero.xpath('descendant::table')[0]
for tr in table.xpath('descendant::tr'):
cells = tr.xpath('descendant::*[local-name()="td" or local-name()="th"]')
if len(cells) == 2:
self.parse_detail_cells(mi, cells[0], cells[1])
def parse_detail_cells(self, mi, c1, c2):
name = self.totext(c1, only_printable=True).strip().strip(':').strip()
val = self.totext(c2)
val = val.replace('\u200e', '').replace('\u200f', '')
if not val:
return
if name in self.language_names:
ans = self.lang_map.get(val)
if not ans:
ans = canonicalize_lang(val)
if ans:
mi.language = ans
elif name in self.publisher_names:
pub = val.partition(';')[0].partition('(')[0].strip()
if pub:
mi.publisher = pub
date = val.rpartition('(')[-1].replace(')', '').strip()
try:
from calibre.utils.date import parse_only_date
date = self.delocalize_datestr(date)
mi.pubdate = parse_only_date(date, assume_utc=True)
except:
self.log.exception('Failed to parse pubdate: %s' % val)
elif name in {'ISBN', 'ISBN-10', 'ISBN-13'}:
ans = check_isbn(val)
if ans:
self.isbn = mi.isbn = ans
elif name in {'Publication date'}:
from calibre.utils.date import parse_only_date
date = self.delocalize_datestr(val)
mi.pubdate = parse_only_date(date, assume_utc=True)
def parse_isbn(self, pd):
items = pd.xpath(
'descendant::*[starts-with(text(), "ISBN")]')
if not items:
items = pd.xpath(
'descendant::b[contains(text(), "ISBN:")]')
for x in reversed(items):
if x.tail:
ans = check_isbn(x.tail.strip())
if ans:
return ans
def parse_publisher(self, pd):
for x in reversed(pd.xpath(self.publisher_xpath)):
if x.tail:
ans = x.tail.partition(';')[0]
return ans.partition('(')[0].strip()
    def parse_pubdate(self, pd):
        '''Return the publication date parsed from the details block, or None.

        First tries the dedicated pubdate label; failing that, extracts the
        "(date)" suffix from the publisher entry. Dates are delocalized
        before parsing and parsed as UTC.
        '''
        from calibre.utils.date import parse_only_date
        for x in reversed(pd.xpath(self.pubdate_xpath)):
            if x.tail:
                date = x.tail.strip()
                date = self.delocalize_datestr(date)
                try:
                    return parse_only_date(date, assume_utc=True)
                except Exception:
                    pass
        # Fallback: the publisher value often carries "(date)" at the end
        for x in reversed(pd.xpath(self.publisher_xpath)):
            if x.tail:
                ans = x.tail
                date = ans.rpartition('(')[-1].replace(')', '').strip()
                date = self.delocalize_datestr(date)
                try:
                    return parse_only_date(date, assume_utc=True)
                except Exception:
                    pass
def parse_language(self, pd):
for x in reversed(pd.xpath(self.language_xpath)):
if x.tail:
raw = x.tail.strip().partition(',')[0].strip()
ans = self.lang_map.get(raw, None)
if ans:
return ans
ans = canonicalize_lang(ans)
if ans:
return ans
# }}}
class Amazon(Source):
    '''Metadata source plugin that downloads book metadata and covers from
    Amazon, either directly or via search-engine caches of Amazon pages.'''
    name = 'Amazon.com'
    version = (1, 3, 10)
    minimum_calibre_version = (2, 82, 0)
    description = _('Downloads metadata and covers from Amazon')
    capabilities = frozenset(('identify', 'cover'))
    # Fields this plugin can fill in; the amazon identifier entry is
    # rewritten per-domain by set_amazon_id_touched_fields()
    touched_fields = frozenset(('title', 'authors', 'identifier:amazon',
                                'rating', 'comments', 'publisher', 'pubdate',
                                'languages', 'series', 'tags'))
    has_html_comments = True
    supports_gzip_transfer_encoding = True
    prefer_results_with_isbn = False
    # Country websites the user can choose from (TLD code -> display name)
    AMAZON_DOMAINS = {
        'com': _('US'),
        'fr': _('France'),
        'de': _('Germany'),
        'uk': _('UK'),
        'au': _('Australia'),
        'it': _('Italy'),
        'jp': _('Japan'),
        'es': _('Spain'),
        'br': _('Brazil'),
        'in': _('India'),
        'nl': _('Netherlands'),
        'cn': _('China'),
        'ca': _('Canada'),
        'se': _('Sweden'),
    }
    # Data sources: Amazon directly, or various search-engine caches
    SERVERS = {
        'auto': _('Choose server automatically'),
        'amazon': _('Amazon servers'),
        'bing': _('Bing search cache'),
        'google': _('Google search cache'),
        'wayback': _('Wayback machine cache (slow)'),
        'ddg': _('DuckDuckGo search and Google cache'),
    }
    options = (
        Option('domain', 'choices', 'com', _('Amazon country website to use:'),
               _('Metadata from Amazon will be fetched using this '
                 'country\'s Amazon website.'), choices=AMAZON_DOMAINS),
        Option('server', 'choices', 'auto', _('Server to get data from:'),
               _(
                   'Amazon has started blocking attempts to download'
                   ' metadata from its servers. To get around this problem,'
                   ' calibre can fetch the Amazon data from many different'
                   ' places where it is cached. Choose the source you prefer.'
               ), choices=SERVERS),
        Option('use_mobi_asin', 'bool', False, _('Use the MOBI-ASIN for metadata search'),
               _(
                   'Enable this option to search for metadata with an'
                   ' ASIN identifier from the MOBI file at the current country website,'
                   ' unless any other amazon id is available. Note that if the'
                   ' MOBI file came from a different Amazon country store, you could get'
                   ' incorrect results.'
               )),
        Option('prefer_kindle_edition', 'bool', False, _('Prefer the Kindle edition, when available'),
               _(
                   'When searching for a book and the search engine returns both paper and Kindle editions,'
                   ' always prefer the Kindle edition, instead of whatever the search engine returns at the'
                   ' top.')
               ),
    )
    def __init__(self, *args, **kwargs):
        Source.__init__(self, *args, **kwargs)
        # Rewrite the amazon identifier touched-field for the configured domain
        self.set_amazon_id_touched_fields()
def id_from_url(self, url):
from polyglot.urllib import urlparse
purl = urlparse(url)
if purl.netloc and purl.path and '/dp/' in purl.path:
host_parts = tuple(x.lower() for x in purl.netloc.split('.'))
if 'amazon' in host_parts:
domain = host_parts[-1]
parts = purl.path.split('/')
idx = parts.index('dp')
try:
val = parts[idx+1]
except IndexError:
return
aid = 'amazon' if domain == 'com' else ('amazon_' + domain)
return aid, val
    def test_fields(self, mi):
        '''
        Return the first field from self.touched_fields that is null on the
        mi object
        '''
        for key in self.touched_fields:
            if key.startswith('identifier:'):
                key = key.partition(':')[-1]
                # The amazon identifier is domain-qualified for non-US stores
                if key == 'amazon':
                    if self.domain != 'com':
                        key += '_' + self.domain
                if not mi.has_identifier(key):
                    return 'identifier: ' + key
            elif mi.is_null(key):
                return key
    @property
    def browser(self):
        '''Lazily created mechanize browser with a randomized, vetted
        user agent; a Referer header is sent only when talking to Amazon
        directly (search-engine caches get no Referer).'''
        br = self._browser
        if br is None:
            # Keep generating user agents until one passes the vetting check
            ua = 'Mobile '
            while not user_agent_is_ok(ua):
                ua = random_user_agent(allow_ie=False)
            # ua = 'Mozilla/5.0 (Linux; Android 8.0.0; VTR-L29; rv:63.0) Gecko/20100101 Firefox/63.0'
            self._browser = br = browser(user_agent=ua)
            br.set_handle_gzip(True)
            if self.use_search_engine:
                br.addheaders += [
                    ('Accept', accept_header_for_ua(ua)),
                    ('Upgrade-insecure-requests', '1'),
                ]
            else:
                br.addheaders += [
                    ('Accept', accept_header_for_ua(ua)),
                    ('Upgrade-insecure-requests', '1'),
                    ('Referer', self.referrer_for_domain()),
                ]
        return br
    def save_settings(self, *args, **kwargs):
        Source.save_settings(self, *args, **kwargs)
        # Keep the amazon identifier touched-field in sync with the new domain
        self.set_amazon_id_touched_fields()
def set_amazon_id_touched_fields(self):
ident_name = "identifier:amazon"
if self.domain != 'com':
ident_name += '_' + self.domain
tf = [x for x in self.touched_fields if not
x.startswith('identifier:amazon')] + [ident_name]
self.touched_fields = frozenset(tf)
def get_domain_and_asin(self, identifiers, extra_domains=()):
identifiers = {k.lower(): v for k, v in identifiers.items()}
for key, val in identifiers.items():
if key in ('amazon', 'asin'):
return 'com', val
if key.startswith('amazon_'):
domain = key.partition('_')[-1]
if domain and (domain in self.AMAZON_DOMAINS or domain in extra_domains):
return domain, val
if self.prefs['use_mobi_asin']:
val = identifiers.get('mobi-asin')
if val is not None:
return self.domain, val
return None, None
def referrer_for_domain(self, domain=None):
domain = domain or self.domain
return {
'uk': 'https://www.amazon.co.uk/',
'au': 'https://www.amazon.com.au/',
'br': 'https://www.amazon.com.br/',
'jp': 'https://www.amazon.co.jp/',
'mx': 'https://www.amazon.com.mx/',
}.get(domain, 'https://www.amazon.%s/' % domain)
def _get_book_url(self, identifiers): # {{{
domain, asin = self.get_domain_and_asin(
identifiers, extra_domains=('au', 'ca'))
if domain and asin:
url = None
r = self.referrer_for_domain(domain)
if r is not None:
url = r + 'dp/' + asin
if url:
idtype = 'amazon' if domain == 'com' else 'amazon_' + domain
return domain, idtype, asin, url
def get_book_url(self, identifiers):
ans = self._get_book_url(identifiers)
if ans is not None:
return ans[1:]
def get_book_url_name(self, idtype, idval, url):
if idtype == 'amazon':
return self.name
return 'A' + idtype.replace('_', '.')[1:]
# }}}
@property
def domain(self):
x = getattr(self, 'testing_domain', None)
if x is not None:
return x
domain = self.prefs['domain']
if domain not in self.AMAZON_DOMAINS:
domain = 'com'
return domain
@property
def server(self):
x = getattr(self, 'testing_server', None)
if x is not None:
return x
server = self.prefs['server']
if server not in self.SERVERS:
server = 'auto'
return server
    @property
    def use_search_engine(self):
        # Any server other than Amazon itself is a search-engine cache
        return self.server != 'amazon'
    def clean_downloaded_metadata(self, mi):
        '''Normalize metadata downloaded from Amazon in place.

        Case fixing is applied only for English results (or when the
        language is unknown on an English-language domain). Also strips
        series decorations ("(X Series Book N)" style) from the title.
        '''
        docase = (
            mi.language == 'eng' or
            (mi.is_null('language') and self.domain in {'com', 'uk', 'au'})
        )
        if mi.title and docase:
            # Remove series information from title
            m = re.search(r'\S+\s+(\(.+?\s+Book\s+\d+\))$', mi.title)
            if m is not None:
                mi.title = mi.title.replace(m.group(1), '').strip()
            mi.title = fixcase(mi.title)
        mi.authors = fixauthors(mi.authors)
        if mi.tags and docase:
            mi.tags = list(map(fixcase, mi.tags))
        mi.isbn = check_isbn(mi.isbn)
        if mi.series and docase:
            mi.series = fixcase(mi.series)
        if mi.title and mi.series:
            # Strip any remaining series suffix matching the detected series
            for pat in (r':\s*Book\s+\d+\s+of\s+%s$', r'\(%s\)$', r':\s*%s\s+Book\s+\d+$'):
                pat = pat % re.escape(mi.series)
                q = re.sub(pat, '', mi.title, flags=re.I).strip()
                if q and q != mi.title:
                    mi.title = q
                    break
def get_website_domain(self, domain):
return {'uk': 'co.uk', 'jp': 'co.jp', 'br': 'com.br', 'au': 'com.au'}.get(domain, domain)
    def create_query(self, log, title=None, authors=None, identifiers={},  # {{{
                     domain=None, for_amazon=True):
        '''Build the search query from the available metadata.

        Returns (url, domain) for an Amazon search, or (terms, domain) when
        for_amazon is False (terms are then fed to a search engine).
        Raises SearchFailed when neither ASIN, ISBN nor title is available.
        '''
        try:
            from urllib.parse import unquote_plus, urlencode
        except ImportError:
            from urllib import unquote_plus, urlencode
        if domain is None:
            domain = self.domain
        # An explicit amazon identifier overrides the configured domain
        idomain, asin = self.get_domain_and_asin(identifiers)
        if idomain is not None:
            domain = idomain
        # See the amazon detailed search page to get all options
        terms = []
        q = {'search-alias': 'aps',
             'unfiltered': '1',
             }
        if domain == 'com':
            q['sort'] = 'relevanceexprank'
        else:
            q['sort'] = 'relevancerank'
        isbn = check_isbn(identifiers.get('isbn', None))
        # Preference order: ASIN, then ISBN, then title/author tokens
        if asin is not None:
            q['field-keywords'] = asin
            terms.append(asin)
        elif isbn is not None:
            q['field-isbn'] = isbn
            if len(isbn) == 13:
                terms.extend('({} OR {}-{})'.format(isbn, isbn[:3], isbn[3:]).split())
            else:
                terms.append(isbn)
        else:
            # Only return book results
            q['search-alias'] = {'br': 'digital-text',
                                 'nl': 'aps'}.get(domain, 'stripbooks')
            if title:
                title_tokens = list(self.get_title_tokens(title))
                if title_tokens:
                    q['field-title'] = ' '.join(title_tokens)
                    terms.extend(title_tokens)
            if authors:
                author_tokens = list(self.get_author_tokens(authors,
                                                            only_first_author=True))
                if author_tokens:
                    q['field-author'] = ' '.join(author_tokens)
                    terms.extend(author_tokens)
        if not ('field-keywords' in q or 'field-isbn' in q or
                ('field-title' in q)):
            # Insufficient metadata to make an identify query
            log.error('Insufficient metadata to construct query, none of title, ISBN or ASIN supplied')
            raise SearchFailed()
        if not for_amazon:
            return terms, domain
        if domain == 'nl':
            # The nl store only supports generic keyword search
            q['__mk_nl_NL'] = 'ÅMÅŽÕÑ'
            if 'field-keywords' not in q:
                q['field-keywords'] = ''
            for f in 'field-isbn field-title field-author'.split():
                q['field-keywords'] += ' ' + q.pop(f, '')
            q['field-keywords'] = q['field-keywords'].strip()
        encoded_q = dict([(x.encode('utf-8', 'ignore'), y.encode(
            'utf-8', 'ignore')) for x, y in q.items()])
        url_query = urlencode(encoded_q)
        # amazon's servers want IRIs with unicode characters not percent esaped
        parts = []
        for x in url_query.split(b'&' if isinstance(url_query, bytes) else '&'):
            k, v = x.split(b'=' if isinstance(x, bytes) else '=', 1)
            parts.append('{}={}'.format(iri_quote_plus(unquote_plus(k)), iri_quote_plus(unquote_plus(v))))
        url_query = '&'.join(parts)
        url = 'https://www.amazon.%s/s/?' % self.get_website_domain(
            domain) + url_query
        return url, domain
    # }}}
def get_cached_cover_url(self, identifiers): # {{{
url = None
domain, asin = self.get_domain_and_asin(identifiers)
if asin is None:
isbn = identifiers.get('isbn', None)
if isbn is not None:
asin = self.cached_isbn_to_identifier(isbn)
if asin is not None:
url = self.cached_identifier_to_cover_url(asin)
return url
# }}}
    def parse_results_page(self, root, domain):  # {{{
        '''Return up to three product-page URLs from a search results page.

        Tries several generations of results markup in order; raises
        CaptchaError when Amazon served a CAPTCHA page instead of results.
        '''
        from lxml.html import tostring
        matches = []
        def title_ok(title):
            # Reject bundle/audio/sampler entries and malformed catalog rows
            title = title.lower()
            bad = ['bulk pack', '[audiobook]', '[audio cd]',
                   '(a book companion)', '( slipcase with door )', ': free sampler']
            if self.domain == 'com':
                bad.extend(['(%s edition)' % x for x in ('spanish', 'german')])
            for x in bad:
                if x in title:
                    return False
            if title and title[0] in '[{' and re.search(r'\(\s*author\s*\)', title) is not None:
                # Bad entries in the catalog
                return False
            return True
        # Current-generation result links, most specific query first
        for query in (
                '//div[contains(@class, "s-result-list")]//h2/a[@href]',
                '//div[contains(@class, "s-result-list")]//div[@data-index]//h5//a[@href]',
                r'//li[starts-with(@id, "result_")]//a[@href and contains(@class, "s-access-detail-page")]',
        ):
            result_links = root.xpath(query)
            if result_links:
                break
        for a in result_links:
            title = tostring(a, method='text', encoding='unicode')
            if title_ok(title):
                url = a.get('href')
                if url.startswith('/'):
                    url = 'https://www.amazon.%s%s' % (
                        self.get_website_domain(domain), url)
                matches.append(url)
        if not matches:
            # Previous generation of results page markup
            for div in root.xpath(r'//div[starts-with(@id, "result_")]'):
                links = div.xpath(r'descendant::a[@class="title" and @href]')
                if not links:
                    # New amazon markup
                    links = div.xpath('descendant::h3/a[@href]')
                for a in links:
                    title = tostring(a, method='text', encoding='unicode')
                    if title_ok(title):
                        url = a.get('href')
                        if url.startswith('/'):
                            url = 'https://www.amazon.%s%s' % (
                                self.get_website_domain(domain), url)
                        matches.append(url)
                    # Only the first link per result row is considered
                    break
        if not matches:
            # This can happen for some user agents that Amazon thinks are
            # mobile/less capable
            for td in root.xpath(
                    r'//div[@id="Results"]/descendant::td[starts-with(@id, "search:Td:")]'):
                for a in td.xpath(r'descendant::td[@class="dataColumn"]/descendant::a[@href]/span[@class="srTitle"]/..'):
                    title = tostring(a, method='text', encoding='unicode')
                    if title_ok(title):
                        url = a.get('href')
                        if url.startswith('/'):
                            url = 'https://www.amazon.%s%s' % (
                                self.get_website_domain(domain), url)
                        matches.append(url)
                    break
        if not matches and root.xpath('//form[@action="/errors/validateCaptcha"]'):
            raise CaptchaError('Amazon returned a CAPTCHA page. Recently Amazon has begun using statistical'
                               ' profiling to block access to its website. As such this metadata plugin is'
                               ' unlikely to ever work reliably.')
        # Keep only the top 3 matches as the matches are sorted by relevance by
        # Amazon so lower matches are not likely to be very relevant
        return matches[:3]
    # }}}
    def search_amazon(self, br, testing, log, abort, title, authors, identifiers, timeout):  # {{{
        '''Run a search directly against Amazon's servers.

        Returns (matches, query, domain, None); the trailing None is the
        cover_url_processor slot, unused for direct Amazon searches.
        Raises SearchFailed on query or parse errors.
        '''
        from calibre.ebooks.chardet import xml_to_unicode
        from calibre.utils.cleantext import clean_ascii_chars
        matches = []
        query, domain = self.create_query(log, title=title, authors=authors,
                                          identifiers=identifiers)
        # Be polite: small delay before hitting Amazon
        time.sleep(1)
        try:
            raw = br.open_novisit(query, timeout=timeout).read().strip()
        except Exception as e:
            if callable(getattr(e, 'getcode', None)) and \
                    e.getcode() == 404:
                log.error('Query malformed: %r' % query)
                raise SearchFailed()
            attr = getattr(e, 'args', [None])
            attr = attr if attr else [None]
            if isinstance(attr[0], socket.timeout):
                msg = _('Amazon timed out. Try again later.')
                log.error(msg)
            else:
                msg = 'Failed to make identify query: %r' % query
                log.exception(msg)
            raise SearchFailed()
        raw = clean_ascii_chars(xml_to_unicode(raw,
                                               strip_encoding_pats=True, resolve_entities=True)[0])
        if testing:
            # Save the results page for offline inspection during tests
            import tempfile
            with tempfile.NamedTemporaryFile(prefix='amazon_results_',
                                             suffix='.html', delete=False) as f:
                f.write(raw.encode('utf-8'))
            print('Downloaded html for results page saved in', f.name)
        matches = []
        found = '<title>404 - ' not in raw
        if found:
            try:
                root = parse_html(raw)
            except Exception:
                msg = 'Failed to parse amazon page for query: %r' % query
                log.exception(msg)
                raise SearchFailed()
            matches = self.parse_results_page(root, domain)
        return matches, query, domain, None
    # }}}
    def search_search_engine(self, br, testing, log, abort, title, authors, identifiers, timeout, override_server=None):  # {{{
        '''Search for Amazon product pages via a search-engine cache.

        Returns (matches, terms, domain, urlproc) where urlproc maps cached
        URLs back to usable ones. Falls back from Google to DuckDuckGo on
        HTTP 429, and retries via Bing when Google finds nothing.
        '''
        from calibre.ebooks.metadata.sources.update import search_engines_module
        se = search_engines_module()
        terms, domain = self.create_query(log, title=title, authors=authors,
                                          identifiers=identifiers, for_amazon=False)
        # Restrict results to the Amazon site for this domain
        site = self.referrer_for_domain(
            domain)[len('https://'):].partition('/')[0]
        matches = []
        server = override_server or self.server
        if server == 'bing':
            urlproc, sfunc = se.bing_url_processor, se.bing_search
        elif server == 'wayback':
            urlproc, sfunc = se.wayback_url_processor, se.ddg_search
        elif server == 'ddg':
            urlproc, sfunc = se.ddg_url_processor, se.ddg_search
        elif server == 'google':
            urlproc, sfunc = se.google_url_processor, se.google_search
        else:  # auto or unknown
            # urlproc, sfunc = se.google_url_processor, se.google_search
            urlproc, sfunc = se.bing_url_processor, se.bing_search
        try:
            results, qurl = sfunc(terms, site, log=log, br=br, timeout=timeout)
        except HTTPError as err:
            if err.code == 429 and sfunc is se.google_search:
                log('Got too many requests error from Google, trying via DuckDuckGo')
                urlproc, sfunc = se.ddg_url_processor, se.ddg_search
                results, qurl = sfunc(terms, site, log=log, br=br, timeout=timeout)
            else:
                raise
        br.set_current_header('Referer', qurl)
        for result in results:
            if abort.is_set():
                return matches, terms, domain, None
            purl = urlparse(result.url)
            # Only product pages (/dp/) on the right Amazon site qualify
            if '/dp/' in purl.path and site in purl.netloc:
                url = result.cached_url
                if url is None:
                    url = se.get_cached_url(result.url, br, timeout=timeout)
                if url is None:
                    log('Failed to find cached page for:', result.url)
                    continue
                if url not in matches:
                    matches.append(url)
                if len(matches) >= 3:
                    break
            else:
                log('Skipping non-book result:', result)
        if not matches:
            log('No search engine results for terms:', ' '.join(terms))
            if urlproc is se.google_url_processor:
                # Google does not cache adult titles
                log('Trying the bing search engine instead')
                return self.search_search_engine(br, testing, log, abort, title, authors, identifiers, timeout, 'bing')
        return matches, terms, domain, urlproc
    # }}}
    def identify(self, log, result_queue, abort, title=None, authors=None,  # {{{
                 identifiers={}, timeout=60):
        '''
        Note this method will retry without identifiers automatically if no
        match is found with identifiers.
        '''
        testing = getattr(self, 'running_a_test', False)
        udata = self._get_book_url(identifiers)
        br = self.browser
        log('User-agent:', br.current_user_agent())
        log('Server:', self.server)
        if testing:
            print('User-agent:', br.current_user_agent())
        if udata is not None and not self.use_search_engine:
            # Try to directly get details page instead of running a search
            # Cannot use search engine as the directly constructed URL is
            # usually redirected to a full URL by amazon, and is therefore
            # not cached
            domain, idtype, asin, durl = udata
            if durl is not None:
                preparsed_root = parse_details_page(
                    durl, log, timeout, br, domain)
                if preparsed_root is not None:
                    # Only trust the direct page when its ASIN matches ours
                    qasin = parse_asin(preparsed_root[1], log, durl)
                    if qasin == asin:
                        w = Worker(durl, result_queue, br, log, 0, domain,
                                   self, testing=testing, preparsed_root=preparsed_root, timeout=timeout)
                        try:
                            w.get_details()
                            return
                        except Exception:
                            log.exception(
                                'get_details failed for url: %r' % durl)
        func = self.search_search_engine if self.use_search_engine else self.search_amazon
        try:
            matches, query, domain, cover_url_processor = func(
                br, testing, log, abort, title, authors, identifiers, timeout)
        except SearchFailed:
            return
        if abort.is_set():
            return
        if not matches:
            if identifiers and title and authors:
                log('No matches found with identifiers, retrying using only'
                    ' title and authors. Query: %r' % query)
                time.sleep(1)
                # Retry once without identifiers
                return self.identify(log, result_queue, abort, title=title,
                                     authors=authors, timeout=timeout)
            log.error('No matches found with query: %r' % query)
            return
        if self.prefs['prefer_kindle_edition']:
            matches = sort_matches_preferring_kindle_editions(matches)
        # One worker thread per candidate product page
        workers = [Worker(
            url, result_queue, br, log, i, domain, self, testing=testing, timeout=timeout,
            cover_url_processor=cover_url_processor, filter_result=partial(
                self.filter_result, title, authors, identifiers)) for i, url in enumerate(matches)]
        for w in workers:
            # Don't send all requests at the same time
            time.sleep(1)
            w.start()
            if abort.is_set():
                return
        # Wait for all workers, polling the abort event between joins
        while not abort.is_set():
            a_worker_is_alive = False
            for w in workers:
                w.join(0.2)
                if abort.is_set():
                    break
                if w.is_alive():
                    a_worker_is_alive = True
            if not a_worker_is_alive:
                break
        return None
    # }}}
    def filter_result(self, title, authors, identifiers, mi, log):  # {{{
        '''Return True if the result *mi* plausibly matches the query.

        Only applied to search-engine results, which can be noisy: requires
        at least one significant title token and one author token overlap
        (case-folded, punctuation-only tokens discarded).
        '''
        if not self.use_search_engine:
            return True
        if title is not None:
            import regex
            only_punctuation_pat = regex.compile(r'^\p{P}+$')
            def tokenize_title(x):
                # Case-fold and strip quotes/colons; drop punctuation-only tokens
                ans = icu_lower(x).replace("'", '').replace('"', '').rstrip(':')
                if only_punctuation_pat.match(ans) is not None:
                    ans = ''
                return ans
            # Only consider significant words (longer than 3 characters)
            tokens = {tokenize_title(x) for x in title.split() if len(x) > 3}
            tokens.discard('')
            if tokens:
                result_tokens = {tokenize_title(x) for x in mi.title.split()}
                result_tokens.discard('')
                if not tokens.intersection(result_tokens):
                    log('Ignoring result:', mi.title, 'as its title does not match')
                    return False
        if authors:
            author_tokens = set()
            for author in authors:
                author_tokens |= {icu_lower(x) for x in author.split() if len(x) > 2}
            result_tokens = set()
            for author in mi.authors:
                result_tokens |= {icu_lower(x) for x in author.split() if len(x) > 2}
            if author_tokens and not author_tokens.intersection(result_tokens):
                log('Ignoring result:', mi.title, 'by', ' & '.join(mi.authors), 'as its author does not match')
                return False
        return True
    # }}}
def download_cover(self, log, result_queue, abort,  # {{{
        title=None, authors=None, identifiers={}, timeout=60, get_best_cover=False):
    '''
    Download a cover for the book and put (self, cover_data) on
    result_queue. If no cover URL is cached for the given identifiers, an
    identify() run is performed first to discover one.
    '''
    cached_url = self.get_cached_cover_url(identifiers)
    if cached_url is None:
        log.info('No cached cover found, running identify')
        rq = Queue()
        self.identify(log, rq, abort, title=title, authors=authors,
                identifiers=identifiers)
        # Note: the original had this abort check duplicated twice in a row
        if abort.is_set():
            return
        # Drain the identify results and pick, in relevance order, the
        # first one that yields a cached cover URL
        results = []
        while True:
            try:
                results.append(rq.get_nowait())
            except Empty:
                break
        results.sort(key=self.identify_results_keygen(
            title=title, authors=authors, identifiers=identifiers))
        for mi in results:
            cached_url = self.get_cached_cover_url(mi.identifiers)
            if cached_url is not None:
                break
    if cached_url is None:
        log.info('No cover found')
        return

    if abort.is_set():
        return
    log('Downloading cover from:', cached_url)
    br = self.browser
    if self.use_search_engine:
        # search-engine sourced covers need a Referer header to download
        br = br.clone_browser()
        br.set_current_header('Referer', self.referrer_for_domain(self.domain))
    try:
        time.sleep(1)  # don't hammer the server
        cdata = br.open_novisit(
            cached_url, timeout=timeout).read()
        result_queue.put((self, cdata))
    except Exception:
        log.exception('Failed to download cover from:', cached_url)
# }}}
def manual_tests(domain, **kw):  # {{{
    '''
    Run the live metadata download tests for the Amazon plugin against the
    given Amazon domain key (e.g. 'com', 'de', 'jp'). Extra keyword
    arguments are forwarded to the inner do_test() (start/stop slice of the
    test list, server selection).
    '''
    # To run these test use:
    # calibre-debug -c "from calibre.ebooks.metadata.sources.amazon import *; manual_tests('com')"
    from calibre.ebooks.metadata.sources.test import authors_test, comments_test, isbn_test, series_test, test_identify_plugin, title_test

    # Map of domain key -> list of (query dict, [expected-result checks])
    all_tests = {}

    all_tests['com'] = [  # {{{
        (  # Paperback with series
            {'identifiers': {'amazon': '1423146786'}},
            [title_test('Heroes of Olympus', exact=False), series_test('The Heroes of Olympus', 5)]
        ),

        (  # Kindle edition with series
            {'identifiers': {'amazon': 'B0085UEQDO'}},
            [title_test('Three Parts Dead', exact=True),
             series_test('Craft Sequence', 1)]
        ),

        (  # + in title and uses id="main-image" for cover
            {'identifiers': {'amazon': '1933988770'}},
            [title_test(
                'C++ Concurrency in Action: Practical Multithreading', exact=True)]
        ),

        (  # Different comments markup, using Book Description section
            {'identifiers': {'amazon': '0982514506'}},
            [title_test(
                "Griffin's Destiny",
                exact=True),
             comments_test('Jelena'), comments_test('Ashinji'),
             ]
        ),

        (  # # in title
            {'title': 'Expert C# 2008 Business Objects',
             'authors': ['Lhotka']},
            [title_test('Expert C#'),
             authors_test(['Rockford Lhotka'])
             ]
        ),

        (  # No specific problems
            {'identifiers': {'isbn': '0743273567'}},
            [title_test('the great gatsby'),
             authors_test(['f. Scott Fitzgerald'])]
        ),
    ]
    # }}}

    all_tests['de'] = [  # {{{
        # series
        (
            {'identifiers': {'isbn': '3499275120'}},
            [title_test('Vespasian: Das Schwert des Tribuns: Historischer Roman',
                        exact=False), authors_test(['Robert Fabbri']), series_test('Die Vespasian-Reihe', 1)
             ]
        ),

        (  # umlaut in title/authors
            {'title': 'Flüsternde Wälder',
             'authors': ['Nicola Förg']},
            [title_test('Flüsternde Wälder'),
             authors_test(['Nicola Förg'], subset=True)
             ]
        ),

        (
            {'identifiers': {'isbn': '9783453314979'}},
            [title_test('Die letzten Wächter: Roman',
                        exact=False), authors_test(['Sergej Lukianenko'])
             ]
        ),

        (
            {'identifiers': {'isbn': '3548283519'}},
            [title_test('Wer Wind Sät: Der Fünfte Fall Für Bodenstein Und Kirchhoff',
                        exact=False), authors_test(['Nele Neuhaus'])
             ]
        ),
    ]  # }}}

    all_tests['it'] = [  # {{{
        (
            {'identifiers': {'isbn': '8838922195'}},
            [title_test('La briscola in cinque',
                        exact=True), authors_test(['Marco Malvaldi'])
             ]
        ),
    ]  # }}}

    all_tests['fr'] = [  # {{{
        (
            {'identifiers': {'amazon_fr': 'B07L7ST4RS'}},
            [title_test('Le secret de Lola', exact=True),
             authors_test(['Amélie BRIZIO'])
             ]
        ),
        (
            {'identifiers': {'isbn': '2221116798'}},
            [title_test('L\'étrange voyage de Monsieur Daldry',
                        exact=True), authors_test(['Marc Levy'])
             ]
        ),
    ]  # }}}

    all_tests['es'] = [  # {{{
        (
            {'identifiers': {'isbn': '8483460831'}},
            [title_test('Tiempos Interesantes',
                        exact=False), authors_test(['Terry Pratchett'])
             ]
        ),
    ]  # }}}

    all_tests['se'] = [  # {{{
        (
            {'identifiers': {'isbn': '9780552140287'}},
            [title_test('Men At Arms: A Discworld Novel: 14',
                        exact=False), authors_test(['Terry Pratchett'])
             ]
        ),
    ]  # }}}

    all_tests['jp'] = [  # {{{
        (  # Adult filtering test
            {'identifiers': {'isbn': '4799500066'}},
            [title_test('Bitch Trap'), ]
        ),

        (  # isbn -> title, authors
            {'identifiers': {'isbn': '9784101302720'}},
            [title_test('精霊の守り人',
                        exact=True), authors_test(['上橋 菜穂子'])
             ]
        ),

        (  # title, authors -> isbn (will use Shift_JIS encoding in query.)
            {'title': '考えない練習',
             'authors': ['小池 龍之介']},
            [isbn_test('9784093881067'), ]
        ),
    ]  # }}}

    all_tests['br'] = [  # {{{
        (
            {'title': 'A Ascensão da Sombra'},
            [title_test('A Ascensão da Sombra'), authors_test(['Robert Jordan'])]
        ),

        (
            {'title': 'Guerra dos Tronos'},
            [title_test('A Guerra dos Tronos. As Crônicas de Gelo e Fogo - Livro 1'), authors_test(['George R. R. Martin'])
             ]
        ),
    ]  # }}}

    all_tests['nl'] = [  # {{{
        (
            {'title': 'Freakonomics'},
            [title_test('Freakonomics',
                        exact=True), authors_test(['Steven Levitt & Stephen Dubner & R. Kuitenbrouwer & O. Brenninkmeijer & A. van Den Berg'])
             ]
        ),
    ]  # }}}

    all_tests['cn'] = [  # {{{
        (
            {'identifiers': {'isbn': '9787115369512'}},
            [title_test('若为自由故 自由软件之父理查德斯托曼传', exact=True),
             authors_test(['[美]sam Williams', '邓楠,李凡希'])]
        ),
        (
            {'title': '爱上Raspberry Pi'},
            [title_test('爱上Raspberry Pi',
                        exact=True), authors_test(['Matt Richardson', 'Shawn Wallace', '李凡希'])
             ]
        ),
    ]  # }}}

    all_tests['ca'] = [  # {{{
        (  # Paperback with series
            {'identifiers': {'isbn': '9781623808747'}},
            [title_test('Parting Shot', exact=True),
             authors_test(['Mary Calmes'])]
        ),
        (  # # in title
            {'title': 'Expert C# 2008 Business Objects',
             'authors': ['Lhotka']},
            [title_test('Expert C# 2008 Business Objects'),
             authors_test(['Rockford Lhotka'])]
        ),
        (  # noscript description
            {'identifiers': {'amazon_ca': '162380874X'}},
            [title_test('Parting Shot', exact=True), authors_test(['Mary Calmes'])
             ]
        ),
    ]  # }}}

    all_tests['in'] = [  # {{{
        (  # Paperback with series
            {'identifiers': {'amazon_in': '1423146786'}},
            [title_test('The Heroes of Olympus, Book Five The Blood of Olympus', exact=True)]
        ),
    ]  # }}}

    def do_test(domain, start=0, stop=None, server='auto'):
        # Run the slice [start:stop] of the domain's tests against the
        # Amazon plugin, disabling tag download (tags are unreliable)
        tests = all_tests[domain]
        if stop is None:
            stop = len(tests)
        tests = tests[start:stop]
        test_identify_plugin(Amazon.name, tests, modify_plugin=lambda p: (
            setattr(p, 'testing_domain', domain),
            setattr(p, 'touched_fields', p.touched_fields - {'tags'}),
            setattr(p, 'testing_server', server),
        ))

    do_test(domain, **kw)
# }}}
| 78,345 | Python | .py | 1,787 | 30.156128 | 159 | 0.502124 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,544 | prefs.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/prefs.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from calibre.utils.config import JSONConfig
# Global (not per-source) metadata download preferences, persisted as JSON
msprefs = JSONConfig('metadata_sources/global.json')
# Convert downloaded comments to plain text instead of keeping HTML
msprefs.defaults['txt_comments'] = False
# Metadata fields the user has chosen never to download
msprefs.defaults['ignore_fields'] = []
msprefs.defaults['user_default_ignore_fields'] = []
# Cap on the number of downloaded tags per book
msprefs.defaults['max_tags'] = 20
# How long to keep waiting for more results after the first one arrives
msprefs.defaults['wait_after_first_identify_result'] = 30  # seconds
msprefs.defaults['wait_after_first_cover_result'] = 60  # seconds
msprefs.defaults['swap_author_names'] = False
msprefs.defaults['fewer_tags'] = True
msprefs.defaults['find_first_edition_date'] = False
msprefs.defaults['append_comments'] = False
# User-defined rules for rewriting downloaded tags/authors/publishers
msprefs.defaults['tag_map_rules'] = ()
msprefs.defaults['author_map_rules'] = ()
msprefs.defaults['publisher_map_rules'] = ()
msprefs.defaults['id_link_rules'] = {}
msprefs.defaults['keep_dups'] = False
# Google covers are often poor quality (scans/errors) but they have high
# resolution, so they trump covers from better sources. So make sure they
# are only used if no other covers are found.
msprefs.defaults['cover_priorities'] = {'Google':2, 'Google Images':2, 'Big Book Search':2}
| 1,285 | Python | .py | 26 | 48.269231 | 91 | 0.754582 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,545 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/__init__.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 279 | Python | .py | 6 | 45.333333 | 82 | 0.724265 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,546 | google.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/google.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
# License: GPLv3 Copyright: 2011, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import os
import re
import sys
import tempfile
import time
import regex
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
from calibre import as_unicode, prepare_string_for_xml, replace_entities
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata import authors_to_string, check_isbn
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.sources.base import Source
from calibre.utils.cleantext import clean_ascii_chars
from calibre.utils.localization import canonicalize_lang
# XML namespace prefixes used by the Google Books ATOM feeds
NAMESPACES = {
    'openSearch': 'http://a9.com/-/spec/opensearchrss/1.0/',
    'atom': 'http://www.w3.org/2005/Atom',
    'dc': 'http://purl.org/dc/terms',
    'gd': 'http://schemas.google.com/g/2005'
}
def pretty_google_books_comments(raw):
    '''
    Re-introduce paragraph markup into a Google Books description.
    Google strips paragraph breaks without inserting a space, leaving the
    pattern "word.Capital" behind; split on that pattern and turn each
    sentence boundary back into a paragraph break.
    '''
    raw = replace_entities(raw)
    pieces = []
    for chunk in re.split(r'([a-z)"”])(\.)([A-Z("“])', raw):
        if chunk == '.':
            pieces.append('.</p>\n\n<p>')
        else:
            pieces.append(prepare_string_for_xml(chunk))
    return '<p>' + ''.join(pieces) + '</p>'
def get_details(browser, url, timeout):  # {{{
    '''
    Fetch *url* with *browser*, retrying once after a short pause when
    Google throttles the request with an HTTP 403.
    '''
    try:
        return browser.open_novisit(url, timeout=timeout).read()
    except Exception as e:
        status = getattr(e, 'getcode', lambda: -1)()
        if status != 403:
            raise
        # Google is throttling us, wait a little
        time.sleep(2)
        return browser.open_novisit(url, timeout=timeout).read()
# }}}
# Cache of compiled XPath expressions, keyed by their source string
xpath_cache = {}


def XPath(x):
    '''Return a compiled lxml XPath for *x*, compiling at most once.'''
    try:
        return xpath_cache[x]
    except KeyError:
        from lxml import etree
        compiled = etree.XPath(x, namespaces=NAMESPACES)
        xpath_cache[x] = compiled
        return compiled
def to_metadata(browser, log, entry_, timeout, running_a_test=False):  # {{{
    '''
    Convert a Google Books ATOM <entry> element, or a bare Google Books
    volume id string, into a Metadata object. Returns None when the entry
    lacks the minimum required data (an id and a title).
    '''
    from lxml import etree

    # total_results = XPath('//openSearch:totalResults')
    # start_index = XPath('//openSearch:startIndex')
    # items_per_page = XPath('//openSearch:itemsPerPage')
    entry = XPath('//atom:entry')
    entry_id = XPath('descendant::atom:id')
    url = XPath('descendant::atom:link[@rel="self"]/@href')
    creator = XPath('descendant::dc:creator')
    identifier = XPath('descendant::dc:identifier')
    title = XPath('descendant::dc:title')
    date = XPath('descendant::dc:date')
    publisher = XPath('descendant::dc:publisher')
    subject = XPath('descendant::dc:subject')
    description = XPath('descendant::dc:description')
    language = XPath('descendant::dc:language')

    # print(etree.tostring(entry_, pretty_print=True))

    def get_text(extra, x):
        # Return the stripped text of the first match of XPath x, or None
        try:
            ans = x(extra)
            if ans:
                ans = ans[0].text
                if ans and ans.strip():
                    return ans.strip()
        except Exception:
            log.exception('Programming error:')
        return None

    def get_extra_details():
        # Download and parse the full volume feed, returning its <entry>
        raw = get_details(browser, details_url, timeout)
        if running_a_test:
            with open(os.path.join(tempfile.gettempdir(), 'Google-' + details_url.split('/')[-1] + '.xml'), 'wb') as f:
                f.write(raw)
            print('Book details saved to:', f.name, file=sys.stderr)
        feed = etree.fromstring(
            xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],
            parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False)
        )
        return entry(feed)[0]

    if isinstance(entry_, str):
        # We were given a bare volume id: fetch the details feed directly
        google_id = entry_
        details_url = 'https://www.google.com/books/feeds/volumes/' + google_id
        extra = get_extra_details()
        title_ = ': '.join([x.text for x in title(extra)]).strip()
        authors = [x.text.strip() for x in creator(extra) if x.text]
    else:
        id_url = entry_id(entry_)[0].text
        google_id = id_url.split('/')[-1]
        details_url = url(entry_)[0]
        title_ = ': '.join([x.text for x in title(entry_)]).strip()
        authors = [x.text.strip() for x in creator(entry_) if x.text]
        # Bug fix: the original tested the XPath callable `title` here
        # (always truthy), so entries without a title were never discarded
        if not id_url or not title_:
            # Silently discard this entry
            return None
        extra = None

    if not authors:
        authors = [_('Unknown')]
    if not title_:  # was `if not title:` — see note above
        return None
    if extra is None:
        extra = get_extra_details()
    mi = Metadata(title_, authors)
    mi.identifiers = {'google': google_id}
    mi.comments = get_text(extra, description)
    lang = canonicalize_lang(get_text(extra, language))
    if lang:
        mi.language = lang
    mi.publisher = get_text(extra, publisher)

    # ISBN: collect all ISBN/LCCN/OCLC identifiers; ISBNs are validated
    isbns = []
    for x in identifier(extra):
        t = str(x.text).strip()
        if t[:5].upper() in ('ISBN:', 'LCCN:', 'OCLC:'):
            if t[:5].upper() == 'ISBN:':
                t = check_isbn(t[5:])
            if t:
                isbns.append(t)
    if isbns:
        # prefer the longest identifier (ISBN-13 over ISBN-10)
        mi.isbn = sorted(isbns, key=len)[-1]
    mi.all_isbns = isbns

    # Tags: Google subjects are '/'-separated hierarchies; flatten them
    try:
        btags = [x.text for x in subject(extra) if x.text]
        tags = []
        for t in btags:
            atags = [y.strip() for y in t.split('/')]
            for tag in atags:
                if tag not in tags:
                    tags.append(tag)
    except Exception:
        log.exception('Failed to parse tags:')
        tags = []
    if tags:
        # commas would be interpreted as tag separators by calibre
        mi.tags = [x.replace(',', ';') for x in tags]

    # pubdate
    pubdate = get_text(extra, date)
    if pubdate:
        from calibre.utils.date import parse_date, utcnow
        try:
            default = utcnow().replace(day=15)
            mi.pubdate = parse_date(pubdate, assume_utc=True, default=default)
        except Exception:
            log.error('Failed to parse pubdate %r' % pubdate)

    # Cover: remember the thumbnail URL, if any
    mi.has_google_cover = None
    for x in extra.xpath(
        '//*[@href and @rel="http://schemas.google.com/books/2008/thumbnail"]'
    ):
        mi.has_google_cover = x.get('href')
        break

    return mi
# }}}
class GoogleBooks(Source):
    '''
    Metadata source plugin that queries the Google Books ATOM feeds for
    book metadata, falling back to a web search when the feed query
    returns nothing.
    '''

    name = 'Google'
    version = (1, 1, 1)
    minimum_calibre_version = (2, 80, 0)
    description = _('Downloads metadata and covers from Google Books')

    # NOTE(review): download_cover is implemented below but 'cover' is not
    # declared in capabilities — verify this is intentional
    capabilities = frozenset({'identify'})
    touched_fields = frozenset({
        'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',
        'identifier:isbn', 'identifier:google', 'languages'
    })
    supports_gzip_transfer_encoding = True
    # Google cover URLs sometimes resolve to placeholder images, so do not
    # trust a cached URL without downloading it
    cached_cover_url_is_reliable = False

    # Template for a volume's front-cover image URL (%s = google id)
    GOOGLE_COVER = 'https://books.google.com/books?id=%s&printsec=frontcover&img=1'

    # MD5 digests of Google's known "image not available" placeholders;
    # downloads matching these are discarded
    DUMMY_IMAGE_MD5 = frozenset(
        ('0de4383ebad0adad5eeb8975cd796657', 'a64fa89d7ebc97075c1d363fc5fea71f')
    )

    def get_book_url(self, identifiers):  # {{{
        # Return (id_type, id_value, url) for the book's Google Books page
        goog = identifiers.get('google', None)
        if goog is not None:
            return ('google', goog, 'https://books.google.com/books?id=%s' % goog)
    # }}}

    def id_from_url(self, url):  # {{{
        # Extract ('google', volume_id) from a books.google.com URL
        from polyglot.urllib import parse_qs, urlparse
        purl = urlparse(url)
        if purl.netloc == 'books.google.com':
            q = parse_qs(purl.query)
            gid = q.get('id')
            if gid:
                return 'google', gid[0]
    # }}}

    def create_query(self, title=None, authors=None, identifiers={}, capitalize_isbn=False):  # {{{
        '''
        Build the ATOM feed query URL for the given title/authors/isbn, or
        return None if there is not enough metadata to query with.
        '''
        try:
            from urllib.parse import urlencode
        except ImportError:
            from urllib import urlencode
        BASE_URL = 'https://books.google.com/books/feeds/volumes?'
        isbn = check_isbn(identifiers.get('isbn', None))
        q = ''
        if isbn is not None:
            q += ('ISBN:' if capitalize_isbn else 'isbn:') + isbn
        elif title or authors:

            def build_term(prefix, parts):
                # intitle:/inauthor: qualified search terms
                return ' '.join('in' + prefix + ':' + x for x in parts)
            title_tokens = list(self.get_title_tokens(title))
            if title_tokens:
                q += build_term('title', title_tokens)
            author_tokens = list(self.get_author_tokens(authors, only_first_author=True))
            if author_tokens:
                q += ('+' if q else '') + build_term('author', author_tokens)
        if not q:
            return None
        if not isinstance(q, bytes):
            q = q.encode('utf-8')
        return BASE_URL + urlencode({
            'q': q,
            'max-results': 20,
            'start-index': 1,
            'min-viewability': 'none',
        })
    # }}}

    def download_cover(  # {{{
        self,
        log,
        result_queue,
        abort,
        title=None,
        authors=None,
        identifiers={},
        timeout=30,
        get_best_cover=False
    ):
        '''
        Download a cover and put (self, cover_data) on result_queue,
        running identify() first if no cover URL is cached.
        '''
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info('No cached cover found, running identify')
            rq = Queue()
            self.identify(
                log,
                rq,
                abort,
                title=title,
                authors=authors,
                identifiers=identifiers
            )
            if abort.is_set():
                return
            # Drain identify results and use the most relevant one that
            # has a cached cover URL
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            results.sort(
                key=self.identify_results_keygen(
                    title=title, authors=authors, identifiers=identifiers
                )
            )
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info('No cover found')
            return

        br = self.browser
        # Try two zoom levels, stopping at the first non-placeholder image
        for candidate in (0, 1):
            if abort.is_set():
                return
            url = cached_url + '&zoom={}'.format(candidate)
            log('Downloading cover from:', cached_url)
            try:
                cdata = br.open_novisit(url, timeout=timeout).read()
                if cdata:
                    if hashlib.md5(cdata).hexdigest() in self.DUMMY_IMAGE_MD5:
                        log.warning('Google returned a dummy image, ignoring')
                    else:
                        result_queue.put((self, cdata))
                        break
            except Exception:
                log.exception('Failed to download cover from:', cached_url)
    # }}}

    def get_cached_cover_url(self, identifiers):  # {{{
        # Look up a cached cover URL, mapping isbn -> google id if needed
        url = None
        goog = identifiers.get('google', None)
        if goog is None:
            isbn = identifiers.get('isbn', None)
            if isbn is not None:
                goog = self.cached_isbn_to_identifier(isbn)
        if goog is not None:
            url = self.cached_identifier_to_cover_url(goog)

        return url
    # }}}

    def postprocess_downloaded_google_metadata(self, ans, relevance=0):  # {{{
        '''
        Fill caches (isbn->id, id->cover URL), prettify comments and clean
        the downloaded metadata. Non-Metadata values pass through.
        '''
        if not isinstance(ans, Metadata):
            return ans
        ans.source_relevance = relevance
        goog = ans.identifiers['google']
        for isbn in getattr(ans, 'all_isbns', []):
            self.cache_isbn_to_identifier(isbn, goog)
        if getattr(ans, 'has_google_cover', False):
            self.cache_identifier_to_cover_url(goog, self.GOOGLE_COVER % goog)
        if ans.comments:
            ans.comments = pretty_google_books_comments(ans.comments)
        self.clean_downloaded_metadata(ans)
        return ans
    # }}}

    def get_all_details(  # {{{
        self,
        br,
        log,
        entries,
        abort,
        result_queue,
        timeout
    ):
        # Convert every feed entry to Metadata, in order of relevance
        from lxml import etree
        for relevance, i in enumerate(entries):
            try:
                ans = self.postprocess_downloaded_google_metadata(to_metadata(br, log, i, timeout, self.running_a_test), relevance)
                if isinstance(ans, Metadata):
                    result_queue.put(ans)
            except Exception:
                log.exception(
                    'Failed to get metadata for identify entry:', etree.tostring(i)
                )
            if abort.is_set():
                break
    # }}}

    def identify_via_web_search(  # {{{
        self,
        log,
        result_queue,
        abort,
        title=None,
        authors=None,
        identifiers={},
        timeout=30
    ):
        '''
        Fallback identify that finds Google Books volume ids via a web
        search, then downloads each volume's feed. Results are filtered
        against the query's ISBN or title/author tokens.
        '''
        from calibre.utils.filenames import ascii_text
        isbn = check_isbn(identifiers.get('isbn', None))
        q = []
        strip_punc_pat = regex.compile(r'[\p{C}|\p{M}|\p{P}|\p{S}|\p{Z}]+', regex.UNICODE)
        google_ids = []
        check_tokens = set()
        has_google_id = 'google' in identifiers

        def to_check_tokens(*tokens):
            # Normalize tokens for fuzzy matching: drop short/stop words,
            # lowercase, strip punctuation/marks/spaces, fold to ASCII
            for t in tokens:
                if len(t) < 3:
                    continue
                t = t.lower()
                if t in ('and', 'not', 'the'):
                    continue
                yield ascii_text(strip_punc_pat.sub('', t))

        if has_google_id:
            google_ids.append(identifiers['google'])
        elif isbn is not None:
            q.append(isbn)
        elif title or authors:
            title_tokens = list(self.get_title_tokens(title))
            if title_tokens:
                q += title_tokens
                check_tokens |= set(to_check_tokens(*title_tokens))
            author_tokens = list(self.get_author_tokens(authors, only_first_author=True))
            if author_tokens:
                q += author_tokens
                check_tokens |= set(to_check_tokens(*author_tokens))
        if not q and not google_ids:
            return None
        from calibre.ebooks.metadata.sources.update import search_engines_module
        se = search_engines_module()
        br = se.google_specialize_browser(se.browser())
        if not has_google_id:
            url = se.google_format_query(q, tbm='bks')
            log('Making query:', url)
            r = []
            root = se.query(br, url, 'google', timeout=timeout, save_raw=r.append)
            pat = re.compile(r'id=([^&]+)')
            for q in se.google_parse_results(root, r[0], log=log, ignore_uncached=False):
                m = pat.search(q.url)
                if m is None or not q.url.startswith('https://books.google'):
                    continue
                google_ids.append(m.group(1))

        if not google_ids and isbn and (title or authors):
            # ISBN search found nothing: retry with title/authors only
            return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)
        found = False
        seen = set()
        for relevance, gid in enumerate(google_ids):
            if gid in seen:
                continue
            seen.add(gid)
            try:
                ans = to_metadata(br, log, gid, timeout, self.running_a_test)
                if isinstance(ans, Metadata):
                    if isbn:
                        if isbn not in ans.all_isbns:
                            log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the ISBN:', isbn,
                                'not in', ' '.join(ans.all_isbns))
                            continue
                    elif check_tokens:
                        # require every query token to appear in the result
                        candidate = set(to_check_tokens(*self.get_title_tokens(ans.title)))
                        candidate |= set(to_check_tokens(*self.get_author_tokens(ans.authors)))
                        if candidate.intersection(check_tokens) != check_tokens:
                            log('Excluding', ans.title, 'by', authors_to_string(ans.authors), 'as it does not match the query')
                            continue
                    ans = self.postprocess_downloaded_google_metadata(ans, relevance)
                    result_queue.put(ans)
                    found = True
            except:
                log.exception('Failed to get metadata for google books id:', gid)
            if abort.is_set():
                break
        if not found and isbn and (title or authors):
            # every ISBN result was excluded: retry without the ISBN
            return self.identify_via_web_search(log, result_queue, abort, title, authors, {}, timeout)
    # }}}

    def identify(  # {{{
        self,
        log,
        result_queue,
        abort,
        title=None,
        authors=None,
        identifiers={},
        timeout=30
    ):
        '''
        Main identify entry point: resolve a google id directly if given,
        otherwise query the ATOM feed, falling back to web search when the
        feed returns no entries.
        '''
        from lxml import etree
        entry = XPath('//atom:entry')
        identifiers = identifiers.copy()
        br = self.browser
        if 'google' in identifiers:
            try:
                ans = to_metadata(br, log, identifiers['google'], timeout, self.running_a_test)
                if isinstance(ans, Metadata):
                    self.postprocess_downloaded_google_metadata(ans)
                    result_queue.put(ans)
                    return
            except Exception:
                log.exception('Failed to get metadata for Google identifier:', identifiers['google'])
            # the google id was unusable; fall through to a normal query
            del identifiers['google']

        query = self.create_query(
            title=title, authors=authors, identifiers=identifiers
        )
        if not query:
            log.error('Insufficient metadata to construct query')
            return

        def make_query(query):
            # Returns (True, entries) on success, (False, error_msg) on failure
            log('Making query:', query)
            try:
                raw = br.open_novisit(query, timeout=timeout).read()
            except Exception as e:
                log.exception('Failed to make identify query: %r' % query)
                return False, as_unicode(e)
            try:
                feed = etree.fromstring(
                    xml_to_unicode(clean_ascii_chars(raw), strip_encoding_pats=True)[0],
                    parser=etree.XMLParser(recover=True, no_network=True, resolve_entities=False)
                )
                return True, entry(feed)
            except Exception as e:
                log.exception('Failed to parse identify results')
                return False, as_unicode(e)
        ok, entries = make_query(query)
        if not ok:
            return entries
        if not entries and not abort.is_set():
            log('No results found, doing a web search instead')
            return self.identify_via_web_search(log, result_queue, abort, title, authors, identifiers, timeout)

        # There is no point running these queries in threads as google
        # throttles requests returning 403 Forbidden errors
        self.get_all_details(br, log, entries, abort, result_queue, timeout)
    # }}}
if __name__ == '__main__':  # tests {{{
    # Live tests for the Google Books plugin.
    # To run these test use:
    # calibre-debug src/calibre/ebooks/metadata/sources/google.py
    from calibre.ebooks.metadata.sources.test import authors_test, test_identify_plugin, title_test
    tests = [
        ({
            'identifiers': {'google': 's7NIrgEACAAJ'},
        }, [title_test('Ride Every Stride', exact=False)]),

        ({
            'identifiers': {'isbn': '0743273567'},
            'title': 'Great Gatsby',
            'authors': ['Fitzgerald']
        }, [
            title_test('The great gatsby', exact=True),
            authors_test(['F. Scott Fitzgerald'])
        ]),

        ({
            'title': 'Flatland',
            'authors': ['Abbott']
        }, [title_test('Flatland', exact=False)]),

        ({
            'title': 'The Blood Red Indian Summer: A Berger and Mitry Mystery',
            'authors': ['David Handler'],
        }, [title_test('The Blood Red Indian Summer: A Berger and Mitry Mystery')
            ]),

        ({
            # requires using web search to find the book
            'title': 'Dragon Done It',
            'authors': ['Eric Flint'],
        }, [
            title_test('The dragon done it', exact=True),
            authors_test(['Eric Flint', 'Mike Resnick'])
        ]),
    ]
    test_identify_plugin(GoogleBooks.name, tests[:])
# }}}
| 20,122 | Python | .py | 516 | 28.596899 | 133 | 0.560004 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,547 | big_book_search.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/big_book_search.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.metadata.sources.base import Option, Source
def get_urls(br, tokens):
    '''
    Query bigbooksearch.com with the given search tokens and return the
    src URLs of every cover image in the result page.
    '''
    from urllib.parse import quote_plus

    from html5_parser import parse
    query = '+'.join(quote_plus(tok) for tok in tokens if tok and tok.strip())
    url = 'https://bigbooksearch.com/please-dont-scrape-my-site-you-will-put-my-api-key-over-the-usage-limit-and-the-site-will-break/books/'+query
    raw = br.open(url).read()
    root = parse(raw.decode('utf-8'))
    return [img.get('src') for img in root.xpath('//img[@src]')]
class BigBookSearch(Source):
    '''
    Cover-only metadata source that scrapes bigbooksearch.com to find
    multiple alternate Amazon covers for a title.
    '''

    name = 'Big Book Search'
    version = (1, 0, 1)
    minimum_calibre_version = (2, 80, 0)
    description = _('Downloads multiple book covers from Amazon. Useful to find alternate covers.')
    capabilities = frozenset(['cover'])
    # This source deliberately returns several candidate covers per query
    can_get_multiple_covers = True
    options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),
                      _('The maximum number of covers to process from the search result')),
               )
    supports_gzip_transfer_encoding = True

    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        # A title is required to build the search query; identifiers alone
        # are not usable with this source
        if not title:
            return
        br = self.browser
        tokens = tuple(self.get_title_tokens(title)) + tuple(self.get_author_tokens(authors))
        urls = get_urls(br, tokens)
        self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)
def test():
    # Manual smoke test: print the cover URLs found for a known book
    # (Consider Phlebas by Iain M. Banks). Requires network access.
    import pprint

    from calibre import browser
    br = browser()
    urls = get_urls(br, ['consider', 'phlebas', 'banks'])
    pprint.pprint(urls)


if __name__ == '__main__':
    test()
| 1,989 | Python | .py | 44 | 39.659091 | 142 | 0.668566 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,548 | base.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/base.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
import threading
from functools import total_ordering
from calibre import browser, random_user_agent
from calibre.customize import Plugin
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.author_mapper import cap_author_token
from calibre.utils.localization import canonicalize_lang, get_lang
from polyglot.builtins import cmp, iteritems
def create_log(ostream=None):
    '''Return a DEBUG-level thread-safe logger writing to *ostream*.'''
    from calibre.utils.logging import FileStream, ThreadSafeLog
    logger = ThreadSafeLog(level=ThreadSafeLog.DEBUG)
    logger.outputs = [FileStream(ostream)]
    return logger
# Comparing Metadata objects for relevance {{{
# Leading articles/conjunctions that are ignored when comparing titles
words = ("the", "a", "an", "of", "and")
prefix_pat = re.compile(r'^(%s)\s+'%("|".join(words)))
trailing_paren_pat = re.compile(r'\(.*\)$')
whitespace_pat = re.compile(r'\s+')


def cleanup_title(s):
    '''
    Normalize a title for fuzzy comparison: lower-case it, drop a leading
    article, strip a trailing parenthesised qualifier and collapse runs of
    whitespace.
    '''
    if not s:
        s = _('Unknown')
    normalized = prefix_pat.sub(' ', s.strip().lower())
    normalized = trailing_paren_pat.sub('', normalized)
    return whitespace_pat.sub(' ', normalized).strip()
@total_ordering
class InternalMetadataCompareKeyGen:

    '''
    Generate a sort key for comparison of the relevance of Metadata objects,
    given a search query. This is used only to compare results from the same
    metadata source, not across different sources.

    The sort key ensures that an ascending order sort is a sort by order of
    decreasing relevance.

    The algorithm is:

        * Prefer results that have at least one identifier the same as for the query
        * Prefer results with a cached cover URL
        * Prefer results with all available fields filled in
        * Prefer results with the same language as the current user interface language
        * Prefer results that are an exact title match to the query
        * Prefer results with longer comments (greater than 10% longer)
        * Use the relevance of the result as reported by the metadata source's search
          engine
    '''

    def __init__(self, mi, source_plugin, title, authors, identifiers):
        # For each criterion 1 is "better" and 2 is "worse", so that an
        # ascending sort on the tuple puts the best results first
        same_identifier = 2
        idents = mi.get_identifiers()
        for k, v in iteritems(identifiers):
            if idents.get(k) == v:
                same_identifier = 1
                break

        all_fields = 1 if source_plugin.test_fields(mi) is None else 2

        exact_title = 1 if title and \
                cleanup_title(title) == cleanup_title(mi.title) else 2

        language = 1
        if mi.language:
            mil = canonicalize_lang(mi.language)
            # 'und' (undetermined) is treated as matching the UI language
            if mil != 'und' and mil != canonicalize_lang(get_lang()):
                language = 2

        has_cover = 2 if (not source_plugin.cached_cover_url_is_reliable or
                source_plugin.get_cached_cover_url(mi.identifiers) is None) else 1

        self.base = (same_identifier, has_cover, all_fields, language, exact_title)
        self.comments_len = len((mi.comments or '').strip())
        # relevance as reported by the source's own search engine (lower = better)
        self.extra = getattr(mi, 'source_relevance', 0)

    def compare_to_other(self, other):
        # cmp-style three-way comparison used by all the rich comparisons
        a = cmp(self.base, other.base)
        if a != 0:
            return a
        cx, cy = self.comments_len, other.comments_len
        if cx and cy:
            # Only prefer longer comments when the difference exceeds 10%
            # of the combined length
            t = (cx + cy) / 20
            delta = cy - cx
            if abs(delta) > t:
                return -1 if delta < 0 else 1
        return cmp(self.extra, other.extra)

    def __eq__(self, other):
        return self.compare_to_other(other) == 0

    def __ne__(self, other):
        return self.compare_to_other(other) != 0

    def __lt__(self, other):
        return self.compare_to_other(other) < 0

    def __le__(self, other):
        return self.compare_to_other(other) <= 0

    def __gt__(self, other):
        return self.compare_to_other(other) > 0

    def __ge__(self, other):
        return self.compare_to_other(other) >= 0

# }}}
def get_cached_cover_urls(mi):
    '''
    Yield (plugin, cover_url) for every identify-capable metadata plugin
    that has a cached cover URL for this book's identifiers.
    '''
    from calibre.customize.ui import metadata_plugins
    for plugin in list(metadata_plugins(['identify'])):
        cover_url = plugin.get_cached_cover_url(mi.identifiers)
        if cover_url:
            yield (plugin, cover_url)
def dump_caches():
    '''Snapshot the internal caches of all identify plugins, keyed by plugin name.'''
    from calibre.customize.ui import metadata_plugins
    return {plugin.name: plugin.dump_caches() for plugin in metadata_plugins(['identify'])}
def load_caches(dump):
    '''Restore plugin caches from a snapshot produced by dump_caches().'''
    from calibre.customize.ui import metadata_plugins
    for plugin in list(metadata_plugins(['identify'])):
        saved = dump.get(plugin.name, None)
        if saved:
            plugin.load_caches(saved)
def fixauthors(authors):
    '''
    Re-capitalize every token of every author name. Falsy input (None or
    an empty list) is returned unchanged.
    '''
    if not authors:
        return authors
    return [' '.join(cap_author_token(tok) for tok in name.split())
            for name in authors]
def fixcase(x):
    '''Title-case a non-empty string; falsy values pass through unchanged.'''
    if not x:
        return x
    from calibre.utils.titlecase import titlecase
    return titlecase(x)
class Option:
    '''A single user-configurable option of a metadata source plugin.'''

    __slots__ = ['type', 'default', 'label', 'desc', 'name', 'choices']

    def __init__(self, name, type_, default, label, desc, choices=None):
        '''
        :param name: The name of this option. Must be a valid python identifier
        :param type_: The type of this option, one of ('number', 'string',
                      'bool', 'choices')
        :param default: The default value for this option
        :param label: A short (few words) description of this option
        :param desc: A longer description of this option
        :param choices: A dict of possible values, used only if type='choices'.
                        dict is of the form {key:human readable label, ...}
        '''
        self.name = name
        self.type = type_
        self.default = default
        self.label = label
        self.desc = desc
        # A non-dict iterable of choices is normalized into an identity map
        if choices and not isinstance(choices, dict):
            choices = {x: x for x in choices}
        self.choices = choices
class Source(Plugin):

    '''Base class for all metadata download source plugins.'''

    type = _('Metadata source')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    #: Set of capabilities supported by this plugin.
    #: Useful capabilities are: 'identify', 'cover'
    capabilities = frozenset()

    #: List of metadata fields that can potentially be download by this plugin
    #: during the identify phase
    touched_fields = frozenset()

    #: Set this to True if your plugin returns HTML formatted comments
    has_html_comments = False

    #: Setting this to True means that the browser object will indicate
    #: that it supports gzip transfer encoding. This can speedup downloads
    #: but make sure that the source actually supports gzip transfer encoding
    #: correctly first
    supports_gzip_transfer_encoding = False

    #: Set this to True to ignore HTTPS certificate errors when connecting
    #: to this source.
    ignore_ssl_errors = False

    #: Cached cover URLs can sometimes be unreliable (i.e. the download could
    #: fail or the returned image could be bogus). If that is often the case
    #: with this source, set to False
    cached_cover_url_is_reliable = True

    #: A list of :class:`Option` objects. They will be used to automatically
    #: construct the configuration widget for this plugin
    options = ()

    #: A string that is displayed at the top of the config widget for this
    #: plugin
    config_help_message = None

    #: If True this source can return multiple covers for a given query
    can_get_multiple_covers = False

    #: If set to True covers downloaded by this plugin are automatically trimmed.
    auto_trim_covers = False

    #: If set to True, and this source returns multiple results for a query,
    #: some of which have ISBNs and some of which do not, the results without
    #: ISBNs will be ignored
    prefer_results_with_isbn = True

    def __init__(self, *args, **kwargs):
        Plugin.__init__(self, *args, **kwargs)
        self.running_a_test = False  # Set to True when using identify_test()
        self._isbn_to_identifier_cache = {}
        self._identifier_to_cover_url_cache = {}
        self.cache_lock = threading.RLock()
        self._config_obj = None
        self._browser = None
        self.prefs.defaults['ignore_fields'] = []
        for opt in self.options:
            self.prefs.defaults[opt.name] = opt.default

    # Configuration {{{

    def is_configured(self):
        '''
        Return False if your plugin needs to be configured before it can be
        used. For example, it might need a username/password/API key.
        '''
        return True

    def is_customizable(self):
        return True

    def customization_help(self):
        return 'This plugin can only be customized using the GUI'

    def config_widget(self):
        from calibre.gui2.metadata.config import ConfigWidget
        return ConfigWidget(self)

    def save_settings(self, config_widget):
        config_widget.commit()

    @property
    def prefs(self):
        # Lazily created, per-plugin JSON backed preferences store
        if self._config_obj is None:
            from calibre.utils.config import JSONConfig
            self._config_obj = JSONConfig('metadata_sources/%s.json'%self.name)
        return self._config_obj
    # }}}

    # Browser {{{

    @property
    def user_agent(self):
        # Pass in an index to random_user_agent() to test with a particular
        # user agent
        return random_user_agent()

    @property
    def browser(self):
        if self._browser is None:
            self._browser = browser(user_agent=self.user_agent, verify_ssl_certificates=not self.ignore_ssl_errors)
            if self.supports_gzip_transfer_encoding:
                self._browser.set_handle_gzip(True)
        # Return a clone so that cookies/state are not shared across threads
        return self._browser.clone_browser()
    # }}}

    # Caching {{{

    def get_related_isbns(self, id_):
        # Yield all ISBNs that have been cached as mapping to the identifier id_
        with self.cache_lock:
            for isbn, q in iteritems(self._isbn_to_identifier_cache):
                if q == id_:
                    yield isbn

    def cache_isbn_to_identifier(self, isbn, identifier):
        with self.cache_lock:
            self._isbn_to_identifier_cache[isbn] = identifier

    def cached_isbn_to_identifier(self, isbn):
        with self.cache_lock:
            return self._isbn_to_identifier_cache.get(isbn, None)

    def cache_identifier_to_cover_url(self, id_, url):
        with self.cache_lock:
            self._identifier_to_cover_url_cache[id_] = url

    def cached_identifier_to_cover_url(self, id_):
        with self.cache_lock:
            return self._identifier_to_cover_url_cache.get(id_, None)

    def dump_caches(self):
        # Return copies so callers cannot mutate the live caches
        with self.cache_lock:
            return {'isbn_to_identifier':self._isbn_to_identifier_cache.copy(),
                    'identifier_to_cover':self._identifier_to_cover_url_cache.copy()}

    def load_caches(self, dump):
        with self.cache_lock:
            self._isbn_to_identifier_cache.update(dump['isbn_to_identifier'])
            self._identifier_to_cover_url_cache.update(dump['identifier_to_cover'])
    # }}}

    # Utility functions {{{

    def get_author_tokens(self, authors, only_first_author=True):
        '''
        Take a list of authors and generate tokens useful for an
        AND search query. This function tries to yield tokens in
        first name middle names last name order, by assuming that if a comma is
        in the author name, the name is in lastname, other names form.
        '''
        if authors:
            # Leave ' in there for Irish names
            remove_pat = re.compile(r'[!@#$%^&*()()「」{}`~"\s\[\]/]')
            replace_pat = re.compile(r'[-+.:;,,。;:]')
            if only_first_author:
                authors = authors[:1]
            for au in authors:
                has_comma = ',' in au
                au = replace_pat.sub(' ', au)
                parts = au.split()
                if has_comma:
                    # au probably in ln, fn form
                    parts = parts[1:] + parts[:1]
                for tok in parts:
                    tok = remove_pat.sub('', tok).strip()
                    if len(tok) > 2 and tok.lower() not in ('von', 'van',
                            _('Unknown').lower()):
                        yield tok

    def get_title_tokens(self, title, strip_joiners=True, strip_subtitle=False):
        '''
        Take a title and generate tokens useful for an AND search query.
        Excludes connectives(optionally) and punctuation.
        '''
        if title:
            # strip sub-titles
            if strip_subtitle:
                subtitle = re.compile(r'([\(\[\{].*?[\)\]\}]|[/:\\].*$)')
                if len(subtitle.sub('', title)) > 1:
                    title = subtitle.sub('', title)

            title_patterns = [(re.compile(pat, re.IGNORECASE), repl) for pat, repl in
            [
                # Remove things like: (2010) (Omnibus) etc.
                (r'(?i)[({\[](\d{4}|omnibus|anthology|hardcover|audiobook|audio\scd|paperback|turtleback|mass\s*market|edition|ed\.)[\])}]', ''),
                # Remove any strings that contain the substring edition inside
                # parentheses
                (r'(?i)[({\[].*?(edition|ed.).*?[\]})]', ''),
                # Remove commas used a separators in numbers
                (r'(\d+),(\d+)', r'\1\2'),
                # Remove hyphens only if they have whitespace before them
                (r'(\s-)', ' '),
                # Replace other special chars with a space. NOTE: the CJK
                # quotes/brackets used to sit outside the character class, so
                # they were never actually stripped; they belong inside it.
                (r'''[:,;!@$%^&*(){}.`~"\s\[\]/《》「」“”]''', ' '),
            ]]

            for pat, repl in title_patterns:
                title = pat.sub(repl, title)

            tokens = title.split()
            for token in tokens:
                token = token.strip().strip('"').strip("'")
                if token and (not strip_joiners or token.lower() not in ('a',
                    'and', 'the', '&')):
                    yield token

    def split_jobs(self, jobs, num):
        'Split a list of jobs into at most num groups, as evenly as possible'
        groups = [[] for _ in range(num)]
        jobs = list(jobs)
        while jobs:
            for gr in groups:
                try:
                    job = jobs.pop()
                except IndexError:
                    break
                gr.append(job)
        return [g for g in groups if g]

    def test_fields(self, mi):
        '''
        Return the first field from self.touched_fields that is null on the
        mi object
        '''
        for key in self.touched_fields:
            if key.startswith('identifier:'):
                key = key.partition(':')[-1]
                if not mi.has_identifier(key):
                    return 'identifier: ' + key
            elif mi.is_null(key):
                return key

    def clean_downloaded_metadata(self, mi):
        '''
        Call this method in your plugin's identify method to normalize metadata
        before putting the Metadata object into result_queue. You can of
        course, use a custom algorithm suited to your metadata source.
        '''
        docase = mi.language == 'eng' or mi.is_null('language')
        if docase and mi.title:
            mi.title = fixcase(mi.title)
        mi.authors = fixauthors(mi.authors)
        if mi.tags and docase:
            mi.tags = list(map(fixcase, mi.tags))
        mi.isbn = check_isbn(mi.isbn)

    def download_multiple_covers(self, title, authors, urls, get_best_cover, timeout, result_queue, abort, log, prefs_name='max_covers'):
        # Download up to prefs[prefs_name] covers concurrently, putting each
        # (self, data) result into result_queue, until done/abort/timeout.
        if not urls:
            log('No images found for, title: %r and authors: %r'%(title, authors))
            return
        import time
        from threading import Thread
        if prefs_name:
            urls = urls[:self.prefs[prefs_name]]
        if get_best_cover:
            urls = urls[:1]
        log('Downloading %d covers'%len(urls))
        workers = [Thread(target=self.download_image, args=(u, timeout, log, result_queue)) for u in urls]
        for w in workers:
            w.daemon = True
            w.start()
        alive = True
        start_time = time.time()
        while alive and not abort.is_set() and time.time() - start_time < timeout:
            alive = False
            for w in workers:
                if w.is_alive():
                    alive = True
                    break
            abort.wait(0.1)

    def download_image(self, url, timeout, log, result_queue):
        try:
            ans = self.browser.open_novisit(url, timeout=timeout).read()
            result_queue.put((self, ans))
            log('Downloaded cover from: %s'%url)
        except Exception:
            # BUG FIX: this used self.log, but Source has no log attribute;
            # the log object is the method parameter
            log.exception('Failed to download cover from: %r'%url)

    # }}}

    # Metadata API {{{

    def get_book_url(self, identifiers):
        '''
        Return a 3-tuple or None. The 3-tuple is of the form:
        (identifier_type, identifier_value, URL).
        The URL is the URL for the book identified by identifiers at this
        source. identifier_type, identifier_value specify the identifier
        corresponding to the URL.
        This URL must be browsable to by a human using a browser. It is meant
        to provide a clickable link for the user to easily visit the books page
        at this source.
        If no URL is found, return None. This method must be quick, and
        consistent, so only implement it if it is possible to construct the URL
        from a known scheme given identifiers.
        '''
        return None

    def get_book_url_name(self, idtype, idval, url):
        '''
        Return a human readable name from the return value of get_book_url().
        '''
        return self.name

    def get_book_urls(self, identifiers):
        '''
        Override this method if you would like to return multiple URLs for this book.
        Return a list of 3-tuples. By default this method simply calls :func:`get_book_url`.
        '''
        data = self.get_book_url(identifiers)
        if data is None:
            return ()
        return (data,)

    def get_cached_cover_url(self, identifiers):
        '''
        Return cached cover URL for the book identified by
        the identifiers dictionary or None if no such URL exists.
        Note that this method must only return validated URLs, i.e. not URLS
        that could result in a generic cover image or a not found error.
        '''
        return None

    def id_from_url(self, url):
        '''
        Parse a URL and return a tuple of the form:
        (identifier_type, identifier_value).
        If the URL does not match the pattern for the metadata source,
        return None.
        '''
        return None

    def identify_results_keygen(self, title=None, authors=None,
            identifiers={}):
        '''
        Return a function that is used to generate a key that can sort Metadata
        objects by their relevance given a search query (title, authors,
        identifiers).
        These keys are used to sort the results of a call to :meth:`identify`.
        For details on the default algorithm see
        :class:`InternalMetadataCompareKeyGen`. Re-implement this function in
        your plugin if the default algorithm is not suitable.
        '''
        def keygen(mi):
            return InternalMetadataCompareKeyGen(mi, self, title, authors,
                identifiers)
        return keygen

    def identify(self, log, result_queue, abort, title=None, authors=None,
            identifiers={}, timeout=30):
        '''
        Identify a book by its Title/Author/ISBN/etc.
        If identifiers(s) are specified and no match is found and this metadata
        source does not store all related identifiers (for example, all ISBNs
        of a book), this method should retry with just the title and author
        (assuming they were specified).
        If this metadata source also provides covers, the URL to the cover
        should be cached so that a subsequent call to the get covers API with
        the same ISBN/special identifier does not need to get the cover URL
        again. Use the caching API for this.
        Every Metadata object put into result_queue by this method must have a
        `source_relevance` attribute that is an integer indicating the order in
        which the results were returned by the metadata source for this query.
        This integer will be used by :meth:`compare_identify_results`. If the
        order is unimportant, set it to zero for every result.
        Make sure that any cover/ISBN mapping information is cached before the
        Metadata object is put into result_queue.
        :param log: A log object, use it to output debugging information/errors
        :param result_queue: A result Queue, results should be put into it.
                             Each result is a Metadata object
        :param abort: If abort.is_set() returns True, abort further processing
                      and return as soon as possible
        :param title: The title of the book, can be None
        :param authors: A list of authors of the book, can be None
        :param identifiers: A dictionary of other identifiers, most commonly
                            {'isbn':'1234...'}
        :param timeout: Timeout in seconds, no network request should hang for
                        longer than timeout.
        :return: None if no errors occurred, otherwise a unicode representation
                 of the error suitable for showing to the user
        '''
        return None

    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        '''
        Download a cover and put it into result_queue. The parameters all have
        the same meaning as for :meth:`identify`. Put (self, cover_data) into
        result_queue.
        This method should use cached cover URLs for efficiency whenever
        possible. When cached data is not present, most plugins simply call
        identify and use its results.
        If the parameter get_best_cover is True and this plugin can get
        multiple covers, it should only get the "best" one.
        '''
        pass

    # }}}
| 22,314 | Python | .py | 492 | 35.863821 | 145 | 0.615874 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,549 | search_engines.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/search_engines.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import re
import sys
import time
from collections import namedtuple
from contextlib import contextmanager
from functools import partial
from threading import Lock
try:
from urllib.parse import parse_qs, quote, quote_plus, urlencode, urlparse
except ImportError:
from urllib import quote, quote_plus, urlencode
from urlparse import parse_qs, urlparse
from lxml import etree
from calibre import browser as _browser
from calibre import prints as safe_print
from calibre import random_user_agent
from calibre.constants import cache_dir
from calibre.ebooks.chardet import xml_to_unicode
from calibre.utils.lock import ExclusiveFile
from calibre.utils.random_ua import accept_header_for_ua
# Version of this helper module and the minimum calibre version it requires
# (presumably consumed by calibre's source-update machinery — confirm)
current_version = (1, 2, 12)
minimum_calibre_version = (2, 80, 0)
# In-memory cache of raw bytes for cached-page URLs, guarded by webcache_lock
webcache = {}
webcache_lock = Lock()
# Log to stderr so diagnostic output does not pollute stdout
prints = partial(safe_print, file=sys.stderr)
# One search result: final URL, display title, optional cached-copy URL
Result = namedtuple('Result', 'url title cached_url')
@contextmanager
def rate_limit(name='test', time_between_visits=2, max_wait_seconds=5 * 60, sleep_time=0.2):
    # Cross-process rate limiter: the lock file both serializes access (via
    # ExclusiveFile) and stores the timestamp of the last visit for ``name``.
    lock_file = os.path.join(cache_dir(), 'search-engine.' + name + '.lock')
    with ExclusiveFile(lock_file, timeout=max_wait_seconds, sleep_time=sleep_time) as f:
        try:
            lv = float(f.read().decode('utf-8').strip())
        except Exception:
            # empty/corrupt lock file: treat as "never visited"
            lv = 0
        # we cannot use monotonic() as this is cross process and historical
        # data as well
        delta = time.time() - lv
        if delta < time_between_visits:
            time.sleep(time_between_visits - delta)
        try:
            yield
        finally:
            # record the time of this visit for the next caller
            f.seek(0)
            f.truncate()
            f.write(repr(time.time()).encode('utf-8'))
def tostring(elem):
    # Extract the plain text content of an lxml element, ignoring its tail.
    text = etree.tostring(elem, encoding='unicode', method='text', with_tail=False)
    return text
def browser():
    # Build a browser with a random non-IE user agent and headers that match
    # what a real browser with that agent would send.
    ua = random_user_agent(allow_ie=False)
    ans = _browser(user_agent=ua)
    ans.set_handle_gzip(True)
    extra = [
        ('Accept', accept_header_for_ua(ua)),
        ('Upgrade-insecure-requests', '1'),
    ]
    ans.addheaders = ans.addheaders + extra
    return ans
def encode_query(**query):
q = {k.encode('utf-8'): v.encode('utf-8') for k, v in query.items()}
return urlencode(q).decode('utf-8')
def parse_html(raw):
    # Prefer the fast html5-parser; fall back to html5lib on old calibre
    # installs that do not bundle it.
    try:
        from html5_parser import parse
    except ImportError:
        # Old versions of calibre
        import html5lib
        return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)
    return parse(raw)
def query(br, url, key, dump_raw=None, limit=1, parser=parse_html, timeout=60, save_raw=None, simple_scraper=None):
    # Fetch ``url`` while holding the per-``key`` rate limit, optionally dump
    # and/or save the raw response, then return the parsed result.
    with rate_limit(key):
        if simple_scraper is not None:
            raw = simple_scraper(url, timeout=timeout)
        else:
            raw = br.open_novisit(url, timeout=timeout).read()
            raw = xml_to_unicode(raw, strip_encoding_pats=True)[0]
        if dump_raw is not None:
            with open(dump_raw, 'w') as f:
                f.write(raw)
        if save_raw is not None:
            save_raw(raw)
        return parser(raw)
def quote_term(x):
    '''Percent-encode a single search term for use in a query URL.'''
    q = quote_plus(x.encode('utf-8'))
    return q.decode('utf-8') if isinstance(q, bytes) else q
# DDG + Wayback machine {{{
def ddg_url_processor(url):
    # DuckDuckGo result URLs need no post-processing.
    return url
def ddg_term(t):
    # Prepare a single token for a DuckDuckGo query: strip quotes, force
    # quoting of terms DDG treats specially, lower-case boolean operators.
    t = t.replace('"', '')
    if t.lower() in ('map', 'news'):
        t = '"{}"'.format(t)
    if t in ('OR', 'AND', 'NOT'):
        t = t.lower()
    return t
def ddg_href(url):
    '''Resolve a DuckDuckGo redirect href (/l/?uddg=...) to the real URL.

    BUG FIX: the query string was encoded to bytes before being passed to
    parse_qs(), which makes parse_qs return *bytes* keys, so the str lookup
    ``['uddg']`` always raised KeyError on Python 3. Parse the str directly.
    '''
    if url.startswith('/'):
        q = url.partition('?')[2]
        url = parse_qs(q)['uddg'][0]
    return url
def wayback_machine_cached_url(url, br=None, log=prints, timeout=60):
    # Ask the Wayback Machine for its closest snapshot of ``url``. Returns an
    # unmodified-HTML snapshot URL (the id_ variant) or None on failure.
    q = quote_term(url)
    br = br or browser()
    try:
        data = query(br, 'https://archive.org/wayback/available?url=' + q,
                     'wayback', parser=json.loads, limit=0.25, timeout=timeout)
    except Exception as e:
        log('Wayback machine query failed for url: ' + url + ' with error: ' + str(e))
        return None
    try:
        closest = data['archived_snapshots']['closest']
        if closest['available']:
            ans = closest['url'].replace('http:', 'https:', 1)
            # ask for the unmodified HTML of the snapshot
            return ans.replace(closest['timestamp'], closest['timestamp'] + 'id_', 1)
    except Exception:
        pass
    from pprint import pformat
    log('Response from wayback machine:', pformat(data))
def wayback_url_processor(url):
    '''Turn a wayback-machine relative URL back into a usable absolute URL.'''
    if not url.startswith('/'):
        return url
    # Use original URL instead of absolutizing to wayback URL as wayback is
    # slow
    m = re.search('https?:', url)
    if m is None:
        return 'https://web.archive.org' + url
    return url[m.start():]
# Shared storage handed to calibre's simple scraper by ddg_search()
ddg_scraper_storage = []
def ddg_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):
    # Search DuckDuckGo's HTML endpoint. Query syntax:
    # https://duck.co/help/results/syntax
    tokens = [quote_term(ddg_term(t)) for t in terms]
    if site is not None:
        tokens.append(quote_term('site:' + site))
    q = '+'.join(tokens)
    url = 'https://duckduckgo.com/html/?q={q}&kp={kp}'.format(
        q=q, kp=1 if safe_search else -1)
    log('Making ddg query: ' + url)
    from calibre.scraper.simple import read_url
    br = br or browser()
    root = query(br, url, 'ddg', dump_raw, timeout=timeout,
                 simple_scraper=partial(read_url, ddg_scraper_storage))
    results = []
    for anchor in root.xpath('//*[@class="results"]//*[@class="result__title"]/a[@href and @class="result__a"]'):
        try:
            results.append(Result(ddg_href(anchor.get('href')), tostring(anchor), None))
        except KeyError:
            log('Failed to find ddg href in:', anchor.get('href'))
    return results, url
def ddg_develop():
    # Command line helper for manually exercising ddg_search().
    br = browser()
    results = ddg_search('heroes abercrombie'.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=br)[0]
    for result in results:
        if '/dp/' not in result.url:
            continue
        print(result.title)
        print(' ', result.url)
        print(' ', get_cached_url(result.url, br))
        print()
# }}}
# Bing {{{
def bing_term(t):
    # Prepare a single token for a Bing query: strip quotes and lower-case
    # boolean operators.
    t = t.replace('"', '')
    return t.lower() if t in ('OR', 'AND', 'NOT') else t
def bing_url_processor(url):
    # Bing result URLs need no post-processing.
    return url
def bing_cached_url(url, br=None, log=prints, timeout=60):
    # See https://support.microsoft.com/en-gb/topic/advanced-search-keywords-ea595928-5d63-4a0b-9c6b-0b769865e78a for operators
    results = bing_search(['url:' + url], br=br, log=log, timeout=timeout)[0]
    # return the cached copy URL of the first hit, if any
    return results[0].cached_url if results else None
def resolve_bing_wrapper_page(url, br, log):
    # Bing sometimes returns a JavaScript redirect wrapper page; extract the
    # real destination URL from it, falling back to the wrapper URL itself.
    raw = br.open_novisit(url).read().decode('utf-8', 'replace')
    m = re.search(r'var u = "(.+)"', raw)
    if m is None:
        log('Failed to resolve bing wrapper page for url: ' + url)
        return url
    resolved = m.group(1)
    log('Resolved bing wrapped URL: ' + url + ' to ' + resolved)
    return resolved
def bing_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60, show_user_agent=False, result_url_is_ok=lambda x: True):
    # Search Bing and return (results, query_url). Each result carries the
    # target URL, its title and a cc.bingj.com cached-copy URL.
    # http://vlaurie.com/computers2/Articles/bing_advanced_search.htm
    terms = [quote_term(bing_term(t)) for t in terms]
    if site is not None:
        terms.append(quote_term(('site:' + site)))
    q = '+'.join(terms)
    url = 'https://www.bing.com/search?q={q}'.format(q=q)
    log('Making bing query: ' + url)
    if br is None:
        br = browser()
    else:
        br = br.clone_browser()
    # replace the existing user agent with a random common Chrome one
    br.addheaders = [x for x in br.addheaders if x[0].lower() != 'user-agent']
    ua = ''
    from calibre.utils.random_ua import random_common_chrome_user_agent
    while not ua:
        ua = random_common_chrome_user_agent()
    if show_user_agent:
        print('User-agent:', ua)
    br.addheaders.append(('User-agent', ua))
    root = query(br, url, 'bing', dump_raw, timeout=timeout)
    ans = []
    for li in root.xpath('//*[@id="b_results"]/li[@class="b_algo"]'):
        a = li.xpath('descendant::h2/a[@href]') or li.xpath('descendant::div[@class="b_algoheader"]/a[@href]')
        a = a[0]
        title = tostring(a)
        try:
            # the @u attribute encodes the parameters of the cached copy page
            div = li.xpath('descendant::div[@class="b_attribution" and @u]')[0]
        except IndexError:
            log('Ignoring {!r} as it has no cached page'.format(title))
            continue
        d, w = div.get('u').split('|')[-2:]
        cached_url = 'https://cc.bingj.com/cache.aspx?q={q}&d={d}&mkt=en-US&setlang=en-US&w={w}'.format(
            q=q, d=d, w=w)
        ans_url = a.get('href')
        if ans_url.startswith('https://www.bing.com/'):
            # result links to a bing wrapper page; resolve to the real URL
            ans_url = resolve_bing_wrapper_page(ans_url, br, log)
        if result_url_is_ok(ans_url):
            ans.append(Result(ans_url, title, cached_url))
    if not ans:
        title = ' '.join(root.xpath('//title/text()'))
        log('Failed to find any results on results page, with title:', title)
    return ans, url
def bing_develop(terms='heroes abercrombie'):
    # Command line helper for manually exercising bing_search().
    if isinstance(terms, str):
        terms = terms.split()
    results = bing_search(terms, 'www.amazon.com', dump_raw='/t/raw.html', show_user_agent=True)[0]
    for result in results:
        if '/dp/' not in result.url:
            continue
        print(result.title)
        print(' ', result.url)
        print(' ', result.cached_url)
        print()
# }}}
# Google {{{
def google_term(t):
    # Prepare a single token for a Google query: strip quotes and lower-case
    # boolean operators.
    t = t.replace('"', '')
    return t.lower() if t in ('OR', 'AND', 'NOT') else t
def google_url_processor(url):
    # Google result URLs need no post-processing.
    return url
def google_cache_url_for_url(url):
    '''Return the google web cache URL for ``url`` (str or bytes).'''
    if not isinstance(url, bytes):
        url = url.encode('utf-8')
    quoted = quote(url, safe='')
    if isinstance(quoted, bytes):
        quoted = quoted.decode('utf-8')
    return 'https://webcache.googleusercontent.com/search?q=cache:' + quoted
def google_get_cached_url(url, br=None, log=prints, timeout=60):
    # Google's webcache was discontinued in september 2024
    cached_url = google_cache_url_for_url(url)
    br = google_specialize_browser(br or browser())
    try:
        raw = query(br, cached_url, 'google-cache', parser=lambda x: x.encode('utf-8'), timeout=timeout)
    except Exception as err:
        log('Failed to get cached URL from google for URL: {} with error: {}'.format(url, err))
        return None
    # remember the raw bytes so get_data_for_cached_url() can serve them
    with webcache_lock:
        webcache[cached_url] = raw
    return cached_url
def canonicalize_url_for_cache_map(url):
    # Normalize a result URL before using it as a de-duplication key. Amazon
    # URLs carry extra parameters after the first '&' which are dropped.
    try:
        netloc = urlparse(url).netloc
    except Exception:
        return url
    if '.amazon.' in netloc:
        url = url.split('&', 1)[0]
    return url
def google_parse_results(root, raw, log=prints, ignore_uncached=True):
    # Extract search results from a parsed google results page, de-duplicating
    # by canonicalized URL. ``raw`` and ``ignore_uncached`` are unused here,
    # kept for backwards compatibility with existing callers.
    ans = []
    seen = set()
    for div in root.xpath('//*[@id="search"]//*[@id="rso"]//div[descendant::h3]'):
        try:
            a = div.xpath('descendant::a[@href]')[0]
        except IndexError:
            log('Ignoring div with no main result link')
            continue
        title = tostring(a)
        src_url = a.get('href')
        # print(f'{src_url=}')
        curl = canonicalize_url_for_cache_map(src_url)
        if curl in seen:
            continue
        seen.add(curl)
        ans.append(Result(curl, title, None))
    if not ans:
        title = ' '.join(root.xpath('//title/text()'))
        log('Failed to find any results on results page, with title:', title)
    return ans
def google_consent_cookies():
    '''
    Yield cookie dicts (name/value/domain/path) that pre-acknowledge Google's
    consent interstitial so that searches are not redirected to it.
    See https://github.com/benbusby/whoogle-search/pull/1054 for cookies
    '''
    from base64 import standard_b64encode
    from datetime import date
    base = {'domain': '.google.com', 'path': '/'}
    b = base.copy()
    b['name'], b['value'] = 'CONSENT', 'PENDING+987'
    yield b
    template = b'\x08\x01\x128\x08\x14\x12+boq_identityfrontenduiserver_20231107.05_p0\x1a\x05en-US \x03\x1a\x06\x08\x80\xf1\xca\xaa\x06'
    # BUG FIX: bytes.replace() returns a new object; the original code
    # discarded the result, so the embedded date was never updated.
    template = template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))
    b = base.copy()
    b['name'], b['value'] = 'SOCS', standard_b64encode(template).decode('ascii').rstrip('=')
    yield b
def google_specialize_browser(br):
    # Install the google consent cookies on br exactly once, using a flag
    # attribute on the browser to avoid repeating the work.
    with webcache_lock:
        if not hasattr(br, 'google_consent_cookie_added'):
            for cookie in google_consent_cookies():
                br.set_simple_cookie(cookie['name'], cookie['value'], cookie['domain'], path=cookie['path'])
            br.google_consent_cookie_added = True
    return br
def is_probably_book_asin(t):
    '''Heuristic: Amazon book ASINs are 10 characters, start with B and are
    all upper-case. Always returns a bool (the original leaked ''/None for
    falsy input; its caller already wrapped it in bool()).'''
    return bool(t) and len(t) == 10 and t.startswith('B') and t.upper() == t
def is_asin_or_isbn(t):
    # True if t looks like a valid ISBN or an Amazon book ASIN.
    from calibre.ebooks.metadata import check_isbn
    if check_isbn(t):
        return True
    return bool(is_probably_book_asin(t))
def google_format_query(terms, site=None, tbm=None):
    # Build the google search URL. Identifier-like terms (ISBN/ASIN) disable
    # google's spelling correction via &nfpr=1.
    prevent_spelling_correction = any(is_asin_or_isbn(t) for t in terms)
    tokens = [quote_term(google_term(t)) for t in terms]
    if site is not None:
        tokens.append(quote_term('site:' + site))
    url = 'https://www.google.com/search?q=' + '+'.join(tokens)
    if tbm:
        url += '&tbm=' + tbm
    if prevent_spelling_correction:
        url += '&nfpr=1'
    return url
def google_search(terms, site=None, br=None, log=prints, safe_search=False, dump_raw=None, timeout=60):
    # Search google and return (results, query_url).
    url = google_format_query(terms, site)
    log('Making google query: ' + url)
    br = google_specialize_browser(br or browser())
    saved = []
    root = query(br, url, 'google', dump_raw, timeout=timeout, save_raw=saved.append)
    return google_parse_results(root, saved[0], log=log), url
def google_develop(search_terms='1423146786', raw_from=''):
    # Command line helper for manually exercising google_search(); can also
    # re-parse a previously dumped raw results page.
    if raw_from:
        with open(raw_from, 'rb') as f:
            raw = f.read()
        results = google_parse_results(parse_html(raw), raw)
    else:
        results = google_search(search_terms.split(), 'www.amazon.com', dump_raw='/t/raw.html', br=browser())[0]
    for result in results:
        if '/dp/' not in result.url:
            continue
        print(result.title)
        print(' ', result.url)
        print(' ', result.cached_url)
        print()
# }}}
def get_cached_url(url, br=None, log=prints, timeout=60):
    '''Race the Wayback Machine and Bing for a cached copy of ``url``,
    returning the first non-None answer, or None if both fail.

    BUG FIX: the original appended ``Thread(...).start()`` to the list, i.e.
    it stored the None returned by start() instead of the Thread objects.
    '''
    from threading import Lock, Thread
    from polyglot.queue import Queue
    print_lock = Lock()
    q = Queue()

    def safe_print(*a):
        with print_lock:
            log(*a)

    def doit(func):
        try:
            q.put(func(url, br, safe_print, timeout))
        except Exception as e:
            safe_print(e)
            q.put(None)

    threads = []
    for func in (wayback_machine_cached_url, bing_cached_url):
        t = Thread(target=doit, args=(func,), daemon=True)
        t.start()
        threads.append(t)
    # Wait for one answer per worker; return the first usable one.
    while threads:
        x = q.get()
        if x is not None:
            return x
        threads.pop()
def get_data_for_cached_url(url):
    # Return raw bytes previously stored in the in-memory webcache, if any.
    with webcache_lock:
        data = webcache.get(url)
    return data
def resolve_url(url):
    '''Expand a "engine:rest" wrapped search-result URL into a real URL.'''
    scheme, _, rest = url.partition(':')
    if scheme == 'bing':
        return bing_url_processor(rest)
    if scheme == 'wayback':
        return wayback_url_processor(rest)
    return url
# if __name__ == '__main__':
# import sys
# func = sys.argv[-1]
# globals()[func]()
| 15,425 | Python | .py | 396 | 32.340909 | 157 | 0.619284 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,550 | identify.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/identify.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
import time
import unicodedata
from datetime import datetime
from io import StringIO
from operator import attrgetter
from threading import Thread
from calibre.customize.ui import all_metadata_plugins, metadata_plugins
from calibre.ebooks.metadata import authors_to_sort_string, check_issn
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.sources.base import create_log
from calibre.ebooks.metadata.sources.prefs import msprefs
from calibre.ebooks.metadata.xisbn import xisbn
from calibre.utils.date import UNDEFINED_DATE, as_utc, utc_tz
from calibre.utils.formatter import EvalFormatter
from calibre.utils.html2text import html2text
from calibre.utils.icu import lower, primary_sort_key
from polyglot.builtins import as_unicode, iteritems, itervalues
from polyglot.queue import Empty, Queue
from polyglot.urllib import quote, urlparse
# Download worker {{{
class Worker(Thread):

    '''Run a single metadata source plugin's identify() in its own thread,
    collecting results in self.rq and log output in self.buf.'''

    def __init__(self, plugin, kwargs, abort):
        Thread.__init__(self)
        self.daemon = True
        self.plugin, self.kwargs, self.rq = plugin, kwargs, Queue()
        self.abort = abort
        self.buf = StringIO()
        self.log = create_log(self.buf)

    def run(self):
        start = time.time()
        try:
            self.plugin.identify(self.log, self.rq, self.abort, **self.kwargs)
        except Exception:
            # was a bare except:, which also swallowed SystemExit and
            # KeyboardInterrupt; catch only real errors
            self.log.exception('Plugin', self.plugin.name, 'failed')
        # record how long this plugin took, for relevance/diagnostics
        self.plugin.dl_time_spent = time.time() - start

    @property
    def name(self):
        return self.plugin.name
def is_worker_alive(workers):
    '''Return True if any of the given worker threads is still running.'''
    return any(w.is_alive() for w in workers)
# }}}
# Merge results from different sources {{{
class xISBN(Thread):

    '''Fetch the pool of related ISBNs (other editions) for an ISBN from the
    xISBN web service in a worker thread. On failure, the exception and its
    formatted traceback are stored in self.exception / self.tb.'''

    def __init__(self, isbn):
        Thread.__init__(self)
        self.isbn = isbn
        self.isbns = frozenset()
        self.min_year = None
        self.daemon = True
        self.exception = self.tb = None

    def run(self):
        try:
            self.isbns, self.min_year = xisbn.get_isbn_pool(self.isbn)
        except Exception as e:
            import traceback
            self.exception = e
            # BUG FIX: traceback.format_exception() requires arguments and
            # raised TypeError here; format_exc() captures the exception
            # currently being handled, which is what is wanted
            self.tb = traceback.format_exc()
class ISBNMerge:
    def __init__(self, log):
        # pools: maps a frozenset of related ISBNs -> (min_year, results) pair
        self.pools = {}
        # results that arrived without any ISBN at all
        self.isbnless_results = []
        self.results = []
        self.log = log
        # The xISBN service has been de-commissioned
        # https://www.oclc.org/developer/news/2018/xid-decommission.en.html
        self.use_xisbn = False
def isbn_in_pool(self, isbn):
if isbn:
for isbns, pool in iteritems(self.pools):
if isbn in isbns:
return pool
return None
def pool_has_result_from_same_source(self, pool, result):
results = pool[1]
for r in results:
if r.identify_plugin is result.identify_plugin:
return True
return False
    def add_result(self, result):
        # Add one identify result, pooling it with other results that share an
        # ISBN (or any xISBN-related ISBN) so that duplicates can be merged.
        isbn = result.isbn
        if isbn:
            pool = self.isbn_in_pool(isbn)
            if pool is None:
                isbns = min_year = None
                if self.use_xisbn:
                    # query the (now defunct) xISBN service with a 10s timeout
                    xw = xISBN(isbn)
                    xw.start()
                    xw.join(10)
                    if xw.is_alive():
                        self.log.error('Query to xISBN timed out')
                        self.use_xisbn = False
                    else:
                        if xw.exception:
                            self.log.error('Query to xISBN failed:')
                            self.log.debug(xw.tb)
                        else:
                            isbns, min_year = xw.isbns, xw.min_year
                            if not msprefs['find_first_edition_date']:
                                min_year = None
                if not isbns:
                    # no related ISBNs known; pool on this ISBN alone
                    isbns = frozenset([isbn])
                if isbns in self.pools:
                    # xISBN had a brain fart
                    pool = self.pools[isbns]
                else:
                    self.pools[isbns] = pool = (min_year, [])
            # at most one result per source plugin in any pool
            if not self.pool_has_result_from_same_source(pool, result):
                pool[1].append(result)
        else:
            self.isbnless_results.append(result)
    def finalize(self):
        # Collapse the pools and the ISBN-less results into self.results,
        # sorted by relevance, and return it.
        has_isbn_result = False
        for results in itervalues(self.pools):
            if results:
                has_isbn_result = True
                break
        isbn_sources = frozenset()
        if has_isbn_result:
            isbn_sources = self.merge_isbn_results()
        # Now handle results that have no ISBNs
        results = sorted(self.isbnless_results,
                key=attrgetter('relevance_in_source'))
        # Only use results that are from sources that have not also returned a
        # result with an ISBN
        results = [r for r in results if r.identify_plugin not in isbn_sources or not r.identify_plugin.prefer_results_with_isbn]
        if results:
            # Pick only the most relevant result from each source
            seen = set()
            for result in results:
                if msprefs['keep_dups'] or result.identify_plugin not in seen:
                    seen.add(result.identify_plugin)
                    self.results.append(result)
                    result.average_source_relevance = \
                        result.relevance_in_source
        self.merge_metadata_results()
        return self.results
    def merge_metadata_results(self, merge_on_identifiers=False):
        '''
        Merge results with identical title and authors or an identical
        identifier
        '''
        # First title/author
        groups = {}
        for result in self.results:
            title = lower(result.title if result.title else '')
            key = (title, tuple(lower(x) for x in result.authors))
            if key not in groups:
                groups[key] = []
            groups[key].append(result)
        if len(groups) != len(self.results):
            # at least one group has more than one member; merge each group
            self.results = []
            for rgroup in itervalues(groups):
                rel = [r.average_source_relevance for r in rgroup]
                if len(rgroup) > 1:
                    result = self.merge(rgroup, None, do_asr=False)
                    # merged result gets the mean relevance of its members
                    result.average_source_relevance = sum(rel)/len(rel)
                else:
                    result = rgroup[0]
                self.results.append(result)
        if merge_on_identifiers:
            # Now identifiers
            groups, empty = {}, []
            for result in self.results:
                key = set()
                for typ, val in iteritems(result.identifiers):
                    if typ and val:
                        key.add((typ, val))
                if key:
                    key = frozenset(key)
                    match = None
                    for candidate in list(groups):
                        if candidate.intersection(key):
                            # We have at least one identifier in common
                            match = candidate.union(key)
                            results = groups.pop(candidate)
                            results.append(result)
                            groups[match] = results
                            break
                    if match is None:
                        groups[key] = [result]
                else:
                    # results with no identifiers at all are passed through
                    empty.append(result)
            if len(groups) != len(self.results):
                self.results = []
                for rgroup in itervalues(groups):
                    rel = [r.average_source_relevance for r in rgroup]
                    if len(rgroup) > 1:
                        result = self.merge(rgroup, None, do_asr=False)
                        result.average_source_relevance = sum(rel)/len(rel)
                    elif rgroup:
                        result = rgroup[0]
                    self.results.append(result)
                if empty:
                    self.results.extend(empty)
        self.results.sort(key=attrgetter('average_source_relevance'))
def merge_isbn_results(self):
self.results = []
sources = set()
for min_year, results in itervalues(self.pools):
if results:
for r in results:
sources.add(r.identify_plugin)
self.results.append(self.merge(results, min_year))
self.results.sort(key=attrgetter('average_source_relevance'))
return sources
def length_merge(self, attr, results, null_value=None, shortest=True):
values = [getattr(x, attr) for x in results if not x.is_null(attr)]
values = [x for x in values if len(x) > 0]
if not values:
return null_value
values.sort(key=len, reverse=not shortest)
return values[0]
def random_merge(self, attr, results, null_value=None):
values = [getattr(x, attr) for x in results if not x.is_null(attr)]
return values[0] if values else null_value
    def merge(self, results, min_year, do_asr=True):
        '''
        Merge a group of results for what is assumed to be the same book
        into a single Metadata object. ``min_year`` (earliest known
        publication year for the group, or None) guides pubdate selection.
        When ``do_asr`` is True, average_source_relevance is computed here
        as the mean of the members' relevance_in_source.
        '''
        ans = Metadata(_('Unknown'))
        # We assume the shortest title has the least cruft in it
        ans.title = self.length_merge('title', results, null_value=ans.title)
        # No harm in having extra authors, maybe something useful like an
        # editor or translator
        ans.authors = self.length_merge('authors', results,
                null_value=ans.authors, shortest=False)
        # We assume the shortest publisher has the least cruft in it
        ans.publisher = self.length_merge('publisher', results,
                null_value=ans.publisher)
        # We assume the smallest set of tags has the least cruft in it
        ans.tags = self.length_merge('tags', results,
                null_value=ans.tags, shortest=msprefs['fewer_tags'])
        # We assume the longest series has the most info in it
        ans.series = self.length_merge('series', results,
                null_value=ans.series, shortest=False)
        # Take series_index from the first result whose series matches
        for r in results:
            if r.series and r.series == ans.series:
                ans.series_index = r.series_index
                break
        # Average the rating over all sources
        ratings = []
        for r in results:
            rating = r.rating
            if rating and rating > 0 and rating <= 5:
                ratings.append(rating)
        if ratings:
            ans.rating = int(round(sum(ratings)/len(ratings)))
        # Smallest language is likely to be valid
        ans.language = self.length_merge('language', results,
                null_value=ans.language)
        # Choose longest comments
        ans.comments = self.length_merge('comments', results,
                null_value=ans.comments, shortest=False)
        # Published date
        if min_year:
            # Prefer a full date from a result published in min_year
            for r in results:
                year = getattr(r.pubdate, 'year', None)
                if year == min_year:
                    ans.pubdate = r.pubdate
                    break
            if getattr(ans.pubdate, 'year', None) == min_year:
                min_date = datetime(min_year, ans.pubdate.month, ans.pubdate.day,
                        tzinfo=utc_tz)
            else:
                # NOTE(review): presumably Jan *2* so a timezone conversion
                # cannot shift the date into the previous year — confirm
                min_date = datetime(min_year, 1, 2, tzinfo=utc_tz)
            ans.pubdate = min_date
        else:
            # No hint: use the earliest pubdate among the results, if any.
            # Year 3001 acts as a sentinel "no date found" upper bound.
            min_date = datetime(3001, 1, 1, tzinfo=utc_tz)
            for r in results:
                if r.pubdate is not None:
                    candidate = as_utc(r.pubdate)
                    if candidate < min_date:
                        min_date = candidate
            if min_date.year < 3000:
                ans.pubdate = min_date
        # Identifiers
        for r in results:
            ans.identifiers.update(r.identifiers)
        # Cover URL
        ans.has_cached_cover_url = bool([r for r in results if
            getattr(r, 'has_cached_cover_url', False)])
        # Merge any other fields with no special handling (random merge)
        touched_fields = set()
        for r in results:
            if hasattr(r, 'identify_plugin'):
                touched_fields |= r.identify_plugin.touched_fields
        for f in touched_fields:
            if f.startswith('identifier:') or not ans.is_null(f):
                continue
            setattr(ans, f, self.random_merge(f, results,
                null_value=getattr(ans, f)))
        if do_asr:
            avg = [x.relevance_in_source for x in results]
            avg = sum(avg)/len(avg)
            ans.average_source_relevance = avg
        return ans
def merge_identify_results(result_map, log):
    # Feed every result from every plugin into a single ISBNMerge and
    # return the merged, relevance-sorted list.
    merger = ISBNMerge(log)
    for plugin_results in itervalues(result_map):
        for res in plugin_results:
            merger.add_result(res)
    return merger.finalize()
# }}}
def identify(log, abort, # {{{
        title=None, authors=None, identifiers={}, timeout=30, allowed_plugins=None):
    '''
    Run all configured identify plugins in parallel worker threads and
    return a merged, post-processed list of Metadata results.
    NOTE(review): ``identifiers`` has a mutable default; it appears to be
    treated as read-only here — confirm before relying on that.
    '''
    # Placeholder title/authors are treated as absent
    if title == _('Unknown'):
        title = None
    if authors == [_('Unknown')]:
        authors = None
    start_time = time.time()
    plugins = [p for p in metadata_plugins(['identify'])
               if p.is_configured() and (allowed_plugins is None or p.name in allowed_plugins)]
    kwargs = {
        'title': title,
        'authors': authors,
        'identifiers': identifiers,
        'timeout': timeout,
    }
    log('Running identify query with parameters:')
    log(kwargs)
    log('Using plugins:', ', '.join(['%s %s' % (p.name, p.version) for p in plugins]))
    log('The log from individual plugins is below')
    # One worker thread per plugin; each worker posts results on its rq queue
    workers = [Worker(p, kwargs, abort) for p in plugins]
    for w in workers:
        w.start()
    first_result_at = None
    results = {}
    for p in plugins:
        results[p] = []
    logs = {w.plugin: w.buf for w in workers}
    def get_results():
        # Drain every worker queue once; returns True if anything arrived
        found = False
        for w in workers:
            try:
                result = w.rq.get_nowait()
            except Empty:
                pass
            else:
                results[w.plugin].append(result)
                found = True
        return found
    wait_time = msprefs['wait_after_first_identify_result']
    # Poll until all workers finish, or until wait_time has elapsed since
    # the first result arrived (then abort the stragglers).
    while True:
        time.sleep(0.2)
        if get_results() and first_result_at is None:
            first_result_at = time.time()
        if not is_worker_alive(workers):
            break
        if (first_result_at is not None and time.time() - first_result_at > wait_time):
            log.warn('Not waiting any longer for more results. Still running'
                    ' sources:')
            for worker in workers:
                if worker.is_alive():
                    log.debug('\t' + worker.name)
            abort.set()
            break
    # Final drain of anything still queued
    while not abort.is_set() and get_results():
        pass
    sort_kwargs = dict(kwargs)
    for k in list(sort_kwargs):
        if k not in ('title', 'authors', 'identifiers'):
            sort_kwargs.pop(k)
    longest, lp = -1, ''
    for plugin, presults in iteritems(results):
        presults.sort(key=plugin.identify_results_keygen(**sort_kwargs))
        # Throw away lower priority results from the same source that have exactly the same
        # title and authors as a higher priority result
        filter_results = set()
        filtered_results = []
        for r in presults:
            key = (r.title, tuple(r.authors))
            if key not in filter_results:
                filtered_results.append(r)
                filter_results.add(key)
        results[plugin] = presults = filtered_results
        plog = logs[plugin].getvalue().strip()
        log('\n'+'*'*30, plugin.name, '%s' % (plugin.version,), '*'*30)
        log('Found %d results'%len(presults))
        time_spent = getattr(plugin, 'dl_time_spent', None)
        if time_spent is None:
            log('Downloading was aborted')
            longest, lp = -1, plugin.name
        else:
            log('Downloading from', plugin.name, 'took', time_spent)
            if time_spent > longest:
                longest, lp = time_spent, plugin.name
        for r in presults:
            log('\n\n---')
            try:
                log(str(r))
            except TypeError:
                log(repr(r))
        if plog:
            log(plog)
        log('\n'+'*'*80)
        # Blank out user-ignored fields and annotate each result with its
        # source plugin and in-source relevance rank (lower = better)
        dummy = Metadata(_('Unknown'))
        for i, result in enumerate(presults):
            for f in plugin.prefs['ignore_fields']:
                if ':' not in f:
                    setattr(result, f, getattr(dummy, f))
                if f == 'series':
                    result.series_index = dummy.series_index
            result.relevance_in_source = i
            result.has_cached_cover_url = (
                plugin.cached_cover_url_is_reliable and plugin.get_cached_cover_url(result.identifiers) is not None)
            result.identify_plugin = plugin
            if msprefs['txt_comments']:
                if plugin.has_html_comments and result.comments:
                    result.comments = html2text(result.comments)
    log('The identify phase took %.2f seconds'%(time.time() - start_time))
    log('The longest time (%f) was taken by:'%longest, lp)
    log('Merging results from different sources')
    start_time = time.time()
    results = merge_identify_results(results, log)
    log('We have %d merged results, merging took: %.2f seconds' %
            (len(results), time.time() - start_time))
    # Post-processing: user-configured tag/publisher/author mapping rules
    tm_rules = msprefs['tag_map_rules']
    pm_rules = msprefs['publisher_map_rules']
    if tm_rules or pm_rules:
        from calibre.ebooks.metadata.tag_mapper import map_tags
    am_rules = msprefs['author_map_rules']
    if am_rules:
        from calibre.ebooks.metadata.author_mapper import compile_rules, map_authors
        am_rules = compile_rules(am_rules)
    # normalize unicode strings
    def n(x):
        return unicodedata.normalize('NFC', as_unicode(x or '', errors='replace'))
    for r in results:
        if r.tags:
            r.tags = list(map(n, r.tags))
        if r.authors:
            r.authors = list(map(n, r.authors))
        if r.author_sort:
            r.author_sort = n(r.author_sort)
        if r.title:
            r.title = n(r.title)
        if r.publisher:
            r.publisher = n(r.publisher)
        if r.comments:
            r.comments = n(r.comments)
    max_tags = msprefs['max_tags']
    for r in results:
        if tm_rules:
            r.tags = map_tags(r.tags, tm_rules)
        r.tags = r.tags[:max_tags]
        # Dates at or before UNDEFINED_DATE mean "no date"
        if getattr(r.pubdate, 'year', 2000) <= UNDEFINED_DATE.year:
            r.pubdate = None
        if pm_rules and r.publisher:
            pubs = map_tags([r.publisher], pm_rules)
            r.publisher = pubs[0] if pubs else ''
    if msprefs['swap_author_names']:
        for r in results:
            def swap_to_ln_fn(a):
                # "First Last" -> "Last, First"; names already containing a
                # comma or a single token are left alone
                if ',' in a:
                    return a
                parts = a.split(None)
                if len(parts) <= 1:
                    return a
                surname = parts[-1]
                return '%s, %s' % (surname, ' '.join(parts[:-1]))
            r.authors = [swap_to_ln_fn(a) for a in r.authors]
    if am_rules:
        for r in results:
            new_authors = map_authors(r.authors, am_rules)
            if new_authors != r.authors:
                r.authors = new_authors
                r.author_sort = authors_to_sort_string(r.authors)
    return results
# }}}
def urls_from_identifiers(identifiers, sort_results=False): # {{{
    '''
    Build a list of (name, id_type, id_value, url) tuples for all the
    identifiers that can be turned into clickable URLs, drawing from (in
    order): user-configured id link rules, metadata source plugins, a few
    well-known identifier types, and raw URL-style identifier values.
    '''
    identifiers = {k.lower():v for k, v in iteritems(identifiers)}
    ans = []
    keys_left = set(identifiers)
    def add(name, k, val, url):
        ans.append((name, k, val, url))
        keys_left.discard(k)
    rules = msprefs['id_link_rules']
    if rules:
        formatter = EvalFormatter()
        for k, val in iteritems(identifiers):
            # identifier values store ',' as '|'; undo that for display
            val = val.replace('|', ',')
            vals = {
                'id':str(quote(val if isinstance(val, bytes) else val.encode('utf-8'))),
                'id_unquoted': str(val),
            }
            items = rules.get(k) or ()
            for name, template in items:
                try:
                    url = formatter.safe_format(template, vals, '', vals)
                except Exception:
                    import traceback
                    # FIX: was traceback.format_exc() with the returned
                    # string discarded — a silent no-op. Actually emit the
                    # traceback before skipping this rule.
                    traceback.print_exc()
                    continue
                add(name, k, val, url)
    for plugin in all_metadata_plugins():
        try:
            for id_type, id_val, url in plugin.get_book_urls(identifiers):
                add(plugin.get_book_url_name(id_type, id_val, url), id_type, id_val, url)
        except Exception:
            # best-effort: a broken plugin must not break URL generation
            pass
    isbn = identifiers.get('isbn', None)
    if isbn:
        add(isbn, 'isbn', isbn,
            'https://www.worldcat.org/isbn/'+isbn)
    doi = identifiers.get('doi', None)
    if doi:
        add('DOI', 'doi', doi,
            'https://dx.doi.org/'+doi)
    arxiv = identifiers.get('arxiv', None)
    if arxiv:
        add('arXiv', 'arxiv', arxiv,
            'https://arxiv.org/abs/'+arxiv)
    oclc = identifiers.get('oclc', None)
    if oclc:
        add('OCLC', 'oclc', oclc,
            'https://www.worldcat.org/oclc/'+oclc)
    issn = check_issn(identifiers.get('issn', None))
    if issn:
        add(issn, 'issn', issn,
            'https://www.worldcat.org/issn/'+issn)
    q = {'http', 'https', 'file'}
    for k, url in iteritems(identifiers):
        # Identifier types like url/uri/url2 hold literal URLs; ':' was
        # stored as '|' in the scheme part (first 8 chars) and ',' as '|'
        # in the rest — presumably, since identifier values cannot contain
        # those characters. TODO confirm against the identifier codec.
        if url and re.match(r'ur[il]\d*$', k) is not None:
            url = url[:8].replace('|', ':') + url[8:].replace('|', ',')
            if url.partition(':')[0].lower() in q:
                parts = urlparse(url)
                name = parts.netloc or parts.path
                add(name, k, url, url)
    # Any remaining identifier whose value decodes to a URL is linked too
    for k in tuple(keys_left):
        val = identifiers.get(k)
        if val:
            url = val[:8].replace('|', ':') + val[8:].replace('|', ',')
            if url.partition(':')[0].lower() in q:
                parts = urlparse(url)
                name = parts.netloc or parts.path
                add(name, k, url, url)
    if sort_results:
        def url_key(x):
            return primary_sort_key(str(x[0]))
        ans = sorted(ans, key=url_key)
    return ans
# }}}
def tests(start=0, limit=256): # tests {{{
    # To run these test use: calibre-debug -c "from calibre.ebooks.metadata.sources.identify import tests; tests()"
    from calibre.ebooks.metadata.sources.test import authors_test, test_identify, title_test
    cases = [
        (
            {'title': 'Magykal Papers', 'authors': ['Sage']},
            [title_test('Septimus Heap: The Magykal Papers', exact=True)],
        ),
        # An e-book ISBN not on Amazon, one of the authors is unknown to Amazon
        (
            {'identifiers': {'isbn': '9780307459671'},
             'title': 'Invisible Gorilla', 'authors': ['Christopher Chabris']},
            [title_test('The Invisible Gorilla: And Other Ways Our Intuitions Deceive Us', exact=True)],
        ),
        # Test absence of identifiers
        (
            {'title': 'Learning Python', 'authors': ['Lutz']},
            [title_test('Learning Python', exact=True),
             authors_test(['Mark J. Lutz', 'David Ascher'])],
        ),
        # Sophisticated comment formatting
        (
            {'identifiers': {'isbn': '9781416580829'}},
            [title_test('Angels & Demons', exact=True),
             authors_test(['Dan Brown'])],
        ),
        # A newer book
        (
            {'identifiers': {'isbn': '9780316044981'}},
            [title_test('The Heroes', exact=True),
             authors_test(['Joe Abercrombie'])],
        ),
    ]
    test_identify(cases[start:limit])
# }}}
| 24,073 | Python | .py | 583 | 29.523156 | 129 | 0.551114 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,551 | google_images.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/google_images.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8
from __future__ import absolute_import, division, print_function, unicode_literals
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
from calibre import random_user_agent
from calibre.ebooks.metadata.sources.base import Option, Source
def parse_html(raw):
    # Prefer the fast html5_parser; fall back to html5lib on old calibre
    # installs that do not ship it.
    try:
        from html5_parser import parse
    except ImportError:
        import html5lib
        return html5lib.parse(raw, treebuilder='lxml', namespaceHTMLElements=False)
    return parse(raw)
def imgurl_from_id(raw, tbnid):
    # Locate the inline JSON array keyed by the thumbnail/doc id in the raw
    # Google results page and return the *second* http(s) URL found in it
    # (the first one is presumably the thumbnail — not verified).
    from json import JSONDecoder
    marker = '"{}",['.format(tbnid)
    pos = raw.index(marker)
    if pos < 100:
        # too close to the start of the page to be real result data
        return
    payload = JSONDecoder().raw_decode('[' + raw[pos:])[0]
    seen = 0
    for entry in payload:
        if not (isinstance(entry, list) and len(entry) == 3):
            continue
        candidate = entry[0]
        if hasattr(candidate, 'lower') and candidate.lower().startswith('http'):
            seen += 1
            if seen > 1:
                return candidate
def parse_google_markup(raw):
    # Pull candidate result ids out of the page markup and resolve each to
    # a full image URL, de-duplicating while preserving discovery order.
    root = parse_html(raw)
    # newer markup pages use data-docid not data-tbnid
    ids = root.xpath('//div/@data-tbnid') or root.xpath('//div/@data-docid')
    unique_urls = OrderedDict()
    for item_id in ids:
        try:
            url = imgurl_from_id(raw, item_id)
        except Exception:
            continue
        if url:
            unique_urls[url] = True
    return list(unique_urls)
class GoogleImages(Source):
    '''Cover download source that scrapes a Google Images search.'''
    name = 'Google Images'
    version = (1, 0, 6)
    minimum_calibre_version = (2, 80, 0)
    description = _('Downloads covers from a Google Image search. Useful to find larger/alternate covers.')
    capabilities = frozenset(['cover'])
    can_get_multiple_covers = True
    supports_gzip_transfer_encoding = True
    options = (Option('max_covers', 'number', 5, _('Maximum number of covers to get'),
            _('The maximum number of covers to process from the Google search result')),
        Option('size', 'choices', 'svga', _('Cover size'),
            _('Search for covers larger than the specified size'),
            choices=OrderedDict((
                ('any', _('Any size'),),
                ('l', _('Large'),),
                ('qsvga', _('Larger than %s')%'400x300',),
                ('vga', _('Larger than %s')%'640x480',),
                ('svga', _('Larger than %s')%'600x800',),
                ('xga', _('Larger than %s')%'1024x768',),
                ('2mp', _('Larger than %s')%'2 MP',),
                ('4mp', _('Larger than %s')%'4 MP',),
            ))),
        )
    def download_cover(self, log, result_queue, abort,
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        # Entry point called by the covers download machinery.
        if not title:
            return
        timeout = max(60, timeout)  # Needs at least a minute
        title = ' '.join(self.get_title_tokens(title))
        author = ' '.join(self.get_author_tokens(authors))
        urls = self.get_image_urls(title, author, log, abort, timeout)
        self.download_multiple_covers(title, authors, urls, get_best_cover, timeout, result_queue, abort, log)
    @property
    def user_agent(self):
        # Fresh randomized UA on every access, to look less bot-like
        return random_user_agent(allow_ie=False)
    def get_image_urls(self, title, author, log, abort, timeout):
        # Perform the actual Google Images query and return candidate URLs.
        from calibre.utils.cleantext import clean_ascii_chars
        try:
            from urllib.parse import urlencode
        except ImportError:
            from urllib import urlencode
        br = self.browser
        q = urlencode({'as_q': ('%s %s'%(title, author)).encode('utf-8')})
        if isinstance(q, bytes):
            q = q.decode('utf-8')
        sz = self.prefs['size']
        if sz == 'any':
            sz = ''
        elif sz == 'l':
            sz = 'isz:l,'
        else:
            sz = 'isz:lt,islt:%s,' % sz
        # See https://www.google.com/advanced_image_search to understand this
        # URL scheme
        url = 'https://www.google.com/search?as_st=y&tbm=isch&{}&as_epq=&as_oq=&as_eq=&cr=&as_sitesearch=&safe=images&tbs={}iar:t,ift:jpg'.format(q, sz)
        log('Search URL: ' + url)
        # See https://github.com/benbusby/whoogle-search/pull/1054 for cookies
        br.set_simple_cookie('CONSENT', 'PENDING+987', '.google.com', path='/')
        template = b'\x08\x01\x128\x08\x14\x12+boq_identityfrontenduiserver_20231107.05_p0\x1a\x05en-US \x03\x1a\x06\x08\x80\xf1\xca\xaa\x06'
        from base64 import standard_b64encode
        from datetime import date
        # FIX: bytes.replace() returns a new object; the original code
        # discarded the result, so the SOCS cookie always carried the
        # hardcoded 20231107 date. Assign it back to stamp today's date.
        template = template.replace(b'20231107', date.today().strftime('%Y%m%d').encode('ascii'))
        br.set_simple_cookie('SOCS', standard_b64encode(template).decode('ascii').rstrip('='), '.google.com', path='/')
        # br.set_debug_http(True)
        raw = clean_ascii_chars(br.open(url).read().decode('utf-8'))
        # with open('/t/raw.html', 'w') as f:
        #     f.write(raw)
        return parse_google_markup(raw)
def test_raw():
    '''Parse a previously saved Google results page (path given as the last
    command-line argument) and print the image URLs found in it.'''
    import sys
    # FIX: close the file handle instead of leaking it
    with open(sys.argv[-1]) as f:
        raw = f.read()
    for x in parse_google_markup(raw):
        print(x)
def test(title='Star Trek: Section 31: Control', authors=('David Mack',)):
    # Live smoke test: run a real cover search and report how many covers
    # were downloaded.
    try:
        from queue import Queue
    except ImportError:  # py2 fallback
        from Queue import Queue
    from threading import Event
    from calibre.utils.logging import default_log
    plugin = GoogleImages(None)
    plugin.log = default_log
    result_queue = Queue()
    plugin.download_cover(default_log, result_queue, Event(), title=title, authors=authors)
    print('Downloaded', result_queue.qsize(), 'covers')
if __name__ == '__main__':
    # Manual smoke test; performs live network requests
    test()
| 5,840 | Python | .py | 136 | 33.926471 | 152 | 0.587221 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,552 | update.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/sources/update.py | #!/usr/bin/env python
# vim:fileencoding=utf-8
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from __future__ import absolute_import, division, print_function, unicode_literals
import bz2
import hashlib
import json
import sys
import time
from threading import Thread
import calibre.ebooks.metadata.sources.search_engines as builtin_search_engines
from calibre import as_unicode, prints
from calibre.constants import numeric_version
from calibre.ebooks.metadata.sources.base import Source
from calibre.prints import debug_print
from calibre.utils.config import JSONConfig
from calibre.utils.https import get_https_resource_securely
from polyglot.builtins import iteritems, itervalues
cache = JSONConfig('metadata-sources-cache.json')  # local store of downloaded plugin sources, keyed by name
UPDATE_INTERVAL = 12 * 60 * 60  # seconds: at most one update attempt per 12 hours
current_search_engines = builtin_search_engines  # may be swapped for a PatchedSearchEngines by patch_search_engines()
def search_engines_module():
    # Accessor for the currently active search-engines implementation:
    # either the builtin module or a PatchedSearchEngines proxy installed
    # by patch_search_engines().
    return current_search_engines
def load_plugin(src):
    # Execute downloaded plugin source and return the Source subclass it
    # defines (implicitly None if no such class is found).
    namespace = {}
    exec(src.encode('utf-8'), namespace)
    for candidate in itervalues(namespace):
        if isinstance(candidate, type) and issubclass(candidate, Source) and candidate is not Source:
            return candidate
class PatchedSearchEngines:
    '''Read-only attribute proxy over the namespace dict produced by
    exec()ing an updated search_engines module.'''

    def __init__(self, ns):
        self.__namespace = ns

    def __getattr__(self, attr):
        ns = self.__namespace
        if attr in ns:
            return ns[attr]
        raise AttributeError('{} not present in search_engines_module'.format(attr))
def patch_search_engines(src):
    # Replace the builtin search-engines implementation with the supplied
    # source, but only if it is compatible with this calibre version and
    # strictly newer than what ships built in.
    global current_search_engines
    namespace = {}
    try:
        exec(src.encode('utf-8'), namespace)
    except Exception:
        min_calibre = None
    else:
        min_calibre = namespace.get('minimum_calibre_version')
    if min_calibre is None or min_calibre > numeric_version:
        return  # broken source, or requires a newer calibre
    version = namespace.get('current_version')
    if version is None or version <= builtin_search_engines.current_version:
        return  # not newer than the builtin implementation
    current_search_engines = PatchedSearchEngines(namespace)
def patch_plugins():
    '''Apply every cached metadata-source update: install the patched
    search-engines module (if any) and register updated Source plugins.'''
    from calibre.customize.ui import patch_metadata_plugins
    patches = {}
    for name, val in iteritems(cache):
        if name == 'hashes':
            continue
        if name == 'search_engines':
            patch_search_engines(val)
            # FIX: the search_engines entry is not a metadata-source
            # plugin; without this continue its source was exec'd a second
            # time by load_plugin() below for nothing.
            continue
        try:
            p = load_plugin(val)
        except Exception:
            p = None
        if p is not None:
            patches[p.name] = p
    patch_metadata_plugins(patches)
def update_needed():
    # Compare the server's published plugin hashes against our cached ones
    # and return {name: hash} for every plugin that is new or changed.
    # Plugins that disappeared from the server are purged from the cache.
    needed = {}
    current_hashes = cache.get('hashes', {})
    hashes = get_https_resource_securely(
        'https://code.calibre-ebook.com/metadata-sources/hashes.json')
    hashes = bz2.decompress(hashes)
    hashes = json.loads(hashes)
    for k, v in iteritems(hashes):
        if current_hashes.get(k) != v:
            needed[k] = v
    remove = set(current_hashes) - set(hashes)
    if remove:
        # the with block batches the cache writes into one commit
        with cache:
            for k in remove:
                current_hashes.pop(k, None)
                del cache[k]
            cache['hashes'] = current_hashes
    return needed
def update_plugin(name, updated, expected_hash):
    # Download one plugin, verify the hash of the compressed payload, and
    # record (decompressed source, hash) in ``updated``.
    compressed = get_https_resource_securely('https://code.calibre-ebook.com/metadata-sources/' + name)
    actual_hash = hashlib.sha1(compressed).hexdigest()
    if actual_hash != expected_hash:
        raise ValueError('Actual hash did not match expected hash, probably an update occurred while downloading')
    updated[name] = bz2.decompress(compressed).decode('utf-8'), actual_hash
def main(report_error=prints, report_action=prints):
    # Fetch updated metadata-source plugins from the calibre server into
    # the local JSON cache. All-or-nothing: the cache is only written when
    # every needed plugin downloaded and verified successfully.
    try:
        # Throttle: skip entirely if the cache was touched recently
        if time.time() - cache.mtime() < UPDATE_INTERVAL:
            report_action('Metadata sources cache was recently updated not updating again')
            return
        try:
            report_action('Fetching metadata source hashes...')
            needed = update_needed()
        except Exception as e:
            report_error(
                'Failed to get metadata sources hashes with error: {}'.format(as_unicode(e)))
            return
        if not needed:
            cache.touch()
            return
        updated = {}
        for name, expected_hash in iteritems(needed):
            report_action('Updating metadata source {}...'.format(name))
            try:
                update_plugin(name, updated, expected_hash)
            except Exception as e:
                report_error('Failed to get plugin {} with error: {}'.format(
                    name, as_unicode(e)))
                break
        else:  # for-else: no download failed, commit everything at once
            hashes = cache.get('hashes', {})
            for name in updated:
                hashes[name] = updated[name][1]
            with cache:
                cache['hashes'] = hashes
                for name in updated:
                    cache[name] = updated[name][0]
    finally:
        # Always clear the background-worker slot so update_sources() can
        # be called again
        update_sources.worker = None
def update_sources(wait_for_completion=False):
    # Kick off (at most one) background update of the metadata-sources
    # cache. Returns False when an update is already in flight. Errors
    # reported by the worker accumulate in update_sources.errors.
    if update_sources.worker is not None:
        return False
    update_sources.errors = errors = []
    worker = Thread(
        target=main, args=(errors.append, debug_print), name='MSourcesUpdater')
    worker.daemon = True
    update_sources.worker = worker
    worker.start()
    if wait_for_completion:
        worker.join()
    return True


update_sources.worker = None
if __name__ == '__main__':
    # Manual invocation: run the update, echoing errors to stderr and
    # exiting non-zero if any error was reported.
    def re(x):
        prints(x, file=sys.stderr)
        # FIX: this flag must be set inside the error callback; in the
        # original it sat at the outer level, where it was a dead store
        # immediately overwritten by ``re.ok = True`` — so errors could
        # never produce a non-zero exit status.
        re.ok = False
    re.ok = True
    main(re)
    if not re.ok:
        raise SystemExit(1)
| 5,238 | Python | .py | 148 | 27.898649 | 114 | 0.633867 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,553 | json_codec.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/book/json_codec.py | '''
Created on 4 Jun 2010
@author: charles
'''
import json
import traceback
from datetime import datetime, time
from calibre import isbytestring
from calibre.constants import filesystem_encoding, preferred_encoding
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.library.field_metadata import FieldMetadata
from polyglot.binary import as_base64_unicode, from_base64_bytes
from polyglot.builtins import as_bytes, iteritems, itervalues
# Translate datetimes to and from strings. The string form is the datetime in
# UTC. The returned date is also UTC
def string_to_datetime(src):
    # Inverse of datetime_to_string(): the literal string "None" (and any
    # unparseable value) maps to None, everything else is parsed as
    # ISO-8601 (UTC in, UTC out).
    from calibre.utils.iso8601 import parse_iso8601
    if src == "None":
        return None
    try:
        return parse_iso8601(src)
    except:
        return None
def datetime_to_string(dateval):
    # Serialize a date/datetime to ISO-8601; None and undefined dates
    # become the literal string "None". Naive datetimes are assumed local.
    from calibre.utils.date import UNDEFINED_DATE, isoformat, local_tz
    if dateval is None:
        return "None"
    value = dateval
    if not isinstance(value, datetime):
        # plain date: promote to a datetime at midnight
        value = datetime.combine(value, time())
    if hasattr(value, 'tzinfo') and value.tzinfo is None:
        value = value.replace(tzinfo=local_tz)
    return "None" if value <= UNDEFINED_DATE else isoformat(value)
def encode_thumbnail(thumbnail):
    '''
    Encode the image part of a thumbnail, then return the 3 part tuple
    (width, height, base64-encoded image data), or None if it cannot be
    encoded.
    '''
    from calibre.utils.imghdr import identify
    if thumbnail is None:
        return None
    triple = thumbnail
    if not isinstance(triple, (tuple, list)):
        # Bare image data: identify() supplies the dimensions
        try:
            w, h = identify(as_bytes(triple))[1:]
            if w < 0 or h < 0:
                return None
            triple = (w, h, triple)
        except Exception:
            return None
    return (triple[0], triple[1], as_base64_unicode(triple[2]))
def decode_thumbnail(tup):
    '''
    Decode an encoded thumbnail into its 3 component parts
    (width, height, raw image bytes).
    '''
    if tup is None:
        return None
    width, height, b64data = tup[0], tup[1], tup[2]
    return (width, height, from_base64_bytes(b64data))
def object_to_unicode(obj, enc=preferred_encoding):
    # Recursively convert byte strings inside obj to unicode. Note that
    # values nested inside containers are decoded with the *default*
    # encoding, because the recursive calls do not forward ``enc``
    # (preserved legacy behavior).
    if isbytestring(obj):
        return obj.decode(enc, 'replace')
    if isinstance(obj, (list, tuple)):
        return [x.decode(enc, 'replace') if isbytestring(x) else object_to_unicode(x)
                for x in obj]
    if isinstance(obj, dict):
        return {object_to_unicode(k): object_to_unicode(v) for k, v in obj.items()}
    return obj
def encode_is_multiple(fm):
    # Serialize the is_multiple entry of a field-metadata dict: the modern
    # dict form is stashed in is_multiple2 while is_multiple reverts to
    # the legacy single-character separator.
    im = fm.get('is_multiple', None)
    if im:
        fm['is_multiple2'] = im
        fm['is_multiple'] = ',' if fm.get('datatype', None) == 'composite' else '|'
    else:
        fm['is_multiple'] = None
        fm['is_multiple2'] = {}
def decode_is_multiple(fm):
    # Normalize the is_multiple entry of a field-metadata dict to the
    # modern dict form, migrating legacy single-character values.
    modern = fm.get('is_multiple2', None)
    if modern:
        fm['is_multiple'] = modern
        del fm['is_multiple2']
        return
    # Legacy path: translate the old separator character to a dict
    legacy = fm.get('is_multiple', {})
    if legacy:
        if fm.get('datatype', None) == 'composite':
            legacy = {'cache_to_list': ',', 'ui_to_list': ',',
                      'list_to_ui': ', '}
        elif fm.get('display', {}).get('is_names', False):
            legacy = {'cache_to_list': '|', 'ui_to_list': '&',
                      'list_to_ui': ', '}
        else:
            legacy = {'cache_to_list': '|', 'ui_to_list': ',',
                      'list_to_ui': ', '}
    elif legacy is None:
        legacy = {}
    fm['is_multiple'] = legacy
class JsonCodec:
    # Serializes/deserializes book metadata (including custom columns) to
    # and from JSON, as used by device drivers to cache booklist metadata.
    def __init__(self, field_metadata=None):
        # field_metadata: a FieldMetadata instance; a fresh one is created
        # when not supplied
        self.field_metadata = field_metadata or FieldMetadata()
    def encode_to_file(self, file_, booklist):
        # Write the JSON encoding of booklist to the binary file object file_
        data = json.dumps(self.encode_booklist_metadata(booklist), indent=2)
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        file_.write(data)
    def encode_booklist_metadata(self, booklist):
        # One JSON-serializable dict per book
        result = []
        for book in booklist:
            result.append(self.encode_book_metadata(book))
        return result
    def encode_book_metadata(self, book):
        # Encode only the fields declared serializable
        result = {}
        for key in SERIALIZABLE_FIELDS:
            result[key] = self.encode_metadata_attr(book, key)
        return result
    def encode_metadata_attr(self, book, key):
        # Convert one attribute of book into a JSON-serializable value,
        # with special handling for user metadata, thumbnails, byte
        # strings and datetimes.
        if key == 'user_metadata':
            meta = book.get_all_user_metadata(make_copy=True)
            for fm in itervalues(meta):
                if fm['datatype'] == 'datetime':
                    fm['#value#'] = datetime_to_string(fm['#value#'])
                encode_is_multiple(fm)
            return meta
        if key in self.field_metadata:
            datatype = self.field_metadata[key]['datatype']
        else:
            datatype = None
        value = book.get(key)
        if key == 'thumbnail':
            return encode_thumbnail(value)
        elif isbytestring(value):  # decode raw bytes to unicode
            # lpath is a filesystem path, so it uses the filesystem encoding
            enc = filesystem_encoding if key == 'lpath' else preferred_encoding
            return object_to_unicode(value, enc=enc)
        elif datatype == 'datetime':
            return datetime_to_string(value)
        else:
            return object_to_unicode(value)
    def decode_from_file(self, file_, booklist, book_class, prefix):
        # Read JSON from file_ and append one book_class instance per
        # entry to booklist. Errors are printed and swallowed.
        js = []
        try:
            js = json.load(file_)
            for item in js:
                entry = self.raw_to_book(item, book_class, prefix)
                if entry is not None:
                    booklist.append(entry)
        except:
            print('exception during JSON decode_from_file')
            traceback.print_exc()
    def raw_to_book(self, json_book, book_class, prefix):
        # Build a book_class(prefix, lpath) instance from one decoded JSON
        # dict; returns None (implicitly) on error.
        try:
            book = book_class(prefix, json_book.get('lpath', None))
            for key,val in iteritems(json_book):
                meta = self.decode_metadata(key, val)
                if key == 'user_metadata':
                    book.set_all_user_metadata(meta)
                else:
                    # 'classifiers' is the legacy name for 'identifiers'
                    if key == 'classifiers':
                        key = 'identifiers'
                    setattr(book, key, meta)
            return book
        except:
            print('exception during JSON decoding')
            traceback.print_exc()
    def decode_metadata(self, key, value):
        # Inverse of encode_metadata_attr for a single key/value pair
        if key == 'classifiers':
            key = 'identifiers'
        if key == 'user_metadata':
            for fm in itervalues(value):
                if fm['datatype'] == 'datetime':
                    fm['#value#'] = string_to_datetime(fm['#value#'])
                decode_is_multiple(fm)
            return value
        elif key in self.field_metadata:
            if self.field_metadata[key]['datatype'] == 'datetime':
                return string_to_datetime(value)
        if key == 'thumbnail':
            return decode_thumbnail(value)
        return value
| 7,056 | Python | .py | 187 | 28.176471 | 81 | 0.577131 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,554 | formatter.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/book/formatter.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from numbers import Number
from calibre.ebooks.metadata.book import ALL_METADATA_FIELDS, TOP_LEVEL_IDENTIFIERS
from calibre.utils.formatter import TemplateFormatter
class SafeFormat(TemplateFormatter):
    # A TemplateFormatter that resolves field references against a book
    # object (self.book) and degrades to the empty string instead of
    # propagating missing/None values.
    def __init__(self):
        TemplateFormatter.__init__(self)
    def get_value(self, orig_key, args, kwargs):
        # Resolve orig_key (a field lookup name or search term) to a
        # formatted value from self.book. Raises ValueError only for a
        # completely unknown field.
        if not orig_key or isinstance(orig_key, Number):
            return ''
        key = orig_key = orig_key.lower()
        if (key != 'title_sort' and key not in TOP_LEVEL_IDENTIFIERS and
                key not in ALL_METADATA_FIELDS):
            from calibre.ebooks.metadata.book.base import field_metadata
            # Not a standard field: try interpreting it as a search term,
            # then as a plain attribute of the book object
            key = field_metadata.search_term_to_field_key(key)
            if key is None or (self.book and
                                key not in self.book.all_field_keys()):
                if hasattr(self.book, orig_key):
                    key = orig_key
                else:
                    raise ValueError(_('Value: unknown field ') + orig_key)
        try:
            b = self.book.get_user_metadata(key, False)
        except:
            b = None
        # Unset numeric custom columns render as empty, not as 0/None
        if b and b['datatype'] in {'int', 'float'} and self.book.get(key, None) is None:
            v = ''
        else:
            v = self.book.format_field(key, series_with_index=False)[1]
        if v is None:
            return ''
        if v == '':
            return ''
        return v
| 1,512 | Python | .py | 36 | 31.444444 | 88 | 0.576975 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,555 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/book/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
All fields must have a NULL value represented as None for simple types,
an empty list/dictionary for complex types and (None, None) for cover_data
'''
SOCIAL_METADATA_FIELDS = frozenset((
    'tags', # Ordered list
    'rating', # A floating point number between 0 and 10
    'comments', # A simple HTML enabled string
    'series', # A simple string
    'series_index', # A floating point number
    # Of the form { scheme1:value1, scheme2:value2}
    # For example: {'isbn':'123456789', 'doi':'xxxx', ... }
    'identifiers',
))
'''
The list of names that convert to identifiers when in get and set.
'''
TOP_LEVEL_IDENTIFIERS = frozenset((
    'isbn',
))
PUBLICATION_METADATA_FIELDS = frozenset((
    'title', # title must never be None. Should be _('Unknown')
    # Pseudo field that can be set, but if not set is auto generated
    # from title and languages
    'title_sort',
    'authors', # Ordered list. Must never be None, can be [_('Unknown')]
    'author_sort_map', # Map of sort strings for each author
    # Pseudo field that can be set, but if not set is auto generated
    # from authors and languages
    'author_sort',
    'book_producer',
    'timestamp', # Dates and times must be timezone aware
    'pubdate',
    'last_modified',
    'rights',
    # So far only known publication type is periodical:calibre
    # If None, means book
    'publication_type',
    'uuid', # A UUID usually of type 4
    'languages', # ordered list of languages in this publication
    'publisher', # Simple string, no special semantics
    # Absolute path to image file encoded in filesystem_encoding
    'cover',
    # Of the form (format, data) where format is, e.g. 'jpeg', 'png', 'gif'...
    'cover_data',
    # Either thumbnail data, or an object with the attribute
    # image_path which is the path to an image file, encoded
    # in filesystem_encoding
    'thumbnail',
))
BOOK_STRUCTURE_FIELDS = frozenset((
    # These are used by code, Null values are None.
    'toc', 'spine', 'guide', 'manifest',
))
USER_METADATA_FIELDS = frozenset((
    # A dict of dicts similar to field_metadata. Each field description dict
    # also contains a value field with the key #value#.
    'user_metadata',
))
DEVICE_METADATA_FIELDS = frozenset((
    'device_collections', # Ordered list of strings
    'lpath', # Unicode, / separated
    'size', # In bytes
    'mime', # Mimetype of the book file being represented
))
CALIBRE_METADATA_FIELDS = frozenset((
    'application_id', # An application id, currently set to the db_id.
    'db_id', # the calibre primary key of the item.
    'formats', # list of formats (extensions) for this book
    # a dict of user category names, where the value is a list of item names
    # from the book that are in that category
    'user_categories',
    # a dict of items to associated hyperlink
    'link_maps',
))
# Derived unions of the groups above:
ALL_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
    PUBLICATION_METADATA_FIELDS).union(
    BOOK_STRUCTURE_FIELDS).union(
    USER_METADATA_FIELDS).union(
    DEVICE_METADATA_FIELDS).union(
    CALIBRE_METADATA_FIELDS)
# All fields except custom fields
STANDARD_METADATA_FIELDS = SOCIAL_METADATA_FIELDS.union(
    PUBLICATION_METADATA_FIELDS).union(
    BOOK_STRUCTURE_FIELDS).union(
    DEVICE_METADATA_FIELDS).union(
    CALIBRE_METADATA_FIELDS)
# Metadata fields that smart update must do special processing to copy.
SC_FIELDS_NOT_COPIED = frozenset(('title', 'title_sort', 'authors',
                                  'author_sort', 'author_sort_map',
                                  'cover_data', 'tags', 'languages',
                                  'identifiers'))
# Metadata fields that smart update should copy only if the source is not None
SC_FIELDS_COPY_NOT_NULL = frozenset(('device_collections', 'lpath', 'size', 'comments', 'thumbnail'))
# Metadata fields that smart update should copy without special handling
SC_COPYABLE_FIELDS = SOCIAL_METADATA_FIELDS.union(
    PUBLICATION_METADATA_FIELDS).union(
    BOOK_STRUCTURE_FIELDS).union(
    DEVICE_METADATA_FIELDS).union(
    CALIBRE_METADATA_FIELDS) - \
    SC_FIELDS_NOT_COPIED.union(
    SC_FIELDS_COPY_NOT_NULL)
# Fields included when a book is serialized (e.g. by JsonCodec); the
# excluded fields below are reconstructed on demand instead
SERIALIZABLE_FIELDS = SOCIAL_METADATA_FIELDS.union(
    USER_METADATA_FIELDS).union(
    PUBLICATION_METADATA_FIELDS).union(
    CALIBRE_METADATA_FIELDS).union(
    DEVICE_METADATA_FIELDS) - \
    frozenset(('device_collections', 'formats',
        'cover_data'))
# these are rebuilt when needed
| 5,282 | Python | .py | 114 | 37.280702 | 102 | 0.599884 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,556 | base.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/book/base.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import copy
import traceback
from contextlib import suppress
from calibre import prints
from calibre.constants import DEBUG
from calibre.ebooks.metadata.book import ALL_METADATA_FIELDS, SC_COPYABLE_FIELDS, SC_FIELDS_COPY_NOT_NULL, STANDARD_METADATA_FIELDS, TOP_LEVEL_IDENTIFIERS
from calibre.library.field_metadata import FieldMetadata
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import sort_key
from calibre.utils.localization import ngettext
from polyglot.builtins import iteritems, string_or_bytes
# Special sets used to optimize the performance of getting and setting
# attributes on Metadata objects
SIMPLE_GET = frozenset(STANDARD_METADATA_FIELDS - TOP_LEVEL_IDENTIFIERS)
SIMPLE_SET = frozenset(SIMPLE_GET - {'identifiers'})
def human_readable(size, precision=2):
    """Format *size* (a byte count) as a megabyte string, e.g. '1.50 MB'.

    Values below 0.1 MB collapse to the literal '<0.1 MB'.
    """
    mb = size / (1024 * 1024)
    if mb < 0.1:
        return '<0.1 MB'
    return f'{mb:.{precision}f} MB'
# Canonical "empty" value for each metadata field. Used to initialize new
# Metadata objects (see __init__) and to decide nullness (see is_null()).
NULL_VALUES = {
    'user_metadata': {},
    'cover_data' : (None, None),
    'tags' : [],
    'identifiers' : {},
    'languages' : [],
    'device_collections': [],
    'author_sort_map': {},
    'authors' : [_('Unknown')],
    'author_sort' : _('Unknown'),
    'title' : _('Unknown'),
    'user_categories' : {},
    'link_maps' : {},
    'language' : 'und'
}
# Module-level FieldMetadata singleton shared by all Metadata instances
field_metadata = FieldMetadata()
def reset_field_metadata():
    # Recreate the shared FieldMetadata singleton, discarding any state
    # accumulated on the old instance.
    global field_metadata
    field_metadata = FieldMetadata()
def ck(typ):
    # Canonicalise an identifier *type*: ICU-lowercase, trim surrounding
    # whitespace, and delete any ':' and ',' characters.
    lowered = icu_lower(typ).strip()
    return lowered.translate({ord(':'): None, ord(','): None})
def cv(val):
    # Canonicalise an identifier *value*: trim surrounding whitespace and
    # replace every comma with '|'.
    stripped = val.strip()
    return stripped.replace(',', '|')
class Metadata:
'''
A class representing all the metadata for a book. The various standard metadata
fields are available as attributes of this object. You can also stick
arbitrary attributes onto this object.
Metadata from custom columns should be accessed via the get() method,
passing in the lookup name for the column, for example: "#mytags".
Use the :meth:`is_null` method to test if a field is null.
This object also has functions to format fields into strings.
The list of standard metadata fields grows with time is in
:data:`STANDARD_METADATA_FIELDS`.
Please keep the method based API of this class to a minimum. Every method
becomes a reserved field name.
'''
__calibre_serializable__ = True
    def __init__(self, title, authors=(_('Unknown'),), other=None, template_cache=None,
                 formatter=None):
        '''
        @param title: title or ``_('Unknown')``
        @param authors: List of strings or []
        @param other: None or a metadata object
        '''
        # 'language' is removed from the backing dict: it is synthesized from
        # 'languages' on access (see __getattribute__)
        _data = copy.deepcopy(NULL_VALUES)
        _data.pop('language')
        object.__setattr__(self, '_data', _data)
        if other is not None:
            self.smart_update(other)
        else:
            if title:
                self.title = title
            if authors:
                # List of strings or []
                self.author = list(authors) if authors else []  # Needed for backward compatibility
                self.authors = list(authors) if authors else []
        from calibre.ebooks.metadata.book.formatter import SafeFormat
        self.formatter = SafeFormat() if formatter is None else formatter
        self.template_cache = template_cache
def is_null(self, field):
'''
Return True if the value of field is null in this object.
'null' means it is unknown or evaluates to False. So a title of
_('Unknown') is null or a language of 'und' is null.
Be careful with numeric fields since this will return True for zero as
well as None.
Also returns True if the field does not exist.
'''
try:
null_val = NULL_VALUES.get(field, None)
val = getattr(self, field, None)
return not val or val == null_val
except:
return True
def set_null(self, field):
null_val = copy.copy(NULL_VALUES.get(field))
setattr(self, field, null_val)
    def __getattribute__(self, field):
        # All attribute reads funnel through here. Resolution order:
        # 1) simple standard fields from the backing _data dict
        # 2) top-level identifiers (e.g. isbn) from _data['identifiers']
        # 3) 'language' synthesized from the first entry of 'languages'
        # 4) real instance/class attributes
        # 5) custom (user) columns, lazily rendering composite templates
        # 6) '#col_index' pseudo-fields mapped to get_extra('#col')
        _data = object.__getattribute__(self, '_data')
        if field in SIMPLE_GET:
            return _data.get(field, None)
        if field in TOP_LEVEL_IDENTIFIERS:
            return _data.get('identifiers').get(field, None)
        if field == 'language':
            try:
                return _data.get('languages', [])[0]
            except:
                return NULL_VALUES['language']
        try:
            return object.__getattribute__(self, field)
        except AttributeError:
            pass
        if field in _data['user_metadata']:
            d = _data['user_metadata'][field]
            val = d['#value#']
            if val is None and d['datatype'] == 'composite':
                # The marker value guards against a composite template that
                # (indirectly) references itself while being rendered
                d['#value#'] = 'RECURSIVE_COMPOSITE FIELD (Metadata) ' + field
                val = d['#value#'] = self.formatter.safe_format(
                    d['display']['composite_template'],
                    self,
                    _('TEMPLATE ERROR'),
                    self, column_name=field,
                    template_cache=self.template_cache).strip()
            return val
        if field.startswith('#') and field.endswith('_index'):
            try:
                return self.get_extra(field[:-6])
            except:
                pass
        raise AttributeError(
            'Metadata object has no attribute named: '+ repr(field))
    def __setattr__(self, field, val, extra=None):
        # All attribute writes funnel through here. Standard fields go into
        # the backing _data dict; custom columns store into '#value#' (with
        # *extra* kept as '#extra#', e.g. a series index); anything else
        # becomes a plain instance attribute.
        _data = object.__getattribute__(self, '_data')
        if field in SIMPLE_SET:
            if val is None:
                val = copy.copy(NULL_VALUES.get(field, None))
            _data[field] = val
        elif field in TOP_LEVEL_IDENTIFIERS:
            field, val = self._clean_identifier(field, val)
            identifiers = _data['identifiers']
            identifiers.pop(field, None)
            if val:
                identifiers[field] = val
        elif field == 'identifiers':
            if not val:
                val = copy.copy(NULL_VALUES.get('identifiers', None))
            self.set_identifiers(val)
        elif field == 'language':
            # 'language' is stored as a one-element 'languages' list;
            # 'und' (undefined) is treated as empty
            langs = []
            if val and val.lower() != 'und':
                langs = [val]
            _data['languages'] = langs
        elif field in _data['user_metadata']:
            d = _data['user_metadata'][field]
            d['#value#'] = val
            d['#extra#'] = extra
        else:
            # You are allowed to stick arbitrary attributes onto this object as
            # long as they don't conflict with global or user metadata names
            # Don't abuse this privilege
            self.__dict__[field] = val
def __iter__(self):
return iter(object.__getattribute__(self, '_data'))
    def has_key(self, key):
        # True if *key* is a standard field or a custom column present on this
        # book. Named after the old dict.has_key() API.
        return key in STANDARD_METADATA_FIELDS or key in object.__getattribute__(self, '_data')['user_metadata']
def _evaluate_all_composites(self):
custom_fields = object.__getattribute__(self, '_data')['user_metadata']
for field in custom_fields:
self._evaluate_composite(field)
def _evaluate_composite(self, field):
f = object.__getattribute__(self, '_data')['user_metadata'].get(field, None)
if f is not None:
if f['datatype'] == 'composite' and f['#value#'] is None:
self.get(field)
    def deepcopy(self, class_generator=lambda : Metadata(None)):
        ''' Do not use this method unless you know what you are doing, if you
        want to create a simple clone of this object, use :meth:`deepcopy_metadata`
        instead. Class_generator must be a function that returns an instance
        of Metadata or a subclass of it.'''
        # We don't need to evaluate all the composites here because we
        # are returning a "real" Metadata instance that has __getattribute__.
        m = class_generator()
        if not isinstance(m, Metadata):
            # Silently refuse to copy into a non-Metadata instance
            return None
        object.__setattr__(m, '__dict__', copy.deepcopy(self.__dict__))
        return m
    def deepcopy_metadata(self):
        # Return a fresh Metadata object with a deep copy of the backing dict.
        # We don't need to evaluate all the composites here because we
        # are returning a "real" Metadata instance that has __getattribute__.
        m = Metadata(None)
        object.__setattr__(m, '_data', copy.deepcopy(object.__getattribute__(self, '_data')))
        # Also copy these two top-level attributes as they can appear in templates.
        with suppress(AttributeError):
            object.__setattr__(m, 'id', copy.copy(self.__getattribute__('id')))
        with suppress(AttributeError):
            object.__setattr__(m, 'has_cover', copy.copy(self.__getattribute__('has_cover')))
        return m
def get(self, field, default=None):
try:
return self.__getattribute__(field)
except AttributeError:
return default
    def get_extra(self, field, default=None):
        # Return the '#extra#' companion value of the custom column *field*
        # (e.g. the series index), or *default* when no extra value is stored.
        # Raises AttributeError if *field* is not a custom column at all.
        # Don't need to evaluate all composites because a composite can't have
        # an extra value
        _data = object.__getattribute__(self, '_data')
        if field in _data['user_metadata']:
            try:
                return _data['user_metadata'][field]['#extra#']
            except:
                return default
        raise AttributeError(
            'Metadata object has no attribute named: '+ repr(field))
    def set(self, field, val, extra=None):
        # dict-style setter; *extra* is stored as the column's '#extra#'
        # companion value (see __setattr__), e.g. a custom series index.
        self.__setattr__(field, val, extra)
def get_identifiers(self):
'''
Return a copy of the identifiers dictionary.
The dict is small, and the penalty for using a reference where a copy is
needed is large. Also, we don't want any manipulations of the returned
dict to show up in the book.
'''
ans = object.__getattribute__(self, '_data')['identifiers']
if not ans:
ans = {}
return copy.deepcopy(ans)
def _clean_identifier(self, typ, val):
if typ:
typ = ck(typ)
if val:
val = cv(val)
return typ, val
    def set_identifiers(self, identifiers):
        '''
        Set all identifiers. Note that if you previously set ISBN, calling
        this method will delete it.
        '''
        # Entries with a falsy type or value are dropped; the rest are
        # canonicalised via ck()/cv()
        cleaned = {ck(k):cv(v) for k, v in iteritems(identifiers) if k and v}
        object.__getattribute__(self, '_data')['identifiers'] = cleaned
def set_identifier(self, typ, val):
'If val is empty, deletes identifier of type typ'
typ, val = self._clean_identifier(typ, val)
if not typ:
return
identifiers = object.__getattribute__(self, '_data')['identifiers']
identifiers.pop(typ, None)
if val:
identifiers[typ] = val
def has_identifier(self, typ):
identifiers = object.__getattribute__(self, '_data')['identifiers']
return typ in identifiers
# field-oriented interface. Intended to be the same as in LibraryDatabase
    def standard_field_keys(self):
        '''
        return a list of all possible keys, even if this book doesn't have them
        '''
        # NOTE: actually returns the STANDARD_METADATA_FIELDS frozenset
        return STANDARD_METADATA_FIELDS
    def custom_field_keys(self):
        '''
        return a list of the custom fields in this book
        '''
        # NOTE: actually returns an iterator over the custom column names
        return iter(object.__getattribute__(self, '_data')['user_metadata'])
def all_field_keys(self):
'''
All field keys known by this instance, even if their value is None
'''
_data = object.__getattribute__(self, '_data')
return frozenset(ALL_METADATA_FIELDS.union(frozenset(_data['user_metadata'])))
    def metadata_for_field(self, key):
        '''
        return metadata describing a standard or custom field.
        '''
        # Custom columns come from this instance; standard fields from the
        # module-level field_metadata singleton
        if key not in self.custom_field_keys():
            return self.get_standard_metadata(key, make_copy=False)
        return self.get_user_metadata(key, make_copy=False)
    def all_non_none_fields(self):
        '''
        Return a dictionary containing all non-None metadata fields, including
        the custom ones.
        '''
        result = {}
        _data = object.__getattribute__(self, '_data')
        for attr in STANDARD_METADATA_FIELDS:
            v = _data.get(attr, None)
            if v is not None:
                result[attr] = v
        # separate these because it uses the self.get(), not _data.get()
        for attr in TOP_LEVEL_IDENTIFIERS:
            v = self.get(attr, None)
            if v is not None:
                result[attr] = v
        for attr in _data['user_metadata']:
            v = self.get(attr, None)
            if v is not None:
                result[attr] = v
                if _data['user_metadata'][attr]['datatype'] == 'series':
                    # Series columns also expose their index as <name>_index
                    result[attr+'_index'] = _data['user_metadata'][attr]['#extra#']
        return result
# End of field-oriented interface
# Extended interfaces. These permit one to get copies of metadata dictionaries, and to
# get and set custom field metadata
def get_standard_metadata(self, field, make_copy):
'''
return field metadata from the field if it is there. Otherwise return
None. field is the key name, not the label. Return a copy if requested,
just in case the user wants to change values in the dict.
'''
if field in field_metadata and field_metadata[field]['kind'] == 'field':
if make_copy:
return copy.deepcopy(field_metadata[field])
return field_metadata[field]
return None
def get_all_standard_metadata(self, make_copy):
'''
return a dict containing all the standard field metadata associated with
the book.
'''
if not make_copy:
return field_metadata
res = {}
for k in field_metadata:
if field_metadata[k]['kind'] == 'field':
res[k] = copy.deepcopy(field_metadata[k])
return res
def get_all_user_metadata(self, make_copy):
'''
return a dict containing all the custom field metadata associated with
the book.
'''
# Must evaluate all composites because we are returning a dict, not a
# Metadata instance
self._evaluate_all_composites()
_data = object.__getattribute__(self, '_data')
user_metadata = _data['user_metadata']
if not make_copy:
return user_metadata
res = {}
for k in user_metadata:
res[k] = copy.deepcopy(user_metadata[k])
return res
    def get_user_metadata(self, field, make_copy):
        '''
        return field metadata from the object if it is there. Otherwise return
        None. field is the key name, not the label. Return a copy if requested,
        just in case the user wants to change values in the dict.
        '''
        _data = object.__getattribute__(self, '_data')['user_metadata']
        if field in _data:
            # Must evaluate the field because it might be a composite. It won't
            # be evaluated on demand because we are returning its dict, not a
            # Metadata instance
            self._evaluate_composite(field)
            if make_copy:
                return copy.deepcopy(_data[field])
            return _data[field]
        return None
    def set_all_user_metadata(self, metadata):
        '''
        store custom field metadata into the object. Field is the key name
        not the label
        '''
        if metadata is None:
            # Passing None is a caller bug; dump the stack to aid debugging
            traceback.print_stack()
            return
        um = {}
        for key, meta in iteritems(metadata):
            m = meta.copy()
            # Ensure every column has a '#value#' slot with a type-appropriate
            # empty value
            if '#value#' not in m:
                if m['datatype'] == 'text' and m['is_multiple']:
                    m['#value#'] = []
                else:
                    m['#value#'] = None
            um[key] = m
        _data = object.__getattribute__(self, '_data')
        _data['user_metadata'] = um
    def set_user_metadata(self, field, metadata):
        '''
        store custom field metadata for one column into the object. Field is
        the key name not the label
        '''
        if field is not None:
            if not field.startswith('#'):
                raise AttributeError(
                    'Custom field name %s must begin with \'#\''%repr(field))
            if metadata is None:
                # Passing None is a caller bug; dump the stack to aid debugging
                traceback.print_stack()
                return
            m = dict(metadata)
            # Copying the elements should not be necessary. The objects referenced
            # in the dict should not change. Of course, they can be replaced.
            # for k,v in iteritems(metadata):
            #     m[k] = copy.copy(v)
            if '#value#' not in m:
                if m['datatype'] == 'text' and m['is_multiple']:
                    m['#value#'] = []
                else:
                    m['#value#'] = None
            _data = object.__getattribute__(self, '_data')
            _data['user_metadata'][field] = m
def remove_stale_user_metadata(self, other_mi):
'''
Remove user metadata keys (custom column keys) if they
don't exist in 'other_mi', which must be a metadata object
'''
me = self.get_all_user_metadata(make_copy=False)
other = set(other_mi.custom_field_keys())
new = {}
for k,v in me.items():
if k in other:
new[k] = v
self.set_all_user_metadata(new)
    def template_to_attribute(self, other, ops):
        '''
        Takes a list [(src,dest), (src,dest)], evaluates the template in the
        context of other, then copies the result to self[dest]. This is on a
        best-efforts basis. Some assignments can make no sense.
        '''
        if not ops:
            return
        from calibre.ebooks.metadata.book.formatter import SafeFormat
        formatter = SafeFormat()
        for op in ops:
            try:
                src = op[0]
                dest = op[1]
                val = formatter.safe_format(src, other, 'PLUGBOARD TEMPLATE ERROR', other)
                # Multi-valued destinations are split on their separator:
                # tags on ',', authors on '&'
                if dest == 'tags':
                    self.set(dest, [f.strip() for f in val.split(',') if f.strip()])
                elif dest == 'authors':
                    self.set(dest, [f.strip() for f in val.split('&') if f.strip()])
                else:
                    self.set(dest, val)
            except:
                if DEBUG:
                    traceback.print_exc()
# Old Metadata API {{{
    def print_all_attributes(self):
        # Debug helper: dump every standard field and every custom column.
        for x in STANDARD_METADATA_FIELDS:
            prints('%s:'%x, getattr(self, x, 'None'))
        for x in self.custom_field_keys():
            meta = self.get_user_metadata(x, make_copy=False)
            if meta is not None:
                prints(x, meta)
        prints('--------------')
    def smart_update(self, other, replace_metadata=False):
        '''
        Merge the information in `other` into self. In case of conflicts, the information
        in `other` takes precedence, unless the information in `other` is NULL.
        '''
        def copy_not_none(dest, src, attr):
            # Copy attr from src only when it is neither None nor the null value
            v = getattr(src, attr, None)
            if v is not None and v != NULL_VALUES.get(attr, None):
                setattr(dest, attr, copy.deepcopy(v))
        unknown = _('Unknown')
        if other.title and other.title != unknown:
            self.title = other.title
        if hasattr(other, 'title_sort'):
            self.title_sort = other.title_sort
        # Take other's authors unless they are 'Unknown' — except when ours
        # are also effectively unknown
        if other.authors and (
                other.authors[0] != unknown or (
                    not self.authors or (
                        len(self.authors) == 1 and self.authors[0] == unknown and
                        getattr(self, 'author_sort', None) == unknown
                    )
                )
        ):
            self.authors = list(other.authors)
        if hasattr(other, 'author_sort_map'):
            self.author_sort_map = dict(other.author_sort_map)
        if hasattr(other, 'author_sort'):
            self.author_sort = other.author_sort
        if replace_metadata:
            # Wholesale replacement: clobber copyable fields from other
            # SPECIAL_FIELDS = frozenset(['lpath', 'size', 'comments', 'thumbnail'])
            for attr in SC_COPYABLE_FIELDS:
                setattr(self, attr, getattr(other, attr, 1.0 if
                    attr == 'series_index' else None))
            self.tags = other.tags
            self.cover_data = getattr(other, 'cover_data',
                NULL_VALUES['cover_data'])
            self.set_all_user_metadata(other.get_all_user_metadata(make_copy=True))
            for x in SC_FIELDS_COPY_NOT_NULL:
                copy_not_none(self, other, x)
            if callable(getattr(other, 'get_identifiers', None)):
                self.set_identifiers(other.get_identifiers())
            # language is handled below
        else:
            # Merging: only non-null values from other win
            for attr in SC_COPYABLE_FIELDS:
                copy_not_none(self, other, attr)
            for x in SC_FIELDS_COPY_NOT_NULL:
                copy_not_none(self, other, x)
            if other.tags:
                # Case-insensitive but case preserving merging
                lotags = [t.lower() for t in other.tags]
                lstags = [t.lower() for t in self.tags]
                ot, st = map(frozenset, (lotags, lstags))
                for t in st.intersection(ot):
                    sidx = lstags.index(t)
                    oidx = lotags.index(t)
                    self.tags[sidx] = other.tags[oidx]
                self.tags += [t for t in other.tags if t.lower() in ot-st]
            if getattr(other, 'cover_data', False):
                # The larger of the two covers wins
                other_cover = other.cover_data[-1]
                self_cover = self.cover_data[-1] if self.cover_data else b''
                if not self_cover:
                    self_cover = b''
                if not other_cover:
                    other_cover = b''
                if len(other_cover) > len(self_cover):
                    self.cover_data = other.cover_data
            if callable(getattr(other, 'custom_field_keys', None)):
                for x in other.custom_field_keys():
                    meta = other.get_user_metadata(x, make_copy=True)
                    if meta is not None:
                        self_tags = self.get(x, [])
                        if isinstance(self_tags, string_or_bytes):
                            self_tags = []
                        self.set_user_metadata(x, meta)  # get... did the deepcopy
                        other_tags = other.get(x, [])
                        if meta['datatype'] == 'text' and meta['is_multiple']:
                            # Case-insensitive but case preserving merging
                            lotags = [t.lower() for t in other_tags]
                            try:
                                lstags = [t.lower() for t in self_tags]
                            except TypeError:
                                # Happens if x is not a text, is_multiple field
                                # on self
                                lstags = []
                                self_tags = []
                            ot, st = map(frozenset, (lotags, lstags))
                            for t in st.intersection(ot):
                                sidx = lstags.index(t)
                                oidx = lotags.index(t)
                                self_tags[sidx] = other_tags[oidx]
                            self_tags += [t for t in other_tags if t.lower() in ot-st]
                            setattr(self, x, self_tags)
            # The longer of the two comments wins
            my_comments = getattr(self, 'comments', '')
            other_comments = getattr(other, 'comments', '')
            if not my_comments:
                my_comments = ''
            if not other_comments:
                other_comments = ''
            if len(other_comments.strip()) > len(my_comments.strip()):
                self.comments = other_comments
            # Copy all the non-none identifiers
            if callable(getattr(other, 'get_identifiers', None)):
                d = self.get_identifiers()
                s = other.get_identifiers()
                d.update([v for v in iteritems(s) if v[1] is not None])
                self.set_identifiers(d)
            else:
                # other structure not Metadata. Copy the top-level identifiers
                for attr in TOP_LEVEL_IDENTIFIERS:
                    copy_not_none(self, other, attr)
        other_lang = getattr(other, 'languages', [])
        if other_lang and other_lang != ['und']:
            self.languages = list(other_lang)
        if not getattr(self, 'series', None):
            # A book with no series cannot have a series index
            self.series_index = None
def format_series_index(self, val=None):
from calibre.ebooks.metadata import fmt_sidx
v = self.series_index if val is None else val
try:
x = float(v)
except Exception:
x = 1
return fmt_sidx(x)
    def authors_from_string(self, raw):
        # Parse an '&'-separated author string into the authors list.
        from calibre.ebooks.metadata import string_to_authors
        self.authors = string_to_authors(raw)
    def format_authors(self):
        # Inverse of authors_from_string(): render the authors list as a string.
        from calibre.ebooks.metadata import authors_to_string
        return authors_to_string(self.authors)
def format_tags(self):
return ', '.join([str(t) for t in sorted(self.tags, key=sort_key)])
def format_rating(self, v=None, divide_by=1):
if v is None:
if self.rating is not None:
return str(self.rating/divide_by)
return 'None'
return str(v/divide_by)
    def format_field(self, key, series_with_index=True):
        '''
        Returns the tuple (display_name, formatted_value)
        '''
        # Thin wrapper that discards the original value and field metadata
        # returned by format_field_extended()
        name, val, ign, ign = self.format_field_extended(key, series_with_index)
        return (name, val)
    def format_field_extended(self, key, series_with_index=True):
        from calibre.ebooks.metadata import authors_to_string
        # NOTE(review): the triple-quoted description below is not a real
        # docstring (the import statement precedes it); consider moving it
        # above the import.
        '''
        returns the tuple (display_name, formatted_value, original_value,
        field_metadata)
        '''
        from calibre.utils.date import format_date
        # Handle custom series index
        if key.startswith('#') and key.endswith('_index'):
            tkey = key[:-6]  # strip the _index
            cmeta = self.get_user_metadata(tkey, make_copy=False)
            if cmeta and cmeta['datatype'] == 'series':
                if self.get(tkey):
                    res = self.get_extra(tkey)
                    return (str(cmeta['name']+'_index'),
                            self.format_series_index(res), res, cmeta)
                else:
                    return (str(cmeta['name']+'_index'), '', '', cmeta)
        # Custom (user) columns, formatted per their datatype
        if key in self.custom_field_keys():
            res = self.get(key, None)  # get evaluates all necessary composites
            cmeta = self.get_user_metadata(key, make_copy=False)
            name = str(cmeta['name'])
            if res is None or res == '':  # can't check "not res" because of numeric fields
                return (name, res, None, None)
            orig_res = res
            datatype = cmeta['datatype']
            if datatype == 'text' and cmeta['is_multiple']:
                res = cmeta['is_multiple']['list_to_ui'].join(res)
            elif datatype == 'series' and series_with_index:
                if self.get_extra(key) is not None:
                    res = res + \
                        ' [%s]'%self.format_series_index(val=self.get_extra(key))
            elif datatype == 'datetime':
                res = format_date(res, cmeta['display'].get('date_format','dd MMM yyyy'))
            elif datatype == 'bool':
                res = _('Yes') if res else _('No')
            elif datatype == 'rating':
                # Ratings are stored as half-stars; display as stars
                res = '%.2g'%(res/2)
            elif datatype in ['int', 'float']:
                try:
                    fmt = cmeta['display'].get('number_format', None)
                    res = fmt.format(res)
                except:
                    pass
            return (name, str(res), orig_res, cmeta)
        # convert top-level ids into their value
        if key in TOP_LEVEL_IDENTIFIERS:
            fmeta = field_metadata['identifiers']
            name = key
            res = self.get(key, None)
            return (name, res, res, fmeta)
        # Translate aliases into the standard field name
        fmkey = field_metadata.search_term_to_field_key(key)
        if fmkey in field_metadata and field_metadata[fmkey]['kind'] == 'field':
            res = self.get(key, None)
            fmeta = field_metadata[fmkey]
            name = str(fmeta['name'])
            if res is None or res == '':
                return (name, res, None, None)
            orig_res = res
            name = str(fmeta['name'])
            datatype = fmeta['datatype']
            if key == 'authors':
                res = authors_to_string(res)
            elif key == 'series_index':
                res = self.format_series_index(res)
            elif datatype == 'text' and fmeta['is_multiple']:
                if isinstance(res, dict):
                    res = [k + ':' + v for k,v in res.items()]
                res = fmeta['is_multiple']['list_to_ui'].join(sorted(filter(None, res), key=sort_key))
            elif datatype == 'series' and series_with_index:
                res = res + ' [%s]'%self.format_series_index()
            elif datatype == 'datetime':
                res = format_date(res, fmeta['display'].get('date_format','dd MMM yyyy'))
            elif datatype == 'rating':
                res = '%.2g'%(res/2)
            elif key == 'size':
                res = human_readable(res)
            return (name, str(res), orig_res, fmeta)
        # Unknown key
        return (None, None, None, None)
    def __unicode__representation__(self):
        '''
        A string representation of this object, suitable for printing to
        console
        '''
        from calibre.ebooks.metadata import authors_to_string
        from calibre.utils.date import isoformat
        ans = []
        def fmt(x, y):
            # One output line: field name left-aligned in a 20-char column
            ans.append('%-20s: %s'%(str(x), str(y)))
        fmt('Title', self.title)
        if self.title_sort:
            fmt('Title sort', self.title_sort)
        if self.authors:
            fmt('Author(s)', authors_to_string(self.authors) +
               ((' [' + self.author_sort + ']')
                if self.author_sort and self.author_sort != _('Unknown') else ''))
        if self.publisher:
            fmt('Publisher', self.publisher)
        if getattr(self, 'book_producer', False):
            fmt('Book Producer', self.book_producer)
        if self.tags:
            fmt('Tags', ', '.join([str(t) for t in self.tags]))
        if self.series:
            fmt('Series', self.series + ' #%s'%self.format_series_index())
        if not self.is_null('languages'):
            fmt('Languages', ', '.join(self.languages))
        if self.rating is not None:
            fmt('Rating', ('%.2g'%(float(self.rating)/2)) if self.rating
                else '')
        if self.timestamp is not None:
            fmt('Timestamp', isoformat(self.timestamp))
        if self.pubdate is not None:
            fmt('Published', isoformat(self.pubdate))
        if self.rights is not None:
            fmt('Rights', str(self.rights))
        if self.identifiers:
            fmt('Identifiers', ', '.join(['%s:%s'%(k, v) for k, v in
                iteritems(self.identifiers)]))
        if self.comments:
            fmt('Comments', self.comments)
        for key in self.custom_field_keys():
            val = self.get(key, None)
            if val:
                (name, val) = self.format_field(key)
                fmt(name, str(val))
        return '\n'.join(ans)
    def to_html(self):
        '''
        A HTML representation of this object.
        '''
        # Each (label, value) pair becomes one table row; values are inserted
        # as-is (no XML escaping is performed here)
        from calibre.ebooks.metadata import authors_to_string
        from calibre.utils.date import isoformat
        ans = [(_('Title'), str(self.title))]
        ans += [(_('Author(s)'), (authors_to_string(self.authors) if self.authors else _('Unknown')))]
        ans += [(_('Publisher'), str(self.publisher))]
        ans += [(_('Producer'), str(self.book_producer))]
        ans += [(_('Comments'), str(self.comments))]
        ans += [('ISBN', str(self.isbn))]
        ans += [(_('Tags'), ', '.join([str(t) for t in self.tags]))]
        if self.series:
            ans += [(ngettext('Series', 'Series', 1), str(self.series) + ' #%s'%self.format_series_index())]
        ans += [(_('Languages'), ', '.join(self.languages))]
        if self.timestamp is not None:
            ans += [(_('Timestamp'), str(isoformat(self.timestamp, as_utc=False, sep=' ')))]
        if self.pubdate is not None:
            ans += [(_('Published'), str(isoformat(self.pubdate, as_utc=False, sep=' ')))]
        if self.rights is not None:
            ans += [(_('Rights'), str(self.rights))]
        for key in self.custom_field_keys():
            val = self.get(key, None)
            if val:
                (name, val) = self.format_field(key)
                ans += [(name, val)]
        for i, x in enumerate(ans):
            ans[i] = '<tr><td><b>%s</b></td><td>%s</td></tr>'%x
        return '<table>%s</table>'%'\n'.join(ans)
    # str() produces the console-friendly listing built above
    __str__ = __unicode__representation__
    def __nonzero__(self):
        # A Metadata object is truthy if it has any of title, author,
        # comments or tags. (Python 2 protocol name, aliased to __bool__.)
        return bool(self.title or self.author or self.comments or self.tags)
    __bool__ = __nonzero__
    # }}}
def field_from_string(field, raw, field_metadata):
    ''' Parse the string raw to return an object that is suitable for calling
    set() on a Metadata object. '''
    dt = field_metadata['datatype']
    _unparsed = object  # sentinel meaning: no branch produced a value
    val = _unparsed
    if dt == 'int':
        val = int(raw)
    elif dt == 'float':
        val = float(raw)
    elif dt == 'rating':
        # Ratings are stored internally as half-stars
        val = float(raw) * 2
    elif dt == 'datetime':
        from calibre.utils.iso8601 import parse_iso8601
        try:
            val = parse_iso8601(raw, require_aware=True)
        except Exception:
            from calibre.utils.date import parse_only_date
            val = parse_only_date(raw)
    elif dt == 'bool':
        lraw = raw.lower()
        if lraw in {'true', 'yes', 'y'}:
            val = True
        elif lraw in {'false', 'no', 'n'}:
            val = False
        else:
            raise ValueError(f'Unknown value for {field}: {raw}')
    elif dt == 'text':
        ism = field_metadata['is_multiple']
        if ism:
            parts = [x.strip() for x in raw.split(ism['ui_to_list'])]
            if field == 'identifiers':
                val = {p.partition(':')[0]: p.partition(':')[-1] for p in parts}
            elif field == 'languages':
                from calibre.utils.localization import canonicalize_lang
                val = [lang for lang in (canonicalize_lang(p) for p in parts) if lang]
            else:
                val = parts
    # Fall back to the raw string for unhandled datatypes
    return raw if val is _unparsed else val
| 35,448 | Python | .py | 790 | 32.875949 | 154 | 0.547885 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,557 | serialize.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/book/serialize.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre.constants import preferred_encoding
from calibre.ebooks.metadata.book import SERIALIZABLE_FIELDS
from calibre.ebooks.metadata.book.base import Metadata
from calibre.utils.imghdr import what
from polyglot.binary import as_base64_unicode
from polyglot.builtins import iteritems
def ensure_unicode(obj, enc=preferred_encoding):
    # Recursively convert bytes to str, descending into lists, tuples and
    # dicts; any other type is returned unchanged.
    # NOTE: recursive calls use the default encoding, not *enc*, matching
    # historical behavior.
    if isinstance(obj, bytes):
        return obj.decode(enc, 'replace')
    if isinstance(obj, (list, tuple)):
        return [ensure_unicode(item) for item in obj]
    if isinstance(obj, dict):
        return {ensure_unicode(key): ensure_unicode(value) for key, value in iteritems(obj)}
    return obj
def serialize_cover(path):
    # Read the cover file at *path* and return (format_name, raw_bytes).
    with open(path, 'rb') as cover_file:
        raw = cover_file.read()
    return what(None, raw), raw
def read_cover(mi):
    # Populate mi.cover_data from the file named by mi.cover, unless raw
    # cover bytes are already present. Unreadable files are silently skipped.
    already_loaded = bool(mi.cover_data and mi.cover_data[1])
    if not already_loaded and mi.cover:
        try:
            mi.cover_data = serialize_cover(mi.cover)
        except OSError:
            pass
    return mi
def metadata_as_dict(mi, encode_cover_data=False):
    # Serialize a metadata object into a plain dict of its non-null
    # serializable fields. Cover bytes are optionally base64-encoded so the
    # result can be JSON-serialized.
    if hasattr(mi, 'to_book_metadata'):
        mi = mi.to_book_metadata()
    ans = {}
    for field in SERIALIZABLE_FIELDS:
        if field != 'cover' and not mi.is_null(field):
            val = getattr(mi, field)
            ans[field] = ensure_unicode(val)
    if mi.cover_data and mi.cover_data[1]:
        if encode_cover_data:
            ans['cover_data'] = [mi.cover_data[0], as_base64_unicode(mi.cover_data[1])]
        else:
            ans['cover_data'] = mi.cover_data
    um = mi.get_all_user_metadata(False)
    if um:
        ans['user_metadata'] = um
    return ans
def metadata_from_dict(src):
    # Inverse of metadata_as_dict(): rebuild a Metadata object from a plain
    # dict, routing custom columns through set_all_user_metadata().
    mi = Metadata('Unknown')
    for key, value in iteritems(src):
        if key == 'user_metadata':
            mi.set_all_user_metadata(value)
        else:
            setattr(mi, key, value)
    return mi
| 1,988 | Python | .py | 56 | 29 | 87 | 0.651042 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,558 | render.py | kovidgoyal_calibre/src/calibre/ebooks/metadata/book/render.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from contextlib import suppress
from functools import partial
from calibre import force_unicode, prepare_string_for_xml
from calibre.constants import filesystem_encoding
from calibre.db.constants import DATA_DIR_NAME
from calibre.ebooks.metadata import fmt_sidx, rating_to_stars
from calibre.ebooks.metadata.search_internet import DEFAULT_AUTHOR_SOURCE, name_for, qquote, url_for_author_search, url_for_book_search
from calibre.ebooks.metadata.sources.identify import urls_from_identifiers
from calibre.library.comments import comments_to_html, markdown
from calibre.utils.date import format_date, is_date_undefined
from calibre.utils.formatter import EvalFormatter
from calibre.utils.icu import sort_key
from calibre.utils.localization import calibre_langcode_to_name, ngettext
from calibre.utils.serialize import json_dumps
from polyglot.binary import as_hex_unicode
# Fields listed here are pinned to the top of the rendered metadata table,
# in this order; everything else sorts after them by display name.
default_sort = ('title', 'title_sort', 'authors', 'author_sort', 'series', 'rating', 'pubdate', 'tags', 'publisher', 'identifiers')


def field_sort(mi, name):
    '''
    Sort key for metadata fields: (rank, tiebreak). Fields in default_sort
    get their position as rank with no tiebreak; all others share rank 10000
    and tie-break on the collation key of their display name.
    '''
    try:
        title = mi.metadata_for_field(name)['name']
    except Exception:
        title = 'zzz'  # missing/unnamed fields sort last among the rest
    try:
        # Previously a rank dict was rebuilt — and sort_key(title) computed —
        # on every call, even for pinned fields; now both happen only when
        # actually needed.
        return (default_sort.index(name), None)
    except ValueError:
        return (10000, sort_key(title))
def displayable_field_keys(mi):
    # Yield the keys of fields worth showing: real fields with a datatype,
    # excluding a fixed list of internal keys and '_index' companions.
    for k in mi.all_field_keys():
        try:
            m = mi.metadata_for_field(k)
        except:
            continue
        if (
            m is not None and m.get('kind') == 'field' and m.get('datatype') is not None and
            k not in ('au_map', 'marked', 'ondevice', 'cover', 'series_sort', 'in_tag_browser') and
            not k.endswith('_index')
        ):
            yield k
def get_field_list(mi):
    # Yield (field_name, True) pairs for every displayable field, ordered by
    # field_sort(). The boolean matches the (field, display) convention of
    # mi_to_html()'s field_list parameter.
    ordered = sorted(displayable_field_keys(mi), key=partial(field_sort, mi))
    for field in ordered:
        yield field, True
def action(main, **keys):
    # Encode an action descriptor as a calibre 'action:' URL: the keyword
    # arguments plus a 'type' entry, JSON-encoded then hex-encoded.
    payload = dict(keys, type=main)
    return 'action:' + as_hex_unicode(json_dumps(payload))
def search_action(search_term, value, **k):
    # Convenience wrapper: an action of type 'search' for search_term/value.
    return action('search', term=search_term, value=value, **k)
def search_action_with_data(search_term, value, book_id, field=None, **k):
    # As search_action(), but also records the originating book id and field;
    # the field defaults to the search term itself.
    if not field:
        field = search_term
    return search_action(search_term, value, field=field, book_id=book_id, **k)
def notes_action(**keys):
    # Encode a 'notes:' URL whose payload is the hex-encoded JSON of *keys*.
    return 'notes:' + as_hex_unicode(json_dumps(keys))
# Fallback author-link token: search the default internet source
DEFAULT_AUTHOR_LINK = f'search-{DEFAULT_AUTHOR_SOURCE}'
def author_search_href(which, title=None, author=None):
    # Return a (url, tooltip) pair for searching the source *which* for an
    # author — or, for '-book' suffixed sources, for a specific book by that
    # author. 'calibre' means: search the local library.
    if which == 'calibre':
        return 'calibre', _('Search the calibre library for books by %s') % author
    search_type, key = 'author', which
    if which.endswith('-book'):
        key, search_type = which.rpartition('-')[::2]
    name = name_for(key)
    if name is None:
        # Unknown source: retry with the default author-search source
        search_type = 'author'
        return author_search_href(DEFAULT_AUTHOR_LINK.partition('-')[2], title=title, author=author)
    if search_type == 'author':
        tt = _('Search {0} for the author: {1}').format(name, author)
    else:
        tt = _('Search {0} for the book: {1} by the author {2}').format(name, title, author)
    func = url_for_book_search if search_type == 'book' else url_for_author_search
    return func(key, title=title, author=author), tt
def render_author_link(default_author_link, author, book_title=None, author_sort=None):
    """Return (link, tooltip) for *author* per the configured author-link setting."""
    book_title = book_title or ''
    if not default_author_link.startswith('search-'):
        # Custom URL template: expand with the template engine
        vals = {'author': qquote(author), 'title': qquote(book_title), 'author_sort': qquote(author_sort or author)}
        link = lt = EvalFormatter().safe_format(default_author_link, vals, '', vals)
    else:
        source = default_author_link.partition('-')[2]
        link, lt = author_search_href(source, title=book_title, author=author)
    return link, lt
def mi_to_html(
    mi,
    field_list=None, default_author_link=None, use_roman_numbers=True,
    rating_font='Liberation Serif', rtl=False, comments_heading_pos='hide',
    for_qt=False, vertical_fields=(), show_links=True, item_id_if_has_note=None
):
    '''
    Render the metadata of a book as an HTML table.

    Returns a 2-tuple (table_html, comment_fields), where comment_fields is a
    list of HTML divs for long-text (comments-like) fields, which are rendered
    outside the table.

    :param mi: a Metadata-like object; treated as a device book when it has no ``id``
    :param field_list: iterable of (field_name, display) pairs; computed from mi when None
    :param vertical_fields: multi-valued fields whose values are separated with <br/>
    :param item_id_if_has_note: callable (field, value) -> item id or None, used to add "show note" links
    '''
    link_markup = '↗️'
    if for_qt:
        # Qt's rich text engine cannot render the emoji; use icon images
        link_markup = '<img valign="bottom" src="calibre-icon:///external-link.png" width=16 height=16>'
        note_markup = '<img valign="bottom" src="calibre-icon:///notes.png" width=16 height=16>'
        # NOTE(review): note_markup is only bound when for_qt is True but is
        # referenced whenever item_id_if_has_note yields an id — confirm
        # item_id_if_has_note is only ever passed from the Qt context

    def get_link_map(column):
        # Per-column mapping of value -> user-defined URL; empty when unavailable
        try:
            return mi.link_maps[column]
        except Exception:
            return {}

    def add_other_links(field, field_value):
        # Decorations appended after a value: user link and/or note link;
        # empty string when links are disabled
        if show_links:
            link = get_link_map(field).get(field_value)
            if link:
                link = prepare_string_for_xml(link, True)
                link = ' <a title="{0}: {1}" href="{1}">{2}</a>'.format(_('Click to open'), link, link_markup)
            else:
                link = ''
            note = ''
            item_id = None if item_id_if_has_note is None else item_id_if_has_note(field, field_value)
            if item_id is not None:
                note = ' <a title="{}" href="{}">{}</a>'.format(
                    _('Show notes for: {}').format(field_value), notes_action(field=field, value=field_value, item_id=item_id), note_markup)
            return link + note
        return ''

    if field_list is None:
        field_list = get_field_list(mi)
    ans = []
    comment_fields = []
    isdevice = not hasattr(mi, 'id')  # device books have no library id
    row = '<td class="title">%s</td><td class="value">%s</td>'
    p = prepare_string_for_xml
    a = partial(prepare_string_for_xml, attribute=True)
    book_id = getattr(mi, 'id', 0)
    title_sep = '\xa0'  # no-break space appended after every field name

    for field in (field for field, display in field_list if display):
        try:
            metadata = mi.metadata_for_field(field)
        except:
            continue
        if not metadata:
            continue

        def value_list(sep, vals):
            # Join multiple values; vertical_fields stack with <br/>
            # (closure over the current ``field``)
            if field in vertical_fields:
                return '<br/>'.join(vals)
            return sep.join(vals)

        if field == 'sort':
            field = 'title_sort'
        if metadata['is_custom'] and metadata['datatype'] in {'bool', 'int', 'float'}:
            # is_null() treats False/0 as null for these types, so test None directly
            isnull = mi.get(field) is None
        else:
            isnull = mi.is_null(field)
        if isnull:
            continue
        name = metadata['name']
        if not name:
            name = field
        name += title_sep
        disp = metadata['display']
        if (metadata['datatype'] == 'comments' or field == 'comments'
                or disp.get('composite_show_in_comments', '')):
            # Long-text fields render as separate divs unless configured 'side'
            val = getattr(mi, field)
            if val:
                ctype = disp.get('interpret_as') or 'html'
                val = force_unicode(val)
                if ctype == 'long-text':
                    val = '<pre style="white-space:pre-wrap">%s</pre>' % p(val)
                elif ctype == 'short-text':
                    val = '<span>%s</span>' % p(val)
                elif ctype == 'markdown':
                    val = markdown(val)
                else:
                    val = comments_to_html(val)
                heading_position = disp.get('heading_position', comments_heading_pos)
                if heading_position == 'side':
                    ans.append((field, row % (name, val)))
                else:
                    if heading_position == 'above':
                        val = f'<h3 class="comments-heading">{p(name)}</h3>{val}'
                    comment_fields.append('<div id="{}" class="comments">{}</div>'.format(field.replace('#', '_'), val))
        elif metadata['datatype'] == 'rating':
            val = getattr(mi, field)
            if val:
                star_string = rating_to_stars(val, disp.get('allow_half_stars', False))
                ans.append((field,
                    '<td class="title">%s</td><td class="rating value" '
                    'style=\'font-family:"%s"\'>%s</td>'%(
                        name, rating_font, star_string)))
        elif metadata['datatype'] == 'composite' and not disp.get('composite_show_in_comments', ''):
            val = getattr(mi, field)
            if val:
                val = force_unicode(val)
                if disp.get('contains_html', False):
                    # Composite already produces HTML: sanitize rather than escape
                    ans.append((field, row % (name, comments_to_html(val))))
                else:
                    if not metadata['is_multiple']:
                        val = '<a href="{}" title="{}">{}</a>'.format(
                            search_action(field, val, book_id=book_id),
                            _('Click to see books with {0}: {1}').format(metadata['name'], a(val)), p(val))
                    else:
                        all_vals = [v.strip()
                            for v in val.split(metadata['is_multiple']['cache_to_list']) if v.strip()]
                        if show_links:
                            links = ['<a href="{}" title="{}">{}</a>'.format(
                                search_action(field, x, book_id=book_id), _('Click to see books with {0}: {1}').format(
                                    metadata['name'], a(x)), p(x)) for x in all_vals]
                        else:
                            links = all_vals
                        val = value_list(metadata['is_multiple']['list_to_ui'], links)
                    ans.append((field, row % (name, val)))
        elif field == 'path':
            if mi.path:
                path = force_unicode(mi.path, filesystem_encoding)
                scheme = 'devpath' if isdevice else 'path'
                loc = path if isdevice else book_id
                extra = ''
                if isdevice:
                    durl = path
                    if durl.startswith('mtp:::'):
                        # Strip the mtp:::<serial>::: prefix for display
                        durl = ':::'.join((durl.split(':::'))[2:])
                    extra = '<br><span style="font-size:smaller">%s</span>'%(
                        prepare_string_for_xml(durl))
                if show_links:
                    num_of_folders = 1
                    if isdevice:
                        text = _('Click to open')
                    else:
                        # Offer a second link for the extra data-files folder when non-empty
                        data_path = os.path.join(path, DATA_DIR_NAME)
                        with suppress(OSError):
                            for dirpath, dirnames, filenames in os.walk(data_path):
                                if filenames:
                                    num_of_folders = 2
                                    break
                        text = _('Book files')
                        name = ngettext('Folder', 'Folders', num_of_folders) + title_sep
                    links = ['<a href="{}" title="{}">{}</a>{}'.format(action(scheme, book_id=book_id, loc=loc),
                        prepare_string_for_xml(path, True), text, extra)]
                    if num_of_folders > 1:
                        links.append('<a href="{}" title="{}">{}</a>'.format(
                            action('data-path', book_id=book_id, loc=book_id),
                            prepare_string_for_xml(data_path, True), _('Data files')))
                    link = value_list(', ', links)
                else:
                    link = prepare_string_for_xml(path, True)
                ans.append((field, row % (name, link)))
        elif field == 'formats':
            # Don't need show_links here because formats are removed from mi on
            # cross library displays.
            if isdevice:
                continue
            path = mi.path or ''
            bpath = ''
            if path:
                h, t = os.path.split(path)
                bpath = os.sep.join((os.path.basename(h), t))
            data = ({
                'fmt':x, 'path':a(path or ''), 'fname':a(mi.format_files.get(x, '')),
                'ext':x.lower(), 'id':book_id, 'bpath':bpath, 'sep':os.sep,
                'action':action('format', book_id=book_id, fmt=x, path=path or '', fname=mi.format_files.get(x, ''))
            } for x in mi.formats)
            fmts = ['<a title="{bpath}{sep}{fname}.{ext}" href="{action}">{fmt}</a>'.format(**x)
                for x in data]
            ans.append((field, row % (name, value_list(', ', fmts))))
        elif field == 'identifiers':
            urls = urls_from_identifiers(mi.identifiers, sort_results=True)
            if show_links:
                links = [
                    '<a href="{}" title="{}:{}">{}</a>'.format(
                        action('identifier', book_id=book_id, url=url, name=namel, id_type=id_typ, value=id_val, field='identifiers'),
                        a(id_typ), a(id_val), p(namel))
                    for namel, id_typ, id_val, url in urls]
                links = value_list(', ', links)
            else:
                links = ', '.join(mi.identifiers)
            if links:
                ans.append((field, row % (_('Ids')+title_sep, links)))
        elif field == 'authors':
            authors = []
            for aut in mi.authors:
                link = ''
                if show_links:
                    if default_author_link:
                        link, lt = render_author_link(default_author_link, aut, mi.title, mi.author_sort_map.get(aut) or aut)
                else:
                    # NOTE(review): the author name is XML-escaped only on this
                    # no-links path — confirm escaping on the linked path is
                    # handled downstream
                    aut = p(aut)
                if link:
                    val = '<a title="%s" href="%s">%s</a>'%(a(lt), action('author', book_id=book_id,
                        url=link, name=aut, title=lt), aut)
                else:
                    val = aut
                val += add_other_links('authors', aut)
                authors.append(val)
            ans.append((field, row % (name, value_list(' & ', authors))))
        elif field == 'languages':
            if not mi.languages:
                continue
            names = filter(None, map(calibre_langcode_to_name, mi.languages))
            if show_links:
                names = ['<a href="{}" title="{}">{}</a>'.format(search_action_with_data('languages', n, book_id), _(
                    'Search calibre for books with the language: {}').format(n), n) for n in names]
            ans.append((field, row % (name, value_list(', ', names))))
        elif field == 'publisher':
            if not mi.publisher:
                continue
            if show_links:
                val = '<a href="{}" title="{}">{}</a>'.format(
                    search_action_with_data('publisher', mi.publisher, book_id),
                    _('Click to see books with {0}: {1}').format(metadata['name'], a(mi.publisher)),
                    p(mi.publisher))
                val += add_other_links('publisher', mi.publisher)
            else:
                val = p(mi.publisher)
            ans.append((field, row % (name, val)))
        elif field == 'title':
            # otherwise title gets metadata['datatype'] == 'text'
            # treatment below with a click to search link (which isn't
            # too bad), and a right-click 'Delete' option to delete
            # the title (which is bad).
            val = mi.format_field(field)[-1]
            ans.append((field, row % (name, val)))
        else:
            # Generic handling for all remaining datatypes
            val = unescaped_val = mi.format_field(field)[-1]
            if val is None:
                continue
            val = p(val)
            if show_links:
                if metadata['datatype'] == 'series':
                    sidx = mi.get(field+'_index')
                    if sidx is None:
                        sidx = 1.0
                    try:
                        st = metadata['search_terms'][0]
                    except Exception:
                        st = field
                    series = getattr(mi, field)
                    val = _(
                        '%(sidx)s of <a href="%(href)s" title="%(tt)s">'
                        '<span class="%(cls)s">%(series)s</span></a>') % dict(
                            sidx=fmt_sidx(sidx, use_roman=use_roman_numbers), cls="series_name",
                            series=p(series), href=search_action_with_data(st, series, book_id, field),
                            tt=p(_('Click to see books in this series')))
                    val += add_other_links(field, series)
                elif metadata['datatype'] == 'datetime':
                    aval = getattr(mi, field)
                    if is_date_undefined(aval):
                        continue
                    aval = format_date(aval, 'yyyy-MM-dd')
                    # Searches on the timestamp field use the 'date' search term
                    key = field if field != 'timestamp' else 'date'
                    if val == aval:
                        val = '<a href="{}" title="{}">{}</a>'.format(
                            search_action_with_data(key, str(aval), book_id, None, original_value=val), a(
                                _('Click to see books with {0}: {1}').format(metadata['name'] or field, val)), val)
                    else:
                        val = '<a href="{}" title="{}">{}</a>'.format(
                            search_action_with_data(key, str(aval), book_id, None, original_value=val), a(
                                _('Click to see books with {0}: {1} (derived from {2})').format(
                                    metadata['name'] or field, aval, val)), val)
                elif metadata['datatype'] == 'text' and metadata['is_multiple']:
                    try:
                        st = metadata['search_terms'][0]
                    except Exception:
                        st = field
                    all_vals = mi.get(field)
                    if not metadata.get('display', {}).get('is_names', False):
                        all_vals = sorted(all_vals, key=sort_key)
                    links = []
                    for x in all_vals:
                        v = '<a href="{}" title="{}">{}</a>'.format(
                            search_action_with_data(st, x, book_id, field), _('Click to see books with {0}: {1}').format(
                                metadata['name'] or field, a(x)), p(x))
                        v += add_other_links(field, x)
                        links.append(v)
                    val = value_list(metadata['is_multiple']['list_to_ui'], links)
                elif metadata['datatype'] == 'text' or metadata['datatype'] == 'enumeration':
                    # text/is_multiple handled above so no need to add the test to the if
                    try:
                        st = metadata['search_terms'][0]
                    except Exception:
                        st = field
                    v = '<a href="{}" title="{}">{}</a>'.format(
                        search_action_with_data(st, unescaped_val, book_id, field), a(
                            _('Click to see books with {0}: {1}').format(metadata['name'] or field, val)), val)
                    val = v + add_other_links(field, val)
                elif metadata['datatype'] == 'bool':
                    val = '<a href="{}" title="{}">{}</a>'.format(
                        search_action_with_data(field, val, book_id, None), a(
                            _('Click to see books with {0}: {1}').format(metadata['name'] or field, val)), val)
                else:
                    try:
                        aval = str(getattr(mi, field))
                        if not aval:
                            continue
                        if val == aval:
                            val = '<a href="{}" title="{}">{}</a>'.format(
                                search_action_with_data(field, str(aval), book_id, None, original_value=val), a(
                                    _('Click to see books with {0}: {1}').format(metadata['name'] or field, val)), val)
                        else:
                            val = '<a href="{}" title="{}">{}</a>'.format(
                                search_action_with_data(field, str(aval), book_id, None, original_value=val), a(
                                    _('Click to see books with {0}: {1} (derived from {2})').format(
                                        metadata['name'] or field, aval, val)), val)
                    except Exception:
                        import traceback
                        traceback.print_exc()
            ans.append((field, row % (name, val)))

    dc = getattr(mi, 'device_collections', [])
    if dc:
        dc = ', '.join(sorted(dc, key=sort_key))
        ans.append(('device_collections',
            row % (_('Collections')+':', dc)))

    def classname(field):
        # CSS class derived from the field's datatype, for styling the row
        try:
            dt = mi.metadata_for_field(field)['datatype']
        except:
            dt = 'text'
        return 'datatype_%s'%dt
    ans = ['<tr id="%s" class="%s">%s</tr>'%(fieldl.replace('#', '_'),
        classname(fieldl), html) for fieldl, html in ans]
    # print '\n'.join(ans)
    direction = 'rtl' if rtl else 'ltr'
    rans = f'<table class="fields" style="direction: {direction}; '
    if not for_qt:
        # This causes wasted space at the edge of the table in Qt's rich text
        # engine, see https://bugs.launchpad.net/calibre/+bug/1881488
        margin = 'left' if rtl else 'right'
        rans += f'margin-{margin}: auto; '
    return '{}">{}</table>'.format(rans, '\n'.join(ans)), comment_fields
| 21,186 | Python | .py | 410 | 36.290244 | 140 | 0.497733 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,559 | lcid.py | kovidgoyal_calibre/src/calibre/ebooks/docx/lcid.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
# Map of Windows Language Code Identifiers (LCID) to language codes.
# Fix: the entries previously marked "TODO" are filled in with their
# standard ISO 639 codes (fa, ff, ibb, st, tzm); also repaired the
# mojibake in the Bokmål comment.
lcid = {
    1078: 'af',  # Afrikaans - South Africa
    1052: 'sq',  # Albanian - Albania
    1118: 'am',  # Amharic - Ethiopia
    1025: 'ar',  # Arabic - Saudi Arabia
    5121: 'ar',  # Arabic - Algeria
    15361: 'ar',  # Arabic - Bahrain
    3073: 'ar',  # Arabic - Egypt
    2049: 'ar',  # Arabic - Iraq
    11265: 'ar',  # Arabic - Jordan
    13313: 'ar',  # Arabic - Kuwait
    12289: 'ar',  # Arabic - Lebanon
    4097: 'ar',  # Arabic - Libya
    6145: 'ar',  # Arabic - Morocco
    8193: 'ar',  # Arabic - Oman
    16385: 'ar',  # Arabic - Qatar
    10241: 'ar',  # Arabic - Syria
    7169: 'ar',  # Arabic - Tunisia
    14337: 'ar',  # Arabic - U.A.E.
    9217: 'ar',  # Arabic - Yemen
    1067: 'hy',  # Armenian - Armenia
    1101: 'as',  # Assamese
    2092: 'az',  # Azeri (Cyrillic)
    1068: 'az',  # Azeri (Latin)
    1069: 'eu',  # Basque
    1059: 'be',  # Belarusian
    1093: 'bn',  # Bengali (India)
    2117: 'bn',  # Bengali (Bangladesh)
    5146: 'bs',  # Bosnian (Bosnia/Herzegovina)
    1026: 'bg',  # Bulgarian
    1109: 'my',  # Burmese
    1027: 'ca',  # Catalan
    1116: 'chr',  # Cherokee - United States
    2052: 'zh',  # Chinese - People's Republic of China
    4100: 'zh',  # Chinese - Singapore
    1028: 'zh',  # Chinese - Taiwan
    3076: 'zh',  # Chinese - Hong Kong SAR
    5124: 'zh',  # Chinese - Macao SAR
    1050: 'hr',  # Croatian
    4122: 'hr',  # Croatian (Bosnia/Herzegovina)
    1029: 'cs',  # Czech
    1030: 'da',  # Danish
    1125: 'dv',  # Divehi
    1043: 'nl',  # Dutch - Netherlands
    2067: 'nl',  # Dutch - Belgium
    1126: 'bin',  # Edo
    1033: 'en',  # English - United States
    2057: 'en',  # English - United Kingdom
    3081: 'en',  # English - Australia
    10249: 'en',  # English - Belize
    4105: 'en',  # English - Canada
    9225: 'en',  # English - Caribbean
    15369: 'en',  # English - Hong Kong SAR
    16393: 'en',  # English - India
    14345: 'en',  # English - Indonesia
    6153: 'en',  # English - Ireland
    8201: 'en',  # English - Jamaica
    17417: 'en',  # English - Malaysia
    5129: 'en',  # English - New Zealand
    13321: 'en',  # English - Philippines
    18441: 'en',  # English - Singapore
    7177: 'en',  # English - South Africa
    11273: 'en',  # English - Trinidad
    12297: 'en',  # English - Zimbabwe
    1061: 'et',  # Estonian
    1080: 'fo',  # Faroese
    1065: 'fa',  # Farsi (Persian) - Iran
    1124: 'fil',  # Filipino
    1035: 'fi',  # Finnish
    1036: 'fr',  # French - France
    2060: 'fr',  # French - Belgium
    11276: 'fr',  # French - Cameroon
    3084: 'fr',  # French - Canada
    9228: 'fr',  # French - Democratic Rep. of Congo
    12300: 'fr',  # French - Cote d'Ivoire
    15372: 'fr',  # French - Haiti
    5132: 'fr',  # French - Luxembourg
    13324: 'fr',  # French - Mali
    6156: 'fr',  # French - Monaco
    14348: 'fr',  # French - Morocco
    58380: 'fr',  # French - North Africa
    8204: 'fr',  # French - Reunion
    10252: 'fr',  # French - Senegal
    4108: 'fr',  # French - Switzerland
    7180: 'fr',  # French - West Indies
    1122: 'fy',  # Frisian - Netherlands
    1127: 'ff',  # Fulfulde - Nigeria (Fulah)
    1071: 'mk',  # FYRO Macedonian
    2108: 'ga',  # Gaelic (Ireland)
    1084: 'gd',  # Gaelic (Scotland)
    1110: 'gl',  # Galician
    1079: 'ka',  # Georgian
    1031: 'de',  # German - Germany
    3079: 'de',  # German - Austria
    5127: 'de',  # German - Liechtenstein
    4103: 'de',  # German - Luxembourg
    2055: 'de',  # German - Switzerland
    1032: 'el',  # Greek
    1140: 'gn',  # Guarani - Paraguay
    1095: 'gu',  # Gujarati
    1128: 'ha',  # Hausa - Nigeria
    1141: 'haw',  # Hawaiian - United States
    1037: 'he',  # Hebrew
    1081: 'hi',  # Hindi
    1038: 'hu',  # Hungarian
    1129: 'ibb',  # Ibibio - Nigeria
    1039: 'is',  # Icelandic
    1136: 'ig',  # Igbo - Nigeria
    1057: 'id',  # Indonesian
    1117: 'iu',  # Inuktitut
    1040: 'it',  # Italian - Italy
    2064: 'it',  # Italian - Switzerland
    1041: 'ja',  # Japanese
    1099: 'kn',  # Kannada
    1137: 'kr',  # Kanuri - Nigeria
    2144: 'ks',  # Kashmiri
    1120: 'ks',  # Kashmiri (Arabic)
    1087: 'kk',  # Kazakh
    1107: 'km',  # Khmer
    1111: 'kok',  # Konkani
    1042: 'ko',  # Korean
    1088: 'ky',  # Kyrgyz (Cyrillic)
    1108: 'lo',  # Lao
    1142: 'la',  # Latin
    1062: 'lv',  # Latvian
    1063: 'lt',  # Lithuanian
    1086: 'ms',  # Malay - Malaysia
    2110: 'ms',  # Malay - Brunei Darussalam
    1100: 'ml',  # Malayalam
    1082: 'mt',  # Maltese
    1112: 'mni',  # Manipuri
    1153: 'mi',  # Maori - New Zealand
    1102: 'mr',  # Marathi
    1104: 'mn',  # Mongolian (Cyrillic)
    2128: 'mn',  # Mongolian (Mongolian)
    1121: 'ne',  # Nepali
    2145: 'ne',  # Nepali - India
    1044: 'no',  # Norwegian (Bokmål)
    2068: 'no',  # Norwegian (Nynorsk)
    1096: 'or',  # Oriya
    1138: 'om',  # Oromo
    1145: 'pap',  # Papiamentu
    1123: 'ps',  # Pashto
    1045: 'pl',  # Polish
    1046: 'pt',  # Portuguese - Brazil
    2070: 'pt',  # Portuguese - Portugal
    1094: 'pa',  # Punjabi
    2118: 'pa',  # Punjabi (Pakistan)
    1131: 'qu',  # Quecha - Bolivia
    2155: 'qu',  # Quecha - Ecuador
    3179: 'qu',  # Quecha - Peru
    1047: 'rm',  # Rhaeto-Romanic
    1048: 'ro',  # Romanian
    2072: 'ro',  # Romanian - Moldava
    1049: 'ru',  # Russian
    2073: 'ru',  # Russian - Moldava
    1083: 'se',  # Sami (Lappish)
    1103: 'sa',  # Sanskrit
    1132: 'nso',  # Sepedi
    3098: 'sr',  # Serbian (Cyrillic)
    2074: 'sr',  # Serbian (Latin)
    1113: 'sd',  # Sindhi - India
    2137: 'sd',  # Sindhi - Pakistan
    1115: 'si',  # Sinhalese - Sri Lanka
    1051: 'sk',  # Slovak
    1060: 'sl',  # Slovenian
    1143: 'so',  # Somali
    1070: 'wen',  # Sorbian
    3082: 'es',  # Spanish - Spain (Modern Sort)
    1034: 'es',  # Spanish - Spain (Traditional Sort)
    11274: 'es',  # Spanish - Argentina
    16394: 'es',  # Spanish - Bolivia
    13322: 'es',  # Spanish - Chile
    9226: 'es',  # Spanish - Colombia
    5130: 'es',  # Spanish - Costa Rica
    7178: 'es',  # Spanish - Dominican Republic
    12298: 'es',  # Spanish - Ecuador
    17418: 'es',  # Spanish - El Salvador
    4106: 'es',  # Spanish - Guatemala
    18442: 'es',  # Spanish - Honduras
    58378: 'es',  # Spanish - Latin America
    2058: 'es',  # Spanish - Mexico
    19466: 'es',  # Spanish - Nicaragua
    6154: 'es',  # Spanish - Panama
    15370: 'es',  # Spanish - Paraguay
    10250: 'es',  # Spanish - Peru
    20490: 'es',  # Spanish - Puerto Rico
    21514: 'es',  # Spanish - United States
    14346: 'es',  # Spanish - Uruguay
    8202: 'es',  # Spanish - Venezuela
    1072: 'st',  # Sutu (Southern Sotho)
    1089: 'sw',  # Swahili
    1053: 'sv',  # Swedish
    2077: 'sv',  # Swedish - Finland
    1114: 'syr',  # Syriac
    1064: 'tg',  # Tajik
    1119: 'tzm',  # Tamazight (Arabic)
    2143: 'tzm',  # Tamazight (Latin)
    1097: 'ta',  # Tamil
    1092: 'tt',  # Tatar
    1098: 'te',  # Telugu
    1054: 'th',  # Thai
    2129: 'bo',  # Tibetan - Bhutan
    1105: 'bo',  # Tibetan - People's Republic of China
    2163: 'ti',  # Tigrigna - Eritrea
    1139: 'ti',  # Tigrigna - Ethiopia
    1073: 'ts',  # Tsonga
    1074: 'tn',  # Tswana
    1055: 'tr',  # Turkish
    1090: 'tk',  # Turkmen
    1152: 'ug',  # Uighur - China
    1058: 'uk',  # Ukrainian
    1056: 'ur',  # Urdu
    2080: 'ur',  # Urdu - India
    2115: 'uz',  # Uzbek (Cyrillic)
    1091: 'uz',  # Uzbek (Latin)
    1075: 've',  # Venda
    1066: 'vi',  # Vietnamese
    1106: 'cy',  # Welsh
    1076: 'xh',  # Xhosa
    1144: 'ii',  # Yi
    1085: 'yi',  # Yiddish
    1130: 'yo',  # Yoruba
    1077: 'zu'  # Zulu
}
| 7,829 | Python | .py | 228 | 29.394737 | 61 | 0.550171 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,560 | settings.py | kovidgoyal_calibre/src/calibre/ebooks/docx/settings.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
class Settings:
    """Document-level settings from word/settings.xml.

    Currently only the default tab stop width is tracked (in points)."""

    def __init__(self, namespace):
        # Word stores tab stops in twentieths of a point; 720 twips == 36pt
        self.default_tab_stop = 720 / 20
        self.namespace = namespace

    def __call__(self, root):
        # The last w:defaultTabStop element wins; malformed values are ignored
        for elem in self.namespace.XPath('//w:defaultTabStop[@w:val]')(root):
            try:
                self.default_tab_stop = int(self.namespace.get(elem, 'w:val')) / 20
            except (ValueError, TypeError, AttributeError):
                pass
| 529 | Python | .py | 13 | 32.384615 | 82 | 0.59725 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,561 | tables.py | kovidgoyal_calibre/src/calibre/ebooks/docx/tables.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from lxml.html.builder import TABLE, TD, TR
from calibre.ebooks.docx.block_styles import ParagraphStyle, binary_property, border_props, border_to_css, inherit, read_border
from calibre.ebooks.docx.block_styles import read_shd as rs
from calibre.ebooks.docx.char_styles import RunStyle
from polyglot.builtins import iteritems, itervalues
# Read from XML {{{
# Re-export so this module offers read_shd alongside its other read_* helpers
read_shd = rs
# The four outer edges, in the order used for padding/border properties
edges = ('left', 'top', 'right', 'bottom')
def _read_width(elem, get):
    """Convert an OOXML width element (w:w + w:type) to a CSS width string.

    dxa values are twentieths of a point and pct values are fiftieths of a
    percent; unrecognized types yield inherit."""
    try:
        raw = int(get(elem, 'w:w'))
    except (TypeError, ValueError):
        raw = 0
    typ = get(elem, 'w:type', 'auto')
    if typ == 'nil':
        return '0'
    if typ == 'auto':
        return 'auto'
    if typ == 'dxa':
        return '%.3gpt' % (raw/20)
    if typ == 'pct':
        return '%.3g%%' % (raw/50)
    return inherit
def read_width(parent, dest, XPath, get):
    """dest.width <- width of the last w:tblW child of parent, else inherit."""
    width = inherit
    for tblW in XPath('./w:tblW')(parent):
        width = _read_width(tblW, get)
    dest.width = width
def read_cell_width(parent, dest, XPath, get):
    """dest.width <- width of the last w:tcW child of parent, else inherit."""
    width = inherit
    for tcW in XPath('./w:tcW')(parent):
        width = _read_width(tcW, get)
    dest.width = width
def read_padding(parent, dest, XPath, get):
    """Read per-edge cell margins (w:tblCellMar on a table, w:tcMar on a
    cell) into dest.cell_padding_{left,top,right,bottom}."""
    container = 'tblCellMar' if parent.tag.endswith('}tblPr') else 'tcMar'
    padding = dict.fromkeys(edges, inherit)
    for mar in XPath('./w:%s' % container)(parent):
        for edge in edges:
            for elem in XPath('./w:%s' % edge)(mar):
                padding[edge] = _read_width(elem, get)
    for edge in edges:
        setattr(dest, 'cell_padding_%s' % edge, padding[edge])
def read_justification(parent, dest, XPath, get):
    """Translate w:jc table justification into CSS auto margins on dest."""
    left = right = inherit
    for jc in XPath('./w:jc[@w:val]')(parent):
        val = get(jc, 'w:val')
        if not val:
            continue
        if val == 'left':
            right = 'auto'
        elif val == 'right':
            left = 'auto'
        elif val == 'center':
            left = right = 'auto'
    dest.margin_left = left
    dest.margin_right = right
def read_spacing(parent, dest, XPath, get):
    """dest.spacing <- width of the last w:tblCellSpacing child, else inherit."""
    spacing = inherit
    for elem in XPath('./w:tblCellSpacing')(parent):
        spacing = _read_width(elem, get)
    dest.spacing = spacing
def read_float(parent, dest, XPath, get):
    """dest.float <- attribute map (local names) of the last w:tblpPr
    (floating table position) element, else inherit."""
    pos = inherit
    for elem in XPath('./w:tblpPr')(parent):
        pos = {key.rpartition('}')[-1]: value for key, value in iteritems(elem.attrib)}
    dest.float = pos
def read_indent(parent, dest, XPath, get):
    """dest.indent <- width of the last w:tblInd child, else inherit."""
    indent = inherit
    for elem in XPath('./w:tblInd')(parent):
        indent = _read_width(elem, get)
    dest.indent = indent
# Border elements also carry the inter-cell rules, besides the outer edges
border_edges = ('left', 'top', 'right', 'bottom', 'insideH', 'insideV')


def read_borders(parent, dest, XPath, get):
    # w:tblBorders on a table, w:tcBorders on a cell
    name = 'tblBorders' if parent.tag.endswith('}tblPr') else 'tcBorders'
    read_border(parent, dest, XPath, get, border_edges, name)
def read_height(parent, dest, XPath, get):
    """dest.height <- (hRule, value) of the last w:trHeight whose rule is
    recognized (auto/atLeast/exact), else inherit."""
    height = inherit
    for rh in XPath('./w:trHeight')(parent):
        rule = get(rh, 'w:hRule', 'auto')
        if rule in {'auto', 'atLeast', 'exact'}:
            height = (rule, get(rh, 'w:val'))
    dest.height = height
def read_vertical_align(parent, dest, XPath, get):
    """dest.vertical_align <- CSS vertical-align for the last w:vAlign
    element (unknown values map to 'middle'), else inherit."""
    mapping = {'center': 'middle', 'top': 'top', 'bottom': 'bottom'}
    align = inherit
    for va in XPath('./w:vAlign')(parent):
        align = mapping.get(get(va, 'w:val'), 'middle')
    dest.vertical_align = align
def read_col_span(parent, dest, XPath, get):
    """dest.col_span <- integer value of w:gridSpan, or inherit when
    absent or malformed."""
    span = inherit
    for gs in XPath('./w:gridSpan')(parent):
        try:
            span = int(get(gs, 'w:val'))
        except (TypeError, ValueError):
            continue
    dest.col_span = span
def read_merge(parent, dest, XPath, get):
    """Read horizontal/vertical merge flags (w:hMerge, w:vMerge) onto dest;
    a merge element without a value means 'continue'."""
    for prop in ('hMerge', 'vMerge'):
        val = inherit
        for m in XPath('./w:%s' % prop)(parent):
            val = get(m, 'w:val', 'continue')
        setattr(dest, prop, val)
def read_band_size(parent, dest, XPath, get):
    """dest.col_band_size / dest.row_band_size <- w:tblStyleColBandSize /
    w:tblStyleRowBandSize values (defaulting to 1)."""
    for kind in ('Col', 'Row'):
        size = 1
        for elem in XPath('./w:tblStyle%sBandSize' % kind)(parent):
            try:
                size = int(get(elem, 'w:val'))
            except (TypeError, ValueError):
                continue
        setattr(dest, '%s_band_size' % kind.lower(), size)
def read_look(parent, dest, XPath, get):
    """dest.look <- w:tblLook conditional-formatting bitmask, parsed as a
    hexadecimal string (default 0)."""
    look = 0
    for elem in XPath('./w:tblLook')(parent):
        try:
            look = int(get(elem, 'w:val'), 16)
        except (ValueError, TypeError):
            continue
    dest.look = look
# }}}
def clone(style):
    """Return a copy of *style* (same class, same namespace, explicit
    properties copied via update()), or None when copying is impossible."""
    if style is None:
        return None
    try:
        copied = type(style)(style.namespace)
    except TypeError:
        # Constructor signature does not match the expected (namespace) form
        return None
    copied.update(style)
    return copied
class Style:
    """Shared behavior for table, row and cell style objects: property
    merging, bidi mirroring and CSS conversion helpers. Subclasses define
    ``all_properties`` and the attributes these helpers read."""

    is_bidi = False

    def update(self, other):
        # Copy every property that *other* explicitly defines (not inherit)
        for prop in self.all_properties:
            val = getattr(other, prop)
            if val is not inherit:
                setattr(self, prop, val)

    def apply_bidi(self):
        self.is_bidi = True

    def convert_spacing(self):
        css = {}
        if self.spacing is not inherit:
            if self.spacing in {'auto', '0'}:
                css['border-collapse'] = 'collapse'
            else:
                css['border-collapse'] = 'separate'
                css['border-spacing'] = self.spacing
        return css

    def convert_border(self):
        css = {}
        for edge in edges:
            border_to_css(edge, self, css)
            pad = getattr(self, 'padding_%s' % edge)
            if pad is not inherit:
                css['padding-%s' % edge] = '%.3gpt' % pad
        if self.is_bidi:
            # Mirror the horizontal properties for right-to-left layout
            for prop in ('padding-%s', 'border-%s-style', 'border-%s-color', 'border-%s-width'):
                lval, rval = css.get(prop % 'left'), css.get(prop % 'right')
                if lval is not None:
                    css[prop % 'right'] = lval
                if rval is not None:
                    css[prop % 'left'] = rval
        return css
class RowStyle(Style):
    """Resolved formatting for a single table row (w:trPr)."""

    all_properties = ('height', 'cantSplit', 'hidden', 'spacing',)

    def __init__(self, namespace, trPr=None):
        self.namespace = namespace
        if trPr is None:
            # No row properties element: every property is inherited
            for prop in self.all_properties:
                setattr(self, prop, inherit)
        else:
            self.hidden = binary_property(trPr, 'hidden', namespace.XPath, namespace.get)
            self.cantSplit = binary_property(trPr, 'cantSplit', namespace.XPath, namespace.get)
            read_spacing(trPr, self, namespace.XPath, namespace.get)
            read_height(trPr, self, namespace.XPath, namespace.get)
        self._css = None

    @property
    def css(self):
        # Computed lazily and cached on first access
        if self._css is None:
            css = self._css = {}
            if self.hidden is True:
                css['display'] = 'none'
            if self.cantSplit is True:
                css['page-break-inside'] = 'avoid'
            if self.height is not inherit:
                rule, val = self.height
                if rule != 'auto':
                    # atLeast -> min-height, exact -> height; value is in twips
                    try:
                        css['min-height' if rule == 'atLeast' else 'height'] = '%.3gpt' % (int(val)/20)
                    except (ValueError, TypeError):
                        pass
            css.update(self.convert_spacing())
        return self._css
class CellStyle(Style):
    """Resolved formatting for a single table cell (w:tcPr)."""

    all_properties = ('background_color', 'cell_padding_left', 'cell_padding_right', 'cell_padding_top',
        'cell_padding_bottom', 'width', 'vertical_align', 'col_span', 'vMerge', 'hMerge', 'row_span',
    ) + tuple(k % edge for edge in border_edges for k in border_props)

    def __init__(self, namespace, tcPr=None):
        self.namespace = namespace
        if tcPr is None:
            # No cell properties element: every property is inherited
            for prop in self.all_properties:
                setattr(self, prop, inherit)
        else:
            read_borders(tcPr, self, namespace.XPath, namespace.get)
            read_shd(tcPr, self, namespace.XPath, namespace.get)
            read_padding(tcPr, self, namespace.XPath, namespace.get)
            read_cell_width(tcPr, self, namespace.XPath, namespace.get)
            read_vertical_align(tcPr, self, namespace.XPath, namespace.get)
            read_col_span(tcPr, self, namespace.XPath, namespace.get)
            read_merge(tcPr, self, namespace.XPath, namespace.get)
            # row_span is never read from XML; it is filled in later when
            # vertical merges are resolved
            self.row_span = inherit
        self._css = None

    @property
    def css(self):
        # Computed lazily and cached on first access
        if self._css is None:
            self._css = css = {}
            if self.background_color is not inherit:
                css['background-color'] = self.background_color
            if self.width not in (inherit, 'auto'):
                css['width'] = self.width
            css['vertical-align'] = 'top' if self.vertical_align is inherit else self.vertical_align
            for edge in edges:
                pad = getattr(self, 'cell_padding_%s' % edge)
                if pad not in (inherit, 'auto'):
                    css['padding-%s' % edge] = pad
                elif pad is inherit and edge in {'left', 'right'}:
                    # Word's default left/right cell margin is 115 twips
                    css['padding-%s' % edge] = '%.3gpt' % (115/20)
            # In Word, tables are apparently rendered with some default top and
            # bottom padding irrespective of the cellMargin values. Simulate
            # that here.
            for edge in ('top', 'bottom'):
                if css.get('padding-%s' % edge, '0pt') == '0pt':
                    css['padding-%s' % edge] = '0.5ex'
            css.update(self.convert_border())
        return self._css
class TableStyle(Style):
    '''
    Resolved formatting for a table as a whole (w:tblPr). When the tblPr
    element belongs to a w:style definition, the conditional-formatting
    overrides (w:tblStylePr) are also collected into self.overrides.
    '''

    all_properties = (
        'width', 'float', 'cell_padding_left', 'cell_padding_right', 'cell_padding_top',
        'cell_padding_bottom', 'margin_left', 'margin_right', 'background_color',
        'spacing', 'indent', 'overrides', 'col_band_size', 'row_band_size', 'look', 'bidi',
    ) + tuple(k % edge for edge in border_edges for k in border_props)

    def __init__(self, namespace, tblPr=None):
        self.namespace = namespace
        if tblPr is None:
            # No properties element: everything is inherited
            for p in self.all_properties:
                setattr(self, p, inherit)
        else:
            self.overrides = inherit
            self.bidi = binary_property(tblPr, 'bidiVisual', namespace.XPath, namespace.get)
            # Dispatch to the module-level read_* helpers by name
            for x in ('width', 'float', 'padding', 'shd', 'justification', 'spacing', 'indent', 'borders', 'band_size', 'look'):
                f = globals()['read_%s' % x]
                f(tblPr, self, self.namespace.XPath, self.namespace.get)
            parent = tblPr.getparent()
            if self.namespace.is_tag(parent, 'w:style'):
                # This tblPr is part of a style definition: collect the
                # conditional formatting (firstRow, band1Horz, ...) overrides
                self.overrides = {}
                for tblStylePr in self.namespace.XPath('./w:tblStylePr[@w:type]')(parent):
                    otype = self.namespace.get(tblStylePr, 'w:type')
                    orides = self.overrides[otype] = {}
                    # NOTE: tblPr is deliberately re-bound here; the outer
                    # loop variable is not used after this point
                    for tblPr in self.namespace.XPath('./w:tblPr')(tblStylePr):
                        orides['table'] = TableStyle(self.namespace, tblPr)
                    for trPr in self.namespace.XPath('./w:trPr')(tblStylePr):
                        orides['row'] = RowStyle(self.namespace, trPr)
                    for tcPr in self.namespace.XPath('./w:tcPr')(tblStylePr):
                        orides['cell'] = CellStyle(self.namespace, tcPr)
                    for pPr in self.namespace.XPath('./w:pPr')(tblStylePr):
                        orides['para'] = ParagraphStyle(self.namespace, pPr)
                    for rPr in self.namespace.XPath('./w:rPr')(tblStylePr):
                        orides['run'] = RunStyle(self.namespace, rPr)
        self._css = None

    def resolve_based_on(self, parent):
        # Fill in every still-inherited property from the parent style
        for p in self.all_properties:
            val = getattr(self, p)
            if val is inherit:
                setattr(self, p, getattr(parent, p))

    @property
    def css(self):
        # Computed lazily and cached on first access
        if self._css is None:
            c = self._css = {}
            if self.width not in (inherit, 'auto'):
                c['width'] = self.width
            for x in ('background_color', 'margin_left', 'margin_right'):
                val = getattr(self, x)
                if val is not inherit:
                    c[x.replace('_', '-')] = val
            if self.indent not in (inherit, 'auto') and self.margin_left != 'auto':
                c['margin-left'] = self.indent
            if self.float is not inherit:
                # Floating table: distances from surrounding text become margins
                for x in ('left', 'top', 'right', 'bottom'):
                    val = self.float.get('%sFromText' % x, 0)
                    try:
                        val = '%.3gpt' % (int(val) / 20)
                    except (ValueError, TypeError):
                        val = '0'
                    c['margin-%s' % x] = val
                if 'tblpXSpec' in self.float:
                    c['float'] = 'right' if self.float['tblpXSpec'] in {'right', 'outside'} else 'left'
                else:
                    # assumes self.page is assigned externally by the styles
                    # machinery before css is accessed — TODO confirm
                    page = self.page
                    page_width = page.width - page.margin_left - page.margin_right
                    try:
                        x = int(self.float['tblpX']) / 20
                    except (KeyError, ValueError, TypeError):
                        x = 0
                    # Heuristic: position in the left 65% of the page floats left
                    c['float'] = 'left' if (x/page_width) < 0.65 else 'right'
            c.update(self.convert_spacing())
            if 'border-collapse' not in c:
                c['border-collapse'] = 'collapse'
            c.update(self.convert_border())
        return self._css
class Table:
    def __init__(self, namespace, tbl, styles, para_map, is_sub_table=False):
        '''
        Resolve all styling for a w:tbl element: the linked table style,
        direct formatting and conditional overrides, then walk the rows and
        cells resolving per-row, per-cell and per-paragraph styles.

        :param styles: the document Styles collection (maps style ids to styles)
        :param para_map: dict updated in place, mapping every w:p inside this table to it
        '''
        self.namespace = namespace
        self.tbl = tbl
        self.styles = styles
        self.is_sub_table = is_sub_table
        # Read Table Style
        style = {'table':TableStyle(self.namespace)}
        for tblPr in self.namespace.XPath('./w:tblPr')(tbl):
            for ts in self.namespace.XPath('./w:tblStyle[@w:val]')(tblPr):
                # Merge in the linked (named) table style first, lowest priority
                style_id = self.namespace.get(ts, 'w:val')
                s = styles.get(style_id)
                if s is not None:
                    if s.table_style is not None:
                        style['table'].update(s.table_style)
                    if s.paragraph_style is not None:
                        if 'paragraph' in style:
                            style['paragraph'].update(s.paragraph_style)
                        else:
                            style['paragraph'] = s.paragraph_style
                    if s.character_style is not None:
                        if 'run' in style:
                            style['run'].update(s.character_style)
                        else:
                            style['run'] = s.character_style
            # Direct formatting on the table overrides the named style
            style['table'].update(TableStyle(self.namespace, tblPr))
        self.table_style, self.paragraph_style = style['table'], style.get('paragraph', None)
        self.run_style = style.get('run', None)
        self.overrides = self.table_style.overrides
        if self.overrides is inherit:
            self.overrides = {}
        if 'wholeTable' in self.overrides and 'table' in self.overrides['wholeTable']:
            # The wholeTable conditional formatting always applies
            self.table_style.update(self.overrides['wholeTable']['table'])
        self.style_map = {}
        self.paragraphs = []
        self.cell_map = []
        rows = self.namespace.XPath('./w:tr')(tbl)
        for r, tr in enumerate(rows):
            overrides = self.get_overrides(r, None, len(rows), None)
            self.resolve_row_style(tr, overrides)
            cells = self.namespace.XPath('./w:tc')(tr)
            self.cell_map.append([])
            for c, tc in enumerate(cells):
                overrides = self.get_overrides(r, c, len(rows), len(cells))
                self.resolve_cell_style(tc, overrides, r, c, len(rows), len(cells))
                self.cell_map[-1].append(tc)
                for p in self.namespace.XPath('./w:p')(tc):
                    para_map[p] = self
                    self.paragraphs.append(p)
                    self.resolve_para_style(p, overrides)
        self.handle_merged_cells()
        # Only tables that are direct children of this table's cells; deeper
        # nesting is handled recursively by the sub-tables themselves
        self.sub_tables = {x:Table(namespace, x, styles, para_map, is_sub_table=True) for x in self.namespace.XPath('./w:tr/w:tc/w:tbl')(tbl)}
@property
def bidi(self):
return self.table_style.bidi is True
def override_allowed(self, name):
'Check if the named override is allowed by the tblLook element'
if name.endswith('Cell') or name == 'wholeTable':
return True
look = self.table_style.look
if (look & 0x0020 and name == 'firstRow') or (look & 0x0040 and name == 'lastRow') or \
(look & 0x0080 and name == 'firstCol') or (look & 0x0100 and name == 'lastCol'):
return True
if name.startswith('band'):
if name.endswith('Horz'):
return not bool(look & 0x0200)
if name.endswith('Vert'):
return not bool(look & 0x0400)
return False
def get_overrides(self, r, c, num_of_rows, num_of_cols_in_row):
'List of possible overrides for the given para'
overrides = ['wholeTable']
def divisor(m, n):
return (m - (m % n)) // n
if c is not None:
odd_column_band = (divisor(c, self.table_style.col_band_size) % 2) == 1
overrides.append('band%dVert' % (1 if odd_column_band else 2))
odd_row_band = (divisor(r, self.table_style.row_band_size) % 2) == 1
overrides.append('band%dHorz' % (1 if odd_row_band else 2))
# According to the OOXML spec columns should have higher override
# priority than rows, but Word seems to do it the other way around.
if c is not None:
if c == 0:
overrides.append('firstCol')
if c >= num_of_cols_in_row - 1:
overrides.append('lastCol')
if r == 0:
overrides.append('firstRow')
if r >= num_of_rows - 1:
overrides.append('lastRow')
if c is not None:
if r == 0:
if c == 0:
overrides.append('nwCell')
if c == num_of_cols_in_row - 1:
overrides.append('neCell')
if r == num_of_rows - 1:
if c == 0:
overrides.append('swCell')
if c == num_of_cols_in_row - 1:
overrides.append('seCell')
return tuple(filter(self.override_allowed, overrides))
def resolve_row_style(self, tr, overrides):
rs = RowStyle(self.namespace)
for o in overrides:
if o in self.overrides:
ovr = self.overrides[o]
ors = ovr.get('row', None)
if ors is not None:
rs.update(ors)
for trPr in self.namespace.XPath('./w:trPr')(tr):
rs.update(RowStyle(self.namespace, trPr))
if self.bidi:
rs.apply_bidi()
self.style_map[tr] = rs
def resolve_cell_style(self, tc, overrides, row, col, rows, cols_in_row):
cs = CellStyle(self.namespace)
for o in overrides:
if o in self.overrides:
ovr = self.overrides[o]
ors = ovr.get('cell', None)
if ors is not None:
cs.update(ors)
for tcPr in self.namespace.XPath('./w:tcPr')(tc):
cs.update(CellStyle(self.namespace, tcPr))
for x in edges:
p = 'cell_padding_%s' % x
val = getattr(cs, p)
if val is inherit:
setattr(cs, p, getattr(self.table_style, p))
is_inside_edge = (
(x == 'left' and col > 0) or
(x == 'top' and row > 0) or
(x == 'right' and col < cols_in_row - 1) or
(x == 'bottom' and row < rows -1)
)
inside_edge = ('insideH' if x in {'top', 'bottom'} else 'insideV') if is_inside_edge else None
for prop in border_props:
if not prop.startswith('border'):
continue
eprop = prop % x
iprop = (prop % inside_edge) if inside_edge else None
val = getattr(cs, eprop)
if val is inherit and iprop is not None:
# Use the insideX borders if the main cell borders are not
# specified
val = getattr(cs, iprop)
if val is inherit:
val = getattr(self.table_style, iprop)
if not is_inside_edge and val == 'none':
# Cell borders must override table borders even when the
# table border is not null and the cell border is null.
val = 'hidden'
setattr(cs, eprop, val)
if self.bidi:
cs.apply_bidi()
self.style_map[tc] = cs
def resolve_para_style(self, p, overrides):
text_styles = [clone(self.paragraph_style), clone(self.run_style)]
for o in overrides:
if o in self.overrides:
ovr = self.overrides[o]
for i, name in enumerate(('para', 'run')):
ops = ovr.get(name, None)
if ops is not None:
if text_styles[i] is None:
text_styles[i] = ops
else:
text_styles[i].update(ops)
self.style_map[p] = text_styles
def handle_merged_cells(self):
if not self.cell_map:
return
# Handle vMerge
max_col_num = max(len(r) for r in self.cell_map)
for c in range(max_col_num):
cells = [row[c] if c < len(row) else None for row in self.cell_map]
runs = [[]]
for cell in cells:
try:
s = self.style_map[cell]
except KeyError: # cell is None
s = CellStyle(self.namespace)
if s.vMerge == 'restart':
runs.append([cell])
elif s.vMerge == 'continue':
runs[-1].append(cell)
else:
runs.append([])
for run in runs:
if len(run) > 1:
self.style_map[run[0]].row_span = len(run)
for tc in run[1:]:
tc.getparent().remove(tc)
# Handle hMerge
for cells in self.cell_map:
runs = [[]]
for cell in cells:
try:
s = self.style_map[cell]
except KeyError: # cell is None
s = CellStyle(self.namespace)
if s.col_span is not inherit:
runs.append([])
continue
if s.hMerge == 'restart':
runs.append([cell])
elif s.hMerge == 'continue':
runs[-1].append(cell)
else:
runs.append([])
for run in runs:
if len(run) > 1:
self.style_map[run[0]].col_span = len(run)
for tc in run[1:]:
tc.getparent().remove(tc)
def __iter__(self):
yield from self.paragraphs
for t in itervalues(self.sub_tables):
yield from t
def apply_markup(self, rmap, page, parent=None):
table = TABLE('\n\t\t')
if self.bidi:
table.set('dir', 'rtl')
self.table_style.page = page
style_map = {}
if parent is None:
try:
first_para = rmap[next(iter(self))]
except StopIteration:
return
parent = first_para.getparent()
idx = parent.index(first_para)
parent.insert(idx, table)
else:
parent.append(table)
for row in self.namespace.XPath('./w:tr')(self.tbl):
tr = TR('\n\t\t\t')
style_map[tr] = self.style_map[row]
tr.tail = '\n\t\t'
table.append(tr)
for tc in self.namespace.XPath('./w:tc')(row):
td = TD()
style_map[td] = s = self.style_map[tc]
if s.col_span is not inherit:
td.set('colspan', str(s.col_span))
if s.row_span is not inherit:
td.set('rowspan', str(s.row_span))
td.tail = '\n\t\t\t'
tr.append(td)
for x in self.namespace.XPath('./w:p|./w:tbl')(tc):
if x.tag.endswith('}p'):
td.append(rmap[x])
else:
self.sub_tables[x].apply_markup(rmap, page, parent=td)
if len(tr):
tr[-1].tail = '\n\t\t'
if len(table):
table[-1].tail = '\n\t'
table_style = self.table_style.css
if table_style:
table.set('class', self.styles.register(table_style, 'table'))
for elem, style in iteritems(style_map):
css = style.css
if css:
elem.set('class', self.styles.register(css, elem.tag))
class Tables:
    '''Registry of all top-level tables in a document. Tracks which
    paragraphs belong to which table and applies table markup in bulk.'''

    def __init__(self, namespace):
        self.namespace = namespace
        self.tables = []
        self.para_map = {}
        self.sub_tables = set()

    def register(self, tbl, styles):
        '''Create a Table for tbl unless it is a nested table already
        owned by a previously registered parent.'''
        if tbl in self.sub_tables:
            return
        table = Table(self.namespace, tbl, styles, self.para_map)
        self.tables.append(table)
        self.sub_tables |= set(table.sub_tables)

    def apply_markup(self, object_map, page_map):
        '''Generate HTML markup for every registered table.'''
        reverse_map = {html: docx for docx, html in iteritems(object_map)}
        for table in self.tables:
            table.apply_markup(reverse_map, page_map[table.tbl])

    def _styles_for(self, p):
        # (paragraph_style, run_style) pair for p, or None if p is not
        # inside any table
        table = self.para_map.get(p, None)
        if table is None:
            return None
        return table.style_map.get(p, (None, None))

    def para_style(self, p):
        '''The table-level paragraph style for p, if p is in a table.'''
        styles = self._styles_for(p)
        if styles is not None:
            return styles[0]

    def run_style(self, p):
        '''The table-level run style for p, if p is in a table.'''
        styles = self._styles_for(p)
        if styles is not None:
            return styles[1]
| 25,751 | Python | .py | 599 | 30.540902 | 142 | 0.517423 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,562 | names.py | kovidgoyal_calibre/src/calibre/ebooks/docx/names.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from lxml.etree import XPath as X
from calibre.utils.filenames import ascii_text
from polyglot.builtins import iteritems
# Names {{{
# Relationship-type URIs for the parts of a DOCX package (transitional
# OOXML flavor, as written by Word)
TRANSITIONAL_NAMES = {
    'DOCUMENT' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/officeDocument',
    'DOCPROPS' : 'http://schemas.openxmlformats.org/package/2006/relationships/metadata/core-properties',
    'APPPROPS' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/extended-properties',
    'STYLES' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/styles',
    'NUMBERING' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/numbering',
    'FONTS' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/fontTable',
    'EMBEDDED_FONT' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/font',
    'IMAGES' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/image',
    'LINKS' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/hyperlink',
    'FOOTNOTES' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/footnotes',
    'ENDNOTES' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/endnotes',
    'THEMES' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/theme',
    'SETTINGS' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/settings',
    'WEB_SETTINGS' : 'http://schemas.openxmlformats.org/officeDocument/2006/relationships/webSettings',
}
# Strict OOXML uses purl.oclc.org URIs in place of schemas.openxmlformats.org
STRICT_NAMES = {
    k:v.replace('http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument')
    for k, v in iteritems(TRANSITIONAL_NAMES)
}
# XML namespace prefixes used in XPath expressions throughout the DOCX code
TRANSITIONAL_NAMESPACES = {
    'mo': 'http://schemas.microsoft.com/office/mac/office/2008/main',
    'o': 'urn:schemas-microsoft-com:office:office',
    've': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    'mc': 'http://schemas.openxmlformats.org/markup-compatibility/2006',
    # Text Content
    'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main',
    'w10': 'urn:schemas-microsoft-com:office:word',
    'wne': 'http://schemas.microsoft.com/office/word/2006/wordml',
    'xml': 'http://www.w3.org/XML/1998/namespace',
    # Drawing
    'a': 'http://schemas.openxmlformats.org/drawingml/2006/main',
    'a14': 'http://schemas.microsoft.com/office/drawing/2010/main',
    'm': 'http://schemas.openxmlformats.org/officeDocument/2006/math',
    'mv': 'urn:schemas-microsoft-com:mac:vml',
    'pic': 'http://schemas.openxmlformats.org/drawingml/2006/picture',
    'v': 'urn:schemas-microsoft-com:vml',
    'wp': 'http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing',
    # Properties (core and extended)
    'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',
    'dc': 'http://purl.org/dc/elements/1.1/',
    'ep': 'http://schemas.openxmlformats.org/officeDocument/2006/extended-properties',
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    # Content Types
    'ct': 'http://schemas.openxmlformats.org/package/2006/content-types',
    # Package Relationships
    'r': 'http://schemas.openxmlformats.org/officeDocument/2006/relationships',
    'pr': 'http://schemas.openxmlformats.org/package/2006/relationships',
    # Dublin Core document properties
    'dcmitype': 'http://purl.org/dc/dcmitype/',
    'dcterms': 'http://purl.org/dc/terms/',
    # SVG embeds
    'asvg': 'http://schemas.microsoft.com/office/drawing/2016/SVG/main',
}
# Strict-flavor namespace map, derived by rewriting the transitional URIs
STRICT_NAMESPACES = {
    k:v.replace(
        'http://schemas.openxmlformats.org/officeDocument/2006', 'http://purl.oclc.org/ooxml/officeDocument').replace(
        'http://schemas.openxmlformats.org/wordprocessingml/2006', 'http://purl.oclc.org/ooxml/wordprocessingml').replace(
        'http://schemas.openxmlformats.org/drawingml/2006', 'http://purl.oclc.org/ooxml/drawingml')
    for k, v in iteritems(TRANSITIONAL_NAMESPACES)
}
# Extension URIs used inside a:extLst (see pic_to_img in images.py)
SVG_BLIP_URI = '{96DAC541-7B7A-43D3-8B79-37D633B846F1}'
USE_LOCAL_DPI_URI = '{28A0092B-C50C-407E-A947-70E740481C1C}'
# }}}
def barename(x):
    '''Strip the Clark-notation namespace prefix, e.g. "{ns}tag" -> "tag".'''
    return x.split('}')[-1]
def XML(x):
    '''Qualify x with the xml namespace in Clark notation.'''
    return '{%s}%s' % (TRANSITIONAL_NAMESPACES['xml'], x)
def generate_anchor(name, existing):
    '''Derive an HTML-safe anchor id from name that does not collide with
    any id in existing, appending _1, _2, ... as needed.'''
    base = 'id_' + re.sub(r'[^0-9a-zA-Z_]', '', ascii_text(name)).lstrip('_')
    candidate = base
    counter = 1
    while candidate in existing:
        candidate = '%s_%d' % (base, counter)
        counter += 1
    return candidate
class DOCXNamespace:
    '''Namespace-aware helpers (XPath compilation with caching, tag
    expansion, attribute access) for either transitional or strict OOXML
    documents.'''

    def __init__(self, transitional=True):
        self.xpath_cache = {}
        if transitional:
            src_ns, src_names = TRANSITIONAL_NAMESPACES, TRANSITIONAL_NAMES
        else:
            src_ns, src_names = STRICT_NAMESPACES, STRICT_NAMES
        self.namespaces = src_ns.copy()
        self.names = src_names.copy()

    def XPath(self, expr):
        '''Compile expr with this document's namespaces, caching the result.'''
        try:
            return self.xpath_cache[expr]
        except KeyError:
            compiled = X(expr, namespaces=self.namespaces)
            self.xpath_cache[expr] = compiled
            return compiled

    def is_tag(self, x, q):
        '''True if element (or tag string) x matches the prefixed name q.'''
        tag = getattr(x, 'tag', x)
        prefix, _, local = q.partition(':')
        return '{%s}%s' % (self.namespaces.get(prefix, None), local) == tag

    def expand(self, name, sep=':'):
        '''Expand "prefix:tag" to Clark notation; unprefixed names pass through.'''
        prefix, _, local = name.partition(sep)
        if prefix and local:
            local = '{%s}%s' % (self.namespaces[prefix], local)
        return local or prefix

    def get(self, x, attr, default=None):
        '''Read a namespaced attribute ("w:val" style) from element x.'''
        expanded = self.expand(attr)
        return x.attrib.get(expanded, default)

    def ancestor(self, elem, name):
        '''Nearest ancestor of elem matching name, or None.'''
        matches = self.XPath('ancestor::%s[1]' % name)(elem)
        return matches[0] if matches else None

    def children(self, elem, *args):
        '''Direct children of elem matching any of the given names.'''
        expr = '|'.join('child::%s' % a for a in args)
        return self.XPath(expr)(elem)

    def descendants(self, elem, *args):
        '''All descendants of elem matching any of the given names.'''
        expr = '|'.join('descendant::%s' % a for a in args)
        return self.XPath(expr)(elem)

    def makeelement(self, root, tag, append=True, **attrs):
        '''Create a namespaced child of root; attribute names use "_" as the
        prefix separator since ":" is not valid in Python keywords.'''
        expanded_attrs = {self.expand(k, sep='_'): v for k, v in iteritems(attrs)}
        elem = root.makeelement(self.expand(tag), **expanded_attrs)
        if append:
            root.append(elem)
        return elem
| 6,226 | Python | .py | 122 | 45.163934 | 122 | 0.682132 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,563 | block_styles.py | kovidgoyal_calibre/src/calibre/ebooks/docx/block_styles.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
from collections import OrderedDict
from polyglot.builtins import iteritems
class Inherit:
    '''Sentinel marking a style property whose value should be inherited
    from a parent style. Instances are equal only to themselves, hash by
    identity, and order as greater-or-equal to every other value.'''

    def __eq__(self, other):
        return other is self

    def __hash__(self):
        return id(self)

    def __lt__(self, other):
        # never strictly less than anything
        return False

    def __gt__(self, other):
        # strictly greater than everything except itself
        return other is not self

    def __ge__(self, other):
        # follows from __lt__ always being False
        return True

    def __le__(self, other):
        # only <= itself
        return self is other


# The shared sentinel instance used throughout the DOCX style code
inherit = Inherit()
def binary_property(parent, name, XPath, get):
    '''Parse the w:name child of parent as an OOXML boolean toggle.

    Returns the inherit sentinel when the element is absent; otherwise True
    when w:val is one of the "on" spellings (defaulting to on when the
    attribute is missing), False for anything else.
    '''
    vals = XPath('./w:%s' % name)(parent)
    if not vals:
        return inherit
    val = get(vals[0], 'w:val', 'on')
    # membership already yields a bool; no need for `True if ... else False`
    return val in {'on', '1', 'true'}
def simple_color(col, auto='currentColor'):
    '''Convert a 6-digit OOXML hex color to CSS "#rrggbb"; anything else
    (missing, "auto", wrong length) maps to the auto fallback.'''
    if col and col != 'auto' and len(col) == 6:
        return '#' + col
    return auto
def simple_float(val, mult=1.0):
    '''float(val) scaled by mult, or None when val cannot be converted.'''
    try:
        return float(val) * mult
    except (ValueError, TypeError, AttributeError, KeyError):
        return None
def twips(val, mult=0.05):
    ''' Parse val as either a pure number representing twentieths of a point or a number followed by the suffix pt, representing pts.'''
    try:
        return float(val) * mult
    except (ValueError, TypeError, AttributeError, KeyError):
        # A "pt" suffix is only honoured on the first (twips-scaled) attempt
        if val and val.endswith('pt') and mult == 0.05:
            return twips(val[:-2], mult=1.0)
        return None
# Map OOXML border style names (w:val on border elements) to the closest
# CSS border-style keyword
LINE_STYLES = { # {{{
    'basicBlackDashes': 'dashed',
    'basicBlackDots': 'dotted',
    'basicBlackSquares': 'dashed',
    'basicThinLines': 'solid',
    'dashDotStroked': 'groove',
    'dashed': 'dashed',
    'dashSmallGap': 'dashed',
    'dotDash': 'dashed',
    'dotDotDash': 'dashed',
    'dotted': 'dotted',
    'double': 'double',
    'inset': 'inset',
    'nil': 'none',
    'none': 'none',
    'outset': 'outset',
    'single': 'solid',
    'thick': 'solid',
    'thickThinLargeGap': 'double',
    'thickThinMediumGap': 'double',
    'thickThinSmallGap' : 'double',
    'thinThickLargeGap': 'double',
    'thinThickMediumGap': 'double',
    'thinThickSmallGap': 'double',
    'thinThickThinLargeGap': 'double',
    'thinThickThinMediumGap': 'double',
    'thinThickThinSmallGap': 'double',
    'threeDEmboss': 'ridge',
    'threeDEngrave': 'groove',
    'triple': 'double',
} # }}}
# Read from XML {{{
# Per-edge style attribute name templates ("%s" is the edge name) and the
# set of edges a paragraph border can carry ("between" separates adjacent
# paragraphs that share the same border)
border_props = ('padding_%s', 'border_%s_width', 'border_%s_style', 'border_%s_color')
border_edges = ('left', 'top', 'right', 'bottom', 'between')
def read_single_border(parent, edge, XPath, get):
    '''Read the w:<edge> border element under parent and return a dict of
    the border_props templates mapped to (padding, width, style, color).
    Values that are absent or unparsable stay None.'''
    color = style = width = padding = None
    for elem in XPath('./w:%s' % edge)(parent):
        raw = get(elem, 'w:color')
        if raw is not None:
            color = simple_color(raw)
        raw = get(elem, 'w:val')
        if raw is not None:
            style = LINE_STYLES.get(raw, 'solid')
        raw = get(elem, 'w:space')
        if raw is not None:
            try:
                padding = float(raw)
            except (ValueError, TypeError):
                pass
        raw = get(elem, 'w:sz')
        if raw is not None:
            # we dont care about art borders (they are only used for page borders)
            try:
                # w:sz is in eighths of a point; clamp to a sane range
                width = min(96, max(2, float(raw))) / 8
            except (ValueError, TypeError):
                pass
    return dict(zip(border_props, (padding, width, style, color)))
def read_border(parent, dest, XPath, get, border_edges=border_edges, name='pBdr'):
    '''Read all border edges from the w:<name> child of parent and set the
    corresponding attributes on dest, defaulting each to inherit.'''
    vals = {}
    for edge in border_edges:
        for prop in border_props:
            vals[prop % edge] = inherit
    for border in XPath('./w:' + name)(parent):
        for edge in border_edges:
            edge_vals = read_single_border(border, edge, XPath, get)
            for prop, val in iteritems(edge_vals):
                if val is not None:
                    vals[prop % edge] = val
    for key, val in iteritems(vals):
        setattr(dest, key, val)
def border_to_css(edge, style, css):
    '''Copy the resolved border properties for edge from style into the
    css dict, skipping anything left as inherit.'''
    bs = getattr(style, 'border_%s_style' % edge)
    bc = getattr(style, 'border_%s_color' % edge)
    bw = getattr(style, 'border_%s_width' % edge)
    if isinstance(bw, numbers.Number):
        # WebKit needs at least 1pt to render borders and 3pt to render double borders
        minimum = 3 if bs == 'double' else 1
        if bw < minimum:
            bw = minimum
    if bs is not inherit and bs is not None:
        css['border-%s-style' % edge] = bs
    if bc is not inherit and bc is not None:
        css['border-%s-color' % edge] = bc
    if bw is not inherit and bw is not None:
        if isinstance(bw, numbers.Number):
            bw = '%.3gpt' % bw
        css['border-%s-width' % edge] = bw
def read_indent(parent, dest, XPath, get):
    '''Parse <w:ind> into margin_left, margin_right and text_indent on dest.

    Character-count attributes (*Chars) take precedence over twips
    attributes and produce em units; twips produce pt units. Unset values
    remain inherit.
    '''
    padding_left = padding_right = text_indent = inherit
    for indent in XPath('./w:ind')(parent):
        l, lc = get(indent, 'w:left'), get(indent, 'w:leftChars')
        pl = simple_float(lc, 0.01) if lc is not None else simple_float(l, 0.05) if l is not None else None
        if pl is not None:
            padding_left = '{:.3g}{}'.format(pl, 'em' if lc is not None else 'pt')
        r, rc = get(indent, 'w:right'), get(indent, 'w:rightChars')
        pr = simple_float(rc, 0.01) if rc is not None else simple_float(r, 0.05) if r is not None else None
        if pr is not None:
            padding_right = '{:.3g}{}'.format(pr, 'em' if rc is not None else 'pt')
        # hanging indents are expressed as negative text-indent; firstLine
        # attributes are positive. hanging wins over firstLine.
        h, hc = get(indent, 'w:hanging'), get(indent, 'w:hangingChars')
        fl, flc = get(indent, 'w:firstLine'), get(indent, 'w:firstLineChars')
        h = h if h is None else '-'+h
        hc = hc if hc is None else '-'+hc
        ti = (simple_float(hc, 0.01) if hc is not None else simple_float(h, 0.05) if h is not None else
              simple_float(flc, 0.01) if flc is not None else simple_float(fl, 0.05) if fl is not None else None)
        if ti is not None:
            text_indent = '{:.3g}{}'.format(ti, 'em' if hc is not None or (h is None and flc is not None) else 'pt')
    setattr(dest, 'margin_left', padding_left)
    setattr(dest, 'margin_right', padding_right)
    setattr(dest, 'text_indent', text_indent)
def read_justification(parent, dest, XPath, get):
    '''Parse <w:jc> into dest.text_align.

    "both"/"distribute" and the thai/kashida variants map to CSS justify;
    left/center/right/start/end pass through unchanged (start/end are
    valid CSS logical values). Unset values remain inherit.
    '''
    ans = inherit
    for jc in XPath('./w:jc[@w:val]')(parent):
        val = get(jc, 'w:val')
        if not val:
            continue
        if val in {'both', 'distribute'} or 'thai' in val or 'kashida' in val:
            ans = 'justify'
        elif val in {'left', 'center', 'right', 'start', 'end'}:
            ans = val
        # NOTE: a second `elif val in {'start', 'end'}` branch previously
        # followed here, but was unreachable since the set above already
        # matches those values; it has been removed (no behavior change).
    setattr(dest, 'text_align', ans)
def read_spacing(parent, dest, XPath, get):
    '''Parse <w:spacing> into margin_top, margin_bottom and line_height on dest.

    *Lines attributes (hundredths of a line) win over twips attributes and
    produce ex units; autospacing suppresses the value entirely.
    line_height is in pt for exact/atLeast rules, otherwise a unitless
    multiple. Unset values remain inherit.
    '''
    padding_top = padding_bottom = line_height = inherit
    for s in XPath('./w:spacing')(parent):
        # space after the paragraph -> margin-bottom
        a, al, aa = get(s, 'w:after'), get(s, 'w:afterLines'), get(s, 'w:afterAutospacing')
        pb = None if aa in {'on', '1', 'true'} else simple_float(al, 0.02) if al is not None else simple_float(a, 0.05) if a is not None else None
        if pb is not None:
            padding_bottom = '{:.3g}{}'.format(pb, 'ex' if al is not None else 'pt')
        # space before the paragraph -> margin-top
        b, bl, bb = get(s, 'w:before'), get(s, 'w:beforeLines'), get(s, 'w:beforeAutospacing')
        pt = None if bb in {'on', '1', 'true'} else simple_float(bl, 0.02) if bl is not None else simple_float(b, 0.05) if b is not None else None
        if pt is not None:
            padding_top = '{:.3g}{}'.format(pt, 'ex' if bl is not None else 'pt')
        # line spacing: w:line is twips for exact/atLeast, 240ths of a line otherwise
        l, lr = get(s, 'w:line'), get(s, 'w:lineRule', 'auto')
        if l is not None:
            lh = simple_float(l, 0.05) if lr in {'exact', 'atLeast'} else simple_float(l, 1/240.0)
            if lh is not None:
                line_height = '{:.3g}{}'.format(lh, 'pt' if lr in {'exact', 'atLeast'} else '')
    setattr(dest, 'margin_top', padding_top)
    setattr(dest, 'margin_bottom', padding_bottom)
    setattr(dest, 'line_height', line_height)
def read_shd(parent, dest, XPath, get):
    '''Parse the <w:shd> fill color into dest.background_color; an "auto"
    fill maps to transparent, absence to inherit.'''
    ans = inherit
    for shd in XPath('./w:shd[@w:fill]')(parent):
        fill = get(shd, 'w:fill')
        if fill:
            ans = simple_color(fill, auto='transparent')
    setattr(dest, 'background_color', ans)
def read_numbering(parent, dest, XPath, get):
    '''Parse <w:numPr> into dest.numbering_id and dest.numbering_level,
    leaving either as inherit when not specified.'''
    lvl = num_id = inherit
    for num_pr in XPath('./w:numPr')(parent):
        for ilvl in XPath('./w:ilvl[@w:val]')(num_pr):
            try:
                lvl = int(get(ilvl, 'w:val'))
            except (ValueError, TypeError):
                pass
        for num in XPath('./w:numId[@w:val]')(num_pr):
            num_id = get(num, 'w:val')
    setattr(dest, 'numbering_id', num_id)
    setattr(dest, 'numbering_level', lvl)
class Frame:
    '''Positioning and size properties of a Word text frame (w:framePr),
    convertible to floated-element CSS via css().'''

    all_attributes = ('drop_cap', 'h', 'w', 'h_anchor', 'h_rule', 'v_anchor', 'wrap',
                      'h_space', 'v_space', 'lines', 'x_align', 'y_align', 'x', 'y')

    def __init__(self, fp, XPath, get):
        # XPath is unused but kept for signature parity with the read_* helpers
        def pts(attr, default):
            # Frame dimension attributes are in twentieths of a point;
            # fall back to default on missing/unparsable values
            try:
                return int(get(fp, attr)) / 20
            except (ValueError, TypeError):
                return default

        self.drop_cap = get(fp, 'w:dropCap', 'none')
        self.h = pts('w:h', 0)
        self.w = pts('w:w', None)
        self.x = pts('w:x', 0)
        self.y = pts('w:y', 0)
        self.h_space = pts('w:hSpace', 0)
        self.v_space = pts('w:vSpace', 0)
        self.h_anchor = get(fp, 'w:hAnchor', 'page')
        self.h_rule = get(fp, 'w:hRule', 'auto')
        self.v_anchor = get(fp, 'w:vAnchor', 'page')
        self.wrap = get(fp, 'w:wrap', 'around')
        self.x_align = get(fp, 'w:xAlign')
        self.y_align = get(fp, 'w:yAlign')
        # w:lines (drop cap height in lines) is a plain count, not twips
        try:
            self.lines = int(get(fp, 'w:lines'))
        except (ValueError, TypeError):
            self.lines = 1

    def css(self, page):
        '''CSS dict rendering this frame as a floated block; page supplies
        the page width used to pick a float side.'''
        is_dropcap = self.drop_cap in {'drop', 'margin'}
        ans = {'overflow': 'hidden'}
        if is_dropcap:
            ans['float'] = 'left'
            ans['margin'] = '0'
            ans['padding-right'] = '0.2em'
        else:
            if self.h_rule != 'auto':
                t = 'min-height' if self.h_rule == 'atLeast' else 'height'
                ans[t] = '%.3gpt' % self.h
            if self.w is not None:
                ans['width'] = '%.3gpt' % self.w
            ans['padding-top'] = ans['padding-bottom'] = '%.3gpt' % self.v_space
            if self.wrap not in {None, 'none'}:
                ans['padding-left'] = ans['padding-right'] = '%.3gpt' % self.h_space
            if self.x_align is None:
                # No explicit alignment: float towards the nearer page edge
                fl = 'left' if self.x/page.width < 0.5 else 'right'
            else:
                fl = 'right' if self.x_align == 'right' else 'left'
            ans['float'] = fl
        return ans

    def __eq__(self, other):
        for x in self.all_attributes:
            if getattr(other, x, inherit) != getattr(self, x):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)
def read_frame(parent, dest, XPath, get):
    '''Parse <w:framePr> into a Frame on dest.frame, or inherit when absent.'''
    ans = inherit
    for fp in XPath('./w:framePr')(parent):
        ans = Frame(fp, XPath, get)
    setattr(dest, 'frame', ans)
# }}}
class ParagraphStyle:
    '''Resolved paragraph-level formatting read from a <w:pPr> element.

    Every property starts as the inherit sentinel and is filled in either
    from the XML or by resolve_based_on()/update() when style chains are
    flattened. The css property renders the resolved values as a CSS dict.
    '''
    all_properties = (
        'adjustRightInd', 'autoSpaceDE', 'autoSpaceDN', 'bidi',
        'contextualSpacing', 'keepLines', 'keepNext', 'mirrorIndents',
        'pageBreakBefore', 'snapToGrid', 'suppressLineNumbers',
        'suppressOverlap', 'topLinePunct', 'widowControl', 'wordWrap',
        # Border margins padding
        'border_left_width', 'border_left_style', 'border_left_color', 'padding_left',
        'border_top_width', 'border_top_style', 'border_top_color', 'padding_top',
        'border_right_width', 'border_right_style', 'border_right_color', 'padding_right',
        'border_bottom_width', 'border_bottom_style', 'border_bottom_color', 'padding_bottom',
        'border_between_width', 'border_between_style', 'border_between_color', 'padding_between',
        'margin_left', 'margin_top', 'margin_right', 'margin_bottom',
        # Misc.
        'text_indent', 'text_align', 'line_height', 'background_color',
        'numbering_id', 'numbering_level', 'font_family', 'font_size', 'color', 'frame',
        'cs_font_size', 'cs_font_family',
    )
    def __init__(self, namespace, pPr=None):
        # With pPr=None every property is the inherit sentinel (used for
        # building empty/derived styles)
        self.namespace = namespace
        self.linked_style = None
        if pPr is None:
            for p in self.all_properties:
                setattr(self, p, inherit)
        else:
            # Boolean toggle properties
            for p in (
                'adjustRightInd', 'autoSpaceDE', 'autoSpaceDN', 'bidi',
                'contextualSpacing', 'keepLines', 'keepNext', 'mirrorIndents',
                'pageBreakBefore', 'snapToGrid', 'suppressLineNumbers',
                'suppressOverlap', 'topLinePunct', 'widowControl', 'wordWrap',
            ):
                setattr(self, p, binary_property(pPr, p, namespace.XPath, namespace.get))
            # Structured properties handled by the module-level read_* functions
            for x in ('border', 'indent', 'justification', 'spacing', 'shd', 'numbering', 'frame'):
                f = read_funcs[x]
                f(pPr, self, namespace.XPath, namespace.get)
            # Reference to a named style (resolved later by the styles machinery)
            for s in namespace.XPath('./w:pStyle[@w:val]')(pPr):
                self.linked_style = namespace.get(s, 'w:val')
        self.font_family = self.font_size = self.color = self.cs_font_size = self.cs_font_family = inherit
        self._css = None
        self._border_key = None
    def update(self, other):
        '''Overlay all non-inherit properties of other onto self.'''
        for prop in self.all_properties:
            nval = getattr(other, prop)
            if nval is not inherit:
                setattr(self, prop, nval)
        if other.linked_style is not None:
            self.linked_style = other.linked_style
    def resolve_based_on(self, parent):
        '''Fill in properties still set to inherit from the parent style.'''
        for p in self.all_properties:
            val = getattr(self, p)
            if val is inherit:
                setattr(self, p, getattr(parent, p))
    @property
    def css(self):
        '''Render the resolved properties as an ordered CSS property dict
        (computed once and cached).'''
        if self._css is None:
            self._css = c = OrderedDict()
            if self.keepLines is True:
                c['page-break-inside'] = 'avoid'
            if self.pageBreakBefore is True:
                c['page-break-before'] = 'always'
            if self.keepNext is True:
                c['page-break-after'] = 'avoid'
            for edge in ('left', 'top', 'right', 'bottom'):
                border_to_css(edge, self, c)
                val = getattr(self, 'padding_%s' % edge)
                if val is not inherit:
                    c['padding-%s' % edge] = '%.3gpt' % val
                val = getattr(self, 'margin_%s' % edge)
                if val is not inherit:
                    c['margin-%s' % edge] = val
            if self.line_height not in {inherit, '1'}:
                c['line-height'] = self.line_height
            for x in ('text_indent', 'background_color', 'font_family', 'font_size', 'color'):
                val = getattr(self, x)
                if val is not inherit:
                    if x == 'font_size':
                        val = '%.3gpt' % val
                    c[x.replace('_', '-')] = val
            ta = self.text_align
            if ta is not inherit:
                # In RTL paragraphs logical left/right are mirrored
                if self.bidi is True:
                    ta = {'left':'right', 'right':'left'}.get(ta, ta)
                c['text-align'] = ta
        return self._css
    @property
    def border_key(self):
        # Tuple of every border property, used to detect paragraphs that
        # share identical borders (so they can be merged into one box)
        if self._border_key is None:
            k = []
            for edge in border_edges:
                for prop in border_props:
                    prop = prop % edge
                    k.append(getattr(self, prop))
            self._border_key = tuple(k)
        return self._border_key
    def has_identical_borders(self, other_style):
        '''True when other_style has exactly the same border properties.'''
        return self.border_key == getattr(other_style, 'border_key', None)
    def clear_borders(self):
        '''Reset all edge borders (except "between") back to inherit.'''
        for edge in border_edges[:-1]:
            for prop in ('width', 'color', 'style'):
                setattr(self, f'border_{edge}_{prop}', inherit)
    def clone_border_styles(self):
        '''Return a new empty style carrying only this style's edge borders.'''
        style = ParagraphStyle(self.namespace)
        for edge in border_edges[:-1]:
            for prop in ('width', 'color', 'style'):
                attr = f'border_{edge}_{prop}'
                setattr(style, attr, getattr(self, attr))
        return style
    def apply_between_border(self):
        '''Use the "between" border as this paragraph's bottom border.'''
        for prop in ('width', 'color', 'style'):
            setattr(self, 'border_bottom_%s' % prop, getattr(self, 'border_between_%s' % prop))
    def has_visible_border(self):
        '''True when any edge (except "between") has a non-none border with
        a width.'''
        for edge in border_edges[:-1]:
            bw, bs = getattr(self, 'border_%s_width' % edge), getattr(self, 'border_%s_style' % edge)
            if bw is not inherit and bw and bs is not inherit and bs != 'none':
                return True
        return False
read_funcs = {k[5:]:v for k, v in iteritems(globals()) if k.startswith('read_')}
| 17,257 | Python | .py | 391 | 34.452685 | 146 | 0.556767 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,564 | images.py | kovidgoyal_calibre/src/calibre/ebooks/docx/images.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import re
from lxml.html.builder import HR, IMG
from calibre import sanitize_file_name
from calibre.constants import iswindows
from calibre.ebooks.docx.names import SVG_BLIP_URI, barename
from calibre.utils.filenames import ascii_filename
from calibre.utils.img import image_to_data, resize_to_fit
from calibre.utils.imghdr import what
from polyglot.builtins import iteritems, itervalues
class LinkedImageNotFound(ValueError):
    '''Raised when an image referenced by relationship or file:// link
    cannot be located; fname records the missing path.'''

    def __init__(self, fname):
        super().__init__(fname)
        self.fname = fname
def image_filename(x):
    '''Reduce x to a safe image file name: ASCII-fold, replace anything
    outside [0-9a-zA-Z.-] with underscores, strip leading "_" and ".".'''
    safe = re.sub(r'[^0-9a-zA-Z.-]', '_', ascii_filename(x))
    return sanitize_file_name(safe.lstrip('_').lstrip('.'))
def emu_to_pt(x):
    '''Convert English Metric Units to points (12700 EMU per point).'''
    return x / 12700
def pt_to_emu(x):
    '''Convert points to English Metric Units, truncating to an integer.'''
    return int(x * 12700)
def get_image_properties(parent, XPath, get):
    '''Extract CSS-relevant properties from a wp:inline/wp:anchor element.

    Returns (css_dict, alt_text, title): css_dict may contain width/height
    in pt, display:none for hidden images, and a CSS transform string built
    from the drawing's rotation/flip attributes.
    '''
    width = height = None
    for extent in XPath('./wp:extent')(parent):
        # cx/cy are in EMU
        try:
            width = emu_to_pt(int(extent.get('cx')))
        except (TypeError, ValueError):
            pass
        try:
            height = emu_to_pt(int(extent.get('cy')))
        except (TypeError, ValueError):
            pass
    ans = {}
    if width is not None:
        ans['width'] = '%.3gpt' % width
    if height is not None:
        ans['height'] = '%.3gpt' % height
    alt = None
    title = None
    for docPr in XPath('./wp:docPr')(parent):
        alt = docPr.get('descr') or alt
        title = docPr.get('title') or title
        if docPr.get('hidden', None) in {'true', 'on', '1'}:
            ans['display'] = 'none'
    transforms = []
    for graphic in XPath('./a:graphic')(parent):
        for xfrm in XPath('descendant::a:xfrm')(graphic):
            # a:xfrm rot is in 60000ths of a degree
            rot = xfrm.get('rot')
            if rot:
                try:
                    rot = int(rot) / 60000
                except Exception:
                    rot = None
                if rot:
                    transforms.append(f'rotate({rot:g}deg)')
            fliph = xfrm.get('flipH')
            if fliph in ('1', 'true'):
                transforms.append('scaleX(-1)')
            flipv = xfrm.get('flipV')
            if flipv in ('1', 'true'):
                transforms.append('scaleY(-1)')
    if transforms:
        ans['transform'] = ' '.join(transforms)
    return ans, alt, title
def get_image_margins(elem):
    '''Return CSS padding properties built from the distL/distT/distR/distB
    attributes (image-to-text distances, in EMU) of elem.'''
    ans = {}
    for w, css in iteritems({'L':'left', 'T':'top', 'R':'right', 'B':'bottom'}):
        val = elem.get('dist%s' % w, None)
        if val is not None:
            try:
                # attribute values are strings; convert to int before
                # scaling (passing the raw string to emu_to_pt always
                # raised TypeError and silently dropped every margin)
                val = emu_to_pt(int(val))
            except (TypeError, ValueError):
                continue
            ans['padding-%s' % css] = '%.3gpt' % val
    return ans
def get_hpos(anchor, page_width, XPath, get, width_frac):
    '''Return the horizontal position of a floating image as a fraction of
    the page width (0 = left edge, 1 = right edge), adjusted by width_frac.

    Checks, in order: the relativeFrom margin anchors, an explicit
    wp:align keyword, a wp:posOffset in EMU, and finally wp:simplePos.
    Falls back to 0 (left).
    '''
    for ph in XPath('./wp:positionH')(anchor):
        rp = ph.get('relativeFrom', None)
        if rp == 'leftMargin':
            return 0 + width_frac
        if rp == 'rightMargin':
            return 1 + width_frac
        al = None
        almap = {'left':0, 'center':0.5, 'right':1}
        for align in XPath('./wp:align')(ph):
            al = almap.get(align.text)
        if al is not None:
            if rp == 'page':
                return al
            return al + width_frac
        for po in XPath('./wp:posOffset')(ph):
            try:
                pos = emu_to_pt(int(po.text))
            except (TypeError, ValueError):
                continue
            return pos/page_width + width_frac
    for sp in XPath('./wp:simplePos')(anchor):
        try:
            # x is a string attribute in EMU; convert before scaling
            # (previously the raw string was passed to emu_to_pt, which
            # always raised TypeError and made this branch a no-op)
            x = emu_to_pt(int(sp.get('x', None)))
        except (TypeError, ValueError):
            continue
        return x/page_width + width_frac
    return 0
class Images:
def __init__(self, namespace, log):
self.namespace = namespace
self.rid_map = {}
self.used = {}
self.resized = {}
self.names = set()
self.all_images = set()
self.links = []
self.log = log
def __call__(self, relationships_by_id):
self.rid_map = relationships_by_id
    def read_image_data(self, fname, base=None):
        '''Return (raw_bytes, base_name) for the image at fname.

        fname may be a file:// URL (externally linked image) or an internal
        path inside the DOCX archive. EMF wrappers are unwrapped to the
        embedded raster image when possible. Raises LinkedImageNotFound
        when the source cannot be located.
        '''
        if fname.startswith('file://'):
            src = fname[len('file://'):]
            # On windows file:///C:/... yields a leading slash to strip
            if iswindows and src and src[0] == '/':
                src = src[1:]
            if not src or not os.path.exists(src):
                raise LinkedImageNotFound(src)
            with open(src, 'rb') as rawsrc:
                raw = rawsrc.read()
        else:
            try:
                raw = self.docx.read(fname)
            except KeyError:
                raise LinkedImageNotFound(fname)
        # Pick a base name and a correct extension from the actual image data
        base = base or image_filename(fname.rpartition('/')[-1]) or 'image'
        ext = what(None, raw) or base.rpartition('.')[-1] or 'jpeg'
        if ext == 'emf':
            # For an example, see: https://bugs.launchpad.net/bugs/1224849
            self.log('Found an EMF image: %s, trying to extract embedded raster image' % fname)
            from calibre.utils.wmf.emf import emf_unwrap
            try:
                raw = emf_unwrap(raw)
            except Exception:
                self.log.exception('Failed to extract embedded raster image from EMF')
            else:
                ext = 'png'
        base = base.rpartition('.')[0]
        if not base:
            base = 'image'
        base += '.' + ext
        return raw, base
def unique_name(self, base):
exists = frozenset(itervalues(self.used))
c = 1
name = base
while name in exists:
n, e = base.rpartition('.')[0::2]
name = '%s-%d.%s' % (n, c, e)
c += 1
return name
def resize_image(self, raw, base, max_width, max_height):
resized, img = resize_to_fit(raw, max_width, max_height)
if resized:
base, ext = os.path.splitext(base)
base = base + '-%dx%d%s' % (max_width, max_height, ext)
raw = image_to_data(img, fmt=ext[1:])
return raw, base, resized
def generate_filename(self, rid, base=None, rid_map=None, max_width=None, max_height=None):
rid_map = self.rid_map if rid_map is None else rid_map
fname = rid_map[rid]
key = (fname, max_width, max_height)
ans = self.used.get(key)
if ans is not None:
return ans
raw, base = self.read_image_data(fname, base=base)
resized = False
if max_width is not None and max_height is not None:
raw, base, resized = self.resize_image(raw, base, max_width, max_height)
name = self.unique_name(base)
self.used[key] = name
if max_width is not None and max_height is not None and not resized:
okey = (fname, None, None)
if okey in self.used:
return self.used[okey]
self.used[okey] = name
with open(os.path.join(self.dest_dir, name), 'wb') as f:
f.write(raw)
self.all_images.add('images/' + name)
return name
def pic_to_img(self, pic, alt, parent, title):
XPath, get = self.namespace.XPath, self.namespace.get
name = None
link = None
for hl in XPath('descendant::a:hlinkClick[@r:id]')(parent):
link = {'id':get(hl, 'r:id')}
tgt = hl.get('tgtFrame', None)
if tgt:
link['target'] = tgt
title = hl.get('tooltip', None)
if title:
link['title'] = title
for pr in XPath('descendant::pic:cNvPr')(pic):
name = pr.get('name', None)
if name:
name = image_filename(name)
alt = pr.get('descr') or alt
for a in XPath('descendant::a:blip[@r:embed or @r:link]')(pic):
rid = get(a, 'r:embed') or get(a, 'r:link')
for asvg in XPath(f'./a:extLst/a:ext[@uri="{SVG_BLIP_URI}"]/asvg:svgBlip[@r:embed or @r:link]')(a):
svg_rid = get(asvg, 'r:embed') or get(asvg, 'r:link')
if svg_rid and svg_rid in self.rid_map:
rid = svg_rid
break
if rid and rid in self.rid_map:
try:
src = self.generate_filename(rid, name)
except LinkedImageNotFound as err:
self.log.warn('Linked image: %s not found, ignoring' % err.fname)
continue
img = IMG(src='images/%s' % src)
img.set('alt', alt or 'Image')
if title:
img.set('title', title)
if link is not None:
self.links.append((img, link, self.rid_map))
return img
def drawing_to_html(self, drawing, page):
XPath, get = self.namespace.XPath, self.namespace.get
# First process the inline pictures
for inline in XPath('./wp:inline')(drawing):
style, alt, title = get_image_properties(inline, XPath, get)
for pic in XPath('descendant::pic:pic')(inline):
ans = self.pic_to_img(pic, alt, inline, title)
if ans is not None:
if style:
ans.set('style', '; '.join(f'{k}: {v}' for k, v in iteritems(style)))
yield ans
# Now process the floats
for anchor in XPath('./wp:anchor')(drawing):
style, alt, title = get_image_properties(anchor, XPath, get)
self.get_float_properties(anchor, style, page)
for pic in XPath('descendant::pic:pic')(anchor):
ans = self.pic_to_img(pic, alt, anchor, title)
if ans is not None:
if style:
ans.set('style', '; '.join(f'{k}: {v}' for k, v in iteritems(style)))
yield ans
def pict_to_html(self, pict, page):
XPath, get = self.namespace.XPath, self.namespace.get
# First see if we have an <hr>
is_hr = len(pict) == 1 and get(pict[0], 'o:hr') in {'t', 'true'}
if is_hr:
style = {}
hr = HR()
try:
pct = float(get(pict[0], 'o:hrpct'))
except (ValueError, TypeError, AttributeError):
pass
else:
if pct > 0:
style['width'] = '%.3g%%' % pct
align = get(pict[0], 'o:hralign', 'center')
if align in {'left', 'right'}:
style['margin-left'] = '0' if align == 'left' else 'auto'
style['margin-right'] = 'auto' if align == 'left' else '0'
if style:
hr.set('style', '; '.join((f'{k}:{v}' for k, v in iteritems(style))))
yield hr
for imagedata in XPath('descendant::v:imagedata[@r:id]')(pict):
rid = get(imagedata, 'r:id')
if rid in self.rid_map:
try:
src = self.generate_filename(rid)
except LinkedImageNotFound as err:
self.log.warn('Linked image: %s not found, ignoring' % err.fname)
continue
style = get(imagedata.getparent(), 'style')
img = IMG(src='images/%s' % src)
alt = get(imagedata, 'o:title')
img.set('alt', alt or 'Image')
if 'position:absolute' in style:
img.set('style', 'display: block')
yield img
def get_float_properties(self, anchor, style, page):
XPath, get = self.namespace.XPath, self.namespace.get
if 'display' not in style:
style['display'] = 'block'
padding = get_image_margins(anchor)
width = float(style.get('width', '100pt')[:-2])
page_width = page.width - page.margin_left - page.margin_right
if page_width <= 0:
# Ignore margins
page_width = page.width
hpos = get_hpos(anchor, page_width, XPath, get, width/(2*page_width))
wrap_elem = None
dofloat = False
for child in reversed(anchor):
bt = barename(child.tag)
if bt in {'wrapNone', 'wrapSquare', 'wrapThrough', 'wrapTight', 'wrapTopAndBottom'}:
wrap_elem = child
dofloat = bt not in {'wrapNone', 'wrapTopAndBottom'}
break
if wrap_elem is not None:
padding.update(get_image_margins(wrap_elem))
wt = wrap_elem.get('wrapText', None)
hpos = 0 if wt == 'right' else 1 if wt == 'left' else hpos
if dofloat:
style['float'] = 'left' if hpos < 0.65 else 'right'
else:
ml, mr = (None, None) if hpos < 0.34 else ('auto', None) if hpos > 0.65 else ('auto', 'auto')
if ml is not None:
style['margin-left'] = ml
if mr is not None:
style['margin-right'] = mr
style.update(padding)
def to_html(self, elem, page, docx, dest_dir):
dest = os.path.join(dest_dir, 'images')
if not os.path.exists(dest):
os.mkdir(dest)
self.dest_dir, self.docx = dest, docx
if elem.tag.endswith('}drawing'):
yield from self.drawing_to_html(elem, page)
else:
yield from self.pict_to_html(elem, page)
| 13,462 | Python | .py | 321 | 30.062305 | 115 | 0.523064 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,565 | container.py | kovidgoyal_calibre/src/calibre/ebooks/docx/container.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import shutil
import sys
from lxml import etree
from calibre import guess_type, walk
from calibre.ebooks.docx import InvalidDOCX
from calibre.ebooks.docx.names import DOCXNamespace
from calibre.ebooks.metadata import authors_to_sort_string, string_to_authors
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.localization import canonicalize_lang
from calibre.utils.logging import default_log
from calibre.utils.xml_parse import safe_xml_fromstring
from calibre.utils.zipfile import ZipFile
def fromstring(raw, parser=None):
    # *parser* is accepted only for API compatibility and deliberately
    # ignored: parsing always goes through the hardened safe_xml_fromstring()
    # to avoid XML entity-expansion attacks.
    return safe_xml_fromstring(raw)
# Read metadata {{{
def read_doc_props(raw, mi, XPath):
    '''Populate the Metadata object *mi* from the core document properties XML.'''
    root = fromstring(raw)

    # Title
    title_nodes = XPath('//dc:title')(root)
    if title_nodes:
        title = title_nodes[0].text
        if title and title.strip():
            mi.title = title.strip()

    # Tags: dc:subject entries plus comma/space separated cp:keywords
    found_tags = [node.text.strip().replace(',', '_')
                  for node in XPath('//dc:subject')(root)
                  if node.text and node.text.strip()]
    for kw in XPath('//cp:keywords')(root):
        if kw.text and kw.text.strip():
            for chunk in kw.text.split():
                found_tags.extend(part.strip() for part in chunk.split(',') if part.strip())
    if found_tags:
        mi.tags = found_tags

    # Authors
    creators = []
    for node in XPath('//dc:creator')(root):
        if node.text and node.text.strip():
            creators.extend(string_to_authors(node.text))
    if creators:
        mi.authors = creators
        mi.author_sort = authors_to_sort_string(creators)

    # Comments/description
    descriptions = XPath('//dc:description')(root)
    if descriptions:
        text = etree.tostring(descriptions[0], method='text', encoding='unicode')
        text = text.replace('_x000d_', '')  # Word 2007 mangles newlines in the summary
        mi.comments = text.strip()

    # Languages
    languages = []
    for node in XPath('//dc:language')(root):
        if node.text and node.text.strip():
            code = canonicalize_lang(node.text)
            if code:
                languages.append(code)
    if languages:
        mi.languages = languages
def read_app_props(raw, mi):
    '''Read the publisher (Company element) from the extended (app) properties XML.'''
    root = fromstring(raw)
    nodes = root.xpath('//*[local-name()="Company"]')
    if nodes:
        company = nodes[0].text
        if company and company.strip():
            mi.publisher = company.strip()
def read_default_style_language(raw, mi, XPath):
    '''Fall back to the language declared in the default run properties of styles.xml.'''
    root = fromstring(raw)
    candidates = (canonicalize_lang(v) for v in
                  XPath('/w:styles/w:docDefaults/w:rPrDefault/w:rPr/w:lang/@w:val')(root))
    lang = next((l for l in candidates if l), None)
    if lang is not None:
        mi.languages = [lang]
# }}}
class DOCX:
    '''
    Access to the parts of a DOCX (OPC) package.

    Depending on *extract*, the package is either unpacked into a temporary
    directory or read directly from the ZIP container.
    '''

    def __init__(self, path_or_stream, log=None, extract=True):
        self.docx_is_transitional = True
        stream = path_or_stream if hasattr(path_or_stream, 'read') else open(path_or_stream, 'rb')
        self.name = getattr(stream, 'name', None) or '<stream>'
        self.log = log or default_log
        if extract:
            self.extract(stream)
        else:
            self.init_zipfile(stream)
        self.read_content_types()
        self.read_package_relationships()
        self.namespace = DOCXNamespace(self.docx_is_transitional)

    def init_zipfile(self, stream):
        # Read parts directly from the ZIP container, without extraction
        self.zipf = ZipFile(stream)
        self.names = frozenset(self.zipf.namelist())

    def extract(self, stream):
        '''Unpack the package into a temporary directory, falling back to a
        forgiving ZIP parser for damaged containers.'''
        self.tdir = PersistentTemporaryDirectory('docx_container')
        try:
            zf = ZipFile(stream)
            zf.extractall(self.tdir)
        except Exception:
            self.log.exception('DOCX appears to be invalid ZIP file, trying a'
                    ' more forgiving ZIP parser')
            from calibre.utils.localunzip import extractall
            stream.seek(0)
            extractall(stream, self.tdir)
        self.names = {}
        for f in walk(self.tdir):
            name = os.path.relpath(f, self.tdir).replace(os.sep, '/')
            self.names[name] = f

    def exists(self, name):
        '''Return True if the package contains a part called *name*.'''
        return name in self.names

    def read(self, name):
        '''Return the raw bytes of the part *name*. Raises KeyError if absent.'''
        if hasattr(self, 'zipf'):
            return self.zipf.open(name).read()
        path = self.names[name]
        with open(path, 'rb') as f:
            return f.read()

    def read_content_types(self):
        '''Parse [Content_Types].xml into per-part and per-extension maps.'''
        try:
            raw = self.read('[Content_Types].xml')
        except KeyError:
            raise InvalidDOCX('The file %s docx file has no [Content_Types].xml' % self.name)
        root = fromstring(raw)
        self.content_types = {}
        self.default_content_types = {}
        for item in root.xpath('//*[local-name()="Types"]/*[local-name()="Default" and @Extension and @ContentType]'):
            self.default_content_types[item.get('Extension').lower()] = item.get('ContentType')
        for item in root.xpath('//*[local-name()="Types"]/*[local-name()="Override" and @PartName and @ContentType]'):
            name = item.get('PartName').lstrip('/')
            self.content_types[name] = item.get('ContentType')

    def content_type(self, name):
        '''Return the MIME type for part *name*: explicit override, then
        extension default, then a guess from the filename.'''
        if name in self.content_types:
            return self.content_types[name]
        ext = name.rpartition('.')[-1].lower()
        if ext in self.default_content_types:
            return self.default_content_types[ext]
        return guess_type(name)[0]

    def read_package_relationships(self):
        '''Parse the package-level _rels/.rels part.

        Also detects whether the document uses the transitional or strict
        OOXML namespaces, based on the main document relationship type.
        '''
        try:
            raw = self.read('_rels/.rels')
        except KeyError:
            raise InvalidDOCX('The file %s docx file has no _rels/.rels' % self.name)
        root = fromstring(raw)
        self.relationships = {}
        self.relationships_rmap = {}
        for item in root.xpath('//*[local-name()="Relationships"]/*[local-name()="Relationship" and @Type and @Target]'):
            target = item.get('Target').lstrip('/')
            typ = item.get('Type')
            if target == 'word/document.xml':
                self.docx_is_transitional = typ != 'http://purl.oclc.org/ooxml/officeDocument/relationships/officeDocument'
            self.relationships[typ] = target
            self.relationships_rmap[target] = typ

    @property
    def document_name(self):
        '''Name of the main document part, located via relationship or by search.'''
        name = self.relationships.get(self.namespace.names['DOCUMENT'], None)
        if name is None:
            names = tuple(n for n in self.names if n == 'document.xml' or n.endswith('/document.xml'))
            if not names:
                raise InvalidDOCX('The file %s docx file has no main document' % self.name)
            name = names[0]
        return name

    @property
    def document(self):
        return fromstring(self.read(self.document_name))

    @property
    def document_relationships(self):
        return self.get_relationships(self.document_name)

    def get_relationships(self, name):
        '''Return (by_id, by_type) relationship maps for the part *name*.

        Relative targets are resolved against the part's directory; external
        and fragment-only targets are left untouched.
        '''
        base = '/'.join(name.split('/')[:-1])
        by_id, by_type = {}, {}
        parts = name.split('/')
        name = '/'.join(parts[:-1] + ['_rels', parts[-1] + '.rels'])
        try:
            raw = self.read(name)
        except KeyError:
            pass  # A part without relationships is fine
        else:
            root = fromstring(raw)
            for item in root.xpath('//*[local-name()="Relationships"]/*[local-name()="Relationship" and @Type and @Target]'):
                target = item.get('Target')
                if item.get('TargetMode', None) != 'External' and not target.startswith('#'):
                    target = '/'.join((base, target.lstrip('/')))
                typ = item.get('Type')
                Id = item.get('Id')
                by_id[Id] = by_type[typ] = target
        return by_id, by_type

    def get_document_properties_names(self):
        '''Yield the core-properties and app-properties part names (or None).

        Each is located via its relationship, with a fallback to the
        conventional docProps/core.xml and docProps/app.xml locations.
        '''
        name = self.relationships.get(self.namespace.names['DOCPROPS'], None)
        if name is None:
            names = tuple(n for n in self.names if n.lower() == 'docprops/core.xml')
            if names:
                name = names[0]
        yield name
        name = self.relationships.get(self.namespace.names['APPPROPS'], None)
        if name is None:
            names = tuple(n for n in self.names if n.lower() == 'docprops/app.xml')
            if names:
                name = names[0]
        yield name

    @property
    def metadata(self):
        '''Read package metadata (title, authors, tags, ...) into a Metadata object.'''
        mi = Metadata(_('Unknown'))
        dp_name, ap_name = self.get_document_properties_names()
        if dp_name:
            try:
                raw = self.read(dp_name)
            except KeyError:
                pass
            else:
                read_doc_props(raw, mi, self.namespace.XPath)
        if mi.is_null('language'):
            try:
                raw = self.read('word/styles.xml')
            except KeyError:
                pass
            else:
                read_default_style_language(raw, mi, self.namespace.XPath)
        # Use the ap_name computed by get_document_properties_names() above;
        # previously it was overwritten with a bare relationships lookup,
        # which silently discarded the docprops/app.xml fallback.
        if ap_name:
            try:
                raw = self.read(ap_name)
            except KeyError:
                pass
            else:
                read_app_props(raw, mi)
        return mi

    def close(self):
        '''Release resources: the ZIP handle or the extraction directory.'''
        if hasattr(self, 'zipf'):
            self.zipf.close()
        else:
            try:
                shutil.rmtree(self.tdir)
            except OSError:
                pass
if __name__ == '__main__':
    # Quick manual test: print the metadata of the DOCX given on the command line
    d = DOCX(sys.argv[-1], extract=False)
    print(d.metadata)
| 9,362 | Python | .py | 230 | 31.291304 | 125 | 0.590454 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,566 | char_styles.py | kovidgoyal_calibre/src/calibre/ebooks/docx/char_styles.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import OrderedDict
from calibre.ebooks.docx.block_styles import LINE_STYLES, binary_property, inherit, read_shd, simple_color, simple_float # noqa
# Read from XML {{{
def read_text_border(parent, dest, XPath, get):
    '''Parse the <w:bdr> child of a run's properties into border_color,
    border_style, border_width and padding on *dest*.'''
    color = style = width = pad = inherit
    borders = XPath('./w:bdr')(parent)
    if borders and borders[0].attrib:
        # A bdr element with attributes establishes defaults before parsing
        color = simple_color('auto')
        style = 'none'
        width = 1
    for bdr in borders:
        cval = get(bdr, 'w:color')
        if cval is not None:
            color = simple_color(cval)
        sval = get(bdr, 'w:val')
        if sval is not None:
            style = LINE_STYLES.get(sval, 'solid')
        space = get(bdr, 'w:space')
        if space is not None:
            try:
                pad = float(space)
            except (ValueError, TypeError):
                pass
        sz = get(bdr, 'w:sz')
        if sz is not None:
            # We don't care about art borders (only used for page borders).
            try:
                # Clamp so the border is at least 1pt: WebKit does not
                # render anything thinner.
                width = min(96, max(8, float(sz))) / 8
            except (ValueError, TypeError):
                pass
    dest.border_color = color
    dest.border_style = style
    dest.border_width = width
    dest.padding = pad
def read_color(parent, dest, XPath, get):
    '''Set dest.color from the last non-empty <w:color w:val> child of *parent*.'''
    color = inherit
    for node in XPath('./w:color[@w:val]')(parent):
        val = get(node, 'w:val')
        if val:
            color = simple_color(val)
    dest.color = color
def convert_highlight_color(val):
    '''Map the Word "dark*" highlight names (which CSS does not know) to hex
    colors; any other value is passed through unchanged.'''
    mapping = {
        'darkBlue': '#000080', 'darkCyan': '#008080', 'darkGray': '#808080',
        'darkGreen': '#008000', 'darkMagenta': '#800080', 'darkRed': '#800000', 'darkYellow': '#808000',
        'lightGray': '#c0c0c0',
    }
    return mapping.get(val, val)
def read_highlight(parent, dest, XPath, get):
    '''Set dest.highlight from <w:highlight w:val>.

    The keyword 'none' maps to 'transparent'; other values go through
    convert_highlight_color(). Later elements win; when no usable value is
    found the property inherits.
    '''
    ans = inherit
    for col in XPath('./w:highlight[@w:val]')(parent):
        val = get(col, 'w:val')
        if not val:
            continue
        # Falsy values were filtered by the continue above, so only the
        # explicit 'none' keyword needs mapping (the old 'not val or' test
        # here was dead code).
        if val == 'none':
            val = 'transparent'
        else:
            val = convert_highlight_color(val)
        ans = val
    setattr(dest, 'highlight', ans)
def read_lang(parent, dest, XPath, get):
    '''Set dest.lang from <w:lang w:val>.

    Hexadecimal values are treated as Windows LCIDs and mapped to language
    codes; anything else is used verbatim.
    '''
    ans = inherit
    for node in XPath('./w:lang[@w:val]')(parent):
        val = get(node, 'w:val')
        if not val:
            continue
        try:
            code = int(val, 16)
        except (ValueError, TypeError):
            # Not an LCID, treat as a plain language tag
            ans = val
        else:
            from calibre.ebooks.docx.lcid import lcid
            mapped = lcid.get(code, None)
            if mapped:
                ans = mapped
    dest.lang = ans
def read_letter_spacing(parent, dest, XPath, get):
    '''Set dest.letter_spacing from <w:spacing w:val> (twentieths of a point -> pt).'''
    spacing = inherit
    for node in XPath('./w:spacing[@w:val]')(parent):
        parsed = simple_float(get(node, 'w:val'), 0.05)
        if parsed is not None:
            spacing = parsed
    dest.letter_spacing = spacing
def read_underline(parent, dest, XPath, get):
    '''Translate <w:u> into a CSS text-decoration value on *dest*.'''
    ans = inherit
    line_styles = {
        'dotted': 'dotted', 'dash': 'dashed', 'dashDotDotHeavy': 'dotted', 'dashDotHeavy': 'dashed', 'dashedHeavy': 'dashed',
        'dashLong': 'dashed', 'dashLongHeavy': 'dashed', 'dotDash': 'dotted', 'dotDotDash': 'dotted', 'dottedHeavy': 'dotted',
        'double': 'double', 'none': 'none', 'single': 'solid', 'thick': 'solid', 'wave': 'wavy', 'wavyDouble': 'wavy',
        'wavyHeavy': 'wavy', 'words': 'solid'}
    for node in XPath('./w:u[@w:val]')(parent):
        val = get(node, 'w:val')
        if not val:
            continue
        line = line_styles.get(val, 'solid')
        if line == 'none':
            ans = 'none'
            continue
        ans = 'underline ' + line
        color = get(node, 'w:color')
        if color and color != 'auto':
            # Append an explicit underline color when one is given
            ans += ' #' + color
    dest.text_decoration = ans
def read_vert_align(parent, dest, XPath, get):
    '''Set dest.vert_align for subscript/superscript runs.'''
    ans = inherit
    allowed = {'baseline', 'subscript', 'superscript'}
    for node in XPath('./w:vertAlign[@w:val]')(parent):
        val = get(node, 'w:val')
        if val in allowed:
            ans = val
    dest.vert_align = ans
def read_position(parent, dest, XPath, get):
    '''Set dest.position from <w:position w:val> (half-points -> points).'''
    ans = inherit
    for node in XPath('./w:position[@w:val]')(parent):
        try:
            ans = float(get(node, 'w:val')) / 2.0
        except Exception:
            pass
    dest.position = ans
def read_font(parent, dest, XPath, get):
    '''Set dest.font_family and dest.font_size from <w:rFonts> and <w:sz>.

    Theme font names are wrapped in pipes (e.g. |minorHAnsi|) so they can be
    resolved against the theme later. Sizes are half-points converted to pt.
    '''
    family = inherit
    for fonts in XPath('./w:rFonts')(parent):
        theme = get(fonts, 'w:asciiTheme')
        name = ('|%s|' % theme) if theme else get(fonts, 'w:ascii')
        if name:
            family = name
    dest.font_family = family

    size = inherit
    for sz in XPath('./w:sz[@w:val]')(parent):
        parsed = simple_float(get(sz, 'w:val'), 0.5)
        if parsed is not None:
            size = parsed
            break
    dest.font_size = size
def read_font_cs(parent, dest, XPath, get):
    '''Set dest.cs_font_family and dest.cs_font_size (complex-script variants).

    Mirrors read_font(), but reads the complex-script attributes (w:csTheme,
    w:cs, w:szCs) and writes to the cs_* properties on *dest*.
    '''
    ff = inherit
    for col in XPath('./w:rFonts')(parent):
        val = get(col, 'w:csTheme')
        if val:
            # Theme fonts are wrapped in pipes for later resolution
            val = '|%s|' % val
        else:
            val = get(col, 'w:cs')
        if val:
            ff = val
    setattr(dest, 'cs_font_family', ff)
    # The OOXML element is w:szCs (not w:szCS — XPath is case sensitive) and
    # the value belongs to the complex-script size; the old code queried the
    # wrong element name and clobbered dest.font_size instead of
    # dest.cs_font_size, leaving cs_font_size unset on that path.
    for col in XPath('./w:szCs[@w:val]')(parent):
        val = simple_float(get(col, 'w:val'), 0.5)
        if val is not None:
            setattr(dest, 'cs_font_size', val)
            return
    setattr(dest, 'cs_font_size', inherit)
# }}}
class RunStyle:
    '''
    Resolved character (run) formatting for DOCX text runs, with conversion
    to CSS. Properties whose value is the sentinel `inherit` are taken from
    the parent style when resolved.
    '''

    # Every property this style tracks
    all_properties = {
        'b', 'bCs', 'caps', 'cs', 'dstrike', 'emboss', 'i', 'iCs', 'imprint',
        'rtl', 'shadow', 'smallCaps', 'strike', 'vanish', 'webHidden',
        'border_color', 'border_style', 'border_width', 'padding', 'color', 'highlight', 'background_color',
        'letter_spacing', 'font_size', 'text_decoration', 'vert_align', 'lang', 'font_family', 'position',
        'cs_font_size', 'cs_font_family'
    }

    # Properties that the OOXML spec defines as toggles (on/off XOR between
    # style layers) rather than simple overrides
    toggle_properties = {
        'b', 'bCs', 'caps', 'emboss', 'i', 'iCs', 'imprint', 'shadow', 'smallCaps', 'strike', 'vanish',
    }

    def __init__(self, namespace, rPr=None):
        '''Parse a <w:rPr> element; with rPr=None every property inherits.'''
        self.namespace = namespace
        self.linked_style = None
        if rPr is None:
            for p in self.all_properties:
                setattr(self, p, inherit)
        else:
            X, g = namespace.XPath, namespace.get
            # Simple on/off properties
            for p in (
                'b', 'bCs', 'caps', 'cs', 'dstrike', 'emboss', 'i', 'iCs', 'imprint', 'rtl', 'shadow',
                'smallCaps', 'strike', 'vanish', 'webHidden',
            ):
                setattr(self, p, binary_property(rPr, p, X, g))
            # Structured properties, each parsed by its own reader
            read_font(rPr, self, X, g)
            read_font_cs(rPr, self, X, g)
            read_text_border(rPr, self, X, g)
            read_color(rPr, self, X, g)
            read_highlight(rPr, self, X, g)
            read_shd(rPr, self, X, g)
            read_letter_spacing(rPr, self, X, g)
            read_underline(rPr, self, X, g)
            read_vert_align(rPr, self, X, g)
            read_position(rPr, self, X, g)
            read_lang(rPr, self, X, g)
            # Reference to a named character style, resolved later
            for s in X('./w:rStyle[@w:val]')(rPr):
                self.linked_style = g(s, 'w:val')
        self._css = None  # lazily computed CSS cache

    def update(self, other):
        '''Overlay *other* on self: non-inherit values from other win.'''
        for prop in self.all_properties:
            nval = getattr(other, prop)
            if nval is not inherit:
                setattr(self, prop, nval)
        if other.linked_style is not None:
            self.linked_style = other.linked_style

    def resolve_based_on(self, parent):
        '''Fill in inherited properties from the basedOn parent style.'''
        for p in self.all_properties:
            val = getattr(self, p)
            if val is inherit:
                setattr(self, p, getattr(parent, p))

    def get_border_css(self, ans):
        '''Add border-color/style/width entries to the dict *ans*.'''
        for x in ('color', 'style', 'width'):
            val = getattr(self, 'border_'+x)
            if x == 'width' and val is not inherit:
                val = '%.3gpt' % val
            if val is not inherit:
                ans['border-%s' % x] = val

    def clear_border_css(self):
        # Reset the border so it will not be emitted for this run
        for x in ('color', 'style', 'width'):
            setattr(self, 'border_'+x, inherit)

    @property
    def css(self):
        '''CSS property dict equivalent to this run style (cached).'''
        if self._css is None:
            c = self._css = OrderedDict()
            # text-decoration combines underline with strike-through
            td = set()
            if self.text_decoration is not inherit:
                td.add(self.text_decoration)
            if self.strike and self.strike is not inherit:
                td.add('line-through')
            if self.dstrike and self.dstrike is not inherit:
                td.add('line-through')
            if td:
                c['text-decoration'] = ' '.join(td)
            if self.caps is True:
                c['text-transform'] = 'uppercase'
            if self.i is True:
                c['font-style'] = 'italic'
            if self.shadow and self.shadow is not inherit:
                c['text-shadow'] = '2px 2px'
            if self.smallCaps is True:
                c['font-variant'] = 'small-caps'
            if self.vanish is True or self.webHidden is True:
                c['display'] = 'none'
            self.get_border_css(c)
            if self.padding is not inherit:
                c['padding'] = '%.3gpt' % self.padding
            for x in ('color', 'background_color'):
                val = getattr(self, x)
                if val is not inherit:
                    c[x.replace('_', '-')] = val
            for x in ('letter_spacing', 'font_size'):
                val = getattr(self, x)
                if val is not inherit:
                    c[x.replace('_', '-')] = '%.3gpt' % val
            if self.position is not inherit:
                # w:position maps to vertical-align in points
                c['vertical-align'] = '%.3gpt' % self.position
            if self.highlight is not inherit and self.highlight != 'transparent':
                c['background-color'] = self.highlight
            if self.b:
                c['font-weight'] = 'bold'
            if self.font_family is not inherit:
                c['font-family'] = self.font_family
        return self._css

    def same_border(self, other):
        '''True if self and other would render with identical border CSS.'''
        return self.get_border_css({}) == other.get_border_css({})
| 10,464 | Python | .py | 260 | 29.830769 | 134 | 0.529551 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,567 | toc.py | kovidgoyal_calibre/src/calibre/ebooks/docx/toc.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import namedtuple
from itertools import count
from lxml.etree import tostring
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.oeb.polish.toc import elem_to_toc_text
from polyglot.builtins import iteritems
def from_headings(body, log, namespace, num_levels=3):
    ' Create a TOC from headings in the document '
    tocroot = TOC()
    # Elements were tagged with data-heading-level during conversion
    all_heading_nodes = body.xpath('//*[@data-heading-level]')
    # level_prev[n] is the most recent TOC node created at level n; level 0
    # is the root, so every heading always has some ancestor to attach to
    level_prev = {i+1:None for i in range(num_levels)}
    level_prev[0] = tocroot
    level_item_map = {i:frozenset(
        x for x in all_heading_nodes if int(x.get('data-heading-level')) == i)
        for i in range(1, num_levels+1)}
    item_level_map = {e:i for i, elems in iteritems(level_item_map) for e in elems}
    idcount = count()

    def ensure_id(elem):
        # Give the heading an anchor id if it does not already have one
        ans = elem.get('id', None)
        if not ans:
            ans = 'toc_id_%d' % (next(idcount) + 1)
            elem.set('id', ans)
        return ans

    for item in all_heading_nodes:
        lvl = plvl = item_level_map.get(item, None)
        if lvl is None:
            continue
        # Walk up until an open TOC node is found; the heading is attached
        # one level below it (handles skipped heading levels)
        parent = None
        while parent is None:
            plvl -= 1
            parent = level_prev[plvl]
        lvl = plvl + 1
        elem_id = ensure_id(item)
        text = elem_to_toc_text(item)
        toc = parent.add_item('index.html', elem_id, text)
        level_prev[lvl] = toc
        # Deeper levels are now closed
        for i in range(lvl+1, num_levels+1):
            level_prev[i] = None
    if len(tuple(tocroot.flat())) > 1:
        log('Generating Table of Contents from headings')
    return tocroot
def structure_toc(entries):
    '''Build a nested TOC from flat entries, inferring depth from indentation.'''
    depths = sorted({e.indent for e in entries})
    root = TOC()
    # More than 6 distinct indents means indentation is unlikely to reflect
    # real nesting; emit a flat TOC instead.
    if len(depths) > 6:
        for e in entries:
            root.add_item('index.html', e.anchor, e.text)
        return root
    open_nodes = [None] * len(depths)

    def nearest_ancestor(depth):
        # Closest open node above this depth, or the root
        for node in reversed(open_nodes[:depth]):
            if node is not None:
                return node
        return root

    for e in entries:
        depth = depths.index(e.indent)
        open_nodes[depth] = nearest_ancestor(depth).add_item(
                'index.html', e.anchor, e.text)
        # Anything previously open at a deeper level is now closed
        for i in range(depth + 1, len(open_nodes)):
            open_nodes[i] = None
    return root
def link_to_txt(a, styles, object_map):
    '''Return the visible text of link element *a* for use as a TOC entry.

    Child runs whose resolved style hides them (display: none) are removed
    before serializing, so hidden helper text does not leak into the entry.
    '''
    if len(a) > 1:
        # Iterate over a snapshot: removing children from *a* while
        # iterating it directly makes lxml skip the sibling that follows
        # each removed element.
        for child in tuple(a):
            run = object_map.get(child, None)
            if run is not None:
                rs = styles.resolve(run)
                if rs.css.get('display', None) == 'none':
                    a.remove(child)
    return tostring(a, method='text', with_tail=False, encoding='unicode').strip()
def from_toc(docx, link_map, styles, object_map, log, namespace):
    '''Build a TOC from a Word TOC field in the document, if present.

    Word stores the TOC as a field: fldChar begin/end pairs bracket an
    instrText starting with 'TOC ', and the entries are hyperlinks inside
    the field. Returns a structured TOC or None if no TOC field was found.
    '''
    XPath, get, ancestor = namespace.XPath, namespace.get, namespace.ancestor
    toc_level = None  # field-nesting level at which the TOC field started
    level = 0         # current field-nesting level
    TI = namedtuple('TI', 'text anchor indent')
    toc = []
    for tag in XPath('//*[(@w:fldCharType and name()="w:fldChar") or name()="w:hyperlink" or name()="w:instrText"]')(docx):
        n = tag.tag.rpartition('}')[-1]
        if n == 'fldChar':
            # Track how deeply we are nested inside fields
            t = get(tag, 'w:fldCharType')
            if t == 'begin':
                level += 1
            elif t == 'end':
                level -= 1
                # Leaving the TOC field: stop scanning
                if toc_level is not None and level < toc_level:
                    break
        elif n == 'instrText':
            # A 'TOC ...' instruction marks the start of the TOC field
            if level > 0 and tag.text and tag.text.strip().startswith('TOC '):
                toc_level = level
        elif n == 'hyperlink':
            # Hyperlinks inside the TOC field are the TOC entries
            if toc_level is not None and level >= toc_level and tag in link_map:
                a = link_map[tag]
                href = a.get('href', None)
                txt = link_to_txt(a, styles, object_map)
                p = ancestor(tag, 'w:p')
                if txt and href and p is not None:
                    # Use the paragraph's left margin as a proxy for the
                    # entry's nesting depth (centered/right-aligned text
                    # carries no depth information)
                    ps = styles.resolve_paragraph(p)
                    try:
                        ml = int(ps.margin_left[:-2])
                    except (TypeError, ValueError, AttributeError):
                        ml = 0
                    if ps.text_align in {'center', 'right'}:
                        ml = 0
                    toc.append(TI(txt, href[1:], ml))
    if toc:
        log('Found Word Table of Contents, using it to generate the Table of Contents')
        return structure_toc(toc)
def create_toc(docx, body, link_map, styles, object_map, log, namespace):
    '''Return a TOC: prefer the Word TOC field, else fall back to headings.'''
    toc = from_toc(docx, link_map, styles, object_map, log, namespace)
    if not toc:
        toc = from_headings(body, log, namespace)
    # The heading-level markers were only needed for TOC generation; strip
    # them so they do not leak into the output HTML.
    for heading in body.xpath('//*[@data-heading-level]'):
        del heading.attrib['data-heading-level']
    return toc
| 4,869 | Python | .py | 118 | 31.483051 | 123 | 0.571187 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,568 | to_html.py | kovidgoyal_calibre/src/calibre/ebooks/docx/to_html.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import errno
import math
import numbers
import os
import re
import sys
import uuid
from collections import OrderedDict, defaultdict
from lxml import html
from lxml.html.builder import BODY, BR, DD, DIV, DL, DT, H1, HEAD, HTML, LINK, META, SPAN, TITLE, A, P
from calibre import guess_type
from calibre.ebooks.docx.cleanup import cleanup_markup
from calibre.ebooks.docx.container import DOCX, fromstring
from calibre.ebooks.docx.fields import Fields
from calibre.ebooks.docx.fonts import Fonts, is_symbol_font, map_symbol_text
from calibre.ebooks.docx.footnotes import Footnotes
from calibre.ebooks.docx.images import Images
from calibre.ebooks.docx.names import XML, generate_anchor
from calibre.ebooks.docx.numbering import Numbering
from calibre.ebooks.docx.settings import Settings
from calibre.ebooks.docx.styles import PageProperties, Styles, inherit
from calibre.ebooks.docx.tables import Tables
from calibre.ebooks.docx.theme import Theme
from calibre.ebooks.docx.toc import create_toc
from calibre.ebooks.metadata.opf2 import OPFCreator
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from polyglot.builtins import iteritems, itervalues
NBSP = '\xa0'
class Text:
    '''Accumulates text fragments destined for one element attribute.

    Fragments are collected in ``buf``; when the target element changes via
    :meth:`add_elem`, the buffered text is flushed onto the previous target
    and subsequent text accumulates for the new element's ``tail``.
    '''

    def __init__(self, elem, attr, buf):
        self.elem = elem
        self.attr = attr
        self.buf = buf
        self.elems = [elem]

    def add_elem(self, elem):
        # Flush the pending text onto the current target, then switch to
        # collecting tail text for the new element
        setattr(self.elem, self.attr, ''.join(self.buf))
        self.elems.append(elem)
        self.elem = elem
        self.attr = 'tail'
        self.buf = []

    def __iter__(self):
        return iter(self.elems)
def html_lang(docx_lang):
    '''Map a Word language code to a two-letter HTML lang value, or None.'''
    canonical = canonicalize_lang(docx_lang)
    if not canonical or canonical == 'und':
        return None
    return lang_as_iso639_1(canonical) or None
class Convert:
    def __init__(self, path_or_stream, dest_dir=None, log=None, detect_cover=True, notes_text=None, notes_nopb=False, nosupsub=False):
        '''Prepare a DOCX -> HTML conversion.

        :param path_or_stream: path to, or open stream of, the DOCX file
        :param dest_dir: output directory (defaults to the CWD)
        :param detect_cover: try to detect a cover image during cleanup
        :param notes_text: heading used for the generated notes section
        :param notes_nopb: do not page-break before notes
        :param nosupsub: do not render superscripts/subscripts
        '''
        self.docx = DOCX(path_or_stream, log=log)
        self.namespace = self.docx.namespace
        self.ms_pat = re.compile(r'\s{2,}')    # runs of multiple spaces
        self.ws_pat = re.compile(r'[\n\r\t]')  # whitespace to normalize
        self.log = self.docx.log
        self.detect_cover = detect_cover
        self.notes_text = notes_text or _('Notes')
        self.notes_nopb = notes_nopb
        self.nosupsub = nosupsub
        self.dest_dir = dest_dir or os.getcwd()
        self.mi = self.docx.metadata
        self.body = BODY()
        # Sub-parsers for the various parts of the package
        self.theme = Theme(self.namespace)
        self.settings = Settings(self.namespace)
        self.tables = Tables(self.namespace)
        self.fields = Fields(self.namespace)
        self.styles = Styles(self.namespace, self.tables)
        self.images = Images(self.namespace, self.log)
        # Maps generated HTML elements back to their source DOCX elements
        self.object_map = OrderedDict()
        self.html = HTML(
            HEAD(
                META(charset='utf-8'),
                TITLE(self.mi.title or _('Unknown')),
                LINK(rel='stylesheet', type='text/css', href='docx.css'),
            ),
            self.body
        )
        # Pretty-print the skeleton with newlines/tabs
        self.html.text='\n\t'
        self.html[0].text='\n\t\t'
        self.html[0].tail='\n'
        for child in self.html[0]:
            child.tail = '\n\t\t'
        self.html[0][-1].tail = '\n\t'
        self.html[1].text = self.html[1].tail = '\n'
        lang = html_lang(self.mi.language)
        if lang:
            self.html.set('lang', lang)
            self.doc_lang = lang
        else:
            self.doc_lang = None
    def __call__(self):
        '''Run the conversion and return the result of self.write(doc).

        Pipeline: resolve markup-compatibility fallbacks, read styles and
        images, convert paragraphs, append footnotes/endnotes, resolve
        links, apply numbering/frames/tables, generate CSS classes and
        clean up the markup.
        '''
        doc = self.docx.document
        relationships_by_id, relationships_by_type = self.docx.document_relationships
        self.resolve_alternate_content(doc)
        self.fields(doc, self.log)
        self.read_styles(relationships_by_type)
        self.images(relationships_by_id)
        self.layers = OrderedDict()
        self.framed = [[]]
        self.frame_map = {}
        self.framed_map = {}
        self.anchor_map = {}
        self.link_map = defaultdict(list)
        self.link_source_map = {}
        self.toc_anchor = None
        self.block_runs = []
        paras = []
        self.log.debug('Converting Word markup to HTML')
        self.read_page_properties(doc)
        self.current_rels = relationships_by_id
        # Convert every top-level paragraph (tables are handled separately)
        for wp, page_properties in iteritems(self.page_map):
            self.current_page = page_properties
            if wp.tag.endswith('}p'):
                p = self.convert_p(wp)
                self.body.append(p)
                paras.append(wp)
        self.read_block_anchors(doc)
        self.styles.apply_contextual_spacing(paras)
        self.mark_block_runs(paras)
        # Apply page breaks at the start of every section, except the first
        # section (since that will be the start of the file)
        self.styles.apply_section_page_breaks(self.section_starts[1:])
        notes_header = None
        orig_rid_map = self.images.rid_map
        if self.footnotes.has_notes:
            # Append the footnotes/endnotes as a definition list per note
            self.body.append(H1(self.notes_text))
            notes_header = self.body[-1]
            notes_header.set('class', 'notes-header')
            for anchor, text, note in self.footnotes:
                dl = DL(id=anchor)
                dl.set('class', 'footnote')
                self.body.append(dl)
                # NOTE(review): the '�' below looks like a mis-decoded
                # backlink glyph in this copy of the source — verify against
                # upstream before relying on it.
                dl.append(DT('[', A('�' + text, href='#back_%s' % anchor, title=text)))
                dl[-1][0].tail = ']'
                dl.append(DD())
                paras = []
                # Notes have their own relationship map for images/links
                self.images.rid_map = self.current_rels = note.rels[0]
                for wp in note:
                    if wp.tag.endswith('}tbl'):
                        self.tables.register(wp, self.styles)
                        self.page_map[wp] = self.current_page
                    else:
                        p = self.convert_p(wp)
                        dl[-1].append(p)
                        paras.append(wp)
                self.styles.apply_contextual_spacing(paras)
                self.mark_block_runs(paras)
        for p, wp in iteritems(self.object_map):
            if len(p) > 0 and not p.text and len(p[0]) > 0 and not p[0].text and p[0][0].get('class', None) == 'tab':
                # Paragraph uses tabs for indentation, convert to text-indent
                parent = p[0]
                tabs = []
                for child in parent:
                    if child.get('class', None) == 'tab':
                        tabs.append(child)
                        if child.tail:
                            break
                    else:
                        break
                indent = len(tabs) * self.settings.default_tab_stop
                style = self.styles.resolve(wp)
                if style.text_indent is inherit or (hasattr(style.text_indent, 'endswith') and style.text_indent.endswith('pt')):
                    if style.text_indent is not inherit:
                        indent = float(style.text_indent[:-2]) + indent
                    style.text_indent = '%.3gpt' % indent
                    parent.text = tabs[-1].tail or ''
                    for i in tabs:
                        parent.remove(i)
        self.images.rid_map = orig_rid_map
        self.resolve_links()
        self.styles.cascade(self.layers)
        self.tables.apply_markup(self.object_map, self.page_map)
        # Collect paragraphs that carry list numbering information
        numbered = []
        for html_obj, obj in iteritems(self.object_map):
            raw = obj.get('calibre_num_id', None)
            if raw is not None:
                lvl, num_id = raw.partition(':')[0::2]
                try:
                    lvl = int(lvl)
                except (TypeError, ValueError):
                    lvl = 0
                numbered.append((html_obj, num_id, lvl))
        self.numbering.apply_markup(numbered, self.body, self.styles, self.object_map, self.images)
        self.apply_frames()
        if len(self.body) > 0:
            # Pretty-print the body
            self.body.text = '\n\t'
            for child in self.body:
                child.tail = '\n\t'
            self.body[-1].tail = '\n'
        self.log.debug('Converting styles to CSS')
        self.styles.generate_classes()
        for html_obj, obj in iteritems(self.object_map):
            style = self.styles.resolve(obj)
            if style is not None:
                css = style.css
                if css:
                    cls = self.styles.class_name(css)
                    if cls:
                        html_obj.set('class', cls)
        for html_obj, css in iteritems(self.framed_map):
            cls = self.styles.class_name(css)
            if cls:
                html_obj.set('class', cls)
        if notes_header is not None:
            # Make the notes heading match the level/class of the first
            # real heading in the document
            for h in self.namespace.children(self.body, 'h1', 'h2', 'h3'):
                notes_header.tag = h.tag
                cls = h.get('class', None)
                if cls and cls != 'notes-header':
                    notes_header.set('class', '%s notes-header' % cls)
                break
        self.fields.polish_markup(self.object_map)
        self.log.debug('Cleaning up redundant markup generated by Word')
        self.cover_image = cleanup_markup(self.log, self.html, self.styles, self.dest_dir, self.detect_cover, self.namespace.XPath)
        return self.write(doc)
def read_page_properties(self, doc):
    '''Map every top-level paragraph/table in *doc* to the page (section)
    properties that apply to it, populating self.page_map and recording the
    first paragraph of every section in self.section_starts.'''
    current = []
    self.page_map = OrderedDict()
    self.section_starts = []
    for p in self.namespace.descendants(doc, 'w:p', 'w:tbl'):
        if p.tag.endswith('}tbl'):
            # Tables carry no sectPr of their own; they belong to whatever
            # section the surrounding paragraphs end up in.
            self.tables.register(p, self.styles)
            current.append(p)
            continue
        sect = tuple(self.namespace.descendants(p, 'w:sectPr'))
        if sect:
            # A sectPr inside a paragraph closes the current section: it and
            # all accumulated elements share these page properties.
            pr = PageProperties(self.namespace, sect)
            paras = current + [p]
            for x in paras:
                self.page_map[x] = pr
            self.section_starts.append(paras[0])
            current = []
        else:
            current.append(p)
    if current:
        # Trailing elements use the document-level sectPr in w:body.
        self.section_starts.append(current[0])
        last = self.namespace.XPath('./w:body/w:sectPr')(doc)
        pr = PageProperties(self.namespace, last)
        for x in current:
            self.page_map[x] = pr
def resolve_alternate_content(self, doc):
    # For proprietary extensions in Word documents use the fallback, spec
    # compliant form
    # See https://wiki.openoffice.org/wiki/OOXML/Markup_Compatibility_and_Extensibility
    for ac in self.namespace.descendants(doc, 'mc:AlternateContent'):
        choices = self.namespace.XPath('./mc:Choice')(ac)
        fallbacks = self.namespace.XPath('./mc:Fallback')(ac)
        if fallbacks:
            # Keep only the fallback content; drop the proprietary choices.
            for choice in choices:
                ac.remove(choice)
        if len(fallbacks) == 1 and ac.getparent().tag.endswith('}r') and len(fallbacks[0]) == 1:
            # A single drawing/pict fallback inside a run: hoist it so it
            # replaces the AlternateContent wrapper entirely.
            q = fallbacks[0][0]
            if q.tag and (q.tag.endswith('}drawing') or q.tag.endswith('}pict')):
                p = ac.getparent()
                idx = p.index(ac)
                p.insert(idx, q)
                p.remove(ac)
def read_styles(self, relationships_by_type):
    '''Load the numbering, styles, settings, fonts, theme, footnotes and
    endnotes sub-documents of the DOCX and initialize the corresponding
    helper objects (self.numbering, self.footnotes, self.fonts, ...).'''

    def get_name(rtype, defname):
        # Look the part up by relationship type; if absent, fall back to the
        # conventionally named file next to the main document part.
        name = relationships_by_type.get(rtype, None)
        if name is None:
            cname = self.docx.document_name.split('/')
            cname[-1] = defname
            if self.docx.exists('/'.join(cname)):
                # Bug fix: this used to be the no-op ``name = name``, so the
                # fallback file was detected but never actually used.
                name = '/'.join(cname)
        if name and name.startswith('word/word') and not self.docx.exists(name):
            # Repair doubled path prefixes produced by some generators.
            name = name.partition('/')[2]
        return name

    nname = get_name(self.namespace.names['NUMBERING'], 'numbering.xml')
    sname = get_name(self.namespace.names['STYLES'], 'styles.xml')
    sename = get_name(self.namespace.names['SETTINGS'], 'settings.xml')
    fname = get_name(self.namespace.names['FONTS'], 'fontTable.xml')
    tname = get_name(self.namespace.names['THEMES'], 'theme1.xml')
    foname = get_name(self.namespace.names['FOOTNOTES'], 'footnotes.xml')
    enname = get_name(self.namespace.names['ENDNOTES'], 'endnotes.xml')
    numbering = self.numbering = Numbering(self.namespace)
    footnotes = self.footnotes = Footnotes(self.namespace)
    fonts = self.fonts = Fonts(self.namespace)

    foraw = enraw = None
    forel, enrel = ({}, {}), ({}, {})
    if sename is not None:
        try:
            seraw = self.docx.read(sename)
        except KeyError:
            self.log.warn('Settings %s do not exist' % sename)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            self.log.warn('Settings %s file missing' % sename)
        else:
            self.settings(fromstring(seraw))

    if foname is not None:
        try:
            foraw = self.docx.read(foname)
        except KeyError:
            self.log.warn('Footnotes %s do not exist' % foname)
        else:
            forel = self.docx.get_relationships(foname)
    if enname is not None:
        try:
            enraw = self.docx.read(enname)
        except KeyError:
            self.log.warn('Endnotes %s do not exist' % enname)
        else:
            enrel = self.docx.get_relationships(enname)
    footnotes(fromstring(foraw) if foraw else None, forel, fromstring(enraw) if enraw else None, enrel)

    if fname is not None:
        embed_relationships = self.docx.get_relationships(fname)[0]
        try:
            raw = self.docx.read(fname)
        except KeyError:
            self.log.warn('Fonts table %s does not exist' % fname)
        else:
            fonts(fromstring(raw), embed_relationships, self.docx, self.dest_dir)

    if tname is not None:
        try:
            raw = self.docx.read(tname)
        except KeyError:
            # Bug fix: this message used to say 'Styles %s do not exist' and
            # reported sname, although it is the theme that failed to load.
            self.log.warn('Theme %s does not exist' % tname)
        else:
            self.theme(fromstring(raw))

    styles_loaded = False
    if sname is not None:
        try:
            raw = self.docx.read(sname)
        except KeyError:
            self.log.warn('Styles %s do not exist' % sname)
        else:
            self.styles(fromstring(raw), fonts, self.theme)
            styles_loaded = True
    if not styles_loaded:
        self.styles(None, fonts, self.theme)

    if nname is not None:
        try:
            raw = self.docx.read(nname)
        except KeyError:
            self.log.warn('Numbering styles %s do not exist' % nname)
        else:
            numbering(fromstring(raw), self.styles, self.docx.get_relationships(nname)[0])

    self.styles.resolve_numbering(numbering)
def write(self, doc):
    '''Serialize the converted HTML, the generated CSS, the OPF metadata and
    the NCX table of contents into self.dest_dir. Returns the path to the
    written metadata.opf.'''
    toc = create_toc(doc, self.body, self.resolved_link_map, self.styles, self.object_map, self.log, self.namespace)
    raw = html.tostring(self.html, encoding='utf-8', doctype='<!DOCTYPE html>')
    with open(os.path.join(self.dest_dir, 'index.html'), 'wb') as f:
        f.write(raw)
    css = self.styles.generate_css(self.dest_dir, self.docx, self.notes_nopb, self.nosupsub)
    if css:
        with open(os.path.join(self.dest_dir, 'docx.css'), 'wb') as f:
            f.write(css.encode('utf-8'))

    opf = OPFCreator(self.dest_dir, self.mi)
    opf.toc = toc
    opf.create_manifest_from_files_in([self.dest_dir])
    for item in opf.manifest:
        if item.media_type == 'text/html':
            # Use an XHTML media type so downstream consumers treat the
            # output as XHTML.
            item.media_type = guess_type('a.xhtml')[0]
    opf.create_spine(['index.html'])
    if self.cover_image is not None:
        opf.guide.set_cover(self.cover_image)

    def process_guide(E, guide):
        # Add a guide reference to the in-document TOC anchor, if one was
        # detected during conversion.
        if self.toc_anchor is not None:
            guide.append(E.reference(
                href='index.html#' + self.toc_anchor, title=_('Table of Contents'), type='toc'))
    toc_file = os.path.join(self.dest_dir, 'toc.ncx')
    with open(os.path.join(self.dest_dir, 'metadata.opf'), 'wb') as of, open(toc_file, 'wb') as ncx:
        opf.render(of, ncx, 'toc.ncx', process_guide=process_guide)
    if os.path.getsize(toc_file) == 0:
        # No TOC entries were generated; drop the empty file.
        os.remove(toc_file)
    return os.path.join(self.dest_dir, 'metadata.opf')
def read_block_anchors(self, doc):
    '''Process top-level bookmarks (w:bookmarkStart directly in w:body):
    assign each one an HTML id on the next converted paragraph and record the
    mapping in self.anchor_map.'''
    doc_anchors = frozenset(self.namespace.XPath('./w:body/w:bookmarkStart[@w:name]')(doc))
    if doc_anchors:
        current_bm = set()
        # Reverse map: source w:p element -> converted HTML element.
        rmap = {v:k for k, v in iteritems(self.object_map)}
        for p in self.namespace.descendants(doc, 'w:p', 'w:bookmarkStart[@w:name]'):
            if p.tag.endswith('}p'):
                if current_bm and p in rmap:
                    # Attach all pending bookmarks to this paragraph.
                    para = rmap[p]
                    if 'id' not in para.attrib:
                        para.set('id', generate_anchor(next(iter(current_bm)), frozenset(itervalues(self.anchor_map))))
                    for name in current_bm:
                        self.anchor_map[name] = para.get('id')
                    current_bm = set()
            elif p in doc_anchors:
                anchor = self.namespace.get(p, 'w:name')
                if anchor:
                    current_bm.add(anchor)
def convert_p(self, p):
    '''Convert a single <w:p> paragraph into an HTML block element (<p>, or
    <h1>-<h6> for "heading N" styles), converting its runs, bookmarks and
    hyperlinks along the way. Returns the new element.'''
    dest = P()
    self.object_map[dest] = p
    style = self.styles.resolve_paragraph(p)
    self.layers[p] = []
    self.frame_map[p] = style.frame
    self.add_frame(dest, style.frame)

    # State carried across children: a bookmark waiting to be attached and
    # the hyperlink currently containing the runs being converted.
    current_anchor = None
    current_hyperlink = None
    hl_xpath = self.namespace.XPath('ancestor::w:hyperlink[1]')

    def p_parent(x):
        # Ensure that nested <w:p> tags are handled. These can occur if a
        # textbox is present inside a paragraph.
        while True:
            x = x.getparent()
            try:
                if x.tag.endswith('}p'):
                    return x
            except AttributeError:
                break

    for x in self.namespace.descendants(p, 'w:r', 'w:bookmarkStart', 'w:hyperlink', 'w:instrText'):
        if p_parent(x) is not p:
            # Skip content that belongs to a nested paragraph (textbox).
            continue
        if x.tag.endswith('}r'):
            span = self.convert_run(x)
            if current_anchor is not None:
                # If the paragraph is still empty, put the anchor on the
                # paragraph itself rather than the first run.
                (dest if len(dest) == 0 else span).set('id', current_anchor)
                current_anchor = None
            if current_hyperlink is not None:
                try:
                    hl = hl_xpath(x)[0]
                    self.link_map[hl].append(span)
                    self.link_source_map[hl] = self.current_rels
                    x.set('is-link', '1')
                except IndexError:
                    current_hyperlink = None
            dest.append(span)
            self.layers[p].append(x)
        elif x.tag.endswith('}bookmarkStart'):
            anchor = self.namespace.get(x, 'w:name')
            if anchor and anchor not in self.anchor_map and anchor != '_GoBack':
                # _GoBack is a special bookmark inserted by Word 2010 for
                # the return to previous edit feature, we ignore it
                old_anchor = current_anchor
                self.anchor_map[anchor] = current_anchor = generate_anchor(anchor, frozenset(itervalues(self.anchor_map)))
                if old_anchor is not None:
                    # The previous anchor was not applied to any element
                    for a, t in tuple(self.anchor_map.items()):
                        if t == old_anchor:
                            self.anchor_map[a] = current_anchor
        elif x.tag.endswith('}hyperlink'):
            current_hyperlink = x
        elif x.tag.endswith('}instrText') and x.text and x.text.strip().startswith('TOC '):
            # A TOC field: synthesize an anchor marking the TOC location.
            old_anchor = current_anchor
            anchor = str(uuid.uuid4())
            self.anchor_map[anchor] = current_anchor = generate_anchor('toc', frozenset(itervalues(self.anchor_map)))
            self.toc_anchor = current_anchor
            if old_anchor is not None:
                # The previous anchor was not applied to any element
                for a, t in tuple(iteritems(self.anchor_map)):
                    if t == old_anchor:
                        self.anchor_map[a] = current_anchor

    if current_anchor is not None:
        if dest.get('id'):
            # this bookmark was at the end of the paragraph
            if len(dest):
                if dest[-1].get('id'):
                    self.anchor_map[current_anchor] = dest[-1].get('id')
                else:
                    dest[-1].set('id', current_anchor)
            else:
                self.anchor_map[current_anchor] = dest.get('id')
        else:
            # This paragraph had no <w:r> descendants
            dest.set('id', current_anchor)
        current_anchor = None

    # Styles named "heading N" become <hN> (clamped to 1..6).
    m = re.match(r'heading\s+(\d+)$', style.style_name or '', re.IGNORECASE)
    if m is not None:
        n = min(6, max(1, int(m.group(1))))
        dest.tag = 'h%d' % n
        dest.set('data-heading-level', str(n))

    if style.bidi is True:
        dest.set('dir', 'rtl')

    # Group consecutive runs that share the same text border so they can be
    # wrapped in a single bordered <span>.
    border_runs = []
    common_borders = []
    for span in dest:
        run = self.object_map[span]
        style = self.styles.resolve_run(run)
        if not border_runs or border_runs[-1][1].same_border(style):
            border_runs.append((span, style))
        elif border_runs:
            if len(border_runs) > 1:
                common_borders.append(border_runs)
            border_runs = []

    for border_run in common_borders:
        spans = []
        bs = {}
        for span, style in border_run:
            style.get_border_css(bs)
            style.clear_border_css()
            spans.append(span)
        if bs:
            cls = self.styles.register(bs, 'text_border')
            wrapper = self.wrap_elems(spans, SPAN())
            wrapper.set('class', cls)

    if not dest.text and len(dest) == 0 and not style.has_visible_border():
        # Empty paragraph add a non-breaking space so that it is rendered
        # by WebKit
        dest.text = NBSP

    # If the last element in a block is a <br> the <br> is not rendered in
    # HTML, unless it is followed by a trailing space. Word, on the other
    # hand inserts a blank line for trailing <br>s.
    if len(dest) > 0 and not dest[-1].tail:
        if dest[-1].tag == 'br':
            dest[-1].tail = NBSP
        elif len(dest[-1]) > 0 and dest[-1][-1].tag == 'br' and not dest[-1][-1].tail:
            dest[-1][-1].tail = NBSP
    return dest
def wrap_elems(self, elems, wrapper):
    '''Move *elems* (siblings in document order) into *wrapper*, inserting
    the wrapper at the position of the first element and transferring the
    tail text of the last element to it. Returns the wrapper.'''
    first, last = elems[0], elems[-1]
    container = first.getparent()
    container.insert(container.index(first), wrapper)
    wrapper.tail, last.tail = last.tail, None
    for child in elems:
        try:
            container.remove(child)
        except ValueError:
            # Probably a hyperlink that spans multiple paragraphs;
            # theoretically we should break this up into multiple
            # hyperlinks, but I can't be bothered.
            child.getparent().remove(child)
        wrapper.append(child)
    return wrapper
def resolve_links(self):
    '''Turn the hyperlinks collected during conversion (w:hyperlink elements,
    HYPERLINK fields and image links) into HTML <a> elements with resolved
    href/target/title attributes.'''
    self.resolved_link_map = {}
    for hyperlink, spans in iteritems(self.link_map):
        relationships_by_id = self.link_source_map[hyperlink]
        # Multiple runs belonging to one hyperlink get wrapped in a single
        # <a>; a single run is converted in place.
        span = spans[0]
        if len(spans) > 1:
            span = self.wrap_elems(spans, SPAN())
        span.tag = 'a'
        self.resolved_link_map[hyperlink] = span
        tgt = self.namespace.get(hyperlink, 'w:tgtFrame')
        if tgt:
            span.set('target', tgt)
        tt = self.namespace.get(hyperlink, 'w:tooltip')
        if tt:
            span.set('title', tt)
        rid = self.namespace.get(hyperlink, 'r:id')
        if rid and rid in relationships_by_id:
            # External target, resolved through the relationships part.
            span.set('href', relationships_by_id[rid])
            continue
        anchor = self.namespace.get(hyperlink, 'w:anchor')
        if anchor and anchor in self.anchor_map:
            # Internal target: a bookmark converted earlier.
            span.set('href', '#' + self.anchor_map[anchor])
            continue
        self.log.warn('Hyperlink with unknown target (rid=%s, anchor=%s), ignoring' %
                      (rid, anchor))
        # hrefs that point nowhere give epubcheck a hernia. The element
        # should be styled explicitly by Word anyway.
        # span.set('href', '#')

    # HYPERLINK fields reference runs, not w:hyperlink elements; map the
    # source runs back to their converted spans.
    rmap = {v:k for k, v in iteritems(self.object_map)}
    for hyperlink, runs in self.fields.hyperlink_fields:
        spans = [rmap[r] for r in runs if r in rmap]
        if not spans:
            continue
        span = spans[0]
        if len(spans) > 1:
            span = self.wrap_elems(spans, SPAN())
        span.tag = 'a'
        tgt = hyperlink.get('target', None)
        if tgt:
            span.set('target', tgt)
        tt = hyperlink.get('title', None)
        if tt:
            span.set('title', tt)
        url = hyperlink.get('url', None)
        if url is None:
            anchor = hyperlink.get('anchor', None)
            if anchor in self.anchor_map:
                span.set('href', '#' + self.anchor_map[anchor])
                continue
            self.log.warn('Hyperlink field with unknown anchor: %s' % anchor)
        else:
            if url in self.anchor_map:
                span.set('href', '#' + self.anchor_map[url])
                continue
            span.set('href', url)

    # Images that act as links get wrapped in an <a> of their own.
    for img, link, relationships_by_id in self.images.links:
        parent = img.getparent()
        idx = parent.index(img)
        a = A(img)
        a.tail, img.tail = img.tail, None
        parent.insert(idx, a)
        tgt = link.get('target', None)
        if tgt:
            a.set('target', tgt)
        tt = link.get('title', None)
        if tt:
            a.set('title', tt)
        rid = link['id']
        if rid in relationships_by_id:
            dest = relationships_by_id[rid]
            if dest.startswith('#'):
                # Internal destination; only link if the bookmark resolved.
                if dest[1:] in self.anchor_map:
                    a.set('href', '#' + self.anchor_map[dest[1:]])
            else:
                a.set('href', dest)
def convert_run(self, run):
    '''Convert a single <w:r> run into an HTML <span>, handling text,
    line/page breaks, images, note references, tabs and hyphens. Returns the
    new span.'''
    ans = SPAN()
    self.object_map[ans] = run
    # `text` accumulates character data for the element most recently added
    # to the span (see the Text helper, defined elsewhere in this file).
    text = Text(ans, 'text', [])

    for child in run:
        if self.namespace.is_tag(child, 'w:t'):
            if not child.text:
                continue
            space = child.get(XML('space'), None)
            preserve = False
            ctext = child.text
            if space != 'preserve':
                # Remove leading and trailing whitespace. Word ignores
                # leading and trailing whitespace without preserve unless
                # the element is only whitespace.
                stripped = ctext.strip(' \n\r\t')
                if stripped:
                    ctext = stripped
            # Only use a <span> with white-space:pre-wrap if this element
            # actually needs it, i.e. if it has more than one
            # consecutive space or it has newlines or tabs.
            multi_spaces = self.ms_pat.search(ctext) is not None
            preserve = multi_spaces or self.ws_pat.search(ctext) is not None
            if preserve:
                text.add_elem(SPAN(ctext, style="white-space:pre-wrap"))
                ans.append(text.elem)
            else:
                text.buf.append(ctext)
        elif self.namespace.is_tag(child, 'w:cr'):
            text.add_elem(BR())
            ans.append(text.elem)
        elif self.namespace.is_tag(child, 'w:br'):
            typ = self.namespace.get(child, 'w:type')
            if typ in {'column', 'page'}:
                br = BR(style='page-break-after:always')
            else:
                clear = child.get('clear', None)
                if clear in {'all', 'left', 'right'}:
                    br = BR(style='clear:%s'%('both' if clear == 'all' else clear))
                else:
                    br = BR()
            text.add_elem(br)
            ans.append(text.elem)
        elif self.namespace.is_tag(child, 'w:drawing') or self.namespace.is_tag(child, 'w:pict'):
            for img in self.images.to_html(child, self.current_page, self.docx, self.dest_dir):
                text.add_elem(img)
                ans.append(text.elem)
        elif self.namespace.is_tag(child, 'w:footnoteReference') or self.namespace.is_tag(child, 'w:endnoteReference'):
            anchor, name = self.footnotes.get_ref(child)
            if anchor and name:
                l = A(name, id='back_%s' % anchor, href='#' + anchor, title=name)
                l.set('class', 'noteref')
                l.set('role', 'doc-noteref')
                text.add_elem(l)
                ans.append(text.elem)
        elif self.namespace.is_tag(child, 'w:tab'):
            # Approximate a tab with non-breaking spaces (about six spaces
            # per half inch of the default tab stop).
            spaces = int(math.ceil((self.settings.default_tab_stop / 36) * 6))
            text.add_elem(SPAN(NBSP * spaces))
            ans.append(text.elem)
            ans[-1].set('class', 'tab')
        elif self.namespace.is_tag(child, 'w:noBreakHyphen'):
            text.buf.append('\u2011')
        elif self.namespace.is_tag(child, 'w:softHyphen'):
            text.buf.append('\u00ad')
    if text.buf:
        setattr(text.elem, text.attr, ''.join(text.buf))

    style = self.styles.resolve_run(run)
    if style.vert_align in {'superscript', 'subscript'}:
        if ans.text or len(ans):
            ans.set('data-docx-vert', 'sup' if style.vert_align == 'superscript' else 'sub')
    if style.lang is not inherit:
        lang = html_lang(style.lang)
        if lang is not None and lang != self.doc_lang:
            ans.set('lang', lang)
    if style.rtl is True:
        ans.set('dir', 'rtl')
    if is_symbol_font(style.font_family):
        # Remap symbol-font private-use characters to real Unicode.
        # NOTE(review): this iterates `text` (the Text helper), not `ans` —
        # presumably Text supports iteration over its elements; confirm.
        for elem in text:
            if elem.text:
                elem.text = map_symbol_text(elem.text, style.font_family)
            if elem.tail:
                elem.tail = map_symbol_text(elem.tail, style.font_family)
        style.font_family = 'sans-serif'
    return ans
def add_frame(self, html_obj, style):
    '''Accumulate consecutive block elements that share the same frame style
    into runs in self.framed (a list of runs; each run is a list of
    (html_obj, style) pairs). apply_frames() later wraps each run.'''
    last_run = self.framed[-1]
    if style is inherit:
        # Unframed element: terminate the current run, if any.
        if last_run:
            self.framed.append([])
        return
    if last_run:
        if last_run[-1][1] == style:
            # Same frame as the previous element: extend the run.
            last_run.append((html_obj, style))
        else:
            # Bug fix: a differently framed element must start a NEW run.
            # Previously this branch appended to self.framed[-1], which is
            # the same list as last_run, so both branches were identical and
            # unrelated frames were merged into one.
            self.framed.append([(html_obj, style)])
    else:
        last_run.append((html_obj, style))
def apply_frames(self):
    '''Wrap every run of framed elements (collected by add_frame) and every
    run of bordered blocks (collected by mark_block_runs) in a <div> carrying
    the frame/border CSS.'''
    for run in filter(None, self.framed):
        style = run[0][1]
        paras = tuple(x[0] for x in run)
        parent = paras[0].getparent()
        idx = parent.index(paras[0])
        frame = DIV(*paras)
        parent.insert(idx, frame)
        self.framed_map[frame] = css = style.css(self.page_map[self.object_map[paras[0]]])
        self.styles.register(css, 'frame')

    if not self.block_runs:
        return
    # Reverse map: source element -> converted HTML element.
    rmap = {v:k for k, v in iteritems(self.object_map)}
    for border_style, blocks in self.block_runs:
        paras = tuple(rmap[p] for p in blocks)
        for p in paras:
            if p.tag == 'li':
                has_li = True
                break
        else:
            has_li = False
        parent = paras[0].getparent()
        if parent.tag in ('ul', 'ol'):
            # The whole list is bordered; wrap the list element itself.
            ul = parent
            parent = ul.getparent()
            idx = parent.index(ul)
            frame = DIV(ul)
        elif has_li:
            # Some items are list items nested below parent; wrap their
            # top-level ancestors instead.
            def top_level_tag(x):
                while True:
                    q = x.getparent()
                    if q is parent or q is None:
                        break
                    x = q
                return x
            paras = tuple(map(top_level_tag, paras))
            idx = parent.index(paras[0])
            frame = DIV(*paras)
        else:
            idx = parent.index(paras[0])
            frame = DIV(*paras)
        parent.insert(idx, frame)
        self.framed_map[frame] = css = border_style.css
        self.styles.register(css, 'frame')
def mark_block_runs(self, paras):
    '''Find runs of consecutive paragraphs with identical borders and the
    same frame, strip the per-paragraph borders and record one combined
    border style per run in self.block_runs (consumed by apply_frames).'''
    def process_run(run):
        max_left = max_right = 0
        has_visible_border = None
        for p in run:
            style = self.styles.resolve_paragraph(p)
            if has_visible_border is None:
                # Decided once, from the first paragraph of the run.
                has_visible_border = style.has_visible_border()
            if isinstance(style.margin_left, numbers.Number):
                max_left = max(style.margin_left, max_left)
            if isinstance(style.margin_right, numbers.Number):
                max_right = max(style.margin_right, max_right)
            if has_visible_border:
                # Horizontal margins move to the wrapping frame.
                style.margin_left = style.margin_right = inherit
            if p is not run[0]:
                style.padding_top = 0
            else:
                border_style = style.clone_border_styles()
                if has_visible_border:
                    border_style.margin_top, style.margin_top = style.margin_top, inherit
            if p is not run[-1]:
                style.padding_bottom = 0
            else:
                if has_visible_border:
                    border_style.margin_bottom, style.margin_bottom = style.margin_bottom, inherit
            style.clear_borders()
            if p is not run[-1]:
                style.apply_between_border()
        if has_visible_border:
            border_style.margin_left, border_style.margin_right = max_left,max_right
        self.block_runs.append((border_style, run))

    run = []
    for p in paras:
        # Extend the current run only when frame and borders match the
        # previous paragraph; otherwise commit and start over.
        if run and self.frame_map.get(p) == self.frame_map.get(run[-1]):
            style = self.styles.resolve_paragraph(p)
            last_style = self.styles.resolve_paragraph(run[-1])
            if style.has_identical_borders(last_style):
                run.append(p)
                continue
        if len(run) > 1:
            process_run(run)
        run = [p]
    if len(run) > 1:
        process_run(run)
if __name__ == '__main__':
    # Command line driver: convert the DOCX given as the last argument into
    # a fresh ./docx_input directory, with debug logging.
    import shutil
    from calibre.utils.logging import default_log
    default_log.filter_level = default_log.DEBUG
    dest_dir = os.path.join(os.getcwd(), 'docx_input')
    if os.path.exists(dest_dir):
        shutil.rmtree(dest_dir)
    os.mkdir(dest_dir)
    Convert(sys.argv[-1], dest_dir=dest_dir, log=default_log)()
| 35,825 | Python | .py | 787 | 31.560356 | 134 | 0.528061 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,569 | numbering.py | kovidgoyal_calibre/src/calibre/ebooks/docx/numbering.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
import string
from collections import Counter, defaultdict
from functools import partial
from lxml.html.builder import OL, SPAN, UL
from calibre.ebooks.docx.block_styles import ParagraphStyle
from calibre.ebooks.docx.char_styles import RunStyle, inherit
from calibre.ebooks.metadata import roman
from polyglot.builtins import iteritems
# Map OOXML <w:numFmt> values to their CSS list-style-type equivalents.
# Formats missing from this map fall back to 'decimal' (see
# Level.read_from_xml()).
STYLE_MAP = {
    'aiueo': 'hiragana',
    'aiueoFullWidth': 'hiragana',
    'hebrew1': 'hebrew',
    'iroha': 'katakana-iroha',
    'irohaFullWidth': 'katakana-iroha',
    'lowerLetter': 'lower-alpha',
    'lowerRoman': 'lower-roman',
    'none': 'none',
    'upperLetter': 'upper-alpha',
    'upperRoman': 'upper-roman',
    'chineseCounting': 'cjk-ideographic',
    'decimalZero': 'decimal-leading-zero',
}
def alphabet(val, lower=True):
    '''Return the letter for list entry *val* (1 -> 'a'/'A'), wrapping
    around after the end of the alphabet.'''
    if lower:
        letters = string.ascii_lowercase
    else:
        letters = string.ascii_uppercase
    pos = abs(val - 1) % len(letters)
    return letters[pos]
# Formatter callables used by Level.format_template() when expanding %n
# placeholders; list formats not present here are rendered as plain decimal
# numbers.
alphabet_map = {
    'lower-alpha':alphabet, 'upper-alpha':partial(alphabet, lower=False),
    'lower-roman':lambda x:roman(x).lower(), 'upper-roman':roman,
    'decimal-leading-zero': lambda x: '0%d' % x
}
class Level:
    '''One level (<w:lvl>) of a numbering definition: its format, start
    value, restart behavior, templates and linked paragraph/character
    styles.'''

    def __init__(self, namespace, lvl=None):
        self.namespace = namespace
        self.restart = None          # level number that restarts this counter
        self.start = 0               # initial counter value
        self.fmt = 'decimal'         # CSS list-style-type (or bullet shape)
        self.para_link = None        # id of the paragraph style linked via w:pStyle
        self.paragraph_style = self.character_style = None
        self.is_numbered = False     # True for numbered, False for bullets
        self.num_template = None     # w:lvlText for numbered lists, if non-trivial
        self.bullet_template = None  # w:lvlText for textual bullets
        self.pic_id = None           # w:lvlPicBulletId for picture bullets
        if lvl is not None:
            self.read_from_xml(lvl)

    def copy(self):
        # Shallow copy of all level attributes into a fresh Level.
        ans = Level(self.namespace)
        for x in ('restart', 'pic_id', 'start', 'fmt', 'para_link', 'paragraph_style', 'character_style', 'is_numbered', 'num_template', 'bullet_template'):
            setattr(ans, x, getattr(self, x))
        return ans

    def format_template(self, counter, ilvl, template):
        '''Expand the %1, %2, ... placeholders in *template* using the
        current counter values, appending a no-break space.'''
        def sub(m):
            x = int(m.group(1)) - 1
            if x > ilvl or x not in counter:
                return ''
            # Counters for outer levels have already been incremented for
            # the next item, hence the -1 for levels other than ilvl.
            val = counter[x] - (0 if x == ilvl else 1)
            formatter = alphabet_map.get(self.fmt, lambda x: '%d' % x)
            return formatter(val)

        return re.sub(r'%(\d+)', sub, template).rstrip() + '\xa0'

    def read_from_xml(self, lvl, override=False):
        '''Populate this level from a <w:lvl> element.'''
        XPath, get = self.namespace.XPath, self.namespace.get
        for lr in XPath('./w:lvlRestart[@w:val]')(lvl):
            try:
                self.restart = int(get(lr, 'w:val'))
            except (TypeError, ValueError):
                pass

        for lr in XPath('./w:start[@w:val]')(lvl):
            try:
                self.start = int(get(lr, 'w:val'))
            except (TypeError, ValueError):
                pass

        for rPr in XPath('./w:rPr')(lvl):
            ps = RunStyle(self.namespace, rPr)
            if self.character_style is None:
                self.character_style = ps
            else:
                self.character_style.update(ps)

        lt = None
        for lr in XPath('./w:lvlText[@w:val]')(lvl):
            lt = get(lr, 'w:val')

        for lr in XPath('./w:numFmt[@w:val]')(lvl):
            val = get(lr, 'w:val')
            if val == 'bullet':
                self.is_numbered = False
                cs = self.character_style
                # Symbol-font bullets map to the standard CSS bullet shapes;
                # anything else is kept as a textual bullet template.
                if lt in {'\uf0a7', 'o'} or (
                        cs is not None and cs.font_family is not inherit and cs.font_family.lower() in {'wingdings', 'symbol'}):
                    self.fmt = {'\uf0a7':'square', 'o':'circle'}.get(lt, 'disc')
                else:
                    self.bullet_template = lt
                for lpid in XPath('./w:lvlPicBulletId[@w:val]')(lvl):
                    self.pic_id = get(lpid, 'w:val')
            else:
                self.is_numbered = True
                self.fmt = STYLE_MAP.get(val, 'decimal')
                # A template like "%1." is the default rendering, so only
                # keep templates that differ from it.
                if lt and re.match(r'%\d+\.$', lt) is None:
                    self.num_template = lt

        for lr in XPath('./w:pStyle[@w:val]')(lvl):
            self.para_link = get(lr, 'w:val')

        for pPr in XPath('./w:pPr')(lvl):
            ps = ParagraphStyle(self.namespace, pPr)
            if self.paragraph_style is None:
                self.paragraph_style = ps
            else:
                self.paragraph_style.update(ps)

    def css(self, images, pic_map, rid_map):
        '''Return the CSS dict for a list using this level (list-style-type
        and, for picture bullets, list-style-image).'''
        ans = {'list-style-type': self.fmt}
        if self.pic_id:
            rid = pic_map.get(self.pic_id, None)
            if rid:
                try:
                    fname = images.generate_filename(rid, rid_map=rid_map, max_width=20, max_height=20)
                except Exception:
                    fname = None
                else:
                    ans['list-style-image'] = 'url("images/%s")' % fname
        return ans

    def char_css(self):
        '''CSS for the bullet text itself, minus font-family (the symbol
        font must not leak onto the rendered bullet).'''
        try:
            css = self.character_style.css
        except AttributeError:
            css = {}
        css.pop('font-family', None)
        return css
class NumberingDefinition:
    '''An abstract numbering definition (<w:abstractNum>): a collection of
    Level objects keyed by their ilvl.'''

    def __init__(self, namespace, parent=None, an_id=None):
        self.namespace = namespace
        XPath, get = self.namespace.XPath, self.namespace.get
        self.levels = {}
        self.abstract_numbering_definition_id = an_id
        if parent is not None:
            for lvl in XPath('./w:lvl')(parent):
                try:
                    ilvl = int(get(lvl, 'w:ilvl', 0))
                except (TypeError, ValueError):
                    ilvl = 0
                self.levels[ilvl] = Level(namespace, lvl)

    def copy(self):
        # Deep-enough copy: levels are copied individually so overrides on
        # an instance do not mutate the abstract definition.
        ans = NumberingDefinition(self.namespace, an_id=self.abstract_numbering_definition_id)
        for l, lvl in iteritems(self.levels):
            ans.levels[l] = lvl.copy()
        return ans
class Numbering:
    '''Reads all numbering definitions from numbering.xml and applies list
    markup (<ol>/<ul>/<li>) to the converted HTML.'''

    def __init__(self, namespace):
        self.namespace = namespace
        self.definitions = {}              # abstractNumId -> NumberingDefinition
        self.instances = {}                # numId -> NumberingDefinition (with overrides)
        self.counters = defaultdict(Counter)  # abstractNumId -> per-level counters
        self.starts = {}                   # numId -> {ilvl: start value}
        self.pic_map = {}                  # numPicBulletId -> image relationship id

    def __call__(self, root, styles, rid_map):
        ' Read all numbering style definitions '
        XPath, get = self.namespace.XPath, self.namespace.get
        self.rid_map = rid_map
        for npb in XPath('./w:numPicBullet[@w:numPicBulletId]')(root):
            npbid = get(npb, 'w:numPicBulletId')
            for idata in XPath('descendant::v:imagedata[@r:id]')(npb):
                rid = get(idata, 'r:id')
                self.pic_map[npbid] = rid

        lazy_load = {}
        for an in XPath('./w:abstractNum[@w:abstractNumId]')(root):
            an_id = get(an, 'w:abstractNumId')
            nsl = XPath('./w:numStyleLink[@w:val]')(an)
            if nsl:
                # Defined indirectly through a numbering style; resolve it
                # after the directly defined instances are known.
                lazy_load[an_id] = get(nsl[0], 'w:val')
            else:
                nd = NumberingDefinition(self.namespace, an, an_id=an_id)
                self.definitions[an_id] = nd

        def create_instance(n, definition):
            # Copy the abstract definition and apply any per-instance
            # w:lvlOverride elements.
            nd = definition.copy()
            start_overrides = {}
            for lo in XPath('./w:lvlOverride')(n):
                try:
                    ilvl = int(get(lo, 'w:ilvl'))
                except (ValueError, TypeError):
                    ilvl = None
                for so in XPath('./w:startOverride[@w:val]')(lo):
                    try:
                        start_override = int(get(so, 'w:val'))
                    except (TypeError, ValueError):
                        pass
                    else:
                        start_overrides[ilvl] = start_override
                for lvl in XPath('./w:lvl')(lo)[:1]:
                    nilvl = get(lvl, 'w:ilvl')
                    ilvl = nilvl if ilvl is None else ilvl
                    alvl = nd.levels.get(ilvl, None)
                    if alvl is None:
                        alvl = Level(self.namespace)
                    alvl.read_from_xml(lvl, override=True)
            for ilvl, so in iteritems(start_overrides):
                try:
                    # Bug fix: this used to assign the loop-leftover variable
                    # `start_override` (the last override seen) to every
                    # level, instead of the per-level value `so`.
                    nd.levels[ilvl].start = so
                except KeyError:
                    pass
            return nd

        next_pass = {}
        for n in XPath('./w:num[@w:numId]')(root):
            an_id = None
            num_id = get(n, 'w:numId')
            for an in XPath('./w:abstractNumId[@w:val]')(n):
                an_id = get(an, 'w:val')
            d = self.definitions.get(an_id, None)
            if d is None:
                # Abstract definition not available yet (numStyleLink);
                # retry after lazy_load is resolved.
                next_pass[num_id] = (an_id, n)
                continue
            self.instances[num_id] = create_instance(n, d)

        numbering_links = styles.numbering_style_links
        for an_id, style_link in iteritems(lazy_load):
            num_id = numbering_links[style_link]
            self.definitions[an_id] = self.instances[num_id].copy()

        for num_id, (an_id, n) in iteritems(next_pass):
            d = self.definitions.get(an_id, None)
            if d is not None:
                self.instances[num_id] = create_instance(n, d)

        for num_id, d in iteritems(self.instances):
            self.starts[num_id] = {lvl:d.levels[lvl].start for lvl in d.levels}

    def get_pstyle(self, num_id, style_id):
        '''Return the ilvl whose level links to the paragraph style
        *style_id* for the given numbering instance, or None.'''
        d = self.instances.get(num_id, None)
        if d is not None:
            for ilvl, lvl in iteritems(d.levels):
                if lvl.para_link == style_id:
                    return ilvl

    def get_para_style(self, num_id, lvl):
        '''Return the paragraph style attached to level *lvl* of numbering
        instance *num_id*, or None.'''
        d = self.instances.get(num_id, None)
        if d is not None:
            lvl = d.levels.get(lvl, None)
            return getattr(lvl, 'paragraph_style', None)

    def update_counter(self, counter, levelnum, levels):
        '''Advance the counter for *levelnum* and reset every level that
        restarts when it changes.'''
        counter[levelnum] += 1
        for ilvl, lvl in iteritems(levels):
            restart = lvl.restart
            # By default a level restarts when the immediately higher level
            # advances; an explicit w:lvlRestart overrides that.
            if (restart is None and ilvl == levelnum + 1) or restart == levelnum + 1:
                counter[ilvl] = lvl.start

    def apply_markup(self, items, body, styles, object_map, images):
        '''Turn converted paragraphs that carry numbering into <li> elements,
        then group consecutive items into <ol>/<ul> wrappers.'''
        # First pass: number each item and stash list metadata in attributes.
        seen_instances = set()
        for p, num_id, ilvl in items:
            d = self.instances.get(num_id, None)
            if d is not None:
                lvl = d.levels.get(ilvl, None)
                if lvl is not None:
                    an_id = d.abstract_numbering_definition_id
                    counter = self.counters[an_id]
                    if ilvl not in counter or num_id not in seen_instances:
                        counter[ilvl] = self.starts[num_id][ilvl]
                        seen_instances.add(num_id)
                    p.tag = 'li'
                    p.set('value', '%s' % counter[ilvl])
                    p.set('list-lvl', str(ilvl))
                    p.set('list-id', num_id)
                    if lvl.num_template is not None:
                        val = lvl.format_template(counter, ilvl, lvl.num_template)
                        p.set('list-template', val)
                    elif lvl.bullet_template is not None:
                        val = lvl.format_template(counter, ilvl, lvl.bullet_template)
                        p.set('list-template', val)
                    self.update_counter(counter, ilvl, d.levels)

        templates = {}

        def commit(current_run):
            # Wrap a run of sibling <li>s sharing the same list id/level in a
            # single <ol>/<ul> and strip the temporary attributes.
            if not current_run:
                return
            start = current_run[0]
            parent = start.getparent()
            idx = parent.index(start)
            d = self.instances[start.get('list-id')]
            ilvl = int(start.get('list-lvl'))
            lvl = d.levels[ilvl]
            lvlid = start.get('list-id') + start.get('list-lvl')
            has_template = 'list-template' in start.attrib
            wrap = (OL if lvl.is_numbered or has_template else UL)('\n\t')
            if has_template:
                wrap.set('lvlid', lvlid)
            else:
                wrap.set('class', styles.register(lvl.css(images, self.pic_map, self.rid_map), 'list'))
            ccss = lvl.char_css()
            if ccss:
                ccss = styles.register(ccss, 'bullet')
            parent.insert(idx, wrap)
            last_val = None
            for child in current_run:
                wrap.append(child)
                child.tail = '\n\t'
                if has_template:
                    # Move the item content into a span and prepend a span
                    # with the rendered list template text.
                    span = SPAN()
                    span.text = child.text
                    child.text = None
                    for gc in child:
                        span.append(gc)
                    child.append(span)
                    span = SPAN(child.get('list-template'))
                    if ccss:
                        span.set('class', ccss)
                    last = templates.get(lvlid, '')
                    if span.text and len(span.text) > len(last):
                        templates[lvlid] = span.text
                    child.insert(0, span)
                for attr in ('list-lvl', 'list-id', 'list-template'):
                    child.attrib.pop(attr, None)
                val = int(child.get('value'))
                # Drop redundant value attributes (implicit sequencing).
                if last_val == val - 1 or wrap.tag == 'ul' or (last_val is None and val == 1):
                    child.attrib.pop('value')
                last_val = val
            current_run[-1].tail = '\n'
            del current_run[:]

        parents = set()
        for child in body.iterdescendants('li'):
            parents.add(child.getparent())

        for parent in parents:
            current_run = []
            for child in parent:
                if child.tag == 'li':
                    if current_run:
                        last = current_run[-1]
                        if (last.get('list-id'), last.get('list-lvl')) != (child.get('list-id'), child.get('list-lvl')):
                            commit(current_run)
                    current_run.append(child)
                else:
                    commit(current_run)
            commit(current_run)

        # Convert the list items that use custom text for bullets into tables
        # so that they display correctly
        for wrap in body.xpath('//ol[@lvlid]'):
            wrap.attrib.pop('lvlid')
            wrap.tag = 'div'
            wrap.set('style', 'display:table')
            for i, li in enumerate(wrap.iterchildren('li')):
                li.tag = 'div'
                li.attrib.pop('value', None)
                li.set('style', 'display:table-row')
                obj = object_map[li]
                bs = styles.para_cache[obj]
                if i == 0:
                    wrap.set('style', 'display:table; padding-left:%s' %
                             bs.css.get('margin-left', '0'))
                bs.css.pop('margin-left', None)
                for child in li:
                    child.set('style', 'display:table-cell')
| 14,750 | Python | .py | 340 | 30.026471 | 156 | 0.514552 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,570 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/docx/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
class InvalidDOCX(ValueError):
    ''' Raised when a file cannot be processed as a valid DOCX document. '''
    pass
| 152 | Python | .py | 5 | 27.6 | 61 | 0.704225 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,571 | fonts.py | kovidgoyal_calibre/src/calibre/ebooks/docx/fonts.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import re
from collections import namedtuple
from calibre.ebooks.docx.block_styles import binary_property, inherit
from calibre.utils.filenames import ascii_filename
from calibre.utils.fonts.scanner import NoFonts, font_scanner
from calibre.utils.fonts.utils import is_truetype_font, panose_to_css_generic_family
from calibre.utils.icu import ord_string
from polyglot.builtins import codepoint_to_chr, iteritems
# One embedded font variant: the relationship target (name), the w:fontKey
# obfuscation key, and whether w:subsetted was set (see Family.__init__).
Embed = namedtuple('Embed', 'name key subsetted')
def has_system_fonts(name):
    '''Return True if at least one installed system font exists for the
    family *name*.'''
    try:
        matches = font_scanner.fonts_for_family(name)
    except NoFonts:
        return False
    return bool(matches)
def get_variant(bold=False, italic=False):
    '''Map a (bold, italic) flag pair to its conventional variant name:
    Regular, Bold, Italic or BoldItalic.'''
    if bold and italic:
        return 'BoldItalic'
    if bold:
        return 'Bold'
    if italic:
        return 'Italic'
    return 'Regular'
def find_fonts_matching(fonts, style='normal', stretch='normal'):
    '''Yield every font dict whose font-style and font-stretch both match
    the requested values.'''
    for candidate in fonts:
        style_ok = candidate['font-style'] == style
        stretch_ok = candidate['font-stretch'] == stretch
        if style_ok and stretch_ok:
            yield candidate
def weight_key(font):
    '''Sort key: distance of the font's weight from 400 (normal). Named
    weights map to their numeric values; unknown values sort last.'''
    raw = font['font-weight']
    try:
        weight = int(raw)
    except Exception:
        weight = {'normal': 400, 'bold': 700}.get(raw, 1000000)
    return abs(weight - 400)
def get_best_font(fonts, style, stretch):
    '''Return the matching font closest to normal (400) weight, or None when
    nothing matches (any error is deliberately swallowed).'''
    try:
        return min(find_fonts_matching(fonts, style, stretch), key=weight_key)
    except Exception:
        pass
class Family:
    '''A single font family from the DOCX font table (<w:font> element),
    including any embedded font variants.'''

    def __init__(self, elem, embed_relationships, XPath, get):
        self.name = self.family_name = get(elem, 'w:name')
        self.alt_names = tuple(get(x, 'w:val') for x in XPath('./w:altName')(elem))
        if self.alt_names and not has_system_fonts(self.name):
            # Primary name not installed: fall back to the first alternate
            # name that is available on this system.
            for x in self.alt_names:
                if has_system_fonts(x):
                    self.family_name = x
                    break

        # Embedded variants: variant name -> Embed(name, key, subsetted).
        self.embedded = {}
        for x in ('Regular', 'Bold', 'Italic', 'BoldItalic'):
            for y in XPath('./w:embed%s[@r:id]' % x)(elem):
                rid = get(y, 'r:id')
                key = get(y, 'w:fontKey')
                subsetted = get(y, 'w:subsetted') in {'1', 'true', 'on'}
                if rid in embed_relationships:
                    self.embedded[x] = Embed(embed_relationships[rid], key, subsetted)

        self.generic_family = 'auto'
        for x in XPath('./w:family[@w:val]')(elem):
            self.generic_family = get(x, 'w:val', 'auto')

        ntt = binary_property(elem, 'notTrueType', XPath, get)
        self.is_ttf = ntt is inherit or not ntt

        # PANOSE classification, used as a fallback for the CSS generic
        # family when w:family is absent or unhelpful.
        self.panose1 = None
        self.panose_name = None
        for x in XPath('./w:panose1[@w:val]')(elem):
            try:
                v = get(x, 'w:val')
                v = tuple(int(v[i:i+2], 16) for i in range(0, len(v), 2))
            except (TypeError, ValueError, IndexError):
                pass
            else:
                self.panose1 = v
                self.panose_name = panose_to_css_generic_family(v)

        self.css_generic_family = {'roman':'serif', 'swiss':'sans-serif', 'modern':'monospace',
                                   'decorative':'fantasy', 'script':'cursive'}.get(self.generic_family, None)
        self.css_generic_family = self.css_generic_family or self.panose_name or 'serif'
SYMBOL_MAPS = { # {{{
'Wingdings': (' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '🖉', '✂', 'âœ�', '👓', 'ğŸ•', '🕮', '🕯', '🕿', '✆', '🖂', '🖃', '📪', '📫', '📬', 'ğŸ“', '🗀', 'ğŸ—�', 'ğŸ—�', 'ğŸ—�', 'ğŸ—�', '🗄', 'â�³', '🖮', '🖰', '🖲', '🖳', '🖴', '🖫', '🖬', '✇', 'âœ�', 'ğŸ–�', '✌', 'ğŸ–�', 'ğŸ‘�', 'ğŸ‘�', '☜', 'â˜�', '☜', '🖗', 'ğŸ–�', '☺', 'ğŸ˜�', '☹', '💣', '🕱', 'ğŸ�³', 'ğŸ�±', '✈', '☼', '🌢', 'â�„', '🕆', 'âœ�', '🕈', '✠', '✡', '☪', '☯', '🕉', '☸', '♈', '♉', '♊', '♋', '♌', 'â™�', 'â™�', 'â™�', 'â™�', '♑', 'â™’', '♓', '🙰', '🙵', 'âš«', '🔾', 'â—¼', 'ğŸ��', 'ğŸ��', 'â�‘', 'â�’', 'ğŸ�Ÿ', 'â§«', 'â—†', 'â�–', 'ğŸ�™', '⌧', '⮹', '⌘', 'ğŸ�µ', 'ğŸ�¶', '🙶', '🙷', ' ', '🄋', 'â�€', 'â��', 'â�‚', 'â�ƒ', 'â�„', 'â�…', 'â�†', 'â�‡', 'â�ˆ', 'â�‰', '🄌', 'â�Š', 'â�‹', 'â�Œ', 'â��', 'â��', 'â��', 'â��', 'â�‘', 'â�’', 'â�“', '🙢', '🙠', '🙡', '🙣', '🙦', '🙤', '🙥', '🙧', '∙', '•', 'â¬�', 'â˜', 'ğŸ�†', 'ğŸ�ˆ', 'ğŸ�Š', 'ğŸ�‹', '🔿', 'â–ª', 'ğŸ��', '🟀', 'ğŸŸ�', '★', '🟋', 'ğŸŸ�', '🟓', '🟑', 'â¯�', '⌖', 'â¯�', 'â¯�', '⯑', '✪', '✰', 'ğŸ•�', '🕑', '🕒', '🕓', '🕔', '🕕', '🕖', '🕗', '🕘', '🕙', '🕚', '🕛', 'â®°', 'â®±', '⮲', '⮳', 'â®´', '⮵', 'â®¶', 'â®·', '🙪', '🙫', '🙕', '🙔', '🙗', '🙖', 'ğŸ™�', '🙑', '🙒', '🙓', '⌫', '⌦', '⮘', '⮚', 'â®™', 'â®›', '⮈', '⮊', '⮉', '⮋', '🡨', '🡪', '🡩', '🡫', '🡬', 'ğŸ¡', '🡯', '🡮', '🡸', '🡺', '🡹', '🡻', '🡼', '🡽', '🡿', '🡾', '⇦', '⇨', '⇧', '⇩', '⬄', '⇳', 'â¬�', '⬀', '⬃', '⬂', '🢬', 'ğŸ¢', '🗶', '✓', '🗷', '🗹', ' '), # noqa
'Wingdings 2': (' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '🖊', '🖋', '🖌', 'ğŸ–�', '✄', '✀', '🕾', '🕽', '🗅', '🗆', '🗇', '🗈', '🗉', '🗊', '🗋', '🗌', 'ğŸ—�', '📋', '🗑', '🗔', '🖵', '🖶', '🖷', '🖸', 'ğŸ–', '🖯', '🖱', '🖒', '🖓', '🖘', '🖙', '🖚', '🖛', '👈', '👉', '🖜', 'ğŸ–�', 'ğŸ–�', '🖟', '🖠', '🖡', '👆', '👇', '🖢', '🖣', '🖑', '🗴', '🗸', '🗵', '☑', '⮽', '☒', '⮾', '⮿', '🛇', '⦸', '🙱', '🙴', '🙲', '🙳', '‽', '🙹', '🙺', '🙻', '🙦', '🙤', '🙥', '🙧', '🙚', '🙘', '🙙', '🙛', '⓪', 'â‘ ', 'â‘¡', 'â‘¢', 'â‘£', '⑤', 'â‘¥', '⑦', 'â‘§', '⑨', 'â‘©', 'â“¿', 'â�¶', 'â�·', 'â�¸', 'â�¹', 'â�º', 'â�»', 'â�¼', 'â�½', 'â�¾', 'â�¿', ' ', '☉', '🌕', '☽', '☾', '⸿', 'âœ�', '🕇', '🕜', 'ğŸ•�', 'ğŸ•�', '🕟', '🕠', '🕡', '🕢', '🕣', '🕤', '🕥', '🕦', '🕧', '🙨', '🙩', 'â‹…', 'ğŸ�„', 'â¦�', 'â—�', 'â—�', 'ğŸ�…', 'ğŸ�‡', 'ğŸ�‰', '⊙', '⦿', 'ğŸ�Œ', 'ğŸ��', 'â—¾', 'â– ', 'â–¡', 'ğŸ�‘', 'ğŸ�’', 'ğŸ�“', 'ğŸ�”', 'â–£', 'ğŸ�•', 'ğŸ�–', 'ğŸ�—', 'ğŸ�˜', '⬩', '⬥', 'â—‡', 'ğŸ�š', 'â—ˆ', 'ğŸ�›', 'ğŸ�œ', 'ğŸ��', 'ğŸ��', '⬪', '⬧', 'â—Š', 'ğŸ� ', 'â—–', 'â——', '⯊', '⯋', '⯀', 'â¯�', '⬟', '⯂', '⬣', '⬢', '⯃', '⯄', 'ğŸ�¡', 'ğŸ�¢', 'ğŸ�£', 'ğŸ�¤', 'ğŸ�¥', 'ğŸ�¦', 'ğŸ�§', 'ğŸ�¨', 'ğŸ�©', 'ğŸ�ª', 'ğŸ�«', 'ğŸ�¬', 'ğŸ�', 'ğŸ�®', 'ğŸ�¯', 'ğŸ�°', 'ğŸ�±', 'ğŸ�²', 'ğŸ�³', 'ğŸ�´', 'ğŸ�µ', 'ğŸ�¶', 'ğŸ�·', 'ğŸ�¸', 'ğŸ�¹', 'ğŸ�º', 'ğŸ�»', 'ğŸ�¼', 'ğŸ�½', 'ğŸ�¾', 'ğŸ�¿', '🟀', '🟂', '🟄', '🟆', '🟉', '🟊', '✶', '🟌', 'ğŸŸ�', 'ğŸŸ�', '🟒', '✹', '🟃', '🟇', '✯', 'ğŸŸ�', '🟔', '⯌', 'â¯�', '※', 'â�‚', ' ', ' ', ' ', ' ', ' ', ' ',), # noqa
'Wingdings 3': (' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', 'â ', 'â¢', 'â¡', 'â£', 'â¤', 'â¥', 'â§', 'â¦', 'â°', 'â²', 'â±', 'â³', 'â¶', 'â¸', 'â»', 'â½', 'â¤', 'â¥', 'âª', 'â¬', 'â«', 'â', 'â�', 'â® ', '⮡', '⮢', '⮣', '⮤', '⮥', '⮦', 'â®§', 'â®�', '⮑', 'â®’', '⮓', '⮀', '⮃', 'â¾', 'â¿', '⮄', '⮆', 'â®…', '⮇', 'â®�', 'â®�', 'â®�', '⮌', 'â®', 'â¯', 'â�‹', '⌤', '⌃', '⌥', 'â�£', 'â�½', '⇪', '⮸', '🢠', '🢡', '🢢', '🢣', '🢤', '🢥', '🢦', '🢧', '🢨', '🢩', '🢪', '🢫', 'ğŸ¡�', '🡒', '🡑', '🡓', '🡔', '🡕', '🡗', '🡖', '🡘', '🡙', 'â–²', 'â–¼', 'â–³', 'â–½', 'â—€', 'â–¶', 'â—�', 'â–·', 'â—£', 'â—¢', 'â—¤', 'â—¥', 'ğŸ�€', 'ğŸ�‚', 'ğŸ��', ' ', 'ğŸ�ƒ', '⯅', '⯆', '⯇', '⯈', '⮜', 'â®�', 'â®�', '⮟', 'ğŸ �', '🠒', '🠑', '🠓', '🠔', '🠖', '🠕', '🠗', '🠘', '🠚', '🠙', '🠛', '🠜', 'ğŸ �', 'ğŸ �', '🠟', '🠀', '🠂', 'ğŸ �', '🠃', '🠄', '🠆', '🠅', '🠇', '🠈', '🠊', '🠉', '🠋', 'ğŸ ', '🠢', '🠤', '🠦', '🠨', '🠪', '🠬', '🢜', 'ğŸ¢�', 'ğŸ¢�', '🢟', '🠮', '🠰', '🠲', '🠴', '🠶', '🠸', '🠺', '🠹', '🠻', '🢘', '🢚', '🢙', '🢛', '🠼', '🠾', '🠽', '🠿', '🡀', '🡂', 'ğŸ¡�', '🡃', '🡄', '🡆', '🡅', '🡇', '⮨', '⮩', '⮪', '⮫', '⮬', 'â®', 'â®®', '⮯', '🡠', '🡢', '🡡', '🡣', '🡤', '🡥', '🡧', '🡦', '🡰', '🡲', '🡱', '🡳', '🡴', '🡵', '🡷', '🡶', '🢀', '🢂', 'ğŸ¢�', '🢃', '🢄', '🢅', '🢇', '🢆', 'ğŸ¢�', '🢒', '🢑', '🢓', '🢔', '🢕', '🢗', '🢖', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ',), # noqa
'Webdings': (' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '🕷', '🕸', '🕲', '🕶', 'ğŸ�†', 'ğŸ�–', '🖇', '🗨', '🗩', '🗰', '🗱', '🌶', 'ğŸ�—', '🙾', '🙼', '🗕', '🗖', '🗗', 'â�´', 'â�µ', 'â�¶', 'â�·', 'â�ª', 'â�©', 'â�®', 'â�', 'â�¸', 'â�¹', 'â�º', '🗚', '🗳', '🛠', 'ğŸ�—', 'ğŸ�˜', 'ğŸ�™', 'ğŸ�š', 'ğŸ�œ', 'ğŸ�', 'ğŸ�›', 'ğŸ� ', 'ğŸ�–', 'ğŸ��', '🛣', 'ğŸ”�', 'ğŸ�”', 'ğŸ‘�', '👂', 'ğŸ��', 'ğŸ�•', '🛤', 'ğŸ�Ÿ', '🛳', '🕬', '🕫', '🕨', '🔈', 'ğŸ�”', 'ğŸ�•', '🗬', '🙽', 'ğŸ—', '🗪', '🗫', 'â®”', '✔', '🚲', '⬜', '🛡', '📦', '🛱', '⬛', '🚑', '🛈', '🛩', '🛰', '🟈', '🕴', '⬤', '🛥', '🚔', '🗘', '🗙', 'â�“', '🛲', '🚇', 'ğŸš�', '⛳', '⦸', '⊖', 'ğŸš', '🗮', 'â��', '🗯', '🗲', ' ', '🚹', '🚺', '🛉', '🛊', '🚼', '👽', 'ğŸ�‹', 'â›·', 'ğŸ�‚', 'ğŸ�Œ', 'ğŸ�Š', 'ğŸ�„', 'ğŸ��', 'ğŸ��', '🚘', '🗠', '🛢', '📠', 'ğŸ�·', '📣', '👪', '🗡', '🗢', '🗣', '✯', '🖄', '🖅', '🖃', '🖆', '🖹', '🖺', '🖻', '🕵', '🕰', '🖽', '🖾', '📋', '🗒', '🗓', '🕮', '📚', 'ğŸ—�', '🗟', '🗃', '🗂', '🖼', 'ğŸ�', 'ğŸ�œ', 'ğŸ�˜', 'ğŸ�™', 'ğŸ�§', '💿', 'ğŸ��', '📷', 'ğŸ�Ÿ', 'ğŸ�¬', '📽', '📹', '📾', '📻', 'ğŸ�š', 'ğŸ�›', '📺', '💻', '🖥', '🖦', '🖧', 'ğŸ�¹', 'ğŸ�®', 'ğŸ�®', '🕻', '🕼', 'ğŸ–�', '🖀', '🖨', '🖩', '🖿', '🖪', '🗜', '🔒', '🔓', 'ğŸ—�', '📥', '📤', '🕳', '🌣', '🌤', '🌥', '🌦', 'â˜�', '🌨', '🌧', '🌩', '🌪', '🌬', '🌫', '🌜', '🌡', '🛋', 'ğŸ›�', 'ğŸ�½', 'ğŸ�¸', 'ğŸ›�', 'ğŸ›�', 'â“…', '♿', '🛆', '🖈', 'ğŸ�“', '🗤', '🗥', '🗦', '🗧', '🛪', 'ğŸ�¿', 'ğŸ�¦', 'ğŸ�Ÿ', 'ğŸ�•', 'ğŸ�ˆ', '🙬', '🙮', 'ğŸ™', '🙯', '🗺', 'ğŸŒ�', 'ğŸŒ�', 'ğŸŒ�', '🕊',), # noqa
'Symbol': (' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '!', '∀', '#', '∃', '%', '&', 'âˆ�', '(', ')', '*', '+', ',', '−', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '≅', 'Α', 'Î’', 'Χ', 'Δ', 'Ε', 'Φ', 'Γ', 'Η', 'Ι', 'Ï‘', 'Λ', 'Μ', 'Î�', 'Î�', 'Ο', 'Î ', 'Θ', 'Ρ', 'Σ', 'Τ', 'Î¥', 'Ï‚', 'Ω', 'Î�', 'Ψ', 'Ζ', '[', '∴', ']', '⊥', '_', '', 'α', 'β', 'χ', 'δ', 'ε', 'φ', 'γ', 'η', 'ι', 'Ï•', 'λ', 'μ', 'ν', 'ξ', 'ο', 'Ï€', 'θ', 'Ï�', 'σ', 'Ï„', 'Ï…', 'Ï–', 'ω', 'ξ', 'ψ', 'ζ', '{', '|', '}', '~', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '€', 'Ï’', '′', '≤', 'â�„', 'âˆ�', 'Æ’', '♣', '♥', '♦', 'â™ ', '↔', 'â†�', '↑', '→', '↓', '°', '±', '″', '≥', '×', 'âˆ�', '∂', '•', '÷', '≠', '≡', '≈', '…', 'â��', 'â�¯', '↲', 'ℵ', 'â„‘', 'ℜ', '℘', '⊗', '⊕', '∅', '∩', '∪', '⊃', '⊇', '⊄', '⊂', '⊆', '∈', '∉', '∠', '∂', '®', '©', 'â„¢', 'âˆ�', '√', 'â‹…', '¬', '∦', '∧', '⇔', 'â‡�', '⇑', '⇒', '⇓', 'â—Š', '〈', '®', '©', 'â„¢', '∑', 'â�›', 'â�œ', 'â��', 'â�¡', 'â�¢', 'â�£', 'â�§', 'â�¨', 'â�©', 'â�ª', ' ', '〉', '∫', '⌠', 'â�®', '⌡', 'â��', 'â�Ÿ', 'â� ', 'â�¤', 'â�¥', 'â�¦', 'â�ª', 'â�«', 'â�¬', ' ',), # noqa
} # }}}
# Lowercased names of the symbol fonts above, for case-insensitive lookup
SYMBOL_FONT_NAMES = frozenset(n.lower() for n in SYMBOL_MAPS)
def is_symbol_font(family):
    '''True if *family* names one of the known symbol fonts (case-insensitive).'''
    try:
        lowered = family.lower()
    except AttributeError:
        # family is None or not a string
        return False
    return lowered in SYMBOL_FONT_NAMES
def do_map(m, points):
    '''Yield characters for *points*, mapping Private Use Area code points
    (0xf000 + index) through the symbol table *m*.'''
    base = 0xf000
    limit = base + len(m)
    for cp in points:
        mapped = m[cp - base] if base < cp < limit else codepoint_to_chr(cp)
        yield mapped
def map_symbol_text(text, font):
    '''Replace symbol-font PUA characters in *text* with real unicode.'''
    table = SYMBOL_MAPS[font]
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    mapped = do_map(table, ord_string(text))
    return ''.join(mapped)
class Fonts:
    '''Collects font-family declarations and embedded font files from a DOCX
    font table, and emits @font-face CSS for the fonts actually used.'''
    def __init__(self, namespace):
        self.namespace = namespace
        self.fonts = {}  # font name -> Family
        self.used = set()  # (name, variant) pairs referenced via family_for()
    def __call__(self, root, embed_relationships, docx, dest_dir):
        # Parse every named <w:font> declaration in the font table
        for elem in self.namespace.XPath('//w:font[@w:name]')(root):
            self.fonts[self.namespace.get(elem, 'w:name')] = Family(elem, embed_relationships, self.namespace.XPath, self.namespace.get)
    def family_for(self, name, bold=False, italic=False):
        '''Return a CSS font-family value for the named font and variant.'''
        f = self.fonts.get(name, None)
        if f is None:
            return 'serif'
        variant = get_variant(bold, italic)
        self.used.add((name, variant))
        # Use the document name if this variant is embedded, otherwise the
        # (possibly substituted) system family name
        name = f.name if variant in f.embedded else f.family_name
        if is_symbol_font(name):
            return name
        return '"{}", {}'.format(name.replace('"', ''), f.css_generic_family)
    def embed_fonts(self, dest_dir, docx):
        '''Extract all used embedded fonts and return @font-face CSS rules.'''
        defs = []
        dest_dir = os.path.join(dest_dir, 'fonts')
        for name, variant in self.used:
            f = self.fonts[name]
            if variant in f.embedded:
                if not os.path.exists(dest_dir):
                    os.mkdir(dest_dir)
                fname = self.write(name, dest_dir, docx, variant)
                if fname is not None:
                    d = {'font-family':'"%s"' % name.replace('"', ''), 'src': 'url("fonts/%s")' % fname}
                    if 'Bold' in variant:
                        d['font-weight'] = 'bold'
                    if 'Italic' in variant:
                        d['font-style'] = 'italic'
                    d = [f'{k}: {v}' for k, v in iteritems(d)]
                    d = ';\n\t'.join(d)
                    defs.append('@font-face {\n\t%s\n}\n' % d)
        return '\n'.join(defs)
    def write(self, name, dest_dir, docx, variant):
        '''Extract one embedded font file, de-obfuscating it if necessary.
        Returns the written filename, or None if it is not a TrueType/OTF font.'''
        f = self.fonts[name]
        ef = f.embedded[variant]
        raw = docx.read(ef.name)
        prefix = raw[:32]
        if ef.key:
            # Embedded fonts are obfuscated by XORing the first 32 bytes with
            # the reversed bytes of the fontKey GUID (hex digits only)
            key = re.sub(r'[^A-Fa-f0-9]', '', ef.key)
            key = bytearray(reversed(tuple(int(key[i:i+2], 16) for i in range(0, len(key), 2))))
            prefix = bytearray(prefix)
            prefix = bytes(bytearray(prefix[i]^key[i % len(key)] for i in range(len(prefix))))
        if not is_truetype_font(prefix):
            return None
        ext = 'otf' if prefix.startswith(b'OTTO') else 'ttf'
        fname = ascii_filename(f'{name} - {variant}.{ext}').replace(' ', '_').replace('&', '_')
        with open(os.path.join(dest_dir, fname), 'wb') as dest:
            dest.write(prefix)
            dest.write(raw[32:])
        return fname
| 15,620 | Python | .py | 153 | 92.470588 | 1,946 | 0.365817 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,572 | index.py | kovidgoyal_calibre/src/calibre/ebooks/docx/index.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from operator import itemgetter
from lxml import etree
from calibre.utils.icu import partition_by_first_letter, sort_key
from polyglot.builtins import iteritems
def get_applicable_xe_fields(index, xe_fields, XPath, expand):
    '''Filter the XE (index entry) fields down to those this INDEX field uses.

    Applies the index's entry-type filter, optional letter-range filter and
    optional bookmark restriction. Always returns a (possibly empty) list.
    '''
    # Keep only entries whose entry-type matches the index's entry-type
    iet = index.get('entry-type', None)
    xe_fields = [xe for xe in xe_fields if xe.get('entry-type', None) == iet]
    # Optional letter range of the form "A-M": keep entries whose first
    # letter falls inside it
    lr = index.get('letter-range', None)
    if lr is not None:
        sl, el = lr.partition('-')[0::2]
        sl, el = sl.strip(), el.strip()
        if sl and el:
            def inrange(text):
                # Entries with empty text cannot be in any letter range
                # (the original indexed text[0] and crashed on empty text)
                return bool(text) and sl <= text[0] <= el
            xe_fields = [xe for xe in xe_fields if inrange(xe.get('text', ''))]
    bmark = index.get('bookmark', None)
    if bmark is None or not xe_fields:
        # No bookmark restriction, or nothing left to restrict. The guard on
        # the empty list is needed because the bookmark lookup below reads
        # xe_fields[0] to get at the document tree.
        return xe_fields
    attr = expand('w:name')
    bookmarks = {b for b in XPath('//w:bookmarkStart')(xe_fields[0]['start_elem']) if b.get(attr, None) == bmark}
    ancestors = XPath('ancestor::w:bookmarkStart')
    def contained(xe):
        # Check if the xe field is contained inside a bookmark with the
        # specified name
        return bool(set(ancestors(xe['start_elem'])) & bookmarks)
    return [xe for xe in xe_fields if contained(xe)]
def make_block(expand, style, parent, pos):
    '''Insert a new empty paragraph at *pos* inside *parent*.

    If *style* is given, a <w:pPr><w:pStyle/></w:pPr> block is attached.
    Returns the paragraph element and its text container element.
    '''
    para = parent.makeelement(expand('w:p'))
    parent.insert(pos, para)
    if style is not None:
        props = para.makeelement(expand('w:pPr'))
        para.append(props)
        pstyle = props.makeelement(expand('w:pStyle'))
        props.append(pstyle)
        pstyle.set(expand('w:val'), style)
    run = para.makeelement(expand('w:r'))
    para.append(run)
    text = run.makeelement(expand('w:t'))
    text.set(expand('xml:space'), 'preserve')
    run.append(text)
    return para, text
def add_xe(xe, t, expand):
    '''Fill the text container *t* with the entry text of *xe*.

    Appends the optional page-number text in its own run and a line break
    so separate entries render on separate lines. Returns (anchor, run).
    '''
    run = t.getparent()
    pos = run.index(t)
    t.text = xe.get('text') or ' '
    page_text = xe.get('page-number-text', None)
    if page_text:
        para = t.getparent().getparent()
        extra_run = para.makeelement(expand('w:r'))
        para.append(extra_run)
        extra_t = extra_run.makeelement(expand('w:t'))
        extra_t.set(expand('xml:space'), 'preserve')
        extra_t.text = ' [%s]' % page_text
        extra_run.append(extra_t)
    # Put separate entries on separate lines
    run.insert(pos + 1, run.makeelement(expand('w:br')))
    return xe['anchor'], run
def process_index(field, index, xe_fields, log, XPath, expand):
    '''
    We remove all the word generated index markup and replace it with our own
    that is more suitable for an ebook.

    Returns (hyperlinks, blocks): hyperlinks are (anchor, run) pairs linking
    entries to their targets; blocks are the newly created paragraphs.
    '''
    styles = []
    heading_text = index.get('heading', None)
    heading_style = 'IndexHeading'
    start_pos = None
    # Remove the Word-generated index paragraphs, remembering where the
    # first one was (start_pos) and what paragraph styles were used
    for elem in field.contents:
        if elem.tag.endswith('}p'):
            s = XPath('descendant::pStyle/@w:val')(elem)
            if s:
                styles.append(s[0])
            p = elem.getparent()
            if start_pos is None:
                start_pos = (p, p.index(elem))
            p.remove(elem)
    xe_fields = get_applicable_xe_fields(index, xe_fields, XPath, expand)
    if not xe_fields:
        return [], []
    if heading_text is not None:
        # Group entries by first letter, with a heading before each group
        groups = partition_by_first_letter(xe_fields, key=itemgetter('text'))
        items = []
        for key, fields in iteritems(groups):
            items.append(key), items.extend(fields)
        if styles:
            heading_style = styles[0]
    else:
        items = sorted(xe_fields, key=lambda x:sort_key(x['text']))
    hyperlinks = []
    blocks = []
    # Insert in reverse order so repeated inserts at start_pos end up in
    # the original order
    for item in reversed(items):
        is_heading = not isinstance(item, dict)
        style = heading_style if is_heading else None
        p, t = make_block(expand, style, *start_pos)
        if is_heading:
            text = heading_text
            if text.lower().startswith('a'):
                # The heading template starts with 'A'; substitute the
                # actual group letter for it
                text = item + text[1:]
            t.text = text
        else:
            hyperlinks.append(add_xe(item, t, expand))
        blocks.append(p)
    return hyperlinks, blocks
def split_up_block(block, a, text, parts, ldict):
    '''Split a colon-separated index entry into nested, indented spans.

    *parts* are the entry components; all but the last become prefix spans
    at increasing indent, the last keeps the link *a*. *ldict* records the
    nesting level of each created span (used later when merging blocks).
    '''
    prefixes = parts[:-1]
    a.text = parts[-1]
    parent = a.getparent()
    style = 'display:block; margin-left: %.3gem'
    for i, prefix in enumerate(prefixes):
        m = 1.5 * i
        span = parent.makeelement('span', style=style % m)
        ldict[span] = i
        parent.append(span)
        span.text = prefix
    # The span holding the link sits one level below the last prefix.
    # Note: the original shadowed the prefix list with the loop variable, so
    # ldict was set to the character length of the last prefix string
    # instead of the nesting depth, corrupting the level map.
    span = parent.makeelement('span', style=style % (len(prefixes) * 1.5))
    parent.append(span)
    span.append(a)
    ldict[span] = len(prefixes)
"""
The merge algorithm is a little tricky.
We start with a list of elementary blocks. Each is an HtmlElement, a p node
with a list of child nodes. The last child may be a link, and the earlier ones are
just text.
The list is in reverse order from what we want in the index.
There is a dictionary ldict which records the level of each child node.
Now we want to do a reduce-like operation, combining all blocks with the same
top level index entry into a single block representing the structure of all
references, subentries, etc. under that top entry.
Here's the algorithm.
Given a block p and the next block n, and the top level entries p1 and n1 in each
block, which we assume have the same text:
Start with (p, p1) and (n, n1).
Given (p, p1, ..., pk) and (n, n1, ..., nk) which we want to merge:
If there are no more levels in n, and we have a link in nk,
then add the link from nk to the links for pk.
This might be the first link for pk, or we might get a list of references.
Otherwise nk+1 is the next level in n. Look for a matching entry in p. It must have
the same text, it must follow pk, it must come before we find any other p entries at
the same level as pk, and it must have the same level as nk+1.
If we find such a matching entry, go back to the start with (p ... pk+1) and (n ... nk+1).
If there is no matching entry, then because of the original reversed order we want
to insert nk+1 and all following entries from n into p immediately following pk.
"""
def find_match(prev_block, pind, nextent, ldict):
    '''Find a child of prev_block matching *nextent* one level below pind.

    The search stops as soon as an entry at or above the current level is
    seen (we have left the subtree). Returns the matching index, or -1.
    '''
    curlevel = ldict.get(prev_block[pind], -1)
    if curlevel < 0:
        return -1
    for pos in range(pind + 1, len(prev_block)):
        candidate = prev_block[pos]
        trylev = ldict.get(candidate, -1)
        if trylev <= curlevel:
            # Back at (or above) the starting level: no match possible
            return -1
        if trylev == curlevel + 1 and candidate.text_content() == nextent.text_content():
            return pos
    return -1
def add_link(pent, nent, ldict):
    '''Move the link (if any) from entry *nent* into entry *pent*.

    When pent already has links the new one is appended after a comma,
    otherwise the link replaces pent's plain text.
    '''
    new_links = nent.xpath('descendant::a[1]')
    if not new_links:
        # If there is no link, leave it as text
        return
    link = new_links[0]
    old_links = pent.xpath('descendant::a')
    if old_links:
        # Put on same line with a comma
        last = old_links[-1]
        last.tail = ', '
        container = last.getparent()
        container.insert(container.index(last) + 1, link)
    else:
        # substitute link for plain text in pent
        pent.text = ""
        pent.append(link)
def merge_blocks(prev_block, next_block, pind, nind, next_path, ldict):
    '''Merge next_block's entry chain into prev_block (see the algorithm
    description in the module docstring); next_block is removed from the tree
    once its unmatched tail has been transplanted.'''
    # First elements match. Any more in next?
    if len(next_path) == (nind + 1):
        # next has no deeper levels: just transfer its link to prev
        nextent = next_block[nind]
        add_link(prev_block[pind], nextent, ldict)
        return
    nind = nind + 1
    nextent = next_block[nind]
    prevent = find_match(prev_block, pind, nextent, ldict)
    if prevent > 0:
        # A matching sub-entry exists in prev: recurse one level deeper
        merge_blocks(prev_block, next_block, prevent, nind, next_path, ldict)
        return
    # Want to insert elements into previous block
    while nind < len(next_block):
        # insert takes it out of old, so next_block[nind] advances by itself
        pind = pind + 1
        prev_block.insert(pind, next_block[nind])
    next_block.getparent().remove(next_block)
def polish_index_markup(index, blocks):
    '''Convert the flat index paragraphs into nested spans and merge blocks
    that share the same top-level entry.'''
    # Blocks are in reverse order at this point
    path_map = {}  # block -> list of entry path components
    ldict = {}  # span -> nesting level
    for block in blocks:
        cls = block.get('class', '') or ''
        block.set('class', (cls + ' index-entry').lstrip())
        a = block.xpath('descendant::a[1]')
        text = ''
        if a:
            text = etree.tostring(a[0], method='text', with_tail=False, encoding='unicode').strip()
        if ':' in text:
            # Colon separated entry: split into nested indented spans
            path_map[block] = parts = list(filter(None, (x.strip() for x in text.split(':'))))
            if len(parts) > 1:
                split_up_block(block, a[0], text, parts, ldict)
        else:
            # try using a span all the time
            # NOTE(review): this branch assumes the block contains a link;
            # a[0] would raise IndexError otherwise — confirm with callers
            path_map[block] = [text]
            parent = a[0].getparent()
            span = parent.makeelement('span', style='display:block; margin-left: 0em')
            parent.append(span)
            span.append(a[0])
            ldict[span] = 0
        for br in block.xpath('descendant::br'):
            br.tail = None
    # We want a single block for each main entry
    prev_block = blocks[0]
    for block in blocks[1:]:
        pp, pn = path_map[prev_block], path_map[block]
        if pp[0] == pn[0]:
            # Same top-level entry: fold this block into the previous one
            merge_blocks(prev_block, block, 0, 0, pn, ldict)
        else:
            prev_block = block
| 8,949 | Python | .py | 226 | 32.570796 | 113 | 0.61323 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,573 | cleanup.py | kovidgoyal_calibre/src/calibre/ebooks/docx/cleanup.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from polyglot.builtins import itervalues
# Non-breaking space; used below to keep emptied page-break paragraphs visible
NBSP = '\xa0'
def mergeable(previous, current):
    '''True if two spans carry identical styling, are immediate siblings and
    have no intervening text, so they can be merged into one.'''
    if previous.tail or current.tail:
        return False
    if current.get('id', False):
        return False
    same_attrs = all(
        previous.get(attr) == current.get(attr)
        for attr in ('class', 'style', 'lang', 'dir'))
    if not same_attrs:
        return False
    try:
        return next(previous.itersiblings()) is current
    except StopIteration:
        return False
def append_text(parent, text):
    '''Append *text* to parent's textual content: the tail of the last child
    if there is one, otherwise the parent's own text.'''
    if len(parent) > 0:
        last = parent[-1]
        last.tail = (last.tail or '') + text
    else:
        parent.text = (parent.text or '') + text
def merge(parent, span):
    '''Fold span's text, children and tail into *parent*, then remove span.'''
    text, tail = span.text, span.tail
    if text:
        append_text(parent, text)
    for child in span:
        parent.append(child)
    if tail:
        append_text(parent, tail)
    span.getparent().remove(span)
def merge_run(run):
    '''Collapse a run of mergeable spans into the first span of the run.'''
    first, rest = run[0], run[1:]
    for span in rest:
        merge(first, span)
def liftable(css):
    '''A <span> is liftable if all its styling would work just as well when
    specified on the parent element (inheritable property groups only).'''
    allowed = {'text', 'font', 'letter', 'color', 'background'}
    return all(prop.partition('-')[0] in allowed for prop in css)
def add_text(elem, attr, text):
    '''Concatenate *text* onto elem.<attr>, treating a missing/None value
    as the empty string.'''
    existing = getattr(elem, attr) or ''
    setattr(elem, attr, existing + text)
def lift(span):
    # Replace an element by its content (text, children and tail)
    parent = span.getparent()
    idx = parent.index(span)
    try:
        last_child = span[-1]
    except IndexError:
        last_child = None
    # The span's leading text joins the parent's text (if span is first)
    # or the tail of the preceding sibling
    if span.text:
        if idx == 0:
            add_text(parent, 'text', span.text)
        else:
            add_text(parent[idx - 1], 'tail', span.text)
    # Insert children in reverse so they end up in original order at idx
    for child in reversed(span):
        parent.insert(idx, child)
    parent.remove(span)
    # The span's tail attaches after the last lifted child, or joins the
    # surrounding text when the span had no children
    if span.tail:
        if last_child is None:
            if idx == 0:
                add_text(parent, 'text', span.tail)
            else:
                add_text(parent[idx - 1], 'tail', span.tail)
        else:
            add_text(last_child, 'tail', span.tail)
def before_count(root, tag, limit=10):
    '''Return how many descendants of <body> precede *tag*, capped at *limit*.

    Always returns an int: if there is no body, or *tag* is not among the
    first *limit* descendants, *limit* is returned. (The original fell off
    the end and returned None when the body had fewer than *limit*
    descendants and *tag* was absent, breaking callers that compare the
    result numerically.)
    '''
    body = root.xpath('//body[1]')
    if not body:
        return limit
    ans = 0
    for elem in body[0].iterdescendants():
        if elem is tag:
            return ans
        ans += 1
        if ans > limit:
            return limit
    return limit
def wrap_contents(tag_name, elem):
    # Move all of elem's content (text and children) into a new child
    # element named tag_name, which becomes elem's only child.
    wrapper = elem.makeelement(tag_name)
    wrapper.text, elem.text = elem.text, ''
    # NOTE(review): children are removed while iterating over elem; this
    # relies on the element iterator tolerating the mutation — verify, as
    # removing during iteration can skip siblings with lxml.
    for child in elem:
        elem.remove(child)
        wrapper.append(child)
    elem.append(wrapper)
def cleanup_markup(log, root, styles, dest_dir, detect_cover, XPath):
    '''Post-process the converted HTML tree: merge equivalent spans, lift
    redundant wrappers, normalize dir attributes, convert page breaks and
    optionally detect a cover image. Returns the cover path, or None.'''
    # Apply vertical-align
    for span in root.xpath('//span[@data-docx-vert]'):
        wrap_contents(span.attrib.pop('data-docx-vert'), span)
    # Move <hr>s outside paragraphs, if possible.
    pancestor = XPath('|'.join('ancestor::%s[1]' % x for x in ('p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6')))
    for hr in root.xpath('//span/hr'):
        p = pancestor(hr)
        if p:
            p = p[0]
            descendants = tuple(p.iterdescendants())
            # Only move the <hr> when it is the last thing in the paragraph
            if descendants[-1] is hr:
                parent = p.getparent()
                idx = parent.index(p)
                parent.insert(idx+1, hr)
                hr.tail = '\n\t'
    # Merge consecutive spans that have the same styling
    current_run = []
    for span in root.xpath('//span'):
        if not current_run:
            current_run.append(span)
        else:
            last = current_run[-1]
            if mergeable(last, span):
                current_run.append(span)
            else:
                if len(current_run) > 1:
                    merge_run(current_run)
                current_run = [span]
    # NOTE(review): a trailing run of mergeable spans (current_run) is never
    # merged after the loop ends — harmless for rendering, but the final run
    # in the document is left unmerged; confirm this is intentional.
    # Process dir attributes
    class_map = dict(itervalues(styles.classes))
    parents = ('p', 'div') + tuple('h%d' % i for i in range(1, 7))
    for parent in root.xpath('//*[(%s)]' % ' or '.join('name()="%s"' % t for t in parents)):
        # Ensure that children of rtl parents that are not rtl have an
        # explicit dir set. Also, remove dir from children if it is the same as
        # that of the parent.
        if len(parent):
            parent_dir = parent.get('dir')
            for child in parent.iterchildren('span'):
                child_dir = child.get('dir')
                if parent_dir == 'rtl' and child_dir != 'rtl':
                    child_dir = 'ltr'
                    child.set('dir', child_dir)
                if child_dir and child_dir == parent_dir:
                    child.attrib.pop('dir')
    # Remove unnecessary span tags that are the only child of a parent block
    # element
    for parent in root.xpath('//*[(%s) and count(span)=1]' % ' or '.join('name()="%s"' % t for t in parents)):
        if len(parent) == 1 and not parent.text and not parent[0].tail and not parent[0].get('id', None):
            # We have a block whose contents are entirely enclosed in a <span>
            span = parent[0]
            span_class = span.get('class', None)
            span_css = class_map.get(span_class, {})
            span_dir = span.get('dir')
            # Only lift when every style would inherit cleanly and the text
            # direction does not change
            if liftable(span_css) and (not span_dir or span_dir == parent.get('dir')):
                pclass = parent.get('class', None)
                if span_class:
                    pclass = (pclass + ' ' + span_class) if pclass else span_class
                    parent.set('class', pclass)
                parent.text = span.text
                parent.remove(span)
                if span.get('lang'):
                    parent.set('lang', span.get('lang'))
                if span.get('dir'):
                    parent.set('dir', span.get('dir'))
                for child in span:
                    parent.append(child)
    # Make spans whose only styling is bold or italic into <b> and <i> tags
    for span in root.xpath('//span[@class and not(@style)]'):
        css = class_map.get(span.get('class', None), {})
        if len(css) == 1:
            if css == {'font-style':'italic'}:
                span.tag = 'i'
                del span.attrib['class']
            elif css == {'font-weight':'bold'}:
                span.tag = 'b'
                del span.attrib['class']
    # Get rid of <span>s that have no styling
    for span in root.xpath('//span[not(@class or @id or @style or @lang or @dir)]'):
        lift(span)
    # Convert <p><br style="page-break-after:always"> </p> style page breaks
    # into something the viewer will render as a page break
    for p in root.xpath('//p[br[@style="page-break-after:always"]]'):
        if len(p) == 1 and (not p[0].tail or not p[0].tail.strip()):
            p.remove(p[0])
            prefix = p.get('style', '')
            if prefix:
                prefix += '; '
            p.set('style', prefix + 'page-break-after:always')
            # Keep the paragraph visible even when empty
            p.text = NBSP if not p.text else p.text
    if detect_cover:
        # Check if the first image in the document is possibly a cover
        img = root.xpath('//img[@src][1]')
        if img:
            img = img[0]
            path = os.path.join(dest_dir, img.get('src'))
            # Heuristic: near the top of the document, portrait-ish aspect
            # ratio and at least ~0.16 megapixels
            if os.path.exists(path) and before_count(root, img, limit=10) < 5:
                from calibre.utils.imghdr import identify
                try:
                    with open(path, 'rb') as imf:
                        fmt, width, height = identify(imf)
                except:
                    width, height, fmt = 0, 0, None  # noqa
                del fmt
                try:
                    is_cover = 0.8 <= height/width <= 1.8 and height*width >= 160000
                except ZeroDivisionError:
                    is_cover = False
                if is_cover:
                    log.debug('Detected an image that looks like a cover')
                    img.getparent().remove(img)
                    return path
| 8,076 | Python | .py | 199 | 30.296482 | 110 | 0.546359 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,574 | theme.py | kovidgoyal_calibre/src/calibre/ebooks/docx/theme.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
class Theme:
    '''Tracks the major/minor latin fonts declared in a DOCX theme part and
    resolves |majorXxx|/|minorXxx| placeholders to actual family names.'''

    def __init__(self, namespace):
        self.namespace = namespace
        # Word's defaults, used when the theme declares nothing
        self.major_latin_font = 'Cambria'
        self.minor_latin_font = 'Calibri'

    def __call__(self, root):
        '''Read the latin typefaces out of every <a:fontScheme> in *root*.'''
        XPath = self.namespace.XPath
        for scheme in XPath('//a:fontScheme')(root):
            for major in XPath('./a:majorFont')(scheme):
                for latin in XPath('./a:latin[@typeface]')(major):
                    self.major_latin_font = latin.get('typeface')
            for minor in XPath('./a:minorFont')(scheme):
                for latin in XPath('./a:latin[@typeface]')(minor):
                    self.minor_latin_font = latin.get('typeface')

    def resolve_font_family(self, ff):
        '''Resolve a theme placeholder like |majorHAnsi|; other names pass
        through unchanged.'''
        if not ff.startswith('|'):
            return ff
        inner = ff[1:-1]
        if inner.startswith('major'):
            return self.major_latin_font
        return self.minor_latin_font
| 985 | Python | .py | 21 | 37 | 91 | 0.580982 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,575 | fields.py | kovidgoyal_calibre/src/calibre/ebooks/docx/fields.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from calibre.ebooks.docx.index import polish_index_markup, process_index
from polyglot.builtins import iteritems, native_string_type
class Field:
    '''A single Word field: tracks its begin/end markers, accumulates its
    instruction text fragments and collects its content elements.'''

    def __init__(self, start):
        self.start = start
        self.end = None
        self.contents = []
        self.buf = []
        self.instructions = None
        self.name = None

    def add_instr(self, elem):
        self.add_raw(elem.text)

    def add_raw(self, raw):
        '''Accumulate one fragment of instruction text; the first word of
        the first fragment becomes the field name.'''
        if not raw:
            return
        if self.name is None:
            # Only strip on the left: partial index entries can end with a
            # significant space (e.g. <>Summary <> ... <>Hearing<>), and no
            # known examples start with one.
            stripped = raw.lstrip()
            self.name, _, raw = stripped.partition(' ')
        self.buf.append(raw)

    def finalize(self):
        '''Join the accumulated fragments into the final instruction text.'''
        self.instructions = ''.join(self.buf)
        del self.buf
# Token types produced by the field-instruction scanner below
WORD, FLAG = 0, 1
scanner = re.Scanner([
    (r'\\\S{1}', lambda s, t: (t, FLAG)),  # A flag of the form \x
    (r'"[^"]*"', lambda s, t: (t[1:-1], WORD)),  # Quoted word
    (r'[^\s\\"]\S*', lambda s, t: (t, WORD)),  # A non-quoted word, must not start with a backslash or a space or a quote
    (r'\s+', None),
], flags=re.DOTALL)
# Sentinel used by parser() for unknown flags; its entries are dropped
# before the parsed dict is returned
null = object()
def parser(name, field_map, default_field_name=None):
    '''Build a parse function for one field type.

    *field_map* is a space-separated list of switch:key pairs mapping
    single-character field switches (\\x) to result-dict keys. Words not
    preceded by a switch are stored under *default_field_name*.
    '''
    switches = dict(entry.split(':') for entry in field_map.split())

    def parse(raw, log=None):
        result = {}
        pending = None
        # Protect escaped backslashes and quotes from the scanner
        raw = raw.replace('\\\\', '\x01').replace('\\"', '\x02')
        tokens = scanner.scan(raw)[0]
        for token, token_type in tokens:
            token = token.replace('\x01', '\\').replace('\x02', '"')
            if token_type is FLAG:
                # Unknown switches map to the null sentinel, dropped below
                pending = switches.get(token[1], null)
                if pending is not None:
                    result[pending] = None
            elif token_type is WORD:
                if pending is None:
                    result[default_field_name] = token
                else:
                    result[pending] = token
                    pending = None
        result.pop(null, None)
        return result

    parse.__name__ = native_string_type('parse_' + name)
    return parse
# Concrete parsers for the field types we support; the map strings use the
# switch-letter:result-key format described in parser() above
parse_hyperlink = parser('hyperlink',
    'l:anchor m:image-map n:target o:title t:target', 'url')
parse_xe = parser('xe',
    'b:bold i:italic f:entry-type r:page-range-bookmark t:page-number-text y:yomi', 'text')
parse_index = parser('index',
    'b:bookmark c:columns-per-page d:sequence-separator e:first-page-number-separator'
    ' f:entry-type g:page-range-separator h:heading k:crossref-separator'
    ' l:page-number-separator p:letter-range s:sequence-name r:run-together y:yomi z:langcode')
parse_ref = parser('ref',
    'd:separator f:footnote h:hyperlink n:number p:position r:relative-number t:suppress w:number-full-context')
parse_noteref = parser('noteref',
    'f:footnote h:hyperlink p:position')
class Fields:
def __init__(self, namespace):
self.namespace = namespace
self.fields = []
self.index_bookmark_counter = 0
self.index_bookmark_prefix = 'index-'
def __call__(self, doc, log):
all_ids = frozenset(self.namespace.XPath('//*/@w:id')(doc))
c = 0
while self.index_bookmark_prefix in all_ids:
c += 1
self.index_bookmark_prefix = self.index_bookmark_prefix.replace('-', '%d-' % c)
stack = []
for elem in self.namespace.XPath(
'//*[name()="w:p" or name()="w:r" or'
' name()="w:instrText" or'
' (name()="w:fldChar" and (@w:fldCharType="begin" or @w:fldCharType="end") or'
' name()="w:fldSimple")]')(doc):
if elem.tag.endswith('}fldChar'):
typ = self.namespace.get(elem, 'w:fldCharType')
if typ == 'begin':
stack.append(Field(elem))
self.fields.append(stack[-1])
else:
try:
stack.pop().end = elem
except IndexError:
pass
elif elem.tag.endswith('}instrText'):
if stack:
stack[-1].add_instr(elem)
elif elem.tag.endswith('}fldSimple'):
field = Field(elem)
instr = self.namespace.get(elem, 'w:instr')
if instr:
field.add_raw(instr)
self.fields.append(field)
for r in self.namespace.XPath('descendant::w:r')(elem):
field.contents.append(r)
else:
if stack:
stack[-1].contents.append(elem)
field_types = ('hyperlink', 'xe', 'index', 'ref', 'noteref')
parsers = {x.upper():getattr(self, 'parse_'+x) for x in field_types}
parsers.update({x:getattr(self, 'parse_'+x) for x in field_types})
field_parsers = {f.upper():globals()['parse_%s' % f] for f in field_types}
field_parsers.update({f:globals()['parse_%s' % f] for f in field_types})
for f in field_types:
setattr(self, '%s_fields' % f, [])
unknown_fields = {'TOC', 'toc', 'PAGEREF', 'pageref'} # The TOC and PAGEREF fields are handled separately
for field in self.fields:
field.finalize()
if field.instructions:
func = parsers.get(field.name, None)
if func is not None:
func(field, field_parsers[field.name], log)
elif field.name not in unknown_fields:
log.warn('Encountered unknown field: %s, ignoring it.' % field.name)
unknown_fields.add(field.name)
def get_runs(self, field):
all_runs = []
current_runs = []
# We only handle spans in a single paragraph
# being wrapped in <a>
for x in field.contents:
if x.tag.endswith('}p'):
if current_runs:
all_runs.append(current_runs)
current_runs = []
elif x.tag.endswith('}r'):
current_runs.append(x)
if current_runs:
all_runs.append(current_runs)
return all_runs
def parse_hyperlink(self, field, parse_func, log):
# Parse hyperlink fields
hl = parse_func(field.instructions, log)
if hl:
if 'target' in hl and hl['target'] is None:
hl['target'] = '_blank'
for runs in self.get_runs(field):
self.hyperlink_fields.append((hl, runs))
def parse_ref(self, field, parse_func, log):
ref = parse_func(field.instructions, log)
dest = ref.get(None, None)
if dest is not None and 'hyperlink' in ref:
for runs in self.get_runs(field):
self.hyperlink_fields.append(({'anchor':dest}, runs))
else:
log.warn(f'Unsupported reference field ({field.name}), ignoring: {ref!r}')
parse_noteref = parse_ref
    def parse_xe(self, field, parse_func, log):
        '''Parse an XE (index entry) field, bracketing it with a synthetic
        bookmark so the generated index can link back to this location.'''
        # Without both start and end markers we cannot place the bookmark
        if None in (field.start, field.end):
            return
        xe = parse_func(field.instructions, log)
        if xe:
            # We insert a synthetic bookmark around this index item so that we
            # can link to it later
            def WORD(x):
                return self.namespace.expand('w:' + x)
            self.index_bookmark_counter += 1
            bmark = xe['anchor'] = '%s%d' % (self.index_bookmark_prefix, self.index_bookmark_counter)
            # bookmarkStart goes immediately before the field start marker
            p = field.start.getparent()
            bm = p.makeelement(WORD('bookmarkStart'))
            bm.set(WORD('id'), bmark), bm.set(WORD('name'), bmark)
            p.insert(p.index(field.start), bm)
            # bookmarkEnd goes immediately after the field end marker
            p = field.end.getparent()
            bm = p.makeelement(WORD('bookmarkEnd'))
            bm.set(WORD('id'), bmark)
            p.insert(p.index(field.end) + 1, bm)
            xe['start_elem'] = field.start
            self.xe_fields.append(xe)
def parse_index(self, field, parse_func, log):
if not field.contents:
return
idx = parse_func(field.instructions, log)
hyperlinks, blocks = process_index(field, idx, self.xe_fields, log, self.namespace.XPath, self.namespace.expand)
if not blocks:
return
for anchor, run in hyperlinks:
self.hyperlink_fields.append(({'anchor':anchor}, [run]))
self.index_fields.append((idx, blocks))
def polish_markup(self, object_map):
if not self.index_fields:
return
rmap = {v:k for k, v in iteritems(object_map)}
for idx, blocks in self.index_fields:
polish_index_markup(idx, [rmap[b] for b in blocks])
def test_parse_fields(return_tests=False):
    '''Unit tests for the field instruction parsers. Returns the unittest
    suite when return_tests is True, otherwise runs it immediately.'''
    import unittest

    class TestParseFields(unittest.TestCase):

        def test_hyperlink(self):
            # HYPERLINK instructions: anchors (\l), bare URLs, \t/\o switches,
            # quoted paths, and unknown switches being ignored
            def ae(x, y):
                return self.assertEqual(parse_hyperlink(x, None), y)
            ae(r'\l anchor1', {'anchor':'anchor1'})
            ae(r'www.calibre-ebook.com', {'url':'www.calibre-ebook.com'})
            ae(r'www.calibre-ebook.com \t target \o tt', {'url':'www.calibre-ebook.com', 'target':'target', 'title': 'tt'})
            ae(r'"c:\\Some Folder"', {'url': 'c:\\Some Folder'})
            ae(r'xxxx \y yyyy', {'url': 'xxxx'})

        def test_xe(self):
            # XE instructions: quoted text, bold/italic flags, yomi (\y)
            def ae(x, y):
                return self.assertEqual(parse_xe(x, None), y)
            ae(r'"some name"', {'text':'some name'})
            ae(r'name \b \i', {'text':'name', 'bold':None, 'italic':None})
            ae(r'xxx \y a', {'text':'xxx', 'yomi':'a'})

        def test_index(self):
            # INDEX instructions: empty, bookmark (\b) and columns (\c)
            def ae(x, y):
                return self.assertEqual(parse_index(x, None), y)
            ae(r'', {})
            ae(r'\b \c 1', {'bookmark':None, 'columns-per-page': '1'})

    suite = unittest.TestLoader().loadTestsFromTestCase(TestParseFields)
    if return_tests:
        return suite
    unittest.TextTestRunner(verbosity=4).run(suite)
if __name__ == '__main__':
    # Run the field-parsing unit tests when this module is executed directly
    test_parse_fields()
| 10,301 | Python | .py | 229 | 33.90393 | 123 | 0.555423 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,576 | dump.py | kovidgoyal_calibre/src/calibre/ebooks/docx/dump.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import shutil
import sys
from lxml import etree
from calibre import walk
from calibre.utils.xml_parse import safe_xml_fromstring
from calibre.utils.zipfile import ZipFile
def pretty_all_xml_in_dir(path):
    '''Pretty-print, in place, every .xml and .rels file found under path.'''
    for fname in walk(path):
        if not fname.endswith(('.xml', '.rels')):
            continue
        with open(fname, 'r+b') as stream:
            raw = stream.read()
            if not raw:
                continue  # nothing to reformat in an empty file
            root = safe_xml_fromstring(raw)
            stream.seek(0)
            stream.truncate()
            stream.write(etree.tostring(root, pretty_print=True, encoding='utf-8', xml_declaration=True))
def do_dump(path, dest):
    '''Extract the DOCX file at path into the directory dest (deleting dest
    first if it already exists) and pretty-print all the XML inside it.'''
    if os.path.exists(dest):
        shutil.rmtree(dest)
    with ZipFile(path) as zf:
        zf.extractall(dest)
    pretty_all_xml_in_dir(dest)
def dump(path):
    '''Dump the DOCX file at path into a directory named after it, with a
    -dumped suffix, in the current working directory.'''
    base = os.path.splitext(os.path.basename(path))[0]
    dest = base + '-dumped'
    do_dump(path, dest)
    print(path, 'dumped to', dest)
if __name__ == '__main__':
    # Dump the DOCX file given as the last command line argument
    dump(sys.argv[-1])
| 1,130 | Python | .py | 33 | 27.242424 | 113 | 0.618652 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,577 | styles.py | kovidgoyal_calibre/src/calibre/ebooks/docx/styles.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import textwrap
from collections import Counter, OrderedDict
from calibre.ebooks.docx.block_styles import ParagraphStyle, inherit, twips
from calibre.ebooks.docx.char_styles import RunStyle
from calibre.ebooks.docx.tables import TableStyle
from polyglot.builtins import iteritems, itervalues
class PageProperties:

    '''
    Page level properties (page size and side margins) read from the given
    sectPr elements. Defaults to an A4 page with one inch margins.
    '''

    def __init__(self, namespace, elems=()):
        # Defaults: A4 page (in points) with 72pt (one inch) side margins
        self.width, self.height = 595.28, 841.89  # pts, A4
        self.margin_left = self.margin_right = 72  # pts

        def setval(attr, val):
            # Only override the default when the attribute parses as twips
            val = twips(val)
            if val is not None:
                setattr(self, attr, val)

        for sectPr in elems:
            for pgSz in namespace.XPath('./w:pgSz')(sectPr):
                setval('width', namespace.get(pgSz, 'w:w'))
                setval('height', namespace.get(pgSz, 'w:h'))
            for pgMar in namespace.XPath('./w:pgMar')(sectPr):
                setval('margin_left', namespace.get(pgMar, 'w:left'))
                setval('margin_right', namespace.get(pgMar, 'w:right'))
class Style:

    '''
    Class representing a <w:style> element. Depending on the style type it can
    carry a paragraph, character and/or table sub-style.
    '''

    def __init__(self, namespace, elem):
        self.namespace = namespace
        # XPath evaluators for the style name and the basedOn (parent) style
        self.name_path = namespace.XPath('./w:name[@w:val]')
        self.based_on_path = namespace.XPath('./w:basedOn[@w:val]')
        self.resolved = False  # set once basedOn inheritance has been applied
        self.style_id = namespace.get(elem, 'w:styleId')
        self.style_type = namespace.get(elem, 'w:type')
        names = self.name_path(elem)
        self.name = namespace.get(names[-1], 'w:val') if names else None
        based_on = self.based_on_path(elem)
        self.based_on = namespace.get(based_on[0], 'w:val') if based_on else None
        if self.style_type == 'numbering':
            # Numbering styles do not participate in basedOn inheritance here
            self.based_on = None
        self.is_default = namespace.get(elem, 'w:default') in {'1', 'on', 'true'}

        self.paragraph_style = self.character_style = self.table_style = None

        if self.style_type in {'paragraph', 'character', 'table'}:
            if self.style_type == 'table':
                # Merge all tblPr children into a single TableStyle
                for tblPr in namespace.XPath('./w:tblPr')(elem):
                    ts = TableStyle(namespace, tblPr)
                    if self.table_style is None:
                        self.table_style = ts
                    else:
                        self.table_style.update(ts)
            if self.style_type in {'paragraph', 'table'}:
                # Merge all pPr children into a single ParagraphStyle
                for pPr in namespace.XPath('./w:pPr')(elem):
                    ps = ParagraphStyle(namespace, pPr)
                    if self.paragraph_style is None:
                        self.paragraph_style = ps
                    else:
                        self.paragraph_style.update(ps)
            # Merge all rPr children into a single RunStyle
            for rPr in namespace.XPath('./w:rPr')(elem):
                rs = RunStyle(namespace, rPr)
                if self.character_style is None:
                    self.character_style = rs
                else:
                    self.character_style.update(rs)

        if self.style_type in {'numbering', 'paragraph'}:
            # Remember a link to a numbering definition, if any (last one wins)
            self.numbering_style_link = None
            for x in namespace.XPath('./w:pPr/w:numPr/w:numId[@w:val]')(elem):
                self.numbering_style_link = namespace.get(x, 'w:val')

    def resolve_based_on(self, parent):
        '''Inherit any table/paragraph/character sub-styles from parent,
        creating empty sub-styles here when the parent has one and we do not.'''
        if parent.table_style is not None:
            if self.table_style is None:
                self.table_style = TableStyle(self.namespace)
            self.table_style.resolve_based_on(parent.table_style)
        if parent.paragraph_style is not None:
            if self.paragraph_style is None:
                self.paragraph_style = ParagraphStyle(self.namespace)
            self.paragraph_style.resolve_based_on(parent.paragraph_style)
        if parent.character_style is not None:
            if self.character_style is None:
                self.character_style = RunStyle(self.namespace)
            self.character_style.resolve_based_on(parent.character_style)
class Styles:

    '''
    Collection of all styles defined in the document. Used to get the final styles applicable to elements in the document markup.
    '''

    def __init__(self, namespace, tables):
        self.namespace = namespace
        self.id_map = OrderedDict()  # style id -> Style, in document order
        self.para_cache = {}  # <w:p> element -> resolved ParagraphStyle
        self.para_char_cache = {}  # <w:p> element -> RunStyle from its linked/default style
        self.run_cache = {}  # <w:r> element -> resolved RunStyle
        self.classes = {}  # hash of css dict -> (generated class name, css dict)
        self.counter = Counter()  # per-prefix counters for class name generation
        self.default_styles = {}  # style type -> the default Style of that type
        self.tables = tables
        self.numbering_style_links = {}
        self.default_paragraph_style = self.default_character_style = None

    def __iter__(self):
        yield from itervalues(self.id_map)

    def __getitem__(self, key):
        return self.id_map[key]

    def __len__(self):
        return len(self.id_map)

    def get(self, key, default=None):
        return self.id_map.get(key, default)

    def __call__(self, root, fonts, theme):
        '''Parse all <w:style> elements and document defaults from root, then
        resolve basedOn inheritance for every style.'''
        self.fonts, self.theme = fonts, theme
        self.default_paragraph_style = self.default_character_style = None
        if root is not None:
            for s in self.namespace.XPath('//w:style')(root):
                s = Style(self.namespace, s)
                if s.style_id:
                    self.id_map[s.style_id] = s
                if s.is_default:
                    self.default_styles[s.style_type] = s
                if getattr(s, 'numbering_style_link', None) is not None:
                    self.numbering_style_links[s.style_id] = s.numbering_style_link

            # Document-wide default paragraph and run properties
            for dd in self.namespace.XPath('./w:docDefaults')(root):
                for pd in self.namespace.XPath('./w:pPrDefault')(dd):
                    for pPr in self.namespace.XPath('./w:pPr')(pd):
                        ps = ParagraphStyle(self.namespace, pPr)
                        if self.default_paragraph_style is None:
                            self.default_paragraph_style = ps
                        else:
                            self.default_paragraph_style.update(ps)
                for pd in self.namespace.XPath('./w:rPrDefault')(dd):
                    for pPr in self.namespace.XPath('./w:rPr')(pd):
                        ps = RunStyle(self.namespace, pPr)
                        if self.default_character_style is None:
                            self.default_character_style = ps
                        else:
                            self.default_character_style.update(ps)

        # Recursively apply basedOn inheritance, parents first
        def resolve(s, p):
            if p is not None:
                if not p.resolved:
                    resolve(p, self.get(p.based_on))
                s.resolve_based_on(p)
            s.resolved = True
        for s in self:
            if not s.resolved:
                resolve(s, self.get(s.based_on))

    def para_val(self, parent_styles, direct_formatting, attr):
        '''Value of attr for a paragraph: direct formatting wins, otherwise the
        nearest parent style that defines it.'''
        val = getattr(direct_formatting, attr)
        if val is inherit:
            for ps in reversed(parent_styles):
                pval = getattr(ps, attr)
                if pval is not inherit:
                    val = pval
                    break
        return val

    def run_val(self, parent_styles, direct_formatting, attr):
        '''Value of attr for a run, with special handling of toggle properties
        (bold, italic, etc.) which combine by parity across styles.'''
        val = getattr(direct_formatting, attr)
        if val is not inherit:
            return val
        if attr in direct_formatting.toggle_properties:
            # The spec (section 17.7.3) does not make sense, so we follow the behavior
            # of Word, which seems to only consider the document default if the
            # property has not been defined in any styles.
            vals = [int(getattr(rs, attr)) for rs in parent_styles if rs is not self.default_character_style and getattr(rs, attr) is not inherit]
            if vals:
                return sum(vals) % 2 == 1
            if self.default_character_style is not None:
                return getattr(self.default_character_style, attr) is True
            return False
        for rs in reversed(parent_styles):
            rval = getattr(rs, attr)
            if rval is not inherit:
                return rval
        return val

    def resolve_paragraph(self, p):
        '''Return (and cache) the fully resolved ParagraphStyle for the given
        <w:p> element, combining direct formatting, linked styles, table
        styles, numbering and document defaults.'''
        ans = self.para_cache.get(p, None)
        if ans is None:
            linked_style = None
            ans = self.para_cache[p] = ParagraphStyle(self.namespace)
            ans.style_name = None
            direct_formatting = None
            is_section_break = False
            for pPr in self.namespace.XPath('./w:pPr')(p):
                ps = ParagraphStyle(self.namespace, pPr)
                if direct_formatting is None:
                    direct_formatting = ps
                else:
                    direct_formatting.update(ps)
                if self.namespace.XPath('./w:sectPr')(pPr):
                    is_section_break = True

            if direct_formatting is None:
                direct_formatting = ParagraphStyle(self.namespace)
            # Parent styles in increasing order of precedence
            parent_styles = []
            if self.default_paragraph_style is not None:
                parent_styles.append(self.default_paragraph_style)
            ts = self.tables.para_style(p)
            if ts is not None:
                parent_styles.append(ts)

            default_para = self.default_styles.get('paragraph', None)
            if direct_formatting.linked_style is not None:
                ls = linked_style = self.get(direct_formatting.linked_style)
                if ls is not None:
                    ans.style_name = ls.name
                    ps = ls.paragraph_style
                    if ps is not None:
                        parent_styles.append(ps)
                    if ls.character_style is not None:
                        # Runs in this paragraph inherit the linked style's rPr
                        self.para_char_cache[p] = ls.character_style
            elif default_para is not None:
                if default_para.paragraph_style is not None:
                    parent_styles.append(default_para.paragraph_style)
                if default_para.character_style is not None:
                    self.para_char_cache[p] = default_para.character_style

            def has_numbering(block_style):
                num_id, lvl = getattr(block_style, 'numbering_id', inherit), getattr(block_style, 'numbering_level', inherit)
                return num_id is not None and num_id is not inherit and lvl is not None and lvl is not inherit

            is_numbering = has_numbering(direct_formatting)
            # An empty paragraph containing a sectPr is a pure section break
            is_section_break = is_section_break and not self.namespace.XPath('./w:r')(p)
            if is_numbering and not is_section_break:
                num_id, lvl = direct_formatting.numbering_id, direct_formatting.numbering_level
                p.set('calibre_num_id', f'{lvl}:{num_id}')
                ps = self.numbering.get_para_style(num_id, lvl)
                if ps is not None:
                    parent_styles.append(ps)
            # Fall back to numbering defined on the linked style
            if (
                not is_numbering and not is_section_break and linked_style is not None and has_numbering(linked_style.paragraph_style)
            ):
                num_id, lvl = linked_style.paragraph_style.numbering_id, linked_style.paragraph_style.numbering_level
                p.set('calibre_num_id', f'{lvl}:{num_id}')
                is_numbering = True
                ps = self.numbering.get_para_style(num_id, lvl)
                if ps is not None:
                    parent_styles.append(ps)

            for attr in ans.all_properties:
                if not (is_numbering and attr == 'text_indent'):  # skip text-indent for lists
                    setattr(ans, attr, self.para_val(parent_styles, direct_formatting, attr))
            ans.linked_style = direct_formatting.linked_style
        return ans

    def resolve_run(self, r):
        '''Return (and cache) the fully resolved RunStyle for the given <w:r>
        element.'''
        ans = self.run_cache.get(r, None)
        if ans is None:
            p = self.namespace.XPath('ancestor::w:p[1]')(r)
            p = p[0] if p else None
            ans = self.run_cache[r] = RunStyle(self.namespace)
            direct_formatting = None
            for rPr in self.namespace.XPath('./w:rPr')(r):
                rs = RunStyle(self.namespace, rPr)
                if direct_formatting is None:
                    direct_formatting = rs
                else:
                    direct_formatting.update(rs)

            if direct_formatting is None:
                direct_formatting = RunStyle(self.namespace)

            parent_styles = []
            default_char = self.default_styles.get('character', None)
            if self.default_character_style is not None:
                parent_styles.append(self.default_character_style)
            pstyle = self.para_char_cache.get(p, None)
            if pstyle is not None:
                parent_styles.append(pstyle)
            # As best as I can understand the spec, table overrides should be
            # applied before paragraph overrides, but word does it
            # this way, see the December 2007 table header in the demo
            # document.
            ts = self.tables.run_style(p)
            if ts is not None:
                parent_styles.append(ts)
            if direct_formatting.linked_style is not None:
                ls = getattr(self.get(direct_formatting.linked_style), 'character_style', None)
                if ls is not None:
                    parent_styles.append(ls)
            elif default_char is not None and default_char.character_style is not None:
                parent_styles.append(default_char.character_style)

            for attr in ans.all_properties:
                setattr(ans, attr, self.run_val(parent_styles, direct_formatting, attr))

            if ans.font_family is not inherit:
                # Map the theme font name to a concrete embedded font family
                ff = self.theme.resolve_font_family(ans.font_family)
                ans.font_family = self.fonts.family_for(ff, ans.b, ans.i)

        return ans

    def resolve(self, obj):
        '''Resolve either a paragraph or a run element to its final style.'''
        if obj.tag.endswith('}p'):
            return self.resolve_paragraph(obj)
        if obj.tag.endswith('}r'):
            return self.resolve_run(obj)

    def cascade(self, layers):
        '''Promote values shared by all runs of a paragraph to the paragraph
        style, and the most common block values to the body, to produce
        smaller CSS.'''
        self.body_font_family = 'serif'
        self.body_font_size = '10pt'
        self.body_color = 'currentColor'

        def promote_property(char_styles, block_style, prop):
            vals = {getattr(s, prop) for s in char_styles}
            if len(vals) == 1:
                # All the character styles have the same value
                for s in char_styles:
                    setattr(s, prop, inherit)
                setattr(block_style, prop, next(iter(vals)))

        for p, runs in iteritems(layers):
            has_links = '1' in {r.get('is-link', None) for r in runs}
            char_styles = [self.resolve_run(r) for r in runs]
            block_style = self.resolve_paragraph(p)
            for prop in ('font_family', 'font_size', 'cs_font_family', 'cs_font_size', 'color'):
                if has_links and prop == 'color':
                    # We cannot promote color as browser rendering engines will
                    # override the link color setting it to blue, unless the
                    # color is specified on the link element itself
                    continue
                promote_property(char_styles, block_style, prop)
            for s in char_styles:
                if s.text_decoration == 'none':
                    # The default text decoration is 'none'
                    s.text_decoration = inherit

        def promote_most_common(block_styles, prop, default, inherit_means=None):
            # Promote the most frequent value of prop to the body default;
            # returns that value (or None when no block defines prop)
            c = Counter()
            for s in block_styles:
                val = getattr(s, prop)
                if val is inherit and inherit_means is not None:
                    val = inherit_means
                if val is not inherit:
                    c[val] += 1
            val = None
            if c:
                val = c.most_common(1)[0][0]
                for s in block_styles:
                    oval = getattr(s, prop)
                    if oval is inherit and inherit_means is not None:
                        oval = inherit_means
                    if oval is inherit:
                        if default != val:
                            setattr(s, prop, default)
                    elif oval == val:
                        setattr(s, prop, inherit)
            return val

        block_styles = tuple(self.resolve_paragraph(p) for p in layers)
        ff = promote_most_common(block_styles, 'font_family', self.body_font_family)
        if ff is not None:
            self.body_font_family = ff

        # NOTE(review): [:2] assumes the current body_font_size is a
        # two-digit pt value like the '10pt' default — TODO confirm
        fs = promote_most_common(block_styles, 'font_size', int(self.body_font_size[:2]))
        if fs is not None:
            self.body_font_size = '%.3gpt' % fs
        color = promote_most_common(block_styles, 'color', self.body_color, inherit_means='currentColor')
        if color is not None:
            self.body_color = color

    def resolve_numbering(self, numbering):
        # When a numPr element appears inside a paragraph style, the lvl info
        # must be discarded and pStyle used instead.
        self.numbering = numbering
        for style in self:
            ps = style.paragraph_style
            if ps is not None and ps.numbering_id is not inherit:
                lvl = numbering.get_pstyle(ps.numbering_id, style.style_id)
                if lvl is None:
                    ps.numbering_id = ps.numbering_level = inherit
                else:
                    ps.numbering_level = lvl

    def apply_contextual_spacing(self, paras):
        '''Zero out vertical margins between consecutive paragraphs of the
        same linked style that request contextual spacing.'''
        last_para = None
        for p in paras:
            if last_para is not None:
                ls = self.resolve_paragraph(last_para)
                ps = self.resolve_paragraph(p)
                if ls.linked_style is not None and ls.linked_style == ps.linked_style:
                    if ls.contextualSpacing is True:
                        ls.margin_bottom = 0
                    if ps.contextualSpacing is True:
                        ps.margin_top = 0
            last_para = p

    def apply_section_page_breaks(self, paras):
        '''Force a page break before each paragraph that starts a section.'''
        for p in paras:
            ps = self.resolve_paragraph(p)
            ps.pageBreakBefore = True

    def register(self, css, prefix):
        '''Return (creating if necessary) a CSS class name for the given css
        property dict, named prefix_N.'''
        h = hash(frozenset(iteritems(css)))
        ans, _ = self.classes.get(h, (None, None))
        if ans is None:
            self.counter[prefix] += 1
            ans = '%s_%d' % (prefix, self.counter[prefix])
            self.classes[h] = (ans, css)
        return ans

    def generate_classes(self):
        '''Register a CSS class for every distinct resolved block/run style.'''
        for bs in itervalues(self.para_cache):
            css = bs.css
            if css:
                self.register(css, 'block')
        for bs in itervalues(self.run_cache):
            css = bs.css
            if css:
                self.register(css, 'text')

    def class_name(self, css):
        '''Return the class name previously registered for css, if any.'''
        h = hash(frozenset(iteritems(css)))
        return self.classes.get(h, (None, None))[0]

    def generate_css(self, dest_dir, docx, notes_nopb, nosupsub):
        '''Return the full stylesheet text for the converted document,
        including @font-face rules for embedded fonts.'''
        ef = self.fonts.embed_fonts(dest_dir, docx)

        s = '''\
body { font-family: %s; font-size: %s; %s }

/* In word all paragraphs have zero margins unless explicitly specified in a style */
p, h1, h2, h3, h4, h5, h6, div { margin: 0; padding: 0 }

/* In word headings only have bold font if explicitly specified,
   similarly the font size is the body font size, unless explicitly set. */
h1, h2, h3, h4, h5, h6 { font-weight: normal; font-size: 1rem }

ul, ol { margin: 0; padding: 0; padding-inline-start: 0; padding-inline-end: 0; margin-block-start: 0; margin-block-end: 0 }

/* The word hyperlink styling will set text-decoration to underline if needed */
a { text-decoration: none }

sup.noteref a { text-decoration: none }

h1.notes-header { page-break-before: always }

dl.footnote dt { font-size: large }

dl.footnote dt a { text-decoration: none }

'''
        if not notes_nopb:
            s += '''\
dl.footnote { page-break-after: always }
dl.footnote:last-of-type { page-break-after: avoid }
'''
        s = s + '''\

span.tab { white-space: pre }

p.index-entry { text-indent: 0pt; }
p.index-entry a:visited { color: blue }
p.index-entry a:hover { color: red }
'''
        if nosupsub:
            s = s + '''\
sup { vertical-align: top }
sub { vertical-align: bottom }
'''
        body_color = ''
        if self.body_color.lower() not in ('currentcolor', 'inherit'):
            body_color = f'color: {self.body_color};'
        prefix = textwrap.dedent(s) % (self.body_font_family, self.body_font_size, body_color)
        if ef:
            prefix = ef + '\n' + prefix

        ans = []
        for (cls, css) in sorted(itervalues(self.classes), key=lambda x:x[0]):
            b = (f'\t{k}: {v};' for k, v in iteritems(css))
            b = '\n'.join(b)
            ans.append('.{} {{\n{}\n}}\n'.format(cls, b.rstrip(';')))
        return prefix + '\n' + '\n'.join(ans)
| 21,305 | Python | .py | 433 | 35.510393 | 146 | 0.556715 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,578 | footnotes.py | kovidgoyal_calibre/src/calibre/ebooks/docx/footnotes.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import OrderedDict
class Note:

    '''A single footnote or endnote definition from the document.'''

    def __init__(self, namespace, parent, rels):
        self.namespace = namespace
        self.parent = parent
        self.rels = rels
        # Notes default to 'normal'; separator/continuation types also exist
        self.type = namespace.get(parent, 'w:type', 'normal')

    def __iter__(self):
        # Yield the block level descendants (paragraphs and tables) of the note
        yield from self.namespace.descendants(self.parent, 'w:p', 'w:tbl')
class Footnotes:

    '''Collects the footnote and endnote definitions of a document and hands
    out anchors/numbers for references to them.'''

    def __init__(self, namespace):
        self.namespace = namespace
        self.footnotes = {}
        self.endnotes = {}
        self.counter = 0
        self.notes = OrderedDict()  # anchor -> (number-as-string, Note)

    def __call__(self, footnotes, footnotes_rels, endnotes, endnotes_rels):
        '''Populate the id -> Note maps from the footnotes/endnotes parts.'''
        XPath, get = self.namespace.XPath, self.namespace.get

        def collect(root, pattern, rels, dest):
            if root is None:
                return
            for note in XPath(pattern)(root):
                nid = get(note, 'w:id')
                if nid:
                    dest[nid] = Note(self.namespace, note, rels)

        collect(footnotes, './w:footnote[@w:id]', footnotes_rels, self.footnotes)
        collect(endnotes, './w:endnote[@w:id]', endnotes_rels, self.endnotes)

    def get_ref(self, ref):
        '''Return (anchor, number) for a footnote/endnote reference element,
        or (None, None) if the note is unknown or not a normal note.'''
        fid = self.namespace.get(ref, 'w:id')
        is_footnote = ref.tag.endswith('}footnoteReference')
        note = (self.footnotes if is_footnote else self.endnotes).get(fid, None)
        if note is None or note.type != 'normal':
            return None, None
        self.counter += 1
        num = str(self.counter)
        anchor = 'note_%d' % self.counter
        self.notes[anchor] = (num, note)
        return anchor, num

    def __iter__(self):
        for anchor, (counter, note) in self.notes.items():
            yield anchor, counter, note

    @property
    def has_notes(self):
        return bool(self.notes)
| 1,978 | Python | .py | 47 | 32.680851 | 91 | 0.59238 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,579 | links.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/links.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import posixpath
import re
from uuid import uuid4
from calibre.ebooks.oeb.base import urlquote
from calibre.utils.filenames import ascii_text
from calibre.utils.localization import __
from polyglot.urllib import urlparse
def start_text(tag, prefix_len=0, top_level=True):
    '''Return the text at the start of tag, recursing into children until
    roughly 50 characters (minus prefix_len) are gathered; the top level call
    truncates with an ellipsis when over the limit.'''
    limit = 50 - prefix_len
    text = tag.text or ''
    if len(text) < limit:
        for child in tag.iterchildren('*'):
            text += start_text(child, len(text), top_level=False) + (child.tail or '')
            if len(text) >= limit:
                break
    if top_level and len(text) > limit:
        text = text[:limit] + '...'
    return text
class TOCItem:

    '''One entry of the generated Table of Contents.

    :param title: the text shown for the entry
    :param bmark: name of the Word bookmark the entry links to
    :param level: nesting depth, used to indent the entry
    '''

    def __init__(self, title, bmark, level):
        self.title, self.bmark, self.level = title, bmark, level
        self.is_first = self.is_last = False

    def serialize(self, body, makeelement):
        # Insert this TOC entry as a paragraph at the start of body.
        # BUGFIX: the Word TOC field machinery (fldChar begin, the TOC
        # instruction text and the separate fldChar) must be emitted only
        # once, in the first entry; previously it was emitted for every
        # entry while the matching end fldChar was only emitted in the last
        # entry, producing unbalanced nested fields.
        p = makeelement(body, 'w:p', append=False)
        ppr = makeelement(p, 'w:pPr')
        makeelement(ppr, 'w:pStyle', w_val="Normal")
        makeelement(ppr, 'w:ind', w_left='0', w_firstLineChars='0', w_firstLine='0', w_leftChars=str(200 * self.level))
        if self.is_first:
            makeelement(ppr, 'w:pageBreakBefore', w_val='off')
            r = makeelement(p, 'w:r')
            makeelement(r, 'w:fldChar', w_fldCharType='begin')
            r = makeelement(p, 'w:r')
            makeelement(r, 'w:instrText').text = r' TOC \h '
            r[0].set('{http://www.w3.org/XML/1998/namespace}space', 'preserve')
            r = makeelement(p, 'w:r')
            makeelement(r, 'w:fldChar', w_fldCharType='separate')
        hl = makeelement(p, 'w:hyperlink', w_anchor=self.bmark)
        r = makeelement(hl, 'w:r')
        rpr = makeelement(r, 'w:rPr')
        # Style the entry like a conventional hyperlink
        makeelement(rpr, 'w:color', w_val='0000FF', w_themeColor='hyperlink')
        makeelement(rpr, 'w:u', w_val='single')
        makeelement(r, 'w:t').text = self.title
        if self.is_last:
            # Close the TOC field after the last entry
            r = makeelement(p, 'w:r')
            makeelement(r, 'w:fldChar', w_fldCharType='end')
        body.insert(0, p)
def sanitize_bookmark_name(base):
    '''Turn base into a legal Word bookmark name (ASCII alphanumerics and
    underscores only).'''
    # Max length allowed by Word appears to be 40, we use 32 to leave some
    # space for making the name unique
    cleaned = re.sub(r'[^0-9a-zA-Z]', '_', ascii_text(base))
    return cleaned[:32].rstrip('_')
class LinksManager:

    '''Manages bookmarks, internal/external hyperlinks and the generated
    Table of Contents for the DOCX output document.'''

    def __init__(self, namespace, document_relationships, log):
        self.namespace = namespace
        self.log = log
        self.document_relationships = document_relationships
        # Synthetic anchor used to refer to the top of an HTML file
        self.top_anchor = str(uuid4().hex)
        self.anchor_map = {}  # (href, anchor) -> bookmark name
        self.used_bookmark_names = set()
        self.bmark_id = 0
        self.document_hrefs = set()
        self.external_links = {}  # url -> relationship id
        self.toc = []

    def bookmark_for_anchor(self, anchor, current_item, html_tag):
        '''Return a unique Word bookmark name for anchor inside current_item,
        deriving the name from the tag's starting text when possible.'''
        key = (current_item.href, anchor)
        if key in self.anchor_map:
            return self.anchor_map[key]
        if anchor == self.top_anchor:
            name = ('Top of %s' % posixpath.basename(current_item.href))
            self.document_hrefs.add(current_item.href)
        else:
            name = start_text(html_tag).strip() or anchor
        name = sanitize_bookmark_name(name)
        # Append a numeric suffix until the name is unique
        i, bname = 0, name
        while name in self.used_bookmark_names:
            i += 1
            name = bname + ('_%d' % i)
        self.anchor_map[key] = name
        self.used_bookmark_names.add(name)
        return name

    @property
    def bookmark_id(self):
        # Auto-incrementing id for bookmarkStart/bookmarkEnd elements
        self.bmark_id += 1
        return self.bmark_id

    def serialize_hyperlink(self, parent, link):
        '''Serialize an (item, url, tooltip) triple as a w:hyperlink child of
        parent; returns parent unchanged for unsupported URL schemes.'''
        item, url, tooltip = link
        purl = urlparse(url)
        href = purl.path

        def make_link(parent, anchor=None, id=None, tooltip=None):
            kw = {}
            if anchor is not None:
                kw['w_anchor'] = anchor
            elif id is not None:
                kw['r_id'] = id
            if tooltip:
                kw['w_tooltip'] = tooltip
            return self.namespace.makeelement(parent, 'w:hyperlink', **kw)

        if not purl.scheme:
            # Internal link: resolve to a bookmark in the document
            href = item.abshref(href)
            if href not in self.document_hrefs:
                href = urlquote(href)
            if href in self.document_hrefs:
                key = (href, purl.fragment or self.top_anchor)
                if key in self.anchor_map:
                    bmark = self.anchor_map[key]
                else:
                    # Unknown fragment: fall back to the top of the file
                    bmark = self.anchor_map[(href, self.top_anchor)]
                return make_link(parent, anchor=bmark, tooltip=tooltip)
            else:
                self.log.warn('Ignoring internal hyperlink with href (%s) pointing to unknown destination' % url)
        if purl.scheme in {'http', 'https', 'ftp'}:
            # External link: needs a relationship entry in the package
            if url not in self.external_links:
                self.external_links[url] = self.document_relationships.add_relationship(url, self.namespace.names['LINKS'], target_mode='External')
            return make_link(parent, id=self.external_links[url], tooltip=tooltip)
        return parent

    def process_toc_node(self, toc, level=0):
        # Record a TOCItem for this node when its destination bookmark is
        # known, then recurse into the children one level deeper
        href = toc.href
        if href:
            purl = urlparse(href)
            href = purl.path
            if href in self.document_hrefs:
                key = (href, purl.fragment or self.top_anchor)
                if key in self.anchor_map:
                    bmark = self.anchor_map[key]
                else:
                    bmark = self.anchor_map[(href, self.top_anchor)]
                self.toc.append(TOCItem(toc.title, bmark, level))
        for child in toc:
            self.process_toc_node(child, level+1)

    def process_toc_links(self, oeb):
        '''Build self.toc from the book's Table of Contents, marking the
        first and last entries.'''
        self.toc = []
        has_toc = oeb.toc and oeb.toc.count() > 1
        if not has_toc:
            return
        for child in oeb.toc:
            self.process_toc_node(child)
        if self.toc:
            self.toc[0].is_first = True
            self.toc[-1].is_last = True

    def serialize_toc(self, body, primary_heading_style):
        '''Insert the TOC entries and a "Table of Contents" heading at the
        start of the document body.'''
        # Force a page break before what is currently the first paragraph so
        # the content starts on a new page after the TOC.
        # NOTE(review): assumes body[0] contains a pageBreakBefore element —
        # TODO confirm against apply_section_page_breaks
        pbb = body[0].xpath('//*[local-name()="pageBreakBefore"]')[0]
        pbb.set('{%s}val' % self.namespace.namespaces['w'], 'on')
        # Entries are serialized in reverse since each inserts at position 0
        for block in reversed(self.toc):
            block.serialize(body, self.namespace.makeelement)

        title = __('Table of Contents')
        makeelement = self.namespace.makeelement
        p = makeelement(body, 'w:p', append=False)
        ppr = makeelement(p, 'w:pPr')
        if primary_heading_style is not None:
            makeelement(ppr, 'w:pStyle', w_val=primary_heading_style.id)
        makeelement(ppr, 'w:pageBreakBefore', w_val='off')
        makeelement(makeelement(p, 'w:r'), 'w:t').text = title
        body.insert(0, p)
| 6,767 | Python | .py | 155 | 33.606452 | 147 | 0.581424 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,580 | tables.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/tables.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import namedtuple
from calibre.ebooks.docx.writer.styles import border_edges
from calibre.ebooks.docx.writer.styles import read_css_block_borders as rcbb
from calibre.ebooks.docx.writer.utils import convert_color
from polyglot.builtins import iteritems
class Dummy:
    # Bare attribute holder, used by SpannedCell.read_css_block_borders to
    # collect the attributes set by rcbb()
    pass
# A resolved border for one edge of a cell: the CSS border-style string, the
# Word border style, the width, the color and the precedence level used as the
# final tie-breaker in Cell.resolve_border (higher level wins)
Border = namedtuple('Border', 'css_style style width color level')
# Ranking of CSS border styles used to break ties between borders of equal
# width (e.g. double beats solid); unknown styles rank lowest
border_style_weight = {
    x:100-i for i, x in enumerate(('double', 'solid', 'dashed', 'dotted', 'ridge', 'outset', 'groove', 'inset'))}
class SpannedCell:

    '''Placeholder for a table cell covered by a row/column span; it delegates
    border resolution to the cell that actually spans it.'''

    # BUGFIX: read_css_block_borders references self.BLEVEL but no BLEVEL was
    # defined on this class, so calling it raised AttributeError. A spanned
    # cell stands in at the cell level, so it uses the same precedence as Cell.
    BLEVEL = 2

    def __init__(self, spanning_cell, horizontal=True):
        self.spanning_cell = spanning_cell
        self.horizontal = horizontal
        self.row_span = self.col_span = 1

    def resolve_borders(self):
        pass  # borders come from the spanning cell

    def serialize(self, tr, makeelement):
        # Emit an empty cell that continues the horizontal/vertical merge
        tc = makeelement(tr, 'w:tc')
        tcPr = makeelement(tc, 'w:tcPr')
        makeelement(tcPr, 'w:%sMerge' % ('h' if self.horizontal else 'v'), w_val='continue')
        makeelement(tc, 'w:p')

    def applicable_borders(self, edge):
        return self.spanning_cell.applicable_borders(edge)

    def read_css_block_borders(self, css):
        # Collect the border/padding attributes computed by rcbb() and store
        # them on self as Border tuples, one per edge
        obj = Dummy()
        rcbb(obj, css, store_css_style=True)
        for edge in border_edges:
            setattr(self, 'border_' + edge, Border(
                getattr(obj, 'border_%s_css_style' % edge),
                getattr(obj, 'border_%s_style' % edge),
                getattr(obj, 'border_%s_width' % edge),
                getattr(obj, 'border_%s_color' % edge),
                self.BLEVEL
            ))
            setattr(self, 'padding_' + edge, getattr(obj, 'padding_' + edge))
def as_percent(x):
    '''Return the numeric value of a CSS percentage string like "50%", or
    None when x is empty or not a percentage.'''
    if not x or not x.endswith('%'):
        return None
    try:
        return float(x.rstrip('%'))
    except Exception:
        return None
def convert_width(tag_style):
    '''Convert a CSS width into a (w:type, w:w) pair for a <w:tcW> element.

    Percentages map to the 'pct' type (fiftieths of a percent), absolute
    widths to 'dxa' (twentieths of a point); everything else is 'auto'.
    '''
    if tag_style is None:
        return ('auto', 0)
    w = tag_style._get('width')
    if w == 'auto':
        return ('auto', 0)
    wp = as_percent(w)
    if wp is not None:
        return ('pct', int(wp * 50))
    try:
        return ('dxa', int(float(tag_style['width']) * 20))
    except Exception:
        return ('auto', 0)
class Cell:

    '''A single table cell; converts HTML td/th attributes and CSS into Word
    cell properties and resolves border conflicts with its neighbors.'''

    # Precedence level of cell borders in conflict resolution (rows use 1)
    BLEVEL = 2

    def __init__(self, row, html_tag, tag_style=None):
        self.row = row
        self.table = self.row.table
        self.html_tag = html_tag
        # Malformed rowspan/colspan attributes fall back to 1
        try:
            self.row_span = max(0, int(html_tag.get('rowspan', 1)))
        except Exception:
            self.row_span = 1
        try:
            self.col_span = max(0, int(html_tag.get('colspan', 1)))
        except Exception:
            self.col_span = 1
        if tag_style is None:
            self.valign = 'center'
        else:
            self.valign = {'top':'top', 'bottom':'bottom', 'middle':'center'}.get(tag_style._get('vertical-align'))
        self.items = []  # blocks and nested tables contained in this cell
        self.width = convert_width(tag_style)  # (w:type, w:w) pair
        self.background_color = None if tag_style is None else convert_color(tag_style.backgroundColor)
        read_css_block_borders(self, tag_style)

    def add_block(self, block):
        self.items.append(block)
        block.parent_items = self.items

    def add_table(self, table):
        self.items.append(table)
        return table

    def serialize(self, parent, makeelement):
        '''Write this cell out as a <w:tc> element under parent.'''
        tc = makeelement(parent, 'w:tc')
        tcPr = makeelement(tc, 'w:tcPr')
        makeelement(tcPr, 'w:tcW', w_type=self.width[0], w_w=str(self.width[1]))
        # For some reason, Word 2007 refuses to honor <w:shd> at the table or row
        # level, despite what the specs say, so we inherit and apply at the
        # cell level
        bc = self.background_color or self.row.background_color or self.row.table.background_color
        if bc:
            makeelement(tcPr, 'w:shd', w_val="clear", w_color="auto", w_fill=bc)

        # Only attach tcBorders/tcMar when they have at least one child
        b = makeelement(tcPr, 'w:tcBorders', append=False)
        for edge, border in iteritems(self.borders):
            if border is not None and border.width > 0 and border.style != 'none':
                makeelement(b, 'w:' + edge, w_val=border.style, w_sz=str(border.width), w_color=border.color)
        if len(b) > 0:
            tcPr.append(b)

        m = makeelement(tcPr, 'w:tcMar', append=False)
        for edge in border_edges:
            padding = getattr(self, 'padding_' + edge)
            # Row padding folds into the outermost cells (and top/bottom)
            if edge in {'top', 'bottom'} or (edge == 'left' and self is self.row.first_cell) or (edge == 'right' and self is self.row.last_cell):
                padding += getattr(self.row, 'padding_' + edge)
            if padding > 0:
                makeelement(m, 'w:' + edge, w_type='dxa', w_w=str(int(padding * 20)))
        if len(m) > 0:
            tcPr.append(m)

        if self.valign is not None:
            makeelement(tcPr, 'w:vAlign', w_val=self.valign)

        if self.row_span > 1:
            makeelement(tcPr, 'w:vMerge', w_val='restart')
        if self.col_span > 1:
            makeelement(tcPr, 'w:hMerge', w_val='restart')

        item = None
        for item in self.items:
            item.serialize(tc)
        if item is None or isinstance(item, Table):
            # Word 2007 requires the last element in a table cell to be a paragraph
            makeelement(tc, 'w:p')

    def applicable_borders(self, edge):
        '''The set of Border tuples (from cell, row and/or table) that can
        contribute to this cell's border on the given edge.'''
        if edge == 'left':
            items = {self.table, self.row, self} if self.row.first_cell is self else {self}
        elif edge == 'top':
            items = ({self.table} if self.table.first_row is self.row else set()) | {self, self.row}
        elif edge == 'right':
            items = {self.table, self, self.row} if self.row.last_cell is self else {self}
        elif edge == 'bottom':
            items = ({self.table} if self.table.last_row is self.row else set()) | {self, self.row}
        return {getattr(x, 'border_' + edge) for x in items}

    def resolve_border(self, edge):
        # In Word cell borders override table borders, and Word ignores row
        # borders, so we consolidate all borders as cell borders
        # In HTML the priority is as described here:
        # http://www.w3.org/TR/CSS21/tables.html#border-conflict-resolution
        neighbor = self.neighbor(edge)
        borders = self.applicable_borders(edge)
        if neighbor is not None:
            # The adjoining edge of the neighboring cell also competes
            nedge = {'left':'right', 'top':'bottom', 'right':'left', 'bottom':'top'}[edge]
            borders |= neighbor.applicable_borders(nedge)

        for b in borders:
            if b.css_style == 'hidden':
                # hidden suppresses the border entirely
                return None

        def weight(border):
            # Sort key: real borders beat 'none', then wider beats narrower,
            # then style ranking, then precedence level as tie-breaker
            return (
                0 if border.css_style == 'none' else 1,
                border.width,
                border_style_weight.get(border.css_style, 0),
                border.level)
        border = sorted(borders, key=weight)[-1]
        return border

    def resolve_borders(self):
        self.borders = {edge:self.resolve_border(edge) for edge in border_edges}

    def neighbor(self, edge):
        '''Return the adjacent cell on the given edge (or None), resolving
        SpannedCell placeholders to their spanning cell.'''
        idx = self.row.cells.index(self)
        ans = None
        if edge == 'left':
            ans = self.row.cells[idx-1] if idx > 0 else None
        elif edge == 'right':
            ans = self.row.cells[idx+1] if (idx + 1) < len(self.row.cells) else None
        elif edge == 'top':
            ridx = self.table.rows.index(self.row)
            if ridx > 0 and idx < len(self.table.rows[ridx-1].cells):
                ans = self.table.rows[ridx-1].cells[idx]
        elif edge == 'bottom':
            ridx = self.table.rows.index(self.row)
            if ridx + 1 < len(self.table.rows) and idx < len(self.table.rows[ridx+1].cells):
                ans = self.table.rows[ridx+1].cells[idx]
        return getattr(ans, 'spanning_cell', ans)
class Row:
    '''A single table row (<w:tr>): accumulates Cell objects as the HTML
    tree is walked.'''

    # Border conflict-resolution level of row-originated borders (table=0,
    # row=1, cell=2) -- presumably recorded on Border objects and used as a
    # tie-break in Cell.resolve_border(); confirm against
    # read_css_block_borders.
    BLEVEL = 1

    def __init__(self, table, html_tag, tag_style=None):
        self.table = table
        self.html_tag = html_tag
        self.orig_tag_style = tag_style
        self.cells = []
        self.current_cell = None
        self.background_color = None if tag_style is None else convert_color(tag_style.backgroundColor)
        read_css_block_borders(self, tag_style)

    @property
    def first_cell(self):
        return self.cells[0] if self.cells else None

    @property
    def last_cell(self):
        return self.cells[-1] if self.cells else None

    def start_new_cell(self, html_tag, tag_style):
        # The cell is only committed to self.cells by finish_tag()
        self.current_cell = Cell(self, html_tag, tag_style)

    def finish_tag(self, html_tag):
        # Commit the current cell if html_tag is the tag that opened it
        if self.current_cell is not None:
            if html_tag is self.current_cell.html_tag:
                self.cells.append(self.current_cell)
                self.current_cell = None

    def add_block(self, block):
        # Content encountered outside an explicit cell gets an implicit one
        if self.current_cell is None:
            self.start_new_cell(self.html_tag, self.orig_tag_style)
        self.current_cell.add_block(block)

    def add_table(self, table):
        # Consistency fix: create the implicit cell via start_new_cell() as
        # add_block() does, instead of constructing Cell inline (behavior is
        # identical, the arguments were the same).
        if self.current_cell is None:
            self.start_new_cell(self.html_tag, self.orig_tag_style)
        return self.current_cell.add_table(table)

    def serialize(self, parent, makeelement):
        # A row serializes as a <w:tr> containing its serialized cells
        tr = makeelement(parent, 'w:tr')
        for cell in self.cells:
            cell.serialize(tr, makeelement)
class Table:
    '''An HTML table being converted to a <w:tbl>: accumulates Row objects,
    expands row/col spans and resolves borders when the table tag closes.'''

    # Border conflict-resolution level of table-originated borders (table=0,
    # row=1, cell=2) -- presumably recorded on Border objects and used as a
    # tie-break in Cell.resolve_border(); confirm against
    # read_css_block_borders.
    BLEVEL = 0

    def __init__(self, namespace, html_tag, tag_style=None):
        self.namespace = namespace
        self.html_tag = html_tag
        self.orig_tag_style = tag_style
        self.rows = []
        self.current_row = None
        self.width = convert_width(tag_style)
        self.background_color = None if tag_style is None else convert_color(tag_style.backgroundColor)
        self.jc = None
        self.float = None
        self.margin_left = self.margin_right = self.margin_top = self.margin_bottom = None
        if tag_style is not None:
            # Bug fix: margin-right was read with tag_style.get(), but the
            # stylizer accessor used everywhere else in this conversion is
            # _get() (see the image margin handling); use it for both sides
            # so 'auto' is seen unresolved.
            ml, mr = tag_style._get('margin-left'), tag_style._get('margin-right')
            if ml == 'auto':
                # CSS auto-margin centering maps to Word table justification
                self.jc = 'center' if mr == 'auto' else 'right'
            self.float = tag_style['float']
            for edge in border_edges:
                setattr(self, 'margin_' + edge, tag_style['margin-' + edge])
        read_css_block_borders(self, tag_style)

    @property
    def first_row(self):
        return self.rows[0] if self.rows else None

    @property
    def last_row(self):
        return self.rows[-1] if self.rows else None

    def finish_tag(self, html_tag):
        '''Close any structure opened by html_tag. Returns True iff the tag
        closes this table, at which point spanned cells are expanded and all
        cell borders are resolved.'''
        if self.current_row is not None:
            self.current_row.finish_tag(html_tag)
            if self.current_row.html_tag is html_tag:
                self.rows.append(self.current_row)
                self.current_row = None
        table_ended = self.html_tag is html_tag
        if table_ended:
            self.expand_spanned_cells()
            for row in self.rows:
                for cell in row.cells:
                    cell.resolve_borders()
        return table_ended

    def expand_spanned_cells(self):
        '''Insert SpannedCell placeholders so that every (row, column)
        position covered by a rowspan/colspan has an entry.'''
        # Expand horizontally
        for row in self.rows:
            for cell in tuple(row.cells):
                idx = row.cells.index(cell)
                if cell.col_span > 1 and (cell is row.cells[-1] or not isinstance(row.cells[idx+1], SpannedCell)):
                    row.cells[idx:idx+1] = [cell] + [SpannedCell(cell, horizontal=True) for i in range(1, cell.col_span)]
        # Expand vertically
        for r, row in enumerate(self.rows):
            for idx, cell in enumerate(row.cells):
                if cell.row_span > 1:
                    for nrow in self.rows[r+1:]:
                        sc = SpannedCell(cell, horizontal=False)
                        try:
                            tcell = nrow.cells[idx]
                        except Exception:
                            tcell = None
                        if tcell is None:
                            # Row is too short: pad it out, then append the
                            # vertical continuation placeholder
                            nrow.cells.extend([SpannedCell(nrow.cells[-1], horizontal=True) for i in range(idx - len(nrow.cells))])
                            nrow.cells.append(sc)
                        else:
                            if isinstance(tcell, SpannedCell):
                                # Conflict between rowspan and colspan
                                break
                            else:
                                nrow.cells.insert(idx, sc)

    def start_new_row(self, html_tag, html_style):
        if self.current_row is not None:
            self.rows.append(self.current_row)
        self.current_row = Row(self, html_tag, html_style)

    def start_new_cell(self, html_tag, html_style):
        # A cell encountered outside an explicit row starts an implicit row
        if self.current_row is None:
            self.start_new_row(html_tag, None)
        self.current_row.start_new_cell(html_tag, html_style)

    def add_block(self, block):
        self.current_row.add_block(block)

    def add_table(self, table):
        if self.current_row is None:
            self.current_row = Row(self, self.html_tag, self.orig_tag_style)
        return self.current_row.add_table(table)

    def serialize(self, parent):
        '''Emit this table as a <w:tbl> under parent, skipping rows that
        ended up with no cells; does nothing if no row has content.'''
        makeelement = self.namespace.makeelement
        rows = [r for r in self.rows if r.cells]
        if not rows:
            return
        tbl = makeelement(parent, 'w:tbl')
        tblPr = makeelement(tbl, 'w:tblPr')
        makeelement(tblPr, 'w:tblW', w_type=self.width[0], w_w=str(self.width[1]))
        if self.float in {'left', 'right'}:
            # Floated tables become text-anchored floating tables; distances
            # from the surrounding text come from the CSS margins (pts*20=dxa)
            kw = {'w_vertAnchor':'text', 'w_horzAnchor':'text', 'w_tblpXSpec':self.float}
            for edge in border_edges:
                val = getattr(self, 'margin_' + edge) or 0
                if {self.float, edge} == {'left', 'right'}:
                    # Guarantee a minimum gap on the side text flows around
                    val = max(val, 2)
                kw['w_' + edge + 'FromText'] = str(max(0, int(val * 20)))
            makeelement(tblPr, 'w:tblpPr', **kw)
        if self.jc is not None:
            makeelement(tblPr, 'w:jc', w_val=self.jc)
        for row in rows:
            row.serialize(tbl, makeelement)
| 13,929 | Python | .py | 308 | 34.512987 | 145 | 0.57892 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,581 | images.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/images.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import posixpath
from collections import namedtuple
from functools import partial
from lxml import etree
from calibre import fit_image
from calibre.ebooks.docx.images import pt_to_emu
from calibre.ebooks.docx.names import SVG_BLIP_URI, USE_LOCAL_DPI_URI
from calibre.ebooks.oeb.base import urlquote, urlunquote
from calibre.utils.filenames import ascii_filename
from calibre.utils.imghdr import identify
from calibre.utils.resources import get_image_path as I
# Record describing an image added to the DOCX package: relationship id,
# filename inside word/, pixel width/height, format string and the OEB
# manifest item it came from.
Image = namedtuple('Image', 'rid fname width height fmt item')
def as_num(x):
    # Coerce x to a float, treating anything unparseable (None, '', 'auto',
    # other junk CSS values) as zero.
    try:
        ans = float(x)
    except Exception:
        ans = 0
    return ans
def get_image_margins(style):
    # Convert the CSS margin+padding on every side to the DOCX
    # distance-from-text attributes (distL/distR/distT/distB), in EMUs.
    def dist(edge):
        total = as_num(getattr(style, 'padding' + edge)) + as_num(getattr(style, 'margin' + edge))
        return str(pt_to_emu(total))
    return {'dist' + edge[0]: dist(edge) for edge in ('Left', 'Right', 'Top', 'Bottom')}
class ImagesManager:
    '''Collects every image referenced by the HTML, assigns package
    filenames and relationship ids, and builds the DOCX drawing markup.'''

    def __init__(self, oeb, document_relationships, opts, svg_rasterizer):
        self.oeb, self.log = oeb, oeb.log
        self.svg_rasterizer = svg_rasterizer
        self.page_width, self.page_height = opts.output_profile.width_pts, opts.output_profile.height_pts
        # Maps of href -> Image record, for raster images and SVG originals
        self.images = {}
        self.seen_filenames = set()
        self.document_relationships = document_relationships
        # Counter used to generate unique ids for wp:docPr elements
        self.count = 0
        self.svg_images = {}

    def read_svg(self, href):
        '''Register the SVG manifest item for href and return its Image
        record (no size information), or None when it cannot be found.'''
        if href not in self.svg_images:
            item = self.oeb.manifest.hrefs.get(href) or self.oeb.manifest.hrefs.get(urlquote(href))
            if item is None:
                self.log.warning('Failed to find image:', href)
                return
            image_fname = 'media/' + self.create_filename(href, 'svg')
            image_rid = self.document_relationships.add_image(image_fname)
            self.svg_images[href] = Image(image_rid, image_fname, -1, -1, 'svg', item)
        return self.svg_images[href]

    def read_image(self, href):
        '''Register the image at href and return its Image record, or None
        when it cannot be found. Corrupted images are replaced by a blank
        placeholder.'''
        if href not in self.images:
            item = self.oeb.manifest.hrefs.get(href) or self.oeb.manifest.hrefs.get(urlquote(href))
            try:
                if item is None or not isinstance(item.data, bytes):
                    self.log.warning('Failed to find image:', href)
                    return
            except FileNotFoundError:
                self.log.warning('Failed to find image:', href)
                return
            try:
                fmt, width, height = identify(item.data)
            except Exception:
                self.log.warning('Replacing corrupted image with blank: %s' % href)
                item.data = I('blank.png', data=True, allow_user_override=False)
                fmt, width, height = identify(item.data)
            image_fname = 'media/' + self.create_filename(href, fmt)
            image_rid = self.document_relationships.add_image(image_fname)
            self.images[href] = Image(image_rid, image_fname, width, height, fmt, item)
            item.unload_data_from_memory()
        return self.images[href]

    def add_image(self, img, block, stylizer, bookmark=None, as_block=False):
        '''Convert the HTML <img> element into DOCX drawing markup appended
        to block. Returns the image's relationship id, or None on failure.'''
        src = img.get('src')
        if not src:
            return
        # NOTE(review): self.abshref is not set in this class; it is
        # presumably assigned by the conversion pipeline -- confirm.
        href = self.abshref(src)
        try:
            # read_image() returns None on failure, raising AttributeError here
            rid = self.read_image(href).rid
        except AttributeError:
            return
        drawing = self.create_image_markup(img, stylizer, href, as_block=as_block)
        block.add_image(drawing, bookmark=bookmark)
        return rid

    def create_image_markup(self, html_img, stylizer, href, as_block=False):
        '''Build the <w:drawing> element for an image, inline or anchored
        (floating), depending on the computed CSS.'''
        # TODO: img inside a link (clickable image)
        svg_rid = ''
        svghref = self.svg_rasterizer.svg_originals.get(href)
        if svghref:
            si = self.read_svg(svghref)
            if si:
                svg_rid = si.rid
        style = stylizer.style(html_img)
        floating = style['float']
        if floating not in {'left', 'right'}:
            floating = None
        if as_block:
            ml, mr = style._get('margin-left'), style._get('margin-right')
            if ml == 'auto':
                floating = 'center' if mr == 'auto' else 'right'
            if mr == 'auto':
                # Bug fix: an auto right margin with a fixed left margin
                # pushes the image to the LEFT; this previously said 'right'
                floating = 'center' if ml == 'auto' else 'left'
        else:
            parent = html_img.getparent()
            if len(parent) == 1 and not (parent.text or '').strip() and not (html_img.tail or '').strip():
                pstyle = stylizer.style(parent)
                if 'block' in pstyle['display']:
                    # We have an inline image alone inside a block
                    as_block = True
                    floating = pstyle['float']
                    if floating not in {'left', 'right'}:
                        floating = None
                        if pstyle['text-align'] in ('center', 'right'):
                            floating = pstyle['text-align']
                    floating = floating or 'left'
        fake_margins = floating is None
        self.count += 1
        img = self.images[href]
        name = urlunquote(posixpath.basename(href))
        width, height = style.img_size(img.width, img.height)
        scaled, width, height = fit_image(width, height, self.page_width, self.page_height)
        width, height = map(pt_to_emu, (width, height))
        makeelement, namespaces = self.document_relationships.namespace.makeelement, self.document_relationships.namespace.namespaces
        root = etree.Element('root', nsmap=namespaces)
        ans = makeelement(root, 'w:drawing', append=False)
        if floating is None:
            parent = makeelement(ans, 'wp:inline')
        else:
            parent = makeelement(ans, 'wp:anchor', **get_image_margins(style))
            # The next three lines are boilerplate that Word requires, even
            # though the DOCX specs define defaults for all of them
            parent.set('simplePos', '0'), parent.set('relativeHeight', '1'), parent.set('behindDoc',"0"), parent.set('locked', "0")
            parent.set('layoutInCell', "1"), parent.set('allowOverlap', '1')
            makeelement(parent, 'wp:simplePos', x='0', y='0')
            makeelement(makeelement(parent, 'wp:positionH', relativeFrom='margin'), 'wp:align').text = floating
            makeelement(makeelement(parent, 'wp:positionV', relativeFrom='line'), 'wp:align').text = 'top'
        makeelement(parent, 'wp:extent', cx=str(width), cy=str(height))
        if fake_margins:
            # DOCX does not support setting margins for inline images, so we
            # fake it by using effect extents to simulate margins
            makeelement(parent, 'wp:effectExtent', **{k[-1].lower():v for k, v in get_image_margins(style).items()})
        else:
            makeelement(parent, 'wp:effectExtent', l='0', r='0', t='0', b='0')
        if floating is not None:
            # The idiotic Word requires this to be after the extent settings
            if as_block:
                makeelement(parent, 'wp:wrapTopAndBottom')
            else:
                makeelement(parent, 'wp:wrapSquare', wrapText='bothSides')
        self.create_docx_image_markup(parent, name, html_img.get('alt') or name, img.rid, width, height, svg_rid=svg_rid)
        return ans

    def create_docx_image_markup(self, parent, name, alt, img_rid, width, height, svg_rid=''):
        '''Emit the pic:pic structure shared by inline, anchored and cover
        images. When svg_rid is given the original SVG is attached via the
        Microsoft SVG blip extension.'''
        makeelement, namespaces = self.document_relationships.namespace.makeelement, self.document_relationships.namespace.namespaces
        makeelement(parent, 'wp:docPr', id=str(self.count), name=name, descr=alt)
        makeelement(makeelement(parent, 'wp:cNvGraphicFramePr'), 'a:graphicFrameLocks', noChangeAspect="1")
        g = makeelement(parent, 'a:graphic')
        gd = makeelement(g, 'a:graphicData', uri=namespaces['pic'])
        pic = makeelement(gd, 'pic:pic')
        nvPicPr = makeelement(pic, 'pic:nvPicPr')
        makeelement(nvPicPr, 'pic:cNvPr', id='0', name=name, descr=alt)
        makeelement(nvPicPr, 'pic:cNvPicPr')
        bf = makeelement(pic, 'pic:blipFill')
        blip = makeelement(bf, 'a:blip', r_embed=img_rid)
        if svg_rid:
            ext_list = makeelement(blip, 'a:extLst')
            makeelement(makeelement(ext_list, 'a:ext', uri=USE_LOCAL_DPI_URI), 'a14:useLocalDpi', val='0')
            makeelement(makeelement(ext_list, 'a:ext', uri=SVG_BLIP_URI), 'asvg:svgBlip', r_embed=svg_rid)
        makeelement(makeelement(bf, 'a:stretch'), 'a:fillRect')
        spPr = makeelement(pic, 'pic:spPr')
        xfrm = makeelement(spPr, 'a:xfrm')
        makeelement(xfrm, 'a:off', x='0', y='0'), makeelement(xfrm, 'a:ext', cx=str(width), cy=str(height))
        makeelement(makeelement(spPr, 'a:prstGeom', prst='rect'), 'a:avLst')

    def create_filename(self, href, fmt):
        '''Generate an ASCII filename, unique within the package, for the
        image at href, with extension fmt.'''
        fname = ascii_filename(urlunquote(posixpath.basename(href)))
        fname = posixpath.splitext(fname)[0]
        fname = fname[:75].rstrip('.') or 'image'
        num = 0
        base = fname
        while fname.lower() in self.seen_filenames:
            num += 1
            fname = base + str(num)
        self.seen_filenames.add(fname.lower())
        fname += os.extsep + fmt.lower()
        return fname

    def serialize(self, images_map):
        '''Register callables producing the raw bytes of every referenced
        image in images_map, keyed by the part name inside the package.'''
        for img in self.images.values():
            images_map['word/' + img.fname] = partial(self.get_data, img.item)

        def get_svg_data(img):
            return img.item.data_as_bytes_or_none
        for img in self.svg_images.values():
            images_map['word/' + img.fname] = partial(get_svg_data, img)

    def get_data(self, item):
        # Read the image bytes, then immediately release them from memory
        try:
            return item.data
        finally:
            item.unload_data_from_memory(False)

    def create_cover_markup(self, img, preserve_aspect_ratio, width, height):
        '''Build an anchored, page-centered drawing for the cover image;
        width and height are in points.'''
        self.count += 1
        makeelement, namespaces = self.document_relationships.namespace.makeelement, self.document_relationships.namespace.namespaces
        if preserve_aspect_ratio:
            # Fit the image into the requested box keeping its aspect ratio
            if img.width >= img.height:
                ar = img.height / img.width
                height = ar * width
            else:
                ar = img.width / img.height
                width = ar * height
        root = etree.Element('root', nsmap=namespaces)
        ans = makeelement(root, 'w:drawing', append=False)
        parent = makeelement(ans, 'wp:anchor', **{'dist'+edge:'0' for edge in 'LRTB'})
        parent.set('simplePos', '0'), parent.set('relativeHeight', '1'), parent.set('behindDoc',"0"), parent.set('locked', "0")
        parent.set('layoutInCell', "1"), parent.set('allowOverlap', '1')
        makeelement(parent, 'wp:simplePos', x='0', y='0')
        makeelement(makeelement(parent, 'wp:positionH', relativeFrom='page'), 'wp:align').text = 'center'
        makeelement(makeelement(parent, 'wp:positionV', relativeFrom='page'), 'wp:align').text = 'center'
        width, height = map(pt_to_emu, (width, height))
        makeelement(parent, 'wp:extent', cx=str(width), cy=str(height))
        makeelement(parent, 'wp:effectExtent', l='0', r='0', t='0', b='0')
        makeelement(parent, 'wp:wrapTopAndBottom')
        self.create_docx_image_markup(parent, 'cover.jpg', _('Cover'), img.rid, width, height)
        return ans

    def write_cover_block(self, body, cover_image):
        '''Insert a paragraph holding the cover image at the start of the
        document body, forcing a page break before the original first
        paragraph.'''
        makeelement, namespaces = self.document_relationships.namespace.makeelement, self.document_relationships.namespace.namespaces
        pbb = body[0].xpath('//*[local-name()="pageBreakBefore"]')[0]
        pbb.set('{%s}val' % namespaces['w'], 'on')
        p = makeelement(body, 'w:p', append=False)
        body.insert(0, p)
        r = makeelement(p, 'w:r')
        r.append(cover_image)
| 11,722 | Python | .py | 225 | 41.693333 | 133 | 0.6113 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,582 | container.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/container.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import textwrap
from lxml import etree
from lxml.builder import ElementMaker
from calibre import guess_type
from calibre.constants import __appname__, numeric_version
from calibre.ebooks.docx.names import DOCXNamespace
from calibre.ebooks.metadata import authors_to_string
from calibre.ebooks.pdf.render.common import PAPER_SIZES
from calibre.utils.date import utcnow
from calibre.utils.localization import canonicalize_lang, lang_as_iso639_1
from calibre.utils.zipfile import ZipFile
from polyglot.builtins import iteritems, native_string_type
def xml2str(root, pretty_print=False, with_tail=False):
    # Serialize an lxml tree to UTF-8 bytes with an XML declaration,
    # cleaning up redundant namespace declarations first when the installed
    # lxml supports it.
    if hasattr(etree, 'cleanup_namespaces'):
        etree.cleanup_namespaces(root)
    return etree.tostring(
        root, encoding='utf-8', xml_declaration=True,
        pretty_print=pretty_print, with_tail=with_tail)
def page_size(opts):
    # Output page size in points: start from the named paper size and let an
    # explicit "WIDTHxHEIGHT" custom size override it.
    width, height = PAPER_SIZES[opts.docx_page_size]
    custom = opts.docx_custom_page_size
    if custom is not None:
        w, _, h = custom.partition('x')
        width, height = float(w), float(h)
    return width, height
def page_margin(opts, which):
    # A DOCX-specific margin of exactly zero means "unset": fall back to the
    # generic conversion margin for that side.
    docx_val = getattr(opts, 'docx_page_margin_' + which)
    return getattr(opts, 'margin_' + which) if docx_val == 0.0 else docx_val
def page_effective_area(opts):
    # Printable area in points: the page size minus the margins on all four
    # sides.
    width, height = page_size(opts)
    usable_width = width - page_margin(opts, 'left') - page_margin(opts, 'right')
    usable_height = height - page_margin(opts, 'top') - page_margin(opts, 'bottom')
    return usable_width, usable_height  # in pts
def create_skeleton(opts, namespaces=None):
    # Build the minimal lxml trees for a new DOCX: the main document (an
    # empty body carrying section properties sized from opts) and the styles
    # part with Word's document defaults. Returns (doc, styles, body).
    namespaces = namespaces or DOCXNamespace().namespaces
    def w(x):
        return '{{{}}}{}'.format(namespaces['w'], x)
    dn = {k:v for k, v in iteritems(namespaces) if k in {'w', 'r', 'm', 've', 'o', 'wp', 'w10', 'wne', 'a', 'pic'}}
    E = ElementMaker(namespace=dn['w'], nsmap=dn)
    doc = E.document()
    body = E.body()
    doc.append(body)
    # Page size and margins are emitted in dxa (twentieths of a point)
    width, height = page_size(opts)
    width, height = int(20 * width), int(20 * height)
    def margin(which):
        val = page_margin(opts, which)
        return w(which), str(int(val * 20))
    body.append(E.sectPr(
        E.pgSz(**{w('w'):str(width), w('h'):str(height)}),
        E.pgMar(**dict(map(margin, 'left top right bottom'.split()))),
        E.cols(**{w('space'):'720'}),
        E.docGrid(**{w('linePitch'):"360"}),
    ))
    dn = {k:v for k, v in iteritems(namespaces) if k in tuple('wra') + ('wp',)}
    E = ElementMaker(namespace=dn['w'], nsmap=dn)
    # Document defaults: 11pt text (sz is in half-points) and Word 2007's
    # default paragraph spacing
    styles = E.styles(
        E.docDefaults(
            E.rPrDefault(
                E.rPr(
                    E.rFonts(**{w('asciiTheme'):"minorHAnsi", w('eastAsiaTheme'):"minorEastAsia", w('hAnsiTheme'):"minorHAnsi", w('cstheme'):"minorBidi"}),
                    E.sz(**{w('val'):'22'}),
                    E.szCs(**{w('val'):'22'}),
                    E.lang(**{w('val'):'en-US', w('eastAsia'):"en-US", w('bidi'):"ar-SA"})
                )
            ),
            E.pPrDefault(
                E.pPr(
                    E.spacing(**{w('after'):"0", w('line'):"276", w('lineRule'):"auto"})
                )
            )
        )
    )
    return doc, styles, body
def update_doc_props(root, mi, namespace):
    # Write calibre metadata (mi) into the core-properties element `root`,
    # replacing any existing element with the same tag.
    def set_metadata(name, text=None, ns='dc'):
        tag = f'{{{namespace.namespaces[ns]}}}{name}'
        elem = root.makeelement(tag)
        for existing in tuple(root):
            if existing.tag == elem.tag:
                root.remove(existing)
        elem.text = text
        root.append(elem)
        return elem

    set_metadata('title', mi.title)
    set_metadata('creator', authors_to_string(mi.authors))
    if mi.tags:
        set_metadata('keywords', ', '.join(mi.tags), ns='cp')
    if mi.comments:
        set_metadata('description', mi.comments)
    if mi.languages:
        lang = canonicalize_lang(mi.languages[0])
        set_metadata('language', lang_as_iso639_1(lang) or lang)
class DocumentRelationships:
    '''Tracks the relationships of the main document part and assigns
    sequential rIds; serializes to word/_rels/document.xml.rels.'''

    def __init__(self, namespace):
        self.rmap = {}
        self.namespace = namespace
        # Parts that every document we generate links to
        standard_parts = {
            namespace.names['STYLES']: 'styles.xml',
            namespace.names['NUMBERING']: 'numbering.xml',
            namespace.names['WEB_SETTINGS']: 'webSettings.xml',
            namespace.names['FONTS']: 'fontTable.xml',
        }
        for rtype, target in standard_parts.items():
            self.add_relationship(target, rtype)

    def get_relationship_id(self, target, rtype, target_mode=None):
        # None when no such relationship has been registered yet
        return self.rmap.get((target, rtype, target_mode))

    def add_relationship(self, target, rtype, target_mode=None):
        # Idempotent: returns the existing id when this relationship is known
        rid = self.get_relationship_id(target, rtype, target_mode)
        if rid is None:
            rid = 'rId%d' % (len(self.rmap) + 1)
            self.rmap[(target, rtype, target_mode)] = rid
        return rid

    def add_image(self, target):
        return self.add_relationship(target, self.namespace.names['IMAGES'])

    def serialize(self):
        namespaces = self.namespace.namespaces
        E = ElementMaker(namespace=namespaces['pr'], nsmap={None: namespaces['pr']})
        relationships = E.Relationships()
        for (target, rtype, target_mode), rid in self.rmap.items():
            rel = E.Relationship(Id=rid, Type=rtype, Target=target)
            if target_mode is not None:
                rel.set('TargetMode', target_mode)
            relationships.append(rel)
        return xml2str(relationships)
class DOCX:
    # Assembles the complete OOXML package for a DOCX file and writes it out
    # as a ZIP container: content types, relationships, document, styles,
    # numbering, fonts and images.
    def __init__(self, opts, log):
        self.namespace = DOCXNamespace()
        namespaces = self.namespace.namespaces
        self.opts, self.log = opts, log
        self.document_relationships = DocumentRelationships(self.namespace)
        # Roots of the fontTable.xml and numbering.xml parts
        self.font_table = etree.Element('{%s}fonts' % namespaces['w'], nsmap={k:namespaces[k] for k in 'wr'})
        self.numbering = etree.Element('{%s}numbering' % namespaces['w'], nsmap={k:namespaces[k] for k in 'wr'})
        E = ElementMaker(namespace=namespaces['pr'], nsmap={None:namespaces['pr']})
        self.embedded_fonts = E.Relationships()
        # Filled in by the conversion pipeline before write() is called
        self.fonts = {}
        self.images = {}
    # Boilerplate {{{
    @property
    def contenttypes(self):
        # [Content_Types].xml: declares the MIME type of every part in the
        # package, either per part name (Override) or per extension (Default)
        E = ElementMaker(namespace=self.namespace.namespaces['ct'], nsmap={None:self.namespace.namespaces['ct']})
        types = E.Types()
        for partname, mt in iteritems({
            "/word/footnotes.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml",
            "/word/document.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml",
            "/word/numbering.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml",
            "/word/styles.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml",
            "/word/endnotes.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml",
            "/word/settings.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml",
            "/word/theme/theme1.xml": "application/vnd.openxmlformats-officedocument.theme+xml",
            "/word/fontTable.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.fontTable+xml",
            "/word/webSettings.xml": "application/vnd.openxmlformats-officedocument.wordprocessingml.webSettings+xml",
            "/docProps/core.xml": "application/vnd.openxmlformats-package.core-properties+xml",
            "/docProps/app.xml": "application/vnd.openxmlformats-officedocument.extended-properties+xml",
        }):
            types.append(E.Override(PartName=partname, ContentType=mt))
        added = {'png', 'gif', 'jpeg', 'jpg', 'svg', 'xml'}
        for ext in added:
            types.append(E.Default(Extension=ext, ContentType=guess_type('a.'+ext)[0]))
        for ext, mt in iteritems({
            "rels": "application/vnd.openxmlformats-package.relationships+xml",
            "odttf": "application/vnd.openxmlformats-officedocument.obfuscatedFont",
        }):
            added.add(ext)
            types.append(E.Default(Extension=ext, ContentType=mt))
        # Register the extension of every image actually in the package
        for fname in self.images:
            ext = fname.rpartition(os.extsep)[-1]
            if ext not in added:
                added.add(ext)
                mt = guess_type('a.' + ext)[0]
                if mt:
                    types.append(E.Default(Extension=ext, ContentType=mt))
        return xml2str(types)
    @property
    def appproperties(self):
        # docProps/app.xml: application-level metadata. Relies on self.mi,
        # which is set by convert_metadata() (write() calls it first).
        E = ElementMaker(namespace=self.namespace.namespaces['ep'], nsmap={None:self.namespace.namespaces['ep']})
        # NOTE(review): LinksUpToDate is 'true' while the other boolean flags
        # here are 'false'; typical app.xml boilerplate sets it to 'false' --
        # confirm this is intentional.
        props = E.Properties(
            E.Application(__appname__),
            E.AppVersion('%02d.%04d' % numeric_version[:2]),
            E.DocSecurity('0'),
            E.HyperlinksChanged('false'),
            E.LinksUpToDate('true'),
            E.ScaleCrop('false'),
            E.SharedDoc('false'),
        )
        if self.mi.publisher:
            props.append(E.Company(self.mi.publisher))
        return xml2str(props)
    @property
    def containerrels(self):
        # _rels/.rels: package-level relationships to the three root parts
        return textwrap.dedent('''\
            <?xml version='1.0' encoding='utf-8'?>
            <Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
                <Relationship Id="rId3" Type="{APPPROPS}" Target="docProps/app.xml"/>
                <Relationship Id="rId2" Type="{DOCPROPS}" Target="docProps/core.xml"/>
                <Relationship Id="rId1" Type="{DOCUMENT}" Target="word/document.xml"/>
            </Relationships>'''.format(**self.namespace.names)).encode('utf-8')
    @property
    def websettings(self):
        # word/webSettings.xml boilerplate
        E = ElementMaker(namespace=self.namespace.namespaces['w'], nsmap={'w':self.namespace.namespaces['w']})
        ws = E.webSettings(
            E.optimizeForBrowser, E.allowPNG, E.doNotSaveAsSingleFile)
        return xml2str(ws)
    # }}}
    def convert_metadata(self, mi):
        # docProps/core.xml: Dublin Core metadata built from the calibre
        # Metadata object mi (also stores it as self.mi for appproperties)
        namespaces = self.namespace.namespaces
        E = ElementMaker(namespace=namespaces['cp'], nsmap={x:namespaces[x] for x in 'cp dc dcterms xsi'.split()})
        cp = E.coreProperties(E.revision("1"), E.lastModifiedBy('calibre'))
        # W3CDTF timestamp, truncated to whole seconds
        ts = utcnow().isoformat(native_string_type('T')).rpartition('.')[0] + 'Z'
        for x in 'created modified'.split():
            x = cp.makeelement('{{{}}}{}'.format(namespaces['dcterms'], x), **{'{%s}type' % namespaces['xsi']:'dcterms:W3CDTF'})
            x.text = ts
            cp.append(x)
        self.mi = mi
        update_doc_props(cp, self.mi, self.namespace)
        return xml2str(cp)
    def create_empty_document(self, mi):
        # Replace document/styles with a freshly created skeleton (no content)
        self.document, self.styles = create_skeleton(self.opts)[:2]
    def write(self, path_or_stream, mi, create_empty_document=False):
        # Assemble every part and write the complete package as a ZIP
        if create_empty_document:
            self.create_empty_document(mi)
        with ZipFile(path_or_stream, 'w') as zf:
            zf.writestr('[Content_Types].xml', self.contenttypes)
            zf.writestr('_rels/.rels', self.containerrels)
            # convert_metadata() must run before appproperties: it sets self.mi
            zf.writestr('docProps/core.xml', self.convert_metadata(mi))
            zf.writestr('docProps/app.xml', self.appproperties)
            zf.writestr('word/webSettings.xml', self.websettings)
            zf.writestr('word/document.xml', xml2str(self.document))
            zf.writestr('word/styles.xml', xml2str(self.styles))
            zf.writestr('word/numbering.xml', xml2str(self.numbering))
            zf.writestr('word/fontTable.xml', xml2str(self.font_table))
            zf.writestr('word/_rels/document.xml.rels', self.document_relationships.serialize())
            zf.writestr('word/_rels/fontTable.xml.rels', xml2str(self.embedded_fonts))
            for fname, data_getter in iteritems(self.images):
                zf.writestr(fname, data_getter())
            for fname, data in iteritems(self.fonts):
                zf.writestr(fname, data)
# Ad hoc manual check: dump the webSettings.xml boilerplate to stdout
if __name__ == '__main__':
    d = DOCX(None, None)
    print(d.websettings)
| 11,913 | Python | .py | 241 | 40.190871 | 155 | 0.624226 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,583 | from_html.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/from_html.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import re
from collections import Counter
from calibre.ebooks.docx.writer.container import create_skeleton, page_effective_area, page_size
from calibre.ebooks.docx.writer.fonts import FontsManager
from calibre.ebooks.docx.writer.images import ImagesManager
from calibre.ebooks.docx.writer.links import LinksManager
from calibre.ebooks.docx.writer.lists import ListsManager
from calibre.ebooks.docx.writer.styles import FloatSpec, StylesManager
from calibre.ebooks.docx.writer.tables import Table
from calibre.ebooks.oeb.base import XPath, barename
from calibre.ebooks.oeb.stylizer import Style as St
from calibre.ebooks.oeb.stylizer import Stylizer as Sz
from calibre.utils.localization import lang_as_iso639_1
from polyglot.builtins import string_or_bytes
def lang_for_tag(tag):
    # Look for a language declaration on the tag itself, checking the plain
    # lang attribute before the namespaced xml:lang variant. Returns None
    # when neither yields a recognizable ISO 639-1 code.
    for attr in ('lang', '{http://www.w3.org/XML/1998/namespace}lang'):
        code = lang_as_iso639_1(tag.get(attr))
        if code:
            return code
    return None
class Style(St):
    '''Stylizer Style subclass that adds a lazily computed, cached
    letter-spacing value.'''

    def __init__(self, *args, **kwargs):
        St.__init__(self, *args, **kwargs)
        # Cache slot for the computed letter-spacing; None means "not yet
        # computed" and triggers computation on first property access.
        self._letterSpacing = None

    @property
    def letterSpacing(self):
        # Bug fix: the cache check was inverted (`is not None`), so the
        # value was never computed and the property always returned None.
        # Compute on first access and cache the result.
        if self._letterSpacing is None:
            val = self._get('letter-spacing')
            if val == 'normal':
                self._letterSpacing = val
            else:
                self._letterSpacing = self._unit_convert(val)
        return self._letterSpacing
class Stylizer(Sz):

    def style(self, element):
        # Return the cached Style for element, creating one on demand for
        # elements the base stylizer has not processed.
        if element in self._styles:
            return self._styles[element]
        return Style(element, self)
class TextRun:
    # Compiled regexes shared by all runs, initialized lazily by the first
    # instance: ws_pat collapses whitespace runs, soft_hyphen_pat splits
    # around U+00AD so it can be emitted as <w:softHyphen>.
    ws_pat = soft_hyphen_pat = None
    def __init__(self, namespace, style, first_html_parent, lang=None):
        self.first_html_parent = first_html_parent
        if self.ws_pat is None:
            TextRun.ws_pat = self.ws_pat = re.compile(r'\s+')
            TextRun.soft_hyphen_pat = self.soft_hyphen_pat = re.compile('(\u00ad)')
        self.style = style
        # Each entry is a (text, preserve_whitespace, bookmark) triple; text
        # may also be None (line break, second field holds w:clear) or a
        # drawing element (inline image, second field unused)
        self.texts = []
        self.link = None
        self.lang = lang
        self.parent_style = None
        self.makeelement = namespace.makeelement
        self.descendant_style = None
    def add_text(self, text, preserve_whitespace, bookmark=None, link=None):
        # Queue a piece of text; whitespace runs are collapsed unless the
        # caller asked for them to be preserved
        if not preserve_whitespace:
            text = self.ws_pat.sub(' ', text)
            if text.strip() != text:
                # If preserve_whitespace is False, Word ignores leading and
                # trailing whitespace
                preserve_whitespace = True
        self.texts.append((text, preserve_whitespace, bookmark))
        self.link = link
    def add_break(self, clear='none', bookmark=None):
        # A None text entry encodes a <w:br>; the second slot carries the
        # w:clear value
        self.texts.append((None, clear, bookmark))
    def add_image(self, drawing, bookmark=None):
        # A drawing element in the text slot encodes an inline image
        self.texts.append((drawing, None, bookmark))
    def serialize(self, p, links_manager):
        # Emit this run as a <w:r> (wrapped in a hyperlink when self.link is
        # set), expanding queued breaks, images and soft hyphens
        makeelement = self.makeelement
        parent = p if self.link is None else links_manager.serialize_hyperlink(p, self.link)
        r = makeelement(parent, 'w:r')
        rpr = makeelement(r, 'w:rPr', append=False)
        if getattr(self.descendant_style, 'id', None) is not None:
            makeelement(rpr, 'w:rStyle', w_val=self.descendant_style.id)
        if self.lang:
            makeelement(rpr, 'w:lang', w_bidi=self.lang, w_val=self.lang, w_eastAsia=self.lang)
        # Only attach run properties when they actually contain something
        if len(rpr) > 0:
            r.append(rpr)
        def add_text(text, preserve_whitespace):
            t = makeelement(r, 'w:t')
            t.text = text
            if preserve_whitespace:
                t.set('{http://www.w3.org/XML/1998/namespace}space', 'preserve')
        for text, preserve_whitespace, bookmark in self.texts:
            if bookmark is not None:
                bid = links_manager.bookmark_id
                makeelement(r, 'w:bookmarkStart', w_id=str(bid), w_name=bookmark)
            if text is None:
                # Break entry: second field is the w:clear value
                makeelement(r, 'w:br', w_clear=preserve_whitespace)
            elif hasattr(text, 'xpath'):
                # Drawing (image) entry: append the element directly
                r.append(text)
            else:
                if text:
                    for x in self.soft_hyphen_pat.split(text):
                        if x == '\u00ad':
                            # trailing spaces in <w:t> before a soft hyphen are
                            # ignored, so put them in a preserve whitespace
                            # element with a single space.
                            if not preserve_whitespace and len(r) and r[-1].text and r[-1].text.endswith(' '):
                                r[-1].text = r[-1].text.rstrip()
                                add_text(' ', True)
                            makeelement(r, 'w:softHyphen')
                        elif x:
                            # Similarly, leading spaces after a soft hyphen
                            # must be emitted as preserved whitespace
                            if not preserve_whitespace and x.startswith(' ') and len(r) and r[-1].tag and 'softHyphen' in r[-1].tag:
                                x = x.lstrip()
                                add_text(' ', True)
                            add_text(x, preserve_whitespace)
                else:
                    add_text('', preserve_whitespace)
            if bookmark is not None:
                makeelement(r, 'w:bookmarkEnd', w_id=str(bid))
    def __repr__(self):
        return repr(self.texts)
    def is_empty(self):
        # True when the run would contribute nothing to the output
        if not self.texts:
            return True
        if len(self.texts) == 1 and self.texts[0][:2] == ('', False):
            return True
        return False
    @property
    def style_weight(self):
        # Total number of characters rendered with this run's style; used to
        # pick a block's dominant text style
        ans = 0
        for text, preserve_whitespace, bookmark in self.texts:
            if isinstance(text, str):
                ans += len(text)
        return ans
class Block:
    def __init__(self, namespace, styles_manager, links_manager, html_block, style, is_table_cell=False, float_spec=None, is_list_item=False, parent_bg=None):
        # One Block corresponds to one DOCX paragraph generated from the
        # HTML element html_block with computed style `style`.
        self.force_not_empty = False
        self.namespace = namespace
        self.bookmarks = set()
        # For list items remember the originating tag/style so numbering can
        # be attached later; may be handed on via resolve_skipped()
        self.list_tag = (html_block, style) if is_list_item else None
        self.is_first_block = False
        self.numbering_id = None
        self.parent_items = None
        self.html_block = html_block
        self.html_tag = barename(html_block.tag)
        # Blocks participating in a float register themselves with the
        # FloatSpec that controls them
        self.float_spec = float_spec
        if float_spec is not None:
            float_spec.blocks.append(self)
        self.html_style = style
        self.style = styles_manager.create_block_style(style, html_block, is_table_cell=is_table_cell, parent_bg=parent_bg)
        self.styles_manager, self.links_manager = styles_manager, links_manager
        self.keep_next = False
        self.runs = []
        self.skipped = False
        self.linked_style = None
        # Map CSS page-break properties onto Word paragraph pagination flags
        self.page_break_before = style['page-break-before'] == 'always'
        self.keep_lines = style['page-break-inside'] == 'avoid'
        self.page_break_after = False
        self.block_lang = None
def resolve_skipped(self, next_block):
if not self.is_empty():
return
if len(self.html_block) > 0 and self.html_block[0] is next_block.html_block:
self.skipped = True
if self.list_tag is not None:
next_block.list_tag = self.list_tag
def add_text(self, text, style, ignore_leading_whitespace=False, html_parent=None, is_parent_style=False, bookmark=None, link=None, lang=None):
ws = style['white-space']
preserve_whitespace = ws in {'pre', 'pre-wrap', '-o-pre-wrap'}
ts = self.styles_manager.create_text_style(style, is_parent_style=is_parent_style)
if self.runs and ts == self.runs[-1].style and link == self.runs[-1].link and lang == self.runs[-1].lang:
run = self.runs[-1]
else:
run = TextRun(self.namespace, ts, self.html_block if html_parent is None else html_parent, lang=lang)
self.runs.append(run)
if ignore_leading_whitespace and not preserve_whitespace:
text = text.lstrip()
if preserve_whitespace or ws == 'pre-line':
for text in text.splitlines():
run.add_text(text, preserve_whitespace, bookmark=bookmark, link=link)
bookmark = None
run.add_break()
else:
run.add_text(text, preserve_whitespace, bookmark=bookmark, link=link)
def add_break(self, clear='none', bookmark=None):
if self.runs:
run = self.runs[-1]
else:
run = TextRun(self.namespace, self.styles_manager.create_text_style(self.html_style), self.html_block)
self.runs.append(run)
run.add_break(clear=clear, bookmark=bookmark)
def add_image(self, drawing, bookmark=None):
if self.runs:
run = self.runs[-1]
else:
run = TextRun(self.namespace, self.styles_manager.create_text_style(self.html_style), self.html_block)
self.runs.append(run)
run.add_image(drawing, bookmark=bookmark)
def serialize(self, body):
makeelement = self.namespace.makeelement
p = makeelement(body, 'w:p')
end_bookmarks = []
for bmark in self.bookmarks:
end_bookmarks.append(str(self.links_manager.bookmark_id))
makeelement(p, 'w:bookmarkStart', w_id=end_bookmarks[-1], w_name=bmark)
if self.block_lang:
rpr = makeelement(p, 'w:rPr')
makeelement(rpr, 'w:lang', w_val=self.block_lang, w_bidi=self.block_lang, w_eastAsia=self.block_lang)
ppr = makeelement(p, 'w:pPr')
if self.keep_next:
makeelement(ppr, 'w:keepNext')
if self.float_spec is not None:
self.float_spec.serialize(self, ppr)
if self.numbering_id is not None:
numpr = makeelement(ppr, 'w:numPr')
makeelement(numpr, 'w:ilvl', w_val=str(self.numbering_id[1]))
makeelement(numpr, 'w:numId', w_val=str(self.numbering_id[0]))
if self.linked_style is not None:
makeelement(ppr, 'w:pStyle', w_val=self.linked_style.id)
elif self.style.id:
makeelement(ppr, 'w:pStyle', w_val=self.style.id)
if self.is_first_block:
makeelement(ppr, 'w:pageBreakBefore', w_val='off')
elif self.page_break_before:
makeelement(ppr, 'w:pageBreakBefore', w_val='on')
if self.keep_lines:
makeelement(ppr, 'w:keepLines', w_val='on')
for run in self.runs:
run.serialize(p, self.links_manager)
for bmark in end_bookmarks:
makeelement(p, 'w:bookmarkEnd', w_id=bmark)
def __repr__(self):
return 'Block(%r)' % self.runs
__str__ = __repr__
def is_empty(self):
if self.force_not_empty:
return False
for run in self.runs:
if not run.is_empty():
return False
return True
class Blocks:
    """Ordered collection of all Block/Table objects for the document.

    Tracks the block currently under construction, the set of open HTML
    block tags, and table nesting. Also acts as a context manager wrapping
    the processing of a single HTML file: __enter__ records the position of
    the file's first block and __exit__ performs per-file cleanup.

    Note: ``block_map`` only exists after __enter__ has been called.
    """

    def __init__(self, namespace, styles_manager, links_manager):
        self.top_bookmark = None
        self.namespace = namespace
        self.styles_manager = styles_manager
        self.links_manager = links_manager
        self.all_blocks = []      # every block, in document order
        self.pos = 0              # index of the first block of the current file
        self.current_block = None
        self.items = []           # top-level items (blocks and tables)
        self.tables = []          # stack of currently open tables
        self.current_table = None
        self.open_html_blocks = set()
        self.html_tag_start_blocks = {}

    def current_or_new_block(self, html_tag, tag_style):
        """Return the block under construction, starting one if needed."""
        return self.current_block or self.start_new_block(html_tag, tag_style)

    def end_current_block(self):
        """Finalize the block under construction, routing it either into the
        current table row or into the top-level items list."""
        if self.current_block is not None:
            self.all_blocks.append(self.current_block)
            if self.current_table is not None and self.current_table.current_row is not None:
                self.current_table.add_block(self.current_block)
            else:
                self.block_map[self.current_block] = len(self.items)
                self.items.append(self.current_block)
                self.current_block.parent_items = self.items
        self.current_block = None

    def start_new_block(self, html_block, style, is_table_cell=False, float_spec=None, is_list_item=False):
        """End the current block and begin a new one for *html_block*,
        propagating the parent tag's background color if it has one."""
        parent_bg = None
        if html_block is not None:
            p = html_block.getparent()
            b = self.html_tag_start_blocks.get(p)
            if b is not None:
                ps = self.styles_manager.styles_for_html_blocks.get(p)
                if ps is not None and ps.background_color is not None:
                    parent_bg = ps.background_color
        self.end_current_block()
        self.current_block = Block(
            self.namespace, self.styles_manager, self.links_manager, html_block, style,
            is_table_cell=is_table_cell, float_spec=float_spec, is_list_item=is_list_item,
            parent_bg=parent_bg)
        self.html_tag_start_blocks[html_block] = self.current_block
        self.open_html_blocks.add(html_block)
        return self.current_block

    def start_new_table(self, html_tag, tag_style=None):
        self.current_table = Table(self.namespace, html_tag, tag_style)
        self.tables.append(self.current_table)

    def start_new_row(self, html_tag, tag_style):
        if self.current_table is None:
            self.start_new_table(html_tag)  # row encountered outside a table
        self.current_table.start_new_row(html_tag, tag_style)

    def start_new_cell(self, html_tag, tag_style):
        if self.current_table is None:
            self.start_new_table(html_tag)  # cell encountered outside a table
        self.current_table.start_new_cell(html_tag, tag_style)

    def finish_tag(self, html_tag):
        """Close the block/table structures that were opened for *html_tag*."""
        if self.current_block is not None and html_tag in self.open_html_blocks:
            start_block = self.html_tag_start_blocks.get(html_tag)
            if start_block is not None and start_block.html_style['page-break-after'] == 'always':
                self.current_block.page_break_after = True
            self.end_current_block()
            self.open_html_blocks.discard(html_tag)

        if self.current_table is not None:
            table_finished = self.current_table.finish_tag(html_tag)
            if table_finished:
                table = self.tables[-1]
                del self.tables[-1]
                if self.tables:
                    # Nested table: attach it to its parent table
                    self.current_table = self.tables[-1]
                    self.current_table.add_table(table)
                else:
                    self.current_table = None
                    self.block_map[table] = len(self.items)
                    self.items.append(table)

    def serialize(self, body):
        for item in self.items:
            item.serialize(body)

    def delete_block_at(self, pos=None):
        """Remove the block at *pos* (default: first block of the current
        file), transferring its bookmarks and page-break flags to the
        following block, if any."""
        pos = self.pos if pos is None else pos
        block = self.all_blocks[pos]
        del self.all_blocks[pos]
        bpos = self.block_map.pop(block, None)
        if bpos is not None:
            del self.items[bpos]
        else:
            items = self.items if block.parent_items is None else block.parent_items
            items.remove(block)
        block.parent_items = None
        if block.float_spec is not None:
            block.float_spec.blocks.remove(block)
        try:
            next_block = self.all_blocks[pos]
            next_block.bookmarks.update(block.bookmarks)
            for attr in 'page_break_after page_break_before'.split():
                setattr(next_block, attr, getattr(block, attr))
        except (IndexError, KeyError):
            pass

    def __enter__(self):
        # Start processing a new HTML file
        self.pos = len(self.all_blocks)
        self.block_map = {}

    def __exit__(self, etype, value, traceback):
        if value is not None:
            return  # Since there was an exception, the data structures are not in a consistent state
        if self.current_block is not None:
            self.all_blocks.append(self.current_block)
        self.current_block = None
        if len(self.all_blocks) > self.pos and self.all_blocks[self.pos].is_empty():
            # Delete the empty block corresponding to the <body> tag when the
            # body tag has no inline content before its first sub-block
            self.delete_block_at(self.pos)
        if self.pos > 0 and self.pos < len(self.all_blocks):
            # Insert a page break corresponding to the start of the html file
            self.all_blocks[self.pos].page_break_before = True
        if self.top_bookmark is not None:
            self.all_blocks[self.pos].bookmarks.add(self.top_bookmark)
        self.top_bookmark = None
        self.block_map = {}

    def apply_page_break_after(self):
        """Convert page_break_after flags into page_break_before on the next
        sibling block (DOCX has no page-break-after)."""
        for i, block in enumerate(self.all_blocks):
            if block.page_break_after and i < len(self.all_blocks) - 1:
                next_block = self.all_blocks[i + 1]
                if next_block.parent_items is block.parent_items and block.parent_items is self.items:
                    next_block.page_break_before = True

    def resolve_language(self):
        """Hoist the most common run language of each block up to the block,
        clearing it from runs and dropping it entirely when it matches the
        document default."""
        default_lang = self.styles_manager.document_lang
        for block in self.all_blocks:
            count = Counter()
            for run in block.runs:
                count[run.lang] += 1
            if count:
                block.block_lang = bl = count.most_common(1)[0][0]
                for run in block.runs:
                    if run.lang == bl:
                        run.lang = None
                if bl == default_lang:
                    block.block_lang = None

    def __repr__(self):
        # Fixed: previously referenced the nonexistent self.runs (copy-paste
        # from Block.__repr__), which made repr() raise AttributeError.
        return 'Blocks(%r)' % self.all_blocks
class Convert:
    """Drive the conversion of an OEB book into a DOCX document.

    Walks the spine converting each HTML file into Block/Table objects,
    post-processes the collected blocks, then delegates serialization of
    the individual DOCX parts to the various manager objects.
    """

    # Word does not apply default styling to hyperlinks, so we ensure they get
    # default styling (the conversion pipeline does not apply any styling to
    # them).
    base_css = '''
    a[href] { text-decoration: underline; color: blue }
    '''

    def __init__(self, oeb, docx, mi, add_cover, add_toc):
        self.oeb, self.docx, self.add_cover, self.add_toc = oeb, docx, add_cover, add_toc
        self.log, self.opts = docx.log, docx.opts
        self.mi = mi
        self.cover_img = None
        p = self.opts.output_profile
        p.width_pts, p.height_pts = page_effective_area(self.opts)

    def __call__(self):
        """Run the full conversion: rasterize SVGs, process the spine,
        post-process the blocks and write out the DOCX parts."""
        from calibre.ebooks.oeb.transforms.rasterize import SVGRasterizer
        self.svg_rasterizer = SVGRasterizer(base_css=self.base_css)
        self.svg_rasterizer(self.oeb, self.opts)
        self.styles_manager = StylesManager(self.docx.namespace, self.log, self.mi.language)
        self.links_manager = LinksManager(self.docx.namespace, self.docx.document_relationships, self.log)
        self.images_manager = ImagesManager(self.oeb, self.docx.document_relationships, self.opts, self.svg_rasterizer)
        self.lists_manager = ListsManager(self.docx)
        self.fonts_manager = FontsManager(self.docx.namespace, self.oeb, self.opts)
        self.blocks = Blocks(self.docx.namespace, self.styles_manager, self.links_manager)
        self.current_link = self.current_lang = None
        for item in self.oeb.spine:
            self.log.debug('Processing', item.href)
            self.process_item(item)
        if self.add_toc:
            self.links_manager.process_toc_links(self.oeb)
        if self.add_cover and self.oeb.metadata.cover and str(self.oeb.metadata.cover[0]) in self.oeb.manifest.ids:
            cover_id = str(self.oeb.metadata.cover[0])
            item = self.oeb.manifest.ids[cover_id]
            self.cover_img = self.images_manager.read_image(item.href)
        # Remove wrapper blocks that produced no content of their own (their
        # first HTML child became the following block).
        all_blocks = self.blocks.all_blocks
        remove_blocks = []
        for i, block in enumerate(all_blocks):
            try:
                nb = all_blocks[i+1]
            except IndexError:
                break
            block.resolve_skipped(nb)
            if block.skipped:
                remove_blocks.append((i, block))
        for pos, block in reversed(remove_blocks):
            self.blocks.delete_block_at(pos)
        self.blocks.all_blocks[0].is_first_block = True
        self.blocks.apply_page_break_after()
        self.blocks.resolve_language()
        if self.cover_img is not None:
            self.cover_img = self.images_manager.create_cover_markup(self.cover_img, self.opts.preserve_cover_aspect_ratio, *page_size(self.opts))
        self.lists_manager.finalize(all_blocks)
        self.styles_manager.finalize(all_blocks)
        self.write()

    def process_item(self, item):
        """Convert one spine item, wrapping each of its <body> elements in a
        Blocks context so per-file cleanup runs."""
        self.current_item = item
        stylizer = self.svg_rasterizer.stylizer(item)
        self.abshref = self.images_manager.abshref = item.abshref
        self.current_lang = lang_for_tag(item.data) or self.styles_manager.document_lang
        for i, body in enumerate(XPath('//h:body')(item.data)):
            with self.blocks:
                self.blocks.top_bookmark = self.links_manager.bookmark_for_anchor(self.links_manager.top_anchor, self.current_item, body)
                self.process_tag(body, stylizer, is_first_tag=i == 0)

    def process_tag(self, html_tag, stylizer, is_first_tag=False, float_spec=None):
        """Recursively convert *html_tag* and its children, dispatching on
        the computed CSS display value, then handle the tag's tail text."""
        tagname = barename(html_tag.tag)
        tag_style = stylizer.style(html_tag)
        ignore_tag_contents = tagname in {'script', 'style', 'title', 'meta'} or tag_style.is_hidden
        display = tag_style._get('display')
        is_block = False
        if not ignore_tag_contents:
            # Save link/lang context so it can be restored after the subtree
            previous_link = self.current_link
            if tagname == 'a' and html_tag.get('href'):
                self.current_link = (self.current_item, html_tag.get('href'), html_tag.get('title'))
            previous_lang = self.current_lang
            tag_lang = lang_for_tag(html_tag)
            if tag_lang:
                self.current_lang = tag_lang
            is_float = tag_style['float'] in {'left', 'right'} and not is_first_tag
            if float_spec is None and is_float:
                float_spec = FloatSpec(self.docx.namespace, html_tag, tag_style)
            if display in {'inline', 'inline-block'} or tagname == 'br':  # <br> has display:block but we dont want to start a new paragraph
                if is_float and float_spec.is_dropcaps:
                    self.add_block_tag(tagname, html_tag, tag_style, stylizer, float_spec=float_spec)
                    float_spec = None
                else:
                    self.add_inline_tag(tagname, html_tag, tag_style, stylizer)
            elif display == 'list-item':
                self.add_block_tag(tagname, html_tag, tag_style, stylizer, is_list_item=True)
            elif display.startswith('table') or display == 'inline-table':
                if display == 'table-cell':
                    self.blocks.start_new_cell(html_tag, tag_style)
                    self.add_block_tag(tagname, html_tag, tag_style, stylizer, is_table_cell=True)
                elif display == 'table-row':
                    self.blocks.start_new_row(html_tag, tag_style)
                elif display in {'table', 'inline-table'}:
                    self.blocks.end_current_block()
                    self.blocks.start_new_table(html_tag, tag_style)
            else:
                if tagname == 'img' and is_float:
                    # Image is floating so dont start a new paragraph for it
                    self.add_inline_tag(tagname, html_tag, tag_style, stylizer)
                else:
                    if tagname == 'hr':
                        # A horizontal rule renders as a block with only a
                        # top border.
                        for edge in 'right bottom left'.split():
                            tag_style.set('border-%s-style' % edge, 'none')
                    self.add_block_tag(tagname, html_tag, tag_style, stylizer, float_spec=float_spec)
            for child in html_tag.iterchildren():
                if isinstance(getattr(child, 'tag', None), string_or_bytes):
                    self.process_tag(child, stylizer, float_spec=float_spec)
                else:  # Comment/PI/etc.
                    tail = getattr(child, 'tail', None)
                    if tail:
                        block = self.create_block_from_parent(html_tag, stylizer)
                        block.add_text(tail, tag_style, is_parent_style=False, link=self.current_link, lang=self.current_lang)
            is_block = html_tag in self.blocks.open_html_blocks
            self.blocks.finish_tag(html_tag)
            if is_block and tag_style['page-break-after'] == 'avoid':
                self.blocks.all_blocks[-1].keep_next = True
            self.current_link = previous_link
            self.current_lang = previous_lang
        # Now, process the tail if any
        if display == 'table-row':
            return  # We ignore the tail for these tags
        ignore_whitespace_tail = is_block or display.startswith('table')
        if not is_first_tag and html_tag.tail and (not ignore_whitespace_tail or not html_tag.tail.isspace()):
            # Ignore trailing space after a block tag, as otherwise it will
            # become a new empty paragraph
            block = self.create_block_from_parent(html_tag, stylizer)
            block.add_text(html_tag.tail, stylizer.style(html_tag.getparent()), is_parent_style=True, link=self.current_link, lang=self.current_lang)

    def create_block_from_parent(self, html_tag, stylizer):
        """Return the current block or a new one styled from *html_tag*'s
        parent, for inline content and tail text."""
        parent = html_tag.getparent()
        block = self.blocks.current_or_new_block(parent, stylizer.style(parent))
        # Do not inherit page-break-before from parent
        block.page_break_before = False
        return block

    def add_block_tag(self, tagname, html_tag, tag_style, stylizer, is_table_cell=False, float_spec=None, is_list_item=False):
        """Start a new block for a block-level tag and add its anchor and
        leading text (or image) to it."""
        block = self.blocks.start_new_block(
            html_tag, tag_style, is_table_cell=is_table_cell, float_spec=float_spec, is_list_item=is_list_item)
        anchor = html_tag.get('id') or html_tag.get('name')
        if anchor:
            block.bookmarks.add(self.bookmark_for_anchor(anchor, html_tag))
        if tagname == 'img':
            self.images_manager.add_image(html_tag, block, stylizer, as_block=True)
        else:
            text = html_tag.text
            is_list_item = tagname == 'li'
            has_sublist = is_list_item and len(html_tag) and isinstance(html_tag[0].tag, str) and barename(html_tag[0].tag) in ('ul', 'ol') and len(html_tag[0])
            if text and has_sublist and not text.strip():
                text = ''  # whitespace only, ignore
            if text:
                block.add_text(text, tag_style, ignore_leading_whitespace=True, is_parent_style=True, link=self.current_link, lang=self.current_lang)
            elif has_sublist:
                # A list item that only wraps a nested list must still emit
                # an (empty) paragraph so the numbering stays consistent.
                block.force_not_empty = True

    def add_inline_tag(self, tagname, html_tag, tag_style, stylizer):
        """Add an inline tag's content (text, <br> or <img>) to the current
        block, emitting a bookmark for any id/name anchor."""
        anchor = html_tag.get('id') or html_tag.get('name') or None
        bmark = None
        if anchor:
            bmark = self.bookmark_for_anchor(anchor, html_tag)
        if tagname == 'br':
            # A trailing <br> at the very end of its parent is ignored
            if html_tag.tail or html_tag is not tuple(html_tag.getparent().iterchildren('*'))[-1]:
                block = self.create_block_from_parent(html_tag, stylizer)
                block.add_break(clear={'both':'all', 'left':'left', 'right':'right'}.get(tag_style['clear'], 'none'), bookmark=bmark)
        elif tagname == 'img':
            block = self.create_block_from_parent(html_tag, stylizer)
            self.images_manager.add_image(html_tag, block, stylizer, bookmark=bmark)
        else:
            if html_tag.text:
                block = self.create_block_from_parent(html_tag, stylizer)
                block.add_text(html_tag.text, tag_style, is_parent_style=False, bookmark=bmark, link=self.current_link, lang=self.current_lang)
            elif bmark:
                # No text, but the anchor must still be emitted
                block = self.create_block_from_parent(html_tag, stylizer)
                block.add_text('', tag_style, is_parent_style=False, bookmark=bmark, link=self.current_link, lang=self.current_lang)

    def bookmark_for_anchor(self, anchor, html_tag):
        return self.links_manager.bookmark_for_anchor(anchor, self.current_item, html_tag)

    def write(self):
        """Serialize all collected data into the various DOCX parts."""
        self.docx.document, self.docx.styles, body = create_skeleton(self.opts)
        self.blocks.serialize(body)
        body.append(body[0])  # Move <sectPr> to the end
        if self.links_manager.toc:
            self.links_manager.serialize_toc(body, self.styles_manager.primary_heading_style)
        if self.cover_img is not None:
            self.images_manager.write_cover_block(body, self.cover_img)
        self.styles_manager.serialize(self.docx.styles)
        self.images_manager.serialize(self.docx.images)
        self.fonts_manager.serialize(self.styles_manager.text_styles, self.docx.font_table, self.docx.embedded_fonts, self.docx.fonts)
        self.lists_manager.serialize(self.docx.numbering)
| 28,602 | Python | .py | 557 | 39.732496 | 160 | 0.606516 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,584 | utils.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/utils.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.color3 import parse_color_string
def int_or_zero(raw):
    """Coerce *raw* to an int, returning 0 when conversion is impossible
    (non-numeric string, None, or an unsupported type)."""
    try:
        value = int(raw)
    except (ValueError, TypeError, AttributeError):
        value = 0
    return value
# convert_color() {{{
def convert_color(value):
    """Convert a CSS color string to a DOCX RRGGBB hex value.

    Returns 'auto' for currentColor, and None for empty, unparseable or
    (effectively) fully transparent colors.
    """
    if not value:
        return
    if value.lower() == 'currentcolor':
        return 'auto'
    parsed = parse_color_string(value)
    if parsed is None or parsed.alpha < 0.01:
        return
    return '%02X%02X%02X' % (
        int(parsed.red * 255), int(parsed.green * 255), int(parsed.blue * 255))
def test_convert_color(return_tests=False):
    """Self-test for convert_color().

    Returns the unittest suite when *return_tests* is True, otherwise runs
    it immediately with a verbose text runner.
    """
    import unittest

    class TestColors(unittest.TestCase):

        def test_color_conversion(self):
            ae = self.assertEqual
            cc = convert_color
            ae(None, cc(None))  # missing value
            ae(None, cc('transparent'))  # fully transparent -> dropped
            ae(None, cc('none'))
            ae(None, cc('#12j456'))  # unparseable hex
            ae('auto', cc('currentColor'))
            ae('F0F8FF', cc('AliceBlue'))  # named colors
            ae('000000', cc('black'))
            ae('FF0000', cc('red'))
            ae('00FF00', cc('lime'))
            ae(cc('#001'), '000011')  # 3-digit hex expansion
            ae('12345D', cc('#12345d'))
            ae('FFFFFF', cc('rgb(255, 255, 255)'))
            ae('FF0000', cc('rgba(255, 0, 0, 23)'))  # out-of-range alpha clamped

    tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestColors)
    if return_tests:
        return tests
    unittest.TextTestRunner(verbosity=4).run(tests)
# }}}
| 1,533 | Python | .py | 45 | 26.222222 | 89 | 0.578591 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,585 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
| 112 | Python | .py | 3 | 34.666667 | 61 | 0.673077 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,586 | fonts.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/fonts.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import defaultdict
from uuid import uuid4
from calibre.ebooks.oeb.base import OEB_STYLES
from calibre.ebooks.oeb.transforms.subset import find_font_face_rules
def obfuscate_font_data(data, key):
    """Obfuscate font *data* for DOCX embedding (ODTTF scheme).

    XORs the first 32 bytes with the reversed byte sequence of the UUID
    *key*, leaving the remainder of the font untouched.
    """
    mask = bytearray(reversed(key.bytes))
    mask_len = len(mask)
    scrambled = bytes(
        byte ^ mask[i % mask_len] for i, byte in enumerate(bytearray(data[:32])))
    return scrambled + data[32:]
class FontsManager:
    """Writes the DOCX font table, embedding the fonts from @font-face rules
    that are actually used by the text styles."""

    def __init__(self, namespace, oeb, opts):
        self.namespace = namespace
        self.oeb, self.log, self.opts = oeb, oeb.log, opts

    def serialize(self, text_styles, fonts, embed_relationships, font_data_map):
        """Populate fontTable.xml (*fonts*), its relationships and the map of
        obfuscated font data files from the used font families."""
        makeelement = self.namespace.makeelement
        # Collect the set of used families, de-duplicated case-insensitively
        # while keeping the first-seen capitalization.
        font_families, seen = set(), set()
        for ts in text_styles:
            if ts.font_family:
                lf = ts.font_family.lower()
                if lf not in seen:
                    seen.add(lf)
                    font_families.add(ts.font_family)
        family_map = {}
        for family in sorted(font_families):
            family_map[family] = makeelement(fonts, 'w:font', w_name=family)
        # Gather @font-face rules from every stylesheet in the manifest
        embedded_fonts = []
        for item in self.oeb.manifest:
            if item.media_type in OEB_STYLES and hasattr(item.data, 'cssRules'):
                embedded_fonts.extend(find_font_face_rules(item, self.oeb))
        num = 0
        face_map = defaultdict(set)  # family -> set of already-emitted face tags
        rel_map = {}  # manifest item -> relationship id
        for ef in embedded_fonts:
            ff = ef['font-family'][0]
            if ff not in font_families:
                continue
            num += 1
            bold = ef['weight'] > 400
            italic = ef['font-style'] != 'normal'
            # Face tag for the w:embed* element name: Regular/Bold/Italic/BoldItalic
            tag = 'Regular'
            if bold or italic:
                tag = 'Italic'
                if bold and italic:
                    tag = 'BoldItalic'
                elif bold:
                    tag = 'Bold'
            if tag in face_map[ff]:
                continue  # only one embedding per (family, face)
            face_map[ff].add(tag)
            font = family_map[ff]
            key = uuid4()
            item = ef['item']
            rid = rel_map.get(item)
            if rid is None:
                # First use of this font file: create the relationship and
                # store the obfuscated data.
                rel_map[item] = rid = 'rId%d' % num
                fname = 'fonts/font%d.odttf' % num
                makeelement(embed_relationships, 'Relationship', Id=rid, Type=self.namespace.names['EMBEDDED_FONT'], Target=fname)
                font_data_map['word/' + fname] = obfuscate_font_data(item.data, key)
            # NOTE(review): a fresh key is generated on every iteration, but
            # the font data is obfuscated only the first time an item is
            # seen; if the same item is referenced by a second face, the
            # fontKey written here would not match the stored data — confirm
            # whether that case can occur in practice.
            makeelement(font, 'w:embed' + tag, r_id=rid,
                w_fontKey='{%s}' % key.urn.rpartition(':')[-1].upper(),
                w_subsetted="true" if self.opts.subset_embedded_fonts else "false")
| 2,795 | Python | .py | 64 | 32.109375 | 130 | 0.55719 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,587 | lists.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/lists.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
from collections import defaultdict
from operator import attrgetter
from polyglot.builtins import iteritems, itervalues
# CSS list-style-type values that we know how to map to DOCX numbering.
LIST_STYLES = frozenset(
    'disc circle square decimal decimal-leading-zero lower-roman upper-roman'
    ' lower-greek lower-alpha lower-latin upper-alpha upper-latin hiragana hebrew'
    ' katakana-iroha cjk-ideographic'.split())

# Map CSS list-style-type -> DOCX numFmt value (or bullet character).
STYLE_MAP = {
    'disc': 'bullet',
    'circle': 'o',
    'square': '\uf0a7',
    'decimal': 'decimal',
    'decimal-leading-zero': 'decimalZero',
    'lower-roman': 'lowerRoman',
    'upper-roman': 'upperRoman',
    'lower-alpha': 'lowerLetter',
    'lower-latin': 'lowerLetter',
    'upper-alpha': 'upperLetter',
    'upper-latin': 'upperLetter',
    'hiragana': 'aiueo',
    'hebrew': 'hebrew1',
    'katakana-iroha': 'iroha',
    'cjk-ideographic': 'chineseCounting',
}


def find_list_containers(list_tag, tag_style):
    """Return the ancestors of *list_tag* whose computed list-style-type is
    a recognized list style, ordered innermost first."""
    stylizer = tag_style._stylizer
    containers = []
    node = list_tag
    parent = node.getparent()
    while parent is not None and parent is not node:
        node = parent
        node_style = stylizer.style(node)
        list_style = (node_style._style.get('list-style-type', None) or '').lower()
        if list_style in LIST_STYLES:
            containers.append(node)
        parent = node.getparent()
    return containers
class NumberingDefinition:
    """An abstract numbering definition (<w:abstractNum>) covering all the
    nesting levels of one top-most HTML list."""

    def __init__(self, top_most, stylizer, namespace):
        self.namespace = namespace
        self.top_most = top_most
        self.stylizer = stylizer
        # ilvl (nesting depth) -> list of
        # (container, list_tag, block, list_type, tag_style) tuples
        self.level_map = defaultdict(list)
        self.num_id = None

    def finalize(self):
        """Build the tuple of Level objects from the collected level_map."""
        items_for_level = defaultdict(list)
        container_for_level = {}
        type_for_level = {}
        for ilvl, items in iteritems(self.level_map):
            for container, list_tag, block, list_type, tag_style in items:
                items_for_level[ilvl].append(list_tag)
                # The last entry seen wins for a level's container and type
                container_for_level[ilvl] = container
                type_for_level[ilvl] = list_type
        self.levels = tuple(
            Level(type_for_level[ilvl], container_for_level[ilvl], items_for_level[ilvl], ilvl=ilvl)
            for ilvl in sorted(self.level_map)
        )

    def __hash__(self):
        # NOTE(review): __eq__ is not defined, so dict-based de-duplication
        # of definitions compares by identity despite equal hashes — confirm
        # whether definition sharing is actually intended.
        return hash(self.levels)

    def link_blocks(self):
        """Point every block in this definition at its (numId, ilvl) pair;
        numId is 1-based in the document."""
        for ilvl, items in iteritems(self.level_map):
            for container, list_tag, block, list_type, tag_style in items:
                block.numbering_id = (self.num_id + 1, ilvl)

    def serialize(self, parent):
        """Write the <w:abstractNum> element and all its levels."""
        makeelement = self.namespace.makeelement
        an = makeelement(parent, 'w:abstractNum', w_abstractNumId=str(self.num_id))
        makeelement(an, 'w:multiLevelType', w_val='hybridMultilevel')
        makeelement(an, 'w:name', w_val='List %d' % (self.num_id + 1))
        for level in self.levels:
            level.serialize(an, makeelement)
class Level:
    """One list level (<w:lvl>) inside an abstract numbering definition."""

    def __init__(self, list_type, container, items, ilvl=0):
        self.ilvl = ilvl
        # Starting number: the container's start="" attribute, overridden by
        # the value="" attribute of the first list item when present.
        start = 1
        try:
            start = int(container.get('start'))
        except Exception:
            pass
        if items:
            try:
                start = int(items[0].get('value'))
            except Exception:
                pass
        self.start = start
        if list_type in {'disc', 'circle', 'square'}:
            self.num_fmt = 'bullet'
            self.lvl_text = '\uf0b7' if list_type == 'disc' else STYLE_MAP[list_type]
        else:
            self.num_fmt = STYLE_MAP.get(list_type, 'decimal')
            self.lvl_text = f'%{self.ilvl + 1}.'

    def __hash__(self):
        return hash((self.start, self.num_fmt, self.lvl_text))

    def serialize(self, parent, makeelement):
        """Write this level as a <w:lvl> child of *parent*."""
        lvl = makeelement(parent, 'w:lvl', w_ilvl=str(self.ilvl))
        makeelement(lvl, 'w:start', w_val=str(self.start))
        makeelement(lvl, 'w:numFmt', w_val=self.num_fmt)
        makeelement(lvl, 'w:lvlText', w_val=self.lvl_text)
        makeelement(lvl, 'w:lvlJc', w_val='left')
        # Hanging indent, with each deeper level shifted further right
        ppr = makeelement(lvl, 'w:pPr')
        makeelement(ppr, 'w:ind', w_hanging='360', w_left=str(1152 + self.ilvl * 360))
        if self.num_fmt == 'bullet':
            font = {'\uf0b7': 'Symbol', '\uf0a7': 'Wingdings'}.get(self.lvl_text, 'Courier New')
            rpr = makeelement(lvl, 'w:rPr')
            makeelement(rpr, 'w:rFonts', w_ascii=font, w_hAnsi=font, w_hint="default")
class ListsManager:
    """Collects list membership from all blocks and produces the DOCX
    numbering definitions (numbering.xml)."""

    def __init__(self, docx):
        self.namespace = docx.namespace
        self.lists = {}

    def finalize(self, all_blocks):
        """Group list-item blocks by their top-most list container, build
        NumberingDefinition objects, de-duplicate them and link every block
        to its (numId, ilvl)."""
        lists = {}
        for block in all_blocks:
            if block.list_tag is not None:
                list_tag, tag_style = block.list_tag
                list_type = (tag_style['list-style-type'] or '').lower()
                if list_type not in LIST_STYLES:
                    continue
                container_tags = find_list_containers(list_tag, tag_style)
                if not container_tags:
                    continue
                top_most = container_tags[-1]
                if top_most not in lists:
                    lists[top_most] = NumberingDefinition(top_most, tag_style._stylizer, self.namespace)
                l = lists[top_most]
                # Nesting depth below the top-most list container
                ilvl = len(container_tags) - 1
                l.level_map[ilvl].append((container_tags[0], list_tag, block, list_type, tag_style))
        [nd.finalize() for nd in itervalues(lists)]
        # De-duplicate equivalent definitions and assign sequential num_ids
        definitions = {}
        for defn in itervalues(lists):
            try:
                defn = definitions[defn]
            except KeyError:
                definitions[defn] = defn
                defn.num_id = len(definitions) - 1
            defn.link_blocks()
        self.definitions = sorted(itervalues(definitions), key=attrgetter('num_id'))

    def serialize(self, parent):
        """Write all abstract definitions followed by the concrete <w:num>
        elements that reference them (numId is 1-based, abstractNumId
        0-based)."""
        for defn in self.definitions:
            defn.serialize(parent)
        makeelement = self.namespace.makeelement
        for defn in self.definitions:
            n = makeelement(parent, 'w:num', w_numId=str(defn.num_id + 1))
            makeelement(n, 'w:abstractNumId', w_val=str(defn.num_id))
| 6,059 | Python | .py | 142 | 33.176056 | 108 | 0.595315 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,588 | styles.py | kovidgoyal_calibre/src/calibre/ebooks/docx/writer/styles.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
from collections import Counter, defaultdict
from operator import attrgetter
from lxml import etree
from tinycss.css21 import CSS21Parser
from calibre.ebooks import parse_css_length
from calibre.ebooks.docx.writer.utils import convert_color, int_or_zero
from calibre.utils.localization import lang_as_iso639_1
from polyglot.builtins import iteritems
# Shared CSS parser instance used by the font-family helpers below.
css_parser = CSS21Parser()

border_edges = ('left', 'top', 'right', 'bottom')
# Per-edge attribute name templates, instantiated via 'prop % edge'.
border_props = ('padding_%s', 'border_%s_width', 'border_%s_style', 'border_%s_color')
# Sentinel used to distinguish "no value" from a legitimate None.
ignore = object()
def parse_css_font_family(raw):
    """Yield the font family names from a CSS font-family value, stopping
    at the first 'inherit'."""
    decl, errs = css_parser.parse_style_attr('font-family:' + raw)
    if not decl:
        return
    for token in decl[0].value:
        # Substring test matches both the 'STRING' and 'IDENT' token types
        if token.type in 'STRING IDENT':
            name = token.value
            if name == 'inherit':
                break
            yield name
def css_font_family_to_docx(raw):
    """Map the first usable CSS font family in *raw* to a concrete Word
    font name, substituting defaults for the CSS generic families."""
    generic = {'serif':'Cambria', 'sansserif':'Candara', 'sans-serif':'Candara', 'fantasy':'Comic Sans', 'cursive':'Segoe Script'}
    for family in parse_css_font_family(raw):
        return generic.get(family.lower(), family)
def bmap(x):
    """Map truthiness onto the DOCX boolean strings 'on'/'off'."""
    if x:
        return 'on'
    return 'off'
def is_dropcaps(html_tag, tag_style):
    """True when the tag looks like a drop-caps candidate: floated left,
    fewer than two children and under five characters of text content."""
    if tag_style['float'] != 'left' or len(html_tag) >= 2:
        return False
    text = etree.tostring(html_tag, method='text', encoding='unicode', with_tail=False)
    return len(text) < 5
class CombinedStyle:
    """A named paragraph style that pairs a block style (*bs*) with a run
    style (*rs*), shared by a set of blocks (e.g. headings)."""

    def __init__(self, bs, rs, blocks, namespace):
        self.bs, self.rs, self.blocks = bs, rs, blocks
        self.namespace = namespace
        # Filled in later by the styles manager
        self.id = self.name = self.seq = None
        self.outline_level = None

    def apply(self):
        """Link every covered block (and its runs) to this named style."""
        for block in self.blocks:
            block.linked_style = self
            for run in block.runs:
                run.parent_style = self.rs

    def serialize(self, styles, normal_style):
        """Write this style as a <w:style> element; *normal_style* is the
        document default that everything else is based on."""
        makeelement = self.namespace.makeelement

        def w(x):
            # Clark-notation name in the w: namespace
            return '{{{}}}{}'.format(self.namespace.namespaces['w'], x)
        block = makeelement(styles, 'w:style', w_styleId=self.id, w_type='paragraph')
        makeelement(block, 'w:name', w_val=self.name)
        makeelement(block, 'w:qFormat')
        if self is not normal_style:
            makeelement(block, 'w:basedOn', w_val=normal_style.id)
        if self.seq == 0:
            # The first style is the document default
            block.set(w('default'), '1')
        pPr = makeelement(block, 'w:pPr')
        self.bs.serialize_properties(pPr, normal_style.bs)
        if self.outline_level is not None:
            # NOTE(review): serialized 1-based here — confirm this matches
            # how outline_level is assigned elsewhere.
            makeelement(pPr, 'w:outlineLvl', w_val=str(self.outline_level + 1))
        rPr = makeelement(block, 'w:rPr')
        self.rs.serialize_properties(rPr, normal_style.rs)
class FloatSpec:
    """Positioning information for a floated element, realized either as
    drop-caps or as a DOCX text frame (<w:framePr>)."""

    def __init__(self, namespace, html_tag, tag_style):
        self.makeelement = namespace.makeelement
        self.is_dropcaps = is_dropcaps(html_tag, tag_style)
        self.blocks = []  # the blocks rendered inside this float
        if self.is_dropcaps:
            self.dropcaps_lines = 3
        else:
            self.x_align = tag_style['float']
            self.w = self.h = None
            # Dimensions and margins are multiplied by 20 (pts -> twips,
            # assuming the style values are in pts — TODO confirm)
            if tag_style._get('width') != 'auto':
                self.w = int(20 * max(tag_style['min-width'], tag_style['width']))
            if tag_style._get('height') == 'auto':
                self.h_rule = 'auto'
            else:
                if tag_style['min-height'] > 0:
                    self.h_rule, self.h = 'atLeast', tag_style['min-height']
                else:
                    self.h_rule, self.h = 'exact', tag_style['height']
                self.h = int(20 * self.h)
            self.h_space = int(20 * max(tag_style['margin-right'], tag_style['margin-left']))
            self.v_space = int(20 * max(tag_style['margin-top'], tag_style['margin-bottom']))
        # Border/padding attributes are needed in serialize() in both cases
        read_css_block_borders(self, tag_style)

    def serialize(self, block, parent):
        """Add frame/drop-caps properties to *parent* (a <w:pPr>) for
        *block*, one of the blocks inside this float."""
        if self.is_dropcaps:
            attrs = dict(w_dropCap='drop', w_lines=str(self.dropcaps_lines), w_wrap='around', w_vAnchor='text', w_hAnchor='text')
        else:
            attrs = dict(
                w_wrap='around', w_vAnchor='text', w_hAnchor='text', w_xAlign=self.x_align, w_y='1',
                w_hSpace=str(self.h_space), w_vSpace=str(self.v_space), w_hRule=self.h_rule
            )
            if self.w is not None:
                attrs['w_w'] = str(self.w)
            if self.h is not None:
                attrs['w_h'] = str(self.h)
        self.makeelement(parent, 'w:framePr', **attrs)
        # Margins are already applied by the frame style, so override them to
        # be zero on individual blocks
        self.makeelement(parent, 'w:ind', w_left='0', w_leftChars='0', w_right='0', w_rightChars='0')
        # Only the first block keeps space-before and only the last keeps
        # space-after, so the float reads as one box.
        attrs = {}
        if block is self.blocks[0]:
            attrs.update(dict(w_before='0', w_beforeLines='0'))
        if block is self.blocks[-1]:
            attrs.update(dict(w_after='0', w_afterLines='0'))
        if attrs:
            self.makeelement(parent, 'w:spacing', **attrs)
        # Similarly apply the same border and padding properties to all blocks
        # in this floatspec
        bdr = self.makeelement(parent, 'w:pBdr')
        for edge in border_edges:
            padding = getattr(self, 'padding_' + edge)
            width = getattr(self, 'border_%s_width' % edge)
            bstyle = getattr(self, 'border_%s_style' % edge)
            self.makeelement(
                bdr, 'w:'+edge, w_space=str(padding), w_val=bstyle, w_sz=str(width), w_color=getattr(self, 'border_%s_color' % edge))
class DOCXStyle:
    """Base class for serializable DOCX styles.

    Equality and hashing are defined over ALL_PROPS so that identical
    styles can be detected and shared.
    """

    ALL_PROPS = ()
    TYPE = 'paragraph'

    def __init__(self, namespace):
        self.namespace = namespace
        # w('tag') expands a name into Clark notation in the w: namespace
        self.w = lambda x: '{{{}}}{}'.format(namespace.namespaces['w'], x)
        self.id = self.name = None
        self.next_style = None
        self.calculate_hash()

    def calculate_hash(self):
        # Cache the hash; must be called again if any ALL_PROPS value changes
        self._hash = hash(tuple(
            getattr(self, x) for x in self.ALL_PROPS))

    def makeelement(self, parent, name, **attrs):
        # Uses lxml's Element.makeelement: creates but does NOT append, so
        # callers must append the result themselves.
        return parent.makeelement(self.w(name), **{self.w(k):v for k, v in iteritems(attrs)})

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        for x in self.ALL_PROPS:
            if getattr(self, x) != getattr(other, x, None):
                return False
        return True

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        # NOTE(review): this looks broken — serialize() takes
        # (styles, normal_style) but is called with a single argument here,
        # and etree.tostring() returns bytes while __repr__ must return str.
        # Presumably only ever used for ad-hoc debugging; confirm before
        # relying on it.
        return etree.tostring(self.serialize(etree.Element(self.__class__.__name__, nsmap={'w':self.namespace.namespaces['w']})), pretty_print=True)
    __str__ = __repr__

    def serialize(self, styles, normal_style):
        """Write the style skeleton (id, name, basedOn) under *styles*;
        subclasses add their specific properties."""
        makeelement = self.makeelement
        style = makeelement(styles, 'style', styleId=self.id, type=self.TYPE)
        style.append(makeelement(style, 'name', val=self.name))
        if self is not normal_style:
            style.append(makeelement(style, 'basedOn', val=normal_style.id))
        styles.append(style)
        return style
# Map CSS border-style values to the closest WordprocessingML border style
# (the w:val attribute of border elements). 'hidden' is collapsed to
# 'none'; groove/ridge map to Word's 3-D engrave/emboss effects.
LINE_STYLES = {
    'none'  : 'none',
    'hidden': 'none',
    'dotted': 'dotted',
    'dashed': 'dashed',
    'solid' : 'single',
    'double': 'double',
    'groove': 'threeDEngrave',
    'ridge' : 'threeDEmboss',
    'inset' : 'inset',
    'outset': 'outset',
}
def convert_underline(items):
    '''Translate CSS text-decoration tokens into a DOCX underline spec.

    Returns '<style> <color>' (e.g. 'single FF0000', 'wave auto') when an
    underline was requested, or the empty string otherwise. Tokens that are
    neither a line style nor a decoration keyword are treated as a color.
    '''
    line_kinds = {'solid', 'double', 'dotted', 'dashed', 'wavy'}
    decorations = {'underline', 'overline', 'line-through', 'blink', 'none'}
    css_to_docx = {'solid': 'single', 'wavy': 'wave', 'dashed': 'dash'}
    line_style, color, underlined = 'solid', 'auto', False
    for token in items:
        if token in line_kinds:
            line_style = css_to_docx.get(token, token)
        elif token in decorations:
            # 'underline' switches underlining on, 'none' switches it off;
            # the remaining decoration keywords are handled elsewhere.
            if token == 'underline':
                underlined = True
            elif token == 'none':
                underlined = False
        else:
            color = convert_color(token)
    return f'{line_style} {color}' if underlined else ''
class TextStyle(DOCXStyle):

    '''Character (run-level) formatting derived from a stylizer CSS style.

    Instances are value objects: they hash/compare on ALL_PROPS (see
    DOCXStyle) so identical CSS formatting maps onto a single w:style of
    type "character".  css is presumably a calibre stylizer style object
    (dict-like lookup of computed CSS values plus helper attributes) --
    confirm against the callers.'''

    ALL_PROPS = ('font_family', 'font_size', 'bold', 'italic', 'color',
                 'background_color', 'underline', 'strike', 'dstrike', 'caps',
                 'shadow', 'small_caps', 'spacing', 'vertical_align', 'padding',
                 'border_style', 'border_width', 'border_color')
    TYPE = 'character'

    def __init__(self, namespace, css, is_parent_style=False):
        self.font_family = css_font_family_to_docx(css['font-family'])
        try:
            # w:sz is measured in half-points
            self.font_size = max(0, int(float(css['font-size']) * 2))  # stylizer normalizes all font sizes into pts
        except (ValueError, TypeError, AttributeError):
            self.font_size = None

        fw = css['font-weight']
        self.bold = (fw.lower() if hasattr(fw, 'lower') else fw) in {'bold', 'bolder'} or int_or_zero(fw) >= 700
        self.italic = css['font-style'].lower() in {'italic', 'oblique'}
        self.color = convert_color(css['color'])
        # Background shading is only emitted for actual runs, not for a
        # block's parent style
        self.background_color = None if is_parent_style else convert_color(css.backgroundColor)
        td = set((css.effective_text_decoration or '').split())
        self.underline = convert_underline(td)
        # line-through combined with overline is approximated by Word's
        # double strike-through
        self.dstrike = 'line-through' in td and 'overline' in td
        self.strike = not self.dstrike and 'line-through' in td
        self.text_transform = css['text-transform']  # TODO: If lowercase or capitalize, transform the actual text
        self.caps = self.text_transform == 'uppercase'
        self.small_caps = css['font-variant'].lower() in {'small-caps', 'smallcaps'}
        self.shadow = css['text-shadow'] not in {'none', None}
        try:
            # w:spacing is in twips (twentieths of a point)
            self.spacing = int(float(css['letter-spacing']) * 20)
        except (ValueError, TypeError, AttributeError):
            self.spacing = None
        va = css.first_vertical_align
        if isinstance(va, numbers.Number):
            # Numeric vertical-align becomes w:position, in half-points
            self.vertical_align = str(int(va * 2))
        else:
            val = {
                'top':'superscript', 'text-top':'superscript', 'sup':'superscript', 'super':'superscript',
                'bottom':'subscript', 'text-bottom':'subscript', 'sub':'subscript'}.get(va)
            self.vertical_align = val or 'baseline'

        self.padding = self.border_color = self.border_width = self.border_style = None
        if not is_parent_style:
            # DOCX does not support individual borders/padding for inline content,
            # so the four edges are merged into a single value; `ignore` is
            # presumably a module-level sentinel marking edges that disagree
            # -- confirm where it is defined.
            for edge in border_edges:
                # In DOCX padding can only be a positive integer
                try:
                    padding = max(0, int(css['padding-' + edge]))
                except ValueError:
                    padding = 0
                if self.padding is None:
                    self.padding = padding
                elif self.padding != padding:
                    self.padding = ignore
                val = css['border-%s-width' % edge]
                if not isinstance(val, numbers.Number):
                    val = {'thin':0.2, 'medium':1, 'thick':2}.get(val, 0)
                # Border width in eighths of a point, clamped to Word's limits
                val = min(96, max(2, int(val * 8)))
                if self.border_width is None:
                    self.border_width = val
                elif self.border_width != val:
                    self.border_width = ignore
                color = convert_color(css['border-%s-color' % edge])
                if self.border_color is None:
                    self.border_color = color
                elif self.border_color != color:
                    self.border_color = ignore
                style = LINE_STYLES.get(css['border-%s-style' % edge].lower(), 'none')
                if self.border_style is None:
                    self.border_style = style
                elif self.border_style != style:
                    self.border_style = ignore

        # Normalize unset/conflicting values to safe defaults
        if self.padding in (None, ignore):
            self.padding = 0
        if self.border_width in (None, ignore):
            self.border_width = 0
        if self.border_style in (None, ignore):
            self.border_style = 'none'
        if self.border_color in (None, ignore):
            self.border_color = 'auto'
        if self.border_style == 'none':
            self.border_width, self.border_color = 0, 'auto'

        DOCXStyle.__init__(self, namespace)

    def serialize_borders(self, bdr, normal_style):
        # Set border attributes on bdr only when they differ from the Normal
        # style (or always, when serializing the Normal style itself)
        w = self.w
        is_normal_style = self is normal_style
        if is_normal_style or self.padding != normal_style.padding:
            bdr.set(w('space'), str(self.padding))
        if is_normal_style or self.border_width != normal_style.border_width:
            bdr.set(w('sz'), str(self.border_width))
        if is_normal_style or self.border_style != normal_style.border_style:
            bdr.set(w('val'), self.border_style)
        if is_normal_style or self.border_color != normal_style.border_color:
            bdr.set(w('color'), self.border_color)
        return bdr

    def serialize(self, styles, normal_style):
        # Write this style as a w:style element with an rPr (run properties)
        # child; the rPr is only attached when it has at least one property
        makeelement = self.makeelement
        style_root = DOCXStyle.serialize(self, styles, normal_style)
        style = makeelement(style_root, 'rPr')
        self.serialize_properties(style, normal_style)
        if len(style) > 0:
            style_root.append(style)
        return style_root

    def serialize_properties(self, rPr, normal_style):
        # Append run properties to rPr; values equal to the Normal style are
        # omitted (they are inherited), except when serializing Normal itself
        makeelement = self.makeelement
        is_normal_style = self is normal_style
        if is_normal_style or self.font_family != normal_style.font_family:
            rPr.append(makeelement(
                rPr, 'rFonts', **{k:self.font_family for k in 'ascii cs eastAsia hAnsi'.split()}))

        for name, attr, vmap in (('sz', 'font_size', str), ('b', 'bold', bmap), ('i', 'italic', bmap)):
            val = getattr(self, attr)
            if is_normal_style or getattr(normal_style, attr) != val:
                # The 'Cs' variants apply the property to complex-script text
                for suffix in ('', 'Cs'):
                    rPr.append(makeelement(rPr, name + suffix, val=vmap(val)))

        def check_attr(attr):
            # True when the attribute must be emitted for this style
            val = getattr(self, attr)
            return is_normal_style or (val != getattr(normal_style, attr))

        if check_attr('color'):
            rPr.append(makeelement(rPr, 'color', val=self.color or 'auto'))
        if check_attr('background_color'):
            rPr.append(makeelement(rPr, 'shd', fill=self.background_color or 'auto'))
        if check_attr('underline'):
            # self.underline is '<style> <color>' (see convert_underline())
            style, color = self.underline.partition(' ')[::2]
            if color != 'auto':
                rPr.append(makeelement(rPr, 'u', val=style, color=color))
            else:
                rPr.append(makeelement(rPr, 'u', val=style))
        if check_attr('dstrike'):
            rPr.append(makeelement(rPr, 'dstrike', val=bmap(self.dstrike)))
        if check_attr('strike'):
            rPr.append(makeelement(rPr, 'strike', val=bmap(self.strike)))
        if check_attr('caps'):
            rPr.append(makeelement(rPr, 'caps', val=bmap(self.caps)))
        if check_attr('small_caps'):
            rPr.append(makeelement(rPr, 'smallCaps', val=bmap(self.small_caps)))
        if check_attr('shadow'):
            rPr.append(makeelement(rPr, 'shadow', val=bmap(self.shadow)))
        if check_attr('spacing'):
            rPr.append(makeelement(rPr, 'spacing', val=str(self.spacing or 0)))
        if is_normal_style:
            rPr.append(makeelement(rPr, 'vertAlign', val=self.vertical_align if self.vertical_align in {'superscript', 'subscript'} else 'baseline'))
        elif self.vertical_align != normal_style.vertical_align:
            if self.vertical_align in {'superscript', 'subscript', 'baseline'}:
                rPr.append(makeelement(rPr, 'vertAlign', val=self.vertical_align))
            else:
                # A numeric value (half-points) raises/lowers the text
                rPr.append(makeelement(rPr, 'position', val=self.vertical_align))

        bdr = self.serialize_borders(makeelement(rPr, 'bdr'), normal_style)
        if bdr.attrib:
            rPr.append(bdr)
class DescendantTextStyle:

    '''The formatting delta between a run's style and its parent block's style.

    Only the run properties that differ from the parent are recorded, as an
    ordered tuple of (element-name, frozenset-of-attributes) pairs, so runs
    can reference a small "delta" character style instead of repeating their
    full formatting. Instances hash/compare on that tuple.'''

    def __init__(self, parent_style, child_style):
        self.id = self.name = None  # assigned later by StylesManager.finalize()
        self.makeelement = child_style.makeelement
        p = []  # accumulated (name, attrs) property pairs, in emission order

        def add(name, **props):
            p.append((name, frozenset(iteritems(props))))

        def vals(attr):
            # (parent value, child value) for the given attribute
            return getattr(parent_style, attr), getattr(child_style, attr)

        def check(attr):
            # True when the child differs from the parent for this attribute
            pval, cval = vals(attr)
            return pval != cval

        if parent_style.font_family != child_style.font_family:
            add('rFonts', **{k:child_style.font_family for k in 'ascii cs eastAsia hAnsi'.split()})

        for name, attr in (('sz', 'font_size'), ('b', 'bold'), ('i', 'italic')):
            pval, cval = vals(attr)
            if pval != cval:
                val = 'on' if attr in {'bold', 'italic'} else str(cval)  # bold, italic are toggle properties
                # The 'Cs' variants apply to complex-script text
                for suffix in ('', 'Cs'):
                    add(name + suffix, val=val)

        if check('color'):
            add('color', val=child_style.color or 'auto')
        if check('background_color'):
            add('shd', fill=child_style.background_color or 'auto')
        if check('underline'):
            if not child_style.underline:
                add('u', val='none')
            else:
                # underline is stored as '<style> <color>' (see convert_underline())
                style, color = child_style.underline.partition(' ')[::2]
                add('u', val=style, color=color)
        if check('dstrike'):
            add('dstrike', val=bmap(child_style.dstrike))
        if check('strike'):
            add('strike', val='on')  # toggle property
        if check('caps'):
            add('caps', val='on')  # toggle property
        if check('small_caps'):
            add('smallCaps', val='on')  # toggle property
        if check('shadow'):
            add('shadow', val='on')  # toggle property
        if check('spacing'):
            add('spacing', val=str(child_style.spacing or 0))
        if check('vertical_align'):
            val = child_style.vertical_align
            if val in {'superscript', 'subscript', 'baseline'}:
                add('vertAlign', val=val)
            else:
                # numeric position in half-points
                add('position', val=val)

        # Inline borders: a single bdr element carrying only the changed attributes
        bdr = {}
        if check('padding'):
            bdr['space'] = str(child_style.padding)
        if check('border_width'):
            bdr['sz'] = str(child_style.border_width)
        if check('border_style'):
            bdr['val'] = child_style.border_style
        if check('border_color'):
            bdr['color'] = child_style.border_color
        if bdr:
            add('bdr', **bdr)
        self.properties = tuple(p)
        self._hash = hash(self.properties)

    def __hash__(self):
        return self._hash

    def __eq__(self, other):
        return self.properties == other.properties

    def __ne__(self, other):
        return self.properties != other.properties

    def serialize(self, styles):
        # Write as a w:style of type character containing only the delta
        # run properties recorded in self.properties
        makeelement = self.makeelement
        style = makeelement(styles, 'style', styleId=self.id, type='character')
        style.append(makeelement(style, 'name', val=self.name))
        rpr = makeelement(style, 'rPr')
        style.append(rpr)
        for name, attrs in self.properties:
            rpr.append(makeelement(style, name, **dict(attrs)))
        styles.append(style)
        return style
def read_css_block_borders(self, css, store_css_style=False):
    '''Populate the per-edge box attributes on *self* from a stylizer style.

    For every edge in border_edges this sets padding_<edge>, margin_<edge>
    (twips), css_margin_<edge> (raw CSS string), border_<edge>_width (eighths
    of a point, clamped to Word's 2..96 range), border_<edge>_color and
    border_<edge>_style. When css is None, safe defaults are used. With
    store_css_style=True the raw CSS border style is also recorded in
    border_<edge>_css_style.
    '''
    for edge in border_edges:
        if css is None:
            # No style information: zero spacing and an invisible border.
            setattr(self, f'padding_{edge}', 0)
            setattr(self, f'margin_{edge}', 0)
            setattr(self, f'css_margin_{edge}', '')
            setattr(self, f'border_{edge}_width', 2)
            setattr(self, f'border_{edge}_color', None)
            setattr(self, f'border_{edge}_style', 'none')
            if store_css_style:
                setattr(self, f'border_{edge}_css_style', 'none')
            continue
        # In DOCX padding can only be a positive integer
        try:
            padding = max(0, int(css['padding-' + edge]))
        except ValueError:
            padding = 0  # invalid value for padding
        setattr(self, f'padding_{edge}', padding)
        # In DOCX margin must be a positive integer in twips (twentieth of a point)
        try:
            margin = max(0, int(css['margin-' + edge] * 20))
        except ValueError:
            margin = 0  # e.g.: margin: auto
        setattr(self, f'margin_{edge}', margin)
        setattr(self, f'css_margin_{edge}', css._style.get('margin-' + edge, ''))
        width = css[f'border-{edge}-width']
        if not isinstance(width, numbers.Number):
            # Keyword widths mapped to points before conversion
            width = {'thin': 0.2, 'medium': 1, 'thick': 2}.get(width, 0)
        setattr(self, f'border_{edge}_width', min(96, max(2, int(width * 8))))
        setattr(self, f'border_{edge}_color', convert_color(css[f'border-{edge}-color']) or 'auto')
        setattr(self, f'border_{edge}_style', LINE_STYLES.get(css[f'border-{edge}-style'].lower(), 'none'))
        if store_css_style:
            setattr(self, f'border_{edge}_css_style', css[f'border-{edge}-style'].lower())
class BlockStyle(DOCXStyle):

    '''Paragraph-level formatting derived from a stylizer CSS style.

    Margins, indents, line height, background and the per-edge borders read
    by read_css_block_borders() define the style's identity (ALL_PROPS).
    border_props is presumably a module-level tuple of '...%s...' format
    strings for the border attribute names -- confirm where it is defined.'''

    ALL_PROPS = tuple(
        'text_align css_text_indent text_indent line_height background_color'.split(
        ) + ['margin_' + edge for edge in border_edges
        ] + ['css_margin_' + edge for edge in border_edges
        ] + [x%edge for edge in border_edges for x in border_props]
    )

    def __init__(self, namespace, css, html_block, is_table_cell=False, parent_bg=None):
        read_css_block_borders(self, css)
        if is_table_cell:
            # Borders and spacing of table cells are handled by the table
            # machinery, not by the paragraph style
            for edge in border_edges:
                setattr(self, 'border_%s_style' % edge, 'none')
                setattr(self, 'border_%s_width' % edge, 0)
                setattr(self, 'padding_' + edge, 0)
                setattr(self, 'margin_' + edge, 0)
        if css is None:
            self.text_indent = 0
            self.css_text_indent = None
            self.line_height = 280
            self.background_color = None
            self.text_align = 'left'
        else:
            try:
                # twips; the raw CSS value is kept as well so em/ex units can
                # be serialized as character-relative indents later
                self.text_indent = int(css['text-indent'] * 20)
                self.css_text_indent = css._get('text-indent')
            except (TypeError, ValueError):
                self.text_indent = 0
                self.css_text_indent = None
            try:
                self.line_height = max(0, int(css.lineHeight * 20))
            except (TypeError, ValueError):
                # Fall back to 1.2 x font size when line-height is unusable
                self.line_height = max(0, int(1.2 * css.fontSize * 20))
            self.background_color = None if is_table_cell else convert_color(css['background-color'])
            if not is_table_cell and self.background_color is None:
                self.background_color = parent_bg
            try:
                ws = css['white-space'].lower()
                preserve_whitespace = ws in {'pre', 'pre-wrap'}
            except Exception:
                preserve_whitespace = False
            try:
                aval = css['text-align'].lower()
                if preserve_whitespace:
                    # Pre-formatted text is always left aligned
                    aval = 'start'
                self.text_align = {'start':'left', 'left':'left', 'end':'right', 'right':'right', 'center':'center', 'justify':'both', 'centre':'center'}.get(
                    aval, 'left')
            except AttributeError:
                self.text_align = 'left'

        DOCXStyle.__init__(self, namespace)

    def serialize_borders(self, bdr, normal_style):
        # Append one border child per edge, but only with the attributes that
        # differ from the Normal style (or are non-trivial when serializing
        # the Normal style itself)
        w = self.w
        for edge in border_edges:
            e = bdr.makeelement(w(edge))
            padding = getattr(self, 'padding_' + edge)
            if (self is normal_style and padding > 0) or (padding != getattr(normal_style, 'padding_' + edge)):
                e.set(w('space'), str(padding))
            width = getattr(self, 'border_%s_width' % edge)
            bstyle = getattr(self, 'border_%s_style' % edge)
            if (self is normal_style and width > 0 and bstyle != 'none'
                    ) or width != getattr(normal_style, 'border_%s_width' % edge
                    ) or bstyle != getattr(normal_style, 'border_%s_style' % edge):
                e.set(w('val'), bstyle)
                e.set(w('sz'), str(width))
                e.set(w('color'), getattr(self, 'border_%s_color' % edge))
            if e.attrib:
                bdr.append(e)
        return bdr

    def serialize(self, styles, normal_style):
        # Write this style as a w:style element with a pPr (paragraph
        # properties) child; the pPr is only attached when non-empty
        makeelement = self.makeelement
        style_root = DOCXStyle.serialize(self, styles, normal_style)
        style = makeelement(style_root, 'pPr')
        self.serialize_properties(style, normal_style)
        if len(style) > 0:
            style_root.append(style)
        return style_root

    def serialize_properties(self, pPr, normal_style):
        # Paragraph properties (spacing, indents, shading, borders,
        # justification); values equal to the Normal style are omitted
        makeelement, w = self.makeelement, self.w
        spacing = makeelement(pPr, 'spacing')
        for edge, attr in iteritems({'top':'before', 'bottom':'after'}):
            getter = attrgetter('css_margin_' + edge)
            css_val, css_unit = parse_css_length(getter(self))
            if css_unit in ('em', 'ex'):
                # Font-relative margins use w:beforeLines/w:afterLines in
                # hundredths of a line; 1ex is taken as half a line
                lines = max(0, int(css_val * (50 if css_unit == 'ex' else 100)))
                if (self is normal_style and lines > 0) or getter(self) != getter(normal_style):
                    spacing.set(w(attr + 'Lines'), str(lines))
            else:
                getter = attrgetter('margin_' + edge)
                val = getter(self)
                if (self is normal_style and val > 0) or val != getter(normal_style):
                    spacing.set(w(attr), str(val))
        if self is normal_style or self.line_height != normal_style.line_height:
            spacing.set(w('line'), str(self.line_height))
            spacing.set(w('lineRule'), 'atLeast')
        if spacing.attrib:
            pPr.append(spacing)
        ind = makeelement(pPr, 'ind')
        for edge in ('left', 'right'):
            getter = attrgetter('css_margin_' + edge)
            css_val, css_unit = parse_css_length(getter(self))
            if css_unit in ('em', 'ex'):
                # Font-relative side margins use the *Chars attributes
                # (hundredths of a character width)
                chars = max(0, int(css_val * (50 if css_unit == 'ex' else 100)))
                if (self is normal_style and chars > 0) or getter(self) != getter(normal_style):
                    ind.set(w(edge + 'Chars'), str(chars))
            else:
                getter = attrgetter('margin_' + edge)
                val = getter(self)
                if (self is normal_style and val > 0) or val != getter(normal_style):
                    ind.set(w(edge), str(val))
                    ind.set(w(edge + 'Chars'), '0')  # This is needed to override any declaration in the parent style
        css_val, css_unit = parse_css_length(self.css_text_indent)
        if css_unit in ('em', 'ex'):
            chars = int(css_val * (50 if css_unit == 'ex' else 100))
            if css_val >= 0:
                if (self is normal_style and chars > 0) or self.css_text_indent != normal_style.css_text_indent:
                    ind.set(w('firstLineChars'), str(chars))
            else:
                # A negative text-indent becomes a hanging indent
                if (self is normal_style and chars < 0) or self.css_text_indent != normal_style.css_text_indent:
                    ind.set(w('hangingChars'), str(abs(chars)))
        else:
            val = self.text_indent
            if val >= 0:
                if (self is normal_style and val > 0) or self.text_indent != normal_style.text_indent:
                    ind.set(w('firstLine'), str(val))
                    ind.set(w('firstLineChars'), '0')  # This is needed to override any declaration in the parent style
            else:
                if (self is normal_style and val < 0) or self.text_indent != normal_style.text_indent:
                    ind.set(w('hanging'), str(abs(val)))
                    ind.set(w('hangingChars'), '0')
        if ind.attrib:
            pPr.append(ind)

        if (self is normal_style and self.background_color) or self.background_color != normal_style.background_color:
            pPr.append(makeelement(pPr, 'shd', val='clear', color='auto', fill=self.background_color or 'auto'))
        pbdr = self.serialize_borders(pPr.makeelement(w('pBdr')), normal_style)
        if len(pbdr):
            pPr.append(pbdr)
        if self is normal_style or self.text_align != normal_style.text_align:
            pPr.append(makeelement(pPr, 'jc', val=self.text_align))
        if self is not normal_style and self.next_style is not None:
            pPr.append(makeelement(pPr, 'next', val=self.next_style))
class StylesManager:

    '''Collects all block and text styles used during conversion, de-dupes
    them, decides which combination becomes Word's "Normal" style and which
    become headings, and serializes everything into styles.xml.'''

    def __init__(self, namespace, log, document_lang):
        self.namespace = namespace
        self.document_lang = lang_as_iso639_1(document_lang) or 'en'
        self.log = log
        # style -> style maps used for de-duplication (styles hash and
        # compare on their defining properties, see DOCXStyle)
        self.block_styles, self.text_styles = {}, {}
        self.styles_for_html_blocks = {}

    def create_text_style(self, css_style, is_parent_style=False):
        '''Return a de-duplicated TextStyle for the given stylizer style.'''
        ans = TextStyle(self.namespace, css_style, is_parent_style=is_parent_style)
        existing = self.text_styles.get(ans, None)
        if existing is None:
            self.text_styles[ans] = ans
        else:
            ans = existing
        return ans

    def create_block_style(self, css_style, html_block, is_table_cell=False, parent_bg=None):
        '''Return a de-duplicated BlockStyle for the given stylizer style,
        remembering which HTML block it belongs to.'''
        ans = BlockStyle(self.namespace, css_style, html_block, is_table_cell=is_table_cell, parent_bg=parent_bg)
        existing = self.block_styles.get(ans, None)
        if existing is None:
            self.block_styles[ans] = ans
        else:
            ans = existing
        self.styles_for_html_blocks[html_block] = ans
        return ans

    def finalize(self, all_blocks):
        '''Count style usage across all blocks, pair block styles with their
        dominant run style, assign names/ids (Normal, Para NN, Heading N,
        NN Block, NN Text) and pick a primary heading style.'''
        block_counts, run_counts = Counter(), Counter()
        block_rmap, run_rmap = defaultdict(list), defaultdict(list)
        used_pairs = defaultdict(list)
        heading_styles = defaultdict(list)
        headings = frozenset('h1 h2 h3 h4 h5 h6'.split())
        pure_block_styles = set()

        for block in all_blocks:
            bs = block.style
            block_counts[bs] += 1
            block_rmap[block.style].append(block)
            local_run_counts = Counter()
            for run in block.runs:
                count = run.style_weight
                run_counts[run.style] += count
                local_run_counts[run.style] += count
                run_rmap[run.style].append(run)
            if local_run_counts:
                # Pair the block style with its most-used run style
                rs = local_run_counts.most_common(1)[0][0]
                used_pairs[(bs, rs)].append(block)
                if block.html_tag in headings:
                    heading_styles[block.html_tag].append((bs, rs))
            else:
                # A block with no runs gets a pure (paragraph-only) style
                pure_block_styles.add(bs)

        self.pure_block_styles = sorted(pure_block_styles, key=block_counts.__getitem__)
        bnum = len(str(max(1, len(pure_block_styles) - 1)))
        for i, bs in enumerate(self.pure_block_styles):
            bs.id = bs.name = f'%0{bnum}d Block' % i
            bs.seq = i
            if i == 0:
                self.normal_pure_block_style = bs

        counts = Counter()
        smap = {}
        for (bs, rs), blocks in iteritems(used_pairs):
            s = CombinedStyle(bs, rs, blocks, self.namespace)
            smap[(bs, rs)] = s
            counts[s] += sum(1 for b in blocks if not b.is_empty())
        # For each heading tag (in order h1..h6) mark its most-used combined
        # style as the corresponding outline level
        for i, heading_tag in enumerate(sorted(heading_styles)):
            styles = sorted((smap[k] for k in heading_styles[heading_tag]), key=counts.__getitem__)
            styles = list(filter(lambda s:s.outline_level is None, styles))
            if styles:
                heading_style = styles[-1]
                heading_style.outline_level = i

        snum = len(str(max(1, len(counts) - 1)))
        heading_styles = []
        # The most frequently used combined style becomes Normal; the rest
        # are named 'Para NN' or, if they carry an outline level, 'Heading N'
        for i, (style, count) in enumerate(counts.most_common()):
            if i == 0:
                self.normal_style = style
                style.id = style.name = 'Normal'
            else:
                if style.outline_level is None:
                    val = f'Para %0{snum}d' % i
                else:
                    val = 'Heading %d' % (style.outline_level + 1)
                    heading_styles.append(style)
                style.id = style.name = val
            style.seq = i
        self.combined_styles = sorted(counts, key=attrgetter('seq'))
        [ls.apply() for ls in self.combined_styles]

        # Create delta character styles for runs whose formatting differs
        # from their containing block's style
        descendant_style_map = {}
        ds_counts = Counter()
        for block in all_blocks:
            for run in block.runs:
                if run.parent_style is not run.style and run.parent_style and run.style:
                    ds = DescendantTextStyle(run.parent_style, run.style)
                    if ds.properties:
                        run.descendant_style = descendant_style_map.get(ds)
                        if run.descendant_style is None:
                            run.descendant_style = descendant_style_map[ds] = ds
                        ds_counts[run.descendant_style] += run.style_weight
        rnum = len(str(max(1, len(ds_counts) - 1)))
        for i, (text_style, count) in enumerate(ds_counts.most_common()):
            text_style.id = 'Text%d' % i
            text_style.name = f'%0{rnum}d Text' % i
            text_style.seq = i
        self.descendant_text_styles = sorted(descendant_style_map, key=attrgetter('seq'))

        self.log.debug('%d Text Styles %d Combined styles' % tuple(map(len, (
            self.descendant_text_styles, self.combined_styles))))

        # Lowest outline-level heading style, or (when there are no headings)
        # the combined style with the largest font size
        self.primary_heading_style = None
        if heading_styles:
            heading_styles.sort(key=attrgetter('outline_level'))
            self.primary_heading_style = heading_styles[0]
        else:
            ms = 0
            for s in self.combined_styles:
                if s.rs.font_size > ms:
                    self.primary_heading_style = s
                    ms = s.rs.font_size

    def serialize(self, styles):
        '''Write all finalized styles into the styles.xml root element,
        setting the document language on the existing w:lang element.'''
        lang = styles.xpath('descendant::*[local-name()="lang"]')[0]
        for k in tuple(lang.attrib):
            lang.attrib[k] = self.document_lang
        for style in self.combined_styles:
            style.serialize(styles, self.normal_style)
        for style in self.descendant_text_styles:
            style.serialize(styles)
        for style in sorted(self.pure_block_styles, key=attrgetter('seq')):
            style.serialize(styles, self.normal_pure_block_style)
| 34,205 | Python | .py | 694 | 37.608069 | 158 | 0.56661 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,589 | unsmarten.py | kovidgoyal_calibre/src/calibre/ebooks/textile/unsmarten.py | __license__ = 'GPL 3'
__copyright__ = '2011, Leigh Parry <leighparry@blueyonder.co.uk>'
__docformat__ = 'restructuredtext en'
import re
# Translation table for unsmarten(): each pattern is replaced, in order, by
# the corresponding Textile glyph macro. Patterns are compiled once at
# import time instead of being re-processed on every call.
# NOTE(review): each alternation repeats the same literal character where
# upstream presumably also matched the decimal/named HTML entity forms
# (e.g. '&#162;|&cent;'); the entities appear to have been decoded by some
# processing pass -- confirm against history before relying on entity input.
_UNSMARTEN_RULES = tuple((re.compile(pat), repl) for pat, repl in (
    ('¢|¢|¢', r'{c\}'),        # cent
    ('£|£|£', r'{L-}'),        # pound
    ('¥|¥|¥', r'{Y=}'),        # yen
    ('©|©|©', r'{(c)}'),       # copyright
    ('®|®|®', r'{(r)}'),       # registered
    ('¼|¼|¼', r'{1/4}'),       # quarter
    ('½|½|½', r'{1/2}'),       # half
    ('¾|¾|¾', r'{3/4}'),       # three-quarter
    # Fixed: replacement was r'{A`)}' with a stray ')' that does not match
    # the {X?} macro shape used everywhere else (cf. the Textile macro
    # {(A`|`A)} -> À in functions.py)
    ('À|À|À', r'{A`}'),        # A-grave
    ('Á|Á|Á', r"{A'}"),        # A-acute
    ('Â|Â|Â', r'{A^}'),        # A-circumflex
    ('Ã|Ã|Ã', r'{A~}'),        # A-tilde
    ('Ä|Ä|Ä', r'{A"}'),        # A-umlaut
    ('Å|Å|Å', r'{Ao}'),        # A-ring
    ('Æ|Æ|Æ', r'{AE}'),        # AE
    ('Ç|Ç|Ç', r'{C,}'),        # C-cedilla
    ('È|È|È', r'{E`}'),        # E-grave
    ('É|É|É', r"{E'}"),        # E-acute
    ('Ê|Ê|Ê', r'{E^}'),        # E-circumflex
    ('Ë|Ë|Ë', r'{E"}'),        # E-umlaut
    ('Ì|Ì|Ì', r'{I`}'),        # I-grave
    ('Í|Í|Í', r"{I'}"),        # I-acute
    ('Î|Î|Î', r'{I^}'),        # I-circumflex
    ('Ï|Ï|Ï', r'{I"}'),        # I-umlaut
    ('Ð|Ð|Ð', r'{D-}'),        # ETH
    ('Ñ|Ñ|Ñ', r'{N~}'),        # N-tilde
    ('Ò|Ò|Ò', r'{O`}'),        # O-grave
    ('Ó|Ó|Ó', r"{O'}"),        # O-acute
    ('Ô|Ô|Ô', r'{O^}'),        # O-circumflex
    ('Õ|Õ|Õ', r'{O~}'),        # O-tilde
    ('Ö|Ö|Ö', r'{O"}'),        # O-umlaut
    ('×|×|×', r'{x}'),         # dimension
    ('Ø|Ø|Ø', r'{O/}'),        # O-slash
    ('Ù|Ù|Ù', r"{U`}"),        # U-grave
    ('Ú|Ú|Ú', r"{U'}"),        # U-acute
    ('Û|Û|Û', r'{U^}'),        # U-circumflex
    ('Ü|Ü|Ü', r'{U"}'),        # U-umlaut
    ('Ý|Ý|Ý', r"{Y'}"),        # Y-acute (was mislabeled "Y-grave")
    ('ß|ß|ß', r'{sz}'),        # sharp-s
    ('à|à|à', r'{a`}'),        # a-grave
    ('á|á|á', r"{a'}"),        # a-acute
    ('â|â|â', r'{a^}'),        # a-circumflex
    ('ã|ã|ã', r'{a~}'),        # a-tilde
    ('ä|ä|ä', r'{a"}'),        # a-umlaut
    ('å|å|å', r'{ao}'),        # a-ring
    ('æ|æ|æ', r'{ae}'),        # ae
    ('ç|ç|ç', r'{c,}'),        # c-cedilla
    ('è|è|è', r'{e`}'),        # e-grave
    ('é|é|é', r"{e'}"),        # e-acute
    ('ê|ê|ê', r'{e^}'),        # e-circumflex
    ('ë|ë|ë', r'{e"}'),        # e-umlaut
    ('ì|ì|ì', r'{i`}'),        # i-grave
    ('í|í|í', r"{i'}"),        # i-acute
    ('î|î|î', r'{i^}'),        # i-circumflex
    ('ï|ï|ï', r'{i"}'),        # i-umlaut
    ('ð|ð|ð', r'{d-}'),        # eth
    ('ñ|ñ|ñ', r'{n~}'),        # n-tilde
    ('ò|ò|ò', r'{o`}'),        # o-grave
    ('ó|ó|ó', r"{o'}"),        # o-acute
    ('ô|ô|ô', r'{o^}'),        # o-circumflex
    ('õ|õ|õ', r'{o~}'),        # o-tilde
    ('ö|ö|ö', r'{o"}'),        # o-umlaut
    ('ø|ø|ø', r'{o/}'),        # o-stroke
    ('ù|ù|ù', r'{u`}'),        # u-grave
    ('ú|ú|ú', r"{u'}"),        # u-acute
    ('û|û|û', r'{u^}'),        # u-circumflex
    ('ü|ü|ü', r'{u"}'),        # u-umlaut
    ('ý|ý|ý', r"{y'}"),        # y-acute
    ('ÿ|ÿ|ÿ', r'{y"}'),        # y-umlaut
    ('Č|Č|Č', r'{Cˇ}'),        # C-caron
    ('č|č|č', r'{cˇ}'),        # c-caron
    ('Ď|Ď|Ď', r'{Dˇ}'),        # D-caron
    ('ď|ď|ď', r'{dˇ}'),        # d-caron
    ('Ě|Ě|Ě', r'{Eˇ}'),        # E-caron
    ('ě|ě|ě', r'{eˇ}'),        # e-caron
    ('Ĺ|Ĺ|Ĺ', r"{L'}"),        # L-acute
    ('ĺ|ĺ|ĺ', r"{l'}"),        # l-acute
    ('Ľ|Ľ|Ľ', r'{Lˇ}'),        # L-caron
    ('ľ|ľ|ľ', r'{lˇ}'),        # l-caron
    ('Ň|Ň|Ň', r'{Nˇ}'),        # N-caron
    ('ň|ň|ň', r'{nˇ}'),        # n-caron
    ('Œ|Œ|Œ', r'{OE}'),        # OE
    ('œ|œ|œ', r'{oe}'),        # oe
    ('Ŕ|Ŕ|Ŕ', r"{R'}"),        # R-acute
    ('ŕ|ŕ|ŕ', r"{r'}"),        # r-acute
    ('Ř|Ř|Ř', r'{Rˇ}'),        # R-caron
    ('ř|ř|ř', r'{rˇ}'),        # r-caron
    ('Ŝ|Ŝ', r'{S^}'),          # S-circumflex
    ('ŝ|ŝ', r'{s^}'),          # s-circumflex
    ('Š|Š|Š', r'{Sˇ}'),        # S-caron
    ('š|š|š', r'{sˇ}'),        # s-caron
    ('Ť|Ť|Ť', r'{Tˇ}'),        # T-caron
    ('ť|ť|ť', r'{tˇ}'),        # t-caron
    ('Ů|Ů|Ů', r'{U°}'),        # U-ring
    ('ů|ů|ů', r'{u°}'),        # u-ring
    ('Ž|Ž|Ž', r'{Zˇ}'),        # Z-caron
    ('ž|ž|ž', r'{zˇ}'),        # z-caron
    ('•|•|•', r'{*}'),         # bullet
    ('₣|₣', r'{Fr}'),          # Franc
    ('₤|₤', r'{L=}'),          # Lira
    ('₨|₨', r'{Rs}'),          # Rupee
    ('€|€|€', r'{C=}'),        # euro
    ('™|™|™', r'{tm}'),        # trademark
    ('♠|♠|♠', r'{spade}'),     # spade
    ('♣|♣|♣', r'{club}'),      # club
    ('♥|♥|♥', r'{heart}'),     # heart
    ('♦|♦|♦', r'{diamond}'),   # diamond
))


def unsmarten(txt):
    '''Return *txt* with typographic/special characters replaced by their
    Textile glyph macros, e.g. '©' -> '{(c)}' and 'é' -> "{e'}".

    This is the inverse of the glyph expansion performed by the Textile
    processor (see the macro table in functions.py); text without any of
    the mapped characters is returned unchanged.
    '''
    for pat, repl in _UNSMARTEN_RULES:
        txt = pat.sub(repl, txt)
    # Move into main code?
    # txt = re.sub(u'\xa0', r'p. ', txt)                 # blank paragraph
    # txt = re.sub(u'\n\n\n\n', r'\n\np. \n\n', txt)     # blank paragraph
    # txt = re.sub(u'\n \n', r'\n<br />\n', txt)         # blank paragraph - br tag
    return txt
| 7,682 | Python | .py | 117 | 59.581197 | 82 | 0.450061 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,590 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/textile/__init__.py | from .functions import Textile, textile, textile_restricted
# Reference the imported names inside an "if False" block so static
# analyzers see them as used without executing anything at runtime; the
# import above provides them.
if False:
    textile, textile_restricted, Textile

# Public API of this package.
__all__ = ['textile', 'textile_restricted']
| 157 | Python | .py | 4 | 36.75 | 59 | 0.761589 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,591 | functions.py | kovidgoyal_calibre/src/calibre/ebooks/textile/functions.py | #!/usr/bin/env python
"""
PyTextile
A Humane Web Text Generator
"""
# Last upstream version basis
# __version__ = '2.1.4'
# __date__ = '2009/12/04'
__copyright__ = """
Copyright (c) 2011, Leigh Parry <leighparry@blueyonder.co.uk>
Copyright (c) 2011, John Schember <john@nachtimwald.com>
Copyright (c) 2009, Jason Samsa, http://jsamsa.com/
Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
Original PHP Version:
Copyright (c) 2003-2004, Dean Allen <dean@textism.com>
All rights reserved.
Thanks to Carlo Zottmann <carlo@g-blog.net> for refactoring
Textile's procedural code into a class framework
Additions and fixes Copyright (c) 2006 Alex Shiels http://thresholdstate.com/
"""
__license__ = """
L I C E N S E
=============
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Textile nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import uuid
from calibre.utils.smartypants import smartyPants
from polyglot.urllib import urlopen, urlparse
def _normalize_newlines(string):
out = re.sub(r'\r\n', '\n', string)
out = re.sub(r'\n{3,}', '\n\n', out)
out = re.sub(r'\n\s*\n', '\n\n', out)
out = re.sub(r'"$', '" ', out)
return out
def getimagesize(url):
    """
    Attempts to determine an image's width and height, and returns a string
    suitable for use in an <img> tag, or None in case of failure.
    Requires that PIL is installed.

    >>> getimagesize("http://www.google.com/intl/en_ALL/images/logo.gif")
    ... #doctest: +ELLIPSIS, +SKIP
    'width="..." height="..."'
    """
    from PIL import ImageFile

    try:
        p = ImageFile.Parser()
        f = urlopen(url)
        try:
            # Feed the image to the incremental parser in chunks; stop as
            # soon as the header has been parsed (p.image becomes set) so
            # the whole file need not be downloaded.
            while True:
                s = f.read(1024)
                if not s:
                    break
                p.feed(s)
                if p.image is not None:
                    break
        finally:
            # The previous implementation never closed the connection
            f.close()
        if p.image:
            return 'width="%i" height="%i"' % p.image.size
    except (OSError, ValueError):
        return None
class Textile:
    # Regular-expression fragments used to assemble the larger patterns
    # below.
    hlgn = r'(?:\<(?!>)|(?<!<)\>|\<\>|\=|[()]+(?! ))'  # horizontal alignment / padding markers
    vlgn = r'[\-^~]'          # vertical alignment markers (middle/top/bottom)
    clas = r'(?:\([^)]+\))'   # (class) / (#id) shorthand
    lnge = r'(?:\[[^\]]+\])'  # [language] shorthand
    styl = r'(?:\{[^}]+\})'   # {style} shorthand
    cspn = r'(?:\\\d+)'       # \n column span (table cells)
    rspn = r'(?:\/\d+)'       # /n row span (table cells)
    a = fr'(?:{hlgn}|{vlgn})*'  # any combination of alignment attributes
    s = fr'(?:{cspn}|{rspn})*'  # any combination of span attributes
    c = r'(?:%s)*' % '|'.join([clas, styl, lnge, hlgn])  # class/style/lang/align combo
    pnct = r'[-!"#$%&()*+,/:;<=>?@\'\[\\\]\.^_`{|}~]'  # punctuation set for phrase markup
    # urlch = r'[\w"$\-_.+!*\'(),";/?:@=&%#{}|\\^~\[\]`]'
    urlch = r'[\w"$\-_.+*\'(),";\/?:@=&%#{}|\\^~\[\]`]'  # characters allowed in URLs
    url_schemes = ('http', 'https', 'ftp', 'mailto')  # schemes allowed in restricted mode
    btag = ('bq', 'bc', 'notextile', 'pre', 'h[1-6]', r'fn\d+', 'p')  # recognized block signatures
    btag_lite = ('bq', 'bc', 'p')  # safe subset used in lite (comment) mode
macro_defaults = [
(re.compile(r'{(c\||\|c)}'), r'¢'), # cent
(re.compile(r'{(L-|-L)}'), r'£'), # pound
(re.compile(r'{(Y=|=Y)}'), r'¥'), # yen
(re.compile(r'{\(c\)}'), r'©'), # copyright
(re.compile(r'{\(r\)}'), r'®'), # registered
(re.compile(r'{(\+_|_\+)}'), r'±'), # plus-minus
(re.compile(r'{1/4}'), r'¼'), # quarter
(re.compile(r'{1/2}'), r'½'), # half
(re.compile(r'{3/4}'), r'¾'), # three-quarter
(re.compile(r'{(A`|`A)}'), r'À'), # A-acute
(re.compile(r'{(A\'|\'A)}'), r'Á'), # A-grave
(re.compile(r'{(A\^|\^A)}'), r'Â'), # A-circumflex
(re.compile(r'{(A~|~A)}'), r'Ã'), # A-tilde
(re.compile(r'{(A\"|\"A)}'), r'Ä'), # A-diaeresis
(re.compile(r'{(Ao|oA)}'), r'Å'), # A-ring
(re.compile(r'{(AE)}'), r'Æ'), # AE
(re.compile(r'{(C,|,C)}'), r'Ç'), # C-cedilla
(re.compile(r'{(E`|`E)}'), r'È'), # E-acute
(re.compile(r'{(E\'|\'E)}'), r'É'), # E-grave
(re.compile(r'{(E\^|\^E)}'), r'Ê'), # E-circumflex
(re.compile(r'{(E\"|\"E)}'), r'Ë'), # E-diaeresis
(re.compile(r'{(I`|`I)}'), r'Ì'), # I-acute
(re.compile(r'{(I\'|\'I)}'), r'Í'), # I-grave
(re.compile(r'{(I\^|\^I)}'), r'Î'), # I-circumflex
(re.compile(r'{(I\"|\"I)}'), r'Ï'), # I-diaeresis
(re.compile(r'{(D-|-D)}'), r'Ð'), # ETH
(re.compile(r'{(N~|~N)}'), r'Ñ'), # N-tilde
(re.compile(r'{(O`|`O)}'), r'Ò'), # O-acute
(re.compile(r'{(O\'|\'O)}'), r'Ó'), # O-grave
(re.compile(r'{(O\^|\^O)}'), r'Ô'), # O-circumflex
(re.compile(r'{(O~|~O)}'), r'Õ'), # O-tilde
(re.compile(r'{(O\"|\"O)}'), r'Ö'), # O-diaeresis
(re.compile(r'{x}'), r'×'), # dimension
(re.compile(r'{(O\/|\/O)}'), r'Ø'), # O-slash
(re.compile(r'{(U`|`U)}'), r'Ù'), # U-acute
(re.compile(r'{(U\'|\'U)}'), r'Ú'), # U-grave
(re.compile(r'{(U\^|\^U)}'), r'Û'), # U-circumflex
(re.compile(r'{(U\"|\"U)}'), r'Ü'), # U-diaeresis
(re.compile(r'{(Y\'|\'Y)}'), r'Ý'), # Y-grave
(re.compile(r'{sz}'), r'ß'), # sharp-s
(re.compile(r'{(a`|`a)}'), r'à'), # a-grave
(re.compile(r'{(a\'|\'a)}'), r'á'), # a-acute
(re.compile(r'{(a\^|\^a)}'), r'â'), # a-circumflex
(re.compile(r'{(a~|~a)}'), r'ã'), # a-tilde
(re.compile(r'{(a\"|\"a)}'), r'ä'), # a-diaeresis
(re.compile(r'{(ao|oa)}'), r'å'), # a-ring
(re.compile(r'{ae}'), r'æ'), # ae
(re.compile(r'{(c,|,c)}'), r'ç'), # c-cedilla
(re.compile(r'{(e`|`e)}'), r'è'), # e-grave
(re.compile(r'{(e\'|\'e)}'), r'é'), # e-acute
(re.compile(r'{(e\^|\^e)}'), r'ê'), # e-circumflex
(re.compile(r'{(e\"|\"e)}'), r'ë'), # e-diaeresis
(re.compile(r'{(i`|`i)}'), r'ì'), # i-grave
(re.compile(r'{(i\'|\'i)}'), r'í'), # i-acute
(re.compile(r'{(i\^|\^i)}'), r'î'), # i-circumflex
(re.compile(r'{(i\"|\"i)}'), r'ï'), # i-diaeresis
(re.compile(r'{(d-|-d)}'), r'ð'), # eth
(re.compile(r'{(n~|~n)}'), r'ñ'), # n-tilde
(re.compile(r'{(o`|`o)}'), r'ò'), # o-grave
(re.compile(r'{(o\'|\'o)}'), r'ó'), # o-acute
(re.compile(r'{(o\^|\^o)}'), r'ô'), # o-circumflex
(re.compile(r'{(o~|~o)}'), r'õ'), # o-tilde
(re.compile(r'{(o\"|\"o)}'), r'ö'), # o-diaeresis
(re.compile(r'{(o\/|\/o)}'), r'ø'), # o-stroke
(re.compile(r'{(u`|`u)}'), r'ù'), # u-grave
(re.compile(r'{(u\'|\'u)}'), r'ú'), # u-acute
(re.compile(r'{(u\^|\^u)}'), r'û'), # u-circumflex
(re.compile(r'{(u\"|\"u)}'), r'ü'), # u-diaeresis
(re.compile(r'{(y\'|\'y)}'), r'ý'), # y-acute
(re.compile(r'{(y\"|\"y)}'), r'ÿ'), # y-diaeresis
(re.compile(r'{(C\ˇ|\ˇC)}'), r'Č'), # C-caron
(re.compile(r'{(c\ˇ|\ˇc)}'), r'č'), # c-caron
(re.compile(r'{(D\ˇ|\ˇD)}'), r'Ď'), # D-caron
(re.compile(r'{(d\ˇ|\ˇd)}'), r'ď'), # d-caron
(re.compile(r'{(E\ˇ|\ˇE)}'), r'Ě'), # E-caron
(re.compile(r'{(e\ˇ|\ˇe)}'), r'ě'), # e-caron
(re.compile(r'{(L\'|\'L)}'), r'Ĺ'), # L-acute
(re.compile(r'{(l\'|\'l)}'), r'ĺ'), # l-acute
(re.compile(r'{(L\ˇ|\ˇL)}'), r'Ľ'), # L-caron
(re.compile(r'{(l\ˇ|\ˇl)}'), r'ľ'), # l-caron
(re.compile(r'{(N\ˇ|\ˇN)}'), r'Ň'), # N-caron
(re.compile(r'{(n\ˇ|\ˇn)}'), r'ň'), # n-caron
(re.compile(r'{OE}'), r'Œ'), # OE
(re.compile(r'{oe}'), r'œ'), # oe
(re.compile(r'{(R\'|\'R)}'), r'Ŕ'), # R-acute
(re.compile(r'{(r\'|\'r)}'), r'ŕ'), # r-acute
(re.compile(r'{(R\ˇ|\ˇR)}'), r'Ř'), # R-caron
(re.compile(r'{(r\ˇ|\ˇr)}'), r'ř'), # r-caron
(re.compile(r'{(S\^|\^S)}'), r'Ŝ'), # S-circumflex
(re.compile(r'{(s\^|\^s)}'), r'ŝ'), # s-circumflex
(re.compile(r'{(S\ˇ|\ˇS)}'), r'Š'), # S-caron
(re.compile(r'{(s\ˇ|\ˇs)}'), r'š'), # s-caron
(re.compile(r'{(T\ˇ|\ˇT)}'), r'Ť'), # T-caron
(re.compile(r'{(t\ˇ|\ˇt)}'), r'ť'), # t-caron
(re.compile(r'{(U\°|\°U)}'), r'Ů'), # U-ring
(re.compile(r'{(u\°|\°u)}'), r'ů'), # u-ring
(re.compile(r'{(Z\ˇ|\ˇZ)}'), r'Ž'), # Z-caron
(re.compile(r'{(z\ˇ|\ˇz)}'), r'ž'), # z-caron
(re.compile(r'{\*}'), r'•'), # bullet
(re.compile(r'{Fr}'), r'₣'), # Franc
(re.compile(r'{(L=|=L)}'), r'₤'), # Lira
(re.compile(r'{Rs}'), r'₨'), # Rupee
(re.compile(r'{(C=|=C)}'), r'€'), # euro
(re.compile(r'{tm}'), r'™'), # trademark
(re.compile(r'{spades?}'), r'♠'), # spade
(re.compile(r'{clubs?}'), r'♣'), # club
(re.compile(r'{hearts?}'), r'♥'), # heart
(re.compile(r'{diam(onds?|s)}'), r'♦'), # diamond
(re.compile(r'{"}'), r'"'), # double-quote
(re.compile(r"{'}"), r'''), # single-quote
(re.compile(r"{(’|'/|/')}"), r'’'), # closing-single-quote - apostrophe
(re.compile(r"{(‘|\\'|'\\)}"), r'‘'), # opening-single-quote
(re.compile(r'{(”|"/|/")}'), r'”'), # closing-double-quote
(re.compile(r'{(“|\\"|"\\)}'), r'“'), # opening-double-quote
]
glyph_defaults = [
(re.compile(r'(\d+\'?\"?)( ?)x( ?)(?=\d+)'), r'\1\2×\3'), # dimension sign
(re.compile(r'(\d+)\'(\s)', re.I), r'\1′\2'), # prime
(re.compile(r'(\d+)\"(\s)', re.I), r'\1″\2'), # prime-double
(re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'), r'<acronym title="\2">\1</acronym>'), # 3+ uppercase acronym
(re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'), r'<span class="caps">\1</span>'), # 3+ uppercase
(re.compile(r'\b(\s{0,1})?\.{3}'), r'\1…'), # ellipsis
(re.compile(r'^[\*_-]{3,}$', re.M), r'<hr />'), # <hr> scene-break
(re.compile(r'(^|[^-])--([^-]|$)'), r'\1—\2'), # em dash
(re.compile(r'\s-(?:\s|$)'), r' – '), # en dash
(re.compile(r'\b( ?)[([]TM[])]', re.I), r'\1™'), # trademark
(re.compile(r'\b( ?)[([]R[])]', re.I), r'\1®'), # registered
(re.compile(r'\b( ?)[([]C[])]', re.I), r'\1©'), # copyright
]
def __init__(self, restricted=False, lite=False, noimage=False):
"""docstring for __init__"""
self.restricted = restricted
self.lite = lite
self.noimage = noimage
self.get_sizes = False
self.fn = {}
self.urlrefs = {}
self.shelf = {}
self.rel = ''
self.html_type = 'xhtml'
    def textile(self, text, rel=None, head_offset=0, html_type='xhtml'):
        """
        Convert Textile markup in *text* to HTML.

        >>> import textile
        >>> textile.textile('some textile')
        u'\\t<p>some textile</p>'
        """
        self.html_type = html_type

        # text = type(u'')(text)
        text = _normalize_newlines(text)

        if self.restricted:
            # Untrusted input: escape raw HTML up front. Quotes are left
            # alone so attribute shorthand still parses.
            text = self.encode_html(text, quotes=False)

        if rel:
            self.rel = ' rel="%s"' % rel

        # Strip link aliases first, then render blocks; shelved fragments
        # are restored and smart quotes applied last.
        text = self.getRefs(text)

        text = self.block(text, int(head_offset))

        text = self.retrieve(text)
        text = smartyPants(text, 'q')

        return text
def pba(self, input, element=None):
"""
Parse block attributes.
>>> t = Textile()
>>> t.pba(r'\3')
''
>>> t.pba(r'\\3', element='td')
' colspan="3"'
>>> t.pba(r'/4', element='td')
' rowspan="4"'
>>> t.pba(r'\\3/4', element='td')
' colspan="3" rowspan="4"'
>>> t.vAlign('^')
'top'
>>> t.pba('^', element='td')
' style="vertical-align:top;"'
>>> t.pba('{line-height:18px}')
' style="line-height:18px;"'
>>> t.pba('(foo-bar)')
' class="foo-bar"'
>>> t.pba('(#myid)')
' id="myid"'
>>> t.pba('(foo-bar#myid)')
' class="foo-bar" id="myid"'
>>> t.pba('((((')
' style="padding-left:4em;"'
>>> t.pba(')))')
' style="padding-right:3em;"'
>>> t.pba('[fr]')
' lang="fr"'
"""
style = []
aclass = ''
lang = ''
colspan = ''
rowspan = ''
id = ''
if not input:
return ''
matched = input
if element == 'td':
m = re.search(r'\\(\d+)', matched)
if m:
colspan = m.group(1)
m = re.search(r'/(\d+)', matched)
if m:
rowspan = m.group(1)
if element == 'td' or element == 'tr':
m = re.search(r'(%s)' % self.vlgn, matched)
if m:
style.append("vertical-align:%s;" % self.vAlign(m.group(1)))
m = re.search(r'\{([^}]*)\}', matched)
if m:
style.append(m.group(1).rstrip(';') + ';')
matched = matched.replace(m.group(0), '')
m = re.search(r'\[([^\]]+)\]', matched, re.U)
if m:
lang = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'\(([^()]+)\)', matched, re.U)
if m:
aclass = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'([(]+)', matched)
if m:
style.append("padding-left:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'([)]+)', matched)
if m:
style.append("padding-right:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'(%s)' % self.hlgn, matched)
if m:
style.append("text-align:%s;" % self.hAlign(m.group(1)))
m = re.search(r'^(.*)#(.*)$', aclass)
if m:
id = m.group(2)
aclass = m.group(1)
if self.restricted:
if lang:
return ' lang="%s"'
else:
return ''
result = []
if style:
result.append(' style="%s"' % "".join(style))
if aclass:
result.append(' class="%s"' % aclass)
if lang:
result.append(' lang="%s"' % lang)
if id:
result.append(' id="%s"' % id)
if colspan:
result.append(' colspan="%s"' % colspan)
if rowspan:
result.append(' rowspan="%s"' % rowspan)
return ''.join(result)
def hasRawText(self, text):
"""
checks whether the text has text not already enclosed by a block tag
>>> t = Textile()
>>> t.hasRawText('<p>foo bar biz baz</p>')
False
>>> t.hasRawText(' why yes, yes it does')
True
"""
r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip()
r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
return '' != r
    def table(self, text):
        r"""
        Convert Textile table markup into an HTML <table>.

        >>> t = Textile()
        >>> t.table('|one|two|three|\n|a|b|c|')
        '\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
        """
        # Trailing blank line lets the terminating \n\n in the pattern
        # also match at end-of-text.
        text = text + "\n\n"
        # Optional "table{atts}." prefix line followed by |cell| rows.
        pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s':self.s, 'a':self.a, 'c':self.c}, re.S|re.M|re.U)
        return pattern.sub(self.fTable, text)

    def fTable(self, match):
        # re.sub callback for table(): group(1) is the table-level
        # attribute shorthand, group(2) the block of |cell| rows.
        tatts = self.pba(match.group(1), 'table')
        rows = []
        for row in [x for x in match.group(2).split('\n') if x]:
            # Row-level attributes appear before a ". " prefix.
            rmtch = re.search(fr'^({self.a}{self.c}\. )(.*)', row.lstrip())
            if rmtch:
                ratts = self.pba(rmtch.group(1), 'tr')
                row = rmtch.group(2)
            else:
                ratts = ''

            cells = []
            for cell in row.split('|')[1:-1]:
                ctyp = 'd'
                # A leading underscore marks a header cell (<th>).
                if re.search(r'^_', cell):
                    ctyp = "h"
                cmtch = re.search(fr'^(_?{self.s}{self.a}{self.c}\. )(.*)', cell)
                if cmtch:
                    catts = self.pba(cmtch.group(1), 'td')
                    cell = cmtch.group(2)
                else:
                    catts = ''

                # Cell content gets the full inline pipeline.
                cell = self.graf(self.span(cell))
                cells.append(f'\t\t\t<t{ctyp}{catts}>{cell}</t{ctyp}>')
            rows.append("\t\t<tr{}>\n{}\n\t\t</tr>".format(ratts, '\n'.join(cells)))
            cells = []
            catts = None
        return "\t<table{}>\n{}\n\t</table>\n\n".format(tatts, '\n'.join(rows))
    def lists(self, text):
        """
        Convert Textile list markup (``*`` and ``#`` prefixes) to HTML.

        >>> t = Textile()
        >>> t.lists("* one\\n* two\\n* three")
        '\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
        """
        pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
        return pattern.sub(self.fList, text)

    def fList(self, match):
        # re.sub callback for lists(): renders one run of list lines,
        # tracking nesting depth via the length of the leading #/* prefix.
        text = match.group(0).split("\n")
        result = []
        lists = []  # stack of currently-open list prefixes
        for i, line in enumerate(text):
            try:
                nextline = text[i+1]
            except IndexError:
                nextline = ''

            m = re.search(fr"^([#*]+)({self.a}{self.c}) (.*)$", line, re.S)
            if m:
                tl, atts, content = m.groups()
                nl = ''
                nm = re.search(r'^([#*]+)\s.*', nextline)
                if nm:
                    nl = nm.group(1)
                if tl not in lists:
                    # Entering a deeper level: open a new <ul>/<ol>.
                    lists.append(tl)
                    atts = self.pba(atts)
                    line = f"\t<{self.lT(tl)}l{atts}>\n\t\t<li>{self.graf(content)}"
                else:
                    line = "\t\t<li>" + self.graf(content)

                if len(nl) <= len(tl):
                    line = line + "</li>"
                for k in reversed(lists):
                    if len(k) > len(nl):
                        # Next line is shallower: close the deeper lists.
                        line = line + "\n\t</%sl>" % self.lT(k)
                        if len(k) > 1:
                            line = line + "</li>"
                        lists.remove(k)
            result.append(line)
        return "\n".join(result)
def lT(self, input):
if re.search(r'^#+', input):
return 'o'
else:
return 'u'
    def doPBr(self, in_):
        # Insert explicit line breaks for single newlines inside <p>
        # blocks (delegates to doBr for each paragraph).
        return re.compile(r'<(p)([^>]*?)>(.*)(</\1>)', re.S).sub(self.doBr, in_)

    def doBr(self, match):
        # re.sub callback for doPBr(): convert lone newlines that are not
        # already preceded by a break and do not start list/table markup
        # into <br> / <br /> depending on the output dialect.
        if self.html_type == 'html':
            content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br>', match.group(3))
        else:
            content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br />', match.group(3))
        return f'<{match.group(1)}{match.group(2)}>{content}{match.group(4)}'
    def block(self, text, head_offset=0):
        """
        Split *text* on blank lines and render each chunk as a block
        (paragraph, heading, blockquote, code, footnote, ...).

        >>> t = Textile()
        >>> t.block('h1. foobar baby')
        '\\t<h1>foobar baby</h1>'
        """
        if not self.lite:
            tre = '|'.join(self.btag)
        else:
            tre = '|'.join(self.btag_lite)
        text = text.split('\n\n')

        tag = 'p'
        atts = cite = graf = ext = c1 = ''

        out = []

        anon = False
        for line in text:
            # Block signature: "tag{atts}.", ".." marks an extended block,
            # ":cite" an optional citation, then the content.
            pattern = fr'^({tre})({self.a}{self.c})\.(\.?)(?::(\S+))? (.*)$'
            match = re.search(pattern, line, re.S)
            if match:
                if ext:
                    # A new signature ends the previous extended block.
                    out.append(out.pop() + c1)

                tag, atts, ext, cite, graf = match.groups()
                h_match = re.search(r'h([1-6])', tag)
                if h_match:
                    # Apply the heading offset, clamped into h1..h6.
                    head_level, = h_match.groups()
                    tag = 'h%i' % max(1,
                                      min(int(head_level) + head_offset,
                                          6))
                o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext,
                                                      cite, graf)
                # leave off c1 if this block is extended,
                # we'll close it at the start of the next block

                if ext:
                    line = f"{o1}{o2}{content}{c2}"
                else:
                    line = f"{o1}{o2}{content}{c2}{c1}"
            else:
                anon = True
                if ext or not re.search(r'^\s', line):
                    o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext,
                                                          cite, line)
                    # skip $o1/$c1 because this is part of a continuing
                    # extended block
                    if tag == 'p' and not self.hasRawText(content):
                        line = content
                    else:
                        line = f"{o2}{content}{c2}"
                else:
                    line = self.graf(line)

            line = self.doPBr(line)
            if self.html_type == 'xhtml':
                line = re.sub(r'<br>', '<br />', line)

            if ext and anon:
                out.append(out.pop() + "\n" + line)
            else:
                out.append(line)

            if not ext:
                # Reset block state for the next chunk.
                tag = 'p'
                atts = ''
                cite = ''
                graf = ''

        if ext:
            out.append(out.pop() + c1)
        return '\n\n'.join(out)
    def fBlock(self, tag, atts, ext, cite, content):
        """
        Render one block, returning a 5-tuple
        ``(outer_open, inner_open, content, inner_close, outer_close)``
        so that block() can omit the outer close tag for extended blocks.

        >>> t = Textile()
        >>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
        ('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
        >>> t.fBlock("bq", "", None, "http://google.com", "Hello BlockQuote")
        ('\\t<blockquote cite="http://google.com">\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
        >>> t.fBlock("bc", "", None, "", 'printf "Hello, World";') # doctest: +ELLIPSIS
        ('<pre>', '<code>', ..., '</code>', '</pre>')
        >>> t.fBlock("h1", "", None, "", "foobar")
        ('', '\\t<h1>', 'foobar', '</h1>', '')
        """
        atts = self.pba(atts)
        o1 = o2 = c2 = c1 = ''

        m = re.search(r'fn(\d+)', tag)
        if m:
            # Footnote definition block: render as an anchored paragraph
            # whose id matches the footnoteRef() link target.
            tag = 'p'
            if m.group(1) in self.fn:
                fnid = self.fn[m.group(1)]
            else:
                fnid = m.group(1)
            atts = atts + ' id="fn%s"' % fnid
            if atts.find('class=') < 0:
                atts = atts + ' class="footnote"'
            content = ('<sup>%s</sup>' % m.group(1)) + content

        if tag == 'bq':
            cite = self.checkRefs(cite)
            if cite:
                cite = ' cite="%s"' % cite
            else:
                cite = ''
            o1 = f"\t<blockquote{cite}{atts}>\n"
            o2 = "\t\t<p%s>" % atts
            c2 = "</p>"
            c1 = "\n\t</blockquote>"

        elif tag == 'bc':
            # Block code: escaped and shelved so later passes skip it.
            o1 = "<pre%s>" % atts
            o2 = "<code%s>" % atts
            c2 = "</code>"
            c1 = "</pre>"
            content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))

        elif tag == 'notextile':
            content = self.shelve(content)
            o1 = o2 = ''
            c1 = c2 = ''

        elif tag == 'pre':
            content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
            o1 = "<pre%s>" % atts
            o2 = c2 = ''
            c1 = '</pre>'

        else:
            o2 = f"\t<{tag}{atts}>"
            c2 = "</%s>" % tag

        content = self.graf(content)
        return o1, o2, content, c2, c1
def footnoteRef(self, text):
"""
>>> t = Textile()
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
"""
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
    def footnoteID(self, match):
        # re.sub callback for footnoteRef(): maps the visible footnote
        # number to a stable unique anchor id (created on first use and
        # reused by fBlock() for the matching definition).
        id, t = match.groups()
        if id not in self.fn:
            self.fn[id] = str(uuid.uuid4())
        fnid = self.fn[id]
        if not t:
            t = ''
        return f'<sup class="footnote"><a href="#fn{fnid}">{id}</a></sup>{t}'
def glyphs(self, text):
"""
>>> t = Textile()
>>> t.glyphs("apostrophe's")
'apostrophe’s'
>>> t.glyphs("back in '88")
'back in ’88'
>>> t.glyphs('foo ...')
'foo …'
>>> t.glyphs('--')
'—'
>>> t.glyphs('FooBar[tm]')
'FooBar™'
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
"""
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
rules = []
if re.search(r'{.+?}', line):
rules = self.macro_defaults + self.glyph_defaults
else:
rules = self.glyph_defaults
for s, r in rules:
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def macros_only(self, text):
# fix: hackish
text = re.sub(r'"\Z', '\" ', text)
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
rules = []
if re.search(r'{.+?}', line):
rules = self.macro_defaults
for s, r in rules:
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def vAlign(self, input):
d = {'^':'top', '-':'middle', '~':'bottom'}
return d.get(input, '')
def hAlign(self, input):
d = {'<':'left', '=':'center', '>':'right', '<>': 'justify'}
return d.get(input, '')
def getRefs(self, text):
"""
what is this for?
"""
pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http(?:s?):\/\/|\/)\S+)(?=\s|$)', re.U)
text = pattern.sub(self.refs, text)
return text
def refs(self, match):
flag, url = match.groups()
self.urlrefs[flag] = url
return ''
def checkRefs(self, url):
return self.urlrefs.get(url, url)
def isRelURL(self, url):
"""
Identify relative urls.
>>> t = Textile()
>>> t.isRelURL("http://www.google.com/")
False
>>> t.isRelURL("/foo")
True
"""
(scheme, netloc) = urlparse(url)[0:2]
return not scheme and not netloc
def relURL(self, url):
scheme = urlparse(url)[0]
if self.restricted and scheme and scheme not in self.url_schemes:
return '#'
return url
def shelve(self, text):
id = str(uuid.uuid4()) + 'c'
self.shelf[id] = text
return id
def retrieve(self, text):
"""
>>> t = Textile()
>>> id = t.shelve("foobar")
>>> t.retrieve(id)
'foobar'
"""
while True:
old = text
for k, v in self.shelf.items():
text = text.replace(k, v)
if text == old:
break
return text
def encode_html(self, text, quotes=True):
a = (
('&', '&'),
('<', '<'),
('>', '>')
)
if quotes:
a = a + (
("'", '''),
('"', '"')
)
for k, v in a:
text = text.replace(k, v)
return text
    def graf(self, text):
        # Run the full inline pipeline over one paragraph. Order matters:
        # protected regions (notextile/code) are shelved first so the
        # later passes cannot touch them.
        if not self.lite:
            text = self.noTextile(text)
            text = self.code(text)

        text = self.links(text)
        if not self.noimage:
            text = self.image(text)

        if not self.lite:
            text = self.lists(text)
            text = self.table(text)

        text = self.span(text)
        text = self.footnoteRef(text)
        text = self.glyphs(text)
        return text.rstrip('\n')
    def links(self, text):
        """
        Convert ``"text":url`` Textile links into anchor tags.

        >>> t = Textile()
        >>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
        'fooobar ... and hello world ...'
        """
        # Character macros must run first so they cannot mangle the
        # anchor markup generated below.
        text = self.macros_only(text)
        punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'

        pattern = r'''
            (?P<pre> [\s\[{{(]|[{}] )?
            "                          # start
            (?P<atts> {} )
            (?P<text> [^"]+? )
            \s?
            (?: \(([^)]+?)\)(?=") )?   # $title
            ":
            (?P<url> (?:ftp|https?)? (?: :// )? [-A-Za-z0-9+&@#/?=~_()|!:,.;]*[-A-Za-z0-9+&@#/=~_()|] )
            (?P<post> [^\w\/;]*? )
            (?=<|\s|$)
        '''.format(re.escape(punct), self.c)
        text = re.compile(pattern, re.X).sub(self.fLink, text)
        return text

    def fLink(self, match):
        # re.sub callback for links(): build the <a> tag and shelve it so
        # later passes leave the generated markup alone.
        pre, atts, text, title, url, post = match.groups()

        if pre is None:
            pre = ''

        # assume ) at the end of the url is not actually part of the url
        # unless the url also contains a (
        if url.endswith(')') and not url.find('(') > -1:
            post = url[-1] + post
            url = url[:-1]

        url = self.checkRefs(url)

        atts = self.pba(atts)
        if title:
            atts = atts + ' title="%s"' % self.encode_html(title)

        if not self.noimage:
            text = self.image(text)

        text = self.span(text)
        text = self.glyphs(text)

        url = self.relURL(url)
        out = f'<a href="{self.encode_html(url)}"{atts}{self.rel}>{text}</a>'
        out = self.shelve(out)
        return ''.join([pre, out, post])
    def span(self, text):
        """
        Convert inline phrase markup (*strong*, **bold**, _em_, ??cite??,
        -del-, +ins+, ~sub~, ^sup^, %span%) into HTML via fSpan().

        >>> t = Textile()
        >>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
        'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
        """
        # Double-character tags must be tried before their single-char
        # counterparts (** before *, __ before _).
        qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
        pnct = ".,\"'?!;:"

        for qtag in qtags:
            pattern = re.compile(r"""
                (?:^|(?<=[\s>%(pnct)s\(])|\[|([\]}]))
                (%(qtag)s)(?!%(qtag)s)
                (%(c)s)
                (?::(\S+))?
                ([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
                ([%(pnct)s]*)
                %(qtag)s
                (?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
            """ % {'qtag':qtag, 'c':self.c, 'pnct':pnct,
                   'selfpnct':self.pnct}, re.X)
            text = pattern.sub(self.fSpan, text)
        return text
def fSpan(self, match):
_, tag, atts, cite, content, end, _ = match.groups()
qtags = {
'*': 'strong',
'**': 'b',
'??': 'cite',
'_' : 'em',
'__': 'i',
'-' : 'del',
'%' : 'span',
'+' : 'ins',
'~' : 'sub',
'^' : 'sup'
}
tag = qtags[tag]
atts = self.pba(atts)
if cite:
atts = atts + 'cite="%s"' % cite
content = self.span(content)
out = f"<{tag}{atts}>{content}{end}</{tag}>"
return out
    def image(self, text):
        """
        Convert ``!src(title)!:href`` image markup into <img> tags.

        >>> t = Textile()
        >>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
        '<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
        """
        pattern = re.compile(r"""
            (?:[\[{])?           # pre
            \!                   # opening !
            (%s)                 # optional style,class atts
            (?:\. )?             # optional dot-space
            ([^\s(!]+)           # presume this is the src
            \s?                  # optional space
            (?:\(([^\)]+)\))?    # optional title
            \!                   # closing
            (?::(\S+))?          # optional href
            (?:[\]}]|(?=\s|$))   # lookahead: space or end of string
        """ % self.c, re.U|re.X)
        return pattern.sub(self.fImage, text)

    def fImage(self, match):
        # re.sub callback for image(), e.g. groups:
        # (None, '', '/imgs/myphoto.jpg', None, None)
        atts, url, title, href = match.groups()
        atts = self.pba(atts)

        if title:
            # The title doubles as the alt text.
            atts = atts + f' title="{title}" alt="{title}"'
        else:
            atts = atts + ' alt=""'

        if not self.isRelURL(url) and self.get_sizes:
            # Optionally probe remote images for width/height.
            size = getimagesize(url)
            if (size):
                atts += " %s" % size

        if href:
            href = self.checkRefs(href)

        url = self.checkRefs(url)
        url = self.relURL(url)

        out = []
        if href:
            out.append('<a href="%s" class="img">' % href)
        if self.html_type == 'html':
            out.append(f'<img src="{url}"{atts}>')
        else:
            out.append(f'<img src="{url}"{atts} />')
        if href:
            out.append('</a>')

        return ''.join(out)
def code(self, text):
text = self.doSpecial(text, '<code>', '</code>', self.fCode)
text = self.doSpecial(text, '@', '@', self.fCode)
text = self.doSpecial(text, '<pre>', '</pre>', self.fPre)
return text
def fCode(self, match):
before, text, after = match.groups()
if after is None:
after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, self.shelve('<code>%s</code>' % text), after])
def fPre(self, match):
before, text, after = match.groups()
if after is None:
after = ''
# text needs to be escapedd
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, '<pre>', self.shelve(text), '</pre>', after])
def doSpecial(self, text, start, end, method=None):
if method is None:
method = self.fSpecial
pattern = re.compile(fr'(^|\s|[\[({{>]){re.escape(start)}(.*?){re.escape(end)}(\s|$|[\])}}])?', re.M|re.S)
return pattern.sub(method, text)
    def fSpecial(self, match):
        """
        special blocks like notextile or code

        Default doSpecial() callback: HTML-escape the matched text and
        shelve it so later passes leave it verbatim.
        """
        before, text, after = match.groups()
        if after is None:
            after = ''
        return ''.join([before, self.shelve(self.encode_html(text)), after])
def noTextile(self, text):
text = self.doSpecial(text, '<notextile>', '</notextile>', self.fTextile)
return self.doSpecial(text, '==', '==', self.fTextile)
    def fTextile(self, match):
        # doSpecial callback for noTextile(): shelve the matched text
        # completely untouched (unlike fSpecial, no HTML escaping).
        before, notextile, after = match.groups()
        if after is None:
            after = ''
        return ''.join([before, self.shelve(notextile), after])
def textile(text, head_offset=0, html_type='xhtml', encoding=None, output=None):
    """
    Convert Textile markup in *text* to HTML.

    this function takes additional parameters:
    head_offset - offset to apply to heading levels (default: 0)
    html_type - 'xhtml' or 'html' style tags (default: 'xhtml')

    NOTE: ``encoding`` and ``output`` are accepted for backwards
    compatibility but are ignored.
    """
    return Textile().textile(text, head_offset=head_offset,
                             html_type=html_type)
def textile_restricted(text, lite=True, noimage=True, html_type='xhtml'):
    """
    Restricted version of Textile designed for weblog comments and other
    untrusted input.

    Raw HTML is escaped.
    Style attributes are disabled.
    rel='nofollow' is added to external links.

    When lite=True is set (the default):
    Block tags are restricted to p, bq, and bc.
    Lists and tables are disabled.

    When noimage=True is set (the default):
    Image tags are disabled.
    """
    return Textile(restricted=True, lite=lite,
                   noimage=noimage).textile(text, rel='nofollow',
                                            html_type=html_type)
| 38,485 | Python | .py | 907 | 31.985667 | 193 | 0.441228 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,592 | develop.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/develop.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import sys
from qt.core import QApplication, QMarginsF, QPageLayout, QPageSize, QUrl
from qt.webengine import QWebEnginePage
from calibre.gui2 import load_builtin_fonts, must_use_qt
from calibre.utils.podofo import get_podofo
OUTPUT = '/t/dev.pdf'
class Renderer(QWebEnginePage):
    # Developer helper: prints the loaded page to an A4 PDF with 1in
    # left/right margins, then sanity-checks the output with podofo.

    def do_print(self, ok):
        # loadFinished slot; the load-success flag `ok` is ignored.
        p = QPageLayout(QPageSize(QPageSize(QPageSize.PageSizeId.A4)), QPageLayout.Orientation.Portrait, QMarginsF(72, 0, 72, 0))
        self.printToPdf(self.print_finished, p)

    def print_finished(self, pdf_data):
        # Write the PDF bytes to OUTPUT and quit the Qt event loop, then
        # load the result with podofo as a smoke test of its validity.
        with open(OUTPUT, 'wb') as f:
            f.write(pdf_data)
        QApplication.instance().exit(0)
        podofo = get_podofo()
        doc = podofo.PDFDoc()
        doc.load(pdf_data)
def main():
    # Render the local HTML file given as the last command line argument
    # to OUTPUT via Qt WebEngine's print-to-PDF.
    must_use_qt()
    load_builtin_fonts()
    renderer = Renderer()
    renderer.setUrl(QUrl.fromLocalFile(sys.argv[-1]))
    renderer.loadFinished.connect(renderer.do_print)
    # The event loop runs until Renderer.print_finished() calls exit().
    QApplication.instance().exec()
    print('Output written to:', OUTPUT)


if __name__ == '__main__':
    main()
| 1,130 | Python | .py | 29 | 33.793103 | 129 | 0.701287 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,593 | image_writer.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/image_writer.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
from qt.core import QMarginsF, QPageLayout, QPageSize, QSizeF
from calibre.constants import filesystem_encoding
from calibre.ebooks.pdf.render.common import cicero, cm, didot, inch, mm, pica
from calibre.ebooks.pdf.render.serialize import PDFStream
from calibre.utils.img import image_and_format_from_data
from calibre.utils.imghdr import identify
from polyglot.builtins import as_unicode
class PDFMetadata:  # {{{
    # Snapshot of the book metadata (title/author/tags) in the form the
    # PDF writer needs, with title and author coerced to unicode.

    def __init__(self, mi=None):
        from calibre import force_unicode
        from calibre.ebooks.metadata import authors_to_string
        unknown = _('Unknown')
        self.title, self.author, self.tags = unknown, unknown, ''
        self.mi = mi

        if mi is not None:
            if mi.title:
                self.title = mi.title
            if mi.authors:
                self.author = authors_to_string(mi.authors)
            if mi.tags:
                self.tags = ', '.join(mi.tags)

        self.title = force_unicode(self.title)
        self.author = force_unicode(self.author)
# }}}
# Page layout {{{
def parse_pdf_page_size(spec, unit='inch', dpi=72.0):
    # Parse a "WIDTHxHEIGHT" custom page-size specification into a
    # QPageSize in points. Returns None (implicitly) when *spec* has no
    # 'x' separator or the numbers fail to parse, which lets the caller
    # fall back to a named paper size.
    width, sep, height = spec.lower().partition('x')
    if height:
        try:
            # Accept a comma as the decimal separator.
            width = float(width.replace(',', '.'))
            height = float(height.replace(',', '.'))
        except Exception:
            pass
        else:
            if unit == 'devicepixel':
                # Device pixels are interpreted at the profile's dpi.
                factor = 72.0 / dpi
            else:
                # Unknown units fall back to points (factor 1.0).
                factor = {
                    'point':1.0, 'inch':inch, 'cicero':cicero,
                    'didot':didot, 'pica':pica, 'millimeter':mm,
                    'centimeter':cm
                }.get(unit, 1.0)
            return QPageSize(QSizeF(factor*width, factor*height), QPageSize.Unit.Point, matchPolicy=QPageSize.SizeMatchPolicy.ExactMatch)
def get_page_size(opts, for_comic=False):
    # Compute the output page size: the output profile's (comic) screen
    # size when requested and sensible, else a custom WxH spec, else the
    # named paper size from opts.paper_size.
    use_profile = opts.use_profile_size and opts.output_profile.short_name != 'default' and opts.output_profile.width <= 9999
    if use_profile:
        w = (opts.output_profile.comic_screen_size[0] if for_comic else
             opts.output_profile.width)
        h = (opts.output_profile.comic_screen_size[1] if for_comic else
             opts.output_profile.height)
        dpi = opts.output_profile.dpi
        # Convert device pixels at the profile dpi into points.
        factor = 72.0 / dpi
        page_size = QPageSize(QSizeF(factor * w, factor * h), QPageSize.Unit.Point, matchPolicy=QPageSize.SizeMatchPolicy.ExactMatch)
    else:
        page_size = None
        if opts.custom_size is not None:
            # parse_pdf_page_size() returns None on an invalid spec.
            page_size = parse_pdf_page_size(opts.custom_size, opts.unit, opts.output_profile.dpi)
        if page_size is None:
            page_size = QPageSize(getattr(QPageSize.PageSizeId, opts.paper_size.capitalize()))
    return page_size
def get_page_layout(opts, for_comic=False):
    # Build the QPageLayout (page size plus margins, in points) for the
    # output document.
    page_size = get_page_size(opts, for_comic)

    def m(which):
        # PDF-specific margins override the generic conversion margins;
        # negative values are clamped to 0.
        return max(0, getattr(opts, 'pdf_page_margin_' + which) or getattr(opts, 'margin_' + which))
    margins = QMarginsF(m('left'), m('top'), m('right'), m('bottom'))
    ans = QPageLayout(page_size, QPageLayout.Orientation.Portrait, margins)
    ans.setMode(QPageLayout.Mode.FullPageMode)
    return ans
# }}}
class Image:  # {{{
    # Decoded image plus its raw bytes, ready for embedding in the PDF.

    def __init__(self, path_or_bytes):
        # Accept either a filesystem path or the raw image bytes.
        if not isinstance(path_or_bytes, bytes):
            with open(path_or_bytes, 'rb') as f:
                path_or_bytes = f.read()
        self.img_data = path_or_bytes
        # NOTE(review): the identify() results are unused (width/height
        # are taken from the decoded image below); presumably the call is
        # kept so malformed data fails early -- confirm before removing.
        fmt, width, height = identify(path_or_bytes)
        self.img, self.fmt = image_and_format_from_data(path_or_bytes)
        self.width, self.height = self.img.width(), self.img.height()
        self.cache_key = self.img.cacheKey()
# }}}
def draw_image_page(writer, img, preserve_aspect_ratio=True):
    # Draw *img* scaled to fill the current page, optionally preserving
    # its aspect ratio by centering it along the shorter axis.
    if img.fmt == 'jpeg':
        # JPEG data can be embedded into the PDF as-is.
        ref = writer.add_jpeg_image(img.img_data, img.width, img.height, img.cache_key, depth=img.img.depth())
    else:
        ref = writer.add_image(img.img, img.cache_key)
    page_size = tuple(writer.page_size)
    scaling = list(writer.page_size)
    translation = [0, 0]
    img_ar = img.width / img.height
    page_ar = page_size[0]/page_size[1]
    if preserve_aspect_ratio and page_ar != img_ar:
        if page_ar > img_ar:
            # Page is wider than the image: shrink width, center it.
            scaling[0] = img_ar * page_size[1]
            translation[0] = (page_size[0] - scaling[0]) / 2
        else:
            # Page is taller than the image: shrink height, center it.
            scaling[1] = page_size[0] / img_ar
            translation[1] = (page_size[1] - scaling[1]) / 2
    writer.draw_image_with_transform(ref, translation=translation, scaling=scaling)
def convert(images, output_path, opts, metadata, report_progress):
    # Render a sequence of image files (comic pages) into a PDF at
    # *output_path*, one full-bleed page per image, reporting progress as
    # a fraction through report_progress(frac, message).
    with open(output_path, 'wb') as buf:
        page_layout = get_page_layout(opts, for_comic=True)
        page_size = page_layout.fullRectPoints().size()
        writer = PDFStream(buf, (page_size.width(), page_size.height()), compress=True)
        # White page background.
        writer.apply_fill(color=(1, 1, 1))
        pdf_metadata = PDFMetadata(metadata)
        writer.set_metadata(pdf_metadata.title, pdf_metadata.author, pdf_metadata.tags, pdf_metadata.mi)
        for i, path in enumerate(images):
            img = Image(as_unicode(path, filesystem_encoding))
            # Only the cover (first image) honours the aspect-ratio option.
            preserve_aspect_ratio = opts.preserve_cover_aspect_ratio if i == 0 else True
            draw_image_page(writer, img, preserve_aspect_ratio=preserve_aspect_ratio)
            writer.end_page()
            report_progress((i + 1) / len(images), _('Rendered {0} of {1} pages').format(i + 1, len(images)))
        writer.end()
27,594 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Used for pdf output for comic2pdf
'''
| 187 | Python | .py | 7 | 25.285714 | 56 | 0.694915 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,595 | pdftohtml.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/pdftohtml.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2008, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import os
import re
import shutil
import subprocess
from calibre import CurrentDir, prints, xml_replace_entities
from calibre.constants import bundled_binaries_dir, isbsd, iswindows
from calibre.ebooks import ConversionError, DRMError
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.ipc import eintr_retry_call
# Name of the poppler pdftohtml executable (platform-specific extension)
PDFTOHTML = 'pdftohtml' + ('.exe' if iswindows else '')


def popen(cmd, **kw):
    # Thin wrapper over subprocess.Popen that, on Windows, detaches the child
    # process so no console window is shown.
    if iswindows:
        kw['creationflags'] = subprocess.DETACHED_PROCESS
    return subprocess.Popen(cmd, **kw)


# Prefer the binaries bundled with calibre, when present
if bbd := bundled_binaries_dir():
    PDFTOHTML = os.path.join(bbd, PDFTOHTML)
# pdftotext is expected to live next to pdftohtml
PDFTOTEXT = os.path.join(os.path.dirname(PDFTOHTML), 'pdftotext' + ('.exe' if iswindows else ''))
def pdftohtml(output_dir, pdf_path, no_images, as_xml=False):
    '''
    Convert the pdf into html using the pdftohtml app.
    This will write the html as index.html into output_dir.
    It will also write all extracted images to the output_dir
    '''
    pdfsrc = os.path.join(output_dir, 'src.pdf')
    index = os.path.join(output_dir, 'index.'+('xml' if as_xml else 'html'))

    # Work on a private copy of the PDF inside output_dir, so pdftohtml's
    # relative output paths land next to it
    with open(pdf_path, 'rb') as src, open(pdfsrc, 'wb') as dest:
        shutil.copyfileobj(src, dest)

    with CurrentDir(output_dir):

        def a(x):
            return os.path.basename(x)

        exe = PDFTOHTML
        cmd = [exe, '-enc', 'UTF-8', '-noframes', '-p', '-nomerge',
               '-nodrm', a(pdfsrc), a(index)]

        if isbsd:
            # the BSD builds of pdftohtml do not support -nodrm
            cmd.remove('-nodrm')
        if no_images:
            cmd.append('-i')
        if as_xml:
            cmd.append('-xml')

        logf = PersistentTemporaryFile('pdftohtml_log')
        try:
            p = popen(cmd, stderr=logf._fd, stdout=logf._fd,
                    stdin=subprocess.PIPE)
        except OSError as err:
            if err.errno == errno.ENOENT:
                raise ConversionError(
                    _('Could not find pdftohtml, check it is in your PATH'))
            else:
                raise
        ret = eintr_retry_call(p.wait)
        logf.flush()
        logf.close()
        out = open(logf.name, 'rb').read().decode('utf-8', 'replace').strip()
        if ret != 0:
            raise ConversionError('pdftohtml failed with return code: %d\n%s' % (ret, out))
        if out:
            prints("pdftohtml log:")
            prints(out)
        if not os.path.exists(index) or os.stat(index).st_size < 100:
            # Missing or near-empty output is how DRMed PDFs manifest here
            raise DRMError()

        if not as_xml:
            with open(index, 'r+b') as i:
                raw = i.read().decode('utf-8', 'replace')
                raw = flip_images(raw)
                raw = raw.replace('<head', '<!-- created by calibre\'s pdftohtml -->\n <head', 1)
                i.seek(0)
                i.truncate()
                # versions of pdftohtml >= 0.20 output self closing <br> tags, this
                # breaks the pdf heuristics regexps, so replace them
                raw = raw.replace('<br/>', '<br>')
                # Turn pdftohtml's numeric <a name=N> anchors into id="pN"
                # anchors that internal links can target
                raw = re.sub(r'<a\s+name=(\d+)', r'<a id="\1"', raw, flags=re.I)
                raw = re.sub(r'<a id="(\d+)"', r'<a id="p\1"', raw, flags=re.I)
                raw = re.sub(r'<a href="index.html#(\d+)"', r'<a href="#p\1"', raw, flags=re.I)
                raw = xml_replace_entities(raw)
                # Replace no-break spaces and paragraph separators with
                # ordinary spaces
                raw = re.sub('[\u00a0\u2029]', ' ', raw)
                i.write(raw.encode('utf-8'))

            # Second pass: ask pdftohtml for the outline (ToC) of page 1 as
            # XML on stdout and convert it to an NCX, best effort
            cmd = [exe, '-f', '1', '-l', '1', '-xml', '-i', '-enc', 'UTF-8', '-noframes', '-p', '-nomerge',
                   '-nodrm', '-q', '-stdout', a(pdfsrc)]
            if isbsd:
                cmd.remove('-nodrm')
            p = popen(cmd, stdout=subprocess.PIPE)
            raw = p.stdout.read().strip()
            if p.wait() == 0 and raw:
                parse_outline(raw, output_dir)

    try:
        os.remove(pdfsrc)
    except:
        pass
def parse_outline(raw, output_dir):
    # Convert pdftohtml's XML <outline> (ToC) output into a toc.ncx file in
    # output_dir. Entries point at index.html#p<pagenum> anchors. ToCs with
    # fewer than three entries are discarded as not useful.
    from lxml import etree

    from calibre.utils.xml_parse import safe_xml_fromstring
    raw = clean_xml_chars(xml_to_unicode(raw, strip_encoding_pats=True, assume_utf8=True)[0])
    outline = safe_xml_fromstring(raw).xpath('(//outline)[1]')
    if outline:
        from calibre.ebooks.oeb.polish.toc import TOC, create_ncx
        outline = outline[0]
        toc = TOC()
        count = [0]  # mutable counter shared with the recursive closure

        def process_node(node, toc):
            for child in node.iterchildren('*'):
                if child.tag == 'outline':
                    # Nested outlines attach under the most recent sibling
                    parent = toc.children[-1] if toc.children else toc
                    process_node(child, parent)
                else:
                    if child.text:
                        page = child.get('page', '1')
                        toc.add(child.text, 'index.html', 'p' + page)
                        count[0] += 1
        process_node(outline, toc)
        if count[0] > 2:
            root = create_ncx(toc, (lambda x:x), 'pdftohtml', 'en', 'pdftohtml')
            with open(os.path.join(output_dir, 'toc.ncx'), 'wb') as f:
                f.write(etree.tostring(root, pretty_print=True, with_tail=False, encoding='utf-8', xml_declaration=True))
def flip_image(img, flip):
    '''Mirror the image file at path *img* in place. *flip* is a string
    containing 'x' (horizontal) and/or 'y' (vertical).'''
    from calibre.utils.img import flip_image, image_and_format_from_data, image_to_data
    with open(img, 'r+b') as fobj:
        loaded, fmt = image_and_format_from_data(fobj.read())
        flipped = flip_image(loaded, horizontal='x' in flip, vertical='y' in flip)
        fobj.seek(0)
        fobj.truncate()
        fobj.write(image_to_data(flipped, fmt=fmt))
def flip_images(raw):
    '''Process the HTML produced by pdftohtml: physically flip image files
    that are marked with the x/y-flip CSS classes, strip the <STYLE> blocks
    (no longer needed once the flips are baked in) and give every image tag
    a numbered alt attribute, self-closing it.'''
    img_pat = re.compile('<IMG[^>]+/?>', flags=re.I)
    cls_pat = re.compile(r'class="(x|y|xy)flip"')
    src_pat = re.compile(r'src="([^"]+)"')
    for tag_match in img_pat.finditer(raw):
        tag = tag_match.group()
        cls = cls_pat.search(tag)
        if cls is None:
            continue
        src = src_pat.search(tag)
        if src is None:
            continue
        path = src.group(1)
        # src paths are relative to the output directory (the CWD here)
        if os.path.exists(path):
            flip_image(path, cls.group(1))
    raw = re.sub(r'<STYLE.+?</STYLE>\s*', '', raw, flags=re.I|re.DOTALL)
    images_seen = [0]

    def annotate(m):
        images_seen[0] += 1
        return m.group(1).rstrip('/') + f' alt="Image {images_seen[0]}"/>'

    raw = re.sub('(<IMG[^>]+)/?>', annotate, raw, flags=re.I)
    return raw
| 6,395 | Python | .py | 147 | 33.47619 | 121 | 0.559916 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,596 | html_writer.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/html_writer.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
# Imports {{{
import copy
import json
import os
import signal
import sys
from collections import namedtuple
from functools import lru_cache
from itertools import count, repeat
from html5_parser import parse
from qt.core import QApplication, QByteArray, QMarginsF, QObject, QPageLayout, Qt, QTimer, QUrl, pyqtSignal, sip
from qt.webengine import (
QWebEnginePage,
QWebEngineProfile,
QWebEngineSettings,
QWebEngineUrlRequestInterceptor,
QWebEngineUrlRequestJob,
QWebEngineUrlSchemeHandler,
)
from calibre import detect_ncpus, human_readable, prepare_string_for_xml
from calibre.constants import FAKE_HOST, FAKE_PROTOCOL, __appname__, __version__, ismacos, iswindows
from calibre.ebooks.metadata.xmp import metadata_to_xmp_packet
from calibre.ebooks.oeb.base import XHTML, XPath
from calibre.ebooks.oeb.polish.container import Container as ContainerBase
from calibre.ebooks.oeb.polish.toc import get_toc
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.ebooks.pdf.image_writer import PDFMetadata, get_page_layout
from calibre.gui2 import setup_unix_signals
from calibre.srv.render_book import check_for_maths
from calibre.utils.fonts.sfnt.container import Sfnt, UnsupportedFont
from calibre.utils.fonts.sfnt.errors import NoGlyphs
from calibre.utils.fonts.sfnt.merge import merge_truetype_fonts_for_pdf
from calibre.utils.fonts.sfnt.subset import pdf_subset
from calibre.utils.logging import default_log
from calibre.utils.monotonic import monotonic
from calibre.utils.podofo import add_image_page, dedup_type3_fonts, get_podofo, remove_unused_fonts, set_metadata_implementation
from calibre.utils.resources import get_path as P
from calibre.utils.short_uuid import uuid4
from calibre.utils.webengine import secure_webengine, send_reply, setup_profile
from polyglot.builtins import as_bytes, iteritems
from polyglot.urllib import urlparse
# Exit codes for the render event loop
OK, KILL_SIGNAL = range(0, 2)
# Abort a page load if no load progress is reported for this long
HANG_TIME = 60  # seconds
# }}}
# Utils {{{
def data_as_pdf_doc(data):
    '''Load raw PDF bytes into a podofo PDFDoc object.'''
    doc = get_podofo().PDFDoc()
    doc.load(data)
    return doc
def preprint_js():
    '''Return (and cache on the function object) the JavaScript that is run
    in each page just before printing, with its HYPHEN_CHAR placeholder
    substituted based on the platform.'''
    cached = getattr(preprint_js, 'ans', None)
    if cached is None:
        src = P('pdf-preprint.js', data=True).decode('utf-8')
        cached = preprint_js.ans = src.replace('HYPHEN_CHAR', 'true' if ismacos else 'false', 1)
    return cached
def last_tag(root):
    # The last element child of *root*; for an XHTML document this is <body>.
    children = list(root.iterchildren('*'))
    return children[-1]
def create_skeleton(container):
    # Create a new, empty spine file by cloning the last spine item and
    # dropping everything after its <body>, which is itself emptied. Used
    # as a blank canvas for rendering headers/footers and the inline ToC.
    spine_name = tuple(container.spine_names)[-1][0]
    root = container.parsed(spine_name)
    root = copy.deepcopy(root)
    body = None
    for child in tuple(root.iterchildren('*')):
        if body is None:
            if child.tag == XHTML('body') or child.tag == 'body':
                body = child
        else:
            # remove all siblings that follow the body
            root.remove(child)
    if body is None:
        # no explicit <body>: fall back to the last element child
        body = last_tag(root)
    body.text = body.tail = None
    del body[:]  # empty the body of all children
    name = container.add_file(spine_name, b'', modify_name_if_needed=True)
    container.replace(name, root)
    return name
def local_name(x):
    # Strip any Clark-notation {namespace} prefix from a tag name, lowercased.
    if '}' in x:
        x = x.split('}', 1)[1]
    return x.lower()
def fix_fullscreen_images(container):
    # Detect the common "full page SVG image" markup (a body whose only
    # significant child is a div/svg wrapping a single <image> sized 100%)
    # and resize the svg to the full viewport (100vw/100vh) so it renders
    # edge-to-edge in the print output.

    def is_svg_fs_markup(names, svg):
        # names is the flat list of significant descendant tag names; the
        # accepted shapes are [svg, image] or [div, svg, image]
        if svg is not None:
            if len(names) == 2 or len(names) == 3:
                if names[-1] == 'image' and names[-2] == 'svg':
                    if len(names) == 2 or names[0] == 'div':
                        if svg.get('width') == '100%' and svg.get('height') == '100%':
                            return True
        return False

    for file_name, is_linear in container.spine_names:
        root = container.parsed(file_name)
        root_kids = tuple(root.iterchildren('*'))
        if not root_kids:
            continue
        body = root_kids[-1]
        # Collect the body's non-script/style child tags, stopping as soon as
        # there is more than one (then it cannot be the fullscreen pattern)
        child_tags = []
        for child in body.iterchildren('*'):
            tag = local_name(child.tag)
            if tag in ('script', 'style'):
                continue
            child_tags.append(tag)
            if len(child_tags) > 1:
                break
        if len(child_tags) == 1 and child_tags[0] in ('div', 'svg'):
            names = []
            svg = None
            for elem in body.iterdescendants('*'):
                name = local_name(elem.tag)
                if name != 'style' and name != 'script':
                    names.append(name)
                    if name == 'svg':
                        svg = elem
            if is_svg_fs_markup(names, svg):
                svg.set('width', '100vw')
                svg.set('height', '100vh')
                container.dirty(file_name)
# }}}
# Renderer {{{
class Container(ContainerBase):
    # Book container over an unpacked (on-disk) book directory.
    # tweak_mode relaxes the base class's name handling; is_dir marks the
    # book as a directory tree rather than a zip archive.

    tweak_mode = True
    is_dir = True

    def __init__(self, opf_path, log, root_dir=None):
        # root defaults to the directory containing the OPF
        ContainerBase.__init__(self, root_dir or os.path.dirname(opf_path), opf_path, log)
class UrlSchemeHandler(QWebEngineUrlSchemeHandler):
    # Serves book resources (and bundled MathJax files) to the WebEngine
    # render processes over the FAKE_PROTOCOL:// scheme. Anything that is
    # not a GET for an allowed host/scheme/path is refused.

    def __init__(self, container, parent=None):
        QWebEngineUrlSchemeHandler.__init__(self, parent)
        self.allowed_hosts = (FAKE_HOST,)
        self.container = container

    def requestStarted(self, rq):
        if bytes(rq.requestMethod()) != b'GET':
            return self.fail_request(rq, QWebEngineUrlRequestJob.Error.RequestDenied)
        url = rq.requestUrl()
        host = url.host()
        if host not in self.allowed_hosts or url.scheme() != FAKE_PROTOCOL:
            return self.fail_request(rq)
        path = url.path()
        if path.startswith('/book/'):
            name = path[len('/book/'):]
            try:
                mime_type = self.container.mime_map.get(name) or guess_type(name)
                try:
                    with self.container.open(name) as f:
                        # refuse paths that resolve outside the book directory
                        q = os.path.abspath(f.name)
                        if not q.startswith(self.container.root):
                            raise FileNotFoundError('Attempt to leave sandbox')
                        data = f.read()
                except FileNotFoundError:
                    print(f'Could not find file {name} in book', file=sys.stderr)
                    rq.fail(QWebEngineUrlRequestJob.Error.UrlNotFound)
                    return
                data = as_bytes(data)
                mime_type = {
                    # Prevent warning in console about mimetype of fonts
                    'application/vnd.ms-opentype':'application/x-font-ttf',
                    'application/x-font-truetype':'application/x-font-ttf',
                    'application/font-sfnt': 'application/x-font-ttf',
                }.get(mime_type, mime_type)
                send_reply(rq, mime_type, data)
            except Exception:
                import traceback
                traceback.print_exc()
                return self.fail_request(rq, QWebEngineUrlRequestJob.Error.RequestFailed)
        elif path.startswith('/mathjax/'):
            # path looks like /mathjax/<base>/<rest>; base selects between
            # the loader resources and the MathJax data files
            try:
                ignore, ignore, base, rest = path.split('/', 3)
            except ValueError:
                print(f'Could not find file {path} in mathjax', file=sys.stderr)
                rq.fail(QWebEngineUrlRequestJob.Error.UrlNotFound)
                return
            try:
                mime_type = guess_type(rest)
                if base == 'loader' and '/' not in rest and '\\' not in rest:
                    data = P(rest, allow_user_override=False, data=True)
                elif base == 'data':
                    # NOTE(review): mathjax_dir is not among the imports
                    # visible in this file chunk -- presumably imported
                    # elsewhere; confirm before refactoring.
                    q = os.path.abspath(os.path.join(mathjax_dir(), rest))
                    if not q.startswith(mathjax_dir()):
                        raise FileNotFoundError('')
                    with open(q, 'rb') as f:
                        data = f.read()
                else:
                    raise FileNotFoundError('')
                send_reply(rq, mime_type, data)
            except FileNotFoundError:
                print(f'Could not find file {path} in mathjax', file=sys.stderr)
                rq.fail(QWebEngineUrlRequestJob.Error.UrlNotFound)
                return
            except Exception:
                import traceback
                traceback.print_exc()
                return self.fail_request(rq, QWebEngineUrlRequestJob.Error.RequestFailed)
        else:
            return self.fail_request(rq)

    def fail_request(self, rq, fail_code=None):
        # Fail the request job and log the blocked URL to stderr
        if fail_code is None:
            fail_code = QWebEngineUrlRequestJob.Error.UrlNotFound
        rq.fail(fail_code)
        print(f"Blocking FAKE_PROTOCOL request: {rq.requestUrl().toString()} with code: {fail_code}", file=sys.stderr)
# }}}
class Renderer(QWebEnginePage):
    # A headless WebEngine page that loads one HTML spine file and prints it
    # to PDF. Emits work_done(self, result) where result is either the PDF
    # bytes or, on failure, an error string.

    work_done = pyqtSignal(object, object)

    def __init__(self, opts, parent, log):
        QWebEnginePage.__init__(self, parent.profile, parent)
        secure_webengine(self)
        self.working = False
        self.log = log
        self.load_complete = False
        self.settle_time = 0
        self.wait_for_title = None
        s = self.settings()
        s.setAttribute(QWebEngineSettings.WebAttribute.JavascriptEnabled, True)
        s.setFontSize(QWebEngineSettings.FontSize.DefaultFontSize, int(opts.pdf_default_font_size))
        s.setFontSize(QWebEngineSettings.FontSize.DefaultFixedFontSize, int(opts.pdf_mono_font_size))
        s.setFontSize(QWebEngineSettings.FontSize.MinimumLogicalFontSize, 8)
        s.setFontSize(QWebEngineSettings.FontSize.MinimumFontSize, 8)
        # The "standard" font family follows whichever family the user chose
        std = {
            'serif': opts.pdf_serif_family,
            'sans' : opts.pdf_sans_family,
            'mono' : opts.pdf_mono_family
        }.get(opts.pdf_standard_font, opts.pdf_serif_family)
        if std:
            s.setFontFamily(QWebEngineSettings.FontFamily.StandardFont, std)
        if opts.pdf_serif_family:
            s.setFontFamily(QWebEngineSettings.FontFamily.SerifFont, opts.pdf_serif_family)
        if opts.pdf_sans_family:
            s.setFontFamily(QWebEngineSettings.FontFamily.SansSerifFont, opts.pdf_sans_family)
        if opts.pdf_mono_family:
            s.setFontFamily(QWebEngineSettings.FontFamily.FixedFont, opts.pdf_mono_family)
        self.titleChanged.connect(self.title_changed)
        self.loadStarted.connect(self.load_started)
        self.loadProgress.connect(self.load_progress)
        self.loadFinished.connect(self.load_finished)
        # Watchdog: abort loads that make no progress for HANG_TIME seconds
        self.load_hang_check_timer = t = QTimer(self)
        self.load_started_at = 0
        t.setTimerType(Qt.TimerType.VeryCoarseTimer)
        t.setInterval(HANG_TIME * 1000)
        t.setSingleShot(True)
        t.timeout.connect(self.on_load_hang)

    def load_started(self):
        self.load_started_at = monotonic()
        self.load_complete = False
        self.load_hang_check_timer.start()

    def load_progress(self, amt):
        # any progress restarts the hang watchdog
        self.load_hang_check_timer.start()

    def on_load_hang(self):
        self.log(self.log_prefix, f'Loading not complete after {int(monotonic() - self.load_started_at)} seconds, aborting.')
        self.load_finished(False)

    def title_changed(self, title):
        # When waiting for in-page JS (e.g. MathJax) to signal readiness via
        # a title change, start printing once it does.
        # NOTE(review): settle_time is passed here without the 1000x scaling
        # used in load_finished() -- confirm whether ms vs s is intended.
        if self.wait_for_title and title == self.wait_for_title and self.load_complete:
            QTimer.singleShot(self.settle_time, self.print_to_pdf)

    @property
    def log_prefix(self):
        return os.path.basename(self.url().toLocalFile()) + ':'

    def load_finished(self, ok):
        self.load_complete = True
        self.load_hang_check_timer.stop()
        if not ok:
            self.working = False
            self.work_done.emit(self, f'Load of {self.url().toString()} failed')
            return
        if self.wait_for_title and self.title() != self.wait_for_title:
            # keep waiting; title_changed() will trigger the print
            self.log(self.log_prefix, 'Load finished, waiting for title to change to:', self.wait_for_title)
            return
        QTimer.singleShot(int(1000 * self.settle_time), self.print_to_pdf)

    def javaScriptConsoleMessage(self, level, message, linenum, source_id):
        # forward page console output to the conversion log, best effort
        try:
            self.log(f'{source_id}:{linenum}:{message}')
        except Exception:
            pass

    def print_to_pdf(self):
        # run the pre-print JS first, then start the actual print
        self.runJavaScript(preprint_js(), self.start_print)

    def start_print(self, *a):
        self.printToPdf(self.printing_done, self.page_layout)

    def printing_done(self, pdf_data):
        self.working = False
        if not sip.isdeleted(self):
            self.work_done.emit(self, bytes(pdf_data))

    def convert_html_file(self, path, page_layout, settle_time=0, wait_for_title=None):
        # Queue this page for rendering: load it over the fake scheme and
        # remember the layout/settle parameters for the print step.
        self.working = True
        self.load_complete = False
        self.wait_for_title = wait_for_title
        self.settle_time = settle_time
        self.page_layout = page_layout
        url = QUrl(f'{FAKE_PROTOCOL}://{FAKE_HOST}/')
        url.setPath(path)
        self.setUrl(url)
        self.job_started_at = monotonic()
class RequestInterceptor(QWebEngineUrlRequestInterceptor):
    # Blocks every network request that is not a GET/HEAD for a resource
    # served over the book's fake scheme, so pages cannot reach the network.

    def interceptRequest(self, request_info):
        method = bytes(request_info.requestMethod())
        if method not in (b'GET', b'HEAD'):
            self.log.warn(f'Blocking URL request with method: {method}')
            request_info.block(True)
            return
        qurl = request_info.requestUrl()
        if qurl.scheme() not in (FAKE_PROTOCOL,):
            self.log.warn(f'Blocking URL request {qurl.toString()} as it is not for a resource in the book')
            request_info.block(True)
            return
class RenderManager(QObject):
    # Owns the WebEngine profile and a pool of Renderer workers, and drives
    # the Qt event loop to render a batch of HTML files to PDF in parallel.

    def __init__(self, opts, log, container):
        QObject.__init__(self)
        self.interceptor = RequestInterceptor(self)
        self.has_maths = {}
        self.interceptor.log = self.log = log
        ans = QWebEngineProfile(QApplication.instance())
        setup_profile(ans)
        self.url_handler = UrlSchemeHandler(container, parent=ans)
        ans.installUrlSchemeHandler(QByteArray(FAKE_PROTOCOL.encode('ascii')), self.url_handler)
        ua = 'calibre-pdf-output ' + __version__
        ans.setHttpUserAgent(ua)
        s = ans.settings()
        s.setDefaultTextEncoding('utf-8')
        ans.setUrlRequestInterceptor(self.interceptor)
        self.profile = ans
        self.opts = opts
        self.workers = []
        self.max_workers = detect_ncpus()
        # On Unix, route termination signals through signal_received() so the
        # event loop can exit cleanly
        if iswindows:
            self.original_signal_handlers = {}
        else:
            self.original_signal_handlers = setup_unix_signals(self)

    def create_worker(self):
        worker = Renderer(self.opts, self, self.log)
        worker.work_done.connect(self.work_done)
        self.workers.append(worker)

    def signal_received(self, read_fd):
        # called via the self-pipe trick when a Unix signal arrives
        try:
            os.read(read_fd, 1024)
        except OSError:
            return
        QApplication.instance().exit(KILL_SIGNAL)

    def block_signal_handlers(self):
        for sig in self.original_signal_handlers:
            signal.signal(sig, lambda x, y: None)

    def restore_signal_handlers(self):
        for sig, handler in self.original_signal_handlers.items():
            signal.signal(sig, handler)

    def run_loop(self):
        # Run the Qt event loop with Python signal handlers suspended, so
        # signals are delivered via the fd-based mechanism instead
        self.block_signal_handlers()
        try:
            return QApplication.exec()
        finally:
            self.restore_signal_handlers()

    def convert_html_files(self, jobs, settle_time=0, wait_for_title=None, has_maths=None):
        # Render all jobs (tuples of (html_file, page_layout, result_key))
        # and return a {result_key: pdf_bytes_or_error} map.
        self.has_maths = has_maths or {}
        self.render_count = 0
        self.total_count = len(jobs)
        while len(self.workers) < min(len(jobs), self.max_workers):
            self.create_worker()

        self.pending = list(jobs)
        self.log(f'Rendering {len(self.pending)} HTML files')
        self.results = {}
        self.settle_time = settle_time
        self.wait_for_title = wait_for_title
        QTimer.singleShot(0, self.assign_work)
        ret = self.run_loop()
        self.has_maths = {}
        if ret == KILL_SIGNAL:
            raise SystemExit('Kill signal received')
        if ret != OK:
            raise SystemExit('Unknown error occurred')
        return self.results

    def evaljs(self, js):
        # Synchronously evaluate JS in a worker page, returning its result
        if not self.workers:
            self.create_worker()
        w = self.workers[0]
        self.evaljs_result = None
        w.runJavaScript(js, self.evaljs_callback)
        QApplication.exec()
        return self.evaljs_result

    def evaljs_callback(self, result):
        self.evaljs_result = result
        QApplication.instance().exit(0)

    def assign_work(self):
        # Hand pending jobs to idle workers; pages containing maths wait for
        # the MathJax completion title and get double the settle time
        free_workers = [w for w in self.workers if not w.working]
        while free_workers and self.pending:
            html_file, page_layout, result_key = self.pending.pop()
            w = free_workers.pop()
            w.result_key = result_key
            wait_for_title = self.wait_for_title
            settle_time = self.settle_time
            if self.has_maths.get(result_key):
                wait_for_title = 'mathjax-load-complete'
                settle_time *= 2
            w.convert_html_file(html_file, page_layout, settle_time=settle_time, wait_for_title=wait_for_title)

    def work_done(self, worker, result):
        self.results[worker.result_key] = result
        for w in self.workers:
            if not w.working and w.job_started_at > 0:
                time_taken = monotonic() - w.job_started_at
                self.render_count += 1
                self.log.debug(f'Rendered: {worker.result_key} in {time_taken:.1f} seconds ({self.render_count}/{self.total_count})')
                w.job_started_at = 0
        if self.pending:
            self.assign_work()
        else:
            # exit the loop only once every worker is idle
            for w in self.workers:
                if w.working:
                    return
            QApplication.instance().exit(OK)
def resolve_margins(margins, page_layout):
    '''Return a Margins tuple, taking each side from *margins* when set and
    falling back to the page layout's current margins (in points).'''
    defaults = page_layout.marginsPoints()

    def resolved(side):
        val = getattr(margins, side, None)
        # None means "not overridden": use the layout's existing value
        return getattr(defaults, side)() if val is None else val

    return Margins(resolved('left'), resolved('top'), resolved('right'), resolved('bottom'))
def job_for_name(container, name, margins, page_layout):
    # Build the (url_path, page_layout, result_key) tuple consumed by
    # RenderManager.convert_html_files() for one spine file.
    index_file = '/book/' + name
    if margins:
        # Apply per-file margin overrides on a copy, leaving the shared
        # layout object untouched
        page_layout = QPageLayout(page_layout)
        page_layout.setUnits(QPageLayout.Unit.Point)
        new_margins = QMarginsF(*resolve_margins(margins, page_layout))
        page_layout.setMargins(new_margins)
    return index_file, page_layout, name
# }}}
# Metadata {{{
def update_metadata(pdf_doc, pdf_metadata):
    '''Write title, authors, producer, tags and an XMP packet into the PDF,
    when book metadata is available.'''
    mi = pdf_metadata.mi
    if not mi:
        return
    xmp_packet = metadata_to_xmp_packet(mi)
    set_metadata_implementation(
        pdf_doc, pdf_metadata.title, mi.authors,
        mi.book_producer, mi.tags, xmp_packet)
def add_cover(pdf_doc, cover_data, page_layout, opts):
    '''Insert the cover image as a page sized to the full page rect (points).'''
    rect = page_layout.fullRect(QPageLayout.Unit.Point)
    page_size = (rect.left(), rect.top(), rect.width(), rect.height())
    add_image_page(pdf_doc, cover_data, page_size=page_size,
                   preserve_aspect_ratio=opts.preserve_cover_aspect_ratio)
# }}}
# Margin groups {{{
# Page margins in points; a side of None means "use the default"
Margins = namedtuple('Margins', 'left top right bottom')
# A spine file name paired with its (possibly None) margin overrides
MarginFile = namedtuple('MarginFile', 'name margins')


def dict_to_margins(val, d=None):
    # Convert a {'left': ..., 'top': ...} dict into a Margins tuple,
    # defaulting any missing side to d.
    return Margins(val.get('left', d), val.get('top', d), val.get('right', d), val.get('bottom', d))
def create_margin_files(container):
    '''Yield a MarginFile for each spine item, reading per-file margin
    overrides from the data-calibre-pdf-output-page-margins attribute on the
    document root, when present.'''
    for name, is_linear in container.spine_names:
        raw = container.parsed(name).get('data-calibre-pdf-output-page-margins')
        overrides = dict_to_margins(json.loads(raw)) if raw else raw
        yield MarginFile(name, overrides)
# }}}
# Link handling {{{
def add_anchors_markup(root, uuid, anchors):
    # Append a page-broken <div> to the end of <body> containing one link
    # per anchor id plus a final link to the uuid marker. After rendering,
    # the PDF page each link lands on reveals where every anchor ended up.
    body = last_tag(root)
    div = body.makeelement(
        XHTML('div'), id=uuid,
        style='display:block !important; page-break-before: always !important; break-before: always !important; white-space: pre-wrap !important'
    )
    div.text = '\n\n'
    body.append(div)
    c = count()

    def a(anchor):
        num = next(c)
        a = div.makeelement(
            XHTML('a'), href='#' + anchor,
            style='min-width: 10px !important; min-height: 10px !important;'
            ' border: solid 1px rgba(0, 0, 0, 0) !important; text-decoration: none !important'
        )
        a.text = a.tail = ' '
        if num % 8 == 0:
            # prevent too many anchors on a line as it causes chromium to
            # rescale the viewport
            a.tail = '\n'
        div.append(a)
    for anchor in anchors:
        a(anchor)
    # The uuid link itself marks the start of the generated block
    a(uuid)
def add_all_links(container, margin_files):
    '''Append hidden anchor-link blocks for every element id in every spine
    file, returning the uuid used to mark those blocks so their rendered
    page locations can be recovered later.'''
    anchor_uuid = uuid4()
    ids_by_name = {
        name: frozenset(container.parsed(name).xpath('//*/@id'))
        for name, is_linear in container.spine_names
    }
    for mf in margin_files:
        anchors = ids_by_name.get(mf.name, set())
        add_anchors_markup(container.parsed(mf.name), anchor_uuid, anchors)
        container.dirty(mf.name)
    return anchor_uuid
def make_anchors_unique(container, log):
    # Rename every element id across the book to a globally unique 'a<N>'
    # and rewrite all internal links to the special
    # https://calibre-pdf-anchor.{a,n}# form that fix_links() later resolves
    # to PDF page locations. Returns {spine_name: body_anchor_id}.
    mapping = {}
    count = 0
    base = None  # rebound per-file before replace_links() runs the closure
    spine_names = set()

    def replacer(url):
        # replacer.file_type is set externally (presumably by
        # container.replace_links) -- only text/ncx links are rewritten
        if replacer.file_type not in ('text', 'ncx'):
            return url
        if not url:
            return url
        if '#' not in url:
            url += '#'
        if url.startswith('#'):
            href, frag = base, url[1:]
            name = base
        else:
            href, frag = url.partition('#')[::2]
            name = container.href_to_name(href, base)
        if not name:
            return url.rstrip('#')
        if not frag and name in spine_names:
            # bare link to a spine file: target the file itself
            replacer.replaced = True
            return 'https://calibre-pdf-anchor.n#' + name
        key = name, frag
        new_frag = mapping.get(key)
        if new_frag is None:
            if name in spine_names:
                log.warn(f'Link anchor: {name}#{frag} not found, linking to top of file instead')
                replacer.replaced = True
                return 'https://calibre-pdf-anchor.n#' + name
            return url.rstrip('#')
        replacer.replaced = True
        return 'https://calibre-pdf-anchor.a#' + new_frag
        # NOTE(review): the following two statements are unreachable (dead
        # code after the return above); left as-is
        if url.startswith('#'):
            return '#' + new_frag
        return href + '#' + new_frag

    name_anchor_map = {}
    for spine_name, is_linear in container.spine_names:
        spine_names.add(spine_name)
        root = container.parsed(spine_name)
        for elem in root.xpath('//*[@id]'):
            count += 1
            key = spine_name, elem.get('id')
            if key not in mapping:
                new_id = mapping[key] = f'a{count}'
                elem.set('id', new_id)
        # ensure the body has an id so bare file links have a target
        body = last_tag(root)
        if not body.get('id'):
            count += 1
            body.set('id', f'a{count}')
        name_anchor_map[spine_name] = body.get('id')
    for name in container.mime_map:
        base = name
        replacer.replaced = False
        container.replace_links(name, replacer)
    return name_anchor_map
class AnchorLocation:
    '''Where an anchor ended up in the rendered PDF: 1-based page number,
    left/top offsets and a zoom factor.'''

    __slots__ = ('pagenum', 'left', 'top', 'zoom')

    def __init__(self, pagenum=1, left=0, top=0, zoom=0):
        self.pagenum = pagenum
        self.left = left
        self.top = top
        self.zoom = zoom

    def __repr__(self):
        p, l, t, z = self.as_tuple
        return f'AnchorLocation(pagenum={p}, left={l}, top={t}, zoom={z})'

    @property
    def as_tuple(self):
        # the order expected by the podofo link/outline APIs
        return self.pagenum, self.left, self.top, self.zoom
def get_anchor_locations(name, pdf_doc, first_page_num, toc_uuid, log):
    # Extract the page locations of all generated anchors from a rendered
    # per-file PDF, delete the trailing anchor-marker pages, and convert the
    # page numbers from file-local to whole-document numbering.
    ans = {}
    anchors = pdf_doc.extract_anchors()
    try:
        toc_pagenum = anchors.pop(toc_uuid)[0]
    except KeyError:
        toc_pagenum = None
    if toc_pagenum is None:
        log.warn(f'Failed to find ToC anchor in {name}')
        toc_pagenum = 0
    if toc_pagenum > 1:
        # drop the pages containing only the generated anchor links
        pdf_doc.delete_pages(toc_pagenum, pdf_doc.page_count() - toc_pagenum + 1)
    for anchor, loc in iteritems(anchors):
        loc = list(loc)
        loc[0] += first_page_num - 1  # make the page number document-global
        ans[anchor] = AnchorLocation(*loc)
    return ans
def fix_links(pdf_doc, anchor_locations, name_anchor_map, mark_links, log):
    # Rewrite the placeholder https://calibre-pdf-anchor.{a,n}# links in the
    # merged PDF into internal go-to-page links, clamping targets that fall
    # past the end of the document.
    pc = pdf_doc.page_count()

    def replace_link(url):
        purl = urlparse(url)
        if purl.scheme != 'https' or purl.netloc not in ('calibre-pdf-anchor.a', 'calibre-pdf-anchor.n'):
            return  # not one of our placeholders: leave untouched
        loc = None
        if purl.netloc == 'calibre-pdf-anchor.a':
            # .a links target a renamed anchor id directly
            loc = anchor_locations.get(purl.fragment)
            if loc is None:
                log.warn(f'Anchor location for link to {purl.fragment} not found')
        else:
            # .n links target a spine file, via that file's body anchor
            loc = anchor_locations.get(name_anchor_map.get(purl.fragment))
            if loc is None:
                log.warn(f'Anchor location for link to {purl.fragment} not found')
        if loc is None:
            return None
        if loc.pagenum > pc:
            log.warn(f'Anchor location for link to {purl.fragment} is past the end of the document, moving it to last page')
            loc.pagenum = pc
        return loc.as_tuple

    pdf_doc.alter_links(replace_link, mark_links)
# }}}
# Outline creation {{{
class PDFOutlineRoot:
    '''Adapter giving the document's outline root the same create() API as an
    outline item, so add_toc() can treat both uniformly.

    The as_child argument is deliberately ignored at this level: every
    top-level entry is created as a sibling of the previous one.'''

    def __init__(self, pdf_doc):
        self.pdf_doc = pdf_doc
        self.root_item = None

    def create(self, title, pagenum, as_child, left, top, zoom):
        previous = self.root_item
        if previous is None:
            item = self.pdf_doc.create_outline(title, pagenum, left, top, zoom)
        else:
            item = previous.create(title, pagenum, False, left, top, zoom)
        self.root_item = item
        return item
def annotate_toc(toc, anchor_locations, name_anchor_map, log):
    '''Attach a pdf_loc attribute to every ToC node.

    A frag containing a dot is treated as a file name and first mapped to
    that file's body anchor; anything that cannot be resolved falls back to
    page 1 with a warning.'''
    for node in toc.iterdescendants():
        frag = node.frag
        try:
            key = name_anchor_map[frag] if '.' in frag else frag
            location = anchor_locations[key]
        except Exception:
            log.warn(f'Could not find anchor location for ToC entry: {node.title} with href: {frag}')
            location = AnchorLocation(1, 0, 0, 0)
        node.pdf_loc = location
def add_toc(pdf_parent, toc_parent, log, pdf_doc):
    # Recursively mirror the annotated ToC tree into the PDF outline.
    # A ValueError from create() indicates an out-of-range page: entries
    # past page 1 are moved to the last page, others are dropped.
    for child in toc_parent:
        title, loc = child.title, child.pdf_loc
        try:
            pdf_child = pdf_parent.create(title, loc.pagenum, True, loc.left, loc.top, loc.zoom)
        except ValueError:
            if loc.pagenum > 1:
                log.warn(f'TOC node: {title} at page: {loc.pagenum} is beyond end of file, moving it to last page')
                pdf_child = pdf_parent.create(title, pdf_doc.page_count(), True, loc.left, loc.top, loc.zoom)
            else:
                log.warn(f'Ignoring TOC node: {title} at page: {loc.pagenum}')
                continue
        if len(child):
            add_toc(pdf_child, child, log, pdf_doc)
def get_page_number_display_map(render_manager, opts, num_pages, log):
    # Build {physical_page: displayed_page_number} by evaluating the user's
    # page-number-map JS expression in a worker page. Falls back to the
    # identity mapping on any error.
    # The map is computed for twice the page count, presumably to cover
    # pages added later (e.g. cover/ToC) -- TODO confirm.
    num_pages *= 2
    default_map = {n:n for n in range(1, num_pages + 1)}
    if opts.pdf_page_number_map:
        js = '''
        function map_num(n) { return eval(MAP_EXPRESSION); }
        var ans = {};
        for (var i=1; i <= NUM_PAGES; i++) ans[i] = map_num(i);
        JSON.stringify(ans);
        '''.replace('MAP_EXPRESSION', json.dumps(opts.pdf_page_number_map), 1).replace(
            'NUM_PAGES', str(num_pages), 1)
        result = render_manager.evaljs(js)
        try:
            result = json.loads(result)
            if not isinstance(result, dict):
                raise ValueError('Not a dict')
        except Exception:
            log.warn(f'Could not do page number mapping, got unexpected result: {repr(result)}')
        else:
            default_map = {int(k): int(v) for k, v in iteritems(result)}
    return default_map
def add_pagenum_toc(root, toc, opts, page_number_display_map):
    # Render an inline, printable Table of Contents (title + page number per
    # row) into the skeleton document's body.
    body = last_tag(root)
    # Indent pairs (level, em-indent) for ToC nesting levels 1..6
    indents = []
    for i in range(1, 7):
        indents.extend((i, 1.4*i))
    css = '''
    .calibre-pdf-toc table { width: 100%% }
    .calibre-pdf-toc table tr td:last-of-type { text-align: right }
    .calibre-pdf-toc .level-0 {
        font-size: larger;
    }
    .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
    .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
    .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
    .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
    .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
    .calibre-pdf-toc .level-%d td:first-of-type { padding-left: %.1gem }
    ''' % tuple(indents) + (opts.extra_css or '')
    style = body.makeelement(XHTML('style'), type='text/css')
    style.text = css
    body.append(style)
    body.set('class', 'calibre-pdf-toc')

    def E(tag, cls=None, text=None, tail=None, parent=None, **attrs):
        # small element-builder helper
        ans = body.makeelement(XHTML(tag), **attrs)
        ans.text, ans.tail = text, tail
        if cls is not None:
            ans.set('class', cls)
        if parent is not None:
            parent.append(ans)
        return ans

    E('h2', text=(opts.toc_title or _('Table of Contents')), parent=body)
    table = E('table', parent=body)
    for level, node in toc.iterdescendants(level=0):
        tr = E('tr', cls='level-%d' % level, parent=table)
        E('td', text=node.title or _('Unknown'), parent=tr)
        num = node.pdf_loc.pagenum
        # translate physical page number to the user-visible one
        num = page_number_display_map.get(num, num)
        E('td', text=f'{num}', parent=tr)
# }}}
# Fonts {{{
def all_glyph_ids_in_w_arrays(arrays, as_set=False):
    '''Collect every glyph id covered by the given PDF CIDFont W arrays.

    Each W array mixes two encodings: ``c [w1 w2 ...]`` (glyphs c through
    c+len-1) and ``first last w`` (glyphs first through last inclusive).
    Returns a sorted list, or a set when as_set is True.'''
    glyphs = set()
    for w in arrays:
        pos, n = 0, len(w)
        while pos + 1 < n:
            first = w[pos]
            nxt = w[pos + 1]
            if isinstance(nxt, list):
                glyphs.update(range(first, first + len(nxt)))
                pos += 2
            else:
                glyphs.update(range(first, nxt + 1))
                pos += 3
    return glyphs if as_set else sorted(glyphs)
def fonts_are_identical(fonts):
    '''True if every font in *fonts* has identical ToUnicode maps, embedded
    data and width (W/W2) arrays.'''
    for key in ('ToUnicode', 'Data', 'W', 'W2'):
        values = [f[key] for f in fonts]
        if any(v != values[0] for v in values[1:]):
            return False
    return True
def merge_font_files(fonts, log):
    # Merge multiple embedded instances of the same base font into a single
    # sfnt stream. Returns (merged_font_data, references_to_replace).
    #
    # As of Qt 5.15.1 Chromium has switched to harfbuzz and dropped sfntly. It
    # now produces font descriptors whose W arrays dont match the glyph width
    # information from the hhea table, in contravention of the PDF spec. So
    # we can no longer merge font descriptors, all we can do is merge the
    # actual sfnt data streams into a single stream and subset it to contain
    # only the glyphs from all W arrays.
    # choose the largest font as the base font
    fonts.sort(key=lambda f: len(f['Data'] or b''), reverse=True)
    descendant_fonts = [f for f in fonts if f['Subtype'] != 'Type0']
    total_size = sum(len(f['Data']) for f in descendant_fonts)
    merged_sfnt = merge_truetype_fonts_for_pdf(tuple(f['sfnt'] for f in descendant_fonts), log)
    if False:  # subsetting disabled, see comment below
        # As of Qt 6.7.2 webengine produces W arrays that do not contain all
        # used glyph ids, so we cannot subset. Can be tested by
        # echo 'this is a test boulder sentence' > test.txt; ebook-convert test.txt .pdf
        w_arrays = tuple(filter(None, (f['W'] for f in descendant_fonts)))
        glyph_ids = all_glyph_ids_in_w_arrays(w_arrays, as_set=True)
        h_arrays = tuple(filter(None, (f['W2'] for f in descendant_fonts)))
        glyph_ids |= all_glyph_ids_in_w_arrays(h_arrays, as_set=True)
        try:
            pdf_subset(merged_sfnt, glyph_ids)
        except NoGlyphs:
            log.warn(f'Subsetting of {fonts[0]["BaseFont"]} failed with no glyphs found, ignoring')
    font_data = merged_sfnt()[0]
    log(f'Merged {len(fonts)} instances of {fonts[0]["BaseFont"]} reducing size from {human_readable(total_size)} to {human_readable(len(font_data))}')
    return font_data, tuple(f['Reference'] for f in descendant_fonts)
def merge_fonts(pdf_doc, log):
    # Group the document's embedded fonts by BaseFont name and merge each
    # group that qualifies (Identity-encoded Type0 composite fonts whose
    # descendants carry TrueType 'glyf' data).
    all_fonts = pdf_doc.list_fonts(True)
    base_font_map = {}

    def mergeable(fonts):
        has_type0 = False
        for font in fonts:
            if font['Subtype'] == 'Type0':
                has_type0 = True
                if not font['Encoding'] or not font['Encoding'].startswith('Identity-'):
                    return False
            else:
                # descendant font: must have parseable TrueType data
                if not font['Data']:
                    return False
                try:
                    sfnt = Sfnt(font['Data'])
                except UnsupportedFont:
                    return False
                font['sfnt'] = sfnt  # cached for merge_font_files()
                if b'glyf' not in sfnt:
                    return False
        return has_type0

    for f in all_fonts:
        base_font_map.setdefault(f['BaseFont'], []).append(f)
    for name, fonts in iteritems(base_font_map):
        if mergeable(fonts):
            font_data, references = merge_font_files(fonts, log)
            pdf_doc.merge_fonts(font_data, references)
def test_merge_fonts():
    # Developer helper: merge the fonts in the PDF named on the command line
    # and write the result next to it as <name>-merged.pdf.
    path = sys.argv[-1]
    podofo = get_podofo()
    pdf_doc = podofo.PDFDoc()
    pdf_doc.open(path)
    from calibre.utils.logging import default_log
    merge_fonts(pdf_doc, default_log)
    out = path.rpartition('.')[0] + '-merged.pdf'
    pdf_doc.save(out)
    print('Merged PDF written to', out)
# }}}
# Header/footer {{{
# Default footer template used when the user asks only for page numbers
PAGE_NUMBER_TEMPLATE = '<footer><div style="margin: auto">_PAGENUM_</div></footer>'
def add_header_footer(manager, opts, pdf_doc, container, page_number_display_map, page_layout, page_margins_map, pdf_metadata, report_progress, toc, log):
    '''Overlay the rendered header/footer templates onto every page of pdf_doc.

    The templates are expanded into one HTML document containing a
    full-viewport <div> per page, rendered to a separate PDF, and then imposed
    page-by-page onto the main document. Does nothing when neither a header
    nor a footer (nor automatic page numbers) was requested.
    '''
    header_template, footer_template = opts.pdf_header_template, opts.pdf_footer_template
    if not footer_template and opts.pdf_page_numbers:
        # Fall back to a simple centered page-number footer
        footer_template = PAGE_NUMBER_TEMPLATE
    if not header_template and not footer_template:
        return
    report_progress(0.8, _('Adding headers and footers'))
    name = create_skeleton(container)
    root = container.parsed(name)
    # Neutralize any inherited styling on the skeleton document
    reset_css = 'margin: 0; padding: 0; border-width: 0; background-color: unset; column-count: unset; column-width: unset;'
    root.set('style', reset_css)
    body = last_tag(root)
    body.attrib.pop('id', None)
    body.set('style', reset_css)
    job = job_for_name(container, name, Margins(0, 0, 0, 0), page_layout)
    page_layout = job[1]
    def m(tag_name, text=None, style=None, **attrs):
        # Element factory: an XHTML element with optional text content and an
        # inline style assembled from a mapping.
        ans = root.makeelement(XHTML(tag_name), **attrs)
        if text is not None:
            ans.text = text
        if style is not None:
            style = '; '.join(f'{k}: {v}' for k, v in iteritems(style))
            ans.set('style', style)
        return ans
    # Vertical placement of header/footer inside the page-height flex column
    justify = 'flex-end'
    if header_template:
        justify = 'space-between' if footer_template else 'flex-start'
    def create_toc_stack(iterator):
        # Flatten the ToC into (level, pagenum, title) tuples, keeping only
        # entries that resolved to an actual page.
        ans = []
        for level, child in iterator:
            pdf_loc = getattr(child, 'pdf_loc', None)
            if pdf_loc is not None and pdf_loc.pagenum > 0:
                ans.append((level, pdf_loc.pagenum, child.title))
        return ans
    def stack_to_map(stack):
        # For each page of the document, the title of the ToC section that
        # page belongs to (the deepest entry starting on that page wins).
        ans = []
        stack_pos = 0
        current, page_for_current, level_for_current = '', -1, -1
        stack_len = len(stack)
        for page in range(1, pdf_doc.page_count() + 1):
            while stack_pos < stack_len:
                level, pagenum, title = stack[stack_pos]
                if pagenum != page:
                    break
                if pagenum != page_for_current or level > level_for_current:
                    page_for_current = pagenum
                    level_for_current = level
                    current = title
                stack_pos += 1
            ans.append(current)
        return ans
    def page_counts_map(iterator):
        # For each page: the total number of pages in its enclosing top-level
        # section, and the page's 1-based position within that section.
        pagenums = []
        for level, child in iterator:
            pdf_loc = getattr(child, 'pdf_loc', None)
            if pdf_loc is not None and pdf_loc.pagenum > 0:
                pagenums.append(pdf_loc.pagenum)
        stack = []
        for i, pagenum in enumerate(pagenums):
            next_page_num = pagenums[i + 1] if i + 1 < len(pagenums) else (pdf_doc.page_count() + 1)
            stack.append((pagenum, next_page_num - pagenum))
        totals = []
        section_nums = []
        stack_len = len(stack)
        stack_pos = 0
        current, page_for_current, counter = 0, -1, 0
        for page in range(1, pdf_doc.page_count() + 1):
            while stack_pos < stack_len:
                pagenum, pages = stack[stack_pos]
                if pagenum != page:
                    break
                if pagenum != page_for_current:
                    current = pages
                    page_for_current = pagenum
                    counter = 0
                stack_pos += 1
            counter += 1
            totals.append(current)
            section_nums.append(counter)
        return totals, section_nums
    if toc is None:
        page_toc_map = stack_to_map(())
        toplevel_toc_map = stack_to_map(())
        toplevel_pagenum_map, toplevel_pages_map = page_counts_map(())
    else:
        page_toc_map = stack_to_map(create_toc_stack(toc.iterdescendants(level=0)))
        def tc():
            # Iterate only the top-level ToC entries, all treated as level 0
            for x in toc:
                yield 0, x
        toplevel_toc_map = stack_to_map(create_toc_stack(tc()))
        # NOTE(review): toplevel_pagenum_map receives the per-section page
        # *totals* and toplevel_pages_map the per-section page *numbers* —
        # the variable names read as if swapped, but the substitutions in
        # format_template below are consistent with this assignment.
        toplevel_pagenum_map, toplevel_pages_map = page_counts_map(tc())
    dpi = 96  # dont know how to query Qt for this, seems to be the same on all platforms
    def pt_to_px(pt): return int(pt * dpi / 72)
    def create_container(page_num, margins):
        # One full-viewport flex container per page; horizontal margins mirror
        # those of the corresponding document page.
        style = {
            'page-break-inside': 'avoid',
            'page-break-after': 'always',
            'display': 'flex',
            'flex-direction': 'column',
            'height': '100vh',
            'justify-content': justify,
            'margin-left': f'{margins.left}pt',
            'margin-right': f'{margins.right}pt',
            'margin-top': '0',
            'margin-bottom': '0',
            'padding': '0',
            'border-width': '0',
            'overflow': 'hidden',
            'background-color': 'unset',
        }
        ans = m('div', style=style, id=f'p{page_num}')
        return ans
    def format_template(template, page_num, height, margins):
        # Substitute all _PLACEHOLDER_ tokens and constrain the template to a
        # fixed-height flex row.
        div_width_px = pt_to_px(page_layout.paintRectPoints().width() - margins.left - margins.right)
        template = template.replace('_TOP_LEVEL_SECTION_PAGES_', str(toplevel_pagenum_map[page_num - 1]))
        template = template.replace('_TOP_LEVEL_SECTION_PAGENUM_', str(toplevel_pages_map[page_num - 1]))
        template = template.replace('_TOTAL_PAGES_', str(pages_in_doc))
        template = template.replace('_PAGENUM_', str(page_number_display_map[page_num]))
        template = template.replace('_TITLE_', prepare_string_for_xml(pdf_metadata.title, True))
        template = template.replace('_AUTHOR_', prepare_string_for_xml(pdf_metadata.author, True))
        template = template.replace('_TOP_LEVEL_SECTION_', prepare_string_for_xml(toplevel_toc_map[page_num - 1]))
        template = template.replace('_SECTION_', prepare_string_for_xml(page_toc_map[page_num - 1]))
        template = template.replace('_WIDTH_PIXELS_', str(div_width_px))
        template = template.replace('_HEIGHT_PIXELS_', str(pt_to_px(height)))
        troot = parse(template, namespace_elements=True)
        ans = last_tag(troot)[0]
        style = ans.get('style') or ''
        style = (
            'margin: 0; padding: 0; height: {height}pt; border-width: 0;'
            'display: flex; align-items: center; overflow: hidden; background-color: unset;').format(height=height) + style
        ans.set('style', style)
        # Hide elements restricted to the opposite page parity; page_num % 2
        # is truthy for odd pages, so q names the class to suppress here.
        for child in ans.xpath('descendant-or-self::*[@class]'):
            cls = frozenset(child.get('class').split())
            q = 'even-page' if page_num % 2 else 'odd-page'
            if q in cls or q.replace('-', '_') in cls:
                style = child.get('style') or ''
                child.set('style', style + '; display: none')
        return ans
    pages_in_doc = pdf_doc.page_count()
    for page_num in range(1, pages_in_doc + 1):
        margins = page_margins_map[page_num - 1]
        div = create_container(page_num, margins)
        body.append(div)
        if header_template:
            div.append(format_template(header_template, page_num, margins.top, margins))
        if footer_template:
            div.append(format_template(footer_template, page_num, margins.bottom, margins))
    container.commit()
    # print(container.raw_data(name))
    results = manager.convert_html_files([job], settle_time=1)
    data = results[name]
    if not isinstance(data, bytes):
        # A non-bytes result is an error message from the renderer
        raise SystemExit(data)
    # open('/t/impose.pdf', 'wb').write(data)
    doc = data_as_pdf_doc(data)
    first_page_num = pdf_doc.page_count()
    num_pages = doc.page_count()
    if first_page_num != num_pages:
        # NOTE(review): the guard fires on any mismatch, but the message text
        # claims '<'; message preserved as-is since it is runtime output.
        raise ValueError(f'The number of header/footers pages ({num_pages}) < number of document pages ({first_page_num})')
    pdf_doc.append(doc)
    pdf_doc.impose(1, first_page_num + 1, num_pages)
    report_progress(0.9, _('Headers and footers added'))
# }}}
# Maths {{{
@lru_cache(maxsize=2)
def mathjax_dir():
    # Cached lookup of the bundled MathJax resource directory.
    return P('mathjax', allow_user_override=False)
def add_maths_script(container):
    '''Inject the MathJax loader <script> into every spine item containing
    mathematics. Returns a dict mapping spine name -> has-maths flag.'''
    maths_map = {}
    for spine_name, _is_linear in container.spine_names:
        doc_root = container.parsed(spine_name)
        found = check_for_maths(doc_root)
        maths_map[spine_name] = found
        if not found:
            continue
        loader = doc_root.makeelement(
            XHTML('script'), type="text/javascript",
            src=f'{FAKE_PROTOCOL}://{FAKE_HOST}/mathjax/loader/pdf-mathjax-loader.js')
        loader.set('async', 'async')
        loader.set('data-mathjax-path', f'{FAKE_PROTOCOL}://{FAKE_HOST}/mathjax/data/')
        last_tag(doc_root).append(loader)
    return maths_map
# }}}
def fix_markup(container):
    '''Replace every <canvas> in the spine with a plain <div>.

    Canvas elements cause rendering issues, see
    https://bugs.launchpad.net/bugs/1859040 for an example.
    '''
    find_canvases = XPath('//h:canvas')
    for spine_name, _is_linear in container.spine_names:
        tree = container.parsed(spine_name)
        for canvas_elem in find_canvases(tree):
            canvas_elem.tag = XHTML('div')
def convert(opf_path, opts, metadata=None, output_path=None, log=default_log, cover_data=None, report_progress=lambda x, y: None):
    '''Top-level driver: convert the book rooted at opf_path into a PDF.

    Returns the PDF bytes when output_path is None, otherwise writes the PDF
    to output_path and returns None.
    '''
    container = Container(opf_path, log)
    fix_markup(container)
    report_progress(0.05, _('Parsed all content for markup transformation'))
    if opts.pdf_hyphenate:
        from calibre.ebooks.oeb.polish.hyphenation import add_soft_hyphens
        add_soft_hyphens(container)
    has_maths = add_maths_script(container)
    fix_fullscreen_images(container)
    # Make anchors unique across the whole book so PDF links can resolve
    name_anchor_map = make_anchors_unique(container, log)
    margin_files = tuple(create_margin_files(container))
    toc = get_toc(container, verify_destinations=False)
    has_toc = toc and len(toc)
    links_page_uuid = add_all_links(container, margin_files)
    container.commit()
    report_progress(0.1, _('Completed markup transformation'))
    # Render every spine item to its own PDF, tracking anchor locations and
    # per-page margins, then concatenate all the parts.
    manager = RenderManager(opts, log, container)
    page_layout = get_page_layout(opts)
    pdf_doc = None
    anchor_locations = {}
    jobs = []
    for margin_file in margin_files:
        jobs.append(job_for_name(container, margin_file.name, margin_file.margins, page_layout))
    results = manager.convert_html_files(jobs, settle_time=1, has_maths=has_maths)
    num_pages = 0
    page_margins_map = []
    all_docs = []
    for i, margin_file in enumerate(margin_files):
        name = margin_file.name
        data = results[name]
        if not isinstance(data, bytes):
            # A non-bytes result is an error message from the renderer
            raise SystemExit(data)
        doc = data_as_pdf_doc(data)
        anchor_locations.update(get_anchor_locations(name, doc, num_pages + 1, links_page_uuid, log))
        doc_pages = doc.page_count()
        page_margins_map.extend(repeat(resolve_margins(margin_file.margins, page_layout), doc_pages))
        num_pages += doc_pages
        all_docs.append(doc)
    pdf_doc = all_docs[0]
    pdf_doc.append(*all_docs[1:])
    page_number_display_map = get_page_number_display_map(manager, opts, num_pages, log)
    if has_toc:
        annotate_toc(toc, anchor_locations, name_anchor_map, log)
        if opts.pdf_add_toc:
            # Render an inline Table of Contents (with page numbers) and
            # append it to the end of the document.
            tocname = create_skeleton(container)
            root = container.parsed(tocname)
            add_pagenum_toc(root, toc, opts, page_number_display_map)
            container.commit()
            jobs = [job_for_name(container, tocname, None, page_layout)]
            results = manager.convert_html_files(jobs, settle_time=1)
            tocdoc = data_as_pdf_doc(results[tocname])
            page_margins_map.extend(repeat(resolve_margins(None, page_layout), tocdoc.page_count()))
            pdf_doc.append(tocdoc)
    report_progress(0.7, _('Rendered all HTML as PDF'))
    fix_links(pdf_doc, anchor_locations, name_anchor_map, opts.pdf_mark_links, log)
    if toc and len(toc):
        add_toc(PDFOutlineRoot(pdf_doc), toc, log, pdf_doc)
    report_progress(0.75, _('Added links to PDF content'))
    pdf_metadata = PDFMetadata(metadata)
    add_header_footer(
        manager, opts, pdf_doc, container,
        page_number_display_map, page_layout, page_margins_map,
        pdf_metadata, report_progress, toc if has_toc else None, log)
    # Post-processing passes to shrink the output
    num_removed = remove_unused_fonts(pdf_doc)
    if num_removed:
        log('Removed', num_removed, 'unused fonts')
    merge_fonts(pdf_doc, log)
    num_removed = dedup_type3_fonts(pdf_doc)
    if num_removed:
        log('Removed', num_removed, 'duplicated Type3 glyphs')
    num_removed = pdf_doc.dedup_images()
    if num_removed:
        log('Removed', num_removed, 'duplicate images')
    if opts.pdf_odd_even_offset:
        # Shift the crop box horizontally on alternating pages (gutter
        # margin), but only when the shift stays within the page margins.
        for i in range(1, pdf_doc.page_count()):
            margins = page_margins_map[i]
            mult = -1 if i % 2 else 1
            val = opts.pdf_odd_even_offset
            if abs(val) < min(margins.left, margins.right):
                box = list(pdf_doc.get_page_box("CropBox", i))
                box[0] += val * mult
                pdf_doc.set_page_box("CropBox", i, *box)
    if cover_data:
        add_cover(pdf_doc, cover_data, page_layout, opts)
    if metadata is not None:
        update_metadata(pdf_doc, pdf_metadata)
    pdf_doc.creator = pdf_doc.producer = __appname__ + ' ' + __version__
    report_progress(1, _('Updated metadata in PDF'))
    if opts.uncompressed_pdf:
        pdf_doc.uncompress()
    pdf_data = pdf_doc.write()
    if output_path is None:
        return pdf_data
    with open(output_path, 'wb') as f:
        f.write(pdf_data)
| 46,470 | Python | .py | 1,055 | 34.836019 | 154 | 0.611239 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,597 | reflow.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/reflow.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import sys
from operator import attrgetter
from lxml import etree
# Global constants affecting formatting decisions.
# These are heuristics tuned against real pdftohtml output; all distances are
# in the pixel units of the pdftohtml XML unless noted otherwise.

#### Pages/lines

# How many pages/lines to scan when finding header/footer automatically
PAGE_SCAN_COUNT = 20  # Arbitrary
LINE_SCAN_COUNT = 2  # Arbitrary

# Number of character widths that two strings have to be apart,
# for them to be considered part of the same text fragment
# The problem is justified text where fragments can be widely spaced
# Was 0.5 but this forces just about anything to coalesce.
# It also means no columns will be found
COALESCE_FACTOR = 20.0

# Allow some dither of bottom of characters when checking if same line.
# The bottom of 1 line can overlap the top of the next by this amount
# and they are considered different lines.
# Pixels from the PDF file
BOTTOM_FACTOR = 2.0

# Fraction of text height that two strings' bottoms can differ by
# for them to be considered to be part of the same text fragment
LINE_FACTOR = 0.2

# Long words can force a new line (at a new page)
# although the end of the previous is < this percent.
# Needs to find whether 1st word of 2nd page would fit on
# the last line of previous rather than the length of the last line.
LAST_LINE_PERCENT = 60.0

# Pages can split early to avoid orphans.
# Allow a margin when deciding whether a page finishes early,
# and a page break should be put in the HTML.
ORPHAN_LINES = 5

# Fraction of the gap between lines to determine if setting the paragraph break
# is likely to be valid. Somewhere between 1 and 2, probably nearer 2
PARA_FACTOR = 1.8

# Multiplies the gap between paragraphs to determine if this is a section break
# not a paragraph break
SECTION_FACTOR = 1.3

# Multiplies the average line height when determining row height
# of a particular element to detect columns.
YFUZZ = 1.5

# Left (and other) margins can waver.
# Plus or minus this
LEFT_WAVER = 2.0

# Amount left margin must be greater than right for text
# to be considered right aligned. 1.8 = 180%
RIGHT_FACTOR = 1.8

# Percentage amount left and right margins can differ
# and still be considered centered. 0.15 = 15%
CENTER_FACTOR = 0.15

# How near does text right need to be to right margin
# to be considered right aligned. 0.1 = 10%
RIGHT_FLOAT_FACTOR = 0.1

#### Indents and line spacing

# How near must pixel values be to appear the same
SAME_SPACE = 3.0
SAME_INDENT = 2.0
class Font:
    '''A font description built from a pdftohtml <fontspec> element (or any
    object exposing a .get() accessor for id/size/color/family).'''

    def __init__(self, spec):
        get = spec.get
        self.id = get('id')
        self.size = float(get('size'))
        # Relative size (em); filled in later once document-wide font
        # statistics are known.
        self.size_em = 0.0
        self.color = get('color')
        self.family = get('family')
class Element:
    '''Base class for page elements (text fragments and images).

    Subclasses are expected to assign a unique ``id``; equality and hashing
    are based solely on that id so elements can be stored in sets and tested
    for membership in lists.
    '''

    def __init__(self):
        self.starts_block = None
        self.block_style = None

    def __eq__(self, other):
        # Robustness fix: comparing against an object without an ``id``
        # attribute used to raise AttributeError; returning NotImplemented
        # lets Python fall back to identity comparison instead.
        if not isinstance(other, Element):
            return NotImplemented
        return self.id == other.id

    def __hash__(self):
        return hash(self.id)
class DocStats:
    '''Aggregated layout statistics for the whole document. Every field
    starts at zero and is filled in during document analysis.'''

    def __init__(self):
        for attr in ('top', 'bottom', 'left_odd', 'left_even', 'right',
                     'line_space', 'para_space', 'indent_odd', 'indent_even',
                     'font_size'):
            setattr(self, attr, 0)
class Image(Element):
    '''An image found on a page of the pdftohtml XML output.'''

    def __init__(self, img, opts, log, idc):
        Element.__init__(self)
        self.opts, self.log = opts, log
        self.id = next(idc)
        self.top = float(img.get('top'))
        self.left = float(img.get('left'))
        self.width = float(img.get('width'))
        self.height = float(img.get('height'))
        self.src = img.get('src')
        self.bottom = self.top + self.height
        self.right = self.left + self.width
        # Alignment is determined later during layout analysis.
        self.align = 'L'

    def to_html(self):
        return '<img src="%s" alt="" width="%dpx" height="%dpx"/>' % \
            (self.src, int(self.width), int(self.height))

    def dump(self, f):
        f.write(self.to_html())
        f.write('\n')
class Text(Element):
    '''A text fragment from a pdftohtml <text> element.

    Tracks geometry (top/left/width/height plus derived bottom/right),
    font information and the raw XHTML markup of the fragment. Fragments
    are merged into lines/paragraphs via coalesce().
    '''

    def __init__(self, text, font_map, opts, log, idc):
        Element.__init__(self)
        self.id = next(idc)
        self.opts, self.log = opts, log
        self.font_map = font_map
        self.top, self.left, self.width, self.height = map(round, map(float, map(text.get,
            ('top', 'left', 'width', 'height'))))
        # This does nothing, as expected,
        # but somewhere left (at least) is changed sometimes to not .0
        if self.left != round(self.left) :
            self.left = round(self.left)
        self.bottom = self.top + self.height
        self.right = self.left + self.width
        self.tag = 'p'  # Normal paragraph <p...>
        self.indented = 0
        self.margin_left = 0  # Normal margins
        self.margin_right = 0  # Normal margins
        # When joining lines for a paragraph, remember position of last line joined
        self.last_left = self.left
        self.last_right = self.right
        # Remember the length of this line if it is merged into a paragraph
        self.final_width = self.width
        # Align = Left, Right, Center, Justified. Default L
        self.align = 'L'
        # Should there be extra space before/after paragraph?
        self.blank_line_before = 0
        self.blank_line_after = 0
        if self.font_map:
            self.font = self.font_map[text.get('font')]
            self.font_size = self.font.size
            self.font_size_em = self.font.size_em
            self.color = self.font.color
            self.font_family = self.font.family
        else:
            self.font = {}
            self.font_size = 0.0
            self.font_size_em = 0.0
            # self.color = 0
        text.tail = ''
        self.text_as_string = etree.tostring(text, method='text', encoding='unicode')
        self.raw = text.text if text.text else ''
        for x in text.iterchildren():
            self.raw += etree.tostring(x, method='xml', encoding='unicode')
        # NOTE(review): raises ZeroDivisionError for an empty string; callers
        # appear to filter empty fragments — verify.
        self.average_character_width = self.width/len(self.text_as_string)

    @property
    def is_empty(self):
        # There is nothing in this Text
        return not self.raw

    @property
    def is_spaces(self):
        # There are only spaces in this Text
        return bool(self.raw) and (
            re.match(r'^\s+$', self.raw) is not None or
            re.match(r'^\s*<i>\s*</i>\s*$', self.raw) is not None or
            re.match(r'^\s*<b>\s*</b>\s*$', self.raw) is not None
        )

    def coalesce(self, other, page_number, left_margin, right_margin):
        '''Merge ``other`` (a fragment to the right or on the following
        line) into this fragment, inserting inferred spaces, merging font
        spans and duplicated hrefs, and growing the bounding box.'''
        if self.opts.verbose > 2:
            self.log.debug('Coalescing %r with %r on page %d'%(self.text_as_string,
                other.text_as_string, page_number))
        # Need to work out how to decide this
        # For elements of the same line, is there a space between?
        has_float = ''
        # Spaces are narrow, so a_c_w/3
        # Or assume any gap = a space?
        if (self.top <= other.top and self.bottom >= other.bottom) \
          and abs(other.left - self.right) < 2.0:
            #and abs(other.left - self.right) < self.average_character_width / 3.0:
            has_gap = 0
        else:  # Insert n spaces to fill gap. Use TAB? Columns?
            if other.left < self.right:
                has_gap = 1  # Coalescing different lines. 1 space
            else:  # Multiple texts on same line
                has_gap = round(0.5+abs(other.left - self.right) / self.average_character_width)
        # Allow for super or subscript. These probably have lower heights
        # In this case, don't use their top/bottom
        if other.left >= self.right:
            # Same line
            if self.top > other.top:
                pass
            elif self.bottom < other.bottom:
                pass
            if self.height >= other.height:
                self.top = min(self.top, other.top)
                self.bottom = max(self.bottom, other.bottom)
            else:
                self.top = other.top
                self.bottom = other.bottom
        else:
            self.top = min(self.top, other.top)
            self.bottom = max(self.bottom, other.bottom)
        self.left = min(self.left, other.left)
        self.right = max(self.right, other.right)
        self.width += other.width
        self.final_width += other.final_width
        self.height = self.bottom - self.top
        # Need to check for </span> <span... as well
        # This test does not work in its present form
        # The matches can lose data, so force test to fail
        if self.font_size_em == other.font_size_em \
          and False \
          and self.font.id == other.font.id \
          and re.match('<span style="font-size:', self.raw) is not None \
          and re.match('<span style="font-size:', other.raw) is not None :
            # We have the same class, so merge
            m_self = re.match('^(.+)</span>$', self.raw)
            m_other = re.match('^<span style="font-size:.+em">(.+</span>)$', other.raw)
            if m_self and m_other:
                self.raw = m_self.group(1)
                other.raw = m_other.group(1)
        elif self.font_size_em != other.font_size_em \
          and self.font_size_em != 1.00 :
            if re.match('<span', self.raw) is None :
                self.raw = '<span style="font-size:%sem">%s</span>'%(str(self.font_size_em),self.raw)
        # Try to allow for a very large initial character
        elif len(self.text_as_string) <= 2 \
          and self.font_size_em >= other.font_size_em * 2.0 :
            # Insert 'float: left' etc. into current font info
            # Unfortunately, processing to generate the .epub file changes things.
            # The line height gets set to the same as other parts of the file
            # and the font size is reduced.
            # These need to be fixed manually.
            m_self = re.match('^(.+em">)(.+)$', self.raw)
            self.raw = m_self.group(1) \
                + '<span style="float:left"><span style="line-height:0.5">' \
                + m_self.group(2) + '</span></span>'
        self.font_size = max(self.font_size, other.font_size)
        self.font_size_em = max(self.font_size_em, other.font_size_em)
        # NOTE(review): both branches of this conditional yield other.font,
        # so the condition is dead; presumably the intent was
        # "self.font if self.font_size == other.font_size else other.font"
        # — confirm before changing.
        self.font = other.font if self.font_size == other.font_size else other.font
        if has_gap > 0:
            if has_gap < 3:  # Small number of spaces = 1 space
                if not (self.text_as_string.endswith(' ') \
                  or self.text_as_string.endswith('-') \
                  or other.text_as_string.startswith(' ') \
                  or other.text_as_string.startswith('-') ):
                    has_gap = 1
                else:
                    has_gap = 0
            else:  # Large gap
                # Float right if the text ends around the right margin,
                # and there are no long groups of spaces earlier in the line
                # as that probably means justified text.
                if '   ' not in self.text_as_string \
                  and other.right > right_margin - right_margin * RIGHT_FLOAT_FACTOR:
                    has_float = '<span style="float:right">'
                    has_gap = 1
                #else leave has_gap
        # Insert multiple spaces
        while has_gap > 0:
            self.text_as_string += ' '
            self.raw += ' '
            self.width += self.average_character_width
            #self.final_width += self.average_character_width
            has_gap -= 1
        self.text_as_string += other.text_as_string
        #self.width += other.width
        # Try to merge href where there are 2 for the same place
        # Beware multiple hrefs on the same line, but for different places
        # e.g.
        # self.raw = '<a href="index.html#2">T</a>'
        # other.raw = '<span style="font-size:0.7em"><a href="index.html#2">ITLE</a></span>'
        # becomes '<a href="index.html#2">T<span style="font-size:0.7em">ITLE</span></a>'
        # Are there problems if self.raw does not end </a>?
        # Note that the 2 parts could have different font sizes
        matchObj = re.match(r'^([^<]*)(<span[^>]*>)*(<a href[^>]+>)(.*)</a>(</span>)*(\s*)$', self.raw)
        if matchObj is not None :
            otherObj = re.match('^([^<]*)(<span[^>]*>)*(<a href[^>]+>)(.*)(</a>)(</span>)*(.*)$', other.raw)
            # There is another href, but is it for the same place?
            if otherObj is not None and matchObj.group(3) == otherObj.group(3) :
                m2 = matchObj.group(2)
                if m2 is None:
                    m2 = ''
                m5 = matchObj.group(5)
                if m5 is None:
                    m5 = ''
                o2 = otherObj.group(2)
                if o2 is None:
                    o2 = ''
                o6 = otherObj.group(6)
                if o6 is None:
                    o6 = ''
                # Remove the other <a...> stuff and put the </a> last
                other.raw = otherObj.group(1)+o2+otherObj.group(4)+o6+otherObj.group(5)+otherObj.group(7)
                # Move the <span... after the <a... and remove the </a>
                self.raw = matchObj.group(1)+matchObj.group(3)+m2+matchObj.group(4)+m5+matchObj.group(6)
        # This needs more work
        #if sub_super < 0:
        #    other.raw = '<sup>' + other.raw + '</sup>'
        #elif sub_super > 0:
        #    other.raw = '<sub>' + other.raw + '</sub>'
        if has_float:
            self.raw += has_float
        self.raw += other.raw
        if has_float:
            self.raw += '</span>'
        self.average_character_width = self.width/len(self.text_as_string)
        #self.last_left = other.left

    def to_html(self):
        return self.raw

    def dump(self, f):
        # NOTE(review): writes a str then utf-8 bytes to the same stream;
        # f would need to accept both — verify intended stream type.
        f.write('T top={}, left={}, width={}, height={}: '.format(self.top, self.left, self.width, self.height))
        f.write(self.to_html().encode('utf-8'))
        f.write('\n')
class Paragraph(Text):
    '''A text element known a priori to be a complete paragraph.'''

    def __init__(self, text, font_map, opts, log, idc):
        # Bug fix: this previously called Text.__init__(self) without the
        # arguments Text.__init__ requires, raising TypeError on every
        # instantiation. Only the base Element initialisation is wanted
        # here, since the remaining setup is performed below.
        Element.__init__(self)
        self.id = next(idc)
        self.opts, self.log = opts, log
        self.font_map = font_map
        # Geometry from the source XML element
        self.top, self.left, self.width, self.height = map(float, map(text.get,
            ('top', 'left', 'width', 'height')))
        self.bottom = self.top + self.height
        self.right = self.left + self.width
        if self.font_map:
            self.font = self.font_map[text.get('font')]
            self.font_size = self.font.size
            self.color = self.font.color
            self.font_family = self.font.family
        else:
            self.font = {}
            self.font_size = 0
            # self.color = 0
        # Drop any tail text so tostring() returns only this element's content
        text.tail = ''
        self.text_as_string = etree.tostring(text, method='text',
                encoding='unicode')
        self.raw = text.text if text.text else ''
        for x in text.iterchildren():
            self.raw += etree.tostring(x, method='xml', encoding='unicode')
        self.average_character_width = self.width/len(self.text_as_string)

    def to_html(self):
        return self.raw

    def dump(self, f):
        f.write('P top={}, left={}, width={}, height={}: '.format(self.top, self.left, self.width, self.height))
        f.write(self.to_html().encode('utf-8'))
        f.write('\n')
class FontSizeStats(dict):
    '''Maps font size -> fraction of all characters set at that size.

    Also records the most common size and its character count; ties are
    broken in favour of the size encountered last during iteration.
    '''

    def __init__(self, stats):
        total = float(sum(stats.values()))
        best_size, best_count = -1, 0
        for size, count in stats.items():
            if count >= best_count:
                best_size, best_count = size, count
            self[size] = count / total
        self.most_common_size = best_size
        self.chars_at_most_common_size = best_count
class Interval:
    '''A closed horizontal interval [left, right] with a width.'''

    def __init__(self, left, right):
        self.left, self.right = left, right
        self.width = right - left

    def intersection(self, other):
        # May produce an "empty" interval with non-positive width when the
        # intervals do not overlap; callers test truthiness for that.
        left = max(self.left, other.left)
        right = min(self.right, other.right)
        return Interval(left, right)

    def centered_in(self, parent):
        # True when the left/right insets relative to parent differ by < 3px
        left = abs(self.left - parent.left)
        right = abs(self.right - parent.right)
        return abs(left-right) < 3

    def __bool__(self):
        # Bug fix: this was named __nonzero__, the Python 2 spelling, so it
        # was never invoked under Python 3.
        return self.width > 0
    __nonzero__ = __bool__  # keep the old name for any direct callers

    def __eq__(self, other):
        return self.left == other.left and self.right == other.right

    def __hash__(self):
        # Bug fix: the original applied the format string to self.left alone
        # and passed two arguments to hash(); both raised TypeError.
        return hash('(%f,%f)' % (self.left, self.right))
class Column:
    '''A vertical column of elements detected on a page.

    Elements are kept sorted top-to-bottom and the column's bounding box is
    refreshed on every insertion.
    '''

    # A column contains an element if the element bulges out to
    # the left or the right by at most HFUZZ*col width.
    HFUZZ = 0.2

    def __init__(self):
        self.left = self.right = self.top = self.bottom = 0
        self.width = self.height = 0
        self.elements = []
        self.average_line_separation = 0

    def add(self, elem):
        # Append elem (ignored if already present) and refresh bounds
        if elem in self.elements:
            return
        self.elements.append(elem)
        self._post_add()

    def prepend(self, elem):
        # Insert elem at the top (ignored if already present)
        if elem in self.elements:
            return
        self.elements.insert(0, elem)
        self._post_add()

    def _post_add(self):
        # Re-sort top-to-bottom and recompute the bounding box
        self.elements.sort(key=attrgetter('bottom'))
        self.top = self.elements[0].top
        self.bottom = self.elements[-1].bottom
        # Bug fix: sys.maxint does not exist under Python 3; sys.maxsize
        # serves the same "larger than any coordinate" sentinel purpose.
        self.left, self.right = sys.maxsize, 0
        for x in self:
            self.left = min(self.left, x.left)
            self.right = max(self.right, x.right)
        self.width, self.height = self.right-self.left, self.bottom-self.top

    def __iter__(self):
        yield from self.elements

    def __len__(self):
        return len(self.elements)

    def contains(self, elem):
        # True when elem lies inside the column, allowing a bulge of
        # HFUZZ * width on either side.
        return elem.left > self.left - self.HFUZZ*self.width and \
            elem.right < self.right + self.HFUZZ*self.width

    def collect_stats(self):
        # Average vertical gap between consecutive elements, plus per-element
        # indent/width fractions relative to the column.
        if len(self.elements) > 1:
            gaps = [self.elements[i+1].top - self.elements[i].bottom for i in
                    range(0, len(self.elements)-1)]
            self.average_line_separation = sum(gaps)/len(gaps)
        for i, elem in enumerate(self.elements):
            left_margin = elem.left - self.left
            elem.indent_fraction = left_margin/self.width
            elem.width_fraction = elem.width/self.width
            if i == 0 or self.average_line_separation == 0:
                elem.top_gap_ratio = None
            else:
                elem.top_gap_ratio = (self.elements[i-1].bottom -
                        elem.top)/self.average_line_separation

    def previous_element(self, idx):
        # The element just above idx, or None for the first element
        if idx == 0:
            return None
        return self.elements[idx-1]

    def dump(self, f, num):
        f.write('******** Column %d\n\n'%num)
        for elem in self.elements:
            elem.dump(f)
class Box(list):
    '''A list of elements rendered inside a single HTML tag (default <p>).

    Integer entries represent page numbers and are emitted as named anchors.
    '''

    def __init__(self, type='p'):
        self.tag = type

    def to_html(self):
        parts = ['<%s>' % self.tag]
        for item in self:
            if isinstance(item, int):
                parts.append('<a name="page_%d"/>' % item)
            else:
                parts.append(item.to_html() + ' ')
        parts.append('</%s>' % self.tag)
        return parts
class ImageBox(Box):
    '''A Box whose content is an image, rendered centered; any contained
    elements (e.g. captions) follow after a line break.'''

    def __init__(self, img):
        Box.__init__(self)
        self.img = img

    def to_html(self):
        parts = ['<div style="text-align:center">', self.img.to_html()]
        if self:
            parts.append('<br/>')
        for item in self:
            if isinstance(item, int):
                parts.append('<a name="page_%d"/>' % item)
            else:
                parts.append(item.to_html() + ' ')
        parts.append('</div>')
        return parts
class Region:
    '''A group of columns sharing a common column layout on a page.

    Regions accumulate columns, absorb smaller/singleton regions, and are
    finally linearized into reading-order Boxes.
    '''

    def __init__(self, opts, log):
        self.opts, self.log = opts, log
        self.columns = []
        self.top = self.bottom = self.left = self.right = self.width = self.height = 0

    def add(self, columns):
        # First add establishes column order (left to right); later adds
        # merge element-wise into the existing columns.
        if not self.columns:
            for x in sorted(columns, key=attrgetter('left')):
                self.columns.append(x)
        else:
            for i in range(len(columns)):
                for elem in columns[i]:
                    self.columns[i].add(elem)

    def contains(self, columns):
        # True when `columns` has the same count as ours and each pair
        # overlaps horizontally by at least 60% of the narrower one.
        # TODO: handle unbalanced columns
        if not self.columns:
            return True
        if len(columns) != len(self.columns):
            return False
        for i in range(len(columns)):
            c1, c2 = self.columns[i], columns[i]
            x1 = Interval(c1.left, c1.right)
            x2 = Interval(c2.left, c2.right)
            intersection = x1.intersection(x2)
            base = min(x1.width, x2.width)
            if intersection.width/base < 0.6:
                return False
        return True

    @property
    def is_empty(self):
        return len(self.columns) == 0

    @property
    def line_count(self):
        # Number of lines in the tallest column
        max_lines = 0
        for c in self.columns:
            max_lines = max(max_lines, len(c))
        return max_lines

    @property
    def is_small(self):
        return self.line_count < 3

    def absorb(self, singleton):
        # Merge a small region's elements into whichever of our columns
        # overlaps each element the most.
        def most_suitable_column(elem):
            mc, mw = None, 0
            for c in self.columns:
                i = Interval(c.left, c.right)
                e = Interval(elem.left, elem.right)
                w = i.intersection(e).width
                if w > mw:
                    mc, mw = c, w
            if mc is None:
                self.log.warn('No suitable column for singleton',
                        elem.to_html())
                mc = self.columns[0]
            return mc
        for c in singleton.columns:
            for elem in c:
                col = most_suitable_column(elem)
                if self.opts.verbose > 3:
                    idx = self.columns.index(col)
                    self.log.debug('Absorbing singleton %s into column'%elem.to_html(),
                            idx)
                col.add(elem)

    def collect_stats(self):
        for column in self.columns:
            column.collect_stats()
        self.average_line_separation = sum([x.average_line_separation for x in
            self.columns])/float(len(self.columns))

    def __iter__(self):
        yield from self.columns

    def absorb_regions(self, regions, at):
        for region in regions:
            self.absorb_region(region, at)

    def absorb_region(self, region, at):
        # Merge another region's columns into ours, either appended at the
        # 'bottom' or prepended at the top. When the other region has more
        # columns, each of its columns is mapped onto our best-overlapping
        # column before merging.
        if len(region.columns) <= len(self.columns):
            for i in range(len(region.columns)):
                src, dest = region.columns[i], self.columns[i]
                if at != 'bottom':
                    # Prepending: reverse so the original order is preserved
                    src = reversed(list(iter(src)))
                for elem in src:
                    func = dest.add if at == 'bottom' else dest.prepend
                    func(elem)
        else:
            col_map = {}
            for i, col in enumerate(region.columns):
                max_overlap, max_overlap_index = 0, 0
                for j, dcol in enumerate(self.columns):
                    sint = Interval(col.left, col.right)
                    dint = Interval(dcol.left, dcol.right)
                    width = sint.intersection(dint).width
                    if width > max_overlap:
                        max_overlap = width
                        max_overlap_index = j
                col_map[i] = max_overlap_index
            lines = max(map(len, region.columns))
            if at == 'bottom':
                lines = range(lines)
            else:
                lines = range(lines-1, -1, -1)
            for i in lines:
                for j, src in enumerate(region.columns):
                    dest = self.columns[col_map[j]]
                    if i < len(src):
                        func = dest.add if at == 'bottom' else dest.prepend
                        func(src.elements[i])

    def dump(self, f):
        f.write('############################################################\n')
        f.write('########## Region (%d columns) ###############\n'%len(self.columns))
        f.write('############################################################\n\n')
        for i, col in enumerate(self.columns):
            col.dump(f, i)

    def linearize(self):
        # Flatten columns into a reading-order element list and group the
        # elements into Boxes (paragraph candidates). An Image opens an
        # ImageBox that captures any centered Text (captions) following it.
        self.elements = []
        for x in self.columns:
            self.elements.extend(x)
        self.boxes = [Box()]
        for i, elem in enumerate(self.elements):
            if isinstance(elem, Image):
                self.boxes.append(ImageBox(elem))
                img = Interval(elem.left, elem.right)
                for j in range(i+1, len(self.elements)):
                    t = self.elements[j]
                    if not isinstance(t, Text):
                        break
                    ti = Interval(t.left, t.right)
                    if not ti.centered_in(img):
                        break
                    self.boxes[-1].append(t)
                self.boxes.append(Box())
            else:
                is_indented = False
                if i+1 < len(self.elements):
                    indent_diff = elem.indent_fraction - \
                        self.elements[i+1].indent_fraction
                    if indent_diff > 0.05:
                        is_indented = True
                # NOTE(review): top_gap_ratio is None for a column's first
                # element (see Column.collect_stats); under Python 3
                # None > 1.2 raises TypeError — confirm whether first
                # elements can reach this comparison.
                if elem.top_gap_ratio > 1.2 or is_indented:
                    self.boxes.append(Box())
                self.boxes[-1].append(elem)
class Page:
    '''A single page of the PDF, holding its Text fragments and Images.

    Parses one ``<page>`` element from pdftohtml's XML output, filters out
    fragments that fall outside the page/skip boundaries, converts leading
    spaces into indents, sorts fragments into reading order and gathers
    font-size statistics.  Later passes (driven by PDFDocument) join
    fragments into lines, lines into paragraphs, detect centering/headings
    and finally render the page as HTML via to_html().
    '''

    def __init__(self, page, font_map, opts, log, idc):
        '''Build the page from its XML element.

        page:     lxml element for this <page>
        font_map: id -> Font mapping shared across the document
        opts:     conversion options (header/footer skips, no_images, ...)
        log:      logger
        idc:      shared id counter (iterator) for Text/Image objects
        '''
        def text_cmp(frst, secnd):
            # Compare 2 text objects.
            # Order by line (top/bottom) then left
            if (frst.top <= secnd.top and frst.bottom >= secnd.bottom-BOTTOM_FACTOR) \
              or (secnd.top <= frst.top and secnd.bottom >= frst.bottom-BOTTOM_FACTOR) :
                # Overlap = same line
                if frst.left < secnd.left :
                    return -1
                elif frst.left == secnd.left :
                    return 0
                return 1
            # Different line so sort into line number
            if frst.bottom < secnd.bottom :
                return -1
            elif frst.bottom == secnd.bottom :
                return 0
            return 1

        # The sort comparison caller
        from functools import cmp_to_key

        self.opts, self.log = opts, log
        self.font_map = font_map
        self.number = int(page.get('number'))
        self.odd_even = self.number % 2  # Odd = 1
        self.top, self.left, self.width, self.height = map(float, map(page.get, ('top', 'left', 'width', 'height')))
        self.id = 'page%d'%self.number
        self.page_break_after = False
        self.texts = []
        self.imgs = []
        # Set margins to values that will get adjusted
        self.left_margin = self.width
        self.right_margin = 0
        # For whether page number has been put in <a>
        self.id_used = 0

        for text in page.xpath('descendant::text'):
            self.texts.append(Text(text, self.font_map, self.opts, self.log, idc))
            text = self.texts[-1]
            # Check within page boundaries.
            # Remove lines of only spaces. Could be <i> </i> etc., but process later
            # Need to keep any href= (and others?)
            if text.is_spaces \
              or text.top < self.top \
              or text.top > self.height \
              or text.left > self.left+self.width \
              or text.left < self.left:
                #and re.match(r'href=', text.raw) is None:
                self.texts.remove(text)
            elif (self.opts.pdf_header_skip <= 0 or text.top >= self.opts.pdf_header_skip) \
              and (self.opts.pdf_footer_skip <= 0 or text.top <= self.opts.pdf_footer_skip):
                # Remove leading spaces and make into indent
                # Assume 1 space < 1 av_char_width?
                s = 0
                # Test above has shown string is not all spaces
                # but it could hold no text
                while s < len(text.text_as_string) \
                  and text.text_as_string[s] == ' ':
                    s += 1
                if s > 2:  # Allow two leading spaces
                    # Assume this is a standard indent
                    # Normally text.indented gets set later
                    text.indented = 1
                    w = round(s * text.average_character_width/2.0)  # Spaces < avg width
                    # Strip the leading spaces from raw, keeping any leading markup tag
                    matchObj = re.match(r'^\s*(<[^>]+>)?\s*(.*)$', text.raw)
                    t1 = matchObj.group(1)
                    t2 = matchObj.group(2)
                    if t1 is None:
                        t1 = ''
                    if t2 is None:
                        t2 = ''
                    text.raw = t1 + t2
                    text.text_as_string = text.text_as_string[s:]
                    text.left += w  # Add indent
                    text.last_left += w
                    text.width -= w  # Reduce width
                    text.final_width -= w
                self.left_margin = min(text.left, self.left_margin)
                self.right_margin = max(text.right, self.right_margin)
                # Change #nnn to #page_nnn in hrefs
                matchObj = re.match(r'^(.*)(<a href)(.+)("index.html#)(\d+)(".+)$', text.raw)
                if matchObj is not None:
                    text.raw = matchObj.group(1)+matchObj.group(2)+matchObj.group(3)+matchObj.group(4) \
                        +'page_'+matchObj.group(5)+matchObj.group(6)
            else:
                # Not within text boundaries
                self.texts.remove(text)

        # Find any image occurances if requested
        # These can be interspersed with text
        if not self.opts.no_images:
            for img in page.xpath('descendant::image'):
                self.imgs.append(Image(img, self.opts, self.log, idc))

        self.textwidth = self.right_margin - self.left_margin

        # Sort into page order. bottom then left
        # NB. This is only approximate as different sized characters
        # can mean sections of a line vary in top or bottom.
        # bottom is less varied than top, but is not guaranteed.
        # Multi-line characters make things even more interesting.
        self.texts.sort(key=cmp_to_key(text_cmp))

        # Character-count per font size, plus the page's average text height
        self.font_size_stats = {}
        self.average_text_height = 0
        for t in self.texts:
            if t.font_size not in self.font_size_stats:
                self.font_size_stats[t.font_size] = 0
            self.font_size_stats[t.font_size] += len(t.text_as_string)
            self.average_text_height += t.height
        if len(self.texts):
            self.average_text_height /= len(self.texts)

        self.font_size_stats = FontSizeStats(self.font_size_stats)

    @property
    def is_empty(self):
        # There is nothing in this Page
        return len(self.texts) == 0 \
          and len(self.imgs) == 0

    def find_match(self, frag):
        '''Return another text fragment on the same line as frag, or one
        horizontally adjacent within COALESCE_FACTOR average chars; else None.'''
        for t in self.texts:
            if t is not frag :
                # Do the parts of a line overlap?
                # Some files can have separate lines overlapping slightly
                # BOTTOM_FACTOR allows for this
                if (frag.top == t.top or frag.bottom == t.bottom) \
                  or (frag.top < t.top and frag.bottom > t.top+BOTTOM_FACTOR) \
                  or (frag.top < t.top and frag.bottom+BOTTOM_FACTOR > t.bottom) \
                  or (t.top < frag.top and t.bottom > frag.top+BOTTOM_FACTOR) \
                  or (t.top < frag.top and t.bottom+BOTTOM_FACTOR > frag.bottom):
                    return t  # Force match if same line
                # Sorting can put parts of a line in the wrong order if there are small chars
                if t.left < frag.left:
                    hdelta = frag.left - t.right
                else:
                    hdelta = t.left - frag.right
                hoverlap = COALESCE_FACTOR * frag.average_character_width
                if hdelta > -hoverlap and hdelta < hoverlap:
                    return t
        return None

    def join_fragments(self, opts):
        '''Coalesce fragments that lie on the same visual line into single
        Text objects.  Restarts the scan after each merge because the list
        is mutated.'''
        # Join fragments on a line
        # Do some basic checks on structure
        match_found = True
        tind = 0
        while match_found:
            match_found, match = False, None
            while tind < len(self.texts):
                frag = self.texts[tind]
                match = self.find_match(frag)
                if match is not None:
                    match_found = True
                    # Because texts are sorted top, left we can get small chars on the same line
                    # appearing after larger ones, even though they are further left
                    if frag.left > match.left:
                        x = frag
                        frag = match
                        match = x
                    frag.coalesce(match, self.number, self.left_margin, self.right_margin)
                    break  # Leave tind
                tind += 1
            if match is not None:
                self.texts.remove(match)

    def check_centered(self, stats):
        '''Classify each line as centered ('C') or right-aligned ('R') and
        apply heuristic heading tags (h1/h2/h3) using the document-wide
        left/indent statistics copied onto this page by second_pass().'''
        # Check for centered text
        # Also check for right aligned, and basic chapter structure
        # If there are different left/indents, need to adjust for this page
        # The centering check would fail where all lines on a page are centered
        # so use stats_left, stats_right, and stats_indent
        first = True
        # Assume not Contents
        self.contents = False
        # Even or odd page?
        if self.odd_even:
            left = self.stats_left_odd
            indent = self.stats_indent_odd
            indent1 = self.stats_indent_odd1
        else:
            left = self.stats_left_even
            indent = self.stats_indent_even
            indent1 = self.stats_indent_even1
        m = len(self.texts)
        for i in range(m):
            t = self.texts[i]
            lmargin = t.last_left  # Allow for lines joined into a para
            if t.bottom - t.top > stats.line_space * 2:
                rmargin = self.width - t.last_right  # Right of a coalesced paragraph
            else:
                rmargin = self.width - t.right  # Right of a coalesced line
            # Do we have a sequence of indented lines?
            xmargin = ymargin = -1
            if i > 0:
                xmargin = self.texts[i-1].last_left
            if i < m-1:
                ymargin = self.texts[i+1].last_left
            # Don't want to set headings on a Contents page
            # NB Doesn't work where Contents goes to another page
            if re.match(r'(?i)^\s*(table of )?contents\s*$', t.text_as_string) is not None:
                self.contents = True
                t.tag = 'h2'  # It won't get set later
            # Centered if left and right margins are within FACTOR%
            # Because indents can waver a bit, use between indent and indent1 as == indent
            if (lmargin < indent or lmargin > indent1) \
              and lmargin > left \
              and lmargin != xmargin \
              and lmargin != ymargin \
              and lmargin >= rmargin - rmargin*CENTER_FACTOR \
              and lmargin <= rmargin + rmargin*CENTER_FACTOR:
                #and t.left + t.width + t.left >= self.width + l_offset - t.average_character_width \
                #and t.left + t.width + t.left <= self.width + l_offset + t.average_character_width:
                t.align = 'C'
            # Right aligned if left > FACTOR% of right
            elif lmargin > indent \
              and lmargin > rmargin*RIGHT_FACTOR:
                #and t.right >= self.width - t.average_character_width:
                # What about right-aligned but indented on right?
                # What about indented rather than right-aligned?
                t.align = 'R'
            if not self.contents:
                # We can get <a href=...Chapter... Should this check be done?
                #if 'href=' not in t.raw:
                # Check for Roman numerals as the only thing on a line
                if re.match(r'^\s*[iIxXvV]+\s*$', t.text_as_string) is not None:
                    t.tag = 'h3'
                # Check for centered digits only
                elif first and t.align == 'C' and re.match(r'^\s*\d+\s*$', t.text_as_string) is not None:
                    t.tag = 'h2'
                elif re.match(r'(?i)^\s*part\s[A-Za-z0-9]+$', t.text_as_string) is not None:
                    t.tag = 'h1'
                # Check for 'Chapter' or a centered word at the top of the page
                # Some PDFs have chapter starts within the page so this check often fails
                elif re.match(r'(?i)^\s*chapter\s', t.text_as_string) is not None \
                  or re.match(r'(?i)^\s*prologue|epilogue\s*$', t.text_as_string) is not None \
                  or (first and t.align == 'C' and re.match(r'(?i)^\s*[a-z -]+\s*$', t.text_as_string) is not None) \
                  or (first and re.match(r'^\s*[A-Z -]+\s*$', t.text_as_string) is not None):
                    t.tag = 'h2'
            first = False

        # Now check image alignment
        for i in self.imgs:
            lmargin = i.left
            rmargin = self.width - i.right
            if lmargin > left \
              and lmargin != indent \
              and lmargin >= rmargin - rmargin*CENTER_FACTOR \
              and lmargin <= rmargin + rmargin*CENTER_FACTOR:
                i.align = 'C'

    def coalesce_paras(self, stats):
        '''Merge consecutive lines into paragraphs using left-margin/indent
        statistics and vertical spacing, also marking indents, margins and
        blank-line-before section breaks.'''
        # Join lines into paragraphs
        # Even or odd page?
        if self.odd_even:
            left = self.stats_left_odd
            indent = self.stats_indent_odd
            indent1 = self.stats_indent_odd1
        else:
            left = self.stats_left_even
            indent = self.stats_indent_even
            indent1 = self.stats_indent_even1

        def can_merge(self, first_text, second_text, stats):
            # Can two lines be merged into one paragraph?
            # Some PDFs have a wandering left margin which is consistent on a page
            # but not within the whole document. Hence use self.stats_left
            # Try to avoid close double quote at end of one and open double quote at start of next
            #
            # "float:left" occurs where there is a multi-line character, so indentation is messed up
            if ((second_text.left < left + second_text.average_character_width \
                and (second_text.left == first_text.last_left \
                    or (second_text.left < first_text.last_left \
                        and (first_text.indented > 0 or '"float:left"' in first_text.raw)))) \
                or (second_text.left == first_text.last_left \
                    and first_text.indented == 0 \
                    and second_text.left >= indent) \
                or (second_text.left == first_text.last_left \
                    and first_text.indented == second_text.indented \
                    and second_text.indented > 1) \
                or (second_text.left >= first_text.last_left \
                    and second_text.bottom <= first_text.bottom)) \
              and 'href=' not in second_text.raw \
              and '"float:right"' not in first_text.raw \
              and first_text.bottom + stats.line_space + (stats.line_space*LINE_FACTOR) \
                    >= second_text.bottom \
              and first_text.final_width > self.width*self.opts.unwrap_factor \
              and not (re.match('.*[.!?].$', first_text.text_as_string) is not None \
                and ((first_text.text_as_string[-1] == '\u0022' and second_text.text_as_string[0] == '\u0022') \
                  or (first_text.text_as_string[-1] == '\u2019' and second_text.text_as_string[0] == '\u2018') \
                  or (first_text.text_as_string[-1] == '\u201d' and second_text.text_as_string[0] == '\u201c'))):
                # This has checked for single quotes (9...6), double quotes (99...66), and "..."
                # at end of 1 line then start of next as a check for Don't merge
                return True
            return False

        # Loop through texts elements and coalesce if same lmargin
        # and no large gap between lines
        # Have to restart loop if an entry is removed
        # Doesn't work well with things like Contents list, hence check href
        match_found = True
        last_frag = None
        tind = 0
        while match_found:
            match_found, match = False, None
            # Same left margin probably means coalesce
            while tind < len(self.texts):
                frag = self.texts[tind]
                # Remove lines of only spaces
                if frag.is_spaces:
                    match = frag
                    break  # Leave tind
                if last_frag is not None \
                  and frag != last_frag \
                  and can_merge(self, last_frag, frag, stats):
                    last_frag.coalesce(frag, self.number, self.left_margin, self.right_margin)
                    last_frag.last_left = frag.left
                    last_frag.last_right = frag.right
                    last_frag.final_width = frag.final_width
                    # Check for centred done later
                    match = frag
                    break  # Leave tind
                else:
                    # Check for start of a paragraph being indented
                    # Ought to have some way of setting a standard indent
                    if frag.tag == 'p':
                        if frag.indented == 0 \
                          and frag.align != 'C' \
                          and frag.left > left + frag.average_character_width:
                            #frag.indented = int((frag.left - self.stats_left) / frag.average_character_width)
                            # Is it approx self.stats_indent?
                            if indent <= frag.left <= indent1:
                                frag.indented = 1  # 1em
                            else:  # Assume left margin of approx = number of chars
                                # Should check for values approx the same, as with indents
                                frag.margin_left = int(round((frag.left - left) / self.stats_margin_px)+0.5)
                        if last_frag is not None \
                          and frag.bottom - last_frag.bottom \
                            > stats.para_space*SECTION_FACTOR:
                            #and frag.top - last_frag.bottom > frag.height + stats.line_space + (stats.line_space*LINE_FACTOR):
                            frag.blank_line_before = 1
                last_frag = frag
                tind += 1
            if match is not None:
                match_found = True
                self.texts.remove(match)  # Leave tind

    def remove_head_foot_regex(self, opts):
        '''Remove up to LINE_SCAN_COUNT leading/trailing lines that match
        the user-supplied header/footer regexes.'''
        # Remove headers or footers from a page
        # if there is a regex supplied
        if len(opts.pdf_header_regex) > 0 \
          and len(self.texts) > 0:
            # Remove lines if they match
            for i in range(LINE_SCAN_COUNT):
                if len(self.texts) < 1:
                    break
                if re.match(opts.pdf_header_regex, self.texts[0].text_as_string) is not None :
                    # There could be fragments which are spread out, so join_fragments has not coalesced them
                    # Not sure that this would work as it relies on the first fragment matching regex
                    t = self.texts[0]
                    #match = self.find_match(t)
                    #while match is not None:
                    #    self.texts.remove(match)
                    #    match = self.find_match(t)
                    self.texts.remove(t)
        if len(opts.pdf_footer_regex) > 0 \
          and len(self.texts) > 0:
            # Remove the last lines if they match
            for i in range(LINE_SCAN_COUNT):
                if len(self.texts) < 1:
                    break
                if re.match(opts.pdf_footer_regex, self.texts[-1].text_as_string) is not None :
                    # There could be fragments which are spread out, so join_fragments has not coalesced them
                    t = self.texts[-1]
                    #match = self.find_match(t)
                    #while match is not None:
                    #    self.texts.remove(match)
                    #    match = self.find_match(t)
                    self.texts.remove(t)

    def create_page_format(self, stats, opts):
        '''First formatting pass: apply em font sizes, join fragments into
        lines, then apply user header/footer regex removal.'''
        # Join fragments into lines
        # then remove any headers/footers/unwanted areas
        self.update_font_sizes(stats)
        # Join fragments on a line
        self.join_fragments(opts)
        # This processes user-supplied regex for header/footer
        self.remove_head_foot_regex(opts)

    def find_margins(self, tops, indents_odd, indents_even, line_spaces, bottoms, rights):
        '''Accumulate this page's top/left/line-space/bottom/right values
        into the document-wide histogram dicts (mutated in place).'''
        #from collections import Counter
        # Should check for left margin and indent for this page
        # Find the most used top, left margins, and gaps between lines
        # The most used font will be treated as size 1em
        max_bot = 0
        max_right = 0
        last_top = 0
        #last_bottom = 0
        first = True
        for text in self.texts:
            top = text.top
            left = text.left
            # Normalise fractional coordinates to whole numbers so histogram keys cluster
            if round(left) != left :
                text.left = left = round(left)
            right = text.right
            if round(right) != right :
                text.right = right = round(right)
            if first:
                tops[top] = tops.get(top, 0) + 1
                first = False
            else:
                # Space from 1 line to the next
                space = abs(top - last_top)
                # Beware of multiple text on same line. These look like small spacing
                if text.height <= space:
                    line_spaces[space] = line_spaces.get(space, 0) + 1
            last_top = top
            max_bot = max(max_bot, text.bottom)
            max_right = max(max_right, text.right)
            if self.odd_even:
                indents_odd[left] = indents_odd.get(left, 0) + 1
            else:
                indents_even[left] = indents_even.get(left, 0) + 1
        if max_bot > 0:
            bottoms[max_bot] = bottoms.get(max_bot, 0) + 1
        if max_right > 0:
            rights[max_right] = rights.get(max_right, 0) + 1
        return

        # Everything below is unreachable legacy region/column code
        #########################
        #### NOT IMPLEMENTED ####
        'Sort page into regions and columns'
        self.regions = []
        if not self.elements:
            return
        for i, x in enumerate(self.elements):
            x.idx = i
        current_region = Region(self.opts, self.log)
        processed = set()
        for x in self.elements:
            if x in processed:
                continue
            elems = set(self.find_elements_in_row_of(x))
            columns = self.sort_into_columns(x, elems)
            processed.update(elems)
            if not current_region.contains(columns):
                self.regions.append(current_region)
                current_region = Region(self.opts, self.log)
            current_region.add(columns)
        if not current_region.is_empty:
            self.regions.append(current_region)
        if self.opts.verbose > 2:
            self.debug_dir = 'page-%d'%self.number
            os.mkdir(self.debug_dir)
            self.dump_regions('pre-coalesce')
        self.coalesce_regions()
        if self.opts.verbose > 2:
            self.dump_regions('post-coalesce')

    def dump_regions(self, fname):
        # Debug helper for the NOT IMPLEMENTED region code above.
        # NOTE(review): file is opened in binary mode but written str —
        # this would raise TypeError if the dead debug path ever ran; confirm before reviving.
        fname = 'regions-'+fname+'.txt'
        with open(os.path.join(self.debug_dir, fname), 'wb') as f:
            f.write('Page #%d\n\n'%self.number)
            for region in self.regions:
                region.dump(f)

    def coalesce_regions(self):
        # Part of the NOT IMPLEMENTED region code; only reachable from the dead path above.
        # find contiguous sets of small regions
        # absorb into a neighboring region (prefer the one with number of cols
        # closer to the avg number of cols in the set, if equal use larger
        # region)
        found = True
        absorbed = set()
        processed = set()
        while found:
            found = False
            for i, region in enumerate(self.regions):
                if region in absorbed:
                    continue
                if region.is_small and region not in processed:
                    found = True
                    processed.add(region)
                    regions = [region]
                    end = i+1
                    for j in range(i+1, len(self.regions)):
                        end = j
                        if self.regions[j].is_small:
                            regions.append(self.regions[j])
                        else:
                            break
                    prev_region = None if i == 0 else i-1
                    next_region = end if end < len(self.regions) and self.regions[end] not in regions else None
                    absorb_at = 'bottom'
                    if prev_region is None and next_region is not None:
                        absorb_into = next_region
                        absorb_at = 'top'
                    elif next_region is None and prev_region is not None:
                        absorb_into = prev_region
                    elif prev_region is None and next_region is None:
                        if len(regions) > 1:
                            absorb_into = i
                            regions = regions[1:]
                        else:
                            absorb_into = None
                    else:
                        absorb_into = prev_region
                        if self.regions[next_region].line_count >= \
                                self.regions[prev_region].line_count:
                            avg_column_count = sum([len(r.columns) for r in
                                regions])/float(len(regions))
                            if self.regions[next_region].line_count > \
                                    self.regions[prev_region].line_count \
                                or abs(avg_column_count -
                                    len(self.regions[prev_region].columns)) \
                                > abs(avg_column_count -
                                    len(self.regions[next_region].columns)):
                                absorb_into = next_region
                                absorb_at = 'top'
                    if absorb_into is not None:
                        self.regions[absorb_into].absorb_regions(regions, absorb_at)
                        absorbed.update(regions)
            for region in absorbed:
                self.regions.remove(region)

    def sort_into_columns(self, elem, neighbors):
        # Part of the NOT IMPLEMENTED region code.
        neighbors.add(elem)
        neighbors = sorted(neighbors, key=attrgetter('left'))
        if self.opts.verbose > 3:
            self.log.debug('Neighbors:', [x.to_html() for x in neighbors])
        columns = [Column()]
        columns[0].add(elem)
        for x in neighbors:
            added = False
            for c in columns:
                if c.contains(x):
                    c.add(x)
                    added = True
                    break
            if not added:
                columns.append(Column())
                columns[-1].add(x)
        columns.sort(key=attrgetter('left'))
        return columns

    def find_elements_in_row_of(self, x):
        # Part of the NOT IMPLEMENTED region code.
        interval = Interval(x.top,
                x.top + YFUZZ*(self.average_text_height))
        h_interval = Interval(x.left, x.right)
        for y in self.elements[x.idx:x.idx+15]:
            if y is not x:
                y_interval = Interval(y.top, y.bottom)
                x_interval = Interval(y.left, y.right)
                if interval.intersection(y_interval).width > \
                    0.5*self.average_text_height and \
                    x_interval.intersection(h_interval).width <= 0:
                    yield y

    def update_font_sizes(self, stats):
        '''Wrap any text whose em size differs from the document norm (1em)
        in a span carrying the font-size.  ``stats`` is currently unused.'''
        # Font sizes start as pixels/points, but em is more useful
        for text in self.texts:
            text.font_size_em = self.font_map[text.font.id].size_em
            if text.font_size_em != 0.00 and text.font_size_em != 1.00:
                text.raw = '<span style="font-size:%sem">%s</span>'%(str(text.font_size_em),text.raw)

    def second_pass(self, stats, opts):
        '''Second formatting pass: copy document statistics onto the page,
        join lines into paragraphs, then detect centering/headings.'''
        # If there are alternating pages, pick the left and indent for this one
        self.stats_left_odd = stats.left_odd
        self.stats_indent_odd = stats.indent_odd
        self.stats_indent_odd1 = stats.indent_odd1
        self.stats_left_even = stats.left_even
        self.stats_indent_even = stats.indent_even
        self.stats_indent_even1 = stats.indent_even1
        self.stats_right = stats.right  # Needs work
        self.stats_right_odd = stats.right
        self.stats_right_even = stats.right
        self.stats_margin_px = stats.margin_px

        # Join lines to form paragraphs
        self.coalesce_paras(stats)

        self.check_centered(stats)
        #self.elements = list(self.texts)
        #for img in page.xpath('descendant::img'):
        #    self.elements.append(Image(img, self.opts, self.log, idc))
        #self.elements.sort(cmp=lambda x,y:cmp(x.top, y.top))
        return

        # Unreachable legacy code below
        # NOT IMPLEMENTED
        'Locate paragraph boundaries in each column'
        for region in self.regions:
            region.collect_stats()
            region.linearize()

    def to_html(self):
        '''Render the page as a list of HTML strings, interleaving images
        by vertical position and emitting at most one id="page_N" anchor.'''
        # If ans.append is used, newlines are inserted between each element
        ans = []
        iind = 0
        itop = 0
        ilen = len(self.imgs)
        for text in self.texts:
            # Emit any image that appears above this text line
            if iind < ilen:
                itop = self.imgs[iind].top
            else:
                itop = 999999
            if itop <= text.top:
                ans.append('<p')
                if self.imgs[iind].align == 'C':
                    ans[-1] += ' style="text-align:center"'
                if self.id_used == 0:
                    self.id_used = 1
                    ans[-1] += ' id="page_%d"'%self.number
                ans[-1] += '>'
                ans[-1] += self.imgs[iind].to_html()
                ans[-1] += '</p>'
                iind += 1
            if text.blank_line_before > 0:
                ans.append('<p style="text-align:center"> </p>')
            ans.append('<%s'%text.tag)
            # Should be only for Headings, but there is no guarantee that the heading will be recognised
            # So put in an ID once per page in case the Contents references it
            # and text.tag[0] == 'h'
            if self.id_used == 0:
                self.id_used = 1
                ans[-1] += ' id="page_%d"'%self.number
            if text.align == 'C':
                ans[-1] += ' style="text-align:center"'
            elif text.align == 'R':
                ans[-1] += ' style="text-align:right"'
            elif text.indented > 0:
                ans[-1] += ' style="text-indent:'
                ans[-1] += str(text.indented)
                #ans[-1] += '1'
                ans[-1] += 'em"'
            # The margins need more work. e.g. can have indented + left + right
            elif text.margin_left > 0:
                ans[-1] += ' style="margin-left:'
                ans[-1] += str(text.margin_left)
                ans[-1] += 'em"'
            elif text.margin_right > 0:
                ans[-1] += ' style="margin-right:'
                ans[-1] += str(text.margin_right)
                ans[-1] += 'em"'
            ans[-1] += '>'
            ans[-1] += text.to_html()
            ans[-1] += '</%s>'%text.tag  # Closing tag
            if text.blank_line_after > 0:
                ans.append('<p style="text-align:center"> </p>')
        # Any remaining images
        while iind < ilen:
            ans.append('<p')
            if self.imgs[iind].align == 'C':
                ans[-1] += ' style="text-align:center"'
            if self.id_used == 0:
                self.id_used = 1
                ans[-1] += ' id="page_%d"'%self.number
            ans[-1] += '>'
            ans[-1] += self.imgs[iind].to_html()
            ans[-1] += '</p>'
            iind += 1
        return ans
class PDFDocument:
    def __init__(self, xml, opts, log):
        '''Parse pdftohtml's XML output and drive the whole reflow pipeline.

        xml:  bytes/str of the pdftohtml XML document
        opts: conversion options (header/footer skips & regexes, unwrap factor, ...)
        log:  logger

        Pipeline order matters: font stats -> page formatting (line joining,
        regex header/footer) -> automatic header/footer detection/removal ->
        margin statistics -> paragraph building -> page merging -> render.
        '''
        #from calibre.rpdb import set_trace; set_trace()
        self.opts, self.log = opts, log
        # Check for a testable value
        if self.opts.pdf_header_regex is None:
            self.opts.pdf_header_regex = ''  # Do nothing
        if self.opts.pdf_footer_regex is None:
            self.opts.pdf_footer_regex = ''  # Do nothing
        # recover=True tolerates the malformed XML pdftohtml can emit
        parser = etree.XMLParser(recover=True)
        self.root = etree.fromstring(xml, parser=parser)
        # Shared monotonically increasing id source for Text/Image objects
        idc = iter(range(sys.maxsize))
        self.stats = DocStats()
        self.fonts = []
        self.font_map = {}
        for spec in self.root.xpath('//fontspec'):
            self.fonts.append(Font(spec))
            self.font_map[self.fonts[-1].id] = self.fonts[-1]
        self.pages = []
        #self.page_map = {}
        for page in self.root.xpath('//page'):
            page = Page(page, self.font_map, opts, log, idc)
            #self.page_map[page.id] = page
            self.pages.append(page)
        # Document-wide histograms filled in by Page.find_margins
        self.tops = {}
        self.indents_odd = {}
        self.indents_even = {}
        self.line_spaces = {}
        self.bottoms = {}
        self.rights = {}
        self.font_sizes = {}
        self.collect_font_statistics()
        # Create lines for pages and remove headers/footers etc.
        for page in self.pages:
            page.document_font_stats = self.font_size_stats
            # This processes user-supplied regex for header/footer
            page.create_page_format(self.stats, self.opts)
        # Need to work out the header/footer automatically if opt < 0
        if self.opts.pdf_header_skip < 0 or self.opts.pdf_footer_skip < 0:
            self.find_header_footer()
        # Remove any header/footer
        if self.opts.pdf_header_skip > 0 or self.opts.pdf_footer_skip > 0:
            self.remove_header_footer()
        # Work out document dimensions from page format
        for page in self.pages:
            page.find_margins(self.tops, self.indents_odd, self.indents_even, \
                self.line_spaces, self.bottoms, self.rights)
        self.setup_stats()
        # Joins lines etc. into paragraphs
        for page in self.pages:
            page.second_pass(self.stats, self.opts)
        # Join paragraphs across page boundaries
        self.merge_pages(idc)
        #self.linearize()
        self.render()
    def collect_font_statistics(self):
        '''Aggregate per-page font statistics and pick the document's base font.

        Builds self.font_size_stats (characters per font size across all
        pages) and self.font_sizes (usage count per font id), then takes
        the most used font's size as 1em and assigns size_em to every font.
        '''
        self.font_size_stats = {}
        for p in self.pages:
            for sz in p.font_size_stats:
                chars = p.font_size_stats[sz]
                if sz not in self.font_size_stats:
                    self.font_size_stats[sz] = 0
                self.font_size_stats[sz] += chars
            for text in p.texts:
                font = int(text.font.id)
                self.font_sizes[font] = self.font_sizes.get(font, 0) + 1
        self.font_size_stats = FontSizeStats(self.font_size_stats)
        # Find most popular font so that will be treated as 1em
        fcount = f_ind = 0
        for f in self.font_sizes:
            if fcount < self.font_sizes[f]:
                fcount = self.font_sizes[f]
                f_ind = f
        # NOTE(review): f_ind is a font *id* used as an index into self.fonts —
        # this assumes fontspec ids are 0-based and contiguous; confirm against pdftohtml output.
        if len(self.fonts) > 0:
            self.stats.font_size = self.fonts[f_ind].size
        else:
            self.stats.font_size = 12.0
        # 1em of a 12pt font is about 16px. Save for indentation/margin setting
        self.stats.margin_px = max(self.stats.font_size * 16.0/12.0, 1.0)  # Ensure never zero
        for f in self.fonts:
            f.size_em = round(f.size / self.stats.font_size, 2)
    def setup_stats(self):
        '''Derive document-wide layout values from the collected histograms.

        Sets self.stats: top, left/indent (separately for odd and even
        pages, each as a min..max band), right, bottom, line_space,
        para_space and applies sanity corrections (left/indent swapped,
        implausibly large indents, capped paragraph gaps).
        '''
        # This probably needs more work on line spacing/para spacing
        # Maybe sort the line_spaces array.
        # It is possible to have more than 1 line space value, e.g. 8.0, 9.0, 10.0
        # then more than 1 para space, e.g. 24.0, 25.0, 26.0
        # Thus the count of a para space could be > the most popular line space.
        # So, probably need to find the max line space and max para space
        # rather than simply the most popular.
        # At the moment it only does this when spaces are close in popularity.

        # Find (next) most popular gap between lines
        def find_line_space(skip):
            # Return (count, space) of the most popular line gap whose
            # count is below ``skip`` (or the overall most popular if skip <= 0)
            scount, soffset = 0, 0
            for s in self.line_spaces:
                if scount <= self.line_spaces[s] \
                  and (skip <= 0 or self.line_spaces[s] < skip):
                    scount = self.line_spaces[s]
                    soffset = s
            return scount, soffset

        # Find (next) most popular indent
        def find_indent(indents, skip):
            # Same as find_line_space but over a left/indent histogram
            icount, ioffset = 0, 0
            for i in indents:
                if icount <= indents[i] \
                  and (skip <= 0 or indents[i] < skip):
                    icount = indents[i]
                    ioffset = i
            return icount, ioffset

        def set_indents(indents, odd_even):
            # Derive left margin band and indent band for odd (1) or even (0)
            # pages, merging nearby values (within SAME_INDENT) into one band.
            # Find most popular left so that will be treated as left of page
            indent_c = 0
            indent_k = indent_k1 = 0
            count = len(indents)
            while count > 0:
                c, k = find_indent(indents, indent_c)
                if indent_c <= 0:
                    indent_c = c
                if indent_k <= 0:
                    indent_k = k
                elif abs(indent_k - k) <= SAME_INDENT:
                    indent_k = min(indent_k, k)
                    indent_k1 = max(indent_k1, k)
                    indent_c = min(indent_c, c)
                else:
                    break
                count -= 1
            save_left = indent_k
            if odd_even:
                self.stats.left_odd = indent_k  # Min left value
                # Max left value
                if indent_k1:
                    self.stats.left_odd1 = indent_k1
                else:
                    self.stats.left_odd1 = indent_k
            else:
                self.stats.left_even = indent_k  # Min left value
                # Max left value
                if indent_k1:
                    self.stats.left_even1 = indent_k1
                else:
                    self.stats.left_even1 = indent_k

            # Find second most popular left so that will be treated as indent
            indent_c -= 1
            total_c = 0
            indent_k = indent_k1 = 0
            count = len(indents)
            while count > 0:
                c, k = find_indent(indents, indent_c)
                if indent_c <= 0:
                    indent_c = c
                if indent_k <= 0:
                    indent_k = k
                elif abs(indent_k - k) <= SAME_INDENT:
                    indent_k = min(indent_k, k)
                    indent_k1 = max(indent_k1, k)
                    indent_c = min(indent_c, c)
                else:
                    break
                total_c += c
                count -= 1

            # Find third most popular left as that might actually be the indent
            # if between left and current and occurs a reasonable number of times.
            save_k = indent_k
            save_k1 = indent_k1
            save_count = total_c
            indent_c -= 1
            total_c = 0
            indent_k = indent_k1 = 0
            count = len(indents)
            while count > 0:
                c, k = find_indent(indents, indent_c)
                if indent_c <= 0:
                    indent_c = c
                if indent_k <= 0:
                    indent_k = k
                elif abs(indent_k - k) <= SAME_INDENT:
                    indent_k = min(indent_k, k)
                    indent_k1 = max(indent_k1, k)
                    indent_c = min(indent_c, c)
                else:
                    break
                total_c += c
                count -= 1
            # Is this to be used?
            if (save_k < indent_k \
              and save_k > save_left) \
              or total_c < save_count / 2:
                # The usual case. The first ones found are to be used
                indent_k = save_k
                indent_k1 = save_k1
            if odd_even:
                self.stats.indent_odd = indent_k  # Min indent value
                # Max indent value
                if indent_k1:
                    self.stats.indent_odd1 = indent_k1
                else:
                    self.stats.indent_odd1 = indent_k
            else:
                self.stats.indent_even = indent_k  # Min indent value
                # Max indent value
                if indent_k1:
                    self.stats.indent_even1 = indent_k1
                else:
                    self.stats.indent_even1 = indent_k

            # For safety, check left and indent are in the right order
            if odd_even:
                if self.stats.indent_odd != 0 \
                  and self.stats.left_odd > self.stats.indent_odd:
                    l = self.stats.left_odd
                    l1 = self.stats.left_odd1
                    self.stats.left_odd = self.stats.indent_odd
                    self.stats.left_odd1 = self.stats.indent_odd1
                    self.stats.indent_odd = l
                    self.stats.indent_odd1 = l1
            else:
                if self.stats.indent_even != 0 \
                  and self.stats.left_even > self.stats.indent_even:
                    l = self.stats.left_even
                    l1 = self.stats.left_even1
                    self.stats.left_even = self.stats.indent_even
                    self.stats.left_even1 = self.stats.indent_even1
                    self.stats.indent_even = l
                    self.stats.indent_even1 = l1

        # Find most popular top so that will be treated as top of page
        tcount = 0
        for t in self.tops:
            if tcount < self.tops[t]:
                tcount = self.tops[t]
                self.stats.top = t

        # Some PDFs have alternating pages with different lefts/indents.
        # Always separate odd and even, though they are usually the same.
        # Find most left/indent for odd pages
        set_indents(self.indents_odd, 1)
        # Find most left/indent for even pages
        set_indents(self.indents_even, 0)

        # Find farthest right so that will be treated as page right
        ## SHOULD DO RIGHT2 as well
        rcount = 0
        for r in self.rights:
            if rcount < r:
                rcount = r
                self.stats.right = r

        # Do something about left and right margin values
        # They need the same sort of treatment as indents
        # self.stats.margin_left = 0
        # self.stats.margin_right = 0

        # Some PDFs have no indentation of paragraphs.
        # In this case, any value for indent is random.
        # Assume that at least 20% of lines would be indented
        # or that indent offset will be < 10% of line width
        if self.stats.indent_odd - self.stats.left_odd > (self.stats.right - self.stats.left_odd) * 0.10:  # 10%
            self.stats.indent_odd = self.stats.indent_odd1 = self.stats.left_odd
            # Assume for both if self.stats.indent_even - self.stats.left_even > (self.stats.right - self.stats.left_even) * 0.10:  # 10%
            self.stats.indent_even = self.stats.indent_even1 = self.stats.left_even

        # Sort spaces into ascending order then loop through.
        # Lowest value(s) are line spacing, next are para
        # Spaces not yet set up
        self.stats.line_space = self.stats.para_space = -1.0
        # Find spacing values
        # Find most popular space so that will be treated as line space
        line_k = 0
        line_c = 0
        count = len(self.line_spaces)
        while count > 0:
            c, k = find_line_space(line_c)
            if line_c <= 0:
                line_c = c
            if line_k <= 0:
                line_k = k
            elif abs(line_k - k) <= SAME_SPACE:
                line_k = max(line_k, k)
                line_c = min(line_c, c)
            else:
                break
            count -= 1
        # Get the next most popular gap
        para_c = line_c-1
        para_k = 0
        count = len(self.line_spaces)
        while count > 0:
            c, k = find_line_space(para_c)
            if para_k <= 0:
                para_k = k
            if abs(para_k - k) <= SAME_SPACE:
                para_k = max(para_k, k)
                para_c = min(para_c, c)
            else:
                break
            count -= 1
        # For safety, check in the right order
        if line_k > para_k:
            x = para_k
            para_k = line_k
            line_k = x
        self.stats.line_space = line_k
        # Some docs have no great distinction for paragraphs
        # Limit the size of the gap, or section breaks not found
        if para_k > line_k * PARA_FACTOR:
            self.stats.para_space = round(line_k * PARA_FACTOR)
        else:
            self.stats.para_space = para_k

        # Find the max bottom so that will be treated as bottom of page
        # Or most popular bottom? Or the max used value within 10% of max value?
        bcount = 0
        for b in self.bottoms:
            if bcount < self.bottoms[b]:
                #and b > self.stats.bottom*0.9:
                bcount = self.bottoms[b]
                if b > self.stats.bottom:
                    self.stats.bottom = b
    def find_header_footer(self):
        '''Scan the first PAGE_SCAN_COUNT pages for repeating header/footer
        lines and set opts.pdf_header_skip / opts.pdf_footer_skip from them.

        Lines count as header/footer if they are identical across pages, or
        look like page-number text.  Only runs when the relevant skip option
        is negative (i.e. automatic detection was requested).
        '''
        # If requested, scan first few pages for possible headers/footers
        if (self.opts.pdf_header_skip >= 0 \
          and self.opts.pdf_footer_skip >= 0) \
          or len(self.pages) < 2:
            # If doc is empty or 1 page, can't decide on any skips
            return

        scan_count = PAGE_SCAN_COUNT
        head_text = [''] * LINE_SCAN_COUNT
        head_match = [0] * LINE_SCAN_COUNT
        head_match1 = [0] * LINE_SCAN_COUNT
        head_page = 0
        head_skip = 0
        foot_text = [''] * LINE_SCAN_COUNT
        foot_match = [0] * LINE_SCAN_COUNT
        foot_match1 = [0] * LINE_SCAN_COUNT
        foot_page = 0
        foot_skip = 0
        # Matches 'n month n', leading digits, or lone Roman numerals
        pagenum_text = r'(.*\d+\s+\w+\s+\d+.*)|(\s*\d+\s+.*)|(^\s*[ivxlcIVXLC]+\s*$)'

        pages_to_scan = scan_count
        # Note that a line may be in more than 1 part
        # e.g. Page 1 of 6 ... DocName.pdf
        # so maybe should merge first 2 lines if same top
        # Ditto last 2 lines
        # Maybe should do more than 2 parts
        for page in self.pages:
            if self.opts.pdf_header_skip < 0 \
              and len(page.texts) > 0:
                # There is something at the top of the page
                for head_ind in range(LINE_SCAN_COUNT):
                    if len(page.texts) < head_ind+1 \
                      or page.texts[head_ind].top > page.height/2:
                        break  # Short page
                    t = page.texts[head_ind].text_as_string
                    #if len(page.texts) > 1 and page.texts[0].top == page.texts[1].top:
                    #    t += ' ' + page.texts[1].text_as_string
                    if len(head_text[head_ind]) == 0:
                        head_text[head_ind] = t
                    else:
                        if head_text[head_ind] == t:
                            head_match[head_ind] += 1
                            if head_page == 0:
                                head_page = page.number
                        else:  # Look for page count of format 'n xxx n'
                            if re.match(pagenum_text, t) is not None:
                                head_match1[head_ind] += 1
                                if head_page == 0:
                                    head_page = page.number
            if self.opts.pdf_footer_skip < 0 \
              and len(page.texts) > 0:
                # There is something at the bottom of the page
                for foot_ind in range(LINE_SCAN_COUNT):
                    if len(page.texts) < foot_ind+1 \
                      or page.texts[-foot_ind-1].top < page.height/2:
                        break  # Short page
                    t = page.texts[-foot_ind-1].text_as_string
                    #if len(page.texts) > 1 and page.texts[-1].top == page.texts[-2].top:
                    #    t += ' ' + page.texts[-2].text_as_string
                    if len(foot_text[foot_ind]) == 0:
                        foot_text[foot_ind] = t
                    else:
                        if foot_text[foot_ind] == t:
                            foot_match[foot_ind] += 1
                            if foot_page == 0:
                                foot_page = page.number
                        else:  # Look for page count of format 'n xxx n'
                            if re.match(pagenum_text, t) is not None:
                                foot_match1[foot_ind] += 1
                                if foot_page == 0:
                                    foot_page = page.number
            pages_to_scan -= 1
            if pages_to_scan < 1:
                break
        if pages_to_scan > 0:
            # Doc is shorter than scan_count
            pages_to_scan = scan_count - pages_to_scan  # Number scanned
        else:
            # All required pages scanned
            pages_to_scan = scan_count
        pages_to_scan /= 2  # Are at least half matching?
        head_ind = 0
        for i in range(LINE_SCAN_COUNT):
            if head_match[i] > pages_to_scan or head_match1[i] > pages_to_scan:
                head_ind = i  # Remember the last matching line
        if head_match[head_ind] > pages_to_scan or head_match1[head_ind] > pages_to_scan:
            # NOTE(review): head_page is a page *number* used as an index into
            # self.pages — assumes numbering matches list positions; confirm.
            t = self.pages[head_page].texts[head_ind]
            head_skip = t.top + t.height + 1
        foot_ind = 0
        for i in range(LINE_SCAN_COUNT):
            if foot_match[i] > pages_to_scan or foot_match1[i] > pages_to_scan:
                foot_ind = i  # Remember the last matching line
        if foot_match[foot_ind] > pages_to_scan or foot_match1[foot_ind] > pages_to_scan:
            t = self.pages[foot_page].texts[-foot_ind-1]
            foot_skip = t.top - 1
        if head_skip > 0:
            self.opts.pdf_header_skip = head_skip
        if foot_skip > 0:
            self.opts.pdf_footer_skip = foot_skip
def remove_header_footer(self):
# Remove any header/footer lines from all pages
for page in self.pages:
# If a text is removed, we need to restart the loop or what was the next will be skipped
removed = True
while removed:
removed = False
for t in page.texts:
if (self.opts.pdf_header_skip > 0 and t.top < self.opts.pdf_header_skip) \
or (self.opts.pdf_footer_skip > 0 and t.top > self.opts.pdf_footer_skip):
page.texts.remove(t)
removed = True
break # Restart loop
def merge_pages(self, idc):
# Check for pages that can be merged
# When merging pages, assume short last lines mean no merge
# BUT unfortunately there is no way to tell the difference
# between a continuation of a paragraph and a 'section break'
# if the previous page ends a sentence.
# First, find the minimum text top and the maximum text bottom
min_top = self.stats.top
max_bottom = self.stats.bottom
# The space at the end of a page that indicates there is no merge
orphan_space = max_bottom - ORPHAN_LINES*self.stats.line_space
# Keep a note of the position of the final line on the merged page
save_bottom = 0
# After merge, skip to this page
pind = 0
# Now merge where bottom of one is within ORPHAN_LINES lines of max_bottom
# and top of next is within a line of min_top
# and margins correspond, and it's a normal paragraph
merge_done = True
while merge_done:
merge_done = False # A merge was done
merged_page = None # Page merged into previous
candidate = None # Lines close enough to the bottom that it might merge
while pind < len(self.pages):
page = self.pages[pind]
if page.odd_even:
stats_left = page.stats_left_odd
else:
stats_left = page.stats_left_even
# Do not merge if the next paragraph is indented
if page.texts:
if candidate \
and page.texts[0].indented == 0:
last_line = candidate.texts[-1]
merged_text = page.texts[0]
top = merged_text.top
# How much space in pixels was at the end of the last line?
# If the book is justified text, any space could mean end-of-para
# So, how to check for a justified book/page?
last_spare = candidate.right_margin - last_line.final_width # Pixels
# How big is the first word on the next line?
merged_first = re.match(r'^([^ ]+)\s', merged_text.text_as_string)
if merged_first is not None:
# First word number of chars as pixels
merged_len = len(merged_first.group(1)) * merged_text.average_character_width
else:
merged_len = merged_text.right
# Allow where the last line ends with or next line starts with lower case.
if re.match('.*[a-z, -]$', last_line.text_as_string) is not None \
or re.match('^[a-z, -]', merged_text.text_as_string) is not None :
merged_len = merged_text.right
# To use merged_len etc.
# Should not merge if speech where last ends 99 or 9 and next starts 66 or 6
if top <= min_top + page.average_text_height \
and merged_text.tag == 'p' \
and 'href=' not in merged_text.raw \
and merged_text.left < stats_left + merged_text.average_character_width \
and not last_spare > merged_len \
and not (re.match('.*[.!?](\u201d|”)$', last_line.text_as_string) is not None
and re.match('^(\u201c|“).*', merged_text.text_as_string) is not None):
merge_done = True
# We don't want to merge partial pages
# i.e. if this is the last line, preserve its top/bottom till after merge
if len(page.texts) == 1 :
save_bottom = merged_text.bottom
else:
save_bottom = 0.0
# Update this page final top/bottom
merged_text.top = candidate.texts[-1].top + page.average_text_height
merged_text.bottom = merged_text.top + merged_text.height
merged_page = page
break
# If the next page starts below the top, add a blank line before the first line
# This must not be done after a merge as the top has moved
if page.texts[0].top > self.stats.top + self.stats.line_space:
page.texts[0].blank_line_after = 1
candidate = None
last_line = page.texts[-1]
bottom = last_line.bottom
# Decide on whether merging is a good idea
# Non-indented paragraphs are a problem
# Do we have a short page?
if bottom < orphan_space \
and (len(page.imgs) == 0 or page.imgs[-1].bottom < orphan_space):
# Force a new page.
# Avoid this if the next page starts with an image that wouldn't fit
if pind < len(self.pages)-1: # There is another page
if len(self.pages[pind+1].imgs) == 0 \
or (self.pages[pind+1].imgs[0].height < orphan_space \
and (len(self.pages[pind+1].texts) == 0 \
or self.pages[pind+1].texts[0].top > self.pages[pind+1].imgs[0].top)):
page.page_break_after = True
elif (re.match('.*[a-z, ]$', last_line.text_as_string) is not None \
or last_line.final_width > page.width*self.opts.unwrap_factor):
# or (last_line.right * 100.0 / page.right_margin) > LAST_LINE_PERCENT):
candidate = page
else:
candidate = None
pind += 1
if merge_done:
# We now need to skip to the next page number
# The text has been appended to this page, so coalesce the paragraph
if merged_page.odd_even:
left_margin = merged_page.stats_left_odd
right_margin = merged_page.stats_right_odd
else:
left_margin = merged_page.stats_left_even
right_margin = merged_page.stats_right_odd
candidate.texts[-1].coalesce(merged_text, candidate.number, left_margin, right_margin)
merged_page.texts.remove(merged_text)
# Put back top/bottom after coalesce if final line
if save_bottom != 0.0 :
# Ignore top as that can confuse things where the 1st para of a page
# was merged with a previous. Keep the original top
candidate.texts[-1].bottom = save_bottom
#candidate.coalesce_paras()
# Have we removed everything from this page (well, all texts and images)
if merged_page.is_empty:
candidate.texts[-1].blank_line_before = 1
self.pages.remove(merged_page)
def linearize(self):
self.elements = []
last_region = last_block = None
for page in self.pages:
page_number_inserted = False
for region in page.regions:
merge_first_block = last_region is not None and \
len(last_region.columns) == len(region.columns) and \
not hasattr(last_block, 'img')
for i, block in enumerate(region.boxes):
if merge_first_block:
merge_first_block = False
if not page_number_inserted:
last_block.append(page.number)
page_number_inserted = True
for elem in block:
last_block.append(elem)
else:
if not page_number_inserted:
block.insert(0, page.number)
page_number_inserted = True
self.elements.append(block)
last_block = block
last_region = region
def render(self):
#### Where does the title come from if not run from command line?
title = 'Converted Ebook'
if len(sys.argv) > 1:
title = sys.argv[1]
# Need to insert font info and styles
html = ['<?xml version="1.0" encoding="UTF-8"?>',
'<html xmlns="http://www.w3.org/1999/xhtml">', '<head>',
'<title>'+title+'</title>',
'<meta content="PDF Reflow conversion" name="generator"/>',
'</head>', '<body>']
for page in self.pages:
html.extend(page.to_html())
if page.page_break_after:
html+= ['<div style="page-break-after:always"></div>']
html += ['</body>', '</html>']
raw = ('\n'.join(html)).replace('</strong><strong>', '')
raw = raw.replace('</i><i>', '')
raw = raw.replace('</em><em>', '')
raw = raw.replace('</b><b>', '')
raw = raw.replace('</strong> <strong>', ' ')
raw = raw.replace('</i> <i>', ' ')
raw = raw.replace('</em> <em>', ' ')
raw = raw.replace('</b> <b>', ' ')
with open('index.html', 'wb') as f:
f.write(raw.encode('utf-8'))
| 85,175 | Python | .py | 1,810 | 33.065746 | 139 | 0.522337 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,598 | links.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/render/links.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from calibre.ebooks.pdf.render.common import Array, Dictionary, Name, String, UTF16String, current_log
from polyglot.builtins import iteritems
from polyglot.urllib import unquote, urlparse
class Destination(Array):
    '''A PDF explicit destination array: ``[page /XYZ left top zoom]``.

    If the requested page was not rendered, falls back page by page to the
    nearest earlier page, pointing at its top-left corner, and logs a
    warning.
    '''
    def __init__(self, start_page, pos, get_pageref):
        wanted = start_page + max(0, pos['column'])
        page_idx = wanted
        while page_idx > -1:
            try:
                pref = get_pageref(page_idx)
                break
            except IndexError:
                # Page missing: target the origin of an earlier page instead
                pos['left'] = pos['top'] = 0
                page_idx -= 1
        if page_idx != wanted:
            current_log().warn(f'Could not find page {wanted} for link destination, using page {page_idx} instead')
        super().__init__([
            pref, Name('XYZ'), pos['left'], pos['top'], None
        ])
class Links:
    '''Tracks hyperlinks and anchors across the input documents and, once
    rendering is done, writes the corresponding PDF link annotations,
    destinations and outline (bookmarks).'''
    def __init__(self, pdf, mark_links, page_size):
        # Maps normalized absolute file path -> {anchor name or None: Destination}
        self.anchors = {}
        # Pending links: ((path, href, fragment), page object, rect Array)
        self.links = []
        # Default destination position: top-left corner of the page
        self.start = {'top':page_size[1], 'column':0, 'left':0}
        self.pdf = pdf
        self.mark_links = mark_links
    def add(self, base_path, start_page, links, anchors):
        '''Register the anchors and outgoing links of one input document
        whose first rendered page is start_page.'''
        path = os.path.normcase(os.path.abspath(base_path))
        self.anchors[path] = a = {}
        # Implicit destination used for links that target the file itself
        a[None] = Destination(start_page, self.start, self.pdf.get_pageref)
        for anchor, pos in iteritems(anchors):
            a[anchor] = Destination(start_page, pos, self.pdf.get_pageref)
        for link in links:
            href, page, rect = link
            p, frag = href.partition('#')[0::2]
            try:
                pref = self.pdf.get_pageref(page).obj
            except IndexError:
                # The page may not exist; retry with the previous page
                try:
                    pref = self.pdf.get_pageref(page-1).obj
                except IndexError:
                    self.pdf.debug('Unable to find page for link: %r, ignoring it' % link)
                    continue
                self.pdf.debug('The link %s points to non-existent page, moving it one page back' % href)
            self.links.append(((path, p, frag or None), pref, Array(rect)))
    def add_links(self):
        '''Create a link annotation on the containing page for every
        registered link: internal links resolve to destinations, external
        ones become URI actions.'''
        for link in self.links:
            path, href, frag = link[0]
            page, rect = link[1:]
            combined_path = os.path.normcase(os.path.abspath(os.path.join(os.path.dirname(path), *unquote(href).split('/'))))
            is_local = not href or combined_path in self.anchors
            annot = Dictionary({
                'Type':Name('Annot'), 'Subtype':Name('Link'),
                'Rect':rect, 'Border':Array([0,0,0]),
            })
            if self.mark_links:
                # Debug aid: draw a visible red border around every link
                annot.update({'Border':Array([16, 16, 1]), 'C':Array([1.0, 0,
                    0])})
            if is_local:
                path = combined_path if href else path
                try:
                    annot['Dest'] = self.anchors[path][frag]
                except KeyError:
                    # Unknown fragment: fall back to the top of the file
                    try:
                        annot['Dest'] = self.anchors[path][None]
                    except KeyError:
                        pass
            else:
                url = href + (('#'+frag) if frag else '')
                try:
                    purl = urlparse(url)
                except Exception:
                    self.pdf.debug('Ignoring unparsable URL: %r' % url)
                    continue
                if purl.scheme and purl.scheme != 'file':
                    action = Dictionary({
                        'Type':Name('Action'), 'S':Name('URI'),
                    })
                    # Do not try to normalize/quote/unquote this URL as if it
                    # has a query part, it will get corrupted
                    action['URI'] = String(url)
                    annot['A'] = action
            if 'A' in annot or 'Dest' in annot:
                if 'Annots' not in page:
                    page['Annots'] = Array()
                page['Annots'].append(self.pdf.objects.add(annot))
            else:
                self.pdf.debug('Could not find destination for link: %s in file %s'%
                               (href, path))
    def add_outline(self, toc):
        '''Build the PDF outline (bookmarks) tree from a TOC object and
        attach it to the document catalog.'''
        parent = Dictionary({'Type':Name('Outlines')})
        parentref = self.pdf.objects.add(parent)
        self.process_children(toc, parentref, parent_is_root=True)
        self.pdf.catalog.obj['Outlines'] = parentref
    def process_children(self, toc, parentref, parent_is_root=False):
        '''Recursively create outline items for toc's children and link
        them into the First/Last/Next/Prev chain under parentref.'''
        childrefs = []
        for child in toc:
            childref = self.process_toc_item(child, parentref)
            if childref is None:
                continue
            if childrefs:
                childrefs[-1].obj['Next'] = childref
                childref.obj['Prev'] = childrefs[-1]
            childrefs.append(childref)
            if len(child) > 0:
                self.process_children(child, childref)
        if childrefs:
            parentref.obj['First'] = childrefs[0]
            parentref.obj['Last'] = childrefs[-1]
            if not parent_is_root:
                # A negative Count marks the outline entry as closed by default
                parentref.obj['Count'] = -len(childrefs)
    def process_toc_item(self, toc, parentref):
        '''Create a single outline item for toc.  Returns the new object
        reference, or None if no destination could be resolved.'''
        path = toc.abspath or None
        frag = toc.fragment or None
        if path is None:
            return
        path = os.path.normcase(os.path.abspath(path))
        if path not in self.anchors:
            return None
        a = self.anchors[path]
        # Unknown fragments fall back to the top-of-file destination
        dest = a.get(frag, a[None])
        item = Dictionary({'Parent':parentref, 'Dest':dest,
                           'Title':UTF16String(toc.text or _('Unknown'))})
        return self.pdf.objects.add(item)
| 5,706 | Python | .py | 129 | 30.705426 | 125 | 0.517353 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,599 | gradients.py | kovidgoyal_calibre/src/calibre/ebooks/pdf/render/gradients.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import copy
import sys
from collections import namedtuple
from qt.core import QLinearGradient, QPointF, sip
from calibre.ebooks.pdf.render.common import Array, Dictionary, Name
# A single gradient stop: `t` is the offset along the axis, `color` an RGBA tuple
Stop = namedtuple('Stop', 't color')
class LinearGradientPattern(Dictionary):
    '''A PDF Pattern dictionary (PatternType 2 wrapping an axial,
    ShadingType 2, shading) equivalent to a Qt linear gradient brush.'''
    def __init__(self, brush, matrix, pdf, pixel_page_width, pixel_page_height):
        self.matrix = (matrix.m11(), matrix.m12(), matrix.m21(), matrix.m22(),
                       matrix.dx(), matrix.dy())
        gradient = sip.cast(brush.gradient(), QLinearGradient)
        start, stop, stops = self.spread_gradient(gradient, pixel_page_width,
                                                  pixel_page_height, matrix)
        # TODO: Handle colors with different opacities
        self.const_opacity = stops[0].color[-1]
        # Build a stitching function (FunctionType 3) from one exponential
        # interpolation function (FunctionType 2) per interval between stops
        funcs = Array()
        bounds = Array()
        encode = Array()
        for i, current_stop in enumerate(stops):
            if i < len(stops) - 1:
                next_stop = stops[i+1]
                func = Dictionary({
                    'FunctionType': 2,
                    'Domain': Array([0, 1]),
                    'C0': Array(current_stop.color[:3]),
                    'C1': Array(next_stop.color[:3]),
                    'N': 1,
                })
                funcs.append(func)
                encode.extend((0, 1))
                if i+1 < len(stops) - 1:
                    bounds.append(next_stop.t)
        func = Dictionary({
            'FunctionType': 3,
            'Domain': Array([stops[0].t, stops[-1].t]),
            'Functions': funcs,
            'Bounds': bounds,
            'Encode': encode,
        })
        shader = Dictionary({
            'ShadingType': 2,
            'ColorSpace': Name('DeviceRGB'),
            'AntiAlias': True,
            'Coords': Array([start.x(), start.y(), stop.x(), stop.y()]),
            'Function': func,
            'Extend': Array([True, True]),
        })
        Dictionary.__init__(self, {
            'Type': Name('Pattern'),
            'PatternType': 2,
            'Shading': shader,
            'Matrix': Array(self.matrix),
        })
        # Used by the renderer to de-duplicate identical patterns
        self.cache_key = (self.__class__.__name__, self.matrix,
                          tuple(shader['Coords']), stops)
    def spread_gradient(self, gradient, pixel_page_width, pixel_page_height,
                        matrix):
        '''Return (start, stop, stops) with the gradient axis extended to
        cover the whole page, emulating Qt's Repeat/Reflect spread modes,
        which PDF axial shadings do not support natively.'''
        start = gradient.start()
        stop = gradient.finalStop()
        stops = list(map(lambda x: [x[0], x[1].getRgbF()], gradient.stops()))
        spread = gradient.spread()
        if spread != gradient.PadSpread:
            # Map the page corners into gradient (logical) space to know how
            # far the axis has to be tiled in each direction
            inv = matrix.inverted()[0]
            page_rect = tuple(map(inv.map, (
                QPointF(0, 0), QPointF(pixel_page_width, 0), QPointF(0, pixel_page_height),
                QPointF(pixel_page_width, pixel_page_height))))
            maxx = maxy = -sys.maxsize-1
            minx = miny = sys.maxsize
            for p in page_rect:
                minx, maxx = min(minx, p.x()), max(maxx, p.x())
                miny, maxy = min(miny, p.y()), max(maxy, p.y())
            def in_page(point):
                return (minx <= point.x() <= maxx and miny <= point.y() <= maxy)
            offset = stop - start
            llimit, rlimit = start, stop
            reflect = False
            base_stops = copy.deepcopy(stops)
            reversed_stops = list(reversed(stops))
            do_reflect = spread == gradient.ReflectSpread
            totl = abs(stops[-1][0] - stops[0][0])
            intervals = [abs(stops[i+1][0] - stops[i][0])/totl
                for i in range(len(stops)-1)]
            # Extend backwards until the start lies outside the page,
            # alternating reflected copies when in Reflect mode
            while in_page(llimit):
                reflect ^= True
                llimit -= offset
                estops = reversed_stops if (reflect and do_reflect) else base_stops
                stops = copy.deepcopy(estops) + stops
            first_is_reflected = reflect
            reflect = False
            # Extend forwards until the end lies outside the page
            while in_page(rlimit):
                reflect ^= True
                rlimit += offset
                estops = reversed_stops if (reflect and do_reflect) else base_stops
                stops = stops + copy.deepcopy(estops)
            start, stop = llimit, rlimit
            num = len(stops) // len(base_stops)
            if num > 1:
                # Adjust the stop parameter values
                t = base_stops[0][0]
                rlen = totl/num
                reflect = first_is_reflected ^ True
                intervals = [i*rlen for i in intervals]
                rintervals = list(reversed(intervals))
                for i in range(num):
                    reflect ^= True
                    pos = i * len(base_stops)
                    tvals = [t]
                    for ival in (rintervals if reflect and do_reflect else
                                 intervals):
                        tvals.append(tvals[-1] + ival)
                    for j in range(len(base_stops)):
                        stops[pos+j][0] = tvals[j]
                    t = tvals[-1]
                # In case there were rounding errors
                stops[-1][0] = base_stops[-1][0]
        return start, stop, tuple(Stop(s[0], s[1]) for s in stops)
| 5,344 | Python | .py | 120 | 30.408333 | 91 | 0.501348 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |