id
int64
0
458k
file_name
stringlengths
4
119
file_path
stringlengths
14
227
content
stringlengths
24
9.96M
size
int64
24
9.96M
language
stringclasses
1 value
extension
stringclasses
14 values
total_lines
int64
1
219k
avg_line_length
float64
2.52
4.63M
max_line_length
int64
5
9.91M
alphanum_fraction
float64
0
1
repo_name
stringlengths
7
101
repo_stars
int64
100
139k
repo_forks
int64
0
26.4k
repo_open_issues
int64
0
2.27k
repo_license
stringclasses
12 values
repo_extraction_date
stringclasses
433 values
27,400
opf.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/opf.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>' from lxml import etree from calibre import prepare_string_for_xml as xml from calibre.ebooks.oeb.base import DC, DC11_NS, OPF, OPF2_NS, XHTML_MIME from calibre.ebooks.oeb.polish.check.base import WARN, BaseError from calibre.ebooks.oeb.polish.toc import find_existing_nav_toc, parse_nav from calibre.ebooks.oeb.polish.utils import guess_type from polyglot.builtins import iteritems class MissingSection(BaseError): def __init__(self, name, section_name): BaseError.__init__(self, _('The <%s> section is missing from the OPF') % section_name, name) self.HELP = xml(_( 'The <%s> section is required in the OPF file. You have to create one.') % section_name) class EmptyID(BaseError): def __init__(self, name, lnum): BaseError.__init__(self, _('Empty id attributes are invalid'), name, lnum) self.HELP = xml(_( 'Empty ID attributes are invalid in OPF files.')) class IncorrectIdref(BaseError): def __init__(self, name, idref, lnum): BaseError.__init__(self, _('idref="%s" points to unknown id') % idref, name, lnum) self.HELP = xml(_( 'The idref="%s" points to an id that does not exist in the OPF') % idref) class IncorrectCover(BaseError): def __init__(self, name, lnum, cover): BaseError.__init__(self, _('The meta cover tag points to an non-existent item'), name, lnum) self.HELP = xml(_( 'The meta cover tag points to an item with id="%s" which does not exist in the manifest') % cover) class NookCover(BaseError): HELP = _( 'Some e-book readers such as the Nook fail to recognize covers if' ' the content attribute comes before the name attribute.' 
' For maximum compatibility move the name attribute before the content attribute.') INDIVIDUAL_FIX = _('Move the name attribute before the content attribute') def __init__(self, name, lnum): BaseError.__init__(self, _('The meta cover tag has content before name'), name, lnum) def __call__(self, container): for cover in container.opf_xpath('//opf:meta[@name="cover" and @content]'): cover.set('content', cover.attrib.pop('content')) container.dirty(container.opf_name) return True class IncorrectToc(BaseError): def __init__(self, name, lnum, bad_idref=None, bad_mimetype=None): if bad_idref is not None: msg = _('The item identified as the Table of Contents (%s) does not exist') % bad_idref self.HELP = _('There is no item with id="%s" in the manifest.') % bad_idref else: msg = _('The item identified as the Table of Contents has an incorrect media-type (%s)') % bad_mimetype self.HELP = _('The media type for the Table of Contents must be %s') % guess_type('a.ncx') BaseError.__init__(self, msg, name, lnum) class NoHref(BaseError): HELP = _('This manifest entry has no href attribute. Either add the href attribute or remove the entry.') INDIVIDUAL_FIX = _('Remove this manifest entry') def __init__(self, name, item_id, lnum): BaseError.__init__(self, _('Item in manifest has no href attribute'), name, lnum) self.item_id = item_id def __call__(self, container): changed = False for item in container.opf_xpath('/opf:package/opf:manifest/opf:item'): if item.get('id', None) == self.item_id: changed = True container.remove_from_xml(item) container.dirty(container.opf_name) return changed class MissingNCXRef(BaseError): HELP = _('The <spine> tag has no reference to the NCX table of contents file.' ' Without this reference, the table of contents will not work in most' ' readers. 
The reference should look like <spine toc="id of manifest item for the ncx file">.') INDIVIDUAL_FIX = _('Add the reference to the NCX file') def __init__(self, name, lnum, ncx_id): BaseError.__init__(self, _('Missing reference to the NCX Table of Contents'), name, lnum) self.ncx_id = ncx_id def __call__(self, container): changed = False for item in container.opf_xpath('/opf:package/opf:spine'): if item.get('toc') is None: item.set('toc', self.ncx_id) changed = True container.dirty(container.opf_name) return changed class MissingNav(BaseError): HELP = _('This book has no Navigation document. According to the EPUB 3 specification, a navigation document' ' is required. The Navigation document contains the Table of Contents. Use the Table of Contents' ' tool to add a Table of Contents to this book.') def __init__(self, name, lnum): BaseError.__init__(self, _('Missing navigation document'), name, lnum) class EmptyNav(BaseError): HELP = _('The nav document for this book contains no table of contents, or an empty table of contents.' ' Use the Table of Contents tool to add a Table of Contents to this book.') LEVEL = WARN def __init__(self, name, lnum): BaseError.__init__(self, _('Missing ToC in navigation document'), name, lnum) class MissingHref(BaseError): HELP = _('A file listed in the manifest is missing, you should either remove' ' it from the manifest or add the missing file to the book.') def __init__(self, name, href, lnum): BaseError.__init__(self, _('Item (%s) in manifest is missing') % href, name, lnum) self.bad_href = href self.INDIVIDUAL_FIX = _('Remove the entry for %s from the manifest') % href def __call__(self, container): [container.remove_from_xml(elem) for elem in container.opf_xpath('/opf:package/opf:manifest/opf:item[@href]') if elem.get('href') == self.bad_href] container.dirty(container.opf_name) return True class NonLinearItems(BaseError): level = WARN has_multiple_locations = True HELP = xml(_('There are items marked as non-linear in the <spine>.' 
' These will be displayed in random order by different e-book readers.' ' Some will ignore the non-linear attribute, some will display' ' them at the end or the beginning of the book and some will' ' fail to display them at all. Instead of using non-linear items' ' simply place the items in the order you want them to be displayed.')) INDIVIDUAL_FIX = _('Mark all non-linear items as linear') def __init__(self, name, locs): BaseError.__init__(self, _('Non-linear items in the spine'), name) self.all_locations = [(name, x, None) for x in locs] def __call__(self, container): [elem.attrib.pop('linear') for elem in container.opf_xpath('//opf:spine/opf:itemref[@linear]')] container.dirty(container.opf_name) return True class DuplicateHref(BaseError): has_multiple_locations = True INDIVIDUAL_FIX = _( 'Remove all but the first duplicate item') def __init__(self, name, eid, locs, for_spine=False): loc = 'spine' if for_spine else 'manifest' BaseError.__init__(self, _('Duplicate item in {0}: {1}').format(loc, eid), name) self.HELP = _( 'The item {0} is present more than once in the {2} in {1}. This is' ' not allowed.').format(eid, name, loc) self.all_locations = [(name, lnum, None) for lnum in sorted(locs)] self.duplicate_href = eid self.xpath = '/opf:package/opf:' + ('spine/opf:itemref[@idref]' if for_spine else 'manifest/opf:item[@href]') self.attr = 'idref' if for_spine else 'href' def __call__(self, container): items = [e for e in container.opf_xpath(self.xpath) if e.get(self.attr) == self.duplicate_href] [container.remove_from_xml(e) for e in items[1:]] container.dirty(self.name) return True class MultipleCovers(BaseError): has_multiple_locations = True HELP = xml(_( 'There is more than one <meta name="cover"> tag defined. 
There should be only one.')) INDIVIDUAL_FIX = _('Remove all but the first meta cover tag') def __init__(self, name, locs): BaseError.__init__(self, _('There is more than one cover defined'), name) self.all_locations = [(name, lnum, None) for lnum in sorted(locs)] def __call__(self, container): items = [e for e in container.opf_xpath('/opf:package/opf:metadata/opf:meta[@name="cover"]')] [container.remove_from_xml(e) for e in items[1:]] container.dirty(self.name) return True class NoUID(BaseError): HELP = xml(_( 'The OPF must have an unique identifier, i.e. a <dc:identifier> element whose id is referenced' ' by the <package> element')) INDIVIDUAL_FIX = _('Auto-generate a unique identifier') def __init__(self, name): BaseError.__init__(self, _('The OPF has no unique identifier'), name) def __call__(self, container): from calibre.ebooks.oeb.base import uuid_id opf = container.opf uid = uuid_id() opf.set('unique-identifier', uid) m = container.opf_xpath('/opf:package/opf:metadata') if not m: m = [container.opf.makeelement(OPF('metadata'), nsmap={'dc':DC11_NS})] container.insert_into_xml(container.opf, m[0], 0) m = m[0] dc = m.makeelement(DC('identifier'), id=uid, nsmap={'opf':OPF2_NS}) dc.set(OPF('scheme'), 'uuid') dc.text = uid container.insert_into_xml(m, dc) container.dirty(container.opf_name) return True class EmptyIdentifier(BaseError): HELP = xml(_('The <dc:identifier> element must not be empty.')) INDIVIDUAL_FIX = _('Remove empty identifiers') def __init__(self, name, lnum): BaseError.__init__(self, _('Empty identifier element'), name, lnum) def __call__(self, container): for dcid in container.opf_xpath('/opf:package/opf:metadata/dc:identifier'): if not dcid.text or not dcid.text.strip(): container.remove_from_xml(dcid) container.dirty(container.opf_name) return True class BadSpineMime(BaseError): def __init__(self, name, iid, mt, lnum, opf_name): BaseError.__init__(self, _('Incorrect media-type for spine item'), opf_name, lnum) self.HELP = _( 'The item {0} 
present in the spine has the media-type {1}. ' ' Most e-book software cannot handle non-HTML spine items. ' ' If the item is actually HTML, you should change its media-type to {2}.' ' If it is not-HTML you should consider replacing it with an HTML item, as it' ' is unlikely to work in most readers.').format(name, mt, XHTML_MIME) if iid is not None: self.INDIVIDUAL_FIX = _('Change the media-type to %s') % XHTML_MIME self.iid = iid def __call__(self, container): container.opf_xpath('/opf:package/opf:manifest/opf:item[@id=%r]' % self.iid)[0].set( 'media-type', XHTML_MIME) container.dirty(container.opf_name) container.refresh_mime_map() return True def check_opf(container): errors = [] opf_version = container.opf_version_parsed if container.opf.tag != OPF('package'): err = BaseError(_('The OPF does not have the correct root element'), container.opf_name, container.opf.sourceline) err.HELP = xml(_( 'The OPF must have the root element <package> in namespace {0}, like this: <package xmlns="{0}">')).format(OPF2_NS) errors.append(err) elif container.opf.get('version') is None and container.book_type == 'epub': err = BaseError(_('The OPF does not have a version'), container.opf_name, container.opf.sourceline) err.HELP = xml(_( 'The <package> tag in the OPF must have a version attribute. 
This is usually version="2.0" for EPUB2 and AZW3 and version="3.0" for EPUB3')) errors.append(err) for tag in ('metadata', 'manifest', 'spine'): if not container.opf_xpath('/opf:package/opf:' + tag): errors.append(MissingSection(container.opf_name, tag)) all_ids = set(container.opf_xpath('//*/@id')) if '' in all_ids: for empty_id_tag in container.opf_xpath('//*[@id=""]'): errors.append(EmptyID(container.opf_name, empty_id_tag.sourceline)) all_ids.discard('') for elem in container.opf_xpath('//*[@idref]'): if elem.get('idref') not in all_ids: errors.append(IncorrectIdref(container.opf_name, elem.get('idref'), elem.sourceline)) nl_items = [elem.sourceline for elem in container.opf_xpath('//opf:spine/opf:itemref[@linear="no"]')] if nl_items: errors.append(NonLinearItems(container.opf_name, nl_items)) seen, dups = {}, {} for item in container.opf_xpath('/opf:package/opf:manifest/opf:item'): href = item.get('href', None) if href is None: errors.append(NoHref(container.opf_name, item.get('id', None), item.sourceline)) else: hname = container.href_to_name(href, container.opf_name) if not hname or not container.exists(hname): errors.append(MissingHref(container.opf_name, href, item.sourceline)) if href in seen: if href not in dups: dups[href] = [seen[href]] dups[href].append(item.sourceline) else: seen[href] = item.sourceline errors.extend(DuplicateHref(container.opf_name, eid, locs) for eid, locs in iteritems(dups)) seen, dups = {}, {} for item in container.opf_xpath('/opf:package/opf:spine/opf:itemref[@idref]'): ref = item.get('idref') if ref in seen: if ref not in dups: dups[ref] = [seen[ref]] dups[ref].append(item.sourceline) else: seen[ref] = item.sourceline errors.extend(DuplicateHref(container.opf_name, eid, locs, for_spine=True) for eid, locs in iteritems(dups)) spine = container.opf_xpath('/opf:package/opf:spine[@toc]') if spine: spine = spine[0] mitems = [x for x in container.opf_xpath('/opf:package/opf:manifest/opf:item[@id]') if x.get('id') == spine.get('toc')] 
if mitems: mitem = mitems[0] if mitem.get('media-type', '') != guess_type('a.ncx'): errors.append(IncorrectToc(container.opf_name, mitem.sourceline, bad_mimetype=mitem.get('media-type'))) else: errors.append(IncorrectToc(container.opf_name, spine.sourceline, bad_idref=spine.get('toc'))) else: spine = container.opf_xpath('/opf:package/opf:spine') if spine: spine = spine[0] ncx = container.manifest_type_map.get(guess_type('a.ncx')) if ncx: ncx_name = ncx[0] rmap = {v:k for k, v in iteritems(container.manifest_id_map)} ncx_id = rmap.get(ncx_name) if ncx_id: errors.append(MissingNCXRef(container.opf_name, spine.sourceline, ncx_id)) if opf_version.major > 2: existing_nav = find_existing_nav_toc(container) if existing_nav is None: errors.append(MissingNav(container.opf_name, 0)) else: toc = parse_nav(container, existing_nav) if len(toc) == 0: errors.append(EmptyNav(existing_nav, 0)) covers = container.opf_xpath('/opf:package/opf:metadata/opf:meta[@name="cover"]') if len(covers) > 0: if len(covers) > 1: errors.append(MultipleCovers(container.opf_name, [c.sourceline for c in covers])) manifest_ids = set(container.opf_xpath('/opf:package/opf:manifest/opf:item/@id')) for cover in covers: if cover.get('content', None) not in manifest_ids: errors.append(IncorrectCover(container.opf_name, cover.sourceline, cover.get('content', ''))) raw = etree.tostring(cover) try: n, c = raw.index(b'name="'), raw.index(b'content="') except ValueError: n = c = -1 if n > -1 and c > -1 and n > c: errors.append(NookCover(container.opf_name, cover.sourceline)) uid = container.opf.get('unique-identifier', None) if uid is None: errors.append(NoUID(container.opf_name)) else: dcid = container.opf_xpath('/opf:package/opf:metadata/dc:identifier[@id=%r]' % uid) if not dcid or not dcid[0].text or not dcid[0].text.strip(): errors.append(NoUID(container.opf_name)) for elem in container.opf_xpath('/opf:package/opf:metadata/dc:identifier'): if not elem.text or not elem.text.strip(): 
errors.append(EmptyIdentifier(container.opf_name, elem.sourceline)) for item, name, linear in container.spine_iter: mt = container.mime_map[name] if mt != XHTML_MIME: iid = item.get('idref', None) lnum = None if iid: mitem = container.opf_xpath('/opf:package/opf:manifest/opf:item[@id=%r]' % iid) if mitem: lnum = mitem[0].sourceline else: iid = None errors.append(BadSpineMime(name, iid, mt, lnum, container.opf_name)) return errors
17,629
Python
.py
327
44.538226
152
0.624419
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,401
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
112
Python
.py
3
34.666667
61
0.673077
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,402
fonts.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/fonts.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' from css_parser.css import CSSRule from tinycss.fonts3 import parse_font_family from calibre import force_unicode from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES from calibre.ebooks.oeb.polish.check.base import WARN, BaseError from calibre.ebooks.oeb.polish.fonts import change_font_in_declaration from calibre.ebooks.oeb.polish.pretty import pretty_script_or_style from calibre.ebooks.oeb.polish.utils import OEB_FONTS from calibre.utils.fonts.utils import UnsupportedFont, get_all_font_names, is_font_embeddable from polyglot.builtins import iteritems class InvalidFont(BaseError): HELP = _('This font could not be processed. It most likely will' ' not work in an e-book reader, either') def fix_sheet(sheet, css_name, font_name): changed = False for rule in sheet.cssRules: if rule.type in (CSSRule.FONT_FACE_RULE, CSSRule.STYLE_RULE): changed = change_font_in_declaration(rule.style, css_name, font_name) or changed return changed class NotEmbeddable(BaseError): level = WARN def __init__(self, name, fs_type): BaseError.__init__(self, _('The font {} is not allowed to be embedded').format(name), name) self.HELP = _('The font has a flag in its metadata ({:09b}) set indicating that it is' ' not licensed for embedding. You can ignore this warning, if you are' ' sure you have permission to embed this font.').format(fs_type) class FontAliasing(BaseError): level = WARN def __init__(self, font_name, css_name, name, line): BaseError.__init__(self, _('The CSS font-family name {0} does not match the actual font name {1}').format(css_name, font_name), name, line) self.HELP = _('The font family name specified in the CSS @font-face rule: "{0}" does' ' not match the font name inside the actual font file: "{1}". This can' ' cause problems in some viewers. 
You should change the CSS font name' ' to match the actual font name.').format(css_name, font_name) self.INDIVIDUAL_FIX = _('Change the font name {0} to {1} everywhere').format(css_name, font_name) self.font_name, self.css_name = font_name, css_name def __call__(self, container): changed = False for name, mt in iteritems(container.mime_map): if mt in OEB_STYLES: sheet = container.parsed(name) if fix_sheet(sheet, self.css_name, self.font_name): container.dirty(name) changed = True elif mt in OEB_DOCS: for style in container.parsed(name).xpath('//*[local-name()="style"]'): if style.get('type', 'text/css') == 'text/css' and style.text: sheet = container.parse_css(style.text) if fix_sheet(sheet, self.css_name, self.font_name): style.text = force_unicode(sheet.cssText, 'utf-8') pretty_script_or_style(container, style) container.dirty(name) changed = True for elem in container.parsed(name).xpath('//*[@style and contains(@style, "font-family")]'): style = container.parse_css(elem.get('style'), is_declaration=True) if change_font_in_declaration(style, self.css_name, self.font_name): elem.set('style', force_unicode(style.cssText, 'utf-8').replace('\n', ' ')) container.dirty(name) changed = True return changed def check_fonts(container): font_map = {} errors = [] for name, mt in iteritems(container.mime_map): if mt in OEB_FONTS: raw = container.raw_data(name) try: name_map = get_all_font_names(raw) except Exception as e: errors.append(InvalidFont(_('Not a valid font: %s') % e, name)) continue font_map[name] = name_map.get('family_name', None) or name_map.get('preferred_family_name', None) or name_map.get('wws_family_name', None) try: embeddable, fs_type = is_font_embeddable(raw) except UnsupportedFont: embeddable = True if not embeddable: errors.append(NotEmbeddable(name, fs_type)) sheets = [] for name, mt in iteritems(container.mime_map): if mt in OEB_STYLES: try: sheets.append((name, container.parsed(name), None)) except Exception: pass # Could not parse, ignore elif mt in 
OEB_DOCS: for style in container.parsed(name).xpath('//*[local-name()="style"]'): if style.get('type', 'text/css') == 'text/css' and style.text: sheets.append((name, container.parse_css(style.text), style.sourceline)) for name, sheet, line_offset in sheets: for rule in sheet.cssRules.rulesOfType(CSSRule.FONT_FACE_RULE): src = rule.style.getPropertyCSSValue('src') if src is not None and src.length > 0: href = getattr(src.item(0), 'uri', None) if href is not None: fname = container.href_to_name(href, name) font_name = font_map.get(fname, None) if font_name is None: continue families = parse_font_family(rule.style.getPropertyValue('font-family')) if families: if families[0] != font_name: errors.append(FontAliasing(font_name, families[0], name, line_offset)) return errors
5,925
Python
.py
107
42.504673
150
0.593443
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,403
base.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/base.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' from contextlib import closing from functools import partial from multiprocessing.pool import ThreadPool from calibre import detect_ncpus as cpu_count DEBUG, INFO, WARN, ERROR, CRITICAL = range(5) class BaseError: HELP = '' INDIVIDUAL_FIX = '' level = ERROR has_multiple_locations = False def __init__(self, msg, name, line=None, col=None): self.msg, self.line, self.col = msg, line, col self.name = name # A list with entries of the form: (name, lnum, col) self.all_locations = None def __str__(self): return f'{self.__class__.__name__}:{self.name} ({self.line}, {self.col}):{self.msg}' __repr__ = __str__ def worker(func, args): try: result = func(*args) tb = None except: result = None import traceback tb = traceback.format_exc() return result, tb def run_checkers(func, args_list): num = cpu_count() pool = ThreadPool(num) ans = [] with closing(pool): for result, tb in pool.map(partial(worker, func), args_list): if tb is not None: raise Exception('Failed to run worker: \n%s' % tb) ans.extend(result) return ans
1,329
Python
.py
40
27.15
92
0.621664
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,404
css.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/css.py
#!/usr/bin/env python # License: GPL v3 Copyright: 2020, Kovid Goyal <kovid at kovidgoyal.net> import atexit import json import numbers import sys from collections import namedtuple from itertools import repeat from qt.core import QApplication, QEventLoop, pyqtSignal, sip from qt.webengine import QWebEnginePage, QWebEngineProfile, QWebEngineScript from calibre import detect_ncpus as cpu_count from calibre import prints from calibre.ebooks.oeb.polish.check.base import ERROR, WARN, BaseError from calibre.gui2 import must_use_qt from calibre.utils.resources import get_path as P from calibre.utils.webengine import secure_webengine, setup_profile class CSSParseError(BaseError): level = ERROR is_parsing_error = True FIXABLE_CSS_ERROR = False class CSSError(BaseError): level = ERROR FIXABLE_CSS_ERROR = False class CSSWarning(BaseError): level = WARN FIXABLE_CSS_ERROR = False def as_int_or_none(x): if x is not None and not isinstance(x, numbers.Integral): try: x = int(x) except Exception: x = None return x def message_to_error(message, name, line_offset, rule_metadata): rule_id = message.get('rule') if rule_id == 'CssSyntaxError': cls = CSSParseError else: cls = CSSError if message.get('severity') == 'error' else CSSWarning if message.get('rule') == 'property-no-unknown' and 'panose-1' in message.get('text', ''): # suppress panose-1 it is allowed in CSSS 2.1 and generated by calibre conversation return title = message.get('text') or _('Unknown error') title = title.rpartition('(')[0].strip() line = as_int_or_none(message.get('line')) col = as_int_or_none(message.get('column')) if col is not None: col -= 1 if line is not None: line += line_offset ans = cls(title, name, line, col) ans.HELP = message.get('text') or '' if ans.HELP: ans.HELP += '. 
' ans.css_rule_id = rule_id m = rule_metadata.get(rule_id) or {} if 'url' in m: ans.HELP += _('See <a href="{}">detailed description</a>.').format(m['url']) + ' ' if m.get('fixable'): ans.FIXABLE_CSS_ERROR = True ans.HELP += _('This error will be automatically fixed if you click "Try to correct all fixable errors" below.') return ans def stylelint_js(): ans = getattr(stylelint_js, 'ans', None) if ans is None: ans = stylelint_js.ans = ( ('stylelint-bundle.min.js', P('stylelint-bundle.min.js', data=True, allow_user_override=False).decode('utf-8')), ('stylelint.js', P('stylelint.js', data=True, allow_user_override=False).decode('utf-8')), ) return ans def create_profile(): ans = getattr(create_profile, 'ans', None) if ans is None: ans = create_profile.ans = QWebEngineProfile(QApplication.instance()) setup_profile(ans) for (name, code) in stylelint_js(): s = QWebEngineScript() s.setName(name) s.setSourceCode(code) s.setWorldId(QWebEngineScript.ScriptWorldId.ApplicationWorld) ans.scripts().insert(s) return ans class Worker(QWebEnginePage): work_done = pyqtSignal(object, object) def __init__(self): must_use_qt() QWebEnginePage.__init__(self, create_profile(), QApplication.instance()) self.titleChanged.connect(self.title_changed) secure_webengine(self.settings()) self.ready = False self.working = False self.pending = None self.setHtml('') def title_changed(self, new_title): new_title = new_title.partition(':')[0] if new_title == 'ready': self.ready = True if self.pending is not None: self.check_css(*self.pending) self.pending = None elif new_title == 'checked': self.runJavaScript('window.get_css_results()', QWebEngineScript.ScriptWorldId.ApplicationWorld, self.check_done) def javaScriptConsoleMessage(self, level, msg, lineno, source_id): msg = f'{source_id}:{lineno}:{msg}' try: print(msg) except Exception: pass def check_css(self, src, fix=False): self.working = True self.runJavaScript( f'window.check_css({json.dumps(src)}, {"true" if fix else "false"})', 
QWebEngineScript.ScriptWorldId.ApplicationWorld) def check_css_when_ready(self, src, fix=False): if self.ready: self.check_css(src, fix) else: self.working = True self.pending = src, fix def check_done(self, results): self.working = False for result in results: self.work_done.emit(self, result) class Pool: def __init__(self): self.workers = [] self.max_workers = cpu_count() def add_worker(self): w = Worker() w.work_done.connect(self.work_done) self.workers.append(w) def check_css(self, css_sources, fix=False): self.doing_fix = fix self.pending = list(enumerate(css_sources)) self.results = list(repeat(None, len(css_sources))) self.working = True self.assign_work() app = QApplication.instance() while self.working: app.processEvents(QEventLoop.ProcessEventsFlag.WaitForMoreEvents | QEventLoop.ProcessEventsFlag.ExcludeUserInputEvents) return self.results def assign_work(self): while self.pending: if len(self.workers) < self.max_workers: self.add_worker() for w in self.workers: if not w.working: idx, src = self.pending.pop() w.result_idx = idx w.check_css_when_ready(src, self.doing_fix) break else: break def work_done(self, worker, result): self.results[worker.result_idx] = result self.assign_work() if not self.pending and not any(w for w in self.workers if w.working): self.working = False def shutdown(self): def safe_delete(x): if not sip.isdeleted(x): sip.delete(x) for i in self.workers: safe_delete(i) self.workers = [] pool = Pool() shutdown = pool.shutdown atexit.register(shutdown) Job = namedtuple('Job', 'name css line_offset fix_data') def create_job(name, css, line_offset=0, is_declaration=False, fix_data=None): if is_declaration: css = 'div{\n' + css + '\n}' line_offset -= 1 if line_offset > 0: css = ('\n' * line_offset) + css line_offset = 0 return Job(name, css, line_offset, fix_data) def check_css(jobs): errors = [] if not jobs: return errors results = pool.check_css([j.css for j in jobs]) for job, result in zip(jobs, results): if result['type'] == 'error': 
errors.append(CSSParseError(_('Failed to process CSS in {name} with errors: {errors}').format( name=job.name, errors=result['error']), job.name)) continue result = json.loads(result['results']['output']) rule_metadata = result['rule_metadata'] for msg in result['results']['warnings']: err = message_to_error(msg, job.name, job.line_offset, rule_metadata) if err is not None: errors.append(err) return errors def main(): with open(sys.argv[-1], 'rb') as f: css = f.read().decode('utf-8') errors = check_css([create_job(sys.argv[-1], css)]) for error in errors: prints(error) if __name__ == '__main__': try: main() finally: shutdown()
7,789
Python
.py
204
30.269608
131
0.620507
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,405
main.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/check/main.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' from collections import namedtuple from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES from calibre.ebooks.oeb.polish.check.base import WARN, run_checkers from calibre.ebooks.oeb.polish.check.fonts import check_fonts from calibre.ebooks.oeb.polish.check.images import check_raster_images from calibre.ebooks.oeb.polish.check.links import check_link_destinations, check_links, check_mimetypes from calibre.ebooks.oeb.polish.check.opf import check_opf from calibre.ebooks.oeb.polish.check.parsing import ( EmptyFile, check_encoding_declarations, check_filenames, check_html_size, check_ids, check_markup, check_xml_parsing, fix_style_tag, ) from calibre.ebooks.oeb.polish.cover import is_raster_image from calibre.ebooks.oeb.polish.utils import guess_type from polyglot.builtins import as_unicode, iteritems XML_TYPES = frozenset(map(guess_type, ('a.xml', 'a.svg', 'a.opf', 'a.ncx'))) | {'application/oebps-page-map+xml'} class CSSChecker: def __init__(self): self.jobs = [] def create_job(self, name, raw, line_offset=0, is_declaration=False): from calibre.ebooks.oeb.polish.check.css import create_job self.jobs.append(create_job(name, as_unicode(raw), line_offset, is_declaration)) def __call__(self): from calibre.ebooks.oeb.polish.check.css import check_css if not self.jobs: return () return check_css(self.jobs) def run_checks(container): errors = [] # Check parsing xml_items, html_items, raster_images, stylesheets = [], [], [], [] for name, mt in iteritems(container.mime_map): items = None decode = False if mt in XML_TYPES: items = xml_items elif mt in OEB_DOCS: items = html_items elif mt in OEB_STYLES: decode = True items = stylesheets elif is_raster_image(mt): items = raster_images if items is not None: items.append((name, mt, container.raw_data(name, decode=decode))) if container.book_type == 'epub': errors.extend(run_checkers(check_html_size, html_items)) 
errors.extend(run_checkers(check_xml_parsing, xml_items)) errors.extend(run_checkers(check_xml_parsing, html_items)) errors.extend(run_checkers(check_raster_images, raster_images)) for err in errors: if err.level > WARN: return errors # css uses its own worker pool css_checker = CSSChecker() for name, mt, raw in stylesheets: if not raw: errors.append(EmptyFile(name)) continue css_checker.create_job(name, raw) errors.extend(css_checker()) for name, mt, raw in html_items + xml_items: errors.extend(check_encoding_declarations(name, container)) css_checker = CSSChecker() for name, mt, raw in html_items: if not raw: continue root = container.parsed(name) for style in root.xpath('//*[local-name()="style"]'): if style.get('type', 'text/css') == 'text/css' and style.text: css_checker.create_job(name, style.text, line_offset=style.sourceline - 1) for elem in root.xpath('//*[@style]'): raw = elem.get('style') if raw: css_checker.create_job(name, raw, line_offset=elem.sourceline - 1, is_declaration=True) errors.extend(css_checker()) errors += check_mimetypes(container) errors += check_links(container) + check_link_destinations(container) errors += check_fonts(container) errors += check_ids(container) errors += check_filenames(container) errors += check_markup(container) errors += check_opf(container) return errors CSSFix = namedtuple('CSSFix', 'original_css elem attribute') def fix_css(container): from calibre.ebooks.oeb.polish.check.css import create_job, pool jobs = [] for name, mt in iteritems(container.mime_map): if mt in OEB_STYLES: css = container.raw_data(name, decode=True) jobs.append(create_job(name, css, fix_data=CSSFix(css, None, ''))) elif mt in OEB_DOCS: root = container.parsed(name) for style in root.xpath('//*[local-name()="style"]'): if style.get('type', 'text/css') == 'text/css' and style.text: jobs.append(create_job(name, style.text, fix_data=CSSFix(style.text, style, ''))) for elem in root.xpath('//*[@style]'): raw = elem.get('style') if raw: 
jobs.append(create_job(name, raw, is_declaration=True, fix_data=CSSFix(raw, elem, 'style'))) results = pool.check_css([j.css for j in jobs], fix=True) changed = False for job, result in zip(jobs, results): if result['type'] == 'error': continue fx = job.fix_data fixed_css = result['results']['output'] if fixed_css == fx.original_css: continue changed = True if fx.elem is None: with container.open(job.name, 'wb') as f: f.write(fixed_css.encode('utf-8')) else: if fx.attribute: fx.elem.set(fx.attribute, ' '.join(fixed_css.splitlines()[1:-1])) else: fx.elem.text = fixed_css container.dirty(job.name) return changed def fix_errors(container, errors): # Fix parsing changed = False for name in {e.name for e in errors if getattr(e, 'is_parsing_error', False)}: try: root = container.parsed(name) except TypeError: continue container.dirty(name) if container.mime_map[name] in OEB_DOCS: for style in root.xpath('//*[local-name()="style"]'): if style.get('type', 'text/css') == 'text/css' and style.text and style.text.strip(): fix_style_tag(container, style) changed = True has_fixable_css_errors = False for err in errors: if getattr(err, 'FIXABLE_CSS_ERROR', False): has_fixable_css_errors = True if err.INDIVIDUAL_FIX: if err(container) is not False: # Assume changed unless fixer explicitly says no change (this # is because sometimes I forget to return True, and it is # better to have a false positive than a false negative) changed = True if has_fixable_css_errors: if fix_css(container): changed = True return changed
6,681
Python
.py
157
34.031847
113
0.627868
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,406
container.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/container.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import os import subprocess from zipfile import ZipFile from calibre import CurrentDir from calibre.ebooks.oeb.polish.container import OCF_NS, clone_container from calibre.ebooks.oeb.polish.container import get_container as _gc from calibre.ebooks.oeb.polish.replace import rationalize_folders, rename_files from calibre.ebooks.oeb.polish.split import merge, split from calibre.ebooks.oeb.polish.tests.base import BaseTest, get_simple_book, get_split_book from calibre.ptempfile import TemporaryDirectory, TemporaryFile from calibre.utils.filenames import nlinks_file from calibre.utils.resources import get_path as P from polyglot.builtins import iteritems, itervalues def get_container(*args, **kwargs): kwargs['tweak_mode'] = True return _gc(*args, **kwargs) class ContainerTests(BaseTest): def test_clone(self): ' Test cloning of containers ' for fmt in ('epub', 'azw3'): base = os.path.join(self.tdir, fmt + '-') book = get_simple_book(fmt) tdir = base + 'first' os.mkdir(tdir) c1 = get_container(book, tdir=tdir) tdir = base + 'second' os.mkdir(tdir) c2 = clone_container(c1, tdir) for c in (c1, c2): for name, path in iteritems(c.name_path_map): self.assertEqual(2, nlinks_file(path), 'The file %s is not linked' % name) for name in c1.name_path_map: self.assertIn(name, c2.name_path_map) with c1.open(name) as one, c2.open(name) as two: self.assertEqual(one.read(), two.read(), 'The file %s differs' % name) spine_names = tuple(x[0] for x in c1.spine_names) text = spine_names[0] root = c2.parsed(text) root.xpath('//*[local-name()="body"]')[0].set('id', 'changed id for test') c2.dirty(text) c2.commit_item(text) for c in (c1, c2): self.assertEqual(1, nlinks_file(c.name_path_map[text])) with c1.open(text) as c1f, c2.open(text) as c2f: self.assertNotEqual(c1f.read(), c2f.read()) name = spine_names[1] with c1.open(name, mode='r+b') as f: f.seek(0, 2) f.write(b' ') for c in (c1, 
c2): self.assertEqual(1, nlinks_file(c.name_path_map[name])) with c1.open(text) as c1f, c2.open(text) as c2f: self.assertNotEqual(c1f.read(), c2f.read()) x = base + 'out.' + fmt for c in (c1, c2): c.commit(outpath=x) def test_file_removal(self): ' Test removal of files from the container ' book = get_simple_book() c = get_container(book, tdir=self.tdir) files = ('toc.ncx', 'cover.png', 'titlepage.xhtml') self.assertIn('titlepage.xhtml', {x[0] for x in c.spine_names}) self.assertTrue(c.opf_xpath('//opf:meta[@name="cover"]')) for x in files: c.remove_item(x) self.assertIn(c.opf_name, c.dirtied) self.assertNotIn('titlepage.xhtml', {x[0] for x in c.spine_names}) self.assertFalse(c.opf_xpath('//opf:meta[@name="cover"]')) raw = c.serialize_item(c.opf_name).decode('utf-8') for x in files: self.assertNotIn(x, raw) def run_external_tools(self, container, vim=False, epubcheck=True): with TemporaryFile(suffix='.epub', dir=self.tdir) as f: container.commit(outpath=f) if vim: subprocess.Popen(['vim', '-f', f]).wait() if epubcheck: subprocess.Popen(['epubcheck', f]).wait() def test_file_rename(self): ' Test renaming of files ' book = get_simple_book() count = [0] def new_container(): count[0] += 1 tdir = os.mkdir(os.path.join(self.tdir, str(count[0]))) return get_container(book, tdir=tdir) # Test simple opf rename c = new_container() orig_name = c.opf_name name = 'renamed opf.opf' rename_files(c, {c.opf_name: name}) self.assertEqual(c.opf_name, name) for x in ('name_path_map', 'mime_map'): self.assertNotIn(orig_name, getattr(c, x)) self.assertIn(name, getattr(c, x)) self.assertNotIn(name, c.dirtied) root = c.parsed('META-INF/container.xml') vals = set(root.xpath( r'child::ocf:rootfiles/ocf:rootfile/@full-path', namespaces={'ocf':OCF_NS})) self.assertSetEqual(vals, {name}) self.check_links(c) # Test a rename that moves the OPF into different directory c = new_container() orig_name = c.opf_name name = 'renamed/again/metadata.opf' rename_files(c, {c.opf_name: name}) 
self.check_links(c) # Test that renaming commits dirtied items c = new_container() name = next(c.spine_names)[0] root = c.parsed(name) root.xpath('//*[local-name()="body"]')[0].set('id', 'rename-dirty-test') rename_files(c, {name:'other/' + name}) with c.open('other/' + name) as f: raw = f.read() self.assertIn(b'id="rename-dirty-test"', raw) self.check_links(c) # Test renaming of stylesheets c = new_container() rename_files(c, {'stylesheet.css':'styles/s 1.css', 'page_styles.css':'styles/p 1.css'}) self.check_links(c) # Test renaming of images c = new_container() rename_files(c, {'cover.png':'images/cover img.png', 'light_wood.png':'images/light wood.png', 'marked.png':'images/marked img.png'}) self.check_links(c) # Test renaming of ToC c = new_container() rename_files(c, {'toc.ncx': 'toc/toc file.ncx'}) self.check_links(c) # Test renaming of font files c = new_container() fname = 'LiberationMono-Regular.ttf' if fname not in c.name_path_map: fname = fname.lower() # On OS X the font file name is lowercased for some reason (maybe on windows too) rename_files(c, {fname: 'fonts/LiberationMono Regular.ttf'}) self.check_links(c) # Test renaming of text files c = new_container() rename_files(c, {'index_split_000.html':'text/page one fällen.html', 'index_split_001.html':'text/page two fällen.html'}) self.check_links(c) # Test rename with only case change c = new_container() rename_files(c, {'index_split_000.html':'Index_split_000.html'}) self.check_links(c) # self.run_external_tools(c, vim=True) def test_file_add(self): ' Test adding of files ' book = get_simple_book() c = get_container(book) name = 'folder/added file.html' c.add_file(name, b'xxx') self.assertEqual('xxx', c.raw_data(name)) self.assertIn(name, set(itervalues(c.manifest_id_map))) self.assertIn(name, {x[0] for x in c.spine_names}) name = 'added.css' c.add_file(name, b'xxx') self.assertEqual('xxx', c.raw_data(name)) self.assertIn(name, set(itervalues(c.manifest_id_map))) self.assertNotIn(name, {x[0] for x in 
c.spine_names}) self.assertEqual(c.make_name_unique(name), 'added-1.css') c.add_file('added-1.css', b'xxx') self.assertEqual(c.make_name_unique(name.upper()), 'added-2.css'.upper()) self.check_links(c) def test_actual_case(self): ' Test getting the actual case for files from names on case insensitive filesystems ' from calibre.ebooks.oeb.polish.utils import actual_case_for_name, corrected_case_for_name book = get_simple_book() c = get_container(book) name = 'f1/f2/added file.html' c.add_file(name, b'xxx') self.assertTrue(c.exists(name)) variations = (name, name.upper(), name.replace('f1', 'F1'), name.replace('f2', 'F2')) if c.exists(name.upper()): for n in variations: self.assertEqual(name, actual_case_for_name(c, n)) else: for n in variations: self.assertEqual(name, corrected_case_for_name(c, n)) self.assertIsNone(corrected_case_for_name(c, name+'/xx')) def test_split_file(self): ' Test splitting of files ' book = get_split_book() c = get_container(book) name = 'index.html' nname = split(c, name, '//*[@id="page2"]') root = c.parsed(nname) troot = c.parsed(name) self.assertEqual(1, len(root.xpath('//*[@id="container"]')), 'Split point was not adjusted') self.assertEqual(0, len(troot.xpath('//*[@id="container"]')), 'Split point was not adjusted') self.check_links(c) def test_merge_file(self): ' Test merging of files ' book = get_simple_book() c = get_container(book) merge(c, 'text', ('index_split_000.html', 'index_split_001.html'), 'index_split_000.html') self.check_links(c) book = get_simple_book() c = get_container(book) one, two = 'one/one.html', 'two/two.html' c.add_file(one, b'<head><link href="../stylesheet.css"><p><a name="one" href="../two/two.html">1</a><a name="two" href="../two/two.html#one">2</a>') # noqa c.add_file(two, b'<head><link href="../page_styles.css"><p><a name="one" href="two.html#two">1</a><a name="two" href="../one/one.html#one">2</a><a href="#one">3</a>') # noqa merge(c, 'text', (one, two), one) self.check_links(c) root = c.parsed(one) 
self.assertEqual(1, len(root.xpath('//*[@href="../page_styles.css"]'))) book = get_simple_book() c = get_container(book) merge(c, 'styles', ('stylesheet.css', 'page_styles.css'), 'stylesheet.css') self.check_links(c) def test_dir_container(self): def create_book(source): with ZipFile(P('quick_start/eng.epub', allow_user_override=False)) as zf: zf.extractall(source) with CurrentDir(source): self.assertTrue(os.path.exists('images/cover.jpg')) with open('.gitignore', 'wb') as f: f.write(b'nothing') os.mkdir('.git') with open('.git/xxx', 'wb') as f: f.write(b'xxx') with TemporaryDirectory('-polish-dir-container') as source: create_book(source) c = get_container(source) c.remove_item('images/cover.jpg') with c.open('images/test-container.xyz', 'wb') as f: f.write(b'xyz') c.commit() with CurrentDir(source): self.assertTrue(os.path.exists('.gitignore')) self.assertTrue(os.path.exists('.git/xxx')) self.assertTrue(os.path.exists('images/test-container.xyz')) self.assertFalse(os.path.exists('images/cover.jpg')) def test_folder_type_map_case(self): book = get_simple_book() c = get_container(book) c.add_file('Image/testcase.png', b'xxx') rationalize_folders(c, {'image':'image'}) self.assertTrue(c.has_name('Image/testcase.png')) self.assertTrue(c.exists('Image/testcase.png')) self.assertFalse(c.has_name('image/testcase.png'))
11,552
Python
.py
244
37.17623
182
0.586935
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,407
parsing.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/parsing.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' from functools import partial from html5lib.constants import cdataElements, rcdataElements from lxml import etree from calibre.ebooks.oeb.base import SVG_NS, XHTML_NS, XLINK_NS, XPath from calibre.ebooks.oeb.parse_utils import html5_parse from calibre.ebooks.oeb.polish.parsing import parse_html5 as parse from calibre.ebooks.oeb.polish.tests.base import BaseTest from polyglot.builtins import iteritems def nonvoid_cdata_elements(test, parse_function): ''' If self closed version of non-void cdata elements like <title/> are present, the HTML5 parsing algorithm treats all following data as CDATA ''' markup = ''' <html> <head><{0}/></head> <body id="test"> </html> ''' for tag in cdataElements | rcdataElements: for x in (tag, tag.upper(), '\n' + tag, tag + ' id="xxx" '): root = parse_function(markup.format(x)) test.assertEqual( len(XPath('//h:body[@id="test"]')(root)), 1, 'Incorrect parsing for <%s/>, parsed markup:\n' % x + etree.tostring(root, encoding='unicode')) def namespaces(test, parse_function): ae = test.assertEqual def match_and_prefix(root, xpath, prefix, err=''): matches = XPath(xpath)(root) ae(len(matches), 1, err) ae(matches[0].prefix, prefix, err) markup = f''' <html xmlns="{XHTML_NS}"><head><body id="test"></html> ''' root = parse_function(markup) ae( len(XPath('//h:body[@id="test"]')(root)), 1, 'Incorrect parsing, parsed markup:\n' + etree.tostring(root, encoding='unicode')) match_and_prefix(root, '//h:body[@id="test"]', None) markup = ''' <html xmlns="{xhtml}"><head><body id="test"> <svg:svg xmlns:svg="{svg}"><svg:image xmlns:xlink="{xlink}" xlink:href="xxx"/></svg:svg> '''.format(xhtml=XHTML_NS, svg=SVG_NS, xlink=XLINK_NS) root = parse_function(markup) err = 'Incorrect parsing, parsed markup:\n' + etree.tostring(root, encoding='unicode') match_and_prefix(root, '//h:body[@id="test"]', None, err) match_and_prefix(root, '//svg:svg', 'svg', err) 
match_and_prefix(root, '//svg:image[@xl:href]', 'svg', err) markup = ''' <html xmlns="{xhtml}"><head><body id="test"> <svg xmlns="{svg}" xmlns:xlink="{xlink}" ><image xlink:href="xxx"/></svg> '''.format(xhtml=XHTML_NS, svg=SVG_NS, xlink=XLINK_NS) root = parse_function(markup) err = 'Incorrect parsing, parsed markup:\n' + etree.tostring(root, encoding='unicode') match_and_prefix(root, '//h:body[@id="test"]', None, err) match_and_prefix(root, '//svg:svg', None, err) match_and_prefix(root, '//svg:image[@xl:href]', None, err) markup = '<html><body><svg><image xlink:href="xxx"></svg>' root = parse_function(markup) err = 'Namespaces not created, parsed markup:\n' + etree.tostring(root, encoding='unicode') match_and_prefix(root, '//svg:svg', None, err) match_and_prefix(root, '//svg:image[@xl:href]', None, err) if parse_function is parse: image = XPath('//svg:image')(root)[0] ae(image.nsmap, {'xlink':XLINK_NS, None:SVG_NS}) root = parse_function('<html id="a"><p><html xmlns:x="y" lang="en"><p>') err = 'Multiple HTML tags not handled, parsed markup:\n' + etree.tostring(root, encoding='unicode') match_and_prefix(root, '//h:html', None, err) match_and_prefix(root, '//h:html[@lang]', None, err) match_and_prefix(root, '//h:html[@id]', None, err) # if parse_function is not html5_parse: # markup = '<html:html xmlns:html="{html}" id="a"><html:body><html:p></html:p></html:body></html>'.format(html=XHTML_NS) # root = parse_function(markup) # err = 'HTML namespace prefixed, parsed markup:\n' + etree.tostring(root, encoding='unicode') # match_and_prefix(root, '//h:html', None, err) markup = '<html><body><ns1:tag1 xmlns:ns1="NS"><ns2:tag2 xmlns:ns2="NS" ns1:id="test"/><ns1:tag3 xmlns:ns1="NS2" ns1:id="test"/></ns1:tag1>' root = parse_function(markup) err = 'Arbitrary namespaces not preserved, parsed markup:\n' + etree.tostring(root, encoding='unicode') def xpath(expr): return etree.XPath(expr, namespaces={'ns1':'NS', 'ns2':'NS2'})(root) ae(len(xpath('//ns1:tag1')), 1, err) 
ae(len(xpath('//ns1:tag2')), 1, err) ae(len(xpath('//ns2:tag3')), 1, err) ae(len(xpath('//ns1:tag2[@ns1:id="test"]')), 1, err) ae(len(xpath('//ns2:tag3[@ns2:id="test"]')), 1, err) # for tag in root.iter(): # if 'NS' in tag.tag: # ae('ns1', tag.prefix) markup = '<html xml:lang="en"><body><p lang="de"><p xml:lang="es"><p lang="en" xml:lang="de">' root = parse_function(markup) err = 'xml:lang not converted to lang, parsed markup:\n' + etree.tostring(root, encoding='unicode') ae(len(root.xpath('//*[@lang="en"]')), 2, err) ae(len(root.xpath('//*[@lang="de"]')), 1, err) ae(len(root.xpath('//*[@lang="es"]')), 1, err) # ae(len(XPath('//*[@xml:lang]')(root)), 0, err) def space_characters(test, parse_function): markup = '<html><p>\u000cX</p>' root = parse_function(markup) err = 'form feed character not converted, parsed markup:\n' + etree.tostring(root, encoding='unicode') test.assertNotIn('\u000c', root.xpath('//*[local-name()="p"]')[0].text, err) markup = '<html><p>a\u000b\u000c</p>' root = parse_function(markup) # Should strip non XML safe control code \u000b test.assertNotIn('\u000b', root.xpath('//*[local-name()="p"]')[0].text, err) test.assertNotIn('\u000c', root.xpath('//*[local-name()="p"]')[0].text, err) def case_insensitive_element_names(test, parse_function): markup = '<HTML><P> </p>' root = parse_function(markup) err = 'case sensitive parsing, parsed markup:\n' + etree.tostring(root, encoding='unicode') test.assertEqual(len(XPath('//h:p')(root)), 1, err) def entities(test, parse_function): markup = '<html><p>&nbsp;&apos;</p>' root = parse_function(markup) err = 'Entities not handled, parsed markup:\n' + etree.tostring(root, encoding='unicode') test.assertEqual('\xa0\'', root.xpath('//*[local-name()="p"]')[0].text, err) def multiple_html_and_body(test, parse_function): markup = '<html id="1"><body id="2"><p><html lang="en"><body lang="de"></p>' root = parse_function(markup) err = 'multiple html and body not handled, parsed markup:\n' + etree.tostring(root, 
encoding='unicode') test.assertEqual(len(XPath('//h:html')(root)), 1, err) test.assertEqual(len(XPath('//h:body')(root)), 1, err) test.assertEqual(len(XPath('//h:html[@id and @lang]')(root)), 1, err) test.assertEqual(len(XPath('//h:body[@id and @lang]')(root)), 1, err) def attribute_replacement(test, parse_function): markup = '<html><body><svg viewbox="0"></svg><svg xmlns="%s" viewbox="1">' % SVG_NS root = parse_function(markup) err = 'SVG attributes not normalized, parsed markup:\n' + etree.tostring(root, encoding='unicode') test.assertEqual(len(XPath('//svg:svg[@viewBox]')(root)), 2, err) def comments(test, parse_function): markup = '<html><!-- -- ---><body/></html>' root = parse_function(markup) test.assertEqual(len(XPath('//h:body')(root)), 1, 'Failed to parse with comment containing dashes') test.assertEqual(len(tuple(root.iterdescendants(etree.Comment))), 1) basic_checks = (nonvoid_cdata_elements, namespaces, space_characters, case_insensitive_element_names, entities, comments, multiple_html_and_body, attribute_replacement) class ParsingTests(BaseTest): def test_lxml_tostring(self): ' Test for bug in some versions of lxml that causes incorrect serialization of sub-trees' from html5_parser import parse root = parse('<p>a<p>b<p>c') p = root.xpath('//p')[0] self.assertEqual(etree.tostring(p, encoding=str), '<p>a</p>') def test_conversion_parser(self): ' Test parsing with the HTML5 parser used for conversion ' for test in basic_checks: test(self, html5_parse) def test_polish_parser(self): ' Test parsing with the HTML5 parser used for polishing ' for test in basic_checks: test(self, parse) root = parse('<html><p><svg><image /><b></svg>&nbsp;\n<b>xxx', discard_namespaces=True) self.assertTrue(root.xpath('//b'), 'Namespaces not discarded') self.assertFalse(root.xpath('//svg/b'), 'The <b> was not moved out of <svg>') for ds in (False, True): src = '\n<html>\n<p>\n<svg><image />\n<b></svg>&nbsp' root = parse(src, discard_namespaces=ds) for tag, lnum in 
iteritems({'html':2, 'head':3, 'body':3, 'p':3, 'svg':4, 'image':4, 'b':5}): elem = root.xpath('//*[local-name()="%s"]' % tag)[0] self.assertEqual(lnum, elem.sourceline, f'Line number incorrect for {tag}, source: {src}:') for ds in (False, True): src = '\n<html>\n<p b=1 a=2 c=3 d=4 e=5 f=6 g=7 h=8><svg b=1 a=2 c=3 d=4 e=5 f=6 g=7 h=8>\n' root = parse(src, discard_namespaces=ds) for tag in ('p', 'svg'): for i, (k, v) in enumerate(root.xpath('//*[local-name()="%s"]' % tag)[0].items()): self.assertEqual(i+1, int(v)) root = parse('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US" xmlns:xml="http://www.w3.org/XML/1998/namespace"><body/></html>') self.assertNotIn('xmlnsU0003Axml', root.attrib, 'xml namespace declaration not removed') root = parse('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US" xmlns:extra="extra"><body/></html>') self.assertIn('extra', root.nsmap, 'Extra namespace declaration on <html> tag not preserved') def timing(): import sys from html5lib import parse as vanilla from calibre.ebooks.chardet import xml_to_unicode from calibre.utils.monotonic import monotonic filename = sys.argv[-1] with open(filename, 'rb') as f: raw = f.read() raw = xml_to_unicode(raw)[0] for name, f in (('calibre', partial(parse, line_numbers=False)), ('html5lib', vanilla), ('calibre-old', html5_parse)): timings = [] for i in range(10): st = monotonic() f(raw) timings.append(monotonic() - st) avg = sum(timings)/len(timings) print(f'Average time for {name}: {avg:.2g}')
10,503
Python
.py
182
51.104396
147
0.628541
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,408
structure.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/structure.py
#!/usr/bin/env python # License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net> import os from functools import partial from io import BytesIO from itertools import count from zipfile import ZIP_STORED, ZipFile from calibre.ebooks.metadata.book.base import Metadata from calibre.ebooks.metadata.opf3 import CALIBRE_PREFIX from calibre.ebooks.oeb.base import OEB_DOCS from calibre.ebooks.oeb.polish.container import get_container from calibre.ebooks.oeb.polish.cover import clean_opf, find_cover_image, find_cover_page, mark_as_cover, mark_as_titlepage from calibre.ebooks.oeb.polish.create import create_book from calibre.ebooks.oeb.polish.tests.base import BaseTest from calibre.ebooks.oeb.polish.toc import from_xpaths as toc_from_xpaths from calibre.ebooks.oeb.polish.toc import get_landmarks, get_toc from calibre.ebooks.oeb.polish.utils import guess_type OPF_TEMPLATE = ''' <package xmlns="http://www.idpf.org/2007/opf" version="{ver}" prefix="calibre: %s" unique-identifier="uid"> <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf"> <dc:identifier id="uid">test</dc:identifier> {metadata} </metadata> <manifest>{manifest}</manifest> <spine>{spine}</spine> <guide>{guide}</guide> </package>''' % CALIBRE_PREFIX # noqa def create_manifest_item(name, data=b'', properties=None): return (name, data, properties) cmi = create_manifest_item def create_epub(manifest, spine=(), guide=(), meta_cover=None, ver=3): mo = [] for name, data, properties in manifest: mo.append('<item id="{}" href="{}" media-type="{}" {}/>'.format( name, name, guess_type(name), ('properties="%s"' % properties if properties else ''))) mo = ''.join(mo) metadata = '' if meta_cover: metadata = '<meta name="cover" content="%s"/>' % meta_cover if not spine: spine = [x[0] for x in manifest if guess_type(x[0]) in OEB_DOCS] spine = ''.join('<itemref idref="%s"/>' % name for name in spine) guide = ''.join(f'<reference href="{name}" type="{typ}" title="{title}"/>' for 
name, typ, title in guide) opf = OPF_TEMPLATE.format(manifest=mo, ver='%d.0'%ver, metadata=metadata, spine=spine, guide=guide) buf = BytesIO() with ZipFile(buf, 'w', ZIP_STORED) as zf: zf.writestr('META-INF/container.xml', b''' <container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container"> <rootfiles> <rootfile full-path="content.opf" media-type="application/oebps-package+xml"/> </rootfiles> </container>''') zf.writestr('content.opf', opf.encode('utf-8')) for name, data, properties in manifest: if isinstance(data, str): data = data.encode('utf-8') zf.writestr(name, data or b'\0') buf.seek(0) return buf counter = count() class Structure(BaseTest): def create_epub(self, *args, **kw): n = next(counter) ep = os.path.join(self.tdir, str(n) + 'book.epub') with open(ep, 'wb') as f: f.write(create_epub(*args, **kw).getvalue()) c = get_container(ep, tdir=os.path.join(self.tdir, 'container%d' % n), tweak_mode=True) return c def test_toc_detection(self): ep = os.path.join(self.tdir, 'book.epub') create_book(Metadata('Test ToC'), ep) c = get_container(ep, tdir=os.path.join(self.tdir, 'container'), tweak_mode=True) self.assertEqual(2, c.opf_version_parsed.major) self.assertTrue(len(get_toc(c))) c.opf.set('version', '3.0') self.assertEqual(3, c.opf_version_parsed.major) self.assertTrue(len(get_toc(c))) # detect NCX toc even in epub 3 files c.add_file('nav.html', b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">' b'<body><nav epub:type="toc"><ol><li><a href="start.xhtml">EPUB 3 nav</a></li></ol></nav></body></html>', process_manifest_item=lambda item:item.set('properties', 'nav')) toc = get_toc(c) self.assertTrue(len(toc)) self.assertEqual(toc.as_dict['children'][0]['title'], 'EPUB 3 nav') def tfx(linear, expected): items = ['<t{0}>{0}</t{0}>'.format(x) for x in linear] html = '<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">' html += '<body>%s</body></html>' % '\n'.join(items) with 
c.open('nav.html', 'wb') as f: f.write(html.encode('utf-8')) toc = toc_from_xpaths(c, ['//h:t'+x for x in sorted(set(linear))]) def p(node): ans = '' if node.children: ans += '[' for c in node.children: ans += c.title + p(c) ans += ']' return ans self.assertEqual('[%s]'%expected, p(toc)) tfx('121333', '1[2]1[333]') tfx('1223424', '1[22[3[4]]2[4]]') tfx('32123', '321[2[3]]') tfx('123123', '1[2[3]]1[2[3]]') def test_landmarks_detection(self): c = self.create_epub([cmi('xxx.html'), cmi('a.html')], guide=[('xxx.html#moo', 'x', 'XXX'), ('a.html', '', 'YYY')], ver=2) self.assertEqual(2, c.opf_version_parsed.major) self.assertEqual([ {'dest':'xxx.html', 'frag':'moo', 'type':'x', 'title':'XXX'}, {'dest':'a.html', 'frag':'', 'type':'', 'title':'YYY'} ], get_landmarks(c)) c = self.create_epub([cmi('xxx.html'), cmi('a.html')], ver=3) self.assertEqual(3, c.opf_version_parsed.major) c.add_file('xxx/nav.html', b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">' b'<body><nav epub:type="landmarks"><ol><li><a epub:type="x" href="../xxx.html#moo">XXX </a></li>' b'<li><a href="../a.html"> YYY </a></li>' b'</ol></nav></body></html>', process_manifest_item=lambda item:item.set('properties', 'nav')) self.assertEqual([ {'dest':'xxx.html', 'frag':'moo', 'type':'x', 'title':'XXX'}, {'dest':'a.html', 'frag':'', 'type':'', 'title':'YYY'} ], get_landmarks(c)) def test_epub3_covers(self): # cover image ce = partial(self.create_epub, ver=3) c = ce([cmi('c.jpg')]) self.assertIsNone(find_cover_image(c)) c = ce([cmi('c.jpg')], meta_cover='c.jpg') self.assertEqual('c.jpg', find_cover_image(c)) c = ce([cmi('c.jpg', b'z', 'cover-image'), cmi('d.jpg')], meta_cover='d.jpg') self.assertEqual('c.jpg', find_cover_image(c)) mark_as_cover(c, 'd.jpg') self.assertEqual('d.jpg', find_cover_image(c)) self.assertFalse(c.opf_xpath('//*/@name')) # title page c = ce([cmi('c.html'), cmi('a.html')]) self.assertIsNone(find_cover_page(c)) mark_as_titlepage(c, 'a.html', 
move_to_start=False) self.assertEqual('a.html', find_cover_page(c)) self.assertEqual('c.html', next(c.spine_names)[0]) mark_as_titlepage(c, 'a.html', move_to_start=True) self.assertEqual('a.html', find_cover_page(c)) self.assertEqual('a.html', next(c.spine_names)[0]) # clean opf of all cover information c = ce([cmi('c.jpg', b'z', 'cover-image'), cmi('c.html', b'', 'calibre:title-page'), cmi('d.html')], meta_cover='c.jpg', guide=[('c.jpg', 'cover', ''), ('d.html', 'cover', '')]) self.assertEqual(set(clean_opf(c)), {'c.jpg', 'c.html', 'd.html'}) self.assertFalse(c.opf_xpath('//*/@name')) self.assertFalse(c.opf_xpath('//*/@type')) for prop in 'cover-image calibre:title-page'.split(): self.assertEqual([], list(c.manifest_items_with_property(prop))) def test_epub2_covers(self): # cover image ce = partial(self.create_epub, ver=2) c = ce([cmi('c.jpg')]) self.assertIsNone(find_cover_image(c)) c = ce([cmi('c.jpg')], meta_cover='c.jpg') self.assertEqual('c.jpg', find_cover_image(c)) c = ce([cmi('c.jpg'), cmi('d.jpg')], guide=[('c.jpg', 'cover', '')]) self.assertEqual('c.jpg', find_cover_image(c)) mark_as_cover(c, 'd.jpg') self.assertEqual('d.jpg', find_cover_image(c)) self.assertEqual({'cover':'d.jpg'}, c.guide_type_map) # title page c = ce([cmi('c.html'), cmi('a.html')]) self.assertIsNone(find_cover_page(c)) mark_as_titlepage(c, 'a.html', move_to_start=False) self.assertEqual('a.html', find_cover_page(c)) self.assertEqual('c.html', next(c.spine_names)[0]) mark_as_titlepage(c, 'a.html', move_to_start=True) self.assertEqual('a.html', find_cover_page(c)) self.assertEqual('a.html', next(c.spine_names)[0]) def test_mark_sentences(self): from html5_parser import parse from lxml import html from calibre.ebooks.oeb.polish.tts import id_prefix, mark_sentences_in_html, unmark_sentences_in_html def normalize_markup(root): actual = html.tostring(root, encoding='unicode') actual = actual[actual.find('<body'):] actual = actual[:actual.rfind('</body>')] return actual.replace(id_prefix, 
'') for text, expected in reversed({ '<p id=1>hello cruel world': '<body><p id="1"><span id="1">hello cruel world</span></p>', '<p>hello <b>cruel</b> world': '<body><p><span id="1">hello <b>cruel</b> world</span></p>', '<p>Yes, please. Hello <b>cruel</b> world.': '<body><p><span id="1">Yes, please. </span><span id="2">Hello <b>cruel</b> world.</span></p>', '<p>Hello <b>cruel</b> <i>world. </i>': '<body><p><span id="1">Hello <b>cruel</b> <i>world. </i></span></p>', '<p>Yes, <b>please.</b> Well done! Bravissima! ': '<body><p><span id="1">Yes, <b>please.</b> </span><span id="2">Well done! </span><span id="3">Bravissima! </span></p>', '<p>Yes, <b>please.</b> Well <i>done! </i>Bravissima! ': '<body><p><span id="1">Yes, <b>please.</b> </span><span id="2">Well <i>done! </i></span><span id="3">Bravissima! </span></p>', '<p><i>Hello</i>, world! Good day to you': '<body><p><span id="1"><i>Hello</i>, world! </span><span id="2">Good day to you</span></p>', '<p><i>Hello, world! </i>Good day to you': '<body><p><i id="1">Hello, world! </i><span id="2">Good day to you</span></p>', '<p><i>Hello, </i><b>world!</b>Good day to you': '<body><p><span id="1"><i>Hello, </i><b>world!</b></span><span id="2">Good day to you</span></p>', '<p><i>Hello, </i><b>world</b>! Good day to you': '<body><p><span id="1"><i>Hello, </i><b>world</b>! </span><span id="2">Good day to you</span></p>', '<p>Hello, <span lang="fr">world!': '<body><p><span id="1">Hello, </span><span lang="fr"><span id="2">world!</span></span></p>', '<p>Hello, <span data-calibre-tts="moose">world!': '<body><p><span id="1">Hello, </span><span data-calibre-tts="moose"><span id="2">world!</span></span></p>', '<p>One<p>Two': '<body><p><span id="1">One</span></p><p><span id="2">Two</span></p>', '<div><p>something': '<body><div><p><span id="1">something</span></p></div>', '<p>One</p> Two. Three <p>Four': '<body><p><span id="1">One</span></p><span id="2"> Two. 
</span><span id="3">Three </span><p><span id="4">Four</span></p>', '<p>Here is some <b>bold, </b><i>italic, </i><u>underline, </u> text.': '<body><p><span id="1">Here is some <b>bold, </b><i>italic, </i><u>underline, </u> text.</span></p>', '<p>A sentence wrapped\nonto multiple lines.': '<body><p><span id="1">A sentence wrapped\nonto multiple lines.</span></p>', }.items()): root = parse(text, namespace_elements=True) orig = normalize_markup(root) sentences = mark_sentences_in_html(root) ids = tuple(int(s.elem_id[len(id_prefix):]) for s in sentences) self.assertEqual(len(ids), ids[-1]) marked = normalize_markup(root) self.assertEqual(expected, marked) unmark_sentences_in_html(root) self.assertEqual(orig, normalize_markup(root), f'Unmarking failed for {marked}') sentences = mark_sentences_in_html(parse('<p lang="en">Hello, <span lang="fr">world!')) self.assertEqual(tuple(s.lang for s in sentences), ('eng', 'fra')) def find_tests(): import unittest return unittest.defaultTestLoader.loadTestsFromTestCase(Structure) def run_tests(): from calibre.utils.run_tests import run_tests run_tests(find_tests)
12,951
Python
.py
229
47.100437
138
0.576107
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,409
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
112
Python
.py
3
34.666667
61
0.673077
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,410
base.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/base.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import os import shutil import unittest import calibre.ebooks.oeb.polish.container as pc from calibre import CurrentDir from calibre.ptempfile import PersistentTemporaryDirectory, TemporaryDirectory from calibre.utils.logging import DevNull from calibre.utils.resources import get_image_path as I from calibre.utils.resources import get_path as P from polyglot.builtins import iteritems def get_cache(): from calibre.constants import cache_dir cache = os.path.join(cache_dir(), 'polish-test') if not os.path.exists(cache): os.mkdir(cache) return cache once_per_run = set() def needs_recompile(obj, srcs): is_ci = os.environ.get('CI', '').lower() == 'true' if is_ci and obj not in once_per_run: once_per_run.add(obj) return True if isinstance(srcs, str): srcs = [srcs] try: obj_mtime = os.stat(obj).st_mtime except OSError: return True for src in srcs: if os.stat(src).st_mtime > obj_mtime: return True return False def build_book(src, dest, args=()): from calibre.ebooks.conversion.cli import main main(['ebook-convert', src, dest, '-vv'] + list(args)) def add_resources(raw, rmap): for placeholder, path in iteritems(rmap): fname = os.path.basename(path) shutil.copy2(path, '.') raw = raw.replace(placeholder, fname) return raw def get_simple_book(fmt='epub'): cache = get_cache() ans = os.path.join(cache, 'simple.'+fmt) src = os.path.join(os.path.dirname(__file__), 'simple.html') if needs_recompile(ans, src): with TemporaryDirectory('bpt') as tdir, CurrentDir(tdir): with open(src, 'rb') as sf: raw = sf.read().decode('utf-8') raw = add_resources(raw, { 'LMONOI': P('fonts/liberation/LiberationMono-Italic.ttf'), 'LMONOR': P('fonts/liberation/LiberationMono-Regular.ttf'), 'IMAGE1': I('marked.png'), 'IMAGE2': I('textures/light_wood.png'), }) shutil.copy2(I('lt.png'), '.') x = 'index.html' with open(x, 'wb') as f: f.write(raw.encode('utf-8')) build_book(x, ans, args=[ 
'--level1-toc=//h:h2', '--language=en', '--authors=Kovid Goyal', '--cover=lt.png']) return ans def get_split_book(fmt='epub'): cache = get_cache() ans = os.path.join(cache, 'split.'+fmt) src = os.path.join(os.path.dirname(__file__), 'split.html') if needs_recompile(ans, src): x = src.replace('split.html', 'index.html') with open(src, 'rb') as sf: raw = sf.read().decode('utf-8') try: with open(x, 'wb') as f: f.write(raw.encode('utf-8')) build_book(x, ans, args=['--level1-toc=//h:h2', '--language=en', '--authors=Kovid Goyal', '--cover=' + I('lt.png')]) finally: os.remove(x) return ans devnull = DevNull() class BaseTest(unittest.TestCase): longMessage = True maxDiff = None def setUp(self): pc.default_log = devnull self.tdir = PersistentTemporaryDirectory(suffix='-polish-test') def tearDown(self): shutil.rmtree(self.tdir, ignore_errors=True) del self.tdir def check_links(self, container): for name in container.name_path_map: for link in container.iterlinks(name, get_line_numbers=False): dest = container.href_to_name(link, name) if dest: self.assertTrue(container.exists(dest), f'The link {link} in {name} does not exist')
3,799
Python
.py
97
31.134021
104
0.607182
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,411
cascade.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/cascade.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2016, Kovid Goyal <kovid at kovidgoyal.net>' from functools import partial from css_parser import parseStyle from calibre.constants import iswindows from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES from calibre.ebooks.oeb.polish.cascade import DEFAULTS, iterrules, resolve_styles from calibre.ebooks.oeb.polish.container import ContainerBase, href_to_name from calibre.ebooks.oeb.polish.css import remove_property_value from calibre.ebooks.oeb.polish.embed import find_matching_font from calibre.ebooks.oeb.polish.stats import StatsCollector, font_keys, normalize_font_properties, prepare_font_rule from calibre.ebooks.oeb.polish.tests.base import BaseTest from calibre.utils.icu import lower as icu_lower from calibre.utils.logging import Log, Stream from polyglot.builtins import iteritems class VirtualContainer(ContainerBase): tweak_mode = True def __init__(self, files): s = Stream() self.log_stream = s.stream log = Log() log.outputs = [s] self.opf_version_parsed = (2, 0, 0) ContainerBase.__init__(self, log=log) self.mime_map = {k:self.guess_type(k) for k in files} self.files = files def has_name(self, name): return name in self.mime_map def href_to_name(self, href, base=None): return href_to_name(href, ('C:\\root' if iswindows else '/root'), base) def parsed(self, name): if name not in self.parsed_cache: mt = self.mime_map[name] if mt in OEB_STYLES: self.parsed_cache[name] = self.parse_css(self.files[name], name) elif mt in OEB_DOCS: self.parsed_cache[name] = self.parse_xhtml(self.files[name], name) else: self.parsed_cache[name] = self.files[name] return self.parsed_cache[name] @property def spine_names(self): for name in sorted(self.mime_map): if self.mime_map[name] in OEB_DOCS: yield name, True class CascadeTest(BaseTest): def test_iterrules(self): def get_rules(files, name='x/one.css', l=1, rule_type=None): c = VirtualContainer(files) rules = tuple(iterrules(c, name, rule_type=rule_type)) 
self.assertEqual(len(rules), l) return rules, c get_rules({'x/one.css':'@import "../two.css";', 'two.css':'body { color: red; }'}) get_rules({'x/one.css':'@import "../two.css" screen;', 'two.css':'body { color: red; }'}) get_rules({'x/one.css':'@import "../two.css" xyz;', 'two.css':'body { color: red; }'}, l=0) get_rules({'x/one.css':'@import "../two.css";', 'two.css':'body { color: red; }'}, l=0, rule_type='FONT_FACE_RULE') get_rules({'x/one.css':'@import "../two.css";', 'two.css':'body { color: red; }'}, rule_type='STYLE_RULE') get_rules({'x/one.css':'@media screen { body { color: red; } }'}) get_rules({'x/one.css':'@media xyz { body { color: red; } }'}, l=0) c = get_rules({'x/one.css':'@import "../two.css";', 'two.css':'@import "x/one.css"; body { color: red; }'})[1] self.assertIn('Recursive import', c.log_stream.getvalue()) def test_resolve_styles(self): def test_property(select, resolve_property, selector, name, val=None): elem = next(select(selector)) ans = resolve_property(elem, name) if val is None: val = str(DEFAULTS[name]) self.assertEqual(val, ans.cssText) def test_pseudo_property(select, resolve_pseudo_property, selector, prop, name, val=None, abort_on_missing=False): elem = next(select(selector)) ans = resolve_pseudo_property(elem, prop, name, abort_on_missing=abort_on_missing) if abort_on_missing: if val is None: self.assertTrue(ans is None) return if val is None: val = str(DEFAULTS[name]) self.assertEqual(val, ans.cssText) def get_maps(html, styles=None, pseudo=False): html = f'<html><head><link href="styles.css"></head><body>{html}</body></html>' c = VirtualContainer({'index.html':html, 'styles.css':styles or 'body { color: red; font-family: "Kovid Goyal", sans-serif }'}) resolve_property, resolve_pseudo_property, select = resolve_styles(c, 'index.html') if pseudo: tp = partial(test_pseudo_property, select, resolve_pseudo_property) else: tp = partial(test_property, select, resolve_property) return tp t = get_maps('<p 
style="margin:11pt"><b>x</b>xx</p>') t('body', 'color', 'red') t('p', 'color', 'red') t('b', 'font-weight', 'bold') t('p', 'margin-top', '11pt') t('b', 'margin-top') t('body', 'display', 'block') t('b', 'display', 'inline') t('body', 'font-family', ('"Kovid Goyal"', 'sans-serif')) for e in ('body', 'p', 'b'): for prop in 'background-color text-indent'.split(): t(e, prop) t = get_maps('<p>xxx</p><style>p {color: blue}</style>', 'p {color: red}') t('p', 'color', 'blue') t = get_maps('<p style="color: blue">xxx</p>', 'p {color: red}') t('p', 'color', 'blue') t = get_maps('<p style="color: blue">xxx</p>', 'p {color: red !important}') t('p', 'color', 'red') t = get_maps('<p id="p">xxx</p>', '#p { color: blue } p {color: red}') t('p', 'color', 'blue') t = get_maps('<p>xxx</p>', 'p {color: red; color: blue}') t('p', 'color', 'blue') t = get_maps('<p>xxx</p><style>p {color: blue}</style>', 'p {color: red; margin:11pt}') t('p', 'margin-top', '11pt') t = get_maps('<p></p>', 'p:before { content: "xxx" }', True) t('p', 'before', 'content', '"xxx"') t = get_maps('<p></p>', 'body p:before { content: "xxx" } p:before { content: "yyy" }', True) t('p', 'before', 'content', '"xxx"') t = get_maps('<p></p>', "p:before { content: 'xxx' } p:first-letter { font-weight: bold }", True) t('p', 'before', 'content', '"xxx"') t('p', 'first-letter', 'font-weight', 'bold') t = get_maps('<p></p>', 'p { font-weight: bold; margin: 11pt } p:before { content: xxx }', True) t('p', 'before', 'content', 'xxx') t('p', 'before', 'margin-top', '0') t('p', 'before', 'font-weight', 'bold') t('p', 'first-letter', 'content') t('p', 'first-letter', 'content', abort_on_missing=True) def test_font_stats(self): embeds = '@font-face { font-family: X; src: url(X.otf) }\n@font-face { font-family: X; src: url(XB.otf); font-weight: bold }' def get_stats(html, *fonts): styles = [] html = f'<html><head><link href="styles.css"></head><body>{html}</body></html>' files = {'index.html':html, 'X.otf':b'xxx', 'XB.otf': 
b'xbxb'} for font in fonts: styles.append('@font-face {') for k, v in iteritems(font): if k == 'src': files[v] = b'xxx' v = 'url(%s)' % v styles.append(f'{k} : {v};') styles.append('}\n') files['styles.css'] = embeds + '\n'.join(styles) c = VirtualContainer(files) return StatsCollector(c, do_embed=True) def font(family, weight=None, style=None): f = {} if weight is not None: f['font-weight'] = weight if style is not None: f['font-style'] = style f = normalize_font_properties(f) f['font-family'] = [family] return f def font_rule(src, *args, **kw): ans = font(*args, **kw) ans['font-family'] = list(map(icu_lower, ans['font-family'])) prepare_font_rule(ans) ans['src'] = src return ans def fkey(*args, **kw): f = font(*args, **kw) f['font-family'] = icu_lower(f['font-family'][0]) return frozenset((k, v) for k, v in iteritems(f) if k in font_keys) def fu(text, *args, **kw): key = fkey(*args, **kw) val = font(*args, **kw) val['text'] = set(text) val['font-family'] = val['font-family'][0] return key, val s = get_stats('<p style="font-family: X">abc<b>d\nef</b><i>ghi</i></p><p style="font-family: U">u</p>') # The normal font must include ghi as it will be used to simulate # italic by most rendering engines when the italic font is missing self.assertEqual(s.font_stats, {'XB.otf':set('def'), 'X.otf':set('abcghi')}) self.assertEqual(s.font_spec_map, {'index.html':set('XU')}) self.assertEqual(s.all_font_rules, {'X.otf':font_rule('X.otf', 'X'), 'XB.otf':font_rule('XB.otf', 'X', 'bold')}) self.assertEqual(set(s.font_rule_map), {'index.html'}) self.assertEqual(s.font_rule_map['index.html'], [font_rule('X.otf', 'X'), font_rule('XB.otf', 'X', 'bold')]) self.assertEqual(set(s.font_usage_map), {'index.html'}) self.assertEqual(s.font_usage_map['index.html'], dict([fu('abc', 'X'), fu('def', 'X', weight='bold'), fu('ghi', 'X', style='italic'), fu('u', 'U')])) s = get_stats('<p style="font-family: X; text-transform:uppercase">abc</p><b style="font-family: X; font-variant: 
small-caps">d\nef</b>') self.assertEqual(s.font_stats, {'XB.otf':set('defDEF'), 'X.otf':set('ABC')}) s = get_stats('<style>.fl::first-line { font-family: X }</style><p class="fl">abc<b>def</b></p>') # Technically def should not be needed in X but that is hard to achieve self.assertEqual(s.font_stats, {'XB.otf':set('def'), 'X.otf':set('abcdef')}) def test_remove_property_value(self): style = parseStyle('background-image: url(b.png); background: black url(a.png) fixed') for prop in style.getProperties(all=True): remove_property_value(prop, lambda val:'png' in val.cssText) self.assertEqual('background: black fixed', style.cssText.rstrip(';')) def test_fallback_font_matching(self): def cf(id, weight='normal', style='normal', stretch='normal'): return {'id':id, 'font-weight':weight, 'font-style':style, 'font-stretch':stretch} fonts = [cf(1, '500', 'oblique', 'condensed'), cf(2, '300', 'italic', 'normal')] self.assertEqual(find_matching_font(fonts)['id'], 2) fonts = [cf(1, '500', 'oblique', 'normal'), cf(2, '300', 'italic', 'normal')] self.assertEqual(find_matching_font(fonts)['id'], 1) fonts = [cf(1, '500', 'oblique', 'normal'), cf(2, '200', 'oblique', 'normal')] self.assertEqual(find_matching_font(fonts)['id'], 1) fonts = [cf(1, '600', 'oblique', 'normal'), cf(2, '100', 'oblique', 'normal')] self.assertEqual(find_matching_font(fonts)['id'], 2) fonts = [cf(1, '600', 'oblique', 'normal'), cf(2, '100', 'oblique', 'normal')] self.assertEqual(find_matching_font(fonts, '500')['id'], 2) fonts = [cf(1, '600', 'oblique', 'normal'), cf(2, '100', 'oblique', 'normal')] self.assertEqual(find_matching_font(fonts, '600')['id'], 1)
11,437
Python
.py
202
46.371287
157
0.564849
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,412
main.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/polish/tests/main.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' from calibre.utils.run_tests import find_tests_in_package, run_tests def find_tests(): return find_tests_in_package('calibre.ebooks.oeb.polish.tests') if __name__ == '__main__': try: import init_calibre # noqa except ImportError: pass run_tests(find_tests)
404
Python
.py
12
29.416667
68
0.677922
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,413
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/display/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en'
149
Python
.py
4
35
58
0.678571
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,414
webview.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/display/webview.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import re from calibre import guess_type from polyglot.builtins import iteritems class EntityDeclarationProcessor: # {{{ def __init__(self, html): self.declared_entities = {} for match in re.finditer(r'<!\s*ENTITY\s+([^>]+)>', html): tokens = match.group(1).split() if len(tokens) > 1: self.declared_entities[tokens[0].strip()] = tokens[1].strip().replace('"', '') self.processed_html = html for key, val in iteritems(self.declared_entities): self.processed_html = self.processed_html.replace('&%s;'%key, val) # }}} def self_closing_sub(match): tag = match.group(1) if tag.lower().strip() == 'br': return match.group() return '<%s%s></%s>'%(match.group(1), match.group(2), match.group(1)) def cleanup_html(html): html = EntityDeclarationProcessor(html).processed_html self_closing_pat = re.compile(r'<\s*([:A-Za-z0-9-]+)([^>]*)/\s*>') html = self_closing_pat.sub(self_closing_sub, html) return html xml_detect_pat = re.compile(r'<!(?:\[CDATA\[|ENTITY)') def load_as_html(html): return re.search(r'<[a-zA-Z0-9-]+:svg', html) is None and xml_detect_pat.search(html) is None def load_html(path, view, codec='utf-8', mime_type=None, pre_load_callback=lambda x:None, path_is_html=False, force_as_html=False, loading_url=None): from qt.core import QByteArray, QUrl if mime_type is None: mime_type = guess_type(path)[0] if not mime_type: mime_type = 'text/html' if path_is_html: html = path else: with open(path, 'rb') as f: html = f.read().decode(codec, 'replace') html = cleanup_html(html) loading_url = loading_url or QUrl.fromLocalFile(path) pre_load_callback(loading_url) if force_as_html or load_as_html(html): view.setHtml(html, loading_url) else: view.setContent(QByteArray(html.encode(codec)), mime_type, loading_url) mf = view.page().mainFrame() elem = mf.findFirstElement('parsererror') if not elem.isNull(): return False return True
2,322
Python
.py
57
33.789474
97
0.621104
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,415
split.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/split.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __docformat__ = 'restructuredtext en' ''' Splitting of the XHTML flows. Splitting can happen on page boundaries or can be forced at "likely" locations to conform to size limitations. This transform assumes a prior call to the flatcss transform. ''' import collections import copy import functools import os import re from collections import OrderedDict from css_selectors import Select, SelectorError from lxml import etree from lxml.etree import XPath as _XPath from calibre import as_unicode, force_unicode from calibre.ebooks.epub import rules from calibre.ebooks.oeb.base import OEB_STYLES, XHTML, rewrite_links, urldefrag, urlnormalize from calibre.ebooks.oeb.base import XPNSMAP as NAMESPACES from calibre.ebooks.oeb.polish.split import do_split from polyglot.builtins import iteritems from polyglot.urllib import unquote XPath = functools.partial(_XPath, namespaces=NAMESPACES) SPLIT_POINT_ATTR = 'csp' def tostring(root): return etree.tostring(root, encoding='utf-8') class SplitError(ValueError): def __init__(self, path, root): size = len(tostring(root))/1024. 
ValueError.__init__(self, _('Could not find reasonable point at which to split: ' '%(path)s Sub-tree size: %(size)d KB')%dict( path=path, size=size)) class Split: def __init__(self, split_on_page_breaks=True, page_breaks_xpath=None, max_flow_size=0, remove_css_pagebreaks=True): self.split_on_page_breaks = split_on_page_breaks self.page_breaks_xpath = page_breaks_xpath self.max_flow_size = max_flow_size self.page_break_selectors = None self.remove_css_pagebreaks = remove_css_pagebreaks if self.page_breaks_xpath is not None: self.page_break_selectors = [(XPath(self.page_breaks_xpath), False)] def __call__(self, oeb, opts): self.oeb = oeb self.log = oeb.log self.log('Splitting markup on page breaks and flow limits, if any...') self.opts = opts self.map = {} for item in list(self.oeb.manifest.items): if item.spine_position is not None and etree.iselement(item.data): self.split_item(item) self.fix_links() def split_item(self, item): page_breaks, page_break_ids = [], [] if self.split_on_page_breaks: page_breaks, page_break_ids = self.find_page_breaks(item) splitter = FlowSplitter(item, page_breaks, page_break_ids, self.max_flow_size, self.oeb, self.opts) if splitter.was_split: am = splitter.anchor_map self.map[item.href] = collections.defaultdict( am.default_factory, am) def find_page_breaks(self, item): if self.page_break_selectors is None: self.page_break_selectors = set() stylesheets = [x.data for x in self.oeb.manifest if x.media_type in OEB_STYLES] for rule in rules(stylesheets): before = force_unicode(getattr(rule.style.getPropertyCSSValue( 'page-break-before'), 'cssText', '').strip().lower()) after = force_unicode(getattr(rule.style.getPropertyCSSValue( 'page-break-after'), 'cssText', '').strip().lower()) try: if before and before not in {'avoid', 'auto', 'inherit'}: self.page_break_selectors.add((rule.selectorText, True)) if self.remove_css_pagebreaks: rule.style.removeProperty('page-break-before') except: pass try: if after and after not in {'avoid', 'auto', 
'inherit'}: self.page_break_selectors.add((rule.selectorText, False)) if self.remove_css_pagebreaks: rule.style.removeProperty('page-break-after') except: pass page_breaks = set() select = Select(item.data) if not self.page_break_selectors: return [], [] body = item.data.xpath('//h:body', namespaces=NAMESPACES) if not body: return [], [] descendants = frozenset(body[0].iterdescendants('*')) for selector, before in self.page_break_selectors: try: for elem in select(selector): if elem in descendants and elem.tag.rpartition('}')[2].lower() not in {'html', 'body', 'head', 'style', 'script', 'meta', 'link'}: elem.set('pb_before', '1' if before else '0') page_breaks.add(elem) except SelectorError as err: self.log.warn(f'Ignoring page breaks specified with invalid CSS selector: {selector!r} ({as_unicode(err)})') for i, elem in enumerate(item.data.iter('*')): try: elem.set('pb_order', str(i)) except TypeError: # Can't set attributes on comment nodes etc. continue page_breaks = list(page_breaks) page_breaks.sort(key=lambda x:int(x.get('pb_order'))) page_break_ids, page_breaks_ = [], [] for i, x in enumerate(page_breaks): x.set('id', x.get('id', 'calibre_pb_%d'%i)) id = x.get('id') try: xp = XPath('//*[@id="%s"]'%id) except: try: xp = XPath("//*[@id='%s']"%id) except: # The id has both a quote and an apostrophe or some other # Just replace it since I doubt its going to work anywhere else # either id = 'calibre_pb_%d'%i x.set('id', id) xp = XPath('//*[@id=%r]'%id) page_breaks_.append((xp, x.get('pb_before', '0') == '1')) page_break_ids.append(id) for elem in item.data.iter(etree.Element): elem.attrib.pop('pb_order', False) elem.attrib.pop('pb_before', False) return page_breaks_, page_break_ids def fix_links(self): ''' Fix references to the split files in other content files. 
''' for item in self.oeb.manifest: if etree.iselement(item.data): self.current_item = item rewrite_links(item.data, self.rewrite_links) def rewrite_links(self, url): href, frag = urldefrag(url) try: href = self.current_item.abshref(href) except ValueError: # Unparsable URL return url try: href = urlnormalize(href) except ValueError: # href has non utf-8 quoting return url if href in self.map: anchor_map = self.map[href] nhref = anchor_map[frag if frag else None] nhref = self.current_item.relhref(nhref) if frag: nhref = '#'.join((unquote(nhref), frag)) return nhref return url class FlowSplitter: 'The actual splitting logic' def __init__(self, item, page_breaks, page_break_ids, max_flow_size, oeb, opts): self.item = item self.oeb = oeb self.opts = opts self.log = oeb.log self.page_breaks = page_breaks self.page_break_ids = page_break_ids self.max_flow_size = max_flow_size self.base = item.href self.csp_counter = 0 base, ext = os.path.splitext(self.base) self.base = base.replace('%', '%%')+'_split_%.3d'+ext self.trees = [self.item.data.getroottree()] self.splitting_on_page_breaks = True if self.page_breaks: self.split_on_page_breaks(self.trees[0]) self.splitting_on_page_breaks = False if self.max_flow_size > 0: lt_found = False self.log('\tLooking for large trees in %s...'%item.href) trees = list(self.trees) self.tree_map = {} for i, tree in enumerate(trees): size = len(tostring(tree.getroot())) if size > self.max_flow_size: self.log('\tFound large tree #%d'%i) lt_found = True self.split_trees = [] self.split_to_size(tree) self.tree_map[tree] = self.split_trees if not lt_found: self.log('\tNo large trees found') self.trees = [] for x in trees: self.trees.extend(self.tree_map.get(x, [x])) self.was_split = len(self.trees) > 1 if self.was_split: self.log('\tSplit into %d parts'%len(self.trees)) self.commit() def split_on_page_breaks(self, orig_tree): ordered_ids = OrderedDict() all_page_break_ids = frozenset(self.page_break_ids) for elem_id in orig_tree.xpath('//*/@id'): 
if elem_id in all_page_break_ids: ordered_ids[elem_id] = self.page_breaks[ self.page_break_ids.index(elem_id)] self.trees = [orig_tree] while ordered_ids: pb_id, (pattern, before) = next(iteritems(ordered_ids)) del ordered_ids[pb_id] for i in range(len(self.trees)-1, -1, -1): tree = self.trees[i] elem = pattern(tree) if elem: self.log.debug('\t\tSplitting on page-break at id=%s'% elem[0].get('id')) before_tree, after_tree = self.do_split(tree, elem[0], before) self.trees[i:i+1] = [before_tree, after_tree] break trees, ids = [], set() for tree in self.trees: root = tree.getroot() if self.is_page_empty(root): discarded_ids = root.xpath('//*[@id]') for x in discarded_ids: x = x.get('id') if not x.startswith('calibre_'): ids.add(x) else: if ids: body = self.get_body(root) if body is not None: existing_ids = frozenset(body.xpath('//*/@id')) for x in ids - existing_ids: body.insert(0, body.makeelement(XHTML('div'), id=x, style='height:0pt')) ids = set() trees.append(tree) self.trees = trees def get_body(self, root): body = root.xpath('//h:body', namespaces=NAMESPACES) if not body: return None return body[0] def do_split(self, tree, split_point, before): ''' Split ``tree`` into a *before* and *after* tree at ``split_point``. 
:param before: If True tree is split before split_point, otherwise after split_point :return: before_tree, after_tree ''' return do_split(split_point, self.log, before=before) def is_page_empty(self, root): body = self.get_body(root) if body is None: return False txt = re.sub(r'\s+|\xa0', '', etree.tostring(body, method='text', encoding='unicode')) if len(txt): return False for img in root.xpath('//h:img', namespaces=NAMESPACES): if img.get('style', '') != 'display:none': return False if root.xpath('//*[local-name() = "svg"]'): return False return True def split_text(self, text, root, size): self.log.debug('\t\t\tSplitting text of length: %d'%len(text)) rest = text.replace('\r', '') parts = re.split('\n\n', rest) self.log.debug('\t\t\t\tFound %d parts'%len(parts)) if max(map(len, parts)) > size: raise SplitError('Cannot split as file contains a <pre> tag ' 'with a very large paragraph', root) ans = [] buf = '' for part in parts: if len(buf) + len(part) < size: buf += '\n\n'+part else: ans.append(buf) buf = part return ans def split_to_size(self, tree): self.log.debug('\t\tSplitting...') root = tree.getroot() # Split large <pre> tags if they contain only text for pre in XPath('//h:pre')(root): if len(tuple(pre.iterchildren(etree.Element))) > 0: continue if pre.text and len(pre.text) > self.max_flow_size*0.5: self.log.debug('\t\tSplitting large <pre> tag') frags = self.split_text(pre.text, root, int(0.2*self.max_flow_size)) new_pres = [] for frag in frags: pre2 = copy.copy(pre) pre2.text = frag pre2.tail = '' new_pres.append(pre2) new_pres[-1].tail = pre.tail p = pre.getparent() i = p.index(pre) p[i:i+1] = new_pres split_point, before = self.find_split_point(root) if split_point is None: raise SplitError(self.item.href, root) self.log.debug('\t\t\tSplit point:', split_point.tag, tree.getpath(split_point)) trees = self.do_split(tree, split_point, before) sizes = [len(tostring(t.getroot())) for t in trees] if min(sizes) < 5*1024: self.log.debug('\t\t\tSplit tree too 
small') self.split_to_size(tree) return for t, size in zip(trees, sizes): r = t.getroot() if self.is_page_empty(r): continue elif size <= self.max_flow_size: self.split_trees.append(t) self.log.debug( '\t\t\tCommitted sub-tree #%d (%d KB)'%( len(self.split_trees), size/1024.)) else: self.log.debug( '\t\t\tSplit tree still too large: %d KB' % (size/1024.)) self.split_to_size(t) def find_split_point(self, root): ''' Find the tag at which to split the tree rooted at `root`. Search order is: * Heading tags * <div> tags * <pre> tags * <hr> tags * <p> tags * <br> tags * <li> tags We try to split in the "middle" of the file (as defined by tag counts. ''' def pick_elem(elems): if elems: elems = [i for i in elems if i.get(SPLIT_POINT_ATTR, '0') != '1'] if elems: i = int(len(elems)//2) elems[i].set(SPLIT_POINT_ATTR, '1') return elems[i] for path in ( '//*[re:match(name(), "h[1-6]", "i")]', '/h:html/h:body/h:div', '//h:pre', '//h:hr', '//h:p', '//h:div', '//h:br', '//h:li', ): elems = root.xpath(path, namespaces=NAMESPACES) elem = pick_elem(elems) if elem is not None: try: XPath(elem.getroottree().getpath(elem)) except: continue return elem, True return None, True def commit(self): ''' Commit all changes caused by the split. Calculates an *anchor_map* for all anchors in the original tree. Internal links are re-directed. The original file is deleted and the split files are saved. 
''' if not self.was_split: return self.anchor_map = collections.defaultdict(lambda :self.base%0) self.files = [] for i, tree in enumerate(self.trees): root = tree.getroot() self.files.append(self.base%i) for elem in root.xpath('//*[@id or @name]'): for anchor in elem.get('id', ''), elem.get('name', ''): if anchor != '' and anchor not in self.anchor_map: self.anchor_map[anchor] = self.files[-1] for elem in root.xpath('//*[@%s]'%SPLIT_POINT_ATTR): elem.attrib.pop(SPLIT_POINT_ATTR, '0') spine_pos = self.item.spine_position for current, tree in zip(*map(reversed, (self.files, self.trees))): for a in tree.getroot().xpath('//h:a[@href]', namespaces=NAMESPACES): href = a.get('href').strip() if href.startswith('#'): anchor = href[1:] file = self.anchor_map[anchor] file = self.item.relhref(file) if file != current: a.set('href', file+href) new_id = self.oeb.manifest.generate(id=self.item.id)[0] new_item = self.oeb.manifest.add(new_id, current, self.item.media_type, data=tree.getroot()) self.oeb.spine.insert(spine_pos, new_item, self.item.linear) if self.oeb.guide: for ref in self.oeb.guide.values(): href, frag = urldefrag(ref.href) if href == self.item.href: nhref = self.anchor_map[frag if frag else None] if frag: nhref = '#'.join((nhref, frag)) ref.href = nhref def fix_toc_entry(toc): if toc.href: href, frag = urldefrag(toc.href) if href == self.item.href: nhref = self.anchor_map[frag if frag else None] if frag: nhref = '#'.join((nhref, frag)) toc.href = nhref for x in toc: fix_toc_entry(x) if self.oeb.toc: fix_toc_entry(self.oeb.toc) if self.oeb.pages: for page in self.oeb.pages: href, frag = urldefrag(page.href) if href == self.item.href: nhref = self.anchor_map[frag if frag else None] if frag: nhref = '#'.join((nhref, frag)) page.href = nhref self.oeb.manifest.remove(self.item)
18,723
Python
.py
431
30.171694
150
0.520184
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,416
trimmanifest.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/trimmanifest.py
''' OPF manifest trimming transform. ''' __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>' from calibre.ebooks.oeb.base import CSS_MIME, OEB_DOCS, iterlinks, urlnormalize from polyglot.urllib import urldefrag class ManifestTrimmer: @classmethod def config(cls, cfg): return cfg @classmethod def generate(cls, opts): return cls() def __call__(self, oeb, context): import css_parser oeb.logger.info('Trimming unused files from manifest...') self.opts = context used = set() for term in oeb.metadata: for item in oeb.metadata[term]: if item.value in oeb.manifest.hrefs: used.add(oeb.manifest.hrefs[item.value]) elif item.value in oeb.manifest.ids: used.add(oeb.manifest.ids[item.value]) for ref in oeb.guide.values(): path, _ = urldefrag(ref.href) if path in oeb.manifest.hrefs: used.add(oeb.manifest.hrefs[path]) # TOC items are required to be in the spine for item in oeb.spine: used.add(item) unchecked = used while unchecked: new = set() for item in unchecked: if (item.media_type in OEB_DOCS or item.media_type[-4:] in ('/xml', '+xml')) and \ item.data is not None: hrefs = [r[2] for r in iterlinks(item.data)] for href in hrefs: if isinstance(href, bytes): href = href.decode('utf-8') try: href = item.abshref(urlnormalize(href)) except: continue if href in oeb.manifest.hrefs: found = oeb.manifest.hrefs[href] if found not in used: new.add(found) elif item.media_type == CSS_MIME: for href in css_parser.getUrls(item.data): href = item.abshref(urlnormalize(href)) if href in oeb.manifest.hrefs: found = oeb.manifest.hrefs[href] if found not in used: new.add(found) used.update(new) unchecked = new for item in oeb.manifest.values(): if item not in used: oeb.logger.info('Trimming %r from manifest' % item.href) oeb.manifest.remove(item)
2,678
Python
.py
64
26.859375
79
0.505372
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,417
htmltoc.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/htmltoc.py
''' HTML-TOC-adding transform. ''' __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>' from calibre.ebooks.oeb.base import CSS_MIME, XHTML, XHTML_MIME, XHTML_NS, XML, XPath, element from calibre.utils.localization import __ __all__ = ['HTMLTOCAdder'] DEFAULT_TITLE = __('Table of Contents') STYLE_CSS = { 'nested': """ .calibre_toc_header { text-align: center; } .calibre_toc_block { margin-left: 1.2em; text-indent: -1.2em; } .calibre_toc_block .calibre_toc_block { margin-left: 2.4em; } .calibre_toc_block .calibre_toc_block .calibre_toc_block { margin-left: 3.6em; } """, 'centered': """ .calibre_toc_header { text-align: center; } .calibre_toc_block { text-align: center; } body > .calibre_toc_block { margin-top: 1.2em; } """ } class HTMLTOCAdder: def __init__(self, title=None, style='nested', position='end'): self.title = title self.style = style self.position = position @classmethod def config(cls, cfg): group = cfg.add_group('htmltoc', _('HTML TOC generation options.')) group('toc_title', ['--toc-title'], default=None, help=_('Title for any generated inline table of contents.')) return cfg @classmethod def generate(cls, opts): return cls(title=opts.toc_title) def __call__(self, oeb, context): has_toc = getattr(getattr(oeb, 'toc', False), 'nodes', False) if 'toc' in oeb.guide: # Ensure toc pointed to in <guide> is in spine from calibre.ebooks.oeb.base import urlnormalize href = urlnormalize(oeb.guide['toc'].href) if href in oeb.manifest.hrefs: item = oeb.manifest.hrefs[href] if (hasattr(item.data, 'xpath') and XPath('//h:a[@href]')(item.data)): if oeb.spine.index(item) < 0: if self.position == 'end': oeb.spine.add(item, linear=False) else: oeb.spine.insert(0, item, linear=True) return elif has_toc: oeb.guide.remove('toc') else: oeb.guide.remove('toc') if not has_toc: return oeb.logger.info('Generating in-line TOC...') title = self.title or oeb.translate(DEFAULT_TITLE) style = self.style if style not in STYLE_CSS: 
oeb.logger.error('Unknown TOC style %r' % style) style = 'nested' id, css_href = oeb.manifest.generate('tocstyle', 'tocstyle.css') oeb.manifest.add(id, css_href, CSS_MIME, data=STYLE_CSS[style]) language = str(oeb.metadata.language[0]) contents = element(None, XHTML('html'), nsmap={None: XHTML_NS}, attrib={XML('lang'): language}) head = element(contents, XHTML('head')) htitle = element(head, XHTML('title')) htitle.text = title element(head, XHTML('link'), rel='stylesheet', type=CSS_MIME, href=css_href) body = element(contents, XHTML('body'), attrib={'class': 'calibre_toc'}) h1 = element(body, XHTML('h2'), attrib={'class': 'calibre_toc_header'}) h1.text = title self.add_toc_level(body, oeb.toc) id, href = oeb.manifest.generate('contents', 'contents.xhtml') item = oeb.manifest.add(id, href, XHTML_MIME, data=contents) if self.position == 'end': oeb.spine.add(item, linear=False) else: oeb.spine.insert(0, item, linear=True) oeb.guide.add('toc', 'Table of Contents', href) def add_toc_level(self, elem, toc): for node in toc: block = element(elem, XHTML('div'), attrib={'class': 'calibre_toc_block'}) line = element(block, XHTML('a'), attrib={'href': node.href, 'class': 'calibre_toc_line'}) line.text = node.title self.add_toc_level(block, node)
4,172
Python
.py
111
28.18018
94
0.561404
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,418
rescale.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/rescale.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' from calibre import fit_image class RescaleImages: 'Rescale all images to fit inside given screen size' def __init__(self, check_colorspaces=False): self.check_colorspaces = check_colorspaces def __call__(self, oeb, opts, max_size: str = 'profile'): self.oeb, self.opts, self.log = oeb, opts, oeb.log self.rescale(max_size) def rescale(self, max_size: str = 'profile'): from io import BytesIO from PIL import Image is_image_collection = getattr(self.opts, 'is_image_collection', False) if is_image_collection: page_width, page_height = self.opts.dest.comic_screen_size else: page_width, page_height = self.opts.dest.width, self.opts.dest.height page_width -= (self.opts.margin_left + self.opts.margin_right) * self.opts.dest.dpi/72 page_height -= (self.opts.margin_top + self.opts.margin_bottom) * self.opts.dest.dpi/72 no_scale_size = 99999999999 if max_size == 'none': page_width = page_height = no_scale_size elif max_size != 'profile': w, __, h = max_size.strip().lower().partition('x') try: page_width = int(w.strip()) except Exception: page_width = no_scale_size if page_width <= 0: page_width = no_scale_size try: page_height = int(h.strip()) except Exception: page_height = no_scale_size if page_height <= 0: page_height = no_scale_size for item in self.oeb.manifest: if item.media_type.startswith('image'): ext = item.media_type.split('/')[-1].upper() if ext == 'JPG': ext = 'JPEG' if ext not in ('PNG', 'JPEG', 'GIF'): ext = 'JPEG' raw = item.data if hasattr(raw, 'xpath') or not raw: # Probably an svg image continue try: img = Image.open(BytesIO(raw)) except Exception: continue width, height = img.size try: if self.check_colorspaces and img.mode == 'CMYK': self.log.warn( 'The image %s is in the CMYK colorspace, converting it ' 'to RGB as Adobe Digital Editions cannot display CMYK' % item.href) img = img.convert('RGB') except Exception: 
self.log.exception('Failed to convert image %s from CMYK to RGB' % item.href) scaled, new_width, new_height = fit_image(width, height, page_width, page_height) if scaled: new_width = max(1, new_width) new_height = max(1, new_height) self.log('Rescaling image from %dx%d to %dx%d'%( width, height, new_width, new_height), item.href) try: img = img.resize((new_width, new_height)) except Exception: self.log.exception('Failed to rescale image: %s' % item.href) continue buf = BytesIO() try: img.save(buf, ext) except Exception: self.log.exception('Failed to rescale image: %s' % item.href) else: item.data = buf.getvalue() item.unload_data_from_memory()
3,858
Python
.py
82
31.317073
99
0.504521
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,419
manglecase.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/manglecase.py
''' CSS case-mangling transform. ''' __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>' from lxml import etree from calibre.ebooks.oeb.base import CSS_MIME, XHTML, XHTML_NS, namespace from calibre.ebooks.oeb.stylizer import Stylizer from calibre.utils.icu import lower as icu_lower from calibre.utils.icu import title_case as icu_title from calibre.utils.icu import upper as icu_upper from polyglot.builtins import string_or_bytes CASE_MANGLER_CSS = """ .calibre_lowercase { font-variant: normal; font-size: 0.65em; } """ TEXT_TRANSFORMS = {'capitalize', 'uppercase', 'lowercase'} class CaseMangler: @classmethod def config(cls, cfg): return cfg @classmethod def generate(cls, opts): return cls() def __call__(self, oeb, context): oeb.logger.info('Applying case-transforming CSS...') self.oeb = oeb self.opts = context self.profile = context.source self.mangle_spine() def mangle_spine(self): id, href = self.oeb.manifest.generate('manglecase', 'manglecase.css') self.oeb.manifest.add(id, href, CSS_MIME, data=CASE_MANGLER_CSS) for item in self.oeb.spine: html = item.data relhref = item.relhref(href) etree.SubElement(html.find(XHTML('head')), XHTML('link'), rel='stylesheet', href=relhref, type=CSS_MIME) stylizer = Stylizer(html, item.href, self.oeb, self.opts, self.profile) self.mangle_elem(html.find(XHTML('body')), stylizer) def text_transform(self, transform, text): if transform == 'capitalize': return icu_title(text) elif transform == 'uppercase': return icu_upper(text) elif transform == 'lowercase': return icu_lower(text) return text def split_text(self, text): results = [''] isupper = text[0].isupper() for char in text: if char.isupper() == isupper: results[-1] += char else: isupper = not isupper results.append(char) return results def smallcaps_elem(self, elem, attr): texts = self.split_text(getattr(elem, attr)) setattr(elem, attr, None) last = elem if attr == 'tail' else None attrib = {'class': 'calibre_lowercase'} for text in texts: 
if text.isupper(): if last is None: elem.text = text else: last.tail = text else: child = elem.makeelement(XHTML('span'), attrib=attrib) child.text = text.upper() if last is None: elem.insert(0, child) else: # addnext() moves the tail for some reason tail = last.tail last.addnext(child) last.tail = tail child.tail = None last = child def mangle_elem(self, elem, stylizer): if not isinstance(elem.tag, string_or_bytes) or \ namespace(elem.tag) != XHTML_NS: return children = list(elem) style = stylizer.style(elem) transform = style['text-transform'] variant = style['font-variant'] if elem.text: if transform in TEXT_TRANSFORMS: elem.text = self.text_transform(transform, elem.text) if variant == 'small-caps': self.smallcaps_elem(elem, 'text') for child in children: self.mangle_elem(child, stylizer) if child.tail: if transform in TEXT_TRANSFORMS: child.tail = self.text_transform(transform, child.tail) if variant == 'small-caps': self.smallcaps_elem(child, 'tail')
3,942
Python
.py
103
27.84466
83
0.573483
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,420
page_margin.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/page_margin.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import numbers from collections import Counter from calibre.ebooks.oeb.base import XPath, barename from polyglot.builtins import iteritems class RemoveAdobeMargins: ''' Remove margins specified in Adobe's page templates. ''' def __call__(self, oeb, log, opts): self.oeb, self.opts, self.log = oeb, opts, log for item in self.oeb.manifest: if item.media_type in { 'application/vnd.adobe-page-template+xml', 'application/vnd.adobe.page-template+xml', 'application/adobe-page-template+xml', 'application/adobe.page-template+xml', } and hasattr(item.data, 'xpath'): self.log('Removing page margins specified in the' ' Adobe page template') for elem in item.data.xpath( '//*[@margin-bottom or @margin-top ' 'or @margin-left or @margin-right]'): for margin in ('left', 'right', 'top', 'bottom'): attr = 'margin-'+margin elem.attrib.pop(attr, None) class NegativeTextIndent(Exception): pass class RemoveFakeMargins: ''' Remove left and right margins from paragraph/divs if the same margin is specified on almost all the elements at that level. 
Must be called only after CSS flattening ''' def __call__(self, oeb, log, opts): if not opts.remove_fake_margins: return self.oeb, self.log, self.opts = oeb, log, opts stylesheet = None self.levels = {} self.stats = {} self.selector_map = {} stylesheet = self.oeb.manifest.main_stylesheet if stylesheet is None: return self.log('Removing fake margins...') stylesheet = stylesheet.data from css_parser.css import CSSRule for rule in stylesheet.cssRules.rulesOfType(CSSRule.STYLE_RULE): self.selector_map[rule.selectorList.selectorText] = rule.style self.find_levels() for level in self.levels: try: self.process_level(level) except NegativeTextIndent: self.log.debug('Negative text indent detected at level ' ' %s, ignoring this level'%level) def get_margins(self, elem): cls = elem.get('class', None) if cls: style = self.selector_map.get('.'+cls, None) if style: try: ti = style['text-indent'] except: pass else: if ((hasattr(ti, 'startswith') and ti.startswith('-')) or isinstance(ti, numbers.Number) and ti < 0): raise NegativeTextIndent() return style.marginLeft, style.marginRight, style return '', '', None def process_level(self, level): elems = self.levels[level] self.stats[level+'_left'] = Counter() self.stats[level+'_right'] = Counter() for elem in elems: lm, rm = self.get_margins(elem)[:2] self.stats[level+'_left'][lm] += 1 self.stats[level+'_right'][rm] += 1 self.log.debug(level, ' left margin stats:', self.stats[level+'_left']) self.log.debug(level, ' right margin stats:', self.stats[level+'_right']) remove_left = self.analyze_stats(self.stats[level+'_left']) remove_right = self.analyze_stats(self.stats[level+'_right']) if remove_left: mcl = self.stats[level+'_left'].most_common(1)[0][0] self.log('Removing level %s left margin of:'%level, mcl) if remove_right: mcr = self.stats[level+'_right'].most_common(1)[0][0] self.log('Removing level %s right margin of:'%level, mcr) if remove_left or remove_right: for elem in elems: lm, rm, style = self.get_margins(elem) if 
remove_left and lm == mcl: style.removeProperty('margin-left') if remove_right and rm == mcr: style.removeProperty('margin-right') def find_levels(self): def level_of(elem, body): ans = 1 while elem.getparent() is not body: ans += 1 elem = elem.getparent() return ans paras = XPath('descendant::h:p|descendant::h:div') for item in self.oeb.spine: body = XPath('//h:body')(item.data) if not body: continue body = body[0] for p in paras(body): level = level_of(p, body) level = '%s_%d'%(barename(p.tag), level) if level not in self.levels: self.levels[level] = [] self.levels[level].append(p) remove = set() for k, v in iteritems(self.levels): num = len(v) self.log.debug('Found %d items of level:'%num, k) level = int(k.split('_')[-1]) tag = k.split('_')[0] if tag == 'p' and num < 25: remove.add(k) if tag == 'div': if level > 2 and num < 25: remove.add(k) elif level < 3: # Check each level < 3 element and only keep those # that have many child paras for elem in list(v): children = len(paras(elem)) if children < 5: v.remove(elem) for k in remove: self.levels.pop(k) self.log.debug('Ignoring level', k) def analyze_stats(self, stats): if not stats: return False mc = stats.most_common(1) if len(mc) > 1: return False mc = mc[0] most_common, most_common_count = mc if not most_common or most_common == '0': return False total = sum(stats.values()) # True if greater than 95% of elements have the same margin return most_common_count/total > 0.95
6,357
Python
.py
151
29.596026
101
0.537202
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,421
cover.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/cover.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import textwrap from calibre import guess_type from calibre.utils.imghdr import identify from calibre.utils.xml_parse import safe_xml_fromstring from polyglot.urllib import unquote class CoverManager: SVG_TEMPLATE = textwrap.dedent('''\ <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="calibre:cover" content="true" /> <title>Cover</title> <style type="text/css" title="override_css"> @page {padding: 0pt; margin:0pt} body { text-align: center; padding:0pt; margin: 0pt; } </style> </head> <body> <div> <svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="100%%" height="100%%" viewBox="__viewbox__" preserveAspectRatio="__ar__"> <image width="__width__" height="__height__" xlink:href="%s"/> </svg> </div> </body> </html> ''') NONSVG_TEMPLATE = textwrap.dedent('''\ <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <meta name="calibre:cover" content="true" /> <title>Cover</title> <style type="text/css" title="override_css"> @page {padding: 0pt; margin:0pt} body { text-align: center; padding:0pt; margin: 0pt } div { padding:0pt; margin: 0pt } img { padding:0pt; margin: 0pt } </style> </head> <body> <div> <img src="%s" alt="cover" __style__ /> </div> </body> </html> ''') def __init__(self, no_default_cover=False, no_svg_cover=False, preserve_aspect_ratio=False, fixed_size=None): self.no_default_cover = no_default_cover self.no_svg_cover = no_svg_cover self.preserve_aspect_ratio = preserve_aspect_ratio ar = 'xMidYMid meet' if preserve_aspect_ratio else 'none' self.svg_template = self.SVG_TEMPLATE.replace('__ar__', ar) if fixed_size is None: style = 'style="height: 100%%"' else: width, height = fixed_size 
style = 'style="height: %s; width: %s"'%(height, width) self.non_svg_template = self.NONSVG_TEMPLATE.replace('__style__', style) def __call__(self, oeb, opts, log): self.oeb = oeb self.log = log self.insert_cover() def default_cover(self): ''' Create a generic cover for books that dont have a cover ''' if self.no_default_cover: return None self.log('Generating default cover') m = self.oeb.metadata title = str(m.title[0]) authors = [str(x) for x in m.creator if x.role == 'aut'] try: from calibre.ebooks.covers import create_cover series = series_index = None if m.series: try: series, series_index = str(m.series[0]), m.series_index[0] except IndexError: pass img_data = create_cover(title, authors, series, series_index) id, href = self.oeb.manifest.generate('cover', 'cover_image.jpg') item = self.oeb.manifest.add(id, href, guess_type('t.jpg')[0], data=img_data) m.clear('cover') m.add('cover', item.id) return item.href except: self.log.exception('Failed to generate default cover') return None def inspect_cover(self, href): from calibre.ebooks.oeb.base import urlnormalize for x in self.oeb.manifest: if x.href == urlnormalize(href): try: raw = x.data return identify(raw)[1:] except Exception: self.log.exception('Failed to read cover image dimensions') return -1, -1 def insert_cover(self): from calibre.ebooks.oeb.base import urldefrag g, m = self.oeb.guide, self.oeb.manifest item = None if 'titlepage' not in g: if 'cover' in g: href = g['cover'].href else: href = self.default_cover() if href is None: return width, height = self.inspect_cover(href) if width == -1 or height == -1: self.log.warning('Failed to read cover dimensions') width, height = 600, 800 # if self.preserve_aspect_ratio: # width, height = 600, 800 self.svg_template = self.svg_template.replace('__viewbox__', '0 0 %d %d'%(width, height)) self.svg_template = self.svg_template.replace('__width__', str(width)) self.svg_template = self.svg_template.replace('__height__', str(height)) if href is not None: templ = 
self.non_svg_template if self.no_svg_cover \ else self.svg_template tp = templ%unquote(href) id, href = m.generate('titlepage', 'titlepage.xhtml') item = m.add(id, href, guess_type('t.xhtml')[0], data=safe_xml_fromstring(tp)) else: item = self.oeb.manifest.hrefs[ urldefrag(self.oeb.guide['titlepage'].href)[0]] if item is not None: if item in self.oeb.spine: self.oeb.spine.remove(item) self.oeb.spine.insert(0, item, True) if 'cover' not in self.oeb.guide.refs: self.oeb.guide.add('cover', 'Title page', 'a') self.oeb.guide.refs['cover'].href = item.href if 'titlepage' in self.oeb.guide.refs: self.oeb.guide.refs['titlepage'].href = item.href titem = getattr(self.oeb.toc, 'item_that_refers_to_cover', None) if titem is not None: titem.href = item.href
6,667
Python
.py
155
29.670968
86
0.517475
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,422
structure.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/structure.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import re import uuid from collections import Counter, OrderedDict from lxml import etree from calibre.ebooks import ConversionError from calibre.ebooks.oeb.base import TOC, XHTML, XPNSMAP, barename, xml2text from polyglot.builtins import itervalues from polyglot.urllib import urlparse def XPath(x): try: return etree.XPath(x, namespaces=XPNSMAP) except etree.XPathSyntaxError: raise ConversionError( 'The syntax of the XPath expression %s is invalid.' % repr(x)) def isspace(x): return not x or x.replace('\xa0', '').isspace() def at_start(elem): ' Return True if there is no content before elem ' body = XPath('ancestor-or-self::h:body')(elem) if not body: return True body = body[0] ancestors = frozenset(XPath('ancestor::*')(elem)) for x in body.iter(): if x is elem: return True if hasattr(getattr(x, 'tag', None), 'rpartition') and x.tag.rpartition('}')[-1] in {'img', 'svg'}: return False if isspace(getattr(x, 'text', None)) and (x in ancestors or isspace(getattr(x, 'tail', None))): continue return False return False class DetectStructure: def __call__(self, oeb, opts): self.log = oeb.log self.oeb = oeb self.opts = opts self.log('Detecting structure...') self.detect_chapters() if self.oeb.auto_generated_toc or opts.use_auto_toc: orig_toc = self.oeb.toc self.oeb.toc = TOC() self.create_level_based_toc() if self.oeb.toc.count() < 1: if not opts.no_chapters_in_toc and self.detected_chapters: self.create_toc_from_chapters() if self.oeb.toc.count() < opts.toc_threshold: self.create_toc_from_links() if self.oeb.toc.count() < 2 and orig_toc.count() > 2: self.oeb.toc = orig_toc else: self.oeb.auto_generated_toc = True self.log('Auto generated TOC with %d entries.' 
% self.oeb.toc.count()) if opts.toc_filter is not None: regexp = re.compile(opts.toc_filter) for node in list(self.oeb.toc.iter()): if not node.title or regexp.search(node.title) is not None: self.log('Filtering', node.title if node.title else 'empty node', 'from TOC') self.oeb.toc.remove(node) if opts.page_breaks_before is not None: pb_xpath = XPath(opts.page_breaks_before) for item in oeb.spine: for elem in pb_xpath(item.data): try: prev = next(elem.itersiblings(tag=etree.Element, preceding=True)) if (barename(elem.tag) in {'h1', 'h2'} and barename( prev.tag) in {'h1', 'h2'} and (not prev.tail or not prev.tail.split())): # We have two adjacent headings, do not put a page # break on the second one continue except StopIteration: pass style = elem.get('style', '') if style: style += '; ' elem.set('style', style+'page-break-before:always') for node in self.oeb.toc.iter(): if not node.title or not node.title.strip(): node.title = _('Unnamed') if self.opts.start_reading_at: self.detect_start_reading() def detect_start_reading(self): expr = self.opts.start_reading_at try: expr = XPath(expr) except: self.log.warn( 'Invalid start reading at XPath expression, ignoring: %s'%expr) return for item in self.oeb.spine: if not hasattr(item.data, 'xpath'): continue matches = expr(item.data) if matches: elem = matches[0] eid = elem.get('id', None) if not eid: eid = 'start_reading_at_'+str(uuid.uuid4()).replace('-', '') elem.set('id', eid) if 'text' in self.oeb.guide: self.oeb.guide.remove('text') self.oeb.guide.add('text', 'Start', item.href+'#'+eid) self.log('Setting start reading at position to %s in %s'%( self.opts.start_reading_at, item.href)) return self.log.warn("Failed to find start reading at position: %s"% self.opts.start_reading_at) def get_toc_parts_for_xpath(self, expr): # if an attribute is selected by the xpath expr then truncate it # from the path and instead return it as where to find the title text title_attribute_regex = re.compile(r'/@([-\w]+)$') match = 
title_attribute_regex.search(expr) if match is not None: return expr[0:match.start()], match.group(1) return expr, None def detect_chapters(self): self.detected_chapters = [] self.chapter_title_attribute = None def find_matches(expr, doc): try: ans = XPath(expr)(doc) len(ans) return ans except: self.log.warn('Invalid chapter expression, ignoring: %s'%expr) return [] if self.opts.chapter: chapter_path, title_attribute = self.get_toc_parts_for_xpath(self.opts.chapter) self.chapter_title_attribute = title_attribute for item in self.oeb.spine: for x in find_matches(chapter_path, item.data): self.detected_chapters.append((item, x)) chapter_mark = self.opts.chapter_mark page_break_before = 'display: block; page-break-before: always' page_break_after = 'display: block; page-break-after: always' c = Counter() for item, elem in self.detected_chapters: c[item] += 1 text = xml2text(elem).strip() text = re.sub(r'\s+', ' ', text.strip()) self.log('\tDetected chapter:', text[:50]) if chapter_mark == 'none': continue if chapter_mark == 'rule': mark = elem.makeelement(XHTML('hr')) elif chapter_mark == 'pagebreak': if c[item] < 3 and at_start(elem): # For the first two elements in this item, check if they # are at the start of the file, in which case inserting a # page break in unnecessary and can lead to extra blank # pages in the PDF Output plugin. We need to use two as # feedbooks epubs match both a heading tag and its # containing div with the default chapter expression. 
continue mark = elem.makeelement(XHTML('div'), style=page_break_after) else: # chapter_mark == 'both': mark = elem.makeelement(XHTML('hr'), style=page_break_before) try: elem.addprevious(mark) except TypeError: self.log.exception('Failed to mark chapter') def create_level_based_toc(self): if self.opts.level1_toc is not None: self.add_leveled_toc_items() def create_toc_from_chapters(self): counter = self.oeb.toc.next_play_order() for item, elem in self.detected_chapters: text, href = self.elem_to_link(item, elem, self.chapter_title_attribute, counter) self.oeb.toc.add(text, href, play_order=counter) counter += 1 def create_toc_from_links(self): num = 0 for item in self.oeb.spine: for a in XPath('//h:a[@href]')(item.data): href = a.get('href') try: purl = urlparse(href) except ValueError: self.log.warning('Ignoring malformed URL:', href) continue if not purl[0] or purl[0] == 'file': href, frag = purl.path, purl.fragment href = item.abshref(href) if frag: href = '#'.join((href, frag)) if not self.oeb.toc.has_href(href): text = xml2text(a) text = text[:100].strip() if (not self.opts.duplicate_links_in_toc and self.oeb.toc.has_text(text)): continue try: self.oeb.toc.add(text, href, play_order=self.oeb.toc.next_play_order()) num += 1 except ValueError: self.oeb.log.exception('Failed to process link: %r' % href) continue # Most likely an incorrectly URL encoded link if self.opts.max_toc_links > 0 and \ num >= self.opts.max_toc_links: self.log('Maximum TOC links reached, stopping.') return def elem_to_link(self, item, elem, title_attribute, counter): text = '' if title_attribute is not None: text = elem.get(title_attribute, '') if not text: text = xml2text(elem).strip() if not text: text = elem.get('title', '') if not text: text = elem.get('alt', '') text = re.sub(r'\s+', ' ', text.strip()) text = text[:1000].strip() id = elem.get('id', 'calibre_toc_%d'%counter) elem.set('id', id) href = '#'.join((item.href, id)) return text, href def add_leveled_toc_items(self): added = 
OrderedDict() added2 = OrderedDict() counter = 1 def find_matches(expr, doc): try: ans = XPath(expr)(doc) len(ans) return ans except: self.log.warn('Invalid ToC expression, ignoring: %s'%expr) return [] for document in self.oeb.spine: previous_level1 = list(itervalues(added))[-1] if added else None previous_level2 = list(itervalues(added2))[-1] if added2 else None level1_toc, level1_title = self.get_toc_parts_for_xpath(self.opts.level1_toc) for elem in find_matches(level1_toc, document.data): text, _href = self.elem_to_link(document, elem, level1_title, counter) counter += 1 if text: node = self.oeb.toc.add(text, _href, play_order=self.oeb.toc.next_play_order()) added[elem] = node # node.add(_('Top'), _href) if self.opts.level2_toc is not None and added: level2_toc, level2_title = self.get_toc_parts_for_xpath(self.opts.level2_toc) for elem in find_matches(level2_toc, document.data): level1 = None for item in document.data.iterdescendants(): if item in added: level1 = added[item] elif item == elem: if level1 is None: if previous_level1 is None: break level1 = previous_level1 text, _href = self.elem_to_link(document, elem, level2_title, counter) counter += 1 if text: added2[elem] = level1.add(text, _href, play_order=self.oeb.toc.next_play_order()) break if self.opts.level3_toc is not None and added2: level3_toc, level3_title = self.get_toc_parts_for_xpath(self.opts.level3_toc) for elem in find_matches(level3_toc, document.data): level2 = None for item in document.data.iterdescendants(): if item in added2: level2 = added2[item] elif item == elem: if level2 is None: if previous_level2 is None: break level2 = previous_level2 text, _href = \ self.elem_to_link(document, elem, level3_title, counter) counter += 1 if text: level2.add(text, _href, play_order=self.oeb.toc.next_play_order()) break
13,615
Python
.py
287
30.756098
106
0.497103
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,423
embed_fonts.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/embed_fonts.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import logging from collections import defaultdict import css_parser from lxml import etree from calibre import guess_type from calibre.ebooks.oeb.base import CSS_MIME, XHTML, XPath from calibre.ebooks.oeb.polish.embed import font_key from calibre.ebooks.oeb.transforms.subset import elem_style, find_font_face_rules, get_font_properties from calibre.utils.filenames import ascii_filename from calibre.utils.fonts.scanner import NoFonts, font_scanner from polyglot.builtins import iteritems def font_families_from_style(style): return [str(f) for f in style.get('font-family', []) if str(f).lower() not in { 'serif', 'sansserif', 'sans-serif', 'fantasy', 'cursive', 'monospace'}] def style_key(style): style = style.copy() style['font-family'] = font_families_from_style(style)[0] return font_key(style) def font_already_embedded(style, newly_embedded_fonts): return style_key(style) in newly_embedded_fonts def used_font(style, embedded_fonts): ff = font_families_from_style(style) if not ff: return False, None lnames = {str(x).lower() for x in ff} matching_set = [] # Filter on font-family for ef in embedded_fonts: flnames = {x.lower() for x in ef.get('font-family', [])} if not lnames.intersection(flnames): continue matching_set.append(ef) if not matching_set: return True, None # Filter on font-stretch widths = {x:i for i, x in enumerate(('ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded' ))} width = widths[style.get('font-stretch', 'normal')] for f in matching_set: f['width'] = widths[style.get('font-stretch', 'normal')] min_dist = min(abs(width-f['width']) for f in matching_set) if min_dist > 0: return True, None nearest = [f for f in matching_set if abs(width-f['width']) == min_dist] if width <= 4: lmatches = [f for f in nearest if f['width'] <= width] else: lmatches = [f for f in 
nearest if f['width'] >= width] matching_set = (lmatches or nearest) # Filter on font-style fs = style.get('font-style', 'normal') matching_set = [f for f in matching_set if f.get('font-style', 'normal') == fs] # Filter on font weight fw = int(style.get('font-weight', '400')) matching_set = [f for f in matching_set if f.get('weight', 400) == fw] if not matching_set: return True, None return True, matching_set[0] class EmbedFonts: ''' Embed all referenced fonts, if found on system. Must be called after CSS flattening. ''' def __call__(self, oeb, log, opts): self.oeb, self.log, self.opts = oeb, log, opts self.sheet_cache = {} self.find_style_rules() self.find_embedded_fonts() self.parser = css_parser.CSSParser(loglevel=logging.CRITICAL, log=logging.getLogger('calibre.css')) self.warned = set() self.warned2 = set() self.newly_embedded_fonts = set() for item in oeb.spine: if not hasattr(item.data, 'xpath'): continue sheets = [] for href in XPath('//h:link[@href and @type="text/css"]/@href')(item.data): sheet = self.oeb.manifest.hrefs.get(item.abshref(href), None) if sheet is not None: sheets.append(sheet) if sheets: self.process_item(item, sheets) def find_embedded_fonts(self): ''' Find all @font-face rules and extract the relevant info from them. ''' self.embedded_fonts = [] for item in self.oeb.manifest: if not hasattr(item.data, 'cssRules'): continue self.embedded_fonts.extend(find_font_face_rules(item, self.oeb)) def find_style_rules(self): ''' Extract all font related style information from all stylesheets into a dict mapping classes to font properties specified by that class. All the heavy lifting has already been done by the CSS flattening code. 
''' rules = defaultdict(dict) for item in self.oeb.manifest: if not hasattr(item.data, 'cssRules'): continue for i, rule in enumerate(item.data.cssRules): if rule.type != rule.STYLE_RULE: continue props = {k:v for k,v in iteritems(get_font_properties(rule)) if v} if not props: continue for sel in rule.selectorList: sel = sel.selectorText if sel and sel.startswith('.'): # We dont care about pseudo-selectors as the worst that # can happen is some extra characters will remain in # the font sel = sel.partition(':')[0] rules[sel[1:]].update(props) self.style_rules = dict(rules) def get_page_sheet(self): if self.page_sheet is None: manifest = self.oeb.manifest id_, href = manifest.generate('page_css', 'page_styles.css') self.page_sheet = manifest.add(id_, href, CSS_MIME, data=self.parser.parseString('', validate=False)) head = self.current_item.data.xpath('//*[local-name()="head"][1]') if head: href = self.current_item.relhref(href) l = etree.SubElement(head[0], XHTML('link'), rel='stylesheet', type=CSS_MIME, href=href) l.tail = '\n' else: self.log.warn('No <head> cannot embed font rules') return self.page_sheet def process_item(self, item, sheets): ff_rules = [] self.current_item = item self.page_sheet = None for sheet in sheets: if 'page_css' in sheet.id: ff_rules.extend(find_font_face_rules(sheet, self.oeb)) self.page_sheet = sheet base = {'font-family':['serif'], 'font-weight': '400', 'font-style':'normal', 'font-stretch':'normal'} for body in item.data.xpath('//*[local-name()="body"]'): self.find_usage_in(body, base, ff_rules) def find_usage_in(self, elem, inherited_style, ff_rules): style = elem_style(self.style_rules, elem.get('class', '') or '', inherited_style) for child in elem: self.find_usage_in(child, style, ff_rules) has_font, existing = used_font(style, ff_rules) if not has_font or font_already_embedded(style, self.newly_embedded_fonts): return if existing is None: in_book = used_font(style, self.embedded_fonts)[1] if in_book is None: # Try to find the 
font in the system added = self.embed_font(style) if added is not None: self.newly_embedded_fonts.add(style_key(style)) ff_rules.append(added) self.embedded_fonts.append(added) else: # TODO: Create a page rule from the book rule (cannot use it # directly as paths might be different) item = in_book['item'] sheet = self.parser.parseString(in_book['rule'].cssText, validate=False) rule = sheet.cssRules[0] page_sheet = self.get_page_sheet() href = page_sheet.abshref(item.href) rule.style.setProperty('src', 'url(%s)' % href) ff_rules.append(find_font_face_rules(sheet, self.oeb)[0]) page_sheet.data.insertRule(rule, len(page_sheet.data.cssRules)) def embed_font(self, style): from calibre.ebooks.oeb.polish.embed import find_matching_font, weight_as_number ff = font_families_from_style(style) if not ff: return ff = ff[0] if ff in self.warned or ff == 'inherit': return try: fonts = font_scanner.fonts_for_family(ff) except NoFonts: self.log.warn('Failed to find fonts for family:', ff, 'not embedding') self.warned.add(ff) return weight = weight_as_number(style.get('font-weight', '400')) def do_embed(f): data = font_scanner.get_font_data(f) name = f['full_name'] ext = 'otf' if f['is_otf'] else 'ttf' name = ascii_filename(name).replace(' ', '-').replace('(', '').replace(')', '') fid, href = self.oeb.manifest.generate(id='font', href='fonts/%s.%s'%(name, ext)) item = self.oeb.manifest.add(fid, href, guess_type('dummy.'+ext)[0], data=data) item.unload_data_from_memory() page_sheet = self.get_page_sheet() href = page_sheet.relhref(item.href) css = '''@font-face {{ font-family: "{}"; font-weight: {}; font-style: {}; font-stretch: {}; src: url({}) }}'''.format( f['font-family'], f['font-weight'], f['font-style'], f['font-stretch'], href) sheet = self.parser.parseString(css, validate=False) page_sheet.data.insertRule(sheet.cssRules[0], len(page_sheet.data.cssRules)) return find_font_face_rules(sheet, self.oeb)[0] for f in fonts: if f['weight'] == weight and f['font-style'] == 
style.get('font-style', 'normal') and f['font-stretch'] == style.get('font-stretch', 'normal'): self.log('Embedding font {} from {}'.format(f['full_name'], f['path'])) return do_embed(f) try: f = find_matching_font(fonts, style.get('font-weight', 'normal'), style.get('font-style', 'normal'), style.get('font-stretch', 'normal')) except Exception: if ff not in self.warned2: self.log.exception('Failed to find a matching font for family', ff, 'not embedding') self.warned2.add(ff) return self.log('Embedding font {} from {}'.format(f['full_name'], f['path'])) return do_embed(f)
10,391
Python
.py
218
36.807339
155
0.583366
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,424
flatcss.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/flatcss.py
''' CSS flattening transform. ''' __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>' import math import numbers import operator import re from collections import defaultdict from xml.dom import SyntaxErr import css_parser from css_parser.css import Property from lxml import etree from calibre import guess_type from calibre.ebooks import unit_convert from calibre.ebooks.oeb.base import CSS_MIME, OEB_STYLES, SVG, SVG_NS, XHTML, XHTML_NS, XPath, barename, css_text, namespace from calibre.ebooks.oeb.stylizer import Stylizer from calibre.utils.filenames import ascii_filename, ascii_text from calibre.utils.icu import numeric_sort_key from polyglot.builtins import iteritems, string_or_bytes COLLAPSE = re.compile(r'[ \t\r\n\v]+') STRIPNUM = re.compile(r'[-0-9]+$') def asfloat(value, default): if not isinstance(value, numbers.Number): value = default return float(value) class KeyMapper: def __init__(self, sbase, dbase, dkey): self.sbase = float(sbase) self.dprop = [(self.relate(x, dbase), float(x)) for x in dkey] self.cache = {} @staticmethod def relate(size, base): if size == 0: return base size = float(size) base = float(base) if abs(size - base) < 0.1: return 0 sign = -1 if size < base else 1 endp = 0 if size < base else 36 diff = (abs(base - size) * 3) + ((36 - size) / 100) logb = abs(base - endp) if logb == 1.0: logb = 1.1 try: result = sign * math.log(diff, logb) except ValueError: if diff < 0: # Size is both very large and close to base return 0 if logb == 0: logb = 1e-6 if diff == 0: diff = 1e-6 result = sign * math.log(diff, logb) return result def __getitem__(self, ssize): ssize = asfloat(ssize, 0) if ssize in self.cache: return self.cache[ssize] dsize = self.map(ssize) self.cache[ssize] = dsize return dsize def map(self, ssize): sbase = self.sbase prop = self.relate(ssize, sbase) diff = [(abs(prop - p), s) for p, s in self.dprop] dsize = min(diff)[1] return dsize class ScaleMapper: def __init__(self, sbase, dbase): 
self.dscale = float(dbase) / float(sbase) def __getitem__(self, ssize): ssize = asfloat(ssize, 0) dsize = ssize * self.dscale return dsize class NullMapper: def __init__(self): pass def __getitem__(self, ssize): return ssize def FontMapper(sbase=None, dbase=None, dkey=None): if sbase and dbase and dkey: return KeyMapper(sbase, dbase, dkey) elif sbase and dbase: return ScaleMapper(sbase, dbase) else: return NullMapper() class EmbedFontsCSSRules: def __init__(self, body_font_family, rules): self.body_font_family, self.rules = body_font_family, rules self.href = None def __call__(self, oeb): if not self.body_font_family: return None if not self.href: iid, href = oeb.manifest.generate('page_styles', 'page_styles.css') rules = [css_text(x) for x in self.rules] rules = '\n\n'.join(rules) sheet = css_parser.parseString(rules, validate=False) self.href = oeb.manifest.add(iid, href, guess_type(href)[0], data=sheet).href return self.href class CSSFlattener: def __init__(self, fbase=None, fkey=None, lineh=None, unfloat=False, untable=False, page_break_on_body=False, specializer=None, transform_css_rules=()): self.fbase = fbase self.transform_css_rules = transform_css_rules if self.transform_css_rules: from calibre.ebooks.css_transform_rules import compile_rules self.transform_css_rules = compile_rules(self.transform_css_rules) self.fkey = fkey self.lineh = lineh self.unfloat = unfloat self.untable = untable self.specializer = specializer self.page_break_on_body = page_break_on_body @classmethod def config(cls, cfg): return cfg @classmethod def generate(cls, opts): return cls() def __call__(self, oeb, context): oeb.logger.info('Flattening CSS and remapping font sizes...') self.context = self.opts = context self.oeb = oeb self.items = list(self.oeb.spine) titlepage = self.oeb.guide.get('titlepage') if titlepage is not None: titlepage = titlepage.item if titlepage is not None and titlepage not in self.items: self.items.append(titlepage) epub3_nav = None if getattr(self.opts, 
'epub3_nav_href', None): epub3_nav = self.oeb.manifest.hrefs.get(self.opts.epub3_nav_href) if epub3_nav is not None and epub3_nav not in self.items: self.items.append(epub3_nav) self.filter_css = frozenset() if self.opts.filter_css: try: self.filter_css = {x.strip().lower() for x in self.opts.filter_css.split(',')} except: self.oeb.log.warning('Failed to parse filter_css, ignoring') else: from calibre.ebooks.oeb.normalize_css import normalize_filter_css self.filter_css = frozenset(normalize_filter_css(self.filter_css)) self.oeb.log.debug('Filtering CSS properties: %s'% ', '.join(self.filter_css)) for item in oeb.manifest.values(): # Make all links to resources absolute, as these sheets will be # consolidated into a single stylesheet at the root of the document if item.media_type in OEB_STYLES: css_parser.replaceUrls(item.data, item.abshref, ignoreImportRules=True) self.body_font_family, self.embed_font_rules = self.get_embed_font_info( self.opts.embed_font_family) # Store for use in output plugins/transforms that generate content, # like the AZW3 output inline ToC. 
self.oeb.store_embed_font_rules = EmbedFontsCSSRules(self.body_font_family, self.embed_font_rules) self.stylize_spine() self.sbase = self.baseline_spine() if self.fbase else None self.fmap = FontMapper(self.sbase, self.fbase, self.fkey) self.flatten_spine() if epub3_nav is not None: self.opts.epub3_nav_parsed = epub3_nav.data self.store_page_margins() def store_page_margins(self): self.opts._stored_page_margins = {} for item, stylizer in iteritems(self.stylizers): margins = self.opts._stored_page_margins[item.href] = {} for prop, val in stylizer.page_rule.items(): p, w = prop.partition('-')[::2] if p == 'margin': margins[w] = unit_convert( val, stylizer.profile.width_pts, stylizer.body_font_size, stylizer.profile.dpi, body_font_size=stylizer.body_font_size) def get_embed_font_info(self, family, failure_critical=True): efi = [] body_font_family = None if not family: return body_font_family, efi from calibre.utils.fonts.scanner import NoFonts, font_scanner from calibre.utils.fonts.utils import panose_to_css_generic_family try: faces = font_scanner.fonts_for_family(family) except NoFonts: msg = ('No embeddable fonts found for family: %r'%family) if failure_critical: raise ValueError(msg) self.oeb.log.warn(msg) return body_font_family, efi if not faces: msg = ('No embeddable fonts found for family: %r'%family) if failure_critical: raise ValueError(msg) self.oeb.log.warn(msg) return body_font_family, efi from calibre.ebooks.oeb.polish.utils import OEB_FONTS for i, font in enumerate(faces): ext = 'otf' if font['is_otf'] else 'ttf' font_data = font_scanner.get_font_data(font) for x in self.oeb.manifest: if x.media_type in OEB_FONTS: matches = x.data == font_data x.unload_data_from_memory() if matches: item = x href = item.href break else: fid, href = self.oeb.manifest.generate(id='font', href='fonts/%s.%s'%(ascii_filename(font['full_name']).replace(' ', '-'), ext)) item = self.oeb.manifest.add(fid, href, guess_type('dummy.'+ext)[0], data=font_data) 
item.unload_data_from_memory() cfont = { 'font-family': '"%s"'%font['font-family'], 'src': 'url(%s)'%item.href, } if i == 0: generic_family = panose_to_css_generic_family(font['panose']) body_font_family = "'%s',%s"%(font['font-family'], generic_family) self.oeb.log('Embedding font: %s'%font['font-family']) for k in ('font-weight', 'font-style', 'font-stretch'): if font[k] != 'normal': cfont[k] = font[k] rule = '@font-face { %s }'%('; '.join('%s:%s'%(k, v) for k, v in iteritems(cfont))) rule = css_parser.parseString(rule) efi.append(rule) return body_font_family, efi def stylize_spine(self): self.stylizers = {} profile = self.context.source css = '' for item in self.items: html = item.data body = html.find(XHTML('body')) if 'style' in html.attrib: b = body.attrib.get('style', '') body.set('style', html.get('style') + ';' + b) del html.attrib['style'] bs = body.get('style', '').split(';') bs.append('margin-top: 0pt') bs.append('margin-bottom: 0pt') if float(self.context.margin_left) >= 0: bs.append('margin-left : %gpt'% float(self.context.margin_left)) if float(self.context.margin_right) >= 0: bs.append('margin-right : %gpt'% float(self.context.margin_right)) bs.extend(['padding-left: 0pt', 'padding-right: 0pt']) if self.page_break_on_body: bs.extend(['page-break-before: always']) if self.context.change_justification != 'original': bs.append('text-align: '+ self.context.change_justification) if self.body_font_family: bs.append('font-family: '+self.body_font_family) body.set('style', '; '.join(bs)) stylizer = Stylizer(html, item.href, self.oeb, self.context, profile, user_css=self.context.extra_css, extra_css=css) self.stylizers[item] = stylizer def baseline_node(self, node, stylizer, sizes, csize): csize = stylizer.style(node)['font-size'] if node.text: sizes[csize] += len(COLLAPSE.sub(' ', node.text)) for child in node: self.baseline_node(child, stylizer, sizes, csize) if child.tail: sizes[csize] += len(COLLAPSE.sub(' ', child.tail)) def baseline_spine(self): sizes 
= defaultdict(float) for item in self.items: html = item.data stylizer = self.stylizers[item] body = html.find(XHTML('body')) fsize = self.context.source.fbase self.baseline_node(body, stylizer, sizes, fsize) try: sbase = max(list(sizes.items()), key=operator.itemgetter(1))[0] except: sbase = 12.0 self.oeb.logger.info( "Source base font size is %0.05fpt" % sbase) return sbase def clean_edges(self, cssdict, style, fsize): slineh = self.sbase * 1.26 dlineh = self.lineh for kind in ('margin', 'padding'): for edge in ('bottom', 'top'): property = f"{kind}-{edge}" if property not in cssdict: continue if '%' in cssdict[property]: continue value = style[property] if value == 0 or not isinstance(value, numbers.Number): continue if value <= slineh: cssdict[property] = "%0.5fem" % (dlineh / fsize) else: try: value = round(value / slineh) * dlineh except: self.oeb.logger.warning( 'Invalid length:', value) value = 0.0 cssdict[property] = "%0.5fem" % (value / fsize) def flatten_node(self, node, stylizer, names, styles, pseudo_styles, psize, item_id, recurse=True): if not isinstance(node.tag, string_or_bytes) or namespace(node.tag) not in (XHTML_NS, SVG_NS): return tag = barename(node.tag) style = stylizer.style(node) cssdict = style.cssdict() try: font_size = style['font-size'] except: font_size = self.sbase if self.sbase is not None else \ self.context.source.fbase if tag == 'body' and isinstance(font_size, numbers.Number): stylizer.body_font_size = font_size if 'align' in node.attrib: if tag != 'img': cssdict['text-align'] = node.attrib['align'] if cssdict['text-align'] == 'center': # align=center causes tables to be center aligned, # which text-align does not. And the ever trustworthy Word # uses this construct in its HTML output. 
See # https://bugs.launchpad.net/bugs/1569583 if tag == 'table': if 'margin-left' not in cssdict and 'margin-right' not in cssdict: cssdict['margin-left'] = cssdict['margin-right'] = 'auto' else: for table in node.iterchildren(XHTML("table")): ts = stylizer.style(table) if ts.get('margin-left') is None and ts.get('margin-right') is None: ts.set('margin-left', 'auto') ts.set('margin-right', 'auto') else: val = node.attrib['align'] if val in ('middle', 'bottom', 'top'): cssdict['vertical-align'] = val elif val in ('left', 'right'): cssdict['float'] = val del node.attrib['align'] if 'valign' in node.attrib and tag == 'td': if cssdict.get('vertical-align') == 'inherit': cssdict['vertical-align'] = node.attrib['valign'] del node.attrib['valign'] if node.tag == XHTML('font'): tags = ['descendant::h:%s'%x for x in ('p', 'div', 'table', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ol', 'ul', 'dl', 'blockquote')] tag = 'div' if XPath('|'.join(tags))(node) else 'span' node.tag = XHTML(tag) if 'size' in node.attrib: def force_int(raw): return int(re.search(r'([0-9+-]+)', raw).group(1)) size = node.attrib['size'].strip() if size: fnums = self.context.source.fnums if size[0] in ('+', '-'): # Oh, the warcrimes try: esize = 3 + force_int(size) except: esize = 3 if esize < 1: esize = 1 if esize > 7: esize = 7 font_size = fnums[esize] else: try: font_size = fnums[force_int(size)] except: font_size = fnums[3] cssdict['font-size'] = '%.1fpt'%font_size del node.attrib['size'] if 'face' in node.attrib: cssdict['font-family'] = node.attrib['face'] del node.attrib['face'] if 'color' in node.attrib: try: cssdict['color'] = Property('color', node.attrib['color']).value except (ValueError, SyntaxErr): pass del node.attrib['color'] if 'bgcolor' in node.attrib: try: cssdict['background-color'] = Property('background-color', node.attrib['bgcolor']).value except (ValueError, SyntaxErr): pass del node.attrib['bgcolor'] if tag == 'ol' and 'type' in node.attrib: del node.attrib['type'] if 
cssdict.get('font-weight', '').lower() == 'medium': cssdict['font-weight'] = 'normal' # ADE chokes on font-weight medium fsize = font_size is_drop_cap = (cssdict.get('float', None) == 'left' and 'font-size' in cssdict and len(node) == 0 and node.text and ( len(node.text) == 1 or (len(node.text) == 2 and 0x2000 <= ord(node.text[0]) <= 0x206f))) # Detect drop caps generated by the docx input plugin if node.tag and node.tag.endswith('}p') and len(node) == 0 and node.text and len(node.text.strip()) == 1 and \ not node.tail and 'line-height' in cssdict and 'font-size' in cssdict: dp = node.getparent() if dp.tag and dp.tag.endswith('}div') and len(dp) == 1 and not dp.text: if stylizer.style(dp).cssdict().get('float', None) == 'left': is_drop_cap = True if style.viewport_relative_font_size: cssdict['font-size'] = style.viewport_relative_font_size elif not self.context.disable_font_rescaling and not is_drop_cap: _sbase = self.sbase if self.sbase is not None else \ self.context.source.fbase dyn_rescale = node.attrib.pop('data-calibre-rescale', None) if dyn_rescale is not None: try: dyn_rescale = float(dyn_rescale) / 100 except Exception: dyn_rescale = 1 fsize = self.fmap[_sbase] fsize *= dyn_rescale cssdict['font-size'] = '%0.5fem'%(fsize/psize) psize = fsize elif 'font-size' in cssdict or tag == 'body': fsize = self.fmap[font_size] try: cssdict['font-size'] = "%0.5fem" % (fsize / psize) except ZeroDivisionError: cssdict['font-size'] = '%.1fpt'%fsize psize = fsize try: minlh = self.context.minimum_line_height / 100. 
slh = style['line-height'] if not is_drop_cap and isinstance(slh, numbers.Number) and slh < minlh * fsize: cssdict['line-height'] = str(minlh) except Exception: self.oeb.logger.exception('Failed to set minimum line-height') if cssdict: for x in self.filter_css: popval = cssdict.pop(x, None) if self.body_font_family and popval and x == 'font-family' \ and popval.partition(',')[0][1:-1] == self.body_font_family.partition(',')[0][1:-1]: cssdict[x] = popval if cssdict: if self.lineh and self.fbase and tag not in ('body', 'html'): self.clean_edges(cssdict, style, psize) if 'display' in cssdict and cssdict['display'] == 'in-line': cssdict['display'] = 'inline' if self.unfloat and 'float' in cssdict \ and cssdict.get('display', 'none') != 'none': del cssdict['display'] if self.untable and 'display' in cssdict \ and cssdict['display'].startswith('table'): display = cssdict['display'] if display == 'table-cell': cssdict['display'] = 'inline' else: cssdict['display'] = 'block' if 'vertical-align' in cssdict \ and cssdict['vertical-align'] == 'sup': cssdict['vertical-align'] = 'super' if self.lineh and 'line-height' not in cssdict and tag != 'html': lineh = self.lineh / psize cssdict['line-height'] = "%0.5fem" % lineh if (self.context.remove_paragraph_spacing or self.context.insert_blank_line) and tag in ('p', 'div'): if item_id != 'calibre_jacket' or self.context.output_profile.name == 'Kindle': for prop in ('margin', 'padding', 'border'): for edge in ('top', 'bottom'): cssdict['%s-%s'%(prop, edge)] = '0pt' if self.context.insert_blank_line: cssdict['margin-top'] = cssdict['margin-bottom'] = \ '%fem'%self.context.insert_blank_line_size indent_size = self.context.remove_paragraph_spacing_indent_size keep_indents = indent_size < 0.0 if (self.context.remove_paragraph_spacing and not keep_indents and cssdict.get('text-align', None) not in ('center', 'right')): cssdict['text-indent'] = "%1.1fem" % indent_size pseudo_classes = style.pseudo_classes(self.filter_css) if cssdict or 
pseudo_classes: keep_classes = set() if cssdict: items = sorted(iteritems(cssdict)) css = ';\n'.join(f'{key}: {val}' for key, val in items) classes = node.get('class', '').strip() or 'calibre' classes_list = classes.split() # lower() because otherwise if the document uses the same class # name with different case, both cases will apply, leading # to incorrect results. klass = ascii_text(STRIPNUM.sub('', classes_list[0])).lower().strip().replace(' ', '_') if css in styles: match = styles[css] else: match = klass + str(names[klass] or '') styles[css] = match names[klass] += 1 node.attrib['class'] = match keep_classes.add(match) for psel, cssdict in iteritems(pseudo_classes): items = sorted(iteritems(cssdict)) css = ';\n'.join(f'{key}: {val}' for key, val in items) pstyles = pseudo_styles[psel] if css in pstyles: match = pstyles[css] else: # We have to use a different class for each psel as # otherwise you can have incorrect styles for a situation # like: a:hover { color: red } a:link { color: blue } a.x:hover { color: green } # If the pcalibre class for a:hover and a:link is the same, # then the class attribute for a.x tags will contain both # that class and the class for a.x:hover, which is wrong. 
klass = 'pcalibre' match = klass + str(names[klass] or '') pstyles[css] = match names[klass] += 1 keep_classes.add(match) node.attrib['class'] = ' '.join(keep_classes) elif 'class' in node.attrib: del node.attrib['class'] if 'style' in node.attrib: del node.attrib['style'] if recurse: for child in node: self.flatten_node(child, stylizer, names, styles, pseudo_styles, psize, item_id) def flatten_head(self, item, href, global_href): html = item.data head = html.find(XHTML('head')) def safe_lower(x): try: x = x.lower() except Exception: pass return x for node in html.xpath('//*[local-name()="style" or local-name()="link"]'): if node.tag == XHTML('link') \ and safe_lower(node.get('rel', 'stylesheet')) == 'stylesheet' \ and safe_lower(node.get('type', CSS_MIME)) in OEB_STYLES: node.getparent().remove(node) elif node.tag in (XHTML('style'), SVG('style')) \ and node.get('type', CSS_MIME) in OEB_STYLES: node.getparent().remove(node) href = item.relhref(href) l = etree.SubElement(head, XHTML('link'), rel='stylesheet', type=CSS_MIME, href=href) l.tail='\n' if global_href: href = item.relhref(global_href) l = etree.SubElement(head, XHTML('link'), rel='stylesheet', type=CSS_MIME, href=href) l.tail = '\n' def replace_css(self, css): manifest = self.oeb.manifest for item in manifest.values(): if item.media_type in OEB_STYLES: manifest.remove(item) id, href = manifest.generate('css', 'stylesheet.css') sheet = css_parser.parseString(css, validate=False) if self.transform_css_rules: from calibre.ebooks.css_transform_rules import transform_sheet transform_sheet(self.transform_css_rules, sheet) item = manifest.add(id, href, CSS_MIME, data=sheet) self.oeb.manifest.main_stylesheet = item return href def collect_global_css(self): def rules_in(sheets): for s in sheets: yield from s.cssRules def unique_font_face_rules(*rules): seen = set() for rule in rules: try: ff = rule.style.getPropertyValue('font-family') src = rule.style.getPropertyValue('src') w = 
rule.style.getPropertyValue('font-weight') s = rule.style.getPropertyValue('font-style') except Exception: yield rule else: key = ff, src, w, s if key not in seen: seen.add(key) yield rule global_css = defaultdict(list) for item in self.items: stylizer = self.stylizers[item] if float(self.context.margin_top) >= 0: stylizer.page_rule['margin-top'] = '%gpt'%\ float(self.context.margin_top) if float(self.context.margin_bottom) >= 0: stylizer.page_rule['margin-bottom'] = '%gpt'%\ float(self.context.margin_bottom) items = sorted(stylizer.page_rule.items()) css = ';\n'.join(f"{key}: {val}" for key, val in items) css = ('@page {\n%s\n}\n'%css) if items else '' rules = [css_text(r) for r in unique_font_face_rules(*stylizer.font_face_rules, *rules_in(self.embed_font_rules))] raw = '\n\n'.join(rules) css += '\n\n' + raw global_css[css].append(item) gc_map = {} manifest = self.oeb.manifest for css in global_css: href = None if css.strip(): id_, href = manifest.generate('page_css', 'page_styles.css') sheet = css_parser.parseString(css, validate=False) if self.transform_css_rules: from calibre.ebooks.css_transform_rules import transform_sheet transform_sheet(self.transform_css_rules, sheet) manifest.add(id_, href, CSS_MIME, data=sheet) gc_map[css] = href ans = {} for css, items in iteritems(global_css): for item in items: ans[item] = gc_map[css] return ans def flatten_spine(self): names = defaultdict(int) styles, pseudo_styles = {}, defaultdict(dict) for item in self.items: html = item.data stylizer = self.stylizers[item] if self.specializer is not None: self.specializer(item, stylizer) fsize = self.context.dest.fbase self.flatten_node(html, stylizer, names, styles, pseudo_styles, fsize, item.id, recurse=False) self.flatten_node(html.find(XHTML('body')), stylizer, names, styles, pseudo_styles, fsize, item.id) items = sorted(((key, val) for (val, key) in iteritems(styles)), key=lambda x:numeric_sort_key(x[0])) # :hover must come after link and :active must come after :hover 
psels = sorted(pseudo_styles, key=lambda x : {'hover':1, 'active':2}.get(x, 0)) for psel in psels: styles = pseudo_styles[psel] if not styles: continue x = sorted(((k+':'+psel, v) for v, k in iteritems(styles))) items.extend(x) css = ''.join(f".{key} {{\n{val};\n}}\n\n" for key, val in items) href = self.replace_css(css) global_css = self.collect_global_css() for item in self.items: stylizer = self.stylizers[item] self.flatten_head(item, href, global_css[item])
30,110
Python
.py
647
32.933539
139
0.533408
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,425
unsmarten.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/unsmarten.py
__license__ = 'GPL 3' __copyright__ = '2011, John Schember <john@nachtimwald.com>' __docformat__ = 'restructuredtext en' from calibre.ebooks.oeb.base import OEB_DOCS, XPath, barename from calibre.utils.unsmarten import unsmarten_text class UnsmartenPunctuation: def __init__(self): self.html_tags = XPath('descendant::h:*') def unsmarten(self, root): for x in self.html_tags(root): if not barename(x.tag) == 'pre': if getattr(x, 'text', None): x.text = unsmarten_text(x.text) if getattr(x, 'tail', None) and x.tail: x.tail = unsmarten_text(x.tail) def __call__(self, oeb, context): bx = XPath('//h:body') for x in oeb.manifest.items: if x.media_type in OEB_DOCS: for body in bx(x.data): self.unsmarten(body)
890
Python
.py
21
32.714286
61
0.579374
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,426
metadata.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/metadata.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os import re from calibre import guess_type from calibre.utils.date import isoformat, now from polyglot.builtins import iteritems def meta_info_to_oeb_metadata(mi, m, log, override_input_metadata=False): from calibre.ebooks.oeb.base import OPF if not mi.is_null('title'): m.clear('title') m.add('title', mi.title) if mi.title_sort: if not m.title: m.add('title', mi.title_sort) m.clear('title_sort') m.add('title_sort', mi.title_sort) if not mi.is_null('authors'): m.filter('creator', lambda x : x.role.lower() in ['aut', '']) for a in mi.authors: attrib = {'role':'aut'} if mi.author_sort: attrib[OPF('file-as')] = mi.author_sort m.add('creator', a, attrib=attrib) if not mi.is_null('book_producer'): m.filter('contributor', lambda x : x.role.lower() == 'bkp') m.add('contributor', mi.book_producer, role='bkp') elif override_input_metadata: m.filter('contributor', lambda x : x.role.lower() == 'bkp') if not mi.is_null('comments'): m.clear('description') m.add('description', mi.comments) elif override_input_metadata: m.clear('description') if not mi.is_null('publisher'): m.clear('publisher') m.add('publisher', mi.publisher) elif override_input_metadata: m.clear('publisher') if not mi.is_null('series'): m.clear('series') m.add('series', mi.series) elif override_input_metadata: m.clear('series') identifiers = mi.get_identifiers() set_isbn = False for typ, val in iteritems(identifiers): has = False if typ.lower() == 'isbn': set_isbn = True for x in m.identifier: if x.scheme.lower() == typ.lower(): x.content = val has = True if not has: m.add('identifier', val, scheme=typ.upper()) if override_input_metadata and not set_isbn: m.filter('identifier', lambda x: x.scheme.lower() == 'isbn') if not mi.is_null('languages'): m.clear('language') for lang in mi.languages: if lang and lang.lower() not in ('und', ''): m.add('language', lang) if 
not mi.is_null('series_index'): m.clear('series_index') m.add('series_index', mi.format_series_index()) elif override_input_metadata: m.clear('series_index') if not mi.is_null('rating'): m.clear('rating') m.add('rating', '%.2f'%mi.rating) elif override_input_metadata: m.clear('rating') if not mi.is_null('tags'): m.clear('subject') for t in mi.tags: m.add('subject', t) elif override_input_metadata: m.clear('subject') if not mi.is_null('pubdate'): m.clear('date') m.add('date', isoformat(mi.pubdate)) if not mi.is_null('timestamp'): m.clear('timestamp') m.add('timestamp', isoformat(mi.timestamp)) if not mi.is_null('rights'): m.clear('rights') m.add('rights', mi.rights) if not mi.is_null('publication_type'): m.clear('publication_type') m.add('publication_type', mi.publication_type) if not m.timestamp: m.add('timestamp', isoformat(now())) class MergeMetadata: 'Merge in user metadata, including cover' def __call__(self, oeb, mi, opts, override_input_metadata=False): self.oeb, self.log = oeb, oeb.log m = self.oeb.metadata self.log('Merging user specified metadata...') meta_info_to_oeb_metadata(mi, m, oeb.log, override_input_metadata=override_input_metadata) cover_id = self.set_cover(mi, opts.prefer_metadata_cover) m.clear('cover') if cover_id is not None: m.add('cover', cover_id) if mi.uuid is not None: m.filter('identifier', lambda x:x.id=='uuid_id') self.oeb.metadata.add('identifier', mi.uuid, id='uuid_id', scheme='uuid') self.oeb.uid = self.oeb.metadata.identifier[-1] if mi.application_id is not None: m.filter('identifier', lambda x:x.scheme=='calibre') self.oeb.metadata.add('identifier', mi.application_id, scheme='calibre') def set_cover(self, mi, prefer_metadata_cover): cdata, ext = b'', 'jpg' if mi.cover and os.access(mi.cover, os.R_OK): with open(mi.cover, 'rb') as f: cdata = f.read() ext = mi.cover.rpartition('.')[-1].lower().strip() elif mi.cover_data and mi.cover_data[-1]: cdata = mi.cover_data[1] ext = mi.cover_data[0] if ext not in ('png', 'jpg', 'jpeg'): ext = 
'jpg' id = old_cover = None if 'cover' in self.oeb.guide: old_cover = self.oeb.guide['cover'] if prefer_metadata_cover and old_cover is not None: cdata = b'' if cdata: self.oeb.guide.remove('cover') self.oeb.guide.remove('titlepage') elif self.oeb.plumber_output_format in {'mobi', 'azw3'} and old_cover is not None: # The amazon formats dont support html cover pages, so remove them # even if no cover was specified. self.oeb.guide.remove('titlepage') do_remove_old_cover = False if old_cover is not None: if old_cover.href in self.oeb.manifest.hrefs: item = self.oeb.manifest.hrefs[old_cover.href] if not cdata: return item.id do_remove_old_cover = True elif not cdata: id = self.oeb.manifest.generate(id='cover')[0] self.oeb.manifest.add(id, old_cover.href, 'image/jpeg') return id new_cover_item = None if cdata: id, href = self.oeb.manifest.generate('cover', 'cover.'+ext) new_cover_item = self.oeb.manifest.add(id, href, guess_type('cover.'+ext)[0], data=cdata) self.oeb.guide.add('cover', 'Cover', href) if do_remove_old_cover: self.remove_old_cover(item, new_cover_item.href) return id def remove_old_cover(self, cover_item, new_cover_href=None): from lxml import etree from calibre.ebooks.oeb.base import XLINK, XPath self.oeb.manifest.remove(cover_item) # Remove any references to the cover in the HTML affected_items = set() xp = XPath('//h:img[@src]|//svg:image[@xl:href]') for i, item in enumerate(self.oeb.spine): try: images = xp(item.data) except Exception: images = () removed = False for img in images: href = img.get('src') or img.get(XLINK('href')) try: href = item.abshref(href) except Exception: continue # Invalid URL, ignore if href == cover_item.href: if new_cover_href is not None: replacement_href = item.relhref(new_cover_href) attr = 'src' if img.tag.endswith('img') else XLINK('href') img.set(attr, replacement_href) else: p = img.getparent() if p.tag.endswith('}svg'): p.getparent().remove(p) else: p.remove(img) removed = True if removed: affected_items.add(item) # 
Check if the resulting HTML has no content, if so remove it for item in affected_items: body = XPath('//h:body')(item.data) if body: text = etree.tostring(body[0], method='text', encoding='unicode') else: text = '' text = re.sub(r'\s+', '', text) if not text and not XPath('//h:img|//svg:svg')(item.data): self.log('Removing %s as it is a wrapper around' ' the cover image'%item.href) self.oeb.spine.remove(item) self.oeb.manifest.remove(item) self.oeb.guide.remove_by_href(item.href)
8,541
Python
.py
204
30.779412
101
0.55426
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,427
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en'
146
Python
.py
4
35
58
0.678571
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,428
linearize_tables.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/linearize_tables.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' from calibre.ebooks.oeb.base import OEB_DOCS, XHTML, XPath class LinearizeTables: def linearize(self, root): for x in XPath('//h:table|//h:td|//h:tr|//h:th|//h:caption|' '//h:tbody|//h:tfoot|//h:thead|//h:colgroup|//h:col')(root): x.tag = XHTML('div') for attr in ('style', 'font', 'valign', 'colspan', 'width', 'height', 'rowspan', 'summary', 'align', 'cellspacing', 'cellpadding', 'frames', 'rules', 'border'): if attr in x.attrib: del x.attrib[attr] def __call__(self, oeb, context): for x in oeb.manifest.items: if x.media_type in OEB_DOCS: self.linearize(x.data)
947
Python
.py
21
32.904762
76
0.515778
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,429
data_url.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/data_url.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>' import re from calibre.ebooks.oeb.base import XPath, urlunquote from polyglot.builtins import as_bytes class DataURL: def __call__(self, oeb, opts): from calibre.utils.imghdr import what self.log = oeb.log attr_path = XPath('//h:img[@src]') for item in oeb.spine: root = item.data if not hasattr(root, 'xpath'): continue for img in attr_path(root): raw = img.get('src', '') if not raw.startswith('data:'): continue header, data = raw.partition(',')[0::2] if not header.startswith('data:image/') or not data: continue if ';base64' in header: data = re.sub(r'\s+', '', data) from polyglot.binary import from_base64_bytes try: data = from_base64_bytes(data) except Exception: self.log.error('Found invalid base64 encoded data URI, ignoring it') continue else: data = urlunquote(data) data = as_bytes(data) fmt = what(None, data) if not fmt: self.log.warn('Image encoded as data URL has unknown format, ignoring') continue img.set('src', item.relhref(self.convert_image_data_uri(data, fmt, oeb))) def convert_image_data_uri(self, data, fmt, oeb): self.log('Found image encoded as data URI converting it to normal image') from calibre import guess_type item_id, item_href = oeb.manifest.generate('data-url-image', 'data-url-image.' + fmt) oeb.manifest.add(item_id, item_href, guess_type(item_href)[0], data=data) return item_href
1,984
Python
.py
44
31.545455
93
0.538302
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,430
jacket.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/jacket.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os import re import sys from contextlib import suppress from string import Formatter from xml.sax.saxutils import escape from calibre import guess_type, prepare_string_for_xml from calibre.constants import iswindows from calibre.ebooks.chardet import strip_encoding_declarations from calibre.ebooks.metadata import fmt_sidx, rating_to_stars from calibre.ebooks.metadata.sources.identify import urls_from_identifiers from calibre.ebooks.oeb.base import XHTML, XHTML_NS, XPath, urldefrag, urlnormalize, xml2text from calibre.library.comments import comments_to_html, markdown from calibre.utils.config import tweaks from calibre.utils.date import as_local_time, format_date, is_date_undefined from calibre.utils.icu import sort_key from calibre.utils.localization import ngettext from calibre.utils.resources import get_path as P JACKET_XPATH = '//h:meta[@name="calibre-content" and @content="jacket"]' class SafeFormatter(Formatter): def get_value(self, *args, **kwargs): try: return Formatter.get_value(self, *args, **kwargs) except KeyError: return '' class Base: def remove_images(self, item, limit=1): path = XPath('//h:img[@src]') removed = 0 for img in path(item.data): if removed >= limit: break href = item.abshref(img.get('src')) image = self.oeb.manifest.hrefs.get(href) if image is None: href = urlnormalize(href) image = self.oeb.manifest.hrefs.get(href) if image is not None: self.oeb.manifest.remove(image) self.oeb.guide.remove_by_href(href) img.getparent().remove(img) removed += 1 return removed class RemoveFirstImage(Base): def remove_first_image(self): deleted_item = None for item in self.oeb.spine: if XPath(JACKET_XPATH)(item.data): continue removed = self.remove_images(item) if removed > 0: self.log('Removed first image') body = XPath('//h:body')(item.data) if body: raw = xml2text(body[0]).strip() imgs = 
XPath('//h:img|//svg:svg')(item.data) if not raw and not imgs: self.log('Removing %s as it has no content'%item.href) self.oeb.manifest.remove(item) deleted_item = item break else: self.log.warn('Could not find first image to remove') if deleted_item is not None: for item in list(self.oeb.toc): href = urldefrag(item.href)[0] if href == deleted_item.href: self.oeb.toc.remove(item) self.oeb.guide.remove_by_href(deleted_item.href) def __call__(self, oeb, opts, metadata): ''' Add metadata in jacket.xhtml if specified in opts If not specified, remove previous jacket instance ''' self.oeb, self.opts, self.log = oeb, opts, oeb.log if opts.remove_first_image: self.remove_first_image() class Jacket(Base): ''' Book jacket manipulation. Remove first image and insert comments at start of book. ''' def insert_metadata(self, mi): self.log('Inserting metadata into book...') try: tags = list(map(str, self.oeb.metadata.subject)) except Exception: tags = [] try: comments = str(self.oeb.metadata.description[0]) except: comments = '' try: title = str(self.oeb.metadata.title[0]) except: title = _('Unknown') try: authors = list(map(str, self.oeb.metadata.creator)) except: authors = [_('Unknown')] root = render_jacket(mi, self.opts.output_profile, alt_title=title, alt_tags=tags, alt_authors=authors, alt_comments=comments, rescale_fonts=True, smarten_punctuation=self.opts.smarten_punctuation) id, href = self.oeb.manifest.generate('calibre_jacket', 'jacket.xhtml') jacket = self.oeb.manifest.add(id, href, guess_type(href)[0], data=root) self.oeb.spine.insert(0, jacket, True) self.oeb.inserted_metadata_jacket = jacket for img, path in referenced_images(root): self.oeb.log('Embedding referenced image %s into jacket' % path) ext = path.rpartition('.')[-1].lower() item_id, href = self.oeb.manifest.generate('jacket_image', 'jacket_img.'+ext) with open(path, 'rb') as f: item = self.oeb.manifest.add(item_id, href, guess_type(href)[0], data=f.read()) item.unload_data_from_memory() img.set('src', 
jacket.relhref(item.href)) def remove_existing_jacket(self): for x in self.oeb.spine[:4]: if XPath(JACKET_XPATH)(x.data): self.remove_images(x, limit=sys.maxsize) self.oeb.manifest.remove(x) self.log('Removed existing jacket') break def __call__(self, oeb, opts, metadata): ''' Add metadata in jacket.xhtml if specified in opts If not specified, remove previous jacket instance ''' self.oeb, self.opts, self.log = oeb, opts, oeb.log self.remove_existing_jacket() if opts.insert_metadata: self.insert_metadata(metadata) # Render Jacket {{{ def get_rating(rating, rchar, e_rchar): ans = '' try: num = float(rating)/2 except: return ans num = max(0, num) num = min(num, 5) if num < 1: return ans ans = ("%s%s") % (rchar * int(num), e_rchar * (5 - int(num))) return ans class Series(str): def __new__(self, series, series_index): if series and series_index is not None: roman = _('{1} of <em>{0}</em>').format( escape(series), escape(fmt_sidx(series_index, use_roman=True))) combined = _('{1} of <em>{0}</em>').format( escape(series), escape(fmt_sidx(series_index, use_roman=False))) else: combined = roman = escape(series or '') s = str.__new__(self, combined) s.roman = roman s.name = escape(series or '') s.number = escape(fmt_sidx(series_index or 1.0, use_roman=False)) s.roman_number = escape(fmt_sidx(series_index or 1.0, use_roman=True)) return s class Timestamp: def __init__(self, dt, render_template): self.dt = as_local_time(dt) self.is_date_undefined = dt is None or is_date_undefined(dt) self.default_render = '' if self.is_date_undefined else escape(format_date(self.dt, render_template)) def __repr__(self): return self.default_render __str__ = __repr__ def __bool__(self): return bool(self.default_render) def __getattr__(self, template): with suppress(Exception): if not self.is_date_undefined: return escape(format_date(self.dt, template)) return '' class Tags(str): def __new__(self, tags, output_profile): tags = [escape(x) for x in tags or ()] t = str.__new__(self, ', '.join(tags)) 
t.alphabetical = ', '.join(sorted(tags, key=sort_key)) t.tags_list = tags return t def postprocess_jacket(root, output_profile, has_data): # Post-process the generated html to strip out empty header items def extract(tag): parent = tag.getparent() idx = parent.index(tag) parent.remove(tag) if tag.tail: if idx == 0: parent.text = (parent.text or '') + tag.tail else: if idx >= len(parent): idx = -1 parent[-1].tail = (parent[-1].tail or '') + tag.tail def extract_class(cls): for tag in root.xpath('//*[@class="_"]'.replace('_', cls)): extract(tag) for key in 'series rating tags'.split(): if not has_data[key]: extract_class('cbj_' + key) if not has_data['pubdate']: extract_class('cbj_pubdata') if output_profile.short_name != 'kindle': extract_class('cbj_kindle_banner_hr') class Attributes: def __getattr__(self, name): return 'none' class Identifiers: def __init__(self, idents): self.identifiers = idents or {} self.display = Attributes() for k in self.identifiers: setattr(self.display, k, 'initial') links = [] for x in urls_from_identifiers(self.identifiers): name, id_typ, id_val, url = (prepare_string_for_xml(e, True) for e in x) links.append(f'<a href="{url}" title="{id_typ}:{id_val}">{name}</a>') self.links = ', '.join(links) self.display.links = 'initial' if self.links else 'none' def __getattr__(self, name): return self.identifiers.get(name, '') def render_jacket(mi, output_profile, alt_title=_('Unknown'), alt_tags=[], alt_comments='', alt_publisher='', rescale_fonts=False, alt_authors=None, smarten_punctuation=False): css = P('jacket/stylesheet.css', data=True).decode('utf-8') template = P('jacket/template.xhtml', data=True).decode('utf-8') template = re.sub(r'<!--.*?-->', '', template, flags=re.DOTALL) css = re.sub(r'/\*.*?\*/', '', css, flags=re.DOTALL) try: title_str = alt_title if mi.is_null('title') else mi.title except: title_str = _('Unknown') title_str = escape(title_str) title = '<span class="title">%s</span>' % title_str series = Series(mi.series, 
mi.series_index) try: publisher = mi.publisher if not mi.is_null('publisher') else alt_publisher except: publisher = '' publisher = escape(publisher) pubdate = timestamp = None with suppress(Exception): if not is_date_undefined(mi.pubdate): pubdate = mi.pubdate with suppress(Exception): if not is_date_undefined(mi.timestamp): timestamp = mi.timestamp rating = get_rating(mi.rating, output_profile.ratings_char, output_profile.empty_ratings_char) tags = Tags((mi.tags if mi.tags else alt_tags), output_profile) comments = mi.comments if mi.comments else alt_comments comments = comments.strip() if comments: comments = comments_to_html(comments) orig = mi.authors if mi.is_null('authors'): mi.authors = list(alt_authors or (_('Unknown'),)) try: author = mi.format_authors() except: author = '' mi.authors = orig author = escape(author) has_data = {} def generate_html(comments): display = Attributes() args = dict(xmlns=XHTML_NS, title_str=title_str, identifiers=Identifiers(mi.identifiers), css=css, title=title, author=author, publisher=publisher, publisher_label=_('Publisher'), pubdate_label=_('Published'), pubdate=Timestamp(pubdate, tweaks['gui_pubdate_display_format']), series_label=ngettext('Series', 'Series', 1), series=series, rating_label=_('Rating'), rating=rating, tags_label=_('Tags'), tags=tags, timestamp=Timestamp(timestamp, tweaks['gui_timestamp_display_format']), timestamp_label=_('Date'), comments=comments, footer='', display=display, searchable_tags=' '.join(escape(t)+'ttt' for t in tags.tags_list), ) for key in mi.custom_field_keys(): m = mi.get_user_metadata(key, False) or {} try: display_name, val = mi.format_field_extended(key)[:2] dkey = key.replace('#', '_') dt = m.get('datatype') if dt == 'series': args[dkey] = Series(mi.get(key), mi.get(key + '_index')) elif dt == 'rating': args[dkey] = rating_to_stars(mi.get(key), m.get('display', {}).get('allow_half_stars', False)) elif dt == 'datetime': args[dkey] = Timestamp(mi.get(key), m.get('display', 
{}).get('date_format','dd MMM yyyy')) elif dt == 'comments': val = val or '' ctype = m.get('display', {}).get('interpret_as') or 'html' if ctype == 'long-text': val = '<pre style="white-space:pre-wrap">%s</pre>' % escape(val) elif ctype == 'short-text': val = '<span>%s</span>' % escape(val) elif ctype == 'markdown': val = markdown(val) else: val = comments_to_html(val) args[dkey] = val elif dt == 'composite': val = val or '' # if the column is marked as containing html, use it # unchanged. Otherwise treat it as a comment. if not m.get('display', {}).get('contains_html', False): val = comments_to_html(val) args[dkey] = val else: args[dkey] = escape(val) args[dkey+'_label'] = escape(display_name) setattr(display, dkey, 'none' if mi.is_null(key) else 'initial') except Exception: # if the val (custom column contents) is None, don't add to args pass if False: print("Custom column values available in jacket template:") for key in args.keys(): if key.startswith('_') and not key.endswith('_label'): print(" {}: {}".format('#' + key[1:], args[key])) # Used in the comment describing use of custom columns in templates # Don't change this unless you also change it in template.xhtml args['_genre_label'] = args.get('_genre_label', '{_genre_label}') args['_genre'] = args.get('_genre', '{_genre}') has_data['series'] = bool(series) has_data['tags'] = bool(tags) has_data['rating'] = bool(rating) has_data['pubdate'] = bool(pubdate) has_data['timestamp'] = bool(timestamp) has_data['publisher'] = bool(publisher) for k, v in has_data.items(): setattr(display, k, 'initial' if v else 'none') display.title = 'initial' if mi.identifiers: display.identifiers = 'initial' formatter = SafeFormatter() generated_html = formatter.format(template, **args) return strip_encoding_declarations(generated_html) from calibre.ebooks.oeb.polish.parsing import parse raw = generate_html(comments) if smarten_punctuation: from calibre.ebooks.conversion.preprocess import smarten_punctuation as sp raw = sp(raw) 
root = parse(raw, line_numbers=False, force_html5_parse=True) if rescale_fonts: # We ensure that the conversion pipeline will set the font sizes for # text in the jacket to the same size as the font sizes for the rest of # the text in the book. That means that as long as the jacket uses # relative font sizes (em or %), the post conversion font size will be # the same as for text in the main book. So text with size x em will # be rescaled to the same value in both the jacket and the main content. # # We cannot use data-calibre-rescale 100 on the body tag as that will just # give the body tag a font size of 1em, which is useless. for body in root.xpath('//*[local-name()="body"]'): fw = body.makeelement(XHTML('div')) fw.set('data-calibre-rescale', '100') for child in body: fw.append(child) body.append(fw) postprocess_jacket(root, output_profile, has_data) from calibre.ebooks.oeb.polish.pretty import pretty_html_tree pretty_html_tree(None, root) return root # }}} def linearize_jacket(oeb): for x in oeb.spine[:4]: if XPath(JACKET_XPATH)(x.data): for e in XPath('//h:table|//h:tr|//h:th')(x.data): e.tag = XHTML('div') for e in XPath('//h:td')(x.data): e.tag = XHTML('span') break def referenced_images(root): for img in XPath('//h:img[@src]')(root): src = img.get('src') if src.startswith('file://'): path = src[7:] if iswindows and path.startswith('/'): path = path[1:] if os.path.exists(path): yield img, path
17,034
Python
.py
391
33.214834
114
0.577835
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,431
filenames.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/filenames.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import posixpath from lxml import etree from calibre.ebooks.oeb.base import rewrite_links, urlnormalize from polyglot.urllib import urldefrag, urlparse class RenameFiles: # {{{ ''' Rename files and adjust all links pointing to them. Note that the spine and manifest are not touched by this transform. ''' def __init__(self, rename_map, renamed_items_map=None): self.rename_map = rename_map self.renamed_items_map = renamed_items_map def __call__(self, oeb, opts): import css_parser self.log = oeb.logger self.opts = opts self.oeb = oeb for item in oeb.manifest.items: self.current_item = item if etree.iselement(item.data): rewrite_links(self.current_item.data, self.url_replacer) elif hasattr(item.data, 'cssText'): css_parser.replaceUrls(item.data, self.url_replacer) if self.oeb.guide: for ref in self.oeb.guide.values(): href = urlnormalize(ref.href) href, frag = urldefrag(href) replacement = self.rename_map.get(href, None) if replacement is not None: nhref = replacement if frag: nhref += '#' + frag ref.href = nhref if self.oeb.toc: self.fix_toc_entry(self.oeb.toc) def fix_toc_entry(self, toc): if toc.href: href = urlnormalize(toc.href) href, frag = urldefrag(href) replacement = self.rename_map.get(href, None) if replacement is not None: nhref = replacement if frag: nhref = '#'.join((nhref, frag)) toc.href = nhref for x in toc: self.fix_toc_entry(x) def url_replacer(self, orig_url): url = urlnormalize(orig_url) parts = urlparse(url) if parts.scheme: # Only rewrite local URLs return orig_url path, frag = urldefrag(url) if self.renamed_items_map: orig_item = self.renamed_items_map.get(self.current_item.href, self.current_item) else: orig_item = self.current_item href = orig_item.abshref(path) replacement = self.current_item.relhref(self.rename_map.get(href, href)) if frag: replacement += '#' + frag return replacement # }}} class 
UniqueFilenames: # {{{ 'Ensure that every item in the manifest has a unique filename' def __call__(self, oeb, opts): self.log = oeb.logger self.opts = opts self.oeb = oeb self.seen_filenames = set() self.rename_map = {} for item in list(oeb.manifest.items): fname = posixpath.basename(item.href) if fname in self.seen_filenames: suffix = self.unique_suffix(fname) data = item.data base, ext = posixpath.splitext(item.href) nhref = base + suffix + ext nhref = oeb.manifest.generate(href=nhref)[1] spine_pos = item.spine_position oeb.manifest.remove(item) nitem = oeb.manifest.add(item.id, nhref, item.media_type, data=data, fallback=item.fallback) self.seen_filenames.add(posixpath.basename(nhref)) self.rename_map[item.href] = nhref if spine_pos is not None: oeb.spine.insert(spine_pos, nitem, item.linear) else: self.seen_filenames.add(fname) if self.rename_map: self.log('Found non-unique filenames, renaming to support broken' ' EPUB readers like FBReader, Aldiko and Stanza...') from pprint import pformat self.log.debug(pformat(self.rename_map)) renamer = RenameFiles(self.rename_map) renamer(oeb, opts) def unique_suffix(self, fname): base, ext = posixpath.splitext(fname) c = 0 while True: c += 1 suffix = '_u%d'%c candidate = base + suffix + ext if candidate not in self.seen_filenames: return suffix # }}} class FlatFilenames: # {{{ 'Ensure that every item in the manifest has a unique filename without subfolders.' def __call__(self, oeb, opts): self.log = oeb.logger self.opts = opts self.oeb = oeb self.rename_map = {} self.renamed_items_map = {} for item in list(oeb.manifest.items): # Flatten URL by removing directories. # Example: a/b/c/index.html -> a_b_c_index.html nhref = item.href.replace("/", "_") if item.href == nhref: # URL hasn't changed, skip item. 
continue data = item.data isp = item.spine_position nhref = oeb.manifest.generate(href=nhref)[1] if isp is not None: oeb.spine.remove(item) oeb.manifest.remove(item) nitem = oeb.manifest.add(item.id, nhref, item.media_type, data=data, fallback=item.fallback) self.rename_map[item.href] = nhref self.renamed_items_map[nhref] = item if isp is not None: oeb.spine.insert(isp, nitem, item.linear) if self.rename_map: self.log('Found non-flat filenames, renaming to support broken' ' EPUB readers like FBReader...') from pprint import pformat self.log.debug(pformat(self.rename_map)) self.log.debug(pformat(self.renamed_items_map)) renamer = RenameFiles(self.rename_map, self.renamed_items_map) renamer(oeb, opts) # }}}
6,077
Python
.py
147
29.537415
93
0.56442
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,432
guide.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/guide.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' class Clean: '''Clean up guide, leaving only known values ''' def __call__(self, oeb, opts): self.oeb, self.log, self.opts = oeb, oeb.log, opts if 'cover' not in self.oeb.guide: covers = [] for x in ('other.ms-coverimage-standard', 'coverimagestandard', 'other.ms-titleimage-standard', 'other.ms-titleimage', 'other.ms-coverimage', 'other.ms-thumbimage-standard', 'other.ms-thumbimage', 'thumbimagestandard'): if x in self.oeb.guide: href = self.oeb.guide[x].href try: item = self.oeb.manifest.hrefs[href] except KeyError: continue else: covers.append([self.oeb.guide[x], len(item.data)]) covers.sort(key=lambda x: x[1], reverse=True) if covers: ref = covers[0][0] if len(covers) > 1: self.log('Choosing %s:%s as the cover'%(ref.type, ref.href)) ref.type = 'cover' self.oeb.guide.refs['cover'] = ref if ('start' in self.oeb.guide and 'text' not in self.oeb.guide): # Prefer text to start as per the OPF 2.0 spec x = self.oeb.guide['start'] self.oeb.guide.add('text', x.title, x.href) self.oeb.guide.remove('start') for x in list(self.oeb.guide): if x.lower() not in { 'cover', 'titlepage', 'masthead', 'toc', 'title-page', 'copyright-page', 'text', 'index', 'glossary', 'acknowledgements', 'bibliography', 'colophon', 'copyright-page', 'dedication', 'epigraph', 'foreword', 'loi', 'lot', 'notes', 'preface'}: item = self.oeb.guide[x] if item.title and item.title.lower() == 'start': continue self.oeb.guide.remove(x)
2,198
Python
.py
45
33.688889
80
0.504198
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,433
rasterize.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/rasterize.py
''' SVG rasterization transform. ''' __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>' import os import re from base64 import standard_b64encode from functools import lru_cache from lxml import etree from qt.core import QBuffer, QByteArray, QColor, QImage, QIODevice, QPainter, QSvgRenderer, Qt from calibre import guess_type from calibre.ebooks.oeb.base import PNG_MIME, SVG_MIME, XHTML, XLINK, urlnormalize, xml2str, xpath from calibre.ebooks.oeb.stylizer import Stylizer from calibre.utils.imghdr import what from polyglot.urllib import urldefrag IMAGE_TAGS = {XHTML('img'), XHTML('object')} KEEP_ATTRS = {'class', 'style', 'width', 'height', 'align'} def test_svg(): # {{{ TEST_PNG_DATA_URI='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAMAAABEpIrGAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAWJQTFRFAAAAAAAAAAAAAAAAAAAAAQEAAgIBAwIBBgQCBwUCCAYDCggECwkEDgsFDwwFEA0GHRcKHxkLIBkLIxwMJR0NJx8OKCAOKCAPKSAPMScSPTAWQTQXQzUYSjsaSjsbSzsbUD8dUUAdVEMeWkggW0ggW0ghW0khXUohYk4ja1Umb1gocVoocVopclspc1spdV0qd18reF8riW0xjXEzl3g2mns3nn04nn45n345oIA6ooE7o4I7pII7pIM7pYQ7p4U8qYY8rYo+s45Bxp1Hx55Hy6FJy6JJzaRJz6RKz6RLz6VL0qdL1KpM1apM1qtN16xN2KxN2K1O2a1O2q1O265P3K9P3bBQ3rBP37FP37FQ37JQ4rNR47VR5LVR7LxV7bxV7r1V7r5W8L9W8MBW8b9V8b9W8cBW8cBX8sBW8sBX8sFW8sFX88BX88FW88FX88FY88JX88JY9MFX9MJX9MJY9MNYSw0rOAAAAAR0Uk5T2+rr8giKtGMAAAFDSURBVDjLhdNFUwNBEIbhJWkkuLu7u5PgHtwWl0CGnW34aJLl/3OgUlRlGfKepqafmstUW1Yw8E9By6IMWVn/z7OsQOpYNrE0H4lEwuFwZHmyLnUb+AUzIiLMItDgrWIfKH3mnz4RA6PX/8Im8xuEgVfxxG33g+rVi9OT46OdPQ0kDgv8gCg3FMrLphkNyCD9BYiIqEErraP5ZrDGDrw2MoIhsPACGUH5g2gVqzWDKQ/gETKCZmHwbo4ZbHhJ1q1kBMMJCKbJCCof35V+qjCDOUCrMTKCFkc8vU5GENpW8NwmMxhVccYsGUHVvWKOFhlBySJicV6u7+7s6Ozq6anxgT44Lwy4jlKK4br96WDl09GA/gA4zp7gLh2MM3MS+EgCGl+iD9JB4cDZzbV9ZV/atn1+frvfaPhuX4HMq0cZsjKt/zfXXmDab9zjGwAAAABJRU5ErkJggg==' return f''' <svg xmlns="http://www.w3.org/2000/svg" width="64" height="64" viewBox="0 0 64 64"> <path d="M4.5 11H3v4h4v-1.5H4.5V11zM3 7h1.5V4.5H7V3H3v4zm10.5 
6.5H11V15h4v-4h-1.5v2.5zM11 3v1.5h2.5V7H15V3h-4z"/> <image width="32" height="32" x="32" y="32" xlink:href="{TEST_PNG_DATA_URI}"/> </svg>'''.encode() # }}} class Unavailable(Exception): pass def rasterize_svg(data=None, sizes=(), width=0, height=0, print=None, fmt='PNG', as_qimage=False): if data is None: data = test_svg() svg = QSvgRenderer(QByteArray(data)) size = svg.defaultSize() if size.width() == 100 and size.height() == 100 and sizes: size.setWidth(int(sizes[0])) size.setHeight(int(sizes[1])) if width or height: size.scale(int(width), int(height), Qt.AspectRatioMode.KeepAspectRatio) if print is not None: print(f'Rasterizing SVG to {size.width()} x {size.height()}') image = QImage(size, QImage.Format.Format_ARGB32_Premultiplied) image.fill(QColor("white").rgb()) painter = QPainter(image) svg.render(painter) painter.end() if as_qimage: return image array = QByteArray() buffer = QBuffer(array) buffer.open(QIODevice.OpenModeFlag.WriteOnly) image.save(buffer, fmt) return array.data() @lru_cache(maxsize=128) def data_url(mime_type: str, data: bytes) -> str: return f'data:{mime_type};base64,' + standard_b64encode(data).decode('ascii') class SVGRasterizer: def __init__(self, base_css='', save_svg_originals=False): self.base_css = base_css self.save_svg_originals = save_svg_originals from calibre.gui2 import must_use_qt must_use_qt() @classmethod def config(cls, cfg): return cfg @classmethod def generate(cls, opts): return cls() def __call__(self, oeb, context): oeb.logger.info('Rasterizing SVG images...') self.stylizer_cache = {} self.oeb = oeb self.opts = context self.profile = context.dest self.images = {} self.svg_originals = {} self.scan_for_linked_resources_in_manifest() self.rasterize_spine() self.rasterize_cover() def rasterize_svg(self, elem, width=0, height=0, format='PNG'): view_box = elem.get('viewBox', elem.get('viewbox', None)) sizes = None logger = self.oeb.logger if view_box is not None: try: box = [float(x) for x in filter(None, re.split('[, ]', 
view_box))] sizes = [box[2]-box[0], box[3] - box[1]] except (TypeError, ValueError, IndexError): logger.warn('SVG image has invalid viewBox="%s", ignoring the viewBox' % view_box) else: for image in elem.xpath('descendant::*[local-name()="image" and ' '@height and contains(@height, "%")]'): logger.info('Found SVG image height in %, trying to convert...') try: h = float(image.get('height').replace('%', ''))/100. image.set('height', str(h*sizes[1])) except: logger.exception('Failed to convert percentage height:', image.get('height')) return rasterize_svg(xml2str(elem, with_tail=False), sizes=sizes, width=width, height=height, print=logger.info, fmt=format) def scan_for_linked_resources_in_manifest(self): for item in self.oeb.manifest.values(): if item.media_type == SVG_MIME and item.data is not None: self.scan_for_linked_resources_in_svg(item) def scan_for_linked_resources_in_svg(self, item, svg=None): if svg is None: svg = item.data hrefs = self.oeb.manifest.hrefs ha = XLINK('href') for elem in xpath(svg, '//svg:*[@xl:href]'): href = urlnormalize(elem.get(ha)) path = urldefrag(href)[0] if not path: continue abshref = item.abshref(path) linkee = hrefs.get(abshref) if linkee is None: continue data = linkee.bytes_representation ext = what(None, data) if not ext: continue mt = guess_type('file.'+ext)[0] if not mt or not mt.startswith('image/'): continue elem.set(ha, data_url(mt, data)) return svg def stylizer(self, item): ans = self.stylizer_cache.get(item, None) if ans is None: ans = self.stylizer_cache[item] = Stylizer(item.data, item.href, self.oeb, self.opts, self.profile, base_css=self.base_css) return ans def rasterize_spine(self): for item in self.oeb.spine: self.rasterize_item(item) def rasterize_item(self, item): html = item.data hrefs = self.oeb.manifest.hrefs for elem in xpath(html, '//h:img[@src]'): src = urlnormalize(elem.attrib['src']) image = hrefs.get(item.abshref(src), None) if image and image.media_type == SVG_MIME: style = 
self.stylizer(item).style(elem) self.rasterize_external(elem, style, item, image) for elem in xpath(html, '//h:object[@type="%s" and @data]' % SVG_MIME): data = urlnormalize(elem.attrib['data']) image = hrefs.get(item.abshref(data), None) if image and image.media_type == SVG_MIME: style = self.stylizer(item).style(elem) self.rasterize_external(elem, style, item, image) for elem in xpath(html, '//svg:svg'): style = self.stylizer(item).style(elem) self.rasterize_inline(elem, style, item) def rasterize_inline(self, elem, style, item): width = style['width'] height = style['height'] width = (width / 72) * self.profile.dpi height = (height / 72) * self.profile.dpi self.scan_for_linked_resources_in_svg(item, elem) data = self.rasterize_svg(elem, width, height) manifest = self.oeb.manifest href = os.path.splitext(item.href)[0] + '.png' id, href = manifest.generate(item.id, href) manifest.add(id, href, PNG_MIME, data=data) img = elem.makeelement(XHTML('img'), src=item.relhref(href)) if self.save_svg_originals: svg_bytes = etree.tostring(elem, encoding='utf-8', xml_declaration=True, pretty_print=True, with_tail=False) svg_id, svg_href = manifest.generate(item.id, 'inline.svg') manifest.add(svg_id, svg_href, SVG_MIME, data=svg_bytes) self.svg_originals[href] = svg_href img.tail = elem.tail elem.getparent().replace(elem, img) for prop in ('width', 'height'): if prop in elem.attrib: img.attrib[prop] = elem.attrib[prop] def rasterize_external(self, elem, style, item, svgitem): width = style['width'] height = style['height'] width = (width / 72) * self.profile.dpi height = (height / 72) * self.profile.dpi data = QByteArray(svgitem.bytes_representation) svg = QSvgRenderer(data) size = svg.defaultSize() size.scale(int(width), int(height), Qt.AspectRatioMode.KeepAspectRatio) key = (svgitem.href, size.width(), size.height()) if key in self.images: href = self.images[key] else: logger = self.oeb.logger logger.info('Rasterizing %r to %dx%d' % (svgitem.href, size.width(), 
size.height())) image = QImage(size, QImage.Format.Format_ARGB32_Premultiplied) image.fill(QColor("white").rgb()) painter = QPainter(image) svg.render(painter) painter.end() array = QByteArray() buffer = QBuffer(array) buffer.open(QIODevice.OpenModeFlag.WriteOnly) image.save(buffer, 'PNG') data = array.data() manifest = self.oeb.manifest href = os.path.splitext(svgitem.href)[0] + '.png' id, href = manifest.generate(svgitem.id, href) manifest.add(id, href, PNG_MIME, data=data) self.images[key] = href self.svg_originals[href] = svgitem.href elem.tag = XHTML('img') for attr in elem.attrib: if attr not in KEEP_ATTRS: del elem.attrib[attr] elem.attrib['src'] = item.relhref(href) if elem.text: elem.attrib['alt'] = elem.text elem.text = None for child in elem: elem.remove(child) def rasterize_cover(self): covers = self.oeb.metadata.cover if not covers: return if str(covers[0]) not in self.oeb.manifest.ids: self.oeb.logger.warn('Cover not in manifest, skipping.') self.oeb.metadata.clear('cover') return cover = self.oeb.manifest.ids[str(covers[0])] if not cover.media_type == SVG_MIME: return width = (self.profile.width / 72) * self.profile.dpi height = (self.profile.height / 72) * self.profile.dpi data = self.rasterize_svg(cover.data, width, height) href = os.path.splitext(cover.href)[0] + '.png' id, href = self.oeb.manifest.generate(cover.id, href) self.oeb.manifest.add(id, href, PNG_MIME, data=data) covers[0].value = id if __name__ == '__main__': open('/t/test-svg-rasterization.png', 'wb').write(rasterize_svg())
11,536
Python
.py
237
38.966245
1,114
0.633265
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,434
alt_text.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/alt_text.py
#!/usr/bin/env python # License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net> from io import BytesIO from PIL import Image from calibre.ebooks.oeb.base import SVG_MIME, urlnormalize, xpath from calibre.utils.img import read_alt_text def process_spine_item(item, hrefs, log): html = item.data for elem in xpath(html, '//h:img[@src]'): src = urlnormalize(elem.attrib['src']) image = hrefs.get(item.abshref(src), None) if image and image.media_type != SVG_MIME and not elem.attrib.get('alt'): data = image.bytes_representation try: with Image.open(BytesIO(data)) as im: alt = read_alt_text(im) except Exception as err: log.warn(f'Failed to read alt text from image {src} with error: {err}') else: if alt: elem.set('alt', alt) class AddAltText: def __call__(self, oeb, opts): oeb.logger.info('Add alt text to images...') hrefs = oeb.manifest.hrefs for item in oeb.spine: process_spine_item(item, hrefs, oeb.log)
1,141
Python
.py
27
33.222222
87
0.613575
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,435
subset.py
kovidgoyal_calibre/src/calibre/ebooks/oeb/transforms/subset.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os from collections import defaultdict from io import BytesIO from tinycss.fonts3 import parse_font_family from calibre.ebooks.oeb.base import css_text, urlnormalize from calibre.utils.fonts.subset import subset from polyglot.builtins import iteritems font_properties = ('font-family', 'src', 'font-weight', 'font-stretch', 'font-style', 'text-transform') def get_font_properties(rule, default=None): ''' Given a CSS rule, extract normalized font properties from it. Note that shorthand font property should already have been expanded by the CSS flattening code. ''' props = {} s = rule.style for q in font_properties: g = 'uri' if q == 'src' else 'value' try: val = s.getProperty(q).propertyValue[0] val = getattr(val, g) if q == 'font-family': val = parse_font_family(css_text(s.getProperty(q).propertyValue)) if val and val[0] == 'inherit': val = None except (IndexError, KeyError, AttributeError, TypeError, ValueError): val = None if q in {'src', 'font-family'} else default if q in {'font-weight', 'font-stretch', 'font-style'}: val = str(val).lower() if (val or val == 0) else val if val == 'inherit': val = default if q == 'font-weight': val = {'normal':'400', 'bold':'700'}.get(val, val) if val not in {'100', '200', '300', '400', '500', '600', '700', '800', '900', 'bolder', 'lighter'}: val = default if val == 'normal': val = '400' elif q == 'font-style': if val not in {'normal', 'italic', 'oblique'}: val = default elif q == 'font-stretch': if val not in {'normal', 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'}: val = default props[q] = val return props def find_font_face_rules(sheet, oeb): ''' Find all @font-face rules in the given sheet and extract the relevant info from them. sheet can be either a ManifestItem or a CSSStyleSheet. 
''' ans = [] try: rules = sheet.data.cssRules except AttributeError: rules = sheet.cssRules for i, rule in enumerate(rules): if rule.type != rule.FONT_FACE_RULE: continue props = get_font_properties(rule, default='normal') if not props['font-family'] or not props['src']: continue try: path = sheet.abshref(props['src']) except AttributeError: path = props['src'] ff = oeb.manifest.hrefs.get(urlnormalize(path), None) if not ff: continue props['item'] = ff if props['font-weight'] in {'bolder', 'lighter'}: props['font-weight'] = '400' props['weight'] = int(props['font-weight']) props['rule'] = rule props['chars'] = set() ans.append(props) return ans def elem_style(style_rules, cls, inherited_style): ''' Find the effective style for the given element. ''' classes = cls.split() style = inherited_style.copy() for cls in classes: style.update(style_rules.get(cls, {})) wt = style.get('font-weight', None) pwt = inherited_style.get('font-weight', '400') if wt == 'bolder': style['font-weight'] = { '100':'400', '200':'400', '300':'400', '400':'700', '500':'700', }.get(pwt, '900') elif wt == 'lighter': style['font-weight'] = { '600':'400', '700':'400', '800':'700', '900':'700'}.get(pwt, '100') return style class SubsetFonts: ''' Subset all embedded fonts. Must be run after CSS flattening, as it requires CSS normalization and flattening to work. 
''' def __call__(self, oeb, log, opts): self.oeb, self.log, self.opts = oeb, log, opts self.find_embedded_fonts() if not self.embedded_fonts: self.log.debug('No embedded fonts found') return self.find_style_rules() self.find_font_usage() totals = [0, 0] def remove(font): totals[1] += len(font['item'].data) self.oeb.manifest.remove(font['item']) font['rule'].parentStyleSheet.deleteRule(font['rule']) fonts = {} for font in self.embedded_fonts: item, chars = font['item'], font['chars'] if item.href in fonts: fonts[item.href]['chars'] |= chars else: fonts[item.href] = font for font in fonts.values(): if not font['chars']: self.log('The font %s is unused. Removing it.'%font['src']) remove(font) continue old_raw = font['item'].data output = BytesIO() font_type = os.path.splitext(font['item'].href)[1][1:].lower() try: subset(BytesIO(old_raw), output, font_type, font['chars']) except Exception as e: self.log.warn('The font %s is unsupported for subsetting. %s'%(font['src'], e)) sz = len(font['item'].data) totals[0] += sz totals[1] += sz else: font['item'].data = output.getvalue() nlen = len(font['item'].data) olen = len(old_raw) self.log('Decreased the font %s to %.1f%% of its original size'% (font['src'], nlen/olen *100)) totals[0] += nlen totals[1] += olen font['item'].unload_data_from_memory() if totals[0]: self.log('Reduced total font size to %.1f%% of original'% (totals[0]/totals[1] * 100)) def find_embedded_fonts(self): ''' Find all @font-face rules and extract the relevant info from them. ''' self.embedded_fonts = [] for item in self.oeb.manifest: try: if not hasattr(item.data, 'cssRules'): continue except FileNotFoundError: continue self.embedded_fonts.extend(find_font_face_rules(item, self.oeb)) def find_style_rules(self): ''' Extract all font related style information from all stylesheets into a dict mapping classes to font properties specified by that class. All the heavy lifting has already been done by the CSS flattening code. 
''' rules = defaultdict(dict) for item in self.oeb.manifest: if not hasattr(item.data, 'cssRules'): continue for i, rule in enumerate(item.data.cssRules): if rule.type != rule.STYLE_RULE: continue props = {k:v for k,v in iteritems(get_font_properties(rule)) if v} if not props: continue for sel in rule.selectorList: sel = sel.selectorText if sel and sel.startswith('.'): # We dont care about pseudo-selectors as the worst that # can happen is some extra characters will remain in # the font sel = sel.partition(':')[0] rules[sel[1:]].update(props) self.style_rules = dict(rules) def find_font_usage(self): for item in self.oeb.manifest: if not hasattr(item.data, 'xpath'): continue for body in item.data.xpath('//*[local-name()="body"]'): base = {'font-family':['serif'], 'font-weight': '400', 'font-style':'normal', 'font-stretch':'normal'} self.find_usage_in(body, base) def used_font(self, style): ''' Given a style find the embedded font that matches it. Returns None if no match is found (can happen if no family matches). 
''' ff = style.get('font-family', []) lnames = {str(x).lower() for x in ff} matching_set = [] # Filter on font-family for ef in self.embedded_fonts: flnames = {x.lower() for x in ef.get('font-family', [])} if not lnames.intersection(flnames): continue matching_set.append(ef) if not matching_set: return None # Filter on font-stretch widths = {x:i for i, x in enumerate(('ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded' ))} width = widths[style.get('font-stretch', 'normal')] for f in matching_set: f['width'] = widths[style.get('font-stretch', 'normal')] min_dist = min(abs(width-f['width']) for f in matching_set) nearest = [f for f in matching_set if abs(width-f['width']) == min_dist] if width <= 4: lmatches = [f for f in nearest if f['width'] <= width] else: lmatches = [f for f in nearest if f['width'] >= width] matching_set = (lmatches or nearest) # Filter on font-style fs = style.get('font-style', 'normal') order = { 'oblique':['oblique', 'italic', 'normal'], 'normal':['normal', 'oblique', 'italic'] }.get(fs, ['italic', 'oblique', 'normal']) for q in order: matches = [f for f in matching_set if f.get('font-style', 'normal') == q] if matches: matching_set = matches break # Filter on font weight fw = int(style.get('font-weight', '400')) if fw == 400: q = [400, 500, 300, 200, 100, 600, 700, 800, 900] elif fw == 500: q = [500, 400, 300, 200, 100, 600, 700, 800, 900] elif fw < 400: q = [fw] + list(range(fw-100, -100, -100)) + list(range(fw+100, 100, 1000)) else: q = [fw] + list(range(fw+100, 100, 1000)) + list(range(fw-100, -100, -100)) for wt in q: matches = [f for f in matching_set if f['weight'] == wt] if matches: return matches[0] def find_chars(self, elem, style): ans = set() transform = lambda x: x # noqa tt = style.get('text-transform') if tt: if tt in ('uppercase', 'capitalize'): transform = str.upper elif tt == 'lowercase': transform = str.lower if elem.text: ans |= 
set(transform(elem.text)) for child in elem: if child.tail: ans |= set(transform(child.tail)) return ans def find_usage_in(self, elem, inherited_style): style = elem_style(self.style_rules, elem.get('class', '') or '', inherited_style) for child in elem: self.find_usage_in(child, style) font = self.used_font(style) if font: chars = self.find_chars(elem, style) if chars: font['chars'] |= chars
11,736
Python
.py
287
29.425087
103
0.529494
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,436
input.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/input.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import operator import textwrap from copy import copy, deepcopy from lxml import etree from calibre import guess_type from polyglot.builtins import as_bytes class Canvas(etree.XSLTExtension): def __init__(self, doc, styles, text_block, log): self.doc = doc self.styles = styles self.text_block = text_block self.log = log self.processed = set() def execute(self, context, self_node, input_node, output_parent): cid = input_node.get('objid', None) if cid is None or cid in self.processed: return self.processed.add(cid) input_node = self.doc.xpath('//Canvas[@objid="%s"]'%cid)[0] objects = list(self.get_objects(input_node)) if len(objects) == 1 and objects[0][0].tag == 'ImageBlock': self.image_page(input_node, objects[0][0], output_parent) else: canvases = [input_node] for x in input_node.itersiblings(): if x.tag == 'Canvas': oid = x.get('objid', None) if oid is not None: canvases.append(x) self.processed.add(oid) else: break table = etree.Element('table') table.text = '\n\t' for canvas in canvases: oid = canvas.get('objid') tr = table.makeelement('tr') tr.set('id', oid) tr.tail = '\n\t' table.append(tr) for obj, x, y in self.get_objects(canvas): if obj.tag != 'TextBlock': self.log.warn(obj.tag, 'elements in Canvas not supported') continue td = table.makeelement('td') self.text_block.render_block(obj, td) tr.append(td) output_parent.append(table) def image_page(self, input_node, block, output_parent): div = etree.Element('div') div.set('id', input_node.get('objid', 'scuzzy')) div.set('class', 'image_page') width = self.styles.to_num(block.get("xsize", None)) height = self.styles.to_num(block.get("ysize", None)) img = div.makeelement('img') if width is not None: img.set('width', str(int(width))) if height is not None: img.set('height', str(int(height))) ref = block.get('refstream', None) if ref is not None: imstr = 
self.doc.xpath('//ImageStream[@objid="%s"]'%ref) if imstr: src = imstr[0].get('file', None) if src: img.set('src', src) div.append(img) output_parent.append(div) def get_objects(self, node): for x in node.xpath('descendant::PutObj[@refobj and @x1 and @y1]'): objs = node.xpath('//*[@objid="%s"]'%x.get('refobj')) x, y = map(self.styles.to_num, (x.get('x1'), x.get('y1'))) if objs and x is not None and y is not None: yield objs[0], int(x), int(y) class MediaType(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): name = input_node.get('file', None) typ = guess_type(name)[0] if not typ: typ = 'application/octet-stream' output_parent.text = typ class ImageBlock(etree.XSLTExtension): def __init__(self, canvas): etree.XSLTExtension.__init__(self) self.canvas = canvas def execute(self, context, self_node, input_node, output_parent): self.canvas.image_page(input_node, input_node, output_parent) class RuledLine(etree.XSLTExtension): def execute(self, context, self_node, input_node, output_parent): hr = etree.Element('hr') output_parent.append(hr) class TextBlock(etree.XSLTExtension): def __init__(self, styles, char_button_map, plot_map, log): etree.XSLTExtension.__init__(self) self.styles = styles self.log = log self.char_button_map = char_button_map self.plot_map = plot_map def execute(self, context, self_node, input_node, output_parent): input_node = deepcopy(input_node) div = etree.Element('div') self.render_block(input_node, div) output_parent.append(div) def render_block(self, node, root): ts = node.get('textstyle', None) classes = [] bs = node.get('blockstyle') if bs in self.styles.block_style_map: classes.append('bs%d'%self.styles.block_style_map[bs]) if ts in self.styles.text_style_map: classes.append('ts%d'%self.styles.text_style_map[ts]) if classes: root.set('class', ' '.join(classes)) objid = node.get('objid', None) if objid: root.set('id', objid) root.text = node.text self.root = root self.parent = root self.add_text_to = 
(self.parent, 'text') self.fix_deep_nesting(node) for child in node: self.process_child(child) def fix_deep_nesting(self, node): deepest = 1 def depth(node): parent = node.getparent() ans = 1 while parent is not None: ans += 1 parent = parent.getparent() return ans for span in node.xpath('descendant::Span'): d = depth(span) if d > deepest: deepest = d if d > 500: break if deepest < 500: return self.log.warn('Found deeply nested spans. Flattening.') # with open('/t/before.xml', 'wb') as f: # f.write(etree.tostring(node, method='xml')) spans = [(depth(span), span) for span in node.xpath('descendant::Span')] spans.sort(key=operator.itemgetter(0), reverse=True) for depth, span in spans: if depth < 3: continue p = span.getparent() gp = p.getparent() idx = p.index(span) pidx = gp.index(p) children = list(p)[idx:] t = children[-1].tail t = t if t else '' children[-1].tail = t + (p.tail if p.tail else '') p.tail = '' pattrib = dict(**p.attrib) if p.tag == 'Span' else {} for child in children: p.remove(child) if pattrib and child.tag == "Span": attrib = copy(pattrib) attrib.update(child.attrib) child.attrib.update(attrib) for child in reversed(children): gp.insert(pidx+1, child) # with open('/t/after.xml', 'wb') as f: # f.write(etree.tostring(node, method='xml')) def add_text(self, text): if text: if getattr(self.add_text_to[0], self.add_text_to[1]) is None: setattr(self.add_text_to[0], self.add_text_to[1], '') setattr(self.add_text_to[0], self.add_text_to[1], getattr(self.add_text_to[0], self.add_text_to[1])+ text) def process_container(self, child, tgt): idx = self.styles.get_text_styles(child) if idx is not None: tgt.set('class', 'ts%d'%idx) self.parent.append(tgt) orig_parent = self.parent self.parent = tgt self.add_text_to = (self.parent, 'text') self.add_text(child.text) for gchild in child: self.process_child(gchild) self.parent = orig_parent self.add_text_to = (tgt, 'tail') self.add_text(child.tail) def process_child(self, child): if child.tag == 'CR': if 
self.parent == self.root or self.parent.tag == 'p': self.parent = self.root.makeelement('p') self.root.append(self.parent) self.add_text_to = (self.parent, 'text') else: br = self.parent.makeelement('br') self.parent.append(br) self.add_text_to = (br, 'tail') self.add_text(child.tail) elif child.tag in ('P', 'Span', 'EmpLine', 'NoBR'): span = self.root.makeelement('span') if child.tag == 'EmpLine': td = 'underline' if child.get('emplineposition', 'before') == 'before' else 'overline' span.set('style', 'text-decoration: '+td) self.process_container(child, span) elif child.tag == 'Sup': sup = self.root.makeelement('sup') self.process_container(child, sup) elif child.tag == 'Sub': sub = self.root.makeelement('sub') self.process_container(child, sub) elif child.tag == 'Italic': sup = self.root.makeelement('i') self.process_container(child, sup) elif child.tag == 'CharButton': a = self.root.makeelement('a') oid = child.get('refobj', None) if oid in self.char_button_map: a.set('href', self.char_button_map[oid]) self.process_container(child, a) elif child.tag == 'Plot': xsize = self.styles.to_num(child.get('xsize', None), 166/720) ysize = self.styles.to_num(child.get('ysize', None), 166/720) img = self.root.makeelement('img') if xsize is not None: img.set('width', str(int(xsize))) if ysize is not None: img.set('height', str(int(ysize))) ro = child.get('refobj', None) if ro in self.plot_map: img.set('src', self.plot_map[ro]) self.parent.append(img) self.add_text_to = (img, 'tail') self.add_text(child.tail) else: self.log.warn('Unhandled Text element:', child.tag) class Styles(etree.XSLTExtension): def __init__(self): etree.XSLTExtension.__init__(self) self.text_styles, self.block_styles = [], [] self.text_style_map, self.block_style_map = {}, {} self.CSS = textwrap.dedent(''' .image_page { text-align:center } ''') def write(self, name='styles.css'): def join(style): ans = ['%s : %s;'%(k, v) for k, v in style.items()] if ans: ans[-1] = ans[-1][:-1] return '\n\t'.join(ans) 
with open(name, 'wb') as f: f.write(as_bytes(self.CSS)) for (w, sel) in [(self.text_styles, 'ts'), (self.block_styles, 'bs')]: for i, s in enumerate(w): if not s: continue rsel = '.%s%d'%(sel, i) s = join(s) f.write(as_bytes(rsel + ' {\n\t' + s + '\n}\n\n')) def execute(self, context, self_node, input_node, output_parent): if input_node.tag == 'TextStyle': idx = self.get_text_styles(input_node) if idx is not None: self.text_style_map[input_node.get('objid')] = idx else: idx = self.get_block_styles(input_node) self.block_style_map[input_node.get('objid')] = idx def px_to_pt(self, px): try: return px * 72/166 except: return None def color(self, val): try: val = int(val, 16) r, g, b, a = val & 0xFF, (val>>8)&0xFF, (val>>16)&0xFF, (val>>24)&0xFF if a == 255: return None if a == 0: return 'rgb(%d,%d,%d)'%(r,g,b) return 'rgba(%d,%d,%d,%f)'%(r,g,b,1.-a/255.) except: return None def get_block_styles(self, node): ans = {} sm = self.px_to_pt(node.get('sidemargin', None)) if sm is not None: ans['margin-left'] = ans['margin-right'] = '%fpt'%sm ts = self.px_to_pt(node.get('topskip', None)) if ts is not None: ans['margin-top'] = '%fpt'%ts fs = self.px_to_pt(node.get('footskip', None)) if fs is not None: ans['margin-bottom'] = '%fpt'%fs fw = self.px_to_pt(node.get('framewidth', None)) if fw is not None: ans['border-width'] = '%fpt'%fw ans['border-style'] = 'solid' fc = self.color(node.get('framecolor', None)) if fc is not None: ans['border-color'] = fc bc = self.color(node.get('bgcolor', None)) if bc is not None: ans['background-color'] = bc if ans not in self.block_styles: self.block_styles.append(ans) return self.block_styles.index(ans) def to_num(self, val, factor=1.): try: return float(val)*factor except: return None def get_text_styles(self, node): ans = {} fs = self.to_num(node.get('fontsize', None), 0.1) if fs is not None: ans['font-size'] = '%fpt'%fs fw = self.to_num(node.get('fontweight', None)) if fw is not None: ans['font-weight'] = ('bold' if fw >= 700 else 'normal') 
# fn = getattr(obj, 'fontfacename', None) # if fn is not None: # fn = cls.FONT_MAP[fn] # item('font-family: %s;'%fn) fg = self.color(node.get('textcolor', None)) if fg is not None: ans['color'] = fg bg = self.color(node.get('textbgcolor', None)) if bg is not None: ans['background-color'] = bg al = node.get('align', None) if al is not None: all = dict(head='left', center='center', foot='right') ans['text-align'] = all.get(al, 'left') # lh = self.to_num(node.get('linespace', None), 0.1) # if lh is not None: # ans['line-height'] = '%fpt'%lh pi = self.to_num(node.get('parindent', None), 0.1) if pi is not None: ans['text-indent'] = '%fpt'%pi if not ans: return None if ans not in self.text_styles: self.text_styles.append(ans) return self.text_styles.index(ans)
14,393
Python
.py
350
29.688571
102
0.528125
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,437
meta.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/meta.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' """ This module presents an easy to use interface for getting and setting meta information in LRF files. Just create an L{LRFMetaFile} object and use its properties to get and set meta information. For example: >>> lrf = LRFMetaFile("mybook.lrf") >>> print(lrf.title, lrf.author) >>> lrf.category = "History" """ import io import os import struct import sys import xml.dom.minidom as dom import zlib from functools import wraps from shutil import copyfileobj from calibre.ebooks.chardet import xml_to_unicode from calibre.ebooks.metadata import MetaInformation, string_to_authors from calibre.utils.cleantext import clean_xml_chars from polyglot.builtins import string_or_bytes BYTE = "<B" #: Unsigned char little endian encoded in 1 byte WORD = "<H" #: Unsigned short little endian encoded in 2 bytes DWORD = "<I" #: Unsigned integer little endian encoded in 4 bytes QWORD = "<Q" #: Unsigned long long little endian encoded in 8 bytes class field: """ A U{Descriptor<http://www.cafepy.com/article/python_attributes_and_methods/python_attributes_and_methods.html>}, that implements access to protocol packets in a human readable way. """ def __init__(self, start=16, fmt=DWORD): """ @param start: The byte at which this field is stored in the buffer @param fmt: The packing format for this field. See U{struct<http://docs.python.org/lib/module-struct.html>}. 
""" self._fmt, self._start = fmt, start def __get__(self, obj, typ=None): return obj.unpack(start=self._start, fmt=self._fmt)[0] def __set__(self, obj, val): obj.pack(val, start=self._start, fmt=self._fmt) def __repr__(self): typ = {DWORD: 'unsigned int', 'QWORD': 'unsigned long long', BYTE: 'unsigned char', WORD: 'unsigned short'}.get(self._fmt, '') return "An " + typ + " stored in " + \ str(struct.calcsize(self._fmt)) + \ " bytes starting at byte " + str(self._start) class versioned_field(field): def __init__(self, vfield, version, start=0, fmt=WORD): field.__init__(self, start=start, fmt=fmt) self.vfield, self.version = vfield, version def enabled(self, obj): return self.vfield.__get__(obj) > self.version def __get__(self, obj, typ=None): if self.enabled(obj): return field.__get__(self, obj, typ=typ) else: return None def __set__(self, obj, val): if not self.enabled(obj): raise LRFException("Trying to set disabled field") else: field.__set__(self, obj, val) class LRFException(Exception): pass class fixed_stringfield: """ A field storing a variable length string. 
""" def __init__(self, length=8, start=0): """ @param length: Size of this string @param start: The byte at which this field is stored in the buffer """ self._length = length self._start = start def __get__(self, obj, typ=None): length = str(self._length) return obj.unpack(start=self._start, fmt="<"+length+"s")[0] def __set__(self, obj, val): if not isinstance(val, string_or_bytes): val = str(val) if isinstance(val, str): val = val.encode('utf-8') if len(val) != self._length: raise LRFException("Trying to set fixed_stringfield with a " + "string of incorrect length") obj.pack(val, start=self._start, fmt="<"+str(len(val))+"s") def __repr__(self): return "A string of length " + str(self._length) + \ " starting at byte " + str(self._start) class xml_attr_field: def __init__(self, tag_name, attr, parent='BookInfo'): self.tag_name = tag_name self.parent = parent self.attr= attr def __get__(self, obj, typ=None): """ Return the data in this field or '' if the field is empty """ document = obj.info elems = document.getElementsByTagName(self.tag_name) if len(elems): elem = None for candidate in elems: if candidate.parentNode.nodeName == self.parent: elem = candidate if elem and elem.hasAttribute(self.attr): return elem.getAttribute(self.attr) return '' def __set__(self, obj, val): if val is None: val = "" document = obj.info elems = document.getElementsByTagName(self.tag_name) if len(elems): elem = None for candidate in elems: if candidate.parentNode.nodeName == self.parent: elem = candidate if elem: elem.setAttribute(self.attr, val) obj.info = document def __repr__(self): return "XML Attr Field: " + self.tag_name + " in " + self.parent def __str__(self): return self.tag_name+'.'+self.attr class xml_field: """ Descriptor that gets and sets XML based meta information from an LRF file. 
Works for simple XML fields of the form <tagname>data</tagname> """ def __init__(self, tag_name, parent="BookInfo"): """ @param tag_name: The XML tag whose data we operate on @param parent: The tagname of the parent element of C{tag_name} """ self.tag_name = tag_name self.parent = parent def __get__(self, obj, typ=None): """ Return the data in this field or '' if the field is empty """ document = obj.info elems = document.getElementsByTagName(self.tag_name) if len(elems): elem = None for candidate in elems: if candidate.parentNode.nodeName == self.parent: elem = candidate if elem: elem.normalize() if elem.hasChildNodes(): return elem.firstChild.data.strip() return '' def __set__(self, obj, val): if not val: val = '' document = obj.info def create_elem(): elem = document.createElement(self.tag_name) parent = document.getElementsByTagName(self.parent)[0] parent.appendChild(elem) return elem if not val: val = '' if not isinstance(val, str): val = val.decode('utf-8') elems = document.getElementsByTagName(self.tag_name) elem = None if len(elems): for candidate in elems: if candidate.parentNode.nodeName == self.parent: elem = candidate if not elem: elem = create_elem() else: elem.normalize() while elem.hasChildNodes(): elem.removeChild(elem.lastChild) else: elem = create_elem() elem.appendChild(document.createTextNode(val)) obj.info = document def __str__(self): return self.tag_name def __repr__(self): return "XML Field: " + self.tag_name + " in " + self.parent def insert_into_file(fileobj, data, start, end): """ Insert data into fileobj at position C{start}. This function inserts data into a file, overwriting all data between start and end. If end == start no data is overwritten. Do not use this function to append data to a file. 
@param fileobj: file like object @param data: data to be inserted into fileobj @param start: The position at which to start inserting data @param end: The position in fileobj of data that must not be overwritten @return: C{start + len(data) - end} """ buffer = io.BytesIO() fileobj.seek(end) copyfileobj(fileobj, buffer, -1) buffer.flush() buffer.seek(0) fileobj.seek(start) fileobj.write(data) fileobj.flush() fileobj.truncate() delta = fileobj.tell() - end # < 0 if len(data) < end-start copyfileobj(buffer, fileobj, -1) fileobj.flush() buffer.close() return delta def get_metadata(stream): """ Return basic meta-data about the LRF file in C{stream} as a L{MetaInformation} object. @param stream: A file like object or an instance of L{LRFMetaFile} """ lrf = stream if isinstance(stream, LRFMetaFile) else LRFMetaFile(stream) authors = string_to_authors(lrf.author) mi = MetaInformation(lrf.title.strip(), authors) mi.author = lrf.author.strip() mi.comments = lrf.free_text.strip() mi.category = lrf.category.strip()+', '+lrf.classification.strip() tags = [x.strip() for x in mi.category.split(',') if x.strip()] if tags: mi.tags = tags if mi.category.strip() == ',': mi.category = None mi.publisher = lrf.publisher.strip() mi.cover_data = lrf.get_cover() try: mi.title_sort = lrf.title_reading.strip() if not mi.title_sort: mi.title_sort = None except: pass try: mi.author_sort = lrf.author_reading.strip() if not mi.author_sort: mi.author_sort = None except: pass if not mi.title or 'unknown' in mi.title.lower(): mi.title = None if not mi.authors: mi.authors = None if not mi.author or 'unknown' in mi.author.lower(): mi.author = None if not mi.category or 'unknown' in mi.category.lower(): mi.category = None if not mi.publisher or 'unknown' in mi.publisher.lower() or \ 'some publisher' in mi.publisher.lower(): mi.publisher = None return mi class LRFMetaFile: """ Has properties to read and write all Meta information in a LRF file. 
""" #: The first 6 bytes of all valid LRF files LRF_HEADER = 'LRF'.encode('utf-16le') lrf_header = fixed_stringfield(length=6, start=0x0) version = field(fmt=WORD, start=0x8) xor_key = field(fmt=WORD, start=0xa) root_object_id = field(fmt=DWORD, start=0xc) number_of_objects = field(fmt=QWORD, start=0x10) object_index_offset = field(fmt=QWORD, start=0x18) binding = field(fmt=BYTE, start=0x24) dpi = field(fmt=WORD, start=0x26) width = field(fmt=WORD, start=0x2a) height = field(fmt=WORD, start=0x2c) color_depth = field(fmt=BYTE, start=0x2e) toc_object_id = field(fmt=DWORD, start=0x44) toc_object_offset = field(fmt=DWORD, start=0x48) compressed_info_size = field(fmt=WORD, start=0x4c) thumbnail_type = versioned_field(version, 800, fmt=WORD, start=0x4e) thumbnail_size = versioned_field(version, 800, fmt=DWORD, start=0x50) uncompressed_info_size = versioned_field(compressed_info_size, 0, fmt=DWORD, start=0x54) title = xml_field("Title", parent="BookInfo") title_reading = xml_attr_field("Title", 'reading', parent="BookInfo") author = xml_field("Author", parent="BookInfo") author_reading = xml_attr_field("Author", 'reading', parent="BookInfo") # 16 characters. First two chars should be FB for personal use ebooks. 
book_id = xml_field("BookID", parent="BookInfo") publisher = xml_field("Publisher", parent="BookInfo") label = xml_field("Label", parent="BookInfo") category = xml_field("Category", parent="BookInfo") classification = xml_field("Classification", parent="BookInfo") free_text = xml_field("FreeText", parent="BookInfo") # Should use ISO 639 language codes language = xml_field("Language", parent="DocInfo") creator = xml_field("Creator", parent="DocInfo") # Format is %Y-%m-%d creation_date = xml_field("CreationDate", parent="DocInfo") producer = xml_field("Producer", parent="DocInfo") page = xml_field("SumPage", parent="DocInfo") def safe(func): """ Decorator that ensures that function calls leave the pos in the underlying file unchanged """ @wraps(func) def restore_pos(*args, **kwargs): obj = args[0] pos = obj._file.tell() res = func(*args, **kwargs) obj._file.seek(0, 2) if obj._file.tell() >= pos: obj._file.seek(pos) return res return restore_pos def safe_property(func): """ Decorator that ensures that read or writing a property leaves the position in the underlying file unchanged """ def decorator(f): def restore_pos(*args, **kwargs): obj = args[0] pos = obj._file.tell() res = f(*args, **kwargs) obj._file.seek(0, 2) if obj._file.tell() >= pos: obj._file.seek(pos) return res return restore_pos locals_ = func() if 'fget' in locals_: locals_["fget"] = decorator(locals_["fget"]) if 'fset' in locals_: locals_["fset"] = decorator(locals_["fset"]) return property(**locals_) @safe_property def info(): doc = \ """ Document meta information as a minidom Document object. To set use a minidom document object. 
""" def fget(self): if self.compressed_info_size == 0: raise LRFException("This document has no meta info") size = self.compressed_info_size - 4 self._file.seek(self.info_start) try: src = zlib.decompress(self._file.read(size)) if len(src) != self.uncompressed_info_size: raise LRFException("Decompression of document meta info\ yielded unexpected results") src = xml_to_unicode(src, strip_encoding_pats=True, resolve_entities=True, assume_utf8=True)[0] return dom.parseString(clean_xml_chars(src)) except zlib.error: raise LRFException("Unable to decompress document meta information") def fset(self, document): info = document.toxml('utf-8') self.uncompressed_info_size = len(info) stream = zlib.compress(info) orig_size = self.compressed_info_size self.compressed_info_size = len(stream) + 4 delta = insert_into_file(self._file, stream, self.info_start, self.info_start + orig_size - 4) if self.toc_object_offset > 0: self.toc_object_offset += delta self.object_index_offset += delta self.update_object_offsets(delta) return {"fget":fget, "fset":fset, "doc":doc} @safe_property def thumbnail_pos(): doc = """ The position of the thumbnail in the LRF file """ def fget(self): return self.info_start + self.compressed_info_size-4 return {"fget":fget, "doc":doc} @classmethod def _detect_thumbnail_type(cls, slice): """ @param slice: The first 16 bytes of the thumbnail """ ttype = 0x14 # GIF if "PNG" in slice: ttype = 0x12 if "BM" in slice: ttype = 0x13 if "JFIF" in slice: ttype = 0x11 return ttype @safe_property def thumbnail(): doc = \ """ The thumbnail. Represented as a string. The string you would get from the file read function. 
""" def fget(self): size = self.thumbnail_size if size: self._file.seek(self.thumbnail_pos) return self._file.read(size) def fset(self, data): if self.version <= 800: raise LRFException("Cannot store thumbnails in LRF files \ of version <= 800") slice = data[0:16] orig_size = self.thumbnail_size self.thumbnail_size = len(data) delta = insert_into_file(self._file, data, self.thumbnail_pos, self.thumbnail_pos + orig_size) self.toc_object_offset += delta self.object_index_offset += delta self.thumbnail_type = self._detect_thumbnail_type(slice) self.update_object_offsets(delta) return {"fget":fget, "fset":fset, "doc":doc} def __init__(self, file): """ @param file: A file object opened in the r+b mode """ file.seek(0, 2) self.size = file.tell() self._file = file if self.lrf_header != LRFMetaFile.LRF_HEADER: raise LRFException(file.name + " has an invalid LRF header. Are you sure it is an LRF file?") # Byte at which the compressed meta information starts self.info_start = 0x58 if self.version > 800 else 0x53 @safe def update_object_offsets(self, delta): """ Run through the LRF Object index changing the offset by C{delta}. """ self._file.seek(self.object_index_offset) count = self.number_of_objects while count > 0: raw = self._file.read(8) new_offset = struct.unpack(DWORD, raw[4:8])[0] + delta if new_offset >= (2**8)**4 or new_offset < 0x4C: raise LRFException(_('Invalid LRF file. Could not set metadata.')) self._file.seek(-4, os.SEEK_CUR) self._file.write(struct.pack(DWORD, new_offset)) self._file.seek(8, os.SEEK_CUR) count -= 1 self._file.flush() @safe def unpack(self, fmt=DWORD, start=0): """ Return decoded data from file. @param fmt: See U{struct<http://docs.python.org/lib/module-struct.html>} @param start: Position in file from which to decode """ end = start + struct.calcsize(fmt) self._file.seek(start) ret = struct.unpack(fmt, self._file.read(end-start)) return ret @safe def pack(self, *args, **kwargs): """ Encode C{args} and write them to file. 
C{kwargs} must contain the keywords C{fmt} and C{start} @param args: The values to pack @param fmt: See U{struct<http://docs.python.org/lib/module-struct.html>} @param start: Position in file at which to write encoded data """ encoded = struct.pack(kwargs["fmt"], *args) self._file.seek(kwargs["start"]) self._file.write(encoded) self._file.flush() def thumbail_extension(self): """ Return the extension for the thumbnail image type as specified by L{self.thumbnail_type}. If the LRF file was created by buggy software, the extension maye be incorrect. See L{self.fix_thumbnail_type}. """ ext = "gif" ttype = self.thumbnail_type if ttype == 0x11: ext = "jpeg" elif ttype == 0x12: ext = "png" elif ttype == 0x13: ext = "bmp" return ext def fix_thumbnail_type(self): """ Attempt to guess the thumbnail image format and set L{self.thumbnail_type} accordingly. """ slice = self.thumbnail[0:16] self.thumbnail_type = self._detect_thumbnail_type(slice) def seek(self, *args): """ See L{file.seek} """ return self._file.seek(*args) def tell(self): """ See L{file.tell} """ return self._file.tell() def read(self): """ See L{file.read} """ return self._file.read() def write(self, val): """ See L{file.write} """ self._file.write(val) def _objects(self): self._file.seek(self.object_index_offset) c = self.number_of_objects while c > 0: c -= 1 raw = self._file.read(16) pos = self._file.tell() yield struct.unpack('<IIII', raw)[:3] self._file.seek(pos) def get_objects_by_type(self, type): from calibre.ebooks.lrf.tags import Tag objects = [] for id, offset, size in self._objects(): self._file.seek(offset) tag = Tag(self._file) if tag.id == 0xF500: obj_id, obj_type = struct.unpack("<IH", tag.contents) if obj_type == type: objects.append((obj_id, offset, size)) return objects def get_object_by_id(self, tid): from calibre.ebooks.lrf.tags import Tag for id, offset, size in self._objects(): self._file.seek(offset) tag = Tag(self._file) if tag.id == 0xF500: obj_id, obj_type = struct.unpack("<IH", 
tag.contents) if obj_id == tid: return obj_id, offset, size, obj_type return (False, False, False, False) @safe def get_cover(self): from calibre.ebooks.lrf.objects import get_object for id, offset, size in self.get_objects_by_type(0x0C): image = get_object(None, self._file, id, offset, size, self.xor_key) id, offset, size = self.get_object_by_id(image.refstream)[:3] image_stream = get_object(None, self._file, id, offset, size, self.xor_key) return image_stream.file.rpartition('.')[-1], image_stream.stream return None def option_parser(): from calibre.constants import __appname__, __version__ from calibre.utils.config import OptionParser parser = OptionParser(usage=_('''%prog [options] mybook.lrf Show/edit the metadata in an LRF file.\n\n'''), version=__appname__+' '+__version__, epilog='Created by Kovid Goyal') parser.add_option("-t", "--title", action="store", type="string", dest="title", help=_("Set the book title")) parser.add_option('--title-sort', action='store', type='string', default=None, dest='title_reading', help=_('Set sort key for the title')) parser.add_option("-a", "--author", action="store", type="string", dest="author", help=_("Set the author")) parser.add_option('--author-sort', action='store', type='string', default=None, dest='author_reading', help=_('Set sort key for the author')) parser.add_option("-c", "--category", action="store", type="string", dest="category", help=_("The category this book belongs" " to. 
E.g.: History")) parser.add_option("--thumbnail", action="store", type="string", dest="thumbnail", help=_("Path to a graphic that will be" " set as this files' thumbnail")) parser.add_option("--comment", action="store", type="string", dest="comment", help=_("Path to a TXT file containing the " "comment to be stored in the LRF file.")) parser.add_option("--get-thumbnail", action="store_true", dest="get_thumbnail", default=False, help=_("Extract thumbnail from LRF file")) parser.add_option('--publisher', default=None, help=_('Set the publisher')) parser.add_option('--classification', default=None, help=_('Set the book classification')) parser.add_option('--creator', default=None, help=_('Set the book creator')) parser.add_option('--producer', default=None, help=_('Set the book producer')) parser.add_option('--get-cover', action='store_true', default=False, help=_('Extract cover from LRF file. Note that the LRF format has no defined cover, so we use some heuristics to guess the cover.')) parser.add_option('--bookid', action='store', type='string', default=None, dest='book_id', help=_('Set book ID')) # The SumPage element specifies the number of "View"s (visible pages for the BookSetting element conditions) of the content. # Basically, the total pages per the page size, font size, etc. when the # LRF is first created. Since this will change as the book is reflowed, it # is probably not worth using. 
# parser.add_option("-p", "--page", action="store", type="string", \ # dest="page", help=_("Don't know what this is for")) return parser def set_metadata(stream, mi): lrf = LRFMetaFile(stream) if mi.title: lrf.title = mi.title if mi.authors: lrf.author = ', '.join(mi.authors) if mi.tags: lrf.category = mi.tags[0] if getattr(mi, 'category', False): lrf.category = mi.category if mi.comments: lrf.free_text = mi.comments if mi.author_sort: lrf.author_reading = mi.author_sort if mi.publisher: lrf.publisher = mi.publisher def main(args=sys.argv): parser = option_parser() options, args = parser.parse_args(args) if len(args) != 2: parser.print_help() print() print('No lrf file specified') return 1 lrf = LRFMetaFile(open(args[1], "r+b")) if options.title: lrf.title = options.title if options.title_reading is not None: lrf.title_reading = options.title_reading if options.author_reading is not None: lrf.author_reading = options.author_reading if options.author: lrf.author = options.author if options.publisher: lrf.publisher = options.publisher if options.classification: lrf.classification = options.classification if options.category: lrf.category = options.category if options.creator: lrf.creator = options.creator if options.producer: lrf.producer = options.producer if options.thumbnail: path = os.path.expanduser(os.path.expandvars(options.thumbnail)) with open(path, "rb") as f: lrf.thumbnail = f.read() if options.book_id is not None: lrf.book_id = options.book_id if options.comment: path = os.path.expanduser(os.path.expandvars(options.comment)) with open(path, 'rb') as f: lrf.free_text = f.read().decode('utf-8', 'replace') if options.get_thumbnail: t = lrf.thumbnail td = "None" if t and len(t) > 0: td = os.path.basename(args[1])+"_thumbnail."+lrf.thumbail_extension() with open(td, "wb") as f: f.write(t) fields = LRFMetaFile.__dict__.items() fields.sort() for f in fields: if "XML" in str(f): print(str(f[1]) + ":", lrf.__getattribute__(f[0]).encode('utf-8')) if 
options.get_thumbnail: print("Thumbnail:", td) if options.get_cover: try: ext, data = lrf.get_cover() except: # Fails on books created by LRFCreator 1.0 ext, data = None, None if data: cover = os.path.splitext(os.path.basename(args[1]))[0]+"_cover."+ext with open(cover, 'wb') as f: f.write(data) print('Cover:', cover) else: print('Could not find cover in the LRF file') if __name__ == '__main__': sys.exit(main())
27,568
Python
.py
650
33.187692
154
0.584048
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,438
tags.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/tags.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' '''''' import struct from calibre.ebooks.lrf import LRFParseError class Tag: tags = { 0x00 : (6, "*ObjectStart"), 0x01 : (0, "*ObjectEnd"), 0x02 : (4, "*ObjectInfoLink"), 0x03 : (4, "*Link"), 0x04 : (4, "*StreamSize"), 0x05 : (0, "*StreamStart"), 0x06 : (0, "*StreamEnd"), 0x07 : (4, None), 0x08 : (4, None), 0x09 : (4, None), 0x0A : (4, None), 0x0B : ("type_one", "*ContainedObjectsList"), 0x0D : (2, None), 0x0E : (2, None), 0x11 : (2, None), 0x12 : (2, None), 0x13 : (2, None), 0x14 : (2, None), 0x15 : (2, None), 0x16 : ("string", None), 0x17 : (4, None), 0x18 : (4, None), 0x19 : (2, None), 0x1A : (2, None), 0x1B : (2, None), 0x1C : (2, None), 0x1D : (2, None), 0x1E : (2, None), 0x21 : (2, None), 0x22 : (2, None), 0x23 : (2, None), 0x24 : (2, None), 0x25 : (2, None), 0x26 : (2, None), 0x27 : (2, None), 0x28 : (2, None), 0x29 : (6, None), 0x2A : (2, None), 0x2B : (2, None), 0x2C : (2, None), 0x2D : (4, None), 0x2E : (2, None), 0x31 : (2, None), 0x32 : (2, None), 0x33 : (2, None), 0x34 : (4, None), 0x35 : (2, None), 0x36 : (2, None), 0x37 : (4, None), 0x38 : (2, None), 0x39 : (2, None), 0x3A : (2, None), 0x3C : (2, None), 0x3D : (2, None), 0x3E : (2, None), 0x41 : (2, None), 0x42 : (2, None), 0x44 : (4, None), 0x45 : (4, None), 0x46 : (2, None), 0x47 : (2, None), 0x48 : (2, None), 0x49 : (8, None), 0x4A : (8, None), 0x4B : (4, None), 0x4C : (4, None), 0x4D : (0, None), 0x4E : (12, None), 0x51 : (2, None), 0x52 : (2, None), 0x53 : (4, None), 0x54 : (2, "*StreamFlags"), 0x55 : ("string", None), 0x56 : (2, None), 0x57 : (2, None), 0x58 : (2, None), 0x59 : ("string", None), 0x5A : ("string", None), 0x5B : (4, None), 0x5C : ("type_one", None), 0x5D : ("string", None), 0x5E : (2, None), 0x61 : (2, None), 0x62 : (0, None), 0x63 : (0, None), 0x64 : (0, None), 0x65 : (0, None), 0x66 : (0, None), 0x67 : (0, None), 0x68 : (0, None), 0x69 : (0, None), 0x6A : (0, None), 0x6B : (0, None), 0x6C : (8, 
None), 0x6D : (2, None), 0x6E : (0, None), 0x71 : (0, None), 0x72 : (0, None), 0x73 : (10, None), 0x75 : (2, None), 0x76 : (2, None), 0x77 : (2, None), 0x78 : ("tag_78", None), 0x79 : (2, None), 0x7A : (2, None), 0x7B : (4, None), 0x7C : (4, "*ParentPageTree"), 0x81 : (0, None), 0x82 : (0, None), 0xA1 : (4, None), 0xA2 : (0, None), 0xA5 : ("unknown", None), 0xA6 : (0, None), 0xA7 : (4, None), 0xA8 : (0, None), 0xA9 : (0, None), 0xAA : (0, None), 0xAB : (0, None), 0xAC : (0, None), 0xAD : (0, None), 0xAE : (0, None), 0xB1 : (0, None), 0xB2 : (0, None), 0xB3 : (0, None), 0xB4 : (0, None), 0xB5 : (0, None), 0xB6 : (0, None), 0xB7 : (0, None), 0xB8 : (0, None), 0xB9 : (0, None), 0xBA : (0, None), 0xBB : (0, None), 0xBC : (0, None), 0xBD : (0, None), 0xBE : (0, None), 0xC1 : (0, None), 0xC2 : (0, None), 0xC3 : (2, None), 0xC4 : (0, None), 0xC5 : (2, None), 0xC6 : (2, None), 0xC7 : (0, None), 0xC8 : (2, None), 0xC9 : (0, None), 0xCA : (2, None), 0xCB : ("unknown", None), 0xCC : (2, None), 0xD1 : (12, None), 0xD2 : (0, None), 0xD4 : (2, None), 0xD6 : (0, None), 0xD7 : (14, None), 0xD8 : (4, None), 0xD9 : (8, None), 0xDA : (2, None), 0xDB : (2, None), 0xDC : (2, None), 0xDD : (2, None), 0xF1 : (2, None), 0xF2 : (4, None), 0xF3 : (4, None), 0xF4 : (2, None), 0xF5 : (4, None), 0xF6 : (4, None), 0xF7 : (4, None), 0xF8 : (4, None), 0xF9 : (6, None), } name_map = {} for key in tags.keys(): temp = tags[key][1] if temp is not None: name_map[key] = temp def __init__(self, stream): self.offset = stream.tell() tag_id = struct.unpack("<BB", stream.read(2)) if tag_id[1] != 0xF5: raise LRFParseError("Bad tag ID %02X at %d"%(tag_id[1], self.offset)) if tag_id[0] not in self.__class__.tags: raise LRFParseError("Unknown tag ID: F5%02X" % tag_id[0]) self.id = 0xF500 + tag_id[0] size, self.name = self.__class__.tags[tag_id[0]] if isinstance(size, str): parser = getattr(self, size + '_parser') self.contents = parser(stream) else: self.contents = stream.read(size) def __str__(self): s = "Tag 
%04X " % self.id if self.name: s += self.name s += f" at {self.offset:08X}, contents: {repr(self.contents)}" return s @property def byte(self): if len(self.contents) != 1: raise LRFParseError("Bad parameter for tag ID: %04X" % self.id) return struct.unpack("<B", self.contents)[0] @property def word(self): if len(self.contents) != 2: raise LRFParseError("Bad parameter for tag ID: %04X" % self.id) return struct.unpack("<H", self.contents)[0] @property def sword(self): if len(self.contents) != 2: raise LRFParseError("Bad parameter for tag ID: %04X" % self.id) return struct.unpack("<h", self.contents)[0] @property def dword(self): if len(self.contents) != 4: raise LRFParseError("Bad parameter for tag ID: %04X" % self.id) return struct.unpack("<I", self.contents)[0] def dummy_parser(self, stream): raise LRFParseError("Unknown tag at %08X" % stream.tell()) @classmethod def string_parser(self, stream): size = struct.unpack("<H", stream.read(2))[0] return str(stream.read(size), "utf_16") def type_one_parser(self, stream): cnt = struct.unpack("<H", stream.read(2))[0] res = [] while cnt > 0: res.append(struct.unpack("<I", stream.read(4))[0]) cnt -= 1 return res def tag_78_parser(self, stream): pos = stream.tell() res = [] res.append(struct.unpack("<I", stream.read(4))[0]) tag = Tag(stream) if tag.id != 0xF516: raise LRFParseError("Bad tag 78 at %08X" % pos) res.append(tag.contents) res.append(struct.unpack("<H", stream.read(2))[0]) return res
8,699
Python
.py
243
21.460905
81
0.377177
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,439
lrfparser.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/lrfparser.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' '''''' import array import codecs import logging import os import re import sys from itertools import chain from calibre import setup_cli_handlers from calibre.ebooks.lrf.meta import LRFMetaFile from calibre.ebooks.lrf.objects import BookAttr, Font, PageTree, StyleObject, Text, TOCObject, get_object, ruby_tags from calibre.utils.config import OptionParser from calibre.utils.filenames import ascii_filename from polyglot.builtins import itervalues class LRFDocument(LRFMetaFile): class temp: pass def __init__(self, stream): LRFMetaFile.__init__(self, stream) self.scramble_key = self.xor_key self.page_trees = [] self.font_map = {} self.image_map = {} self.toc = '' self.keep_parsing = True def parse(self): self._parse_objects() self.metadata = LRFDocument.temp() for a in ('title', 'title_reading', 'author', 'author_reading', 'book_id', 'classification', 'free_text', 'publisher', 'label', 'category'): setattr(self.metadata, a, getattr(self, a)) self.doc_info = LRFDocument.temp() for a in ('thumbnail', 'language', 'creator', 'producer', 'page'): setattr(self.doc_info, a, getattr(self, a)) self.doc_info.thumbnail_extension = self.thumbail_extension() self.device_info = LRFDocument.temp() for a in ('dpi', 'width', 'height'): setattr(self.device_info, a, getattr(self, a)) def _parse_objects(self): self.objects = {} self._file.seek(self.object_index_offset) obj_array = array.array("I", self._file.read(4*4*self.number_of_objects)) if ord(array.array("i",[1]).tobytes()[0:1])==0: # big-endian obj_array.byteswap() for i in range(self.number_of_objects): if not self.keep_parsing: break objid, objoff, objsize = obj_array[i*4:i*4+3] self._parse_object(objid, objoff, objsize) for obj in self.objects.values(): if not self.keep_parsing: break if hasattr(obj, 'initialize'): obj.initialize() def _parse_object(self, objid, objoff, objsize): obj = get_object(self, self._file, objid, objoff, objsize, 
self.scramble_key) self.objects[objid] = obj if isinstance(obj, PageTree): self.page_trees.append(obj) elif isinstance(obj, TOCObject): self.toc = obj elif isinstance(obj, BookAttr): self.ruby_tags = {} for h in ruby_tags.values(): attr = h[0] if hasattr(obj, attr): self.ruby_tags[attr] = getattr(obj, attr) def __iter__(self): yield from self.page_trees def write_files(self): for obj in chain(itervalues(self.image_map), itervalues(self.font_map)): with open(obj.file, 'wb') as f: f.write(obj.stream) def to_xml(self, write_files=True): bookinfo = '<BookInformation>\n<Info version="1.1">\n<BookInfo>\n' bookinfo += '<Title reading="%s">%s</Title>\n'%(self.metadata.title_reading, self.metadata.title) bookinfo += '<Author reading="%s">%s</Author>\n'%(self.metadata.author_reading, self.metadata.author) bookinfo += '<BookID>%s</BookID>\n'%(self.metadata.book_id,) bookinfo += '<Publisher reading="">%s</Publisher>\n'%(self.metadata.publisher,) bookinfo += '<Label reading="">%s</Label>\n'%(self.metadata.label,) bookinfo += '<Category reading="">%s</Category>\n'%(self.metadata.category,) bookinfo += '<Classification reading="">%s</Classification>\n'%(self.metadata.classification,) bookinfo += '<FreeText reading="">%s</FreeText>\n</BookInfo>\n<DocInfo>\n'%(self.metadata.free_text,) th = self.doc_info.thumbnail if th: prefix = ascii_filename(self.metadata.title) bookinfo += '<CThumbnail file="%s" />\n'%(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension,) if write_files: with open(prefix+'_thumbnail.'+self.doc_info.thumbnail_extension, 'wb') as f: f.write(th) bookinfo += '<Language reading="">%s</Language>\n'%(self.doc_info.language,) bookinfo += '<Creator reading="">%s</Creator>\n'%(self.doc_info.creator,) bookinfo += '<Producer reading="">%s</Producer>\n'%(self.doc_info.producer,) bookinfo += '<SumPage>%s</SumPage>\n</DocInfo>\n</Info>\n%s</BookInformation>\n'%(self.doc_info.page,self.toc) pages = '' done_main = False pt_id = -1 for page_tree in self: if not done_main: 
done_main = True pages += '<Main>\n' close = '</Main>\n' pt_id = page_tree.id else: pages += '<PageTree objid="%d">\n'%(page_tree.id,) close = '</PageTree>\n' for page in page_tree: pages += str(page) pages += close traversed_objects = [int(i) for i in re.findall(r'objid="(\w+)"', pages)] + [pt_id] objects = '\n<Objects>\n' styles = '\n<Style>\n' for obj in self.objects: obj = self.objects[obj] if obj.id in traversed_objects: continue if isinstance(obj, (Font, Text, TOCObject)): continue if isinstance(obj, StyleObject): styles += str(obj) else: objects += str(obj) styles += '</Style>\n' objects += '</Objects>\n' if write_files: self.write_files() return '<BBeBXylog version="1.0">\n' + bookinfo + pages + styles + objects + '</BBeBXylog>' def option_parser(): parser = OptionParser(usage=_('%prog book.lrf\nConvert an LRF file into an LRS (XML UTF-8 encoded) file')) parser.add_option('--output', '-o', default=None, help=_('Output LRS file'), dest='out') parser.add_option('--dont-output-resources', default=True, action='store_false', help=_('Do not save embedded image and font files to disk'), dest='output_resources') parser.add_option('--verbose', default=False, action='store_true', dest='verbose', help=_('Be more verbose')) return parser def main(args=sys.argv, logger=None): parser = option_parser() opts, args = parser.parse_args(args) if logger is None: level = logging.DEBUG if opts.verbose else logging.INFO logger = logging.getLogger('lrf2lrs') setup_cli_handlers(logger, level) if len(args) != 2: parser.print_help() return 1 if opts.out is None: opts.out = os.path.join(os.path.dirname(args[1]), os.path.splitext(os.path.basename(args[1]))[0]+".lrs") logger.info(_('Parsing LRF...')) d = LRFDocument(open(args[1], 'rb')) d.parse() logger.info(_('Creating XML...')) with codecs.open(os.path.abspath(os.path.expanduser(opts.out)), 'wb', 'utf-8') as f: f.write('<?xml version="1.0" encoding="UTF-8"?>\n') f.write(d.to_xml(write_files=opts.output_resources)) logger.info(_('LRS 
written to ')+opts.out) return 0 if __name__ == '__main__': sys.exit(main())
7,418
Python
.py
160
36.93125
118
0.590966
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,440
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/__init__.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' """ This package contains logic to read and write LRF files. The LRF file format is documented at U{http://www.sven.de/librie/Librie/LrfFormat}. """ from calibre.ebooks import ConversionError from calibre.ebooks.lrf.fonts import FONT_FILE_MAP from calibre.ebooks.lrf.pylrs.pylrs import BlockStyle, Header, TextBlock, TextStyle from calibre.ebooks.lrf.pylrs.pylrs import Book as _Book __docformat__ = "epytext" class LRFParseError(Exception): pass class PRS500_PROFILE: screen_width = 600 screen_height = 775 dpi = 166 # Number of pixels to subtract from screen_height when calculating height of text area fudge = 0 font_size = 10 #: Default (in pt) parindent = 10 #: Default (in pt) line_space = 1.2 # : Default (in pt) header_font_size = 6 #: In pt header_height = 30 # : In px default_fonts = {'sans': "Swis721 BT Roman", 'mono': "Courier10 BT Roman", 'serif': "Dutch801 Rm BT Roman"} name = 'prs500' def find_custom_fonts(options, logger): from calibre.utils.fonts.scanner import font_scanner fonts = {'serif' : None, 'sans' : None, 'mono' : None} def family(cmd): return cmd.split(',')[-1].strip() if options.serif_family: f = family(options.serif_family) fonts['serif'] = font_scanner.legacy_fonts_for_family(f) if not fonts['serif']: logger.warn('Unable to find serif family %s'%f) if options.sans_family: f = family(options.sans_family) fonts['sans'] = font_scanner.legacy_fonts_for_family(f) if not fonts['sans']: logger.warn('Unable to find sans family %s'%f) if options.mono_family: f = family(options.mono_family) fonts['mono'] = font_scanner.legacy_fonts_for_family(f) if not fonts['mono']: logger.warn('Unable to find mono family %s'%f) return fonts def Book(options, logger, font_delta=0, header=None, profile=PRS500_PROFILE, **settings): from uuid import uuid4 ps = {} ps['topmargin'] = options.top_margin ps['evensidemargin'] = options.left_margin ps['oddsidemargin'] = options.left_margin 
ps['textwidth'] = profile.screen_width - (options.left_margin + options.right_margin) ps['textheight'] = profile.screen_height - (options.top_margin + options.bottom_margin) \ - profile.fudge if header: hdr = Header() hb = TextBlock(textStyle=TextStyle(align='foot', fontsize=int(profile.header_font_size*10)), blockStyle=BlockStyle(blockwidth=ps['textwidth'])) hb.append(header) hdr.PutObj(hb) ps['headheight'] = profile.header_height ps['headsep'] = options.header_separation ps['header'] = hdr ps['topmargin'] = 0 ps['textheight'] = profile.screen_height - (options.bottom_margin + ps['topmargin']) \ - ps['headheight'] - ps['headsep'] - profile.fudge fontsize = int(10*profile.font_size+font_delta*20) baselineskip = fontsize + 20 fonts = find_custom_fonts(options, logger) tsd = dict(fontsize=fontsize, parindent=int(10*profile.parindent), linespace=int(10*profile.line_space), baselineskip=baselineskip, wordspace=10*options.wordspace) if fonts['serif'] and 'normal' in fonts['serif']: tsd['fontfacename'] = fonts['serif']['normal'][1] book = _Book(textstyledefault=tsd, pagestyledefault=ps, blockstyledefault=dict(blockwidth=ps['textwidth']), bookid=uuid4().hex, **settings) for family in fonts.keys(): if fonts[family]: for font in fonts[family].values(): book.embed_font(*font) FONT_FILE_MAP[font[1]] = font[0] for family in ['serif', 'sans', 'mono']: if not fonts[family]: fonts[family] = {'normal' : (None, profile.default_fonts[family])} elif 'normal' not in fonts[family]: raise ConversionError('Could not find the normal version of the ' + family + ' font') return book, fonts
4,421
Python
.py
97
36.608247
99
0.60803
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,441
fonts.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/fonts.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' from PIL import ImageFont ''' Default fonts used in the PRS500 ''' LIBERATION_FONT_MAP = { 'Swis721 BT Roman' : 'LiberationSans-Regular', 'Dutch801 Rm BT Roman' : 'LiberationSerif-Regular', 'Courier10 BT Roman' : 'LiberationMono-Regular', } FONT_FILE_MAP = {} def get_font(name, size, encoding='unic'): ''' Get an ImageFont object by name. @param size: Font height in pixels. To convert from pts: sz in pixels = (dpi/72) * size in pts @param encoding: Font encoding to use. E.g. 'unic', 'symbol', 'ADOB', 'ADBE', 'aprm' @param manager: A dict that will store the PersistentTemporary ''' from calibre.utils.resources import get_path as P if name in LIBERATION_FONT_MAP: return ImageFont.truetype(P('fonts/liberation/%s.ttf' % LIBERATION_FONT_MAP[name]), size, encoding=encoding) elif name in FONT_FILE_MAP: return ImageFont.truetype(FONT_FILE_MAP[name], size, encoding=encoding)
1,095
Python
.py
25
37.84
116
0.659454
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,442
objects.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/objects.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' import array import collections import io import re import struct import zlib from calibre import prepare_string_for_xml from calibre.ebooks.html_entities import entity_to_unicode_in_python from calibre.ebooks.lrf import PRS500_PROFILE, LRFParseError from calibre.ebooks.lrf.tags import Tag ruby_tags = { 0xF575: ['rubyAlignAndAdjust', 'W'], 0xF576: ['rubyoverhang', 'W', {0: 'none', 1:'auto'}], 0xF577: ['empdotsposition', 'W', {1: 'before', 2:'after'}], 0xF578: ['','parse_empdots'], 0xF579: ['emplineposition', 'W', {1: 'before', 2:'after'}], 0xF57A: ['emplinetype', 'W', {0: 'none', 0x10: 'solid', 0x20: 'dashed', 0x30: 'double', 0x40: 'dotted'}] } class LRFObject: tag_map = { 0xF500: ['', ''], 0xF502: ['infoLink', 'D'], 0xF501: ['', ''], } @classmethod def descramble_buffer(cls, buf, l, xorKey): i = 0 a = array.array('B',buf) while l>0: a[i] ^= xorKey i+=1 l-=1 return a.tobytes() @classmethod def parse_empdots(self, tag, f): self.refEmpDotsFont, self.empDotsFontName, self.empDotsCode = tag.contents @staticmethod def tag_to_val(h, obj, tag, stream): val = None if h[1] == 'D': val = tag.dword elif h[1] == 'W': val = tag.word elif h[1] == 'w': val = tag.word if val > 0x8000: val -= 0x10000 elif h[1] == 'B': val = tag.byte elif h[1] == 'P': val = tag.contents elif h[1] != '': val = getattr(obj, h[1])(tag, stream) if len(h) > 2: val = h[2](val) if callable(h[2]) else h[2][val] return val def __init__(self, document, stream, id, scramble_key, boundary): self._scramble_key = scramble_key self._document = document self.id = id while stream.tell() < boundary: tag = Tag(stream) self.handle_tag(tag, stream) def parse_bg_image(self, tag, f): self.bg_image_mode, self.bg_image_id = struct.unpack("<HI", tag.contents) def handle_tag(self, tag, stream, tag_map=None): if tag_map is None: tag_map = self.__class__.tag_map if tag.id in tag_map: h = tag_map[tag.id] val = LRFObject.tag_to_val(h, self, tag, 
stream) if h[1] != '' and h[0] != '': setattr(self, h[0], val) else: raise LRFParseError(f"Unknown tag in {self.__class__.__name__}: {str(tag)}") def __iter__(self): yield from range(0) def __str__(self): return self.__class__.__name__ class LRFContentObject(LRFObject): tag_map = {} def __init__(self, byts, objects): self.stream = byts if hasattr(byts, 'read') else io.BytesIO(byts) length = self.stream_size() self.objects = objects self._contents = [] self.current = 0 self.in_container = True self.parse_stream(length) def parse_stream(self, length): while self.in_container and self.stream.tell() < length: tag = Tag(self.stream) self.handle_tag(tag) def stream_size(self): pos = self.stream.tell() self.stream.seek(0, 2) size = self.stream.tell() self.stream.seek(pos) return size def handle_tag(self, tag): if tag.id in self.tag_map: action = self.tag_map[tag.id] if isinstance(action, str): func, args = action, () else: func, args = action[0], (action[1],) getattr(self, func)(tag, *args) else: raise LRFParseError(f"Unknown tag in {self.__class__.__name__}: {str(tag)}") def __iter__(self): yield from self._contents class LRFStream(LRFObject): tag_map = { 0xF504: ['', 'read_stream_size'], 0xF554: ['stream_flags', 'W'], 0xF505: ['', 'read_stream'], 0xF506: ['', 'end_stream'], } tag_map.update(LRFObject.tag_map) def __init__(self, document, stream, id, scramble_key, boundary): self.stream = '' self.stream_size = 0 self.stream_read = False LRFObject.__init__(self, document, stream, id, scramble_key, boundary) def read_stream_size(self, tag, stream): self.stream_size = tag.dword def end_stream(self, tag, stream): self.stream_read = True def read_stream(self, tag, stream): if self.stream_read: raise LRFParseError('There can be only one stream per object') if not hasattr(self, 'stream_flags'): raise LRFParseError('Stream flags not initialized') self.stream = stream.read(self.stream_size) if self.stream_flags & 0x200 !=0: l = len(self.stream) key = self._scramble_key&0xFF if 
key != 0 and key <= 0xF0: key = l % key + 0xF else: key = 0 if l > 0x400 and (isinstance(self, ImageStream) or isinstance(self, Font) or isinstance(self, SoundStream)): l = 0x400 self.stream = self.descramble_buffer(self.stream, l, key) if self.stream_flags & 0x100 !=0: decomp_size = struct.unpack("<I", self.stream[:4])[0] self.stream = zlib.decompress(self.stream[4:]) if len(self.stream) != decomp_size: raise LRFParseError("Stream decompressed size is wrong!") if stream.read(2) != b'\x06\xF5': print("Warning: corrupted end-of-stream tag at %08X; skipping it"%(stream.tell()-2)) self.end_stream(None, None) class PageTree(LRFObject): tag_map = { 0xF55C: ['_contents', 'P'], } tag_map.update(LRFObject.tag_map) def __iter__(self): for id in getattr(self, '_contents', []): yield self._document.objects[id] class StyleObject: def _tags_to_xml(self): s = '' for h in self.tag_map.values(): attr = h[0] if hasattr(self, attr): s += '%s="%s" '%(attr, getattr(self, attr)) return s def __str__(self): s = '<%s objid="%s" stylelabel="%s" '%(self.__class__.__name__.replace('Attr', 'Style'), self.id, self.id) s += self._tags_to_xml() s += '/>\n' return s def as_dict(self): d = {} for h in self.tag_map.values(): attr = h[0] if hasattr(self, attr): d[attr] = getattr(self, attr) return d class PageAttr(StyleObject, LRFObject): tag_map = { 0xF507: ['oddheaderid', 'D'], 0xF508: ['evenheaderid', 'D'], 0xF509: ['oddfooterid', 'D'], 0xF50A: ['evenfooterid', 'D'], 0xF521: ['topmargin', 'W'], 0xF522: ['headheight', 'W'], 0xF523: ['headsep', 'W'], 0xF524: ['oddsidemargin', 'W'], 0xF52C: ['evensidemargin', 'W'], 0xF525: ['textheight', 'W'], 0xF526: ['textwidth', 'W'], 0xF527: ['footspace', 'W'], 0xF528: ['footheight', 'W'], 0xF535: ['layout', 'W', {0x41: 'TbRl', 0x34: 'LrTb'}], 0xF52B: ['pageposition', 'W', {0: 'any', 1:'upper', 2: 'lower'}], 0xF52A: ['setemptyview', 'W', {1: 'show', 0: 'empty'}], 0xF5DA: ['setwaitprop', 'W', {1: 'replay', 2: 'noreplay'}], 0xF529: ['', "parse_bg_image"], } 
tag_map.update(LRFObject.tag_map) @classmethod def to_css(cls, obj, inline=False): return '' class Color: def __init__(self, val): self.a, self.r, self.g, self.b = val & 0xFF, (val>>8)&0xFF, (val>>16)&0xFF, (val>>24)&0xFF def __str__(self): return '0x%02x%02x%02x%02x'%(self.a, self.r, self.g, self.b) def __len__(self): return 4 def __getitem__(self, i): # Qt compatible ordering and values return (self.r, self.g, self.b, 0xff-self.a)[i] # In Qt 0xff is opaque while in LRS 0x00 is opaque def to_html(self): return 'rgb(%d, %d, %d)'%(self.r, self.g, self.b) class EmptyPageElement: def __iter__(self): yield from range(0) def __str__(self): return str(self) class PageDiv(EmptyPageElement): def __init__(self, pain, spacesize, linewidth, linecolor): self.pain, self.spacesize, self.linewidth = pain, spacesize, linewidth self.linecolor = Color(linecolor) def __str__(self): return '\n<PageDiv pain="%s" spacesize="%s" linewidth="%s" linecolor="%s" />\n'%\ (self.pain, self.spacesize, self.linewidth, self.color) class RuledLine(EmptyPageElement): linetype_map = {0x00: 'none', 0x10: 'solid', 0x20: 'dashed', 0x30: 'double', 0x40: 'dotted', 0x13: 'unknown13'} def __init__(self, linelength, linetype, linewidth, linecolor): self.linelength, self.linewidth = linelength, linewidth self.linetype = self.linetype_map[linetype] self.linecolor = Color(linecolor) self.id = -1 def __str__(self): return '\n<RuledLine linelength="%s" linetype="%s" linewidth="%s" linecolor="%s" />\n'%\ (self.linelength, self.linetype, self.linewidth, self.linecolor) class Wait(EmptyPageElement): def __init__(self, time): self.time = time def __str__(self): return '\n<Wait time="%d" />\n'%(self.time) class Locate(EmptyPageElement): pos_map = {1:'bottomleft', 2:'bottomright', 3:'topright', 4:'topleft', 5:'base'} def __init__(self, pos): self.pos = self.pos_map[pos] def __str__(self): return '\n<Locate pos="%s" />\n'%(self.pos) class BlockSpace(EmptyPageElement): def __init__(self, xspace, yspace): self.xspace, 
self.yspace = xspace, yspace def __str__(self): return '\n<BlockSpace xspace="%d" yspace="%d" />\n'%\ (self.xspace, self.yspace) class Page(LRFStream): tag_map = { 0xF503: ['style_id', 'D'], 0xF50B: ['obj_list', 'P'], 0xF571: ['', ''], 0xF57C: ['parent_page_tree', 'D'], } tag_map.update(PageAttr.tag_map) tag_map.update(LRFStream.tag_map) style = property(fget=lambda self : self._document.objects[self.style_id]) evenheader = property(fget=lambda self : self._document.objects[self.style.evenheaderid]) evenfooter = property(fget=lambda self : self._document.objects[self.style.evenfooterid]) oddheader = property(fget=lambda self : self._document.objects[self.style.oddheaderid]) oddfooter = property(fget=lambda self : self._document.objects[self.style.oddfooterid]) class Content(LRFContentObject): tag_map = { 0xF503: 'link', 0xF54E: 'page_div', 0xF547: 'x_space', 0xF546: 'y_space', 0xF548: 'do_pos', 0xF573: 'ruled_line', 0xF5D4: 'wait', 0xF5D6: 'sound_stop', } def __init__(self, byts, objects): self.in_blockspace = False LRFContentObject.__init__(self, byts, objects) def link(self, tag): self.close_blockspace() self._contents.append(self.objects[tag.dword]) def page_div(self, tag): self.close_blockspace() pars = struct.unpack("<HIHI", tag.contents) self._contents.append(PageDiv(*pars)) def x_space(self, tag): self.xspace = tag.word self.in_blockspace = True def y_space(self, tag): self.yspace = tag.word self.in_blockspace = True def do_pos(self, tag): self.pos = tag.wordself.pos_map[tag.word] self.in_blockspace = True def ruled_line(self, tag): self.close_blockspace() pars = struct.unpack("<HHHI", tag.contents) self._contents.append(RuledLine(*pars)) def wait(self, tag): self.close_blockspace() self._contents.append(Wait(tag.word)) def sound_stop(self, tag): self.close_blockspace() def close_blockspace(self): if self.in_blockspace: if hasattr(self, 'pos'): self._contents.append(Locate(self.pos)) delattr(self, 'pos') else: xspace = self.xspace if hasattr(self, 'xspace') 
else 0 yspace = self.yspace if hasattr(self, 'yspace') else 0 self._contents.append(BlockSpace(xspace, yspace)) if hasattr(self, 'xspace'): delattr(self, 'xspace') if hasattr(self, 'yspace'): delattr(self, 'yspace') def header(self, odd): id = self._document.objects[self.style_id].oddheaderid if odd else self._document.objects[self.style_id].evenheaderid return self._document.objects[id] def footer(self, odd): id = self._document.objects[self.style_id].oddfooterid if odd else self._document.objects[self.style_id].evenfooterid return self._document.objects[id] def initialize(self): self.content = Page.Content(self.stream, self._document.objects) def __iter__(self): yield from self.content def __str__(self): s = '\n<Page pagestyle="%d" objid="%d">\n'%(self.style_id, self.id) for i in self: s += str(i) s += '\n</Page>\n' return s def to_html(self): s = '' for i in self: s += i.to_html() return s class BlockAttr(StyleObject, LRFObject): tag_map = { 0xF531: ['blockwidth', 'W'], 0xF532: ['blockheight', 'W'], 0xF533: ['blockrule', 'W', { 0x14: "horz-fixed", 0x12: "horz-adjustable", 0x41: "vert-fixed", 0x21: "vert-adjustable", 0x44: "block-fixed", 0x22: "block-adjustable"}], 0xF534: ['bgcolor', 'D', Color], 0xF535: ['layout', 'W', {0x41: 'TbRl', 0x34: 'LrTb'}], 0xF536: ['framewidth', 'W'], 0xF537: ['framecolor', 'D', Color], 0xF52E: ['framemode', 'W', {0: 'none', 2: 'curve', 1:'square'}], 0xF538: ['topskip', 'W'], 0xF539: ['sidemargin', 'W'], 0xF53A: ['footskip', 'W'], 0xF529: ['', 'parse_bg_image'], } tag_map.update(LRFObject.tag_map) @classmethod def to_css(cls, obj, inline=False): ans = '' def item(line): ans = '' if inline else '\t' ans += line ans += ' ' if inline else '\n' return ans if hasattr(obj, 'sidemargin'): margin = str(obj.sidemargin) + 'px' ans += item('margin-left: %(m)s; margin-right: %(m)s;'%dict(m=margin)) if hasattr(obj, 'topskip'): ans += item('margin-top: %dpx;'%obj.topskip) if hasattr(obj, 'footskip'): ans += item('margin-bottom: %dpx;'%obj.footskip) 
if hasattr(obj, 'framewidth'): ans += item('border: solid %dpx'%obj.framewidth) if hasattr(obj, 'framecolor') and obj.framecolor.a < 255: ans += item('border-color: %s;'%obj.framecolor.to_html()) if hasattr(obj, 'bgcolor') and obj.bgcolor.a < 255: ans += item('background-color: %s;'%obj.bgcolor.to_html()) return ans class TextCSS: @classmethod def to_css(cls, obj, inline=False): ans = '' def item(line): ans = '' if inline else '\t' ans += line ans += ' ' if inline else '\n' return ans fs = getattr(obj, 'fontsize', None) if fs is not None: ans += item('font-size: %fpt;'%(int(fs)/10)) fw = getattr(obj, 'fontweight', None) if fw is not None: ans += item('font-weight: %s;'%('bold' if int(fw) >= 700 else 'normal')) fn = getattr(obj, 'fontfacename', None) if fn is not None: fn = cls.FONT_MAP[fn] ans += item('font-family: %s;'%fn) fg = getattr(obj, 'textcolor', None) if fg is not None: fg = fg.to_html() ans += item('color: %s;'%fg) bg = getattr(obj, 'textbgcolor', None) if bg is not None: bg = bg.to_html() ans += item('background-color: %s;'%bg) al = getattr(obj, 'align', None) if al is not None: al = dict(head='left', center='center', foot='right') ans += item('text-align: %s;'%al) lh = getattr(obj, 'linespace', None) if lh is not None: ans += item('text-align: %fpt;'%(int(lh)/10)) pi = getattr(obj, 'parindent', None) if pi is not None: ans += item('text-indent: %fpt;'%(int(pi)/10)) return ans class TextAttr(StyleObject, LRFObject, TextCSS): FONT_MAP = collections.defaultdict(lambda : 'serif') for key, value in PRS500_PROFILE.default_fonts.items(): FONT_MAP[value] = key tag_map = { 0xF511: ['fontsize', 'w'], 0xF512: ['fontwidth', 'w'], 0xF513: ['fontescapement', 'w'], 0xF514: ['fontorientation', 'w'], 0xF515: ['fontweight', 'W'], 0xF516: ['fontfacename', 'P'], 0xF517: ['textcolor', 'D', Color], 0xF518: ['textbgcolor', 'D', Color], 0xF519: ['wordspace', 'w'], 0xF51A: ['letterspace', 'w'], 0xF51B: ['baselineskip', 'w'], 0xF51C: ['linespace', 'w'], 0xF51D: ['parindent', 
'w'], 0xF51E: ['parskip', 'w'], 0xF53C: ['align', 'W', {1: 'head', 4: 'center', 8: 'foot'}], 0xF53D: ['column', 'W'], 0xF53E: ['columnsep', 'W'], 0xF5DD: ['charspace', 'w'], 0xF5F1: ['textlinewidth', 'W'], 0xF5F2: ['linecolor', 'D', Color], } tag_map.update(ruby_tags) tag_map.update(LRFObject.tag_map) class Block(LRFStream, TextCSS): tag_map = { 0xF503: ['style_id', 'D'], } tag_map.update(BlockAttr.tag_map) tag_map.update(TextAttr.tag_map) tag_map.update(LRFStream.tag_map) extra_attrs = [i[0] for i in BlockAttr.tag_map.values()] extra_attrs.extend([i[0] for i in TextAttr.tag_map.values()]) style = property(fget=lambda self : self._document.objects[self.style_id]) textstyle = property(fget=lambda self : self._document.objects[self.textstyle_id]) def initialize(self): self.attrs = {} stream = io.BytesIO(self.stream) tag = Tag(stream) if tag.id != 0xF503: raise LRFParseError("Bad block content") obj = self._document.objects[tag.dword] if isinstance(obj, SimpleText): self.name = 'SimpleTextBlock' self.textstyle_id = obj.style_id elif isinstance(obj, Text): self.name = 'TextBlock' self.textstyle_id = obj.style_id elif isinstance(obj, Image): self.name = 'ImageBlock' for attr in ('x0', 'x1', 'y0', 'y1', 'xsize', 'ysize', 'refstream'): self.attrs[attr] = getattr(obj, attr) self.refstream = self._document.objects[self.attrs['refstream']] elif isinstance(obj, Button): self.name = 'ButtonBlock' else: raise LRFParseError("Unexpected block type: "+obj.__class__.__name__) self.content = obj for attr in self.extra_attrs: if hasattr(self, attr): self.attrs[attr] = getattr(self, attr) def __str__(self): s = '\n<%s objid="%d" blockstyle="%s" '%(self.name, self.id, getattr(self, 'style_id', '')) if hasattr(self, 'textstyle_id'): s += 'textstyle="%d" '%(self.textstyle_id,) for attr in self.attrs: s += '%s="%s" '%(attr, self.attrs[attr]) if self.name != 'ImageBlock': s = s.rstrip()+'>\n' s += str(self.content) s += '</%s>\n'%(self.name,) return s return s.rstrip() + ' />\n' def 
to_html(self): if self.name == 'TextBlock': return '<div class="block%s text%s">%s</div>'%(self.style_id, self.textstyle_id, self.content.to_html()) return '' class MiniPage(LRFStream): tag_map = { 0xF541: ['minipagewidth', 'W'], 0xF542: ['minipageheight', 'W'], } tag_map.update(LRFStream.tag_map) tag_map.update(BlockAttr.tag_map) class Text(LRFStream): tag_map = { 0xF503: ['style_id', 'D'], } tag_map.update(TextAttr.tag_map) tag_map.update(LRFStream.tag_map) style = property(fget=lambda self : self._document.objects[self.style_id]) text_map = {0x22: '"', 0x26: '&amp;', 0x27: '\'', 0x3c: '&lt;', 0x3e: '&gt;'} entity_pattern = re.compile(r'&amp;(\S+?);') text_tags = { 0xF581: ['simple_container', 'Italic'], 0xF582: 'end_container', 0xF5B1: ['simple_container', 'Yoko'], 0xF5B2: 'end_container', 0xF5B3: ['simple_container', 'Tate'], 0xF5B4: 'end_container', 0xF5B5: ['simple_container', 'Nekase'], 0xF5B6: 'end_container', 0xF5A1: 'start_para', 0xF5A2: 'end_para', 0xF5A7: 'char_button', 0xF5A8: 'end_container', 0xF5A9: ['simple_container', 'Rubi'], 0xF5AA: 'end_container', 0xF5AB: ['simple_container', 'Oyamoji'], 0xF5AC: 'end_container', 0xF5AD: ['simple_container', 'Rubimoji'], 0xF5AE: 'end_container', 0xF5B7: ['simple_container', 'Sup'], 0xF5B8: 'end_container', 0xF5B9: ['simple_container', 'Sub'], 0xF5BA: 'end_container', 0xF5BB: ['simple_container', 'NoBR'], 0xF5BC: 'end_container', 0xF5BD: ['simple_container', 'EmpDots'], 0xF5BE: 'end_container', 0xF5C1: 'empline', 0xF5C2: 'end_container', 0xF5C3: 'draw_char', 0xF5C4: 'end_container', 0xF5C6: 'box', 0xF5C7: 'end_container', 0xF5CA: 'space', 0xF5D1: 'plot', 0xF5D2: 'cr', } class TextTag: def __init__(self, name, attrs={}, self_closing=False): self.name = name self.attrs = attrs self.self_closing = self_closing def __str__(self): s = '<%s '%(self.name,) for name, val in self.attrs.items(): s += '%s="%s" '%(name, val) return s.rstrip() + (' />' if self.self_closing else '>') def to_html(self): s = '' return s def 
close_html(self): return '' class Span(TextTag): pass linetype_map = {0: 'none', 0x10: 'solid', 0x20: 'dashed', 0x30: 'double', 0x40: 'dotted'} adjustment_map = {1: 'top', 2: 'center', 3: 'baseline', 4: 'bottom'} lineposition_map = {1:'before', 2:'after'} def add_text(self, text): s = str(text, "utf-16-le") if s: s = s.translate(self.text_map) self.content.append(self.entity_pattern.sub(entity_to_unicode_in_python, s)) def end_container(self, tag, stream): self.content.append(None) def start_para(self, tag, stream): self.content.append(self.__class__.TextTag('P')) def close_containers(self, start=0): if len(self.content) == 0: return open_containers = 0 if len(self.content) > 0 and isinstance(self.content[-1], self.__class__.Span): self.content.pop() while start < len(self.content): c = self.content[start] if c is None: open_containers -= 1 elif isinstance(c, self.__class__.TextTag) and not c.self_closing: open_containers += 1 start += 1 self.content.extend(None for i in range(open_containers)) def end_para(self, tag, stream): i = len(self.content)-1 while i > -1: if isinstance(self.content[i], Text.TextTag) and self.content[i].name == 'P': break i -= 1 self.close_containers(start=i) def cr(self, tag, stream): self.content.append(self.__class__.TextTag('CR', self_closing=True)) def char_button(self, tag, stream): self.content.append(self.__class__.TextTag( 'CharButton', attrs={'refobj':tag.dword})) def simple_container(self, tag, name): self.content.append(self.__class__.TextTag(name)) def empline(self, tag, stream): def invalid(op): stream.seek(op) # self.simple_container(None, 'EmpLine') oldpos = stream.tell() try: t = Tag(stream) if t.id not in (0xF579, 0xF57A): raise LRFParseError except LRFParseError: invalid(oldpos) return h = TextAttr.tag_map[t.id] attrs = {} attrs[h[0]] = TextAttr.tag_to_val(h, None, t, None) oldpos = stream.tell() try: t = Tag(stream) if t.id not in (0xF579, 0xF57A): raise LRFParseError h = TextAttr.tag_map[t.id] attrs[h[0]] = 
TextAttr.tag_to_val(h, None, t, None) except LRFParseError: stream.seek(oldpos) if attrs: self.content.append(self.__class__.TextTag( 'EmpLine', attrs=attrs)) def space(self, tag, stream): self.content.append(self.__class__.TextTag('Space', attrs={'xsize':tag.sword}, self_closing=True)) def plot(self, tag, stream): xsize, ysize, refobj, adjustment = struct.unpack("<HHII", tag.contents) plot = self.__class__.TextTag('Plot', {'xsize': xsize, 'ysize': ysize, 'refobj':refobj, 'adjustment':self.adjustment_map[adjustment]}, self_closing=True) plot.refobj = self._document.objects[refobj] self.content.append(plot) def draw_char(self, tag, stream): self.content.append(self.__class__.TextTag('DrawChar', {'line':tag.word})) def box(self, tag, stream): self.content.append(self.__class__.TextTag('Box', {'linetype':self.linetype_map[tag.word]})) def initialize(self): self.content = collections.deque() s = self.stream or b'' stream = io.BytesIO(s) length = len(s) style = self.style.as_dict() current_style = style.copy() text_tags = set(list(TextAttr.tag_map.keys()) + list(Text.text_tags.keys()) + list(ruby_tags.keys())) text_tags -= {0xf500+i for i in range(10)} text_tags.add(0xf5cc) while stream.tell() < length: # Is there some text before a tag? 
def find_first_tag(start): pos = s.find(b'\xf5', start) if pos == -1: return -1 try: stream.seek(pos-1) _t = Tag(stream) if _t.id in text_tags: return pos-1 return find_first_tag(pos+1) except: return find_first_tag(pos+1) start_pos = stream.tell() tag_pos = find_first_tag(start_pos) if tag_pos >= start_pos: if tag_pos > start_pos: self.add_text(s[start_pos:tag_pos]) stream.seek(tag_pos) else: # No tags in this stream self.add_text(s) stream.seek(0, 2) break tag = Tag(stream) if tag.id == 0xF5CC: self.add_text(stream.read(tag.word)) elif tag.id in self.__class__.text_tags: # A Text tag action = self.__class__.text_tags[tag.id] if isinstance(action, str): getattr(self, action)(tag, stream) else: getattr(self, action[0])(tag, action[1]) elif tag.id in TextAttr.tag_map: # A Span attribute action = TextAttr.tag_map[tag.id] if len(self.content) == 0: current_style = style.copy() name, val = action[0], LRFObject.tag_to_val(action, self, tag, None) if name and (name not in current_style or current_style[name] != val): # No existing Span if len(self.content) > 0 and isinstance(self.content[-1], self.__class__.Span): self.content[-1].attrs[name] = val else: self.content.append(self.__class__.Span('Span', {name:val})) current_style[name] = val if len(self.content) > 0: self.close_containers() self.stream = None def __str__(self): s = '' open_containers = collections.deque() for c in self.content: if isinstance(c, str): s += prepare_string_for_xml(c).replace('\0', '') elif c is None: if open_containers: p = open_containers.pop() s += '</%s>'%(p.name,) else: s += str(c) if not c.self_closing: open_containers.append(c) if len(open_containers) > 0: if len(open_containers) == 1: s += '</%s>'%(open_containers[0].name,) else: raise LRFParseError('Malformed text stream %s'%([i.name for i in open_containers if isinstance(i, Text.TextTag)],)) return s def to_html(self): s = '' open_containers = collections.deque() in_p = False for c in self.content: if isinstance(c, str): s += c elif 
c is None: p = open_containers.pop() s += p.close_html() else: if c.name == 'P': in_p = True elif c.name == 'CR': s += '<br />' if in_p else '<p>' else: s += c.to_html() if not c.self_closing: open_containers.append(c) if len(open_containers) > 0: raise LRFParseError('Malformed text stream %s'%([i.name for i in open_containers if isinstance(i, Text.TextTag)],)) return s class Image(LRFObject): tag_map = { 0xF54A: ['', 'parse_image_rect'], 0xF54B: ['', 'parse_image_size'], 0xF54C: ['refstream', 'D'], 0xF555: ['comment', 'P'], } def parse_image_rect(self, tag, f): self.x0, self.y0, self.x1, self.y1 = struct.unpack("<HHHH", tag.contents) def parse_image_size(self, tag, f): self.xsize, self.ysize = struct.unpack("<HH", tag.contents) encoding = property(fget=lambda self : self._document.objects[self.refstream].encoding) data = property(fget=lambda self : self._document.objects[self.refstream].stream) def __str__(self): return '<Image objid="%s" x0="%d" y0="%d" x1="%d" y1="%d" xsize="%d" ysize="%d" refstream="%d" />\n'%\ (self.id, self.x0, self.y0, self.x1, self.y1, self.xsize, self.ysize, self.refstream) class PutObj(EmptyPageElement): def __init__(self, objects, x1, y1, refobj): self.x1, self.y1, self.refobj = x1, y1, refobj self.object = objects[refobj] def __str__(self): return '<PutObj x1="%d" y1="%d" refobj="%d" />'%(self.x1, self.y1, self.refobj) class Canvas(LRFStream): tag_map = { 0xF551: ['canvaswidth', 'W'], 0xF552: ['canvasheight', 'W'], 0xF5DA: ['', 'parse_waits'], 0xF533: ['blockrule', 'W', {0x44: "block-fixed", 0x22: "block-adjustable"}], 0xF534: ['bgcolor', 'D', Color], 0xF535: ['layout', 'W', {0x41: 'TbRl', 0x34: 'LrTb'}], 0xF536: ['framewidth', 'W'], 0xF537: ['framecolor', 'D', Color], 0xF52E: ['framemode', 'W', {0: 'none', 2: 'curve', 1:'square'}], } tag_map.update(LRFStream.tag_map) extra_attrs = ['canvaswidth', 'canvasheight', 'blockrule', 'layout', 'framewidth', 'framecolor', 'framemode'] def parse_waits(self, tag, f): val = tag.word 
self.setwaitprop = val&0xF self.setwaitsync = val&0xF0 def initialize(self): self.attrs = {} for attr in self.extra_attrs: if hasattr(self, attr): self.attrs[attr] = getattr(self, attr) self._contents = [] s = self.stream or b'' stream = io.BytesIO(s) while stream.tell() < len(s): tag = Tag(stream) try: self._contents.append( PutObj(self._document.objects, *struct.unpack("<HHI", tag.contents))) except struct.error: print('Canvas object has errors, skipping.') def __str__(self): s = '\n<%s objid="%s" '%(self.__class__.__name__, self.id,) for attr in self.attrs: s += '%s="%s" '%(attr, self.attrs[attr]) s = s.rstrip() + '>\n' for po in self: s += str(po) + '\n' s += '</%s>\n'%(self.__class__.__name__,) return s def __iter__(self): yield from self._contents class Header(Canvas): pass class Footer(Canvas): pass class ESound(LRFObject): pass class ImageStream(LRFStream): tag_map = { 0xF555: ['comment', 'P'], } imgext = {0x11: 'jpeg', 0x12: 'png', 0x13: 'bmp', 0x14: 'gif'} tag_map.update(LRFStream.tag_map) encoding = property(fget=lambda self : self.imgext[self.stream_flags & 0xFF].upper()) def end_stream(self, *args): LRFStream.end_stream(self, *args) self.file = str(self.id) + '.' 
+ self.encoding.lower() if self._document is not None: self._document.image_map[self.id] = self def __str__(self): return '<ImageStream objid="%s" encoding="%s" file="%s" />\n'%\ (self.id, self.encoding, self.file) class Import(LRFStream): pass class Button(LRFObject): tag_map = { 0xF503: ['', 'do_ref_image'], 0xF561: ['button_flags','W'], # <Button/> 0xF562: ['','do_base_button'], # <BaseButton> 0xF563: ['',''], # </BaseButton> 0xF564: ['','do_focus_in_button'], # <FocusinButton> 0xF565: ['',''], # </FocusinButton> 0xF566: ['','do_push_button'], # <PushButton> 0xF567: ['',''], # </PushButton> 0xF568: ['','do_up_button'], # <UpButton> 0xF569: ['',''], # </UpButton> 0xF56A: ['','do_start_actions'], # start actions 0xF56B: ['',''], # end actions 0xF56C: ['','parse_jump_to'], # JumpTo 0xF56D: ['','parse_send_message'], # <SendMessage 0xF56E: ['','parse_close_window'], # <CloseWindow/> 0xF5D6: ['','parse_sound_stop'], # <SoundStop/> 0xF5F9: ['','parse_run'], # Run } tag_map.update(LRFObject.tag_map) def __init__(self, document, stream, id, scramble_key, boundary): self.xml = '' self.refimage = {} self.actions = {} self.to_dump = True LRFObject.__init__(self, document, stream, id, scramble_key, boundary) def do_ref_image(self, tag, f): self.refimage[self.button_type] = tag.dword def do_base_button(self, tag, f): self.button_type = 0 self.actions[self.button_type] = [] def do_focus_in_button(self, tag, f): self.button_type = 1 def do_push_button(self, tag, f): self.button_type = 2 def do_up_button(self, tag, f): self.button_type = 3 def do_start_actions(self, tag, f): self.actions[self.button_type] = [] def parse_jump_to(self, tag, f): self.actions[self.button_type].append((1, struct.unpack("<II", tag.contents))) def parse_send_message(self, tag, f): params = (tag.word, Tag.string_parser(f), Tag.string_parser(f)) self.actions[self.button_type].append((2, params)) def parse_close_window(self, tag, f): self.actions[self.button_type].append((3,)) def parse_sound_stop(self, 
tag, f): self.actions[self.button_type].append((4,)) def parse_run(self, tag, f): self.actions[self.button_type].append((5, struct.unpack("<HI", tag.contents))) def jump_action(self, button_type): for i in self.actions[button_type]: if i[0] == 1: return i[1:][0] return (None, None) def __str__(self): s = '<Button objid="%s">\n'%(self.id,) if self.button_flags & 0x10 != 0: s += '<PushButton ' if 2 in self.refimage: s += 'refimage="%s" '%(self.refimage[2],) s = s.rstrip() + '>\n' s += '<JumpTo refpage="%s" refobj="%s" />\n'% self.jump_action(2) s += '</PushButton>\n' else: raise LRFParseError('Unsupported button type') s += '</Button>\n' return s refpage = property(fget=lambda self : self.jump_action(2)[0]) refobj = property(fget=lambda self : self.jump_action(2)[1]) class Window(LRFObject): pass class PopUpWin(LRFObject): pass class Sound(LRFObject): pass class SoundStream(LRFObject): pass class Font(LRFStream): tag_map = { 0xF559: ['fontfilename', 'P'], 0xF55D: ['fontfacename', 'P'], } tag_map.update(LRFStream.tag_map) data = property(fget=lambda self: self.stream) def end_stream(self, *args): LRFStream.end_stream(self, *args) self._document.font_map[self.fontfacename] = self self.file = self.fontfacename + '.ttf' def __unicode__(self): s = '<RegistFont objid="%s" fontfilename="%s" fontname="%s" encoding="TTF" file="%s" />\n'%\ (self.id, self.fontfilename, self.fontfacename, self.file) return s class ObjectInfo(LRFStream): pass class BookAttr(StyleObject, LRFObject): tag_map = { 0xF57B: ['page_tree_id', 'D'], 0xF5D8: ['', 'add_font'], 0xF5DA: ['setwaitprop', 'W', {1: 'replay', 2: 'noreplay'}], } tag_map.update(ruby_tags) tag_map.update(LRFObject.tag_map) binding_map = {1: 'Lr', 16 : 'Rl'} def __init__(self, document, stream, id, scramble_key, boundary): self.font_link_list = [] LRFObject.__init__(self, document, stream, id, scramble_key, boundary) def add_font(self, tag, f): self.font_link_list.append(tag.dword) def __str__(self): s = '<BookStyle objid="%s" 
stylelabel="%s">\n'%(self.id, self.id) s += '<SetDefault %s />\n'%(self._tags_to_xml(),) doc = self._document s += '<BookSetting bindingdirection="%s" dpi="%s" screenwidth="%s" screenheight="%s" colordepth="%s" />\n'%\ (self.binding_map[doc.binding], doc.dpi, doc.width, doc.height, doc.color_depth) for font in self._document.font_map.values(): s += str(font) s += '</BookStyle>\n' return s class SimpleText(Text): pass class TocLabel: def __init__(self, refpage, refobject, label): self.refpage, self.refobject, self.label = refpage, refobject, label def __str__(self): return '<TocLabel refpage="%s" refobj="%s">%s</TocLabel>\n'%(self.refpage, self.refobject, self.label) class TOCObject(LRFStream): def initialize(self): stream = io.BytesIO(self.stream or b'') c = struct.unpack("<H", stream.read(2))[0] stream.seek(4*(c+1)) self._contents = [] while c > 0: refpage = struct.unpack("<I", stream.read(4))[0] refobj = struct.unpack("<I", stream.read(4))[0] cnt = struct.unpack("<H", stream.read(2))[0] raw = stream.read(cnt) label = raw.decode('utf_16_le') self._contents.append(TocLabel(refpage, refobj, label)) c -= 1 def __iter__(self): yield from self._contents def __str__(self): s = '<TOC>\n' for i in self: s += str(i) return s + '</TOC>\n' object_map = [ None, # 00 PageTree, # 01 Page, # 02 Header, # 03 Footer, # 04 PageAttr, # 05 Block, # 06 BlockAttr, # 07 MiniPage, # 08 None, # 09 Text, # 0A TextAttr, # 0B Image, # 0C Canvas, # 0D ESound, # 0E None, # 0F None, # 10 ImageStream, # 11 Import, # 12 Button, # 13 Window, # 14 PopUpWin, # 15 Sound, # 16 SoundStream, # 17 None, # 18 Font, # 19 ObjectInfo, # 1A None, # 1B BookAttr, # 1C SimpleText, # 1D TOCObject, # 1E ] def get_object(document, stream, id, offset, size, scramble_key): stream.seek(offset) start_tag = Tag(stream) if start_tag.id != 0xF500: raise LRFParseError('Bad object start') obj_id, obj_type = struct.unpack("<IH", start_tag.contents) if obj_type < len(object_map) and object_map[obj_type] is not None: return 
object_map[obj_type](document, stream, obj_id, scramble_key, offset+size-Tag.tags[0][0]) raise LRFParseError("Unknown object type: %02X!" % obj_type)
41,785
Python
.py
1,043
30.238734
131
0.542007
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,443
convert_from.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/lrs/convert_from.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' ''' Compile a LRS file into a LRF file. ''' import logging import os import sys from calibre import setup_cli_handlers from calibre.ebooks.BeautifulSoup import BeautifulStoneSoup, CData, NavigableString, Tag from calibre.ebooks.chardet import xml_to_unicode from calibre.ebooks.lrf.pylrs.pylrs import ( CR, BlockStyle, Bold, Book, BookSetting, Canvas, CharButton, DropCaps, EmpLine, Font, Footer, Header, Image, ImageBlock, ImageStream, Italic, JumpButton, Page, PageStyle, Paragraph, Plot, RuledLine, Span, StyleDefault, Sub, Sup, TextBlock, TextStyle, ) from calibre.utils.config import OptionParser from polyglot.builtins import string_or_bytes class LrsParser: def __init__(self, stream, logger): self.logger = logger src = stream.read() self.soup = BeautifulStoneSoup(xml_to_unicode(src)[0]) self.objects = {} for obj in self.soup.findAll(objid=True): self.objects[obj['objid']] = obj self.parsed_objects = {} self.first_pass() self.second_pass() self.third_pass() self.fourth_pass() self.fifth_pass() def fifth_pass(self): for tag in self.soup.findAll(['canvas', 'header', 'footer']): canvas = self.parsed_objects[tag.get('objid')] for po in tag.findAll('putobj'): canvas.put_object(self.parsed_objects[po.get('refobj')], po.get('x1'), po.get('y1')) @classmethod def attrs_to_dict(cls, tag, exclude=('objid',)): result = {} for key, val in tag.attrs: if key in exclude: continue result[str(key)] = val return result def text_tag_to_element(self, tag): map = { 'span' : Span, 'italic' : Italic, 'bold' : Bold, 'empline' : EmpLine, 'sup' : Sup, 'sub' : Sub, 'cr' : CR, 'drawchar': DropCaps, } if tag.name == 'charbutton': return CharButton(self.parsed_objects[tag.get('refobj')], None) if tag.name == 'plot': return Plot(self.parsed_objects[tag.get('refobj')], **self.attrs_to_dict(tag, ['refobj'])) settings = self.attrs_to_dict(tag) settings.pop('spanstyle', '') return map[tag.name](**settings) def 
process_text_element(self, tag, elem): for item in tag.contents: if isinstance(item, NavigableString): elem.append(item.string) else: subelem = self.text_tag_to_element(item) elem.append(subelem) self.process_text_element(item, subelem) def process_paragraph(self, tag): p = Paragraph() contents = [i for i in tag.contents] if contents: if isinstance(contents[0], NavigableString): contents[0] = contents[0].string.lstrip() for item in contents: if isinstance(item, string_or_bytes): p.append(item) elif isinstance(item, NavigableString): p.append(item.string) else: elem = self.text_tag_to_element(item) p.append(elem) self.process_text_element(item, elem) return p def process_text_block(self, tag): tb = self.parsed_objects[tag.get('objid')] for item in tag.contents: if hasattr(item, 'name'): if item.name == 'p': tb.append(self.process_paragraph(item)) elif item.name == 'cr': tb.append(CR()) elif item.name == 'charbutton': # BookDesigner does this p = Paragraph() tb.append(p) elem = self.text_tag_to_element(item) self.process_text_element(item, elem) p.append(elem) def fourth_pass(self): for tag in self.soup.findAll('page'): page = self.parsed_objects[tag.get('objid')] self.book.append(page) for block_tag in tag.findAll(['canvas', 'imageblock', 'textblock', 'ruledline', 'simpletextblock']): if block_tag.name == 'ruledline': page.append(RuledLine(**self.attrs_to_dict(block_tag))) else: page.append(self.parsed_objects[block_tag.get('objid')]) for tag in self.soup.find('objects').findAll('button'): jt = tag.find('jumpto') tb = self.parsed_objects[jt.get('refobj')] jb = JumpButton(tb) self.book.append(jb) self.parsed_objects[tag.get('objid')] = jb for tag in self.soup.findAll(['textblock', 'simpletextblock']): self.process_text_block(tag) toc = self.soup.find('toc') if toc: for tag in toc.findAll('toclabel'): label = self.tag_to_string(tag) self.book.addTocEntry(label, self.parsed_objects[tag.get('refobj')]) def third_pass(self): map = { 'page' : (Page, ['pagestyle', 
'evenfooterid', 'oddfooterid', 'evenheaderid', 'oddheaderid']), 'textblock' : (TextBlock, ['textstyle', 'blockstyle']), 'simpletextblock' : (TextBlock, ['textstyle', 'blockstyle']), 'imageblock' : (ImageBlock, ['blockstyle', 'refstream']), 'image' : (Image, ['refstream']), 'canvas' : (Canvas, ['canvaswidth', 'canvasheight']), } attrmap = { 'pagestyle' : 'pageStyle', 'blockstyle' : 'blockStyle', 'textstyle' : 'textStyle', } for id, tag in self.objects.items(): if tag.name in map.keys(): settings = self.attrs_to_dict(tag, map[tag.name][1]+['objid', 'objlabel']) for a in ('pagestyle', 'blockstyle', 'textstyle'): label = tag.get(a, False) if label and \ (label in self._style_labels or label in self.parsed_objects): _obj = (self.parsed_objects[label] if label in self.parsed_objects else self._style_labels[label]) settings[attrmap[a]] = _obj for a in ('evenfooterid', 'oddfooterid', 'evenheaderid', 'oddheaderid'): if a in tag: settings[a.replace('id', '')] = self.parsed_objects[tag.get(a)] args = [] if 'refstream' in tag: args.append(self.parsed_objects[tag.get('refstream')]) if 'canvaswidth' in tag: args += [tag.get('canvaswidth'), tag.get('canvasheight')] self.parsed_objects[id] = map[tag.name][0](*args, **settings) def second_pass(self): map = { 'pagestyle' : (PageStyle, ['stylelabel', 'evenheaderid', 'oddheaderid', 'evenfooterid', 'oddfooterid']), 'textstyle' : (TextStyle, ['stylelabel', 'rubyalignandadjust']), 'blockstyle' : (BlockStyle, ['stylelabel']), 'imagestream': (ImageStream, ['imagestreamlabel']), 'registfont' : (Font, []) } self._style_labels = {} for id, tag in self.objects.items(): if tag.name in map.keys(): settings = self.attrs_to_dict(tag, map[tag.name][1]+['objid']) if tag.name == 'pagestyle': for a in ('evenheaderid', 'oddheaderid', 'evenfooterid', 'oddfooterid'): if a in tag: settings[a.replace('id', '')] = self.parsed_objects[tag.get(a)] settings.pop('autoindex', '') self.parsed_objects[id] = map[tag.name][0](**settings) x = tag.get('stylelabel', 
False) if x: self._style_labels[x] = self.parsed_objects[id] if tag.name == 'registfont': self.book.append(self.parsed_objects[id]) @classmethod def tag_to_string(cls, tag): ''' Convenience method to take a BeautifulSoup Tag and extract the text from it recursively. @return: A unicode (possibly empty) object ''' if not tag: return '' strings = [] for item in tag.contents: if isinstance(item, (NavigableString, CData)): strings.append(item.string) elif isinstance(item, Tag): res = cls.tag_to_string(item) if res: strings.append(res) return ''.join(strings) def first_pass(self): info = self.soup.find('bbebxylog').find('bookinformation').find('info') bookinfo = info.find('bookinfo') docinfo = info.find('docinfo') def me(base, tagname): tag = base.find(tagname.lower()) if tag is None: return ('', '', '') tag = (self.tag_to_string(tag), tag.get('reading') if 'reading' in tag else '') # noqa return tag title = me(bookinfo, 'Title') author = me(bookinfo, 'Author') publisher = me(bookinfo, 'Publisher') category = me(bookinfo, 'Category')[0] classification = me(bookinfo, 'Classification')[0] freetext = me(bookinfo, 'FreeText')[0] language = me(docinfo, 'Language')[0] creator = me(docinfo, 'Creator')[0] producer = me(docinfo, 'Producer')[0] bookid = me(bookinfo, 'BookID')[0] sd = self.soup.find('setdefault') sd = StyleDefault(**self.attrs_to_dict(sd, ['page_tree_id', 'rubyalignandadjust'])) bs = self.soup.find('booksetting') bs = BookSetting(**self.attrs_to_dict(bs, [])) settings = {} thumbnail = self.soup.find('cthumbnail') if thumbnail is not None: f = thumbnail['file'] if os.access(f, os.R_OK): settings['thumbnail'] = f else: print(_('Could not read from thumbnail file:'), f) self.book = Book(title=title, author=author, publisher=publisher, category=category, classification=classification, freetext=freetext, language=language, creator=creator, producer=producer, bookid=bookid, setdefault=sd, booksetting=bs, **settings) for hdr in self.soup.findAll(['header', 'footer']): 
elem = Header if hdr.name == 'header' else Footer self.parsed_objects[hdr.get('objid')] = elem(**self.attrs_to_dict(hdr)) def render(self, file, to_lrs=False): if to_lrs: self.book.renderLrs(file, 'utf-8') else: self.book.renderLrf(file) def option_parser(): parser = OptionParser(usage=_('%prog [options] file.lrs\nCompile an LRS file into an LRF file.')) parser.add_option('-o', '--output', default=None, help=_('Path to output file')) parser.add_option('--verbose', default=False, action='store_true', help=_('Verbose processing')) parser.add_option('--lrs', default=False, action='store_true', help=_('Convert LRS to LRS, useful for debugging.')) return parser def main(args=sys.argv, logger=None): parser = option_parser() opts, args = parser.parse_args(args) if logger is None: level = logging.DEBUG if opts.verbose else logging.INFO logger = logging.getLogger('lrs2lrf') setup_cli_handlers(logger, level) if len(args) != 2: parser.print_help() return 1 if not opts.output: ext = '.lrs' if opts.lrs else '.lrf' opts.output = os.path.splitext(os.path.basename(args[1]))[0]+ext opts.output = os.path.abspath(opts.output) if opts.verbose: import warnings warnings.defaultaction = 'error' logger.info('Parsing LRS file...') converter = LrsParser(open(args[1], 'rb'), logger) logger.info('Writing to output file...') converter.render(opts.output, to_lrs=opts.lrs) logger.info('Output written to '+opts.output) return 0 if __name__ == '__main__': sys.exit(main())
12,861
Python
.py
304
30.404605
120
0.53781
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,444
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/lrs/__init__.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' ''''''
94
Python
.py
3
30.333333
61
0.604396
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,445
convert_from.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/html/convert_from.py
# License: GPLv3 Copyright: 2008, Kovid Goyal <kovid at kovidgoyal.net> import copy import glob import os import re import sys import tempfile from collections import deque from itertools import chain from math import ceil, floor from calibre import __appname__, entity_regex, entity_to_unicode, fit_image, force_unicode, preferred_encoding from calibre.constants import filesystem_encoding from calibre.devices.interface import DevicePlugin as Device from calibre.ebooks import ConversionError from calibre.ebooks.BeautifulSoup import BeautifulSoup, Comment, Declaration, NavigableString, ProcessingInstruction, Tag from calibre.ebooks.chardet import xml_to_unicode from calibre.ebooks.lrf import Book from calibre.ebooks.lrf.html.color_map import lrs_color from calibre.ebooks.lrf.html.table import Table from calibre.ebooks.lrf.pylrs.pylrs import ( CR, BlockSpace, BookSetting, Canvas, CharButton, DropCaps, EmpLine, Image, ImageBlock, ImageStream, Italic, JumpButton, LrsError, Paragraph, Plot, RuledLine, Span, Sub, Sup, TextBlock, ) from calibre.ptempfile import PersistentTemporaryFile from polyglot.builtins import itervalues, string_or_bytes from polyglot.urllib import unquote, urlparse """ Code to convert HTML ebooks into LRF ebooks. I am indebted to esperanc for the initial CSS->Xylog Style conversion code and to Falstaff for pylrs. 
""" from PIL import Image as PILImage def update_css(ncss, ocss): for key in ncss.keys(): if key in ocss: ocss[key].update(ncss[key]) else: ocss[key] = ncss[key] def munge_paths(basepath, url): purl = urlparse(unquote(url),) path, fragment = purl[2], purl[5] if path: path = path.replace('/', os.sep) if not path: path = basepath elif not os.path.isabs(path): dn = os.path.dirname(basepath) path = os.path.join(dn, path) return os.path.normpath(path), fragment def strip_style_comments(match): src = match.group() while True: lindex = src.find('/*') if lindex < 0: break rindex = src.find('*/', lindex) if rindex < 0: src = src[:lindex] break src = src[:lindex] + src[rindex+2:] return src def tag_regex(tagname): '''Return non-grouping regular expressions that match the opening and closing tags for tagname''' return dict(open=r'(?:<\s*%(t)s\s+[^<>]*?>|<\s*%(t)s\s*>)'%dict(t=tagname), close=r'</\s*%(t)s\s*>'%dict(t=tagname)) class HTMLConverter: SELECTOR_PAT = re.compile(r"([A-Za-z0-9\-\_\:\.]+[A-Za-z0-9\-\_\:\.\s\,]*)\s*\{([^\}]*)\}") PAGE_BREAK_PAT = re.compile(r'page-break-(?:after|before)\s*:\s*(\w+)', re.IGNORECASE) IGNORED_TAGS = (Comment, Declaration, ProcessingInstruction) MARKUP_MASSAGE = [ # Close <a /> tags (re.compile(r'<a(\s[^>]*)?/>', re.IGNORECASE), lambda match: '<a'+match.group(1)+'></a>'), # Strip comments from <style> tags. 
This is needed as # sometimes there are unterminated comments (re.compile(r"<\s*style.*?>(.*?)<\/\s*style\s*>", re.DOTALL|re.IGNORECASE), lambda match: match.group().replace('<!--', '').replace('-->', '')), # remove <p> tags from within <a href> tags (re.compile(r'<\s*a\s+[^<>]*href\s*=[^<>]*>(.*?)<\s*/\s*a\s*>', re.DOTALL|re.IGNORECASE), lambda match: re.compile(r'%(open)s|%(close)s'%tag_regex('p'), re.IGNORECASE).sub('', match.group())), # Replace common line break patterns with line breaks (re.compile(r'<p>(&nbsp;|\s)*</p>', re.IGNORECASE), lambda m: '<br />'), # Replace empty headers with line breaks (re.compile(r'<h[0-5]?>(&nbsp;|\s)*</h[0-5]?>', re.IGNORECASE), lambda m: '<br />'), # Replace entities (entity_regex(), entity_to_unicode), # Remove comments from within style tags as they can mess up BeatifulSoup (re.compile(r'(<style.*?</style>)', re.IGNORECASE|re.DOTALL), strip_style_comments), # Remove self closing script tags as they also mess up BeautifulSoup (re.compile(r'(?i)<script[^<>]+?/>'), lambda match: ''), # BeautifulSoup treats self closing <div> tags as open <div> tags (re.compile(r'(?i)<\s*div([^>]*)/\s*>'), lambda match: '<div%s></div>'%match.group(1)) ] # Fix Baen markup BAEN = [ (re.compile(r'page-break-before:\s*\w+([\s;\}])', re.IGNORECASE), lambda match: match.group(1)), (re.compile(r'<p>\s*(<a id.*?>\s*</a>)\s*</p>', re.IGNORECASE), lambda match: match.group(1)), (re.compile(r'<\s*a\s+id="p[0-9]+"\s+name="p[0-9]+"\s*>\s*</a>', re.IGNORECASE), lambda match: ''), ] # Fix pdftohtml markup PDFTOHTML = [ # Remove <hr> tags (re.compile(r'<hr.*?>', re.IGNORECASE), lambda match: '<br />'), # Remove page numbers (re.compile(r'\d+<br>', re.IGNORECASE), lambda match: ''), # Remove <br> and replace <br><br> with <p> (re.compile(r'<br.*?>\s*<br.*?>', re.IGNORECASE), lambda match: '<p>'), (re.compile(r'(.*)<br.*?>', re.IGNORECASE), lambda match: match.group() if re.match('<', match.group(1).lstrip()) or len(match.group(1)) < 40 else match.group(1)), # 
Remove hyphenation (re.compile(r'-\n\r?'), lambda match: ''), ] # Fix Book Designer markup BOOK_DESIGNER = [ # HR (re.compile('<hr>', re.IGNORECASE), lambda match : '<span style="page-break-after:always"> </span>'), # Create header tags (re.compile(r'<h2[^><]*?id=BookTitle[^><]*?(align=)*(?(1)(\w+))*[^><]*?>[^><]*?</h2>', re.IGNORECASE), lambda match : '<h1 id="BookTitle" align="%s">%s</h1>'%(match.group(2) if match.group(2) else 'center', match.group(3))), (re.compile(r'<h2[^><]*?id=BookAuthor[^><]*?(align=)*(?(1)(\w+))*[^><]*?>[^><]*?</h2>', re.IGNORECASE), lambda match : '<h2 id="BookAuthor" align="%s">%s</h2>'%(match.group(2) if match.group(2) else 'center', match.group(3))), (re.compile(r'<span[^><]*?id=title[^><]*?>(.*?)</span>', re.IGNORECASE|re.DOTALL), lambda match : '<h2 class="title">%s</h2>'%(match.group(1),)), (re.compile(r'<span[^><]*?id=subtitle[^><]*?>(.*?)</span>', re.IGNORECASE|re.DOTALL), lambda match : '<h3 class="subtitle">%s</h3>'%(match.group(1),)), # Blank lines (re.compile(r'<div[^><]*?>(&nbsp;){4}</div>', re.IGNORECASE), lambda match : '<p></p>'), ] def __hasattr__(self, attr): if hasattr(self.options, attr): return True return object.__hasattr__(self, attr) def __getattr__(self, attr): if hasattr(self.options, attr): return getattr(self.options, attr) return object.__getattribute__(self, attr) def __setattr__(self, attr, val): if hasattr(self.options, attr): setattr(self.options, attr, val) else: object.__setattr__(self, attr, val) CSS = { 'h1' : {"font-size" : "xx-large", "font-weight":"bold", 'text-indent':'0pt'}, 'h2' : {"font-size" : "x-large", "font-weight":"bold", 'text-indent':'0pt'}, 'h3' : {"font-size" : "large", "font-weight":"bold", 'text-indent':'0pt'}, 'h4' : {"font-size" : "large", 'text-indent':'0pt'}, 'h5' : {"font-weight" : "bold", 'text-indent':'0pt'}, 'b' : {"font-weight" : "bold"}, 'strong' : {"font-weight" : "bold"}, 'i' : {"font-style" : "italic"}, 'cite' : {'font-style' : 'italic'}, 'em' : {"font-style" : "italic"}, 
'small' : {'font-size' : 'small'}, 'pre' : {'font-family' : 'monospace', 'white-space': 'pre'}, 'code' : {'font-family' : 'monospace'}, 'tt' : {'font-family' : 'monospace'}, 'center' : {'text-align' : 'center'}, 'th' : {'font-size' : 'large', 'font-weight':'bold'}, 'big' : {'font-size' : 'large', 'font-weight':'bold'}, '.libprs500_dropcaps' : {'font-size': 'xx-large'}, 'u' : {'text-decoration': 'underline'}, 'sup' : {'vertical-align': 'super', 'font-size': '60%'}, 'sub' : {'vertical-align': 'sub', 'font-size': '60%'}, } def __init__(self, book, fonts, options, logger, paths): ''' Convert HTML files at C{paths} and add to C{book}. After creating the object, you must call L{self.writeto} to output the LRF/S file. @param book: The LRF book @type book: L{lrf.pylrs.Book} @param fonts: dict specifying the font families to use ''' # Defaults for various formatting tags object.__setattr__(self, 'options', options) self.log = logger self.fonts = fonts # : dict specifying font families to use # Memory self.scaled_images = {} #: Temporary files with scaled version of images self.rotated_images = {} #: Temporary files with rotated version of images self.text_styles = [] #: Keep track of already used textstyles self.block_styles = [] #: Keep track of already used blockstyles self.images = {} #: Images referenced in the HTML document self.targets = {} #: <a name=...> and id elements self.links = deque() # : <a href=...> elements self.processed_files = [] self.extra_toc_entries = [] # : TOC entries gleaned from semantic information self.image_memory = [] self.id_counter = 0 self.unused_target_blocks = [] # : Used to remove extra TextBlocks self.link_level = 0 #: Current link level self.memory = [] #: Used to ensure that duplicate CSS unhandled errors are not reported self.tops = {} #: element representing the top of each HTML file in the LRF file self.previous_text = '' # : Used to figure out when to lstrip self.stripped_space = '' self.preserve_block_style = False # : Used so 
that <p> tags in <blockquote> elements are handled properly self.avoid_page_break = False self.current_page = book.create_page() # Styles self.blockquote_style = book.create_block_style(sidemargin=60, topskip=20, footskip=20) self.unindented_style = book.create_text_style(parindent=0) self.in_table = False # List processing self.list_level = 0 self.list_indent = 20 self.list_counter = 1 self.book = book #: The Book object representing a BBeB book self.override_css = {} self.override_pcss = {} if self._override_css is not None: if os.access(self._override_css, os.R_OK): with open(self._override_css, 'rb') as f: src = f.read() else: src = self._override_css if isinstance(src, bytes): src = src.decode('utf-8', 'replace') match = self.PAGE_BREAK_PAT.search(src) if match and not re.match('avoid', match.group(1), re.IGNORECASE): self.page_break_found = True ncss, npcss = self.parse_css(src) if ncss: update_css(ncss, self.override_css) if npcss: update_css(npcss, self.override_pcss) paths = [os.path.abspath(path) for path in paths] paths = [path.decode(sys.getfilesystemencoding()) if not isinstance(path, str) else path for path in paths] while len(paths) > 0 and self.link_level <= self.link_levels: for path in paths: if path in self.processed_files: continue try: self.add_file(path) except KeyboardInterrupt: raise except: if self.link_level == 0: # Die on errors in the first level raise for link in self.links: if link['path'] == path: self.links.remove(link) break self.log.warn('Could not process '+path) if self.verbose: self.log.exception(' ') self.links = self.process_links() self.link_level += 1 paths = [link['path'] for link in self.links] if self.current_page is not None and self.current_page.has_text(): self.book.append(self.current_page) for text, tb in self.extra_toc_entries: self.book.addTocEntry(text, tb) if self.base_font_size > 0: self.log.info('\tRationalizing font sizes...') self.book.rationalize_font_sizes(self.base_font_size) def is_baen(self, soup): 
        # True if the document declares Baen as its publisher.
        return bool(soup.find('meta', attrs={'name':'Publisher',
                    'content':re.compile('Baen', re.IGNORECASE)}))

    def is_book_designer(self, raw):
        # Detect Book Designer output by its characteristic BookTitle heading.
        return bool(re.search('<H2[^><]*id=BookTitle', raw))

    def preprocess(self, raw):
        '''
        Decode C{raw}, apply source-specific markup massage, and parse it into
        a BeautifulSoup tree. Re-parses once if a Baen file is detected only
        after parsing. Returns the soup.
        '''
        nmassage = []
        nmassage.extend(HTMLConverter.MARKUP_MASSAGE)

        if not self.book_designer and self.is_book_designer(raw):
            self.book_designer = True
            self.log.info(_('\tBook Designer file detected.'))
        self.log.info(_('\tParsing HTML...'))

        if self.baen:
            nmassage.extend(HTMLConverter.BAEN)
        if self.pdftohtml:
            nmassage.extend(HTMLConverter.PDFTOHTML)
        if self.book_designer:
            nmassage.extend(HTMLConverter.BOOK_DESIGNER)
        if isinstance(raw, bytes):
            raw = xml_to_unicode(raw, replace_entities=True)[0]
        for pat, repl in nmassage:
            raw = pat.sub(repl, raw)
        soup = BeautifulSoup(raw)
        if not self.baen and self.is_baen(soup):
            # Baen can only be detected after parsing; redo with Baen massage.
            self.baen = True
            self.log.info(_('\tBaen file detected. Re-parsing...'))
            return self.preprocess(raw)
        if self.book_designer:
            t = soup.find(id='BookTitle')
            if t:
                self.book.set_title(self.get_text(t))
            a = soup.find(id='BookAuthor')
            if a:
                self.book.set_author(self.get_text(a))
        if self.verbose:
            # Debug aid: dump the massaged HTML to the temp directory.
            tdir = tempfile.gettempdir()
            if not os.path.exists(tdir):
                os.makedirs(tdir)
            try:
                with open(os.path.join(tdir, 'html2lrf-verbose.html'), 'wb') as f:
                    f.write(str(soup).encode('utf-8'))
                self.log.info(_('Written preprocessed HTML to ')+f.name)
            except:  # NOTE(review): bare except -- debug dump is best-effort only
                pass
        return soup

    def add_file(self, path):
        '''
        Convert a single HTML file at C{path} and record the element
        representing its top in C{self.tops}. Resets per-file CSS state from
        the class defaults plus any user overrides.
        '''
        self.css = HTMLConverter.CSS.copy()
        self.pseudo_css = self.override_pcss.copy()
        for selector in self.override_css:
            if selector in self.css:
                self.css[selector].update(self.override_css[selector])
            else:
                self.css[selector] = self.override_css[selector]

        self.file_name = os.path.basename(path)
        self.log.info(_('Processing %s')%(path if self.verbose else self.file_name))

        if not os.path.exists(path):
            path = path.replace('&', '%26')  # convertlit replaces & with %26 in file names
        with open(path, 'rb') as f:
            raw = f.read()
        if self.pdftohtml:
            # Bug in pdftohtml that causes it to output invalid UTF-8 files
            raw = raw.decode('utf-8', 'ignore')
        elif self.encoding is not None:
            raw = raw.decode(self.encoding, 'ignore')
        else:
            raw = xml_to_unicode(raw, self.verbose)[0]
        soup = self.preprocess(raw)
        self.log.info(_('\tConverting to BBeB...'))
        self.current_style = {}
        self.page_break_found = False
        if not isinstance(path, str):
            path = path.decode(sys.getfilesystemencoding())
        self.target_prefix = path
        self.previous_text = '\n'
        self.tops[path] = self.parse_file(soup)
        self.processed_files.append(path)

    def parse_css(self, style):
        """
        Parse the contents of a <style> tag or .css file.

        @param style: C{str(style)} should be the CSS to parse.
        @return: A dictionary with one entry per selector where the key is the
        selector name and the value is a dictionary of properties
        """
        sdict, pdict = {}, {}
        # NOTE(review): without re.DOTALL this does not strip /*...*/ comments
        # that span multiple lines -- confirm whether that is intended.
        style = re.sub(r'/\*.*?\*/', '', style)  # Remove /*...*/ comments
        for sel in re.findall(HTMLConverter.SELECTOR_PAT, style):
            for key in sel[0].split(','):
                val = self.parse_style_properties(sel[1])
                key = key.strip().lower()
                if '+' in key:
                    # Adjacent-sibling selectors are not supported.
                    continue
                if ':' in key:
                    # Pseudo-class selector, e.g. p:first-letter.
                    key, sep, pseudo = key.partition(':')
                    if key in pdict:
                        if pseudo in pdict[key]:
                            pdict[key][pseudo].update(val)
                        else:
                            pdict[key][pseudo] = val
                    else:
                        pdict[key] = {pseudo:val}
                else:
                    if key in sdict:
                        sdict[key].update(val)
                    else:
                        sdict[key] = val
        return sdict, pdict

    def parse_style_properties(self, props):
        """
        Parses a style attribute. The code within a CSS selector block or in
        the style attribute of an HTML element.

        @return: A dictionary with one entry for each property where the key
        is the property name and the value is the property value.
        """
        prop = dict()
        for s in props.split(';'):
            l = s.split(':',1)
            if len(l)==2:
                key = l[0].strip().lower()
                val = l[1].strip()
                prop[key] = val
        return prop

    def tag_css(self, tag, parent_css={}):
        """
        Return a dictionary of style properties applicable to Tag tag.
""" def merge_parent_css(prop, pcss): # float should not be inherited according to the CSS spec # however we need to as we don't do alignment at a block level. # float is removed by the process_alignment function. inherited = ['text-align', 'float', 'white-space', 'color', 'line-height', 'vertical-align'] temp = {} for key in pcss.keys(): chk = key.lower() # float should not be inherited according to the CSS spec # however we need to as we don't do alignment at a block level. # float is removed by the process_alignment function. if chk.startswith('font') or chk in inherited: temp[key] = pcss[key] prop.update(temp) prop, pprop = {}, {} tagname = tag.name.lower() if parent_css: merge_parent_css(prop, parent_css) if tag.has_attr("align"): al = tag['align'].lower() if al in ('left', 'right', 'center', 'justify'): prop["text-align"] = al if tagname in self.css: prop.update(self.css[tagname]) if tagname in self.pseudo_css: pprop.update(self.pseudo_css[tagname]) if tag.has_attr("class"): cls = tag['class'] if isinstance(cls, list): cls = ' '.join(cls) cls = cls.lower() for cls in cls.split(): for classname in ["."+cls, tagname+"."+cls]: if classname in self.css: prop.update(self.css[classname]) if classname in self.pseudo_css: pprop.update(self.pseudo_css[classname]) if tag.has_attr('id') and tag['id'] in self.css: prop.update(self.css[tag['id']]) if tag.has_attr("style"): prop.update(self.parse_style_properties(tag["style"])) return prop, pprop def parse_file(self, soup): def get_valid_block(page): for item in page.contents: if isinstance(item, (Canvas, TextBlock, ImageBlock, RuledLine)): if isinstance(item, TextBlock) and not item.contents: continue return item if not self.current_page: self.current_page = self.book.create_page() self.current_block = self.book.create_text_block() self.current_para = Paragraph() if self.cover: self.add_image_page(self.cover) self.cover = None top = self.current_block self.current_block.must_append = True self.soup = soup 
self.process_children(soup, {}, {}) self.soup = None if self.current_para and self.current_block: self.current_para.append_to(self.current_block) if self.current_block and self.current_page: self.current_block.append_to(self.current_page) if self.avoid_page_break: self.avoid_page_break = False elif self.current_page and self.current_page.has_text(): self.book.append(self.current_page) self.current_page = None if top not in top.parent.contents: # May have been removed for a cover image top = top.parent.contents[0] if not top.has_text() and top.parent.contents.index(top) == len(top.parent.contents)-1: # Empty block at the bottom of a page opage = top.parent top.parent.contents.remove(top) if self.book.last_page() is opage: if self.current_page and self.current_page.has_text(): for c in self.current_page.contents: if isinstance(c, (TextBlock, ImageBlock)): return c raise ConversionError(_('Could not parse file: %s')%self.file_name) else: try: index = self.book.pages().index(opage) except ValueError: self.log.warning(_('%s is an empty file')%self.file_name) tb = self.book.create_text_block() self.current_page.append(tb) return tb for page in list(self.book.pages()[index+1:]): for c in page.contents: if isinstance(c, (TextBlock, ImageBlock, Canvas)): return c raise ConversionError(_('Could not parse file: %s')%self.file_name) return top def create_link(self, children, tag): para = None for i in range(len(children)-1, -1, -1): if isinstance(children[i], (Span, EmpLine)): para = children[i] break if para is None: raise ConversionError( _('Failed to parse link %(tag)s %(children)s')%dict( tag=tag, children=children)) text = self.get_text(tag, 1000) if not text: text = 'Link' img = tag.find('img') if img: try: text = img['alt'] except KeyError: pass path, fragment = munge_paths(self.target_prefix, tag['href']) return {'para':para, 'text':text, 'path':os.path.abspath(path), 'fragment':fragment, 'in toc': (self.link_level == 0 and not self.use_spine and not 
                           self.options.no_links_in_toc)}

    def get_text(self, tag, limit=None):
        '''
        Return the plain text contained in C{tag}, recursively, skipping
        ignored tags and elements hidden via CSS. If the collected text is all
        whitespace, fall back to accumulated <img alt="..."> text. C{limit}
        only stops further accumulation, so slightly more than C{limit}
        characters may be returned.
        '''
        css = self.tag_css(tag)[0]
        if ('display' in css and css['display'].lower() == 'none') or \
           ('visibility' in css and css['visibility'].lower() == 'hidden'):
            return ''
        text, alt_text = '', ''
        for c in tag.contents:
            if limit is not None and len(text) > limit:
                break
            if isinstance(c, HTMLConverter.IGNORED_TAGS):
                continue
            if isinstance(c, NavigableString):
                text += str(c)
            elif isinstance(c, Tag):
                if c.name.lower() == 'img' and c.has_attr('alt'):
                    alt_text += c['alt']
                    continue
                text += self.get_text(c)
        return text if text.strip() else alt_text

    def process_links(self):
        '''
        Resolve all links gathered so far. Links into already-processed files
        become JumpButton/CharButton pairs (and optionally TOC entries); links
        into files not yet processed are returned so the caller can schedule
        those files for conversion.
        '''
        def add_toc_entry(text, target):
            # NOTE(review): reads ascii_text/tb from the enclosing loop rather
            # than its own parameters -- works only because the caller passes
            # those same values; confirm before refactoring.
            # TextBlocks in Canvases have a None parent or an Objects Parent
            if target.parent is not None and \
               hasattr(target.parent, 'objId'):
                self.book.addTocEntry(ascii_text, tb)
            else:
                self.log.debug("Cannot add link %s to TOC"%ascii_text)

        def get_target_block(fragment, targets):
            '''Return the correct block for the <a name> element'''
            bs = targets[fragment]
            if not isinstance(bs, BlockSpace):
                return bs
            # Target is a BlockSpace: use the first real block after it,
            # else the last one before it, else synthesise an empty block.
            ans, found, page = None, False, bs.parent
            for item in page.contents:
                if found:
                    if isinstance(item, (TextBlock, RuledLine, ImageBlock)):
                        ans = item
                        break
                if item == bs:
                    found = True
                    continue
            if not ans:
                for i in range(len(page.contents)-1, -1, -1):
                    if isinstance(page.contents[i], (TextBlock, RuledLine, ImageBlock)):
                        ans = page.contents[i]
                        break
            if not ans:
                ntb = self.book.create_text_block()
                ntb.Paragraph(' ')
                page.append(ntb)
                ans = ntb
            if found:
                # Cache the resolution and drop the now-redundant BlockSpace.
                targets[fragment] = ans
                page.contents.remove(bs)
            return ans

        outside_links = deque()
        while len(self.links) > 0:
            link = self.links.popleft()
            para, text, path, fragment = link['para'], link['text'], link['path'], link['fragment']
            ascii_text = text
            if not isinstance(path, str):
                path = path.decode(sys.getfilesystemencoding())
            if path in self.processed_files:
                if path+fragment in self.targets.keys():
                    tb = get_target_block(path+fragment, self.targets)
                else:
                    tb = self.tops[path]
                if link['in toc']:
                    add_toc_entry(ascii_text, tb)

                # Replace the link's placeholder contents with a CharButton
                # that jumps to the resolved target block.
                jb = JumpButton(tb)
                self.book.append(jb)
                cb = CharButton(jb, text=text)
                para.contents = []
                para.append(cb)
                try:
                    self.unused_target_blocks.remove(tb)
                except ValueError:
                    pass
            else:
                outside_links.append(link)

        return outside_links

    def create_toc(self, toc):
        # Add TOC entries for top-level items whose targets are already known.
        for item in toc.top_level_items():
            ascii_text = item.text
            if not item.fragment and item.abspath in self.tops:
                self.book.addTocEntry(ascii_text, self.tops[item.abspath])
            elif item.abspath:
                url = item.abspath+(item.fragment if item.fragment else '')
                if url in self.targets:
                    self.book.addTocEntry(ascii_text, self.targets[url])

    def end_page(self):
        """
        End the current page, ensuring that any further content is displayed
        on a new page.
        """
        if self.current_para.has_text():
            self.current_para.append_to(self.current_block)
            self.current_para = Paragraph()
        if self.current_block.has_text() or self.current_block.must_append:
            self.current_block.append_to(self.current_page)
            self.current_block = self.book.create_text_block()
        if self.current_page.has_text():
            self.book.append(self.current_page)
            self.current_page = self.book.create_page()

    def add_image_page(self, path):
        '''
        Append a full-screen page containing only the image at C{path},
        centered. Silently does nothing if the file is not readable.
        '''
        if os.access(path, os.R_OK):
            self.end_page()
            pwidth, pheight = self.profile.screen_width, self.profile.screen_height - \
                              self.profile.fudge
            page = self.book.create_page(evensidemargin=0, oddsidemargin=0,
                                         topmargin=0, textwidth=pwidth,
                                         headheight=0, headsep=0, footspace=0,
                                         footheight=0, textheight=pheight)
            if path not in self.images:
                self.images[path] = ImageStream(path)
            im = PILImage.open(path)
            width, height = im.size
            canvas = Canvas(pwidth, pheight)
            ib = ImageBlock(self.images[path], x1=width, y1=height,
                            xsize=width, ysize=height,
                            blockwidth=width, blockheight=height)
            canvas.put_object(ib, int((pwidth-width)/2.), int((pheight-height)/2.))
            page.append(canvas)
            self.book.append(page)

    def process_children(self, ptag, pcss, ppcss={}):
        """ Process the children of ptag """
        # NOTE(review): ppcss={} is a mutable default argument; safe only if
        # never mutated downstream -- confirm.
        # Need to make a copy of contents as when
        # extract is called on a
        # child, it will mess up the iteration.
        for c in copy.copy(ptag.contents):
            if isinstance(c, HTMLConverter.IGNORED_TAGS):
                continue
            elif isinstance(c, Tag):
                self.parse_tag(c, pcss)
            elif isinstance(c, NavigableString):
                self.add_text(c, pcss, ppcss)
        if not self.in_table:
            try:
                if self.minimize_memory_usage:
                    # Free the processed subtree to keep memory usage down.
                    ptag.extract()
            except AttributeError:
                # NOTE(review): debug print left in production code.
                print(ptag, type(ptag))

    def get_alignment(self, css):
        '''
        Map CSS text-align/float values onto an LRF align value ('head',
        'center' or 'foot'). Removes 'float' from C{css} as a side effect.
        '''
        val = css['text-align'].lower() if 'text-align' in css else None
        align = 'head'
        if val is not None:
            if val in ["right", "foot"]:
                align = "foot"
            elif val == "center":
                align = "center"
        if 'float' in css:
            val = css['float'].lower()
            if val == 'left':
                align = 'head'
            if val == 'right':
                align = 'foot'
            css.pop('float')
        return align

    def process_alignment(self, css):
        '''
        Create a new TextBlock only if necessary as indicated by css

        @type css: dict
        @return: True if a new TextBlock was created, False otherwise
        '''
        align = self.get_alignment(css)
        if align != self.current_block.textStyle.attrs['align']:
            # Alignment changed: flush the current paragraph/block and start
            # a new block whose text style carries the new alignment.
            self.current_para.append_to(self.current_block)
            self.current_block.append_to(self.current_page)
            ts = self.book.create_text_style(**self.current_block.textStyle.attrs)
            ts.attrs['align'] = align
            try:
                # Reuse an identical existing style to keep the LRF small.
                index = self.text_styles.index(ts)
                ts = self.text_styles[index]
            except ValueError:
                self.text_styles.append(ts)
            self.current_block = self.book.create_text_block(
                blockStyle=self.current_block.blockStyle,
                textStyle=ts)
            self.current_para = Paragraph()
            return True
        return False

    def add_text(self, tag, css, pseudo_css, force_span_use=False):
        '''
        Add text to the current paragraph taking CSS into account.

        @param tag: Either a BeautifulSoup tag or a string
        @param css: A dict
        @param pseudo_css: dict of pseudo-class (e.g. first-letter) styles
        @param force_span_use: wrap the text in a Span even without attributes
        '''
        src = tag.string if hasattr(tag, 'string') else tag
        if len(src) > 32760:
            # Very long runs are split into chunks and added recursively;
            # presumably an LRF text-run length limit -- TODO confirm.
            pos = 0
            while pos < len(src):
                self.add_text(src[pos:pos+32760], css, pseudo_css, force_span_use)
                pos += 32760
            return

        src = src.replace('\r\n', '\n').replace('\r', '\n')

        if 'first-letter' in pseudo_css and len(src) > 1:
            # Emit the first letter (plus a preceding quote mark, if any)
            # separately with the first-letter style, then continue below
            # with the remainder of the text.
            src = src.lstrip()
            f = src[0]
            next = 1  # NOTE(review): shadows the builtin next()
            if f in ("'", '"', '\u201c', '\u2018', '\u201d', '\u2019'):
                if len(src) >= 2:
                    next = 2
                    f = src[:2]
            src = src[next:]
            ncss = css.copy()
            ncss.update(pseudo_css.pop('first-letter'))
            self.add_text(f, ncss, {}, force_span_use)

        collapse_whitespace = 'white-space' not in css or css['white-space'] != 'pre'
        if self.process_alignment(css) and collapse_whitespace:
            # Dont want leading blanks in a new paragraph
            src = src.lstrip()

        def append_text(src):
            # Emit one run of text with font/decoration styling applied.
            fp, key, variant = self.font_properties(css)
            # Normalise soft hyphens, non-breaking spaces and ligatures.
            for x, y in [('\xad', ''), ('\xa0', ' '), ('\ufb00', 'ff'),
                         ('\ufb01', 'fi'), ('\ufb02', 'fl'),
                         ('\ufb03', 'ffi'), ('\ufb04', 'ffl')]:
                src = src.replace(x, y)

            def valigner(x):
                # Identity by default; replaced by Sub/Sup below when needed.
                return x

            if 'vertical-align' in css:
                valign = css['vertical-align']
                if valign in ('sup', 'super', 'sub'):
                    # NOTE(review): enlarges fontsize by 5/3 before Sub/Sup;
                    # presumably compensating for the 60% size in the CSS
                    # defaults -- confirm.
                    fp['fontsize'] = int(fp['fontsize']) * 5 // 3
                    valigner = Sub if valign == 'sub' else Sup  # noqa
            normal_font_size = int(fp['fontsize'])

            if variant == 'small-caps':
                # Render lowercase letters as smaller uppercase ones.
                dump = Span(fontsize=normal_font_size-30)
                temp = []
                for c in src:
                    if c.isupper():
                        if temp:
                            dump.append(valigner(''.join(temp)))
                            temp = []
                        dump.append(Span(valigner(c), fontsize=normal_font_size))
                    else:
                        temp.append(c.upper())
                src = dump
                if temp:
                    src.append(valigner(''.join(temp)))
            else:
                src = valigner(src)

            if key in ['italic', 'bi']:
                # Avoid double-italicising when the face is already italic.
                already_italic = False
                for fonts in self.fonts.values():
                    it = fonts['italic'][1] if 'italic' in fonts else ''
                    bi = fonts['bi'][1] if 'bi' in fonts else ''
                    if fp['fontfacename'] in (it, bi):
                        already_italic = True
                        break
                if not already_italic:
                    src = Italic(src)

            # Drop properties already supplied by the enclosing block style.
            unneeded = []
            for prop in fp:
                if fp[prop] == self.current_block.textStyle.attrs[prop]:
                    unneeded.append(prop)
            for prop in unneeded:
                fp.pop(prop)
            attrs = {}
            if 'color' in css and not self.ignore_colors:
                attrs['textcolor'] = lrs_color(css['color'])
            attrs.update(fp)
            elem = Span(text=src, **attrs) if (attrs or force_span_use) else src
            if 'text-decoration' in css:
                dec = css['text-decoration'].lower()
                linepos = 'after' if dec == 'underline' else 'before' if dec == 'overline' else None
                if linepos is not None:
                    elem = EmpLine(elem, emplineposition=linepos)
            self.current_para.append(elem)

        if collapse_whitespace:
            # Whitespace state machine: trailing space stripped here is
            # carried into the next run via self.stripped_space, and
            # self.previous_text decides whether to lstrip this run.
            src = re.sub(r'\s{1,}', ' ', src)
            if self.stripped_space and len(src) == len(src.lstrip(' \n\r\t')):
                src = self.stripped_space + src
            src, orig = src.rstrip(' \n\r\t'), src
            self.stripped_space = orig[len(src):]
            if len(self.previous_text) != len(self.previous_text.rstrip(' \n\r\t')):
                src = src.lstrip(' \n\r\t')
            if len(src):
                self.previous_text = src
                append_text(src)
        else:
            # Preformatted text: honour newlines as explicit line breaks.
            srcs = src.split('\n')
            for src in srcs[:-1]:
                append_text(src)
                self.line_break()
            last = srcs[-1]
            if len(last):
                append_text(last)

    def line_break(self):
        # Hard line break within the current paragraph.
        self.current_para.append(CR())
        self.previous_text = '\n'

    def end_current_para(self):
        '''
        End current paragraph with a paragraph break after it.
        '''
        if self.current_para.contents:
            self.current_block.append(self.current_para)
            self.current_block.append(CR())
            self.current_para = Paragraph()

    def end_current_block(self):
        '''
        End current TextBlock. Create new TextBlock with the same styles.
''' if self.current_para.contents: self.current_block.append(self.current_para) self.current_para = Paragraph() if self.current_block.contents or self.current_block.must_append: self.current_page.append(self.current_block) self.current_block = self.book.create_text_block(textStyle=self.current_block.textStyle, blockStyle=self.current_block.blockStyle) def process_image(self, path, tag_css, width=None, height=None, dropcaps=False, rescale=False): def detect_encoding(im): fmt = im.format if fmt == 'JPG': fmt = 'JPEG' return fmt original_path = path if path in self.rotated_images: path = self.rotated_images[path].name if path in self.scaled_images: path = self.scaled_images[path].name try: im = PILImage.open(path) except OSError as err: self.log.warning('Unable to process image: %s\n%s'%(original_path, err)) return encoding = detect_encoding(im) def scale_image(width, height): if width <= 0: width = 1 if height <= 0: height = 1 pt = PersistentTemporaryFile(suffix='_html2lrf_scaled_image_.'+encoding.lower()) self.image_memory.append(pt) # Necessary, trust me ;-) try: im.resize((int(width), int(height)), PILImage.Resampling.LANCZOS).save(pt, encoding) pt.close() self.scaled_images[path] = pt return pt.name except (OSError, SystemError) as err: # PIL chokes on interlaced PNG images as well a some GIF images self.log.warning( _('Unable to process image %(path)s. 
Error: %(err)s')%dict( path=path, err=err)) if width is None or height is None: width, height = im.size elif rescale and (width < im.size[0] or height < im.size[1]): path = scale_image(width, height) if not path: return factor = 720./self.profile.dpi pheight = int(self.current_page.pageStyle.attrs['textheight']) pwidth = int(self.current_page.pageStyle.attrs['textwidth']) if dropcaps: scale = False if width > 0.75*pwidth: width = int(0.75*pwidth) scale = True if height > 0.75*pheight: height = int(0.75*pheight) scale = True if scale: path = scale_image(width, height) if path not in self.images: self.images[path] = ImageStream(path) im = Image(self.images[path], x0=0, y0=0, x1=width, y1=height, xsize=width, ysize=height) line_height = (int(self.current_block.textStyle.attrs['baselineskip']) + int(self.current_block.textStyle.attrs['linespace']))//10 line_height *= self.profile.dpi/72 lines = int(ceil(height/line_height)) dc = DropCaps(lines) dc.append(Plot(im, xsize=ceil(width*factor), ysize=ceil(height*factor))) self.current_para.append(dc) return if self.autorotation and width > pwidth and width > height: pt = PersistentTemporaryFile(suffix='_html2lrf_rotated_image_.'+encoding.lower()) try: im = im.rotate(90) im.save(pt, encoding) path = pt.name self.rotated_images[path] = pt width, height = im.size except OSError: # PIL chokes on interlaced PNG files and since auto-rotation is not critical we ignore the error self.log.debug(_('Unable to process interlaced PNG %s')% original_path) finally: pt.close() scaled, width, height = fit_image(width, height, pwidth, pheight) if scaled: path = scale_image(width, height) if not path: return if path not in self.images: try: self.images[path] = ImageStream(path, encoding=encoding) except LrsError as err: self.log.warning(('Could not process image: %s\n%s')%( original_path, err)) return im = Image(self.images[path], x0=0, y0=0, x1=width, y1=height, xsize=width, ysize=height) self.process_alignment(tag_css) if max(width, height) 
<= min(pwidth, pheight)/5: self.current_para.append(Plot(im, xsize=ceil(width*factor), ysize=ceil(height*factor))) elif height <= int(floor((2/3)*pheight)): pb = self.current_block self.end_current_para() self.process_alignment(tag_css) self.current_para.append(Plot(im, xsize=width*factor, ysize=height*factor)) self.current_block.append(self.current_para) self.current_page.append(self.current_block) self.current_block = self.book.create_text_block( textStyle=pb.textStyle, blockStyle=pb.blockStyle) self.current_para = Paragraph() else: self.end_page() if len(self.current_page.contents) == 1 and not self.current_page.has_text(): self.current_page.contents[0:1] = [] self.current_page.append(Canvas(width=pwidth, height=height)) left = int(floor((pwidth - width)/2)) self.current_page.contents[-1].put_object( ImageBlock(self.images[path], xsize=width, ysize=height, x1=width, y1=height, blockwidth=width, blockheight=height), left, 0) def process_page_breaks(self, tag, tagname, tag_css): if 'page-break-before' in tag_css.keys(): if tag_css['page-break-before'].lower() != 'avoid': self.end_page() tag_css.pop('page-break-before') end_page = False if 'page-break-after' in tag_css.keys(): if tag_css['page-break-after'].lower() == 'avoid': self.avoid_page_break = True else: end_page = True tag_css.pop('page-break-after') if (self.force_page_break_attr[0].match(tagname) and tag.has_attr(self.force_page_break_attr[1]) and self.force_page_break_attr[2].match(tag[self.force_page_break_attr[1]])) or \ self.force_page_break.match(tagname): self.end_page() self.page_break_found = True if not self.page_break_found and self.page_break.match(tagname): number_of_paragraphs = sum( len([1 for i in block.contents if isinstance(i, Paragraph)]) for block in self.current_page.contents if isinstance(block, TextBlock) ) if number_of_paragraphs > 2: self.end_page() self.log.debug('Forcing page break at %s'%tagname) return end_page def block_properties(self, tag_css): def get(what): src = [None for 
i in range(4)] if what in tag_css: msrc = tag_css[what].split() for i in range(min(len(msrc), len(src))): src[i] = msrc[i] for i, c in enumerate(('-top', '-right', '-bottom', '-left')): if what + c in tag_css: src[i] = tag_css[what+c] return src s1, s2 = get('margin'), get('padding') bl = str(self.current_block.blockStyle.attrs['blockwidth'])+'px' def set(default, one, two): fval = None if one is not None: val = self.unit_convert(one, base_length='10pt' if 'em' in one else bl) if val is not None: fval = val if two is not None: val = self.unit_convert(two, base_length='10pt' if 'em' in two else bl) if val is not None: fval = val if fval is None else fval + val if fval is None: fval = default return fval ans = {} ans['topskip'] = set(self.book.defaultBlockStyle.attrs['topskip'], s1[0], s2[0]) ans['footskip'] = set(self.book.defaultBlockStyle.attrs['footskip'], s1[2], s2[2]) ans['sidemargin'] = set(self.book.defaultBlockStyle.attrs['sidemargin'], s1[3], s2[3]) factor = 0.7 if 2*int(ans['sidemargin']) >= factor*int(self.current_block.blockStyle.attrs['blockwidth']): # Try using (left + right)/2 val = int(ans['sidemargin']) ans['sidemargin'] = set(self.book.defaultBlockStyle.attrs['sidemargin'], s1[1], s2[1]) val += int(ans['sidemargin']) val /= 2. ans['sidemargin'] = int(val) if 2*int(ans['sidemargin']) >= factor*int(self.current_block.blockStyle.attrs['blockwidth']): ans['sidemargin'] = int((factor*int(self.current_block.blockStyle.attrs['blockwidth'])) / 2) for prop in ('topskip', 'footskip', 'sidemargin'): if isinstance(ans[prop], string_or_bytes): ans[prop] = int(ans[prop]) if ans[prop] < 0: ans[prop] = 0 return ans def font_properties(self, css): ''' Convert the font propertiess in css to the Xylog equivalents. If the CSS does not contain a particular font property, the default from self.book.defaultTextSytle is used. Assumes 1em = 10pt @return: dict, key, variant. The dict contains the Xlog equivalents. key indicates the font type (i.e. 
bold, bi, normal) and variant is None or 'small-caps' ''' t = {} for key in ('fontwidth', 'fontsize', 'wordspace', 'fontfacename', 'fontweight', 'baselineskip'): t[key] = self.book.defaultTextStyle.attrs[key] def font_weight(val): ans = 0 m = re.search("([0-9]+)", val) if m: ans = int(m.group(1)) elif val.find("bold") >= 0 or val.find("strong") >= 0: ans = 700 return 'bold' if ans >= 700 else 'normal' def font_style(val): ans = 'normal' if 'italic' in val or 'oblique' in val: ans = 'italic' return ans def font_family(val): ans = 'serif' if max(val.find("courier"), val.find("mono"), val.find("fixed"), val.find("typewriter"))>=0: ans = 'mono' elif max(val.find("arial"), val.find("helvetica"), val.find("verdana"), val.find("trebuchet"), val.find("sans")) >= 0: ans = 'sans' return ans def font_variant(val): ans = None if 'small-caps' in val.lower(): ans = 'small-caps' return ans def font_key(family, style, weight): key = 'normal' if style == 'italic' and weight == 'normal': key = 'italic' elif style == 'normal' and weight == 'bold': key = 'bold' elif style == 'italic' and weight == 'bold': key = 'bi' return key def font_size(val): ''' Assumes 1em=100%=10pt ''' normal = 100 ans = self.unit_convert(val, pts=True, base_length='10pt') if ans: if ans <= 0: ans += normal if ans == 0: # Common case of using -1em to mean "smaller" ans = int(font_size("smaller")) if ans < 0: ans = normal else: if ans == 0: ans = int(font_size("smaller")) elif "smaller" in val: ans = normal - 20 elif "xx-small" in val: ans = 40 elif "x-small" in val: ans = 60 elif "small" in val: ans = 80 elif "medium" in val: ans = 100 elif "larger" in val: ans = normal + 20 elif "xx-large" in val: ans = 180 elif "x-large" in val: ans = 140 elif "large" in val: ans = 120 if ans is not None: ans += int(self.font_delta * 20) ans = str(ans) return ans family, weight, style, variant = 'serif', 'normal', 'normal', None for key in css.keys(): val = css[key].lower() if key == 'font': vals = val.split() for val in 
vals: family = font_family(val) if family != 'serif': break for val in vals: weight = font_weight(val) if weight != 'normal': break for val in vals: style = font_style(val) if style != 'normal': break for val in vals: sz = font_size(val) if sz: t['fontsize'] = sz break for val in vals: variant = font_variant(val) if variant: t['fontvariant'] = variant break elif key in ['font-family', 'font-name']: family = font_family(val) elif key == "font-size": ans = font_size(val) if ans: t['fontsize'] = ans elif key == 'font-weight': weight = font_weight(val) elif key == 'font-style': style = font_style(val) elif key == 'font-variant': variant = font_variant(val) if variant: css['font-variant'] = variant key = font_key(family, style, weight) if key in self.fonts[family]: t['fontfacename'] = self.fonts[family][key][1] else: t['fontfacename'] = self.fonts[family]['normal'][1] if key in ['bold', 'bi']: t['fontweight'] = 700 fs = int(t['fontsize']) if fs > 120: t['wordspace'] = fs // 4 t['baselineskip'] = fs + 20 return t, key, variant def unit_convert(self, val, pts=False, base_length='10pt'): ''' Tries to convert html units in C{val} to pixels. @param pts: If True return 10*pts instead of pixels. @return: The number of pixels (an int) if successful. Otherwise, returns None. 
''' dpi = self.profile.dpi result = None try: result = int(val) except ValueError: pass m = re.search(r"\s*(-*[0-9]*\.?[0-9]*)\s*(%|em|px|mm|cm|in|dpt|pt|pc)", val) if m is not None and m.group(1): unit = float(m.group(1)) if m.group(2) == '%': normal = self.unit_convert(base_length) result = (unit/100) * normal elif m.group(2) == 'px': result = unit elif m.group(2) == 'in': result = unit * dpi elif m.group(2) == 'pt': result = unit * dpi/72 elif m.group(2) == 'dpt': result = unit * dpi/720 elif m.group(2) == 'em': normal = self.unit_convert(base_length) result = unit * normal elif m.group(2) == 'pc': result = unit * (dpi/72) * 12 elif m.group(2) == 'mm': result = unit * 0.04 * (dpi) elif m.group(2) == 'cm': result = unit * 0.4 * (dpi) if result is not None: if pts: result = int(round(result * (720/dpi))) else: result = int(round(result)) return result def text_properties(self, tag_css): indent = self.book.defaultTextStyle.attrs['parindent'] if 'text-indent' in tag_css: bl = str(self.current_block.blockStyle.attrs['blockwidth'])+'px' if 'em' in tag_css['text-indent']: bl = '10pt' indent = self.unit_convert(str(tag_css['text-indent']), pts=True, base_length=bl) if not indent: indent = 0 if indent > 0 and indent < 10 * self.minimum_indent: indent = int(10 * self.minimum_indent) fp = self.font_properties(tag_css)[0] fp['parindent'] = indent if 'line-height' in tag_css: bls, ls = int(self.book.defaultTextStyle.attrs['baselineskip']), \ int(self.book.defaultTextStyle.attrs['linespace']) try: # See if line-height is a unitless number val = int(float(tag_css['line-height'].strip()) * (ls)) fp['linespace'] = val except ValueError: val = self.unit_convert(tag_css['line-height'], pts=True, base_length='1pt') if val is not None: val -= bls if val >= 0: fp['linespace'] = val return fp def process_block(self, tag, tag_css): ''' Ensure padding and text-indent properties are respected ''' text_properties = self.text_properties(tag_css) block_properties = 
self.block_properties(tag_css) indent = (float(text_properties['parindent'])/10) * (self.profile.dpi/72) margin = float(block_properties['sidemargin']) # Since we're flattening the block structure, we need to ensure that text # doesn't go off the left edge of the screen if indent < 0 and margin + indent < 0: text_properties['parindent'] = int(-margin * (72/self.profile.dpi) * 10) align = self.get_alignment(tag_css) def fill_out_properties(props, default): for key in default.keys(): if key not in props: props[key] = default[key] fill_out_properties(block_properties, self.book.defaultBlockStyle.attrs) fill_out_properties(text_properties, self.book.defaultTextStyle.attrs) def properties_different(dict1, dict2): for key in dict1.keys(): if dict1[key] != dict2[key]: return True return False if properties_different(self.current_block.blockStyle.attrs, block_properties) or \ properties_different(self.current_block.textStyle.attrs, text_properties) or\ align != self.current_block.textStyle.attrs['align']: ts = self.current_block.textStyle.copy() ts.attrs.update(text_properties) ts.attrs['align'] = align bs = self.current_block.blockStyle.copy() if not self.preserve_block_style: bs.attrs.update(block_properties) self.current_block.append_to(self.current_page) try: index = self.text_styles.index(ts) ts = self.text_styles[index] except ValueError: self.text_styles.append(ts) try: index = self.block_styles.index(bs) bs = self.block_styles[index] except ValueError: self.block_styles.append(bs) self.current_block = self.book.create_text_block(blockStyle=bs, textStyle=ts) return True return False def process_anchor(self, tag, tag_css, tag_pseudo_css): if not self.in_table: # Anchors in tables are handled separately key = 'name' if tag.has_attr('name') else 'id' name = tag[key].replace('#', '') previous = self.current_block self.process_children(tag, tag_css, tag_pseudo_css) target = None if self.current_block == previous: self.current_block.must_append = True target = 
self.current_block else: found = False for item in self.current_page.contents: if item == previous: found = True continue if found: target = item break if target and not isinstance(target, (TextBlock, ImageBlock)): if isinstance(target, RuledLine): target = self.book.create_text_block(textStyle=self.current_block.textStyle, blockStyle=self.current_block.blockStyle) target.Paragraph(' ') self.current_page.append(target) else: target = BlockSpace() self.current_page.append(target) if target is None: if self.current_block.has_text(): target = self.current_block else: target = self.current_block self.current_block.must_append = True self.targets[self.target_prefix+name] = target else: self.process_children(tag, tag_css, tag_pseudo_css) def parse_tag(self, tag, parent_css): try: tagname = tag.name.lower() except AttributeError: if not isinstance(tag, HTMLConverter.IGNORED_TAGS): self.add_text(tag, parent_css, {}) return tag_css, tag_pseudo_css = self.tag_css(tag, parent_css=parent_css) try: # Skip element if its display attribute is set to none if tag_css['display'].lower() == 'none' or \ tag_css['visibility'].lower() == 'hidden': return except KeyError: pass if not self.disable_chapter_detection and \ (self.chapter_attr[0].match(tagname) and (self.chapter_attr[1].lower() == 'none' or (tag.has_attr(self.chapter_attr[1]) and self.chapter_attr[2].match(tag[self.chapter_attr[1]])))): self.log.debug('Detected chapter %s'%tagname) self.end_page() self.page_break_found = True if self.options.add_chapters_to_toc: self.current_block.must_append = True self.extra_toc_entries.append((self.get_text(tag, limit=1000), self.current_block)) end_page = self.process_page_breaks(tag, tagname, tag_css) try: if tagname in ["title", "script", "meta", 'del', 'frameset']: pass elif tagname == 'a' and self.link_levels >= 0: if tag.has_attr('href') and not self.link_exclude.match(tag['href']): if urlparse(tag['href'])[0] not in ('', 'file'): self.process_children(tag, tag_css, tag_pseudo_css) 
else: path = munge_paths(self.target_prefix, tag['href'])[0] ext = os.path.splitext(path)[1] if ext: ext = ext[1:].lower() if os.access(path, os.R_OK) and os.path.isfile(path): if ext in ['png', 'jpg', 'bmp', 'jpeg']: self.process_image(path, tag_css) else: text = self.get_text(tag, limit=1000) if not text.strip(): text = "Link" self.add_text(text, tag_css, {}, force_span_use=True) self.links.append(self.create_link(self.current_para.contents, tag)) if tag.has_attr('id') or tag.has_attr('name'): key = 'name' if tag.has_attr('name') else 'id' self.targets[self.target_prefix+tag[key]] = self.current_block self.current_block.must_append = True else: self.log.debug('Could not follow link to '+tag['href']) self.process_children(tag, tag_css, tag_pseudo_css) elif tag.has_attr('name') or tag.has_attr('id'): self.process_anchor(tag, tag_css, tag_pseudo_css) else: self.process_children(tag, tag_css, tag_pseudo_css) elif tagname == 'img': if tag.has_attr('src'): path = munge_paths(self.target_prefix, tag['src'])[0] if not os.path.exists(path): path = path.replace('&', '%26') # convertlit replaces & with %26 if os.access(path, os.R_OK) and os.path.isfile(path): width, height = None, None try: width = int(tag['width']) height = int(tag['height']) except: pass dropcaps = tag.get('class') in ('libprs500_dropcaps', ['libprs500_dropcaps']) self.process_image(path, tag_css, width, height, dropcaps=dropcaps, rescale=True) elif not urlparse(tag['src'])[0]: self.log.warn('Could not find image: '+tag['src']) else: self.log.debug("Failed to process: %s"%str(tag)) elif tagname in ['style', 'link']: ncss, npcss = {}, {} if tagname == 'style': text = ''.join([str(i) for i in tag.findAll(text=True)]) css, pcss = self.parse_css(text) ncss.update(css) npcss.update(pcss) elif (tag.has_attr('type') and tag['type'] in ("text/css", "text/x-oeb1-css") and tag.has_attr('href')): path = munge_paths(self.target_prefix, tag['href'])[0] try: with open(path, 'rb') as f: src = f.read().decode('utf-8', 
'replace') match = self.PAGE_BREAK_PAT.search(src) if match and not re.match('avoid', match.group(1), re.IGNORECASE): self.page_break_found = True ncss, npcss = self.parse_css(src) except OSError: self.log.warn('Could not read stylesheet: '+tag['href']) if ncss: update_css(ncss, self.css) self.css.update(self.override_css) if npcss: update_css(npcss, self.pseudo_css) self.pseudo_css.update(self.override_pcss) elif tagname == 'pre': self.end_current_para() self.end_current_block() self.current_block = self.book.create_text_block() ts = self.current_block.textStyle.copy() self.current_block.textStyle = ts self.current_block.textStyle.attrs['parindent'] = '0' if tag.contents: c = tag.contents[0] if isinstance(c, NavigableString): c = str(c).replace('\r\n', '\n').replace('\r', '\n') if c.startswith('\n'): c = c[1:] tag.contents[0] = NavigableString(c) tag.contents[0].setup(tag) self.process_children(tag, tag_css, tag_pseudo_css) self.end_current_block() elif tagname in ['ul', 'ol', 'dl']: self.list_level += 1 if tagname == 'ol': old_counter = self.list_counter self.list_counter = 1 try: self.list_counter = int(tag['start']) except: pass prev_bs = self.current_block.blockStyle self.end_current_block() attrs = self.current_block.blockStyle.attrs attrs = attrs.copy() attrs['sidemargin'] = self.list_indent*self.list_level bs = self.book.create_block_style(**attrs) self.current_block = self.book.create_text_block( blockStyle=bs, textStyle=self.unindented_style) self.process_children(tag, tag_css, tag_pseudo_css) self.end_current_block() self.current_block.blockStyle = prev_bs self.list_level -= 1 if tagname == 'ol': self.list_counter = old_counter elif tagname in ['li', 'dt', 'dd']: margin = self.list_indent*self.list_level if tagname == 'dd': margin += 80 if int(self.current_block.blockStyle.attrs['sidemargin']) != margin: self.end_current_block() attrs = self.current_block.blockStyle.attrs attrs = attrs.copy() attrs['sidemargin'] = margin attrs['blockwidth'] = 
int(attrs['blockwidth']) + margin bs = self.book.create_block_style(**attrs) self.current_block = self.book.create_text_block( blockStyle=bs, textStyle=self.unindented_style) if self.current_para.has_text(): self.line_break() self.current_block.append(self.current_para) self.current_para = Paragraph() self.previous_text = '\n' if tagname == 'li': in_ol, parent = True, tag.parent while parent: if parent.name and parent.name.lower() in ['ul', 'ol']: in_ol = parent.name.lower() == 'ol' break parent = parent.parent prepend = str(self.list_counter)+'. ' if in_ol else '\u2022' + ' ' self.current_para.append(Span(prepend)) self.process_children(tag, tag_css, tag_pseudo_css) if in_ol: self.list_counter += 1 else: self.process_children(tag, tag_css, tag_pseudo_css) elif tagname == 'blockquote': self.current_para.append_to(self.current_block) self.current_block.append_to(self.current_page) pb = self.current_block self.current_para = Paragraph() ts = self.book.create_text_style() ts.attrs['parindent'] = 0 try: index = self.text_styles.index(ts) ts = self.text_styles[index] except ValueError: self.text_styles.append(ts) bs = self.book.create_block_style() bs.attrs['sidemargin'], bs.attrs['topskip'], bs.attrs['footskip'] = \ 60, 20, 20 try: index = self.block_styles.index(bs) bs = self.block_styles[index] except ValueError: self.block_styles.append(bs) self.current_block = self.book.create_text_block( blockStyle=bs, textStyle=ts) self.previous_text = '\n' self.preserve_block_style = True self.process_children(tag, tag_css, tag_pseudo_css) self.preserve_block_style = False self.current_para.append_to(self.current_block) self.current_block.append_to(self.current_page) self.current_para = Paragraph() self.current_block = self.book.create_text_block(textStyle=pb.textStyle, blockStyle=pb.blockStyle) elif tagname in ['p', 'div', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']: new_block = self.process_block(tag, tag_css) if (self.anchor_ids and tag.has_attr('id')) or (self.book_designer and 
tag.get('class') in ('title', ['title'])): if not tag.has_attr('id'): tag['id'] = __appname__+'_id_'+str(self.id_counter) self.id_counter += 1 tkey = self.target_prefix+tag['id'] if not new_block: self.end_current_block() self.current_block.must_append = True self.targets[tkey] = self.current_block if (self.book_designer and tag.get('class') in ('title', ['title'])): self.extra_toc_entries.append((self.get_text(tag, 100), self.current_block)) src = self.get_text(tag, limit=1000) if not self.disable_chapter_detection and tagname.startswith('h'): if self.chapter_regex.search(src): self.log.debug('Detected chapter %s'%src) self.end_page() self.page_break_found = True if self.options.add_chapters_to_toc: self.current_block.must_append = True self.extra_toc_entries.append((self.get_text(tag, limit=1000), self.current_block)) if self.current_para.has_text(): self.current_para.append_to(self.current_block) self.current_para = Paragraph() self.previous_text = '\n' if not tag.contents: self.current_block.append(CR()) return if self.current_block.contents: self.current_block.append(CR()) self.process_children(tag, tag_css, tag_pseudo_css) if self.current_para.contents : self.current_block.append(self.current_para) self.current_para = Paragraph() if tagname.startswith('h') or self.blank_after_para: self.current_block.append(CR()) elif tagname in ['b', 'strong', 'i', 'em', 'span', 'tt', 'big', 'code', 'cite', 'sup', 'sub']: self.process_children(tag, tag_css, tag_pseudo_css) elif tagname == 'font': if tag.has_attr('face'): tag_css['font-family'] = tag['face'] if tag.has_attr('color'): tag_css['color'] = tag['color'] self.process_children(tag, tag_css, tag_pseudo_css) elif tagname in ['br']: self.line_break() self.previous_text = '\n' elif tagname in ['hr', 'tr']: # tr needed for nested tables self.end_current_block() if tagname == 'hr' and not tag_css.get('width', '').strip().startswith('0'): 
self.current_page.RuledLine(linelength=int(self.current_page.pageStyle.attrs['textwidth'])) self.previous_text = '\n' self.process_children(tag, tag_css, tag_pseudo_css) elif tagname == 'td': # Needed for nested tables if not self.in_table: self.current_para.append(' ') self.previous_text = ' ' self.process_children(tag, tag_css, tag_pseudo_css) elif tagname == 'table' and not self.ignore_tables and not self.in_table: tag_css = self.tag_css(tag)[0] # Table should not inherit CSS try: self.process_table(tag, tag_css) except Exception as err: self.log.warning(_('An error occurred while processing a table: %s. Ignoring table markup.')%repr(err)) self.log.exception('') self.log.debug(_('Bad table:\n%s')%str(tag)[:300]) self.in_table = False self.process_children(tag, tag_css, tag_pseudo_css) finally: if self.minimize_memory_usage: tag.extract() else: self.process_children(tag, tag_css, tag_pseudo_css) finally: if end_page: self.end_page() def process_table(self, tag, tag_css): self.end_current_block() self.current_block = self.book.create_text_block() rowpad = 10 table = Table(self, tag, tag_css, rowpad=rowpad, colpad=10) canvases = [] ps = self.current_page.pageStyle.attrs for block, xpos, ypos, delta, targets in table.blocks(int(ps['textwidth']), int(ps['textheight'])): if not block: if ypos > int(ps['textheight']): raise Exception(_('Table has cell that is too large')) canvases.append(Canvas(int(self.current_page.pageStyle.attrs['textwidth']), ypos+rowpad, blockrule='block-fixed')) for name in targets: self.targets[self.target_prefix+name] = canvases[-1] else: if xpos > 65535: xpos = 65535 canvases[-1].put_object(block, xpos + int(delta/2), ypos) for canvas in canvases: self.current_page.append(canvas) self.end_current_block() def remove_unused_target_blocks(self): for block in self.unused_target_blocks: block.parent.contents.remove(block) block.parent = None def writeto(self, path, lrs=False): self.remove_unused_target_blocks() self.book.renderLrs(path) if lrs else 
self.book.renderLrf(path) def cleanup(self): for _file in chain(itervalues(self.scaled_images), itervalues(self.rotated_images)): _file.__del__() def process_file(path, options, logger): path = os.path.abspath(path) default_title = force_unicode(os.path.splitext(os.path.basename(path))[0], filesystem_encoding) dirpath = os.path.dirname(path) tpath = '' try_opf(path, options, logger) if getattr(options, 'cover', None): options.cover = os.path.expanduser(options.cover) if not os.path.isabs(options.cover): options.cover = os.path.join(dirpath, options.cover) if os.access(options.cover, os.R_OK): th = Device.THUMBNAIL_HEIGHT im = PILImage.open(options.cover) pwidth, pheight = options.profile.screen_width, \ options.profile.screen_height - options.profile.fudge width, height = im.size if width < pwidth: corrf = pwidth/width width, height = pwidth, int(corrf*height) scaled, width, height = fit_image(width, height, pwidth, pheight) try: cim = im.resize((width, height), PILImage.BICUBIC).convert('RGB') if \ scaled else im cf = PersistentTemporaryFile(prefix=__appname__+"_", suffix=".jpg") cf.close() cim.convert('RGB').save(cf.name) options.cover = cf.name tim = im.resize((int(0.75*th), th), PILImage.Resampling.LANCZOS).convert('RGB') tf = PersistentTemporaryFile(prefix=__appname__+'_', suffix=".jpg") tf.close() tim.save(tf.name) tpath = tf.name except OSError as err: # PIL sometimes fails, for example on interlaced PNG files logger.warn(_('Could not read cover image: %s'), err) options.cover = None else: raise ConversionError(_('Cannot read from: %s')% (options.cover,)) if not options.title: options.title = default_title for prop in ('author', 'author_sort', 'title', 'title_sort', 'publisher', 'freetext'): val = getattr(options, prop, None) if val and not isinstance(val, str): soup = BeautifulSoup(val) setattr(options, prop, str(soup)) title = (options.title, options.title_sort) author = (options.author, options.author_sort) args = dict(font_delta=options.font_delta, 
title=title, author=author, sourceencoding='utf8', freetext=options.freetext, category=options.category, publisher=options.publisher, booksetting=BookSetting(dpi=10*options.profile.dpi, screenheight=options.profile.screen_height, screenwidth=options.profile.screen_width)) if tpath: args['thumbnail'] = tpath header = None if options.header: header = Paragraph() fheader = options.headerformat if not options.title: options.title = _('Unknown') if not options.author: options.author = _('Unknown') if not fheader: fheader = "%t by %a" fheader = re.sub(r'(?<!%)%t', options.title, fheader) fheader = re.sub(r'(?<!%)%a', options.author, fheader) fheader = re.sub(r'%%a','%a',fheader) fheader = re.sub(r'%%t','%t',fheader) header.append(fheader + " ") book, fonts = Book(options, logger, header=header, **args) le = re.compile(options.link_exclude) if options.link_exclude else \ re.compile('$') pb = re.compile(options.page_break, re.IGNORECASE) if options.page_break else \ re.compile('$') fpb = re.compile(options.force_page_break, re.IGNORECASE) if options.force_page_break else \ re.compile('$') cq = options.chapter_attr.split(',') if len(cq) < 3: raise ValueError('The --chapter-attr setting must have 2 commas.') options.chapter_attr = [re.compile(cq[0], re.IGNORECASE), cq[1], re.compile(cq[2], re.IGNORECASE)] options.force_page_break = fpb options.link_exclude = le options.page_break = pb if not isinstance(options.chapter_regex, str): options.chapter_regex = options.chapter_regex.decode(preferred_encoding) options.chapter_regex = re.compile(options.chapter_regex, re.IGNORECASE) fpba = options.force_page_break_attr.split(',') if len(fpba) != 3: fpba = ['$', '', '$'] options.force_page_break_attr = [re.compile(fpba[0], re.IGNORECASE), fpba[1], re.compile(fpba[2], re.IGNORECASE)] if not hasattr(options, 'anchor_ids'): options.anchor_ids = True files = options.spine if (options.use_spine and hasattr(options, 'spine')) else [path] conv = HTMLConverter(book, fonts, options, logger, 
files) if options.use_spine and hasattr(options, 'toc') and options.toc is not None: conv.create_toc(options.toc) oname = options.output if not oname: suffix = '.lrs' if options.lrs else '.lrf' name = os.path.splitext(os.path.basename(path))[0] + suffix oname = os.path.join(os.getcwd(), name) oname = os.path.abspath(os.path.expanduser(oname)) conv.writeto(oname, lrs=options.lrs) conv.cleanup() return oname def try_opf(path, options, logger): if hasattr(options, 'opf'): opf = options.opf else: files = glob.glob(os.path.join(os.path.dirname(path),'*')) opf = None for f in files: ext = f.rpartition('.')[-1].lower() if ext == 'opf': opf = f break if opf is None: return dirpath = os.path.dirname(os.path.abspath(opf)) from calibre.ebooks.metadata.opf2 import OPF as OPF2 with open(opf, 'rb') as f: opf = OPF2(f, dirpath) try: title = opf.title if title and not getattr(options, 'title', None): options.title = title if getattr(options, 'author', 'Unknown') == 'Unknown': if opf.authors: options.author = ', '.join(opf.authors) if opf.author_sort: options.author_sort = opf.author_sort if options.publisher == 'Unknown': publisher = opf.publisher if publisher: options.publisher = publisher if not getattr(options, 'cover', None) or options.use_metadata_cover: orig_cover = getattr(options, 'cover', None) options.cover = None cover = opf.cover if cover: cover = cover.replace('/', os.sep) if not os.path.isabs(cover): cover = os.path.join(dirpath, cover) if os.access(cover, os.R_OK): try: PILImage.open(cover) options.cover = cover except: pass if not getattr(options, 'cover', None) and orig_cover is not None: options.cover = orig_cover if getattr(opf, 'spine', False): options.spine = [i.path for i in opf.spine if i.path] if not getattr(options, 'toc', None): options.toc = opf.toc except Exception: logger.exception(_('Failed to process OPF file'))
84,836
Python
.py
1,788
32.563758
146
0.512368
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,446
color_map.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/html/color_map.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' import re NAME_MAP = { 'aliceblue': '#F0F8FF', 'antiquewhite': '#FAEBD7', 'aqua': '#00FFFF', 'aquamarine': '#7FFFD4', 'azure': '#F0FFFF', 'beige': '#F5F5DC', 'bisque': '#FFE4C4', 'black': '#000000', 'blanchedalmond': '#FFEBCD', 'blue': '#0000FF', 'brown': '#A52A2A', 'burlywood': '#DEB887', 'cadetblue': '#5F9EA0', 'chartreuse': '#7FFF00', 'chocolate': '#D2691E', 'coral': '#FF7F50', 'crimson': '#DC143C', 'cyan': '#00FFFF', 'darkblue': '#00008B', 'darkgoldenrod': '#B8860B', 'darkgreen': '#006400', 'darkkhaki': '#BDB76B', 'darkmagenta': '#8B008B', 'darkolivegreen': '#556B2F', 'darkorange': '#FF8C00', 'darkorchid': '#9932CC', 'darkred': '#8B0000', 'darksalmon': '#E9967A', 'darkslateblue': '#483D8B', 'darkslategrey': '#2F4F4F', 'darkviolet': '#9400D3', 'deeppink': '#FF1493', 'dodgerblue': '#1E90FF', 'firebrick': '#B22222', 'floralwhite': '#FFFAF0', 'forestgreen': '#228B22', 'fuchsia': '#FF00FF', 'gainsboro': '#DCDCDC', 'ghostwhite': '#F8F8FF', 'gold': '#FFD700', 'goldenrod': '#DAA520', 'indianred ': '#CD5C5C', 'indigo ': '#4B0082', 'khaki': '#F0E68C', 'lavenderblush': '#FFF0F5', 'lawngreen': '#7CFC00', 'lightblue': '#ADD8E6', 'lightcoral': '#F08080', 'lightgoldenrodyellow': '#FAFAD2', 'lightgray': '#D3D3D3', 'lightgrey': '#D3D3D3', 'lightskyblue': '#87CEFA', 'lightslategrey': '#778899', 'lightsteelblue': '#B0C4DE', 'lime': '#87CEFA', 'linen': '#FAF0E6', 'magenta': '#FF00FF', 'maroon': '#800000', 'mediumaquamarine': '#66CDAA', 'mediumblue': '#0000CD', 'mediumorchid': '#BA55D3', 'mediumpurple': '#9370D8', 'mediumseagreen': '#3CB371', 'mediumslateblue': '#7B68EE', 'midnightblue': '#191970', 'moccasin': '#FFE4B5', 'navajowhite': '#FFDEAD', 'navy': '#000080', 'oldlace': '#FDF5E6', 'olive': '#808000', 'orange': '#FFA500', 'orangered': '#FF4500', 'orchid': '#DA70D6', 'paleturquoise': '#AFEEEE', 'papayawhip': '#FFEFD5', 'peachpuff': '#FFDAB9', 'powderblue': '#B0E0E6', 'rosybrown': '#BC8F8F', 
'royalblue': '#4169E1', 'saddlebrown': '#8B4513', 'sandybrown': '#8B4513', 'seashell': '#FFF5EE', 'sienna': '#A0522D', 'silver': '#C0C0C0', 'skyblue': '#87CEEB', 'slategrey': '#708090', 'snow': '#FFFAFA', 'springgreen': '#00FF7F', 'violet': '#EE82EE', 'yellowgreen': '#9ACD32' } hex_pat = re.compile(r'#(\d{2})(\d{2})(\d{2})') rgb_pat = re.compile(r'rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)', re.IGNORECASE) def lrs_color(html_color): hcol = html_color.lower() match = hex_pat.search(hcol) if match: return '0x00'+match.group(1)+match.group(2)+match.group(3) match = rgb_pat.search(hcol) if match: return '0x00'+hex(int(match.group(1)))[2:]+hex(int(match.group(2)))[2:]+hex(int(match.group(3)))[2:] if hcol in NAME_MAP: return NAME_MAP[hcol].replace('#', '0x00') return '0x00000000'
4,023
Python
.py
108
24.777778
108
0.446547
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,447
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/html/__init__.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' """ This package contains code to convert HTML ebooks to LRF ebooks. """ __docformat__ = "epytext" __author__ = "Kovid Goyal <kovid@kovidgoyal.net>"
240
Python
.py
7
33.142857
64
0.681034
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,448
table.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/html/table.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' import math import numbers import re import sys from calibre.ebooks.lrf.fonts import get_font from calibre.ebooks.lrf.pylrs.pylrs import CR, CharButton, LrsTextTag, Paragraph, Plot, Span, Text, TextBlock from polyglot.builtins import native_string_type, string_or_bytes def ceil(num): return int(math.ceil(num)) def print_xml(elem): from calibre.ebooks.lrf.pylrs.pylrs import ElementWriter elem = elem.toElement(native_string_type('utf8')) ew = ElementWriter(elem, sourceEncoding=native_string_type('utf8')) ew.write(sys.stdout) print() def cattrs(base, extra): new = base.copy() new.update(extra) return new def tokens(tb): ''' Return the next token. A token is : 1. A string a block of text that has the same style ''' def process_element(x, attrs): if isinstance(x, CR): yield 2, None elif isinstance(x, Text): yield x.text, cattrs(attrs, {}) elif isinstance(x, string_or_bytes): yield x, cattrs(attrs, {}) elif isinstance(x, (CharButton, LrsTextTag)): if x.contents: if hasattr(x.contents[0], 'text'): yield x.contents[0].text, cattrs(attrs, {}) elif hasattr(x.contents[0], 'attrs'): yield from process_element(x.contents[0], x.contents[0].attrs) elif isinstance(x, Plot): yield x, None elif isinstance(x, Span): attrs = cattrs(attrs, x.attrs) for y in x.contents: yield from process_element(y, attrs) for i in tb.contents: if isinstance(i, CR): yield 1, None elif isinstance(i, Paragraph): for j in i.contents: attrs = {} if hasattr(j, 'attrs'): attrs = j.attrs yield from process_element(j, attrs) class Cell: def __init__(self, conv, tag, css): self.conv = conv self.tag = tag self.css = css self.text_blocks = [] self.pwidth = -1. 
if tag.has_attr('width') and '%' in tag['width']: try: self.pwidth = float(tag['width'].replace('%', '')) except ValueError: pass if 'width' in css and '%' in css['width']: try: self.pwidth = float(css['width'].replace('%', '')) except ValueError: pass if self.pwidth > 100: self.pwidth = -1 self.rowspan = self.colspan = 1 try: self.colspan = int(tag['colspan']) if tag.has_attr('colspan') else 1 self.rowspan = int(tag['rowspan']) if tag.has_attr('rowspan') else 1 except: pass pp = conv.current_page conv.book.allow_new_page = False conv.current_page = conv.book.create_page() conv.parse_tag(tag, css) conv.end_current_block() for item in conv.current_page.contents: if isinstance(item, TextBlock): self.text_blocks.append(item) conv.current_page = pp conv.book.allow_new_page = True if not self.text_blocks: tb = conv.book.create_text_block() tb.Paragraph(' ') self.text_blocks.append(tb) for tb in self.text_blocks: tb.parent = None tb.objId = 0 # Needed as we have to eventually change this BlockStyle's width and # height attributes. This blockstyle may be shared with other # elements, so doing that causes havoc. 
tb.blockStyle = conv.book.create_block_style() ts = conv.book.create_text_style(**tb.textStyle.attrs) ts.attrs['parindent'] = 0 tb.textStyle = ts if ts.attrs['align'] == 'foot': if isinstance(tb.contents[-1], Paragraph): tb.contents[-1].append(' ') def pts_to_pixels(self, pts): pts = int(pts) return ceil((float(self.conv.profile.dpi)/72)*(pts/10)) def minimum_width(self): return max(self.minimum_tb_width(tb) for tb in self.text_blocks) def minimum_tb_width(self, tb): ts = tb.textStyle.attrs default_font = get_font(ts['fontfacename'], self.pts_to_pixels(ts['fontsize'])) parindent = self.pts_to_pixels(ts['parindent']) mwidth = 0 for token, attrs in tokens(tb): font = default_font if isinstance(token, numbers.Integral): # Handle para and line breaks continue if isinstance(token, Plot): return self.pts_to_pixels(token.xsize) ff = attrs.get('fontfacename', ts['fontfacename']) fs = attrs.get('fontsize', ts['fontsize']) if (ff, fs) != (ts['fontfacename'], ts['fontsize']): font = get_font(ff, self.pts_to_pixels(fs)) if not token.strip(): continue word = token.split() word = word[0] if word else "" fl, ft, fr, fb = font.getbbox(word) width = fr - fl if width > mwidth: mwidth = width return parindent + mwidth + 2 def text_block_size(self, tb, maxwidth=sys.maxsize, debug=False): ts = tb.textStyle.attrs default_font = get_font(ts['fontfacename'], self.pts_to_pixels(ts['fontsize'])) parindent = self.pts_to_pixels(ts['parindent']) top, bottom, left, right = 0, 0, parindent, parindent def add_word(width, height, left, right, top, bottom, ls, ws): if left + width > maxwidth: left = width + ws top += ls bottom = top+ls if top+ls > bottom else bottom else: left += (width + ws) right = left if left > right else right bottom = top+ls if top+ls > bottom else bottom return left, right, top, bottom for token, attrs in tokens(tb): if attrs is None: attrs = {} font = default_font ls = self.pts_to_pixels(attrs.get('baselineskip', ts['baselineskip']))+\ 
self.pts_to_pixels(attrs.get('linespace', ts['linespace'])) ws = self.pts_to_pixels(attrs.get('wordspace', ts['wordspace'])) if isinstance(token, numbers.Integral): # Handle para and line breaks if top != bottom: # Previous element not a line break top = bottom else: top += ls bottom += ls left = parindent if int == 1 else 0 continue if isinstance(token, Plot): width, height = self.pts_to_pixels(token.xsize), self.pts_to_pixels(token.ysize) left, right, top, bottom = add_word(width, height, left, right, top, bottom, height, ws) continue ff = attrs.get('fontfacename', ts['fontfacename']) fs = attrs.get('fontsize', ts['fontsize']) if (ff, fs) != (ts['fontfacename'], ts['fontsize']): font = get_font(ff, self.pts_to_pixels(fs)) for word in token.split(): fl, ft, fr, fb = font.getbbox(word) width, height = fr - fl, abs(fb - ft) left, right, top, bottom = add_word(width, height, left, right, top, bottom, ls, ws) return right+3+max(parindent, 10), bottom def text_block_preferred_width(self, tb, debug=False): return self.text_block_size(tb, sys.maxsize, debug=debug)[0] def preferred_width(self, debug=False): return ceil(max(self.text_block_preferred_width(i, debug=debug) for i in self.text_blocks)) def height(self, width): return sum(self.text_block_size(i, width)[1] for i in self.text_blocks) class Row: def __init__(self, conv, row, css, colpad): self.cells = [] self.colpad = colpad cells = row.findAll(re.compile('td|th', re.IGNORECASE)) self.targets = [] for cell in cells: ccss = conv.tag_css(cell, css)[0] self.cells.append(Cell(conv, cell, ccss)) for a in row.findAll(id=True) + row.findAll(name=True): name = a['name'] if a.has_attr('name') else a['id'] if a.has_attr('id') else None if name is not None: self.targets.append(name.replace('#', '')) def number_of_cells(self): '''Number of cells in this row. 
Respects colspan''' ans = 0 for cell in self.cells: ans += cell.colspan return ans def height(self, widths): i, heights = 0, [] for cell in self.cells: width = sum(widths[i:i+cell.colspan]) heights.append(cell.height(width)) i += cell.colspan if not heights: return 0 return max(heights) def cell_from_index(self, col): i = -1 cell = None for cell in self.cells: for k in range(0, cell.colspan): if i == col: break i += 1 if i == col: break return cell def minimum_width(self, col): cell = self.cell_from_index(col) if not cell: return 0 return cell.minimum_width() def preferred_width(self, col): cell = self.cell_from_index(col) if not cell: return 0 return 0 if cell.colspan > 1 else cell.preferred_width() def width_percent(self, col): cell = self.cell_from_index(col) if not cell: return -1 return -1 if cell.colspan > 1 else cell.pwidth def cell_iterator(self): yield from self.cells class Table: def __init__(self, conv, table, css, rowpad=10, colpad=10): self.rows = [] self.conv = conv self.rowpad = rowpad self.colpad = colpad rows = table.findAll('tr') conv.in_table = True for row in rows: rcss = conv.tag_css(row, css)[0] self.rows.append(Row(conv, row, rcss, colpad)) conv.in_table = False def number_of_columns(self): max = 0 for row in self.rows: max = row.number_of_cells() if row.number_of_cells() > max else max return max def number_or_rows(self): return len(self.rows) def height(self, maxwidth): ''' Return row heights + self.rowpad''' widths = self.get_widths(maxwidth) return sum(row.height(widths) + self.rowpad for row in self.rows) - self.rowpad def minimum_width(self, col): return max(row.minimum_width(col) for row in self.rows) def width_percent(self, col): return max(row.width_percent(col) for row in self.rows) def get_widths(self, maxwidth): ''' Return widths of columns + self.colpad ''' rows, cols = self.number_or_rows(), self.number_of_columns() widths = list(range(cols)) for c in range(cols): cellwidths = [0 for i in range(rows)] for r in range(rows): try: 
cellwidths[r] = self.rows[r].preferred_width(c) except IndexError: continue widths[c] = max(cellwidths) min_widths = [self.minimum_width(i)+10 for i in range(cols)] for i in range(len(widths)): wp = self.width_percent(i) if wp >= 0: widths[i] = max(min_widths[i], ceil((wp/100) * (maxwidth - (cols-1)*self.colpad))) itercount = 0 while sum(widths) > maxwidth-((len(widths)-1)*self.colpad) and itercount < 100: for i in range(cols): widths[i] = ceil((95/100)*widths[i]) if \ ceil((95/100)*widths[i]) >= min_widths[i] else widths[i] itercount += 1 return [i+self.colpad for i in widths] def blocks(self, maxwidth, maxheight): rows, cols = self.number_or_rows(), self.number_of_columns() cellmatrix = [[None for c in range(cols)] for r in range(rows)] rowpos = [0 for i in range(rows)] for r in range(rows): nc = self.rows[r].cell_iterator() try: while True: cell = next(nc) cellmatrix[r][rowpos[r]] = cell rowpos[r] += cell.colspan for k in range(1, cell.rowspan): try: rowpos[r+k] += 1 except IndexError: break except StopIteration: # No more cells in this row continue widths = self.get_widths(maxwidth) heights = [row.height(widths) for row in self.rows] xpos = [sum(widths[:i]) for i in range(cols)] delta = maxwidth - sum(widths) if delta < 0: delta = 0 for r in range(len(cellmatrix)): yield None, 0, heights[r], 0, self.rows[r].targets for c in range(len(cellmatrix[r])): cell = cellmatrix[r][c] if not cell: continue width = sum(widths[c:c+cell.colspan])-self.colpad*cell.colspan sypos = 0 for tb in cell.text_blocks: tb.blockStyle = self.conv.book.create_block_style( blockwidth=width, blockheight=cell.text_block_size(tb, width)[1], blockrule='horz-fixed') yield tb, xpos[c], sypos, delta, None sypos += tb.blockStyle.attrs['blockheight']
13,953
Python
.py
333
30.201201
109
0.544878
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,449
pylrs.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/pylrs/pylrs.py
# Copyright (c) 2007 Mike Higgins (Falstaff) # Modifications from the original: # Copyright (C) 2007 Kovid Goyal <kovid@kovidgoyal.net> # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Current limitations and bugs: # Bug: Does not check if most setting values are valid unless lrf is created. # # Unsupported objects: MiniPage, SimpleTextBlock, Canvas, Window, # PopUpWindow, Sound, Import, SoundStream, # ObjectInfo # # Does not support background images for blocks or pages. # # The only button type supported are JumpButtons. # # None of the Japanese language tags are supported. 
# # Other unsupported tags: PageDiv, SoundStop, Wait, pos, # Plot, Image (outside of ImageBlock), # EmpLine, EmpDots import codecs import io import operator import os import re from datetime import date from xml.etree.ElementTree import Element, ElementTree, SubElement from xml.sax.saxutils import escape from calibre.utils.date import isoformat from .pylrf import ( BINDING_DIRECTION_ENCODING, IMAGE_TYPE_ENCODING, LINE_TYPE_ENCODING, STREAM_COMPRESSED, STREAM_FORCE_COMPRESSED, LrfFileStream, LrfObject, LrfStreamBase, LrfTag, LrfTagStream, LrfToc, LrfWriter, ) DEFAULT_SOURCE_ENCODING = "cp1252" # default is us-windows character set DEFAULT_GENREADING = "fs" # default is yes to both lrf and lrs from calibre import __appname__, __version__, replace_entities from polyglot.builtins import iteritems, native_string_type, string_or_bytes class LrsError(Exception): pass class ContentError(Exception): pass def _checkExists(filename): if not os.path.exists(filename): raise LrsError("file '%s' not found" % filename) def _formatXml(root): """ A helper to make the LRS output look nicer. """ for elem in root.iter(): if len(elem) > 0 and (not elem.text or not elem.text.strip()): elem.text = "\n" if not elem.tail or not elem.tail.strip(): elem.tail = "\n" def ElementWithText(tag, text, **extra): """ A shorthand function to create Elements with text. """ e = Element(tag, **extra) e.text = text return e def ElementWithReading(tag, text, reading=False): """ A helper function that creates reading attributes. """ # note: old lrs2lrf parser only allows reading = "" if text is None: readingText = "" elif isinstance(text, string_or_bytes): readingText = text else: # assumed to be a sequence of (name, sortas) readingText = text[1] text = text[0] if not reading: readingText = "" return ElementWithText(tag, text, reading=readingText) def appendTextElements(e, contentsList, se): """ A helper function to convert text streams into the proper elements. 
""" def uconcat(text, newText, se): if isinstance(text, bytes): text = text.decode(se) if isinstance(newText, bytes): newText = newText.decode(se) return text + newText e.text = "" lastElement = None for content in contentsList: if not isinstance(content, Text): newElement = content.toElement(se) if newElement is None: continue lastElement = newElement lastElement.tail = "" e.append(lastElement) else: if lastElement is None: e.text = uconcat(e.text, content.text, se) else: lastElement.tail = uconcat(lastElement.tail, content.text, se) class Delegator: """ A mixin class to create delegated methods that create elements. """ def __init__(self, delegates): self.delegates = delegates self.delegatedMethods = [] # self.delegatedSettingsDict = {} # self.delegatedSettings = [] for d in delegates: d.parent = self methods = d.getMethods() self.delegatedMethods += methods for m in methods: setattr(self, m, getattr(d, m)) """ for setting in d.getSettings(): if isinstance(setting, string_or_bytes): setting = (d, setting) delegates = \ self.delegatedSettingsDict.setdefault(setting[1], []) delegates.append(setting[0]) self.delegatedSettings.append(setting) """ def applySetting(self, name, value, testValid=False): applied = False if name in self.getSettings(): setattr(self, name, value) applied = True for d in self.delegates: if hasattr(d, "applySetting"): applied = applied or d.applySetting(name, value) else: if name in d.getSettings(): setattr(d, name, value) applied = True if testValid and not applied: raise LrsError("setting %s not valid" % name) return applied def applySettings(self, settings, testValid=False): for (setting, value) in settings.items(): self.applySetting(setting, value, testValid) """ if setting not in self.delegatedSettingsDict: raise LrsError, "setting %s not valid" % setting delegates = self.delegatedSettingsDict[setting] for d in delegates: setattr(d, setting, value) """ def appendDelegates(self, element, sourceEncoding): for d in self.delegates: e = 
d.toElement(sourceEncoding) if e is not None: if isinstance(e, list): for e1 in e: element.append(e1) else: element.append(e) def appendReferencedObjects(self, parent): for d in self.delegates: d.appendReferencedObjects(parent) def getMethods(self): return self.delegatedMethods def getSettings(self): return [] def toLrfDelegates(self, lrfWriter): for d in self.delegates: d.toLrf(lrfWriter) def toLrf(self, lrfWriter): self.toLrfDelegates(lrfWriter) class LrsAttributes: """ A mixin class to handle default and user supplied attributes. """ def __init__(self, defaults, alsoAllow=None, **settings): if alsoAllow is None: alsoAllow = [] self.attrs = defaults.copy() for (name, value) in settings.items(): if name not in self.attrs and name not in alsoAllow: raise LrsError("%s does not support setting %s" % (self.__class__.__name__, name)) if isinstance(value, int): value = str(value) self.attrs[name] = value class LrsContainer: """ This class is a mixin class for elements that are contained in or contain an unknown number of other elements. 
""" def __init__(self, validChildren): self.parent = None self.contents = [] self.validChildren = validChildren self.must_append = False # : If True even an empty container is appended by append_to def has_text(self): ''' Return True iff this container has non whitespace text ''' if hasattr(self, 'text'): if self.text.strip(): return True if hasattr(self, 'contents'): for child in self.contents: if child.has_text(): return True for item in self.contents: if isinstance(item, (Plot, ImageBlock, Canvas, CR)): return True return False def append_to(self, parent): ''' Append self to C{parent} iff self has non whitespace textual content @type parent: LrsContainer ''' if self.contents or self.must_append: parent.append(self) def appendReferencedObjects(self, parent): for c in self.contents: c.appendReferencedObjects(parent) def setParent(self, parent): if self.parent is not None: raise LrsError("object already has parent") self.parent = parent def append(self, content, convertText=True): """ Appends valid objects to container. Can auto-covert text strings to Text objects. """ for validChild in self.validChildren: if isinstance(content, validChild): break else: raise LrsError("can't append %s to %s" % (content.__class__.__name__, self.__class__.__name__)) if convertText and isinstance(content, string_or_bytes): content = Text(content) content.setParent(self) if isinstance(content, LrsObject): content.assignId() self.contents.append(content) return self def get_all(self, predicate=lambda x: x): for child in self.contents: if predicate(child): yield child if hasattr(child, 'get_all'): yield from child.get_all(predicate) class LrsObject: """ A mixin class for elements that need an object id. 
""" nextObjId = 0 @classmethod def getNextObjId(selfClass): selfClass.nextObjId += 1 return selfClass.nextObjId def __init__(self, assignId=False): if assignId: self.objId = LrsObject.getNextObjId() else: self.objId = 0 def assignId(self): if self.objId != 0: raise LrsError("id already assigned to " + self.__class__.__name__) self.objId = LrsObject.getNextObjId() def lrsObjectElement(self, name, objlabel="objlabel", labelName=None, labelDecorate=True, **settings): element = Element(name) element.attrib["objid"] = str(self.objId) if labelName is None: labelName = name if labelDecorate: label = "%s.%d" % (labelName, self.objId) else: label = str(self.objId) element.attrib[objlabel] = label element.attrib.update(settings) return element class Book(Delegator): """ Main class for any lrs or lrf. All objects must be appended to the Book class in some way or another in order to be rendered as an LRS or LRF file. The following settings are available on the constructor of Book: author="book author" or author=("book author", "sort as") Author of the book. title="book title" or title=("book title", "sort as") Title of the book. sourceencoding="codec" Gives the assumed encoding for all non-unicode strings. thumbnail="thumbnail file name" A small (80x80?) graphics file with a thumbnail of the book's cover. bookid="book id" A unique id for the book. textstyledefault=<dictionary of settings> Sets the default values for all TextStyles. pagetstyledefault=<dictionary of settings> Sets the default values for all PageStyles. blockstyledefault=<dictionary of settings> Sets the default values for all BlockStyles. booksetting=BookSetting() Override the default BookSetting. setdefault=StyleDefault() Override the default SetDefault. There are several other settings -- see the BookInfo class for more. 
""" def __init__(self, textstyledefault=None, blockstyledefault=None, pagestyledefault=None, optimizeTags=False, optimizeCompression=False, **settings): self.parent = None # we are the top of the parent chain if "thumbnail" in settings: _checkExists(settings["thumbnail"]) # highly experimental -- use with caution self.optimizeTags = optimizeTags self.optimizeCompression = optimizeCompression pageStyle = PageStyle(**PageStyle.baseDefaults.copy()) blockStyle = BlockStyle(**BlockStyle.baseDefaults.copy()) textStyle = TextStyle(**TextStyle.baseDefaults.copy()) if textstyledefault is not None: textStyle.update(textstyledefault) if blockstyledefault is not None: blockStyle.update(blockstyledefault) if pagestyledefault is not None: pageStyle.update(pagestyledefault) self.defaultPageStyle = pageStyle self.defaultTextStyle = textStyle self.defaultBlockStyle = blockStyle LrsObject.nextObjId += 1 styledefault = StyleDefault() if 'setdefault' in settings: styledefault = settings.pop('setdefault') Delegator.__init__(self, [BookInformation(), Main(), Template(), Style(styledefault), Solos(), Objects()]) self.sourceencoding = None # apply default settings self.applySetting("genreading", DEFAULT_GENREADING) self.applySetting("sourceencoding", DEFAULT_SOURCE_ENCODING) self.applySettings(settings, testValid=True) self.allow_new_page = True # : If False L{create_page} raises an exception self.gc_count = 0 def set_title(self, title): ot = self.delegates[0].delegates[0].delegates[0].title self.delegates[0].delegates[0].delegates[0].title = (title, ot[1]) def set_author(self, author): ot = self.delegates[0].delegates[0].delegates[0].author self.delegates[0].delegates[0].delegates[0].author = (author, ot[1]) def create_text_style(self, **settings): ans = TextStyle(**self.defaultTextStyle.attrs.copy()) ans.update(settings) return ans def create_block_style(self, **settings): ans = BlockStyle(**self.defaultBlockStyle.attrs.copy()) ans.update(settings) return ans def create_page_style(self, 
**settings): if not self.allow_new_page: raise ContentError ans = PageStyle(**self.defaultPageStyle.attrs.copy()) ans.update(settings) return ans def create_page(self, pageStyle=None, **settings): ''' Return a new L{Page}. The page has not been appended to this book. @param pageStyle: If None the default pagestyle is used. @type pageStyle: L{PageStyle} ''' if not pageStyle: pageStyle = self.defaultPageStyle return Page(pageStyle=pageStyle, **settings) def create_text_block(self, textStyle=None, blockStyle=None, **settings): ''' Return a new L{TextBlock}. The block has not been appended to this book. @param textStyle: If None the default text style is used @type textStyle: L{TextStyle} @param blockStyle: If None the default block style is used. @type blockStyle: L{BlockStyle} ''' if not textStyle: textStyle = self.defaultTextStyle if not blockStyle: blockStyle = self.defaultBlockStyle return TextBlock(textStyle=textStyle, blockStyle=blockStyle, **settings) def pages(self): '''Return list of Page objects in this book ''' ans = [] for item in self.delegates: if isinstance(item, Main): for candidate in item.contents: if isinstance(candidate, Page): ans.append(candidate) break return ans def last_page(self): '''Return last Page in this book ''' for item in self.delegates: if isinstance(item, Main): temp = list(item.contents) temp.reverse() for candidate in temp: if isinstance(candidate, Page): return candidate def embed_font(self, file, facename): f = Font(file, facename) self.append(f) def getSettings(self): return ["sourceencoding"] def append(self, content): """ Find and invoke the correct appender for this content. """ className = content.__class__.__name__ try: method = getattr(self, "append" + className) except AttributeError: raise LrsError("can't append %s to Book" % className) method(content) def rationalize_font_sizes(self, base_font_size=10): base_font_size *= 10. 
main = None for obj in self.delegates: if isinstance(obj, Main): main = obj break fonts = {} for text in main.get_all(lambda x: isinstance(x, Text)): fs = base_font_size ancestor = text.parent while ancestor: try: fs = int(ancestor.attrs['fontsize']) break except (AttributeError, KeyError): pass try: fs = int(ancestor.textSettings['fontsize']) break except (AttributeError, KeyError): pass try: fs = int(ancestor.textStyle.attrs['fontsize']) break except (AttributeError, KeyError): pass ancestor = ancestor.parent length = len(text.text) fonts[fs] = fonts.get(fs, 0) + length if not fonts: print('WARNING: LRF seems to have no textual content. Cannot rationalize font sizes.') return old_base_font_size = float(max(fonts.items(), key=operator.itemgetter(1))[0]) factor = base_font_size / old_base_font_size def rescale(old): return str(int(int(old) * factor)) text_blocks = list(main.get_all(lambda x: isinstance(x, TextBlock))) for tb in text_blocks: if 'fontsize' in tb.textSettings: tb.textSettings['fontsize'] = rescale(tb.textSettings['fontsize']) for span in tb.get_all(lambda x: isinstance(x, Span)): if 'fontsize' in span.attrs: span.attrs['fontsize'] = rescale(span.attrs['fontsize']) if 'baselineskip' in span.attrs: span.attrs['baselineskip'] = rescale(span.attrs['baselineskip']) text_styles = (tb.textStyle for tb in text_blocks) for ts in text_styles: ts.attrs['fontsize'] = rescale(ts.attrs['fontsize']) ts.attrs['baselineskip'] = rescale(ts.attrs['baselineskip']) def renderLrs(self, lrsFile, encoding="UTF-8"): if isinstance(lrsFile, string_or_bytes): lrsFile = codecs.open(lrsFile, "wb", encoding=encoding) self.render(lrsFile, outputEncodingName=encoding) lrsFile.close() def renderLrf(self, lrfFile): self.appendReferencedObjects(self) if isinstance(lrfFile, string_or_bytes): lrfFile = open(lrfFile, "wb") lrfWriter = LrfWriter(self.sourceencoding) lrfWriter.optimizeTags = self.optimizeTags lrfWriter.optimizeCompression = self.optimizeCompression self.toLrf(lrfWriter) 
lrfWriter.writeFile(lrfFile) lrfFile.close() def toElement(self, se): root = Element("BBeBXylog", version="1.0") root.append(Element("Property")) self.appendDelegates(root, self.sourceencoding) return root def render(self, f, outputEncodingName='UTF-8'): """ Write the book as an LRS to file f. """ self.appendReferencedObjects(self) # create the root node, and populate with the parts of the book root = self.toElement(self.sourceencoding) # now, add some newlines to make it easier to look at _formatXml(root) tree = ElementTree(element=root) tree.write(f, encoding=native_string_type(outputEncodingName), xml_declaration=True) class BookInformation(Delegator): """ Just a container for the Info and TableOfContents elements. """ def __init__(self): Delegator.__init__(self, [Info(), TableOfContents()]) def toElement(self, se): bi = Element("BookInformation") self.appendDelegates(bi, se) return bi class Info(Delegator): """ Just a container for the BookInfo and DocInfo elements. """ def __init__(self): self.genreading = DEFAULT_GENREADING Delegator.__init__(self, [BookInfo(), DocInfo()]) def getSettings(self): return ["genreading"] # + self.delegatedSettings def toElement(self, se): info = Element("Info", version="1.1") info.append( self.delegates[0].toElement(se, reading="s" in self.genreading)) info.append(self.delegates[1].toElement(se)) return info def toLrf(self, lrfWriter): # this info is set in XML form in the LRF info = Element("Info", version="1.1") # self.appendDelegates(info) info.append( self.delegates[0].toElement(lrfWriter.getSourceEncoding(), reading="f" in self.genreading)) info.append(self.delegates[1].toElement(lrfWriter.getSourceEncoding())) # look for the thumbnail file and get the filename tnail = info.find("DocInfo/CThumbnail") if tnail is not None: lrfWriter.setThumbnailFile(tnail.get("file")) # does not work: info.remove(tnail) _formatXml(info) # fix up the doc info to match the LRF format # NB: generates an encoding attribute, which lrs2lrf does not 
class TableOfContents:
    # Collects TOC entries (label + destination block) for the book and
    # renders them either as a <TOC> element (LRS) or an LrfToc object (LRF).

    def __init__(self):
        # list of TocLabel objects, in insertion order
        self.tocEntries = []

    def appendReferencedObjects(self, parent):
        # TOC entries reference blocks that are appended elsewhere; nothing to do.
        pass

    def getMethods(self):
        return ["addTocEntry"]

    def getSettings(self):
        return []

    def addTocEntry(self, tocLabel, textBlock):
        """Add a TOC entry labelled tocLabel that jumps to textBlock.

        The destination must be a Canvas, TextBlock, ImageBlock or RuledLine
        already appended to a container that carries an objId.  Duplicate
        (label, block) pairs are silently ignored.
        """
        if not isinstance(textBlock, (Canvas, TextBlock, ImageBlock, RuledLine)):
            raise LrsError("TOC destination must be a Canvas, TextBlock, ImageBlock or RuledLine"+
                    " not a " + str(type(textBlock)))

        if textBlock.parent is None:
            raise LrsError("TOC text block must be already appended to a page")

        # NOTE(review): deliberately disabled check — kept as-is (dead branch).
        if False and textBlock.parent.parent is None:
            raise LrsError("TOC destination page must be already appended to a book")

        if not hasattr(textBlock.parent, 'objId'):
            raise LrsError("TOC destination must be appended to a container with an objID")

        # skip exact duplicates
        for tl in self.tocEntries:
            if tl.label == tocLabel and tl.textBlock == textBlock:
                return

        self.tocEntries.append(TocLabel(tocLabel, textBlock))
        # remember the label on the block itself (read back by TextBlock.toElement)
        textBlock.tocLabel = tocLabel

    def toElement(self, se):
        # An empty TOC produces no element at all.
        if len(self.tocEntries) == 0:
            return None

        toc = Element("TOC")

        for t in self.tocEntries:
            toc.append(t.toElement(se))

        return toc

    def toLrf(self, lrfWriter):
        if len(self.tocEntries) == 0:
            return

        # each LRF entry is (page objId, block objId, label)
        toc = []
        for t in self.tocEntries:
            toc.append((t.textBlock.parent.objId, t.textBlock.objId, t.label))

        lrfToc = LrfToc(LrsObject.getNextObjId(), toc, lrfWriter.getSourceEncoding())
        lrfWriter.append(lrfToc)
        lrfWriter.setTocObject(lrfToc)
self.title = "Untitled" self.author = "Anonymous" self.bookid = None self.pi = None self.isbn = None self.publisher = None self.freetext = "\n\n" self.label = None self.category = None self.classification = None def appendReferencedObjects(self, parent): pass def getMethods(self): return [] def getSettings(self): return ["author", "title", "bookid", "isbn", "publisher", "freetext", "label", "category", "classification"] def _appendISBN(self, bi): pi = Element("ProductIdentifier") isbnElement = ElementWithText("ISBNPrintable", self.isbn) isbnValueElement = ElementWithText("ISBNValue", self.isbn.replace("-", "")) pi.append(isbnElement) pi.append(isbnValueElement) bi.append(pi) def toElement(self, se, reading=True): bi = Element("BookInfo") bi.append(ElementWithReading("Title", self.title, reading=reading)) bi.append(ElementWithReading("Author", self.author, reading=reading)) bi.append(ElementWithText("BookID", self.bookid)) if self.isbn is not None: self._appendISBN(bi) if self.publisher is not None: bi.append(ElementWithReading("Publisher", self.publisher)) bi.append(ElementWithReading("Label", self.label, reading=reading)) bi.append(ElementWithText("Category", self.category)) bi.append(ElementWithText("Classification", self.classification)) bi.append(ElementWithText("FreeText", self.freetext)) return bi class DocInfo: def __init__(self): self.thumbnail = None self.language = "en" self.creator = None self.creationdate = str(isoformat(date.today())) self.producer = "%s v%s"%(__appname__, __version__) self.numberofpages = "0" def appendReferencedObjects(self, parent): pass def getMethods(self): return [] def getSettings(self): return ["thumbnail", "language", "creator", "creationdate", "producer", "numberofpages"] def toElement(self, se): docInfo = Element("DocInfo") if self.thumbnail is not None: docInfo.append(Element("CThumbnail", file=self.thumbnail)) docInfo.append(ElementWithText("Language", self.language)) docInfo.append(ElementWithText("Creator", 
self.creator)) docInfo.append(ElementWithText("CreationDate", self.creationdate)) docInfo.append(ElementWithText("Producer", self.producer)) docInfo.append(ElementWithText("SumPage", str(self.numberofpages))) return docInfo class Main(LrsContainer): def __init__(self): LrsContainer.__init__(self, [Page]) def getMethods(self): return ["appendPage", "Page"] def getSettings(self): return [] def Page(self, *args, **kwargs): p = Page(*args, **kwargs) self.append(p) return p def appendPage(self, page): self.append(page) def toElement(self, sourceEncoding): main = Element(self.__class__.__name__) for page in self.contents: main.append(page.toElement(sourceEncoding)) return main def toLrf(self, lrfWriter): pageIds = [] # set this id now so that pages can see it pageTreeId = LrsObject.getNextObjId() lrfWriter.setPageTreeId(pageTreeId) # create a list of all the page object ids while dumping the pages for p in self.contents: pageIds.append(p.objId) p.toLrf(lrfWriter) # create a page tree object pageTree = LrfObject("PageTree", pageTreeId) pageTree.appendLrfTag(LrfTag("PageList", pageIds)) lrfWriter.append(pageTree) class Solos(LrsContainer): def __init__(self): LrsContainer.__init__(self, [Solo]) def getMethods(self): return ["appendSolo", "Solo"] def getSettings(self): return [] def Solo(self, *args, **kwargs): p = Solo(*args, **kwargs) self.append(p) return p def appendSolo(self, solo): self.append(solo) def toLrf(self, lrfWriter): for s in self.contents: s.toLrf(lrfWriter) def toElement(self, se): solos = [] for s in self.contents: solos.append(s.toElement(se)) if len(solos) == 0: return None return solos class Solo(Main): pass class Template: """ Does nothing that I know of. 
""" def appendReferencedObjects(self, parent): pass def getMethods(self): return [] def getSettings(self): return [] def toElement(self, se): t = Element("Template") t.attrib["version"] = "1.0" return t def toLrf(self, lrfWriter): # does nothing pass class StyleDefault(LrsAttributes): """ Supply some defaults for all TextBlocks. The legal values are a subset of what is allowed on a TextBlock -- ruby, emphasis, and waitprop settings. """ defaults = dict(rubyalign="start", rubyadjust="none", rubyoverhang="none", empdotsposition="before", empdotsfontname="Dutch801 Rm BT Roman", empdotscode="0x002e", emplineposition="after", emplinetype="solid", setwaitprop="noreplay") alsoAllow = ["refempdotsfont", "rubyAlignAndAdjust"] def __init__(self, **settings): LrsAttributes.__init__(self, self.defaults, alsoAllow=self.alsoAllow, **settings) def toElement(self, se): return Element("SetDefault", self.attrs) class Style(LrsContainer, Delegator): def __init__(self, styledefault=StyleDefault()): LrsContainer.__init__(self, [PageStyle, TextStyle, BlockStyle]) Delegator.__init__(self, [BookStyle(styledefault=styledefault)]) self.bookStyle = self.delegates[0] self.appendPageStyle = self.appendTextStyle = \ self.appendBlockStyle = self.append def appendReferencedObjects(self, parent): LrsContainer.appendReferencedObjects(self, parent) def getMethods(self): return ["PageStyle", "TextStyle", "BlockStyle", "appendPageStyle", "appendTextStyle", "appendBlockStyle"] + \ self.delegatedMethods def getSettings(self): return [(self.bookStyle, x) for x in self.bookStyle.getSettings()] def PageStyle(self, *args, **kwargs): ps = PageStyle(*args, **kwargs) self.append(ps) return ps def TextStyle(self, *args, **kwargs): ts = TextStyle(*args, **kwargs) self.append(ts) return ts def BlockStyle(self, *args, **kwargs): bs = BlockStyle(*args, **kwargs) self.append(bs) return bs def toElement(self, se): style = Element("Style") style.append(self.bookStyle.toElement(se)) for content in self.contents: 
class BookStyle(LrsObject, LrsContainer):
    """Book-wide style object: holds the StyleDefault, BookSetting and fonts."""

    def __init__(self, styledefault=None):
        """Create a BookStyle.

        @param styledefault: defaults applied to every TextBlock; a fresh
            L{StyleDefault} is created when omitted.  (Bug fix: the old
            default argument was a single shared StyleDefault instance,
            evaluated once at definition time, so mutations leaked between
            BookStyle instances.)
        """
        LrsObject.__init__(self, assignId=True)
        LrsContainer.__init__(self, [Font])
        self.styledefault = StyleDefault() if styledefault is None else styledefault
        self.booksetting = BookSetting()
        self.appendFont = self.append

    def getSettings(self):
        return ["styledefault", "booksetting"]

    def getMethods(self):
        return ["Font", "appendFont"]

    def Font(self, *args, **kwargs):
        """Create, append and return a new Font (factory shortcut)."""
        f = Font(*args, **kwargs)
        self.append(f)
        # Bug fix: previously returned None, unlike the parallel factory
        # methods on Style (PageStyle/TextStyle/BlockStyle), which all
        # return the object they create.
        return f

    def toElement(self, se):
        bookStyle = self.lrsObjectElement("BookStyle", objlabel="stylelabel",
                labelDecorate=False)
        bookStyle.append(self.styledefault.toElement(se))
        bookStyle.append(self.booksetting.toElement(se))
        for font in self.contents:
            bookStyle.append(font.toElement(se))
        return bookStyle

    def toLrf(self, lrfWriter):
        """Emit the BookAtr root object, the book setting, and all fonts."""
        bookAtr = LrfObject("BookAtr", self.objId)
        bookAtr.appendLrfTag(LrfTag("ChildPageTree", lrfWriter.getPageTreeId()))
        bookAtr.appendTagDict(self.styledefault.attrs)

        self.booksetting.toLrf(lrfWriter)

        lrfWriter.append(bookAtr)
        lrfWriter.setRootObject(bookAtr)

        for font in self.contents:
            font.toLrf(lrfWriter)
""" def __init__(self, elementName, defaults=None, alsoAllow=None, **overrides): if defaults is None: defaults = {} LrsObject.__init__(self) LrsAttributes.__init__(self, defaults, alsoAllow=alsoAllow, **overrides) LrsContainer.__init__(self, []) self.elementName = elementName self.objectsAppended = False # self.label = "%s.%d" % (elementName, self.objId) # self.label = str(self.objId) # self.parent = None def update(self, settings): for name, value in settings.items(): if name not in self.__class__.validSettings: raise LrsError(f"{name} not a valid setting for {self.__class__.__name__}") self.attrs[name] = value def getLabel(self): return str(self.objId) def toElement(self, se): element = Element(self.elementName, stylelabel=self.getLabel(), objid=str(self.objId)) element.attrib.update(self.attrs) return element def toLrf(self, lrfWriter): obj = LrfObject(self.elementName, self.objId) obj.appendTagDict(self.attrs, self.__class__.__name__) lrfWriter.append(obj) def __eq__(self, other): if hasattr(other, 'attrs'): return self.__class__ == other.__class__ and self.attrs == other.attrs return False class TextStyle(LrsStyle): """ The text style of a TextBlock. Default is 10 pt. Times Roman. Setting Value Default -------- ----- ------- align "head","center","foot" "head" (left aligned) baselineskip points * 10 120 (12 pt. distance between bottoms of lines) fontsize points * 10 100 (10 pt.) fontweight 1 to 1000 400 (normal, 800 is bold) fontwidth points * 10 or -10 -10 (use values from font) linespace points * 10 10 (min space btw. lines?) wordspace points * 10 25 (min space btw. 
each word) """ baseDefaults = dict( columnsep="0", charspace="0", textlinewidth="2", align="head", linecolor="0x00000000", column="1", fontsize="100", fontwidth="-10", fontescapement="0", fontorientation="0", fontweight="400", fontfacename="Dutch801 Rm BT Roman", textcolor="0x00000000", wordspace="25", letterspace="0", baselineskip="120", linespace="10", parindent="0", parskip="0", textbgcolor="0xFF000000") alsoAllow = ["empdotscode", "empdotsfontname", "refempdotsfont", "rubyadjust", "rubyalign", "rubyoverhang", "empdotsposition", 'emplinetype', 'emplineposition'] validSettings = list(baseDefaults) + alsoAllow defaults = baseDefaults.copy() def __init__(self, **overrides): LrsStyle.__init__(self, "TextStyle", self.defaults, alsoAllow=self.alsoAllow, **overrides) def copy(self): tb = TextStyle() tb.attrs = self.attrs.copy() return tb class BlockStyle(LrsStyle): """ The block style of a TextBlock. Default is an expandable 560 pixel wide area with no space for headers or footers. Setting Value Default -------- ----- ------- blockwidth pixels 560 sidemargin pixels 0 """ baseDefaults = dict( bgimagemode="fix", framemode="square", blockwidth="560", blockheight="100", blockrule="horz-adjustable", layout="LrTb", framewidth="0", framecolor="0x00000000", topskip="0", sidemargin="0", footskip="0", bgcolor="0xFF000000") validSettings = baseDefaults.keys() defaults = baseDefaults.copy() def __init__(self, **overrides): LrsStyle.__init__(self, "BlockStyle", self.defaults, **overrides) def copy(self): tb = BlockStyle() tb.attrs = self.attrs.copy() return tb class PageStyle(LrsStyle): """ Setting Value Default -------- ----- ------- evensidemargin pixels 20 oddsidemargin pixels 20 topmargin pixels 20 """ baseDefaults = dict( topmargin="20", headheight="0", headsep="0", oddsidemargin="20", textheight="747", textwidth="575", footspace="0", evensidemargin="20", footheight="0", layout="LrTb", bgimagemode="fix", pageposition="any", setwaitprop="noreplay", setemptyview="show") 
alsoAllow = ["header", "evenheader", "oddheader", "footer", "evenfooter", "oddfooter"] validSettings = list(baseDefaults) + alsoAllow defaults = baseDefaults.copy() @classmethod def translateHeaderAndFooter(selfClass, parent, settings): selfClass._fixup(parent, "header", settings) selfClass._fixup(parent, "footer", settings) @classmethod def _fixup(selfClass, parent, basename, settings): evenbase = "even" + basename oddbase = "odd" + basename if basename in settings: baseObj = settings[basename] del settings[basename] settings[evenbase] = settings[oddbase] = baseObj if evenbase in settings: evenObj = settings[evenbase] del settings[evenbase] if evenObj.parent is None: parent.append(evenObj) settings[evenbase + "id"] = str(evenObj.objId) if oddbase in settings: oddObj = settings[oddbase] del settings[oddbase] if oddObj.parent is None: parent.append(oddObj) settings[oddbase + "id"] = str(oddObj.objId) def appendReferencedObjects(self, parent): if self.objectsAppended: return PageStyle.translateHeaderAndFooter(parent, self.attrs) self.objectsAppended = True def __init__(self, **settings): # self.fixHeaderSettings(settings) LrsStyle.__init__(self, "PageStyle", self.defaults, alsoAllow=self.alsoAllow, **settings) class Page(LrsObject, LrsContainer): """ Pages are added to Books. Pages can be supplied a PageStyle. If they are not, Page.defaultPageStyle will be used. 
""" defaultPageStyle = PageStyle() def __init__(self, pageStyle=defaultPageStyle, **settings): LrsObject.__init__(self) LrsContainer.__init__(self, [TextBlock, BlockSpace, RuledLine, ImageBlock, Canvas]) self.pageStyle = pageStyle for settingName in settings.keys(): if settingName not in PageStyle.defaults and \ settingName not in PageStyle.alsoAllow: raise LrsError("setting %s not allowed on Page" % settingName) self.settings = settings.copy() def appendReferencedObjects(self, parent): PageStyle.translateHeaderAndFooter(parent, self.settings) self.pageStyle.appendReferencedObjects(parent) if self.pageStyle.parent is None: parent.append(self.pageStyle) LrsContainer.appendReferencedObjects(self, parent) def RuledLine(self, *args, **kwargs): rl = RuledLine(*args, **kwargs) self.append(rl) return rl def BlockSpace(self, *args, **kwargs): bs = BlockSpace(*args, **kwargs) self.append(bs) return bs def TextBlock(self, *args, **kwargs): """ Create and append a new text block (shortcut). """ tb = TextBlock(*args, **kwargs) self.append(tb) return tb def ImageBlock(self, *args, **kwargs): """ Create and append and new Image block (shorthand). 
""" ib = ImageBlock(*args, **kwargs) self.append(ib) return ib def addLrfObject(self, objId): self.stream.appendLrfTag(LrfTag("Link", objId)) def appendLrfTag(self, lrfTag): self.stream.appendLrfTag(lrfTag) def toLrf(self, lrfWriter): # tags: # ObjectList # Link to pagestyle # Parent page tree id # stream of tags p = LrfObject("Page", self.objId) lrfWriter.append(p) pageContent = set() self.stream = LrfTagStream(0) for content in self.contents: content.toLrfContainer(lrfWriter, self) if hasattr(content, "getReferencedObjIds"): pageContent.update(content.getReferencedObjIds()) # print "page contents:", pageContent # ObjectList not needed and causes slowdown in SONY LRF renderer # p.appendLrfTag(LrfTag("ObjectList", pageContent)) p.appendLrfTag(LrfTag("Link", self.pageStyle.objId)) p.appendLrfTag(LrfTag("ParentPageTree", lrfWriter.getPageTreeId())) p.appendTagDict(self.settings) p.appendLrfTags(self.stream.getStreamTags(lrfWriter.getSourceEncoding())) def toElement(self, sourceEncoding): page = self.lrsObjectElement("Page") page.set("pagestyle", self.pageStyle.getLabel()) page.attrib.update(self.settings) for content in self.contents: page.append(content.toElement(sourceEncoding)) return page class TextBlock(LrsObject, LrsContainer): """ TextBlocks are added to Pages. They hold Paragraphs or CRs. If a TextBlock is used in a header, it should be appended to the Book, not to a specific Page. """ defaultTextStyle = TextStyle() defaultBlockStyle = BlockStyle() def __init__(self, textStyle=defaultTextStyle, blockStyle=defaultBlockStyle, **settings): ''' Create TextBlock. @param textStyle: The L{TextStyle} for this block. @param blockStyle: The L{BlockStyle} for this block. @param settings: C{dict} of extra settings to apply to this block. 
''' LrsObject.__init__(self) LrsContainer.__init__(self, [Paragraph, CR]) self.textSettings = {} self.blockSettings = {} for name, value in settings.items(): if name in TextStyle.validSettings: self.textSettings[name] = value elif name in BlockStyle.validSettings: self.blockSettings[name] = value elif name == 'toclabel': self.tocLabel = value else: raise LrsError("%s not a valid setting for TextBlock" % name) self.textStyle = textStyle self.blockStyle = blockStyle # create a textStyle with our current text settings (for Span to find) self.currentTextStyle = textStyle.copy() if self.textSettings else textStyle self.currentTextStyle.attrs.update(self.textSettings) def appendReferencedObjects(self, parent): if self.textStyle.parent is None: parent.append(self.textStyle) if self.blockStyle.parent is None: parent.append(self.blockStyle) LrsContainer.appendReferencedObjects(self, parent) def Paragraph(self, *args, **kwargs): """ Create and append a Paragraph to this TextBlock. A CR is automatically inserted after the Paragraph. To avoid this behavior, create the Paragraph and append it to the TextBlock in a separate call. 
""" p = Paragraph(*args, **kwargs) self.append(p) self.append(CR()) return p def toElement(self, sourceEncoding): tb = self.lrsObjectElement("TextBlock", labelName="Block") tb.attrib.update(self.textSettings) tb.attrib.update(self.blockSettings) tb.set("textstyle", self.textStyle.getLabel()) tb.set("blockstyle", self.blockStyle.getLabel()) if hasattr(self, "tocLabel"): tb.set("toclabel", self.tocLabel) for content in self.contents: tb.append(content.toElement(sourceEncoding)) return tb def getReferencedObjIds(self): ids = [self.objId, self.extraId, self.blockStyle.objId, self.textStyle.objId] for content in self.contents: if hasattr(content, "getReferencedObjIds"): ids.extend(content.getReferencedObjIds()) return ids def toLrf(self, lrfWriter): self.toLrfContainer(lrfWriter, lrfWriter) def toLrfContainer(self, lrfWriter, container): # id really belongs to the outer block extraId = LrsObject.getNextObjId() b = LrfObject("Block", self.objId) b.appendLrfTag(LrfTag("Link", self.blockStyle.objId)) b.appendLrfTags( LrfTagStream(0, [LrfTag("Link", extraId)]).getStreamTags(lrfWriter.getSourceEncoding())) b.appendTagDict(self.blockSettings) container.addLrfObject(b.objId) lrfWriter.append(b) tb = LrfObject("TextBlock", extraId) tb.appendLrfTag(LrfTag("Link", self.textStyle.objId)) tb.appendTagDict(self.textSettings) stream = LrfTagStream(STREAM_COMPRESSED) for content in self.contents: content.toLrfContainer(lrfWriter, stream) if lrfWriter.saveStreamTags: # true only if testing tb.saveStreamTags = stream.tags tb.appendLrfTags( stream.getStreamTags(lrfWriter.getSourceEncoding(), optimizeTags=lrfWriter.optimizeTags, optimizeCompression=lrfWriter.optimizeCompression)) lrfWriter.append(tb) self.extraId = extraId class Paragraph(LrsContainer): """ Note: <P> alone does not make a paragraph. Only a CR inserted into a text block right after a <P> makes a real paragraph. Two Paragraphs appended in a row act like a single Paragraph. 
Also note that there are few autoappenders for Paragraph (and the things that can go in it.) It's less confusing (to me) to use explicit .append methods to build up the text stream. """ def __init__(self, text=None): LrsContainer.__init__(self, [Text, CR, DropCaps, CharButton, LrsSimpleChar1, bytes, str]) if text is not None: if isinstance(text, string_or_bytes): text = Text(text) self.append(text) def CR(self): # Okay, here's a single autoappender for this common operation cr = CR() self.append(cr) return cr def getReferencedObjIds(self): ids = [] for content in self.contents: if hasattr(content, "getReferencedObjIds"): ids.extend(content.getReferencedObjIds()) return ids def toLrfContainer(self, lrfWriter, parent): parent.appendLrfTag(LrfTag("pstart", 0)) for content in self.contents: content.toLrfContainer(lrfWriter, parent) parent.appendLrfTag(LrfTag("pend")) def toElement(self, sourceEncoding): p = Element("P") appendTextElements(p, self.contents, sourceEncoding) return p class LrsTextTag(LrsContainer): def __init__(self, text, validContents): LrsContainer.__init__(self, [Text, bytes, str] + validContents) if text is not None: self.append(text) def toLrfContainer(self, lrfWriter, parent): if hasattr(self, "tagName"): tagName = self.tagName else: tagName = self.__class__.__name__ parent.appendLrfTag(LrfTag(tagName)) for content in self.contents: content.toLrfContainer(lrfWriter, parent) parent.appendLrfTag(LrfTag(tagName + "End")) def toElement(self, se): if hasattr(self, "tagName"): tagName = self.tagName else: tagName = self.__class__.__name__ p = Element(tagName) appendTextElements(p, self.contents, se) return p class LrsSimpleChar1: def isEmpty(self): for content in self.contents: if not content.isEmpty(): return False return True def hasFollowingContent(self): foundSelf = False for content in self.parent.contents: if content == self: foundSelf = True elif foundSelf: if not content.isEmpty(): return True return False class DropCaps(LrsTextTag): def 
class Button(LrsObject, LrsContainer):
    """A clickable button that jumps to a TextBlock via a nested PushButton/JumpTo."""

    def __init__(self, **settings):
        LrsObject.__init__(self, **settings)
        LrsContainer.__init__(self, [PushButton])

    def findJumpToRefs(self):
        """Return (refobj, refpage) of the first JumpTo nested inside a PushButton.

        @raise LrsError: if no PushButton containing a JumpTo is present.
        """
        for sub1 in self.contents:
            if isinstance(sub1, PushButton):
                for sub2 in sub1.contents:
                    if isinstance(sub2, JumpTo):
                        return (sub2.textBlock.objId, sub2.textBlock.parent.objId)
        raise LrsError("%s has no PushButton or JumpTo subs"%self.__class__.__name__)

    def toLrf(self, lrfWriter):
        (refobj, refpage) = self.findJumpToRefs()

        button = LrfObject("Button", self.objId)
        button.appendLrfTag(LrfTag("buttonflags", 0x10))  # pushbutton
        button.appendLrfTag(LrfTag("PushButtonStart"))
        button.appendLrfTag(LrfTag("buttonactions"))
        button.appendLrfTag(LrfTag("jumpto", (int(refpage), int(refobj))))
        # Bug fix: was button.append(...), which bypasses the appendLrfTag
        # API used for every other tag in this sequence.
        button.appendLrfTag(LrfTag("endbuttonactions"))
        button.appendLrfTag(LrfTag("PushButtonEnd"))
        lrfWriter.append(button)

    def toElement(self, se):
        b = self.lrsObjectElement("Button")

        for content in self.contents:
            b.append(content.toElement(se))

        return b
class Plot(LrsSimpleChar1, LrsContainer):
    """Inline plot of an Image or Button inside a text stream.

    @param obj: the Image or Button to plot (may also be supplied later
        via setObj).
    @param xsize, ysize: plot size; must be non-negative.
    @param adjustment: vertical alignment, one of ADJUSTMENT_VALUES
        ('center', 'baseline', 'top', 'bottom'); when unset, LRF output
        falls back to 'bottom'.
    """

    ADJUSTMENT_VALUES = {'center':1, 'baseline':2, 'top':3, 'bottom':4}

    def __init__(self, obj, xsize=0, ysize=0, adjustment=None):
        LrsContainer.__init__(self, [])
        if obj is not None:
            self.setObj(obj)
        if xsize < 0 or ysize < 0:
            raise LrsError('Sizes must be positive semi-definite')
        self.xsize = int(xsize)
        self.ysize = int(ysize)
        if adjustment and adjustment not in Plot.ADJUSTMENT_VALUES:
            # Bug fix: the old message concatenated str + dict_keys, which
            # raised TypeError instead of the intended LrsError.
            raise LrsError('adjustment must be one of ' +
                           ', '.join(Plot.ADJUSTMENT_VALUES))
        self.adjustment = adjustment

    def setObj(self, obj):
        if not isinstance(obj, (Image, Button)):
            raise LrsError('Plot elements can only refer to Image or Button elements')
        self.obj = obj

    def getReferencedObjIds(self):
        return [self.obj.objId]

    def appendReferencedObjects(self, parent):
        if self.obj.parent is None:
            parent.append(self.obj)

    def toElement(self, se):
        elem = Element('Plot', xsize=str(self.xsize), ysize=str(self.ysize),
                refobj=str(self.obj.objId))
        if self.adjustment:
            elem.set('adjustment', self.adjustment)
        return elem

    def toLrfContainer(self, lrfWriter, parent):
        adj = self.adjustment if self.adjustment else 'bottom'
        params = (int(self.xsize), int(self.ysize), int(self.obj.objId),
                  Plot.ADJUSTMENT_VALUES[adj])
        parent.appendLrfTag(LrfTag("Plot", params))
""" def __init__(self, text): LrsContainer.__init__(self, []) self.text = text def isEmpty(self): return not self.text or not self.text.strip() def toLrfContainer(self, lrfWriter, parent): if self.text: if isinstance(self.text, bytes): parent.appendLrfTag(LrfTag("rawtext", self.text)) else: parent.appendLrfTag(LrfTag("textstring", self.text)) class CR(LrsSimpleChar1, LrsContainer): """ A line break (when appended to a Paragraph) or a paragraph break (when appended to a TextBlock). """ def __init__(self): LrsContainer.__init__(self, []) def toElement(self, se): return Element("CR") def toLrfContainer(self, lrfWriter, parent): parent.appendLrfTag(LrfTag("CR")) class Italic(LrsSimpleChar1, LrsTextTag): def __init__(self, text=None): LrsTextTag.__init__(self, text, [LrsSimpleChar1]) class Sub(LrsSimpleChar1, LrsTextTag): def __init__(self, text=None): LrsTextTag.__init__(self, text, []) class Sup(LrsSimpleChar1, LrsTextTag): def __init__(self, text=None): LrsTextTag.__init__(self, text, []) class NoBR(LrsSimpleChar1, LrsTextTag): def __init__(self, text=None): LrsTextTag.__init__(self, text, [LrsSimpleChar1]) class Space(LrsSimpleChar1, LrsContainer): def __init__(self, xsize=0, x=0): LrsContainer.__init__(self, []) if xsize == 0 and x != 0: xsize = x self.xsize = xsize def toElement(self, se): if self.xsize == 0: return return Element("Space", xsize=str(self.xsize)) def toLrfContainer(self, lrfWriter, container): if self.xsize != 0: container.appendLrfTag(LrfTag("Space", self.xsize)) class Box(LrsSimpleChar1, LrsContainer): """ Draw a box around text. Unfortunately, does not seem to do anything on the PRS-500. 
""" def __init__(self, linetype="solid"): LrsContainer.__init__(self, [Text, bytes, str]) if linetype not in LINE_TYPE_ENCODING: raise LrsError(linetype + " is not a valid line type") self.linetype = linetype def toElement(self, se): e = Element("Box", linetype=self.linetype) appendTextElements(e, self.contents, se) return e def toLrfContainer(self, lrfWriter, container): container.appendLrfTag(LrfTag("Box", self.linetype)) for content in self.contents: content.toLrfContainer(lrfWriter, container) container.appendLrfTag(LrfTag("BoxEnd")) class Span(LrsSimpleChar1, LrsContainer): def __init__(self, text=None, **attrs): LrsContainer.__init__(self, [LrsSimpleChar1, Text, bytes, str]) if text is not None: if isinstance(text, string_or_bytes): text = Text(text) self.append(text) for attrname in attrs.keys(): if attrname not in TextStyle.defaults and \ attrname not in TextStyle.alsoAllow: raise LrsError("setting %s not allowed on Span" % attrname) self.attrs = attrs def findCurrentTextStyle(self): parent = self.parent while 1: if parent is None or hasattr(parent, "currentTextStyle"): break parent = parent.parent if parent is None: raise LrsError("no enclosing current TextStyle found") return parent.currentTextStyle def toLrfContainer(self, lrfWriter, container): # find the currentTextStyle oldTextStyle = self.findCurrentTextStyle() # set the attributes we want changed for (name, value) in tuple(iteritems(self.attrs)): if name in oldTextStyle.attrs and oldTextStyle.attrs[name] == self.attrs[name]: self.attrs.pop(name) else: container.appendLrfTag(LrfTag(name, value)) # set a currentTextStyle so nested span can put things back oldTextStyle = self.findCurrentTextStyle() self.currentTextStyle = oldTextStyle.copy() self.currentTextStyle.attrs.update(self.attrs) for content in self.contents: content.toLrfContainer(lrfWriter, container) # put the attributes back the way we found them # the attributes persist beyond the next </P> # if self.hasFollowingContent(): for name in 
class Bold(Span):
    """
    Bold text.  LRF has no dedicated bold tag, so this renders as a Span
    with fontweight 800, while the LRS output keeps the <Bold> element.
    """

    def __init__(self, text=None):
        Span.__init__(self, text, fontweight=800)

    def toElement(self, se):
        element = Element("Bold")
        appendTextElements(element, self.contents, se)
        return element
""" def __init__(self, xspace=0, yspace=0, x=0, y=0): LrsContainer.__init__(self, []) if xspace == 0 and x != 0: xspace = x if yspace == 0 and y != 0: yspace = y self.xspace = xspace self.yspace = yspace def toLrfContainer(self, lrfWriter, container): if self.xspace != 0: container.appendLrfTag(LrfTag("xspace", self.xspace)) if self.yspace != 0: container.appendLrfTag(LrfTag("yspace", self.yspace)) def toElement(self, se): element = Element("BlockSpace") if self.xspace != 0: element.attrib["xspace"] = str(self.xspace) if self.yspace != 0: element.attrib["yspace"] = str(self.yspace) return element class CharButton(LrsSimpleChar1, LrsContainer): """ Define the text and target of a CharButton. Must be passed a JumpButton that is the destination of the CharButton. Only text or SimpleChars can be appended to the CharButton. """ def __init__(self, button, text=None): LrsContainer.__init__(self, [bytes, str, Text, LrsSimpleChar1]) self.button = None if button is not None: self.setButton(button) if text is not None: self.append(text) def setButton(self, button): if not isinstance(button, (JumpButton, Button)): raise LrsError("CharButton button must be a JumpButton or Button") self.button = button def appendReferencedObjects(self, parent): if self.button.parent is None: parent.append(self.button) def getReferencedObjIds(self): return [self.button.objId] def toLrfContainer(self, lrfWriter, container): container.appendLrfTag(LrfTag("CharButton", self.button.objId)) for content in self.contents: content.toLrfContainer(lrfWriter, container) container.appendLrfTag(LrfTag("CharButtonEnd")) def toElement(self, se): cb = Element("CharButton", refobj=str(self.button.objId)) appendTextElements(cb, self.contents, se) return cb class Objects(LrsContainer): def __init__(self): LrsContainer.__init__(self, [JumpButton, TextBlock, HeaderOrFooter, ImageStream, Image, ImageBlock, Button, ButtonBlock]) self.appendJumpButton = self.appendTextBlock = self.appendHeader = \ self.appendFooter = 
self.appendImageStream = \ self.appendImage = self.appendImageBlock = self.append def getMethods(self): return ["JumpButton", "appendJumpButton", "TextBlock", "appendTextBlock", "Header", "appendHeader", "Footer", "appendFooter", "ImageBlock", "ImageStream", "appendImageStream", 'Image','appendImage', 'appendImageBlock'] def getSettings(self): return [] def ImageBlock(self, *args, **kwargs): ib = ImageBlock(*args, **kwargs) self.append(ib) return ib def JumpButton(self, textBlock): b = JumpButton(textBlock) self.append(b) return b def TextBlock(self, *args, **kwargs): tb = TextBlock(*args, **kwargs) self.append(tb) return tb def Header(self, *args, **kwargs): h = Header(*args, **kwargs) self.append(h) return h def Footer(self, *args, **kwargs): h = Footer(*args, **kwargs) self.append(h) return h def ImageStream(self, *args, **kwargs): i = ImageStream(*args, **kwargs) self.append(i) return i def Image(self, *args, **kwargs): i = Image(*args, **kwargs) self.append(i) return i def toElement(self, se): o = Element("Objects") for content in self.contents: o.append(content.toElement(se)) return o def toLrf(self, lrfWriter): for content in self.contents: content.toLrf(lrfWriter) class JumpButton(LrsObject, LrsContainer): """ The target of a CharButton. Needs a parented TextBlock to jump to. Actually creates several elements in the XML. JumpButtons must be eventually appended to a Book (actually, an Object.) 
""" def __init__(self, textBlock): LrsObject.__init__(self) LrsContainer.__init__(self, []) self.textBlock = textBlock def setTextBlock(self, textBlock): self.textBlock = textBlock def toLrf(self, lrfWriter): button = LrfObject("Button", self.objId) button.appendLrfTag(LrfTag("buttonflags", 0x10)) # pushbutton button.appendLrfTag(LrfTag("PushButtonStart")) button.appendLrfTag(LrfTag("buttonactions")) button.appendLrfTag(LrfTag("jumpto", (self.textBlock.parent.objId, self.textBlock.objId))) button.append(LrfTag("endbuttonactions")) button.appendLrfTag(LrfTag("PushButtonEnd")) lrfWriter.append(button) def toElement(self, se): b = self.lrsObjectElement("Button") pb = SubElement(b, "PushButton") SubElement(pb, "JumpTo", refpage=str(self.textBlock.parent.objId), refobj=str(self.textBlock.objId)) return b class RuledLine(LrsContainer, LrsAttributes, LrsObject): """ A line. Default is 500 pixels long, 2 pixels wide. """ defaults = dict( linelength="500", linetype="solid", linewidth="2", linecolor="0x00000000") def __init__(self, **settings): LrsContainer.__init__(self, []) LrsAttributes.__init__(self, self.defaults, **settings) LrsObject.__init__(self) def toLrfContainer(self, lrfWriter, container): a = self.attrs container.appendLrfTag(LrfTag("RuledLine", (a["linelength"], a["linetype"], a["linewidth"], a["linecolor"]))) def toElement(self, se): return Element("RuledLine", self.attrs) class HeaderOrFooter(LrsObject, LrsContainer, LrsAttributes): """ Creates empty header or footer objects. Append PutObj objects to the header or footer to create the text. Note: it seems that adding multiple PutObjs to a header or footer only shows the last one. 
""" defaults = dict(framemode="square", layout="LrTb", framewidth="0", framecolor="0x00000000", bgcolor="0xFF000000") def __init__(self, **settings): LrsObject.__init__(self) LrsContainer.__init__(self, [PutObj]) LrsAttributes.__init__(self, self.defaults, **settings) def put_object(self, obj, x1, y1): self.append(PutObj(obj, x1, y1)) def PutObj(self, *args, **kwargs): p = PutObj(*args, **kwargs) self.append(p) return p def toLrf(self, lrfWriter): hd = LrfObject(self.__class__.__name__, self.objId) hd.appendTagDict(self.attrs) stream = LrfTagStream(0) for content in self.contents: content.toLrfContainer(lrfWriter, stream) hd.appendLrfTags(stream.getStreamTags(lrfWriter.getSourceEncoding())) lrfWriter.append(hd) def toElement(self, se): name = self.__class__.__name__ labelName = name.lower() + "label" hd = self.lrsObjectElement(name, objlabel=labelName) hd.attrib.update(self.attrs) for content in self.contents: hd.append(content.toElement(se)) return hd class Header(HeaderOrFooter): pass class Footer(HeaderOrFooter): pass class Canvas(LrsObject, LrsContainer, LrsAttributes): defaults = dict(framemode="square", layout="LrTb", framewidth="0", framecolor="0x00000000", bgcolor="0xFF000000", canvasheight=0, canvaswidth=0, blockrule='block-adjustable') def __init__(self, width, height, **settings): LrsObject.__init__(self) LrsContainer.__init__(self, [PutObj]) LrsAttributes.__init__(self, self.defaults, **settings) self.settings = self.defaults.copy() self.settings.update(settings) self.settings['canvasheight'] = int(height) self.settings['canvaswidth'] = int(width) def put_object(self, obj, x1, y1): self.append(PutObj(obj, x1, y1)) def toElement(self, source_encoding): el = self.lrsObjectElement("Canvas", **self.settings) for po in self.contents: el.append(po.toElement(source_encoding)) return el def toLrf(self, lrfWriter): self.toLrfContainer(lrfWriter, lrfWriter) def toLrfContainer(self, lrfWriter, container): c = LrfObject("Canvas", self.objId) 
c.appendTagDict(self.settings) stream = LrfTagStream(STREAM_COMPRESSED) for content in self.contents: content.toLrfContainer(lrfWriter, stream) if lrfWriter.saveStreamTags: # true only if testing c.saveStreamTags = stream.tags c.appendLrfTags( stream.getStreamTags(lrfWriter.getSourceEncoding(), optimizeTags=lrfWriter.optimizeTags, optimizeCompression=lrfWriter.optimizeCompression)) container.addLrfObject(c.objId) lrfWriter.append(c) def has_text(self): return bool(self.contents) class PutObj(LrsContainer): """ PutObj holds other objects that are drawn on a Canvas or Header. """ def __init__(self, content, x1=0, y1=0): LrsContainer.__init__(self, [TextBlock, ImageBlock]) self.content = content self.x1 = int(x1) self.y1 = int(y1) def setContent(self, content): self.content = content def appendReferencedObjects(self, parent): if self.content.parent is None: parent.append(self.content) def toLrfContainer(self, lrfWriter, container): container.appendLrfTag(LrfTag("PutObj", (self.x1, self.y1, self.content.objId))) def toElement(self, se): el = Element("PutObj", x1=str(self.x1), y1=str(self.y1), refobj=str(self.content.objId)) return el class ImageStream(LrsObject, LrsContainer): """ Embed an image file into an Lrf. 
""" VALID_ENCODINGS = ["JPEG", "GIF", "BMP", "PNG"] def __init__(self, file=None, encoding=None, comment=None): LrsObject.__init__(self) LrsContainer.__init__(self, []) _checkExists(file) self.filename = file self.comment = comment # TODO: move encoding from extension to lrf module if encoding is None: extension = os.path.splitext(file)[1] if not extension: raise LrsError("file must have extension if encoding is not specified") extension = extension[1:].upper() if extension == "JPG": extension = "JPEG" encoding = extension else: encoding = encoding.upper() if encoding not in self.VALID_ENCODINGS: raise LrsError("encoding or file extension not JPEG, GIF, BMP, or PNG") self.encoding = encoding def toLrf(self, lrfWriter): with open(self.filename, "rb") as f: imageData = f.read() isObj = LrfObject("ImageStream", self.objId) if self.comment is not None: isObj.appendLrfTag(LrfTag("comment", self.comment)) streamFlags = IMAGE_TYPE_ENCODING[self.encoding] stream = LrfStreamBase(streamFlags, imageData) isObj.appendLrfTags(stream.getStreamTags()) lrfWriter.append(isObj) def toElement(self, se): element = self.lrsObjectElement("ImageStream", objlabel="imagestreamlabel", encoding=self.encoding, file=self.filename) element.text = self.comment return element class Image(LrsObject, LrsContainer, LrsAttributes): defaults = dict() def __init__(self, refstream, x0=0, x1=0, y0=0, y1=0, xsize=0, ysize=0, **settings): LrsObject.__init__(self) LrsContainer.__init__(self, []) LrsAttributes.__init__(self, self.defaults, settings) self.x0, self.y0, self.x1, self.y1 = int(x0), int(y0), int(x1), int(y1) self.xsize, self.ysize = int(xsize), int(ysize) self.setRefstream(refstream) def setRefstream(self, refstream): self.refstream = refstream def appendReferencedObjects(self, parent): if self.refstream.parent is None: parent.append(self.refstream) def getReferencedObjIds(self): return [self.objId, self.refstream.objId] def toElement(self, se): element = self.lrsObjectElement("Image", 
**self.attrs) element.set("refstream", str(self.refstream.objId)) for name in ["x0", "y0", "x1", "y1", "xsize", "ysize"]: element.set(name, str(getattr(self, name))) return element def toLrf(self, lrfWriter): ib = LrfObject("Image", self.objId) ib.appendLrfTag(LrfTag("ImageRect", (self.x0, self.y0, self.x1, self.y1))) ib.appendLrfTag(LrfTag("ImageSize", (self.xsize, self.ysize))) ib.appendLrfTag(LrfTag("RefObjId", self.refstream.objId)) lrfWriter.append(ib) class ImageBlock(LrsObject, LrsContainer, LrsAttributes): """ Create an image on a page. """ # TODO: allow other block attributes defaults = BlockStyle.baseDefaults.copy() def __init__(self, refstream, x0="0", y0="0", x1="600", y1="800", xsize="600", ysize="800", blockStyle=BlockStyle(blockrule='block-fixed'), alttext=None, **settings): LrsObject.__init__(self) LrsContainer.__init__(self, [Text, Image]) LrsAttributes.__init__(self, self.defaults, **settings) self.x0, self.y0, self.x1, self.y1 = int(x0), int(y0), int(x1), int(y1) self.xsize, self.ysize = int(xsize), int(ysize) self.setRefstream(refstream) self.blockStyle = blockStyle self.alttext = alttext def setRefstream(self, refstream): self.refstream = refstream def appendReferencedObjects(self, parent): if self.refstream.parent is None: parent.append(self.refstream) if self.blockStyle is not None and self.blockStyle.parent is None: parent.append(self.blockStyle) def getReferencedObjIds(self): objects = [self.objId, self.extraId, self.refstream.objId] if self.blockStyle is not None: objects.append(self.blockStyle.objId) return objects def toLrf(self, lrfWriter): self.toLrfContainer(lrfWriter, lrfWriter) def toLrfContainer(self, lrfWriter, container): # id really belongs to the outer block extraId = LrsObject.getNextObjId() b = LrfObject("Block", self.objId) if self.blockStyle is not None: b.appendLrfTag(LrfTag("Link", self.blockStyle.objId)) b.appendTagDict(self.attrs) b.appendLrfTags( LrfTagStream(0, [LrfTag("Link", 
extraId)]).getStreamTags(lrfWriter.getSourceEncoding())) container.addLrfObject(b.objId) lrfWriter.append(b) ib = LrfObject("Image", extraId) ib.appendLrfTag(LrfTag("ImageRect", (self.x0, self.y0, self.x1, self.y1))) ib.appendLrfTag(LrfTag("ImageSize", (self.xsize, self.ysize))) ib.appendLrfTag(LrfTag("RefObjId", self.refstream.objId)) if self.alttext: ib.appendLrfTag("Comment", self.alttext) lrfWriter.append(ib) self.extraId = extraId def toElement(self, se): element = self.lrsObjectElement("ImageBlock", **self.attrs) element.set("refstream", str(self.refstream.objId)) for name in ["x0", "y0", "x1", "y1", "xsize", "ysize"]: element.set(name, str(getattr(self, name))) element.text = self.alttext return element class Font(LrsContainer): """ Allows a TrueType file to be embedded in an Lrf. """ def __init__(self, file=None, fontname=None, fontfilename=None, encoding=None): LrsContainer.__init__(self, []) try: _checkExists(fontfilename) self.truefile = fontfilename except: try: _checkExists(file) self.truefile = file except: raise LrsError("neither '%s' nor '%s' exists"%(fontfilename, file)) self.file = file self.fontname = fontname self.fontfilename = fontfilename self.encoding = encoding def toLrf(self, lrfWriter): font = LrfObject("Font", LrsObject.getNextObjId()) lrfWriter.registerFontId(font.objId) font.appendLrfTag(LrfTag("FontFilename", lrfWriter.toUnicode(self.truefile))) font.appendLrfTag(LrfTag("FontFacename", lrfWriter.toUnicode(self.fontname))) stream = LrfFileStream(STREAM_FORCE_COMPRESSED, self.truefile) font.appendLrfTags(stream.getStreamTags()) lrfWriter.append(font) def toElement(self, se): element = Element("RegistFont", encoding="TTF", fontname=self.fontname, file=self.file, fontfilename=self.file) return element
80,084
Python
.py
1,866
33.415327
114
0.621672
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,450
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/pylrs/__init__.py
""" This package contains code to generate ebooks in the SONY LRS/F format. It was originally developed by Mike Higgins and has been extended and modified by Kovid Goyal. """
175
Python
.py
5
34
80
0.8
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,451
elements.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/pylrs/elements.py
""" elements.py -- replacements and helpers for ElementTree """ from polyglot.builtins import string_or_bytes class ElementWriter: def __init__(self, e, header=False, sourceEncoding="ascii", spaceBeforeClose=True, outputEncodingName="UTF-16"): self.header = header self.e = e self.sourceEncoding=sourceEncoding self.spaceBeforeClose = spaceBeforeClose self.outputEncodingName = outputEncodingName def _encodeCdata(self, rawText): if isinstance(rawText, bytes): rawText = rawText.decode(self.sourceEncoding) text = rawText.replace("&", "&amp;") text = text.replace("<", "&lt;") text = text.replace(">", "&gt;") return text def _writeAttribute(self, f, name, value): f.write(' %s="' % str(name)) if not isinstance(value, string_or_bytes): value = str(value) value = self._encodeCdata(value) value = value.replace('"', '&quot;') f.write(value) f.write('"') def _writeText(self, f, rawText): text = self._encodeCdata(rawText) f.write(text) def _write(self, f, e): f.write('<' + str(e.tag)) attributes = e.items() attributes.sort() for name, value in attributes: self._writeAttribute(f, name, value) if e.text is not None or len(e) > 0: f.write('>') if e.text: self._writeText(f, e.text) for e2 in e: self._write(f, e2) f.write('</%s>' % e.tag) else: if self.spaceBeforeClose: f.write(' ') f.write('/>') if e.tail is not None: self._writeText(f, e.tail) def toString(self): class x: pass buffer = [] x.write = buffer.append self.write(x) return ''.join(buffer) def write(self, f): if self.header: f.write('<?xml version="1.0" encoding="%s"?>\n' % self.outputEncodingName) self._write(f, self.e)
2,101
Python
.py
58
26.413793
86
0.551605
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,452
pylrf.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/pylrs/pylrf.py
#!/usr/bin/env python """ pylrf.py -- very low level interface to create lrf files. See pylrs for higher level interface that can use this module to render books to lrf. """ import codecs import io import os import struct import zlib from polyglot.builtins import iteritems, string_or_bytes from .pylrfopt import tagListOptimizer PYLRF_VERSION = "1.0" # # Acknowledgement: # This software would not have been possible without the pioneering # efforts of the author of lrf2lrs.py, Igor Skochinsky. # # Copyright (c) 2007 Mike Higgins (Falstaff) # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Change History: # # V1.0 06 Feb 2007 # Initial Release. # # Current limitations and bugs: # Never "scrambles" any streams (even if asked to). This does not seem # to hurt anything. # # Not based on any official documentation, so many assumptions had to be made. # # Can be used to create lrf files that can lock up an eBook reader. # This is your only warning. 
# # Unsupported objects: Canvas, Window, PopUpWindow, Sound, Import, # SoundStream, ObjectInfo # # The only button type supported is JumpButton. # # Unsupported tags: SoundStop, Wait, pos on BlockSpace (and those used by # unsupported objects). # # Tags supporting Japanese text and Asian layout have not been tested. # # Tested on Python 2.4 and 2.5, Windows XP and Sony PRS-500. # # Commented even less than pylrs, but not very useful when called directly, # anyway. # class LrfError(Exception): pass def writeByte(f, byte): f.write(struct.pack("<B", byte)) def writeWord(f, word): if int(word) > 65535: raise LrfError('Cannot encode a number greater than 65535 in a word.') if int(word) < 0: raise LrfError('Cannot encode a number < 0 in a word: '+str(word)) f.write(struct.pack("<H", int(word))) def writeSignedWord(f, sword): f.write(struct.pack("<h", int(float(sword)))) def writeWords(f, *words): f.write(struct.pack("<%dH" % len(words), *words)) def writeDWord(f, dword): f.write(struct.pack("<I", int(dword))) def writeDWords(f, *dwords): f.write(struct.pack("<%dI" % len(dwords), *dwords)) def writeQWord(f, qword): f.write(struct.pack("<Q", qword)) def writeZeros(f, nZeros): f.write(b"\0" * nZeros) def writeString(f, s): f.write(s) def writeIdList(f, idList): writeWord(f, len(idList)) writeDWords(f, *idList) def writeColor(f, color): # TODO: allow color names, web format f.write(struct.pack(">I", int(color, 0))) def writeLineWidth(f, width): writeWord(f, int(width)) def writeUnicode(f, string, encoding): if isinstance(string, bytes): string = string.decode(encoding) string = string.encode("utf-16-le") length = len(string) if length > 65535: raise LrfError('Cannot write strings longer than 65535 characters.') writeWord(f, length) writeString(f, string) def writeRaw(f, string, encoding): if isinstance(string, bytes): string = string.decode(encoding) string = string.encode("utf-16-le") writeString(f, string) def writeRubyAA(f, rubyAA): ralign, radjust = rubyAA radjust = 
{"line-edge":0x10, "none":0}[radjust] ralign = {"start":1, "center":2}[ralign] writeWord(f, ralign | radjust) def writeBgImage(f, bgInfo): imode, iid = bgInfo imode = {"pfix": 0, "fix":1, "tile":2, "centering":3}[imode] writeWord(f, imode) writeDWord(f, iid) def writeEmpDots(f, dotsInfo, encoding): refDotsFont, dotsFontName, dotsCode = dotsInfo writeDWord(f, refDotsFont) LrfTag("fontfacename", dotsFontName).write(f, encoding) writeWord(f, int(dotsCode, 0)) def writeRuledLine(f, lineInfo): lineLength, lineType, lineWidth, lineColor = lineInfo writeWord(f, lineLength) writeWord(f, LINE_TYPE_ENCODING[lineType]) writeWord(f, lineWidth) writeColor(f, lineColor) LRF_SIGNATURE = b"L\x00R\x00F\x00\x00\x00" # XOR_KEY = 48 XOR_KEY = 65024 # that's what lrf2lrs says -- not used, anyway... LRF_VERSION = 1000 # is 999 for librie? lrf2lrs uses 1000 IMAGE_TYPE_ENCODING = dict(GIF=0x14, PNG=0x12, BMP=0x13, JPEG=0x11, JPG=0x11) OBJECT_TYPE_ENCODING = dict( PageTree=0x01, Page=0x02, Header=0x03, Footer=0x04, PageAtr=0x05, PageStyle=0x05, Block=0x06, BlockAtr=0x07, BlockStyle=0x07, MiniPage=0x08, TextBlock=0x0A, Text=0x0A, TextAtr=0x0B, TextStyle=0x0B, ImageBlock=0x0C, Image=0x0C, Canvas=0x0D, ESound=0x0E, ImageStream=0x11, Import=0x12, Button=0x13, Window=0x14, PopUpWindow=0x15, Sound=0x16, SoundStream=0x17, Font=0x19, ObjectInfo=0x1A, BookAtr=0x1C, BookStyle=0x1C, SimpleTextBlock=0x1D, TOC=0x1E ) LINE_TYPE_ENCODING = { 'none':0, 'solid':0x10, 'dashed':0x20, 'double':0x30, 'dotted':0x40 } BINDING_DIRECTION_ENCODING = dict(Lr=1, Rl=16) TAG_INFO = dict( rawtext=(0, writeRaw), ObjectStart=(0xF500, "<IH"), ObjectEnd=(0xF501,), # InfoLink (0xF502) Link=(0xF503, "<I"), StreamSize=(0xF504, writeDWord), StreamData=(0xF505, writeString), StreamEnd=(0xF506,), oddheaderid=(0xF507, writeDWord), evenheaderid=(0xF508, writeDWord), oddfooterid=(0xF509, writeDWord), evenfooterid=(0xF50A, writeDWord), ObjectList=(0xF50B, writeIdList), fontsize=(0xF511, writeSignedWord), fontwidth=(0xF512, 
writeSignedWord), fontescapement=(0xF513, writeSignedWord), fontorientation=(0xF514, writeSignedWord), fontweight=(0xF515, writeWord), fontfacename=(0xF516, writeUnicode), textcolor=(0xF517, writeColor), textbgcolor=(0xF518, writeColor), wordspace=(0xF519, writeSignedWord), letterspace=(0xF51A, writeSignedWord), baselineskip=(0xF51B, writeSignedWord), linespace=(0xF51C, writeSignedWord), parindent=(0xF51D, writeSignedWord), parskip=(0xF51E, writeSignedWord), # F51F, F520 topmargin=(0xF521, writeWord), headheight=(0xF522, writeWord), headsep=(0xF523, writeWord), oddsidemargin=(0xF524, writeWord), textheight=(0xF525, writeWord), textwidth=(0xF526, writeWord), canvaswidth=(0xF551, writeWord), canvasheight=(0xF552, writeWord), footspace=(0xF527, writeWord), footheight=(0xF528, writeWord), bgimage=(0xF529, writeBgImage), setemptyview=(0xF52A, {'show':1, 'empty':0}, writeWord), pageposition=(0xF52B, {'any':0,'upper':1, 'lower':2}, writeWord), evensidemargin=(0xF52C, writeWord), framemode=(0xF52E, {'None':0, 'curve':2, 'square':1}, writeWord), blockwidth=(0xF531, writeWord), blockheight=(0xF532, writeWord), blockrule=(0xF533, {"horz-fixed":0x14, "horz-adjustable":0x12, "vert-fixed":0x41, "vert-adjustable":0x21, "block-fixed":0x44, "block-adjustable":0x22}, writeWord), bgcolor=(0xF534, writeColor), layout=(0xF535, {'TbRl':0x41, 'LrTb':0x34}, writeWord), framewidth=(0xF536, writeWord), framecolor=(0xF537, writeColor), topskip=(0xF538, writeWord), sidemargin=(0xF539, writeWord), footskip=(0xF53A, writeWord), align=(0xF53C, {'head':1, 'center':4, 'foot':8}, writeWord), column=(0xF53D, writeWord), columnsep=(0xF53E, writeSignedWord), minipagewidth=(0xF541, writeWord), minipageheight=(0xF542, writeWord), yspace=(0xF546, writeWord), xspace=(0xF547, writeWord), PutObj=(0xF549, "<HHI"), ImageRect=(0xF54A, "<HHHH"), ImageSize=(0xF54B, "<HH"), RefObjId=(0xF54C, "<I"), PageDiv=(0xF54E, "<HIHI"), StreamFlags=(0xF554, writeWord), Comment=(0xF555, writeUnicode), FontFilename=(0xF559, 
writeUnicode), PageList=(0xF55C, writeIdList), FontFacename=(0xF55D, writeUnicode), buttonflags=(0xF561, writeWord), PushButtonStart=(0xF566,), PushButtonEnd=(0xF567,), buttonactions=(0xF56A,), endbuttonactions=(0xF56B,), jumpto=(0xF56C, "<II"), RuledLine=(0xF573, writeRuledLine), rubyaa=(0xF575, writeRubyAA), rubyoverhang=(0xF576, {'none':0, 'auto':1}, writeWord), empdotsposition=(0xF577, {'before':1, 'after':2}, writeWord), empdots=(0xF578, writeEmpDots), emplineposition=(0xF579, {'before':1, 'after':2}, writeWord), emplinetype=(0xF57A, LINE_TYPE_ENCODING, writeWord), ChildPageTree=(0xF57B, "<I"), ParentPageTree=(0xF57C, "<I"), Italic=(0xF581,), ItalicEnd=(0xF582,), pstart=(0xF5A1, writeDWord), # what goes in the dword? refesound pend=(0xF5A2,), CharButton=(0xF5A7, writeDWord), CharButtonEnd=(0xF5A8,), Rubi=(0xF5A9,), RubiEnd=(0xF5AA,), Oyamoji=(0xF5AB,), OyamojiEnd=(0xF5AC,), Rubimoji=(0xF5AD,), RubimojiEnd=(0xF5AE,), Yoko=(0xF5B1,), YokoEnd=(0xF5B2,), Tate=(0xF5B3,), TateEnd=(0xF5B4,), Nekase=(0xF5B5,), NekaseEnd=(0xF5B6,), Sup=(0xF5B7,), SupEnd=(0xF5B8,), Sub=(0xF5B9,), SubEnd=(0xF5BA,), NoBR=(0xF5BB,), NoBREnd=(0xF5BC,), EmpDots=(0xF5BD,), EmpDotsEnd=(0xF5BE,), EmpLine=(0xF5C1,), EmpLineEnd=(0xF5C2,), DrawChar=(0xF5C3, '<H'), DrawCharEnd=(0xF5C4,), Box=(0xF5C6, LINE_TYPE_ENCODING, writeWord), BoxEnd=(0xF5C7,), Space=(0xF5CA, writeSignedWord), textstring=(0xF5CC, writeUnicode), Plot=(0xF5D1, "<HHII"), CR=(0xF5D2,), RegisterFont=(0xF5D8, writeDWord), setwaitprop=(0xF5DA, {'replay':1, 'noreplay':2}, writeWord), charspace=(0xF5DD, writeSignedWord), textlinewidth=(0xF5F1, writeLineWidth), linecolor=(0xF5F2, writeColor) ) class ObjectTableEntry: def __init__(self, objId, offset, size): self.objId = objId self.offset = offset self.size = size def write(self, f): writeDWords(f, self.objId, self.offset, self.size, 0) class LrfTag: def __init__(self, name, *parameters): try: tagInfo = TAG_INFO[name] except KeyError: raise LrfError("tag name %s not recognized" % name) 
self.name = name self.type = tagInfo[0] self.format = tagInfo[1:] if len(parameters) > 1: raise LrfError("only one parameter allowed on tag %s" % name) if len(parameters) == 0: self.parameter = None else: self.parameter = parameters[0] def write(self, lrf, encoding=None): if self.type != 0: writeWord(lrf, self.type) p = self.parameter if p is None: return # print " Writing tag", self.name for f in self.format: if isinstance(f, dict): p = f[p] elif isinstance(f, string_or_bytes): if isinstance(p, tuple): writeString(lrf, struct.pack(f, *p)) else: writeString(lrf, struct.pack(f, p)) else: if f in [writeUnicode, writeRaw, writeEmpDots]: if encoding is None: raise LrfError("Tag requires encoding") f(lrf, p, encoding) else: f(lrf, p) STREAM_SCRAMBLED = 0x200 STREAM_COMPRESSED = 0x100 STREAM_FORCE_COMPRESSED = 0x8100 STREAM_TOC = 0x0051 class LrfStreamBase: def __init__(self, streamFlags, streamData=None): self.streamFlags = streamFlags self.streamData = streamData def setStreamData(self, streamData): self.streamData = streamData def getStreamTags(self, optimize=False): # tags: # StreamFlags # StreamSize # StreamStart # (data) # StreamEnd # # if flags & 0x200, stream is scrambled # if flags & 0x100, stream is compressed flags = self.streamFlags streamBuffer = self.streamData # implement scramble? I never scramble anything... 
if flags & STREAM_FORCE_COMPRESSED == STREAM_FORCE_COMPRESSED: optimize = False if flags & STREAM_COMPRESSED == STREAM_COMPRESSED: uncompLen = len(streamBuffer) compStreamBuffer = zlib.compress(streamBuffer) if optimize and uncompLen <= len(compStreamBuffer) + 4: flags &= ~STREAM_COMPRESSED else: streamBuffer = struct.pack("<I", uncompLen) + compStreamBuffer return [LrfTag("StreamFlags", flags & 0x01FF), LrfTag("StreamSize", len(streamBuffer)), LrfTag("StreamData", streamBuffer), LrfTag("StreamEnd")] class LrfTagStream(LrfStreamBase): def __init__(self, streamFlags, streamTags=None): LrfStreamBase.__init__(self, streamFlags) if streamTags is None: self.tags = [] else: self.tags = streamTags[:] def appendLrfTag(self, tag): self.tags.append(tag) def getStreamTags(self, encoding, optimizeTags=False, optimizeCompression=False): stream = io.BytesIO() if optimizeTags: tagListOptimizer(self.tags) for tag in self.tags: tag.write(stream, encoding) self.streamData = stream.getvalue() stream.close() return LrfStreamBase.getStreamTags(self, optimize=optimizeCompression) class LrfFileStream(LrfStreamBase): def __init__(self, streamFlags, filename): LrfStreamBase.__init__(self, streamFlags) with open(filename, "rb") as f: self.streamData = f.read() class LrfObject: def __init__(self, name, objId): if objId <= 0: raise LrfError("invalid objId for " + name) self.name = name self.objId = objId self.tags = [] try: self.type = OBJECT_TYPE_ENCODING[name] except KeyError: raise LrfError("object name %s not recognized" % name) def __str__(self): return 'LRFObject: ' + self.name + ", " + str(self.objId) def appendLrfTag(self, tag): self.tags.append(tag) def appendLrfTags(self, tagList): self.tags.extend(tagList) # deprecated old name append = appendLrfTag def appendTagDict(self, tagDict, genClass=None): # # This code does not really belong here, I think. But it # belongs somewhere, so here it is. 
# composites = {} for name, value in iteritems(tagDict): if name == 'rubyAlignAndAdjust': continue if name in { "bgimagemode", "bgimageid", "rubyalign", "rubyadjust", "empdotscode", "empdotsfontname", "refempdotsfont"}: composites[name] = value else: self.append(LrfTag(name, value)) if "rubyalign" in composites or "rubyadjust" in composites: ralign = composites.get("rubyalign", "none") radjust = composites.get("rubyadjust", "start") self.append(LrfTag("rubyaa", (ralign, radjust))) if "bgimagemode" in composites or "bgimageid" in composites: imode = composites.get("bgimagemode", "fix") iid = composites.get("bgimageid", 0) # for some reason, page style uses 0 for "fix" # we call this pfix to differentiate it if genClass == "PageStyle" and imode == "fix": imode = "pfix" self.append(LrfTag("bgimage", (imode, iid))) if "empdotscode" in composites or "empdotsfontname" in composites or \ "refempdotsfont" in composites: dotscode = composites.get("empdotscode", "0x002E") dotsfontname = composites.get("empdotsfontname", "Dutch801 Rm BT Roman") refdotsfont = composites.get("refempdotsfont", 0) self.append(LrfTag("empdots", (refdotsfont, dotsfontname, dotscode))) def write(self, lrf, encoding=None): # print "Writing object", self.name LrfTag("ObjectStart", (self.objId, self.type)).write(lrf) for tag in self.tags: tag.write(lrf, encoding) LrfTag("ObjectEnd").write(lrf) class LrfToc(LrfObject): """ Table of contents. Format of toc is: [ (pageid, objid, string)...] 
""" def __init__(self, objId, toc, se): LrfObject.__init__(self, "TOC", objId) streamData = self._makeTocStream(toc, se) self._makeStreamTags(streamData) def _makeStreamTags(self, streamData): stream = LrfStreamBase(STREAM_TOC, streamData) self.tags.extend(stream.getStreamTags()) def _makeTocStream(self, toc, se): stream = io.BytesIO() nEntries = len(toc) writeDWord(stream, nEntries) lastOffset = 0 writeDWord(stream, lastOffset) for i in range(nEntries - 1): pageId, objId, label = toc[i] entryLen = 4 + 4 + 2 + len(label)*2 lastOffset += entryLen writeDWord(stream, lastOffset) for entry in toc: pageId, objId, label = entry if pageId <= 0: raise LrfError("page id invalid in toc: " + label) if objId <= 0: raise LrfError("textblock id invalid in toc: " + label) writeDWord(stream, pageId) writeDWord(stream, objId) writeUnicode(stream, label, se) streamData = stream.getvalue() stream.close() return streamData class LrfWriter: def __init__(self, sourceEncoding): self.sourceEncoding = sourceEncoding # The following flags are just to have a place to remember these # values. The flags must still be passed to the appropriate classes # in order to have them work. 
self.saveStreamTags = False # used only in testing -- hogs memory # highly experimental -- set to True at your own risk self.optimizeTags = False self.optimizeCompression = False # End of placeholders self.rootObjId = 0 self.rootObj = None self.binding = 1 # 1=front to back, 16=back to front self.dpi = 1600 self.width = 600 self.height = 800 self.colorDepth = 24 self.tocObjId = 0 self.docInfoXml = "" self.thumbnailEncoding = "JPEG" self.thumbnailData = b"" self.objects = [] self.objectTable = [] def getSourceEncoding(self): return self.sourceEncoding def toUnicode(self, string): if isinstance(string, bytes): string = string.decode(self.sourceEncoding) return string def getDocInfoXml(self): return self.docInfoXml def setPageTreeId(self, objId): self.pageTreeId = objId def getPageTreeId(self): return self.pageTreeId def setRootObject(self, obj): if self.rootObjId != 0: raise LrfError("root object already set") self.rootObjId = obj.objId self.rootObj = obj def registerFontId(self, id): if self.rootObj is None: raise LrfError("can't register font -- no root object") self.rootObj.append(LrfTag("RegisterFont", id)) def setTocObject(self, obj): if self.tocObjId != 0: raise LrfError("toc object already set") self.tocObjId = obj.objId def setThumbnailFile(self, filename, encoding=None): with open(filename, "rb") as f: self.thumbnailData = f.read() if encoding is None: encoding = os.path.splitext(filename)[1][1:] encoding = encoding.upper() if encoding not in IMAGE_TYPE_ENCODING: raise LrfError("unknown image type: " + encoding) self.thumbnailEncoding = encoding def append(self, obj): self.objects.append(obj) def addLrfObject(self, objId): pass def writeFile(self, lrf): if self.rootObjId == 0: raise LrfError("no root object has been set") self.writeHeader(lrf) self.writeObjects(lrf) self.updateObjectTableOffset(lrf) self.updateTocObjectOffset(lrf) self.writeObjectTable(lrf) def writeHeader(self, lrf): writeString(lrf, LRF_SIGNATURE) writeWord(lrf, LRF_VERSION) writeWord(lrf, 
XOR_KEY) writeDWord(lrf, self.rootObjId) writeQWord(lrf, len(self.objects)) writeQWord(lrf, 0) # 0x18 objectTableOffset -- will be updated writeZeros(lrf, 4) # 0x20 unknown writeWord(lrf, self.binding) writeDWord(lrf, self.dpi) writeWords(lrf, self.width, self.height, self.colorDepth) writeZeros(lrf, 20) # 0x30 unknown writeDWord(lrf, self.tocObjId) writeDWord(lrf, 0) # 0x48 tocObjectOffset -- will be updated docInfoXml = codecs.BOM_UTF8 + self.docInfoXml.encode("utf-8") compDocInfo = zlib.compress(docInfoXml) writeWord(lrf, len(compDocInfo) + 4) writeWord(lrf, IMAGE_TYPE_ENCODING[self.thumbnailEncoding]) writeDWord(lrf, len(self.thumbnailData)) writeDWord(lrf, len(docInfoXml)) writeString(lrf, compDocInfo) writeString(lrf, self.thumbnailData) def writeObjects(self, lrf): # also appends object entries to the object table self.objectTable = [] for obj in self.objects: objStart = lrf.tell() obj.write(lrf, self.sourceEncoding) objEnd = lrf.tell() self.objectTable.append( ObjectTableEntry(obj.objId, objStart, objEnd-objStart)) def updateObjectTableOffset(self, lrf): # update the offset of the object table tableOffset = lrf.tell() lrf.seek(0x18, 0) writeQWord(lrf, tableOffset) lrf.seek(0, 2) def updateTocObjectOffset(self, lrf): if self.tocObjId == 0: return for entry in self.objectTable: if entry.objId == self.tocObjId: lrf.seek(0x48, 0) writeDWord(lrf, entry.offset) lrf.seek(0, 2) break else: raise LrfError("toc object not in object table") def writeObjectTable(self, lrf): for tableEntry in self.objectTable: tableEntry.write(lrf)
24,069
Python
.py
619
30.476575
80
0.62288
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,453
pylrfopt.py
kovidgoyal_calibre/src/calibre/ebooks/lrf/pylrs/pylrfopt.py
def _optimize(tagList, tagName, conversion):
    """Remove redundant *tagName* tags from *tagList*, in place.

    ``conversion`` converts a tag's parameter to a comparable value
    (e.g. ``int`` for font sizes).
    """
    # Pass 1: looking only at the tags of interest and the text runs,
    # drop every tagName tag that is immediately followed by another
    # one -- the last setting of a consecutive run wins.
    interesting = [t for t in tagList if t.name == tagName or t.name == "rawtext"]
    for cur, nxt in zip(interesting, interesting[1:]):
        if cur.name == tagName and nxt.name == tagName:
            tagList.remove(cur)

    # Pass 2: drop a tagName tag whose converted value equals the value
    # of the previous surviving tagName tag (setting the same value
    # again across intervening text is a no-op).
    settings = [t for t in tagList if t.name == tagName]
    for cur, nxt in zip(settings, settings[1:]):
        if conversion(cur.parameter) == conversion(nxt.parameter):
            tagList.remove(nxt)

    # Pass 3: trailing settings with no text after them have no effect.
    while tagList and tagList[-1].name == tagName:
        tagList.pop()


def tagListOptimizer(tagList):
    """Collapse redundant font settings in *tagList*; return the number removed.

    For example ``fontsize=100, fontsize=200, text, fontsize=100,
    fontsize=200`` collapses to ``fontsize=200, text``.
    """
    before = len(tagList)
    for attr, conv in (("fontsize", int), ("fontweight", int)):
        _optimize(tagList, attr, conv)
    return before - len(tagList)
1,466
Python
.py
35
35.228571
70
0.669474
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,454
htmls.py
kovidgoyal_calibre/src/calibre/ebooks/readability/htmls.py
import re

import lxml.html
from lxml.html import tostring

from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.readability.cleaners import clean_attributes, normalize_spaces
from polyglot.builtins import iteritems


def build_doc(page):
    """Parse raw HTML (bytes or text) into an lxml document, sniffing the encoding."""
    page_unicode = xml_to_unicode(page, strip_encoding_pats=True)[0]
    doc = lxml.html.document_fromstring(page_unicode)
    return doc


def js_re(src, pattern, flags, repl):
    """Apply a JavaScript-style regex replacement to *src*.

    JavaScript '$1' backreferences in *repl* are converted to Python's
    backslash form before substitution.
    """
    # Bug fix: re.sub() takes (replacement, string). The original call had
    # the arguments swapped, substituting *src* into *repl*.
    return re.compile(pattern, flags).sub(repl.replace('$', '\\'), src)


def normalize_entities(cur_title):
    """Replace common typographic characters/entities with plain ASCII."""
    entities = {
        '\u2014':'-',
        '\u2013':'-',
        '&mdash;': '-',
        '&ndash;': '-',
        '\u00A0': ' ',
        '\u00AB': '"',
        '\u00BB': '"',
        '&quot;': '"',
    }
    for c, r in iteritems(entities):
        if c in cur_title:
            cur_title = cur_title.replace(c, r)
    return cur_title


def norm_title(title):
    """Normalize whitespace and typographic entities in *title*."""
    return normalize_entities(normalize_spaces(title))


def get_title(doc):
    """Return the normalized <title> text, or '[no-title]' when absent/empty."""
    try:
        title = doc.find('.//title').text
    except AttributeError:
        title = None
    if not title:
        return '[no-title]'
    return norm_title(title)


def add_match(collection, text, orig):
    """Add *text* to *collection* if it is a plausible sub-title of *orig*.

    Requires at least two words, at least 15 characters, and that the text
    (ignoring quotes) occurs inside the original title.
    """
    text = norm_title(text)
    if len(text.split()) >= 2 and len(text) >= 15:
        if text.replace('"', '') in orig.replace('"', ''):
            collection.add(text)


def shorten_title(doc):
    """Heuristically shorten the document title.

    Prefers the longest heading/candidate text that is a substring of the
    <title>; otherwise tries splitting the title on common site-name
    delimiters. Falls back to the original title when the result would be
    implausibly short or long.
    """
    title = doc.find('.//title').text
    if not title:
        return ''

    title = orig = norm_title(title)

    candidates = set()

    # Headings whose text occurs inside the full title are good candidates.
    for item in ['.//h1', './/h2', './/h3']:
        for e in list(doc.iterfind(item)):
            if e.text:
                add_match(candidates, e.text, orig)
            if e.text_content():
                add_match(candidates, e.text_content(), orig)

    # Elements with title-like ids/classes used by common CMSes.
    for item in [
            "descendant-or-self::*[@id = 'title']",
            "descendant-or-self::*[@id = 'head']",
            "descendant-or-self::*[@id = 'heading']",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' pageTitle ')]",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' news_title ')]",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' title ')]",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' head ')]",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' heading ')]",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' contentheading ')]",
            "descendant-or-self::*[@class and contains(concat(' ', normalize-space(@class), ' '), ' small_header_red ')]"
    ]:
        for e in doc.xpath(item):
            if e.text:
                add_match(candidates, e.text, orig)
            if e.text_content():
                add_match(candidates, e.text_content(), orig)

    if candidates:
        title = sorted(candidates, key=len)[-1]
    else:
        # No candidate found: try stripping a site name around a delimiter,
        # keeping whichever side still looks like a sentence (>= 4 words).
        for delimiter in [' | ', ' - ', ' :: ', ' / ']:
            if delimiter in title:
                parts = orig.split(delimiter)
                if len(parts[0].split()) >= 4:
                    title = parts[0]
                    break
                elif len(parts[-1].split()) >= 4:
                    title = parts[-1]
                    break
        else:
            if ': ' in title:
                parts = orig.split(': ')
                if len(parts[-1].split()) >= 4:
                    title = parts[-1]
                else:
                    title = orig.split(': ', 1)[1]

    if not 15 < len(title) < 150:
        return orig

    return title


def get_body(doc):
    """Serialize the document body with scripts, stylesheets and links removed."""
    for elem in doc.xpath('.//script | .//link | .//style'):
        elem.drop_tree()
    # Bug fix: tostring() returns bytes by default, and str() on bytes
    # produced a "b'...'"-wrapped string (a py2 leftover). Ask lxml for
    # text output directly.
    raw_html = tostring(doc.body or doc, encoding='unicode')
    return clean_attributes(raw_html)
3,934
Python
.py
97
31.319588
121
0.542257
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,455
debug.py
kovidgoyal_calibre/src/calibre/ebooks/readability/debug.py
def save_to_file(text, filename):
    """Write *text* to *filename* as UTF-8, preceded by a charset <meta> tag."""
    header = b'<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />'
    with open(filename, 'wb') as out:
        out.write(header)
        out.write(text.encode('utf-8'))


# Stable per-node numbering used by describe() so repeated dumps of the same
# generic element (tr/td/div/p) get a recognisable two-digit suffix.
uids = {}


def describe(node, depth=2):
    """Return a short, CSS-selector-like description of an lxml *node*.

    Walks up to *depth* ancestors, joining descriptions with ' - '.
    Objects without a ``tag`` attribute are rendered as their type.
    """
    if not hasattr(node, 'tag'):
        return "[%s]" % type(node)
    name = node.tag
    node_id = node.get('id', '')
    if node_id:
        name += '#' + node_id
    node_class = node.get('class', '')
    if node_class:
        name += '.' + node_class.replace(' ', '.')
    # 'div#x' / 'div.x' is noisy; keep just the id/class part.
    if name[:4] in ('div#', 'div.'):
        name = name[3:]
    if name in ('tr', 'td', 'div', 'p'):
        if node in uids:
            uid = uids[node]
        else:
            uid = uids[node] = len(uids) + 1
        name += "%02d" % uid
    if depth:
        parent = node.getparent()
        if parent is not None:
            return name + ' - ' + describe(parent, depth - 1)
    return name
855
Python
.py
24
28.625
89
0.525998
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,456
readability.py
kovidgoyal_calibre/src/calibre/ebooks/readability/readability.py
#!/usr/bin/env python import re import sys from collections import defaultdict from lxml.html import document_fromstring, fragment_fromstring from lxml.html import tostring as htostring from calibre.ebooks.readability.cleaners import clean_attributes, html_cleaner from calibre.ebooks.readability.htmls import build_doc, get_body, get_title, shorten_title from polyglot.builtins import reraise def tounicode(tree_or_node, **kwargs): kwargs['encoding'] = str return htostring(tree_or_node, **kwargs) REGEXES = { 'unlikelyCandidatesRe': re.compile('combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter',re.I), # noqa 'okMaybeItsACandidateRe': re.compile('and|article|body|column|main|shadow',re.I), 'positiveRe': re.compile('article|body|content|entry|hentry|main|page|pagination|post|text|blog|story',re.I), 'negativeRe': re.compile('combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget',re.I), # noqa 'divToPElementsRe': re.compile('<(a|blockquote|dl|div|img|ol|p|pre|table|ul)',re.I), # 'replaceBrsRe': re.compile('(<br[^>]*>[ \n\r\t]*){2,}',re.I), # 'replaceFontsRe': re.compile('<(\/?)font[^>]*>',re.I), # 'trimRe': re.compile('^\s+|\s+$/'), # 'normalizeRe': re.compile('\s{2,}/'), # 'killBreaksRe': re.compile('(<br\s*\/?>(\s|&nbsp;?)*){1,}/'), # 'videoRe': re.compile('http:\/\/(www\.)?(youtube|vimeo)\.com', re.I), # skipFootnoteLink: /^\s*(\[?[a-z0-9]{1,2}\]?|^|edit|citation needed)\s*$/i, } def describe(node, depth=1): if not hasattr(node, 'tag'): return "[%s]" % type(node) name = node.tag if node.get('id', ''): name += '#'+node.get('id') if node.get('class', ''): name += '.' 
+ node.get('class').replace(' ','.') if name[:4] in ['div#', 'div.']: name = name[3:] if depth and node.getparent() is not None: return name+' - '+describe(node.getparent(), depth-1) return name def to_int(x): if not x: return None x = x.strip() if x.endswith('px'): return int(x[:-2]) if x.endswith('em'): return int(x[:-2]) * 12 return int(x) def clean(text): text = re.sub('\\s*\n\\s*', '\n', text) text = re.sub('[ \t]{2,}', ' ', text) return text.strip() def text_length(i): return len(clean(i.text_content() or "")) class Unparsable(ValueError): pass class Document: TEXT_LENGTH_THRESHOLD = 25 RETRY_LENGTH = 250 def __init__(self, input, log, **options): self.input = input self.options = defaultdict(lambda: None) for k, v in options.items(): self.options[k] = v self.html = None self.log = log self.keep_elements = set() def _html(self, force=False): if force or self.html is None: self.html = self._parse(self.input) path = self.options['keep_elements'] if path is not None: self.keep_elements = set(self.html.xpath(path)) return self.html def _parse(self, input): doc = build_doc(input) doc = html_cleaner.clean_html(doc) base_href = self.options['url'] if base_href: doc.make_links_absolute(base_href, resolve_base_href=True) else: doc.resolve_base_href() return doc def content(self): return get_body(self._html(True)) def title(self): return get_title(self._html(True)) def short_title(self): return shorten_title(self._html(True)) def summary(self): try: ruthless = True while True: self._html(True) for i in self.tags(self.html, 'script', 'style'): i.drop_tree() for i in self.tags(self.html, 'body'): i.set('id', 'readabilityBody') if ruthless: self.remove_unlikely_candidates() self.transform_misused_divs_into_paragraphs() candidates = self.score_paragraphs() best_candidate = self.select_best_candidate(candidates) if best_candidate: article = self.get_article(candidates, best_candidate) else: if ruthless: self.log.debug("ruthless removal did not work. 
") ruthless = False self.debug("ended up stripping too much - going for a safer _parse") # try again continue else: self.log.debug("Ruthless and lenient parsing did not work. Returning raw html") article = self.html.find('body') if article is None: article = self.html cleaned_article = self.sanitize(article, candidates) of_acceptable_length = len(cleaned_article or '') >= (self.options['retry_length'] or self.RETRY_LENGTH) if ruthless and not of_acceptable_length: ruthless = False continue # try again else: return cleaned_article except Exception as e: self.log.exception('error getting summary: ') reraise(Unparsable, Unparsable(str(e)), sys.exc_info()[2]) def get_article(self, candidates, best_candidate): # Now that we have the top candidate, look through its siblings for content that might also be related. # Things like preambles, content split by ads that we removed, etc. sibling_score_threshold = max([10, best_candidate['content_score'] * 0.2]) output = document_fromstring('<div/>') parent = output.xpath('//div')[0] best_elem = best_candidate['elem'] for sibling in best_elem.getparent().getchildren(): # if isinstance(sibling, NavigableString): continue#in lxml there no concept of simple text append = False if sibling is best_elem: append = True if sibling in candidates and candidates[sibling]['content_score'] >= sibling_score_threshold: append = True if sibling in self.keep_elements: append = True if sibling.tag == "p": link_density = self.get_link_density(sibling) node_content = sibling.text or "" node_length = len(node_content) if node_length > 80 and link_density < 0.25: append = True elif node_length < 80 and link_density == 0 and re.search(r'\.( |$)', node_content): append = True if append: parent.append(sibling) # if output is not None: # output.append(best_elem) return output.find('body') def select_best_candidate(self, candidates): sorted_candidates = sorted(candidates.values(), key=lambda x: x['content_score'], reverse=True) for candidate in 
sorted_candidates[:5]: elem = candidate['elem'] self.debug("Top 5 : {:6.3f} {}".format(candidate['content_score'], describe(elem))) if len(sorted_candidates) == 0: return None best_candidate = sorted_candidates[0] return best_candidate def get_link_density(self, elem): link_length = 0 for i in elem.findall(".//a"): link_length += text_length(i) # if len(elem.findall(".//div") or elem.findall(".//p")): # link_length = link_length total_length = text_length(elem) return float(link_length) / max(total_length, 1) def score_paragraphs(self, ): MIN_LEN = self.options.get('min_text_length', self.TEXT_LENGTH_THRESHOLD) candidates = {} # self.debug(str([describe(node) for node in self.tags(self.html, "div")])) ordered = [] for elem in self.tags(self.html, "p", "pre", "td"): parent_node = elem.getparent() if parent_node is None: continue grand_parent_node = parent_node.getparent() inner_text = clean(elem.text_content() or "") inner_text_len = len(inner_text) # If this paragraph is less than 25 characters, don't even count it. if inner_text_len < MIN_LEN: continue if parent_node not in candidates: candidates[parent_node] = self.score_node(parent_node) ordered.append(parent_node) if grand_parent_node is not None and grand_parent_node not in candidates: candidates[grand_parent_node] = self.score_node(grand_parent_node) ordered.append(grand_parent_node) content_score = 1 content_score += len(inner_text.split(',')) content_score += min((inner_text_len / 100), 3) # if elem not in candidates: # candidates[elem] = self.score_node(elem) # WTF? candidates[elem]['content_score'] += content_score candidates[parent_node]['content_score'] += content_score if grand_parent_node is not None: candidates[grand_parent_node]['content_score'] += content_score / 2.0 # Scale the final candidates score based on link density. Good content should have a # relatively small link density (5% or less) and be mostly unaffected by this operation. 
for elem in ordered: candidate = candidates[elem] ld = self.get_link_density(elem) score = candidate['content_score'] self.debug(f"Candid: {score:6.3f} {describe(elem)} link density {ld:.3f} -> {score*(1-ld):6.3f}") candidate['content_score'] *= (1 - ld) return candidates def class_weight(self, e): weight = 0 if e.get('class', None): if REGEXES['negativeRe'].search(e.get('class')): weight -= 25 if REGEXES['positiveRe'].search(e.get('class')): weight += 25 if e.get('id', None): if REGEXES['negativeRe'].search(e.get('id')): weight -= 25 if REGEXES['positiveRe'].search(e.get('id')): weight += 25 return weight def score_node(self, elem): content_score = self.class_weight(elem) name = elem.tag.lower() if name == "div": content_score += 5 elif name in ["pre", "td", "blockquote"]: content_score += 3 elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form"]: content_score -= 3 elif name in ["h1", "h2", "h3", "h4", "h5", "h6", "th"]: content_score -= 5 return { 'content_score': content_score, 'elem': elem } def debug(self, *a): # if self.options['debug']: self.log.debug(*a) def remove_unlikely_candidates(self): for elem in self.html.iter(): if elem in self.keep_elements: continue s = "{} {}".format(elem.get('class', ''), elem.get('id', '')) # self.debug(s) if REGEXES['unlikelyCandidatesRe'].search(s) and (not REGEXES['okMaybeItsACandidateRe'].search(s)) and elem.tag != 'body': self.debug("Removing unlikely candidate - %s" % describe(elem)) elem.drop_tree() def transform_misused_divs_into_paragraphs(self): for elem in self.tags(self.html, 'div'): # transform <div>s that do not contain other block elements into <p>s if not REGEXES['divToPElementsRe'].search(str(''.join(map(tounicode, list(elem))))): # self.debug("Altering %s to p" % (describe(elem))) elem.tag = "p" # print("Fixed element "+describe(elem)) for elem in self.tags(self.html, 'div'): if elem.text and elem.text.strip(): p = fragment_fromstring('<p/>') p.text = elem.text elem.text = None elem.insert(0, p) 
# print("Appended "+tounicode(p)+" to "+describe(elem)) for pos, child in reversed(list(enumerate(elem))): if child.tail and child.tail.strip(): p = fragment_fromstring('<p/>') p.text = child.tail child.tail = None elem.insert(pos + 1, p) # print("Inserted "+tounicode(p)+" to "+describe(elem)) if child.tag == 'br': # print('Dropped <br> at '+describe(elem)) child.drop_tree() def tags(self, node, *tag_names): for tag_name in tag_names: yield from node.findall('.//%s' % tag_name) def reverse_tags(self, node, *tag_names): for tag_name in tag_names: yield from reversed(node.findall('.//%s' % tag_name)) def sanitize(self, node, candidates): MIN_LEN = self.options.get('min_text_length', self.TEXT_LENGTH_THRESHOLD) for header in self.tags(node, "h1", "h2", "h3", "h4", "h5", "h6"): if self.class_weight(header) < 0 or self.get_link_density(header) > 0.33: header.drop_tree() for elem in self.tags(node, "form", "iframe", "textarea"): elem.drop_tree() allowed = {} # Conditionally clean <table>s, <ul>s, and <div>s for el in self.reverse_tags(node, "table", "ul", "div"): if el in allowed or el in self.keep_elements: continue weight = self.class_weight(el) if el in candidates: content_score = candidates[el]['content_score'] # print('!',el, '-> %6.3f' % content_score) else: content_score = 0 tag = el.tag if weight + content_score < 0: self.debug("Cleaned %s with score %6.3f and weight %-3s" % (describe(el), content_score, weight, )) el.drop_tree() elif el.text_content().count(",") < 10: counts = {} for kind in ['p', 'img', 'li', 'a', 'embed', 'input']: counts[kind] = len(el.findall('.//%s' %kind)) counts["li"] -= 100 content_length = text_length(el) # Count the text length excluding any surrounding whitespace link_density = self.get_link_density(el) parent_node = el.getparent() if parent_node is not None: if parent_node in candidates: content_score = candidates[parent_node]['content_score'] else: content_score = 0 # if parent_node is not None: # pweight = 
self.class_weight(parent_node) + content_score # pname = describe(parent_node) # else: # pweight = 0 # pname = "no parent" to_remove = False reason = "" # if el.tag == 'div' and counts["img"] >= 1: # continue if counts["p"] and counts["img"] > counts["p"]: reason = "too many images (%s)" % counts["img"] to_remove = True elif counts["li"] > counts["p"] and tag != "ul" and tag != "ol": reason = "more <li>s than <p>s" to_remove = True elif counts["input"] > (counts["p"] / 3): reason = "less than 3x <p>s than <input>s" to_remove = True elif content_length < (MIN_LEN) and (counts["img"] == 0 or counts["img"] > 2): reason = "too short content length %s without a single image" % content_length to_remove = True elif weight < 25 and link_density > 0.2: reason = f"too many links {link_density:.3f} for its weight {weight}" to_remove = True elif weight >= 25 and link_density > 0.5: reason = f"too many links {link_density:.3f} for its weight {weight}" to_remove = True elif (counts["embed"] == 1 and content_length < 75) or counts["embed"] > 1: reason = "<embed>s with too short content length, or too many <embed>s" to_remove = True # if el.tag == 'div' and counts['img'] >= 1 and to_remove: # imgs = el.findall('.//img') # valid_img = False # self.debug(tounicode(el)) # for img in imgs: # # height = img.get('height') # text_length = img.get('text_length') # self.debug ("height %s text_length %s" %(repr(height), repr(text_length))) # if to_int(height) >= 100 or to_int(text_length) >= 100: # valid_img = True # self.debug("valid image" + tounicode(img)) # break # if valid_img: # to_remove = False # self.debug("Allowing %s" %el.text_content()) # for desnode in self.tags(el, "table", "ul", "div"): # allowed[desnode] = True # find x non empty preceding and succeeding siblings i, j = 0, 0 x = 1 siblings = [] for sib in el.itersiblings(): # self.debug(sib.text_content()) sib_content_length = text_length(sib) if sib_content_length: i += 1 siblings.append(sib_content_length) if i == x: break 
for sib in el.itersiblings(preceding=True): # self.debug(sib.text_content()) sib_content_length = text_length(sib) if sib_content_length: j =+ 1 siblings.append(sib_content_length) if j == x: break # self.debug(str(siblings)) if siblings and sum(siblings) > 1000 : to_remove = False self.debug("Allowing %s" % describe(el)) for desnode in self.tags(el, "table", "ul", "div"): allowed[desnode] = True if to_remove: self.debug("Cleaned %6.3f %s with weight %s cause it has %s." % (content_score, describe(el), weight, reason)) # print(tounicode(el)) # self.debug("pname %s pweight %.3f" %(pname, pweight)) el.drop_tree() return clean_attributes(tounicode(node)) def option_parser(): from calibre.utils.config import OptionParser parser = OptionParser(usage='%prog: [options] file') parser.add_option('-v', '--verbose', default=False, action='store_true', dest='verbose', help='Show detailed output information. Useful for debugging') parser.add_option('-k', '--keep-elements', default=None, action='store', dest='keep_elements', help='XPath specifying elements that should not be removed') return parser def main(): from calibre.utils.logging import default_log parser = option_parser() options, args = parser.parse_args() if len(args) != 1: parser.print_help() raise SystemExit(1) with open(args[0], 'rb') as f: raw = f.read() enc = sys.__stdout__.encoding or 'utf-8' if options.verbose: default_log.filter_level = default_log.DEBUG print(Document(raw, default_log, debug=options.verbose, keep_elements=options.keep_elements).summary().encode(enc, 'replace')) if __name__ == '__main__': main()
20,623
Python
.py
428
35.5
199
0.531868
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,457
cleaners.py
kovidgoyal_calibre/src/calibre/ebooks/readability/cleaners.py
# strip out a set of nuisance html attributes that can mess up rendering in RSS feeds
import re

try:
    from lxml_html_clean import Cleaner
except ImportError:
    from lxml.html.clean import Cleaner

# Attribute name patterns to strip. NOTE(review): 'on*' is a regex and
# matches 'o' followed by any number of 'n's, not 'on...' event handlers;
# 'on[-a-z]*' was probably intended. Kept as-is to preserve behaviour.
bad_attrs = ['width', 'height', 'style', '[-a-z]*color', 'background[-a-z]*', 'on*']
single_quoted = "'[^']+'"
double_quoted = '"[^"]+"'
non_space = '[^ "\'>]+'
htmlstrip = re.compile("<"  # open
    "([^>]+) "  # prefix
    "(?:%s) *" % ('|'.join(bad_attrs),) +  # undesirable attributes
    f'= *(?:{non_space}|{single_quoted}|{double_quoted})' +  # value
    "([^>]*)"  # postfix
    ">"  # end
, re.I)


def clean_attributes(html):
    """Repeatedly strip the attributes matched by ``bad_attrs`` from all tags."""
    while htmlstrip.search(html):
        html = htmlstrip.sub(r'<\1\2>', html)
    return html


def normalize_spaces(s):
    """Replace any sequence of whitespace characters with a single space."""
    # Bug fix: the docstring was previously placed *after* the first
    # statement, making it a no-op string expression instead of the
    # function's docstring.
    if not s:
        return ''
    return ' '.join(s.split())


html_cleaner = Cleaner(scripts=True, javascript=True, comments=True,
                       style=True, links=True, meta=False, add_nofollow=False,
                       page_structure=False, processing_instructions=True,
                       embedded=False, frames=False, forms=False,
                       annoying_tags=False, remove_tags=None,
                       remove_unknown_tags=False, safe_attrs_only=False)
1,294
Python
.py
32
34.8125
85
0.618022
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,458
tweak.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/tweak.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import glob
import os

from calibre import CurrentDir
from calibre.customize.ui import plugin_for_input_format, plugin_for_output_format
from calibre.ebooks import DRMError
from calibre.ebooks.conversion.plumber import Plumber, create_oebbook
from calibre.ebooks.mobi import MobiError
from calibre.ebooks.mobi.reader.headers import MetadataHeader
from calibre.ebooks.mobi.reader.mobi6 import MobiReader
from calibre.ebooks.mobi.reader.mobi8 import Mobi8Reader
from calibre.utils.ipc.simple_worker import fork_job
from calibre.utils.logging import default_log


class BadFormat(ValueError):
    # Raised when the input is not a MOBI file, or contains no tweakable KF8 book.
    pass


def do_explode(path, dest):
    # Worker entry point (run via fork_job): unpack the KF8 data from the
    # MOBI file at *path* into the directory *dest*, returning the absolute
    # path of the extracted OPF file.
    with open(path, 'rb') as stream:
        mr = MobiReader(stream, default_log, None, None)

        with CurrentDir(dest):
            mr = Mobi8Reader(mr, default_log)
            opf = os.path.abspath(mr())
            try:
                # Best-effort cleanup of a debug artifact left by the reader;
                # deliberately swallow any error (file may not exist).
                os.remove('debug-raw.html')
            except:
                pass

    return opf


def explode(path, dest, question=lambda x:True):
    # Validate that *path* is a DRM-free MOBI containing KF8 data, then
    # explode it into *dest* in a worker process. *question* is a callback
    # used to confirm the destructive handling of joint MOBI6+KF8 files;
    # returns None if the user declines.
    with open(path, 'rb') as stream:
        raw = stream.read(3)
        stream.seek(0)
        if raw == b'TPZ':
            raise BadFormat(_('This is not a MOBI file. It is a Topaz file.'))

        try:
            header = MetadataHeader(stream, default_log)
        except MobiError:
            raise BadFormat(_('This is not a MOBI file.'))

        if header.encryption_type != 0:
            raise DRMError(_('This file is locked with DRM. It cannot be tweaked.'))

        kf8_type = header.kf8_type

        if kf8_type is None:
            raise BadFormat(_('This MOBI file does not contain a KF8 format '
                'book. KF8 is the new format from Amazon. calibre can '
                'only tweak MOBI files that contain KF8 books. Older '
                'MOBI files without KF8 are not tweakable.'))

        if kf8_type == 'joint':
            if not question(_('This MOBI file contains both KF8 and '
                'older Mobi6 data. Tweaking it will remove the Mobi6 data, which '
                'means the file will not be usable on older Kindles. Are you '
                'sure?')):
                return None

    return fork_job('calibre.ebooks.mobi.tweak', 'do_explode', args=(path,
            dest), no_output=True)['result']


def set_cover(oeb):
    # Ensure the OEB metadata records the cover item referenced by the guide
    # (only when the guide has a cover and the metadata does not).
    if 'cover' not in oeb.guide or oeb.metadata['cover']:
        return
    cover = oeb.guide['cover']
    if cover.href in oeb.manifest.hrefs:
        item = oeb.manifest.hrefs[cover.href]
        oeb.metadata.clear('cover')
        oeb.metadata.add('cover', item.id)


def do_rebuild(opf, dest_path):
    # Worker entry point: convert the exploded OPF back into an AZW3 at
    # *dest_path*, passing the KF8 markup through unchanged
    # (mobi_passthrough avoids re-flowing the content).
    plumber = Plumber(opf, dest_path, default_log)
    plumber.setup_options()
    inp = plugin_for_input_format('azw3')
    outp = plugin_for_output_format('azw3')
    plumber.opts.mobi_passthrough = True
    oeb = create_oebbook(default_log, opf, plumber.opts)
    set_cover(oeb)
    outp.convert(oeb, dest_path, inp, plumber.opts, default_log)


def rebuild(src_dir, dest_path):
    # Rebuild an AZW3 file at *dest_path* from a previously exploded
    # directory *src_dir* (must contain exactly the OPF produced by explode).
    opf = glob.glob(os.path.join(src_dir, '*.opf'))
    if not opf:
        raise ValueError('No OPF file found in %s'%src_dir)
    opf = opf[0]
    # For debugging, uncomment the following two lines
    # def fork_job(a, b, args=None, no_output=True):
    #     do_rebuild(*args)

    fork_job('calibre.ebooks.mobi.tweak', 'do_rebuild', args=(opf, dest_path),
            no_output=True)
3,532
Python
.py
82
35.426829
84
0.649051
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,459
mobiml.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/mobiml.py
''' Transform XHTML/OPS-ish content into Mobipocket HTML 3.2. ''' __license__ = 'GPL v3' __copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.cam>' import copy import numbers import re from contextlib import suppress from lxml import etree from calibre.ebooks.mobi.utils import convert_color_for_font_tag from calibre.ebooks.oeb.base import XHTML, XHTML_NS, barename, namespace, urlnormalize from calibre.ebooks.oeb.stylizer import Stylizer from calibre.ebooks.oeb.transforms.flatcss import KeyMapper from calibre.utils.imghdr import identify from polyglot.builtins import string_or_bytes MBP_NS = 'http://mobipocket.com/ns/mbp' def MBP(name): return f'{{{MBP_NS}}}{name}' MOBI_NSMAP = {None: XHTML_NS, 'mbp': MBP_NS} INLINE_TAGS = {'span', 'a', 'code', 'u', 's', 'big', 'strike', 'tt', 'font', 'q', 'i', 'b', 'em', 'strong', 'sup', 'sub'} HEADER_TAGS = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'} # GR: Added 'caption' to both sets NESTABLE_TAGS = {'ol', 'ul', 'li', 'table', 'tr', 'td', 'th', 'caption'} TABLE_TAGS = {'table', 'tr', 'td', 'th', 'caption'} SPECIAL_TAGS = {'hr', 'br'} CONTENT_TAGS = {'img', 'hr', 'br'} NOT_VTAGS = HEADER_TAGS | NESTABLE_TAGS | TABLE_TAGS | SPECIAL_TAGS | \ CONTENT_TAGS LEAF_TAGS = {'base', 'basefont', 'frame', 'link', 'meta', 'area', 'br', 'col', 'hr', 'img', 'input', 'param'} PAGE_BREAKS = {'always', 'left', 'right'} COLLAPSE = re.compile(r'[ \t\r\n\v]+') def asfloat(value): if not isinstance(value, numbers.Number): return 0.0 return float(value) def convert_margin(style, which): # percentage values come out too large when the user uses a non kindle # output profile like the tablet profile ans = asfloat(style[which]) raw = style._get(which) if isinstance(raw, str) and '%' in raw: with suppress(TypeError): ans = min(style._unit_convert(raw, base=600), ans) return ans def isspace(text): if not text: return True if '\xa0' in text: return False return text.isspace() class BlockState: def __init__(self, body): self.body = body self.nested = [] 
self.para = None self.inline = None self.anchor = None self.vpadding = 0. self.vmargin = 0. self.pbreak = False self.istate = None self.content = False class FormatState: def __init__(self): self.rendered = False self.left = 0. self.halign = 'auto' self.indent = 0. self.fsize = 3 self.ids = set() self.italic = False self.bold = False self.strikethrough = False self.underline = False self.preserve = False self.pre_wrap = False self.family = 'serif' self.bgcolor = 'transparent' self.fgcolor = 'black' self.href = None self.list_num = 0 self.attrib = {} def __eq__(self, other): return self.fsize == other.fsize \ and self.italic == other.italic \ and self.bold == other.bold \ and self.href == other.href \ and self.preserve == other.preserve \ and self.pre_wrap == other.pre_wrap \ and self.family == other.family \ and self.bgcolor == other.bgcolor \ and self.fgcolor == other.fgcolor \ and self.strikethrough == other.strikethrough \ and self.underline == other.underline def __ne__(self, other): return not self.__eq__(other) class MobiMLizer: def __init__(self, ignore_tables=False): self.ignore_tables = ignore_tables def __call__(self, oeb, context): oeb.logger.info('Converting XHTML to Mobipocket markup...') self.oeb = oeb self.log = self.oeb.logger self.opts = context self.profile = profile = context.dest self.fnums = fnums = {v: k for k, v in profile.fnums.items()} self.fmap = KeyMapper(profile.fbase, profile.fbase, fnums.keys()) self.mobimlize_spine() def mobimlize_spine(self): 'Iterate over the spine and convert it to MOBIML' for item in self.oeb.spine: stylizer = Stylizer(item.data, item.href, self.oeb, self.opts, self.profile) body = item.data.find(XHTML('body')) nroot = etree.Element(XHTML('html'), nsmap=MOBI_NSMAP) nbody = etree.SubElement(nroot, XHTML('body')) self.current_spine_item = item self.mobimlize_elem(body, stylizer, BlockState(nbody), [FormatState()]) item.data = nroot # print(etree.tostring(nroot)) def mobimlize_font(self, ptsize): return 
self.fnums[self.fmap[ptsize]] def mobimlize_measure(self, ptsize): if isinstance(ptsize, string_or_bytes): return ptsize embase = self.profile.fbase if round(ptsize) < embase: return "%dpt" % int(round(ptsize)) return "%dem" % int(round(ptsize / embase)) def preize_text(self, text, pre_wrap=False): text = str(text) if pre_wrap: # Replace n consecutive spaces with n-1 NBSP + space text = re.sub(r' {2,}', lambda m:('\xa0'*(len(m.group())-1) + ' '), text) else: text = text.replace(' ', '\xa0') text = text.replace('\r\n', '\n') text = text.replace('\r', '\n') lines = text.split('\n') result = lines[:1] for line in lines[1:]: result.append(etree.Element(XHTML('br'))) if line: result.append(line) return result def mobimlize_content(self, tag, text, bstate, istates): 'Convert text content' if text or tag != 'br': bstate.content = True istate = istates[-1] para = bstate.para if tag in SPECIAL_TAGS and not text: para = para if para is not None else bstate.body elif para is None or tag in ('td', 'th'): body = bstate.body if bstate.pbreak: etree.SubElement(body, MBP('pagebreak')) bstate.pbreak = False bstate.istate = None bstate.anchor = None parent = bstate.nested[-1] if bstate.nested else bstate.body indent = istate.indent left = istate.left if isinstance(indent, string_or_bytes): indent = 0 if indent < 0 and abs(indent) < left: left += indent indent = 0 elif indent != 0 and abs(indent) < self.profile.fbase: indent = (indent / abs(indent)) * self.profile.fbase if tag in NESTABLE_TAGS and not istate.rendered: para = wrapper = etree.SubElement( parent, XHTML(tag), attrib=istate.attrib) bstate.nested.append(para) if tag == 'li' and len(istates) > 1: istates[-2].list_num += 1 para.attrib['value'] = str(istates[-2].list_num) elif tag in NESTABLE_TAGS and istate.rendered: para = wrapper = bstate.nested[-1] elif not self.opts.mobi_ignore_margins and left > 0 and indent >= 0: ems = self.profile.mobi_ems_per_blockquote para = wrapper = etree.SubElement(parent, XHTML('blockquote')) 
para = wrapper emleft = int(round(left / self.profile.fbase)) - ems emleft = min((emleft, 10)) while emleft > ems / 2: para = etree.SubElement(para, XHTML('blockquote')) emleft -= ems else: para = wrapper = etree.SubElement(parent, XHTML('p')) bstate.inline = bstate.para = para vspace = bstate.vpadding + bstate.vmargin bstate.vpadding = bstate.vmargin = 0 if tag not in TABLE_TAGS: if tag in ('ul', 'ol') and vspace > 0: wrapper.addprevious(etree.Element(XHTML('div'), height=self.mobimlize_measure(vspace))) else: wrapper.attrib['height'] = self.mobimlize_measure(vspace) para.attrib['width'] = self.mobimlize_measure(indent) elif tag == 'table' and vspace > 0: vspace = int(round(vspace / self.profile.fbase)) while vspace > 0: wrapper.addprevious(etree.Element(XHTML('br'))) vspace -= 1 if istate.halign != 'auto' and isinstance(istate.halign, (bytes, str)): if isinstance(istate.halign, bytes): istate.halign = istate.halign.decode('utf-8') para.attrib['align'] = istate.halign istate.rendered = True pstate = bstate.istate if tag in CONTENT_TAGS: bstate.inline = para pstate = bstate.istate = None try: etree.SubElement(para, XHTML(tag), attrib=istate.attrib) except: print('Invalid subelement:', para, tag, istate.attrib) raise elif tag in TABLE_TAGS: para.attrib['valign'] = 'top' if istate.ids: for id_ in istate.ids: anchor = etree.Element(XHTML('a'), attrib={'id': id_}) if tag == 'li': try: last = bstate.body[-1][-1] except: break last.insert(0, anchor) anchor.tail = last.text last.text = None else: last = bstate.body[-1] # We use append instead of addprevious so that inline # anchors in large blocks point to the correct place. See # https://bugs.launchpad.net/calibre/+bug/899831 # This could potentially break if inserting an anchor at # this point in the markup is illegal, but I cannot think # of such a case offhand. 
if barename(last.tag) in LEAF_TAGS: last.addprevious(anchor) else: last.append(anchor) istate.ids.clear() if not text: return if not pstate or istate != pstate: inline = para fsize = istate.fsize href = istate.href if not href: bstate.anchor = None elif pstate and pstate.href == href: inline = bstate.anchor else: inline = etree.SubElement(inline, XHTML('a'), href=href) bstate.anchor = inline if fsize != 3: inline = etree.SubElement(inline, XHTML('font'), size=str(fsize)) if istate.family == 'monospace': inline = etree.SubElement(inline, XHTML('tt')) if istate.italic: inline = etree.SubElement(inline, XHTML('i')) if istate.bold: inline = etree.SubElement(inline, XHTML('b')) if istate.bgcolor is not None and istate.bgcolor != 'transparent' : inline = etree.SubElement(inline, XHTML('span'), bgcolor=convert_color_for_font_tag(istate.bgcolor)) if istate.fgcolor != 'black': inline = etree.SubElement(inline, XHTML('font'), color=convert_color_for_font_tag(istate.fgcolor)) if istate.strikethrough: inline = etree.SubElement(inline, XHTML('s')) if istate.underline: inline = etree.SubElement(inline, XHTML('u')) bstate.inline = inline bstate.istate = istate inline = bstate.inline content = self.preize_text(text, pre_wrap=istate.pre_wrap) if istate.preserve or istate.pre_wrap else [text] for item in content: if isinstance(item, string_or_bytes): if len(inline) == 0: inline.text = (inline.text or '') + item else: last = inline[-1] last.tail = (last.tail or '') + item else: inline.append(item) def mobimlize_elem(self, elem, stylizer, bstate, istates, ignore_valign=False): if not isinstance(elem.tag, string_or_bytes) \ or namespace(elem.tag) != XHTML_NS: return style = stylizer.style(elem) # <mbp:frame-set/> does not exist lalalala if ((style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') or style['visibility'] == 'hidden') and elem.get('data-calibre-jacket-searchable-tags', None) != '1'): id_ = elem.get('id', None) if id_: # Keep anchors so people can use display:none # 
to generate hidden TOCs tail = elem.tail elem.clear() elem.text = None elem.set('id', id_) elem.tail = tail elem.tag = XHTML('a') else: return tag = barename(elem.tag) istate = copy.copy(istates[-1]) istate.rendered = False istate.list_num = 0 if tag == 'ol' and 'start' in elem.attrib: try: istate.list_num = int(elem.attrib['start'])-1 except: pass istates.append(istate) left = 0 display = style['display'] if display == 'table-cell': display = 'inline' elif display.startswith('table'): display = 'block' isblock = (not display.startswith('inline') and style['display'] != 'none') isblock = isblock and style['float'] == 'none' isblock = isblock and tag != 'br' if isblock: bstate.para = None istate.halign = style['text-align'] rawti = style._get('text-indent') try: istate.indent = style['text-indent'] except Exception: istate.indent = 0 if hasattr(rawti, 'strip') and '%' in rawti: # We have a percentage text indent, these can come out looking # too large if the user chooses a wide output profile like # tablet istate.indent = min(style._unit_convert(rawti, base=500), istate.indent) if style['margin-left'] == 'auto' \ and style['margin-right'] == 'auto': istate.halign = 'center' margin = convert_margin(style, 'margin-left') padding = asfloat(style['padding-left']) if tag != 'body': left = margin + padding istate.left += left vmargin = convert_margin(style, 'margin-top') bstate.vmargin = max((bstate.vmargin, vmargin)) vpadding = asfloat(style['padding-top']) if vpadding > 0: bstate.vpadding += bstate.vmargin bstate.vmargin = 0 bstate.vpadding += vpadding elif not istate.href: margin = convert_margin(style, 'margin-left') padding = asfloat(style['padding-left']) lspace = margin + padding if lspace > 0: spaces = int(round((lspace * 3) / style['font-size'])) elem.text = ('\xa0' * spaces) + (elem.text or '') margin = convert_margin(style, 'margin-right') padding = asfloat(style['padding-right']) rspace = margin + padding if rspace > 0: spaces = int(round((rspace * 3) / 
style['font-size'])) if len(elem) == 0: elem.text = (elem.text or '') + ('\xa0' * spaces) else: last = elem[-1] last.text = (last.text or '') + ('\xa0' * spaces) if bstate.content and style['page-break-before'] in PAGE_BREAKS: bstate.pbreak = True istate.fsize = self.mobimlize_font(style['font-size']) istate.italic = True if style['font-style'] == 'italic' else False weight = style['font-weight'] istate.bold = weight in ('bold', 'bolder') or asfloat(weight) > 400 istate.preserve = style['white-space'] == 'pre' istate.pre_wrap = style['white-space'] == 'pre-wrap' istate.bgcolor = style['background-color'] istate.fgcolor = style['color'] istate.strikethrough = style.effective_text_decoration == 'line-through' istate.underline = style.effective_text_decoration == 'underline' ff = style['font-family'].lower() if hasattr(style['font-family'], 'lower') else '' if 'monospace' in ff or 'courier' in ff or ff.endswith(' mono'): istate.family = 'monospace' elif ('sans-serif' in ff or 'sansserif' in ff or 'verdana' in ff or 'arial' in ff or 'helvetica' in ff): istate.family = 'sans-serif' else: istate.family = 'serif' if 'id' in elem.attrib: istate.ids.add(elem.attrib['id']) if 'name' in elem.attrib: istate.ids.add(elem.attrib['name']) if tag == 'a' and 'href' in elem.attrib: istate.href = elem.attrib['href'] istate.attrib.clear() if tag == 'img' and 'src' in elem.attrib: istate.attrib['src'] = elem.attrib['src'] istate.attrib['align'] = 'baseline' cssdict = style.cssdict() valign = cssdict.get('vertical-align', None) if valign in ('top', 'bottom', 'middle'): istate.attrib['align'] = valign for prop in ('width', 'height'): if cssdict[prop] != 'auto': value = style[prop] if value == getattr(self.profile, prop): result = '100%' else: # Amazon's renderer does not support # img sizes in units other than px # See #7520 for test case try: pixs = int(round(float(value) / (72/self.profile.dpi))) except: continue result = str(pixs) istate.attrib[prop] = result if 'width' not in 
istate.attrib or 'height' not in istate.attrib: href = self.current_spine_item.abshref(elem.attrib['src']) try: item = self.oeb.manifest.hrefs[urlnormalize(href)] except: self.oeb.logger.warn('Failed to find image:', href) else: try: width, height = identify(item.data)[1:] except Exception: self.oeb.logger.warn('Invalid image:', href) else: if 'width' not in istate.attrib and 'height' not in \ istate.attrib: istate.attrib['width'] = str(width) istate.attrib['height'] = str(height) else: ar = width / height if 'width' not in istate.attrib: try: width = int(istate.attrib['height'])*ar except: pass istate.attrib['width'] = str(int(width)) else: try: height = int(istate.attrib['width'])/ar except: pass istate.attrib['height'] = str(int(height)) item.unload_data_from_memory() elif tag == 'hr' and asfloat(style['width']) > 0 and style._get('width') not in {'100%', 'auto'}: raww = style._get('width') if hasattr(raww, 'strip') and '%' in raww: istate.attrib['width'] = raww else: prop = style['width'] / self.profile.width istate.attrib['width'] = "%d%%" % int(round(prop * 100)) elif display == 'table': tag = 'table' elif display == 'table-row': tag = 'tr' elif display == 'table-cell': tag = 'td' if tag in TABLE_TAGS and self.ignore_tables: tag = 'span' if tag == 'td' else 'div' if tag in ('table', 'td', 'tr'): col = style.backgroundColor if col: elem.set('bgcolor', col) css = style.cssdict() if 'border' in css or 'border-width' in css: elem.set('border', '1') if tag in TABLE_TAGS: for attr in ('rowspan', 'colspan', 'width', 'border', 'scope', 'bgcolor'): if attr in elem.attrib: istate.attrib[attr] = elem.attrib[attr] if tag == 'q': t = elem.text if not t: t = '' elem.text = '\u201c' + t t = elem.tail if not t: t = '' elem.tail = '\u201d' + t text = None if elem.text: if istate.preserve or istate.pre_wrap: text = elem.text elif (len(elem) > 0 and isspace(elem.text) and hasattr(elem[0].tag, 'rpartition') and elem[0].tag.rpartition('}')[-1] not in INLINE_TAGS): text = None 
else: text = COLLAPSE.sub(' ', elem.text) valign = style['vertical-align'] not_baseline = valign in ('super', 'sub', 'text-top', 'text-bottom', 'top', 'bottom') or ( isinstance(valign, numbers.Number) and abs(valign) != 0) issup = valign in ('super', 'text-top', 'top') or ( isinstance(valign, numbers.Number) and valign > 0) vtag = 'sup' if issup else 'sub' if not_baseline and not ignore_valign and tag not in NOT_VTAGS and not isblock: nroot = etree.Element(XHTML('html'), nsmap=MOBI_NSMAP) vbstate = BlockState(etree.SubElement(nroot, XHTML('body'))) vbstate.para = etree.SubElement(vbstate.body, XHTML('p')) self.mobimlize_elem(elem, stylizer, vbstate, istates, ignore_valign=True) if len(istates) > 0: istates.pop() if len(istates) == 0: istates.append(FormatState()) at_start = bstate.para is None if at_start: self.mobimlize_content('span', '', bstate, istates) parent = bstate.para if bstate.inline is None else bstate.inline if parent is not None: vtag = etree.SubElement(parent, XHTML(vtag)) vtag = etree.SubElement(vtag, XHTML('small')) # Add anchors for child in vbstate.body: if child is not vbstate.para: vtag.append(child) else: break if vbstate.para is not None: if vbstate.para.text: vtag.text = vbstate.para.text for child in vbstate.para: vtag.append(child) return if tag == 'blockquote': old_mim = self.opts.mobi_ignore_margins self.opts.mobi_ignore_margins = False if (text or tag in CONTENT_TAGS or tag in NESTABLE_TAGS or ( # We have an id but no text and no children, the id should still # be added. 
istate.ids and tag in ('a', 'span', 'i', 'b', 'u') and len(elem)==0)): if tag == 'li' and len(istates) > 1 and 'value' in elem.attrib: try: value = int(elem.attrib['value']) istates[-2].list_num = value - 1 except: pass self.mobimlize_content(tag, text, bstate, istates) for child in elem: self.mobimlize_elem(child, stylizer, bstate, istates) tail = None if child.tail: if istate.preserve or istate.pre_wrap: tail = child.tail elif bstate.para is None and isspace(child.tail): tail = None else: tail = COLLAPSE.sub(' ', child.tail) if tail: self.mobimlize_content(tag, tail, bstate, istates) if tag == 'blockquote': self.opts.mobi_ignore_margins = old_mim if bstate.content and style['page-break-after'] in PAGE_BREAKS: bstate.pbreak = True if isblock: para = bstate.para if para is not None and para.text == '\xa0' and len(para) < 1: if style.height > 2: para.getparent().replace(para, etree.Element(XHTML('br'))) else: # This is too small to be rendered effectively, drop it para.getparent().remove(para) bstate.para = None bstate.istate = None vmargin = convert_margin(style, 'margin-bottom') bstate.vmargin = max((bstate.vmargin, vmargin)) vpadding = asfloat(style['padding-bottom']) if vpadding > 0: bstate.vpadding += bstate.vmargin bstate.vmargin = 0 bstate.vpadding += vpadding if bstate.nested and bstate.nested[-1].tag == elem.tag: bstate.nested.pop() istates.pop()
26,324
Python
.py
592
30.594595
121
0.514715
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,460
langcodes.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/langcodes.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net' __docformat__ = 'restructuredtext en' from struct import pack from calibre.utils.localization import lang_as_iso639_1 lang_codes = { } main_language = { 0 : "NEUTRAL", 54 : "AFRIKAANS", 28 : "ALBANIAN", 1 : "ARABIC", 43 : "ARMENIAN", 77 : "ASSAMESE", 44 : "AZERI", 45 : "BASQUE", 35 : "BELARUSIAN", 69 : "BENGALI", 2 : "BULGARIAN", 3 : "CATALAN", 4 : "CHINESE", # 26 : "CROATIAN", 5 : "CZECH", 6 : "DANISH", 19 : "DUTCH", 9 : "ENGLISH", 37 : "ESTONIAN", 56 : "FAEROESE", 41 : "FARSI", 11 : "FINNISH", 12 : "FRENCH", 55 : "GEORGIAN", 7 : "GERMAN", 8 : "GREEK", 71 : "GUJARATI", 13 : "HEBREW", 57 : "HINDI", 14 : "HUNGARIAN", 15 : "ICELANDIC", 33 : "INDONESIAN", 16 : "ITALIAN", 17 : "JAPANESE", 75 : "KANNADA", 63 : "KAZAK", 87 : "KONKANI", 18 : "KOREAN", 38 : "LATVIAN", 39 : "LITHUANIAN", 47 : "MACEDONIAN", 62 : "MALAY", 76 : "MALAYALAM", 58 : "MALTESE", 78 : "MARATHI", 97 : "NEPALI", 20 : "NORWEGIAN", 72 : "ORIYA", 21 : "POLISH", 22 : "PORTUGUESE", 70 : "PUNJABI", 23 : "RHAETOROMANIC", 24 : "ROMANIAN", 25 : "RUSSIAN", 59 : "SAMI", 79 : "SANSKRIT", 26 : "SERBIAN", 27 : "SLOVAK", 36 : "SLOVENIAN", 46 : "SORBIAN", 10 : "SPANISH", 48 : "SUTU", 65 : "SWAHILI", 29 : "SWEDISH", 73 : "TAMIL", 68 : "TATAR", 74 : "TELUGU", 30 : "THAI", 49 : "TSONGA", 50 : "TSWANA", 31 : "TURKISH", 34 : "UKRAINIAN", 32 : "URDU", 67 : "UZBEK", 42 : "VIETNAMESE", 52 : "XHOSA", 53 : "ZULU", } sub_language = { 0 : "NEUTRAL", # 1 : "ARABIC_SAUDI_ARABIA", # 2 : "ARABIC_IRAQ", # 3 : "ARABIC_EGYPT", # 4 : "ARABIC_LIBYA", # 5 : "ARABIC_ALGERIA", # 6 : "ARABIC_MOROCCO", # 7 : "ARABIC_TUNISIA", # 8 : "ARABIC_OMAN", # 9 : "ARABIC_YEMEN", # 10 : "ARABIC_SYRIA", # 11 : "ARABIC_JORDAN", # 12 : "ARABIC_LEBANON", # 13 : "ARABIC_KUWAIT", # 14 : "ARABIC_UAE", # 15 : "ARABIC_BAHRAIN", # 16 : "ARABIC_QATAR", # 1 : "AZERI_LATIN", # 2 : "AZERI_CYRILLIC", # 1 : "CHINESE_TRADITIONAL", # 2 : "CHINESE_SIMPLIFIED", # 3 : 
"CHINESE_HONGKONG", # 4 : "CHINESE_SINGAPORE", # 1 : "DUTCH", # 2 : "DUTCH_BELGIAN", # 1 : "FRENCH", # 2 : "FRENCH_BELGIAN", # 3 : "FRENCH_CANADIAN", # 4 : "FRENCH_SWISS", # 5 : "FRENCH_LUXEMBOURG", # 6 : "FRENCH_MONACO", # 1 : "GERMAN", # 2 : "GERMAN_SWISS", # 3 : "GERMAN_AUSTRIAN", # 4 : "GERMAN_LUXEMBOURG", # 5 : "GERMAN_LIECHTENSTEIN", # 1 : "ITALIAN", # 2 : "ITALIAN_SWISS", # 1 : "KOREAN", # 1 : "LITHUANIAN", # 1 : "MALAY_MALAYSIA", # 2 : "MALAY_BRUNEI_DARUSSALAM", # 1 : "NORWEGIAN_BOKMAL", # 2 : "NORWEGIAN_NYNORSK", # 2 : "PORTUGUESE", # 1 : "PORTUGUESE_BRAZILIAN", # 2 : "SERBIAN_LATIN", 3 : "SERBIAN_CYRILLIC", # 1 : "SPANISH", # 2 : "SPANISH_MEXICAN", 4 : "SPANISH_GUATEMALA", 5 : "SPANISH_COSTA_RICA", 6 : "SPANISH_PANAMA", 7 : "SPANISH_DOMINICAN_REPUBLIC", 8 : "SPANISH_VENEZUELA", 9 : "SPANISH_COLOMBIA", 10 : "SPANISH_PERU", 11 : "SPANISH_ARGENTINA", 12 : "SPANISH_ECUADOR", 13 : "SPANISH_CHILE", 14 : "SPANISH_URUGUAY", 15 : "SPANISH_PARAGUAY", 16 : "SPANISH_BOLIVIA", 17 : "SPANISH_EL_SALVADOR", 18 : "SPANISH_HONDURAS", 19 : "SPANISH_NICARAGUA", 20 : "SPANISH_PUERTO_RICO", # 1 : "SWEDISH", # 2 : "SWEDISH_FINLAND", 1 : "UZBEK_LATIN", 2 : "UZBEK_CYRILLIC", } IANA_MOBI = \ {None: {None: (0, 0)}, 'af': {None: (54, 0)}, 'ar': {None: (1, 0), 'AE': (1, 56), 'BH': (1, 60), 'DZ': (1, 20), 'EG': (1, 12), 'JO': (1, 44), 'KW': (1, 52), 'LB': (1, 48), 'MA': (1, 24), 'OM': (1, 32), 'QA': (1, 64), 'SA': (1, 4), 'SY': (1, 40), 'TN': (1, 28), 'YE': (1, 36)}, 'as': {None: (77, 0)}, 'az': {None: (44, 0)}, 'be': {None: (35, 0)}, 'bg': {None: (2, 0)}, 'bn': {None: (69, 0)}, 'ca': {None: (3, 0)}, 'cs': {None: (5, 0)}, 'da': {None: (6, 0)}, 'de': {None: (7, 0), 'AT': (7, 12), 'CH': (7, 8), 'LI': (7, 20), 'LU': (7, 16)}, 'el': {None: (8, 0)}, 'en': {None: (9, 0), 'AU': (9, 12), 'BZ': (9, 40), 'CA': (9, 16), 'GB': (9, 8), 'IE': (9, 24), 'JM': (9, 32), 'NZ': (9, 20), 'PH': (9, 52), 'TT': (9, 44), 'US': (9, 4), 'ZA': (9, 28), 'ZW': (9, 48)}, 'es': {None: (10, 0), 'AR': (10, 44), 'BO': 
(10, 64), 'CL': (10, 52), 'CO': (10, 36), 'CR': (10, 20), 'DO': (10, 28), 'EC': (10, 48), 'ES': (10, 4), 'GT': (10, 16), 'HN': (10, 72), 'MX': (10, 8), 'NI': (10, 76), 'PA': (10, 24), 'PE': (10, 40), 'PR': (10, 80), 'PY': (10, 60), 'SV': (10, 68), 'UY': (10, 56), 'VE': (10, 32)}, 'et': {None: (37, 0)}, 'eu': {None: (45, 0)}, 'fa': {None: (41, 0)}, 'fi': {None: (11, 0)}, 'fo': {None: (56, 0)}, 'fr': {None: (12, 0), 'BE': (12, 8), 'CA': (12, 12), 'CH': (12, 16), 'FR': (12, 4), 'LU': (12, 20), 'MC': (12, 24)}, 'gu': {None: (71, 0)}, 'he': {None: (13, 0)}, 'hi': {None: (57, 0)}, 'hr': {None: (26, 0)}, 'hu': {None: (14, 0)}, 'hy': {None: (43, 0)}, 'id': {None: (33, 0)}, 'is': {None: (15, 0)}, 'it': {None: (16, 0), 'CH': (16, 8), 'IT': (16, 4)}, 'ja': {None: (17, 0)}, 'ka': {None: (55, 0)}, 'kk': {None: (63, 0)}, 'kn': {None: (75, 0)}, 'ko': {None: (18, 0)}, 'kok': {None: (87, 0)}, 'lt': {None: (39, 0)}, 'lv': {None: (38, 0)}, 'mk': {None: (47, 0)}, 'ml': {None: (76, 0)}, 'mr': {None: (78, 0)}, 'ms': {None: (62, 0)}, 'mt': {None: (58, 0)}, 'ne': {None: (97, 0)}, 'nl': {None: (19, 0), 'BE': (19, 8)}, 'no': {None: (20, 0)}, 'or': {None: (72, 0)}, 'pa': {None: (70, 0)}, 'pl': {None: (21, 0)}, 'pt': {None: (22, 0), 'BR': (22, 4), 'PT': (22, 8)}, 'rm': {None: (23, 0)}, 'ro': {None: (24, 0)}, 'ru': {None: (25, 0)}, 'sa': {None: (79, 0)}, 'se': {None: (59, 0)}, 'sk': {None: (27, 0)}, 'sl': {None: (36, 0)}, 'sq': {None: (28, 0)}, 'sr': {None: (26, 12), 'RS': (26, 12)}, 'st': {None: (48, 0)}, 'sv': {None: (29, 0), 'FI': (29, 8)}, 'sw': {None: (65, 0)}, 'ta': {None: (73, 0)}, 'te': {None: (74, 0)}, 'th': {None: (30, 0)}, 'tn': {None: (50, 0)}, 'tr': {None: (31, 0)}, 'ts': {None: (49, 0)}, 'tt': {None: (68, 0)}, 'uk': {None: (34, 0)}, 'ur': {None: (32, 0)}, 'uz': {None: (67, 0), 'UZ': (67, 8)}, 'vi': {None: (42, 0)}, 'wen': {None: (46, 0)}, 'xh': {None: (52, 0)}, 'zh': {None: (4, 0), 'CN': (4, 8), 'HK': (4, 12), 'SG': (4, 16), 'TW': (4, 4)}, 'zu': {None: (53, 0)}} def 
iana2mobi(icode): langdict, subtags = IANA_MOBI[None], [] if icode: subtags = list(icode.split('-')) while len(subtags) > 0: lang = subtags.pop(0).lower() lang = lang_as_iso639_1(lang) if lang and lang in IANA_MOBI: langdict = IANA_MOBI[lang] break mcode = langdict[None] while len(subtags) > 0: subtag = subtags.pop(0) if subtag not in langdict: subtag = subtag.title() if subtag not in langdict: subtag = subtag.upper() if subtag in langdict: mcode = langdict[subtag] break return pack('>HBB', 0, mcode[1], mcode[0]) def mobi2iana(langcode, sublangcode): prefix = suffix = None for code, d in IANA_MOBI.items(): for subcode, t in d.items(): cc, cl = t if cc == langcode: prefix = code if cl == sublangcode: suffix = subcode.lower() if subcode else None break if prefix is not None: break if prefix is None: return 'und' if suffix is None: return prefix return prefix + '-' + suffix
9,889
Python
.py
343
19.008746
61
0.373859
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,461
utils.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/utils.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os import string import struct import zlib from collections import OrderedDict from io import BytesIO from tinycss.color3 import parse_color_string from calibre.ebooks import normalize from calibre.utils.img import image_from_data, image_to_data, png_data_to_gif_data, resize_image, save_cover_data_to, scale_image from calibre.utils.imghdr import what from polyglot.builtins import as_bytes IMAGE_MAX_SIZE = 10 * 1024 * 1024 RECORD_SIZE = 0x1000 # 4096 (Text record size (uncompressed)) class PolyglotDict(dict): def __setitem__(self, key, val): if isinstance(key, str): key = key.encode('utf-8') dict.__setitem__(self, key, val) def __getitem__(self, key): if isinstance(key, str): key = key.encode('utf-8') return dict.__getitem__(self, key) def __contains__(self, key): if isinstance(key, str): key = key.encode('utf-8') return dict.__contains__(self, key) def decode_string(raw, codec='utf-8', ordt_map=None): length, = struct.unpack(b'>B', raw[0:1]) raw = raw[1:1+length] consumed = length+1 if ordt_map: return ''.join(ordt_map[x] for x in bytearray(raw)), consumed return raw.decode(codec), consumed def decode_hex_number(raw, codec='utf-8'): ''' Return a variable length number encoded using hexadecimal encoding. These numbers have the first byte which tells the number of bytes that follow. The bytes that follow are simply the hexadecimal representation of the number. :param raw: Raw binary data as a bytestring :return: The number and the number of bytes from raw that the number occupies. ''' raw, consumed = decode_string(raw, codec=codec) return int(raw, 16), consumed def encode_string(raw): ans = bytearray(as_bytes(raw)) ans.insert(0, len(ans)) return bytes(ans) def encode_number_as_hex(num): ''' Encode num as a variable length encoded hexadecimal number. Returns the bytestring containing the encoded number. 
These numbers have the first byte which tells the number of bytes that follow. The bytes that follow are simply the hexadecimal representation of the number. ''' num = hex(num)[2:].upper().encode('ascii') nlen = len(num) if nlen % 2 != 0: num = b'0'+num return encode_string(num) def encint(value, forward=True): ''' Some parts of the Mobipocket format encode data as variable-width integers. These integers are represented big-endian with 7 bits per byte in bits 1-7. They may be either forward-encoded, in which case only the first byte has bit 8 set, or backward-encoded, in which case only the last byte has bit 8 set. For example, the number 0x11111 = 0b10001000100010001 would be represented forward-encoded as: 0x04 0x22 0x91 = 0b100 0b100010 0b10010001 And backward-encoded as: 0x84 0x22 0x11 = 0b10000100 0b100010 0b10001 This function encodes the integer ``value`` as a variable width integer and returns the bytestring corresponding to it. If forward is True the bytes returned are suitable for prepending to the output buffer, otherwise they must be append to the output buffer. ''' if value < 0: raise ValueError('Cannot encode negative numbers as vwi') # Encode vwi byts = bytearray() while True: b = value & 0b01111111 value >>= 7 # shift value to the right by 7 bits byts.append(b) if value == 0: break byts[0 if forward else -1] |= 0b10000000 byts.reverse() return bytes(byts) def decint(raw, forward=True): ''' Read a variable width integer from the bytestring or bytearray raw and return the integer and the number of bytes read. If forward is True bytes are read from the start of raw, otherwise from the end of raw. This function is the inverse of encint above, see its docs for more details. 
''' val = 0 byts = bytearray() src = bytearray(raw) if not forward: src.reverse() for bnum in src: byts.append(bnum & 0b01111111) if bnum & 0b10000000: break if not forward: byts.reverse() for byte in byts: val <<= 7 # Shift value to the left by 7 bits val |= byte return val, len(byts) def test_decint(num): for d in (True, False): raw = encint(num, forward=d) sz = len(raw) if (num, sz) != decint(raw, forward=d): raise ValueError('Failed for num %d, forward=%r: %r != %r' % ( num, d, (num, sz), decint(raw, forward=d))) def rescale_image(data, maxsizeb=IMAGE_MAX_SIZE, dimen=None): ''' Convert image setting all transparent pixels to white and changing format to JPEG. Ensure the resultant image has a byte size less than maxsizeb. If dimen is not None, generate a thumbnail of width=dimen, height=dimen or width, height = dimen (depending on the type of dimen) Returns the image as a bytestring ''' if dimen is not None: if hasattr(dimen, '__len__'): width, height = dimen else: width = height = dimen data = scale_image(data, width=width, height=height, compression_quality=90)[-1] else: # Replace transparent pixels with white pixels and convert to JPEG data = save_cover_data_to(data) if len(data) <= maxsizeb: return data orig_data = data # save it in case compression fails quality = 90 while len(data) > maxsizeb and quality >= 5: data = image_to_data(image_from_data(orig_data), compression_quality=quality) quality -= 5 if len(data) <= maxsizeb: return data orig_data = data scale = 0.9 while len(data) > maxsizeb and scale >= 0.05: img = image_from_data(data) w, h = img.width(), img.height() img = resize_image(img, int(scale*w), int(scale*h)) data = image_to_data(img, compression_quality=quality) scale -= 0.05 return data def get_trailing_data(record, extra_data_flags): ''' Given a text record as a bytestring and the extra data flags from the MOBI header, return the trailing data as a dictionary, mapping bit number to data as bytestring. 
Also returns the record - all trailing data. :return: Trailing data, record - trailing data ''' data = OrderedDict() flags = extra_data_flags >> 1 num = 0 while flags: num += 1 if flags & 0b1: sz, consumed = decint(record, forward=False) if sz > consumed: data[num] = record[-sz:-consumed] record = record[:-sz] flags >>= 1 # Read multibyte chars if any if extra_data_flags & 0b1: # Only the first two bits are used for the size since there can # never be more than 3 trailing multibyte chars sz = (ord(record[-1:]) & 0b11) + 1 consumed = 1 if sz > consumed: data[0] = record[-sz:-consumed] record = record[:-sz] return data, record def encode_trailing_data(raw): ''' Given some data in the bytestring raw, return a bytestring of the form <data><size> where size is a backwards encoded vwi whose value is the length of the entire returned bytestring. data is the bytestring passed in as raw. This is the encoding used for trailing data entries at the end of text records. See get_trailing_data() for details. ''' lsize = 1 while True: encoded = encint(len(raw) + lsize, forward=False) if len(encoded) == lsize: break lsize += 1 return raw + encoded def encode_fvwi(val, flags, flag_size=4): ''' Encode the value val and the flag_size bits from flags as a fvwi. This encoding is used in the trailing byte sequences for indexing. Returns encoded bytestring. ''' ans = val << flag_size for i in range(flag_size): ans |= (flags & (1 << i)) return encint(ans) def decode_fvwi(byts, flag_size=4): ''' Decode encoded fvwi. Returns number, flags, consumed ''' arg, consumed = decint(bytes(byts)) val = arg >> flag_size flags = 0 for i in range(flag_size): flags |= (arg & (1 << i)) return val, flags, consumed def decode_tbs(byts, flag_size=4): ''' Trailing byte sequences for indexing consists of series of fvwi numbers. This function reads the fvwi number and its associated flags. It then uses the flags to read any more numbers that belong to the series. 
The flags are the lowest 4 bits of the vwi (see the encode_fvwi function above). Returns the fvwi number, a dictionary mapping flags bits to the associated data and the number of bytes consumed. ''' byts = bytes(byts) val, flags, consumed = decode_fvwi(byts, flag_size=flag_size) extra = {} byts = byts[consumed:] if flags & 0b1000 and flag_size > 3: extra[0b1000] = True if flags & 0b0010: x, consumed2 = decint(byts) byts = byts[consumed2:] extra[0b0010] = x consumed += consumed2 if flags & 0b0100: extra[0b0100] = ord(byts[0:1]) byts = byts[1:] consumed += 1 if flags & 0b0001: x, consumed2 = decint(byts) byts = byts[consumed2:] extra[0b0001] = x consumed += consumed2 return val, extra, consumed def encode_tbs(val, extra, flag_size=4): ''' Encode the number val and the extra data in the extra dict as an fvwi. See decode_tbs above. ''' flags = 0 for flag in extra: flags |= flag ans = encode_fvwi(val, flags, flag_size=flag_size) if 0b0010 in extra: ans += encint(extra[0b0010]) if 0b0100 in extra: ans += bytes(bytearray([extra[0b0100]])) if 0b0001 in extra: ans += encint(extra[0b0001]) return ans def utf8_text(text): ''' Convert a possibly null string to utf-8 bytes, guaranteeing to return a non empty, normalized bytestring. ''' if text and text.strip(): text = text.strip() if not isinstance(text, str): text = text.decode('utf-8', 'replace') text = normalize(text).encode('utf-8') else: text = _('Unknown').encode('utf-8') return text def align_block(raw, multiple=4, pad=b'\0'): ''' Return raw with enough pad bytes append to ensure its length is a multiple of 4. ''' extra = len(raw) % multiple if extra == 0: return raw return raw + pad*(multiple - extra) def detect_periodical(toc, log=None): ''' Detect if the TOC object toc contains a periodical that conforms to the structure required by kindlegen to generate a periodical. 
''' if toc.count() < 1 or not toc[0].klass == 'periodical': return False for node in toc.iterdescendants(): if node.depth() == 1 and node.klass != 'article': if log is not None: log.debug( 'Not a periodical: Deepest node does not have ' 'class="article"') return False if node.depth() == 2 and node.klass != 'section': if log is not None: log.debug( 'Not a periodical: Second deepest node does not have' ' class="section"') return False if node.depth() == 3 and node.klass != 'periodical': if log is not None: log.debug('Not a periodical: Third deepest node' ' does not have class="periodical"') return False if node.depth() > 3: if log is not None: log.debug('Not a periodical: Has nodes of depth > 3') return False return True def count_set_bits(num): if num < 0: num = -num ans = 0 while num > 0: ans += (num & 0b1) num >>= 1 return ans def to_base(num, base=32, min_num_digits=None): digits = string.digits + string.ascii_uppercase sign = 1 if num >= 0 else -1 if num == 0: return ('0' if min_num_digits is None else '0'*min_num_digits) num *= sign ans = [] while num: ans.append(digits[(num % base)]) num //= base if min_num_digits is not None and len(ans) < min_num_digits: ans.extend('0'*(min_num_digits - len(ans))) if sign < 0: ans.append('-') ans.reverse() return ''.join(ans) def mobify_image(data): 'Convert PNG images to GIF as the idiotic Kindle cannot display some PNG' fmt = what(None, data) if fmt == 'png': data = png_data_to_gif_data(data) return data # Font records {{{ def read_font_record(data, extent=1040): ''' Return the font encoded in the MOBI FONT record represented by data. The return value in a dict with fields raw_data, font_data, err, ext, headers. :param extent: The number of obfuscated bytes. So far I have only encountered files with 1040 obfuscated bytes. If you encounter an obfuscated record for which this function fails, try different extent values (easily automated). 
raw_data is the raw data in the font record font_data is the decoded font_data or None if an error occurred err is not None if some error occurred ext is the font type (ttf for TrueType, dat for unknown and failed if an error occurred) headers is the list of decoded headers from the font record or None if decoding failed ''' # Format: # bytes 0 - 3: 'FONT' # bytes 4 - 7: Uncompressed size # bytes 8 - 11: flags # bit 1 - zlib compression # bit 2 - XOR obfuscated # bytes 12 - 15: offset to start of compressed data # bytes 16 - 19: length of XOR string # bytes 19 - 23: offset to start of XOR data # The zlib compressed data begins with 2 bytes of header and # has 4 bytes of checksum at the end ans = {'raw_data':data, 'font_data':None, 'err':None, 'ext':'failed', 'headers':None, 'encrypted':False} try: usize, flags, dstart, xor_len, xor_start = struct.unpack_from( b'>LLLLL', data, 4) except: ans['err'] = 'Failed to read font record header fields' return ans font_data = data[dstart:] ans['headers'] = {'usize':usize, 'flags':bin(flags), 'xor_len':xor_len, 'xor_start':xor_start, 'dstart':dstart} if flags & 0b10: # De-obfuscate the data key = bytearray(data[xor_start:xor_start+xor_len]) buf = bytearray(font_data) extent = len(font_data) if extent is None else extent extent = min(extent, len(font_data)) for n in range(extent): buf[n] ^= key[n%xor_len] # XOR of buf and key font_data = bytes(buf) ans['encrypted'] = True if flags & 0b1: # ZLIB compressed data try: font_data = zlib.decompress(font_data) except Exception as e: ans['err'] = 'Failed to zlib decompress font data (%s)'%e return ans if len(font_data) != usize: ans['err'] = 'Uncompressed font size mismatch' return ans ans['font_data'] = font_data sig = font_data[:4] ans['ext'] = ('ttf' if sig in {b'\0\1\0\0', b'true', b'ttcf'} else 'otf' if sig == b'OTTO' else 'dat') return ans def write_font_record(data, obfuscate=True, compress=True): ''' Write the ttf/otf font represented by data into a font record. 
See read_font_record() for details on the format of the record. ''' flags = 0 key_len = 20 usize = len(data) xor_key = b'' if compress: flags |= 0b1 data = zlib.compress(data, 9) if obfuscate and len(data) >= 1040: flags |= 0b10 xor_key = os.urandom(key_len) key = bytearray(xor_key) data = bytearray(data) for i in range(1040): data[i] ^= key[i%key_len] data = bytes(data) key_start = struct.calcsize(b'>5L') + 4 data_start = key_start + len(xor_key) header = b'FONT' + struct.pack(b'>5L', usize, flags, data_start, len(xor_key), key_start) return header + xor_key + data # }}} def create_text_record(text): ''' Return a Palmdoc record of size RECORD_SIZE from the text file object. In case the record ends in the middle of a multibyte character return the overlap as well. Returns data, overlap: where both are byte strings. overlap is the extra bytes needed to complete the truncated multibyte character. ''' opos = text.tell() text.seek(0, 2) # npos is the position of the next record npos = min((opos + RECORD_SIZE, text.tell())) # Number of bytes from the next record needed to complete the last # character in this record extra = 0 last = b'' while not last.decode('utf-8', 'ignore'): # last contains no valid utf-8 characters size = len(last) + 1 text.seek(npos - size) last = text.read(size) # last now has one valid utf-8 char and possibly some bytes that belong # to a truncated char try: last.decode('utf-8', 'strict') except UnicodeDecodeError: # There are some truncated bytes in last prev = len(last) while True: text.seek(npos - prev) last = text.read(len(last) + 1) try: last.decode('utf-8') except UnicodeDecodeError: pass else: break extra = len(last) - prev text.seek(opos) data = text.read(RECORD_SIZE) overlap = text.read(extra) text.seek(npos) return data, overlap class CNCX: # {{{ ''' Create the CNCX records. These are records containing all the strings from an index. 
Each record is of the form: <vwi string size><utf-8 encoded string> ''' MAX_STRING_LENGTH = 500 def __init__(self, strings=()): self.strings = OrderedDict((s, 0) for s in strings) self.records = [] offset = 0 buf = BytesIO() RECORD_LIMIT = 0x10000 - 1024 # kindlegen appears to use 1024, PDB limit is 0x10000 for key in self.strings: utf8 = utf8_text(key[:self.MAX_STRING_LENGTH]) l = len(utf8) sz_bytes = encint(l) raw = sz_bytes + utf8 if buf.tell() + len(raw) > RECORD_LIMIT: self.records.append(align_block(buf.getvalue())) buf.seek(0), buf.truncate(0) offset = len(self.records) * 0x10000 buf.write(raw) self.strings[key] = offset offset += len(raw) val = buf.getvalue() if val: self.records.append(align_block(val)) def __getitem__(self, string): return self.strings[string] def __bool__(self): return bool(self.records) __nonzero__ = __bool__ def __len__(self): return len(self.records) # }}} def is_guide_ref_start(ref): return (ref.title.lower() == 'start' or (ref.type and ref.type.lower() in {'start', 'other.start', 'text'})) def convert_color_for_font_tag(val): rgba = parse_color_string(str(val or '')) if rgba is None or rgba == 'currentColor': return str(val) def clamp(x): return min(x, max(0, x), 1) rgb = map(clamp, rgba[:3]) return '#' + ''.join(map(lambda x:'%02x' % int(x * 255), rgb))
19,836
Python
.py
536
29.981343
129
0.622225
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,462
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' class MobiError(Exception): pass # That might be a bit small on the PW, but Amazon/KG 2.5 still uses these values, even when delivered to a PW MAX_THUMB_SIZE = 16 * 1024 MAX_THUMB_DIMEN = (180, 240)
318
Python
.py
8
37.5
109
0.710526
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,463
huffcdic.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/huffcdic.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' ''' Decompress MOBI files compressed with the Huff/cdic algorithm. Code thanks to darkninja and igorsk. ''' import struct from calibre.ebooks.mobi import MobiError class Reader: def __init__(self): self.q = struct.Struct(b'>Q').unpack_from def load_huff(self, huff): if huff[0:8] != b'HUFF\x00\x00\x00\x18': raise MobiError('Invalid HUFF header') off1, off2 = struct.unpack_from(b'>LL', huff, 8) def dict1_unpack(v): codelen, term, maxcode = v&0x1f, v&0x80, v>>8 assert codelen != 0 if codelen <= 8: assert term maxcode = ((maxcode + 1) << (32 - codelen)) - 1 return (codelen, term, maxcode) self.dict1 = tuple(map(dict1_unpack, struct.unpack_from(b'>256L', huff, off1))) dict2 = struct.unpack_from(b'>64L', huff, off2) self.mincode, self.maxcode = (), () for codelen, mincode in enumerate((0,) + dict2[0::2]): self.mincode += (mincode << (32 - codelen), ) for codelen, maxcode in enumerate((0,) + dict2[1::2]): self.maxcode += (((maxcode + 1) << (32 - codelen)) - 1, ) self.dictionary = [] def load_cdic(self, cdic): if cdic[0:8] != b'CDIC\x00\x00\x00\x10': raise MobiError('Invalid CDIC header') phrases, bits = struct.unpack_from(b'>LL', cdic, 8) n = min(1<<bits, phrases-len(self.dictionary)) h = struct.Struct(b'>H').unpack_from def getslice(off): blen, = h(cdic, 16+off) slice = cdic[18+off:18+off+(blen&0x7fff)] return (slice, blen&0x8000) self.dictionary += map(getslice, struct.unpack_from(b'>%dH' % n, cdic, 16)) def unpack(self, data): q = self.q bitsleft = len(data) * 8 data += b'\x00\x00\x00\x00\x00\x00\x00\x00' pos = 0 x, = q(data, pos) n = 32 s = [] while True: if n <= 0: pos += 4 x, = q(data, pos) n += 32 code = (x >> n) & ((1 << 32) - 1) codelen, term, maxcode = self.dict1[code >> 24] if not term: while code < self.mincode[codelen]: codelen += 1 maxcode = self.maxcode[codelen] n -= codelen bitsleft -= codelen if bitsleft < 0: break 
r = (maxcode - code) >> (32 - codelen) slice_, flag = self.dictionary[r] if not flag: self.dictionary[r] = None slice_ = self.unpack(slice_) self.dictionary[r] = (slice_, 1) s.append(slice_) return b''.join(s) class HuffReader: def __init__(self, huffs): self.reader = Reader() self.reader.load_huff(huffs[0]) for cdic in huffs[1:]: self.reader.load_cdic(cdic) def unpack(self, section): return self.reader.unpack(section)
3,172
Python
.py
82
28.560976
87
0.529354
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,464
serializer.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer2/serializer.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import re import unicodedata from collections import defaultdict from io import BytesIO from calibre.ebooks.mobi.mobiml import MBP_NS from calibre.ebooks.mobi.utils import is_guide_ref_start from calibre.ebooks.oeb.base import OEB_DOCS, XHTML, XHTML_NS, XML_NS, namespace, prefixname, urlnormalize from polyglot.builtins import string_or_bytes from polyglot.urllib import urldefrag class Buf(BytesIO): def write(self, x): if isinstance(x, str): x = x.encode('utf-8') BytesIO.write(self, x) class Serializer: NSRMAP = {'': None, XML_NS: 'xml', XHTML_NS: '', MBP_NS: 'mbp'} def __init__(self, oeb, images, is_periodical, write_page_breaks_after_item=True): ''' Write all the HTML markup in oeb into a single in memory buffer containing a single html document with links replaced by offsets into the buffer. :param oeb: OEBBook object that encapsulates the document to be processed. :param images: Mapping of image hrefs (urlnormalized) to image record indices. :param write_page_breaks_after_item: If True a MOBIpocket pagebreak tag is written after every element of the spine in ``oeb``. ''' self.oeb = oeb # Map of image hrefs to image index in the MOBI file self.images = images self.used_images = set() self.logger = oeb.logger self.is_periodical = is_periodical self.write_page_breaks_after_item = write_page_breaks_after_item # If not None, this is a number pointing to the location at which to # open the MOBI file on the Kindle self.start_offset = None # Mapping of hrefs (urlnormalized) to the offset in the buffer where # the resource pointed to by the href lives. Used at the end to fill in # the correct values into all filepos="..." links. self.id_offsets = {} # Mapping of hrefs (urlnormalized) to a list of offsets into the buffer # where filepos="..." elements are written corresponding to links that # point to the href. 
This is used at the end to fill in the correct values. self.href_offsets = defaultdict(list) # List of offsets in the buffer of non linear items in the spine. These # become uncrossable breaks in the MOBI self.breaks = [] self.find_blocks() def find_blocks(self): ''' Mark every item in the spine if it is the start/end of a section/article, so that it can be wrapped in divs appropriately. ''' for item in self.oeb.spine: item.is_section_start = item.is_section_end = False item.is_article_start = item.is_article_end = False def spine_item(tocitem): href = urldefrag(tocitem.href)[0] for item in self.oeb.spine: if item.href == href: return item for item in self.oeb.toc.iterdescendants(): if item.klass == 'section': articles = list(item) if not articles: continue spine_item(item).is_section_start = True for i, article in enumerate(articles): si = spine_item(article) if si is not None: si.is_article_start = True items = list(self.oeb.spine) in_sec = in_art = False for i, item in enumerate(items): try: prev_item = items[i-1] except: prev_item = None if in_art and item.is_article_start is True: prev_item.is_article_end = True in_art = False if in_sec and item.is_section_start is True: prev_item.is_section_end = True in_sec = False if item.is_section_start: in_sec = True if item.is_article_start: in_art = True item.is_section_end = item.is_article_end = True def __call__(self): ''' Return the document serialized as a single UTF-8 encoded bytestring. ''' buf = self.buf = Buf() buf.write(b'<html>') self.serialize_head() self.serialize_body() buf.write(b'</html>') self.end_offset = buf.tell() self.fixup_links() if self.start_offset is None and not self.is_periodical: # If we don't set a start offset, the stupid Kindle will # open the book at the location of the first IndexEntry, which # could be anywhere. So ensure the book is always opened at the # beginning, instead. 
self.start_offset = self.body_start_offset return buf.getvalue() def serialize_head(self): buf = self.buf buf.write(b'<head>') if len(self.oeb.guide) > 0: self.serialize_guide() buf.write(b'</head>') def serialize_guide(self): ''' The Kindle decides where to open a book based on the presence of an item in the guide that looks like <reference type="text" title="Start" href="chapter-one.xhtml"/> Similarly an item with type="toc" controls where the Goto Table of Contents operation on the kindle goes. ''' buf = self.buf hrefs = self.oeb.manifest.hrefs buf.write(b'<guide>') for ref in self.oeb.guide.values(): path = urldefrag(ref.href)[0] if path not in hrefs or hrefs[path].media_type not in OEB_DOCS: continue buf.write(b'<reference type="') if ref.type.startswith('other.') : self.serialize_text(ref.type.replace('other.',''), quot=True) else: self.serialize_text(ref.type, quot=True) buf.write(b'" ') if ref.title is not None: buf.write(b'title="') self.serialize_text(ref.title, quot=True) buf.write(b'" ') if is_guide_ref_start(ref): self._start_href = ref.href self.serialize_href(ref.href) # Space required or won't work, I kid you not buf.write(b' />') buf.write(b'</guide>') def serialize_href(self, href, base=None): ''' Serialize the href attribute of an <a> or <reference> tag. It is serialized as filepos="000000000" and a pointer to its location is stored in self.href_offsets so that the correct value can be filled in at the end. 
''' hrefs = self.oeb.manifest.hrefs try: path, frag = urldefrag(urlnormalize(href)) except ValueError: # Unparsable URL return False if path and base: path = base.abshref(path) if path and path not in hrefs: return False buf = self.buf item = hrefs[path] if path else None if item and item.spine_position is None: return False path = item.href if item else base.href href = '#'.join((path, frag)) if frag else path buf.write(b'filepos=') self.href_offsets[href].append(buf.tell()) buf.write(b'0000000000') return True def serialize_body(self): ''' Serialize all items in the spine of the document. Non linear items are moved to the end. ''' buf = self.buf def serialize_toc_level(tocref, href=None): # add the provided toc level to the output stream # if href is provided add a link ref to the toc level output (e.g. feed_0/index.html) if href is not None: # resolve the section url in id_offsets buf.write(b'<mbp:pagebreak />') self.id_offsets[urlnormalize(href)] = buf.tell() if tocref.klass == "periodical": buf.write(b'<div> <div height="1em"></div>') else: t = tocref.title if isinstance(t, str): t = t.encode('utf-8') buf.write(b'<div></div> <div> <h2 height="1em"><font size="+2"><b>' + t + b'</b></font></h2> <div height="1em"></div>') buf.write(b'<ul>') for tocitem in tocref.nodes: buf.write(b'<li><a filepos=') itemhref = tocitem.href if tocref.klass == 'periodical': # This is a section node. # For periodical tocs, the section urls are like r'feed_\d+/index.html' # We dont want to point to the start of the first article # so we change the href. 
itemhref = re.sub(r'article_\d+/', '', itemhref) self.href_offsets[itemhref].append(buf.tell()) buf.write(b'0000000000') buf.write(b' ><font size="+1"><b><u>') t = tocitem.title if isinstance(t, str): t = t.encode('utf-8') buf.write(t) buf.write(b'</u></b></font></a></li>') buf.write(b'</ul><div height="1em"></div></div><mbp:pagebreak />') self.anchor_offset = buf.tell() buf.write(b'<body>') self.body_start_offset = buf.tell() if self.is_periodical: top_toc = self.oeb.toc.nodes[0] serialize_toc_level(top_toc) spine = [item for item in self.oeb.spine if item.linear] spine.extend([item for item in self.oeb.spine if not item.linear]) for item in spine: if self.is_periodical and item.is_section_start: for section_toc in top_toc.nodes: if urlnormalize(item.href) == section_toc.href: # create section url of the form r'feed_\d+/index.html' section_url = re.sub(r'article_\d+/', '', section_toc.href) serialize_toc_level(section_toc, section_url) section_toc.href = section_url break self.serialize_item(item) self.body_end_offset = buf.tell() buf.write(b'</body>') def serialize_item(self, item): ''' Serialize an individual item from the spine of the input document. 
A reference to this item is stored in self.href_offsets ''' buf = self.buf if not item.linear: self.breaks.append(buf.tell() - 1) self.id_offsets[urlnormalize(item.href)] = buf.tell() if item.is_section_start: buf.write(b'<a ></a> ') if item.is_article_start: buf.write(b'<a ></a> <a ></a>') for elem in item.data.find(XHTML('body')): self.serialize_elem(elem, item) if self.write_page_breaks_after_item: buf.write(b'<mbp:pagebreak/>') if item.is_article_end: # Kindle periodical article end marker buf.write(b'<a ></a> <a ></a>') if item.is_section_end: buf.write(b' <a ></a>') self.anchor_offset = None def serialize_elem(self, elem, item, nsrmap=NSRMAP): buf = self.buf if not isinstance(elem.tag, string_or_bytes) \ or namespace(elem.tag) not in nsrmap: return tag = prefixname(elem.tag, nsrmap) # Previous layers take care of @name id_ = elem.attrib.pop('id', None) if id_: href = '#'.join((item.href, id_)) offset = self.anchor_offset or buf.tell() key = urlnormalize(href) # Only set this id_offset if it wasn't previously seen self.id_offsets[key] = self.id_offsets.get(key, offset) if self.anchor_offset is not None and \ tag == 'a' and not elem.attrib and \ not len(elem) and not elem.text: return self.anchor_offset = buf.tell() buf.write(b'<') buf.write(tag.encode('utf-8')) if elem.attrib: for attr, val in elem.attrib.items(): if namespace(attr) not in nsrmap: continue attr = prefixname(attr, nsrmap) buf.write(b' ') if attr == 'href': if self.serialize_href(val, item): continue elif attr == 'src': href = urlnormalize(item.abshref(val)) if href in self.images: index = self.images[href] self.used_images.add(href) buf.write(b'recindex="%05d"' % index) continue buf.write(attr.encode('utf-8')) buf.write(b'="') self.serialize_text(val, quot=True) buf.write(b'"') buf.write(b'>') if elem.text or len(elem) > 0: if elem.text: self.anchor_offset = None self.serialize_text(elem.text) for child in elem: self.serialize_elem(child, item) if child.tail: self.anchor_offset = None 
self.serialize_text(child.tail) buf.write(('</%s>' % tag).encode('utf-8')) def serialize_text(self, text, quot=False): text = text.replace('&', '&amp;') text = text.replace('<', '&lt;') text = text.replace('>', '&gt;') text = text.replace('\u00AD', '') # Soft-hyphen if quot: text = text.replace('"', '&quot;') if isinstance(text, str): text = unicodedata.normalize('NFC', text) self.buf.write(text.encode('utf-8')) def fixup_links(self): ''' Fill in the correct values for all filepos="..." links with the offsets of the linked to content (as stored in id_offsets). ''' buf = self.buf id_offsets = self.id_offsets start_href = getattr(self, '_start_href', None) for href, hoffs in self.href_offsets.items(): is_start = (href and href == start_href) # Iterate over all filepos items if href not in id_offsets: self.logger.warn('Hyperlink target %r not found' % href) # Link to the top of the document, better than just ignoring href, _ = urldefrag(href) if href in self.id_offsets: ioff = self.id_offsets[href] if is_start: self.start_offset = ioff for hoff in hoffs: buf.seek(hoff) buf.write(('%010d' % ioff).encode('utf-8'))
15,005
Python
.py
344
31.47093
106
0.551122
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,465
indexer.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer2/indexer.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import io import numbers from collections import OrderedDict, defaultdict from struct import pack from calibre.ebooks.mobi.utils import CNCX as CNCX_ from calibre.ebooks.mobi.utils import RECORD_SIZE, align_block, encint, encode_number_as_hex, encode_tbs from polyglot.builtins import iteritems, itervalues class CNCX(CNCX_): # {{{ def __init__(self, toc, is_periodical): strings = [] for item in toc.iterdescendants(breadth_first=True): strings.append(item.title) if is_periodical: strings.append(item.klass) if item.author: strings.append(item.author) if item.description: strings.append(item.description) CNCX_.__init__(self, strings) # }}} class TAGX: # {{{ BITMASKS = {11:0b1} BITMASKS.update({x:(1 << i) for i, x in enumerate([1, 2, 3, 4, 5, 21, 22, 23])}) BITMASKS.update({x:(1 << i) for i, x in enumerate([69, 70, 71, 72, 73])}) NUM_VALUES = defaultdict(lambda :1) NUM_VALUES[11] = 3 NUM_VALUES[0] = 0 def __init__(self): self.byts = bytearray() def add_tag(self, tag): buf = self.byts buf.append(tag) buf.append(self.NUM_VALUES[tag]) # bitmask buf.append(self.BITMASKS[tag] if tag else 0) # eof buf.append(0 if tag else 1) def header(self, control_byte_count): header = b'TAGX' # table length, control byte count header += pack(b'>II', 12+len(self.byts), control_byte_count) return header @property def periodical(self): ''' TAGX block for the Primary index header of a periodical ''' for i in (1, 2, 3, 4, 5, 21, 22, 23, 0, 69, 70, 71, 72,73, 0): self.add_tag(i) return self.header(2) + bytes(self.byts) @property def secondary(self): ''' TAGX block for the secondary index header of a periodical ''' for i in (11, 0): self.add_tag(i) return self.header(1) + bytes(self.byts) @property def flat_book(self): ''' TAGX block for the primary index header of a flat book ''' for i in (1, 2, 3, 4, 0): self.add_tag(i) return self.header(1) + 
bytes(self.byts) # }}} # Index Entries {{{ class IndexEntry: TAG_VALUES = { 'offset': 1, 'size': 2, 'label_offset': 3, 'depth': 4, 'class_offset': 5, 'secondary': 11, 'parent_index': 21, 'first_child_index': 22, 'last_child_index': 23, 'image_index': 69, 'desc_offset': 70, 'author_offset': 71, } RTAG_MAP = {v:k for k, v in iteritems(TAG_VALUES)} # noqa def __init__(self, offset, label_offset): self.offset, self.label_offset = offset, label_offset self.depth, self.class_offset = 0, None self.control_byte_count = 1 self.length = 0 self.index = 0 self.parent_index = None self.first_child_index = None self.last_child_index = None self.image_index = None self.author_offset = None self.desc_offset = None def __repr__(self): return ('IndexEntry(offset=%r, depth=%r, length=%r, index=%r,' ' parent_index=%r)')%(self.offset, self.depth, self.length, self.index, self.parent_index) @property def size(self): return self.length @size.setter def size(self, val): self.length = val @property def next_offset(self): return self.offset + self.length @property def tag_nums(self): yield from range(1, 5) for attr in ('class_offset', 'parent_index', 'first_child_index', 'last_child_index'): if getattr(self, attr) is not None: yield self.TAG_VALUES[attr] @property def entry_type(self): ans = 0 for tag in self.tag_nums: ans |= TAGX.BITMASKS[tag] return ans def attr_for_tag(self, tag): return self.RTAG_MAP[tag] @property def bytestring(self): buf = io.BytesIO() if isinstance(self.index, numbers.Integral): buf.write(encode_number_as_hex(self.index)) else: raw = bytearray(self.index.encode('ascii')) raw.insert(0, len(raw)) buf.write(bytes(raw)) et = self.entry_type buf.write(bytes(bytearray([et]))) if self.control_byte_count == 2: flags = 0 for attr in ('image_index', 'desc_offset', 'author_offset'): val = getattr(self, attr) if val is not None: tag = self.TAG_VALUES[attr] bm = TAGX.BITMASKS[tag] flags |= bm buf.write(bytes(bytearray([flags]))) for tag in self.tag_nums: attr = 
self.attr_for_tag(tag) val = getattr(self, attr) if isinstance(val, numbers.Integral): val = [val] for x in val: buf.write(encint(x)) if self.control_byte_count == 2: for attr in ('image_index', 'desc_offset', 'author_offset'): val = getattr(self, attr) if val is not None: buf.write(encint(val)) ans = buf.getvalue() return ans class PeriodicalIndexEntry(IndexEntry): def __init__(self, offset, label_offset, class_offset, depth): IndexEntry.__init__(self, offset, label_offset) self.depth = depth self.class_offset = class_offset self.control_byte_count = 2 class SecondaryIndexEntry(IndexEntry): INDEX_MAP = {'author':73, 'caption':72, 'credit':71, 'description':70, 'mastheadImage':69} def __init__(self, index): IndexEntry.__init__(self, 0, 0) self.index = index tag = self.INDEX_MAP[index] # The values for this index entry # I dont know what the 5 means, it is not the number of entries self.secondary = [5 if tag == min( itervalues(self.INDEX_MAP)) else 0, 0, tag] @property def tag_nums(self): yield 11 @property def entry_type(self): return 1 @classmethod def entries(cls): rmap = {v:k for k,v in iteritems(cls.INDEX_MAP)} for tag in sorted(rmap, reverse=True): yield cls(rmap[tag]) # }}} class TBS: # {{{ ''' Take the list of index nodes starting/ending on a record and calculate the trailing byte sequence for the record. ''' def __init__(self, data, is_periodical, first=False, section_map={}, after_first=False): self.section_map = section_map if is_periodical: # The starting bytes. # The value is zero which I think indicates the periodical # index entry. The values for the various flags seem to be # unused. If the 0b100 is present, it means that the record # deals with section 1 (or is the final record with section # transitions). 
self.type_010 = encode_tbs(0, {0b010: 0}, flag_size=3) self.type_011 = encode_tbs(0, {0b010: 0, 0b001: 0}, flag_size=3) self.type_110 = encode_tbs(0, {0b100: 2, 0b010: 0}, flag_size=3) self.type_111 = encode_tbs(0, {0b100: 2, 0b010: 0, 0b001: 0}, flag_size=3) if not data: byts = b'' if after_first: # This can happen if a record contains only text between # the periodical start and the first section byts = self.type_011 self.bytestring = byts else: depth_map = defaultdict(list) for x in ('starts', 'ends', 'completes'): for idx in data[x]: depth_map[idx.depth].append(idx) for l in itervalues(depth_map): l.sort(key=lambda x:x.offset) self.periodical_tbs(data, first, depth_map) else: if not data: self.bytestring = b'' else: self.book_tbs(data, first) def periodical_tbs(self, data, first, depth_map): buf = io.BytesIO() has_section_start = (depth_map[1] and set(depth_map[1]).intersection(set(data['starts']))) spanner = data['spans'] parent_section_index = -1 if depth_map[0]: # We have a terminal record # Find the first non periodical node first_node = None for nodes in (depth_map[1], depth_map[2]): for node in nodes: if (first_node is None or (node.offset, node.depth) < (first_node.offset, first_node.depth)): first_node = node typ = (self.type_110 if has_section_start else self.type_010) # parent_section_index is needed for the last record if first_node is not None and first_node.depth > 0: parent_section_index = (first_node.index if first_node.depth == 1 else first_node.parent_index) else: parent_section_index = max(iter(self.section_map)) else: # Non terminal record if spanner is not None: # record is spanned by a single article parent_section_index = spanner.parent_index typ = (self.type_110 if parent_section_index == 1 else self.type_010) elif not depth_map[1]: # has only article nodes, i.e. 
spanned by a section parent_section_index = depth_map[2][0].parent_index typ = (self.type_111 if parent_section_index == 1 else self.type_010) else: # has section transitions if depth_map[2]: parent_section_index = depth_map[2][0].parent_index else: parent_section_index = depth_map[1][0].index typ = self.type_011 buf.write(typ) if typ not in (self.type_110, self.type_111) and parent_section_index > 0: extra = {} # Write starting section information if spanner is None: num_articles = len([a for a in depth_map[1] if a.parent_index == parent_section_index]) if not depth_map[1]: extra = {0b0001: 0} if num_articles > 1: extra = {0b0100: num_articles} buf.write(encode_tbs(parent_section_index, extra)) if spanner is None: articles = depth_map[2] sections = {self.section_map[a.parent_index] for a in articles} sections = sorted(sections, key=lambda x:x.offset) section_map = {s:[a for a in articles if a.parent_index == s.index] for s in sections} for i, section in enumerate(sections): # All the articles in this record that belong to section articles = section_map[section] first_article = articles[0] last_article = articles[-1] num = len(articles) last_article_ends = (last_article in data['ends'] or last_article in data['completes']) try: next_sec = sections[i+1] except: next_sec = None extra = {} if num > 1: extra[0b0100] = num if False and i == 0 and next_sec is not None: # Write offset to next section from start of record # I can't figure out exactly when Kindlegen decides to # write this so I have disabled it for now. extra[0b0001] = next_sec.offset - data['offset'] buf.write(encode_tbs(first_article.index-section.index, extra)) if next_sec is not None: buf.write(encode_tbs(last_article.index-next_sec.index, {0b1000: 0})) # If a section TOC starts and extends into the next record add # a trailing vwi. 
We detect this by TBS type==3, processing last # section present in the record, and the last article in that # section either ends or completes and doesn't finish # on the last byte of the record. elif (typ == self.type_011 and last_article_ends and ((last_article.offset+last_article.size) % RECORD_SIZE > 0) ): buf.write(encode_tbs(last_article.index-section.index-1, {0b1000: 0})) else: buf.write(encode_tbs(spanner.index - parent_section_index, {0b0001: 0})) self.bytestring = buf.getvalue() def book_tbs(self, data, first): spanner = data['spans'] if spanner is not None: self.bytestring = encode_tbs(spanner.index, {0b010: 0, 0b001: 0}, flag_size=3) else: starts, completes, ends = (data['starts'], data['completes'], data['ends']) if (not completes and ( (len(starts) == 1 and not ends) or (len(ends) == 1 and not starts))): node = starts[0] if starts else ends[0] self.bytestring = encode_tbs(node.index, {0b010: 0}, flag_size=3) else: nodes = [] for x in (starts, completes, ends): nodes.extend(x) nodes.sort(key=lambda x:x.index) self.bytestring = encode_tbs(nodes[0].index, {0b010:0, 0b100: len(nodes)}, flag_size=3) # }}} class Indexer: # {{{ def __init__(self, serializer, number_of_text_records, size_of_last_text_record, masthead_offset, is_periodical, opts, oeb): self.serializer = serializer self.number_of_text_records = number_of_text_records self.text_size = (RECORD_SIZE * (self.number_of_text_records-1) + size_of_last_text_record) self.masthead_offset = masthead_offset self.secondary_record_offset = None self.oeb = oeb self.log = oeb.log self.opts = opts self.is_periodical = is_periodical if self.is_periodical and self.masthead_offset is None: raise ValueError('Periodicals must have a masthead') self.log('Generating MOBI index for a %s'%('periodical' if self.is_periodical else 'book')) self.is_flat_periodical = False if self.is_periodical: periodical_node = next(iter(oeb.toc)) sections = tuple(periodical_node) self.is_flat_periodical = len(sections) == 1 
    def create_index_record(self, secondary=False):  # {{{
        '''
        Serialize all index entries into a single INDX record.

        :param secondary: if True build the secondary index record from
            SecondaryIndexEntry.entries() instead of self.indices.
        :return: complete record bytes (192-byte INDX header, the entry
            block, then an IDXT block of per-entry offsets).
        :raises ValueError: if the assembled record exceeds 64KB.
        '''
        header_length = 192
        buf = io.BytesIO()
        indices = list(SecondaryIndexEntry.entries()) if secondary else self.indices

        # Write index entries, remembering where each one starts so the IDXT
        # block below can point at it
        offsets = []
        for i in indices:
            offsets.append(buf.tell())
            buf.write(i.bytestring)
        index_block = align_block(buf.getvalue())

        # Write offsets to index entries as an IDXT block
        idxt_block = b'IDXT'
        buf.seek(0), buf.truncate(0)
        for offset in offsets:
            # Offsets are relative to the start of the whole record, hence
            # the header_length bias
            buf.write(pack(b'>H', header_length+offset))
        idxt_block = align_block(idxt_block + buf.getvalue())
        body = index_block + idxt_block

        # 192-byte INDX header: 4 bytes magic + 188 bytes of fields/padding
        header = b'INDX'
        buf.seek(0), buf.truncate(0)
        buf.write(pack(b'>I', header_length))
        buf.write(b'\0'*4)  # Unknown
        buf.write(pack(b'>I', 1))  # Header type? Or index record number?
        buf.write(b'\0'*4)  # Unknown
        # IDXT block offset
        buf.write(pack(b'>I', header_length + len(index_block)))
        # Number of index entries
        buf.write(pack(b'>I', len(offsets)))
        # Unknown
        buf.write(b'\xff'*8)
        # Unknown
        buf.write(b'\0'*156)
        header += buf.getvalue()

        ans = header + body
        if len(ans) > 0x10000:
            raise ValueError('Too many entries (%d) in the TOC'%len(offsets))
        return ans
    # }}}
idxt_offset = buf.tell() buf.write(b'IDXT') buf.write(pack(b'>H', header_length + len(tagx_block))) buf.write(b'\0') buf.seek(20) buf.write(pack(b'>I', idxt_offset)) return align_block(buf.getvalue()) # }}} def create_book_index(self): # {{{ indices = [] seen = set() id_offsets = self.serializer.id_offsets # Flatten toc so that chapter to chapter jumps work with all sub # chapter levels as well for node in self.oeb.toc.iterdescendants(): try: offset = id_offsets[node.href] label = self.cncx[node.title] except: self.log.warn('TOC item %s [%s] not found in document'%( node.title, node.href)) continue if offset in seen: continue seen.add(offset) indices.append(IndexEntry(offset, label)) indices.sort(key=lambda x:x.offset) # Set lengths for i, index in enumerate(indices): try: next_offset = indices[i+1].offset except: next_offset = self.serializer.body_end_offset index.length = next_offset - index.offset # Remove empty indices indices = [x for x in indices if x.length > 0] # Reset lengths in case any were removed for i, index in enumerate(indices): try: next_offset = indices[i+1].offset except: next_offset = self.serializer.body_end_offset index.length = next_offset - index.offset # Set index values for index, x in enumerate(indices): x.index = index return indices # }}} def create_periodical_index(self): # {{{ periodical_node = next(iter(self.oeb.toc)) periodical_node_offset = self.serializer.body_start_offset periodical_node_size = (self.serializer.body_end_offset - periodical_node_offset) normalized_sections = [] id_offsets = self.serializer.id_offsets periodical = PeriodicalIndexEntry(periodical_node_offset, self.cncx[periodical_node.title], self.cncx[periodical_node.klass], 0) periodical.length = periodical_node_size periodical.first_child_index = 1 periodical.image_index = self.masthead_offset seen_sec_offsets = set() seen_art_offsets = set() for sec in periodical_node: normalized_articles = [] try: offset = id_offsets[sec.href] label = self.cncx[sec.title] klass 
= self.cncx[sec.klass] except: continue if offset in seen_sec_offsets: continue seen_sec_offsets.add(offset) section = PeriodicalIndexEntry(offset, label, klass, 1) section.parent_index = 0 for art in sec: try: offset = id_offsets[art.href] label = self.cncx[art.title] klass = self.cncx[art.klass] except: continue if offset in seen_art_offsets: continue seen_art_offsets.add(offset) article = PeriodicalIndexEntry(offset, label, klass, 2) normalized_articles.append(article) article.author_offset = self.cncx[art.author] article.desc_offset = self.cncx[art.description] if getattr(art, 'toc_thumbnail', None) is not None: try: ii = self.serializer.images[art.toc_thumbnail] - 1 if ii > -1: article.image_index = ii except KeyError: pass # Image not found in serializer if normalized_articles: normalized_articles.sort(key=lambda x:x.offset) normalized_sections.append((section, normalized_articles)) normalized_sections.sort(key=lambda x:x[0].offset) # Set lengths for s, x in enumerate(normalized_sections): sec, normalized_articles = x try: sec.length = normalized_sections[s+1][0].offset - sec.offset except: sec.length = self.serializer.body_end_offset - sec.offset for i, art in enumerate(normalized_articles): try: art.length = normalized_articles[i+1].offset - art.offset except: art.length = sec.offset + sec.length - art.offset # Filter for i, x in list(enumerate(normalized_sections)): sec, normalized_articles = x normalized_articles = list(filter(lambda x: x.length > 0, normalized_articles)) normalized_sections[i] = (sec, normalized_articles) normalized_sections = list(filter(lambda x: x[0].length > 0 and x[1], normalized_sections)) # Set indices i = 0 for sec, articles in normalized_sections: i += 1 sec.index = i sec.parent_index = 0 for sec, articles in normalized_sections: for art in articles: i += 1 art.index = i art.parent_index = sec.index for sec, normalized_articles in normalized_sections: sec.first_child_index = normalized_articles[0].index sec.last_child_index = 
normalized_articles[-1].index # Set lengths again to close up any gaps left by filtering for s, x in enumerate(normalized_sections): sec, articles = x try: next_offset = normalized_sections[s+1][0].offset except: next_offset = self.serializer.body_end_offset sec.length = next_offset - sec.offset for a, art in enumerate(articles): try: next_offset = articles[a+1].offset except: next_offset = sec.next_offset art.length = next_offset - art.offset # Sanity check for s, x in enumerate(normalized_sections): sec, articles = x try: next_sec = normalized_sections[s+1][0] except: if (sec.length == 0 or sec.next_offset != self.serializer.body_end_offset): raise ValueError('Invalid section layout') else: if next_sec.offset != sec.next_offset or sec.length == 0: raise ValueError('Invalid section layout') for a, art in enumerate(articles): try: next_art = articles[a+1] except: if (art.length == 0 or art.next_offset != sec.next_offset): raise ValueError('Invalid article layout') else: if art.length == 0 or art.next_offset != next_art.offset: raise ValueError('Invalid article layout') # Flatten indices = [periodical] for sec, articles in normalized_sections: indices.append(sec) periodical.last_child_index = sec.index for sec, articles in normalized_sections: for a in articles: indices.append(a) return indices # }}} # TBS {{{ def calculate_trailing_byte_sequences(self): self.tbs_map = {} found_node = False sections = [i for i in self.indices if i.depth == 1] section_map = OrderedDict((i.index, i) for i in sorted(sections, key=lambda x:x.offset)) deepest = max(i.depth for i in self.indices) for i in range(self.number_of_text_records): offset = i * RECORD_SIZE next_offset = offset + RECORD_SIZE data = {'ends':[], 'completes':[], 'starts':[], 'spans':None, 'offset':offset, 'record_number':i+1} for index in self.indices: if index.offset >= next_offset: # Node starts after current record if index.depth == deepest: break else: continue if index.next_offset <= offset: # Node ends before 
current record continue if index.offset >= offset: # Node starts in current record if index.next_offset <= next_offset: # Node ends in current record data['completes'].append(index) else: data['starts'].append(index) else: # Node starts before current records if index.next_offset <= next_offset: # Node ends in current record data['ends'].append(index) elif index.depth == deepest: data['spans'] = index if (data['ends'] or data['completes'] or data['starts'] or data['spans'] is not None): self.tbs_map[i+1] = TBS(data, self.is_periodical, first=not found_node, section_map=section_map) found_node = True else: self.tbs_map[i+1] = TBS({}, self.is_periodical, first=False, after_first=found_node, section_map=section_map) def get_trailing_byte_sequence(self, num): return self.tbs_map[num].bytestring # }}} # }}}
30,384
Python
.py
725
29.177931
111
0.537653
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,466
resources.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer2/resources.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os from io import BytesIO from PIL import Image, ImageOps from calibre.ebooks import generate_masthead from calibre.ebooks.mobi import MAX_THUMB_DIMEN, MAX_THUMB_SIZE from calibre.ebooks.mobi.utils import mobify_image, rescale_image, write_font_record from calibre.ebooks.oeb.base import OEB_RASTER_IMAGES from calibre.ptempfile import PersistentTemporaryFile from calibre.utils.imghdr import what from polyglot.builtins import iteritems PLACEHOLDER_GIF = b'GIF89a\x01\x00\x01\x00\xf0\x00\x00\x00\x00\x00\xff\xff\xff!\xf9\x04\x01\x00\x00\x00\x00!\xfe calibre-placeholder-gif-for-azw3\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;' # noqa def process_jpegs_for_amazon(data: bytes) -> bytes: img = Image.open(BytesIO(data)) if img.format == 'JPEG': # Amazon's MOBI renderer can't render JPEG images without JFIF metadata # and images with EXIF data dont get displayed on the cover screen changed = not img.info has_exif = False if hasattr(img, 'getexif'): exif = img.getexif() has_exif = bool(exif) if exif.get(0x0112) in (2,3,4,5,6,7,8): changed = True img = ImageOps.exif_transpose(img) if changed or has_exif: out = BytesIO() img.save(out, 'JPEG') data = out.getvalue() return data class Resources: def __init__(self, oeb, opts, is_periodical, add_fonts=False, process_images=True): self.oeb, self.log, self.opts = oeb, oeb.log, opts self.is_periodical = is_periodical self.process_images = process_images self.item_map = {} self.records = [] self.mime_map = {} self.masthead_offset = 0 self.used_image_indices = set() self.image_indices = set() self.cover_offset = self.thumbnail_offset = None self.has_fonts = False self.add_resources(add_fonts) def process_image(self, data): if not self.process_images: return process_jpegs_for_amazon(data) func = mobify_image if self.opts.mobi_keep_original_images else rescale_image try: return 
process_jpegs_for_amazon(func(data)) except Exception: if 'png' != what(None, data): raise with PersistentTemporaryFile(suffix='.png') as pt: pt.write(data) try: from calibre.utils.img import optimize_png optimize_png(pt.name) data = open(pt.name, 'rb').read() finally: os.remove(pt.name) return func(data) def add_resources(self, add_fonts): oeb = self.oeb oeb.logger.info('Serializing resources...') index = 1 mh_href = None if 'masthead' in oeb.guide and oeb.guide['masthead'].href: mh_href = oeb.guide['masthead'].href self.records.append(None) index += 1 self.used_image_indices.add(0) self.image_indices.add(0) elif self.is_periodical: # Generate a default masthead data = generate_masthead(str(self.oeb.metadata['title'][0])) self.records.append(data) self.used_image_indices.add(0) self.image_indices.add(0) index += 1 cover_href = self.cover_offset = self.thumbnail_offset = None if (oeb.metadata.cover and str(oeb.metadata.cover[0]) in oeb.manifest.ids): cover_id = str(oeb.metadata.cover[0]) item = oeb.manifest.ids[cover_id] cover_href = item.href for item in self.oeb.manifest.values(): if item.media_type not in OEB_RASTER_IMAGES: continue if item.media_type.lower() == 'image/webp': self.convert_webp(item) try: data = self.process_image(item.data) except Exception: self.log.warn('Bad image file %r' % item.href) continue else: if mh_href and item.href == mh_href: self.records[0] = data continue self.image_indices.add(len(self.records)) self.records.append(data) self.item_map[item.href] = index self.mime_map[item.href] = 'image/%s'%what(None, data) index += 1 if cover_href and item.href == cover_href: self.cover_offset = self.item_map[item.href] - 1 self.used_image_indices.add(self.cover_offset) try: tdata = rescale_image(data, dimen=MAX_THUMB_DIMEN, maxsizeb=MAX_THUMB_SIZE) except: self.log.warn('Failed to generate thumbnail') else: self.image_indices.add(len(self.records)) self.records.append(tdata) self.thumbnail_offset = index - 1 
self.used_image_indices.add(self.thumbnail_offset) index += 1 finally: item.unload_data_from_memory() if add_fonts: for item in self.oeb.manifest.values(): if item.href and item.href.rpartition('.')[-1].lower() in { 'ttf', 'otf'} and isinstance(item.data, bytes): self.records.append(write_font_record(item.data)) self.item_map[item.href] = len(self.records) self.has_fonts = True def convert_webp(self, item): from calibre.utils.img import image_and_format_from_data, image_to_data img, fmt = image_and_format_from_data(item.data) if fmt == 'webp' and not img.isNull(): self.log.info(f'Converting WebP image {item.href} to PNG') item.data = image_to_data(img, fmt='PNG') item.media_type = 'image/png' def add_extra_images(self): ''' Add any images that were created after the call to add_resources() ''' for item in self.oeb.manifest.values(): if (item.media_type not in OEB_RASTER_IMAGES or item.href in self.item_map): continue try: data = self.process_image(item.data) except: self.log.warn('Bad image file %r' % item.href) else: self.records.append(data) self.item_map[item.href] = len(self.records) finally: item.unload_data_from_memory() def serialize(self, records, used_images): used_image_indices = self.used_image_indices | { v-1 for k, v in iteritems(self.item_map) if k in used_images} for i in self.image_indices-used_image_indices: self.records[i] = PLACEHOLDER_GIF records.extend(self.records) def __bool__(self): return bool(self.records) __nonzero__ = __bool__
7,256
Python
.py
163
32.392638
213
0.57343
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,467
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer2/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' UNCOMPRESSED = 1 PALMDOC = 2 HUFFDIC = 17480 PALM_MAX_IMAGE_SIZE = 63 * 1024
226
Python
.py
8
26.625
58
0.704225
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,468
main.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer2/main.py
def fcis(text_length):
    '''
    Build a 44-byte FCIS record for the given uncompressed text length.
    Everything except the big-endian text length at bytes 20-23 is a fixed
    template (the record seems to serve no purpose, see MobiWriter).
    '''
    head = b'FCIS\x00\x00\x00\x14\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00'
    tail = b'\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x08\x00\x01\x00\x01\x00\x00\x00\x00'
    return head + pack(b'>I', text_length) + tail
    def generate_index(self):  # {{{
        '''
        Build the MOBI NCX index for the book/periodical and append the index
        records to self.records. Also appends each text record's trailing
        byte sequence. On any indexing failure the book is written without an
        index (self.primary_index_record_idx stays None).
        '''
        self.primary_index_record_idx = None
        if self.oeb.toc.count() < 1:
            self.log.warn('No TOC, MOBI index not generated')
            return
        try:
            self.indexer = Indexer(self.serializer, self.last_text_record_idx,
                    len(self.records[self.last_text_record_idx]),
                    self.masthead_offset, self.is_periodical,
                    self.opts, self.oeb)
        except:
            # Indexing is best-effort: a book without an index is still valid
            self.log.exception('Failed to generate MOBI index:')
        else:
            self.primary_index_record_idx = len(self.records)
            # Append the trailing byte sequence to every text record
            # (record 0 is the MOBI header, not text)
            for i in range(self.last_text_record_idx + 1):
                if i == 0:
                    continue
                tbs = self.indexer.get_trailing_byte_sequence(i)
                self.records[i] += encode_trailing_data(tbs)
            self.records.extend(self.indexer.records)
    # }}}
    def generate_text(self):  # {{{
        '''
        Serialize the OEB markup and split it into (optionally PalmDoc
        compressed) text records of RECORD_SIZE bytes, appended to
        self.records. Sets self.text_length, self.last_text_record_idx and
        self.first_non_text_record_idx.
        '''
        self.oeb.logger.info('Serializing markup content...')
        self.serializer = Serializer(self.oeb, self.image_map,
                self.is_periodical,
                write_page_breaks_after_item=self.write_page_breaks_after_item)
        text = self.serializer()
        self.text_length = len(text)
        text = io.BytesIO(text)
        nrecords = 0
        records_size = 0

        if self.compression != UNCOMPRESSED:
            self.oeb.logger.info(' Compressing markup content...')

        while text.tell() < self.text_length:
            data, overlap = create_text_record(text)
            if self.compression == PALMDOC:
                data = compress_doc(data)

            # Multibyte characters cut at the record boundary are repeated at
            # the start of the next record; the overlap bytes plus their
            # count are appended after the (compressed) data
            data += overlap
            data += pack(b'>B', len(overlap))

            self.records.append(data)
            records_size += len(data)
            nrecords += 1

        self.last_text_record_idx = nrecords
        self.first_non_text_record_idx = nrecords + 1
        # Pad so that the next records starts at a 4 byte boundary
        # NOTE(review): writing (records_size % 4) pad bytes only reaches a
        # 4-byte boundary when the remainder is 2; (4 - records_size % 4)
        # would be needed in general -- verify whether the alignment actually
        # matters before changing the emitted bytes.
        if records_size % 4 != 0:
            self.records.append(b'\x00'*(records_size % 4))
            self.first_non_text_record_idx += 1
    # }}}
to not # auto archive the issues bt = 0x102 elif self.indexer.is_periodical: # If you change this, remember to change the cdetype in the EXTH # header as well bt = 0x103 if self.indexer.is_flat_periodical else 0x101 from calibre.ebooks.mobi.writer8.exth import build_exth exth = build_exth(metadata, prefer_author_sort=self.opts.prefer_author_sort, is_periodical=self.is_periodical, share_not_sync=self.opts.share_not_sync, cover_offset=self.cover_offset, thumbnail_offset=self.thumbnail_offset, start_offset=self.serializer.start_offset, mobi_doctype=bt ) first_image_record = None if self.resources: used_images = self.serializer.used_images first_image_record = len(self.records) self.resources.serialize(self.records, used_images) last_content_record = len(self.records) - 1 # FCIS/FLIS (Seems to serve no purpose) flis_number = len(self.records) self.records.append(FLIS) fcis_number = len(self.records) self.records.append(fcis(self.text_length)) # EOF record self.records.append(b'\xE9\x8E\x0D\x0A') record0 = io.BytesIO() # The MOBI Header record0.write(pack(b'>HHIHHHH', self.compression, # compression type # compression type 0, # Unused self.text_length, # Text length self.last_text_record_idx, # Number of text records or last tr idx RECORD_SIZE, # Text record size 0, # Unused 0 # Unused )) # 0 - 15 (0x0 - 0xf) uid = random.randint(0, 0xffffffff) title = normalize(str(metadata.title[0])).encode('utf-8') # 0x0 - 0x3 record0.write(b'MOBI') # 0x4 - 0x7 : Length of header # 0x8 - 0x11 : MOBI type # type meaning # 0x002 MOBI book (chapter - chapter navigation) # 0x101 News - Hierarchical navigation with sections and articles # 0x102 News feed - Flat navigation # 0x103 News magazine - same as 0x101 # 0xC - 0xF : Text encoding (65001 is utf-8) # 0x10 - 0x13 : UID # 0x14 - 0x17 : Generator version record0.write(pack(b'>IIIII', 0xe8, bt, 65001, uid, 6)) # 0x18 - 0x1f : Unknown record0.write(b'\xff' * 8) # 0x20 - 0x23 : Secondary index record sir = 0xffffffff if 
(self.primary_index_record_idx is not None and self.indexer.secondary_record_offset is not None): sir = (self.primary_index_record_idx + self.indexer.secondary_record_offset) record0.write(pack(b'>I', sir)) # 0x24 - 0x3f : Unknown record0.write(b'\xff' * 28) # 0x40 - 0x43 : Offset of first non-text record record0.write(pack(b'>I', self.first_non_text_record_idx)) # 0x44 - 0x4b : title offset, title length record0.write(pack(b'>II', 0xe8 + 16 + len(exth), len(title))) # 0x4c - 0x4f : Language specifier record0.write(iana2mobi( str(metadata.language[0]))) # 0x50 - 0x57 : Input language and Output language record0.write(b'\0' * 8) # 0x58 - 0x5b : Format version # 0x5c - 0x5f : First image record number record0.write(pack(b'>II', 6, first_image_record if first_image_record else len(self.records))) # 0x60 - 0x63 : First HUFF/CDIC record number # 0x64 - 0x67 : Number of HUFF/CDIC records # 0x68 - 0x6b : First DATP record number # 0x6c - 0x6f : Number of DATP records record0.write(b'\0' * 16) # 0x70 - 0x73 : EXTH flags # Bit 6 (0b1000000) being set indicates the presence of an EXTH header # Bit 12 being set indicates the presence of embedded fonts # The purpose of the other bits is unknown exth_flags = 0b1010000 if self.is_periodical: exth_flags |= 0b1000 if self.resources.has_fonts: exth_flags |= 0b1000000000000 record0.write(pack(b'>I', exth_flags)) # 0x74 - 0x93 : Unknown record0.write(b'\0' * 32) # 0x94 - 0x97 : DRM offset # 0x98 - 0x9b : DRM count # 0x9c - 0x9f : DRM size # 0xa0 - 0xa3 : DRM flags record0.write(pack(b'>IIII', 0xffffffff, 0xffffffff, 0, 0)) # 0xa4 - 0xaf : Unknown record0.write(b'\0'*12) # 0xb0 - 0xb1 : First content record number # 0xb2 - 0xb3 : last content record number # (Includes Image, DATP, HUFF, DRM) record0.write(pack(b'>HH', 1, last_content_record)) # 0xb4 - 0xb7 : Unknown record0.write(b'\0\0\0\x01') # 0xb8 - 0xbb : FCIS record number record0.write(pack(b'>I', fcis_number)) # 0xbc - 0xbf : Unknown (FCIS record count?) 
record0.write(pack(b'>I', 1)) # 0xc0 - 0xc3 : FLIS record number record0.write(pack(b'>I', flis_number)) # 0xc4 - 0xc7 : Unknown (FLIS record count?) record0.write(pack(b'>I', 1)) # 0xc8 - 0xcf : Unknown record0.write(b'\0'*8) # 0xd0 - 0xdf : Unknown record0.write(pack(b'>IIII', 0xffffffff, 0, 0xffffffff, 0xffffffff)) # 0xe0 - 0xe3 : Extra record data # Extra record data flags: # - 0b1 : <extra multibyte bytes><size> # - 0b10 : <TBS indexing description of this HTML record><size> # - 0b100: <uncrossable breaks><size> # Setting bit 2 (0x2) disables <guide><reference type="start"> functionality extra_data_flags = 0b1 # Has multibyte overlap bytes if self.primary_index_record_idx is not None: extra_data_flags |= 0b10 if WRITE_UNCROSSABLE_BREAKS: extra_data_flags |= 0b100 record0.write(pack(b'>I', extra_data_flags)) # 0xe4 - 0xe7 : Primary index record record0.write(pack(b'>I', 0xffffffff if self.primary_index_record_idx is None else self.primary_index_record_idx)) record0.write(exth) record0.write(title) record0 = record0.getvalue() # Add some buffer so that Amazon can add encryption information if this # MOBI is submitted for publication record0 += (b'\0' * (1024*8)) self.records[0] = align_block(record0) # }}} def generate_joint_record0(self): # {{{ from calibre.ebooks.mobi.writer8.exth import build_exth from calibre.ebooks.mobi.writer8.mobi import HEADER_FIELDS, MOBIHeader # Insert resource records first_image_record = None old = len(self.records) if self.resources: used_images = self.serializer.used_images | self.kf8.used_images first_image_record = len(self.records) self.resources.serialize(self.records, used_images) resource_record_count = len(self.records) - old last_content_record = len(self.records) - 1 # FCIS/FLIS (Seems to serve no purpose) flis_number = len(self.records) self.records.append(FLIS) fcis_number = len(self.records) self.records.append(fcis(self.text_length)) # Insert KF8 records self.records.append(b'BOUNDARY') kf8_header_index = 
len(self.records) self.kf8.start_offset = (self.serializer.start_offset, self.kf8.start_offset) self.records.append(self.kf8.record0) self.records.extend(self.kf8.records[1:]) first_image_record = (first_image_record if first_image_record else len(self.records)) header_fields = {k:getattr(self.kf8, k) for k in HEADER_FIELDS} # Now change the header fields that need to be different in the MOBI 6 # header header_fields['first_resource_record'] = first_image_record ef = 0b100001010000 # Kinglegen uses this if self.resources.has_fonts: ef |= 0b1000000000000 header_fields['exth_flags'] = ef header_fields['fdst_record'] = pack(b'>HH', 1, last_content_record) header_fields['fdst_count'] = 1 # Why not 0? Kindlegen uses 1 header_fields['flis_record'] = flis_number header_fields['fcis_record'] = fcis_number header_fields['text_length'] = self.text_length extra_data_flags = 0b1 # Has multibyte overlap bytes if self.primary_index_record_idx is not None: extra_data_flags |= 0b10 header_fields['extra_data_flags'] = extra_data_flags for k, v in iteritems({'last_text_record':'last_text_record_idx', 'first_non_text_record':'first_non_text_record_idx', 'ncx_index':'primary_index_record_idx', }): header_fields[k] = getattr(self, v) if header_fields['ncx_index'] is None: header_fields['ncx_index'] = NULL_INDEX for x in ('skel', 'chunk', 'guide'): header_fields[x+'_index'] = NULL_INDEX # Create the MOBI 6 EXTH opts = self.opts kuc = 0 if resource_record_count > 0 else None header_fields['exth'] = build_exth(self.oeb.metadata, prefer_author_sort=opts.prefer_author_sort, is_periodical=opts.mobi_periodical, share_not_sync=opts.share_not_sync, cover_offset=self.cover_offset, thumbnail_offset=self.thumbnail_offset, num_of_resources=resource_record_count, kf8_unknown_count=kuc, be_kindlegen2=True, kf8_header_index=kf8_header_index, start_offset=self.serializer.start_offset, mobi_doctype=2) self.records[0] = MOBIHeader(file_version=6)(**header_fields) # }}} def write_header(self): # PalmDB 
header {{{ ''' Write the PalmDB header ''' title = ascii_filename(str(self.oeb.metadata.title[0])).replace( ' ', '_') if not isinstance(title, bytes): title = title.encode('ascii') title = title[:31] title = title + (b'\0' * (32 - len(title))) now = int(time.time()) nrecords = len(self.records) self.write(title, pack(b'>HHIIIIII', 0, 0, now, now, 0, 0, 0, 0), b'BOOK', b'MOBI', pack(b'>IIH', (2*nrecords)-1, 0, nrecords)) offset = self.tell() + (8 * nrecords) + 2 for i, record in enumerate(self.records): self.write(pack(b'>I', offset), b'\0', pack(b'>I', 2*i)[1:]) offset += len(record) self.write(b'\0\0') # }}} def write_content(self): for record in self.records: self.write(record)
17,868
Python
.py
399
34.649123
131
0.600955
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,469
headers.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/headers.py
def uniq(vals):
    '''
    Remove all duplicates from vals, while preserving order. A false-y
    *vals* (None or empty) yields an empty list. Values must be hashable.
    '''
    # dict preserves insertion order (Python 3.7+), so fromkeys() keeps the
    # first occurrence of every value in order -- simpler and faster than
    # the manual seen-set idiom.
    return list(dict.fromkeys(vals or ()))
idx == 502: # last update time pass elif idx == 503: # Long title # Amazon seems to regard this as the definitive book title # rather than the title from the PDB header. In fact when # sending MOBI files through Amazon's email service if the # title contains non ASCII chars or non filename safe chars # they are messed up in the PDB header try: title = self.decode(content) except Exception: pass elif idx == 524: # Lang code try: lang = content.decode(codec) lang = canonicalize_lang(lang) if lang: self.mi.language = lang except Exception: pass elif idx == 525: try: pwm = content.decode(codec) if pwm: self.primary_writing_mode = pwm except Exception: pass elif idx == 527: try: ppd = content.decode(codec) if ppd: self.page_progression_direction = ppd except Exception: pass # else: # print 'unknown record', idx, repr(content) if title: self.mi.title = replace_entities(clean_xml_chars(clean_ascii_chars(title))) def process_metadata(self, idx, content, codec): if idx == 100: if self.mi.is_null('authors'): self.mi.authors = [] au = clean_xml_chars(self.decode(content).strip()) # Author names in Amazon MOBI files are usually in LN, FN format, # try to detect and auto-correct that. 
m = re.match(r'([^,]+?)\s*,\s+([^,]+)$', au.strip()) if m is not None: if tweaks['author_sort_copy_method'] != 'copy': self.mi.authors.append(m.group(2) + ' ' + m.group(1)) else: self.mi.authors.append(m.group()) if self.mi.is_null('author_sort'): self.mi.author_sort = m.group() else: self.mi.author_sort += ' & ' + m.group() else: self.mi.authors.append(au) elif idx == 101: self.mi.publisher = clean_xml_chars(self.decode(content).strip()) if self.mi.publisher in {'Unknown', _('Unknown')}: self.mi.publisher = None elif idx == 103: self.mi.comments = clean_xml_chars(self.decode(content).strip()) elif idx == 104: raw = check_isbn(self.decode(content).strip().replace('-', '')) if raw: self.mi.isbn = raw elif idx == 105: if not self.mi.tags: self.mi.tags = [] self.mi.tags.extend([x.strip() for x in clean_xml_chars(self.decode(content)).split(';')]) self.mi.tags = uniq(self.mi.tags) elif idx == 106: try: self.mi.pubdate = parse_date(self.decode(content), as_utc=False) except Exception: pass elif idx == 108: self.mi.book_producer = clean_xml_chars(self.decode(content).strip()) elif idx == 109: self.mi.rights = clean_xml_chars(self.decode(content).strip()) elif idx == 112: # dc:source set in some EBSP amazon samples try: content = content.decode(codec).strip() isig = 'urn:isbn:' if content.lower().startswith(isig): raw = check_isbn(content[len(isig):]) if raw and not self.mi.isbn: self.mi.isbn = raw elif content.startswith('calibre:'): # calibre book uuid is stored here by recent calibre # releases cid = content[len('calibre:'):] if cid: self.mi.application_id = self.mi.uuid = cid except: pass elif idx == 113: # ASIN or other id try: self.uuid = content.decode('ascii') self.mi.set_identifier('mobi-asin', self.uuid) except Exception: self.uuid = None elif idx == 116: self.start_offset, = struct.unpack(b'>L', content) elif idx == 121: self.kf8_header, = struct.unpack(b'>L', content) if self.kf8_header == NULL_INDEX: self.kf8_header = None # else: # print 'unhandled metadata 
record', idx, repr(content) # }}} class BookHeader: def __init__(self, raw, ident, user_encoding, log, try_extra_data_fix=False): self.log = log self.compression_type = raw[:2] self.records, self.records_size = struct.unpack('>HH', raw[8:12]) self.encryption_type, = struct.unpack('>H', raw[12:14]) if ident == b'TEXTREAD': self.codepage = 1252 if len(raw) <= 16: self.codec = 'cp1252' self.extra_flags = 0 self.title = _('Unknown') self.language = 'ENGLISH' self.sublanguage = 'NEUTRAL' self.exth_flag, self.exth = 0, None self.ancient = True self.first_image_index = -1 self.mobi_version = 1 else: self.ancient = False self.doctype = raw[16:20] self.length, self.type, self.codepage, self.unique_id, \ self.version = struct.unpack('>LLLLL', raw[20:40]) try: self.codec = { 1252: 'cp1252', 65001: 'utf-8', }[self.codepage] except (IndexError, KeyError): self.codec = 'cp1252' if not user_encoding else user_encoding log.warn('Unknown codepage %d. Assuming %s' % (self.codepage, self.codec)) # Some KF8 files have header length == 264 (generated by kindlegen # 2.9?). 
See https://bugs.launchpad.net/bugs/1179144 max_header_length = 500 # We choose 500 for future versions of kindlegen if (ident == b'TEXTREAD' or self.length < 0xE4 or self.length > max_header_length or (try_extra_data_fix and self.length == 0xE4)): self.extra_flags = 0 else: self.extra_flags, = struct.unpack('>H', raw[0xF2:0xF4]) if self.compression_type == b'DH': self.huff_offset, self.huff_number = struct.unpack('>LL', raw[0x70:0x78]) toff, tlen = struct.unpack('>II', raw[0x54:0x5c]) tend = toff + tlen self.title = raw[toff:tend] if tend < len(raw) else _('Unknown') langcode = struct.unpack('!L', raw[0x5C:0x60])[0] langid = langcode & 0xFF sublangid = (langcode >> 10) & 0xFF self.language = main_language.get(langid, 'ENGLISH') self.sublanguage = sub_language.get(sublangid, 'NEUTRAL') self.mobi_version = struct.unpack('>I', raw[0x68:0x6c])[0] self.first_image_index = struct.unpack('>L', raw[0x6c:0x6c + 4])[0] self.exth_flag, = struct.unpack('>L', raw[0x80:0x84]) self.exth = None if not isinstance(self.title, str): self.title = self.title.decode(self.codec, 'replace') if self.exth_flag & 0x40: try: self.exth = EXTHHeader(raw[16 + self.length:], self.codec, self.title) self.exth.mi.uid = self.unique_id if self.exth.mi.is_null('language'): try: self.exth.mi.language = mobi2iana(langid, sublangid) except: self.log.exception('Unknown language code') except: self.log.exception('Invalid EXTH header') self.exth_flag = 0 self.ncxidx = NULL_INDEX if len(raw) >= 0xF8: self.ncxidx, = struct.unpack_from(b'>L', raw, 0xF4) # Ancient PRC files from Baen can have random values for # mobi_version, so be conservative if self.mobi_version == 8 and len(raw) >= (0xF8 + 16): self.dividx, self.skelidx, self.datpidx, self.othidx = \ struct.unpack_from(b'>4L', raw, 0xF8) # need to use the FDST record to find out how to properly # unpack the raw_ml into pieces it is simply a table of start # and end locations for each flow piece self.fdstidx, self.fdstcnt = struct.unpack_from(b'>2L', raw, 
0xC0) # if cnt is 1 or less, fdst section number can be garbage if self.fdstcnt <= 1: self.fdstidx = NULL_INDEX else: # Null values self.skelidx = self.dividx = self.othidx = self.fdstidx = \ NULL_INDEX class MetadataHeader(BookHeader): def __init__(self, stream, log): self.stream = stream self.ident = self.identity() self.num_sections = self.section_count() if self.num_sections >= 2: header = self.header() BookHeader.__init__(self, header, self.ident, None, log) else: self.exth = None @property def kf8_type(self): if (self.mobi_version == 8 and getattr(self, 'skelidx', NULL_INDEX) != NULL_INDEX): return 'standalone' kf8_header_index = getattr(self.exth, 'kf8_header', None) if kf8_header_index is None: return None try: if self.section_data(kf8_header_index-1) == b'BOUNDARY': return 'joint' except Exception: pass return None def identity(self): self.stream.seek(60) ident = self.stream.read(8).upper() if ident not in (b'BOOKMOBI', b'TEXTREAD'): raise MobiError('Unknown book type: %s' % ident) return ident def section_count(self): self.stream.seek(76) return struct.unpack('>H', self.stream.read(2))[0] def section_offset(self, number): self.stream.seek(78 + number * 8) return struct.unpack('>LBBBB', self.stream.read(8))[0] def header(self): section_headers = [] # First section with the metadata section_headers.append(self.section_offset(0)) # Second section used to get the length of the first section_headers.append(self.section_offset(1)) end_off = section_headers[1] off = section_headers[0] self.stream.seek(off) return self.stream.read(end_off - off) def section_data(self, number): start = self.section_offset(number) if number == self.num_sections -1: end = os.stat(self.stream.name).st_size else: end = self.section_offset(number + 1) self.stream.seek(start) try: return self.stream.read(end - start) except OverflowError: self.stream.seek(start) return self.stream.read()
14,165
Python
.py
322
30.596273
102
0.526796
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,470
ncx.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/ncx.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os from calibre import replace_entities from calibre.ebooks.metadata.toc import TOC from calibre.ebooks.mobi.reader.headers import NULL_INDEX from calibre.ebooks.mobi.reader.index import read_index from polyglot.builtins import iteritems tag_fieldname_map = { 1: ['pos',0], 2: ['len',0], 3: ['noffs',0], 4: ['hlvl',0], 5: ['koffs',0], 6: ['pos_fid',0], 21: ['parent',0], 22: ['child1',0], 23: ['childn',0], 69: ['image_index',0], 70 : ['desc_offset', 0], # 'Description offset in cncx' 71 : ['author_offset', 0], # 'Author offset in cncx' 72 : ['image_caption_offset', 0], # 'Image caption offset in cncx', 73 : ['image_attr_offset', 0], # 'Image attribution offset in cncx', } default_entry = { 'pos': -1, 'len': 0, 'noffs': -1, 'text' : "Unknown Text", 'hlvl' : -1, 'kind' : "Unknown Class", 'pos_fid' : None, 'parent' : -1, 'child1' : -1, 'childn' : -1, 'description': None, 'author': None, 'image_caption': None, 'image_attribution': None, } def read_ncx(sections, index, codec): index_entries = [] if index != NULL_INDEX: table, cncx = read_index(sections, index, codec) for num, x in enumerate(iteritems(table)): text, tag_map = x entry = default_entry.copy() entry['name'] = text entry['num'] = num for tag in tag_fieldname_map: fieldname, i = tag_fieldname_map[tag] if tag in tag_map: fieldvalue = tag_map[tag][i] if tag == 6: # Appears to be an idx into the KF8 elems table with an # offset fieldvalue = tuple(tag_map[tag]) entry[fieldname] = fieldvalue for which, name in iteritems({3:'text', 5:'kind', 70:'description', 71:'author', 72:'image_caption', 73:'image_attribution'}): if tag == which: entry[name] = cncx.get(fieldvalue, default_entry[name]) index_entries.append(entry) return index_entries def build_toc(index_entries): ans = TOC(base_path=os.getcwd()) levels = {x['hlvl'] for x in index_entries} num_map = {-1: ans} level_map = 
{l:[x for x in index_entries if x['hlvl'] == l] for l in levels} for lvl in sorted(levels): for item in level_map[lvl]: parent = num_map[item['parent']] child = parent.add_item(item['href'], item['idtag'], replace_entities(item['text'], encoding=None)) num_map[item['num']] = child # Set play orders in depth first order for i, item in enumerate(ans.flat()): item.play_order = i return ans
3,357
Python
.py
84
27.619048
87
0.496314
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,471
mobi8.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/mobi8.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os import re import struct from collections import namedtuple from itertools import repeat from uuid import uuid4 from lxml import etree from calibre.ebooks.metadata.opf2 import Guide, OPFCreator from calibre.ebooks.metadata.toc import TOC from calibre.ebooks.mobi.reader.containers import Container, find_imgtype from calibre.ebooks.mobi.reader.headers import NULL_INDEX from calibre.ebooks.mobi.reader.index import read_index from calibre.ebooks.mobi.reader.markup import expand_mobi8_markup from calibre.ebooks.mobi.reader.ncx import build_toc, read_ncx from calibre.ebooks.mobi.utils import read_font_record from calibre.ebooks.oeb.base import XHTML, XPath, xml2text from calibre.ebooks.oeb.parse_utils import parse_html from polyglot.builtins import as_unicode from polyglot.urllib import urldefrag Part = namedtuple('Part', 'num type filename start end aid') Elem = namedtuple('Elem', 'insert_pos toc_text file_number sequence_number start_pos ' 'length') FlowInfo = namedtuple('FlowInfo', 'type format dir fname') # locate beginning and ending positions of tag with specific aid attribute def locate_beg_end_of_tag(ml, aid): pattern = br'''<[^>]*\said\s*=\s*['"]%s['"][^>]*>''' % aid aid_pattern = re.compile(pattern, re.IGNORECASE) for m in re.finditer(aid_pattern, ml): plt = m.start() pgt = ml.find(b'>', plt+1) return plt, pgt return 0, 0 def reverse_tag_iter(block): ''' Iterate over all tags in block in reverse order, i.e. last tag to first tag. 
''' end = len(block) while True: pgt = block.rfind(b'>', 0, end) if pgt == -1: break plt = block.rfind(b'<', 0, pgt) if plt == -1: break yield block[plt:pgt+1] end = plt def get_first_resource_index(first_image_index, num_of_text_records, first_text_record_number): first_resource_index = first_image_index if first_resource_index in {-1, NULL_INDEX}: first_resource_index = num_of_text_records + first_text_record_number return first_resource_index class Mobi8Reader: def __init__(self, mobi6_reader, log, for_tweak=False): self.for_tweak = for_tweak self.mobi6_reader, self.log = mobi6_reader, log self.header = mobi6_reader.book_header self.encrypted_fonts = [] self.id_re = re.compile(br'''<[^>]+\s(?:id|ID)\s*=\s*['"]([^'"]+)['"]''') self.name_re = re.compile(br'''<\s*a\s*\s(?:name|NAME)\s*=\s*['"]([^'"]+)['"]''') self.aid_re = re.compile(br'''<[^>]+\s(?:aid|AID)\s*=\s*['"]([^'"]+)['"]''') def __call__(self): self.mobi6_reader.check_for_drm() self.aid_anchor_suffix = uuid4().hex.encode('utf-8') bh = self.mobi6_reader.book_header if self.mobi6_reader.kf8_type == 'joint': offset = self.mobi6_reader.kf8_boundary + 2 self.resource_offsets = [ (get_first_resource_index(bh.first_image_index, bh.mobi6_records, 1), offset - 2), (get_first_resource_index(bh.kf8_first_image_index, bh.records, offset), len(self.mobi6_reader.sections)), ] else: offset = 1 self.resource_offsets = [(get_first_resource_index(bh.first_image_index, bh.records, offset), len(self.mobi6_reader.sections))] self.processed_records = self.mobi6_reader.extract_text(offset=offset) self.raw_ml = self.mobi6_reader.mobi_html with open('debug-raw.html', 'wb') as f: f.write(self.raw_ml) self.kf8_sections = self.mobi6_reader.sections[offset-1:] self.cover_offset = getattr(self.header.exth, 'cover_offset', None) self.linked_aids = set() self.read_indices() self.build_parts() guide = self.create_guide() ncx = self.create_ncx() resource_map = self.extract_resources(self.mobi6_reader.sections) spine = 
self.expand_text(resource_map) return self.write_opf(guide, ncx, spine, resource_map) def read_indices(self): self.flow_table = () if self.header.fdstidx != NULL_INDEX: header = self.kf8_sections[self.header.fdstidx][0] if header[:4] != b'FDST': raise ValueError('KF8 does not have a valid FDST record') sec_start, num_sections = struct.unpack_from(b'>LL', header, 4) secs = struct.unpack_from(b'>%dL' % (num_sections*2), header, sec_start) self.flow_table = tuple(zip(secs[::2], secs[1::2])) self.files = [] if self.header.skelidx != NULL_INDEX: table = read_index(self.kf8_sections, self.header.skelidx, self.header.codec)[0] File = namedtuple('File', 'file_number name divtbl_count start_position length') for i, text in enumerate(table): tag_map = table[text] self.files.append(File(i, text, tag_map[1][0], tag_map[6][0], tag_map[6][1])) self.elems = [] if self.header.dividx != NULL_INDEX: table, cncx = read_index(self.kf8_sections, self.header.dividx, self.header.codec) for i, text in enumerate(table): tag_map = table[text] toc_text = cncx[tag_map[2][0]] self.elems.append(Elem(int(text), toc_text, tag_map[3][0], tag_map[4][0], tag_map[6][0], tag_map[6][1])) self.guide = [] if self.header.othidx != NULL_INDEX: table, cncx = read_index(self.kf8_sections, self.header.othidx, self.header.codec) Item = namedtuple('Item', 'type title pos_fid') for i, ref_type in enumerate(table): tag_map = table[ref_type] # ref_type, ref_title, div/frag number title = cncx[tag_map[1][0]] fileno = None if 3 in list(tag_map.keys()): fileno = tag_map[3][0] if 6 in list(tag_map.keys()): fileno = tag_map[6] if isinstance(ref_type, bytes): ref_type = ref_type.decode(self.header.codec) self.guide.append(Item(ref_type, title, fileno)) def build_parts(self): raw_ml = self.mobi6_reader.mobi_html self.flows = [] self.flowinfo = [] ft = self.flow_table if self.flow_table else [(0, len(raw_ml))] # now split the raw_ml into its flow pieces for start, end in ft: self.flows.append(raw_ml[start:end]) # the 
first piece represents the xhtml text text = self.flows[0] self.flows[0] = b'' # walk the <skeleton> and <div> tables to build original source xhtml # files *without* destroying any file position information needed for # later href processing and create final list of file separation start: # stop points and etc in partinfo self.parts = [] self.partinfo = [] divptr = 0 baseptr = 0 for skelnum, skelname, divcnt, skelpos, skellen in self.files: baseptr = skelpos + skellen skeleton = text[skelpos:baseptr] inspos_warned = False for i in range(divcnt): insertpos, idtext, filenum, seqnum, startpos, length = \ self.elems[divptr] if i == 0: aidtext = idtext[12:-2] filename = 'part%04d.html' % filenum part = text[baseptr:baseptr + length] insertpos = insertpos - skelpos head = skeleton[:insertpos] tail = skeleton[insertpos:] if (tail.find(b'>') < tail.find(b'<') or head.rfind(b'>') < head.rfind(b'<')): # There is an incomplete tag in either the head or tail. # This can happen for some badly formed KF8 files, see for # example, https://bugs.launchpad.net/bugs/1082669 if not inspos_warned: self.log.warn( 'The div table for %s has incorrect insert ' 'positions. Calculating manually.'%skelname) inspos_warned = True bp, ep = locate_beg_end_of_tag(skeleton, aidtext if isinstance(aidtext, bytes) else aidtext.encode('utf-8')) if bp != ep: insertpos = ep + 1 + startpos skeleton = skeleton[0:insertpos] + part + skeleton[insertpos:] baseptr = baseptr + length divptr += 1 self.parts.append(skeleton) if divcnt < 1: # Empty file aidtext = str(uuid4()) filename = aidtext + '.html' self.partinfo.append(Part(skelnum, 'text', filename, skelpos, baseptr, aidtext)) # The primary css style sheet is typically stored next followed by any # snippets of code that were previously inlined in the # original xhtml but have been stripped out and placed here. # This can include local CDATA snippets and svg sections. 
# The problem is that for most browsers and ereaders, you can not # use <img src="imageXXXX.svg" /> to import any svg image that itself # properly uses an <image/> tag to import some raster image - it # should work according to the spec but does not for almost all browsers # and ereaders and causes epub validation issues because those raster # images are in manifest but not in xhtml text - since they only # referenced from an svg image # So we need to check the remaining flow pieces to see if they are css # or svg images. if svg images, we must check if they have an <image/> # and if so inline them into the xhtml text pieces. # there may be other sorts of pieces stored here but until we see one # in the wild to reverse engineer we won't be able to tell self.flowinfo.append(FlowInfo(None, None, None, None)) svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE) image_tag_pattern = re.compile(br'''(<(?:svg:)?image[^>]*>)''', re.IGNORECASE) for j in range(1, len(self.flows)): flowpart = self.flows[j] nstr = '%04d' % j m = svg_tag_pattern.search(flowpart) if m is not None: # svg typ = 'svg' start = m.start() # strip off anything before <svg if inlining from_svg = flowpart[start:] m2 = image_tag_pattern.search(from_svg) if m2 is not None: format = 'inline' dir = None fname = None flowpart = from_svg else: format = 'file' dir = "images" fname = 'svgimg' + nstr + '.svg' else: # search for CDATA and if exists inline it if flowpart.find(b'[CDATA[') >= 0: typ = 'css' flowpart = b'<style type="text/css">\n' + flowpart + b'\n</style>\n' format = 'inline' dir = None fname = None else: # css - assume as standalone css file typ = 'css' format = 'file' dir = "styles" fname = nstr + '.css' self.flows[j] = flowpart self.flowinfo.append(FlowInfo(typ, format, dir, fname)) def get_file_info(self, pos): ''' Get information about the part (file) that exists at pos in the raw markup ''' for part in self.partinfo: if pos >= part.start and pos < part.end: return part return 
Part(*repeat(None, len(Part._fields))) def get_id_tag_by_pos_fid(self, posfid, offset): # first convert kindle:pos:fid and offset info to position in file insertpos, idtext, filenum, seqnm, startpos, length = self.elems[posfid] pos = insertpos + offset fi = self.get_file_info(pos) # an existing "id=" must exist in original xhtml otherwise it would not # have worked for linking. Amazon seems to have added its own # additional "aid=" inside tags whose contents seem to represent some # position information encoded into Base32 name. # so find the closest "id=" before position the file by actually # searching in that file idtext = self.get_id_tag(pos) return '%s/%s'%(fi.type, fi.filename), idtext def get_id_tag(self, pos): # Find the first tag with a named anchor (name or id attribute) before # pos fi = self.get_file_info(pos) if fi.num is None and fi.start is None: raise ValueError('No file contains pos: %d'%pos) textblock = self.parts[fi.num] npos = pos - fi.start pgt = textblock.find(b'>', npos) plt = textblock.find(b'<', npos) # if npos inside a tag then search all text before the its end of tag marker # else not in a tag need to search the preceding tag if plt == npos or pgt < plt: npos = pgt + 1 textblock = textblock[0:npos] for tag in reverse_tag_iter(textblock): m = self.id_re.match(tag) or self.name_re.match(tag) if m is not None: return m.group(1) # For some files, kindlegen apparently creates links to tags # without HTML anchors, using the AID instead. 
See # See https://www.mobileread.com/forums/showthread.php?t=259557 m = self.aid_re.match(tag) if m is not None: self.linked_aids.add(m.group(1).decode('utf-8')) return m.group(1) + b'-' + self.aid_anchor_suffix # No tag found, link to start of file return b'' def create_guide(self): guide = Guide() has_start = False for ref_type, ref_title, pos_fid in self.guide: try: if len(pos_fid) != 2: continue except TypeError: continue # thumbnailstandard record, ignore it linktgt, idtext = self.get_id_tag_by_pos_fid(*pos_fid) if idtext: if isinstance(idtext, bytes): idtext = idtext.decode(self.header.codec) linktgt += '#' + idtext g = Guide.Reference(linktgt, os.getcwd()) g.title, g.type = ref_title, ref_type if g.title == 'start' or g.type == 'text': has_start = True guide.append(g) so = self.header.exth.start_offset if so not in {None, NULL_INDEX} and not has_start: fi = self.get_file_info(so) if fi.filename is not None: idtext = self.get_id_tag(so).decode(self.header.codec) linktgt = fi.filename if idtext: linktgt += '#' + idtext g = Guide.Reference('%s/%s'%(fi.type, linktgt), os.getcwd()) g.title, g.type = 'start', 'text' guide.append(g) return guide def create_ncx(self): index_entries = read_ncx(self.kf8_sections, self.header.ncxidx, self.header.codec) remove = [] # Add href and anchor info to the index entries for entry in index_entries: pos_fid = entry['pos_fid'] if pos_fid is None: pos = entry['pos'] fi = self.get_file_info(pos) if fi.filename is None: raise ValueError('Index entry has invalid pos: %d'%pos) idtag = self.get_id_tag(pos) href = '%s/%s'%(fi.type, fi.filename) else: try: href, idtag = self.get_id_tag_by_pos_fid(*pos_fid) except ValueError: self.log.warn('Invalid entry in NCX (title: %s), ignoring' %entry['text']) remove.append(entry) continue entry['href'] = href entry['idtag'] = as_unicode(idtag, self.header.codec or 'utf-8') for e in remove: index_entries.remove(e) # Build the TOC object return build_toc(index_entries) def extract_resources(self, 
sections): from calibre.ebooks.mobi.writer2.resources import PLACEHOLDER_GIF resource_map = [] container = None for x in ('fonts', 'images'): os.mkdir(x) for start, end in self.resource_offsets: for i, sec in enumerate(sections[start:end]): fname_idx = i+1 data = sec[0] typ = data[:4] href = None if typ in {b'FLIS', b'FCIS', b'SRCS', b'\xe9\x8e\r\n', b'BOUN', b'FDST', b'DATP', b'AUDI', b'VIDE', b'RESC', b'CMET', b'PAGE'}: pass # Ignore these records elif typ == b'FONT': font = read_font_record(data) href = "fonts/%05d.%s" % (fname_idx, font['ext']) if font['err']: self.log.warn('Reading font record %d failed: %s'%( fname_idx, font['err'])) if font['headers']: self.log.debug('Font record headers: %s'%font['headers']) with open(href.replace('/', os.sep), 'wb') as f: f.write(font['font_data'] if font['font_data'] else font['raw_data']) if font['encrypted']: self.encrypted_fonts.append(href) elif typ == b'CONT': if data == b'CONTBOUNDARY': container = None continue container = Container(data) elif typ == b'CRES': data, imgtype = container.load_image(data) if data is not None: href = 'images/%05d.%s'%(container.resource_index, imgtype) with open(href.replace('/', os.sep), 'wb') as f: f.write(data) elif typ == b'\xa0\xa0\xa0\xa0' and len(data) == 4 and container is not None: container.resource_index += 1 elif container is None: if not (len(data) == len(PLACEHOLDER_GIF) and data == PLACEHOLDER_GIF): imgtype = find_imgtype(data) href = 'images/%05d.%s'%(fname_idx, imgtype) with open(href.replace('/', os.sep), 'wb') as f: f.write(data) resource_map.append(href) return resource_map def expand_text(self, resource_map): return expand_mobi8_markup(self, resource_map, self.log) def write_opf(self, guide, toc, spine, resource_map): mi = self.header.exth.mi if (self.cover_offset is not None and self.cover_offset < len(resource_map)): mi.cover = resource_map[self.cover_offset] if len(list(toc)) < 2: self.log.warn('KF8 has no metadata Table of Contents') for ref in guide: if 
ref.type == 'toc': href = ref.href() href, frag = urldefrag(href) if os.path.exists(href.replace('/', os.sep)): try: toc = self.read_inline_toc(href, frag) except: self.log.exception('Failed to read inline ToC') opf = OPFCreator(os.getcwd(), mi) opf.guide = guide def exclude(path): return os.path.basename(path) == 'debug-raw.html' # If there are no images then the azw3 input plugin dumps all # binary records as .unknown images, remove them if self.for_tweak and os.path.exists('images') and os.path.isdir('images'): files = os.listdir('images') unknown = [x for x in files if x.endswith('.unknown')] if len(files) == len(unknown): [os.remove('images/'+f) for f in files] if self.for_tweak: try: os.remove('debug-raw.html') except: pass opf.create_manifest_from_files_in([os.getcwd()], exclude=exclude) mime_map = { 'text/html': 'application/xhtml+xml', 'font/ttf': 'application/x-font-truetype', 'font/otf': 'application/vnd.ms-opentype', 'font/woff': 'application/font-woff', } for entry in opf.manifest: n = mime_map.get(entry.mime_type) if n is not None: entry.mime_type = n opf.create_spine(spine) opf.set_toc(toc) ppd = getattr(self.header.exth, 'page_progression_direction', None) if ppd in {'ltr', 'rtl', 'default'}: opf.page_progression_direction = ppd pwm = getattr(self.header.exth, 'primary_writing_mode', None) if pwm is not None: opf.primary_writing_mode = pwm with open('metadata.opf', 'wb') as of, open('toc.ncx', 'wb') as ncx: opf.render(of, ncx, 'toc.ncx') return 'metadata.opf' def read_inline_toc(self, href, frag): ans = TOC() base_href = '/'.join(href.split('/')[:-1]) with open(href.replace('/', os.sep), 'rb') as f: raw = f.read().decode(self.header.codec) root = parse_html(raw, log=self.log) body = XPath('//h:body')(root) reached = False if body: start = body[0] else: start = None reached = True if frag: elems = XPath('//*[@id="%s"]'%frag)(root) if elems: start = elems[0] def node_depth(elem): ans = 0 parent = elem.getparent() while parent is not None: parent = 
parent.getparent() ans += 1 return ans # Layer the ToC based on nesting order in the source HTML current_depth = None parent = ans seen = set() links = [] for elem in root.iterdescendants(etree.Element): if reached and elem.tag == XHTML('a') and elem.get('href', False): href = elem.get('href') href, frag = urldefrag(href) href = base_href + '/' + href text = xml2text(elem).strip() if (text, href, frag) in seen: continue seen.add((text, href, frag)) links.append((text, href, frag, node_depth(elem))) elif elem is start: reached = True depths = sorted({x[-1] for x in links}) depth_map = {x:i for i, x in enumerate(depths)} for text, href, frag, depth in links: depth = depth_map[depth] if current_depth is None: current_depth = 0 parent.add_item(href, frag, text) elif current_depth == depth: parent.add_item(href, frag, text) elif current_depth < depth: parent = parent[-1] if len(parent) > 0 else parent parent.add_item(href, frag, text) current_depth += 1 else: delta = current_depth - depth while delta > 0 and parent.parent is not None: parent = parent.parent delta -= 1 parent.add_item(href, frag, text) current_depth = depth return ans
24,537
Python
.py
527
33.358634
139
0.539101
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,472
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en'
149
Python
.py
4
35
58
0.678571
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,473
index.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/index.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import struct
from collections import OrderedDict, namedtuple

from calibre.ebooks.mobi.utils import count_set_bits, decint, decode_string
from polyglot.builtins import iteritems

# A TAGX table entry: which tag, how many values per entry, the control-byte
# bitmask used to detect its presence, and the end-of-flags marker.
TagX = namedtuple('TagX', 'tag num_of_values bitmask eof')
# A partially-parsed tag: exactly one of value_count/value_bytes is set.
PTagX = namedtuple('PTagX', 'tag value_count value_bytes num_of_values')

# Field names for the 32-bit big-endian words that follow the INDX signature,
# in on-disk order.
INDEX_HEADER_FIELDS = (
        'len', 'nul1', 'type', 'gen', 'start', 'count', 'code', 'lng', 'total',
        'ordt', 'ligt', 'nligt', 'ncncx'
) + tuple('unknown%d'%i for i in range(27)) + ('ocnt', 'oentries',
        'ordt1', 'ordt2', 'tagx')


class InvalidFile(ValueError):
    pass


def check_signature(data, signature):
    ''' Raise InvalidFile if data does not start with the given signature. '''
    if data[:len(signature)] != signature:
        raise InvalidFile('Not a valid %r section'%signature)


class NotAnINDXRecord(InvalidFile):
    pass


class NotATAGXSection(InvalidFile):
    pass


def format_bytes(byts):
    ''' Render a byte string as space separated hex digits, for diagnostics. '''
    byts = bytearray(byts)
    byts = [hex(b)[2:] for b in byts]
    return ' '.join(byts)


def parse_indx_header(data):
    '''
    Parse an INDX record header.

    :param data: raw bytes of an INDX record (must start with b'INDX')
    :return: dict mapping INDEX_HEADER_FIELDS to their integer values, plus
             idx_header_end_pos, the raw ORDT payloads and ordt_map (a string
             used to remap characters when decoding index idents).
    '''
    check_signature(data, b'INDX')
    words = INDEX_HEADER_FIELDS
    num = len(words)
    values = struct.unpack('>%dL' % num, data[4:4*(num+1)])
    ans = dict(zip(words, values))
    ans['idx_header_end_pos'] = 4 * (num+1)
    ordt1, ordt2 = ans['ordt1'], ans['ordt2']
    ans['ordt1_raw'], ans['ordt2_raw'] = [], []
    ans['ordt_map'] = ''
    if ordt1 > 0 and data[ordt1:ordt1+4] == b'ORDT':
        # I dont know what this is, but using it seems to be unnecessary, so
        # just leave it as the raw bytestring
        ans['ordt1_raw'] = data[ordt1+4:ordt1+4+ans['oentries']]
    if ordt2 > 0 and data[ordt2:ordt2+4] == b'ORDT':
        ans['ordt2_raw'] = raw = bytearray(data[ordt2+4:ordt2+4+2*ans['oentries']])
        if ans['code'] == 65002:
            # This appears to be EBCDIC-UTF (65002) encoded. I can't be
            # bothered to write a decoder for this (see
            # http://www.unicode.org/reports/tr16/) Just how stupid is Amazon?
            # Instead, we use a weird hack that seems to do the trick for all
            # the books with this type of ORDT record that I have come across.
            # Some EBSP book samples in KF8 format from Amazon have this type
            # of encoding.
            # Basically we try to interpret every second byte as a printable
            # ascii character. If we cannot, we map to the ? char.
            parsed = bytearray(ans['oentries'])
            for i in range(0, 2*ans['oentries'], 2):
                parsed[i//2] = raw[i+1] if 0x20 < raw[i+1] < 0x7f else ord(b'?')
            ans['ordt_map'] = bytes(parsed).decode('ascii')
        else:
            ans['ordt_map'] = '?'*ans['oentries']
    return ans


class CNCX:  # {{{

    '''
    Parses the records that contain the compiled NCX (all strings from the
    NCX). Presents a simple offset : string mapping interface to access the
    data.
    '''

    def __init__(self, records, codec):
        self.records = OrderedDict()
        record_offset = 0
        for raw in records:
            pos = 0
            while pos < len(raw):
                length, consumed = decint(raw[pos:])
                if length > 0:
                    try:
                        self.records[pos+record_offset] = raw[
                            pos+consumed:pos+consumed+length].decode(codec)
                    except Exception:
                        # Undecodable entry: store its hex dump instead and
                        # abandon the rest of this record, since offsets after
                        # a corrupt entry cannot be trusted.
                        byts = raw[pos:]
                        r = format_bytes(byts)
                        print('CNCX entry at offset %d has unknown format %s'%(
                            pos+record_offset, r))
                        self.records[pos+record_offset] = r
                        pos = len(raw)
                pos += consumed+length
            # Offsets into the CNCX address records in 64KB units
            record_offset += 0x10000

    def __getitem__(self, offset):
        return self.records.get(offset)

    def get(self, offset, default=None):
        return self.records.get(offset, default)

    def __bool__(self):
        return bool(self.records)
    __nonzero__ = __bool__

    def iteritems(self):
        return iteritems(self.records)

    def items(self):
        return iteritems(self.records)
# }}}


def parse_tagx_section(data):
    '''
    Parse a TAGX section into (control_byte_count, [TagX, ...]).

    Each tag definition is four bytes: tag, values-per-entry, bitmask, eof.
    '''
    check_signature(data, b'TAGX')

    tags = []
    first_entry_offset, = struct.unpack_from(b'>L', data, 4)
    control_byte_count, = struct.unpack_from(b'>L', data, 8)

    for i in range(12, first_entry_offset, 4):
        vals = list(bytearray(data[i:i+4]))
        tags.append(TagX(*vals))
    return control_byte_count, tags


def get_tag_map(control_byte_count, tagx, data, strict=False):
    '''
    Decode a single index entry's tag values.

    :param control_byte_count: number of control bytes preceding the values
    :param tagx: list of TagX definitions from the TAGX section
    :param data: entry bytes (control bytes followed by variable width values)
    :param strict: raise ValueError on malformed data instead of printing
    :return: dict mapping tag number to its list of integer values
    '''
    ptags = []
    ans = {}
    control_bytes = list(bytearray(data[:control_byte_count]))
    data = data[control_byte_count:]

    for x in tagx:
        if x.eof == 0x01:
            # End-of-flags marker: advance to the next control byte
            control_bytes = control_bytes[1:]
            continue
        value = control_bytes[0] & x.bitmask
        if value != 0:
            value_count = value_bytes = None
            if value == x.bitmask:
                if count_set_bits(x.bitmask) > 1:
                    # If all bits of masked value are set and the mask has more
                    # than one bit, a variable width value will follow after
                    # the control bytes which defines the length of bytes (NOT
                    # the value count!) which will contain the corresponding
                    # variable width values.
                    value_bytes, consumed = decint(data)
                    data = data[consumed:]
                else:
                    value_count = 1
            else:
                # Shift bits to get the masked value.
                mask = x.bitmask
                while mask & 0b1 == 0:
                    mask >>= 1
                    value >>= 1
                value_count = value
            ptags.append(PTagX(x.tag, value_count, value_bytes,
                x.num_of_values))

    for x in ptags:
        values = []
        if x.value_count is not None:
            # Read value_count * values_per_entry variable width values.
            for _ in range(x.value_count * x.num_of_values):
                byts, consumed = decint(data)
                data = data[consumed:]
                values.append(byts)
        else:  # value_bytes is not None
            # Convert value_bytes to variable width values.
            total_consumed = 0
            while total_consumed < x.value_bytes:
                # Does this work for values_per_entry != 1?
                byts, consumed = decint(data)
                data = data[consumed:]
                total_consumed += consumed
                values.append(byts)
            if total_consumed != x.value_bytes:
                err = ("Error: Should consume %s bytes, but consumed %s" %
                        (x.value_bytes, total_consumed))
                if strict:
                    raise ValueError(err)
                else:
                    print(err)
        ans[x.tag] = values
    # Test that all bytes have been processed
    if data.replace(b'\0', b''):
        err = ("Warning: There are unprocessed index bytes left: %s" %
                format_bytes(data))
        if strict:
            raise ValueError(err)
        else:
            print(err)

    return ans


def parse_index_record(table, data, control_byte_count, tags, codec,
        ordt_map, strict=False):
    '''
    Parse one index record, adding its entries (ident -> tag map) to table.

    :return: the parsed INDX header dict for this record
    '''
    header = parse_indx_header(data)
    idxt_pos = header['start']
    if data[idxt_pos:idxt_pos+4] != b'IDXT':
        print('WARNING: Invalid INDX record')
    entry_count = header['count']

    # loop through to build up the IDXT position starts
    idx_positions= []
    for j in range(entry_count):
        pos, = struct.unpack_from(b'>H', data, idxt_pos + 4 + (2 * j))
        idx_positions.append(pos)
    # The last entry ends before the IDXT tag (but there might be zero fill
    # bytes we need to ignore!)
    idx_positions.append(idxt_pos)

    # For each entry in the IDXT build up the tag map and any associated
    # text
    for j in range(entry_count):
        start, end = idx_positions[j:j+2]
        rec = data[start:end]
        # Sometimes (in the guide table if the type attribute has non ascii
        # values) the ident is UTF-16 encoded. Try to handle that.
        try:
            ident, consumed = decode_string(rec, codec=codec, ordt_map=ordt_map)
        except UnicodeDecodeError:
            ident, consumed = decode_string(rec, codec='utf-16', ordt_map=ordt_map)
        if '\x00' in ident:
            try:
                ident, consumed = decode_string(rec, codec='utf-16',
                        ordt_map=ordt_map)
            except UnicodeDecodeError:
                ident = ident.replace('u\x00', '')
        rec = rec[consumed:]
        tag_map = get_tag_map(control_byte_count, tags, rec, strict=strict)
        table[ident] = tag_map
    return header


def get_tag_section_start(data, indx_header):
    '''
    Return the offset of the TAGX section; the header field is sometimes
    wrong, so fall back to searching for the signature after the header.
    '''
    tag_section_start = indx_header['tagx']
    if data[tag_section_start:tag_section_start + 4] != b'TAGX':
        tpos = data.find(b'TAGX', indx_header['idx_header_end_pos'])
        if tpos > -1:
            tag_section_start = tpos
    return tag_section_start


def read_index(sections, idx, codec):
    '''
    Read a complete index starting at section idx.

    :param sections: list of (raw_bytes, header) palm database sections
    :param idx: section number of the index INDX header record
    :param codec: text codec for decoding index idents and CNCX strings
    :return: (table, cncx) where table maps ident -> tag map and cncx maps
             offsets to their NCX strings
    '''
    table, cncx = OrderedDict(), CNCX([], codec)

    data = sections[idx][0]

    indx_header = parse_indx_header(data)
    indx_count = indx_header['count']

    if indx_header['ncncx'] > 0:
        # CNCX records follow the index records
        off = idx + indx_count + 1
        cncx_records = [x[0] for x in sections[off:off+indx_header['ncncx']]]
        cncx = CNCX(cncx_records, codec)

    tag_section_start = get_tag_section_start(data, indx_header)
    control_byte_count, tags = parse_tagx_section(data[tag_section_start:])

    for i in range(idx + 1, idx + 1 + indx_count):
        # Index record
        data = sections[i][0]
        parse_index_record(table, data, control_byte_count, tags, codec,
                indx_header['ordt_map'])
    return table, cncx
10,061
Python
.py
234
32.871795
83
0.576309
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,474
markup.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/markup.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
import re

from calibre.ebooks.chardet import strip_encoding_declarations


def update_internal_links(mobi8_reader, log):
    # need to update all links that are internal which
    # are based on positions within the xhtml files **BEFORE**
    # cutting and pasting any pieces into the xhtml text files

    #   kindle:pos:fid:XXXX:off:YYYYYYYYYY (used for internal link within xhtml)
    #       XXXX is the offset in records into divtbl
    #       YYYYYYYYYYYY is a base32 number you add to the divtbl insertpos to get final position
    mr = mobi8_reader

    # Every <a href=...> tag is split out; the kindle:pos:fid references
    # inside it are then rewritten to filename#id form.
    posfid_pattern = re.compile(br'''(<a.*?href=.*?>)''', re.IGNORECASE)
    posfid_index_pattern = re.compile(br'''['"]kindle:pos:fid:([0-9|A-V]+):off:([0-9|A-V]+).*?["']''')

    parts = []
    for part in mr.parts:
        pieces = posfid_pattern.split(part)
        # Odd indices of the split hold the captured <a ...> tags.
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if tag.startswith(b'<'):
                for match in posfid_index_pattern.finditer(tag):
                    posfid = match.group(1)
                    offset = match.group(2)
                    try:
                        filename, idtag = mr.get_id_tag_by_pos_fid(
                            int(posfid, 32), int(offset, 32))
                    except ValueError:
                        log.warn('Invalid link, points to nowhere, ignoring')
                        replacement = b'#'
                    else:
                        suffix = (b'#' + idtag) if idtag else b''
                        replacement = filename.split('/')[-1].encode(
                            mr.header.codec) + suffix
                        replacement = replacement.replace(b'"', b'&quot;')
                    tag = posfid_index_pattern.sub(b'"' + replacement + b'"', tag, 1)
                pieces[j] = tag
        raw = b''.join(pieces)
        try:
            parts.append(raw.decode(mr.header.codec))
        except UnicodeDecodeError:
            log.warn('Failed to decode text in KF8 part, replacing bad bytes')
            parts.append(raw.decode(mr.header.codec, 'replace'))

    # All parts are now unicode and have no internal links
    return parts


def remove_kindlegen_markup(parts, aid_anchor_suffix, linked_aids):
    # we can safely remove all of the Kindlegen generated aid attributes and
    # calibre generated cid attributes
    find_tag_with_aid_pattern = re.compile(r'''(<[^>]*\s[ac]id\s*=[^>]*>)''',
            re.IGNORECASE)
    within_tag_aid_position_pattern = re.compile(r'''\s[ac]id\s*=['"]([^'"]*)['"]''')

    for i, part in enumerate(parts):
        pieces = find_tag_with_aid_pattern.split(part)
        for j, tag in enumerate(pieces):
            if tag.startswith('<'):
                for match in within_tag_aid_position_pattern.finditer(tag):
                    try:
                        aid = match.group(1)
                    except IndexError:
                        aid = None
                    replacement = ''
                    # aids that are actually link targets become real id
                    # attributes instead of being dropped
                    if aid in linked_aids:
                        replacement = ' id="%s"' % (aid + '-' + aid_anchor_suffix)
                    tag = within_tag_aid_position_pattern.sub(replacement, tag, 1)
                pieces[j] = tag
        parts[i] = "".join(pieces)

    # we can safely remove all of the Kindlegen generated data-AmznPageBreak
    # attributes
    find_tag_with_AmznPageBreak_pattern = re.compile(
            r'''(<[^>]*\sdata-AmznPageBreak=[^>]*>)''', re.IGNORECASE)
    within_tag_AmznPageBreak_position_pattern = re.compile(
            r'''\sdata-AmznPageBreak=['"]([^'"]*)['"]''')

    for i, part in enumerate(parts):
        pieces = find_tag_with_AmznPageBreak_pattern.split(part)
        for j, tag in enumerate(pieces):
            if tag.startswith('<'):
                # Translate the attribute into equivalent CSS
                pieces[j] = within_tag_AmznPageBreak_position_pattern.sub(
                    lambda match:' style="page-break-after:%s"'%match.group(1), tag)
        parts[i] = "".join(pieces)


def update_flow_links(mobi8_reader, resource_map, log):
    #   kindle:embed:XXXX?mime=image/gif (png, jpeg, etc) (used for images)
    #   kindle:flow:XXXX?mime=YYYY/ZZZ (used for style sheets, svg images, etc)
    #   kindle:embed:XXXX   (used for fonts)
    mr = mobi8_reader
    flows = []

    # NOTE(review): these bracketed patterns are character classes, not
    # alternations; the startswith() checks below do the real filtering.
    img_pattern = re.compile(r'''(<[img\s|image\s|svg:image\s][^>]*>)''', re.IGNORECASE)
    img_index_pattern = re.compile(r'''['"]kindle:embed:([0-9|A-V]+)[^'"]*['"]''', re.IGNORECASE)

    tag_pattern = re.compile(r'''(<[^>]*>)''')
    flow_pattern = re.compile(r'''['"]kindle:flow:([0-9|A-V]+)\?mime=([^'"]+)['"]''', re.IGNORECASE)

    url_pattern = re.compile(r'''(url\(.*?\))''', re.IGNORECASE)
    url_img_index_pattern = re.compile(r'''kindle:embed:([0-9|A-V]+)\?mime=image/[^\)]*''', re.IGNORECASE)
    font_index_pattern = re.compile(r'''kindle:embed:([0-9|A-V]+)''', re.IGNORECASE)
    url_css_index_pattern = re.compile(r'''kindle:flow:([0-9|A-V]+)\?mime=text/css[^\)]*''', re.IGNORECASE)

    def flow_as_unicode(flow):
        # Decode a flow with the book codec, tolerating bad bytes
        if isinstance(flow, bytes):
            try:
                flow = flow.decode(mr.header.codec)
            except UnicodeDecodeError:
                log.error('Flow part has invalid %s encoded bytes'%mr.header.codec)
                flow = flow.decode(mr.header.codec, 'replace')
        return flow

    for flow in mr.flows:
        if flow is None:  # 0th flow is None
            flows.append(flow)
            continue
        flow = flow_as_unicode(flow)

        # links to raster image files from image tags
        # image_pattern
        pieces = img_pattern.split(flow)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if tag.startswith('<im') or tag.startswith('<svg:image'):
                for match in img_index_pattern.finditer(tag):
                    num = int(match.group(1), 32)
                    href = resource_map[num-1]
                    if href:
                        replacement = '"%s"'%('../'+ href)
                        tag = img_index_pattern.sub(replacement, tag, 1)
                    else:
                        log.warn('Referenced image %s was not recognized '
                        'as a valid image in %s' % (num, tag))
                pieces[j] = tag
        flow = "".join(pieces)

        # replacements inside css url():
        pieces = url_pattern.split(flow)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]

            #  process links to raster image files
            for match in url_img_index_pattern.finditer(tag):
                num = int(match.group(1), 32)
                href = resource_map[num-1]
                if href:
                    replacement = '"%s"'%('../'+ href)
                    tag = url_img_index_pattern.sub(replacement, tag, 1)
                else:
                    log.warn('Referenced image %s was not recognized as a '
                            'valid image in %s' % (num, tag))

            # process links to fonts
            for match in font_index_pattern.finditer(tag):
                num = int(match.group(1), 32)
                href = resource_map[num-1]
                if href is None:
                    log.warn('Referenced font %s was not recognized as a '
                            'valid font in %s' % (num, tag))
                else:
                    replacement = '"%s"'%('../'+ href)
                    if href.endswith('.failed'):
                        replacement = '"%s"'%('failed-'+href)
                    tag = font_index_pattern.sub(replacement, tag, 1)

            # process links to other css pieces
            for match in url_css_index_pattern.finditer(tag):
                num = int(match.group(1), 32)
                fi = mr.flowinfo[num]
                replacement = '"../' + fi.dir + '/' + fi.fname + '"'
                tag = url_css_index_pattern.sub(replacement, tag, 1)

            pieces[j] = tag
        flow = "".join(pieces)

        # flow pattern not inside url()
        pieces = re.split(tag_pattern, flow)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if tag.startswith('<'):
                for match in re.finditer(flow_pattern, tag):
                    try:
                        num = int(match.group(1), 32)
                        fi = mr.flowinfo[num]
                    except IndexError:
                        log.warn('Ignoring invalid flow reference in tag', tag)
                        tag = ''
                    else:
                        if fi.format == 'inline':
                            # Inline flows are spliced directly into the text
                            flowtext = flow_as_unicode(mr.flows[num])
                            tag = flowtext
                        else:
                            replacement = '"../' + fi.dir + '/' + fi.fname + '"'
                            tag = flow_pattern.sub(replacement, tag, 1)
                pieces[j] = tag
        flow = "".join(pieces)

        flows.append(flow)

    # All flows are now unicode and have links resolved
    return flows


def insert_flows_into_markup(parts, flows, mobi8_reader, log):
    mr = mobi8_reader

    # kindle:flow:XXXX?mime=YYYY/ZZZ (used for style sheets, svg images, etc)
    tag_pattern = re.compile(r'''(<[^>]*>)''')
    flow_pattern = re.compile(r'''['"]kindle:flow:([0-9|A-V]+)\?mime=([^'"]+)['"]''', re.IGNORECASE)
    for i, part in enumerate(parts):
        # flow pattern
        pieces = tag_pattern.split(part)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if tag.startswith('<'):
                for match in flow_pattern.finditer(tag):
                    num = int(match.group(1), 32)
                    try:
                        fi = mr.flowinfo[num]
                    except IndexError:
                        log.warn('Ignoring invalid flow reference: %s'%match.group())
                        tag = ''
                    else:
                        if fi.format == 'inline':
                            tag = flows[num]
                        else:
                            replacement = '"../' + fi.dir + '/' + fi.fname + '"'
                            tag = flow_pattern.sub(replacement, tag, 1)
                pieces[j] = tag

        # store away modified version
        parts[i] = "".join(pieces)


def insert_images_into_markup(parts, resource_map, log):
    # Handle any embedded raster images links in the xhtml text
    # kindle:embed:XXXX?mime=image/gif (png, jpeg, etc) (used for images)
    img_pattern = re.compile(r'''(<[img\s|image\s][^>]*>)''', re.IGNORECASE)
    img_index_pattern = re.compile(r'''[('"]kindle:embed:([0-9|A-V]+)[^')"]*[)'"]''')

    style_pattern = re.compile(r'''(<[a-zA-Z0-9]+\s[^>]*style\s*=\s*[^>]*>)''',
            re.IGNORECASE)

    for i, part in enumerate(parts):
        pieces = img_pattern.split(part)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if tag.startswith('<im'):
                for match in img_index_pattern.finditer(tag):
                    num = int(match.group(1), 32)
                    try:
                        href = resource_map[num-1]
                    except IndexError:
                        href = ''
                    if href:
                        replacement = '"%s"'%('../' + href)
                        tag = img_index_pattern.sub(replacement, tag, 1)
                    else:
                        log.warn('Referenced image %s was not recognized as '
                        'a valid image in %s' % (num, tag))
                pieces[j] = tag
        # store away modified version
        parts[i] = "".join(pieces)

    # Replace urls used in style attributes
    for i, part in enumerate(parts):
        pieces = style_pattern.split(part)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if 'kindle:embed' in tag:
                for match in img_index_pattern.finditer(tag):
                    num = int(match.group(1), 32)
                    href = resource_map[num-1]
                    # Preserve whatever delimiters surrounded the reference
                    osep = match.group()[0]
                    csep = match.group()[-1]
                    if href:
                        replacement = '%s%s%s'%(osep, '../' + href, csep)
                        tag = img_index_pattern.sub(replacement, tag, 1)
                    else:
                        log.warn('Referenced image %s was not recognized as '
                        'a valid image in %s' % (num, tag))
                pieces[j] = tag
        # store away modified version
        parts[i] = "".join(pieces)


def upshift_markup(parts):
    # Fix the case of attributes that SVG requires to be camelCase
    tag_pattern = re.compile(r'''(<(?:svg)[^>]*>)''', re.IGNORECASE)

    for i, part in enumerate(parts):
        # tag pattern
        pieces = re.split(tag_pattern, part)
        for j in range(1, len(pieces), 2):
            tag = pieces[j]
            if tag[:4].lower() == '<svg':
                tag = tag.replace('preserveaspectratio','preserveAspectRatio')
                tag = tag.replace('viewbox','viewBox')
            pieces[j] = tag
        # store away modified version
        parts[i] = "".join(pieces)


def expand_mobi8_markup(mobi8_reader, resource_map, log):
    # First update all internal links that are based on offsets
    parts = update_internal_links(mobi8_reader, log)

    # Remove pointless markup inserted by kindlegen
    remove_kindlegen_markup(parts, mobi8_reader.aid_anchor_suffix.decode('utf-8'), mobi8_reader.linked_aids)

    # Handle substitutions for the flows pieces first as they may
    # be inlined into the xhtml text
    flows = update_flow_links(mobi8_reader, resource_map, log)

    # Insert inline flows into the markup
    insert_flows_into_markup(parts, flows, mobi8_reader, log)

    # Insert raster images into markup
    insert_images_into_markup(parts, resource_map, log)

    # Perform general markup cleanups
    upshift_markup(parts)

    # Update the parts and flows stored in the reader
    mobi8_reader.parts = parts
    mobi8_reader.flows = flows

    # write out the parts and file flows
    os.mkdir('text')  # directory containing all parts
    spine = []
    for i, part in enumerate(parts):
        pi = mobi8_reader.partinfo[i]
        with open(os.path.join(pi.type, pi.filename), 'wb') as f:
            part = strip_encoding_declarations(part)
            part = part.replace('<head>', '<head><meta charset="UTF-8"/>', 1)
            f.write(part.encode('utf-8'))
            spine.append(f.name)

    for i, flow in enumerate(flows):
        fi = mobi8_reader.flowinfo[i]
        if fi.format == 'file':
            if not os.path.exists(fi.dir):
                os.mkdir(fi.dir)
            with open(os.path.join(fi.dir, fi.fname), 'wb') as f:
                f.write(flow.encode('utf-8'))

    return spine
15,288
Python
.py
320
34.503125
108
0.525119
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,475
mobi6.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/mobi6.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import io import os import re import shutil import struct import textwrap from lxml import etree, html from calibre import guess_type, replace_entities, xml_replace_entities from calibre.ebooks import DRMError, unit_convert from calibre.ebooks.chardet import strip_encoding_declarations from calibre.ebooks.compression.palmdoc import decompress_doc from calibre.ebooks.metadata import MetaInformation from calibre.ebooks.metadata.opf2 import OPF, OPFCreator from calibre.ebooks.metadata.toc import TOC from calibre.ebooks.mobi import MobiError from calibre.ebooks.mobi.huffcdic import HuffReader from calibre.ebooks.mobi.reader.headers import BookHeader from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars from calibre.utils.img import AnimatedGIF, gif_data_to_png_data, save_cover_data_to from calibre.utils.imghdr import what from calibre.utils.logging import default_log from polyglot.builtins import iteritems class TopazError(ValueError): pass class KFXError(ValueError): def __init__(self): ValueError.__init__(self, _( 'This is an Amazon KFX book. It cannot be processed.' ' See {} for information on how to handle KFX books.' 
).format('https://www.mobileread.com/forums/showthread.php?t=283371')) class MobiReader: PAGE_BREAK_PAT = re.compile( r'<\s*/{0,1}\s*mbp:pagebreak((?:\s+[^/>]*){0,1})/{0,1}\s*>\s*(?:<\s*/{0,1}\s*mbp:pagebreak\s*/{0,1}\s*>)*', re.IGNORECASE) IMAGE_ATTRS = ('lowrecindex', 'recindex', 'hirecindex') def __init__(self, filename_or_stream, log=None, user_encoding=None, debug=None, try_extra_data_fix=False): self.log = log or default_log self.debug = debug self.embedded_mi = None self.warned_about_trailing_entry_corruption = False self.base_css_rules = textwrap.dedent(''' body { text-align: justify } blockquote { margin: 0em 0em 0em 2em; } p { margin: 0em; text-indent: 1.5em } .bold { font-weight: bold } .italic { font-style: italic } .underline { text-decoration: underline } .mbp_pagebreak { page-break-after: always; margin: 0; display: block } ''') self.tag_css_rules = {} self.left_margins = {} self.text_indents = {} if hasattr(filename_or_stream, 'read'): stream = filename_or_stream stream.seek(0) else: stream = open(filename_or_stream, 'rb') raw = stream.read() if raw.startswith(b'TPZ'): raise TopazError(_('This is an Amazon Topaz book. 
It cannot be processed.')) if raw.startswith(b'\xeaDRMION\xee'): raise KFXError() self.header = raw[0:72] self.name = self.header[:32].replace(b'\x00', b'') self.num_sections, = struct.unpack('>H', raw[76:78]) self.ident = self.header[0x3C:0x3C + 8].upper() if self.ident not in (b'BOOKMOBI', b'TEXTREAD'): raise MobiError('Unknown book type: %s' % repr(self.ident)) self.sections = [] self.section_headers = [] for i in range(self.num_sections): offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', raw[78 + i * 8:78 + i * 8 + 8]) flags, val = a1, a2 << 16 | a3 << 8 | a4 self.section_headers.append((offset, flags, val)) def section(section_number): if section_number == self.num_sections - 1: end_off = len(raw) else: end_off = self.section_headers[section_number + 1][0] off = self.section_headers[section_number][0] return raw[off:end_off] for i in range(self.num_sections): self.sections.append((section(i), self.section_headers[i])) self.book_header = bh = BookHeader(self.sections[0][0], self.ident, user_encoding, self.log, try_extra_data_fix=try_extra_data_fix) self.name = self.name.decode(self.book_header.codec, 'replace') self.kf8_type = None k8i = getattr(self.book_header.exth, 'kf8_header', None) # Ancient PRC files from Baen can have random values for # mobi_version, so be conservative if (self.book_header.mobi_version == 8 and hasattr(self.book_header, 'skelidx')): self.kf8_type = 'standalone' elif k8i is not None: # Check for joint mobi 6 and kf 8 file try: raw = self.sections[k8i-1][0] except: raw = None if raw == b'BOUNDARY': try: self.book_header = BookHeader(self.sections[k8i][0], self.ident, user_encoding, self.log) self.book_header.kf8_first_image_index = self.book_header.first_image_index + k8i self.book_header.mobi6_records = bh.records # Need the first_image_index from the mobi 6 header as well for x in ('first_image_index',): setattr(self.book_header, x, getattr(bh, x)) # We need to do this because the MOBI 6 text extract code # does not know anything about 
the kf8 offset if hasattr(self.book_header, 'huff_offset'): self.book_header.huff_offset += k8i self.kf8_type = 'joint' self.kf8_boundary = k8i-1 except: self.book_header = bh def check_for_drm(self): if self.book_header.encryption_type != 0: try: name = self.book_header.exth.mi.title except: name = self.name if not name: name = self.name raise DRMError(name) def extract_content(self, output_dir, parse_cache): output_dir = os.path.abspath(output_dir) self.check_for_drm() processed_records = self.extract_text() if self.debug is not None: parse_cache['calibre_raw_mobi_markup'] = self.mobi_html self.add_anchors() self.processed_html = self.processed_html.decode(self.book_header.codec, 'ignore') self.processed_html = self.processed_html.replace('</</', '</') self.processed_html = re.sub(r'</([a-zA-Z]+)<', r'</\1><', self.processed_html) self.processed_html = self.processed_html.replace('\ufeff', '') # Remove tags of the form <xyz: ...> as they can cause issues further # along the pipeline self.processed_html = re.sub(r'</{0,1}[a-zA-Z]+:\s+[^>]*>', '', self.processed_html) self.processed_html = strip_encoding_declarations(self.processed_html) self.processed_html = xml_replace_entities(self.processed_html) image_name_map = self.extract_images(processed_records, output_dir) self.replace_page_breaks() self.cleanup_html() self.log.debug('Parsing HTML...') self.processed_html = clean_xml_chars(self.processed_html) try: root = html.fromstring(self.processed_html) if len(root.xpath('//html')) > 5: root = html.fromstring(self.processed_html.replace('\x0c', '').replace('\x14', '')) except Exception: self.log.warning('MOBI markup appears to contain random bytes. 
Stripping.') self.processed_html = self.remove_random_bytes(self.processed_html) try: root = html.fromstring(self.processed_html) except Exception: self.log.warning('MOBI markup could not be parsed by lxml using html5-parser') # Happens on windows with python 3 where lxml causes libxml to die with an # error about using UCS-4 little endian encoding if certain # characters are present in the input from html5_parser import parse root = parse(self.processed_html, keep_doctype=False, namespace_elements=False, maybe_xhtml=False, sanitize_names=True) if root.xpath('descendant::p/descendant::p'): from html5_parser import parse self.log.warning('Malformed markup, parsing using html5-parser') self.processed_html = strip_encoding_declarations(self.processed_html) # These trip up the html5 parser causing all content to be placed # under the <guide> tag self.processed_html = re.sub(r'<metadata>.+?</metadata>', '', self.processed_html, flags=re.I) self.processed_html = re.sub(r'<guide>.+?</guide>', '', self.processed_html, flags=re.I) try: root = parse(self.processed_html, maybe_xhtml=False, keep_doctype=False, sanitize_names=True) except Exception: self.log.warning('MOBI markup appears to contain random bytes. 
Stripping.') self.processed_html = self.remove_random_bytes(self.processed_html) root = parse(self.processed_html, maybe_xhtml=False, keep_doctype=False, sanitize_names=True) if len(root.xpath('body/descendant::*')) < 1: # There are probably stray </html>s in the markup self.processed_html = self.processed_html.replace('</html>', '') root = parse(self.processed_html, maybe_xhtml=False, keep_doctype=False, sanitize_names=True) if root.tag != 'html': self.log.warn('File does not have opening <html> tag') nroot = html.fromstring('<html><head></head><body></body></html>') bod = nroot.find('body') for child in list(root): child.getparent().remove(child) bod.append(child) root = nroot htmls = list(root.xpath('//html')) if len(htmls) > 1: self.log.warn('Markup contains multiple <html> tags, merging.') # Merge all <head> and <body> sections for h in htmls: p = h.getparent() if hasattr(p, 'remove'): p.remove(h) bodies, heads = root.xpath('//body'), root.xpath('//head') for x in root: root.remove(x) head, body = map(root.makeelement, ('head', 'body')) for h in heads: for x in h: h.remove(x) head.append(x) for b in bodies: for x in b: b.remove(x) body.append(x) root.append(head), root.append(body) for x in root.xpath('//script'): x.getparent().remove(x) head = root.xpath('//head') if head: head = head[0] else: head = root.makeelement('head', {}) root.insert(0, head) head.text = '\n\t' link = head.makeelement('link', {'type':'text/css', 'href':'styles.css', 'rel':'stylesheet'}) head.insert(0, link) link.tail = '\n\t' title = head.xpath('descendant::title') m = head.makeelement('meta', {'http-equiv':'Content-Type', 'content':'text/html; charset=utf-8'}) head.insert(0, m) if not title: title = head.makeelement('title', {}) try: title.text = self.book_header.title except ValueError: title.text = clean_ascii_chars(self.book_header.title) title.tail = '\n\t' head.insert(0, title) head.text = '\n\t' self.upshift_markup(root, image_name_map) guides = root.xpath('//guide') guide = 
guides[0] if guides else None metadata_elems = root.xpath('//metadata') if metadata_elems and self.book_header.exth is None: self.read_embedded_metadata(root, metadata_elems[0], guide) for elem in guides + metadata_elems: elem.getparent().remove(elem) htmlfile = os.path.join(output_dir, 'index.html') try: for ref in guide.xpath('descendant::reference'): if 'href' in ref.attrib: ref.attrib['href'] = os.path.basename(htmlfile) + ref.attrib['href'] except AttributeError: pass def write_as_utf8(path, data): if isinstance(data, str): data = data.encode('utf-8') with open(path, 'wb') as f: f.write(data) parse_cache[htmlfile] = root self.htmlfile = htmlfile ncx = io.BytesIO() opf, ncx_manifest_entry = self.create_opf(htmlfile, guide, root) self.created_opf_path = os.path.splitext(htmlfile)[0] + '.opf' opf.render(open(self.created_opf_path, 'wb'), ncx, ncx_manifest_entry=ncx_manifest_entry) ncx = ncx.getvalue() if ncx: ncx_path = os.path.join(os.path.dirname(htmlfile), 'toc.ncx') write_as_utf8(ncx_path, ncx) css = [self.base_css_rules, '\n\n'] for cls, rule in self.tag_css_rules.items(): css.append(f'.{cls} {{ {rule} }}\n\n') write_as_utf8('styles.css', ''.join(css)) if self.book_header.exth is not None or self.embedded_mi is not None: self.log.debug('Creating OPF...') ncx = io.BytesIO() opf, ncx_manifest_entry = self.create_opf(htmlfile, guide, root) opf.render(open(os.path.splitext(htmlfile)[0] + '.opf', 'wb'), ncx, ncx_manifest_entry) ncx = ncx.getvalue() if ncx: write_as_utf8(os.path.splitext(htmlfile)[0] + '.ncx', ncx) def read_embedded_metadata(self, root, elem, guide): raw = b'<?xml version="1.0" encoding="utf-8" ?>\n<package>' + \ html.tostring(elem, encoding='utf-8') + b'</package>' stream = io.BytesIO(raw) opf = OPF(stream) self.embedded_mi = opf.to_book_metadata() if guide is not None: for ref in guide.xpath('descendant::reference'): if 'cover' in ref.get('type', '').lower(): href = ref.get('href', '') if href.startswith('#'): href = href[1:] anchors = 
root.xpath('//*[@id="%s"]' % href) if anchors: cpos = anchors[0] reached = False for elem in root.iter(): if elem is cpos: reached = True if reached and elem.tag == 'img': cover = elem.get('src', None) self.embedded_mi.cover = cover elem.getparent().remove(elem) break break def cleanup_html(self): self.log.debug('Cleaning up HTML...') self.processed_html = re.sub(r'<div height="0(pt|px|ex|em|%){0,1}"></div>', '', self.processed_html) if self.book_header.ancient and b'<html' not in self.mobi_html[:300].lower(): self.processed_html = '<html><p>' + self.processed_html.replace('\n\n', '<p>') + '</html>' self.processed_html = self.processed_html.replace('\r\n', '\n') self.processed_html = self.processed_html.replace('> <', '>\n<') self.processed_html = self.processed_html.replace('<mbp: ', '<mbp:') self.processed_html = re.sub(r'<\?xml[^>]*>', '', self.processed_html) self.processed_html = re.sub(r'<\s*(/?)\s*o:p[^>]*>', r'', self.processed_html) # Swap inline and block level elements, and order block level elements according to priority # - lxml and beautifulsoup expect/assume a specific order based on xhtml spec self.processed_html = re.sub( r'(?i)(?P<styletags>(<(h\d+|i|b|u|em|small|big|strong|tt)>\s*){1,})(?P<para><p[^>]*>)', r'\g<para>'+r'\g<styletags>', self.processed_html) self.processed_html = re.sub( r'(?i)(?P<para></p[^>]*>)\s*(?P<styletags>(</(h\d+|i|b|u|em|small|big|strong|tt)>\s*){1,})', r'\g<styletags>'+r'\g<para>', self.processed_html) self.processed_html = re.sub( r'(?i)(?P<blockquote>(</(blockquote|div)[^>]*>\s*){1,})(?P<para></p[^>]*>)', r'\g<para>'+r'\g<blockquote>', self.processed_html) self.processed_html = re.sub( r'(?i)(?P<para><p[^>]*>)\s*(?P<blockquote>(<(blockquote|div)[^>]*>\s*){1,})', r'\g<blockquote>'+r'\g<para>', self.processed_html) bods = htmls = 0 for x in re.finditer('</body>|</html>', self.processed_html): if x == '</body>': bods +=1 else: htmls += 1 if bods > 1 and htmls > 1: break if bods > 1: self.processed_html = 
self.processed_html.replace('</body>', '') if htmls > 1: self.processed_html = self.processed_html.replace('</html>', '') def remove_random_bytes(self, html): return re.sub('\x14|\x15|\x19|\x1c|\x1d|\xef|\x12|\x13|\xec|\x08|\x01|\x02|\x03|\x04|\x05|\x06|\x07', '', html) def ensure_unit(self, raw, unit='px'): if re.search(r'\d+$', raw) is not None: raw += unit return raw def upshift_markup(self, root, image_name_map=None): self.log.debug('Converting style information to CSS...') image_name_map = image_name_map or {} size_map = { 'xx-small': '0.5', 'x-small': '1', 'small': '2', 'medium': '3', 'large': '4', 'x-large': '5', 'xx-large': '6', } def barename(x): return x.rpartition(':')[-1] mobi_version = self.book_header.mobi_version for x in root.xpath('//ncx'): x.getparent().remove(x) svg_tags = [] forwardable_anchors = [] pagebreak_anchors = [] BLOCK_TAGS = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'div', 'p'} for i, tag in enumerate(root.iter(etree.Element)): tag.attrib.pop('xmlns', '') for x in tag.attrib: if ':' in x: del tag.attrib[x] if tag.tag and barename(tag.tag) == 'svg': svg_tags.append(tag) if tag.tag and barename(tag.tag.lower()) in \ ('country-region', 'place', 'placetype', 'placename', 'state', 'city', 'street', 'address', 'content', 'form'): tag.tag = 'div' if tag.tag in ('content', 'form') else 'span' for key in tag.attrib.keys(): tag.attrib.pop(key) continue styles, attrib = [], tag.attrib if 'style' in attrib: style = attrib.pop('style').strip() if style: styles.append(style) if 'height' in attrib: height = attrib.pop('height').strip() if ( height and '<' not in height and '>' not in height and re.search(r'\d+', height)): if tag.tag in ('table', 'td', 'tr'): pass elif tag.tag == 'img': tag.set('height', height) else: if tag.tag == 'div' and not tag.text and \ (not tag.tail or not tag.tail.strip()) and \ not len(list(tag.iterdescendants())): # Paragraph spacer # Insert nbsp so that the element is never # discarded by a renderer tag.text = '\u00a0' # nbsp 
styles.append('height: %s' % self.ensure_unit(height)) else: styles.append('margin-top: %s' % self.ensure_unit(height)) if 'width' in attrib: width = attrib.pop('width').strip() if width and re.search(r'\d+', width): if tag.tag in ('table', 'td', 'tr'): pass elif tag.tag == 'img': tag.set('width', width) else: ewidth = self.ensure_unit(width) styles.append('text-indent: %s' % ewidth) try: ewidth_val = unit_convert(ewidth, 12, 500, 166) self.text_indents[tag] = ewidth_val except: pass if width.startswith('-'): styles.append('margin-left: %s' % self.ensure_unit(width[1:])) try: ewidth_val = unit_convert(ewidth[1:], 12, 500, 166) self.left_margins[tag] = ewidth_val except: pass if 'align' in attrib: align = attrib.pop('align').strip() if align: align = align.lower() if align == 'baseline': styles.append('vertical-align: '+align) else: styles.append('text-align: %s' % align) if tag.tag == 'hr': if mobi_version == 1: tag.tag = 'div' styles.append('page-break-before: always') styles.append('display: block') styles.append('margin: 0') elif tag.tag == 'i': tag.tag = 'span' tag.attrib['class'] = 'italic' elif tag.tag == 'u': tag.tag = 'span' tag.attrib['class'] = 'underline' elif tag.tag == 'b': tag.tag = 'span' tag.attrib['class'] = 'bold' elif tag.tag == 'font': sz = tag.get('size', '').lower() try: float(sz) except ValueError: if sz in list(size_map.keys()): attrib['size'] = size_map[sz] elif tag.tag == 'img': recindex = None for attr in self.IMAGE_ATTRS: recindex = attrib.pop(attr, None) or recindex if recindex is not None: try: recindex = int(recindex) except Exception: pass else: attrib['src'] = 'images/' + image_name_map.get(recindex, '%05d.jpg' % recindex) for attr in ('width', 'height'): if attr in attrib: val = attrib[attr] if val.lower().endswith('em'): try: nval = float(val[:-2]) nval *= 16 * (168.451/72) # Assume this was set using the Kindle profile attrib[attr] = "%dpx"%int(nval) except: del attrib[attr] elif val.lower().endswith('%'): del attrib[attr] elif 
tag.tag == 'pre': if not tag.text: tag.tag = 'div' if (attrib.get('class', None) == 'mbp_pagebreak' and tag.tag == 'div' and 'filepos-id' in attrib): pagebreak_anchors.append(tag) if 'color' in attrib: styles.append('color: ' + attrib.pop('color')) if 'bgcolor' in attrib: styles.append('background-color: ' + attrib.pop('bgcolor')) if 'filepos-id' in attrib: attrib['id'] = attrib.pop('filepos-id') if 'name' in attrib and attrib['name'] != attrib['id']: attrib['name'] = attrib['id'] if 'filepos' in attrib: filepos = attrib.pop('filepos') try: attrib['href'] = "#filepos%d" % int(filepos) except ValueError: pass if (tag.tag == 'a' and attrib.get('id', '').startswith('filepos') and not tag.text and len(tag) == 0 and (tag.tail is None or not tag.tail.strip()) and getattr(tag.getnext(), 'tag', None) in BLOCK_TAGS): # This is an empty anchor immediately before a block tag, move # the id onto the block tag instead forwardable_anchors.append(tag) if styles: ncls = None rule = '; '.join(styles) for sel, srule in self.tag_css_rules.items(): if srule == rule: ncls = sel break if ncls is None: ncls = 'calibre_%d' % i self.tag_css_rules[ncls] = rule cls = attrib.get('class', '') cls = cls + (' ' if cls else '') + ncls attrib['class'] = cls for tag in svg_tags: images = tag.xpath('descendant::img[@src]') parent = tag.getparent() if images and hasattr(parent, 'find'): index = parent.index(tag) for img in images: img.getparent().remove(img) img.tail = img.text = None parent.insert(index, img) if hasattr(parent, 'remove'): parent.remove(tag) for tag in pagebreak_anchors: anchor = tag.attrib['id'] del tag.attrib['id'] if 'name' in tag.attrib: del tag.attrib['name'] p = tag.getparent() a = p.makeelement('a') a.attrib['id'] = anchor p.insert(p.index(tag)+1, a) if getattr(a.getnext(), 'tag', None) in BLOCK_TAGS: forwardable_anchors.append(a) for tag in forwardable_anchors: block = tag.getnext() tag.getparent().remove(tag) if 'id' in block.attrib: tag.tail = block.text block.text = None 
block.insert(0, tag) else: block.attrib['id'] = tag.attrib['id'] # WebKit fails to navigate to anchors located on <br> tags for br in root.xpath('/body/br[@id]'): br.tag = 'div' def get_left_whitespace(self, tag): def whitespace(tag): lm = ti = 0.0 if tag.tag == 'p': ti = unit_convert('1.5em', 12, 500, 166) if tag.tag == 'blockquote': lm = unit_convert('2em', 12, 500, 166) lm = self.left_margins.get(tag, lm) ti = self.text_indents.get(tag, ti) try: lm = float(lm) except: lm = 0.0 try: ti = float(ti) except: ti = 0.0 return lm + ti parent = tag ans = 0.0 while parent is not None: ans += whitespace(parent) parent = parent.getparent() return ans def create_opf(self, htmlfile, guide=None, root=None): mi = getattr(self.book_header.exth, 'mi', self.embedded_mi) if mi is None: mi = MetaInformation(self.book_header.title, [_('Unknown')]) opf = OPFCreator(os.path.dirname(htmlfile), mi) if hasattr(self.book_header.exth, 'cover_offset'): opf.cover = 'images/%05d.jpg' % (self.book_header.exth.cover_offset + 1) elif mi.cover is not None: opf.cover = mi.cover else: opf.cover = 'images/%05d.jpg' % 1 if not os.path.exists(os.path.join(os.path.dirname(htmlfile), * opf.cover.split('/'))): opf.cover = None cover = opf.cover cover_copied = None if cover is not None: cover = cover.replace('/', os.sep) if os.path.exists(cover): ncover = 'images'+os.sep+'calibre_cover.jpg' if os.path.exists(ncover): os.remove(ncover) shutil.copyfile(cover, ncover) cover_copied = os.path.abspath(ncover) opf.cover = ncover.replace(os.sep, '/') manifest = [(htmlfile, 'application/xhtml+xml'), (os.path.abspath('styles.css'), 'text/css')] bp = os.path.dirname(htmlfile) added = set() for i in getattr(self, 'image_names', []): path = os.path.join(bp, 'images', i) added.add(path) manifest.append((path, guess_type(path)[0] or 'image/jpeg')) if cover_copied is not None: manifest.append((cover_copied, 'image/jpeg')) opf.create_manifest(manifest) opf.create_spine([os.path.basename(htmlfile)]) toc = None if guide is 
not None: opf.create_guide(guide) for ref in opf.guide: if ref.type.lower() == 'toc': toc = ref.href() ncx_manifest_entry = None if toc: ncx_manifest_entry = 'toc.ncx' elems = root.xpath('//*[@id="%s"]' % toc.partition('#')[-1]) tocobj = None if elems: tocobj = TOC() found = False reached = False for x in root.iter(): if x == elems[-1]: reached = True continue if reached and x.tag == 'a': href = x.get('href', '') if href and re.match(r'\w+://', href) is None: try: text = ' '.join([t.strip() for t in x.xpath('descendant::text()')]) except: text = '' text = replace_entities(text) item = tocobj.add_item(toc.partition('#')[0], href[1:], text) item.left_space = int(self.get_left_whitespace(x)) found = True if reached and found and x.get('class', None) == 'mbp_pagebreak': break if tocobj is not None: tocobj = self.structure_toc(tocobj) opf.set_toc(tocobj) return opf, ncx_manifest_entry def structure_toc(self, toc): indent_vals = set() for item in toc: indent_vals.add(item.left_space) if len(indent_vals) > 6 or len(indent_vals) < 2: # Too many or too few levels, give up return toc indent_vals = sorted(indent_vals) last_found = [None for i in indent_vals] newtoc = TOC() def find_parent(level): candidates = last_found[:level] for x in reversed(candidates): if x is not None: return x return newtoc for item in toc: level = indent_vals.index(item.left_space) parent = find_parent(level) last_found[level] = parent.add_item(item.href, item.fragment, item.text) return newtoc def sizeof_trailing_entries(self, data): def sizeof_trailing_entry(ptr, psize): bitpos, result = 0, 0 while True: v = ord(ptr[psize-1:psize]) result |= (v & 0x7F) << bitpos bitpos += 7 psize -= 1 if (v & 0x80) != 0 or (bitpos >= 28) or (psize == 0): return result num = 0 size = len(data) flags = self.book_header.extra_flags >> 1 while flags: if flags & 1: try: num += sizeof_trailing_entry(data, size - num) except (IndexError, TypeError): self.warn_about_trailing_entry_corruption() return 0 flags >>= 1 if 
self.book_header.extra_flags & 1: off = size - num - 1 try: num += (ord(data[off:off+1]) & 0x3) + 1 except TypeError: self.log.warn('Invalid sizeof trailing entries') num += 1 return num def warn_about_trailing_entry_corruption(self): if not self.warned_about_trailing_entry_corruption: self.warned_about_trailing_entry_corruption = True self.log.warn('The trailing data entries in this MOBI file are corrupted, you might see corrupted text in the output') def text_section(self, index): data = self.sections[index][0] trail_size = self.sizeof_trailing_entries(data) return data[:len(data)-trail_size] def extract_text(self, offset=1): self.log.debug('Extracting text...') text_sections = [self.text_section(i) for i in range(offset, min(self.book_header.records + offset, len(self.sections)))] processed_records = list(range(offset-1, self.book_header.records + offset)) self.mobi_html = b'' if self.book_header.compression_type == b'DH': huffs = [self.sections[i][0] for i in range(self.book_header.huff_offset, self.book_header.huff_offset + self.book_header.huff_number)] processed_records += list(range(self.book_header.huff_offset, self.book_header.huff_offset + self.book_header.huff_number)) huff = HuffReader(huffs) unpack = huff.unpack elif self.book_header.compression_type == b'\x00\x02': unpack = decompress_doc elif self.book_header.compression_type == b'\x00\x01': def unpack(x): return x else: raise MobiError('Unknown compression algorithm: %r' % self.book_header.compression_type) self.mobi_html = b''.join(map(unpack, text_sections)) if self.mobi_html.endswith(b'#'): self.mobi_html = self.mobi_html[:-1] if self.book_header.ancient and b'<html' not in self.mobi_html[:300].lower(): self.mobi_html = self.mobi_html.replace(b'\r ', b'\n\n ') self.mobi_html = self.mobi_html.replace(b'\0', b'') if self.book_header.codec == 'cp1252': self.mobi_html = self.mobi_html.replace(b'\x1e', b'') # record separator self.mobi_html = self.mobi_html.replace(b'\x02', b'') # start of text 
return processed_records def replace_page_breaks(self): self.processed_html = self.PAGE_BREAK_PAT.sub( r'<div \1 class="mbp_pagebreak" />', self.processed_html) def add_anchors(self): self.log.debug('Adding anchors...') positions = set() link_pattern = re.compile(br'''<[^<>]+filepos=['"]{0,1}(\d+)[^<>]*>''', re.IGNORECASE) for match in link_pattern.finditer(self.mobi_html): positions.add(int(match.group(1))) pos = 0 processed_html = [] end_tag_re = re.compile(br'<\s*/') for end in sorted(positions): if end == 0: continue oend = end l = self.mobi_html.find(b'<', end) r = self.mobi_html.find(b'>', end) anchor = b'<a id="filepos%d"></a>' if r > -1 and (r < l or l == end or l == -1): p = self.mobi_html.rfind(b'<', 0, end + 1) if (pos < end and p > -1 and not end_tag_re.match(self.mobi_html[p:r]) and not self.mobi_html[p:r + 1].endswith(b'/>')): anchor = b' filepos-id="filepos%d"' end = r else: end = r + 1 processed_html.append(self.mobi_html[pos:end] + (anchor % oend)) pos = end processed_html.append(self.mobi_html[pos:]) processed_html = b''.join(processed_html) # Remove anchors placed inside entities self.processed_html = re.sub(br'&([^;]*?)(<a id="filepos\d+"></a>)([^;]*);', br'&\1\3;\2', processed_html) def extract_images(self, processed_records, output_dir): self.log.debug('Extracting images...') output_dir = os.path.abspath(os.path.join(output_dir, 'images')) if not os.path.exists(output_dir): os.makedirs(output_dir) image_index = 0 self.image_names = [] image_name_map = {} start = getattr(self.book_header, 'first_image_index', -1) if start > self.num_sections or start < 0: # BAEN PRC files have bad headers start = 0 for i in range(start, self.num_sections): if i in processed_records: continue processed_records.append(i) data = self.sections[i][0] image_index += 1 if data[:4] in {b'FLIS', b'FCIS', b'SRCS', b'\xe9\x8e\r\n', b'RESC', b'BOUN', b'FDST', b'DATP', b'AUDI', b'VIDE'}: # This record is a known non image type, no need to try to # load the image continue 
try: imgfmt = what(None, data) except Exception: continue if imgfmt not in {'jpg', 'jpeg', 'gif', 'png', 'bmp'}: continue if imgfmt == 'jpeg': imgfmt = 'jpg' if imgfmt == 'gif': try: data = gif_data_to_png_data(data) imgfmt = 'png' except AnimatedGIF: pass except OSError: self.log.warn(f'Ignoring undecodeable GIF image at index {image_index}') continue path = os.path.join(output_dir, '%05d.%s' % (image_index, imgfmt)) image_name_map[image_index] = os.path.basename(path) if imgfmt == 'png': with open(path, 'wb') as f: f.write(data) else: try: save_cover_data_to(data, path, minify_to=(10000, 10000)) except Exception: continue self.image_names.append(os.path.basename(path)) return image_name_map def test_mbp_regex(): for raw, m in iteritems({ '<mbp:pagebreak></mbp:pagebreak>':'', '<mbp:pagebreak xxx></mbp:pagebreak>yyy':' xxxyyy', '<mbp:pagebreak> </mbp:pagebreak>':'', '<mbp:pagebreak>xxx':'xxx', '<mbp:pagebreak/>xxx':'xxx', '<mbp:pagebreak sdf/ >xxx':' sdfxxx', '<mbp:pagebreak / >':' ', '</mbp:pagebreak>':'', '</mbp:pagebreak sdf>':' sdf', '</mbp:pagebreak><mbp:pagebreak></mbp:pagebreak>xxx':'xxx', }): ans = MobiReader.PAGE_BREAK_PAT.sub(r'\1', raw) if ans != m: raise Exception('%r != %r for %r'%(ans, m, raw))
40,024
Python
.py
858
32.249417
155
0.50819
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,476
containers.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/reader/containers.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'

from struct import error, unpack_from

from calibre.utils.imghdr import what


def find_imgtype(data):
    # Sniff the image format from the raw bytes; 'unknown' when no known
    # format is detected.
    detected = what(None, data)
    return detected if detected else 'unknown'


class Container:
    '''
    A KF8 resource container record set. The first record's EXTH block is
    inspected (EXTH field 539 = media type) to decide whether subsequent
    records hold image data.
    '''

    def __init__(self, data):
        # Assume a non-image container until the EXTH header proves otherwise.
        self.is_image_container = False
        self.resource_index = 0
        # An EXTH block is only looked for at offset 48 of sufficiently
        # large records.
        if len(data) <= 60 or data[48:52] != b'EXTH':
            return
        length, num_items = unpack_from(b'>LL', data, 52)
        offset, end = 60, 60 + length - 8
        while offset < end:
            try:
                rec_type, rec_size = unpack_from(b'>LL', data, offset)
            except error:
                break  # truncated EXTH record, stop scanning
            offset += 8
            payload_len = rec_size - 8  # rec_size includes the 8-byte header
            if payload_len < 0:
                break  # malformed size field
            if rec_type == 539:  # EXTH 539: container media type
                self.is_image_container = data[offset:offset+payload_len] == b'application/image'
                break
            offset += payload_len

    def load_image(self, data):
        # Return (image_bytes, format) for an image resource, or (None, None)
        # when the record is not a recognizable image. Always advances
        # resource_index, even on failure, so indices stay in step with the
        # container's record order.
        self.resource_index += 1
        # Image containers prefix each image record with a 12-byte header.
        payload = data[12:] if self.is_image_container else data
        imgtype = find_imgtype(payload)
        return (payload, imgtype) if imgtype != 'unknown' else (None, None)
1,249
Python
.py
35
24.028571
88
0.507077
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,477
toc.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/toc.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from calibre.ebooks.oeb.base import XHTML, XHTML_MIME, XHTML_NS, XPath, css_text, urlnormalize
from calibre.utils.localization import __
from calibre.utils.xml_parse import safe_xml_fromstring

# Fallback (translatable) title used when opts.toc_title is empty.
DEFAULT_TITLE = __('Table of Contents')

# Skeleton XHTML for the generated inline ToC page. The body id is the marker
# by which find_previous_calibre_inline_toc() recognizes a ToC page that
# calibre itself generated on an earlier conversion, so it can be replaced
# rather than duplicated.
TEMPLATE = '''
<html xmlns="{xhtmlns}">
<head>
<title>{title}</title>
<style type="text/css">
li {{ list-style-type: none }}
a {{ text-decoration: none }}
a:hover {{ color: red }}
{extra_css}
{embed_css}
</style>
</head>
<body id="calibre_generated_inline_toc">
<h2>{title}</h2>
<ul>
</ul>
</body>
</html>
'''


def find_previous_calibre_inline_toc(oeb):
    # Return the manifest item for a calibre-generated inline ToC page, if the
    # guide's 'toc' entry points at one; otherwise return None (implicitly).
    if 'toc' in oeb.guide:
        href = urlnormalize(oeb.guide['toc'].href.partition('#')[0])
        if href in oeb.manifest.hrefs:
            item = oeb.manifest.hrefs[href]
            # Identified by the marker body id written by TEMPLATE above.
            if (hasattr(item.data, 'xpath') and
                    XPath('//h:body[@id="calibre_generated_inline_toc"]')(item.data)):
                return item


class TOCAdder:
    '''
    Add (or refresh) an inline, human-readable Table of Contents page to an
    OEB book, wiring it into the spine and the guide's 'toc' reference.
    '''

    def __init__(self, oeb, opts, replace_previous_inline_toc=True, ignore_existing_toc=False):
        self.oeb, self.opts, self.log = oeb, opts, oeb.log
        self.title = opts.toc_title or DEFAULT_TITLE
        self.at_start = opts.mobi_toc_at_start
        self.generated_item = None
        # NOTE(review): this flag is only ever read/reset in this file, never
        # set True here — presumably kept for remove_generated_toc() callers.
        self.added_toc_guide_entry = False
        self.has_toc = oeb.toc and oeb.toc.count() > 1

        self.tocitem = tocitem = None
        if replace_previous_inline_toc:
            tocitem = self.tocitem = find_previous_calibre_inline_toc(oeb)
        if ignore_existing_toc and 'toc' in oeb.guide:
            oeb.guide.remove('toc')
        if 'toc' in oeb.guide:
            # Remove spurious toc entry from guide if it is not in spine or it
            # does not have any hyperlinks
            href = urlnormalize(oeb.guide['toc'].href.partition('#')[0])
            if href in oeb.manifest.hrefs:
                item = oeb.manifest.hrefs[href]
                if (hasattr(item.data, 'xpath') and
                        XPath('//h:a[@href]')(item.data)):
                    # A usable existing ToC page: keep it, ensure it is in the
                    # spine (non-linear), and do not generate a new one.
                    if oeb.spine.index(item) < 0:
                        oeb.spine.add(item, linear=False)
                    return
                elif self.has_toc:
                    oeb.guide.remove('toc')
            else:
                oeb.guide.remove('toc')

        # Nothing to do when there is no real ToC, a valid guide entry
        # survived above, the user disabled inline ToCs, or this is a
        # passthrough conversion.
        if (not self.has_toc or 'toc' in oeb.guide or opts.no_inline_toc or
                getattr(opts, 'mobi_passthrough', False)):
            return

        self.log('\tGenerating in-line ToC')

        # Carry over any embedded-font CSS so the ToC page matches the book.
        embed_css = ''
        s = getattr(oeb, 'store_embed_font_rules', None)
        if getattr(s, 'body_font_family', None):
            css = [css_text(x) for x in s.rules] + [
                'body { font-family: %s }'%s.body_font_family]
            embed_css = '\n\n'.join(css)

        root = safe_xml_fromstring(TEMPLATE.format(xhtmlns=XHTML_NS,
            title=self.title, embed_css=embed_css,
            extra_css=(opts.extra_css or '')))
        parent = XPath('//h:ul')(root)[0]
        parent.text = '\n\t'
        for child in self.oeb.toc:
            self.process_toc_node(child, parent)

        if tocitem is not None:
            # Reuse the previously generated ToC item in place.
            href = tocitem.href
            if oeb.spine.index(tocitem) > -1:
                oeb.spine.remove(tocitem)
            tocitem.data = root
        else:
            id, href = oeb.manifest.generate('contents', 'contents.xhtml')
            tocitem = self.generated_item = oeb.manifest.add(id, href, XHTML_MIME,
                    data=root)
        if self.at_start:
            oeb.spine.insert(0, tocitem, linear=True)
        else:
            oeb.spine.add(tocitem, linear=False)

        oeb.guide.add('toc', 'Table of Contents', href)

    def process_toc_node(self, toc, parent, level=0):
        # Render one ToC node as <li><a href=...>title</a></li> under parent,
        # recursing into children with a nested <ul>. The tail/text tweaks
        # keep the serialized markup readably indented with tabs.
        li = parent.makeelement(XHTML('li'))
        li.tail = '\n'+ ('\t'*level)
        parent.append(li)
        href = toc.href
        if self.tocitem is not None and href:
            # Links must be relative to the reused ToC page's location.
            href = self.tocitem.relhref(toc.href)
        a = parent.makeelement(XHTML('a'), href=href or '#')
        a.text = toc.title
        li.append(a)
        if toc.count() > 0:
            parent = li.makeelement(XHTML('ul'))
            li.append(parent)
            a.tail = '\n' + ('\t'*level)
            parent.text = '\n'+('\t'*(level+1))
            parent.tail = '\n' + ('\t'*level)
            for child in toc:
                self.process_toc_node(child, parent, level+1)

    def remove_generated_toc(self):
        # Undo the work of __init__: drop the generated manifest item and any
        # guide entry this adder recorded as its own.
        if self.generated_item is not None:
            self.oeb.manifest.remove(self.generated_item)
            self.generated_item = None
        if self.added_toc_guide_entry:
            self.oeb.guide.remove('toc')
            self.added_toc_guide_entry = False
4,854
Python
.py
118
31.559322
114
0.574852
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,478
header.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/header.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import numbers
import random
from collections import OrderedDict
from io import BytesIO
from struct import pack

from calibre.ebooks.mobi.utils import align_block
from polyglot.builtins import as_bytes, iteritems

NULL = 0xffffffff


def zeroes(x):
    # x NUL bytes — filler for reserved header fields.
    return b'\x00' * x


def nulls(x):
    # x 0xFF bytes — the conventional "unset" value in MOBI headers.
    return b'\xff' * x


def short(x):
    # Big-endian 16-bit encoding of x.
    return pack(b'>H', x)


class Header(OrderedDict):
    '''
    A binary header described declaratively by the DEFINITION class attribute:
    one ``name = expression`` per line (blank lines and ``#`` comments are
    ignored). Expressions are evaluated with the helpers above in scope; a
    value of DYN (None) marks a field that must be supplied when the header is
    serialized by calling the instance.
    '''

    HEADER_NAME = b''

    DEFINITION = '''
    '''

    ALIGN_BLOCK = False

    POSITIONS = {}  # Mapping of position field to field whose position should
                    # be stored in the position field
    SHORT_FIELDS = set()

    def __init__(self):
        OrderedDict.__init__(self)
        # Names available to the right-hand side of DEFINITION lines.
        env = {'zeroes':zeroes, 'NULL':NULL, 'DYN':None,
               'nulls':nulls, 'short':short, 'random':random}
        for raw in self.DEFINITION.splitlines():
            raw = raw.strip()
            if not raw or raw.startswith('#'):
                continue
            field, expr = (part.strip() for part in raw.partition('=')[0::2])
            # A bare name (no '=') defaults to the integer 0.
            value = eval(expr, env) if expr else 0
            if field in self:
                raise ValueError('Duplicate field in definition: %r'%field)
            self[field] = value

    @property
    def dynamic_fields(self):
        # Fields declared as DYN that still await a value.
        return tuple(name for name, value in iteritems(self) if value is None)

    def __call__(self, **kwargs):
        '''
        Serialize the header to bytes. Keyword arguments fill in dynamic
        (DYN) fields; a KeyError is raised for unknown names and a ValueError
        for any dynamic field left unset.
        '''
        for field, value in iteritems(kwargs):
            if field not in self:
                raise KeyError('Not a valid header field: %r'%field)
            self[field] = value

        positions = {}
        out = BytesIO()
        out.write(as_bytes(self.HEADER_NAME))
        for field, value in iteritems(self):
            value = self.format_value(field, value)
            positions[field] = out.tell()
            if value is None:
                raise ValueError('Dynamic field %r not set'%field)
            if isinstance(value, numbers.Integral):
                # Integers serialize big-endian, 16-bit if listed in
                # SHORT_FIELDS, else 32-bit.
                width = b'H' if field in self.SHORT_FIELDS else b'I'
                value = pack(b'>' + width, value)
            out.write(value)

        # Back-patch position fields with the offsets of their target fields.
        for pos_field, target in iteritems(self.POSITIONS):
            out.seek(positions[pos_field])
            out.write(pack(b'>I', positions[target]))

        raw = out.getvalue()
        return align_block(raw) if self.ALIGN_BLOCK else raw

    def format_value(self, name, val):
        # Hook for subclasses to transform a field value just before it is
        # serialized; the default is the identity.
        return val
2,523
Python
.py
70
27.414286
78
0.581109
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,479
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en'
149
Python
.py
4
35
58
0.678571
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,480
index.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/index.py
#!/usr/bin/env python

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

from collections import namedtuple
from io import BytesIO
from struct import pack

from calibre.ebooks.mobi.utils import CNCX, align_block, encint
from calibre.ebooks.mobi.writer8.header import Header

# (name, tag number, values per entry, control-byte bitmask, end flag)
TagMeta_ = namedtuple('TagMeta', 'name number values_per_entry bitmask end_flag')


def TagMeta(x):
    return TagMeta_(*x)


# Sentinel terminating every TAGX tag table.
EndTagTable = TagMeta(('eof', 0, 0, 0, 1))

# map of mask to number of shifts needed, works with 1 bit and two-bit wide masks
# could also be extended to 4 bit wide ones as well
mask_to_bit_shifts = {1:0, 2:1, 3:0, 4:2, 8:3, 12:2, 16:4, 32:5, 48:4, 64:6, 128:7, 192: 6}


class IndexHeader(Header):  # {{{
    # The INDX header record; field layout is given declaratively, see the
    # base Header class for the DEFINITION mini-language.

    HEADER_NAME = b'INDX'
    ALIGN_BLOCK = True
    HEADER_LENGTH = 192

    DEFINITION = '''
    # 4 - 8: Header Length
    header_length = {header_length}

    # 8 - 16: Unknown
    unknown1 = zeroes(8)

    # 16 - 20: Index type: 0 - normal 2 - inflection
    type = 2

    # 20 - 24: IDXT offset (filled in later)
    idxt_offset

    # 24 - 28: Number of index records
    num_of_records = DYN

    # 28 - 32: Index encoding (65001 = utf-8)
    encoding = 65001

    # 32 - 36: Unknown
    unknown2 = NULL

    # 36 - 40: Number of Index entries
    num_of_entries = DYN

    # 40 - 44: ORDT offset
    ordt_offset

    # 44 - 48: LIGT offset
    ligt_offset

    # 48 - 52: Number of ORDT/LIGT? entries
    num_of_ordt_entries

    # 52 - 56: Number of CNCX records
    num_of_cncx = DYN

    # 56 - 180: Unknown
    unknown3 = zeroes(124)

    # 180 - 184: TAGX offset
    tagx_offset = {header_length}

    # 184 - 192: Unknown
    unknown4 = zeroes(8)

    # TAGX
    tagx = DYN

    # Geometry of index records
    geometry = DYN

    # IDXT
    idxt = DYN
    '''.format(header_length=HEADER_LENGTH)

    # idxt_offset is back-patched with the file position of the idxt field.
    POSITIONS = {'idxt_offset':'idxt'}
# }}}


class Index:  # {{{
    # Base class for KF8 index generators. Subclasses supply tag_types, an
    # optional CNCX (string table) and self.entries, a sequence of
    # (lead_text, tag-dict) pairs; calling the instance produces the list of
    # PDB records: header record, index record(s), then CNCX records.

    control_byte_count = 1
    cncx = CNCX()
    tag_types = (EndTagTable,)

    HEADER_LENGTH = IndexHeader.HEADER_LENGTH

    @classmethod
    def generate_tagx(cls):
        # Serialize the TAGX section: 4 bytes per tag (number, values per
        # entry, bitmask, end flag) preceded by total length and the control
        # byte count.
        header = b'TAGX'
        byts = bytearray()
        for tag_meta in cls.tag_types:
            byts.extend(tag_meta[1:])
        # table length, control byte count
        header += pack(b'>II', 12+len(byts), cls.control_byte_count)
        return header + bytes(byts)

    @classmethod
    def calculate_control_bytes_for_each_entry(cls, entries):
        # For each entry, compute the control byte(s): the count of values
        # present for each tag, shifted into that tag's bitmask slot. An
        # EndTagTable sentinel finishes one control byte and starts the next.
        control_bytes = []
        for lead_text, tags in entries:
            cbs = []
            ans = 0
            for (name, number, vpe, mask, endi) in cls.tag_types:
                if endi == 1:
                    cbs.append(ans)
                    ans = 0
                    continue
                try:
                    nvals = len(tags.get(name, ()))
                except TypeError:
                    # Scalar value counts as a single value.
                    nvals = 1
                nentries = nvals // vpe
                shifts = mask_to_bit_shifts[mask]
                ans |= mask & (nentries << shifts)
            if len(cbs) != cls.control_byte_count:
                raise ValueError(f'The entry {[lead_text, tags]!r} is invalid')
            control_bytes.append(cbs)
        return control_bytes

    def __call__(self):
        self.control_bytes = self.calculate_control_bytes_for_each_entry(
            self.entries)
        # Parallel lists, one slot per index record: the entry data, the IDXT
        # offset table, how many entries landed in the record, and the last
        # entry's lead text (needed for the header record's geometry section).
        index_blocks, idxt_blocks, record_counts, last_indices = [BytesIO()], [BytesIO()], [0], [b'']
        buf = BytesIO()

        RECORD_LIMIT = 0x10000 - self.HEADER_LENGTH - 1048  # kindlegen uses 1048 (there has to be some margin because of block alignment)

        for i, (index_num, tags) in enumerate(self.entries):
            control_bytes = self.control_bytes[i]
            buf.seek(0), buf.truncate(0)
            index_num = (index_num.encode('utf-8') if isinstance(index_num, str)
                    else index_num)
            # Entry = length-prefixed lead text, control byte(s), then the
            # variable-width (encint) tag values in tag_types order.
            raw = bytearray(index_num)
            raw.insert(0, len(index_num))
            buf.write(bytes(raw))
            buf.write(bytes(bytearray(control_bytes)))
            for tag in self.tag_types:
                values = tags.get(tag.name, None)
                if values is None:
                    continue
                try:
                    len(values)
                except TypeError:
                    values = [values]
                if values:
                    for val in values:
                        try:
                            buf.write(encint(val))
                        except ValueError:
                            raise ValueError('Invalid values for %r: %r'%(
                                tag, values))
            raw = buf.getvalue()
            offset = index_blocks[-1].tell()
            idxt_pos = idxt_blocks[-1].tell()
            # Roll over to a fresh index record when this entry would push
            # the current record past the 64KB PDB record limit.
            if offset + idxt_pos + len(raw) + 2 > RECORD_LIMIT:
                index_blocks.append(BytesIO())
                idxt_blocks.append(BytesIO())
                record_counts.append(0)
                offset = idxt_pos = 0
                last_indices.append(b'')
            record_counts[-1] += 1
            idxt_blocks[-1].write(pack(b'>H', self.HEADER_LENGTH+offset))
            index_blocks[-1].write(raw)
            last_indices[-1] = index_num

        index_records = []
        for index_block, idxt_block, record_count in zip(index_blocks, idxt_blocks, record_counts):
            index_block = align_block(index_block.getvalue())
            idxt_block = align_block(b'IDXT' + idxt_block.getvalue())

            # Create header for this index record
            header = b'INDX'
            buf.seek(0), buf.truncate(0)
            buf.write(pack(b'>I', self.HEADER_LENGTH))
            buf.write(b'\0'*4)  # Unknown
            buf.write(pack(b'>I', 1))  # Header type (0 for Index header record and 1 for Index records)
            buf.write(b'\0'*4)  # Unknown

            # IDXT block offset
            buf.write(pack(b'>I', self.HEADER_LENGTH + len(index_block)))

            # Number of index entries in this record
            buf.write(pack(b'>I', record_count))

            buf.write(b'\xff'*8)  # Unknown

            buf.write(b'\0'*156)  # Unknown

            header += buf.getvalue()

            index_records.append(header + index_block + idxt_block)
            if len(index_records[-1]) > 0x10000:
                raise ValueError('Failed to rollover index blocks for very large index.')

        # Create the Index Header record
        tagx = self.generate_tagx()

        # Geometry of the index records is written as index entries pointed to
        # by the IDXT records
        buf.seek(0), buf.truncate()
        idxt = [b'IDXT']
        pos = IndexHeader.HEADER_LENGTH + len(tagx)
        for last_idx, num in zip(last_indices, record_counts):
            start = buf.tell()
            idxt.append(pack(b'>H', pos))
            buf.write(bytes(bytearray([len(last_idx)])) + last_idx)
            buf.write(pack(b'>H', num))
            pos += buf.tell() - start

        header = {
            'num_of_entries': sum(r for r in record_counts),
            'num_of_records': len(index_records),
            'num_of_cncx': len(self.cncx),
            'tagx':align_block(tagx),
            'geometry':align_block(buf.getvalue()),
            'idxt':align_block(b''.join(idxt)),
        }

        header = IndexHeader()(**header)
        self.records = [header] + index_records
        self.records.extend(self.cncx.records)

        return self.records
# }}}


class SkelIndex(Index):
    # Index over the skeleton table: per-skeleton chunk counts and geometry.

    tag_types = tuple(map(TagMeta, (
        ('chunk_count', 1, 1, 3, 0),
        ('geometry',    6, 2, 12, 0),
        EndTagTable
    )))

    def __init__(self, skel_table):
        self.entries = [
                (s.name, {
                    # Dont ask me why these entries have to be repeated twice
                    'chunk_count':(s.chunk_count, s.chunk_count),
                    'geometry':(s.start_pos, s.length, s.start_pos, s.length),
                    }) for s in skel_table
        ]


class ChunkIndex(Index):
    # Index over the chunk table; chunk selectors live in the CNCX and
    # entries are keyed by the zero-padded insert position.

    tag_types = tuple(map(TagMeta, (
        ('cncx_offset',     2, 1, 1, 0),
        ('file_number',     3, 1, 2, 0),
        ('sequence_number', 4, 1, 4, 0),
        ('geometry',        6, 2, 8, 0),
        EndTagTable
    )))

    def __init__(self, chunk_table):
        self.cncx = CNCX(c.selector for c in chunk_table)

        self.entries = [
                ('%010d'%c.insert_pos, {
                    'cncx_offset':self.cncx[c.selector],
                    'file_number':c.file_number,
                    'sequence_number':c.sequence_number,
                    'geometry':(c.start_pos, c.length),
                    }) for c in chunk_table
        ]


class GuideIndex(Index):
    # Index over the guide table; guide titles live in the CNCX.

    tag_types = tuple(map(TagMeta, (
        ('title',   1, 1, 1, 0),
        ('pos_fid', 6, 2, 2, 0),
        EndTagTable
    )))

    def __init__(self, guide_table):
        self.cncx = CNCX(c.title for c in guide_table)

        self.entries = [
                (r.type, {
                    'title':self.cncx[r.title],
                    'pos_fid':r.pos_fid,
                    }) for r in guide_table
        ]


class NCXIndex(Index):
    ''' The commented out parts have been seen in NCX indexes from MOBI 6
    periodicals. Since we have no MOBI 8 periodicals to reverse engineer,
    leave it for now. '''
    # control_byte_count = 2
    tag_types = tuple(map(TagMeta, (
        ('offset', 1, 1, 1, 0),
        ('length', 2, 1, 2, 0),
        ('label', 3, 1, 4, 0),
        ('depth', 4, 1, 8, 0),
        ('parent', 21, 1, 16, 0),
        ('first_child', 22, 1, 32, 0),
        ('last_child', 23, 1, 64, 0),
        ('pos_fid', 6, 2, 128, 0),
        EndTagTable,
        # ('image', 69, 1, 1, 0),
        # ('description', 70, 1, 2, 0),
        # ('author', 71, 1, 4, 0),
        # ('caption', 72, 1, 8, 0),
        # ('attribution', 73, 1, 16, 0),
        # EndTagTable
    )))

    def __init__(self, toc_table):
        # Collect all strings (labels, authors, descriptions, kinds) into the
        # CNCX so entries can reference them by offset.
        strings = []
        for entry in toc_table:
            strings.append(entry['label'])
            aut = entry.get('author', None)
            if aut:
                strings.append(aut)
            desc = entry.get('description', None)
            if desc:
                strings.append(desc)
            kind = entry.get('kind', None)
            if kind:
                strings.append(kind)
        self.cncx = CNCX(strings)

        try:
            largest = max(x['index'] for x in toc_table)
        except ValueError:
            # Empty toc_table: max() raises ValueError.
            largest = 0
        # Hex lead-text format, wide enough for the largest index, minimum
        # two digits.
        fmt = '%0{}X'.format(max(2, len('%X' % largest)))

        def to_entry(x):
            # Map a toc_table row to (lead_text, tags): numeric fields are
            # copied, string fields become CNCX offsets.
            ans = {}
            for f in ('offset', 'length', 'depth', 'pos_fid', 'parent',
                    'first_child', 'last_child'):
                if f in x:
                    ans[f] = x[f]
            for f in ('label', 'description', 'author', 'kind'):
                if f in x:
                    ans[f] = self.cncx[x[f]]
            return (fmt % x['index'], ans)

        self.entries = list(map(to_entry, toc_table))


class NonLinearNCXIndex(NCXIndex):
    # NCX variant with two control bytes and a 'kind' tag, used for books
    # with non-linear spine items.
    control_byte_count = 2
    tag_types = tuple(map(TagMeta, (
        ('offset', 1, 1, 1, 0),
        ('length', 2, 1, 2, 0),
        ('label', 3, 1, 4, 0),
        ('depth', 4, 1, 8, 0),
        ('kind', 5, 1, 16, 0),
        ('parent', 21, 1, 32, 0),
        ('first_child', 22, 1, 64, 0),
        ('last_child', 23, 1, 128, 0),
        EndTagTable,
        ('pos_fid', 6, 2, 1, 0),
        EndTagTable
    )))


if __name__ == '__main__':
    # Generate a document with a large number of index entries using both
    # calibre and kindlegen and compare the output
    import os
    import subprocess
    os.chdir('/t')
    paras = ['<p>%d</p>' % i for i in range(4000)]
    raw = '<html><body>' + '\n\n'.join(paras) + '</body></html>'
    src = 'index.html'
    with open(src, 'wb') as f:
        f.write(raw.encode('utf-8'))
    subprocess.check_call(['ebook-convert', src, '.epub', '--level1-toc', '//h:p',
        '--no-default-epub-cover', '--flow-size', '1000000'])
    subprocess.check_call(['ebook-convert', src, '.azw3', '--level1-toc', '//h:p',
        '--no-inline-toc', '--extract-to=x'])
    subprocess.call(['kindlegen', 'index.epub'])  # kindlegen exit code is not 0 as we dont have a cover
    subprocess.check_call(['calibre-debug', 'index.mobi'])
    from calibre.gui2.tweak_book.diff.main import main
    main(['cdiff', 'decompiled_index/mobi8/ncx.record', 'x/ncx.record'])
12,782
Python
.py
317
30.022082
138
0.521269
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,481
skeleton.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/skeleton.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import re from collections import namedtuple from functools import partial from xml.sax.saxutils import escape from lxml import etree from calibre import my_unichr from calibre.ebooks.mobi.utils import PolyglotDict, to_base from calibre.ebooks.oeb.base import XHTML_NS, extract from polyglot.builtins import as_bytes, iteritems CHUNK_SIZE = 8192 # References in links are stored with 10 digits to_href = partial(to_base, base=32, min_num_digits=10) # Tags to which kindlegen adds the aid attribute aid_able_tags = {'a', 'abbr', 'address', 'article', 'aside', 'audio', 'b', 'bdo', 'blockquote', 'body', 'button', 'cite', 'code', 'dd', 'del', 'details', 'dfn', 'div', 'dl', 'dt', 'em', 'fieldset', 'figcaption', 'figure', 'footer', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'header', 'hgroup', 'i', 'ins', 'kbd', 'label', 'legend', 'li', 'map', 'mark', 'meter', 'nav', 'ol', 'output', 'p', 'pre', 'progress', 'q', 'rp', 'rt', 'samp', 'section', 'select', 'small', 'span', 'strong', 'sub', 'summary', 'sup', 'textarea', 'time', 'ul', 'var', 'video'} _self_closing_pat = re.compile( br'<(?P<tag>%s)(?=[\s/])(?P<arg>[^>]*)/>'%('|'.join(aid_able_tags|{'script', 'style', 'title', 'head'})).encode('ascii'), re.IGNORECASE) def close_self_closing_tags(raw): return _self_closing_pat.sub(br'<\g<tag>\g<arg>></\g<tag>>', raw) def path_to_node(node): ans = [] parent = node.getparent() while parent is not None: ans.append(parent.index(node)) node = parent parent = parent.getparent() return tuple(reversed(ans)) def node_from_path(root, path): parent = root for idx in path: parent = parent[idx] return parent def tostring(raw, **kwargs): ''' lxml *sometimes* represents non-ascii characters as hex entities in attribute values. I can't figure out exactly what circumstances cause it. It seems to happen when serializing a part of a larger tree. 
Since we need serialization to be the same when serializing full and partial trees, we manually replace all hex entities with their unicode codepoints. ''' xml_declaration = kwargs.pop('xml_declaration', False) encoding = kwargs.pop('encoding', 'UTF-8') kwargs['encoding'] = str kwargs['xml_declaration'] = False ans = etree.tostring(raw, **kwargs) if xml_declaration: ans = '<?xml version="1.0" encoding="%s"?>\n'%encoding + ans return re.sub(r'&#x([0-9A-Fa-f]+);', lambda m:my_unichr(int(m.group(1), 16)), ans).encode(encoding) class Chunk: def __init__(self, raw, selector): self.raw = raw self.starts_tags = [] self.ends_tags = [] self.insert_pos = None self.is_first_chunk = False self.selector = "%s-//*[@aid='%s']"%selector def __len__(self): return len(self.raw) def merge(self, chunk): self.raw += chunk.raw self.ends_tags = chunk.ends_tags def __repr__(self): return 'Chunk(len=%r insert_pos=%r starts_tags=%r ends_tags=%r)'%( len(self.raw), self.insert_pos, self.starts_tags, self.ends_tags) __str__ = __repr__ class Skeleton: def __init__(self, file_number, item, root, chunks): self.file_number, self.item = file_number, item self.chunks = chunks self.skeleton = self.render(root) self.body_offset = self.skeleton.find(b'<body') self.calculate_metrics(root) self.calculate_insert_positions() def render(self, root): raw = tostring(root, xml_declaration=True) raw = raw.replace(b'<html', ('<html xmlns="%s"'%XHTML_NS).encode('ascii'), 1) raw = close_self_closing_tags(raw) return raw def calculate_metrics(self, root): Metric = namedtuple('Metric', 'start end') self.metrics = {} for tag in root.xpath('//*[@aid]'): text = (tag.text or '').encode('utf-8') raw = close_self_closing_tags(tostring(tag, with_tail=True)) start_length = len(raw.partition(b'>')[0]) + len(text) + 1 end_length = len(raw.rpartition(b'<')[-1]) + 1 self.metrics[tag.get('aid')] = Metric(start_length, end_length) def calculate_insert_positions(self): pos = self.body_offset for chunk in self.chunks: for tag in 
chunk.starts_tags: pos += self.metrics[tag].start chunk.insert_pos = pos pos += len(chunk) for tag in chunk.ends_tags: pos += self.metrics[tag].end def rebuild(self): ans = self.skeleton for chunk in self.chunks: i = chunk.insert_pos ans = ans[:i] + chunk.raw + ans[i:] return ans def __len__(self): return len(self.skeleton) + sum(len(x.raw) for x in self.chunks) @property def raw_text(self): return b''.join([self.skeleton] + [x.raw for x in self.chunks]) class Chunker: def __init__(self, oeb, data_func, placeholder_map): self.oeb, self.log = oeb, oeb.log self.data = data_func self.placeholder_map = placeholder_map self.skeletons = [] # Set this to a list to enable dumping of the original and rebuilt # html files for debugging orig_dumps = None for i, item in enumerate(self.oeb.spine): root = self.remove_namespaces(self.data(item)) for child in root.xpath('//*[@aid]'): child.set('aid', child.attrib.pop('aid')) # kindlegen always puts the aid last body = root.xpath('//body')[0] body.tail = '\n' if orig_dumps is not None: orig_dumps.append(tostring(root, xml_declaration=True, with_tail=True)) orig_dumps[-1] = close_self_closing_tags( orig_dumps[-1].replace(b'<html', ('<html xmlns="%s"'%XHTML_NS).encode('ascii'), 1)) # First pass: break up document into rendered strings of length no # more than CHUNK_SIZE chunks = [] self.step_into_tag(body, chunks) # Second pass: Merge neighboring small chunks within the same # skeleton tag so as to have chunks as close to the CHUNK_SIZE as # possible. 
chunks = self.merge_small_chunks(chunks) # Third pass: Create the skeleton and calculate the insert position # for all chunks self.skeletons.append(Skeleton(i, item, root, chunks)) if orig_dumps: self.dump(orig_dumps) # Create the SKEL and Chunk tables self.skel_table = [] self.chunk_table = [] self.create_tables() # Set internal links text = b''.join(x.raw_text for x in self.skeletons) self.text = self.set_internal_links(text, b''.join(x.rebuild() for x in self.skeletons)) def remove_namespaces(self, root): lang = None for attr, val in iteritems(root.attrib): if attr.rpartition('}')[-1] == 'lang': lang = val # Remove all namespace information from the tree. This means namespaced # tags have their namespaces removed and all namespace declarations are # removed. We have to do this manual cloning of the tree as there is no # other way to remove namespace declarations in lxml. This is done so # that serialization creates clean HTML 5 markup with no namespaces. We # insert the XHTML namespace manually after serialization. The # preceding layers should have removed svg and any other non html # namespaced tags. 
attrib = {'lang':lang} if lang else {} if 'class' in root.attrib: attrib['class'] = root.attrib['class'] if 'style' in root.attrib: attrib['style'] = root.attrib['style'] nroot = etree.Element('html', attrib=attrib) nroot.text = root.text nroot.tail = '\n' # Remove Comments and ProcessingInstructions as kindlegen seems to # remove them as well for tag in root.iterdescendants(): if tag.tag in {etree.Comment, etree.ProcessingInstruction}: extract(tag) for tag in root.iterdescendants(): if tag.tag == etree.Entity: elem = etree.Entity(tag.name) else: tn = tag.tag if tn is not None: tn = tn.rpartition('}')[-1] attrib = {k.rpartition('}')[-1]:v for k, v in iteritems(tag.attrib)} try: elem = nroot.makeelement(tn, attrib=attrib) except ValueError: attrib = {k:v for k, v in iteritems(attrib) if ':' not in k} elem = nroot.makeelement(tn, attrib=attrib) elem.text = tag.text elem.tail = tag.tail parent = node_from_path(nroot, path_to_node(tag.getparent())) parent.append(elem) return nroot def step_into_tag(self, tag, chunks): aid = tag.get('aid') self.chunk_selector = ('P', aid) first_chunk_idx = len(chunks) # First handle any text if tag.text and tag.text.strip(): # Leave pure whitespace in the skel chunks.extend(self.chunk_up_text(tag.text)) tag.text = None # Now loop over children for child in list(tag): raw = tostring(child, with_tail=False) if child.tag == etree.Entity: chunks.append(raw) if child.tail: chunks.extend(self.chunk_up_text(child.tail)) continue raw = close_self_closing_tags(raw) if len(raw) > CHUNK_SIZE and child.get('aid', None): self.step_into_tag(child, chunks) if child.tail and child.tail.strip(): # Leave pure whitespace chunks.extend(self.chunk_up_text(child.tail)) child.tail = None else: if len(raw) > CHUNK_SIZE: self.log.warn('Tag %s has no aid and a too large chunk' ' size. 
Adding anyway.'%child.tag) chunks.append(Chunk(raw, self.chunk_selector)) if child.tail: chunks.extend(self.chunk_up_text(child.tail)) tag.remove(child) if len(chunks) <= first_chunk_idx and chunks: raise ValueError('Stepped into a tag that generated no chunks.') # Mark the first and last chunks of this tag if chunks: chunks[first_chunk_idx].starts_tags.append(aid) chunks[-1].ends_tags.append(aid) my_chunks = chunks[first_chunk_idx:] if my_chunks: my_chunks[0].is_first_chunk = True self.chunk_selector = ('S', aid) def chunk_up_text(self, text): text = escape(text) text = text.encode('utf-8') ans = [] def split_multibyte_text(raw): if len(raw) <= CHUNK_SIZE: return raw, b'' l = raw[:CHUNK_SIZE] l = l.decode('utf-8', 'ignore').encode('utf-8') return l, raw[len(l):] start, rest = split_multibyte_text(text) ans.append(start) while rest: start, rest = split_multibyte_text(rest) ans.append(b'<span class="AmznBigTextBlock">' + start + b'</span>') return [Chunk(x, self.chunk_selector) for x in ans] def merge_small_chunks(self, chunks): ans = chunks[:1] for chunk in chunks[1:]: prev = ans[-1] if ( chunk.starts_tags or # Starts a tag in the skel len(chunk) + len(prev) > CHUNK_SIZE or # Too large prev.ends_tags # Prev chunk ended a tag ): ans.append(chunk) else: prev.merge(chunk) return ans def create_tables(self): Skel = namedtuple('Skel', 'file_number name chunk_count start_pos length') sp = 0 for s in self.skeletons: s.start_pos = sp sp += len(s) self.skel_table = [Skel(s.file_number, 'SKEL%010d'%s.file_number, len(s.chunks), s.start_pos, len(s.skeleton)) for s in self.skeletons] Chunk = namedtuple('Chunk', 'insert_pos selector file_number sequence_number start_pos length') num = 0 for skel in self.skeletons: cp = 0 for chunk in skel.chunks: self.chunk_table.append( Chunk(chunk.insert_pos + skel.start_pos, chunk.selector, skel.file_number, num, cp, len(chunk.raw))) cp += len(chunk.raw) num += 1 def set_internal_links(self, text, rebuilt_text): ''' Update the internal link 
placeholders to point to the correct location, based on the chunk table.''' # A kindle:pos:fid:off link contains two base 32 numbers of the form # XXXX:YYYYYYYYYY # The first number is an index into the chunk table and the second is # an offset from the start of the chunk to the start of the tag pointed # to by the link. aid_map = PolyglotDict() # Map of aid to (fid, offset_from_start_of_chunk, offset_from_start_of_text) for match in re.finditer(br'<[^>]+? [ac]id=[\'"]([cA-Z0-9]+)[\'"]', rebuilt_text): offset = match.start() pos_fid = None for chunk in self.chunk_table: if chunk.insert_pos <= offset < chunk.insert_pos + chunk.length: pos_fid = (chunk.sequence_number, offset-chunk.insert_pos, offset) break if chunk.insert_pos > offset: # This aid is in the skeleton, not in a chunk, so we use # the chunk immediately after pos_fid = (chunk.sequence_number, 0, offset) break if chunk is self.chunk_table[-1]: # This can happen for aids very close to the end of the # text (https://bugs.launchpad.net/bugs/1011330) pos_fid = (chunk.sequence_number, offset-chunk.insert_pos, offset) if pos_fid is None: raise ValueError('Could not find chunk for aid: %r'% match.group(1)) aid_map[match.group(1)] = pos_fid self.aid_offset_map = aid_map def to_placeholder(aid): pos, fid, _ = aid_map[aid] pos, fid = to_base(pos, min_num_digits=4), to_href(fid) return ':off:'.join((pos, fid)).encode('utf-8') placeholder_map = {as_bytes(k):to_placeholder(v) for k, v in iteritems(self.placeholder_map)} # Now update the links def sub(match): raw = match.group() pl = match.group(1) try: return raw[:-19] + placeholder_map[pl] except KeyError: pass return raw return re.sub(br'<[^>]+(kindle:pos:fid:0000:off:[0-9A-Za-z]{10})', sub, text) def dump(self, orig_dumps): import os import shutil import tempfile tdir = os.path.join(tempfile.gettempdir(), 'skeleton') self.log('Skeletons dumped to:', tdir) if os.path.exists(tdir): shutil.rmtree(tdir) orig = os.path.join(tdir, 'orig') rebuilt = os.path.join(tdir, 
'rebuilt') chunks = os.path.join(tdir, 'chunks') for x in (orig, rebuilt, chunks): os.makedirs(x) error = False for i, skeleton in enumerate(self.skeletons): for j, chunk in enumerate(skeleton.chunks): with open(os.path.join(chunks, 'file-%d-chunk-%d.html'%(i, j)), 'wb') as f: f.write(chunk.raw) oraw, rraw = orig_dumps[i], skeleton.rebuild() with open(os.path.join(orig, '%04d.html'%i), 'wb') as f: f.write(oraw) with open(os.path.join(rebuilt, '%04d.html'%i), 'wb') as f: f.write(rraw) if oraw != rraw: error = True if error: raise ValueError('The before and after HTML differs. Run a diff ' 'tool on the orig and rebuilt directories') else: self.log('Skeleton HTML before and after is identical.')
16,892
Python
.py
372
34.287634
110
0.566365
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,482
mobi.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/mobi.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import random import time from struct import pack from calibre.ebooks.mobi.langcodes import iana2mobi from calibre.ebooks.mobi.utils import RECORD_SIZE, utf8_text from calibre.ebooks.mobi.writer2 import PALMDOC, UNCOMPRESSED from calibre.ebooks.mobi.writer8.exth import build_exth from calibre.ebooks.mobi.writer8.header import Header from calibre.utils.filenames import ascii_filename NULL_INDEX = 0xffffffff FLIS = b'FLIS\0\0\0\x08\0\x41\0\0\0\0\0\0\xff\xff\xff\xff\0\x01\0\x03\0\0\0\x03\0\0\0\x01'+ b'\xff'*4 def fcis(text_length): fcis = b'FCIS\x00\x00\x00\x14\x00\x00\x00\x10\x00\x00\x00\x02\x00\x00\x00\x00' fcis += pack(b'>L', text_length) fcis += b'\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00' fcis += b'\x28\x00\x00\x00\x08\x00\x01\x00\x01\x00\x00\x00\x00' return fcis class MOBIHeader(Header): # {{{ ''' Represents the first record in a MOBI file, contains all the metadata about the file. 
''' DEFINITION = ''' # 0: Compression compression = DYN # 2: Unused unused1 = zeroes(2) # 4: Text length text_length = DYN # 8: Last text record last_text_record = DYN # 10: Text record size record_size = {record_size} # 12: Encryption Type encryption_type # 14: Unused unused2 # 16: Ident ident = b'MOBI' # 20: Header length header_length = 264 # 24: Book Type (0x2 - Book, 0x101 - News hierarchical, 0x102 - News # (flat), 0x103 - News magazine same as 0x101) book_type = DYN # 28: Text encoding (utf-8 = 65001) encoding = 65001 # 32: UID uid = DYN # 36: File version file_version = {file_version} # 40: Meta orth record (used in dictionaries) meta_orth_record = NULL # 44: Meta infl index meta_infl_index = NULL # 48: Extra indices extra_index0 = NULL extra_index1 = NULL extra_index2 = NULL extra_index3 = NULL extra_index4 = NULL extra_index5 = NULL extra_index6 = NULL extra_index7 = NULL # 80: First non text record first_non_text_record = DYN # 84: Title offset title_offset # 88: Title Length title_length = DYN # 92: Language code language_code = DYN # 96: Dictionary in and out languages in_lang out_lang # 104: Min version min_version = {file_version} # 108: First resource record first_resource_record = DYN # 112: Huff/CDIC compression huff_first_record huff_count huff_table_offset = zeroes(4) huff_table_length = zeroes(4) # 128: EXTH flags exth_flags = DYN # 132: Unknown unknown = zeroes(32) # 164: Unknown unknown_index = NULL # 168: DRM drm_offset = NULL drm_count drm_size drm_flags # 184: Unknown unknown2 = zeroes(8) # 192: FDST # In MOBI 6 the fdst record is instead two two byte fields storing the # index of the first and last content records fdst_record = DYN fdst_count = DYN # 200: FCIS fcis_record = DYN fcis_count = 1 # 208: FLIS flis_record = DYN flis_count = 1 # 216: Unknown unknown3 = zeroes(8) # 224: SRCS srcs_record = NULL srcs_count # 232: Unknown unknown4 = nulls(8) # 240: Extra data flags # 0b1 - extra multibyte bytes after text records # 0b10 - TBS 
indexing data (only used in MOBI 6) # 0b100 - uncrossable breaks only used in MOBI 6 extra_data_flags = DYN # 244: KF8 Indices ncx_index = DYN chunk_index = DYN skel_index = DYN datp_index = NULL guide_index = DYN # 264: Unknown unknown5 = nulls(4) unknown6 = zeroes(4) unknown7 = nulls(4) unknown8 = zeroes(4) # 280: EXTH exth = DYN # Full title full_title = DYN # Padding to allow amazon's DTP service to add data padding = zeroes(8192) ''' SHORT_FIELDS = {'compression', 'last_text_record', 'record_size', 'encryption_type', 'unused2'} ALIGN = True POSITIONS = {'title_offset':'full_title'} def __init__(self, file_version=8): self.DEFINITION = self.DEFINITION.format(file_version=file_version, record_size=RECORD_SIZE) super().__init__() def format_value(self, name, val): if name == 'compression': val = PALMDOC if val else UNCOMPRESSED return super().format_value(name, val) # }}} HEADER_FIELDS = {'compression', 'text_length', 'last_text_record', 'book_type', 'first_non_text_record', 'title_length', 'language_code', 'first_resource_record', 'exth_flags', 'fdst_record', 'fdst_count', 'ncx_index', 'chunk_index', 'skel_index', 'guide_index', 'exth', 'full_title', 'extra_data_flags', 'flis_record', 'fcis_record', 'uid'} class KF8Book: def __init__(self, writer, for_joint=False): self.build_records(writer, for_joint) self.used_images = writer.used_images self.page_progression_direction = writer.oeb.spine.page_progression_direction self.primary_writing_mode = writer.oeb.metadata.primary_writing_mode if self.page_progression_direction == 'rtl' and not self.primary_writing_mode: # Without this the Kindle renderer does not respect # page_progression_direction self.primary_writing_mode = 'horizontal-rl' def build_records(self, writer, for_joint): metadata = writer.oeb.metadata # The text records for x in ('last_text_record_idx', 'first_non_text_record_idx'): setattr(self, x.rpartition('_')[0], getattr(writer, x)) self.records = writer.records self.text_length = writer.text_length # 
KF8 Indices self.chunk_index = len(self.records) self.records.extend(writer.chunk_records) self.skel_index = len(self.records) self.records.extend(writer.skel_records) self.guide_index = NULL_INDEX if writer.guide_records: self.guide_index = len(self.records) self.records.extend(writer.guide_records) self.ncx_index = NULL_INDEX if writer.ncx_records: self.ncx_index = len(self.records) self.records.extend(writer.ncx_records) # Resources resources = writer.resources for x in ('cover_offset', 'thumbnail_offset', 'masthead_offset'): setattr(self, x, getattr(resources, x)) self.first_resource_record = NULL_INDEX before = len(self.records) if resources.records: self.first_resource_record = len(self.records) if not for_joint: resources.serialize(self.records, writer.used_images) self.num_of_resources = len(self.records) - before # FDST self.fdst_count = writer.fdst_count self.fdst_record = len(self.records) self.records.extend(writer.fdst_records) # FLIS/FCIS self.flis_record = len(self.records) self.records.append(FLIS) self.fcis_record = len(self.records) self.records.append(fcis(self.text_length)) # EOF self.records.append(b'\xe9\x8e\r\n') # EOF record # Miscellaneous header fields self.compression = writer.compress self.book_type = 0x101 if writer.opts.mobi_periodical else 2 self.full_title = utf8_text(str(metadata.title[0])) self.title_length = len(self.full_title) self.extra_data_flags = 0b1 if writer.has_tbs: self.extra_data_flags |= 0b10 self.uid = random.randint(0, 0xffffffff) self.language_code = iana2mobi(str(metadata.language[0])) self.exth_flags = 0b1010000 if writer.opts.mobi_periodical: self.exth_flags |= 0b1000 if resources.has_fonts: self.exth_flags |= 0b1000000000000 self.opts = writer.opts self.start_offset = writer.start_offset self.metadata = metadata self.kuc = 0 if len(resources.records) > 0 else None @property def record0(self): ''' We generate the EXTH header and record0 dynamically, to allow other code to customize various values after 
build_records() has been called''' opts = self.opts self.exth = build_exth( self.metadata, prefer_author_sort=opts.prefer_author_sort, is_periodical=opts.mobi_periodical, share_not_sync=opts.share_not_sync, cover_offset=self.cover_offset, thumbnail_offset=self.thumbnail_offset, num_of_resources=self.num_of_resources, kf8_unknown_count=self.kuc, be_kindlegen2=True, start_offset=self.start_offset, mobi_doctype=self.book_type, page_progression_direction=self.page_progression_direction, primary_writing_mode=self.primary_writing_mode ) kwargs = {field:getattr(self, field) for field in HEADER_FIELDS} return MOBIHeader()(**kwargs) def write(self, outpath): records = [self.record0] + self.records[1:] with open(outpath, 'wb') as f: # Write PalmDB Header title = ascii_filename(self.full_title.decode('utf-8')).replace(' ', '_') if not isinstance(title, bytes): title = title.encode('ascii') title = title[:31] title += (b'\0' * (32 - len(title))) now = int(time.time()) nrecords = len(records) f.write(title) f.write(pack(b'>HHIIIIII', 0, 0, now, now, 0, 0, 0, 0)) f.write(b'BOOKMOBI') f.write(pack(b'>IIH', (2*nrecords)-1, 0, nrecords)) offset = f.tell() + (8 * nrecords) + 2 for i, record in enumerate(records): f.write(pack(b'>I', offset)) f.write(b'\0' + pack(b'>I', 2*i)[1:]) offset += len(record) f.write(b'\0\0') for rec in records: f.write(rec)
10,264
Python
.py
275
29.541818
101
0.622982
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,483
cleanup.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/cleanup.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' from calibre.ebooks.oeb.base import XPath class CSSCleanup: def __init__(self, log, opts): self.log, self.opts = log, opts def __call__(self, item, stylizer): if not hasattr(item.data, 'xpath'): return # The Kindle touch displays all black pages if the height is set on # body for body in XPath('//h:body')(item.data): style = stylizer.style(body) style.drop('height') def remove_duplicate_anchors(oeb): # The Kindle apparently has incorrect behavior for duplicate anchors, see # https://bugs.launchpad.net/calibre/+bug/1454199 for item in oeb.spine: if not hasattr(item.data, 'xpath'): continue seen = set() for tag in item.data.xpath('//*[@id or @name]'): for attr in ('id', 'name'): anchor = tag.get(attr) if anchor is not None: if anchor in seen: oeb.log.debug('Removing duplicate anchor:', anchor) tag.attrib.pop(attr) else: seen.add(anchor)
1,289
Python
.py
32
29.96875
77
0.567763
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,484
main.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/main.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import copy import logging from collections import defaultdict, namedtuple from functools import partial from io import BytesIO from struct import pack import css_parser from css_parser.css import CSSRule from lxml import etree from calibre import force_unicode, isbytestring from calibre.ebooks.compression.palmdoc import compress_doc from calibre.ebooks.mobi.utils import create_text_record, is_guide_ref_start, to_base from calibre.ebooks.mobi.writer8.index import ChunkIndex, GuideIndex, NCXIndex, NonLinearNCXIndex, SkelIndex from calibre.ebooks.mobi.writer8.mobi import KF8Book from calibre.ebooks.mobi.writer8.skeleton import Chunker, aid_able_tags, to_href from calibre.ebooks.mobi.writer8.tbs import apply_trailing_byte_sequences from calibre.ebooks.mobi.writer8.toc import TOCAdder from calibre.ebooks.oeb.base import OEB_DOCS, OEB_STYLES, SVG_MIME, XHTML, XPath, extract, urlnormalize from calibre.ebooks.oeb.normalize_css import condense_sheet from calibre.ebooks.oeb.parse_utils import barename from polyglot.builtins import iteritems XML_DOCS = OEB_DOCS | {SVG_MIME} # References to record numbers in KF8 are stored as base-32 encoded integers, # with 4 digits to_ref = partial(to_base, base=32, min_num_digits=4) class KF8Writer: def __init__(self, oeb, opts, resources): self.oeb, self.opts, self.log = oeb, opts, oeb.log try: self.compress = not self.opts.dont_compress except Exception: self.compress = True self.has_tbs = False self.log.info('Creating KF8 output') # Create an inline ToC if one does not already exist self.toc_adder = TOCAdder(oeb, opts) self.used_images = set() self.resources = resources self.flows = [None] # First flow item is reserved for the text self.records = [None] # Placeholder for zeroth record self.log('\tGenerating KF8 markup...') self.dup_data() self.cleanup_markup() self.replace_resource_links() 
self.extract_css_into_flows() self.extract_svg_into_flows() self.replace_internal_links_with_placeholders() self.insert_aid_attributes() self.chunk_it_up() # Dump the cloned data as it is no longer needed del self._data_cache self.create_text_records() self.log('\tCreating indices...') self.create_fdst_records() self.create_indices() self.create_guide() # We do not want to use this ToC for MOBI 6, so remove it self.toc_adder.remove_generated_toc() def dup_data(self): ''' Duplicate data so that any changes we make to markup/CSS only affect KF8 output and not MOBI 6 output ''' self._data_cache = {} # Suppress css_parser logging output as it is duplicated anyway earlier # in the pipeline css_parser.log.setLevel(logging.CRITICAL) for item in self.oeb.manifest: if item.media_type in XML_DOCS: self._data_cache[item.href] = copy.deepcopy(item.data) elif item.media_type in OEB_STYLES: # I can't figure out how to make an efficient copy of the # in-memory CSSStylesheet, as deepcopy doesn't work (raises an # exception) self._data_cache[item.href] = css_parser.parseString( item.data.cssText, validate=False) def data(self, item): return self._data_cache.get(item.href, item.data) def cleanup_markup(self): for item in self.oeb.spine: root = self.data(item) # Remove empty script tags as they are pointless for tag in XPath('//h:script')(root): if not tag.text and not tag.get('src', False): tag.getparent().remove(tag) # Remove [ac]id attributes as they are used by this code for anchor # to offset mapping for tag in XPath('//*[@aid or @cid]')(root): tag.attrib.pop('aid', None), tag.attrib.pop('cid', None) def replace_resource_links(self): ''' Replace links to resources (raster images/fonts) with pointers to the MOBI record containing the resource. The pointers are of the form: kindle:embed:XXXX?mime=image/* The ?mime= is apparently optional and not used for fonts. 
''' def pointer(item, oref): ref = urlnormalize(item.abshref(oref)) idx = self.resources.item_map.get(ref, None) if idx is not None: is_image = self.resources.records[idx-1][:4] not in {b'FONT'} idx = to_ref(idx) if is_image: self.used_images.add(ref) return 'kindle:embed:%s?mime=%s'%(idx, self.resources.mime_map[ref]) else: return 'kindle:embed:%s'%idx return oref for item in self.oeb.manifest: if item.media_type in XML_DOCS: root = self.data(item) for tag in XPath('//h:img|//svg:image')(root): for attr, ref in iteritems(tag.attrib): if attr.split('}')[-1].lower() in {'src', 'href'}: tag.attrib[attr] = pointer(item, ref) for tag in XPath('//h:style')(root): if tag.text: sheet = css_parser.parseString(tag.text, validate=False) replacer = partial(pointer, item) css_parser.replaceUrls(sheet, replacer, ignoreImportRules=True) repl = sheet.cssText if isbytestring(repl): repl = repl.decode('utf-8') tag.text = '\n'+ repl + '\n' elif item.media_type in OEB_STYLES: sheet = self.data(item) replacer = partial(pointer, item) css_parser.replaceUrls(sheet, replacer, ignoreImportRules=True) def extract_css_into_flows(self): inlines = defaultdict(list) # Ensure identical <style>s not repeated sheets = {} passthrough = getattr(self.opts, 'mobi_passthrough', False) for item in self.oeb.manifest: if item.media_type in OEB_STYLES: sheet = self.data(item) if not passthrough and not self.opts.expand_css and hasattr(item.data, 'cssText'): condense_sheet(sheet) sheets[item.href] = len(self.flows) self.flows.append(sheet) def fix_import_rules(sheet): changed = False for rule in sheet.cssRules.rulesOfType(CSSRule.IMPORT_RULE): if rule.href: href = item.abshref(rule.href) idx = sheets.get(href, None) if idx is not None: idx = to_ref(idx) rule.href = 'kindle:flow:%s?mime=text/css'%idx changed = True return changed for item in self.oeb.spine: root = self.data(item) for link in XPath('//h:link[@href]')(root): href = item.abshref(link.get('href')) idx = sheets.get(href, None) if idx is not 
None: idx = to_ref(idx) link.set('href', 'kindle:flow:%s?mime=text/css'%idx) for tag in XPath('//h:style')(root): p = tag.getparent() idx = p.index(tag) raw = tag.text if not raw or not raw.strip(): extract(tag) continue sheet = css_parser.parseString(raw, validate=False) if fix_import_rules(sheet): raw = force_unicode(sheet.cssText, 'utf-8') repl = etree.Element(XHTML('link'), type='text/css', rel='stylesheet') repl.tail='\n' p.insert(idx, repl) extract(tag) inlines[raw].append(repl) for raw, elems in iteritems(inlines): idx = to_ref(len(self.flows)) self.flows.append(raw) for link in elems: link.set('href', 'kindle:flow:%s?mime=text/css'%idx) for item in self.oeb.manifest: if item.media_type in OEB_STYLES: sheet = self.data(item) if hasattr(sheet, 'cssRules'): fix_import_rules(sheet) for i, sheet in enumerate(tuple(self.flows)): if hasattr(sheet, 'cssText'): self.flows[i] = force_unicode(sheet.cssText, 'utf-8') def extract_svg_into_flows(self): images = {} for item in self.oeb.manifest: if item.media_type == SVG_MIME: data = self.data(item) images[item.href] = len(self.flows) self.flows.append(etree.tostring(data, encoding='UTF-8', with_tail=True, xml_declaration=True)) for item in self.oeb.spine: root = self.data(item) for svg in XPath('//svg:svg')(root): raw = etree.tostring(svg, encoding='unicode', with_tail=False) idx = len(self.flows) self.flows.append(raw) p = svg.getparent() pos = p.index(svg) img = etree.Element(XHTML('img'), src="kindle:flow:%s?mime=image/svg+xml"%to_ref(idx)) p.insert(pos, img) extract(svg) for img in XPath('//h:img[@src]')(root): src = img.get('src') abshref = item.abshref(src) idx = images.get(abshref, None) if idx is not None: img.set('src', 'kindle:flow:%s?mime=image/svg+xml'% to_ref(idx)) def replace_internal_links_with_placeholders(self): self.link_map = {} count = 0 hrefs = {item.href for item in self.oeb.spine} for item in self.oeb.spine: root = self.data(item) for a in XPath('//h:a[@href]')(root): count += 1 ref = 
item.abshref(a.get('href')) href, _, frag = ref.partition('#') try: href = urlnormalize(href) except ValueError: # a non utf-8 quoted url? Since we cannot interpret it, pass it through. pass if href in hrefs: placeholder = 'kindle:pos:fid:0000:off:%s'%to_href(count) self.link_map[placeholder] = (href, frag) a.set('href', placeholder) def insert_aid_attributes(self): self.id_map = {} cid = 0 for i, item in enumerate(self.oeb.spine): root = self.data(item) aidbase = i * int(1e6) j = 0 def in_table(elem): p = elem.getparent() if p is None: return False if barename(p.tag).lower() == 'table': return True return in_table(p) for tag in root.iterdescendants(etree.Element): id_ = tag.attrib.get('id', None) if id_ is None and tag.tag == XHTML('a'): # Can happen during tweaking id_ = tag.attrib.get('name', None) if id_ is not None: tag.attrib['id'] = id_ tagname = barename(tag.tag).lower() if id_ is not None or tagname in aid_able_tags: if tagname == 'table' or in_table(tag): # The Kindle renderer barfs on large tables that have # aid on any of their tags. 
See # https://bugs.launchpad.net/bugs/1489495 if id_: cid += 1 val = 'c%d' % cid self.id_map[(item.href, id_)] = val tag.set('cid', val) else: aid = to_base(aidbase + j, base=32) tag.set('aid', aid) if tag.tag == XHTML('body'): self.id_map[(item.href, '')] = aid if id_ is not None: self.id_map[(item.href, id_)] = aid j += 1 def chunk_it_up(self): placeholder_map = {} for placeholder, x in iteritems(self.link_map): href, frag = x aid = self.id_map.get(x, None) if aid is None: aid = self.id_map.get((href, '')) placeholder_map[placeholder] = aid chunker = Chunker(self.oeb, self.data, placeholder_map) for x in ('skel_table', 'chunk_table', 'aid_offset_map'): setattr(self, x, getattr(chunker, x)) self.flows[0] = chunker.text def create_text_records(self): self.flows = [x.encode('utf-8') if isinstance(x, str) else x for x in self.flows] text = b''.join(self.flows) self.text_length = len(text) text = BytesIO(text) nrecords = 0 records_size = 0 self.uncompressed_record_lengths = [] if self.compress: self.oeb.logger.info('\tCompressing markup...') while text.tell() < self.text_length: data, overlap = create_text_record(text) self.uncompressed_record_lengths.append(len(data)) if self.compress: data = compress_doc(data) data += overlap data += pack(b'>B', len(overlap)) self.records.append(data) records_size += len(data) nrecords += 1 self.last_text_record_idx = nrecords self.first_non_text_record_idx = nrecords + 1 # Pad so that the next records starts at a 4 byte boundary if records_size % 4 != 0: self.records.append(b'\x00'*(records_size % 4)) self.first_non_text_record_idx += 1 def create_fdst_records(self): FDST = namedtuple('Flow', 'start end') entries = [] self.fdst_table = [] for i, flow in enumerate(self.flows): start = 0 if i == 0 else self.fdst_table[-1].end self.fdst_table.append(FDST(start, start + len(flow))) entries.extend(self.fdst_table[-1]) rec = (b'FDST' + pack(b'>LL', 12, len(self.fdst_table)) + pack(b'>%dL'%len(entries), *entries)) self.fdst_records = 
[rec] self.fdst_count = len(self.fdst_table) def create_indices(self): self.skel_records = SkelIndex(self.skel_table)() self.chunk_records = ChunkIndex(self.chunk_table)() self.ncx_records = [] toc = self.oeb.toc entries = [] is_periodical = self.opts.mobi_periodical if toc.count() < 1: self.log.warn('Document has no ToC, MOBI will have no NCX index') return # Flatten the ToC into a depth first list fl = toc.iterdescendants() for i, item in enumerate(fl): entry = {'id': id(item), 'index': i, 'label':(item.title or _('Unknown')), 'children':[]} entry['depth'] = getattr(item, 'ncx_hlvl', 0) p = getattr(item, 'ncx_parent', None) if p is not None: entry['parent_id'] = p for child in item: child.ncx_parent = entry['id'] child.ncx_hlvl = entry['depth'] + 1 entry['children'].append(id(child)) if is_periodical: if item.author: entry['author'] = item.author if item.description: entry['description'] = item.description entries.append(entry) href = item.href or '' href, frag = href.partition('#')[0::2] aid = self.id_map.get((href, frag), None) if aid is None: aid = self.id_map.get((href, ''), None) if aid is None: pos, fid = 0, 0 chunk = self.chunk_table[pos] offset = chunk.insert_pos + fid else: pos, fid, offset = self.aid_offset_map[aid] entry['pos_fid'] = (pos, fid) entry['offset'] = offset # The Kindle requires entries to be sorted by (depth, playorder) # However, I cannot figure out how to deal with non linear ToCs, i.e. # ToCs whose nth entry at depth d has an offset after its n+k entry at # the same depth, so we sort on (depth, offset) instead. This re-orders # the ToC to be linear. A non-linear ToC causes section to section # jumping to not work. kindlegen somehow handles non-linear tocs, but I # cannot figure out how. 
original = sorted(entries, key=lambda entry: (entry['depth'], entry['index'])) linearized = sorted(entries, key=lambda entry: (entry['depth'], entry['offset'])) is_non_linear = original != linearized entries = linearized is_non_linear = False # False as we are using the linearized entries if is_non_linear: for entry in entries: entry['kind'] = 'chapter' for i, entry in enumerate(entries): entry['index'] = i id_to_index = {entry['id']:entry['index'] for entry in entries} # Write the hierarchical information for entry in entries: children = entry.pop('children') if children: entry['first_child'] = id_to_index[children[0]] entry['last_child'] = id_to_index[children[-1]] if 'parent_id' in entry: entry['parent'] = id_to_index[entry.pop('parent_id')] # Write the lengths def get_next_start(entry): enders = [e['offset'] for e in entries if e['depth'] <= entry['depth'] and e['offset'] > entry['offset']] if enders: return min(enders) return len(self.flows[0]) for entry in entries: entry['length'] = get_next_start(entry) - entry['offset'] self.has_tbs = apply_trailing_byte_sequences(entries, self.records, self.uncompressed_record_lengths) idx_type = NonLinearNCXIndex if is_non_linear else NCXIndex self.ncx_records = idx_type(entries)() def create_guide(self): self.start_offset = None self.guide_table = [] self.guide_records = [] GuideRef = namedtuple('GuideRef', 'title type pos_fid') for ref in self.oeb.guide.values(): href, frag = ref.href.partition('#')[0::2] aid = self.id_map.get((href, frag), None) if aid is None: aid = self.id_map.get((href, '')) if aid is None: continue pos, fid, offset = self.aid_offset_map[aid] if is_guide_ref_start(ref): self.start_offset = offset self.guide_table.append(GuideRef(ref.title or _('Unknown'), ref.type, (pos, fid))) if self.guide_table: self.guide_table.sort(key=lambda x:x.type) # Needed by the Kindle self.guide_records = GuideIndex(self.guide_table)() def create_kf8_book(oeb, opts, resources, for_joint=False): writer = KF8Writer(oeb, 
opts, resources) return KF8Book(writer, for_joint=for_joint)
20,076
Python
.py
432
33.069444
108
0.543876
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,485
exth.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/exth.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import re from io import BytesIO from struct import pack from calibre.constants import ismacos, iswindows from calibre.ebooks.metadata import authors_to_sort_string from calibre.ebooks.mobi.utils import to_base, utf8_text from calibre.utils.localization import lang_as_iso639_1 from polyglot.builtins import iteritems EXTH_CODES = { 'creator': 100, 'publisher': 101, 'description': 103, 'identifier': 104, 'subject': 105, 'pubdate': 106, 'review': 107, 'contributor': 108, 'rights': 109, 'type': 111, 'source': 112, 'versionnumber': 114, 'startreading': 116, 'kf8_header_index': 121, 'num_of_resources': 125, 'kf8_thumbnail_uri': 129, 'kf8_unknown_count': 131, 'coveroffset': 201, 'thumboffset': 202, 'hasfakecover': 203, 'lastupdatetime': 502, 'title': 503, 'language': 524, 'primary_writing_mode': 525, 'page_progression_direction': 527, 'override_kindle_fonts': 528, } COLLAPSE_RE = re.compile(r'[ \t\r\n\v]+') def build_exth(metadata, prefer_author_sort=False, is_periodical=False, share_not_sync=True, cover_offset=None, thumbnail_offset=None, start_offset=None, mobi_doctype=2, num_of_resources=None, kf8_unknown_count=0, be_kindlegen2=False, kf8_header_index=None, page_progression_direction=None, primary_writing_mode=None): exth = BytesIO() nrecs = 0 for term in metadata: if term not in EXTH_CODES: continue code = EXTH_CODES[term] items = metadata[term] if term == 'creator': if prefer_author_sort: creators = [authors_to_sort_string([str(c)]) for c in items] else: creators = [str(c) for c in items] items = creators elif term == 'rights': try: rights = utf8_text(str(metadata.rights[0])) except: rights = b'Unknown' exth.write(pack(b'>II', EXTH_CODES['rights'], len(rights) + 8)) exth.write(rights) nrecs += 1 continue for item in items: data = str(item) if term != 'description': data = COLLAPSE_RE.sub(' ', data) if term == 'identifier': if 
data.lower().startswith('urn:isbn:'): data = data[9:] elif item.scheme.lower() == 'isbn': pass else: continue if term == 'language': d2 = lang_as_iso639_1(data) if d2: data = d2 data = utf8_text(data) exth.write(pack(b'>II', code, len(data) + 8)) exth.write(data) nrecs += 1 # Write UUID as ASIN uuid = None from calibre.ebooks.oeb.base import OPF for x in metadata['identifier']: if (x.get(OPF('scheme'), None).lower() == 'uuid' or str(x).startswith('urn:uuid:')): uuid = str(x).split(':')[-1] break if uuid is None: from uuid import uuid4 uuid = str(uuid4()) if isinstance(uuid, str): uuid = uuid.encode('utf-8') if not share_not_sync: exth.write(pack(b'>II', 113, len(uuid) + 8)) exth.write(uuid) nrecs += 1 # Write UUID as SOURCE c_uuid = b'calibre:%s' % uuid exth.write(pack(b'>II', 112, len(c_uuid) + 8)) exth.write(c_uuid) nrecs += 1 # Write cdetype if not is_periodical: if not share_not_sync: exth.write(pack(b'>II', 501, 12)) exth.write(b'EBOK') nrecs += 1 else: ids = {0x101:b'NWPR', 0x103:b'MAGZ'}.get(mobi_doctype, None) if ids: exth.write(pack(b'>II', 501, 12)) exth.write(ids) nrecs += 1 # Add a publication date entry if metadata['date']: datestr = str(metadata['date'][0]) elif metadata['timestamp']: datestr = str(metadata['timestamp'][0]) if datestr is None: raise ValueError("missing date or timestamp") datestr = datestr.encode('utf-8') exth.write(pack(b'>II', EXTH_CODES['pubdate'], len(datestr) + 8)) exth.write(datestr) nrecs += 1 if is_periodical: exth.write(pack(b'>II', EXTH_CODES['lastupdatetime'], len(datestr) + 8)) exth.write(datestr) nrecs += 1 if be_kindlegen2: mv = 200 if iswindows else 202 if ismacos else 201 vals = {204:mv, 205:2, 206:9, 207:0} elif is_periodical: # Pretend to be amazon's super secret periodical generator vals = {204:201, 205:2, 206:0, 207:101} else: # Pretend to be kindlegen 1.2 vals = {204:201, 205:1, 206:2, 207:33307} for code, val in iteritems(vals): exth.write(pack(b'>III', code, 12, val)) nrecs += 1 if be_kindlegen2: revnum = 
b'0730-890adc2' exth.write(pack(b'>II', 535, 8 + len(revnum)) + revnum) nrecs += 1 if cover_offset is not None: exth.write(pack(b'>III', EXTH_CODES['coveroffset'], 12, cover_offset)) exth.write(pack(b'>III', EXTH_CODES['hasfakecover'], 12, 0)) nrecs += 2 if thumbnail_offset is not None: exth.write(pack(b'>III', EXTH_CODES['thumboffset'], 12, thumbnail_offset)) thumbnail_uri_str = ('kindle:embed:%s' %(to_base(thumbnail_offset, base=32, min_num_digits=4))).encode('utf-8') exth.write(pack(b'>II', EXTH_CODES['kf8_thumbnail_uri'], len(thumbnail_uri_str) + 8)) exth.write(thumbnail_uri_str) nrecs += 2 if start_offset is not None: try: len(start_offset) except TypeError: start_offset = [start_offset] for so in start_offset: if so is not None: exth.write(pack(b'>III', EXTH_CODES['startreading'], 12, so)) nrecs += 1 if kf8_header_index is not None: exth.write(pack(b'>III', EXTH_CODES['kf8_header_index'], 12, kf8_header_index)) nrecs += 1 if num_of_resources is not None: exth.write(pack(b'>III', EXTH_CODES['num_of_resources'], 12, num_of_resources)) nrecs += 1 if kf8_unknown_count is not None: exth.write(pack(b'>III', EXTH_CODES['kf8_unknown_count'], 12, kf8_unknown_count)) nrecs += 1 if primary_writing_mode: pwm = primary_writing_mode.encode('utf-8') exth.write(pack(b'>II', EXTH_CODES['primary_writing_mode'], len(pwm) + 8)) exth.write(pwm) nrecs += 1 if page_progression_direction in {'rtl', 'ltr', 'default'}: ppd = page_progression_direction.encode('ascii') exth.write(pack(b'>II', EXTH_CODES['page_progression_direction'], len(ppd) + 8)) exth.write(ppd) nrecs += 1 exth.write(pack(b'>II', EXTH_CODES['override_kindle_fonts'], len(b'true') + 8)) exth.write(b'true') nrecs += 1 exth = exth.getvalue() trail = len(exth) % 4 pad = b'\0' * (4 - trail) # Always pad w/ at least 1 byte exth = [b'EXTH', pack(b'>II', len(exth) + 12, nrecs), exth, pad] return b''.join(exth)
7,438
Python
.py
205
27.658537
119
0.569109
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,486
tbs.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/writer8/tbs.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' DOC = ''' Trailing Byte Sequences contain information about which index entries touch a particular text record. Every text records has a set of trailing byte sequences. In order to figure out the sequence for a given text record, you have to first calculate all the indices that start, end, span and are contained within that text record. Then arrange the indices into 'strands', where each strand is a hierarchical progression from the top level index down. For the exact algorithm, see separate_strands(). The strands are then encoded into 'sequences', see encode_strands_as_sequences() and finally the sequences are turned into bytes. ''' from collections import OrderedDict, namedtuple from operator import attrgetter from calibre.ebooks.mobi.utils import encode_tbs, encode_trailing_data from polyglot.builtins import iteritems, itervalues Entry = namedtuple('IndexEntry', 'index start length depth parent ' 'first_child last_child title action start_offset length_offset ' 'text_record_length') def fill_entry(entry, start_offset, text_record_length): length_offset = start_offset + entry.length if start_offset < 0: action = 'spans' if length_offset > text_record_length else 'ends' else: action = 'starts' if length_offset > text_record_length else 'completes' return Entry(*(entry[:-4] + (action, start_offset, length_offset, text_record_length))) def populate_strand(parent, entries): ans = [parent] children = [c for c in entries if c.parent == parent.index] if children: # Add first child to this strand, and recurse downwards child = children[0] entries.remove(child) ans += populate_strand(child, entries) else: # Add any entries at the same depth that form a contiguous set of # indices and belong to the same parent (these can all be # represented as a single sequence with the 0b100 flag) current_index = parent.index siblings = [] for 
entry in list(entries): if (entry.depth == parent.depth and entry.parent == parent.parent and entry.index == current_index+1): current_index += 1 entries.remove(entry) children = [c for c in entries if c.parent == entry.index] if children: siblings += populate_strand(entry, entries) break # Cannot add more siblings, as we have added children else: siblings.append(entry) ans += siblings return ans def separate_strands(entries): ans = [] while entries: top, entries = entries[0], entries[1:] strand = populate_strand(top, entries) layers = OrderedDict() for entry in strand: if entry.depth not in layers: layers[entry.depth] = [] layers[entry.depth].append(entry) ans.append(layers) return ans def collect_indexing_data(entries, text_record_lengths): ''' For every text record calculate which index entries start, end, span or are contained within that record. Arrange these entries in 'strands'. ''' data = [] entries = sorted(entries, key=attrgetter('start')) record_start = 0 for rec_length in text_record_lengths: next_record_start = record_start + rec_length local_entries = [] for entry in entries: if entry.start >= next_record_start: # No more entries overlap this record break if entry.start + entry.length <= record_start: # This entry does not touch this record continue local_entries.append(fill_entry(entry, entry.start - record_start, rec_length)) strands = separate_strands(local_entries) data.append(strands) record_start += rec_length return data class NegativeStrandIndex(Exception): pass def encode_strands_as_sequences(strands, tbs_type=8): ''' Encode the list of strands for a single text record into a list of sequences, ready to be converted into TBS bytes. 
''' ans = [] last_index = None max_length_offset = 0 first_entry = None for strand in strands: for entries in itervalues(strand): for entry in entries: if first_entry is None: first_entry = entry if entry.length_offset > max_length_offset: max_length_offset = entry.length_offset for strand in strands: strand_seqs = [] for depth, entries in iteritems(strand): extra = {} if entries[-1].action == 'spans': extra[0b1] = 0 elif False and ( entries[-1].length_offset < entries[-1].text_record_length and entries[-1].action == 'completes' and entries[-1].length_offset != max_length_offset): # I can't figure out exactly when kindlegen decides to insert # this, so disable it for now. extra[0b1] = entries[-1].length_offset if entries[0] is first_entry: extra[0b10] = tbs_type if len(entries) > 1: extra[0b100] = len(entries) index = entries[0].index - (entries[0].parent or 0) if ans and not strand_seqs: # We are in the second or later strands, so we need to use a # special flag and index value. The index value is the entry # index - the index of the last entry in the previous strand. index = last_index - entries[0].index if index < 0: if tbs_type == 5: index = -index else: raise NegativeStrandIndex() else: extra[0b1000] = True last_index = entries[-1].index strand_seqs.append((index, extra)) # Handle the case of consecutive action == 'spans' entries. In this # case, the 0b1 = 0 flag should be present only in the last consecutive # spans entry. 
for i, seq in enumerate(strand_seqs): if i + 1 < len(strand_seqs): if 0b1 in seq[1] and 0b1 in strand_seqs[i+1][1]: del seq[1][0b1] ans.extend(strand_seqs) return ans def sequences_to_bytes(sequences): ans = [] flag_size = 3 for val, extra in sequences: ans.append(encode_tbs(val, extra, flag_size)) flag_size = 4 # only the first sequence has flag size 3 as all # subsequent sequences could need the 0b1000 flag return b''.join(ans) def calculate_all_tbs(indexing_data, tbs_type=8): rmap = {} for i, strands in enumerate(indexing_data): sequences = encode_strands_as_sequences(strands, tbs_type=tbs_type) tbs_bytes = sequences_to_bytes(sequences) rmap[i+1] = tbs_bytes return rmap def apply_trailing_byte_sequences(index_table, records, text_record_lengths): entries = tuple(Entry(r['index'], r['offset'], r['length'], r['depth'], r.get('parent', None), r.get('first_child', None), r.get('last_child', None), r['label'], None, None, None, None) for r in index_table) indexing_data = collect_indexing_data(entries, text_record_lengths) try: rmap = calculate_all_tbs(indexing_data) except NegativeStrandIndex: rmap = calculate_all_tbs(indexing_data, tbs_type=5) for i, tbs_bytes in iteritems(rmap): records[i] += encode_trailing_data(tbs_bytes) return True
7,879
Python
.py
174
35.747126
114
0.622848
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,487
headers.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/headers.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import binascii import datetime import numbers import os import struct from calibre.ebooks.mobi.debug import format_bytes from calibre.ebooks.mobi.langcodes import main_language, sub_language from calibre.ebooks.mobi.reader.headers import NULL_INDEX from calibre.ebooks.mobi.utils import get_trailing_data from calibre.utils.date import utc_tz from polyglot.builtins import iteritems # PalmDB {{{ class PalmDOCAttributes: class Attr: def __init__(self, name, field, val): self.name = name self.val = val & field def __str__(self): return '%s: %s'%(self.name, bool(self.val)) __unicode__ = __str__ def __init__(self, raw): self.val = struct.unpack(b'<H', raw)[0] self.attributes = [] for name, field in [('Read Only', 0x02), ('Dirty AppInfoArea', 0x04), ('Backup this database', 0x08), ('Okay to install newer over existing copy, if present on PalmPilot', 0x10), ('Force the PalmPilot to reset after this database is installed', 0x12), ('Don\'t allow copy of file to be beamed to other Pilot', 0x14)]: self.attributes.append(PalmDOCAttributes.Attr(name, field, self.val)) def __str__(self): attrs = '\n\t'.join([str(x) for x in self.attributes]) return 'PalmDOC Attributes: %s\n\t%s'%(bin(self.val), attrs) __unicode__ = __str__ class PalmDB: def __init__(self, raw): self.raw = raw if self.raw.startswith(b'TPZ'): raise ValueError('This is a Topaz file') self.name = self.raw[:32].replace(b'\x00', b'') self.attributes = PalmDOCAttributes(self.raw[32:34]) self.version = struct.unpack(b'>H', self.raw[34:36])[0] palm_epoch = datetime.datetime(1904, 1, 1, tzinfo=utc_tz) self.creation_date_raw = struct.unpack(b'>I', self.raw[36:40])[0] self.creation_date = (palm_epoch + datetime.timedelta(seconds=self.creation_date_raw)) self.modification_date_raw = struct.unpack(b'>I', self.raw[40:44])[0] self.modification_date = (palm_epoch + 
datetime.timedelta(seconds=self.modification_date_raw)) self.last_backup_date_raw = struct.unpack(b'>I', self.raw[44:48])[0] self.last_backup_date = (palm_epoch + datetime.timedelta(seconds=self.last_backup_date_raw)) self.modification_number = struct.unpack(b'>I', self.raw[48:52])[0] self.app_info_id = self.raw[52:56] self.sort_info_id = self.raw[56:60] self.type = self.raw[60:64] self.creator = self.raw[64:68] self.ident = self.type + self.creator if self.ident not in (b'BOOKMOBI', b'TEXTREAD'): raise ValueError('Unknown book ident: %r'%self.ident) self.last_record_uid, = struct.unpack(b'>I', self.raw[68:72]) self.next_rec_list_id = self.raw[72:76] self.number_of_records, = struct.unpack(b'>H', self.raw[76:78]) def __str__(self): ans = ['*'*20 + ' PalmDB Header '+ '*'*20] ans.append('Name: %r'%self.name) ans.append(str(self.attributes)) ans.append('Version: %s'%self.version) ans.append('Creation date: %s (%s)'%(self.creation_date.isoformat(), self.creation_date_raw)) ans.append('Modification date: %s (%s)'%(self.modification_date.isoformat(), self.modification_date_raw)) ans.append('Backup date: %s (%s)'%(self.last_backup_date.isoformat(), self.last_backup_date_raw)) ans.append('Modification number: %s'%self.modification_number) ans.append('App Info ID: %r'%self.app_info_id) ans.append('Sort Info ID: %r'%self.sort_info_id) ans.append('Type: %r'%self.type) ans.append('Creator: %r'%self.creator) ans.append('Last record UID +1: %r'%self.last_record_uid) ans.append('Next record list id: %r'%self.next_rec_list_id) ans.append('Number of records: %s'%self.number_of_records) return '\n'.join(ans) __unicode__ = __str__ # }}} class Record: # {{{ def __init__(self, raw, header): self.offset, self.flags, self.uid = header self.raw = raw @property def header(self): return 'Offset: %d Flags: %d UID: %d First 4 bytes: %r Size: %d'%(self.offset, self.flags, self.uid, self.raw[:4], len(self.raw)) # }}} # EXTH {{{ class EXTHRecord: def __init__(self, type_, data, length): 
self.type = type_ self.data = data self.length = length self.name = { 1 : 'Drm Server Id', 2 : 'Drm Commerce Id', 3 : 'Drm Ebookbase Book Id', 100 : 'Creator', 101 : 'Publisher', 102 : 'Imprint', 103 : 'Description', 104 : 'ISBN', 105 : 'Subject', 106 : 'Published', 107 : 'Review', 108 : 'Contributor', 109 : 'Rights', 110 : 'SubjectCode', 111 : 'Type', 112 : 'Source', 113 : 'ASIN', 114 : 'versionNumber', 115 : 'sample', 116 : 'StartOffset', 117 : 'Adult', 118 : 'Price', 119 : 'Currency', 121 : 'KF8_Boundary_Section', 122 : 'fixed-layout', 123 : 'book-type', 124 : 'orientation-lock', 125 : 'KF8_Count_of_Resources_Fonts_Images', 126 : 'original-resolution', 127 : 'zero-gutter', 128 : 'zero-margin', 129 : 'KF8_Masthead/Cover_Image', 131 : 'KF8_Unidentified_Count', 132 : 'RegionMagnification', 200 : 'DictShortName', 201 : 'CoverOffset', 202 : 'ThumbOffset', 203 : 'Fake Cover', 204 : 'Creator Software', 205 : 'Creator Major Version', # '>I' 206 : 'Creator Minor Version', # '>I' 207 : 'Creator Build Number', # '>I' 208 : 'Watermark', 209 : 'Tamper Proof Keys [hex]', 300 : 'Font Signature [hex]', 301 : 'Clipping Limit [3xx]', # percentage '>B' 401 : 'Clipping Limit', # percentage '>B' 402 : 'Publisher Limit', 404 : 'Text to Speech Disabled', # '>B' 1 - TTS disabled 0 - TTS enabled 501 : 'CDE Type', # 4 chars (PDOC, EBOK, MAGZ, ...) 502 : 'last_update_time', 503 : 'Updated Title', 504 : 'ASIN [5xx]', 508 : 'Unknown Title Furigana?', 517 : 'Unknown Creator Furigana?', 522 : 'Unknown Publisher Furigana?', 524 : 'Language', 525 : 'primary-writing-mode', 527 : 'page-progression-direction', 528 : 'Override Kindle fonts', 534 : 'Input Source Type', 535 : 'Kindlegen Build-Rev Number', 536 : 'Container Info', # CONT_Header is 0, Ends with CONTAINER_BOUNDARY (or Asset_Type?) 
538 : 'Container Resolution', 539 : 'Container Mimetype', 543 : 'Container id', # FONT_CONTAINER, BW_CONTAINER, HD_CONTAINER }.get(self.type, repr(self.type)) if (self.name in {'sample', 'StartOffset', 'CoverOffset', 'ThumbOffset', 'Fake Cover', 'Creator Software', 'Creator Major Version', 'Creator Minor Version', 'Creator Build Number', 'Clipping Limit (3xx)', 'Clipping Limit', 'Publisher Limit', 'Text to Speech Disabled'} or self.type in {121, 125, 131}): if self.length == 9: self.data, = struct.unpack(b'>B', self.data) elif self.length == 10: self.data, = struct.unpack(b'>H', self.data) else: self.data, = struct.unpack(b'>L', self.data) elif self.type in {209, 300}: self.data = binascii.hexlify(self.data) def __str__(self): return '%s (%d): %r'%(self.name, self.type, self.data) class EXTHHeader: def __init__(self, raw): self.raw = raw if not self.raw.startswith(b'EXTH'): raise ValueError('EXTH header does not start with EXTH') self.length, = struct.unpack(b'>L', self.raw[4:8]) self.count, = struct.unpack(b'>L', self.raw[8:12]) pos = 12 self.records = [] for i in range(self.count): pos = self.read_record(pos) self.records.sort(key=lambda x:x.type) self.rmap = {x.type:x for x in self.records} def __getitem__(self, type_): return self.rmap.__getitem__(type_).data def get(self, type_, default=None): ans = self.rmap.get(type_, default) return getattr(ans, 'data', default) def read_record(self, pos): type_, length = struct.unpack(b'>LL', self.raw[pos:pos+8]) data = self.raw[(pos+8):(pos+length)] self.records.append(EXTHRecord(type_, data, length)) return pos + length @property def kf8_header_index(self): ans = self.get(121, None) if ans == NULL_INDEX: ans = None return ans def __str__(self): ans = ['*'*20 + ' EXTH Header '+ '*'*20] ans.append('EXTH header length: %d'%self.length) ans.append('Number of EXTH records: %d'%self.count) ans.append('EXTH records...') for r in self.records: ans.append(str(r)) return '\n'.join(ans) __unicode__ = __str__ # }}} class MOBIHeader: 
# {{{ def __init__(self, record0, offset): self.raw = record0.raw self.header_offset = offset self.compression_raw = self.raw[:2] self.compression = {1: 'No compression', 2: 'PalmDoc compression', 17480: 'HUFF/CDIC compression'}.get(struct.unpack(b'>H', self.compression_raw)[0], repr(self.compression_raw)) self.unused = self.raw[2:4] self.text_length, = struct.unpack(b'>I', self.raw[4:8]) self.number_of_text_records, self.text_record_size = \ struct.unpack(b'>HH', self.raw[8:12]) self.encryption_type_raw, = struct.unpack(b'>H', self.raw[12:14]) self.encryption_type = { 0: 'No encryption', 1: 'Old mobipocket encryption', 2: 'Mobipocket encryption' }.get(self.encryption_type_raw, repr(self.encryption_type_raw)) self.unknown = self.raw[14:16] self.identifier = self.raw[16:20] if self.identifier != b'MOBI': raise ValueError('Identifier %r unknown'%self.identifier) self.length, = struct.unpack(b'>I', self.raw[20:24]) self.type_raw, = struct.unpack(b'>I', self.raw[24:28]) self.type = { 2 : 'Mobipocket book', 3 : 'PalmDOC book', 4 : 'Audio', 257 : 'News', 258 : 'News Feed', 259 : 'News magazine', 513 : 'PICS', 514 : 'Word', 515 : 'XLS', 516 : 'PPT', 517 : 'TEXT', 518 : 'HTML', }.get(self.type_raw, repr(self.type_raw)) self.encoding_raw, = struct.unpack(b'>I', self.raw[28:32]) self.encoding = { 1252 : 'cp1252', 65001: 'utf-8', }.get(self.encoding_raw, repr(self.encoding_raw)) self.uid = self.raw[32:36] self.file_version, = struct.unpack(b'>I', self.raw[36:40]) self.meta_orth_indx, self.meta_infl_indx = struct.unpack( b'>II', self.raw[40:48]) self.secondary_index_record, = struct.unpack(b'>I', self.raw[48:52]) self.reserved = self.raw[52:80] self.first_non_book_record, = struct.unpack(b'>I', self.raw[80:84]) self.fullname_offset, = struct.unpack(b'>I', self.raw[84:88]) self.fullname_length, = struct.unpack(b'>I', self.raw[88:92]) self.locale_raw, = struct.unpack(b'>I', self.raw[92:96]) langcode = self.locale_raw langid = langcode & 0xFF sublangid = (langcode >> 10) & 0xFF 
self.language = main_language.get(langid, 'ENGLISH') self.sublanguage = sub_language.get(sublangid, 'NEUTRAL') self.input_language = self.raw[96:100] self.output_langauage = self.raw[100:104] self.min_version, = struct.unpack(b'>I', self.raw[104:108]) self.first_image_index, = struct.unpack(b'>I', self.raw[108:112]) self.huffman_record_offset, = struct.unpack(b'>I', self.raw[112:116]) self.huffman_record_count, = struct.unpack(b'>I', self.raw[116:120]) self.datp_record_offset, = struct.unpack(b'>I', self.raw[120:124]) self.datp_record_count, = struct.unpack(b'>I', self.raw[124:128]) self.exth_flags, = struct.unpack(b'>I', self.raw[128:132]) self.has_exth = bool(self.exth_flags & 0x40) self.has_drm_data = self.length >= 174 and len(self.raw) >= 184 if self.has_drm_data: self.unknown3 = self.raw[132:168] self.drm_offset, self.drm_count, self.drm_size, self.drm_flags = \ struct.unpack(b'>4I', self.raw[168:184]) self.has_extra_data_flags = self.length >= 232 and len(self.raw) >= 232+16 self.has_fcis_flis = False self.has_multibytes = self.has_indexing_bytes = self.has_uncrossable_breaks = False self.extra_data_flags = 0 if self.has_extra_data_flags: self.unknown4 = self.raw[184:192] if self.file_version < 8: self.first_text_record, self.last_text_record = \ struct.unpack_from(b'>HH', self.raw, 192) self.fdst_count = struct.unpack_from(b'>L', self.raw, 196) else: self.fdst_idx, self.fdst_count = struct.unpack_from(b'>LL', self.raw, 192) if self.fdst_count <= 1: self.fdst_idx = NULL_INDEX (self.fcis_number, self.fcis_count, self.flis_number, self.flis_count) = struct.unpack(b'>IIII', self.raw[200:216]) self.unknown6 = self.raw[216:224] self.srcs_record_index = struct.unpack(b'>I', self.raw[224:228])[0] self.num_srcs_records = struct.unpack(b'>I', self.raw[228:232])[0] self.unknown7 = self.raw[232:240] self.extra_data_flags = struct.unpack(b'>I', self.raw[240:244])[0] self.has_multibytes = bool(self.extra_data_flags & 0b1) self.has_indexing_bytes = 
bool(self.extra_data_flags & 0b10) self.has_uncrossable_breaks = bool(self.extra_data_flags & 0b100) self.primary_index_record, = struct.unpack(b'>I', self.raw[244:248]) if self.length >= 248: (self.sect_idx, self.skel_idx, self.datp_idx, self.oth_idx ) = struct.unpack_from(b'>4L', self.raw, 248) self.unknown9 = self.raw[264:self.length+16] if self.meta_orth_indx not in {NULL_INDEX, self.sect_idx}: raise ValueError('KF8 header has different Meta orth and ' 'section indices') # The following are all relative to the position of the header record # make them absolute for ease of debugging self.relative_records = {'sect_idx', 'skel_idx', 'datp_idx', 'oth_idx', 'meta_orth_indx', 'huffman_record_offset', 'first_non_book_record', 'datp_record_offset', 'fcis_number', 'flis_number', 'primary_index_record', 'fdst_idx', 'first_image_index'} for x in self.relative_records: if hasattr(self, x) and getattr(self, x) != NULL_INDEX: setattr(self, x, self.header_offset+getattr(self, x)) # Try to find the first non-text record self.first_resource_record = offset + 1 + self.number_of_text_records # Default to first record after all text records pointer = min(getattr(self, 'first_non_book_record', NULL_INDEX), getattr(self, 'first_image_index', NULL_INDEX)) if pointer != NULL_INDEX: self.first_resource_record = max(pointer, self.first_resource_record) self.last_resource_record = NULL_INDEX if self.has_exth: self.exth_offset = 16 + self.length self.exth = EXTHHeader(self.raw[self.exth_offset:]) self.end_of_exth = self.exth_offset + self.exth.length self.bytes_after_exth = self.raw[self.end_of_exth:self.fullname_offset] if self.exth.kf8_header_index is not None and offset == 0: # MOBI 6 header in a joint file, adjust self.last_resource_record self.last_resource_record = self.exth.kf8_header_index - 2 def __str__(self): ans = ['*'*20 + ' MOBI %d Header '%self.file_version+ '*'*20] a = ans.append def i(d, x): x = 'NULL' if x == NULL_INDEX else x a('%s: %s'%(d, x)) def r(d, attr): x = 
getattr(self, attr) if attr in self.relative_records and x != NULL_INDEX: a('%s: Absolute: %d Relative: %d'%(d, x, x-self.header_offset)) else: i(d, x) a('Compression: %s'%self.compression) a('Unused: %r'%self.unused) a('Text length: %d'%self.text_length) a('Number of text records: %d'%self.number_of_text_records) a('Text record size: %d'%self.text_record_size) a('Encryption: %s'%self.encryption_type) a('Unknown: %r'%self.unknown) a('Identifier: %r'%self.identifier) a('Header length: %d'% self.length) a('Type: %s'%self.type) a('Encoding: %s'%self.encoding) a('UID: %r'%self.uid) a('File version: %d'%self.file_version) r('Meta Orth Index', 'meta_orth_indx') r('Meta Infl Index', 'meta_infl_indx') r('Secondary index record', 'secondary_index_record') a('Reserved: %r'%self.reserved) r('First non-book record', 'first_non_book_record') a('Full name offset: %d'%self.fullname_offset) a('Full name length: %d bytes'%self.fullname_length) a('Langcode: %r'%self.locale_raw) a('Language: %s'%self.language) a('Sub language: %s'%self.sublanguage) a('Input language: %r'%self.input_language) a('Output language: %r'%self.output_langauage) a('Min version: %d'%self.min_version) r('First Image index', 'first_image_index') r('Huffman record offset', 'huffman_record_offset') a('Huffman record count: %d'%self.huffman_record_count) r('Huffman table offset', 'datp_record_offset') a('Huffman table length: %r'%self.datp_record_count) a('EXTH flags: %s (%s)'%(bin(self.exth_flags)[2:], self.has_exth)) if self.has_drm_data: a('Unknown3: %r'%self.unknown3) r('DRM Offset', 'drm_offset') a('DRM Count: %s'%self.drm_count) a('DRM Size: %s'%self.drm_size) a('DRM Flags: %r'%self.drm_flags) if self.has_extra_data_flags: a('Unknown4: %r'%self.unknown4) if hasattr(self, 'first_text_record'): a('First content record: %d'%self.first_text_record) a('Last content record: %d'%self.last_text_record) else: r('FDST Index', 'fdst_idx') a('FDST Count: %d'% self.fdst_count) r('FCIS number', 'fcis_number') a('FCIS 
count: %d'% self.fcis_count) r('FLIS number', 'flis_number') a('FLIS count: %d'% self.flis_count) a('Unknown6: %r'% self.unknown6) r('SRCS record index', 'srcs_record_index') a('Number of SRCS records?: %d'%self.num_srcs_records) a('Unknown7: %r'%self.unknown7) a(('Extra data flags: %s (has multibyte: %s) ' '(has indexing: %s) (has uncrossable breaks: %s)')%( bin(self.extra_data_flags), self.has_multibytes, self.has_indexing_bytes, self.has_uncrossable_breaks)) r('NCX index', 'primary_index_record') if self.length >= 248: r('Sections Index', 'sect_idx') r('SKEL Index', 'skel_idx') r('DATP Index', 'datp_idx') r('Other Index', 'oth_idx') if self.unknown9: a('Unknown9: %r'%self.unknown9) ans = '\n'.join(ans) if self.has_exth: ans += '\n\n' + str(self.exth) ans += '\n\nBytes after EXTH (%d bytes): %s'%( len(self.bytes_after_exth), format_bytes(self.bytes_after_exth)) ans += '\nNumber of bytes after full name: %d' % (len(self.raw) - (self.fullname_offset + self.fullname_length)) ans += '\nRecord 0 length: %d'%len(self.raw) return ans # }}} class MOBIFile: def __init__(self, stream): self.raw = stream.read() self.palmdb = PalmDB(self.raw[:78]) self.record_headers = [] self.records = [] for i in range(self.palmdb.number_of_records): pos = 78 + i * 8 offset, a1, a2, a3, a4 = struct.unpack(b'>LBBBB', self.raw[pos:pos+8]) flags, val = a1, a2 << 16 | a3 << 8 | a4 self.record_headers.append((offset, flags, val)) def section(section_number): if section_number == self.palmdb.number_of_records - 1: end_off = len(self.raw) else: end_off = self.record_headers[section_number + 1][0] off = self.record_headers[section_number][0] return self.raw[off:end_off] for i in range(self.palmdb.number_of_records): self.records.append(Record(section(i), self.record_headers[i])) self.mobi_header = MOBIHeader(self.records[0], 0) self.huffman_record_nums = [] self.kf8_type = None mh = mh8 = self.mobi_header if mh.file_version >= 8: self.kf8_type = 'standalone' elif mh.has_exth and 
mh.exth.kf8_header_index is not None: kf8i = mh.exth.kf8_header_index try: rec = self.records[kf8i-1] except IndexError: pass else: if rec.raw == b'BOUNDARY': self.kf8_type = 'joint' mh8 = MOBIHeader(self.records[kf8i], kf8i) self.mobi8_header = mh8 if 'huff' in self.mobi_header.compression.lower(): from calibre.ebooks.mobi.huffcdic import HuffReader def huffit(off, cnt): huffman_record_nums = list(range(off, off+cnt)) huffrecs = [self.records[r].raw for r in huffman_record_nums] huffs = HuffReader(huffrecs) return huffman_record_nums, huffs.unpack if self.kf8_type == 'joint': recs6, d6 = huffit(mh.huffman_record_offset, mh.huffman_record_count) recs8, d8 = huffit(mh8.huffman_record_offset, mh8.huffman_record_count) self.huffman_record_nums = recs6 + recs8 else: self.huffman_record_nums, d6 = huffit(mh.huffman_record_offset, mh.huffman_record_count) d8 = d6 elif 'palmdoc' in self.mobi_header.compression.lower(): from calibre.ebooks.compression.palmdoc import decompress_doc d8 = d6 = decompress_doc else: d8 = d6 = lambda x: x self.decompress6, self.decompress8 = d6, d8 class TextRecord: # {{{ def __init__(self, idx, record, extra_data_flags, decompress): self.trailing_data, self.raw = get_trailing_data(record.raw, extra_data_flags) raw_trailing_bytes = record.raw[len(self.raw):] self.raw = decompress(self.raw) if 0 in self.trailing_data: self.trailing_data['multibyte_overlap'] = self.trailing_data.pop(0) if 1 in self.trailing_data: self.trailing_data['indexing'] = self.trailing_data.pop(1) if 2 in self.trailing_data: self.trailing_data['uncrossable_breaks'] = self.trailing_data.pop(2) self.trailing_data['raw_bytes'] = raw_trailing_bytes for typ, val in iteritems(self.trailing_data): if isinstance(typ, numbers.Integral): print('Record %d has unknown trailing data of type: %d : %r'% (idx, typ, val)) self.idx = idx def dump(self, folder): name = '%06d'%self.idx with open(os.path.join(folder, name+'.txt'), 'wb') as f: f.write(self.raw) with open(os.path.join(folder, 
name+'.trailing_data'), 'wb') as f: for k, v in iteritems(self.trailing_data): raw = '%s : %r\n\n'%(k, v) f.write(raw.encode('utf-8')) def __len__(self): return len(self.raw) # }}}
25,814
Python
.py
540
35.859259
127
0.551135
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,488
mobi8.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/mobi8.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os import struct import sys import textwrap from calibre import CurrentDir from calibre.ebooks.mobi.debug import format_bytes from calibre.ebooks.mobi.debug.containers import ContainerHeader from calibre.ebooks.mobi.debug.headers import TextRecord from calibre.ebooks.mobi.debug.index import GuideIndex, NCXIndex, SECTIndex, SKELIndex from calibre.ebooks.mobi.reader.headers import NULL_INDEX from calibre.ebooks.mobi.utils import RECORD_SIZE, decode_tbs, read_font_record from calibre.utils.imghdr import what from polyglot.builtins import iteritems, itervalues, print_to_binary_file class FDST: def __init__(self, raw): if raw[:4] != b'FDST': raise ValueError('KF8 does not have a valid FDST record') self.sec_off, self.num_sections = struct.unpack_from(b'>LL', raw, 4) if self.sec_off != 12: raise ValueError('FDST record has unknown extra fields') secf = b'>%dL' % (self.num_sections*2) secs = struct.unpack_from(secf, raw, self.sec_off) rest = raw[self.sec_off+struct.calcsize(secf):] if rest: raise ValueError('FDST record has trailing data: ' '%s'%format_bytes(rest)) self.sections = tuple(zip(secs[::2], secs[1::2])) def __str__(self): ans = ['FDST record'] def a(k, v): return ans.append('{}: {}'.format(k, v)) a('Offset to sections', self.sec_off) a('Number of section records', self.num_sections) ans.append('**** %d Sections ****'% len(self.sections)) for sec in self.sections: ans.append('Start: %20d End: %d'%sec) return '\n'.join(ans) class File: def __init__(self, skel, skeleton, text, first_aid, sections): self.name = 'part%04d'%skel.file_number self.skeleton, self.text, self.first_aid = skeleton, text, first_aid self.sections = sections def dump(self, ddir): with open(os.path.join(ddir, self.name + '.html'), 'wb') as f: f.write(self.text) base = os.path.join(ddir, self.name + '-parts') os.mkdir(base) with CurrentDir(base): 
with open('skeleton.html', 'wb') as f: f.write(self.skeleton) for i, text in enumerate(self.sections): with open('sect-%04d.html'%i, 'wb') as f: f.write(text) class MOBIFile: def __init__(self, mf): self.mf = mf h, h8 = mf.mobi_header, mf.mobi8_header first_text_record = 1 offset = 0 self.resource_ranges = [(h8.first_resource_record, h8.last_resource_record, h8.first_image_index)] if mf.kf8_type == 'joint': offset = h.exth.kf8_header_index self.resource_ranges.insert(0, (h.first_resource_record, h.last_resource_record, h.first_image_index)) self.text_records = [TextRecord(i, r, h8.extra_data_flags, mf.decompress8) for i, r in enumerate(mf.records[first_text_record+offset: first_text_record+offset+h8.number_of_text_records])] self.raw_text = b''.join(r.raw for r in self.text_records) self.header = self.mf.mobi8_header self.extract_resources(mf.records) self.read_fdst() self.read_indices() self.build_files() self.read_tbs() def print_header(self, f=sys.stdout): p = print_to_binary_file(f) p(str(self.mf.palmdb)) p() p('Record headers:') for i, r in enumerate(self.mf.records): p('%6d. 
%s'%(i, r.header)) p() p(str(self.mf.mobi8_header)) def read_fdst(self): self.fdst = None if self.header.fdst_idx != NULL_INDEX: idx = self.header.fdst_idx self.fdst = FDST(self.mf.records[idx].raw) if self.fdst.num_sections != self.header.fdst_count: raise ValueError('KF8 Header contains invalid FDST count') def read_indices(self): self.skel_index = SKELIndex(self.header.skel_idx, self.mf.records, self.header.encoding) self.sect_index = SECTIndex(self.header.sect_idx, self.mf.records, self.header.encoding) self.ncx_index = NCXIndex(self.header.primary_index_record, self.mf.records, self.header.encoding) self.guide_index = GuideIndex(self.header.oth_idx, self.mf.records, self.header.encoding) def build_files(self): text = self.raw_text self.files = [] for skel in self.skel_index.records: sects = [x for x in self.sect_index.records if x.file_number == skel.file_number] skeleton = text[skel.start_position:skel.start_position+skel.length] ftext = skeleton first_aid = sects[0].toc_text sections = [] for sect in sects: start_pos = skel.start_position + skel.length + sect.start_pos sect_text = text[start_pos:start_pos+sect.length] insert_pos = sect.insert_pos - skel.start_position ftext = ftext[:insert_pos] + sect_text + ftext[insert_pos:] sections.append(sect_text) self.files.append(File(skel, skeleton, ftext, first_aid, sections)) def dump_flows(self, ddir): boundaries = [(0, len(self.raw_text))] if self.fdst is not None: boundaries = self.fdst.sections for i, x in enumerate(boundaries): start, end = x raw = self.raw_text[start:end] with open(os.path.join(ddir, 'flow%04d.txt'%i), 'wb') as f: f.write(raw) def extract_resources(self, records): self.resource_map = [] self.containers = [] known_types = {b'FLIS', b'FCIS', b'SRCS', b'\xe9\x8e\r\n', b'RESC', b'BOUN', b'FDST', b'DATP', b'AUDI', b'VIDE', b'CRES', b'CONT', b'CMET', b'PAGE'} container = None for i, rec in enumerate(records): for (l, r, offset) in self.resource_ranges: if l <= i <= r: resource_index = i + 1 if 
offset is not None and resource_index >= offset: resource_index -= offset break else: continue sig = rec.raw[:4] payload = rec.raw ext = 'dat' prefix = 'binary' suffix = '' if sig in {b'HUFF', b'CDIC', b'INDX'}: continue # TODO: Ignore CNCX records as well if sig == b'FONT': font = read_font_record(rec.raw) if font['err']: raise ValueError('Failed to read font record: %s Headers: %s'%( font['err'], font['headers'])) payload = (font['font_data'] if font['font_data'] else font['raw_data']) prefix, ext = 'fonts', font['ext'] elif sig == b'CONT': if payload == b'CONTBOUNDARY': self.containers.append(container) container = None continue container = ContainerHeader(payload) elif sig == b'CRES': container.resources.append(payload) if container.is_image_container: payload = payload[12:] q = what(None, payload) if q: prefix, ext = 'hd-images', q resource_index = len(container.resources) elif sig == b'\xa0\xa0\xa0\xa0' and len(payload) == 4: if container is None: print('Found an end of container record with no container, ignoring') else: container.resources.append(None) continue elif sig not in known_types: if container is not None and len(container.resources) == container.num_of_resource_records: container.add_hrefs(payload) continue q = what(None, rec.raw) if q: prefix, ext = 'images', q if prefix == 'binary': if sig == b'\xe9\x8e\r\n': suffix = '-EOF' elif sig in known_types: suffix = '-' + sig.decode('ascii') self.resource_map.append(('%s/%06d%s.%s'%(prefix, resource_index, suffix, ext), payload)) def read_tbs(self): from calibre.ebooks.mobi.writer8.tbs import ( DOC, Entry, NegativeStrandIndex, calculate_all_tbs, collect_indexing_data, encode_strands_as_sequences, sequences_to_bytes, ) entry_map = [] for index in self.ncx_index: vals = list(index)[:-1] + [None, None, None, None] entry_map.append(Entry(*(vals[:12]))) indexing_data = collect_indexing_data(entry_map, list(map(len, self.text_records))) self.indexing_data = [DOC + '\n' +textwrap.dedent('''\ Index Entry lines 
are of the form: depth:index_number [action] parent (index_num-parent) Geometry Where Geometry is the start and end of the index entry w.r.t the start of the text record. ''')] tbs_type = 8 try: calculate_all_tbs(indexing_data) except NegativeStrandIndex: calculate_all_tbs(indexing_data, tbs_type=5) tbs_type = 5 for i, strands in enumerate(indexing_data): rec = self.text_records[i] tbs_bytes = rec.trailing_data.get('indexing', b'') desc = ['Record #%d'%i] for s, strand in enumerate(strands): desc.append('Strand %d'%s) for entries in itervalues(strand): for e in entries: desc.append( ' %s%d [%-9s] parent: %s (%d) Geometry: (%d, %d)'%( e.depth * (' ') + '- ', e.index, e.action, e.parent, e.index-(e.parent or 0), e.start-i*RECORD_SIZE, e.start+e.length-i*RECORD_SIZE)) desc.append('TBS Bytes: ' + format_bytes(tbs_bytes)) flag_sz = 3 sequences = [] otbs = tbs_bytes while tbs_bytes: try: val, extra, consumed = decode_tbs(tbs_bytes, flag_size=flag_sz) except: break flag_sz = 4 tbs_bytes = tbs_bytes[consumed:] extra = {bin(k):v for k, v in iteritems(extra)} sequences.append((val, extra)) for j, seq in enumerate(sequences): desc.append('Sequence #%d: %r %r'%(j, seq[0], seq[1])) if tbs_bytes: desc.append('Remaining bytes: %s'%format_bytes(tbs_bytes)) calculated_sequences = encode_strands_as_sequences(strands, tbs_type=tbs_type) try: calculated_bytes = sequences_to_bytes(calculated_sequences) except: calculated_bytes = b'failed to calculate tbs bytes' if calculated_bytes != otbs: print('WARNING: TBS mismatch for record %d'%i) desc.append('WARNING: TBS mismatch!') desc.append('Calculated sequences: %r'%calculated_sequences) desc.append('') self.indexing_data.append('\n'.join(desc)) def inspect_mobi(mobi_file, ddir): f = MOBIFile(mobi_file) with open(os.path.join(ddir, 'header.txt'), 'wb') as out: f.print_header(f=out) alltext = os.path.join(ddir, 'raw_text.html') with open(alltext, 'wb') as of: of.write(f.raw_text) for x in ('text_records', 'images', 'fonts', 'binary', 
'files', 'flows', 'hd-images',): os.mkdir(os.path.join(ddir, x)) for rec in f.text_records: rec.dump(os.path.join(ddir, 'text_records')) for href, payload in f.resource_map: with open(os.path.join(ddir, href), 'wb') as fo: fo.write(payload) for i, container in enumerate(f.containers): with open(os.path.join(ddir, 'container%d.txt' % (i + 1)), 'wb') as cf: cf.write(str(container).encode('utf-8')) if f.fdst: with open(os.path.join(ddir, 'fdst.record'), 'wb') as fo: fo.write(str(f.fdst).encode('utf-8')) with open(os.path.join(ddir, 'skel.record'), 'wb') as fo: fo.write(str(f.skel_index).encode('utf-8')) with open(os.path.join(ddir, 'chunks.record'), 'wb') as fo: fo.write(str(f.sect_index).encode('utf-8')) with open(os.path.join(ddir, 'ncx.record'), 'wb') as fo: fo.write(str(f.ncx_index).encode('utf-8')) with open(os.path.join(ddir, 'guide.record'), 'wb') as fo: fo.write(str(f.guide_index).encode('utf-8')) with open(os.path.join(ddir, 'tbs.txt'), 'wb') as fo: fo.write(('\n'.join(f.indexing_data)).encode('utf-8')) for part in f.files: part.dump(os.path.join(ddir, 'files')) f.dump_flows(os.path.join(ddir, 'flows'))
13,569
Python
.py
297
33.387205
114
0.550832
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,489
__init__.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/__init__.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' def format_bytes(byts): byts = bytearray(byts) byts = [hex(b)[2:] for b in byts] return ' '.join(byts)
265
Python
.py
8
29.875
58
0.649402
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,490
index.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/index.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import struct from collections import OrderedDict, namedtuple from calibre.ebooks.mobi.reader.headers import NULL_INDEX from calibre.ebooks.mobi.reader.index import CNCX, INDEX_HEADER_FIELDS, get_tag_section_start, parse_index_record, parse_indx_header, parse_tagx_section from calibre.ebooks.mobi.reader.ncx import default_entry, tag_fieldname_map from polyglot.builtins import iteritems File = namedtuple('File', 'file_number name divtbl_count start_position length') Elem = namedtuple('Chunk', 'insert_pos toc_text file_number sequence_number start_pos ' 'length') GuideRef = namedtuple('GuideRef', 'type title pos_fid') INDEX_HEADER_FIELDS = INDEX_HEADER_FIELDS + ('indices', 'tagx_block_size', 'tagx_block') FIELD_NAMES = {'len':'Header length', 'type':'Unknown', 'gen':'Index Type (0 - normal, 2 - inflection)', 'start':'IDXT Offset', 'count':'Number of entries in this record', 'code': 'character encoding', 'lng':'Unknown', 'total':'Total number of actual Index Entries in all records', 'ordt': 'ORDT Offset', 'ligt':'LIGT Offset', 'nligt':'Number of LIGT', 'ncncx':'Number of CNCX records', 'indices':'Geometry of index records'} def read_variable_len_data(data, header): offset = header['tagx'] indices = [] idxt_offset = header['start'] idxt_size = 4 + header['count'] * 2 if offset > 0: tagx_block_size = header['tagx_block_size'] = struct.unpack_from(b'>I', data, offset + 4)[0] header['tagx_block'] = data[offset:offset+tagx_block_size] offset = idxt_offset + 4 for i in range(header['count']): p = struct.unpack_from(b'>H', data, offset)[0] offset += 2 strlen = bytearray(data[p])[0] text = data[p+1:p+1+strlen] p += 1 + strlen num = struct.unpack_from(b'>H', data, p)[0] indices.append((text, num)) else: header['tagx_block'] = b'' header['tagx_block_size'] = 0 trailing_bytes = data[idxt_offset+idxt_size:] if trailing_bytes.rstrip(b'\0'): 
raise ValueError('Traling bytes after last IDXT entry: %r' % trailing_bytes.rstrip(b'\0')) header['indices'] = indices def read_index(sections, idx, codec): table, cncx = OrderedDict(), CNCX([], codec) data = sections[idx].raw indx_header = parse_indx_header(data) indx_count = indx_header['count'] if indx_header['ncncx'] > 0: off = idx + indx_count + 1 cncx_records = [x.raw for x in sections[off:off+indx_header['ncncx']]] cncx = CNCX(cncx_records, codec) tag_section_start = get_tag_section_start(data, indx_header) control_byte_count, tags = parse_tagx_section(data[tag_section_start:]) read_variable_len_data(data, indx_header) index_headers = [] for i in range(idx + 1, idx + 1 + indx_count): # Index record data = sections[i].raw index_headers.append(parse_index_record(table, data, control_byte_count, tags, codec, indx_header['ordt_map'], strict=True)) read_variable_len_data(data, index_headers[-1]) return table, cncx, indx_header, index_headers class Index: def __init__(self, idx, records, codec): self.table = self.cncx = self.header = self.records = None self.index_headers = [] if idx != NULL_INDEX: self.table, self.cncx, self.header, self.index_headers = read_index(records, idx, codec) def render(self): ans = ['*'*10 + ' Index Header ' + '*'*10] a = ans.append if self.header is not None: for field in INDEX_HEADER_FIELDS: a('%-12s: %r'%(FIELD_NAMES.get(field, field), self.header[field])) ans.extend(['', '']) ans += ['*'*10 + ' Index Record Headers (%d records) ' % len(self.index_headers) + '*'*10] for i, header in enumerate(self.index_headers): ans += ['*'*10 + ' Index Record %d ' % i + '*'*10] for field in INDEX_HEADER_FIELDS: a('%-12s: %r'%(FIELD_NAMES.get(field, field), header[field])) if self.cncx: a('*'*10 + ' CNCX ' + '*'*10) for offset, val in iteritems(self.cncx): a('%10s: %s'%(offset, val)) ans.extend(['', '']) if self.table is not None: a('*'*10 + ' %d Index Entries '%len(self.table) + '*'*10) for k, v in iteritems(self.table): a('%s: %r'%(k, v)) if 
self.records: ans.extend(['', '', '*'*10 + ' Parsed Entries ' + '*'*10]) for f in self.records: a(repr(f)) return ans + [''] def __str__(self): return '\n'.join(self.render()) def __iter__(self): return iter(self.records) class SKELIndex(Index): def __init__(self, skelidx, records, codec): super().__init__(skelidx, records, codec) self.records = [] if self.table is not None: for i, text in enumerate(self.table): tag_map = self.table[text] if set(tag_map) != {1, 6}: raise ValueError('SKEL Index has unknown tags: %s'% (set(tag_map)-{1,6})) self.records.append(File( i, # file_number text, # name tag_map[1][0], # divtbl_count tag_map[6][0], # start_pos tag_map[6][1]) # length ) class SECTIndex(Index): def __init__(self, sectidx, records, codec): super().__init__(sectidx, records, codec) self.records = [] if self.table is not None: for i, text in enumerate(self.table): tag_map = self.table[text] if set(tag_map) != {2, 3, 4, 6}: raise ValueError('Chunk Index has unknown tags: %s'% (set(tag_map)-{2, 3, 4, 6})) toc_text = self.cncx[tag_map[2][0]] self.records.append(Elem( int(text), # insert_pos toc_text, # toc_text tag_map[3][0], # file_number tag_map[4][0], # sequence_number tag_map[6][0], # start_pos tag_map[6][1] # length ) ) class GuideIndex(Index): def __init__(self, guideidx, records, codec): super().__init__(guideidx, records, codec) self.records = [] if self.table is not None: for i, text in enumerate(self.table): tag_map = self.table[text] if set(tag_map) not in ({1, 6}, {1, 2, 3}): raise ValueError('Guide Index has unknown tags: %s'% tag_map) title = self.cncx[tag_map[1][0]] self.records.append(GuideRef( text, title, tag_map[6] if 6 in tag_map else (tag_map[2], tag_map[3]) ) ) class NCXIndex(Index): def __init__(self, ncxidx, records, codec): super().__init__(ncxidx, records, codec) self.records = [] if self.table is not None: NCXEntry = namedtuple('NCXEntry', 'index start length depth parent ' 'first_child last_child title pos_fid kind') for num, x in 
enumerate(iteritems(self.table)): text, tag_map = x entry = e = default_entry.copy() entry['name'] = text entry['num'] = num for tag in tag_fieldname_map: fieldname, i = tag_fieldname_map[tag] if tag in tag_map: fieldvalue = tag_map[tag][i] if tag == 6: # Appears to be an idx into the KF8 elems table with an # offset fieldvalue = tuple(tag_map[tag]) entry[fieldname] = fieldvalue for which, name in iteritems({3:'text', 5:'kind', 70:'description', 71:'author', 72:'image_caption', 73:'image_attribution'}): if tag == which: entry[name] = self.cncx.get(fieldvalue, default_entry[name]) def refindx(e, name): ans = e[name] if ans < 0: ans = None return ans entry = NCXEntry(start=e['pos'], index=e['num'], length=e['len'], depth=e['hlvl'], parent=refindx(e, 'parent'), first_child=refindx(e, 'child1'), last_child=refindx(e, 'childn'), title=e['text'], pos_fid=e['pos_fid'], kind=e['kind']) self.records.append(entry)
9,225
Python
.py
193
34.823834
152
0.525495
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,491
mobi6.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/mobi6.py
#!/usr/bin/env python __license__ = 'GPL v3' __copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>' __docformat__ = 'restructuredtext en' import os import struct import sys from collections import OrderedDict, defaultdict from lxml import html from calibre.ebooks.mobi.debug import format_bytes from calibre.ebooks.mobi.debug.headers import TextRecord from calibre.ebooks.mobi.reader.headers import NULL_INDEX from calibre.ebooks.mobi.reader.index import parse_index_record, parse_tagx_section from calibre.ebooks.mobi.utils import decint, decode_hex_number, decode_tbs, read_font_record from calibre.utils.imghdr import what from polyglot.builtins import as_bytes, iteritems, print_to_binary_file class TagX: # {{{ def __init__(self, tag, num_values, bitmask, eof): self.tag, self.num_values, self.bitmask, self.eof = (tag, num_values, bitmask, eof) self.num_of_values = num_values self.is_eof = (self.eof == 1 and self.tag == 0 and self.num_values == 0 and self.bitmask == 0) def __repr__(self): return 'TAGX(tag=%02d, num_values=%d, bitmask=%r, eof=%d)' % (self.tag, self.num_values, bin(self.bitmask), self.eof) # }}} class SecondaryIndexHeader: # {{{ def __init__(self, record): self.record = record raw = self.record.raw # open('/t/index_header.bin', 'wb').write(raw) if raw[:4] != b'INDX': raise ValueError('Invalid Secondary Index Record') self.header_length, = struct.unpack('>I', raw[4:8]) self.unknown1 = raw[8:16] self.index_type, = struct.unpack('>I', raw[16:20]) self.index_type_desc = {0: 'normal', 2: 'inflection', 6: 'calibre'}.get(self.index_type, 'unknown') self.idxt_start, = struct.unpack('>I', raw[20:24]) self.index_count, = struct.unpack('>I', raw[24:28]) self.index_encoding_num, = struct.unpack('>I', raw[28:32]) self.index_encoding = {65001: 'utf-8', 1252: 'cp1252'}.get(self.index_encoding_num, 'unknown') if self.index_encoding == 'unknown': raise ValueError( 'Unknown index encoding: %d'%self.index_encoding_num) self.unknown2 = raw[32:36] self.num_index_entries, = 
struct.unpack('>I', raw[36:40]) self.ordt_start, = struct.unpack('>I', raw[40:44]) self.ligt_start, = struct.unpack('>I', raw[44:48]) self.num_of_ligt_entries, = struct.unpack('>I', raw[48:52]) self.num_of_cncx_blocks, = struct.unpack('>I', raw[52:56]) self.unknown3 = raw[56:180] self.tagx_offset, = struct.unpack(b'>I', raw[180:184]) if self.tagx_offset != self.header_length: raise ValueError('TAGX offset and header length disagree') self.unknown4 = raw[184:self.header_length] tagx = raw[self.header_length:] if not tagx.startswith(b'TAGX'): raise ValueError('Invalid TAGX section') self.tagx_header_length, = struct.unpack('>I', tagx[4:8]) self.tagx_control_byte_count, = struct.unpack('>I', tagx[8:12]) self.tagx_entries = [TagX(*x) for x in parse_tagx_section(tagx)[1]] if self.tagx_entries and not self.tagx_entries[-1].is_eof: raise ValueError('TAGX last entry is not EOF') idxt0_pos = self.header_length+self.tagx_header_length num = ord(raw[idxt0_pos:idxt0_pos+1]) count_pos = idxt0_pos+1+num self.last_entry = raw[idxt0_pos+1:count_pos] self.ncx_count, = struct.unpack(b'>H', raw[count_pos:count_pos+2]) # There may be some alignment zero bytes between the end of the idxt0 # and self.idxt_start idxt = raw[self.idxt_start:] if idxt[:4] != b'IDXT': raise ValueError('Invalid IDXT header') length_check, = struct.unpack(b'>H', idxt[4:6]) if length_check != self.header_length + self.tagx_header_length: raise ValueError('Length check failed') if idxt[6:].replace(b'\0', b''): raise ValueError('Non null trailing bytes after IDXT') def __str__(self): ans = ['*'*20 + ' Secondary Index Header '+ '*'*20] a = ans.append def u(w): a('Unknown: %r (%d bytes) (All zeros: %r)'%(w, len(w), not bool(w.replace(b'\0', b'')))) a('Header length: %d'%self.header_length) u(self.unknown1) a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type)) a('Offset to IDXT start: %d'%self.idxt_start) a('Number of index records: %d'%self.index_count) a('Index encoding: %s (%d)'%(self.index_encoding, 
self.index_encoding_num)) u(self.unknown2) a('Number of index entries: %d'% self.num_index_entries) a('ORDT start: %d'%self.ordt_start) a('LIGT start: %d'%self.ligt_start) a('Number of LIGT entries: %d'%self.num_of_ligt_entries) a('Number of cncx blocks: %d'%self.num_of_cncx_blocks) u(self.unknown3) a('TAGX offset: %d'%self.tagx_offset) u(self.unknown4) a('\n\n') a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20) a('Header length: %d'%self.tagx_header_length) a('Control byte count: %d'%self.tagx_control_byte_count) for i in self.tagx_entries: a('\t' + repr(i)) a('Index of last IndexEntry in secondary index record: %s'% self.last_entry) a('Number of entries in the NCX: %d'% self.ncx_count) return '\n'.join(ans) # }}} class IndexHeader: # {{{ def __init__(self, record): self.record = record raw = self.record.raw # open('/t/index_header.bin', 'wb').write(raw) if raw[:4] != b'INDX': raise ValueError('Invalid Primary Index Record') self.header_length, = struct.unpack('>I', raw[4:8]) self.unknown1 = raw[8:12] self.header_type, = struct.unpack('>I', raw[12:16]) self.index_type, = struct.unpack('>I', raw[16:20]) self.index_type_desc = {0: 'normal', 2: 'inflection', 6: 'calibre'}.get(self.index_type, 'unknown') self.idxt_start, = struct.unpack('>I', raw[20:24]) self.index_count, = struct.unpack('>I', raw[24:28]) self.index_encoding_num, = struct.unpack('>I', raw[28:32]) self.index_encoding = {65001: 'utf-8', 1252: 'cp1252'}.get(self.index_encoding_num, 'unknown') if self.index_encoding == 'unknown': raise ValueError( 'Unknown index encoding: %d'%self.index_encoding_num) self.possibly_language = raw[32:36] self.num_index_entries, = struct.unpack('>I', raw[36:40]) self.ordt_start, = struct.unpack('>I', raw[40:44]) self.ligt_start, = struct.unpack('>I', raw[44:48]) self.num_of_ligt_entries, = struct.unpack('>I', raw[48:52]) self.num_of_cncx_blocks, = struct.unpack('>I', raw[52:56]) self.unknown2 = raw[56:180] self.tagx_offset, = struct.unpack(b'>I', 
raw[180:184]) if self.tagx_offset != self.header_length: raise ValueError('TAGX offset and header length disagree') self.unknown3 = raw[184:self.header_length] tagx = raw[self.header_length:] if not tagx.startswith(b'TAGX'): raise ValueError('Invalid TAGX section') self.tagx_header_length, = struct.unpack('>I', tagx[4:8]) self.tagx_control_byte_count, = struct.unpack('>I', tagx[8:12]) self.tagx_entries = [TagX(*x) for x in parse_tagx_section(tagx)[1]] if self.tagx_entries and not self.tagx_entries[-1].is_eof: raise ValueError('TAGX last entry is not EOF') idxt0_pos = self.header_length+self.tagx_header_length last_num, consumed = decode_hex_number(raw[idxt0_pos:]) count_pos = idxt0_pos + consumed self.ncx_count, = struct.unpack(b'>H', raw[count_pos:count_pos+2]) self.last_entry = last_num if last_num != self.ncx_count - 1: raise ValueError('Last id number in the NCX != NCX count - 1') # There may be some alignment zero bytes between the end of the idxt0 # and self.idxt_start idxt = raw[self.idxt_start:] if idxt[:4] != b'IDXT': raise ValueError('Invalid IDXT header') length_check, = struct.unpack(b'>H', idxt[4:6]) if length_check != self.header_length + self.tagx_header_length: raise ValueError('Length check failed') # if idxt[6:].replace(b'\0', b''): # raise ValueError('Non null trailing bytes after IDXT') def __str__(self): ans = ['*'*20 + ' Index Header (%d bytes)'%len(self.record.raw)+ '*'*20] a = ans.append def u(w): a('Unknown: %r (%d bytes) (All zeros: %r)'%(w, len(w), not bool(w.replace(b'\0', b'')))) a('Header length: %d'%self.header_length) u(self.unknown1) a('Header type: %d'%self.header_type) a('Index Type: %s (%d)'%(self.index_type_desc, self.index_type)) a('Offset to IDXT start: %d'%self.idxt_start) a('Number of index records: %d'%self.index_count) a('Index encoding: %s (%d)'%(self.index_encoding, self.index_encoding_num)) a('Unknown (possibly language?): %r'%(self.possibly_language)) a('Number of index entries: %d'% self.num_index_entries) a('ORDT 
start: %d'%self.ordt_start) a('LIGT start: %d'%self.ligt_start) a('Number of LIGT entries: %d'%self.num_of_ligt_entries) a('Number of cncx blocks: %d'%self.num_of_cncx_blocks) u(self.unknown2) a('TAGX offset: %d'%self.tagx_offset) u(self.unknown3) a('\n\n') a('*'*20 + ' TAGX Header (%d bytes)'%self.tagx_header_length+ '*'*20) a('Header length: %d'%self.tagx_header_length) a('Control byte count: %d'%self.tagx_control_byte_count) for i in self.tagx_entries: a('\t' + repr(i)) a('Index of last IndexEntry in primary index record: %s'% self.last_entry) a('Number of entries in the NCX: %d'% self.ncx_count) return '\n'.join(ans) # }}} class Tag: # {{{ ''' Index entries are a collection of tags. Each tag is represented by this class. ''' TAG_MAP = { 1: ('offset', 'Offset in HTML'), 2: ('size', 'Size in HTML'), 3: ('label_offset', 'Label offset in CNCX'), 4: ('depth', 'Depth of this entry in TOC'), 5: ('class_offset', 'Class offset in CNCX'), 6: ('pos_fid', 'File Index'), 11: ('secondary', '[unknown, unknown, ' 'tag type from TAGX in primary index header]'), 21: ('parent_index', 'Parent'), 22: ('first_child_index', 'First child'), 23: ('last_child_index', 'Last child'), 69 : ('image_index', 'Offset from first image record to the' ' image record associated with this entry' ' (masthead for periodical or thumbnail for' ' article entry).'), 70 : ('desc_offset', 'Description offset in cncx'), 71 : ('author_offset', 'Author offset in cncx'), 72 : ('image_caption_offset', 'Image caption offset in cncx'), 73 : ('image_attr_offset', 'Image attribution offset in cncx'), } def __init__(self, tag_type, vals, cncx): self.value = vals if len(vals) > 1 else vals[0] if vals else None self.cncx_value = None if tag_type in self.TAG_MAP: self.attr, self.desc = self.TAG_MAP[tag_type] else: print('Unknown tag value: %%s'%tag_type) self.desc = '??Unknown (tag value: %d)'%tag_type self.attr = 'unknown' if '_offset' in self.attr: self.cncx_value = cncx[self.value] def __str__(self): if 
self.cncx_value is not None: return '%s : %r [%r]'%(self.desc, self.value, self.cncx_value) return '%s : %r'%(self.desc, self.value) # }}} class IndexEntry: # {{{ ''' The index is made up of entries, each of which is represented by an instance of this class. Index entries typically point to offsets in the HTML, specify HTML sizes and point to text strings in the CNCX that are used in the navigation UI. ''' def __init__(self, ident, entry, cncx): try: self.index = int(ident, 16) except ValueError: self.index = ident self.tags = [Tag(tag_type, vals, cncx) for tag_type, vals in iteritems(entry)] @property def label(self): for tag in self.tags: if tag.attr == 'label_offset': return tag.cncx_value return '' @property def offset(self): for tag in self.tags: if tag.attr == 'offset': return tag.value return 0 @property def size(self): for tag in self.tags: if tag.attr == 'size': return tag.value return 0 @property def depth(self): for tag in self.tags: if tag.attr == 'depth': return tag.value return 0 @property def parent_index(self): for tag in self.tags: if tag.attr == 'parent_index': return tag.value return -1 @property def first_child_index(self): for tag in self.tags: if tag.attr == 'first_child_index': return tag.value return -1 @property def last_child_index(self): for tag in self.tags: if tag.attr == 'last_child_index': return tag.value return -1 @property def pos_fid(self): for tag in self.tags: if tag.attr == 'pos_fid': return tag.value return [0, 0] def __str__(self): ans = ['Index Entry(index=%s, length=%d)'%( self.index, len(self.tags))] for tag in self.tags: if tag.value is not None: ans.append('\t'+str(tag)) if self.first_child_index != -1: ans.append('\tNumber of children: %d'%(self.last_child_index - self.first_child_index + 1)) return '\n'.join(ans) # }}} class IndexRecord: # {{{ ''' Represents all indexing information in the MOBI, apart from indexing info in the trailing data of the text records. 
''' def __init__(self, records, index_header, cncx): self.alltext = None table = OrderedDict() tags = [TagX(x.tag, x.num_values, x.bitmask, x.eof) for x in index_header.tagx_entries] for record in records: raw = record.raw if raw[:4] != b'INDX': raise ValueError('Invalid Primary Index Record') parse_index_record(table, record.raw, index_header.tagx_control_byte_count, tags, index_header.index_encoding, {}, strict=True) self.indices = [] for ident, entry in iteritems(table): self.indices.append(IndexEntry(ident, entry, cncx)) def get_parent(self, index): if index.depth < 1: return None parent_depth = index.depth - 1 for p in self.indices: if p.depth != parent_depth: continue def __str__(self): ans = ['*'*20 + ' Index Entries (%d entries) '%len(self.indices)+ '*'*20] a = ans.append def u(w): a('Unknown: %r (%d bytes) (All zeros: %r)'%(w, len(w), not bool(w.replace(b'\0', b'')))) for entry in self.indices: offset = entry.offset a(str(entry)) t = self.alltext if offset is not None and self.alltext is not None: a('\tHTML before offset: %r'%t[offset-50:offset]) a('\tHTML after offset: %r'%t[offset:offset+50]) p = offset+entry.size a('\tHTML before end: %r'%t[p-50:p]) a('\tHTML after end: %r'%t[p:p+50]) a('') return '\n'.join(ans) # }}} class CNCX: # {{{ ''' Parses the records that contain the compiled NCX (all strings from the NCX). Presents a simple offset : string mapping interface to access the data. 
''' def __init__(self, records, codec): self.records = OrderedDict() record_offset = 0 for record in records: raw = record.raw pos = 0 while pos < len(raw): length, consumed = decint(raw[pos:]) if length > 0: try: self.records[pos+record_offset] = raw[ pos+consumed:pos+consumed+length].decode(codec) except: byts = raw[pos:] r = format_bytes(byts) print('CNCX entry at offset %d has unknown format %s'%( pos+record_offset, r)) self.records[pos+record_offset] = r pos = len(raw) pos += consumed+length record_offset += 0x10000 def __getitem__(self, offset): return self.records.get(offset) def __str__(self): ans = ['*'*20 + ' cncx (%d strings) '%len(self.records)+ '*'*20] for k, v in iteritems(self.records): ans.append('%10d : %s'%(k, v)) return '\n'.join(ans) # }}} class ImageRecord: # {{{ def __init__(self, idx, record, fmt): self.raw = record.raw self.fmt = fmt self.idx = idx def dump(self, folder): name = '%06d'%self.idx with open(os.path.join(folder, name+'.'+self.fmt), 'wb') as f: f.write(self.raw) # }}} class BinaryRecord: # {{{ def __init__(self, idx, record): self.raw = record.raw sig = self.raw[:4] name = '%06d'%idx if sig in {b'FCIS', b'FLIS', b'SRCS', b'DATP', b'RESC', b'BOUN', b'FDST', b'AUDI', b'VIDE', b'CRES', b'CONT', b'CMET'}: name += '-' + sig.decode('ascii') elif sig == b'\xe9\x8e\r\n': name += '-' + 'EOF' self.name = name def dump(self, folder): with open(os.path.join(folder, self.name+'.bin'), 'wb') as f: f.write(self.raw) # }}} class FontRecord: # {{{ def __init__(self, idx, record): self.raw = record.raw name = '%06d'%idx self.font = read_font_record(self.raw) if self.font['err']: raise ValueError('Failed to read font record: %s Headers: %s'%( self.font['err'], self.font['headers'])) self.payload = (self.font['font_data'] if self.font['font_data'] else self.font['raw_data']) self.name = '%s.%s'%(name, self.font['ext']) def dump(self, folder): with open(os.path.join(folder, self.name), 'wb') as f: f.write(self.payload) # }}} class TBSIndexing: # {{{ 
def __init__(self, text_records, indices, doc_type): self.record_indices = OrderedDict() self.doc_type = doc_type self.indices = indices pos = 0 for r in text_records: start = pos pos += len(r.raw) end = pos - 1 self.record_indices[r] = x = {'starts':[], 'ends':[], 'complete':[], 'geom': (start, end)} for entry in indices: istart, sz = entry.offset, entry.size iend = istart + sz - 1 has_start = istart >= start and istart <= end has_end = iend >= start and iend <= end rec = None if has_start and has_end: rec = 'complete' elif has_start and not has_end: rec = 'starts' elif not has_start and has_end: rec = 'ends' if rec: x[rec].append(entry) def get_index(self, idx): for i in self.indices: if i.index in {idx, str(idx)}: return i raise IndexError('Index %d not found'%idx) def __str__(self): ans = ['*'*20 + ' TBS Indexing (%d records) '%len(self.record_indices)+ '*'*20] for r, dat in iteritems(self.record_indices): ans += self.dump_record(r, dat)[-1] return '\n'.join(ans) def dump(self, bdir): types = defaultdict(list) for r, dat in iteritems(self.record_indices): tbs_type, strings = self.dump_record(r, dat) if tbs_type == 0: continue types[tbs_type] += strings for typ, strings in iteritems(types): with open(os.path.join(bdir, 'tbs_type_%d.txt'%typ), 'wb') as f: f.write(as_bytes('\n'.join(strings))) def dump_record(self, r, dat): ans = [] ans.append('\nRecord #%d: Starts at: %d Ends at: %d'%(r.idx, dat['geom'][0], dat['geom'][1])) s, e, c = dat['starts'], dat['ends'], dat['complete'] ans.append(('\tContains: %d index entries ' '(%d ends, %d complete, %d starts)')%tuple(map(len, (s+e+c, e, c, s)))) byts = bytearray(r.trailing_data.get('indexing', b'')) ans.append('TBS bytes: %s'%format_bytes(byts)) for typ, entries in (('Ends', e), ('Complete', c), ('Starts', s)): if entries: ans.append('\t%s:'%typ) for x in entries: ans.append(('\t\tIndex Entry: %s (Parent index: %s, ' 'Depth: %d, Offset: %d, Size: %d) [%s]')%( x.index, x.parent_index, x.depth, x.offset, x.size, 
x.label)) def bin4(num): ans = bin(num)[2:] return as_bytes('0'*(4-len(ans)) + ans) def repr_extra(x): return str({bin4(k):v for k, v in iteritems(extra)}) tbs_type = 0 is_periodical = self.doc_type in (257, 258, 259) if len(byts): outermost_index, extra, consumed = decode_tbs(byts, flag_size=3) byts = byts[consumed:] for k in extra: tbs_type |= k ans.append('\nTBS: %d (%s)'%(tbs_type, bin4(tbs_type))) ans.append('Outermost index: %d'%outermost_index) ans.append('Unknown extra start bytes: %s'%repr_extra(extra)) if is_periodical: # Hierarchical periodical try: byts, a = self.interpret_periodical(tbs_type, byts, dat['geom'][0]) except: import traceback traceback.print_exc() a = [] print('Failed to decode TBS bytes for record: %d'%r.idx) ans += a if byts: sbyts = tuple(hex(b)[2:] for b in byts) ans.append('Remaining bytes: %s'%' '.join(sbyts)) ans.append('') return tbs_type, ans def interpret_periodical(self, tbs_type, byts, record_offset): ans = [] def read_section_transitions(byts, psi=None): # {{{ if psi is None: # Assume previous section is 1 psi = self.get_index(1) while byts: ai, extra, consumed = decode_tbs(byts) byts = byts[consumed:] if extra.get(0b0010, None) is not None: raise ValueError('Dont know how to interpret flag 0b0010' ' while reading section transitions') if extra.get(0b1000, None) is not None: if len(extra) > 1: raise ValueError('Dont know how to interpret flags' ' %r while reading section transitions'%extra) nsi = self.get_index(psi.index+1) ans.append('Last article in this record of section %d' ' (relative to next section index [%d]): ' '%d [%d absolute index]'%(psi.index, nsi.index, ai, ai+nsi.index)) psi = nsi continue ans.append('First article in this record of section %d' ' (relative to its parent section): ' '%d [%d absolute index]'%(psi.index, ai, ai+psi.index)) num = extra.get(0b0100, None) if num is None: msg = ('The section %d has at most one article' ' in this record')%psi.index else: msg = ('Number of articles in this record of ' 
'section %d: %d')%(psi.index, num) ans.append(msg) offset = extra.get(0b0001, None) if offset is not None: if offset == 0: ans.append('This record is spanned by the article:' '%d'%(ai+psi.index)) else: ans.append('->Offset to start of next section (%d) from start' ' of record: %d [%d absolute offset]'%(psi.index+1, offset, offset+record_offset)) return byts # }}} def read_starting_section(byts): # {{{ orig = byts si, extra, consumed = decode_tbs(byts) byts = byts[consumed:] if len(extra) > 1 or 0b0010 in extra or 0b1000 in extra: raise ValueError('Dont know how to interpret flags %r' ' when reading starting section'%extra) si = self.get_index(si) ans.append('The section at the start of this record is:' ' %s'%si.index) if 0b0100 in extra: num = extra[0b0100] ans.append('The number of articles from the section %d' ' in this record: %s'%(si.index, num)) elif 0b0001 in extra: eof = extra[0b0001] if eof != 0: raise ValueError('Unknown eof value %s when reading' ' starting section. All bytes: %r'%(eof, orig)) ans.append('??This record has more than one article from ' ' the section: %s'%si.index) return si, byts # }}} if tbs_type & 0b0100: # Starting section is the first section ssi = self.get_index(1) else: ssi, byts = read_starting_section(byts) byts = read_section_transitions(byts, ssi) return byts, ans # }}} class MOBIFile: # {{{ def __init__(self, mf): for x in ('raw', 'palmdb', 'record_headers', 'records', 'mobi_header', 'huffman_record_nums',): setattr(self, x, getattr(mf, x)) self.index_header = self.index_record = None self.indexing_record_nums = set() pir = getattr(self.mobi_header, 'primary_index_record', NULL_INDEX) if pir != NULL_INDEX: self.index_header = IndexHeader(self.records[pir]) numi = self.index_header.index_count self.cncx = CNCX(self.records[ pir+1+numi:pir+1+numi+self.index_header.num_of_cncx_blocks], self.index_header.index_encoding) self.index_record = IndexRecord(self.records[pir+1:pir+1+numi], self.index_header, self.cncx) 
self.indexing_record_nums = set(range(pir, pir+1+numi+self.index_header.num_of_cncx_blocks)) self.secondary_index_record = self.secondary_index_header = None sir = self.mobi_header.secondary_index_record if sir != NULL_INDEX: self.secondary_index_header = SecondaryIndexHeader(self.records[sir]) numi = self.secondary_index_header.index_count self.indexing_record_nums.add(sir) self.secondary_index_record = IndexRecord( self.records[sir+1:sir+1+numi], self.secondary_index_header, self.cncx) self.indexing_record_nums |= set(range(sir+1, sir+1+numi)) ntr = self.mobi_header.number_of_text_records fii = self.mobi_header.first_image_index self.text_records = [TextRecord(r, self.records[r], self.mobi_header.extra_data_flags, mf.decompress6) for r in range(1, min(len(self.records), ntr+1))] self.image_records, self.binary_records = [], [] self.font_records = [] image_index = 0 for i in range(self.mobi_header.first_resource_record, min(self.mobi_header.last_resource_record, len(self.records))): if i in self.indexing_record_nums or i in self.huffman_record_nums: continue image_index += 1 r = self.records[i] fmt = None if i >= fii and r.raw[:4] not in {b'FLIS', b'FCIS', b'SRCS', b'\xe9\x8e\r\n', b'RESC', b'BOUN', b'FDST', b'DATP', b'AUDI', b'VIDE', b'FONT', b'CRES', b'CONT', b'CMET'}: try: fmt = what(None, r.raw) except: pass if fmt is not None: self.image_records.append(ImageRecord(image_index, r, fmt)) elif r.raw[:4] == b'FONT': self.font_records.append(FontRecord(i, r)) else: self.binary_records.append(BinaryRecord(i, r)) if self.index_record is not None: self.tbs_indexing = TBSIndexing(self.text_records, self.index_record.indices, self.mobi_header.type_raw) def print_header(self, f=sys.stdout): p = print_to_binary_file(f) p(str(self.palmdb)) p() p('Record headers:') for i, r in enumerate(self.records): p('%6d. 
%s'%(i, r.header)) p() p(str(self.mobi_header)) # }}} def inspect_mobi(mobi_file, ddir): f = MOBIFile(mobi_file) with open(os.path.join(ddir, 'header.txt'), 'wb') as out: f.print_header(f=out) alltext = os.path.join(ddir, 'text.html') with open(alltext, 'wb') as of: alltext = b'' for rec in f.text_records: of.write(rec.raw) alltext += rec.raw of.seek(0) root = html.fromstring(alltext.decode(f.mobi_header.encoding)) with open(os.path.join(ddir, 'pretty.html'), 'wb') as of: of.write(html.tostring(root, pretty_print=True, encoding='utf-8', include_meta_content_type=True)) if f.index_header is not None: f.index_record.alltext = alltext with open(os.path.join(ddir, 'index.txt'), 'wb') as out: print = print_to_binary_file(out) print(str(f.index_header), file=out) print('\n\n', file=out) if f.secondary_index_header is not None: print(str(f.secondary_index_header), file=out) print('\n\n', file=out) if f.secondary_index_record is not None: print(str(f.secondary_index_record), file=out) print('\n\n', file=out) print(str(f.cncx), file=out) print('\n\n', file=out) print(str(f.index_record), file=out) with open(os.path.join(ddir, 'tbs_indexing.txt'), 'wb') as out: print = print_to_binary_file(out) print(str(f.tbs_indexing), file=out) f.tbs_indexing.dump(ddir) for tdir, attr in [('text', 'text_records'), ('images', 'image_records'), ('binary', 'binary_records'), ('font', 'font_records')]: tdir = os.path.join(ddir, tdir) os.mkdir(tdir) for rec in getattr(f, attr): rec.dump(tdir) # }}}
32,267
Python
.py
715
33.475524
126
0.541774
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,492
main.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/main.py
#!/usr/bin/env python


__license__   = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import os
import shutil
import sys

from calibre.ebooks.mobi.debug.headers import MOBIFile
from calibre.ebooks.mobi.debug.mobi6 import inspect_mobi as inspect_mobi6
from calibre.ebooks.mobi.debug.mobi8 import inspect_mobi as inspect_mobi8


def inspect_mobi(path_or_stream, ddir=None):  # {{{
    '''
    Dump the internal structure of a MOBI file into a directory of files.

    :param path_or_stream: path to a MOBI file, or an already-open binary
        stream (anything with a ``read`` method)
    :param ddir: output directory; defaults to ``decompiled_<basename>``.
        Any pre-existing directory of that name is removed first.
    '''
    stream = (path_or_stream if hasattr(path_or_stream, 'read') else
            open(path_or_stream, 'rb'))
    f = MOBIFile(stream)
    if ddir is None:
        ddir = 'decompiled_' + os.path.splitext(os.path.basename(stream.name))[0]
    # Start from a clean slate. ignore_errors handles the common case of the
    # directory not existing yet; the previous bare ``except: pass`` also
    # silently swallowed KeyboardInterrupt and unrelated errors.
    shutil.rmtree(ddir, ignore_errors=True)
    os.makedirs(ddir)
    if f.kf8_type is None:
        # Pure MOBI 6 file
        inspect_mobi6(f, ddir)
    elif f.kf8_type == 'joint':
        # Combined MOBI6 + KF8 file: dump each format into its own sub-directory
        p6 = os.path.join(ddir, 'mobi6')
        os.mkdir(p6)
        inspect_mobi6(f, p6)
        p8 = os.path.join(ddir, 'mobi8')
        os.mkdir(p8)
        inspect_mobi8(f, p8)
    else:
        # Pure KF8 (MOBI 8) file
        inspect_mobi8(f, ddir)
    print('Debug data saved to:', ddir)
# }}}


def main():
    inspect_mobi(sys.argv[1])


if __name__ == '__main__':
    main()
1,206
Python
.py
38
26.526316
81
0.642734
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,493
containers.py
kovidgoyal_calibre/src/calibre/ebooks/mobi/debug/containers.py
#!/usr/bin/env python


__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'

from struct import unpack_from

from calibre.ebooks.mobi.debug.headers import EXTHHeader


class ContainerHeader:
    '''
    Parse the fixed-size header record of a KF8 resource container
    (azw6/resource "CONT" style record) for debugging purposes.

    Only reads from ``data``; all parsed fields are exposed as attributes.
    '''

    def __init__(self, data):
        # First four bytes are the container identifier/magic
        self.ident = data[:4]
        # Big-endian: record size (I), type (H), record count (H), encoding id (I)
        self.record_size, self.type, self.count, self.encoding = unpack_from(b'>IHHI', data, 4)
        # Map the numeric code page to a python codec name; unknown codes are
        # kept as their repr() so they remain printable in __str__
        self.encoding = {
            1252 : 'cp1252',
            65001: 'utf-8',
        }.get(self.encoding, repr(self.encoding))
        # Eight more big-endian uint32 fields follow at offset 16; only some
        # of their meanings are known (hence unknowns1/unknowns2)
        rest = list(unpack_from(b'>IIIIIIII', data, 16))
        self.num_of_resource_records = rest[2]
        self.num_of_non_dummy_resource_records = rest[3]
        self.offset_to_href_record = rest[4]
        self.unknowns1 = rest[:2]
        self.unknowns2 = rest[5]
        self.header_length = rest[6]
        self.title_length = rest[7]
        self.resources = []
        self.hrefs = []
        if data[48:52] == b'EXTH':
            # Optional EXTH metadata block starts at offset 48; the title
            # immediately follows it
            self.exth = EXTHHeader(data[48:])
            self.title = data[48 + self.exth.length:][:self.title_length].decode(self.encoding)
            # EXTH record 539 holds the container mime type
            self.is_image_container = self.exth[539] == 'application/image'
        else:
            self.exth = ' No EXTH header present '
            self.title = ''
            self.is_image_container = False
        # Anything after header + title should normally be NUL padding; count
        # the NULs so __str__ can flag unexpected trailing payload
        self.bytes_after_exth = data[self.header_length + self.title_length:]
        self.null_bytes_after_exth = len(self.bytes_after_exth) - len(self.bytes_after_exth.replace(b'\0', b''))

    def add_hrefs(self, data):
        '''Populate self.hrefs from the |-separated href record payload.'''
        # kindlegen inserts a trailing | after the last href, so filter out
        # the resulting empty entry
        self.hrefs = list(filter(None, data.decode('utf-8').split('|')))

    def __str__(self):
        # Human readable dump of every parsed field, for the debug output files
        ans = [('*'*10) + ' Container Header ' + ('*'*10)]
        a = ans.append
        a('Record size: %d' % self.record_size)
        a('Type: %d' % self.type)
        a('Total number of records in this container: %d' % self.count)
        a('Encoding: %s' % self.encoding)
        a('Unknowns1: %s' % self.unknowns1)
        a('Num of resource records: %d' % self.num_of_resource_records)
        a('Num of non-dummy resource records: %d' % self.num_of_non_dummy_resource_records)
        a('Offset to href record: %d' % self.offset_to_href_record)
        a('Unknowns2: %s' % self.unknowns2)
        a('Header length: %d' % self.header_length)
        a('Title Length: %s' % self.title_length)
        a('hrefs: %s' % self.hrefs)
        a('Null bytes after EXTH: %d' % self.null_bytes_after_exth)
        if len(self.bytes_after_exth) != self.null_bytes_after_exth:
            a('Non-null bytes present after EXTH header!!!!')
        return '\n'.join(ans) + '\n\n' + str(self.exth) + '\n\n' + ('Title: %s' % self.title)
2,724
Python
.py
55
40.581818
112
0.591729
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,494
plucker.py
kovidgoyal_calibre/src/calibre/ebooks/metadata/plucker.py
'''
Read meta information from Plucker pdb files.
'''

__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'

import struct
from datetime import datetime

from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.ebooks.pdb.plucker.reader import DATATYPE_METADATA, MIBNUM_TO_NAME, SectionHeader


def get_metadata(stream, extract_cover=True):
    '''
    Return metadata as a L{MetaInfo} object

    :param stream: seekable binary stream containing a Plucker pdb file
    :param extract_cover: accepted for API compatibility with the other
        metadata readers; Plucker metadata carries no cover so it is unused
    '''
    mi = MetaInformation(_('Unknown'), [_('Unknown')])
    stream.seek(0)

    pheader = PdbHeaderReader(stream)

    # Locate the metadata section; section 0 is skipped (it is not metadata).
    section_data = None
    for i in range(1, pheader.num_sections):
        raw_data = pheader.section_data(i)
        section_header = SectionHeader(raw_data)
        if section_header.type == DATATYPE_METADATA:
            # The 8 byte section header precedes the metadata records
            section_data = raw_data[8:]
            break

    if not section_data:
        return mi

    default_encoding = 'latin-1'
    record_count, = struct.unpack('>H', section_data[0:2])
    adv = 0
    title = None
    author = None
    pubdate = 0
    for i in range(record_count):
        try:
            # Each record: 16-bit type, 16-bit length (in 16-bit words)
            field_type, length = struct.unpack_from('>HH', section_data, 2 + adv)
        except struct.error:
            break

        # CharSet: MIB enum number of the text encoding
        if field_type == 1:
            val, = struct.unpack('>H', section_data[6+adv:8+adv])
            default_encoding = MIBNUM_TO_NAME.get(val, 'latin-1')
        # Author: slice out the 2*length byte payload (the previous code
        # indexed a single byte here, yielding an int that can never be
        # decoded as a string)
        elif field_type == 4:
            author = section_data[6+adv:6+adv+(2*length)]
        # Title: same fix as Author
        elif field_type == 5:
            title = section_data[6+adv:6+adv+(2*length)]
        # Publication Date: seconds since the epoch
        elif field_type == 6:
            pubdate, = struct.unpack('>I', section_data[6+adv:6+adv+4])

        adv += 2*length

    # Payloads are NUL padded bytes; strip the padding (with bytes arguments,
    # the previous str arguments raised TypeError) before decoding.
    if title:
        mi.title = title.replace(b'\0', b'').decode(default_encoding, 'replace')
    if author:
        author = author.replace(b'\0', b'').decode(default_encoding, 'replace')
        # NOTE(review): preserved from the original; MetaInformation normally
        # uses ``authors`` — confirm against callers before renaming.
        mi.author = author.split(',')
    mi.pubdate = datetime.fromtimestamp(pubdate)

    return mi
2,083
Python
.py
59
28.474576
94
0.624254
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,495
lit.py
kovidgoyal_calibre/src/calibre/ebooks/metadata/lit.py
__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'

'''
Support for reading the metadata from a LIT file.
'''

import io
import os

from calibre.ebooks.metadata.opf2 import OPF


def get_metadata(stream):
    '''
    Return book metadata parsed from the OPF embedded in a LIT file,
    with the best available cover image attached (if any).
    '''
    from calibre.ebooks.lit.reader import LitContainer
    from calibre.utils.logging import Log
    litfile = LitContainer(stream, Log())
    src = litfile.get_metadata().encode('utf-8')
    litfile = litfile._litfile
    opf = OPF(io.BytesIO(src), os.getcwd())
    mi = opf.to_book_metadata()
    covers = []
    for item in opf.iterguide():
        if 'cover' not in item.get('type', '').lower():
            continue
        ctype = item.get('type')
        href = item.get('href', '')
        # The manifest may store the href with & escaped as %26
        candidates = [href, href.replace('&', '%26')]
        # Use a distinct name here: the original shadowed the guide ``item``
        # being iterated in the outer loop.
        for manifest_item in litfile.manifest.values():
            if manifest_item.path in candidates:
                try:
                    covers.append((litfile.get_file('/data/'+manifest_item.internal), ctype))
                except Exception:
                    pass
                break
    # Largest image first; if the runner-up is the '-standard' variant of the
    # winner's guide type, prefer it instead.
    covers.sort(key=lambda x: len(x[0]), reverse=True)
    idx = 0
    if len(covers) > 1:
        if covers[1][1] == covers[0][1]+'-standard':
            idx = 1
    if covers:
        # Guard added: the original raised IndexError when no cover matched
        mi.cover_data = ('jpg', covers[idx][0])
    return mi
1,315
Python
.py
38
26.578947
76
0.574234
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,496
kfx.py
kovidgoyal_calibre/src/calibre/ebooks/metadata/kfx.py
#!/usr/bin/env python
# License: GPLv3 Copyright: 2015, Kovid Goyal <kovid at kovidgoyal.net>, John Howell <jhowell@acm.org>'

# Based on work of John Howell reversing the KFX format
# https://www.mobileread.com/forums/showpost.php?p=3176029&postcount=89

import re
import struct
import sys
from collections import defaultdict

from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.mobi.utils import decint
from calibre.utils.cleantext import clean_xml_chars
from calibre.utils.config_base import tweaks
from calibre.utils.date import parse_only_date
from calibre.utils.imghdr import identify
from calibre.utils.localization import canonicalize_lang
from polyglot.binary import as_base64_bytes, from_base64_bytes


class InvalidKFX(ValueError):
    # Raised when a container/entity magic number check fails
    pass


# magic numbers for data structures
CONTAINER_MAGIC = b'CONT'
ENTITY_MAGIC = b'ENTY'
ION_MAGIC = b'\xe0\x01\x00\xea'

# ION data types (comment shows equivalent python data type produced)
DT_BOOLEAN = 1  # True/False
DT_INTEGER = 2  # int
# str (using non-unicode to distinguish symbols from strings)
DT_PROPERTY = 7
DT_STRING = 8  # unicode
DT_STRUCT = 11  # tuple
DT_LIST = 12  # list
DT_OBJECT = 13  # dict of property/value pairs
DT_TYPED_DATA = 14  # type, name, value

# property names (non-unicode strings to distinguish them from ION strings in this program)
# These are place holders. The correct property names are unknown.
PROP_METADATA = b'P258'
PROP_METADATA2 = b'P490'
PROP_METADATA3 = b'P491'
PROP_METADATA_KEY = b'P492'
PROP_METADATA_VALUE = b'P307'
PROP_IMAGE = b'P417'

# Maps placeholder property names to the calibre metadata fields they carry
METADATA_PROPERTIES = {
    b'P10' : "languages",
    b'P153': "title",
    b'P154': "description",
    b'P222': "author",
    b'P232': "publisher",
}

COVER_KEY = "cover_image_base64"


def hexs(string, sep=' '):
    '''Return *string* (bytes or str) as sep-joined two digit hex values.'''
    if isinstance(string, bytes):
        string = bytearray(string)
    else:
        string = map(ord, string)
    return sep.join('%02x' % b for b in string)


class PackedData:

    '''
    Simplify unpacking of packed binary data structures
    '''

    def __init__(self, data):
        # data: the raw bytes; offset: current read position within it
        self.buffer = data
        self.offset = 0

    def unpack_one(self, fmt, advance=True):
        '''Unpack a single value with struct format *fmt* at the current offset.'''
        return self.unpack_multi(fmt, advance)[0]

    def unpack_multi(self, fmt, advance=True):
        '''Unpack a tuple of values; advance past them unless advance=False (peek).'''
        fmt = fmt.encode('ascii')
        result = struct.unpack_from(fmt, self.buffer, self.offset)
        if advance:
            self.advance(struct.calcsize(fmt))
        return result

    def extract(self, size):
        '''Return the next *size* raw bytes and advance past them.'''
        data = self.buffer[self.offset:self.offset + size]
        self.advance(size)
        return data

    def advance(self, size):
        self.offset += size

    def remaining(self):
        '''Number of unread bytes left in the buffer.'''
        return len(self.buffer) - self.offset


class PackedBlock(PackedData):

    '''
    Common header structure of container and entity blocks
    '''

    def __init__(self, data, magic):
        # Header layout: 4 byte magic, little-endian uint16 version,
        # little-endian uint32 total header length
        PackedData.__init__(self, data)
        self.magic = self.unpack_one('4s')
        if self.magic != magic:
            raise InvalidKFX('%s magic number is incorrect (%s)' % (magic, hexs(self.magic)))
        self.version = self.unpack_one('<H')
        self.header_len = self.unpack_one('<L')


class Container(PackedBlock):

    '''
    Container file containing data entities
    '''

    def __init__(self, data):
        self.data = data
        PackedBlock.__init__(self, data, CONTAINER_MAGIC)
        # Unknown data
        self.advance(8)
        # The entity table ends where the trailing ION data (ION_MAGIC) begins
        self.entities = []
        while self.unpack_one('4s', advance=False) != ION_MAGIC:
            entity_id, entity_type, entity_offset, entity_len = self.unpack_multi('<LLQQ')
            # Entity offsets are relative to the end of the container header
            entity_start = self.header_len + entity_offset
            self.entities.append(
                Entity(self.data[entity_start:entity_start + entity_len], entity_type, entity_id))

    def decode(self):
        '''Decode every entity; returns a list of (type, id, value) triples.'''
        return [entity.decode() for entity in self.entities]


class Entity(PackedBlock):

    '''
    Data entity inside a container
    '''

    def __init__(self, data, entity_type, entity_id):
        PackedBlock.__init__(self, data, ENTITY_MAGIC)
        self.entity_type = entity_type
        self.entity_id = entity_id
        # Payload follows the common block header
        self.entity_data = data[self.header_len:]

    def decode(self):
        '''
        Return (type_name, id_name, value) where value is the decoded ION
        structure, or the base64 of the raw payload when it is not ION.
        '''
        if PackedData(self.entity_data).unpack_one('4s') == ION_MAGIC:
            entity_value = PackedIon(self.entity_data).decode()
        else:
            entity_value = as_base64_bytes(self.entity_data)
        return (property_name(self.entity_type), property_name(self.entity_id), entity_value)


class PackedIon(PackedData):

    '''
    Packed structured binary data format used by KFX
    '''

    def __init__(self, data):
        PackedData.__init__(self, data)

    def decode(self):
        '''Decode a full ION blob: magic marker followed by one typed value.'''
        if self.unpack_one('4s') != ION_MAGIC:
            raise Exception('ION marker missing at start of data')
        return self.unpack_typed_value()

    def unpack_typed_value(self):
        '''
        Decode one ION value at the current offset and return its python
        equivalent (see the DT_* comments at the top of this file).
        Unknown types are skipped and decoded as None.
        '''
        # One command byte: high nibble is the type, low nibble the length;
        # a length nibble of 14 means a variable-length count follows
        cmd = self.unpack_one('B')
        data_type = cmd >> 4
        data_len = cmd & 0x0f
        if data_len == 14:
            data_len = self.unpack_number()

        # print('cmd=%02x, len=%s: %s' % (cmd, data_len, hexs(self.buffer[self.offset:][:data_len])))

        if data_type == DT_BOOLEAN:
            return data_len != 0  # length is actually value

        if data_type == DT_INTEGER:
            return self.unpack_unsigned_int(data_len)

        if data_type == DT_PROPERTY:
            return property_name(self.unpack_unsigned_int(data_len))

        if data_type == DT_STRING:
            return self.extract(data_len).decode('utf8')

        if data_type == DT_STRUCT or data_type == DT_LIST:
            # Nested values are decoded by recursing on the sliced payload
            ion = PackedIon(self.extract(data_len))
            result = []
            while ion.remaining():
                result.append(ion.unpack_typed_value())
            if data_type == DT_STRUCT:
                result = tuple(result)
            return result

        if data_type == DT_OBJECT:
            # Sequence of (property-number, value) pairs -> dict
            ion = PackedIon(self.extract(data_len))
            result = {}
            while (ion.remaining()):
                symbol = property_name(ion.unpack_number())
                result[symbol] = ion.unpack_typed_value()
            return result

        if data_type == DT_TYPED_DATA:
            # type and name numbers are read and discarded; only the value
            # is kept
            ion = PackedIon(self.extract(data_len))
            ion.unpack_number()
            ion.unpack_number()
            return ion.unpack_typed_value()

        # ignore unknown types
        self.advance(data_len)
        return None

    def unpack_number(self):
        # variable length numbers, MSB first, 7 bits per byte, last byte is
        # flagged by MSB set
        raw = self.buffer[self.offset:self.offset+10]
        number, consumed = decint(raw)
        self.advance(consumed)
        return number

    def unpack_unsigned_int(self, length):
        # unsigned big-endian (MSB first); left-pad to 8 bytes for '>Q'
        return struct.unpack_from(b'>Q', b'\0' * (8 - length) + self.extract(length))[0]


def property_name(property_number):
    # This should be changed to translate property numbers to the proper
    # strings using a symbol table
    return b"P%d" % property_number


def extract_metadata(container_data):
    '''
    Collect book metadata from decoded container entities into a
    defaultdict(list) keyed by calibre field names (plus COVER_KEY).
    '''
    metadata = defaultdict(list)

    # locate book metadata within the container data structures
    metadata_entity = {}
    for entity_type, entity_id, entity_value in container_data:
        if entity_type == PROP_METADATA:
            metadata_entity = entity_value
        elif entity_type == PROP_METADATA2:
            if entity_value is not None:
                # Newer layout: metadata grouped as key/value records
                for value1 in entity_value[PROP_METADATA3]:
                    for meta in value1[PROP_METADATA]:
                        metadata[meta[PROP_METADATA_KEY]].append(meta[PROP_METADATA_VALUE])
        elif entity_type == PROP_IMAGE and COVER_KEY not in metadata:
            # assume first image is the cover
            metadata[COVER_KEY] = entity_value

    # Older layout: single metadata object; values found via PROP_METADATA2
    # take precedence over these
    for key, value in metadata_entity.items():
        if key in METADATA_PROPERTIES and METADATA_PROPERTIES[key] not in metadata:
            metadata[METADATA_PROPERTIES[key]].append(value)

    return metadata


def dump_metadata(m):
    '''Debug helper: pretty-print metadata with the cover reduced to a bool.'''
    d = dict(m)
    d[COVER_KEY] = bool(d.get(COVER_KEY))
    from pprint import pprint
    pprint(d)


def read_book_key_kfx(stream, read_cover=True):
    ' Read the metadata.kfx file that is found in the sdr book folder for KFX files '
    # read_cover is accepted for interface parity with read_metadata_kfx but
    # is not used here
    c = Container(stream.read())
    m = extract_metadata(c.decode())

    def val(x):
        return m[x][0] if x in m else ''
    return (val('content_id') or val('ASIN')), val('cde_content_type')


def read_metadata_kfx(stream, read_cover=True):
    ' Read the metadata.kfx file that is found in the sdr book folder for KFX files '
    c = Container(stream.read())
    m = extract_metadata(c.decode())
    # dump_metadata(m)

    def has(x):
        return m[x] and m[x][0]

    def get(x, single=True):
        # Return the first (or all) values for field x, XML-sanitized
        ans = m[x]
        if single:
            ans = clean_xml_chars(ans[0]) if ans else ''
        else:
            ans = [clean_xml_chars(y) for y in ans]
        return ans

    title = get('title') or _('Unknown')
    authors = get('author', False) or [_('Unknown')]
    # Matches "Last, First" style names so they can be flipped below
    auth_pat = re.compile(r'([^,]+?)\s*,\s+([^,]+)$')

    def fix_author(x):
        # KFX stores authors in sort order; convert "Last, First" back to
        # "First Last" unless the user keeps author sort identical to author
        if tweaks['author_sort_copy_method'] != 'copy':
            m = auth_pat.match(x.strip())
            if m is not None:
                return m.group(2) + ' ' + m.group(1)
        return x

    unique_authors = []  # remove duplicates while retaining order
    for f in [fix_author(x) for x in authors]:
        if f not in unique_authors:
            unique_authors.append(f)

    mi = Metadata(title, unique_authors)
    if has('author'):
        mi.author_sort = get('author')
    if has('ASIN'):
        mi.set_identifier('mobi-asin', get('ASIN'))
    elif has('content_id'):
        mi.set_identifier('mobi-asin', get('content_id'))
    if has('languages'):
        langs = list(filter(None, (canonicalize_lang(x) for x in get('languages', False))))
        if langs:
            mi.languages = langs
    if has('issue_date'):
        try:
            mi.pubdate = parse_only_date(get('issue_date'))
        except Exception:
            pass
    if has('publisher') and get('publisher') != 'Unknown':
        mi.publisher = get('publisher')
    if read_cover and m[COVER_KEY]:
        # Cover is stored base64 encoded; only attach it if it identifies as
        # a valid image
        try:
            data = from_base64_bytes(m[COVER_KEY])
            fmt, w, h = identify(data)
        except Exception:
            w, h, fmt = 0, 0, None
        if fmt and w > -1 and h > -1:
            mi.cover_data = (fmt, data)

    return mi


if __name__ == '__main__':
    from calibre import prints
    with open(sys.argv[-1], 'rb') as f:
        mi = read_metadata_kfx(f)
    prints(str(mi))
10,819
Python
.py
273
32.043956
103
0.623039
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,497
topaz.py
kovidgoyal_calibre/src/calibre/ebooks/metadata/topaz.py
__license__ = 'GPL 3' __copyright__ = '2010, Greg Riker <griker@hotmail.com>' __docformat__ = 'restructuredtext en' ''' Read/write metadata from Amazon's topaz format ''' import io import numbers import sys from struct import pack from calibre import force_unicode from calibre.ebooks.metadata import MetaInformation from polyglot.builtins import codepoint_to_chr, int_to_byte def is_dkey(x): q = b'dkey' if isinstance(x, bytes) else 'dkey' return x == q class StringIO(io.StringIO): def write(self, x): if isinstance(x, bytes): x = x.decode('iso-8859-1') return io.StringIO.write(self, x) class StreamSlicer: def __init__(self, stream, start=0, stop=None): self._stream = stream self.start = start if stop is None: stream.seek(0, 2) stop = stream.tell() self.stop = stop self._len = stop - start def __len__(self): return self._len def __getitem__(self, key): stream = self._stream base = self.start if isinstance(key, numbers.Integral): stream.seek(base + key) return stream.read(1) if isinstance(key, slice): start, stop, stride = key.indices(self._len) if stride < 0: start, stop = stop, start size = stop - start if size <= 0: return b"" stream.seek(base + start) data = stream.read(size) if stride != 1: data = data[::stride] return data raise TypeError("stream indices must be integers") def __setitem__(self, key, value): stream = self._stream base = self.start if isinstance(key, numbers.Integral): if len(value) != 1: raise ValueError("key and value lengths must match") stream.seek(base + key) return stream.write(value) if isinstance(key, slice): start, stop, stride = key.indices(self._len) if stride < 0: start, stop = stop, start size = stop - start if stride != 1: value = value[::stride] if len(value) != size: raise ValueError("key and value lengths must match") stream.seek(base + start) return stream.write(value) raise TypeError("stream indices must be integers") def update(self, data_blocks): # Rewrite the stream stream = self._stream base = self.start stream.seek(base) 
self._stream.truncate(base) for block in data_blocks: stream.write(block) def truncate(self, value): self._stream.truncate(value) class MetadataUpdater: def __init__(self, stream): self.stream = stream self.data = StreamSlicer(stream) sig = self.data[:4] if not sig.startswith(b'TPZ'): raise ValueError("'%s': Not a Topaz file" % getattr(stream, 'name', 'Unnamed stream')) offset = 4 self.header_records, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed self.topaz_headers, self.th_seq = self.get_headers(offset) # First integrity test - metadata header if 'metadata' not in self.topaz_headers: raise ValueError("'%s': Invalid Topaz format - no metadata record" % getattr(stream, 'name', 'Unnamed stream')) # Second integrity test - metadata body md_offset = self.topaz_headers['metadata']['blocks'][0]['offset'] md_offset += self.base if self.data[md_offset+1:md_offset+9] != b'metadata': raise ValueError("'%s': Damaged metadata record" % getattr(stream, 'name', 'Unnamed stream')) def book_length(self): ''' convenience method for retrieving book length ''' self.get_original_metadata() if 'bookLength' in self.metadata: return int(self.metadata['bookLength']) return 0 def decode_vwi(self, byts): pos, val = 0, 0 done = False byts = bytearray(byts) while pos < len(byts) and not done: b = byts[pos] pos += 1 if (b & 0x80) == 0: done = True b &= 0x7F val <<= 7 val |= b if done: break return val, pos def dump_headers(self): ''' Diagnostic ''' print("\ndump_headers():") for tag in self.topaz_headers: print("%s: " % (tag)) num_recs = len(self.topaz_headers[tag]['blocks']) print(" num_recs: %d" % num_recs) if num_recs: print(" starting offset: 0x%x" % self.topaz_headers[tag]['blocks'][0]['offset']) def dump_hex(self, src, length=16): ''' Diagnostic ''' FILTER=''.join([(len(repr(codepoint_to_chr(x)))==3) and codepoint_to_chr(x) or '.' 
for x in range(256)]) N=0 result='' while src: s,src = src[:length],src[length:] hexa = ' '.join(["%02X"%ord(x) for x in s]) s = s.translate(FILTER) result += "%04X %-*s %s\n" % (N, length*3, hexa, s) N+=length print(result) def dump_metadata(self): ''' Diagnostic ''' for tag in self.metadata: print(f'{tag}: {repr(self.metadata[tag])}') def encode_vwi(self,value): ans = [] multi_byte = (value > 0x7f) while value: b = value & 0x7f value >>= 7 if value == 0: if multi_byte: ans.append(b|0x80) if ans[-1] == 0xFF: ans.append(0x80) if len(ans) == 4: return pack('>BBBB',ans[3],ans[2],ans[1],ans[0]).decode('iso-8859-1') elif len(ans) == 3: return pack('>BBB',ans[2],ans[1],ans[0]).decode('iso-8859-1') elif len(ans) == 2: return pack('>BB',ans[1],ans[0]).decode('iso-8859-1') else: return pack('>B', b).decode('iso-8859-1') else: if len(ans): ans.append(b|0x80) else: ans.append(b) # If value == 0, return 0 return pack('>B', 0x0).decode('iso-8859-1') def generate_dkey(self): for x in self.topaz_headers: if is_dkey(self.topaz_headers[x]['tag']): if self.topaz_headers[x]['blocks']: offset = self.base + self.topaz_headers[x]['blocks'][0]['offset'] len_uncomp = self.topaz_headers[x]['blocks'][0]['len_uncomp'] break else: return None dkey = self.topaz_headers[x] dks = StringIO() dks.write(self.encode_vwi(len(dkey['tag']))) offset += 1 dks.write(dkey['tag']) offset += len('dkey') dks.write('\0') offset += 1 dks.write(self.data[offset:offset + len_uncomp].decode('iso-8859-1')) return dks.getvalue().encode('iso-8859-1') def get_headers(self, offset): # Build a dict of topaz_header records, list of order topaz_headers = {} th_seq = [] for x in range(self.header_records): offset += 1 taglen, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed tag = self.data[offset:offset+taglen] offset += taglen num_vals, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed blocks = {} for val in range(num_vals): hdr_offset, consumed = 
self.decode_vwi(self.data[offset:offset+4]) offset += consumed len_uncomp, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed len_comp, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed blocks[val] = dict(offset=hdr_offset,len_uncomp=len_uncomp,len_comp=len_comp) topaz_headers[tag] = dict(blocks=blocks) th_seq.append(tag) self.eoth = self.data[offset] offset += 1 self.base = offset return topaz_headers, th_seq def generate_metadata_stream(self): ms = StringIO() ms.write(self.encode_vwi(len(self.md_header['tag'])).encode('iso-8859-1')) ms.write(self.md_header['tag']) ms.write(int_to_byte(self.md_header['flags'])) ms.write(int_to_byte(len(self.metadata))) # Add the metadata fields. # for tag in self.metadata: for tag in self.md_seq: ms.write(self.encode_vwi(len(tag)).encode('iso-8859-1')) ms.write(tag) ms.write(self.encode_vwi(len(self.metadata[tag])).encode('iso-8859-1')) ms.write(self.metadata[tag]) return ms.getvalue() def get_metadata(self): ''' Return MetaInformation with title, author''' self.get_original_metadata() title = force_unicode(self.metadata['Title'], 'utf-8') authors = force_unicode(self.metadata['Authors'], 'utf-8').split(';') return MetaInformation(title, authors) def get_original_metadata(self): offset = self.base + self.topaz_headers['metadata']['blocks'][0]['offset'] self.md_header = {} taglen, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed self.md_header['tag'] = self.data[offset:offset+taglen] offset += taglen self.md_header['flags'] = ord(self.data[offset:offset+1]) offset += 1 self.md_header['num_recs'] = ord(self.data[offset:offset+1]) offset += 1 # print "self.md_header: %s" % self.md_header self.metadata = {} self.md_seq = [] for x in range(self.md_header['num_recs']): taglen, consumed = self.decode_vwi(self.data[offset:offset+4]) offset += consumed tag = self.data[offset:offset+taglen] offset += taglen md_len, consumed = self.decode_vwi(self.data[offset:offset+4]) 
offset += consumed metadata = self.data[offset:offset + md_len] offset += md_len self.metadata[tag] = metadata self.md_seq.append(tag) def regenerate_headers(self, updated_md_len): original_md_len = self.topaz_headers['metadata']['blocks'][0]['len_uncomp'] original_md_offset = self.topaz_headers['metadata']['blocks'][0]['offset'] delta = updated_md_len - original_md_len # Copy the first 5 bytes of the file: sig + num_recs ths = io.StringIO() ths.write(self.data[:5]) # Rewrite the offsets for hdr_offsets > metadata offset for tag in self.th_seq: ths.write('c') ths.write(self.encode_vwi(len(tag))) ths.write(tag) if self.topaz_headers[tag]['blocks']: ths.write(self.encode_vwi(len(self.topaz_headers[tag]['blocks']))) for block in self.topaz_headers[tag]['blocks']: b = self.topaz_headers[tag]['blocks'][block] if b['offset'] <= original_md_offset: ths.write(self.encode_vwi(b['offset'])) else: ths.write(self.encode_vwi(b['offset'] + delta)) if tag == 'metadata': ths.write(self.encode_vwi(updated_md_len)) else: ths.write(self.encode_vwi(b['len_uncomp'])) ths.write(self.encode_vwi(b['len_comp'])) else: ths.write(self.encode_vwi(0)) self.original_md_start = original_md_offset + self.base self.original_md_len = original_md_len return ths.getvalue().encode('iso-8859-1') def update(self,mi): # Collect the original metadata self.get_original_metadata() try: from calibre.ebooks.conversion.config import load_defaults prefs = load_defaults('mobi_output') pas = prefs.get('prefer_author_sort', False) except: pas = False if mi.author_sort and pas: authors = mi.author_sort self.metadata['Authors'] = authors.encode('utf-8') elif mi.authors: authors = '; '.join(mi.authors) self.metadata['Authors'] = authors.encode('utf-8') self.metadata['Title'] = mi.title.encode('utf-8') updated_metadata = self.generate_metadata_stream() # Skip tag_len, tag, extra prefix = len('metadata') + 2 um_buf_len = len(updated_metadata) - prefix head = self.regenerate_headers(um_buf_len) # Chunk1: self.base -> 
original metadata start # Chunk2: original metadata end -> eof chunk1 = self.data[self.base:self.original_md_start] chunk2 = self.data[prefix + self.original_md_start + self.original_md_len:] self.stream.seek(0) self.stream.truncate(0) # Write the revised stream self.stream.write(head) self.stream.write('d') self.stream.write(chunk1) self.stream.write(updated_metadata) self.stream.write(chunk2) def get_metadata(stream): mu = MetadataUpdater(stream) return mu.get_metadata() def set_metadata(stream, mi): mu = MetadataUpdater(stream) mu.update(mi) return if __name__ == '__main__': if False: # Test get_metadata() print(get_metadata(open(sys.argv[1], 'rb'))) else: # Test set_metadata() stream = io.BytesIO() with open(sys.argv[1], 'rb') as data: stream.write(data.read()) mi = MetaInformation(title="Updated Title", authors=['Author, Random']) set_metadata(stream, mi) # Write the result tokens = sys.argv[1].rpartition('.') with open(tokens[0]+'-updated' + '.' + tokens[2],'wb') as updated_data: updated_data.write(stream.getvalue())
14,349
Python
.py
346
30.433526
123
0.552775
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,498
txt.py
kovidgoyal_calibre/src/calibre/ebooks/metadata/txt.py
__license__ = 'GPL v3' __copyright__ = '2009, John Schember <john@nachtimwald.com>' ''' Read meta information from TXT files ''' import os import re from calibre.ebooks.metadata import MetaInformation def get_metadata(stream, extract_cover=True): ''' Return metadata as a L{MetaInfo} object ''' name = getattr(stream, 'name', '').rpartition('.')[0] if name: name = os.path.basename(name) mi = MetaInformation(name or _('Unknown'), [_('Unknown')]) stream.seek(0) mdata = '' for x in range(0, 4): line = stream.readline().decode('utf-8', 'replace') if not line: break else: mdata += line mdata = mdata[:1024] mo = re.search('(?u)^[ ]*(?P<title>.+)[ ]*(\n{3}|(\r\n){3}|\r{3})[ ]*(?P<author>.+)[ ]*(\n|\r\n|\r)$', mdata) if mo is not None: mi.title = mo.group('title') mi.authors = mo.group('author').split(',') return mi
953
Python
.py
30
26.333333
113
0.574398
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)
27,499
pdf.py
kovidgoyal_calibre/src/calibre/ebooks/metadata/pdf.py
__license__ = 'GPL v3' __copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>' '''Read meta information from PDF files''' import os import re import shutil import subprocess from functools import partial from calibre import prints from calibre.constants import iswindows from calibre.ebooks.metadata import MetaInformation, check_doi, check_isbn, string_to_authors from calibre.ptempfile import TemporaryDirectory from calibre.utils.ipc.simple_worker import WorkerError, fork_job from polyglot.builtins import iteritems def get_tools(): from calibre.ebooks.pdf.pdftohtml import PDFTOHTML base = os.path.dirname(PDFTOHTML) suffix = '.exe' if iswindows else '' pdfinfo = os.path.join(base, 'pdfinfo') + suffix pdftoppm = os.path.join(base, 'pdftoppm') + suffix return pdfinfo, pdftoppm def read_info(outputdir, get_cover): ''' Read info dict and cover from a pdf file named src.pdf in outputdir. Note that this function changes the cwd to outputdir and is therefore not thread safe. Run it using fork_job. This is necessary as there is no safe way to pass unicode paths via command line arguments. This also ensures that if poppler crashes, no stale file handles are left for the original file, only for src.pdf.''' os.chdir(outputdir) pdfinfo, pdftoppm = get_tools() ans = {} try: raw = subprocess.check_output([pdfinfo, '-enc', 'UTF-8', '-isodates', 'src.pdf']) except subprocess.CalledProcessError as e: prints('pdfinfo errored out with return code: %d'%e.returncode) return None try: info_raw = raw.decode('utf-8') except UnicodeDecodeError: prints('pdfinfo returned no UTF-8 data') return None for line in info_raw.splitlines(): if ':' not in line: continue field, val = line.partition(':')[::2] val = val.strip() if field and val: ans[field] = val.strip() # Now read XMP metadata # Versions of poppler before 0.47.0 used to print out both the Info dict and # XMP metadata packet together. 
However, since that changed in # https://cgit.freedesktop.org/poppler/poppler/commit/?id=c91483aceb1b640771f572cb3df9ad707e5cad0d # we can no longer rely on it. try: raw = subprocess.check_output([pdfinfo, '-meta', 'src.pdf']).strip() except subprocess.CalledProcessError as e: prints('pdfinfo failed to read XML metadata with return code: %d'%e.returncode) else: parts = re.split(br'^Metadata:', raw, 1, flags=re.MULTILINE) if len(parts) > 1: # old poppler < 0.47.0 raw = parts[1].strip() if raw: ans['xmp_metadata'] = raw if get_cover: try: subprocess.check_call([pdftoppm, '-singlefile', '-jpeg', '-cropbox', 'src.pdf', 'cover']) except subprocess.CalledProcessError as e: prints('pdftoppm errored out with return code: %d'%e.returncode) return ans def page_images(pdfpath, outputdir='.', first=1, last=1, image_format='jpeg', prefix='page-images'): pdftoppm = get_tools()[1] outputdir = os.path.abspath(outputdir) args = {} if iswindows: args['creationflags'] = subprocess.HIGH_PRIORITY_CLASS | subprocess.CREATE_NO_WINDOW try: subprocess.check_call([ pdftoppm, '-cropbox', '-' + image_format, '-f', str(first), '-l', str(last), pdfpath, os.path.join(outputdir, prefix) ], **args) except subprocess.CalledProcessError as e: raise ValueError('Failed to render PDF, pdftoppm errorcode: %s'%e.returncode) def is_pdf_encrypted(path_to_pdf): pdfinfo = get_tools()[0] raw = subprocess.check_output([pdfinfo, path_to_pdf]) q = re.search(br'^Encrypted:\s*(\S+)', raw, flags=re.MULTILINE) if q is not None: return q.group(1) == b'yes' return False def get_metadata(stream, cover=True): with TemporaryDirectory('_pdf_metadata_read') as pdfpath: stream.seek(0) with open(os.path.join(pdfpath, 'src.pdf'), 'wb') as f: shutil.copyfileobj(stream, f) try: res = fork_job('calibre.ebooks.metadata.pdf', 'read_info', (pdfpath, bool(cover))) except WorkerError as e: prints(e.orig_tb) raise RuntimeError('Failed to run pdfinfo') info = res['result'] with open(res['stdout_stderr'], 'rb') as f: raw = 
f.read().strip() if raw: prints(raw) if info is None: raise ValueError('Could not read info dict from PDF') covpath = os.path.join(pdfpath, 'cover.jpg') cdata = None if cover and os.path.exists(covpath): with open(covpath, 'rb') as f: cdata = f.read() title = info.get('Title', None) or _('Unknown') au = info.get('Author', None) if au is None: au = [_('Unknown')] else: au = string_to_authors(au) mi = MetaInformation(title, au) # if isbn is not None: # mi.isbn = isbn creator = info.get('Creator', None) if creator: mi.book_producer = creator keywords = info.get('Keywords', None) mi.tags = [] if keywords: mi.tags = [x.strip() for x in keywords.split(',')] isbn = [check_isbn(x) for x in mi.tags if check_isbn(x)] if isbn: mi.isbn = isbn = isbn[0] mi.tags = [x for x in mi.tags if check_isbn(x) != isbn] subject = info.get('Subject', None) if subject: mi.tags.insert(0, subject) if 'xmp_metadata' in info: from calibre.ebooks.metadata.xmp import consolidate_metadata mi = consolidate_metadata(mi, info) # Look for recognizable identifiers in the info dict, if they were not # found in the XMP metadata for scheme, check_func in iteritems({'doi':check_doi, 'isbn':check_isbn}): if scheme not in mi.get_identifiers(): for k, v in iteritems(info): if k != 'xmp_metadata': val = check_func(v) if val: mi.set_identifier(scheme, val) break if cdata: mi.cover_data = ('jpeg', cdata) return mi get_quick_metadata = partial(get_metadata, cover=False) from calibre.utils.podofo import set_metadata as podofo_set_metadata def set_metadata(stream, mi): stream.seek(0) return podofo_set_metadata(stream, mi)
6,525
Python
.py
158
33.734177
102
0.634112
kovidgoyal/calibre
19,243
2,250
4
GPL-3.0
9/5/2024, 5:13:50 PM (Europe/Amsterdam)