id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27,700 | caches.py | kovidgoyal_calibre/src/calibre/library/caches.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import locale
import time
import traceback
from contextlib import suppress
from datetime import timedelta
from itertools import repeat
from threading import Thread
from calibre import force_unicode, prints
from calibre.db.search import CONTAINS_MATCH, EQUALS_MATCH, REGEXP_MATCH, _match
from calibre.ebooks.metadata import author_to_author_sort, title_sort
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.utils.config import prefs, tweaks
from calibre.utils.date import UNDEFINED_DATE, clean_date_for_sort, now, parse_date
from calibre.utils.icu import lower as icu_lower
from calibre.utils.localization import _, canonicalize_lang, get_udc, lang_map
from calibre.utils.search_query_parser import ParseException, SearchQueryParser
from polyglot.builtins import cmp, iteritems, itervalues, string_or_bytes
class MetadataBackup(Thread):  # {{{
    '''
    Continuously backup changed metadata into OPF files
    in the book directory. This class runs in its own
    thread and makes sure that the actual file write happens in the
    GUI thread to prevent Windows' file locking from causing problems.
    '''

    def __init__(self, db):
        '''
        :param db: the library database. The db methods used here are each
            wrapped in a FunctionDispatcher so the call actually runs in the
            GUI thread while this worker thread blocks on the result.
        '''
        Thread.__init__(self)
        self.daemon = True  # do not block interpreter exit
        self.db = db
        self.keep_running = True
        from calibre.gui2 import FunctionDispatcher
        # Route all db/file access through the GUI thread via dispatchers
        self.do_write = FunctionDispatcher(self.write)
        self.get_metadata_for_dump = FunctionDispatcher(db.get_metadata_for_dump)
        self.clear_dirtied = FunctionDispatcher(db.clear_dirtied)
        self.set_dirtied = FunctionDispatcher(db.dirtied)

    def stop(self):
        # Ask the worker loop to exit at its next check point
        self.keep_running = False

    def break_cycles(self):
        # Break cycles so that this object doesn't hold references to db
        self.do_write = self.get_metadata_for_dump = self.clear_dirtied = \
            self.set_dirtied = self.db = None

    def run(self):
        # Worker loop: process one dirtied book at a time — fetch its
        # metadata, serialize to OPF and write it out. Each fallible step
        # is retried once after a pause before the book is given up on.
        while self.keep_running:
            try:
                time.sleep(2)  # Limit to one book per two seconds
                (id_, sequence) = self.db.get_a_dirtied_book()
                if id_ is None:
                    continue
                # print 'writer thread', id_, sequence
            except:
                # Happens during interpreter shutdown
                break
            if not self.keep_running:
                break

            try:
                path, mi, sequence = self.get_metadata_for_dump(id_)
            except:
                prints('Failed to get backup metadata for id:', id_, 'once')
                traceback.print_exc()
                time.sleep(2)
                try:
                    path, mi, sequence = self.get_metadata_for_dump(id_)
                except:
                    prints('Failed to get backup metadata for id:', id_, 'again, giving up')
                    traceback.print_exc()
                    continue

            if mi is None:
                # Nothing to dump for this book; just mark it clean
                self.clear_dirtied(id_, sequence)
                continue
            if not self.keep_running:
                break

            # Give the GUI thread a chance to do something. Python threads don't
            # have priorities, so this thread would naturally keep the processor
            # until some scheduling event happens. The sleep makes such an event
            time.sleep(0.1)
            try:
                raw = metadata_to_opf(mi)
            except:
                prints('Failed to convert to opf for id:', id_)
                traceback.print_exc()
                continue

            if not self.keep_running:
                break

            time.sleep(0.1)  # Give the GUI thread a chance to do something

            try:
                self.do_write(path, raw)
            except:
                prints('Failed to write backup metadata for id:', id_, 'once')
                time.sleep(2)
                try:
                    self.do_write(path, raw)
                except:
                    prints('Failed to write backup metadata for id:', id_,
                            'again, giving up')
                    continue

            # Only clear the dirtied flag once the OPF is safely written
            self.clear_dirtied(id_, sequence)
        self.break_cycles()

    def write(self, path, raw):
        # Executed in the GUI thread (via the do_write dispatcher)
        with open(path, 'wb') as f:
            f.write(raw)

# }}}
# Global utility function for get_match here and in gui2/library.py
# This is a global for performance
pref_use_primary_find_in_search = False  # module-level cache of the preference


def set_use_primary_find_in_search(toWhat):
    # Update the cached copy of the 'use_primary_find_in_search' preference;
    # readers use the module global instead of a prefs lookup for speed.
    global pref_use_primary_find_in_search
    pref_use_primary_find_in_search = toWhat
# Localized truthy/falsy vocabularies used by force_to_bool(), built once at
# import time so per-call checks are simple set lookups.
y, c, n, u = map(icu_lower, (_('yes'), _('checked'), _('no'), _('unchecked')))
yes_vals = {y, c, 'true'}
no_vals = {n, u, 'false'}
del y, c, n, u  # keep the module namespace clean
def force_to_bool(val):
    '''
    Best-effort coercion of *val* to a boolean.

    Strings/bytes are lowercased and checked against the localized yes/no
    vocabularies, then parsed as integers; an empty or unparseable string
    yields None. Non-string values are returned unchanged.
    '''
    if not isinstance(val, (bytes, str)):
        return val
    if isinstance(val, bytes):
        val = force_unicode(val)
    try:
        text = icu_lower(val)
        if not text:
            return None
        if text in yes_vals:
            return True
        if text in no_vals:
            return False
        return bool(int(text))
    except:
        # Anything unparseable is treated as "unknown"
        return None
class CacheRow(list):  # {{{

    '''
    A single book record stored as a list of field values. Composite
    (template) column values and the series-sort value are computed lazily
    on first access instead of when the row is loaded.
    '''

    def __init__(self, db, composites, datetimes, val, series_col, series_sort_col):
        '''
        :param db: library database, used to lazily compute composites
        :param composites: mapping of record index -> composite field key
        :param datetimes: record indices whose raw values are date strings
        :param val: the raw record (list) from the meta2 view
        :param series_col: record index of the series column
        :param series_sort_col: record index of the computed series sort
        '''
        from calibre.db.tables import c_parse
        self.db = db
        self._composites = composites
        # Parse date strings once up front; normalize UNDEFINED_DATE to None
        for num in datetimes:
            val[num] = c_parse(val[num])
            if val[num] is UNDEFINED_DATE:
                val[num] = None
        list.__init__(self, val)
        self._must_do = len(composites) > 0
        self._series_col = series_col
        self._series_sort_col = series_sort_col
        self._series_sort = None

    def __getitem__(self, col):
        if self._must_do:
            is_comp = False
            if isinstance(col, slice):
                # BUG FIX: the old code used col.stop as the range *step*
                # (``step = 1 if col.stop is None else col.stop``), which
                # skipped composite columns inside the slice and crashed
                # with TypeError on open-ended slices (stop is None).
                # slice.indices() yields a normalized (start, stop, step)
                # for this list's length, handling None and negatives.
                for c in range(*col.indices(len(self))):
                    if c in self._composites:
                        is_comp = True
                        break
            elif col in self._composites:
                is_comp = True
            if is_comp:
                id_ = list.__getitem__(self, 0)
                self._must_do = False
                mi = self.db.get_metadata(id_, index_is_id=True,
                                          get_user_categories=False)
                for c in self._composites:
                    self[c] = mi.get(self._composites[c])
        if col == self._series_sort_col and self._series_sort is None:
            if self[self._series_col]:
                self._series_sort = title_sort(self[self._series_col])
                self[self._series_sort_col] = self._series_sort
            else:
                self._series_sort = ''
                self[self._series_sort_col] = ''
        return list.__getitem__(self, col)

    def __getslice__(self, i, j):
        # Python 2 legacy protocol; delegate to __getitem__ with a slice
        return self.__getitem__(slice(i, j))

    def refresh_composites(self):
        # Invalidate cached composite values; they are recomputed lazily
        for c in self._composites:
            self[c] = None
        self._must_do = True

# }}}
class ResultCache(SearchQueryParser): # {{{
'''
Stores sorted and filtered metadata in memory.
'''
def __init__(self, FIELD_MAP, field_metadata, db_prefs=None):
    '''
    :param FIELD_MAP: mapping of field names to record (column) indices
    :param field_metadata: FieldMetadata describing every column
    :param db_prefs: database preferences object (may be None)
    '''
    self.FIELD_MAP = FIELD_MAP
    self.db_prefs = db_prefs
    self.composites = {}   # rec_index -> composite field key
    self.datetimes = set()  # rec indices holding datetime values
    self.udc = get_udc()
    for key in field_metadata:
        dt = field_metadata[key]['datatype']
        if dt == 'composite':
            self.composites[field_metadata[key]['rec_index']] = key
        elif dt == 'datetime':
            self.datetimes.add(field_metadata[key]['rec_index'])
    self.series_col = field_metadata['series']['rec_index']
    self.series_sort_col = field_metadata['series_sort']['rec_index']
    # _data is indexed by book id (with None holes); _map is the sorted
    # list of ids and _map_filtered the subset matching the current search
    self._data = []
    self._map = self._map_filtered = []
    self.first_sort = True
    self.search_restriction = self.base_restriction = ''
    self.base_restriction_name = self.search_restriction_name = ''
    self.search_restriction_book_count = 0
    self.marked_ids_dict = {}
    self.field_metadata = field_metadata
    self.all_search_locations = field_metadata.get_search_terms()
    SearchQueryParser.__init__(self, self.all_search_locations, optimize=True)
    self.build_date_relop_dict()
    self.build_numeric_relop_dict()
    # Do this here so the var get updated when a library changes
    global pref_use_primary_find_in_search
    pref_use_primary_find_in_search = prefs['use_primary_find_in_search']
    self._uuid_column_index = self.FIELD_MAP['uuid']
    self._uuid_map = {}  # uuid -> book id, kept in sync by set()/remove()
def break_cycles(self):
    # Drop every reference that could keep the database object alive
    for attr in ('_data', 'field_metadata', 'FIELD_MAP',
                 'numeric_search_relops', 'date_search_relops',
                 'db_prefs', 'all_search_locations'):
        setattr(self, attr, None)
    self.sqp_change_locations([])
def __getitem__(self, row):
    # Visible row numbers index the filtered map, which holds book ids
    book_id = self._map_filtered[row]
    return self._data[book_id]
def __len__(self):
    # Number of currently visible (filtered) rows
    return len(self._map_filtered)
def __iter__(self):
    # Yield records in the current filtered/sorted order
    for book_id in self._map_filtered:
        yield self._data[book_id]
def iterall(self):
    # All live records, skipping the None holes left by deleted books
    yield from (rec for rec in self._data if rec is not None)
def iterallids(self):
    # Ids of all live records, regardless of the current filter
    id_col = self.FIELD_MAP['id']
    for rec in self.iterall():
        yield rec[id_col]
# Search functions {{{
def universal_set(self):
    # Every live book id; position 0 of each record is the id
    return {rec[0] for rec in self._data if rec is not None}
def change_search_locations(self, locations):
    # Keep the query parser and our cached location list in sync
    self.sqp_change_locations(locations)
    self.all_search_locations = locations
def build_date_relop_dict(self):
    '''
    Because the database dates have time in them, we can't use direct
    comparisons even when field_count == 3. The query has time = 0, but
    the database object has time == something. As such, a complete compare
    will almost never be correct.

    Each relop is called as relop(db_date, query_date, field_count) where
    field_count is how many of year/month/day the user actually specified.
    '''
    def relop_eq(db, query, field_count):
        # Compare only as many fields as the user typed
        if db.year == query.year:
            if field_count == 1:
                return True
            if db.month == query.month:
                if field_count == 2:
                    return True
                return db.day == query.day
        return False

    def relop_gt(db, query, field_count):
        if db.year > query.year:
            return True
        if field_count > 1 and db.year == query.year:
            if db.month > query.month:
                return True
            return field_count == 3 and db.month == query.month and db.day > query.day
        return False

    def relop_lt(db, query, field_count):
        if db.year < query.year:
            return True
        if field_count > 1 and db.year == query.year:
            if db.month < query.month:
                return True
            return field_count == 3 and db.month == query.month and db.day < query.day
        return False

    # The remaining operators are defined as negations of the above
    def relop_ne(db, query, field_count):
        return not relop_eq(db, query, field_count)

    def relop_ge(db, query, field_count):
        return not relop_lt(db, query, field_count)

    def relop_le(db, query, field_count):
        return not relop_gt(db, query, field_count)

    # Value format: [length of the operator prefix, comparison function]
    self.date_search_relops = {
        '=' :[1, relop_eq],
        '>' :[1, relop_gt],
        '<' :[1, relop_lt],
        '!=':[2, relop_ne],
        '>=':[2, relop_ge],
        '<=':[2, relop_le]
    }
# Tokens for relative-date queries; both the translated form and the
# canonical underscore-prefixed form are accepted.
local_today = ('_today', icu_lower(_('today')))
local_yesterday = ('_yesterday', icu_lower(_('yesterday')))
local_thismonth = ('_thismonth', icu_lower(_('thismonth')))
local_daysago = icu_lower(_('daysago'))
local_daysago_len = len(local_daysago)
untrans_daysago = '_daysago'
untrans_daysago_len = len('_daysago')
def get_dates_matches(self, location, query, candidates):
    '''
    Return the set of ids in *candidates* whose date field *location*
    matches *query*. Supports 'true'/'false' (set/unset), the relative
    tokens (today/yesterday/thismonth/<n>daysago) and an optional relop
    prefix followed by a date whose precision (year[-month[-day]])
    controls how many fields are compared.
    '''
    matches = set()
    if len(query) < 2:  # too short to be a meaningful date query
        return matches

    if location == 'date':
        location = 'timestamp'
    loc = self.field_metadata[location]['rec_index']

    if query == 'false':
        # Match books whose date is unset (None or the undefined sentinel)
        for id_ in candidates:
            item = self._data[id_]
            if item is None:
                continue
            v = item[loc]
            if isinstance(v, (bytes, str)):
                v = parse_date(v)
            if v is None or v <= UNDEFINED_DATE:
                matches.add(item[0])
        return matches
    if query == 'true':
        # Match books whose date is set
        for id_ in candidates:
            item = self._data[id_]
            if item is None:
                continue
            v = item[loc]
            if isinstance(v, (bytes, str)):
                v = parse_date(v)
            if v is not None and v > UNDEFINED_DATE:
                matches.add(item[0])
        return matches

    relop = None
    for k in self.date_search_relops.keys():
        if query.startswith(k):
            (p, relop) = self.date_search_relops[k]
            query = query[p:]
    if relop is None:
        # No explicit operator means equality
        (p, relop) = self.date_search_relops['=']

    if query in self.local_today:
        qd = now()
        field_count = 3
    elif query in self.local_yesterday:
        qd = now() - timedelta(1)
        field_count = 3
    elif query in self.local_thismonth:
        qd = now()
        field_count = 2
    elif query.endswith(self.local_daysago) or query.endswith(self.untrans_daysago):
        # Strip the 'daysago' suffix, leaving the number of days
        num = query[0:-(self.untrans_daysago_len if query.endswith(self.untrans_daysago) else self.local_daysago_len)]
        try:
            qd = now() - timedelta(int(num))
        except:
            raise ParseException(_('Number conversion error: {0}').format(num))
        field_count = 3
    else:
        try:
            qd = parse_date(query, as_utc=False)
        except:
            raise ParseException(_('Date conversion error: {0}').format(query))
        # Date precision: the number of separators determines how many
        # of year/month/day take part in the comparison
        if '-' in query:
            field_count = query.count('-') + 1
        else:
            field_count = query.count('/') + 1
    for id_ in candidates:
        item = self._data[id_]
        if item is None or item[loc] is None:
            continue
        v = item[loc]
        if isinstance(v, (bytes, str)):
            v = parse_date(v)
        if relop(v, qd, field_count):
            matches.add(item[0])
    return matches
def build_numeric_relop_dict(self):
    # Value format: [length of the operator prefix, comparison function].
    # None never satisfies a relational operator (but does satisfy !=).
    self.numeric_search_relops = {
        '=':[1, lambda r, q: r == q],
        '>':[1, lambda r, q: r is not None and r > q],
        '<':[1, lambda r, q: r is not None and r < q],
        '!=':[2, lambda r, q: r != q],
        '>=':[2, lambda r, q: r is not None and r >= q],
        '<=':[2, lambda r, q: r is not None and r <= q]
    }
def get_numeric_matches(self, location, query, candidates, val_func=None):
    '''
    Return ids in *candidates* whose numeric field *location* matches
    *query*: 'true'/'false' (set/unset), an optional relop prefix and an
    optional k/m/g multiplier suffix. *val_func* can override how the
    raw value is read from a record (used for the #count operator).
    '''
    matches = set()
    if len(query) == 0:
        return matches

    if val_func is None:
        loc = self.field_metadata[location]['rec_index']

        def val_func(item, loc=loc):
            return item[loc]

    q = ''
    cast = adjust = lambda x: x
    dt = self.field_metadata[location]['datatype']

    if query == 'false':
        if dt == 'rating' or location == 'cover':
            # Ratings/covers use falsy (0/None) rather than None for unset
            def relop(x, y):
                return (not bool(x))
        else:
            def relop(x, y):
                return (x is None)
    elif query == 'true':
        if dt == 'rating' or location == 'cover':
            def relop(x, y):
                return bool(x)
        else:
            def relop(x, y):
                return (x is not None)
    else:
        relop = None
        for k in self.numeric_search_relops.keys():
            if query.startswith(k):
                (p, relop) = self.numeric_search_relops[k]
                query = query[p:]
        if relop is None:
            # No explicit operator means equality
            (p, relop) = self.numeric_search_relops['=']

        if dt == 'int':
            def cast(x):
                return int(x)
        elif dt == 'rating':
            # Ratings are stored doubled (half stars); halve for display scale
            def cast(x):
                return (0 if x is None else int(x))

            def adjust(x):
                return (x // 2)
        elif dt in ('float', 'composite'):
            def cast(x):
                return float(x)
        else:  # count operation
            def cast(x):
                return int(x)

        # Optional size-multiplier suffix: k/m/g (powers of 1024)
        if len(query) > 1:
            mult = query[-1:].lower()
            mult = {'k':1024.,'m': 1024.**2, 'g': 1024.**3}.get(mult, 1.0)
            if mult != 1.0:
                query = query[:-1]
        else:
            mult = 1.0
        try:
            q = cast(query) * mult
        except:
            raise ParseException(_('Non-numeric value in query: {0}').format(query))

    for id_ in candidates:
        item = self._data[id_]
        if item is None:
            continue
        try:
            v = cast(val_func(item))
        except:
            v = None
        if v:
            # NOTE(review): adjust() is only applied to truthy values, so a
            # stored 0 is never adjusted — harmless for //2, but confirm
            # this is intentional before relying on it.
            v = adjust(v)
        if relop(v, q):
            matches.add(item[0])
    return matches
def get_user_category_matches(self, location, query, candidates):
    '''
    Match against a user category (an '@name' search). Each (item,
    category) pair in the user category is matched exactly in its
    underlying real category. A leading '.' in *query* also includes
    sub-categories ('location.*'); 'false' inverts the result.
    '''
    matches = set()
    if self.db_prefs is None or len(query) < 2:
        return matches

    user_cats = self.db_prefs.get('user_categories', [])
    c = set(candidates)

    if query.startswith('.'):
        check_subcats = True
        query = query[1:]
    else:
        check_subcats = False

    for key in user_cats:
        if key == location or (check_subcats and key.startswith(location + '.')):
            for (item, category, ign) in user_cats[key]:
                # Exact-match each member in its real category; shrink the
                # candidate pool as matches accumulate
                s = self.get_matches(category, '=' + item, candidates=c)
                c -= s
                matches |= s
    if query == 'false':
        return candidates - matches
    return matches
def get_keypair_matches(self, location, query, candidates):
    '''
    Match colon-separated key:value fields (e.g. identifiers). *query*
    may be 'key:value' — each side gets its own match kind — or a bare
    value matched against every pair. 'true'/'false' as the value test
    presence/absence.
    '''
    matches = set()
    if query.find(':') >= 0:
        q = [q.strip() for q in query.split(':')]
        if len(q) != 2:
            raise ParseException(
                _('Invalid query format for colon-separated search: {0}').format(query))
        (keyq, valq) = q
        keyq_mkind, keyq = self._matchkind(keyq)
        valq_mkind, valq = self._matchkind(valq)
    else:
        keyq = keyq_mkind = ''
        valq_mkind, valq = self._matchkind(query)

    loc = self.field_metadata[location]['rec_index']
    split_char = self.field_metadata[location]['is_multiple'].get(
            'cache_to_list', ',')
    for id_ in candidates:
        item = self._data[id_]
        if item is None:
            continue

        if item[loc] is None:
            # Field entirely unset: only a 'false' value query matches
            if valq == 'false':
                matches.add(id_)
            continue

        add_if_nothing_matches = valq == 'false'
        pairs = [p.strip() for p in item[loc].split(split_char)]
        for pair in pairs:
            parts = pair.split(':')
            if len(parts) != 2:
                continue
            # One-element lists, which is the shape _match() expects
            k = parts[:1]
            v = parts[1:]
            if keyq and not _match(keyq, k, keyq_mkind,
                                   use_primary_find_in_search=pref_use_primary_find_in_search):
                continue
            if valq:
                if valq == 'true':
                    if not v:
                        continue
                elif valq == 'false':
                    # A present value defeats a 'false' query for this key
                    if v:
                        add_if_nothing_matches = False
                        continue
                elif not _match(valq, v, valq_mkind,
                                use_primary_find_in_search=pref_use_primary_find_in_search):
                    continue
            matches.add(id_)

        if add_if_nothing_matches:
            matches.add(id_)
    return matches
def _matchkind(self, query):
    '''
    Strip a leading match-type prefix from *query* and return
    (matchkind, remaining_query): '=' exact match, '~' regexp, '\\\\'
    escapes a literal first character; anything else is a substring
    (contains) match. Non-regexp queries are lowercased.
    '''
    kind = CONTAINS_MATCH
    if len(query) > 1:
        first = query[0]
        if first == '\\':
            query = query[1:]
        elif first == '=':
            kind = EQUALS_MATCH
            query = query[1:]
        elif first == '~':
            kind = REGEXP_MATCH
            query = query[1:]
    if kind != REGEXP_MATCH:
        # leave case in regexps because it can be significant e.g. \S \W \D
        query = icu_lower(query)
    return kind, query
# Localized and canonical vocabularies accepted by get_bool_matches()
local_no = icu_lower(_('no'))
local_yes = icu_lower(_('yes'))
local_unchecked = icu_lower(_('unchecked'))
local_checked = icu_lower(_('checked'))
local_empty = icu_lower(_('empty'))
local_blank = icu_lower(_('blank'))
local_bool_values = (
    local_no, local_unchecked, '_no', 'false',
    local_yes, local_checked, '_yes', 'true',
    local_empty, local_blank, '_empty')
def get_bool_matches(self, location, query, candidates):
    '''
    Match a boolean column. Without tristate booleans, unset (None) is
    treated the same as no. With tristate booleans, 'empty'/'false'
    match unset values and 'true' means "the field is set" (matching
    both yes and no values, in addition to their own tokens).
    '''
    bools_are_tristate = self.db_prefs.get('bools_are_tristate')
    loc = self.field_metadata[location]['rec_index']
    matches = set()
    query = icu_lower(query)
    if query not in self.local_bool_values:
        raise ParseException(_('Invalid boolean query "{0}"').format(query))
    for id_ in candidates:
        item = self._data[id_]
        if item is None:
            continue

        val = force_to_bool(item[loc])
        if not bools_are_tristate:
            if val is None or not val:  # item is None or set to false
                if query in (self.local_no, self.local_unchecked, '_no', 'false'):
                    matches.add(item[0])
            else:  # item is explicitly set to true
                if query in (self.local_yes, self.local_checked, '_yes', 'true'):
                    matches.add(item[0])
        else:
            if val is None:
                if query in (self.local_empty, self.local_blank, '_empty', 'false'):
                    matches.add(item[0])
            elif not val:  # is not None and false
                # 'true' here means "field is set", so it matches no-values too
                if query in (self.local_no, self.local_unchecked, '_no', 'true'):
                    matches.add(item[0])
            else:  # item is not None and true
                if query in (self.local_yes, self.local_checked, '_yes', 'true'):
                    matches.add(item[0])
    return matches
def get_matches(self, location, query, candidates=None,
                allow_recursion=True):
    '''
    Return the set of book ids from *candidates* matching *query* in
    *location*. Dispatches to the type-specific matchers (dates,
    numbers, bools, key:value pairs, user categories, grouped search
    terms); everything else — including 'all' — falls through to text
    matching over the relevant columns.
    '''
    # If candidates is not None, it must not be modified. Changing its
    # value will break query optimization in the search parser
    matches = set()
    if candidates is None:
        candidates = self.universal_set()
    if len(candidates) == 0:
        return matches
    if location not in self.all_search_locations:
        return matches

    # Strip the '@' from grouped search term names
    if len(location) > 2 and location.startswith('@') and \
                location[1:] in self.db_prefs['grouped_search_terms']:
        location = location[1:]

    if query and query.strip():
        # get metadata key associated with the search term. Eliminates
        # dealing with plurals and other aliases
        original_location = location
        location = self.field_metadata.search_term_to_field_key(icu_lower(location.strip()))
        # grouped search terms
        if isinstance(location, list):
            if allow_recursion:
                # 'false' on a group means: in none of the member fields
                if query.lower() == 'false':
                    invert = True
                    query = 'true'
                else:
                    invert = False
                for loc in location:
                    c = candidates.copy()
                    m = self.get_matches(loc, query,
                            candidates=c, allow_recursion=False)
                    matches |= m
                    c -= m
                    if len(c) == 0:
                        break
                if invert:
                    matches = self.universal_set() - matches
                return matches
            raise ParseException(_('Recursive query group detected: {0}').format(query))

        # apply the limit if appropriate
        if location == 'all' and prefs['limit_search_columns'] and \
                prefs['limit_search_columns_to']:
            terms = set()
            for l in prefs['limit_search_columns_to']:
                l = icu_lower(l.strip())
                if l and l != 'all' and l in self.all_search_locations:
                    terms.add(l)
            if terms:
                c = candidates.copy()
                for l in terms:
                    try:
                        m = self.get_matches(l, query,
                                candidates=c, allow_recursion=allow_recursion)
                        matches |= m
                        c -= m
                        if len(c) == 0:
                            break
                    except:
                        pass
                return matches

        if location in self.field_metadata:
            fm = self.field_metadata[location]
            # take care of dates special case
            if fm['datatype'] == 'datetime' or \
                    (fm['datatype'] == 'composite' and
                     fm['display'].get('composite_sort', '') == 'date'):
                return self.get_dates_matches(location, query.lower(), candidates)

            # take care of numbers special case
            if fm['datatype'] in ('rating', 'int', 'float') or \
                    (fm['datatype'] == 'composite' and
                     fm['display'].get('composite_sort', '') == 'number'):
                return self.get_numeric_matches(location, query.lower(), candidates)

            if fm['datatype'] == 'bool':
                return self.get_bool_matches(location, query, candidates)

            # take care of the 'count' operator for is_multiples
            # NOTE(review): query[1:1] is always the empty string, and
            # '' in '=<>!' is True, so this branch fires for ANY
            # '#'-prefixed query on a multiple-valued field; presumably
            # query[1:2] was intended — confirm before changing.
            if fm['is_multiple'] and \
                    len(query) > 1 and query.startswith('#') and \
                    query[1:1] in '=<>!':
                def vf(item, loc=fm['rec_index'], ms=fm['is_multiple']['cache_to_list']):
                    return (len(item[loc].split(ms)) if item[loc] is not None else 0)
                return self.get_numeric_matches(location, query[1:],
                                                candidates, val_func=vf)

            # special case: colon-separated fields such as identifiers. isbn
            # is a special case within the case
            if fm.get('is_csp', False):
                if location == 'identifiers' and original_location == 'isbn':
                    return self.get_keypair_matches('identifiers',
                                                '=isbn:'+query, candidates)
                return self.get_keypair_matches(location, query, candidates)

        # check for user categories
        if len(location) >= 2 and location.startswith('@'):
            return self.get_user_category_matches(location[1:], query.lower(),
                                                  candidates)
        # everything else, or 'all' matches
        matchkind, query = self._matchkind(query)

        if not isinstance(query, str):
            query = query.decode('utf-8')

        db_col = {}
        exclude_fields = []  # fields to not check when matching against text.
        col_datatype = []
        is_multiple_cols = {}
        for x in range(len(self.FIELD_MAP)):
            col_datatype.append('')
        for x in self.field_metadata:
            if x.startswith('@'):
                continue
            if len(self.field_metadata[x]['search_terms']):
                db_col[x] = self.field_metadata[x]['rec_index']
                if self.field_metadata[x]['datatype'] not in \
                        ['composite', 'text', 'comments', 'series', 'enumeration']:
                    exclude_fields.append(db_col[x])
                col_datatype[db_col[x]] = self.field_metadata[x]['datatype']
                is_multiple_cols[db_col[x]] = \
                    self.field_metadata[x]['is_multiple'].get('cache_to_list', None)

        try:
            # Ratings are stored doubled, so double the query for comparison
            rating_query = int(query) * 2
        except:
            rating_query = None

        location = [location] if location != 'all' else list(db_col.keys())

        for i, loc in enumerate(location):
            location[i] = db_col[loc]

        current_candidates = candidates.copy()
        for loc in location:  # location is now an array of field indices
            if loc == db_col['authors']:
                # DB stores authors with commas changed to bars, so change query
                if matchkind == REGEXP_MATCH:
                    q = query.replace(',', r'\|')
                else:
                    q = query.replace(',', '|')
            elif loc == db_col['languages']:
                q = canonicalize_lang(query)
                if q is None:
                    # Try reverse-mapping a language name to its code
                    lm = lang_map()
                    rm = {v.lower():k for k,v in iteritems(lm)}
                    q = rm.get(query, query)
            else:
                q = query

            for id_ in current_candidates:
                item = self._data[id_]
                if item is None:
                    continue

                if not item[loc]:
                    if q == 'false' and matchkind == CONTAINS_MATCH:
                        matches.add(item[0])
                    continue  # item is empty. No possible matches below
                if q == 'false'and matchkind == CONTAINS_MATCH:
                    # Field has something in it, so a false query does not match
                    continue

                if q == 'true' and matchkind == CONTAINS_MATCH:
                    if isinstance(item[loc], string_or_bytes):
                        if item[loc].strip() == '':
                            continue
                    matches.add(item[0])
                    continue

                if col_datatype[loc] == 'rating':  # get here if 'all' query
                    if rating_query and rating_query == int(item[loc]):
                        matches.add(item[0])
                    continue

                try:  # a conversion below might fail
                    # relationals are not supported in 'all' queries
                    if col_datatype[loc] == 'float':
                        if float(query) == item[loc]:
                            matches.add(item[0])
                        continue
                    if col_datatype[loc] == 'int':
                        if int(query) == item[loc]:
                            matches.add(item[0])
                        continue
                except:
                    # A conversion threw an exception. Because of the type,
                    # no further match is possible
                    continue

                if loc not in exclude_fields:  # time for text matching
                    if is_multiple_cols[loc] is not None:
                        vals = [v.strip() for v in item[loc].split(is_multiple_cols[loc])]
                    else:
                        vals = [item[loc]]  # make into list to make _match happy
                    if _match(q, vals, matchkind,
                              use_primary_find_in_search=pref_use_primary_find_in_search):
                        matches.add(item[0])
                        continue
            # Already-matched books need not be rechecked in later columns
            current_candidates -= matches
    return matches
def search(self, query, return_matches=False, sort_results=True):
    '''
    Run *query* with the current search restriction applied. Either
    returns the matching ids (return_matches=True) or installs them as
    the new filtered view.
    '''
    matched_ids = self.search_getting_ids(
        query, self.search_restriction,
        set_restriction_count=True, sort_results=sort_results)
    if return_matches:
        return matched_ids
    self._map_filtered = matched_ids
def _build_restriction_string(self, restriction):
    # Combine the virtual-library (base) restriction with an additional
    # restriction; either side may be empty.
    if not self.base_restriction:
        return restriction
    if not restriction:
        return self.base_restriction
    return f'({self.base_restriction}) and ({restriction})'
def search_getting_ids(self, query, search_restriction,
                       set_restriction_count=False, use_virtual_library=True, sort_results=True):
    '''
    Parse and run *query* combined with *search_restriction*, returning
    matching ids in the current sort order. Optionally records the
    restriction-only book count for display in the UI.
    '''
    if use_virtual_library:
        search_restriction = self._build_restriction_string(search_restriction)
    q = ''
    if not query or not query.strip():
        q = search_restriction
    else:
        q = query
        if search_restriction:
            q = f'({search_restriction}) and ({query})'
    if not q:
        if set_restriction_count:
            self.search_restriction_book_count = len(self._map)
        return list(self._map)
    matches = self.parse(q)
    # Filter self._map through a boolean table so sort order is preserved
    tmap = list(repeat(False, len(self._data)))
    for x in matches:
        tmap[x] = True
    rv = [x for x in self._map if tmap[x]]
    if set_restriction_count and q == search_restriction:
        # The query was the restriction alone, so this count is valid
        self.search_restriction_book_count = len(rv)
    return rv
# Trivial accessors for the user search restriction, the virtual-library
# (base) restriction, their display names, and the restricted book count.

def get_search_restriction(self):
    return self.search_restriction

def set_search_restriction(self, s):
    self.search_restriction = s

def get_base_restriction(self):
    return self.base_restriction

def set_base_restriction(self, s):
    self.base_restriction = s

def get_base_restriction_name(self):
    return self.base_restriction_name

def set_base_restriction_name(self, s):
    self.base_restriction_name = s

def get_search_restriction_name(self):
    return self.search_restriction_name

def set_search_restriction_name(self, s):
    self.search_restriction_name = s

def search_restriction_applied(self):
    # True when any restriction (user or virtual-library) is active
    return bool(self.search_restriction) or bool(self.base_restriction)

def get_search_restriction_book_count(self):
    return self.search_restriction_book_count
def set_marked_ids(self, id_dict):
    '''
    ids in id_dict are "marked". They can be searched for by
    using the search term ``marked:true`` or ``marked:value``.
    Pass in an empty dictionary or set to clear marked ids.

    :param id_dict: Either a dictionary mapping ids to values or a set
    of ids. If a mapping is provided, then the search can be used to search
    for particular values: ``marked:value``
    '''
    if not hasattr(id_dict, 'items'):
        # Simple list. Make it a dict of string 'true'
        self.marked_ids_dict = dict.fromkeys(id_dict, 'true')
    else:
        # Ensure that all the items in the dict are text
        self.marked_ids_dict = dict(zip(iter(id_dict), map(str,
            itervalues(id_dict))))

    # Set the values in the cache
    marked_col = self.FIELD_MAP['marked']
    in_tag_browser_col = self.FIELD_MAP['in_tag_browser']
    # Clear all previous marks first, then apply the new ones
    for r in self.iterall():
        r[marked_col] = r[in_tag_browser_col] = None
    for id_, val in self.marked_ids_dict.items():
        with suppress(Exception):
            # The id may no longer exist in the cache
            self._data[id_][marked_col] = val
def get_marked(self, idx, index_is_id=True, default_value=None):
    # idx is either a book id or a row number in the filtered view
    book_id = idx if index_is_id else self[idx][0]
    return self.marked_ids_dict.get(book_id, default_value)
# }}}
def remove(self, id):
    '''Delete book *id* from the cache, the uuid map and both row maps.'''
    with suppress(IndexError, TypeError):
        # id may be out of bounds or the record already gone
        self._uuid_map.pop(self._data[id][self._uuid_column_index], None)
    with suppress(IndexError):
        # id out of bounds: nothing to clear anyway
        self._data[id] = None
    with suppress(ValueError):
        self._map.remove(id)
    with suppress(ValueError):
        self._map_filtered.remove(id)
def set(self, row, col, val, row_is_id=False):
    '''Set a single field value, keeping the uuid map in sync.'''
    book_id = row if row_is_id else self._map_filtered[row]
    record = self._data[book_id]
    is_uuid_col = (col == self._uuid_column_index)
    if is_uuid_col:
        # Forget the old uuid before overwriting it
        self._uuid_map.pop(record[col], None)
    record[col] = val
    if is_uuid_col:
        self._uuid_map[val] = book_id
    record.refresh_composites()
def get(self, row, col, row_is_id=False):
    # row is either a book id or a row number in the filtered view
    book_id = row if row_is_id else self._map_filtered[row]
    return self._data[book_id][col]
def index(self, id, cache=False):
    # cache=True searches all rows; otherwise only the filtered view
    source = self._map if cache else self._map_filtered
    return source.index(id)
def row(self, id):
    # Row number of book id in the current (filtered) view
    return self.index(id)
def has_id(self, id):
    # A live book occupies a non-None slot at index id
    try:
        return self._data[id] is not None
    except IndexError:
        return False
def refresh_ids(self, db, ids):
    '''
    Refresh the data in the cache for books identified by ids.
    Returns a list of affected rows or None if the rows are filtered.
    '''
    for id in ids:
        try:
            # Rebuild the whole row from the database
            self._data[id] = CacheRow(db, self.composites, self.datetimes,
                    db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0],
                    self.series_col, self.series_sort_col)
            # Extra non-db columns: ondevice, marked, series sort
            self._data[id].append(db.book_on_device_string(id))
            self._data[id].append(self.marked_ids_dict.get(id, None))
            self._data[id].append(None)
            self._uuid_map[self._data[id][self._uuid_column_index]] = id
        except IndexError:
            return None
    try:
        return list(map(self.row, ids))
    except ValueError:
        # At least one id is not in the filtered view
        pass
    return None
def books_added(self, ids, db):
    '''Load newly added books into the cache and show them first.'''
    if not ids:
        return
    # Grow the id-indexed table enough to hold the largest new id
    self._data.extend(repeat(None, max(ids)-len(self._data)+2))
    for id in ids:
        self._data[id] = CacheRow(db, self.composites, self.datetimes,
                db.conn.get('SELECT * from meta2 WHERE id=?', (id,))[0],
                self.series_col, self.series_sort_col)
        self._data[id].append(db.book_on_device_string(id))
        self._data[id].append(self.marked_ids_dict.get(id, None))
        self._data[id].append(None)  # Series sort column
        self._uuid_map[self._data[id][self._uuid_column_index]] = id
    # Prepend so the new books appear at the top of the current view
    self._map[0:0] = ids
    self._map_filtered[0:0] = ids
def books_deleted(self, ids):
    # remove() tolerates ids that are already gone
    for book_id in ids:
        self.remove(book_id)
def count(self):
    # Total (unfiltered) number of books
    return len(self._map)
def refresh_ondevice(self, db):
    '''Re-read the on-device status string for every book in the cache.'''
    ondevice_col = self.FIELD_MAP['ondevice']
    for item in self._data:
        if item is not None:
            item[ondevice_col] = db.book_on_device_string(item[0])
            # Composite templates may reference ondevice, so invalidate them
            item.refresh_composites()
def refresh(self, db, field=None, ascending=True):
    '''Rebuild the whole cache from the database, optionally sorting.'''
    # reinitialize the template cache in case a composite column has changed
    db.initialize_template_cache()

    temp = db.conn.get('SELECT * FROM meta2')
    # Rows are indexed by book id; the last row holds the largest id
    self._data = list(repeat(None, temp[-1][0]+2)) if temp else []
    for r in temp:
        self._data[r[0]] = CacheRow(db, self.composites, self.datetimes, r,
                                    self.series_col, self.series_sort_col)
        self._uuid_map[self._data[r[0]][self._uuid_column_index]] = r[0]
    for item in self._data:
        if item is not None:
            item.append(db.book_on_device_string(item[0]))
            # Temp mark and series_sort columns
            item.extend((None, None, None))

    # Re-apply any marks that survive the rebuild
    marked_col = self.FIELD_MAP['marked']
    for id_,val in iteritems(self.marked_ids_dict):
        try:
            self._data[id_][marked_col] = val
        except:
            pass

    in_tag_browser_col = self.FIELD_MAP['in_tag_browser']
    for r in self.iterall():
        r[in_tag_browser_col] = None

    self._map = [i[0] for i in self._data if i is not None]
    if field is not None:
        self.sort(field, ascending)
    self._map_filtered = list(self._map)
    # Restore the filtered view if a restriction is active
    if self.search_restriction or self.base_restriction:
        self.search('', return_matches=False)
# Sorting functions {{{
def sanitize_sort_field_name(self, field):
    '''Normalize a user-visible sort field name to its real column.'''
    field = self.field_metadata.search_term_to_field_key(field.lower().strip())
    # Sorting on title/authors actually uses their hidden sort columns
    return {'title': 'sort', 'authors': 'author_sort'}.get(field, field)
def sort(self, field, ascending, subsort=False):
    # subsort is accepted for API compatibility but not forwarded here
    self.multisort([(field, ascending)])
def multisort(self, fields=[], subsort=False, only_ids=None):
    '''
    fields is a list of 2-tuple, each tuple is of the form
    (field_name, is_ascending)

    If only_ids is a list of ids, this function will sort that list instead
    of the internal mapping of ids (the list is sorted in place).
    '''
    # fields is rebound, never mutated, so the mutable default is safe here
    fields = [(self.sanitize_sort_field_name(x), bool(y)) for x, y in fields]
    keys = self.field_metadata.sortable_field_keys()
    fields = [x for x in fields if x[0] in keys]
    if subsort and 'sort' not in [x[0] for x in fields]:
        fields += [('sort', True)]
    if not fields:
        # Default: newest first
        fields = [('timestamp', False)]

    keyg = SortKeyGenerator(fields, self.field_metadata, self._data, self.db_prefs)
    if only_ids is None:
        self._map.sort(key=keyg)

        # Rebuild the filtered map so it follows the new order
        tmap = list(repeat(False, len(self._data)))
        for x in self._map_filtered:
            tmap[x] = True
        self._map_filtered = [x for x in self._map if tmap[x]]
    else:
        only_ids.sort(key=keyg)
class SortKey:

    '''
    Compound sort key: compares a tuple of field values, honoring a
    per-field +1 (ascending) / -1 (descending) order multiplier.
    '''

    def __init__(self, orders, values):
        self.orders, self.values = orders, values

    def compare_to_other(self, other):
        # The first differing field decides; its order flips the sign
        for order, mine, theirs in zip(self.orders, self.values, other.values):
            result = cmp(mine, theirs)
            if result != 0:
                return result * order
        return 0

    def __eq__(self, other):
        return self.compare_to_other(other) == 0

    def __ne__(self, other):
        return self.compare_to_other(other) != 0

    def __lt__(self, other):
        return self.compare_to_other(other) < 0

    def __le__(self, other):
        return self.compare_to_other(other) <= 0

    def __gt__(self, other):
        return self.compare_to_other(other) > 0

    def __ge__(self, other):
        return self.compare_to_other(other) >= 0
class SortKeyGenerator:

    '''
    Callable that maps a record id to a SortKey covering all requested
    fields, normalizing each raw column value according to its datatype.
    Intended for use as the ``key=`` argument when sorting lists of ids.
    '''

    def __init__(self, fields, field_metadata, data, db_prefs):
        from calibre.utils.icu import sort_key
        self.field_metadata = field_metadata
        self.db_prefs = db_prefs
        # +1 for ascending, -1 for descending; consumed by SortKey
        self.orders = [1 if x[1] else -1 for x in fields]
        self.entries = [(x[0], field_metadata[x[0]]) for x in fields]
        self.library_order = tweaks['title_series_sorting'] == 'library_order'
        self.data = data
        self.string_sort_key = sort_key
        self.lang_idx = field_metadata['languages']['rec_index']

    def __call__(self, record):
        values = tuple(self.itervals(self.data[record]))
        return SortKey(self.orders, values)

    def itervals(self, record):
        '''
        Yield one sortable value per requested field, converted from the
        raw cached column value according to the field's datatype.
        '''
        for name, fm in self.entries:
            dt = fm['datatype']
            val = record[fm['rec_index']]
            if dt == 'composite':
                # Composite columns are stored as text; re-interpret them
                # according to their configured sort behavior.
                sb = fm['display'].get('composite_sort', 'text')
                if sb == 'date':
                    try:
                        val = parse_date(val)
                    except Exception:
                        val = UNDEFINED_DATE
                    dt = 'datetime'
                elif sb == 'number':
                    try:
                        p = 1
                        # Test longer suffixes first: previously 'B' was
                        # tested before 'KB'/'MB'/..., so a value like
                        # '2 KB' matched 'B', left '2 K' behind, failed
                        # locale.atof() and sorted as 0.0.
                        suffixes = ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')
                        for i in range(len(suffixes) - 1, -1, -1):
                            candidate = suffixes[i]
                            if val.endswith(candidate):
                                p = 1024 ** i
                                val = val[:-len(candidate)].strip()
                                break
                        val = locale.atof(val) * p
                    except Exception:
                        val = 0.0
                    dt = 'float'
                elif sb == 'bool':
                    val = force_to_bool(val)
                    dt = 'bool'

            if dt == 'datetime':
                if val is None:
                    val = UNDEFINED_DATE
                if tweaks['sort_dates_using_visible_fields']:
                    # Sort on the date as it is displayed to the user, so
                    # that rows do not look mis-ordered when, e.g., times
                    # are hidden by the display format.
                    fmt = None
                    if name == 'timestamp':
                        fmt = tweaks['gui_timestamp_display_format']
                    elif name == 'pubdate':
                        fmt = tweaks['gui_pubdate_display_format']
                    elif name == 'last_modified':
                        fmt = tweaks['gui_last_modified_display_format']
                    elif fm['is_custom']:
                        fmt = fm['display'].get('date_format', None)
                    val = clean_date_for_sort(val, fmt)

            elif dt == 'series':
                if val is None:
                    val = ('', 1)
                else:
                    if self.library_order:
                        try:
                            lang = record[self.lang_idx].partition(',')[0]
                        except (AttributeError, ValueError, KeyError,
                                IndexError, TypeError):
                            lang = None
                        val = title_sort(val, order='library_order', lang=lang)
                    # Series sort ties are broken by the series index column,
                    # which field_metadata stores one slot after the series.
                    sidx_fm = self.field_metadata[name + '_index']
                    sidx = record[sidx_fm['rec_index']]
                    val = (self.string_sort_key(val), sidx)

            elif dt in ('text', 'comments', 'composite', 'enumeration'):
                if val:
                    if fm['is_multiple']:
                        jv = fm['is_multiple']['list_to_ui']
                        sv = fm['is_multiple']['cache_to_list']
                        if '&' in jv:
                            # Ampersand joiner marks author-like fields:
                            # sort on the author-sort form of each value
                            val = jv.join(
                                [author_to_author_sort(v) for v in val.split(sv)])
                        else:
                            val = jv.join(sorted(val.split(sv),
                                          key=self.string_sort_key))
                val = self.string_sort_key(val)

            elif dt == 'bool':
                # Collapse None onto False unless bools are tristate
                if not self.db_prefs.get('bools_are_tristate'):
                    val = {True: 1, False: 2, None: 2}.get(val, 2)
                else:
                    val = {True: 1, False: 2, None: 3}.get(val, 3)

            yield val
# }}}
# }}}
| 48,647 | Python | .py | 1,097 | 29.919781 | 122 | 0.507302 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,701 | prefs.py | kovidgoyal_calibre/src/calibre/library/prefs.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json
import os
from calibre import prints
from calibre.constants import preferred_encoding
from calibre.utils.config import from_json, to_json
from polyglot.builtins import iteritems
class DBPrefs(dict):
def __init__(self, db):
dict.__init__(self)
self.db = db
self.defaults = {}
self.disable_setting = False
for key, val in self.db.conn.get('SELECT key,val FROM preferences'):
try:
val = self.raw_to_object(val)
except:
prints('Failed to read value for:', key, 'from db')
continue
dict.__setitem__(self, key, val)
def raw_to_object(self, raw):
if not isinstance(raw, str):
raw = raw.decode(preferred_encoding)
return json.loads(raw, object_hook=from_json)
def to_raw(self, val):
return json.dumps(val, indent=2, default=to_json)
def has_setting(self, key):
return key in self
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.defaults[key]
def __delitem__(self, key):
dict.__delitem__(self, key)
self.db.conn.execute('DELETE FROM preferences WHERE key=?', (key,))
self.db.conn.commit()
def __setitem__(self, key, val):
if self.disable_setting:
return
raw = self.to_raw(val)
self.db.conn.execute('INSERT OR REPLACE INTO preferences (key,val) VALUES (?,?)', (key,
raw))
self.db.conn.commit()
dict.__setitem__(self, key, val)
def set(self, key, val):
self.__setitem__(key, val)
def get_namespaced(self, namespace, key, default=None):
key = 'namespaced:%s:%s'%(namespace, key)
try:
return dict.__getitem__(self, key)
except KeyError:
return default
def set_namespaced(self, namespace, key, val):
if ':' in key:
raise KeyError('Colons are not allowed in keys')
if ':' in namespace:
raise KeyError('Colons are not allowed in'
' the namespace')
key = 'namespaced:%s:%s'%(namespace, key)
self[key] = val
def write_serialized(self, library_path):
try:
to_filename = os.path.join(library_path, 'metadata_db_prefs_backup.json')
data = json.dumps(self, indent=2, default=to_json)
if not isinstance(data, bytes):
data = data.encode('utf-8')
with open(to_filename, "wb") as f:
f.write(data)
except:
import traceback
traceback.print_exc()
@classmethod
def read_serialized(cls, library_path, recreate_prefs=False):
try:
from_filename = os.path.join(library_path,
'metadata_db_prefs_backup.json')
with open(from_filename, "rb") as f:
d = json.load(f, object_hook=from_json)
if not recreate_prefs:
return d
cls.clear()
cls.db.conn.execute('DELETE FROM preferences')
for k,v in iteritems(d):
raw = cls.to_raw(v)
cls.db.conn.execute(
'INSERT INTO preferences (key,val) VALUES (?,?)', (k, raw))
cls.db.conn.commit()
cls.clear()
cls.update(d)
return d
except:
import traceback
traceback.print_exc()
raise
return None
| 3,740 | Python | .py | 99 | 27.222222 | 95 | 0.556445 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,702 | comments.py | kovidgoyal_calibre/src/calibre/library/comments.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2010, Kovid Goyal <kovid at kovidgoyal.net>
import re
from calibre import prepare_string_for_xml
from calibre.constants import preferred_encoding
from calibre.ebooks.BeautifulSoup import BeautifulSoup, CData, Comment, Declaration, NavigableString, ProcessingInstruction
from calibre.utils.html2text import html2text
# Hackish - ignoring sentences ending or beginning in numbers to avoid
# confusion with decimal points.
lost_cr_pat = re.compile('([a-z])([\\.\\?!])([A-Z])')
lost_cr_exception_pat = re.compile(r'(Ph\.D)|(D\.Phil)|((Dr|Mr|Mrs|Ms)\.[A-Z])')
sanitize_pat = re.compile(r'<script|<table|<tr|<td|<th|<style|<iframe',
re.IGNORECASE)
def comments_to_html(comments):
'''
Convert random comment text to normalized, xml-legal block of <p>s
'plain text' returns as
<p>plain text</p>
'plain text with <i>minimal</i> <b>markup</b>' returns as
<p>plain text with <i>minimal</i> <b>markup</b></p>
'<p>pre-formatted text</p> returns untouched
'A line of text\n\nFollowed by a line of text' returns as
<p>A line of text</p>
<p>Followed by a line of text</p>
'A line of text.\nA second line of text.\rA third line of text' returns as
<p>A line of text.<br />A second line of text.<br />A third line of text.</p>
'...end of a paragraph.Somehow the break was lost...' returns as
<p>...end of a paragraph.</p>
<p>Somehow the break was lost...</p>
Deprecated HTML returns as HTML via BeautifulSoup()
'''
if not comments:
return '<p></p>'
if not isinstance(comments, str):
comments = comments.decode(preferred_encoding, 'replace')
if comments.lstrip().startswith('<'):
# Comment is already HTML do not mess with it
return comments
if '<' not in comments:
comments = prepare_string_for_xml(comments)
parts = ['<p class="description">%s</p>'%x.replace('\n', '<br />')
for x in comments.split('\n\n')]
return '\n'.join(parts)
if sanitize_pat.search(comments) is not None:
try:
return sanitize_comments_html(comments)
except:
import traceback
traceback.print_exc()
return '<p></p>'
# Explode lost CRs to \n\n
comments = lost_cr_exception_pat.sub(lambda m: m.group().replace('.',
'.\r'), comments)
for lost_cr in lost_cr_pat.finditer(comments):
comments = comments.replace(lost_cr.group(),
'{}{}\n\n{}'.format(lost_cr.group(1),
lost_cr.group(2),
lost_cr.group(3)))
comments = comments.replace('\r', '')
# Convert \n\n to <p>s
comments = comments.replace('\n\n', '<p>')
# Convert solo returns to <br />
comments = comments.replace('\n', '<br />')
# Convert two hyphens to emdash
comments = comments.replace('--', '—')
soup = BeautifulSoup('<div>' + comments + '</div>').find('div')
result = BeautifulSoup('<div>')
container = result.find('div')
rtc = 0
open_pTag = False
all_tokens = list(soup.contents)
inline_tags = ('br', 'b', 'i', 'em', 'strong', 'span', 'font', 'a', 'hr')
for token in all_tokens:
if isinstance(token, (CData, Comment, Declaration, ProcessingInstruction)):
continue
if isinstance(token, NavigableString):
if not open_pTag:
pTag = result.new_tag('p')
open_pTag = True
ptc = 0
pTag.insert(ptc, token)
ptc += 1
elif token.name in inline_tags:
if not open_pTag:
pTag = result.new_tag('p')
open_pTag = True
ptc = 0
pTag.insert(ptc, token)
ptc += 1
else:
if open_pTag:
container.insert(rtc, pTag)
rtc += 1
open_pTag = False
ptc = 0
container.insert(rtc, token)
rtc += 1
if open_pTag:
container.insert(rtc, pTag)
for p in container.findAll('p'):
p['class'] = 'description'
return container.decode_contents()
def markdown(val):
try:
md = markdown.Markdown
except AttributeError:
from calibre.ebooks.markdown import Markdown
md = markdown.Markdown = Markdown()
val = md.convert(val)
# The Qt Rich text widgets display <p><br></p> as two blank lines instead
# of one so fix that here.
return re.sub(r'<p(|\s+[^>]*?)>\s*<br\s*/?>\s*</p>','<p\\1>\xa0</p>', val)
def merge_comments(one, two):
return comments_to_html(one) + '\n\n' + comments_to_html(two)
def sanitize_comments_html(html):
from calibre.ebooks.markdown import Markdown
text = html2text(html, single_line_break=False)
md = Markdown()
html = md.convert(text)
return html
def find_tests():
import unittest
class Test(unittest.TestCase):
def test_comments_to_html(self):
for pat, val in [
(b'lineone\n\nlinetwo',
'<p class="description">lineone</p>\n<p class="description">linetwo</p>'),
('a <b>b&c</b>\nf',
'<p class="description">a <b>b&c</b><br/>f</p>'),
('a <?xml asd> b\n\ncd',
'<p class="description">a b</p><p class="description">cd</p>'),
]:
try:
cval = comments_to_html(pat)
except DeprecationWarning:
pass # new lxml + old Beautiful soup == deprecation warning
else:
self.assertEqual(cval, val)
return unittest.defaultTestLoader.loadTestsFromTestCase(Test)
| 5,878 | Python | .py | 139 | 32.71223 | 123 | 0.576363 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,703 | __init__.py | kovidgoyal_calibre/src/calibre/library/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
''' Code to manage ebook library'''
import os
def db(path=None, read_only=False):
from calibre.db.legacy import LibraryDatabase
from calibre.utils.config import prefs
return LibraryDatabase(os.path.expanduser(path) if path else prefs['library_path'],
read_only=read_only)
def generate_test_db(library_path, # {{{
num_of_records=20000,
num_of_authors=6000,
num_of_tags=10000,
tag_length=7,
author_length=7,
title_length=10,
max_authors=10,
max_tags=10
):
import os
import random
import string
import sys
import time
from calibre.constants import preferred_encoding
if not os.path.exists(library_path):
os.makedirs(library_path)
letters = string.letters.decode(preferred_encoding)
def randstr(length):
return ''.join(random.choice(letters) for i in
range(length))
all_tags = [randstr(tag_length) for j in range(num_of_tags)]
print('Generated', num_of_tags, 'tags')
all_authors = [randstr(author_length) for j in range(num_of_authors)]
print('Generated', num_of_authors, 'authors')
all_titles = [randstr(title_length) for j in range(num_of_records)]
print('Generated', num_of_records, 'titles')
testdb = db(library_path)
print('Creating', num_of_records, 'records...')
start = time.time()
for i, title in enumerate(all_titles):
print(i+1, end=' ')
sys.stdout.flush()
authors = random.randint(1, max_authors)
authors = [random.choice(all_authors) for i in range(authors)]
tags = random.randint(0, max_tags)
tags = [random.choice(all_tags) for i in range(tags)]
from calibre.ebooks.metadata.book.base import Metadata
mi = Metadata(title, authors)
mi.tags = tags
testdb.import_book(mi, [])
t = time.time() - start
print('\nGenerated', num_of_records, 'records in:', t, 'seconds')
print('Time per record:', t/num_of_records)
# }}}
def current_library_path():
from calibre.utils.config import prefs
path = prefs['library_path']
if path:
path = path.replace('\\', '/')
while path.endswith('/'):
path = path[:-1]
return path
def current_library_name():
import posixpath
path = current_library_path()
if path:
return posixpath.basename(path)
| 2,504 | Python | .py | 68 | 30.161765 | 87 | 0.64265 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,704 | database2.py | kovidgoyal_calibre/src/calibre/library/database2.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
The database used to store ebook metadata
'''
import copy
import functools
import glob
import hashlib
import json
import numbers
import os
import random
import re
import shutil
import sys
import threading
import time
import traceback
import uuid
from collections import defaultdict, namedtuple
from calibre import force_unicode, isbytestring, prints
from calibre.constants import filesystem_encoding, iswindows, preferred_encoding
from calibre.customize.ui import run_plugins_on_import, run_plugins_on_postimport
from calibre.db import _get_next_series_num_for_list, _get_series_values, get_data_as_dict
from calibre.db.adding import find_books_in_directory, import_book_directory, import_book_directory_multiple, recursive_import
from calibre.db.categories import CATEGORY_SORTS, Tag
from calibre.db.errors import NoSuchFormat
from calibre.db.lazy import FormatMetadata, FormatsList
from calibre.ebooks import check_ebook_format
from calibre.ebooks.metadata import author_to_author_sort, get_title_sort_pat, string_to_authors, title_sort
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.opf2 import metadata_to_opf
from calibre.library.caches import ResultCache
from calibre.library.custom_columns import CustomColumns
from calibre.library.database import LibraryDatabase
from calibre.library.field_metadata import FieldMetadata
from calibre.library.prefs import DBPrefs
from calibre.library.schema_upgrades import SchemaUpgrade
from calibre.library.sqlite import IntegrityError, connect
from calibre.ptempfile import PersistentTemporaryFile, SpooledTemporaryFile, base_dir
from calibre.utils.config import from_json, prefs, to_json, tweaks
from calibre.utils.date import UNDEFINED_DATE, parse_date, parse_only_date, utcfromtimestamp, utcnow
from calibre.utils.date import now as nowf
from calibre.utils.filenames import WindowsAtomicFolderMove, ascii_filename, hardlink_file, samefile
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.utils.icu import lower, sort_key, strcmp
from calibre.utils.icu import lower as icu_lower
from calibre.utils.img import save_cover_data_to
from calibre.utils.localization import _, calibre_langcode_to_name, canonicalize_lang
from calibre.utils.recycle_bin import delete_file, delete_tree
from calibre.utils.resources import get_path as P
from calibre.utils.search_query_parser import saved_searches, set_saved_searches
from polyglot.builtins import iteritems, string_or_bytes
copyfile = os.link if hasattr(os, 'link') else shutil.copyfile
SPOOL_SIZE = 30*1024*1024
ProxyMetadata = namedtuple('ProxyMetadata', 'book_size ondevice_col db_approx_formats')
class DBPrefsWrapper:
def __init__(self, db):
self.db = db
self.new_api = self
def pref(self, name, default=None):
return self.db.prefs.get(name, default)
def set_pref(self, name, val):
self.db.prefs[name] = val
class LibraryDatabase2(LibraryDatabase, SchemaUpgrade, CustomColumns):
'''
An ebook metadata database that stores references to ebook files on disk.
'''
PATH_LIMIT = 40 if 'win32' in sys.platform else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
@property
def user_version(self):
'The user version of this database'
return self.conn.get('pragma user_version;', all=False)
@user_version.setter
def user_version(self, val):
self.conn.execute('pragma user_version=%d'%int(val))
self.conn.commit()
@property
def library_id(self):
'''The UUID for this library. As long as the user only operates on libraries with calibre, it will be unique'''
if self._library_id_ is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
@library_id.setter
def library_id(self, val):
self._library_id_ = str(val)
self.conn.executescript('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES ("%s");
'''%self._library_id_)
self.conn.commit()
def connect(self):
if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%(259-4*self.PATH_LIMIT-10))
exists = os.path.exists(self.dbpath)
if not exists:
# Be more strict when creating new libraries as the old calculation
# allowed for max path lengths of 265 chars.
if (iswindows and len(self.library_path) >
self.WINDOWS_LIBRARY_PATH_LIMIT):
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)
self.conn = connect(self.dbpath, self.row_factory)
if exists and self.user_version == 0:
self.conn.close()
os.remove(self.dbpath)
self.conn = connect(self.dbpath, self.row_factory)
if self.user_version == 0:
self.initialize_database()
# remember to add any filter to the connect method in sqlite.py as well
# so that various code that connects directly will not complain about
# missing functions
self.books_list_filter = self.conn.create_dynamic_filter('books_list_filter')
# Store temporary tables in memory
self.conn.execute('pragma temp_store=2')
self.conn.commit()
@classmethod
def exists_at(cls, path):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
get_data_as_dict = get_data_as_dict
def __init__(self, library_path, row_factory=False, default_prefs=None,
read_only=False, is_second_db=False, progress_callback=None,
restore_all_prefs=False):
self.is_second_db = is_second_db
try:
if isbytestring(library_path):
library_path = library_path.decode(filesystem_encoding)
except:
traceback.print_exc()
self.field_metadata = FieldMetadata()
self.format_filename_cache = defaultdict(dict)
self._library_id_ = None
# Create the lock to be used to guard access to the metadata writer
# queues. This must be an RLock, not a Lock
self.dirtied_lock = threading.RLock()
if not os.path.exists(library_path):
os.makedirs(library_path)
self.listeners = set()
self.library_path = os.path.abspath(library_path)
self.row_factory = row_factory
self.dbpath = os.path.join(library_path, 'metadata.db')
self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
self.dbpath)
if read_only and os.path.exists(self.dbpath):
# Work on only a copy of metadata.db to ensure that
# metadata.db is not changed
pt = PersistentTemporaryFile('_metadata_ro.db')
pt.close()
shutil.copyfile(self.dbpath, pt.name)
self.dbpath = pt.name
apply_default_prefs = not os.path.exists(self.dbpath)
self.connect()
self.is_case_sensitive = (not iswindows and
not os.path.exists(self.dbpath.replace('metadata.db',
'MeTAdAtA.dB')))
SchemaUpgrade.__init__(self)
# Guarantee that the library_id is set
self.library_id
# if we are to copy the prefs and structure from some other DB, then
# we need to do it before we call initialize_dynamic
if apply_default_prefs and default_prefs is not None:
if progress_callback is None:
def progress_callback(x, y):
return True
dbprefs = DBPrefs(self)
progress_callback(None, len(default_prefs))
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if not restore_all_prefs and key in frozenset(['news_to_be_synced']):
continue
dbprefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values() if f['is_custom']]
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'], f['datatype'],
f['is_multiple'] is not None and len(f['is_multiple']) > 0,
f['is_editable'], f['display'])
self.initialize_template_cache()
self.initialize_dynamic()
def initialize_template_cache(self):
self.formatter_template_cache = {}
def get_property(self, idx, index_is_id=False, loc=-1):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
return row[loc]
def initialize_dynamic(self):
self.field_metadata = FieldMetadata() # Ensure we start with a clean copy
self.prefs = DBPrefs(self)
defs = self.prefs.defaults
defs['gui_restriction'] = defs['cs_restriction'] = ''
defs['categories_using_hierarchy'] = []
defs['column_color_rules'] = []
defs['column_icon_rules'] = []
defs['grouped_search_make_user_categories'] = []
defs['similar_authors_search_key'] = 'authors'
defs['similar_authors_match_kind'] = 'match_any'
defs['similar_publisher_search_key'] = 'publisher'
defs['similar_publisher_match_kind'] = 'match_any'
defs['similar_tags_search_key'] = 'tags'
defs['similar_tags_match_kind'] = 'match_all'
defs['similar_series_search_key'] = 'series'
defs['similar_series_match_kind'] = 'match_any'
defs['book_display_fields'] = [
('title', False), ('authors', True), ('formats', True),
('series', True), ('identifiers', True), ('tags', True),
('path', True), ('publisher', False), ('rating', False),
('author_sort', False), ('sort', False), ('timestamp', False),
('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
('last_modified', False), ('size', False), ('languages', False),
]
defs['virtual_libraries'] = {}
defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
defs['virt_libs_hidden'] = defs['virt_libs_order'] = ()
# Migrate the bool tristate tweak
defs['bools_are_tristate'] = \
tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
if self.prefs.get('bools_are_tristate') is None:
self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])
# Migrate column coloring rules
if self.prefs.get('column_color_name_1', None) is not None:
from calibre.library.coloring import migrate_old_rule
old_rules = []
for i in range(1, 6):
col = self.prefs.get('column_color_name_'+str(i), None)
templ = self.prefs.get('column_color_template_'+str(i), None)
if col and templ:
try:
del self.prefs['column_color_name_'+str(i)]
rules = migrate_old_rule(self.field_metadata, templ)
for templ in rules:
old_rules.append((col, templ))
except:
pass
if old_rules:
self.prefs['column_color_rules'] += old_rules
# Migrate saved search and user categories to db preference scheme
def migrate_preference(key, default):
oldval = prefs[key]
if oldval != default:
self.prefs[key] = oldval
prefs[key] = default
if key not in self.prefs:
self.prefs[key] = default
migrate_preference('user_categories', {})
migrate_preference('saved_searches', {})
if not self.is_second_db:
set_saved_searches(DBPrefsWrapper(self), 'saved_searches')
# migrate grouped_search_terms
if self.prefs.get('grouped_search_terms', None) is None:
try:
ogst = tweaks.get('grouped_search_terms', {})
ngst = {}
for t in ogst:
ngst[icu_lower(t)] = ogst[t]
self.prefs.set('grouped_search_terms', ngst)
except:
pass
# migrate the gui_restriction preference to a virtual library
gr_pref = self.prefs.get('gui_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['gui_restriction'] = ''
self.prefs['virtual_lib_on_startup'] = gr_pref
# migrate the cs_restriction preference to a virtual library
gr_pref = self.prefs.get('cs_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['cs_restriction'] = ''
self.prefs['cs_virtual_lib_on_startup'] = gr_pref
# Rename any user categories with names that differ only in case
user_cats = self.prefs.get('user_categories', [])
catmap = {}
for uc in user_cats:
ucl = icu_lower(uc)
if ucl not in catmap:
catmap[ucl] = []
catmap[ucl].append(uc)
cats_changed = False
for uc in catmap:
if len(catmap[uc]) > 1:
prints('found user category case overlap', catmap[uc])
cat = catmap[uc][0]
suffix = 1
while icu_lower(cat + str(suffix)) in catmap:
suffix += 1
prints('Renaming user category %s to %s'%(cat, cat+str(suffix)))
user_cats[cat + str(suffix)] = user_cats[cat]
del user_cats[cat]
cats_changed = True
if cats_changed:
self.prefs.set('user_categories', user_cats)
if not self.is_second_db:
load_user_template_functions(self.library_id,
self.prefs.get('user_template_functions', []))
# Load the format filename cache
self.refresh_format_cache()
self.conn.executescript('''
DROP TRIGGER IF EXISTS author_insert_trg;
CREATE TEMP TRIGGER author_insert_trg
AFTER INSERT ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
END;
DROP TRIGGER IF EXISTS author_update_trg;
CREATE TEMP TRIGGER author_update_trg
BEFORE UPDATE ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name)
WHERE id=NEW.id AND name <> NEW.name;
END;
''')
self.conn.execute(
'UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL')
self.conn.executescript('''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.executescript('''
CREATE TEMP VIEW IF NOT EXISTS tag_browser_filtered_news AS SELECT DISTINCT
id,
name,
(SELECT COUNT(books_tags_link.id) FROM books_tags_link WHERE tag=x.id and books_list_filter(book)) count,
(0) as avg_rating,
name as sort
FROM tags as x WHERE name!="{0}" AND id IN
(SELECT DISTINCT tag FROM books_tags_link WHERE book IN
(SELECT DISTINCT book FROM books_tags_link WHERE tag IN
(SELECT id FROM tags WHERE name="{0}")));
'''.format(_('News')))
self.conn.commit()
CustomColumns.__init__(self)
template = '''\
(SELECT {query} FROM books_{table}_link AS link INNER JOIN
{table} ON(link.{link_col}={table}.id) WHERE link.book=books.id)
{col}
'''
columns = ['id', 'title',
# col table link_col query
('authors', 'authors', 'author', 'sortconcat(link.id, name)'),
'timestamp',
'(SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size',
('rating', 'ratings', 'rating', 'ratings.rating'),
('tags', 'tags', 'tag', 'group_concat(name)'),
'(SELECT text FROM comments WHERE book=books.id) comments',
('series', 'series', 'series', 'name'),
('publisher', 'publishers', 'publisher', 'name'),
'series_index',
'sort',
'author_sort',
'(SELECT group_concat(format) FROM data WHERE data.book=books.id) formats',
'path',
'pubdate',
'uuid',
'has_cover',
('au_map', 'authors', 'author',
'aum_sortconcat(link.id, authors.name, authors.sort, authors.link)'),
'last_modified',
'(SELECT identifiers_concat(type, val) FROM identifiers WHERE identifiers.book=books.id) identifiers',
('languages', 'languages', 'lang_code',
'sortconcat(link.id, languages.lang_code)'),
]
lines = []
for col in columns:
line = col
if isinstance(col, tuple):
line = template.format(col=col[0], table=col[1],
link_col=col[2], query=col[3])
lines.append(line)
custom_map = self.custom_columns_in_meta()
# custom col labels are numbers (the id in the custom_columns table)
custom_cols = sorted(custom_map.keys())
lines.extend([custom_map[x] for x in custom_cols])
self.FIELD_MAP = {'id':0, 'title':1, 'authors':2, 'timestamp':3,
'size':4, 'rating':5, 'tags':6, 'comments':7, 'series':8,
'publisher':9, 'series_index':10, 'sort':11, 'author_sort':12,
'formats':13, 'path':14, 'pubdate':15, 'uuid':16, 'cover':17,
'au_map':18, 'last_modified':19, 'identifiers':20, 'languages':21}
for k,v in iteritems(self.FIELD_MAP):
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.values())
for col in custom_cols:
self.FIELD_MAP[col] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label'],
base,
prefer_custom=True)
if self.custom_column_num_map[col]['datatype'] == 'series':
# account for the series index column. Field_metadata knows that
# the series index is one larger than the series. If you change
# it here, be sure to change it there as well.
self.FIELD_MAP[str(col)+'_index'] = base = base+1
self.field_metadata.set_field_record_index(
self.custom_column_num_map[col]['label']+'_index',
base,
prefer_custom=True)
self.FIELD_MAP['ondevice'] = base = base+1
self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
self.FIELD_MAP['marked'] = base = base+1
self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
self.FIELD_MAP['series_sort'] = base = base+1
self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)
self.FIELD_MAP['in_tag_browser'] = base = base+1
self.field_metadata.set_field_record_index('in_tag_browser', base, prefer_custom=False)
script = '''
DROP VIEW IF EXISTS meta2;
CREATE TEMP VIEW meta2 AS
SELECT
{}
FROM books;
'''.format(', \n'.join(lines))
self.conn.executescript(script)
self.conn.commit()
# Reconstruct the user categories, putting them into field_metadata
# Assumption is that someone else will fix them if they change.
self.field_metadata.remove_dynamic_categories()
for user_cat in sorted(self.prefs.get('user_categories', {}).keys(), key=sort_key):
cat_name = '@' + user_cat # add the '@' to avoid name collision
self.field_metadata.add_user_category(label=cat_name, name=user_cat)
# add grouped search term user categories
muc = self.prefs.get('grouped_search_make_user_categories', [])
for cat in sorted(self.prefs.get('grouped_search_terms', {}).keys(), key=sort_key):
if cat in muc:
# There is a chance that these can be duplicates of an existing
# user category. Print the exception and continue.
try:
self.field_metadata.add_user_category(label='@' + cat, name=cat)
except:
traceback.print_exc()
if len(saved_searches().names()):
self.field_metadata.add_search_category(label='search', name=_('Saved searches'))
self.field_metadata.add_grouped_search_terms(
self.prefs.get('grouped_search_terms', {}))
self.book_on_device_func = None
self.data = ResultCache(self.FIELD_MAP, self.field_metadata, db_prefs=self.prefs)
self.search = self.data.search
self.search_getting_ids = self.data.search_getting_ids
self.refresh = functools.partial(self.data.refresh, self)
self.sort = self.data.sort
self.multisort = self.data.multisort
self.index = self.data.index
self.refresh_ids = functools.partial(self.data.refresh_ids, self)
self.row = self.data.row
self.has_id = self.data.has_id
self.count = self.data.count
self.set_marked_ids = self.data.set_marked_ids
for prop in (
'author_sort', 'authors', 'comment', 'comments',
'publisher', 'rating', 'series', 'series_index', 'tags',
'title', 'timestamp', 'uuid', 'pubdate', 'ondevice',
'metadata_last_modified', 'languages',
):
fm = {'comment':'comments', 'metadata_last_modified':
'last_modified'}.get(prop, prop)
setattr(self, prop, functools.partial(self.get_property,
loc=self.FIELD_MAP[fm]))
setattr(self, 'title_sort', functools.partial(self.get_property,
loc=self.FIELD_MAP['sort']))
d = self.conn.get('SELECT book FROM metadata_dirtied', all=True)
with self.dirtied_lock:
self.dirtied_sequence = 0
self.dirtied_cache = {}
for x in d:
self.dirtied_cache[x[0]] = self.dirtied_sequence
self.dirtied_sequence += 1
self.refresh_ondevice = functools.partial(self.data.refresh_ondevice, self)
self.refresh()
self.last_update_check = self.last_modified()
    def break_cycles(self):
        # Break reference cycles between this object and its caches so the
        # database instance can be garbage collected promptly on close.
        self.data.break_cycles()
        self.data = self.field_metadata = self.prefs = self.listeners = \
                self.refresh_ondevice = None
    def initialize_database(self):
        '''Create the metadata schema in a fresh database from the bundled SQL.'''
        metadata_sqlite = P('metadata_sqlite.sql', data=True,
                allow_user_override=False).decode('utf-8')
        self.conn.executescript(metadata_sqlite)
        self.conn.commit()
        # A user_version of 0 means a brand new database; mark it as schema v1
        if self.user_version == 0:
            self.user_version = 1
    # Thin wrappers delegating to the global saved-searches store
    def saved_search_names(self):
        'Return the names of all saved searches.'
        return saved_searches().names()
    def saved_search_rename(self, old_name, new_name):
        'Rename the saved search old_name to new_name.'
        saved_searches().rename(old_name, new_name)
    def saved_search_lookup(self, name):
        'Return the search expression stored under name.'
        return saved_searches().lookup(name)
    def saved_search_add(self, name, val):
        'Store the search expression val under name.'
        saved_searches().add(name, val)
    def saved_search_delete(self, name):
        'Remove the saved search called name.'
        saved_searches().delete(name)
    def saved_search_set_all(self, smap):
        'Replace all saved searches with the name->expression mapping smap.'
        saved_searches().set_all(smap)
    def last_modified(self):
        ''' Return last modified time as a UTC datetime object'''
        # Derived from the mtime of the sqlite file itself
        return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
def refresh_format_cache(self):
self.format_filename_cache = defaultdict(dict)
for book_id, fmt, name in self.conn.get(
'SELECT book,format,name FROM data'):
self.format_filename_cache[book_id][fmt.upper() if fmt else ''] = name
self.format_metadata_cache = defaultdict(dict)
    def check_if_modified(self):
        # Refresh in-memory caches if the database file changed on disk
        # (e.g. modified by another process) since the last check.
        if self.last_modified() > self.last_update_check:
            self.refresh()
            self.refresh_format_cache()
        self.last_update_check = utcnow()
    def path(self, index, index_is_id=False):
        'Return the relative path to the directory containing this books files as a unicode string.'
        row = self.data._data[index] if index_is_id else self.data[index]
        # Paths are stored with '/' separators; convert to the native separator
        return row[self.FIELD_MAP['path']].replace('/', os.sep)
    def abspath(self, index, index_is_id=False, create_dirs=True):
        'Return the absolute path to the directory containing this books files as a unicode string.'
        path = os.path.join(self.library_path, self.path(index, index_is_id=index_is_id))
        # Optionally materialize the directory so callers can write into it
        if create_dirs and not os.path.exists(path):
            os.makedirs(path)
        return path
def construct_path_name(self, id):
'''
Construct the directory name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT]
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT]
while author[-1] in (' ', '.'):
author = author[:-1]
if not author:
author = ascii_filename(_('Unknown'))
path = author + '/' + title + ' (%d)'%id
return path
def construct_file_name(self, id):
'''
Construct the file name for this book based on its metadata.
'''
authors = self.authors(id, index_is_id=True)
if not authors:
authors = _('Unknown')
author = ascii_filename(authors.split(',')[0].replace('|', ',')
)[:self.PATH_LIMIT]
title = ascii_filename(self.title(id, index_is_id=True)
)[:self.PATH_LIMIT]
name = title + ' - ' + author
while name.endswith('.'):
name = name[:-1]
return name
    def rmtree(self, path, permanent=False):
        # Safety guard: never delete a tree that contains the library itself.
        # The delete proceeds only when library_path is NOT inside path.
        if not self.normpath(self.library_path).startswith(self.normpath(path)):
            delete_tree(path, permanent=permanent)
def normpath(self, path):
path = os.path.abspath(os.path.realpath(path))
if not self.is_case_sensitive:
path = os.path.normcase(path).lower()
return path
    def set_path(self, index, index_is_id=False):
        '''
        Set the path to the directory containing this books files based on its
        current title and author. If there was a previous directory, its contents
        are copied and it is deleted.
        '''
        id = index if index_is_id else self.id(index)
        path = self.construct_path_name(id)
        current_path = self.path(id, index_is_id=True).replace(os.sep, '/')
        formats = self.formats(id, index_is_id=True)
        formats = formats.split(',') if formats else []
        # Check if the metadata used to construct paths has changed
        fname = self.construct_file_name(id)
        changed = False
        for format in formats:
            name = self.format_filename_cache[id].get(format.upper(), None)
            if name and name != fname:
                changed = True
                break
        if path == current_path and not changed:
            return
        spath = os.path.join(self.library_path, *current_path.split('/'))
        tpath = os.path.join(self.library_path, *path.split('/'))
        source_ok = current_path and os.path.exists(spath)
        # NOTE(review): WindowsAtomicFolderMove appears to hold the source
        # files open so they can be moved/deleted atomically on Windows —
        # confirm against its implementation.
        wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
        try:
            if not os.path.exists(tpath):
                os.makedirs(tpath)
            if source_ok: # Migrate existing files
                self.copy_cover_to(id, os.path.join(tpath, 'cover.jpg'),
                        index_is_id=True, windows_atomic_move=wam,
                        use_hardlink=True)
                for format in formats:
                    copy_function = functools.partial(self.copy_format_to, id,
                            format, index_is_id=True, windows_atomic_move=wam,
                            use_hardlink=True)
                    try:
                        self.add_format(id, format, None, index_is_id=True,
                                path=tpath, notify=False, copy_function=copy_function)
                    except NoSuchFormat:
                        continue
            # Persist the new path in the db and the in-memory cache
            self.conn.execute('UPDATE books SET path=? WHERE id=?', (path, id))
            self.dirtied([id], commit=False)
            self.conn.commit()
            self.data.set(id, self.FIELD_MAP['path'], path, row_is_id=True)
            # Delete not needed directories
            if source_ok:
                if not samefile(spath, tpath):
                    if wam is not None:
                        wam.delete_originals()
                    self.rmtree(spath, permanent=True)
                    parent = os.path.dirname(spath)
                    # Remove the author directory too if it is now empty
                    if len(os.listdir(parent)) == 0:
                        self.rmtree(parent, permanent=True)
        finally:
            if wam is not None:
                wam.close_handles()
        curpath = self.library_path
        c1, c2 = current_path.split('/'), path.split('/')
        if not self.is_case_sensitive and len(c1) == len(c2):
            # On case-insensitive systems, title and author renames that only
            # change case don't cause any changes to the directories in the file
            # system. This can lead to having the directory names not match the
            # title/author, which leads to trouble when libraries are copied to
            # a case-sensitive system. The following code attempts to fix this
            # by checking each segment. If they are different because of case,
            # then rename the segment to some temp file name, then rename it
            # back to the correct name. Note that the code above correctly
            # handles files in the directories, so no need to do them here.
            for oldseg, newseg in zip(c1, c2):
                if oldseg.lower() == newseg.lower() and oldseg != newseg:
                    try:
                        os.rename(os.path.join(curpath, oldseg),
                                os.path.join(curpath, newseg))
                    except:
                        break # Fail silently since nothing catastrophic has happened
                curpath = os.path.join(curpath, newseg)
    def add_listener(self, listener):
        '''
        Add a listener. Will be called on change events with two arguments.
        Event name and list of affected ids.

        Exceptions raised by a listener are swallowed by notify(), so a
        faulty listener cannot block the others.
        '''
        self.listeners.add(listener)
def notify(self, event, ids=[]):
'Notify all listeners'
for listener in self.listeners:
try:
listener(event, ids)
except:
traceback.print_exc()
continue
    def cover(self, index, index_is_id=False, as_file=False, as_image=False,
            as_path=False):
        '''
        Return the cover image as a bytestring (in JPEG format) or None.

        WARNING: Using as_path will copy the cover to a temp file and return
        the path to the temp file. You should delete the temp file when you are
        done with it.

        :param as_file: If True return the image as an open file object (a SpooledTemporaryFile)
        :param as_image: If True return the image as a QImage object
        :param as_path: If True copy the cover to a temp file and return its path
        '''
        id = index if index_is_id else self.id(index)
        path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
        if os.access(path, os.R_OK):
            try:
                f = open(path, 'rb')
            except OSError:
                # Retry once after a short pause — presumably to ride out a
                # transient file lock (e.g. on Windows); confirm.
                time.sleep(0.2)
                f = open(path, 'rb')
            with f:
                if as_path:
                    pt = PersistentTemporaryFile('_dbcover.jpg')
                    with pt:
                        shutil.copyfileobj(f, pt)
                    return pt.name
                if as_file:
                    ret = SpooledTemporaryFile(SPOOL_SIZE)
                    shutil.copyfileobj(f, ret)
                    ret.seek(0)
                else:
                    ret = f.read()
                    if as_image:
                        from qt.core import QImage
                        i = QImage()
                        i.loadFromData(ret)
                        ret = i
                return ret
    def cover_last_modified(self, index, index_is_id=False):
        'Return the mtime of the cover file (UTC), or the db mtime if there is no cover.'
        id = index if index_is_id else self.id(index)
        path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
        try:
            return utcfromtimestamp(os.stat(path).st_mtime)
        except:
            # Cover doesn't exist
            pass
        return self.last_modified()
    # The field-style interface. These use field keys.

    def get_field(self, idx, key, default=None, index_is_id=False):
        'Return the value of the metadata field ``key`` for the book at ``idx``.'
        # Building the full Metadata object is the simplest correct way to
        # resolve any field key; only fetch the cover when it is requested.
        mi = self.get_metadata(idx, index_is_id=index_is_id,
                get_cover=key == 'cover')
        return mi.get(key, default)
    # Thin delegations to self.field_metadata
    def standard_field_keys(self):
        'Keys of the built-in metadata fields.'
        return self.field_metadata.standard_field_keys()
    def custom_field_keys(self, include_composites=True):
        'Keys of user-defined (custom) columns.'
        return self.field_metadata.custom_field_keys(include_composites)
    def all_field_keys(self):
        'All field keys, standard and custom.'
        return self.field_metadata.all_field_keys()
    def sortable_field_keys(self):
        'Keys of fields that can be used for sorting.'
        return self.field_metadata.sortable_field_keys()
    def searchable_fields(self):
        'Keys of fields that can appear in search expressions.'
        return self.field_metadata.searchable_fields()
    def search_term_to_field_key(self, term):
        'Map a search term (possibly an alias) to its canonical field key.'
        return self.field_metadata.search_term_to_field_key(term)
    def custom_field_metadata(self, include_composites=True):
        'Metadata dicts for the custom columns.'
        return self.field_metadata.custom_field_metadata(include_composites)
    def all_metadata(self):
        'Metadata dicts for all fields.'
        return self.field_metadata.all_metadata()
    def metadata_for_field(self, key):
        'Metadata dict for a single field key.'
        return self.field_metadata[key]
    def clear_dirtied(self, book_id, sequence):
        '''
        Clear the dirtied indicator for the books. This is used when fetching
        metadata, creating an OPF, and writing a file are separated into steps.
        The last step is clearing the indicator
        '''
        with self.dirtied_lock:
            dc_sequence = self.dirtied_cache.get(book_id, None)
            # Only clear if the book was not re-dirtied (sequence bumped)
            # after the caller captured its sequence number.
            if dc_sequence is None or sequence is None or dc_sequence == sequence:
                self.conn.execute('DELETE FROM metadata_dirtied WHERE book=?',
                        (book_id,))
                self.conn.commit()
                try:
                    del self.dirtied_cache[book_id]
                except:
                    pass
            elif dc_sequence is not None:
                # The book was dirtied again while being processed; leave the
                # indicator in place so it is processed again.
                pass
    def dump_metadata(self, book_ids=None, remove_from_dirtied=True,
            commit=True, callback=None):
        '''
        Write metadata for each record to an individual OPF file. If callback
        is not None, it is called once at the start with the number of book_ids
        being processed. And once for every book_id, with arguments (book_id,
        mi, ok).
        '''
        if book_ids is None:
            # Default to all books currently marked dirty in the db
            book_ids = [x[0] for x in self.conn.get(
                'SELECT book FROM metadata_dirtied', all=True)]
        if callback is not None:
            book_ids = tuple(book_ids)
            # Initial call: first argument is the total count, not a book id
            callback(len(book_ids), True, False)
        for book_id in book_ids:
            if not self.data.has_id(book_id):
                if callback is not None:
                    callback(book_id, None, False)
                continue
            path, mi, sequence = self.get_metadata_for_dump(book_id)
            if path is None:
                if callback is not None:
                    callback(book_id, mi, False)
                continue
            try:
                raw = metadata_to_opf(mi)
                with open(path, 'wb') as f:
                    f.write(raw)
                if remove_from_dirtied:
                    self.clear_dirtied(book_id, sequence)
            except:
                # Best-effort: a failed write leaves the book dirty so it is
                # retried later
                pass
            if callback is not None:
                callback(book_id, mi, True)
        if commit:
            self.conn.commit()
    def update_last_modified(self, book_ids, commit=False, now=None):
        'Set last_modified for the books to ``now`` (defaults to the current time).'
        # NOTE: the parameter named ``now`` shadows the module-level now()
        # helper; nowf() is used to obtain the current time instead.
        if now is None:
            now = nowf()
        if book_ids:
            self.conn.executemany(
                'UPDATE books SET last_modified=? WHERE id=?',
                [(now, book) for book in book_ids])
            # Keep the in-memory result cache in sync with the db
            for book_id in book_ids:
                self.data.set(book_id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
            if commit:
                self.conn.commit()
    def dirtied(self, book_ids, commit=True):
        'Mark the books as having dirty metadata (needing an OPF backup).'
        self.update_last_modified(book_ids)
        for book in book_ids:
            with self.dirtied_lock:
                if book in self.dirtied_cache:
                    # Already dirty: just bump its sequence so in-flight
                    # backups of the old state are invalidated
                    self.dirtied_cache[book] = self.dirtied_sequence
                    self.dirtied_sequence += 1
                    continue
                self.conn.execute(
                    'INSERT OR IGNORE INTO metadata_dirtied (book) VALUES (?)',
                        (book,))
                self.dirtied_cache[book] = self.dirtied_sequence
                self.dirtied_sequence += 1
        # If the commit doesn't happen, then the DB table will be wrong. This
        # could lead to a problem because on restart, we won't put the book back
        # into the dirtied_cache. We deal with this by writing the dirtied_cache
        # back to the table on GUI exit. Not perfect, but probably OK
        if book_ids and commit:
            self.conn.commit()
def get_a_dirtied_book(self):
with self.dirtied_lock:
l = len(self.dirtied_cache)
if l > 0:
# The random stuff is here to prevent a single book from
# blocking progress if its metadata cannot be written for some
# reason.
id_ = list(self.dirtied_cache.keys())[random.randint(0, l-1)]
sequence = self.dirtied_cache[id_]
return (id_, sequence)
return (None, None)
    def dirty_queue_length(self):
        'Number of books currently marked as having dirty metadata.'
        return len(self.dirtied_cache)
    def commit_dirty_cache(self):
        '''
        Set the dirty indication for every book in the cache. The vast majority
        of the time, the indication will already be set. However, sometimes
        exceptions may have prevented a commit, which may remove some dirty
        indications from the DB. This call will put them back. Note that there
        is no problem with setting a dirty indication for a book that isn't in
        fact dirty. Just wastes a few cycles.
        '''
        with self.dirtied_lock:
            book_ids = list(self.dirtied_cache.keys())
            self.dirtied_cache = {}
        # dirtied() re-inserts every id into both the db table and the cache
        self.dirtied(book_ids)
    def get_metadata_for_dump(self, idx):
        'Return (opf_path, Metadata, dirty_sequence) for writing the OPF backup of a book.'
        path, mi = (None, None)
        # get the current sequence number for this book to pass back to the
        # backup thread. This will avoid double calls in the case where the
        # thread has not done the work between the put and the get_metadata
        with self.dirtied_lock:
            sequence = self.dirtied_cache.get(idx, None)
        try:
            # While a book is being created, the path is empty. Don't bother to
            # try to write the opf, because it will go to the wrong folder.
            if self.path(idx, index_is_id=True):
                path = os.path.join(self.abspath(idx, index_is_id=True), 'metadata.opf')
                mi = self.get_metadata(idx, index_is_id=True)
                # Always set cover to cover.jpg. Even if cover doesn't exist,
                # no harm done. This way no need to call dirtied when
                # cover is set/removed
                mi.cover = 'cover.jpg'
        except:
            # This almost certainly means that the book has been deleted while
            # the backup operation sat in the queue.
            pass
        return (path, mi, sequence)
    def get_metadata(self, idx, index_is_id=False, get_cover=False,
                     get_user_categories=True, cover_as_data=False):
        '''
        Convenience method to return metadata as a :class:`Metadata` object.
        Note that the list of formats is not verified.

        :param get_cover: If True, also fetch the cover (see cover_as_data)
        :param cover_as_data: If True set mi.cover_data to the JPEG bytes,
            otherwise set mi.cover to a temp-file path
        :param get_user_categories: If True populate mi.user_categories
        :raises ValueError: if no book with this id exists
        '''
        idx = idx if index_is_id else self.id(idx)
        try:
            row = self.data._data[idx]
        except:
            row = None

        if row is None:
            raise ValueError('No book with id: %d'%idx)

        fm = self.FIELD_MAP
        mi = Metadata(None, template_cache=self.formatter_template_cache)

        # au_map encodes authors as name:::sort:::link triples joined by :#:
        aut_list = row[fm['au_map']]
        if aut_list:
            aut_list = [p.split(':::') for p in aut_list.split(':#:') if p]
        else:
            aut_list = []
        aum = []
        aus = {}
        aul = {}
        try:
            for (author, author_sort, link) in aut_list:
                aut = author.replace('|', ',')
                aum.append(aut)
                aus[aut] = author_sort.replace('|', ',')
                aul[aut] = link
        except ValueError:
            # Author has either ::: or :#: in it
            for x in row[fm['authors']].split(','):
                aum.append(x.replace('|', ','))
                aul[aum[-1]] = ''
                aus[aum[-1]] = aum[-1]
        mi.title = row[fm['title']]
        mi.authors = aum
        mi.author_sort = row[fm['author_sort']]
        mi.author_sort_map = aus
        mi.author_link_map = aul
        mi.comments = row[fm['comments']]
        mi.publisher = row[fm['publisher']]
        mi.timestamp = row[fm['timestamp']]
        mi.pubdate = row[fm['pubdate']]
        mi.uuid = row[fm['uuid']]
        mi.title_sort = row[fm['sort']]
        mi.last_modified = row[fm['last_modified']]
        formats = row[fm['formats']]
        mi.format_metadata = {}
        if not formats:
            good_formats = None
        else:
            formats = sorted(formats.split(','))
            mi.format_metadata = FormatMetadata(self, idx, formats)
            good_formats = FormatsList(formats, mi.format_metadata)
        mi.formats = good_formats
        mi.db_approx_formats = formats
        mi._proxy_metadata = p = ProxyMetadata(row[fm['size']], row[fm['ondevice']], formats)
        mi.book_size = p.book_size
        mi.ondevice_col= p.ondevice_col
        tags = row[fm['tags']]
        if tags:
            mi.tags = [i.strip() for i in tags.split(',')]
        languages = row[fm['languages']]
        if languages:
            mi.languages = [i.strip() for i in languages.split(',')]
        mi.series = row[fm['series']]
        if mi.series:
            mi.series_index = row[fm['series_index']]
        mi.rating = row[fm['rating']]
        mi.set_identifiers(self.get_identifiers(idx, index_is_id=True))
        mi.application_id = idx
        mi.id = idx

        # Custom columns: composites come straight from the row, others need
        # a value/extra lookup
        mi.set_all_user_metadata(self.field_metadata.custom_field_metadata())
        for key, meta in self.field_metadata.custom_iteritems():
            if meta['datatype'] == 'composite':
                mi.set(key, val=row[meta['rec_index']])
            else:
                val, extra = self.get_custom_and_extra(idx, label=meta['label'],
                                                       index_is_id=True)
                mi.set(key, val=val, extra=extra)

        user_cats = self.prefs['user_categories']
        user_cat_vals = {}
        if get_user_categories:
            for ucat in user_cats:
                res = []
                for name,cat,ign in user_cats[ucat]:
                    v = mi.get(cat, None)
                    if isinstance(v, list):
                        if name in v:
                            res.append([name,cat])
                    elif name == v:
                        res.append([name,cat])
                user_cat_vals[ucat] = res
        mi.user_categories = user_cat_vals

        if get_cover:
            if cover_as_data:
                cdata = self.cover(idx, index_is_id=True)
                if cdata:
                    mi.cover_data = ('jpeg', cdata)
            else:
                mi.cover = self.cover(idx, index_is_id=True, as_path=True)
        mi.has_cover = _('Yes') if self.has_cover(idx) else ''
        return mi
    def has_book(self, mi):
        'Return True iff a book with exactly this title exists in the library.'
        title = mi.title
        if title:
            if not isinstance(title, str):
                # Legacy callers may pass bytes
                title = title.decode(preferred_encoding, 'replace')
            return bool(self.conn.get('SELECT id FROM books where title=?', (title,), all=False))
        return False
    def has_id(self, id_):
        'Return True iff a book with this id exists in the result cache.'
        return self.data._data[id_] is not None
def books_with_same_title(self, mi, all_matches=True):
title = mi.title
ans = set()
if title:
title = lower(force_unicode(title))
for book_id in self.all_ids():
x = self.title(book_id, index_is_id=True)
if lower(x) == title:
ans.add(book_id)
if not all_matches:
break
return ans
    def find_identical_books(self, mi):
        '''Return ids of books judged identical to mi: author set is a superset
        of mi.authors and the fuzzed titles match.'''
        # Patterns that normalize titles before comparison: strip punctuation,
        # drop the leading article (title-sort pattern), unify separators and
        # collapse whitespace.
        fuzzy_title_patterns = [(re.compile(pat, re.IGNORECASE) if
            isinstance(pat, string_or_bytes) else pat, repl) for pat, repl in
                [
                    (r'[\[\](){}<>\'";,:#]', ''),
                    (get_title_sort_pat(), ''),
                    (r'[-._]', ' '),
                    (r'\s+', ' ')
                ]
        ]

        def fuzzy_title(title):
            # Apply the normalization patterns in order
            title = title.strip().lower()
            for pat, repl in fuzzy_title_patterns:
                title = pat.sub(repl, title)
            return title

        identical_book_ids = set()
        if mi.authors:
            try:
                quathors = mi.authors[:10]  # Too many authors causes parsing of
                                            # the search expression to fail
                query = ' and '.join(['author:"=%s"'%(a.replace('"', '')) for a in
                    quathors])
                qauthors = mi.authors[10:]
            except ValueError:
                return identical_book_ids
            try:
                book_ids = self.data.parse(query)
            except:
                traceback.print_exc()
                return identical_book_ids
            if qauthors and book_ids:
                # Verify the remaining (unsearched) authors by hand
                matches = set()
                qauthors = {lower(x) for x in qauthors}
                for book_id in book_ids:
                    aut = self.authors(book_id, index_is_id=True)
                    if aut:
                        aut = {lower(x.replace('|', ',')) for x in
                                aut.split(',')}
                        if aut.issuperset(qauthors):
                            matches.add(book_id)
                book_ids = matches

            for book_id in book_ids:
                fbook_title = self.title(book_id, index_is_id=True)
                fbook_title = fuzzy_title(fbook_title)
                mbook_title = fuzzy_title(mi.title)
                if fbook_title == mbook_title:
                    identical_book_ids.add(book_id)

        return identical_book_ids
    def remove_cover(self, id, notify=True, commit=True):
        'Delete the cover file for the book and clear its has_cover flag.'
        path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
        if os.path.exists(path):
            try:
                os.remove(path)
            except OSError:
                # Retry once after a short pause — presumably to ride out a
                # transient file lock (e.g. on Windows); confirm.
                time.sleep(0.2)
                os.remove(path)
        self.conn.execute('UPDATE books SET has_cover=0 WHERE id=?', (id,))
        if commit:
            self.conn.commit()
        self.data.set(id, self.FIELD_MAP['cover'], False, row_is_id=True)
        if notify:
            self.notify('cover', [id])
    def set_cover(self, id, data, notify=True, commit=True):
        '''
        Set the cover for this book.

        `data`: Can be either a QImage, QPixmap, file object or bytestring
        '''
        base_path = os.path.join(self.library_path, self.path(id,
            index_is_id=True))
        if not os.path.exists(base_path):
            # The book directory is missing; recreate it (this may also
            # rename it to match current metadata)
            self.set_path(id, index_is_id=True)
            base_path = os.path.join(self.library_path, self.path(id,
                index_is_id=True))
            self.dirtied([id])
        if not os.path.exists(base_path):
            os.makedirs(base_path)

        path = os.path.join(base_path, 'cover.jpg')

        if callable(getattr(data, 'save', None)):
            # QImage/QPixmap-like object
            data.save(path)
        else:
            if callable(getattr(data, 'read', None)):
                data = data.read()
            try:
                save_cover_data_to(data, path)
            except OSError:
                # Retry once after a short pause — presumably a transient
                # file lock (e.g. on Windows); confirm.
                time.sleep(0.2)
                save_cover_data_to(data, path)
        now = nowf()
        self.conn.execute(
            'UPDATE books SET has_cover=1,last_modified=? WHERE id=?', (now, id))
        if commit:
            self.conn.commit()
        self.data.set(id, self.FIELD_MAP['cover'], True, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
        if notify:
            self.notify('cover', [id])
    def has_cover(self, id):
        'Return the cached has_cover flag for the book.'
        return self.data.get(id, self.FIELD_MAP['cover'], row_is_id=True)
    def set_has_cover(self, id, val):
        'Set the has_cover flag in the db and caches without touching the cover file.'
        dval = 1 if val else 0
        now = nowf()
        self.conn.execute(
                'UPDATE books SET has_cover=?,last_modified=? WHERE id=?',
                (dval, now, id))
        self.data.set(id, self.FIELD_MAP['cover'], val, row_is_id=True)
        self.data.set(id, self.FIELD_MAP['last_modified'], now, row_is_id=True)
def book_on_device(self, id):
if callable(self.book_on_device_func):
return self.book_on_device_func(id)
return None
def book_on_device_string(self, id):
loc = []
count = 0
on = self.book_on_device(id)
if on is not None:
m, a, b, count = on[:4]
if m is not None:
loc.append(_('Main'))
if a is not None:
loc.append(_('Card A'))
if b is not None:
loc.append(_('Card B'))
return ', '.join(loc) + ((_(' (%s books)')%count) if count > 1 else '')
    def set_book_on_device_func(self, func):
        'Register the callback used by book_on_device().'
        self.book_on_device_func = func
def all_formats(self):
formats = self.conn.get('SELECT DISTINCT format from data')
if not formats:
return set()
return {f[0] for f in formats}
def format_files(self, index, index_is_id=False):
id = index if index_is_id else self.id(index)
return [(v, k) for k, v in iteritems(self.format_filename_cache[id])]
def formats(self, index, index_is_id=False, verify_formats=True):
''' Return available formats as a comma separated list or None if there are no available formats '''
id_ = index if index_is_id else self.id(index)
formats = self.data.get(id_, self.FIELD_MAP['formats'], row_is_id=True)
if not formats:
return None
if not verify_formats:
return formats
formats = formats.split(',')
ans = []
for fmt in formats:
if self.format_abspath(id_, fmt, index_is_id=True) is not None:
ans.append(fmt)
if not ans:
return None
return ','.join(ans)
    def has_format(self, index, format, index_is_id=False):
        'Return True iff the format file for this book exists on disk.'
        return self.format_abspath(index, format, index_is_id) is not None
def format_last_modified(self, id_, fmt):
m = self.format_metadata(id_, fmt)
if m:
return m['mtime']
    def format_metadata(self, id_, fmt, allow_cache=True, update_db=False,
            commit=False):
        '''Return {'path', 'size', 'mtime'} for the format file, or {} if absent.

        :param allow_cache: serve from the in-memory cache when possible
        :param update_db: also write the file size into the data table
        '''
        if not fmt:
            return {}
        fmt = fmt.upper()
        if allow_cache:
            x = self.format_metadata_cache[id_].get(fmt, None)
            if x is not None:
                return x
        path = self.format_abspath(id_, fmt, index_is_id=True)
        ans = {}
        if path is not None:
            stat = os.stat(path)
            ans['path'] = path
            ans['size'] = stat.st_size
            ans['mtime'] = utcfromtimestamp(stat.st_mtime)
            self.format_metadata_cache[id_][fmt] = ans
            if update_db:
                self.conn.execute(
                    'UPDATE data SET uncompressed_size=? WHERE format=? AND'
                    ' book=?', (stat.st_size, fmt, id_))
                if commit:
                    self.conn.commit()
        return ans
    def format_hash(self, id_, fmt):
        '''Return the SHA-256 hex digest of the format file.

        :raises NoSuchFormat: if the book has no such format on disk
        '''
        path = self.format_abspath(id_, fmt, index_is_id=True)
        if path is None:
            raise NoSuchFormat('Record %d has no fmt: %s'%(id_, fmt))
        sha = hashlib.sha256()
        with open(path, 'rb') as f:
            # Hash in SPOOL_SIZE chunks to bound memory use
            while True:
                raw = f.read(SPOOL_SIZE)
                sha.update(raw)
                if len(raw) < SPOOL_SIZE:
                    break
        return sha.hexdigest()
    def format_path(self, index, fmt, index_is_id=False):
        '''
        This method is intended to be used only in those rare situations, like
        Drag'n Drop, when you absolutely need the path to the original file.
        Otherwise, use format(..., as_path=True).

        Note that a networked backend will always return None.

        :raises NoSuchFormat: if the book has no such format
        '''
        path = self.format_abspath(index, fmt, index_is_id=index_is_id)
        if path is None:
            id_ = index if index_is_id else self.id(index)
            raise NoSuchFormat('Record %d has no format: %s'%(id_, fmt))
        return path
    def format_abspath(self, index, format, index_is_id=False):
        '''
        Return absolute path to the ebook file of format `format`

        WARNING: This method will return a dummy path for a network backend DB,
        so do not rely on it, use format(..., as_path=True) instead.

        Currently used only in calibredb list, the viewer and the catalogs (via
        get_data_as_dict()).

        Apart from the viewer, I don't believe any of the others do any file
        I/O with the results of this call.
        '''
        id = index if index_is_id else self.id(index)
        try:
            name = self.format_filename_cache[id][format.upper()]
        except:
            return None
        if name:
            path = os.path.join(self.library_path, self.path(id, index_is_id=True))
            format = ('.' + format.lower()) if format else ''
            fmt_path = os.path.join(path, name+format)
            if os.path.exists(fmt_path):
                return fmt_path
            # The expected file is missing; look for any file with the right
            # extension and adopt it by copying it to the expected name
            try:
                candidates = glob.glob(os.path.join(path, '*'+format))
            except: # If path contains strange characters this throws an exc
                candidates = []
            if format and candidates and os.path.exists(candidates[0]):
                try:
                    shutil.copyfile(candidates[0], fmt_path)
                except:
                    # This can happen if candidates[0] or fmt_path is too long,
                    # which can happen if the user copied the library from a
                    # non windows machine to a windows machine.
                    return None
                return fmt_path
    def copy_format_to(self, index, fmt, dest, index_is_id=False,
            windows_atomic_move=None, use_hardlink=False):
        '''
        Copy the format ``fmt`` to the file like object ``dest``. If the
        specified format does not exist, raises :class:`NoSuchFormat` error.
        dest can also be a path, in which case the format is copied to it, iff
        the path is different from the current path (taking case sensitivity
        into account).

        If use_hardlink is True, a hard link will be created instead of the
        file being copied. Use with care, because a hard link means that
        modifying any one file will cause both files to be modified.

        windows_atomic_move is an internally used parameter. You should not use
        it in any code outside this module.
        '''
        path = self.format_abspath(index, fmt, index_is_id=index_is_id)
        if path is None:
            id_ = index if index_is_id else self.id(index)
            raise NoSuchFormat('Record %d has no %s file'%(id_, fmt))
        if windows_atomic_move is not None:
            if not isinstance(dest, string_or_bytes):
                raise Exception("Error, you must pass the dest as a path when"
                        " using windows_atomic_move")
            if dest:
                if samefile(path, dest):
                    # Ensure that the file has the same case as dest
                    try:
                        if path != dest:
                            os.rename(path, dest)
                    except:
                        pass # Nothing too catastrophic happened, the cases mismatch, that's all
                else:
                    windows_atomic_move.copy_path_to(path, dest)
        else:
            if hasattr(dest, 'write'):
                # dest is a file-like object
                with open(path, 'rb') as f:
                    shutil.copyfileobj(f, dest)
                if hasattr(dest, 'flush'):
                    dest.flush()
            elif dest:
                # dest is a filesystem path
                if samefile(dest, path):
                    if not self.is_case_sensitive and path != dest:
                        # Ensure that the file has the same case as dest
                        try:
                            os.rename(path, dest)
                        except:
                            pass # Nothing too catastrophic happened, the cases mismatch, that's all
                else:
                    if use_hardlink:
                        try:
                            hardlink_file(path, dest)
                            return
                        except:
                            # Hard link failed (e.g. cross-device); fall back
                            # to a plain copy
                            pass
                    with open(path, 'rb') as f, open(dest, 'wb') as d:
                        shutil.copyfileobj(f, d)
    def copy_cover_to(self, index, dest, index_is_id=False,
            windows_atomic_move=None, use_hardlink=False):
        '''
        Copy the cover to the file like object ``dest``. Returns False
        if no cover exists or dest is the same file as the current cover.
        dest can also be a path in which case the cover is
        copied to it iff the path is different from the current path (taking
        case sensitivity into account).

        If use_hardlink is True, a hard link will be created instead of the
        file being copied. Use with care, because a hard link means that
        modifying any one file will cause both files to be modified.

        windows_atomic_move is an internally used parameter. You should not use
        it in any code outside this module.
        '''
        id = index if index_is_id else self.id(index)
        path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
        if windows_atomic_move is not None:
            if not isinstance(dest, string_or_bytes):
                raise Exception("Error, you must pass the dest as a path when"
                        " using windows_atomic_move")
            if os.access(path, os.R_OK) and dest and not samefile(dest, path):
                windows_atomic_move.copy_path_to(path, dest)
                return True
        else:
            if os.access(path, os.R_OK):
                try:
                    f = open(path, 'rb')
                except OSError:
                    # Retry once after a short pause — presumably a transient
                    # file lock (e.g. on Windows); confirm.
                    time.sleep(0.2)
                    f = open(path, 'rb')
                with f:
                    if hasattr(dest, 'write'):
                        # dest is a file-like object
                        shutil.copyfileobj(f, dest)
                        if hasattr(dest, 'flush'):
                            dest.flush()
                        return True
                    elif dest and not samefile(dest, path):
                        # dest is a path different from the cover's own path
                        if use_hardlink:
                            try:
                                hardlink_file(path, dest)
                                return True
                            except:
                                # Fall back to a plain copy
                                pass
                        with open(dest, 'wb') as d:
                            shutil.copyfileobj(f, d)
                        return True
        return False
    def format(self, index, format, index_is_id=False, as_file=False,
            mode='r+b', as_path=False, preserve_filename=False):
        '''
        Return the ebook format as a bytestring or `None` if the format doesn't exist,
        or we don't have permission to write to the ebook file.

        :param as_file: If True the ebook format is returned as a file object. Note
                        that the file object is a SpooledTemporaryFile, so if what you want to
                        do is copy the format to another file, use :method:`copy_format_to`
                        instead for performance.
        :param as_path: Copies the format file to a temp file and returns the
                        path to the temp file
        :param preserve_filename: If True and returning a path the filename is
                                  the same as that used in the library. Note that using
                                  this means that repeated calls yield the same
                                  temp file (which is re-created each time)
        :param mode: The mode in which the underlying file is opened (passed
                     straight to open(); default opens for read+write)
        '''
        path = self.format_abspath(index, format, index_is_id=index_is_id)
        if path is not None:
            with open(path, mode) as f:
                if as_path:
                    if preserve_filename:
                        bd = base_dir()
                        d = os.path.join(bd, 'format_abspath')
                        try:
                            os.makedirs(d)
                        except:
                            pass
                        fname = os.path.basename(path)
                        ret = os.path.join(d, fname)
                        with open(ret, 'wb') as f2:
                            shutil.copyfileobj(f, f2)
                    else:
                        with PersistentTemporaryFile('.'+format.lower()) as pt:
                            shutil.copyfileobj(f, pt)
                            ret = pt.name
                elif as_file:
                    ret = SpooledTemporaryFile(SPOOL_SIZE)
                    shutil.copyfileobj(f, ret)
                    ret.seek(0)
                    # Various bits of code try to use the name as the default
                    # title when reading metadata, so set it
                    ret.name = f.name
                else:
                    ret = f.read()
            return ret
def add_format_with_hooks(self, index, format, fpath, index_is_id=False,
path=None, notify=True, replace=True):
npath = self.run_import_plugins(fpath, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
stream = open(npath, 'rb')
format = check_ebook_format(stream, format)
id = index if index_is_id else self.id(index)
retval = self.add_format(id, format, stream, replace=replace,
index_is_id=True, path=path, notify=notify)
run_plugins_on_postimport(self, id, format)
return retval
    def add_format(self, index, format, stream, index_is_id=False, path=None,
            notify=True, replace=True, copy_function=None):
        '''Add (or replace) the format file for a book from ``stream``.

        Returns False when the format already exists and replace is False,
        True on success. ``copy_function``, when given, is called with the
        destination path instead of copying from ``stream``.
        '''
        id = index if index_is_id else self.id(index)
        if not format:
            format = ''
        # Invalidate cached stat info for this format
        self.format_metadata_cache[id].pop(format.upper(), None)
        name = self.format_filename_cache[id].get(format.upper(), None)
        if path is None:
            path = os.path.join(self.library_path, self.path(id, index_is_id=True))
        if name and not replace:
            return False
        name = self.construct_file_name(id)
        ext = ('.' + format.lower()) if format else ''
        dest = os.path.join(path, name+ext)
        pdir = os.path.dirname(dest)
        if not os.path.exists(pdir):
            os.makedirs(pdir)
        size = 0
        if copy_function is not None:
            copy_function(dest)
            size = os.path.getsize(dest)
        else:
            # Only copy when the stream is not already the destination file
            if (not getattr(stream, 'name', False) or not samefile(dest,
                stream.name)):
                with open(dest, 'wb') as f:
                    shutil.copyfileobj(stream, f)
                    size = f.tell()
            elif os.path.exists(dest):
                size = os.path.getsize(dest)
        self.conn.execute('INSERT OR REPLACE INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)',
                          (id, format.upper(), size, name))
        self.update_last_modified([id], commit=False)
        self.conn.commit()
        self.format_filename_cache[id][format.upper()] = name
        self.refresh_ids([id])
        if notify:
            self.notify('metadata', [id])
        return True
    def save_original_format(self, book_id, fmt, notify=True):
        '''Save a copy of the format as ORIGINAL_<fmt> (used before conversions).

        Returns False if the format file does not exist.
        :raises ValueError: if fmt is already an ORIGINAL_* format
        '''
        fmt = fmt.upper()
        if 'ORIGINAL' in fmt:
            raise ValueError('Cannot save original of an original fmt')
        opath = self.format_abspath(book_id, fmt, index_is_id=True)
        if opath is None:
            return False
        nfmt = 'ORIGINAL_'+fmt
        with open(opath, 'rb') as f:
            return self.add_format(book_id, nfmt, f, index_is_id=True, notify=notify)
def original_fmt(self, book_id, fmt):
fmt = fmt
nfmt = ('ORIGINAL_%s'%fmt).upper()
opath = self.format_abspath(book_id, nfmt, index_is_id=True)
return fmt if opath is None else nfmt
    def restore_original_format(self, book_id, original_fmt, notify=True):
        '''
        Replace the current format with its saved ORIGINAL_* copy, then
        remove the ORIGINAL_* entry.

        :return: True if a restore was performed, False if the ORIGINAL_*
            format does not exist.
        '''
        opath = self.format_abspath(book_id, original_fmt, index_is_id=True)
        if opath is not None:
            # 'ORIGINAL_EPUB' -> 'EPUB'
            fmt = original_fmt.partition('_')[2]
            with open(opath, 'rb') as f:
                self.add_format(book_id, fmt, f, index_is_id=True, notify=False)
            self.remove_format(book_id, original_fmt, index_is_id=True, notify=notify)
            return True
        return False
    def delete_book(self, id, notify=True, commit=True, permanent=False,
            do_clean=True):
        '''
        Removes book from the result cache and the underlying database.
        If you set commit to False, you must call clean() manually afterwards

        :param permanent: If True, the book directory is deleted outright
            instead of being moved to the recycle bin
        :param do_clean: If True (and commit is True), remove orphaned
            items (authors, tags, etc.) after the delete
        '''
        try:
            path = os.path.join(self.library_path, self.path(id, index_is_id=True))
        except:
            path = None
        if path and os.path.exists(path):
            self.rmtree(path, permanent=permanent)
            parent = os.path.dirname(path)
            # Remove the parent (author) directory too if now empty
            if len(os.listdir(parent)) == 0:
                self.rmtree(parent, permanent=permanent)
        self.conn.execute('DELETE FROM books WHERE id=?', (id,))
        if commit:
            self.conn.commit()
            if do_clean:
                self.clean()
        self.data.books_deleted([id])
        if notify:
            self.notify('delete', [id])
    def remove_format(self, index, format, index_is_id=False, notify=True,
                      commit=True, db_only=False):
        '''
        Remove the format ``format`` from the specified book, deleting the
        file on disk unless ``db_only`` is True. Does nothing if the book
        does not have the format.
        '''
        id = index if index_is_id else self.id(index)
        if not format:
            format = ''
        self.format_metadata_cache[id].pop(format.upper(), None)
        name = self.format_filename_cache[id].get(format.upper(), None)
        if name:
            if not db_only:
                try:
                    path = self.format_abspath(id, format, index_is_id=True)
                    if path:
                        delete_file(path)
                except:
                    # File deletion is best-effort; the db row is still removed
                    traceback.print_exc()
            self.format_filename_cache[id].pop(format.upper(), None)
            self.conn.execute('DELETE FROM data WHERE book=? AND format=?', (id, format.upper()))
            if commit:
                self.conn.commit()
            self.refresh_ids([id])
            if notify:
                self.notify('metadata', [id])
    def clean_standard_field(self, field, commit=False):
        '''
        Remove orphaned link rows and items for a single standard field
        (e.g. authors), using the table/link_column info in field_metadata.
        '''
        # Don't bother with validity checking. Let the exception fly out so
        # we can see what happened
        def doit(table, ltable_col):
            # First drop link rows whose book no longer exists, then drop
            # items that are no longer linked to any book
            st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
                    'FROM books WHERE id=book) < 1;')%table
            self.conn.execute(st)
            st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
                    'FROM books_%(table)s_link WHERE '
                    '%(ltable_col)s=%(table)s.id) < 1;') % dict(
                            table=table, ltable_col=ltable_col)
            self.conn.execute(st)

        fm = self.field_metadata[field]
        doit(fm['table'], fm['link_column'])
        if commit:
            self.conn.commit()
    def clean(self):
        '''
        Remove orphaned entries.
        '''
        def doit(ltable, table, ltable_col):
            # Drop link rows whose book no longer exists, then items with
            # no remaining links
            st = ('DELETE FROM books_%s_link WHERE (SELECT COUNT(id) '
                    'FROM books WHERE id=book) < 1;')%ltable
            self.conn.execute(st)
            st = ('DELETE FROM %(table)s WHERE (SELECT COUNT(id) '
                    'FROM books_%(ltable)s_link WHERE '
                    '%(ltable_col)s=%(table)s.id) < 1;') % dict(
                            ltable=ltable, table=table, ltable_col=ltable_col)
            self.conn.execute(st)

        for ltable, table, ltable_col in [
                ('authors', 'authors', 'author'),
                ('publishers', 'publishers', 'publisher'),
                ('tags', 'tags', 'tag'),
                ('series', 'series', 'series'),
                ('languages', 'languages', 'lang_code'),
                ]:
            doit(ltable, table, ltable_col)

        # Also remove tags that are empty/whitespace-only
        for id_, tag in self.conn.get('SELECT id, name FROM tags', all=True):
            if not tag.strip():
                self.conn.execute('DELETE FROM books_tags_link WHERE tag=?',
                        (id_,))
                self.conn.execute('DELETE FROM tags WHERE id=?', (id_,))
        self.clean_custom()
        self.conn.commit()
    def get_books_for_category(self, category, id_):
        '''
        Return the set of book ids that have the item ``id_`` in the field
        ``category``. For composite columns ``id_`` is the item value, not
        a database id, and the in-memory cache is scanned instead of SQL.
        '''
        ans = set()

        if category not in self.field_metadata:
            return ans

        field = self.field_metadata[category]
        if field['datatype'] == 'composite':
            dex = field['rec_index']
            for book in self.data.iterall():
                if field['is_multiple']:
                    vals = [v.strip() for v in
                            book[dex].split(field['is_multiple']['cache_to_list'])
                            if v.strip()]
                    if id_ in vals:
                        ans.add(book[0])
                elif book[dex] == id_:
                    ans.add(book[0])
            return ans

        ans = self.conn.get(
                'SELECT book FROM books_{tn}_link WHERE {col}=?'.format(
                    tn=field['table'], col=field['link_column']), (id_,))
        return {x[0] for x in ans}
# data structures for get_categories
CATEGORY_SORTS = CATEGORY_SORTS
MATCH_TYPE = ('any', 'all')
class TCat_Tag:
def __init__(self, name, sort):
self.n = name
self.s = sort
self.c = 0
self.id_set = set()
self.rt = 0
self.rc = 0
self.id = None
def set_all(self, c, rt, rc, id):
self.c = c
self.rt = rt
self.rc = rc
self.id = id
def __unicode_representation__(self):
return 'n=%s s=%s c=%d rt=%d rc=%d id=%s' % (
self.n, self.s, self.c, self.rt, self.rc, self.id)
__str__ = __unicode_representation__
    def clean_user_categories(self):
        '''
        Normalize user category names: strip empty hierarchy components and
        replace names that become empty with a fresh numeric name. Persists
        the cleaned map back to prefs if it changed, and returns it.
        '''
        user_cats = self.prefs.get('user_categories', {})
        new_cats = {}
        for k in user_cats:
            comps = [c.strip() for c in k.split('.') if c.strip()]
            if len(comps) == 0:
                # Name was entirely empty components; invent a numeric name
                i = 1
                while True:
                    if str(i) not in user_cats:
                        new_cats[str(i)] = user_cats[k]
                        break
                    i += 1
            else:
                new_cats['.'.join(comps)] = user_cats[k]
        try:
            if new_cats != user_cats:
                self.prefs.set('user_categories', new_cats)
        except:
            # Best effort: failure to persist must not break the caller
            pass
        return new_cats
    def get_categories(self, sort='name', ids=None):
        '''
        Build the Tag browser category map: a dict mapping category name to
        a sorted list of Tag objects, covering standard fields, custom
        columns, composites marked make_category, news, formats,
        identifiers, user categories (including grouped search terms) and
        saved searches.

        :param sort: One of CATEGORY_SORTS ('name', 'popularity', rating)
        :param ids: If not None, restrict the scan to these book ids
        '''
        # start = last = time.clock()
        if sort not in self.CATEGORY_SORTS:
            raise ValueError('sort ' + sort + ' not a valid value')

        self.books_list_filter.change([] if not ids else ids)
        id_filter = None if ids is None else frozenset(ids)

        tb_cats = self.field_metadata
        tcategories = {}
        tids = {}
        md = []

        # First, build the maps. We need a category->items map and an
        # item -> (item_id, sort_val) map to use in the books loop
        for category in tb_cats:
            cat = tb_cats[category]
            if not cat['is_category'] or cat['kind'] in ['user', 'search'] \
                    or category in ['news', 'formats'] or cat.get('is_csp',
                                                                  False):
                continue
            # Get the ids for the item values
            if not cat['is_custom']:
                funcs = {
                        'authors': self.get_authors_with_ids,
                        'series': self.get_series_with_ids,
                        'publisher': self.get_publishers_with_ids,
                        'tags': self.get_tags_with_ids,
                        'languages': self.get_languages_with_ids,
                        'rating': self.get_ratings_with_ids,
                    }
                func = funcs.get(category, None)
                if func:
                    list = func()
                else:
                    raise ValueError(category + ' has no get with ids function')
            else:
                list = self.get_custom_items_with_ids(label=cat['label'])
            tids[category] = {}
            if category == 'authors':
                for l in list:
                    (id, val, sort_val) = (l[0], l[1], l[2])
                    tids[category][val] = (id, sort_val)
            elif category == 'languages':
                for l in list:
                    id, val = l[0], calibre_langcode_to_name(l[1])
                    tids[category][l[1]] = (id, val)
            elif cat['datatype'] == 'series':
                for l in list:
                    (id, val) = (l[0], l[1])
                    tids[category][val] = (id, title_sort(val))
            elif cat['datatype'] == 'rating':
                for l in list:
                    (id, val) = (l[0], l[1])
                    tids[category][val] = (id, f'{val:05.2f}')
            elif cat['datatype'] == 'text' and cat['is_multiple'] and \
                            cat['display'].get('is_names', False):
                for l in list:
                    (id, val) = (l[0], l[1])
                    tids[category][val] = (id, author_to_author_sort(val))
            else:
                for l in list:
                    (id, val) = (l[0], l[1])
                    tids[category][val] = (id, val)
            # add an empty category to the category map
            tcategories[category] = {}
            # create a list of category/field_index for the books scan to use.
            # This saves iterating through field_metadata for each book
            md.append((category, cat['rec_index'],
                       cat['is_multiple'].get('cache_to_list', None), False))

        # Composite columns marked make_category are collected on the fly
        # in the books loop, so they get empty maps here
        for category in tb_cats:
            cat = tb_cats[category]
            if cat['datatype'] == 'composite' and \
                                cat['display'].get('make_category', False):
                tids[category] = {}
                tcategories[category] = {}
                md.append((category, cat['rec_index'],
                           cat['is_multiple'].get('cache_to_list', None),
                           cat['datatype'] == 'composite'))
        # print 'end phase "collection":', time.clock() - last, 'seconds'
        # last = time.clock()

        # Now scan every book looking for category items.
        # Code below is duplicated because it shaves off 10% of the loop time
        id_dex = self.FIELD_MAP['id']
        rating_dex = self.FIELD_MAP['rating']
        tag_class = LibraryDatabase2.TCat_Tag
        for book in self.data.iterall():
            if id_filter is not None and book[id_dex] not in id_filter:
                continue
            rating = book[rating_dex]
            # We kept track of all possible category field_map positions above
            for (cat, dex, mult, is_comp) in md:
                if not book[dex]:
                    continue
                tid_cat = tids[cat]
                tcats_cat = tcategories[cat]
                if not mult:
                    val = book[dex]
                    if is_comp:
                        item = tcats_cat.get(val, None)
                        if not item:
                            item = tag_class(val, val)
                            tcats_cat[val] = item
                        item.c += 1
                        item.id = val
                        if rating > 0:
                            item.rt += rating
                            item.rc += 1
                        continue
                    try:
                        (item_id, sort_val) = tid_cat[val]  # let exceptions fly
                        item = tcats_cat.get(val, None)
                        if not item:
                            item = tag_class(val, sort_val)
                            tcats_cat[val] = item
                        item.c += 1
                        item.id_set.add(book[0])
                        item.id = item_id
                        if rating > 0:
                            item.rt += rating
                            item.rc += 1
                    except:
                        prints('get_categories: item', val, 'is not in', cat, 'list!')
                else:
                    vals = book[dex].split(mult)
                    if is_comp:
                        vals = [v.strip() for v in vals if v.strip()]
                        for val in vals:
                            if val not in tid_cat:
                                tid_cat[val] = (val, val)
                    for val in vals:
                        try:
                            (item_id, sort_val) = tid_cat[val]  # let exceptions fly
                            item = tcats_cat.get(val, None)
                            if not item:
                                item = tag_class(val, sort_val)
                                tcats_cat[val] = item
                            item.c += 1
                            item.id_set.add(book[0])
                            item.id = item_id
                            if rating > 0:
                                item.rt += rating
                                item.rc += 1
                        except:
                            prints('get_categories: item', val, 'is not in', cat, 'list!')
        # print 'end phase "books":', time.clock() - last, 'seconds'
        # last = time.clock()

        # Now do news
        tcategories['news'] = {}
        cat = tb_cats['news']
        tn = cat['table']
        cn = cat['column']
        if ids is None:
            query = '''SELECT id, {}, count, avg_rating, sort
                       FROM tag_browser_{}'''.format(cn, tn)
        else:
            query = '''SELECT id, {}, count, avg_rating, sort
                       FROM tag_browser_filtered_{}'''.format(cn, tn)
        # results will be sorted later
        data = self.conn.get(query)
        for r in data:
            item = LibraryDatabase2.TCat_Tag(r[1], r[1])
            item.set_all(c=r[2], rt=r[2]*r[3], rc=r[2], id=r[0])
            tcategories['news'][r[1]] = item
        # print 'end phase "news":', time.clock() - last, 'seconds'
        # last = time.clock()

        # Build the real category list by iterating over the temporary copy
        # and building the Tag instances.
        categories = {}
        tag_class = Tag
        for category in tb_cats:
            if category not in tcategories:
                continue
            cat = tb_cats[category]

            # prepare the place where we will put the array of Tags
            categories[category] = []

            # icon_map is not None if get_categories is to store an icon and
            # possibly a tooltip in the tag structure.
            # NOTE(review): icon_map is unconditionally None here, so the
            # icon branches below appear to be dead in this copy — confirm
            icon = icon_map = None
            label = tb_cats.key_to_label(category)
            if icon_map:
                if not tb_cats.is_custom_field(category):
                    if category in icon_map:
                        icon = icon_map[label]
                else:
                    icon = icon_map['custom:']
                    icon_map[category] = icon

            datatype = cat['datatype']

            def avgr(x):
                return (0.0 if x.rc == 0 else x.rt / x.rc)
            # Duplicate the build of items below to avoid using a lambda func
            # in the main Tag loop. Saves a few %
            if datatype == 'rating':
                def formatter(x):
                    return ('★' * int(x // 2))

                def avgr(x):  # noqa
                    return x.n
                # eliminate the zero ratings line as well as count == 0
                items = [v for v in tcategories[category].values() if v.c > 0 and v.n != 0]
            elif category == 'authors':
                # Clean up the authors strings to human-readable form
                def formatter(x):
                    return x.replace('|', ',')
                items = [v for v in tcategories[category].values() if v.c > 0]
            elif category == 'languages':
                # Use a human readable language string
                formatter = calibre_langcode_to_name
                items = [v for v in tcategories[category].values() if v.c > 0]
            else:
                def formatter(x):
                    return str(x)
                items = [v for v in tcategories[category].values() if v.c > 0]

            # sort the list
            if sort == 'name':
                def kf(x):
                    return sort_key(x.s)
                reverse=False
            elif sort == 'popularity':
                def kf(x):
                    return x.c
                reverse=True
            else:
                kf = avgr
                reverse=True
            items.sort(key=kf, reverse=reverse)

            if tweaks['categories_use_field_for_author_name'] == 'author_sort' and\
                    (category == 'authors' or
                     (cat['display'].get('is_names', False) and
                      cat['is_custom'] and cat['is_multiple'] and
                      cat['datatype'] == 'text')):
                use_sort_as_name = True
            else:
                use_sort_as_name = False
            is_editable = (category not in ['news', 'rating', 'languages'] and
                datatype != "composite")
            categories[category] = [tag_class(formatter(r.n), count=r.c, id=r.id,
                                        avg=avgr(r), sort=r.s,
                                        category=category,
                                        id_set=r.id_set, is_editable=is_editable,
                                        use_sort_as_name=use_sort_as_name)
                                    for r in items]

        # print 'end phase "tags list":', time.clock() - last, 'seconds'
        # last = time.clock()

        # Needed for legacy databases that have multiple ratings that
        # map to n stars
        for r in categories['rating']:
            r.id_set = None
            for x in categories['rating']:
                if r.name == x.name and r.id != x.id:
                    r.count = r.count + x.count
                    categories['rating'].remove(x)
                    break

        # We delayed computing the standard formats category because it does not
        # use a view, but is computed dynamically
        categories['formats'] = []
        icon = None
        if icon_map and 'formats' in icon_map:
            icon = icon_map['formats']
        for fmt in self.conn.get('SELECT DISTINCT format FROM data'):
            fmt = fmt[0]
            if ids is not None:
                count = self.conn.get('''SELECT COUNT(id)
                                       FROM data
                                       WHERE format=? AND
                                       books_list_filter(book)''', (fmt,),
                                       all=False)
            else:
                count = self.conn.get('''SELECT COUNT(id)
                                       FROM data
                                       WHERE format=?''', (fmt,),
                                       all=False)
            if count > 0:
                categories['formats'].append(Tag(fmt, count=count,
                                                 category='formats', is_editable=False))

        if sort == 'popularity':
            categories['formats'].sort(key=lambda x: x.count, reverse=True)
        else:  # no ratings exist to sort on
            # No need for ICU here.
            categories['formats'].sort(key=lambda x:x.name)

        # Now do identifiers. This works like formats
        categories['identifiers'] = []
        icon = None
        if icon_map and 'identifiers' in icon_map:
            icon = icon_map['identifiers']
        for ident in self.conn.get('SELECT DISTINCT type FROM identifiers'):
            ident = ident[0]
            if ids is not None:
                count = self.conn.get('''SELECT COUNT(book)
                                       FROM identifiers
                                       WHERE type=? AND
                                       books_list_filter(book)''', (ident,),
                                       all=False)
            else:
                count = self.conn.get('''SELECT COUNT(id)
                                       FROM identifiers
                                       WHERE type=?''', (ident,),
                                       all=False)
            if count > 0:
                categories['identifiers'].append(Tag(ident, count=count,
                                                 category='identifiers',
                                                 is_editable=False))

        if sort == 'popularity':
            categories['identifiers'].sort(key=lambda x: x.count, reverse=True)
        else:  # no ratings exist to sort on
            # No need for ICU here.
            categories['identifiers'].sort(key=lambda x:x.name)

        # ### Now do the user-defined categories. ####
        user_categories = dict.copy(self.clean_user_categories())

        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c in categories.keys():
            taglist[c] = dict(map(lambda t:(icu_lower(t.name), t), categories[c]))

        muc = self.prefs.get('grouped_search_make_user_categories', [])
        gst = self.prefs.get('grouped_search_terms', {})
        for c in gst:
            if c not in muc:
                continue
            user_categories[c] = []
            for sc in gst[c]:
                if sc in list(categories.keys()):
                    for t in categories[sc]:
                        user_categories[c].append([t.name, sc, 0])

        gst_icon = icon_map['gst'] if icon_map else None
        for user_cat in sorted(user_categories.keys(), key=sort_key):
            items = []
            names_seen = {}
            for (name,label,ign) in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat in gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            t = names_seen[n]
                            t.id_set |= taglist[label][n].id_set
                            t.count += taglist[label][n].count
                        else:
                            t = copy.copy(taglist[label][n])
                            t.icon = gst_icon
                            names_seen[t.name] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, to not include nodes w zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            # Not a problem if we accumulate entries in the icon map
            if icon_map is not None:
                icon_map[cat_name] = icon_map['user:']
            if sort == 'popularity':
                categories[cat_name] = \
                    sorted(items, key=lambda x: x.count, reverse=True)
            elif sort == 'name':
                categories[cat_name] = \
                    sorted(items, key=lambda x: sort_key(x.sort))
            else:
                categories[cat_name] = \
                    sorted(items, key=lambda x:x.avg_rating, reverse=True)

        # ### Finally, the saved searches category ####
        items = []
        icon = None
        if icon_map and 'search' in icon_map:
            icon = icon_map['search']
        for srch in saved_searches().names():
            items.append(Tag(srch,
                             sort=srch, category='search',
                             is_editable=False))
        if len(items):
            if icon_map is not None:
                icon_map['search'] = icon_map['search']
            categories['search'] = items

        # print 'last phase ran in:', time.clock() - last, 'seconds'
        # print 'get_categories ran in:', time.clock() - start, 'seconds'

        return categories
    # End get_categories
    def tags_older_than(self, tag, delta, must_have_tag=None,
            must_have_authors=None):
        '''
        Return the ids of all books having the tag ``tag`` that are older than
        than the specified time. tag comparison is case insensitive.

        :param delta: A timedelta object or None. If None, then all ids with
            the tag are returned.

        :param must_have_tag: If not None the list of matches will be
            restricted to books that have this tag

        :param must_have_authors: A list of authors. If not None the list of
            matches will be restricted to books that have these authors (case
            insensitive).
        '''
        tag = tag.lower().strip()
        mht = must_have_tag.lower().strip() if must_have_tag else None
        now = nowf()
        # Cache the record indices used in the scan loop
        tindex = self.FIELD_MAP['timestamp']
        gindex = self.FIELD_MAP['tags']
        iindex = self.FIELD_MAP['id']
        aindex = self.FIELD_MAP['authors']
        mah = must_have_authors
        if mah is not None:
            # Authors are stored comma separated with ',' escaped as '|'
            mah = [x.replace(',', '|').lower() for x in mah]
            mah = ','.join(mah)
        for r in self.data._data:
            if r is not None:
                if delta is None or (now - r[tindex]) > delta:
                    if mah:
                        authors = r[aindex] or ''
                        if authors.lower() != mah:
                            continue
                    tags = r[gindex]
                    if tags:
                        tags = [x.strip() for x in tags.lower().split(',')]
                        if tag in tags and (mht is None or mht in tags):
                            yield r[iindex]
    def get_next_series_num_for(self, series):
        '''
        Return the series_index to use for a new book added to ``series``,
        as controlled by the series_index_auto_increment tweak.
        '''
        series_id = None
        if series:
            series_id = self.conn.get('SELECT id from series WHERE name=?',
                (series,), all=False)
        if series_id is None:
            # Series does not exist yet: start from the tweak value if it
            # is numeric, otherwise 1.0
            if isinstance(tweaks['series_index_auto_increment'], numbers.Number):
                return float(tweaks['series_index_auto_increment'])
            return 1.0
        series_indices = self.conn.get(
            ('SELECT series_index FROM books WHERE id IN '
            '(SELECT book FROM books_series_link where series=?) '
            'ORDER BY series_index'),
            (series_id,))
        return self._get_next_series_num_for_list(series_indices)
    def _get_next_series_num_for_list(self, series_indices):
        # Thin wrapper over the module-level helper of the same name, which
        # implements the actual numbering policy
        return _get_next_series_num_for_list(series_indices)
    def set(self, row, column, val, allow_case_change=False):
        '''
        Convenience method for setting the title, authors, publisher, tags or
        rating

        :param row: Row number in the current view (not a book id)
        :return: The set of book ids whose cached metadata may need refreshing
        '''
        id = self.data[row][0]
        col = self.FIELD_MAP[column]

        books_to_refresh = {id}
        # NOTE: set_args deliberately captures the original string value of
        # val, before it is converted below (e.g. for authors), because the
        # in-memory cache stores the string form
        set_args = (row, col, val)
        if column == 'authors':
            val = string_to_authors(val)
            books_to_refresh |= self.set_authors(id, val, notify=False,
                                                 allow_case_change=allow_case_change)
        elif column == 'title':
            self.set_title(id, val, notify=False)
        elif column == 'publisher':
            books_to_refresh |= self.set_publisher(id, val, notify=False,
                                                   allow_case_change=allow_case_change)
        elif column == 'rating':
            self.set_rating(id, val, notify=False)
        elif column == 'tags':
            books_to_refresh |= \
                self.set_tags(id, [x.strip() for x in val.split(',') if x.strip()],
                    append=False, notify=False, allow_case_change=allow_case_change)
        self.data.set(*set_args)
        self.data.refresh_ids(self, [id])
        self.set_path(id, True)
        self.notify('metadata', [id])
        return books_to_refresh
    def set_metadata(self, id, mi, ignore_errors=False, set_title=True,
                     set_authors=True, commit=True, force_changes=False,
                     notify=True):
        '''
        Set metadata for the book `id` from the `Metadata` object `mi`

        Setting force_changes=True will force set_metadata to update fields even
        if mi contains empty values. In this case, 'None' is distinguished from
        'empty'. If mi.XXX is None, the XXX is not replaced, otherwise it is.
        The tags, identifiers, and cover attributes are special cases. Tags and
        identifiers cannot be set to None so then will always be replaced if
        force_changes is true. You must ensure that mi contains the values you
        want the book to have. Covers are always changed if a new cover is
        provided, but are never deleted. Also note that force_changes has no
        effect on setting title or authors.
        '''
        if callable(getattr(mi, 'to_book_metadata', None)):
            # Handle code passing in a OPF object instead of a Metadata object
            mi = mi.to_book_metadata()

        def doit(func, *args, **kwargs):
            # Run a setter, optionally swallowing (but printing) exceptions
            try:
                func(*args, **kwargs)
            except:
                if ignore_errors:
                    traceback.print_exc()
                else:
                    raise

        def should_replace_field(attr):
            return (force_changes and (mi.get(attr, None) is not None)) or \
                    not mi.is_null(attr)

        path_changed = False
        if set_title and mi.title:
            self._set_title(id, mi.title)
            path_changed = True
        if set_authors:
            if not mi.authors:
                mi.authors = [_('Unknown')]
            authors = []
            for a in mi.authors:
                authors += string_to_authors(a)
            self._set_authors(id, authors)
            path_changed = True
        if path_changed:
            # Title/author changes move the book's files on disk
            self.set_path(id, index_is_id=True)

        if should_replace_field('title_sort'):
            self.set_title_sort(id, mi.title_sort, notify=False, commit=False)
        if should_replace_field('author_sort'):
            doit(self.set_author_sort, id, mi.author_sort, notify=False,
                    commit=False)
        if should_replace_field('publisher'):
            doit(self.set_publisher, id, mi.publisher, notify=False,
                    commit=False)

        # Setting rating to zero is acceptable.
        if mi.rating is not None:
            doit(self.set_rating, id, mi.rating, notify=False, commit=False)
        if should_replace_field('series'):
            doit(self.set_series, id, mi.series, notify=False, commit=False)

        # force_changes has no effect on cover manipulation
        if mi.cover_data[1] is not None:
            doit(self.set_cover, id, mi.cover_data[1], commit=False)
        elif isinstance(mi.cover, string_or_bytes) and mi.cover:
            if os.access(mi.cover, os.R_OK):
                with open(mi.cover, 'rb') as f:
                    raw = f.read()
                if raw:
                    doit(self.set_cover, id, raw, commit=False)

        # if force_changes is true, tags are always replaced because the
        # attribute cannot be set to None.
        if should_replace_field('tags'):
            doit(self.set_tags, id, mi.tags, notify=False, commit=False)
        if should_replace_field('comments'):
            doit(self.set_comment, id, mi.comments, notify=False, commit=False)
        if should_replace_field('languages'):
            doit(self.set_languages, id, mi.languages, notify=False, commit=False)

        # Setting series_index to zero is acceptable
        if mi.series_index is not None:
            doit(self.set_series_index, id, mi.series_index, notify=False,
                    commit=False)
        if should_replace_field('pubdate'):
            doit(self.set_pubdate, id, mi.pubdate, notify=False, commit=False)
        if getattr(mi, 'timestamp', None) is not None:
            doit(self.set_timestamp, id, mi.timestamp, notify=False,
                    commit=False)

        # identifiers will always be replaced if force_changes is True
        mi_idents = mi.get_identifiers()
        if force_changes:
            self.set_identifiers(id, mi_idents, notify=False, commit=False)
        elif mi_idents:
            identifiers = self.get_identifiers(id, index_is_id=True)
            for key, val in iteritems(mi_idents):
                if val and val.strip():  # Don't delete an existing identifier
                    identifiers[icu_lower(key)] = val
            self.set_identifiers(id, identifiers, notify=False, commit=False)

        # Only set custom columns whose datatype (and multiplicity for text)
        # matches this library's definition of the column
        user_mi = mi.get_all_user_metadata(make_copy=False)
        for key in user_mi:
            if key in self.field_metadata and \
                    user_mi[key]['datatype'] == self.field_metadata[key]['datatype'] and \
                    (user_mi[key]['datatype'] != 'text' or
                     user_mi[key]['is_multiple'] == self.field_metadata[key]['is_multiple']):
                val = mi.get(key, None)
                if force_changes or val is not None:
                    doit(self.set_custom, id, val=val, extra=mi.get_extra(key),
                         label=user_mi[key]['label'], commit=False, notify=False)
        if commit:
            self.conn.commit()
        if notify:
            self.notify('metadata', [id])
def authors_sort_strings(self, id, index_is_id=False):
'''
Given a book, return the list of author sort strings
for the book's authors
'''
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT sort
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (sort,) in aut_strings:
result.append(sort)
return result
# Given a book, return the map of author sort strings for the book's authors
def authors_with_sort_strings(self, id, index_is_id=False):
id = id if index_is_id else self.id(id)
aut_strings = self.conn.get('''
SELECT authors.id, authors.name, authors.sort, authors.link
FROM authors, books_authors_link as bl
WHERE bl.book=? and authors.id=bl.author
ORDER BY bl.id''', (id,))
result = []
for (id_, author, sort, link) in aut_strings:
result.append((id_, author.replace('|', ','), sort, link))
return result
# Given a book, return the author_sort string for authors of the book
def author_sort_from_book(self, id, index_is_id=False):
auts = self.authors_sort_strings(id, index_is_id)
return ' & '.join(auts).replace('|', ',')
# Given an author, return a list of books with that author
def books_for_author(self, id_, index_is_id=False):
id_ = id_ if index_is_id else self.id(id_)
books = self.conn.get('''
SELECT bl.book
FROM books_authors_link as bl
WHERE bl.author=?''', (id_,))
return [b[0] for b in books]
# Given a list of authors, return the author_sort string for the authors,
# preferring the author sort associated with the author over the computed
# string
def author_sort_from_authors(self, authors):
result = []
for aut in authors:
r = self.conn.get('SELECT sort FROM authors WHERE name=?',
(aut.replace(',', '|'),), all=False)
if r is None:
result.append(author_to_author_sort(aut))
else:
result.append(r)
return ' & '.join(result).replace('|', ',')
    def _update_author_in_cache(self, id_, ss, final_authors):
        '''
        Update the db author_sort column and the in-memory cache rows
        (authors, author_sort, au_map) for a single book. Does not commit.
        '''
        self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (ss, id_))
        self.data.set(id_, self.FIELD_MAP['authors'],
                      ','.join([a.replace(',', '|') for a in final_authors]),
                      row_is_id=True)
        self.data.set(id_, self.FIELD_MAP['author_sort'], ss, row_is_id=True)

        # Rebuild the author -> (sort, link) map cached per book
        aum = self.authors_with_sort_strings(id_, index_is_id=True)
        self.data.set(id_, self.FIELD_MAP['au_map'],
            ':#:'.join([':::'.join((au.replace(',', '|'), aus, aul))
                                        for (_, au, aus, aul) in aum]),
            row_is_id=True)
    def _set_authors(self, id, authors, allow_case_change=False):
        '''
        Replace the author links for book ``id`` with ``authors``, creating
        author rows as needed and optionally updating the stored case of
        existing author names. Returns the set of book ids whose cached
        metadata may need refreshing. Does not commit or update paths.
        '''
        if not authors:
            authors = [_('Unknown')]
        self.conn.execute('DELETE FROM books_authors_link WHERE book=?',(id,))
        books_to_refresh = {id}
        final_authors = []
        for a in authors:
            case_change = False
            if not a:
                continue
            # ',' in a name is stored escaped as '|'
            a = a.strip().replace(',', '|')
            if not isinstance(a, str):
                a = a.decode(preferred_encoding, 'replace')

            aus = self.conn.get('SELECT id, name, sort FROM authors WHERE name=?', (a,))
            if aus:
                aid, name, sort = aus[0]
                # Handle change of case
                if name != a:
                    if allow_case_change:
                        ns = author_to_author_sort(a.replace('|', ','))
                        # Only update the sort if it tracked the name
                        if strcmp(sort, ns) == 0:
                            sort = ns
                        self.conn.execute('''UPDATE authors SET name=?, sort=?
                                             WHERE id=?''', (a, sort, aid))
                        case_change = True
                    else:
                        a = name
            else:
                aid = self.conn.execute('''INSERT INTO authors(name)
                                           VALUES (?)''', (a,)).lastrowid
            final_authors.append(a.replace('|', ','))
            try:
                self.conn.execute('''INSERT INTO books_authors_link(book, author)
                                     VALUES (?,?)''', (id, aid))
            except IntegrityError:  # Sometimes books specify the same author twice in their metadata
                pass
            if case_change:
                # All books by this author may show the changed case
                bks = self.conn.get('''SELECT book FROM books_authors_link
                                       WHERE author=?''', (aid,))
                books_to_refresh |= {bk[0] for bk in bks}
                for bk in books_to_refresh:
                    ss = self.author_sort_from_book(id, index_is_id=True)
                    aus = self.author_sort(bk, index_is_id=True)
                    # Only push the new sort into books whose author_sort
                    # was tracking the computed value
                    if strcmp(aus, ss) == 0:
                        self._update_author_in_cache(bk, ss, final_authors)
        # This can repeat what was done above in rare cases. Let it.
        ss = self.author_sort_from_book(id, index_is_id=True)
        self._update_author_in_cache(id, ss, final_authors)
        self.clean_standard_field('authors', commit=True)
        return books_to_refresh
    def windows_check_if_files_in_use(self, book_id):
        '''
        Raises an EACCES IOError if any of the files in the folder of book_id
        are opened in another program on windows.
        '''
        if iswindows:
            path = self.path(book_id, index_is_id=True)
            if path:
                spath = os.path.join(self.library_path, *path.split('/'))
                wam = None
                if os.path.exists(spath):
                    try:
                        # Constructing the mover raises if any file is locked;
                        # we never actually move anything
                        wam = WindowsAtomicFolderMove(spath)
                    finally:
                        if wam is not None:
                            wam.close_handles()
    def set_authors(self, id, authors, notify=True, commit=True,
                    allow_case_change=False):
        '''
        Note that even if commit is False, the db will still be committed to
        because this causes the location of files to change

        :param authors: A list of authors.
        :return: Set of book ids whose cached metadata may need refreshing
        '''
        self.windows_check_if_files_in_use(id)
        books_to_refresh = self._set_authors(id, authors,
                                             allow_case_change=allow_case_change)
        self.dirtied({id}|books_to_refresh, commit=False)
        if commit:
            self.conn.commit()
        # Author change moves the book's directory on disk
        self.set_path(id, index_is_id=True)
        if notify:
            self.notify('metadata', [id])
        return books_to_refresh
    def set_title_sort(self, id, title_sort_, notify=True, commit=True):
        '''
        Set the title sort string for the book. Returns False if
        ``title_sort_`` is empty, True otherwise.
        '''
        if not title_sort_:
            return False
        if isbytestring(title_sort_):
            title_sort_ = title_sort_.decode(preferred_encoding, 'replace')
        self.conn.execute('UPDATE books SET sort=? WHERE id=?', (title_sort_, id))
        self.data.set(id, self.FIELD_MAP['sort'], title_sort_, row_is_id=True)
        self.dirtied([id], commit=False)
        if commit:
            self.conn.commit()
        if notify:
            self.notify('metadata', [id])
        return True
    def _set_title(self, id, title):
        '''
        Update the title in the db and the in-memory cache, keeping the
        title sort in step. Does not commit or move files. Returns False
        when ``title`` is empty.
        '''
        if not title:
            return False
        if isbytestring(title):
            title = title.decode(preferred_encoding, 'replace')
        old_title = self.title(id, index_is_id=True)
        # We cannot check if old_title == title as previous code might have
        # already updated the cache
        only_case_change = icu_lower(old_title) == icu_lower(title)
        self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
        self.data.set(id, self.FIELD_MAP['title'], title, row_is_id=True)
        if only_case_change:
            # SQLite update trigger will not update sort on a case change
            self.conn.execute('UPDATE books SET sort=? WHERE id=?',
                    (title_sort(title), id))
        ts = self.conn.get('SELECT sort FROM books WHERE id=?', (id,),
                all=False)
        if ts:
            self.data.set(id, self.FIELD_MAP['sort'], ts, row_is_id=True)
        return True
    def set_title(self, id, title, notify=True, commit=True):
        '''
        Note that even if commit is False, the db will still be committed to
        because this causes the location of files to change
        '''
        self.windows_check_if_files_in_use(id)
        if not self._set_title(id, title):
            return
        # Title change moves the book's directory on disk
        self.set_path(id, index_is_id=True)
        self.dirtied([id], commit=False)
        if commit:
            self.conn.commit()
        if notify:
            self.notify('metadata', [id])
    def set_languages(self, book_id, languages, notify=True, commit=True):
        '''
        Replace the languages for ``book_id``. Language strings are
        canonicalized to ISO codes; duplicates and the non-language codes
        und/zxx/mis/mul are dropped. Returns the set of book ids whose
        cached metadata may need refreshing.
        '''
        self.conn.execute(
            'DELETE FROM books_languages_link WHERE book=?', (book_id,))
        # Remove languages that are no longer linked to any book
        self.conn.execute('''DELETE FROM languages WHERE (SELECT COUNT(id)
                                 FROM books_languages_link WHERE
                                 books_languages_link.lang_code=languages.id) < 1''')

        books_to_refresh = {book_id}
        final_languages = []
        for l in languages:
            lc = canonicalize_lang(l)
            if not lc or lc in final_languages or lc in ('und', 'zxx', 'mis',
                    'mul'):
                continue
            final_languages.append(lc)
            lc_id = self.conn.get('SELECT id FROM languages WHERE lang_code=?',
                    (lc,), all=False)
            if lc_id is None:
                lc_id = self.conn.execute('''INSERT INTO languages(lang_code)
                                           VALUES (?)''', (lc,)).lastrowid
            self.conn.execute('''INSERT INTO books_languages_link(book, lang_code)
                                     VALUES (?,?)''', (book_id, lc_id))
        self.dirtied(books_to_refresh, commit=False)
        if commit:
            self.conn.commit()
        self.data.set(book_id, self.FIELD_MAP['languages'],
                ','.join(final_languages), row_is_id=True)
        if notify:
            self.notify('metadata', [book_id])
        return books_to_refresh
    def set_timestamp(self, id, dt, notify=True, commit=True):
        '''
        Set the date-added timestamp of the book. ``dt`` may be a datetime
        or a date string (parsed as UTC). Falsy values are ignored.
        '''
        if dt:
            if isinstance(dt, (str, bytes)):
                dt = parse_date(dt, as_utc=True, assume_utc=False)
            self.conn.execute('UPDATE books SET timestamp=? WHERE id=?', (dt, id))
            self.data.set(id, self.FIELD_MAP['timestamp'], dt, row_is_id=True)
            self.dirtied([id], commit=False)
            if commit:
                self.conn.commit()
            if notify:
                self.notify('metadata', [id])
    def set_pubdate(self, id, dt, notify=True, commit=True):
        '''
        Set the publication date of the book. Falsy ``dt`` stores
        UNDEFINED_DATE; strings are parsed as dates.
        '''
        if not dt:
            dt = UNDEFINED_DATE
        if isinstance(dt, string_or_bytes):
            dt = parse_only_date(dt)
        self.conn.execute('UPDATE books SET pubdate=? WHERE id=?', (dt, id))
        self.data.set(id, self.FIELD_MAP['pubdate'], dt, row_is_id=True)
        self.dirtied([id], commit=False)
        if commit:
            self.conn.commit()
        if notify:
            self.notify('metadata', [id])
    def set_publisher(self, id, publisher, notify=True, commit=True,
            allow_case_change=False):
        '''
        Set the publisher of book ``id``. Returns the set of book ids whose
        cached rows need refreshing (more than one book when an existing
        publisher's case was changed, since all its books are affected).
        '''
        self.conn.execute('DELETE FROM books_publishers_link WHERE book=?',(id,))
        books_to_refresh = {id}
        if publisher:
            case_change = False
            if not isinstance(publisher, str):
                publisher = publisher.decode(preferred_encoding, 'replace')
            pubx = self.conn.get('''SELECT id,name from publishers
                                    WHERE name=?''', (publisher,))
            if pubx:
                aid, cur_name = pubx[0]
                if publisher != cur_name:
                    # Same publisher, different case.
                    if allow_case_change:
                        self.conn.execute('''UPDATE publishers SET name=?
                                         WHERE id=?''', (publisher, aid))
                        case_change = True
                    else:
                        # Keep the existing casing; no other books change.
                        publisher = cur_name
                        books_to_refresh = set()
            else:
                aid = self.conn.execute('''INSERT INTO publishers(name)
                                           VALUES (?)''', (publisher,)).lastrowid
            self.conn.execute('''INSERT INTO books_publishers_link(book, publisher)
                                 VALUES (?,?)''', (id, aid))
            if case_change:
                # Every book linked to this publisher sees the case change.
                bks = self.conn.get('''SELECT book FROM books_publishers_link
                                       WHERE publisher=?''', (aid,))
                books_to_refresh |= {bk[0] for bk in bks}
        # Garbage-collect publishers that no longer have any books.
        self.conn.execute('''DELETE FROM publishers WHERE (SELECT COUNT(id)
                             FROM books_publishers_link
                             WHERE publisher=publishers.id) < 1''')
        self.dirtied({id}|books_to_refresh, commit=False)
        if commit:
            self.conn.commit()
        self.data.set(id, self.FIELD_MAP['publisher'], publisher, row_is_id=True)
        if notify:
            self.notify('metadata', [id])
        return books_to_refresh
def set_uuid(self, id, uuid, notify=True, commit=True):
if uuid:
self.conn.execute('UPDATE books SET uuid=? WHERE id=?', (uuid, id))
self.data.set(id, self.FIELD_MAP['uuid'], uuid, row_is_id=True)
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id])
def get_id_from_uuid(self, uuid):
if uuid:
return (self.data._uuid_map.get(uuid, None) or
self.conn.get('SELECT id FROM books WHERE uuid=?', (uuid,),
all=False))
# Convenience methods for tags_list_editor
# Note: we generally do not need to refresh_ids because library_view will
# refresh everything.
def get_ratings_with_ids(self):
result = self.conn.get('SELECT id,rating FROM ratings')
if not result:
return []
return result
def dirty_books_referencing(self, field, id, commit=True):
# Get the list of books to dirty -- all books that reference the item
table = self.field_metadata[field]['table']
link = self.field_metadata[field]['link_column']
bks = self.conn.get(
f'SELECT book from books_{table}_link WHERE {link}=?',
(id,))
books = []
for (book_id,) in bks:
books.append(book_id)
self.dirtied(books, commit=commit)
def get_tags_with_ids(self):
result = self.conn.get('SELECT id,name FROM tags')
if not result:
return []
return result
def get_languages_with_ids(self):
result = self.conn.get('SELECT id,lang_code FROM languages')
if not result:
return []
return result
    def rename_tag(self, old_id, new_name):
        '''
        Rename tag ``old_id`` to ``new_name``, merging it into an existing
        tag of that name when one exists. ``new_name`` may be a
        comma-separated list: the first name is the rename target and the
        remaining names are appended as additional tags to the same books.
        '''
        # It is possible that new_name is in fact a set of names. Split it on
        # comma to find out. If it is, then rename the first one and append the
        # rest
        new_names = [t.strip() for t in new_name.strip().split(',') if t.strip()]
        new_name = new_names[0]
        new_names = new_names[1:]
        # get the list of books that reference the tag being changed
        books = self.conn.get('''SELECT book from books_tags_link
                                 WHERE tag=?''', (old_id,))
        books = [b[0] for b in books]
        new_id = self.conn.get(
                    '''SELECT id from tags
                       WHERE name=?''', (new_name,), all=False)
        if new_id is None or old_id == new_id:
            # easy cases. Simply rename the tag. Do it even if equal, in case
            # there is a change of case
            self.conn.execute('''UPDATE tags SET name=?
                                 WHERE id=?''', (new_name, old_id))
            new_id = old_id
        else:
            # It is possible that by renaming a tag, the tag will appear
            # twice on a book. This will throw an integrity error, aborting
            # all the changes. To get around this, we first delete any links
            # to the new_id from books referencing the old_id, so that
            # renaming old_id to new_id will be unique on the book
            for book_id in books:
                self.conn.execute('''DELETE FROM books_tags_link
                                     WHERE book=? and tag=?''', (book_id, new_id))
            # Change the link table to point at the new tag
            self.conn.execute('''UPDATE books_tags_link SET tag=?
                                 WHERE tag=?''',(new_id, old_id,))
            # Get rid of the no-longer used tag record
            self.conn.execute('DELETE FROM tags WHERE id=?', (old_id,))
        if new_names:
            # have some left-over names to process. Add them to the book.
            for book_id in books:
                self.set_tags(book_id, new_names, append=True, notify=False,
                              commit=False)
        self.dirtied(books, commit=False)
        # Drop any tags left with zero books attached.
        self.clean_standard_field('tags', commit=False)
        self.conn.commit()
def delete_tag_using_id(self, id):
self.dirty_books_referencing('tags', id, commit=False)
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
def get_series_with_ids(self):
result = self.conn.get('SELECT id,name FROM series')
if not result:
return []
return result
    def rename_series(self, old_id, new_name, change_index=True):
        '''
        Rename series ``old_id`` to ``new_name``, merging into an existing
        series of that name when one exists. When merging and
        ``change_index`` is True, books moved into the existing series get
        fresh series indices (unless the tweak disables auto-increment).
        '''
        new_name = new_name.strip()
        new_id = self.conn.get(
                    '''SELECT id from series
                       WHERE name=?''', (new_name,), all=False)
        if new_id is None or old_id == new_id:
            # Simple rename (also covers a case-only change of the same row).
            new_id = old_id
            self.conn.execute('UPDATE series SET name=? WHERE id=?',
                              (new_name, old_id))
        else:
            # New series exists. Must update the link, then assign a
            # new series index to each of the books.
            if change_index:
                # Get the list of books where we must update the series index
                books = self.conn.get('''SELECT books.id
                                         FROM books, books_series_link as lt
                                         WHERE books.id = lt.book AND lt.series=?
                                         ORDER BY books.series_index''', (old_id,))
            # Now update the link table
            self.conn.execute('''UPDATE books_series_link
                                 SET series=?
                                 WHERE series=?''',(new_id, old_id,))
            if change_index and tweaks['series_index_auto_increment'] != 'no_change':
                # Now set the indices
                for (book_id,) in books:
                    # Get the next series index
                    index = self.get_next_series_num_for(new_name)
                    self.conn.execute('''UPDATE books
                                         SET series_index=?
                                         WHERE id=?''',(index, book_id,))
        self.dirty_books_referencing('series', new_id, commit=False)
        # Drop any series rows left with zero books attached.
        self.clean_standard_field('series', commit=False)
        self.conn.commit()
def delete_series_using_id(self, id):
self.dirty_books_referencing('series', id, commit=False)
books = self.conn.get('SELECT book from books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM books_series_link WHERE series=?', (id,))
self.conn.execute('DELETE FROM series WHERE id=?', (id,))
for (book_id,) in books:
self.conn.execute('UPDATE books SET series_index=1.0 WHERE id=?', (book_id,))
self.conn.commit()
def get_publishers_with_ids(self):
result = self.conn.get('SELECT id,name FROM publishers')
if not result:
return []
return result
    def rename_publisher(self, old_id, new_name):
        '''
        Rename publisher ``old_id`` to ``new_name``, merging into an
        existing publisher of that name when one exists.
        '''
        new_name = new_name.strip()
        new_id = self.conn.get(
                    '''SELECT id from publishers
                       WHERE name=?''', (new_name,), all=False)
        if new_id is None or old_id == new_id:
            new_id = old_id
            # New name doesn't exist. Simply change the old name
            self.conn.execute('UPDATE publishers SET name=? WHERE id=?',
                              (new_name, old_id))
        else:
            # Change the link table to point at the new one
            self.conn.execute('''UPDATE books_publishers_link
                                SET publisher=?
                                WHERE publisher=?''',(new_id, old_id,))
            # Get rid of the no-longer used publisher
            self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
        self.dirty_books_referencing('publisher', new_id, commit=False)
        # Drop any publishers left with zero books attached.
        self.clean_standard_field('publisher', commit=False)
        self.conn.commit()
def delete_publisher_using_id(self, old_id):
self.dirty_books_referencing('publisher', old_id, commit=False)
self.conn.execute('''DELETE FROM books_publishers_link
WHERE publisher=?''', (old_id,))
self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
self.conn.commit()
def get_authors_with_ids(self):
result = self.conn.get('SELECT id,name,sort,link FROM authors')
if not result:
return []
return result
def get_author_id(self, author):
author = author.replace(',', '|')
result = self.conn.get('SELECT id FROM authors WHERE name=?',
(author,), all=False)
return result
def set_link_field_for_author(self, aid, link, commit=True, notify=False):
if not link:
link = ''
self.conn.execute('UPDATE authors SET link=? WHERE id=?', (link.strip(), aid))
if commit:
self.conn.commit()
def set_sort_field_for_author(self, old_id, new_sort, commit=True, notify=False):
self.conn.execute('UPDATE authors SET sort=? WHERE id=?',
(new_sort.strip(), old_id))
if commit:
self.conn.commit()
# Now change all the author_sort fields in books by this author
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
for (book_id,) in bks:
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss, notify=notify, commit=commit)
def rename_author(self, old_id, new_name):
# Make sure that any commas in new_name are changed to '|'!
new_name = new_name.replace(',', '|').strip()
if not new_name:
new_name = _('Unknown')
# Get the list of books we must fix up, one way or the other
# Save the list so we can use it twice
bks = self.conn.get('SELECT book from books_authors_link WHERE author=?', (old_id,))
books = []
for (book_id,) in bks:
books.append(book_id)
# check if the new author already exists
new_id = self.conn.get('SELECT id from authors WHERE name=?',
(new_name,), all=False)
if new_id is None or old_id == new_id:
# No name clash. Go ahead and update the author's name
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
else:
# First check for the degenerate case -- changing a value to itself.
# Update it in case there is a change of case, but do nothing else
if old_id == new_id:
self.conn.execute('UPDATE authors SET name=? WHERE id=?',
(new_name, old_id))
self.conn.commit()
return new_id
# Author exists. To fix this, we must replace all the authors
# instead of replacing the one. Reason: db integrity checks can stop
# the rename process, which would leave everything half-done. We
# can't do it the same way as tags (delete and add) because author
# order is important.
for book_id in books:
# Get the existing list of authors
authors = self.conn.get('''
SELECT author from books_authors_link
WHERE book=?
ORDER BY id''',(book_id,))
# unpack the double-list structure, replacing the old author
# with the new one while we are at it
for i,aut in enumerate(authors):
authors[i] = aut[0] if aut[0] != old_id else new_id
# Delete the existing authors list
self.conn.execute('''DELETE FROM books_authors_link
WHERE book=?''',(book_id,))
# Change the authors to the new list
for aid in authors:
try:
self.conn.execute('''
INSERT INTO books_authors_link(book, author)
VALUES (?,?)''', (book_id, aid))
except IntegrityError:
# Sometimes books specify the same author twice in their
# metadata. Ignore it.
pass
# Now delete the old author from the DB
self.conn.execute('DELETE FROM authors WHERE id=?', (old_id,))
self.dirtied(books, commit=False)
self.conn.commit()
# the authors are now changed, either by changing the author's name
# or replacing the author in the list. Now must fix up the books.
for book_id in books:
# First, must refresh the cache to see the new authors
self.data.refresh_ids(self, [book_id])
# now fix the filesystem paths
self.set_path(book_id, index_is_id=True)
# Next fix the author sort. Reset it to the default
ss = self.author_sort_from_book(book_id, index_is_id=True)
self.set_author_sort(book_id, ss)
# the caller will do a general refresh, so we don't need to
# do one here
return new_id
# end convenience methods
def get_tags(self, id):
result = self.conn.get(
'SELECT name FROM tags WHERE id IN (SELECT tag FROM books_tags_link WHERE book=?)',
(id,), all=True)
if not result:
return set()
return {r[0] for r in result}
@classmethod
def cleanup_tags(cls, tags):
tags = [x.strip().replace(',', ';') for x in tags if x.strip()]
tags = [x.decode(preferred_encoding, 'replace')
if isbytestring(x) else x for x in tags]
tags = [' '.join(x.split()) for x in tags]
ans, seen = [], set()
for tag in tags:
if tag.lower() not in seen:
seen.add(tag.lower())
ans.append(tag)
return ans
def remove_all_tags(self, ids, notify=False, commit=True):
self.conn.executemany(
'DELETE FROM books_tags_link WHERE book=?', [(x,) for x in ids])
self.dirtied(ids, commit=False)
if commit:
self.conn.commit()
for x in ids:
self.data.set(x, self.FIELD_MAP['tags'], '', row_is_id=True)
if notify:
self.notify('metadata', ids)
def bulk_modify_tags(self, ids, add=[], remove=[], notify=False):
add = self.cleanup_tags(add)
remove = self.cleanup_tags(remove)
remove = set(remove) - set(add)
if not ids or (not add and not remove):
return
# Add tags that do not already exist into the tag table
all_tags = self.all_tags()
lt = [t.lower() for t in all_tags]
new_tags = [t for t in add if t.lower() not in lt]
if new_tags:
self.conn.executemany('INSERT INTO tags(name) VALUES (?)', [(x,) for x in
new_tags])
# Create the temporary tables to store the ids for books and tags
# to be operated on
tables = ('temp_bulk_tag_edit_books', 'temp_bulk_tag_edit_add',
'temp_bulk_tag_edit_remove')
drops = '\n'.join(['DROP TABLE IF EXISTS %s;'%t for t in tables])
creates = '\n'.join(['CREATE TEMP TABLE %s(id INTEGER PRIMARY KEY);'%t
for t in tables])
self.conn.executescript(drops + creates)
# Populate the books temp table
self.conn.executemany(
'INSERT INTO temp_bulk_tag_edit_books VALUES (?)', [(x,) for x in ids])
# Populate the add/remove tags temp tables
for table, tags in enumerate([add, remove]):
if not tags:
continue
table = tables[table+1]
insert = ('INSERT INTO %s(id) SELECT tags.id FROM tags WHERE name=?'
' COLLATE PYNOCASE LIMIT 1')
self.conn.executemany(insert%table, [(x,) for x in tags])
if remove:
self.conn.execute(
'''DELETE FROM books_tags_link WHERE
book IN (SELECT id FROM %s) AND
tag IN (SELECT id FROM %s)'''
% (tables[0], tables[2]))
if add:
self.conn.execute(
'''
INSERT OR REPLACE INTO books_tags_link(book, tag) SELECT {0}.id, {1}.id FROM
{0}, {1}
'''.format(tables[0], tables[1])
)
self.conn.executescript(drops)
self.dirtied(ids, commit=False)
self.conn.commit()
for x in ids:
tags = ','.join(self.get_tags(x))
self.data.set(x, self.FIELD_MAP['tags'], tags, row_is_id=True)
if notify:
self.notify('metadata', ids)
    def commit(self):
        '''Commit the current database transaction to disk.'''
        self.conn.commit()
    def set_tags(self, id, tags, append=False, notify=True, commit=True,
                 allow_case_change=False):
        '''
        Set the tags of book ``id``. Returns the set of book ids whose
        cached rows need refreshing (more than one book when an existing
        tag's case was changed).

        @param tags: list of strings
        @param append: If True existing tags are not removed
        '''
        if not tags:
            tags = []
        if not append:
            self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
        otags = self.get_tags(id)
        tags = self.cleanup_tags(tags)
        books_to_refresh = {id}
        # Only process tags the book does not already have.
        for tag in (set(tags)-otags):
            case_changed = False
            tag = tag.strip()
            if not tag:
                continue
            if not isinstance(tag, str):
                tag = tag.decode(preferred_encoding, 'replace')
            # Case-insensitive lookup against the full tag list.
            existing_tags = self.all_tags()
            lt = [t.lower() for t in existing_tags]
            try:
                idx = lt.index(tag.lower())
            except ValueError:
                idx = -1
            if idx > -1:
                etag = existing_tags[idx]
                tid = self.conn.get('SELECT id FROM tags WHERE name=?', (etag,), all=False)
                if allow_case_change and etag != tag:
                    self.conn.execute('UPDATE tags SET name=? WHERE id=?', (tag, tid))
                    case_changed = True
            else:
                tid = self.conn.execute('INSERT INTO tags(name) VALUES(?)', (tag,)).lastrowid
            # Guard against duplicate links (possible under append=True).
            if not self.conn.get('''SELECT book FROM books_tags_link
                                    WHERE book=? AND tag=?''', (id, tid), all=False):
                self.conn.execute('''INSERT INTO books_tags_link(book, tag)
                                     VALUES (?,?)''', (id, tid))
            if case_changed:
                # Every book carrying this tag sees the case change.
                bks = self.conn.get('SELECT book FROM books_tags_link WHERE tag=?',
                                        (tid,))
                books_to_refresh |= {bk[0] for bk in bks}
        # Garbage-collect tags with no remaining books.
        self.conn.execute('''DELETE FROM tags WHERE (SELECT COUNT(id)
                                FROM books_tags_link WHERE tag=tags.id) < 1''')
        self.dirtied({id}|books_to_refresh, commit=False)
        if commit:
            self.conn.commit()
        tags = ','.join(self.get_tags(id))
        self.data.set(id, self.FIELD_MAP['tags'], tags, row_is_id=True)
        if notify:
            self.notify('metadata', [id])
        return books_to_refresh
def unapply_tags(self, book_id, tags, notify=True):
for tag in tags:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,), all=False)
if id:
self.conn.execute('''DELETE FROM books_tags_link
WHERE tag=? AND book=?''', (id, book_id))
self.conn.commit()
self.data.refresh_ids(self, [book_id])
if notify:
self.notify('metadata', [id])
def is_tag_used(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
lt.index(tag.lower())
return True
except ValueError:
return False
def delete_tag(self, tag):
existing_tags = self.all_tags()
lt = [t.lower() for t in existing_tags]
try:
idx = lt.index(tag.lower())
except ValueError:
idx = -1
if idx > -1:
id = self.conn.get('SELECT id FROM tags WHERE name=?', (existing_tags[idx],), all=False)
if id:
self.conn.execute('DELETE FROM books_tags_link WHERE tag=?', (id,))
self.conn.execute('DELETE FROM tags WHERE id=?', (id,))
self.conn.commit()
    # Matches 'Series Name [3.5]' style strings: group 1 is the series name,
    # group 2 the numeric series index.
    series_index_pat = re.compile(r'(.*)\s+\[([.0-9]+)\]$')
    def _get_series_values(self, val):
        # Delegates to the module-level _get_series_values() helper, which
        # splits a combined 'series [index]' value into its parts.
        return _get_series_values(val)
    def set_series(self, id, series, notify=True, commit=True, allow_case_change=True):
        '''
        Set the series of book ``id``. ``series`` may embed an index as
        'name [idx]'. Returns the set of book ids whose cached rows need
        refreshing (more than one book when an existing series' case was
        changed).
        '''
        self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
        # Split off any trailing ' [index]' from the series string.
        (series, idx) = self._get_series_values(series)
        books_to_refresh = {id}
        if series:
            case_change = False
            if not isinstance(series, str):
                series = series.decode(preferred_encoding, 'replace')
            series = series.strip()
            series = ' '.join(series.split())
            sx = self.conn.get('SELECT id,name from series WHERE name=?', (series,))
            if sx:
                aid, cur_name = sx[0]
                if cur_name != series:
                    # Same series, different case.
                    if allow_case_change:
                        self.conn.execute('UPDATE series SET name=? WHERE id=?', (series, aid))
                        case_change = True
                    else:
                        series = cur_name
                        books_to_refresh = set()
            else:
                aid = self.conn.execute('INSERT INTO series(name) VALUES (?)', (series,)).lastrowid
            self.conn.execute('INSERT INTO books_series_link(book, series) VALUES (?,?)', (id, aid))
            if idx:
                self.set_series_index(id, idx, notify=notify, commit=commit)
            if case_change:
                # Every book in this series sees the case change.
                bks = self.conn.get('SELECT book FROM books_series_link WHERE series=?',
                                    (aid,))
                books_to_refresh |= {bk[0] for bk in bks}
        # Garbage-collect series with no remaining books.
        self.conn.execute('''DELETE FROM series
                             WHERE (SELECT COUNT(id) FROM books_series_link
                                    WHERE series=series.id) < 1''')
        self.dirtied([id], commit=False)
        if commit:
            self.conn.commit()
        self.data.set(id, self.FIELD_MAP['series'], series, row_is_id=True)
        if notify:
            self.notify('metadata', [id])
        return books_to_refresh
def set_series_index(self, id, idx, notify=True, commit=True):
if idx is None:
idx = 1.0
try:
idx = float(idx)
except:
idx = 1.0
self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (idx, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['series_index'], idx, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_rating(self, id, rating, notify=True, commit=True):
if not rating:
rating = 0
rating = int(rating)
self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
rat = rat if rat is not None else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['rating'], rating, row_is_id=True)
if notify:
self.notify('metadata', [id])
def set_comment(self, id, text, notify=True, commit=True):
self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
if text:
self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
else:
text = ''
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['comments'], text, row_is_id=True)
self.dirtied([id], commit=False)
if notify:
self.notify('metadata', [id])
def set_author_sort(self, id, sort, notify=True, commit=True):
if not sort:
sort = ''
self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
self.dirtied([id], commit=False)
if commit:
self.conn.commit()
self.data.set(id, self.FIELD_MAP['author_sort'], sort, row_is_id=True)
if notify:
self.notify('metadata', [id])
def isbn(self, idx, index_is_id=False):
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
if x.startswith('isbn:'):
return x[5:].strip()
def get_identifiers(self, idx, index_is_id=False):
ans = {}
row = self.data._data[idx] if index_is_id else self.data[idx]
if row is not None:
raw = row[self.FIELD_MAP['identifiers']]
if raw:
for x in raw.split(','):
key, _, val = x.partition(':')
key, val = key.strip(), val.strip()
if key and val:
ans[key] = val
return ans
def get_all_identifier_types(self):
idents = self.conn.get('SELECT DISTINCT type FROM identifiers')
return [ident[0] for ident in idents]
def _clean_identifier(self, typ, val):
typ = icu_lower(typ).strip().replace(':', '').replace(',', '')
val = val.strip().replace(',', '|').replace(':', '|')
return typ, val
    def set_identifier(self, id_, typ, val, notify=True, commit=True):
        'If val is empty, deletes identifier of type typ'
        typ, val = self._clean_identifier(typ, val)
        identifiers = self.get_identifiers(id_, index_is_id=True)
        if not typ:
            # Scheme was empty (or reduced to nothing by cleaning): no-op.
            return
        changed = False
        if not val and typ in identifiers:
            # Empty value: remove the identifier of this type.
            identifiers.pop(typ)
            changed = True
            self.conn.execute(
                'DELETE from identifiers WHERE book=? AND type=?', (id_, typ))
        if val and identifiers.get(typ, None) != val:
            # New or changed value: upsert it.
            changed = True
            identifiers[typ] = val
            self.conn.execute(
                'INSERT OR REPLACE INTO identifiers (book, type, val) VALUES (?, ?, ?)', (id_, typ, val))
        if changed:
            # Re-serialize the full identifier set into the cached column.
            raw = ','.join(['%s:%s'%(k, v) for k, v in
                iteritems(identifiers)])
            self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
                    row_is_id=True)
            if commit:
                self.conn.commit()
            if notify:
                self.notify('metadata', [id_])
def set_identifiers(self, id_, identifiers, notify=True, commit=True):
cleaned = {}
if not identifiers:
identifiers = {}
for typ, val in iteritems(identifiers):
typ, val = self._clean_identifier(typ, val)
if val:
cleaned[typ] = val
self.conn.execute('DELETE FROM identifiers WHERE book=?', (id_,))
self.conn.executemany(
'INSERT INTO identifiers (book, type, val) VALUES (?, ?, ?)',
[(id_, k, v) for k, v in iteritems(cleaned)])
raw = ','.join(['%s:%s'%(k, v) for k, v in
iteritems(cleaned)])
self.data.set(id_, self.FIELD_MAP['identifiers'], raw,
row_is_id=True)
if commit:
self.conn.commit()
if notify:
self.notify('metadata', [id_])
    def set_isbn(self, id_, isbn, notify=True, commit=True):
        # Convenience wrapper: the ISBN is stored as the 'isbn' identifier.
        self.set_identifier(id_, 'isbn', isbn, notify=notify, commit=commit)
    def add_catalog(self, path, title):
        '''
        Add the catalog file at ``path`` to the library under ``title``,
        reusing an existing catalog entry (same title, tagged Catalog) when
        one exists. Returns the book id.
        '''
        from calibre.ebooks.metadata.meta import get_metadata
        format = os.path.splitext(path)[1][1:].lower()
        with open(path, 'rb') as stream:
            # Look for an existing book with this title AND the Catalog tag.
            matches = self.data.get_matches('title', '='+title)
            if matches:
                tag_matches = self.data.get_matches('tags', '='+_('Catalog'))
                matches = matches.intersection(tag_matches)
            db_id = None
            if matches:
                db_id = list(matches)[0]
            if db_id is None:
                # No existing catalog entry: create a fresh book record.
                obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
                                    (title, 'calibre'))
                db_id = obj.lastrowid
                self.data.books_added([db_id], self)
                self.set_path(db_id, index_is_id=True)
                self.conn.commit()
            try:
                mi = get_metadata(stream, format)
            except:
                # Unreadable metadata: fall back to a minimal record.
                mi = Metadata(title, ['calibre'])
            stream.seek(0)
            # Catalogs always carry fixed title/author/tag metadata.
            mi.title, mi.authors = title, ['calibre']
            mi.tags = [_('Catalog')]
            mi.pubdate = mi.timestamp = utcnow()
            if format == 'mobi':
                mi.cover, mi.cover_data = None, (None, None)
            self.set_metadata(db_id, mi)
            self.add_format(db_id, format, stream, index_is_id=True)
        self.conn.commit()
        self.data.refresh_ids(self, [db_id])  # Needed to update format list and size
        return db_id
    def add_news(self, path, arg):
        '''
        Add a news download (periodical) at ``path`` to the library.
        ``arg`` is a dict with the keys 'add_title_tag', 'title' and
        'custom_tags'. Returns the new book id.
        '''
        from calibre.ebooks.metadata.meta import get_metadata
        format = os.path.splitext(path)[1][1:].lower()
        # Accept either a path or an open file-like object.
        stream = path if hasattr(path, 'read') else open(path, 'rb')
        stream.seek(0)
        mi = get_metadata(stream, format, use_libprs_metadata=False,
                force_read_metadata=True)
        # Force the author to calibre as the auto delete of old news checks for
        # both the author==calibre and the tag News
        mi.authors = ['calibre']
        stream.seek(0)
        if mi.series_index is None:
            mi.series_index = self.get_next_series_num_for(mi.series)
        mi.tags = [_('News')]
        if arg['add_title_tag']:
            mi.tags += [arg['title']]
        if arg['custom_tags']:
            mi.tags += arg['custom_tags']
        obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
                            (mi.title, mi.authors[0]))
        id = obj.lastrowid
        self.data.books_added([id], self)
        self.set_path(id, index_is_id=True)
        self.conn.commit()
        # Ensure dates are always set so sorting/cleanup behaves.
        if mi.pubdate is None:
            mi.pubdate = utcnow()
        if mi.timestamp is None:
            mi.timestamp = utcnow()
        self.set_metadata(id, mi)
        self.add_format(id, format, stream, index_is_id=True)
        if not hasattr(path, 'read'):
            # Only close streams we opened ourselves.
            stream.close()
        self.conn.commit()
        self.data.refresh_ids(self, [id])  # Needed to update format list and size
        return id
def run_import_plugins(self, path_or_stream, format):
format = format.lower()
if hasattr(path_or_stream, 'seek'):
path_or_stream.seek(0)
pt = PersistentTemporaryFile('_import_plugin.'+format)
shutil.copyfileobj(path_or_stream, pt, 1024**2)
pt.close()
path = pt.name
else:
path = path_or_stream
return run_plugins_on_import(path, format)
def _add_newbook_tag(self, mi):
tags = prefs['new_book_tags']
if tags:
for tag in [t.strip() for t in tags]:
if tag:
if mi.tags is None:
mi.tags = [tag]
else:
mi.tags.append(tag)
    def create_book_entry(self, mi, cover=None, add_duplicates=True,
            force_id=None):
        '''
        Create a new book record from the metadata ``mi`` (no formats are
        added). Returns the new book id, or None when ``add_duplicates`` is
        False and a matching book already exists. ``force_id`` lets the
        caller choose the book id explicitly.
        '''
        if mi.tags:
            # Copy so that _add_newbook_tag does not mutate the caller's list.
            mi.tags = list(mi.tags)
        self._add_newbook_tag(mi)
        if not add_duplicates and self.has_book(mi):
            return None
        series_index = self.get_next_series_num_for(mi.series) \
                if mi.series_index is None else mi.series_index
        aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
        title = mi.title
        if isbytestring(aus):
            aus = aus.decode(preferred_encoding, 'replace')
        if isbytestring(title):
            title = title.decode(preferred_encoding, 'replace')
        if force_id is None:
            obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
                                (title, series_index, aus))
            id = obj.lastrowid
        else:
            id = force_id
            obj = self.conn.execute(
                    'INSERT INTO books(id, title, series_index, '
                    'author_sort) VALUES (?, ?, ?, ?)', (id, title, series_index, aus))
        self.data.books_added([id], self)
        # Ensure dates are always defined before writing full metadata.
        if mi.timestamp is None:
            mi.timestamp = utcnow()
        if mi.pubdate is None:
            mi.pubdate = UNDEFINED_DATE
        self.set_metadata(id, mi, ignore_errors=True, commit=True)
        if cover is not None:
            try:
                self.set_cover(id, cover)
            except:
                # A bad cover must not abort book creation.
                traceback.print_exc()
        return id
def add_books(self, paths, formats, metadata, add_duplicates=True,
return_ids=False):
'''
Add a book to the database. The result cache is not updated.
:param:`paths` List of paths to book files or file-like objects
'''
formats, metadata = iter(formats), iter(metadata)
duplicates = []
ids = []
postimport = []
for path in paths:
mi = next(metadata)
self._add_newbook_tag(mi)
format = next(formats)
if not add_duplicates and self.has_book(mi):
duplicates.append((path, format, mi))
continue
series_index = self.get_next_series_num_for(mi.series) \
if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
title = mi.title
if isinstance(aus, bytes):
aus = aus.decode(preferred_encoding, 'replace')
if isinstance(title, bytes):
title = title.decode(preferred_encoding)
obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
(title, series_index, aus))
id = obj.lastrowid
self.data.books_added([id], self)
ids.append(id)
if mi.timestamp is None:
mi.timestamp = utcnow()
if mi.pubdate is None:
mi.pubdate = UNDEFINED_DATE
self.set_metadata(id, mi, commit=True, ignore_errors=True)
npath = self.run_import_plugins(path, format)
format = os.path.splitext(npath)[-1].lower().replace('.', '').upper()
with open(npath, 'rb') as stream:
format = check_ebook_format(stream, format)
self.add_format(id, format, stream, index_is_id=True)
postimport.append((id, format))
self.conn.commit()
self.data.refresh_ids(self, ids) # Needed to update format list and size
for book_id, fmt in postimport:
run_plugins_on_postimport(self, book_id, fmt)
if duplicates:
paths = [duplicate[0] for duplicate in duplicates]
formats = [duplicate[1] for duplicate in duplicates]
metadata = [duplicate[2] for duplicate in duplicates]
return (paths, formats, metadata), (ids if return_ids else
len(ids))
return None, (ids if return_ids else len(ids))
    def import_book(self, mi, formats, notify=True, import_hooks=True,
            apply_import_tags=True, preserve_uuid=False):
        '''
        Create a book from metadata ``mi`` and attach every file in
        ``formats`` (a list of paths; OPF files are skipped). Returns the
        new book id.
        '''
        series_index = self.get_next_series_num_for(mi.series) \
                        if mi.series_index is None else mi.series_index
        if apply_import_tags:
            self._add_newbook_tag(mi)
        # Guarantee minimally valid metadata.
        if not mi.title:
            mi.title = _('Unknown')
        if not mi.authors:
            mi.authors = [_('Unknown')]
        aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
        if isinstance(aus, bytes):
            aus = aus.decode(preferred_encoding, 'replace')
        title = mi.title if isinstance(mi.title, str) else \
                mi.title.decode(preferred_encoding, 'replace')
        obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
                            (title, series_index, aus))
        id = obj.lastrowid
        self.data.books_added([id], self)
        # Ensure dates are always defined before writing full metadata.
        if mi.timestamp is None:
            mi.timestamp = utcnow()
        if mi.pubdate is None:
            mi.pubdate = UNDEFINED_DATE
        self.set_metadata(id, mi, ignore_errors=True, commit=True)
        if preserve_uuid and mi.uuid:
            self.set_uuid(id, mi.uuid, commit=False)
        for path in formats:
            ext = os.path.splitext(path)[1][1:].lower()
            if ext == 'opf':
                # Metadata was already applied above; don't store OPFs.
                continue
            if import_hooks:
                self.add_format_with_hooks(id, ext, path, index_is_id=True)
            else:
                with open(path, 'rb') as f:
                    self.add_format(id, ext, f, index_is_id=True)
        # Mark the book dirty, It probably already has been done by
        # set_metadata, but probably isn't good enough
        self.dirtied([id], commit=False)
        self.conn.commit()
        self.data.refresh_ids(self, [id])  # Needed to update format list and size
        if notify:
            self.notify('add', [id])
        return id
    def get_top_level_move_items(self):
        '''
        Return ``(items, path_map)``: the set of top-level entries in the
        library directory that belong to this library (book folders plus the
        metadata files) and a map from (possibly lowercased) entry name back
        to its actual on-disk name.
        '''
        items = set(os.listdir(self.library_path))
        paths = set()
        # Collect the top-level (author) folder of every book's path.
        for x in self.data.universal_set():
            path = self.path(x, index_is_id=True)
            path = path.split(os.sep)[0]
            paths.add(path)
        paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
        path_map = {}
        for x in paths:
            path_map[x] = x
        if not self.is_case_sensitive:
            # On case-insensitive filesystems match entries by lowercase
            # name while remembering the real on-disk casing.
            for x in items:
                path_map[x.lower()] = x
            items = set(path_map)
            paths = {x.lower() for x in paths}
        # Only move entries that actually belong to the library.
        items = items.intersection(paths)
        return items, path_map
    def move_library_to(self, newloc, progress=None):
        '''
        Copy the library to ``newloc`` and switch this database over to it,
        then best-effort delete the old files. ``progress`` is called with
        each top-level item name as it is copied.
        '''
        if progress is None:
            def progress(x):
                return x
        if not os.path.exists(newloc):
            os.makedirs(newloc)
        old_dirs = set()
        items, path_map = self.get_top_level_move_items()
        for x in items:
            src = os.path.join(self.library_path, x)
            dest = os.path.join(newloc, path_map[x])
            if os.path.isdir(src):
                # Replace any stale destination folder with a fresh copy.
                if os.path.exists(dest):
                    shutil.rmtree(dest)
                shutil.copytree(src, dest)
                old_dirs.add(src)
            else:
                if os.path.exists(dest):
                    os.remove(dest)
                shutil.copyfile(src, dest)
            x = path_map[x]
            if not isinstance(x, str):
                x = x.decode(filesystem_encoding, 'replace')
            progress(x)
        # Re-open the database at the new location before cleaning up.
        dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
        opath = self.dbpath
        self.conn.close()
        self.library_path, self.dbpath = newloc, dbpath
        self.connect()
        try:
            os.unlink(opath)
        except:
            # Best-effort cleanup: the copy already succeeded.
            pass
        for dir in old_dirs:
            try:
                shutil.rmtree(dir)
            except:
                # Best-effort cleanup: the copy already succeeded.
                pass
def __iter__(self):
for record in self.data._data:
if record is not None:
yield record
def all_ids(self):
x = self.FIELD_MAP['id']
for i in iter(self):
yield i[x]
    def find_books_in_directory(self, dirpath, single_book_per_directory):
        # Thin wrapper around the module-level helper of the same name.
        return find_books_in_directory(dirpath, single_book_per_directory)
    def import_book_directory_multiple(self, dirpath, callback=None,
            added_ids=None):
        # Thin wrapper around the module-level helper of the same name.
        return import_book_directory_multiple(self, dirpath, callback=callback, added_ids=added_ids)
    def import_book_directory(self, dirpath, callback=None, added_ids=None):
        # Thin wrapper around the module-level helper of the same name.
        return import_book_directory(self, dirpath, callback=callback, added_ids=added_ids)
    def recursive_import(self, root, single_book_per_directory=True,
            callback=None, added_ids=None):
        # Thin wrapper around the module-level helper of the same name.
        return recursive_import(self, root, single_book_per_directory=single_book_per_directory, callback=callback, added_ids=added_ids)
def add_custom_book_data(self, book_id, name, val):
    '''
    Store plugin data ``val`` (JSON serialized) for ``book_id`` under
    ``name``, replacing any existing value for that (book, name) pair.

    :raises ValueError: if no book with ``book_id`` exists
    '''
    x = self.conn.get('SELECT id FROM books WHERE ID=?', (book_id,), all=False)
    if x is None:
        raise ValueError('add_custom_book_data: no such book_id %d'%book_id)
    # Do the json encode first, in case it throws an exception
    s = json.dumps(val, default=to_json)
    self.conn.execute('''INSERT OR REPLACE INTO books_plugin_data(book, name, val)
                         VALUES(?, ?, ?)''', (book_id, name, s))
    self.commit()
def add_multiple_custom_book_data(self, name, vals, delete_first=False):
    '''
    Bulk version of add_custom_book_data(). ``vals`` maps book_id -> value.
    When ``delete_first`` is True, all data previously stored under
    ``name`` (for every book) is removed before inserting.
    '''
    if delete_first:
        self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
    self.conn.executemany(
        'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
        [(book_id, name, json.dumps(val, default=to_json))
            for book_id, val in iteritems(vals)])
    self.commit()
def get_custom_book_data(self, book_id, name, default=None):
    '''
    Return the plugin data stored for (``book_id``, ``name``), or
    ``default`` if it is missing or cannot be deserialized.
    '''
    try:
        s = self.conn.get('''select val FROM books_plugin_data
                WHERE book=? AND name=?''', (book_id, name), all=False)
        if s is None:
            return default
        return json.loads(s, object_hook=from_json)
    except Exception:
        # Deliberately best-effort: corrupt/undecodable plugin data falls
        # back to the default. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    return default
def get_all_custom_book_data(self, name, default=None):
    '''
    Return a dict mapping book_id -> deserialized plugin data for every
    book with data stored under ``name``; ``default`` on any failure.
    '''
    try:
        s = self.conn.get('''select book, val FROM books_plugin_data
                WHERE name=?''', (name,))
        if s is None:
            return default
        res = {}
        for r in s:
            res[r[0]] = json.loads(r[1], object_hook=from_json)
        return res
    except Exception:
        # Deliberately best-effort: any query or decode failure yields the
        # default. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        pass
    return default
def delete_custom_book_data(self, book_id, name):
    '''Remove the plugin data stored for (``book_id``, ``name``), if any.'''
    self.conn.execute('DELETE FROM books_plugin_data WHERE book=? AND name=?',
            (book_id, name))
    self.commit()

def delete_all_custom_book_data(self, name):
    '''Remove the plugin data stored under ``name`` for all books.'''
    self.conn.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
    self.commit()

def get_ids_for_custom_book_data(self, name):
    '''Return the ids of all books that have plugin data stored under ``name``.'''
    s = self.conn.get('''SELECT book FROM books_plugin_data WHERE name=?''', (name,))
    return [x[0] for x in s]
def get_usage_count_by_id(self, field):
    '''
    Return (item_id, count) rows giving how many books link to each item
    of a many-(one|many) field, e.g. how many books carry each tag.

    :raises ValueError: if ``field`` has no link table (i.e. it is not an
        is_multiple/linked field)
    '''
    fm = self.field_metadata[field]
    if not fm.get('link_column', None):
        # Bug fix: the message was never interpolated, so callers saw a
        # literal '%s' instead of the offending field name.
        raise ValueError('%s is not an is_multiple field' % field)
    return self.conn.get(
        'SELECT {0}, count(*) FROM books_{1}_link GROUP BY {0}'.format(
            fm['link_column'], fm['table']))
def all_author_names(self):
    '''Return the set of all author names in the library. The ``|``
    placeholder used to store commas inside a name is decoded back.'''
    col = self.FIELD_MAP['authors']
    names = set()
    for record in self.data.iterall():
        serialized = record[col]
        if serialized:
            names.update(part.replace('|', ',') for part in serialized.split(','))
    return names
def all_tag_names(self):
    '''Return the set of all tag names used in the library.'''
    col = self.FIELD_MAP['tags']
    names = set()
    for record in self.data.iterall():
        serialized = record[col]
        if serialized:
            names.update(serialized.split(','))
    return names
def all_publisher_names(self):
    '''Return the set of all publisher names used in the library.'''
    col = self.FIELD_MAP['publisher']
    return {record[col] for record in self.data.iterall() if record[col]}
def all_series_names(self):
    '''Return the set of all series names used in the library.'''
    col = self.FIELD_MAP['series']
    return {record[col] for record in self.data.iterall() if record[col]}
| 161,299 | Python | .py | 3,440 | 33.390698 | 136 | 0.539522 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,705 | field_metadata.py | kovidgoyal_calibre/src/calibre/library/field_metadata.py | '''
Created on 25 May 2010
@author: charles
'''
import traceback
from collections import OrderedDict
from calibre.utils.config_base import tweaks
from calibre.utils.icu import lower as icu_lower
from calibre.utils.localization import _, ngettext
from polyglot.builtins import iteritems, itervalues
# Maps tag browser category keys to their icon file names. The 'custom:' and
# 'user:' entries appear to be prefix fallbacks for custom columns and
# user-defined categories respectively — confirm against the consumers of
# this map before relying on that.
category_icon_map = {
    'authors' : 'user_profile.png',
    'series' : 'series.png',
    'formats' : 'book.png',
    'publisher' : 'publisher.png',
    'rating' : 'rating.png',
    'news' : 'news.png',
    'tags' : 'tags.png',
    'custom:' : 'column.png',
    'user:' : 'tb_folder.png',
    'search' : 'search.png',
    'identifiers': 'identifiers.png',
    'gst' : 'catalog.png',
    'languages' : 'languages.png',
}
# Builtin metadata {{{
def _builtin_field_metadata():
    # This is a function (not a module level constant) so that changing the
    # UI language allows newly created field metadata objects to have
    # correctly translated labels for builtin fields.

    def fdesc(datatype, name, search_terms, table=None, column=None,
              link_column=None, category_sort=None, is_multiple=None,
              kind='field', is_category=False, is_csp=False):
        # Build one field description dict. The link_column/category_sort
        # keys are present only for fields that actually have them, matching
        # the shape that consumers of this table expect.
        ans = {
            'table': table, 'column': column, 'datatype': datatype,
            'is_multiple': {} if is_multiple is None else is_multiple,
            'kind': kind, 'name': name, 'search_terms': search_terms,
            'is_custom': False, 'is_category': is_category, 'is_csp': is_csp,
        }
        if link_column is not None:
            ans['link_column'] = link_column
        if category_sort is not None:
            ans['category_sort'] = category_sort
        return ans

    def commas():
        # A fresh dict per field, so mutating one field's is_multiple spec
        # cannot affect another's.
        return {'cache_to_list': ',', 'ui_to_list': ',', 'list_to_ui': ', '}

    return [
        ('authors', fdesc('text', _('Authors'), ['authors', 'author'],
                          table='authors', column='name', link_column='author',
                          category_sort='sort', is_category=True,
                          is_multiple={'cache_to_list': ',',
                                       'ui_to_list': '&',
                                       'list_to_ui': ' & '})),
        ('languages', fdesc('text', _('Languages'), ['languages', 'language'],
                            table='languages', column='lang_code',
                            link_column='lang_code', category_sort='lang_code',
                            is_category=True, is_multiple=commas())),
        ('series', fdesc('series', ngettext('Series', 'Series', 1), ['series'],
                         table='series', column='name', link_column='series',
                         category_sort='(title_sort(name))', is_category=True)),
        ('formats', fdesc('text', _('Formats'), ['formats', 'format'],
                          is_category=True, is_multiple=commas())),
        ('publisher', fdesc('text', _('Publisher'), ['publisher'],
                            table='publishers', column='name',
                            link_column='publisher', category_sort='name',
                            is_category=True)),
        ('rating', fdesc('rating', _('Rating'), ['rating'],
                         table='ratings', column='rating', link_column='rating',
                         category_sort='rating', is_category=True)),
        ('news', fdesc(None, _('News'), [], table='news', column='name',
                       category_sort='name', kind='category', is_category=True)),
        ('tags', fdesc('text', _('Tags'), ['tags', 'tag'],
                       table='tags', column='name', link_column='tag',
                       category_sort='name', is_category=True,
                       is_multiple=commas())),
        ('identifiers', fdesc('text', _('Identifiers'),
                              ['identifiers', 'identifier', 'isbn'],
                              is_category=True, is_csp=True,
                              is_multiple=commas())),
        ('author_sort', fdesc('text', _('Author sort'), ['author_sort'])),
        ('au_map', fdesc('text', None, [],
                         is_multiple={'cache_to_list': ',',
                                      'ui_to_list': None,
                                      'list_to_ui': None})),
        ('comments', fdesc('text', _('Comments'), ['comments', 'comment'])),
        ('cover', fdesc('int', _('Cover'), ['cover'])),
        ('id', fdesc('int', _('Id'), ['id'])),
        ('last_modified', fdesc('datetime', _('Modified'), ['last_modified'])),
        ('ondevice', fdesc('text', _('On device'), ['ondevice'])),
        ('path', fdesc('text', _('Path'), [])),
        ('pubdate', fdesc('datetime', _('Published'), ['pubdate'])),
        ('marked', fdesc('text', None, ['marked'])),
        ('in_tag_browser', fdesc('text', None, ['in_tag_browser'])),
        ('series_index', fdesc('float', None, ['series_index'])),
        ('series_sort', fdesc('text', _('Series sort'), ['series_sort'])),
        ('sort', fdesc('text', _('Title sort'), ['title_sort'])),
        ('size', fdesc('float', _('Size'), ['size'])),
        ('timestamp', fdesc('datetime', _('Date'), ['date'])),
        ('title', fdesc('text', _('Title'), ['title'])),
        ('uuid', fdesc('text', None, ['uuid'])),
    ]
# }}}
class FieldMetadata:
'''
key: the key to the dictionary is:
- for standard fields, the metadata field name.
- for custom fields, the metadata field name prefixed by '#'
This is done to create two 'namespaces' so the names don't clash
label: the actual column label. No prefixing.
datatype: the type of information in the field. Valid values are listed in
VALID_DATA_TYPES below.
is_multiple: valid for the text datatype. If {}, the field is to be
treated as a single term. If not None, it contains a dict of the form
{'cache_to_list': ',',
'ui_to_list': ',',
'list_to_ui': ', '}
where the cache_to_list contains the character used to split the value in
the meta2 table, ui_to_list contains the character used to create a list
from a value shown in the ui (each resulting value must be strip()ed and
empty values removed), and list_to_ui contains the string used in join()
to create a displayable string from the list.
kind == field: is a db field.
kind == category: standard tag category that isn't a field. see news.
kind == user: user-defined tag category.
kind == search: saved-searches category.
is_category: is a tag browser category. If true, then:
table: name of the db table used to construct item list
column: name of the column in the normalized table to join on
link_column: name of the column in the connection table to join on. This
key should not be present if there is no link table
category_sort: the field in the normalized table to sort on. This
key must be present if is_category is True
If these are None, then the category constructor must know how
to build the item list (e.g., formats, news).
The order below is the order that the categories will
appear in the tags pane.
name: the text that is to be used when displaying the field. Column headings
in the GUI, etc.
search_terms: the terms that can be used to identify the field when
searching. They can be thought of as aliases for metadata keys, but are only
valid when passed to search().
is_custom: the field has been added by the user.
rec_index: the index of the field in the db metadata record.
    is_csp: field contains colon-separated pairs. Must also be a text field
    with is_multiple set.
'''
VALID_DATA_TYPES = frozenset([None, 'rating', 'text', 'comments', 'datetime',
'int', 'float', 'bool', 'series', 'composite', 'enumeration'])
# search labels that are not db columns
search_items = ['all', 'search', 'vl', 'template']
custom_field_prefix = '#'
__calibre_serializable__ = True
def __init__(self):
    # Seed the field table with the (freshly translated) builtin fields
    self._field_metadata = _builtin_field_metadata()
    self._tb_cats = OrderedDict()      # key -> metadata dict, in tag browser order
    self._tb_custom_fields = {}        # subset of _tb_cats: only the custom columns
    self._search_term_map = {}         # search term/alias -> field key
    self.custom_label_to_key_map = {}  # custom column label -> '#'-prefixed key
    for k,v in self._field_metadata:
        if v['kind'] == 'field' and v['datatype'] not in self.VALID_DATA_TYPES:
            raise ValueError('Unknown datatype %s for field %s'%(v['datatype'], k))
        self._tb_cats[k] = v
        self._tb_cats[k]['label'] = k
        self._tb_cats[k]['display'] = {}
        self._tb_cats[k]['is_editable'] = True
        self._add_search_terms_to_map(k, v['search_terms'])
    # Date display formats are user-configurable via tweaks
    self._tb_cats['timestamp']['display'] = {
            'date_format': tweaks['gui_timestamp_display_format']}
    self._tb_cats['pubdate']['display'] = {
            'date_format': tweaks['gui_pubdate_display_format']}
    self._tb_cats['last_modified']['display'] = {
            'date_format': tweaks['gui_last_modified_display_format']}
    # Expose dict-style .get() directly from the underlying table
    self.get = self._tb_cats.get
def __getitem__(self, key):
if key == 'title_sort':
return self._tb_cats['sort']
return self._tb_cats[key]
def __setitem__(self, key, val):
    # The table is immutable from outside; mutate via the add_* methods
    raise AttributeError('Assigning to this object is forbidden')

def __delitem__(self, key):
    del self._tb_cats[key]

def __iter__(self):
    yield from self._tb_cats

def __contains__(self, key):
    # 'title_sort' is accepted as an alias of 'sort', see __getitem__()
    return key in self._tb_cats or key == 'title_sort'

def has_key(self, key):
    # Legacy dict-style API, kept for backwards compatibility
    return key in self

def keys(self):
    return list(self._tb_cats.keys())
def __eq__(self, other):
    # Two FieldMetadata objects are equal iff all their internal tables match
    if not isinstance(other, FieldMetadata):
        return False
    for attr in ('_tb_custom_fields', '_search_term_map', 'custom_label_to_key_map', 'custom_field_prefix'):
        if getattr(self, attr) != getattr(other, attr):
            return False
    # Compare as plain dicts so ordering differences are ignored
    return dict(self._tb_cats) == dict(other._tb_cats)

def __ne__(self, other):
    return not self.__eq__(other)
def sortable_field_keys(self):
return [k for k in self._tb_cats.keys()
if self._tb_cats[k]['kind']=='field' and
self._tb_cats[k]['datatype'] is not None]
def ui_sortable_field_keys(self):
    # Fields the user may sort the book list on: every sortable field that
    # has a display name, minus internal/derived columns, plus a synthetic
    # 'Has cover' entry.
    ans = {k:self._tb_cats[k]['name'] for k in set(self.sortable_field_keys()) - {
        'sort', 'author_sort', 'au_map', 'series_sort', 'marked',
        'series_index', 'path', 'formats', 'identifiers', 'uuid',
        'comments',
    } if self._tb_cats[k]['name']}
    ans['cover'] = _('Has cover')
    return ans
def displayable_field_keys(self):
    '''Keys of fields that make sense to show in the UI (excludes internal
    bookkeeping columns and series index companions).'''
    return [k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind']=='field' and
                self._tb_cats[k]['datatype'] is not None and
                k not in ('au_map', 'marked', 'ondevice', 'cover', 'series_sort', 'in_tag_browser') and
                not self.is_series_index(k)]

def standard_field_keys(self):
    '''Keys of the builtin (non custom) fields.'''
    return [k for k in self._tb_cats.keys()
            if self._tb_cats[k]['kind']=='field' and
                not self._tb_cats[k]['is_custom']]

def custom_field_keys(self, include_composites=True):
    '''Keys of the user-created columns, optionally excluding composites.'''
    res = []
    for k in self._tb_cats.keys():
        fm = self._tb_cats[k]
        if fm['kind']=='field' and fm['is_custom'] and \
                (fm['datatype'] != 'composite' or include_composites):
            res.append(k)
    return res

def all_field_keys(self):
    '''Keys of every real db field, builtin and custom.'''
    return [k for k in self._tb_cats.keys() if self._tb_cats[k]['kind']=='field']

def iterkeys(self):
    yield from self._tb_cats

def itervalues(self):
    return itervalues(self._tb_cats)

def values(self):
    return list(self._tb_cats.values())

def iteritems(self):
    for key in self._tb_cats:
        yield (key, self._tb_cats[key])

# Alias kept because both names are used by callers
iter_items = iteritems

def custom_iteritems(self):
    '''Iterate (key, metadata) over only the custom columns.'''
    yield from iteritems(self._tb_custom_fields)

def items(self):
    return list(self.iter_items())
def is_custom_field(self, key):
return key.startswith(self.custom_field_prefix)
def is_ignorable_field(self, key):
'Custom fields and user categories are ignorable'
return self.is_custom_field(key) or key.startswith('@')
def ignorable_field_keys(self):
return [k for k in self._tb_cats if self.is_ignorable_field(k)]
def is_series_index(self, key):
try:
m = self._tb_cats[key]
return (m['datatype'] == 'float' and key.endswith('_index') and
key[:-6] in self._tb_cats)
except (KeyError, ValueError, TypeError, AttributeError):
return False
def key_to_label(self, key):
if 'label' not in self._tb_cats[key]:
return key
return self._tb_cats[key]['label']
def label_to_key(self, label, prefer_custom=False):
if prefer_custom:
if label in self.custom_label_to_key_map:
return self.custom_label_to_key_map[label]
if 'label' in self._tb_cats:
return label
if not prefer_custom:
if label in self.custom_label_to_key_map:
return self.custom_label_to_key_map[label]
raise ValueError('Unknown key [%s]'%(label))
def all_metadata(self):
l = {}
for k in self._tb_cats:
l[k] = self._tb_cats[k]
return l
def custom_field_metadata(self, include_composites=True):
if include_composites:
return self._tb_custom_fields
l = {}
for k in self.custom_field_keys(include_composites):
l[k] = self._tb_cats[k]
return l
def add_custom_field(self, label, table, column, datatype, colnum, name,
                     display, is_editable, is_multiple, is_category,
                     is_csp=False):
    '''
    Register a user-created column. The field is stored under the key
    '#'+label. Series-type columns also get a companion '#'+label+'_index'
    float field, mirroring the builtin series/series_index pair.

    :raises ValueError: if the label is already in use or the datatype is
        not one of VALID_DATA_TYPES
    '''
    key = self.custom_field_prefix + label
    if key in self._tb_cats:
        raise ValueError('Duplicate custom field [%s]'%(label))
    if datatype not in self.VALID_DATA_TYPES:
        raise ValueError('Unknown datatype %s for field %s'%(datatype, key))
    self._tb_cats[key] = {'table':table, 'column':column,
                          'datatype':datatype, 'is_multiple':is_multiple,
                          'kind':'field', 'name':name,
                          'search_terms':[key], 'label':label,
                          'colnum':colnum, 'display':display,
                          'is_custom':True, 'is_category':is_category,
                          'link_column':'value','category_sort':'value',
                          'is_csp' : is_csp, 'is_editable': is_editable,}
    self._tb_custom_fields[key] = self._tb_cats[key]
    self._add_search_terms_to_map(key, [key])
    self.custom_label_to_key_map[label] = key
    if datatype == 'series':
        # Companion column holding the position of a book in the series
        key += '_index'
        self._tb_cats[key] = {'table':None, 'column':None,
                              'datatype':'float', 'is_multiple':{},
                              'kind':'field', 'name':'',
                              'search_terms':[key], 'label':label+'_index',
                              'colnum':None, 'display':{},
                              'is_custom':False, 'is_category':False,
                              'link_column':None, 'category_sort':None,
                              'is_editable': False, 'is_csp': False}
        self._add_search_terms_to_map(key, [key])
        self.custom_label_to_key_map[label+'_index'] = key
def remove_dynamic_categories(self):
for key in list(self._tb_cats.keys()):
val = self._tb_cats[key]
if val['is_category'] and val['kind'] in ('user', 'search'):
for k in self._tb_cats[key]['search_terms']:
if k in self._search_term_map:
del self._search_term_map[k]
del self._tb_cats[key]
def remove_user_categories(self):
for key in list(self._tb_cats.keys()):
val = self._tb_cats[key]
if val['is_category'] and val['kind'] == 'user':
for k in self._tb_cats[key]['search_terms']:
if k in self._search_term_map:
del self._search_term_map[k]
del self._tb_cats[key]
def _remove_grouped_search_terms(self):
    # Grouped search terms are the only entries that map to a *list* of
    # field keys rather than a single key
    to_remove = [v for v in self._search_term_map
                 if isinstance(self._search_term_map[v], list)]
    for v in to_remove:
        del self._search_term_map[v]

def add_grouped_search_terms(self, gst):
    '''Replace all grouped search terms with the mapping ``gst``
    (term -> list of field keys).'''
    self._remove_grouped_search_terms()
    for t in gst:
        try:
            self._add_search_terms_to_map(gst[t], [t])
        except ValueError:
            # Term clashes with an existing one: skip it, but leave a trace
            traceback.print_exc()
def cc_series_index_column_for(self, key):
    '''Index in the db metadata record of the series_index value belonging
    to the custom series column ``key``. Assumes the index value is stored
    at rec_index+1 — confirm against the record layout in the db backend.'''
    return self._tb_cats[key]['rec_index'] + 1

def add_user_category(self, label, name):
    '''Register a user-defined tag browser category.

    :raises ValueError: if ``label`` is already in use
    '''
    if label in self._tb_cats:
        raise ValueError('Duplicate user field [%s]'%(label))
    # Also register the lower-cased label so searches are case-insensitive
    st = [label]
    if icu_lower(label) != label:
        st.append(icu_lower(label))
    self._tb_cats[label] = {'table':None, 'column':None,
                            'datatype':None, 'is_multiple':{},
                            'kind':'user', 'name':name,
                            'search_terms':st, 'is_custom':False,
                            'is_category':True, 'is_csp': False}
    self._add_search_terms_to_map(label, st)
def add_search_category(self, label, name, fail_on_existing=True):
if label in self._tb_cats:
if not fail_on_existing:
return
raise ValueError('Duplicate user field [%s]'%(label))
self._tb_cats[label] = {'table':None, 'column':None,
'datatype':None, 'is_multiple':{},
'kind':'search', 'name':name,
'search_terms':[], 'is_custom':False,
'is_category':True, 'is_csp': False}
def set_field_record_index(self, label, index, prefer_custom=False):
if prefer_custom:
key = self.custom_field_prefix+label
if key not in self._tb_cats:
key = label
else:
if label in self._tb_cats:
key = label
else:
key = self.custom_field_prefix+label
self._tb_cats[key]['rec_index'] = index # let the exception fly ...
def get_search_terms(self):
s_keys = sorted(self._search_term_map.keys())
for v in self.search_items:
s_keys.append(v)
return s_keys
def _add_search_terms_to_map(self, key, terms):
if terms is not None:
for t in terms:
if t in self._search_term_map:
raise ValueError('Attempt to add duplicate search term "%s"'%t)
self._search_term_map[t] = key
def search_term_to_field_key(self, term):
return self._search_term_map.get(term, term)
def searchable_fields(self):
return [k for k in self._tb_cats.keys()
if self._tb_cats[k]['kind']=='field' and
len(self._tb_cats[k]['search_terms']) > 0]
# The following two methods are to support serialization
# Note that they do not create copies of internal structures, for performance,
# so they are not safe to use for anything else
def fm_as_dict(self):
    '''Serialize the mutable parts of this object (custom columns, search
    term map, label map, user and saved-search categories).'''
    return {
        'custom_fields': self._tb_custom_fields,
        'search_term_map': self._search_term_map,
        'custom_label_to_key_map': self.custom_label_to_key_map,
        'user_categories': {k:v for k, v in iteritems(self._tb_cats) if v['kind'] == 'user'},
        'search_categories': {k:v for k, v in iteritems(self._tb_cats) if v['kind'] == 'search'},
    }
def fm_from_dict(src):
    '''Rebuild a FieldMetadata from the dict produced by fm_as_dict().
    Shares (does not copy) the structures in ``src``.
    NOTE(review): takes ``src`` rather than ``self`` — appears to be a
    module-level helper; confirm the original indentation.'''
    ans = FieldMetadata()
    ans._tb_custom_fields = src['custom_fields']
    ans._search_term_map = src['search_term_map']
    ans.custom_label_to_key_map = src['custom_label_to_key_map']
    for q in ('custom_fields', 'user_categories', 'search_categories'):
        for k, v in iteritems(src[q]):
            ans._tb_cats[k] = v
    return ans
| 30,706 | Python | .py | 640 | 29.165625 | 112 | 0.428643 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,706 | database.py | kovidgoyal_calibre/src/calibre/library/database.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Backend that implements storage of ebooks in an sqlite database.
'''
import datetime
import re
import sqlite3 as sqlite
from zlib import compress, decompress
from calibre import isbytestring
from calibre.ebooks.metadata import MetaInformation, string_to_authors
from calibre.utils.localization import _
from calibre.utils.serialize import pickle_dumps, pickle_loads
class Concatenate:
    '''String concatenation aggregator for sqlite.

    Collects the non-NULL values passed to step() and returns them joined
    by ``sep``; returns None when no values were seen (SQL semantics).
    '''

    def __init__(self, sep=','):
        self.sep = sep
        # Accumulate parts in a list and join once in finalize(); the old
        # implementation used ``str +=`` per row, which is quadratic in the
        # number of aggregated rows.
        self.ans = []

    def step(self, value):
        if value is not None:
            self.ans.append(value)

    def finalize(self):
        try:
            if not self.ans:
                return None
            return self.sep.join(self.ans)
        except Exception:
            # sqlite swallows aggregate errors silently; log before re-raising
            import traceback
            traceback.print_exc()
            raise
class Connection(sqlite.Connection):
    '''sqlite Connection with a convenience ``get`` query method.'''

    def get(self, *args, **kw):
        '''Execute a query and return either all rows (the default) or,
        with all=False, the first column of the first row (None when the
        query produced no rows).'''
        cursor = self.execute(*args)
        if kw.get('all', True):
            return cursor.fetchall()
        row = cursor.fetchone() or [None]
        return row[0]
def _connect(path):
    '''Open an sqlite connection to ``path``, registering the ``concat``
    aggregate and the ``title_sort`` SQL function that the schema triggers
    rely on.'''
    if isinstance(path, str):
        path = path.encode('utf-8')
    conn = sqlite.connect(path, factory=Connection, detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES)
    conn.row_factory = lambda cursor, row : list(row)
    conn.create_aggregate('concat', 1, Concatenate)
    title_pat = re.compile(r'^(A|The|An)\s+', re.IGNORECASE)

    def title_sort(title):
        # Move a leading article to the end: 'The Title' -> 'Title, The'
        match = title_pat.search(title)
        if match:
            prep = match.group(1)
            # Bug fix: the old code used title.replace(prep, ''), which
            # removed *every* occurrence of the article (so 'The Theatre'
            # became 'atre, The'). Slice off only the matched prefix.
            title = title[match.end():] + ', ' + prep
        return title.strip()

    conn.create_function('title_sort', 1, title_sort)
    return conn
class LibraryDatabase:
@staticmethod
def books_in_old_database(path):
    '''
    Iterator over the books in the old pre 0.4.0 database.

    Yields (meta, cover, formats) triples: meta is a dict of metadata
    fields, cover maps uncompressed_size/data for the cover blob (empty
    dict when there is none), and formats maps a file extension to its
    uncompressed_size/data.

    NOTE(review): the connection is never explicitly closed; it is only
    released when the generator is garbage collected.
    '''
    conn = sqlite.connect(path)
    cur = conn.execute('select * from books_meta order by id;')
    book = cur.fetchone()
    while book:
        id = book[0]
        meta = {'title':book[1], 'authors':book[2], 'publisher':book[3],
                'tags':book[5], 'comments':book[7], 'rating':book[8],
                'timestamp':datetime.datetime.strptime(book[6], '%Y-%m-%d %H:%M:%S'),
                }
        cover = {}
        query = conn.execute('select uncompressed_size, data from books_cover where id=?', (id,)).fetchone()
        if query:
            cover = {'uncompressed_size': query[0], 'data': query[1]}
        query = conn.execute('select extension, uncompressed_size, data from books_data where id=?', (id,)).fetchall()
        formats = {}
        for row in query:
            formats[row[0]] = {'uncompressed_size':row[1], 'data':row[2]}
        yield meta, cover, formats
        book = cur.fetchone()
@staticmethod
def sizeof_old_database(path):
    'Return the number of books stored in the old (pre 0.4.0) database at ``path``.'
    conn = sqlite.connect(path)
    count = conn.execute('SELECT COUNT(id) from books_meta').fetchone()[0]
    conn.close()
    return count
@staticmethod
def import_old_database(path, conn, progress=None):
    '''
    Copy every book from the old pre 0.4.0 database at ``path`` into the
    new-style database behind ``conn``, creating authors, publishers,
    ratings and tags rows (and their link rows) as needed.

    :param progress: optional callable invoked with the running count of
        imported books after each book is committed
    '''
    count = 0
    for book, cover, formats in LibraryDatabase.books_in_old_database(path):
        authors = book['authors']
        if not authors:
            authors = 'Unknown'
        obj = conn.execute('INSERT INTO books(title, timestamp, author_sort) VALUES (?,?,?)',
                           (book['title'], book['timestamp'], authors))
        id = obj.lastrowid
        authors = string_to_authors(authors)
        for a in authors:
            # Re-use an existing author row where possible
            author = conn.execute('SELECT id from authors WHERE name=?', (a,)).fetchone()
            if author:
                aid = author[0]
            else:
                aid = conn.execute('INSERT INTO authors(name) VALUES (?)', (a,)).lastrowid
            conn.execute('INSERT INTO books_authors_link(book, author) VALUES (?,?)', (id, aid))
        if book['publisher']:
            candidate = conn.execute('SELECT id from publishers WHERE name=?', (book['publisher'],)).fetchone()
            pid = candidate[0] if candidate else conn.execute('INSERT INTO publishers(name) VALUES (?)',
                                                              (book['publisher'],)).lastrowid
            conn.execute('INSERT INTO books_publishers_link(book, publisher) VALUES (?,?)', (id, pid))
        if book['rating']:
            # Ratings are doubled on import (the new scale appears to use
            # half-star units, 0-10) — TODO confirm against the new schema
            candidate = conn.execute('SELECT id from ratings WHERE rating=?', (2*book['rating'],)).fetchone()
            rid = candidate[0] if candidate else conn.execute('INSERT INTO ratings(rating) VALUES (?)',
                                                              (2*book['rating'],)).lastrowid
            conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rid))
        tags = book['tags']
        if tags:
            tags = tags.split(',')
        else:
            tags = []
        for a in tags:
            a = a.strip()
            if not a:
                continue
            tag = conn.execute('SELECT id from tags WHERE name=?', (a,)).fetchone()
            if tag:
                tid = tag[0]
            else:
                tid = conn.execute('INSERT INTO tags(name) VALUES (?)', (a,)).lastrowid
            conn.execute('INSERT INTO books_tags_link(book, tag) VALUES (?,?)', (id, tid))
        comments = book['comments']
        if comments:
            conn.execute('INSERT INTO comments(book, text) VALUES (?, ?)',
                         (id, comments))
        if cover:
            conn.execute('INSERT INTO covers(book, uncompressed_size, data) VALUES (?, ?, ?)',
                         (id, cover['uncompressed_size'], cover['data']))
        for format in formats.keys():
            conn.execute('INSERT INTO data(book, format, uncompressed_size, data) VALUES (?, ?, ?, ?)',
                         (id, format, formats[format]['uncompressed_size'],
                          formats[format]['data']))
        # Commit per book, so a failure part-way through keeps earlier books
        conn.commit()
        count += 1
        if progress:
            progress(count)
@staticmethod
def create_version1(conn):
    '''
    Create the version 1 schema on a fresh (user_version == 0) database:
    the books table plus authors/publishers/tags/series and their link
    tables, ratings, per-format data, covers, comments, referential
    integrity triggers and the ``meta`` view. Sets ``pragma user_version``
    to 1 and commits.

    NOTE(review): some v1 triggers look buggy — ``fkc_delete_books_authors_link``
    and the publishers/series delete triggers reference ``OLD.book`` on tables
    that have no ``book`` column, and ``fkc_delete_books_tags_link`` compares
    ``tag=OLD.book``. The tags trigger is recreated in upgrade_version5 and the
    series triggers in upgrade_version7; do not "fix" this historical SQL here.
    '''
    conn.executescript(
    '''
    /**** books table *****/
    CREATE TABLE books ( id INTEGER PRIMARY KEY AUTOINCREMENT,
    title TEXT NOT NULL DEFAULT 'Unknown' COLLATE NOCASE,
    sort TEXT COLLATE NOCASE,
    timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    uri TEXT,
    series_index INTEGER NOT NULL DEFAULT 1
    );
    CREATE INDEX books_idx ON books (sort COLLATE NOCASE);
    CREATE TRIGGER books_insert_trg
    AFTER INSERT ON books
    BEGIN
    UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
    END;
    CREATE TRIGGER books_update_trg
    AFTER UPDATE ON books
    BEGIN
    UPDATE books SET sort=title_sort(NEW.title) WHERE id=NEW.id;
    END;
    /***** authors table *****/
    CREATE TABLE authors ( id INTEGER PRIMARY KEY,
    name TEXT NOT NULL COLLATE NOCASE,
    sort TEXT COLLATE NOCASE,
    UNIQUE(name)
    );
    CREATE INDEX authors_idx ON authors (sort COLLATE NOCASE);
    CREATE TRIGGER authors_insert_trg
    AFTER INSERT ON authors
    BEGIN
    UPDATE authors SET sort=NEW.name WHERE id=NEW.id;
    END;
    CREATE TRIGGER authors_update_trg
    AFTER UPDATE ON authors
    BEGIN
    UPDATE authors SET sort=NEW.name WHERE id=NEW.id;
    END;
    CREATE TABLE books_authors_link ( id INTEGER PRIMARY KEY,
    book INTEGER NOT NULL,
    author INTEGER NOT NULL,
    UNIQUE(book, author)
    );
    CREATE INDEX books_authors_link_bidx ON books_authors_link (book);
    CREATE INDEX books_authors_link_aidx ON books_authors_link (author);
    CREATE TRIGGER fkc_insert_books_authors_link
    BEFORE INSERT ON books_authors_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    WHEN (SELECT id from authors WHERE id=NEW.author) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: author not in authors')
    END;
    END;
    CREATE TRIGGER fkc_update_books_authors_link_a
    BEFORE UPDATE OF book ON books_authors_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_update_books_authors_link_b
    BEFORE UPDATE OF author ON books_authors_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from authors WHERE id=NEW.author) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: author not in authors')
    END;
    END;
    CREATE TRIGGER fkc_delete_books_authors_link
    BEFORE DELETE ON authors
    BEGIN
    SELECT CASE
    WHEN (SELECT COUNT(id) FROM books_authors_link WHERE book=OLD.book) > 0
    THEN RAISE(ABORT, 'Foreign key violation: author is still referenced')
    END;
    END;
    /***** publishers table *****/
    CREATE TABLE publishers ( id INTEGER PRIMARY KEY,
    name TEXT NOT NULL COLLATE NOCASE,
    sort TEXT COLLATE NOCASE,
    UNIQUE(name)
    );
    CREATE INDEX publishers_idx ON publishers (sort COLLATE NOCASE);
    CREATE TRIGGER publishers_insert_trg
    AFTER INSERT ON publishers
    BEGIN
    UPDATE publishers SET sort=NEW.name WHERE id=NEW.id;
    END;
    CREATE TRIGGER publishers_update_trg
    AFTER UPDATE ON publishers
    BEGIN
    UPDATE publishers SET sort=NEW.name WHERE id=NEW.id;
    END;
    CREATE TABLE books_publishers_link ( id INTEGER PRIMARY KEY,
    book INTEGER NOT NULL,
    publisher INTEGER NOT NULL,
    UNIQUE(book)
    );
    CREATE INDEX books_publishers_link_bidx ON books_publishers_link (book);
    CREATE INDEX books_publishers_link_aidx ON books_publishers_link (publisher);
    CREATE TRIGGER fkc_insert_books_publishers_link
    BEFORE INSERT ON books_publishers_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    WHEN (SELECT id from publishers WHERE id=NEW.publisher) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: publisher not in publishers')
    END;
    END;
    CREATE TRIGGER fkc_update_books_publishers_link_a
    BEFORE UPDATE OF book ON books_publishers_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_update_books_publishers_link_b
    BEFORE UPDATE OF publisher ON books_publishers_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from publishers WHERE id=NEW.publisher) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: publisher not in publishers')
    END;
    END;
    CREATE TRIGGER fkc_delete_books_publishers_link
    BEFORE DELETE ON publishers
    BEGIN
    SELECT CASE
    WHEN (SELECT COUNT(id) FROM books_publishers_link WHERE book=OLD.book) > 0
    THEN RAISE(ABORT, 'Foreign key violation: publisher is still referenced')
    END;
    END;
    /***** tags table *****/
    CREATE TABLE tags ( id INTEGER PRIMARY KEY,
    name TEXT NOT NULL COLLATE NOCASE,
    UNIQUE (name)
    );
    CREATE INDEX tags_idx ON tags (name COLLATE NOCASE);
    CREATE TABLE books_tags_link ( id INTEGER PRIMARY KEY,
    book INTEGER NOT NULL,
    tag INTEGER NOT NULL,
    UNIQUE(book, tag)
    );
    CREATE INDEX books_tags_link_bidx ON books_tags_link (book);
    CREATE INDEX books_tags_link_aidx ON books_tags_link (tag);
    CREATE TRIGGER fkc_insert_books_tags_link
    BEFORE INSERT ON books_tags_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    WHEN (SELECT id from tags WHERE id=NEW.tag) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: tag not in tags')
    END;
    END;
    CREATE TRIGGER fkc_update_books_tags_link_a
    BEFORE UPDATE OF book ON books_tags_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_update_books_tags_link_b
    BEFORE UPDATE OF tag ON books_tags_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from tags WHERE id=NEW.tag) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: tag not in tags')
    END;
    END;
    CREATE TRIGGER fkc_delete_books_tags_link
    BEFORE DELETE ON tags
    BEGIN
    SELECT CASE
    WHEN (SELECT COUNT(id) FROM books_tags_link WHERE tag=OLD.book) > 0
    THEN RAISE(ABORT, 'Foreign key violation: tag is still referenced')
    END;
    END;
    /***** series table *****/
    CREATE TABLE series ( id INTEGER PRIMARY KEY,
    name TEXT NOT NULL COLLATE NOCASE,
    sort TEXT COLLATE NOCASE,
    UNIQUE (name)
    );
    CREATE INDEX series_idx ON series (sort COLLATE NOCASE);
    CREATE TRIGGER series_insert_trg
    AFTER INSERT ON series
    BEGIN
    UPDATE series SET sort=NEW.name WHERE id=NEW.id;
    END;
    CREATE TRIGGER series_update_trg
    AFTER UPDATE ON series
    BEGIN
    UPDATE series SET sort=NEW.name WHERE id=NEW.id;
    END;
    CREATE TABLE books_series_link ( id INTEGER PRIMARY KEY,
    book INTEGER NOT NULL,
    series INTEGER NOT NULL,
    UNIQUE(book)
    );
    CREATE INDEX books_series_link_bidx ON books_series_link (book);
    CREATE INDEX books_series_link_aidx ON books_series_link (series);
    CREATE TRIGGER fkc_insert_books_series_link
    BEFORE INSERT ON books_series_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    WHEN (SELECT id from series WHERE id=NEW.series) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: series not in series')
    END;
    END;
    CREATE TRIGGER fkc_update_books_series_link_a
    BEFORE UPDATE OF book ON books_series_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_update_books_series_link_b
    BEFORE UPDATE OF series ON books_series_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from series WHERE id=NEW.series) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: series not in series')
    END;
    END;
    CREATE TRIGGER fkc_delete_books_series_link
    BEFORE DELETE ON series
    BEGIN
    SELECT CASE
    WHEN (SELECT COUNT(id) FROM books_series_link WHERE book=OLD.book) > 0
    THEN RAISE(ABORT, 'Foreign key violation: series is still referenced')
    END;
    END;
    /**** ratings table ****/
    CREATE TABLE ratings ( id INTEGER PRIMARY KEY,
    rating INTEGER CHECK(rating > -1 AND rating < 11),
    UNIQUE (rating)
    );
    INSERT INTO ratings (rating) VALUES (0);
    INSERT INTO ratings (rating) VALUES (1);
    INSERT INTO ratings (rating) VALUES (2);
    INSERT INTO ratings (rating) VALUES (3);
    INSERT INTO ratings (rating) VALUES (4);
    INSERT INTO ratings (rating) VALUES (5);
    INSERT INTO ratings (rating) VALUES (6);
    INSERT INTO ratings (rating) VALUES (7);
    INSERT INTO ratings (rating) VALUES (8);
    INSERT INTO ratings (rating) VALUES (9);
    INSERT INTO ratings (rating) VALUES (10);
    CREATE TABLE books_ratings_link ( id INTEGER PRIMARY KEY,
    book INTEGER NOT NULL,
    rating INTEGER NOT NULL,
    UNIQUE(book, rating)
    );
    CREATE INDEX books_ratings_link_bidx ON books_ratings_link (book);
    CREATE INDEX books_ratings_link_aidx ON books_ratings_link (rating);
    CREATE TRIGGER fkc_insert_books_ratings_link
    BEFORE INSERT ON books_ratings_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    WHEN (SELECT id from ratings WHERE id=NEW.rating) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: rating not in ratings')
    END;
    END;
    CREATE TRIGGER fkc_update_books_ratings_link_a
    BEFORE UPDATE OF book ON books_ratings_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_update_books_ratings_link_b
    BEFORE UPDATE OF rating ON books_ratings_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from ratings WHERE id=NEW.rating) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: rating not in ratings')
    END;
    END;
    /**** data table ****/
    CREATE TABLE data ( id INTEGER PRIMARY KEY,
    book INTEGER NON NULL,
    format TEXT NON NULL COLLATE NOCASE,
    uncompressed_size INTEGER NON NULL,
    data BLOB NON NULL,
    UNIQUE(book, format)
    );
    CREATE INDEX data_idx ON data (book);
    CREATE TRIGGER fkc_data_insert
    BEFORE INSERT ON data
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_data_update
    BEFORE UPDATE OF book ON data
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    /**** covers table ****/
    CREATE TABLE covers ( id INTEGER PRIMARY KEY,
    book INTEGER NON NULL,
    uncompressed_size INTEGER NON NULL,
    data BLOB NON NULL,
    UNIQUE(book)
    );
    CREATE INDEX covers_idx ON covers (book);
    CREATE TRIGGER fkc_covers_insert
    BEFORE INSERT ON covers
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_covers_update
    BEFORE UPDATE OF book ON covers
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    /**** comments table ****/
    CREATE TABLE comments ( id INTEGER PRIMARY KEY,
    book INTEGER NON NULL,
    text TEXT NON NULL COLLATE NOCASE,
    UNIQUE(book)
    );
    CREATE INDEX comments_idx ON comments (book);
    CREATE TRIGGER fkc_comments_insert
    BEFORE INSERT ON comments
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    CREATE TRIGGER fkc_comments_update
    BEFORE UPDATE OF book ON comments
    BEGIN
    SELECT CASE
    WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: book not in books')
    END;
    END;
    /**** Handle deletion of book ****/
    CREATE TRIGGER books_delete_trg
    AFTER DELETE ON books
    BEGIN
    DELETE FROM books_authors_link WHERE book=OLD.id;
    DELETE FROM books_publishers_link WHERE book=OLD.id;
    DELETE FROM books_ratings_link WHERE book=OLD.id;
    DELETE FROM books_series_link WHERE book=OLD.id;
    DELETE FROM books_tags_link WHERE book=OLD.id;
    DELETE FROM data WHERE book=OLD.id;
    DELETE FROM covers WHERE book=OLD.id;
    DELETE FROM comments WHERE book=OLD.id;
    END;
    /**** Views ****/
    CREATE VIEW meta AS
    SELECT id, title,
    (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
    (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
    (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
    timestamp,
    (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
    (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
    (SELECT text FROM comments WHERE book=books.id) comments,
    (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
    sort,
    (SELECT sort FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors_sort,
    (SELECT sort FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher_sort
    FROM books;
    '''
    )
    # Stamp the schema version so __init__'s upgrade loop starts at 1.
    conn.execute('pragma user_version=1')
    conn.commit()
@staticmethod
def upgrade_version1(conn):
    '''
    Schema v1 -> v2: add a denormalized ``books.author_sort`` column
    (seeded from the first linked author's name), replace the per-author
    sort triggers/index with an index on the new column, add the
    ``conversion_options`` table, extend the book-delete trigger to clean
    it up, and rebuild the ``meta`` view around ``author_sort``.
    '''
    conn.executescript(
    '''
    /***** authors_sort table *****/
    ALTER TABLE books ADD COLUMN author_sort TEXT COLLATE NOCASE;
    UPDATE books SET author_sort=(SELECT name FROM authors WHERE id=\
    (SELECT author FROM books_authors_link WHERE book=books.id)) WHERE id IN (SELECT id FROM books ORDER BY id);
    DROP INDEX authors_idx;
    DROP TRIGGER authors_insert_trg;
    DROP TRIGGER authors_update_trg;
    CREATE INDEX authors_idx ON books (author_sort COLLATE NOCASE);
    CREATE TABLE conversion_options ( id INTEGER PRIMARY KEY,
    format TEXT NOT NULL COLLATE NOCASE,
    book INTEGER,
    data BLOB NOT NULL,
    UNIQUE(format,book)
    );
    CREATE INDEX conversion_options_idx_a ON conversion_options (format COLLATE NOCASE);
    CREATE INDEX conversion_options_idx_b ON conversion_options (book);
    DROP TRIGGER books_delete_trg;
    CREATE TRIGGER books_delete_trg
    AFTER DELETE ON books
    BEGIN
    DELETE FROM books_authors_link WHERE book=OLD.id;
    DELETE FROM books_publishers_link WHERE book=OLD.id;
    DELETE FROM books_ratings_link WHERE book=OLD.id;
    DELETE FROM books_series_link WHERE book=OLD.id;
    DELETE FROM books_tags_link WHERE book=OLD.id;
    DELETE FROM data WHERE book=OLD.id;
    DELETE FROM covers WHERE book=OLD.id;
    DELETE FROM comments WHERE book=OLD.id;
    DELETE FROM conversion_options WHERE book=OLD.id;
    END;
    DROP VIEW meta;
    CREATE VIEW meta AS
    SELECT id, title,
    (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
    (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
    (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
    timestamp,
    (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
    (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
    (SELECT text FROM comments WHERE book=books.id) comments,
    (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
    sort,
    author_sort
    FROM books;
    DROP INDEX publishers_idx;
    CREATE INDEX publishers_idx ON publishers (name COLLATE NOCASE);
    DROP TRIGGER publishers_insert_trg;
    DROP TRIGGER publishers_update_trg;
    '''
    )
    conn.execute('pragma user_version=2')
    conn.commit()
@staticmethod
def upgrade_version2(conn):
    '''Schema v2 -> v3: add the ``books.isbn`` column.'''
    conn.executescript(
    '''
    /***** Add ISBN column ******/
    ALTER TABLE books ADD COLUMN isbn TEXT DEFAULT "" COLLATE NOCASE;
    ''')
    conn.execute('pragma user_version=3')
    conn.commit()
@staticmethod
def upgrade_version3(conn):
    '''Schema v3 -> v4: rebuild the ``meta`` view to expose ``series_index``.'''
    conn.executescript(
    '''
    /***** Add series_index column to meta view ******/
    DROP VIEW meta;
    CREATE VIEW meta AS
    SELECT id, title,
    (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
    (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
    (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
    timestamp,
    (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
    (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
    (SELECT text FROM comments WHERE book=books.id) comments,
    (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
    series_index,
    sort,
    author_sort
    FROM books;
    ''')
    conn.execute('pragma user_version=4')
    conn.commit()
@staticmethod
def upgrade_version4(conn):
    '''Schema v4 -> v5: rebuild the ``meta`` view to expose a concatenated ``formats`` column.'''
    conn.executescript(
    '''
    /***** Add formats column to meta view ******/
    DROP VIEW meta;
    CREATE VIEW meta AS
    SELECT id, title,
    (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
    (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
    (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
    timestamp,
    (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
    (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
    (SELECT text FROM comments WHERE book=books.id) comments,
    (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
    series_index,
    sort,
    author_sort,
    (SELECT concat(format) FROM data WHERE data.book=books.id) formats
    FROM books;
    ''')
    conn.execute('pragma user_version=5')
    conn.commit()
@staticmethod
def upgrade_version5(conn):
    '''
    Schema v5 -> v6: recreate the tag-delete FK trigger; the v1 version
    compared ``tag=OLD.book`` instead of ``tag=OLD.id``.
    '''
    conn.executescript(
    '''
    DROP TRIGGER fkc_delete_books_tags_link;
    CREATE TRIGGER fkc_delete_books_tags_link
    BEFORE DELETE ON tags
    BEGIN
    SELECT CASE
    WHEN (SELECT COUNT(id) FROM books_tags_link WHERE tag=OLD.id) > 0
    THEN RAISE(ABORT, 'Foreign key violation: tag is still referenced')
    END;
    END;
    ''')
    conn.execute('pragma user_version=6')
    conn.commit()
@staticmethod
def upgrade_version6(conn):
    '''Schema v6 -> v7: add the ``feeds`` table (title + recipe script).'''
    conn.executescript('''CREATE TABLE feeds ( id INTEGER PRIMARY KEY,
    title TEXT NOT NULL,
    script TEXT NOT NULL,
    UNIQUE(title)
    );''')
    conn.execute('pragma user_version=7')
    conn.commit()
@staticmethod
def upgrade_version7(conn):
    '''
    Schema v7 -> v8: recreate the series FK triggers; the v1 delete
    trigger referenced ``OLD.book`` on the series table instead of
    counting links by ``series=OLD.id``.
    '''
    conn.executescript('''\
    DROP TRIGGER fkc_update_books_series_link_b;
    CREATE TRIGGER fkc_update_books_series_link_b
    BEFORE UPDATE OF series ON books_series_link
    BEGIN
    SELECT CASE
    WHEN (SELECT id from series WHERE id=NEW.series) IS NULL
    THEN RAISE(ABORT, 'Foreign key violation: series not in series')
    END;
    END;
    DROP TRIGGER fkc_delete_books_series_link;
    CREATE TRIGGER fkc_delete_books_series_link
    BEFORE DELETE ON series
    BEGIN
    SELECT CASE
    WHEN (SELECT COUNT(id) FROM books_series_link WHERE series=OLD.id) > 0
    THEN RAISE(ABORT, 'Foreign key violation: series is still referenced')
    END;
    END;
    '''
    )
    conn.execute('pragma user_version=8')
    conn.commit()
@staticmethod
def upgrade_version8(conn):
    '''Schema v8 -> v9: discard all stored feeds.'''
    for statement in ('DELETE FROM feeds', 'pragma user_version=9'):
        conn.execute(statement)
    conn.commit()
@staticmethod
def upgrade_version9(conn):
    '''
    Schema v9 -> v10: rewrite every title with its own value so the
    books_update_trg trigger recomputes the sort column for each row.
    '''
    rows = conn.execute('SELECT id, title FROM books').fetchall()
    for book_id, book_title in rows:
        conn.execute('UPDATE books SET title=? WHERE id=?', (book_title, book_id))
    conn.execute('pragma user_version=10')
    conn.commit()
@staticmethod
def upgrade_version10(conn):
    '''
    Schema v10 -> v11: backfill any empty ``books.author_sort`` from the
    concatenated authors column of the ``meta`` view.
    '''
    for book_id, current_sort in conn.execute('SELECT id, author_sort FROM books').fetchall():
        if current_sort:
            continue
        aus = conn.execute('SELECT authors FROM meta WHERE id=?', (book_id,)).fetchone()[0]
        conn.execute('UPDATE books SET author_sort=? WHERE id=?', (aus, book_id))
    conn.execute('pragma user_version=11')
    conn.commit()
@staticmethod
def upgrade_version11(conn):
    '''Schema v11 -> v12: rebuild the ``meta`` view to expose ``isbn``.'''
    conn.executescript(
    '''
    /***** Add isbn column to meta view ******/
    DROP VIEW meta;
    CREATE VIEW meta AS
    SELECT id, title,
    (SELECT concat(name) FROM authors WHERE authors.id IN (SELECT author from books_authors_link WHERE book=books.id)) authors,
    (SELECT name FROM publishers WHERE publishers.id IN (SELECT publisher from books_publishers_link WHERE book=books.id)) publisher,
    (SELECT rating FROM ratings WHERE ratings.id IN (SELECT rating from books_ratings_link WHERE book=books.id)) rating,
    timestamp,
    (SELECT MAX(uncompressed_size) FROM data WHERE book=books.id) size,
    (SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=books.id)) tags,
    (SELECT text FROM comments WHERE book=books.id) comments,
    (SELECT name FROM series WHERE series.id IN (SELECT series FROM books_series_link WHERE book=books.id)) series,
    series_index,
    sort,
    author_sort,
    (SELECT concat(format) FROM data WHERE data.book=books.id) formats,
    isbn
    FROM books;
    ''')
    conn.execute('pragma user_version=12')
    conn.commit()
def __init__(self, dbpath, row_factory=False):
    '''
    Open the database at dbpath, creating the version 1 schema if it is
    brand new and then running every applicable upgrade_versionN step.
    '''
    self.dbpath = dbpath
    self.conn = _connect(dbpath)
    if row_factory:
        self.conn.row_factory = sqlite.Row
    self.cache = []
    self.data = []
    if self.user_version == 0:  # No tables have been created
        LibraryDatabase.create_version1(self.conn)
    version = 1
    while True:
        upgrade = getattr(LibraryDatabase, 'upgrade_version%d' % version, None)
        if upgrade is None:
            break
        if self.user_version == version:
            print('Upgrading database from version: %d' % version)
            upgrade(self.conn)
        version += 1
def close(self):
    '''Close the underlying database connection.'''
    self.conn.close()
@property
def user_version(self):
    '''The SQLite ``user_version`` pragma of this database.'''
    return self.conn.get('pragma user_version;', all=False)
def is_empty(self):
    '''True if the books table holds no rows at all.'''
    first_row = self.conn.get('SELECT id FROM books LIMIT 1', all=False)
    return not first_row
def refresh(self, sort_field, ascending):
    '''
    Rebuild self.data and self.cache from the meta view, sorted on
    sort_field. Filter results are lost.
    '''
    column_for = {
        'title': 'sort',
        'authors': 'author_sort',
        'publisher': 'publisher',
        'size': 'size',
        'date': 'timestamp',
        'timestamp': 'timestamp',
        'formats': 'formats',
        'rating': 'rating',
        'tags': 'tags',
        'series': 'series',
    }
    field = column_for[sort_field]
    direction = 'ASC' if ascending else 'DESC'
    # Secondary key: series sorts by position, titles break ties by
    # author, everything else by title.
    secondary = {'series': 'series_index', 'title': 'author_sort'}.get(field, 'title')
    order_by = '{0} {1},{2} {1}'.format(field, direction, secondary)
    self.cache = self.conn.get('SELECT * from meta ORDER BY ' + order_by)
    self.data = self.cache
    self.conn.commit()
def refresh_ids(self, ids):
    '''Re-fetch the meta rows for ids into self.data; returns their row indices.'''
    positions = [self.index(book_id) for book_id in ids]
    for book_id, pos in zip(ids, positions):
        self.data[pos] = self.conn.get('SELECT * from meta WHERE id=?',
                                       (book_id,), all=False)
    return positions
def filter(self, filters, refilter=False, OR=False):
    '''
    Filter data based on filters. Matching is case independent regexp
    matching.
    @param filters: A list of SearchToken objects
    @param refilter: If True filters are applied to the results of the
    previous filtering.
    @param OR: If True an item is kept when any filter matches; if False
    every filter must match.
    '''
    source = self.data if refilter else self.cache
    if not filters:
        self.data = source
        return
    combine = any if OR else all
    self.data = [item for item in source
                 if combine(token.match(item) for token in filters)]
def rows(self):
    '''Number of rows currently visible after filtering.'''
    if not self.data:
        return 0
    return len(self.data)
def id(self, index):
    '''Database id of the book at row position index.'''
    record = self.data[index]
    return record[0]
def row(self, id):
    '''Row position of the book with the given database id, or None.'''
    return next((pos for pos, record in enumerate(self.data)
                 if record[0] == id), None)
def title(self, index, index_is_id=False):
    '''
    Title of a book: from the cached row when index is a position, or
    looked up in the meta view when index_is_id is True. Returns
    _('Unknown') if the id lookup fails.
    '''
    if not index_is_id:
        return self.data[index][1]
    try:
        return self.conn.get('SELECT title FROM meta WHERE id=?', (index,), all=False)
    except Exception:
        # Narrowed from a bare except:, which also swallowed
        # SystemExit/KeyboardInterrupt.
        return _('Unknown')
def authors(self, index, index_is_id=False):
    '''
    Authors as a comma separated list or None.
    In the comma separated list, commas in author names are replaced by | symbols
    '''
    if not index_is_id:
        return self.data[index][2]
    try:
        return self.conn.get('SELECT authors FROM meta WHERE id=?', (index,), all=False)
    except Exception:
        # Narrowed from a bare except:; failure still yields None.
        pass
def author_id(self, index, index_is_id=False):
    '''Id of the (first) author linked to the book, or None.'''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get('SELECT author from books_authors_link WHERE book=?',
                         (book_id,), all=False)
def isbn(self, idx, index_is_id=False):
    '''ISBN of the book, or None.'''
    book_id = idx if index_is_id else self.id(idx)
    return self.conn.get('SELECT isbn FROM books WHERE id=?', (book_id,), all=False)
def author_sort(self, index, index_is_id=False):
    '''The denormalized author sort string stored on the book row.'''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get('SELECT author_sort FROM books WHERE id=?',
                         (book_id,), all=False)
def publisher(self, index, index_is_id=False):
    '''Publisher name from the cached row or the meta view.'''
    if not index_is_id:
        return self.data[index][3]
    return self.conn.get('SELECT publisher FROM meta WHERE id=?', (index,), all=False)
def publisher_id(self, index, index_is_id=False):
    '''Id of the publisher linked to the book, or None.'''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get('SELECT publisher from books_publishers_link WHERE book=?',
                         (book_id,), all=False)
def rating(self, index, index_is_id=False):
    '''Rating (0-10 half-star scale) from the cached row or the meta view.'''
    if not index_is_id:
        return self.data[index][4]
    return self.conn.get('SELECT rating FROM meta WHERE id=?', (index,), all=False)
def timestamp(self, index, index_is_id=False):
    '''Timestamp from the cached row or the meta view.'''
    if not index_is_id:
        return self.data[index][5]
    return self.conn.get('SELECT timestamp FROM meta WHERE id=?', (index,), all=False)
def max_size(self, index, index_is_id=False):
    '''
    Size of the largest stored format, from the meta view (its ``size``
    column is MAX(uncompressed_size) over the data table).
    '''
    if index_is_id:
        return self.conn.get('SELECT size FROM meta WHERE id=?', (index,), all=False)
    # BUGFIX: size is column 6 of the meta view (id, title, authors,
    # publisher, rating, timestamp, size, ...); the old code returned
    # column 4, which is the rating.
    return self.data[index][6]
def cover(self, index, index_is_id=False):
    '''Cover as a data string or None'''
    book_id = index if index_is_id else self.id(index)
    blob = self.conn.get('SELECT data FROM covers WHERE book=?', (book_id,), all=False)
    return decompress(blob) if blob else None
def tags(self, index, index_is_id=False):
    '''tags as a comma separated list or None'''
    book_id = index if index_is_id else self.id(index)
    rows = self.conn.get(
        'SELECT concat(name) FROM tags WHERE tags.id IN (SELECT tag from books_tags_link WHERE book=?)',
        (book_id,))
    if not rows or not rows[0][0]:
        return None
    return ','.join(t.lower().strip() for t in rows[0][0].split(','))
def series_id(self, index, index_is_id=False):
    '''Id of the series the book belongs to, or None.'''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get('SELECT series from books_series_link WHERE book=?',
                         (book_id,), all=False)
def series(self, index, index_is_id=False):
    '''Name of the series the book belongs to, or None.'''
    sid = self.series_id(index, index_is_id)
    return self.conn.get('SELECT name from series WHERE id=?', (sid,), all=False)
def series_index(self, index, index_is_id=False):
    '''
    Position of the book inside its series as a float; defaults to 1.0
    when the stored value is missing or unparseable.
    '''
    if index_is_id:
        raw = self.conn.get('SELECT series_index FROM books WHERE id=?',
                            (index,), all=False)
    else:
        raw = self.data[index][10]
    try:
        return float(raw)
    except (TypeError, ValueError):
        # Narrowed from a bare except:; float() raises TypeError for
        # None and ValueError for non-numeric strings.
        return 1.0
def books_in_series(self, series_id):
    '''
    Return an ordered list of all books in the series.
    The list contains book ids.
    '''
    rows = self.conn.get('SELECT book from books_series_link WHERE series=?',
                         (series_id,))
    if not rows:
        return []
    return sorted((r[0] for r in rows),
                  key=lambda book_id: self.series_index(book_id, True))
def books_in_series_of(self, index, index_is_id=False):
    '''
    Return an ordered list of all books in the series that the book
    identified by index belongs to. If the book does not belong to a
    series return an empty list. The list contains book ids.
    '''
    return self.books_in_series(self.series_id(index, index_is_id=index_is_id))
def comments(self, index, index_is_id=False):
    '''Comments as string or None'''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get('SELECT text FROM comments WHERE book=?', (book_id,), all=False)
def formats(self, index, index_is_id=False):
    ''' Return available formats as a comma separated list '''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get('SELECT concat(format) FROM data WHERE data.book=?',
                         (book_id,), all=False)
def sizeof_format(self, index, format, index_is_id=False):
    ''' Return size of C{format} for book C{index} in bytes'''
    book_id = index if index_is_id else self.id(index)
    return self.conn.get(
        'SELECT uncompressed_size FROM data WHERE data.book=? AND data.format=?',
        (book_id, format.upper()), all=False)
def format(self, index, format, index_is_id=False):
    '''Decompressed data of the given format for the book.'''
    book_id = index if index_is_id else self.id(index)
    blob = self.conn.get('SELECT data FROM data WHERE book=? AND format=?',
                         (book_id, format), all=False)
    return decompress(blob)
def all_series(self):
    '''All (id, name) pairs from the series table.'''
    rows = self.conn.get('SELECT id, name FROM series')
    return [(r[0], r[1]) for r in rows]
def series_name(self, series_id):
    '''Name of the series with the given id.'''
    # Parameterized query instead of %d string interpolation.
    return self.conn.get('SELECT name FROM series WHERE id=?',
                         (series_id,), all=False)
def author_name(self, author_id):
    '''Name of the author with the given id.'''
    # Parameterized query instead of %d string interpolation.
    return self.conn.get('SELECT name FROM authors WHERE id=?',
                         (author_id,), all=False)
def tag_name(self, tag_id):
    '''Name of the tag with the given id.'''
    # Parameterized query instead of %d string interpolation.
    return self.conn.get('SELECT name FROM tags WHERE id=?',
                         (tag_id,), all=False)
def all_authors(self):
    '''All (id, name) pairs from the authors table.'''
    rows = self.conn.get('SELECT id, name FROM authors')
    return [(r[0], r[1]) for r in rows]
def all_author_names(self):
    '''All non-empty author names, with | unescaped back to commas.'''
    cleaned = (r[0].strip().replace('|', ',')
               for r in self.conn.get('SELECT name FROM authors'))
    return [name for name in cleaned if name]
def all_publishers(self):
    '''All (id, name) pairs from the publishers table.'''
    rows = self.conn.get('SELECT id, name FROM publishers')
    return [(r[0], r[1]) for r in rows]
def all_tags(self):
    '''All non-empty tag names, stripped of surrounding whitespace.'''
    stripped = (r[0].strip() for r in self.conn.get('SELECT name FROM tags'))
    return [name for name in stripped if name]
def all_tags2(self):
    '''All (id, name) pairs from the tags table.'''
    rows = self.conn.get('SELECT id, name FROM tags')
    return [(r[0], r[1]) for r in rows]
def all_titles(self):
    '''All (id, title) pairs from the books table.'''
    rows = self.conn.get('SELECT id, title FROM books')
    return [(r[0], r[1]) for r in rows]
def conversion_options(self, id, format):
    '''Unpickled stored conversion options for (book, format), or None.'''
    blob = self.conn.get(
        'SELECT data FROM conversion_options WHERE book=? AND format=?',
        (id, format.upper()), all=False)
    return pickle_loads(bytes(blob)) if blob else None
def has_conversion_options(self, ids, format='PIPE'):
    '''
    True if any of the books in ids has stored conversion options for
    format. For very large id sets the check is skipped and True is
    assumed.
    '''
    ids = tuple(ids)
    if len(ids) > 50000:
        return True
    # Use ? placeholders instead of interpolating repr(tuple) into the
    # SQL text.
    placeholders = ','.join('?' * len(ids))
    query = ('SELECT data FROM conversion_options WHERE book IN (%s) AND '
             'format=? LIMIT 1') % placeholders
    return self.conn.get(query, ids + (format,), all=False) is not None
def delete_conversion_options(self, id, format, commit=True):
    '''Delete the stored conversion options for (book, format).'''
    self.conn.execute(
        'DELETE FROM conversion_options WHERE book=? AND format=?',
        (id, format.upper()))
    if commit:
        self.conn.commit()
def add_format(self, index, ext, stream, index_is_id=False):
    '''
    Add the format specified by ext. If it already exists it is replaced.
    '''
    id = index if index_is_id else self.id(index)
    stream.seek(0, 2)
    usize = stream.tell()
    stream.seek(0)
    data = sqlite.Binary(compress(stream.read()))
    ext = (ext or '').lower()
    # BUGFIX: self.formats() returns a comma separated string; the old
    # `ext in exts` was a substring test ('htm' matched 'html'), sending
    # a new format down the no-op UPDATE path. Compare against the split,
    # case-folded list instead (the format column is COLLATE NOCASE).
    fmts = self.formats(index, index_is_id=index_is_id)
    existing = {f.strip().lower() for f in fmts.split(',')} if fmts else set()
    if ext in existing:
        self.conn.execute('UPDATE data SET data=? WHERE format=? AND book=?',
                          (data, ext, id))
        self.conn.execute('UPDATE data SET uncompressed_size=? WHERE format=? AND book=?',
                          (usize, ext, id))
    else:
        self.conn.execute('INSERT INTO data(book, format, uncompressed_size, data) VALUES (?, ?, ?, ?)',
                          (id, ext, usize, data))
    self.conn.commit()
def remove_format(self, index, ext, index_is_id=False):
    '''Delete the stored data for the given format of the book.'''
    book_id = index if index_is_id else self.id(index)
    self.conn.execute('DELETE FROM data WHERE book=? AND format=?',
                      (book_id, ext.lower()))
    self.conn.commit()
def set(self, row, column, val):
    '''
    Convenience method for setting the title, authors, publisher or rating
    '''
    book_id = self.data[row][0]
    col = {'title': 1, 'authors': 2, 'publisher': 3, 'rating': 4, 'tags': 7}[column]
    # Keep both the filtered view and the cache in sync.
    self.data[row][col] = val
    for cached in self.cache:
        if cached[0] == book_id:
            cached[col] = val
            break
    if column == 'authors':
        self.set_authors(book_id, string_to_authors(val))
    elif column == 'title':
        self.set_title(book_id, val)
    elif column == 'publisher':
        self.set_publisher(book_id, val)
    elif column == 'rating':
        self.set_rating(book_id, val)
    elif column == 'tags':
        self.set_tags(book_id, val.split(','), append=False)
def set_conversion_options(self, id, format, options):
    '''Pickle and store conversion options for (book, format), replacing any existing row.'''
    blob = sqlite.Binary(pickle_dumps(options))
    existing = self.conn.get(
        'SELECT id FROM conversion_options WHERE book=? AND format=?',
        (id, format.upper()), all=False)
    if existing:
        self.conn.execute('UPDATE conversion_options SET data=? WHERE id=?',
                          (blob, existing))
    else:
        self.conn.execute('INSERT INTO conversion_options(book,format,data) VALUES (?,?,?)',
                          (id, format.upper(), blob))
    self.conn.commit()
def set_authors(self, id, authors):
    '''
    @param authors: A list of authors.
    '''
    self.conn.execute('DELETE FROM books_authors_link WHERE book=?', (id,))
    for name in authors:
        if not name:
            continue
        name = name.strip()
        existing = self.conn.get('SELECT id from authors WHERE name=?',
                                 (name,), all=False)
        if existing:
            aid = existing
            # Handle change of case
            self.conn.execute('UPDATE authors SET name=? WHERE id=?', (name, aid))
        else:
            aid = self.conn.execute('INSERT INTO authors(name) VALUES (?)',
                                    (name,)).lastrowid
        try:
            self.conn.execute('INSERT INTO books_authors_link(book, author) VALUES (?,?)',
                              (id, aid))
        except sqlite.IntegrityError:
            # Sometimes books specify the same author twice in their metadata
            pass
    self.conn.commit()
    def set_author_sort(self, id, sort):
        '''Set the author_sort string of book ``id``.'''
        self.conn.execute('UPDATE books SET author_sort=? WHERE id=?', (sort, id))
        self.conn.commit()
    def set_title(self, id, title):
        '''Set the title of book ``id``. A falsy title is silently ignored.'''
        if not title:
            return
        self.conn.execute('UPDATE books SET title=? WHERE id=?', (title, id))
        self.conn.commit()
    def set_isbn(self, id, isbn):
        '''Set the ISBN of book ``id`` (stored as-is, no validation).'''
        self.conn.execute('UPDATE books SET isbn=? WHERE id=?', (isbn, id))
        self.conn.commit()
    def set_publisher(self, id, publisher):
        '''Replace the publisher link of book ``id``; a falsy publisher just
        removes the existing link. Note: orphaned publisher rows are not
        cleaned up here.'''
        self.conn.execute('DELETE FROM books_publishers_link WHERE book=?',(id,))
        if publisher:
            pub = self.conn.get('SELECT id from publishers WHERE name=?', (publisher,), all=False)
            if pub:
                aid = pub
            else:
                aid = self.conn.execute('INSERT INTO publishers(name) VALUES (?)', (publisher,)).lastrowid
            self.conn.execute('INSERT INTO books_publishers_link(book, publisher) VALUES (?,?)', (id, aid))
        self.conn.commit()
    def set_comment(self, id, text):
        '''Replace the comments of book ``id`` (delete-then-insert).'''
        self.conn.execute('DELETE FROM comments WHERE book=?', (id,))
        self.conn.execute('INSERT INTO comments(book,text) VALUES (?,?)', (id, text))
        self.conn.commit()
    def delete_tags(self, tags):
        '''Delete each tag in ``tags`` from the database via delete_tag().'''
        for tag in tags:
            self.delete_tag(tag)
    def unapply_tags(self, book_id, tags):
        '''Remove the links between ``book_id`` and the named tags; the tag
        rows themselves are kept.'''
        for tag in tags:
            id = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,), all=False)
            if id:
                self.conn.execute('DELETE FROM books_tags_link WHERE tag=? AND book=?', (id, book_id))
        self.conn.commit()
    def set_tags(self, id, tags, append=False):
        '''
        Set the tags of book ``id``.

        :param tags: list of strings; tags are lowercased and stripped,
            duplicates and empty entries are dropped
        :param append: If True existing tags are not removed
        '''
        if not append:
            self.conn.execute('DELETE FROM books_tags_link WHERE book=?', (id,))
        for tag in set(tags):
            tag = tag.lower().strip()
            if not tag:
                continue
            t = self.conn.get('SELECT id FROM tags WHERE name=?', (tag,), all=False)
            if t:
                tid = t
            else:
                tid = self.conn.execute('INSERT INTO tags(name) VALUES(?)', (tag,)).lastrowid
            # Avoid duplicate links when appending
            if not self.conn.get('SELECT book FROM books_tags_link WHERE book=? AND tag=?',
                            (id, tid), all=False):
                self.conn.execute('INSERT INTO books_tags_link(book, tag) VALUES (?,?)',
                              (id, tid))
        self.conn.commit()
    def set_series(self, id, series):
        '''Replace the series link of book ``id`` and mirror the new value
        into the in-memory row (column 9), if the book is currently loaded.'''
        self.conn.execute('DELETE FROM books_series_link WHERE book=?',(id,))
        if series:
            s = self.conn.get('SELECT id from series WHERE name=?', (series,), all=False)
            if s:
                aid = s
            else:
                aid = self.conn.execute('INSERT INTO series(name) VALUES (?)', (series,)).lastrowid
            self.conn.execute('INSERT INTO books_series_link(book, series) VALUES (?,?)', (id, aid))
        self.conn.commit()
        row = self.row(id)
        if row is not None:
            self.data[row][9] = series
    def remove_unused_series(self):
        '''Garbage-collect series rows no book links to anymore.'''
        for id, in self.conn.get('SELECT id FROM series'):
            if not self.conn.get('SELECT id from books_series_link WHERE series=?', (id,)):
                self.conn.execute('DELETE FROM series WHERE id=?', (id,))
        self.conn.commit()
def set_series_index(self, id, idx):
idx = int(idx)
self.conn.execute('UPDATE books SET series_index=? WHERE id=?', (int(idx), id))
self.conn.commit()
row = self.row(id)
if row is not None:
self.data[row][10] = idx
    def set_rating(self, id, rating):
        '''Replace the rating link of book ``id``; creates the rating row
        on first use of a given value.'''
        rating = int(rating)
        self.conn.execute('DELETE FROM books_ratings_link WHERE book=?',(id,))
        rat = self.conn.get('SELECT id FROM ratings WHERE rating=?', (rating,), all=False)
        rat = rat if rat else self.conn.execute('INSERT INTO ratings(rating) VALUES (?)', (rating,)).lastrowid
        self.conn.execute('INSERT INTO books_ratings_link(book, rating) VALUES (?,?)', (id, rat))
        self.conn.commit()
    def set_cover(self, id, data):
        '''Replace the cover of book ``id``. ``data`` is raw image bytes;
        it is stored zlib-compressed together with its uncompressed size.
        Falsy data just removes the existing cover.'''
        self.conn.execute('DELETE FROM covers where book=?', (id,))
        if data:
            usize = len(data)
            data = compress(data)
            self.conn.execute('INSERT INTO covers(book, uncompressed_size, data) VALUES (?,?,?)',
                              (id, usize, sqlite.Binary(data)))
        self.conn.commit()
    def set_metadata(self, id, mi):
        '''
        Set metadata for the book C{id} from the L{MetaInformation} object C{mi}.
        Only fields that are set on ``mi`` are written; authors default to
        ['Unknown'] when absent.
        '''
        if mi.title:
            self.set_title(id, mi.title)
        if not mi.authors:
            mi.authors = ['Unknown']
        authors = []
        for a in mi.authors:
            # Each entry may itself contain several authors ("A & B")
            authors += string_to_authors(a)
        self.set_authors(id, authors)
        if mi.author_sort:
            self.set_author_sort(id, mi.author_sort)
        if mi.publisher:
            self.set_publisher(id, mi.publisher)
        if mi.rating:
            self.set_rating(id, mi.rating)
        if mi.series:
            self.set_series(id, mi.series)
        if mi.cover_data[1] is not None:
            self.set_cover(id, mi.cover_data[1])
def add_books(self, paths, formats, metadata, uris=[], add_duplicates=True):
'''
Add a book to the database. self.data and self.cache are not updated.
@param paths: List of paths to book files of file-like objects
'''
formats, metadata, uris = iter(formats), iter(metadata), iter(uris)
duplicates = []
for path in paths:
mi = next(metadata)
format = next(formats)
try:
uri = next(uris)
except StopIteration:
uri = None
if not add_duplicates and self.has_book(mi):
duplicates.append((path, format, mi, uri))
continue
series_index = 1 if mi.series_index is None else mi.series_index
aus = mi.author_sort if mi.author_sort else ', '.join(mi.authors)
obj = self.conn.execute('INSERT INTO books(title, uri, series_index, author_sort) VALUES (?, ?, ?, ?)',
(mi.title, uri, series_index, aus))
id = obj.lastrowid
self.conn.commit()
self.set_metadata(id, mi)
stream = path if hasattr(path, 'read') else open(path, 'rb')
stream.seek(0, 2)
usize = stream.tell()
stream.seek(0)
self.conn.execute('INSERT INTO data(book, format, uncompressed_size, data) VALUES (?,?,?,?)',
(id, format, usize, sqlite.Binary(compress(stream.read()))))
if not hasattr(path, 'read'):
stream.close()
self.conn.commit()
if duplicates:
paths = tuple(duplicate[0] for duplicate in duplicates)
formats = tuple(duplicate[1] for duplicate in duplicates)
metadata = tuple(duplicate[2] for duplicate in duplicates)
uris = tuple(duplicate[3] for duplicate in duplicates)
return (paths, formats, metadata, uris)
return None
def index(self, id, cache=False):
data = self.cache if cache else self.data
for i in range(len(data)):
if data[i][0] == id:
return i
def get_feeds(self):
feeds = self.conn.get('SELECT title, script FROM feeds')
yield from feeds
def get_feed(self, id):
return self.conn.get('SELECT script FROM feeds WHERE id=%d'%id,
all=False)
def update_feed(self, id, script, title):
self.conn.execute('UPDATE feeds set title=? WHERE id=?', (title, id))
self.conn.execute('UPDATE feeds set script=? WHERE id=?', (script, id))
self.conn.commit()
    def remove_feeds(self, ids):
        '''Delete the feeds with the given ids; one commit at the end.'''
        for x in ids:
            self.conn.execute('DELETE FROM feeds WHERE id=?', (x,))
        self.conn.commit()
    def add_feed(self, title, script):
        '''Insert a new news feed; byte strings are decoded as UTF-8 first.'''
        if isbytestring(title):
            title = title.decode('utf-8')
        if isbytestring(script):
            script = script.decode('utf-8')
        self.conn.execute('INSERT INTO feeds(title, script) VALUES (?, ?)',
                          (title, script))
        self.conn.commit()
    def set_feeds(self, feeds):
        '''Replace all stored feeds with the given (title, script) pairs.'''
        self.conn.execute('DELETE FROM feeds')
        for title, script in feeds:
            self.conn.execute('INSERT INTO feeds(title, script) VALUES (?, ?)',
                              (title, script))
        self.conn.commit()
    def delete_book(self, id):
        '''
        Removes book from self.cache, self.data and underlying database.
        '''
        try:
            self.cache.pop(self.index(id, cache=True))
            self.data.pop(self.index(id, cache=False))
        except TypeError:  # If data and cache are the same object, the second
            # index() returns None and pop(None) raises TypeError
            pass
        self.conn.execute('DELETE FROM books WHERE id=?', (id,))
        self.conn.commit()
    def get_metadata(self, idx, index_is_id=False):
        '''
        Convenience method to return metadata as a L{MetaInformation} object.

        :param idx: row index, or a book id when ``index_is_id`` is True
        '''
        aum = self.authors(idx, index_is_id=index_is_id)
        if aum:
            # Authors are stored comma-separated, with commas inside a name
            # escaped as '|'
            aum = [a.strip().replace('|', ',') for a in aum.split(',')]
        mi = MetaInformation(self.title(idx, index_is_id=index_is_id), aum)
        mi.author_sort = self.author_sort(idx, index_is_id=index_is_id)
        mi.comments = self.comments(idx, index_is_id=index_is_id)
        mi.publisher = self.publisher(idx, index_is_id=index_is_id)
        tags = self.tags(idx, index_is_id=index_is_id)
        if tags:
            mi.tags = [i.strip() for i in tags.split(',')]
        mi.series = self.series(idx, index_is_id=index_is_id)
        if mi.series:
            mi.series_index = self.series_index(idx, index_is_id=index_is_id)
        mi.rating = self.rating(idx, index_is_id=index_is_id)
        mi.isbn = self.isbn(idx, index_is_id=index_is_id)
        id = idx if index_is_id else self.id(idx)
        mi.application_id = id
        return mi
    def vacuum(self):
        '''Compact the underlying SQLite database file.'''
        self.conn.execute('VACUUM;')
        self.conn.commit()
    def all_ids(self):
        '''Return a list of all book ids in the database.'''
        return [i[0] for i in self.conn.get('SELECT id FROM books')]
    def has_id(self, id):
        '''Return True iff a book with the given id exists.'''
        return self.conn.get('SELECT id FROM books where id=?', (id,), all=False) is not None
class SearchToken:
    '''A single search term, optionally restricted to one metadata field
    (``field:pattern``) and optionally negated with a leading ``!``. The
    remainder of the token is compiled as a case-insensitive regex.'''

    # Maps searchable field names to their column index in a book record
    FIELD_MAP = {'title' : 1,
                 'author' : 2,
                 'publisher' : 3,
                 'tag' : 7,
                 'comments' : 8,
                 'series' : 9,
                 'format' : 13,
                }

    def __init__(self, text_token):
        token = text_token.strip()
        self.index = -1
        lowered = token.lower()
        for field, col in self.FIELD_MAP.items():
            prefix = field + ':'
            if lowered.startswith(prefix):
                token = token[len(prefix):]
                self.index = col
                break
        self.negate = token.startswith('!')
        if self.negate:
            token = token[1:]
        self.pattern = re.compile(token, re.IGNORECASE)

    def match(self, item):
        '''Return True when this token matches the book record ``item``
        (a sequence indexable by the FIELD_MAP columns).'''
        if self.index < 0:
            # Unqualified token: search across all mapped fields at once
            text = ' '.join(item[i] if item[i] else '' for i in self.FIELD_MAP.values())
        else:
            text = item[self.index] if item[self.index] else ''
        return bool(self.pattern.search(text)) ^ self.negate
def text_to_tokens(text):
    '''Parse a raw search string into SearchToken objects.

    Square brackets around the whole query select OR semantics; quoted
    phrases are kept together as single tokens. Tokens whose pattern does
    not compile as a regex are silently dropped.

    :return: (tokens, OR) where OR is True for a bracketed query
    '''
    bracketed = re.match(r'\[(.*)\]', text)
    OR = bracketed is not None
    if bracketed:
        text = bracketed.group(1)
    tokens = []
    # Pull out quoted phrases first, removing them from the remaining text
    while True:
        quoted = re.search('"(.*?)"', text)
        if quoted is None:
            break
        tokens.append(quoted.group(1))
        text = text.replace('"' + quoted.group(1) + '"', '')
    tokens.extend(text.split(' '))
    ans = []
    for tok in tokens:
        try:
            ans.append(SearchToken(tok))
        except re.error:
            continue
    return ans, OR
if __name__ == '__main__':
    # Manual smoke test: open a legacy library database with sqlite callback
    # tracebacks enabled so errors inside Python callbacks surface.
    sqlite.enable_callback_tracebacks(True)
    db = LibraryDatabase('/home/kovid/temp/library1.db.orig')
| 62,391 | Python | .py | 1,378 | 33.090711 | 148 | 0.565134 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,707 | csv_xml.py | kovidgoyal_calibre/src/calibre/library/catalogs/csv_xml.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import codecs
import os
import re
from collections import namedtuple
from calibre.customize import CatalogPlugin
from calibre.customize.conversion import DummyReporter
from calibre.library.catalogs import FIELDS
from calibre.utils.localization import _
class CSV_XML(CatalogPlugin):
    'CSV/XML catalog generator'

    Option = namedtuple('Option', 'option, default, dest, action, help')
    name = 'Catalog_CSV_XML'
    description = _('CSV/XML catalog generator')
    supported_platforms = ['windows', 'osx', 'linux']
    author = 'Greg Riker'
    version = (1, 0, 0)
    file_types = {'csv', 'xml'}
    cli_options = [
            Option('--fields',
                default='all',
                dest='fields',
                action=None,
                help=_('The fields to output when cataloging books in the '
                    'database. Should be a comma-separated list of fields.\n'
                    'Available fields: %(fields)s,\n'
                    'plus user-created custom fields.\n'
                    'Example: %(opt)s=title,authors,tags\n'
                    "Default: '%%default'\n"
                    "Applies to: CSV, XML output formats") % dict(
                        fields=', '.join(FIELDS), opt='--fields')),
            Option('--sort-by',
                default='id',
                dest='sort_by',
                action=None,
                help=_('Output field to sort on.\n'
                    'Available fields: author_sort, id, rating, size, timestamp, title_sort\n'
                    "Default: '%default'\n"
                    "Applies to: CSV, XML output formats"))]

    def run(self, path_to_output, opts, db, notification=DummyReporter()):
        '''Generate a CSV or XML catalog at ``path_to_output``; the output
        format is taken from the file extension.'''
        from lxml import etree
        from calibre.ebooks.metadata import authors_to_string
        from calibre.library import current_library_name
        from calibre.utils.date import isoformat
        from calibre.utils.html2text import html2text
        from calibre.utils.logging import default_log as log
        self.fmt = path_to_output.rpartition('.')[2]
        self.notification = notification
        current_library = current_library_name()
        if getattr(opts, 'library_path', None):
            current_library = os.path.basename(opts.library_path)
        if opts.verbose:
            opts_dict = vars(opts)
            log(f"{self.name}('{current_library}'): Generating {self.fmt.upper()}")
            if opts.connected_device['is_device_connected']:
                log(" connected_device: %s" % opts.connected_device['name'])
            if opts_dict['search_text']:
                log(" --search='%s'" % opts_dict['search_text'])
            if opts_dict['ids']:
                log(" Book count: %d" % len(opts_dict['ids']))
                if opts_dict['search_text']:
                    log(" (--search ignored when a subset of the database is specified)")
            if opts_dict['fields']:
                if opts_dict['fields'] == 'all':
                    log(" Fields: %s" % ', '.join(FIELDS[1:]))
                else:
                    log(" Fields: %s" % opts_dict['fields'])
        # If a list of ids are provided, don't use search_text
        if opts.ids:
            opts.search_text = None
        data = self.search_sort_db(db, opts)
        if not len(data):
            log.error("\nNo matching database entries for search criteria '%s'" % opts.search_text)
            # raise SystemExit(1)
        # Get the requested output fields as a list
        fields = self.get_output_fields(db, opts)
        # If connected device, add 'On Device' values to data
        if opts.connected_device['is_device_connected'] and 'ondevice' in fields:
            for entry in data:
                entry['ondevice'] = db.catalog_plugin_on_device_temp_mapping[entry['id']]['ondevice']
        fm = {x: db.field_metadata.get(x, {}) for x in fields}
        if self.fmt == 'csv':
            outfile = codecs.open(path_to_output, 'w', 'utf8')
            # Write a UTF-8 BOM
            outfile.write('\ufeff')
            # Output the field headers
            outfile.write('%s\n' % ','.join(fields))
            # Output the entry fields
            for entry in data:
                outstr = []
                for field in fields:
                    if field.startswith('#'):
                        # Custom column: fetch the value directly from the db
                        item = db.get_field(entry['id'], field, index_is_id=True)
                        if isinstance(item, (list, tuple)):
                            if fm.get(field, {}).get('display', {}).get('is_names', False):
                                item = ' & '.join(item)
                            else:
                                item = ', '.join(item)
                    elif field == 'library_name':
                        item = current_library
                    elif field == 'title_sort':
                        item = entry['sort']
                    else:
                        item = entry[field]
                    if item is None:
                        outstr.append('""')
                        continue
                    elif field == 'formats':
                        fmt_list = []
                        for format in item:
                            fmt_list.append(format.rpartition('.')[2].lower())
                        item = ', '.join(fmt_list)
                    elif field == 'authors':
                        item = authors_to_string(item)
                    elif field == 'tags':
                        item = ', '.join(item)
                    elif field == 'isbn':
                        # Could be 9, 10 or 13 digits, with hyphens, possibly ending in 'X'
                        item = '%s' % re.sub(r'[^\dX-]', '', item)
                    elif fm.get(field, {}).get('datatype') == 'datetime':
                        item = isoformat(item, as_utc=False)
                    elif field == 'comments':
                        item = item.replace('\r\n', ' ')
                        item = item.replace('\n', ' ')
                    elif fm.get(field, {}).get('datatype', None) == 'rating' and item:
                        # Ratings are stored doubled (0-10); display as 0-5
                        item = '%.2g' % (item / 2)
                    # Convert HTML to markdown text
                    if isinstance(item, str):
                        opening_tag = re.search(r'<(\w+)( |>)', item)
                        if opening_tag:
                            closing_tag = re.search(r'<\/%s>$' % opening_tag.group(1), item)
                            if closing_tag:
                                item = html2text(item)
                    # CSV escaping: double any embedded double quotes
                    outstr.append('"%s"' % str(item).replace('"', '""'))
                outfile.write(','.join(outstr) + '\n')
            outfile.close()
        elif self.fmt == 'xml':
            from lxml.builder import E
            if getattr(opts, 'catalog_title', None):
                root = E.calibredb(title=opts.catalog_title)
            else:
                root = E.calibredb()
            for r in data:
                try:
                    record = E.record()
                    root.append(record)
                    for field in fields:
                        if field.startswith('#'):
                            val = db.get_field(r['id'], field, index_is_id=True)
                            if not isinstance(val, str):
                                val = str(val)
                            # '#' is not a valid XML tag character
                            item = getattr(E, field.replace('#', '_'))(val)
                            record.append(item)
                    for field in ('id', 'uuid', 'publisher', 'rating', 'size',
                                  'isbn', 'ondevice', 'identifiers'):
                        if field in fields:
                            val = r[field]
                            if not val:
                                continue
                            if not isinstance(val, (bytes, str)):
                                if (fm.get(field, {}).get('datatype', None) ==
                                        'rating' and val):
                                    val = '%.2g' % (val / 2)
                                val = str(val)
                            item = getattr(E, field)(val)
                            record.append(item)
                    if 'title' in fields:
                        title = E.title(r['title'], sort=r['sort'])
                        record.append(title)
                    if 'authors' in fields:
                        aus = E.authors(sort=r['author_sort'])
                        for au in r['authors']:
                            aus.append(E.author(au))
                        record.append(aus)
                    for field in ('timestamp', 'pubdate'):
                        if field in fields:
                            record.append(getattr(E, field)(isoformat(r[field], as_utc=False)))
                    if 'tags' in fields and r['tags']:
                        tags = E.tags()
                        for tag in r['tags']:
                            tags.append(E.tag(tag))
                        record.append(tags)
                    if 'comments' in fields and r['comments']:
                        record.append(E.comments(r['comments']))
                    if 'series' in fields and r['series']:
                        record.append(E.series(r['series'],
                            index=str(r['series_index'])))
                    if 'languages' in fields and r['languages']:
                        record.append(E.languages(r['languages']))
                    if 'cover' in fields and r['cover']:
                        record.append(E.cover(r['cover'].replace(os.sep, '/')))
                    if 'formats' in fields and r['formats']:
                        fmt = E.formats()
                        for f in r['formats']:
                            fmt.append(E.format(f.replace(os.sep, '/')))
                        record.append(fmt)
                    if 'library_name' in fields:
                        record.append(E.library_name(current_library))
                except Exception as e:
                    raise Exception('Failed to convert {} to XML with error: {}'.format(r['title'], e)) from e
            with open(path_to_output, 'wb') as f:
                f.write(etree.tostring(root, encoding='utf-8',
                    xml_declaration=True, pretty_print=True))
| 10,513 | Python | .py | 206 | 32.490291 | 110 | 0.463015 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,708 | epub_mobi_builder.py | kovidgoyal_calibre/src/calibre/library/catalogs/epub_mobi_builder.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2010, Greg Riker
import datetime
import os
import platform
import re
import shutil
import time
import unicodedata
import zlib
from copy import deepcopy
from xml.sax.saxutils import escape
from lxml import etree
from calibre import as_unicode, force_unicode, isbytestring, prepare_string_for_xml, replace_entities, strftime, xml_replace_entities
from calibre.constants import cache_dir, ismacos
from calibre.customize.conversion import DummyReporter
from calibre.customize.ui import output_profiles
from calibre.ebooks.BeautifulSoup import BeautifulSoup, NavigableString, prettify
from calibre.ebooks.metadata import author_to_author_sort
from calibre.ebooks.oeb.polish.pretty import pretty_opf, pretty_xml_tree
from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException, InvalidGenresSourceFieldException
from calibre.library.comments import comments_to_html
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.date import as_local_time, format_date, is_date_undefined, utcfromtimestamp
from calibre.utils.date import now as nowf
from calibre.utils.filenames import ascii_text, shorten_components_to
from calibre.utils.formatter import TemplateFormatter
from calibre.utils.icu import capitalize, collation_order, sort_key
from calibre.utils.icu import title_case as icu_title
from calibre.utils.icu import upper as icu_upper
from calibre.utils.localization import _, get_lang, lang_as_iso639_1, ngettext
from calibre.utils.resources import get_image_path as I
from calibre.utils.resources import get_path as P
from calibre.utils.xml_parse import safe_xml_fromstring
from calibre.utils.zipfile import ZipFile
from polyglot.builtins import iteritems
NBSP = '\u00a0'
def makeelement(tag_name, parent, **attrs):
ans = parent.makeelement(tag_name)
for k, v in attrs.items():
k = k.replace('_', '-').rstrip('-')
ans.set(k, str(v))
parent.append(ans)
ans.tail = '\n'
return ans
class Formatter(TemplateFormatter):
    '''Template formatter that resolves field names against self.book.'''

    def get_value(self, key, args, kwargs):
        if not key:
            return ''
        if key in kwargs:
            return kwargs[key]
        if key in self.book.all_field_keys():
            return self.book.format_field(key, series_with_index=False)[1]
        raise Exception(_('column not in book: ') + key)
class CatalogBuilder:
'''
Generates catalog source files from calibre database
Flow of control:
gui2.actions.catalog:generate_catalog()
gui2.tools:generate_catalog() or db.cli.cmd_catalog
called from gui2.convert.gui_conversion:gui_catalog()
catalog = Catalog(notification=Reporter())
catalog.build_sources()
Options managed in gui2.catalog.catalog_epub_mobi.py
Turned off fetch_bookmarks as of 0.8.70
self.generate_recently_read = True if (_opts.generate_recently_added and
_opts.connected_kindle and
self.generate_for_kindle_mobi) else False
Does not work with AZW3, interferes with new prefix handling
'''
DEBUG = False
# A single number creates 'Last x days' only.
# Multiple numbers create 'Last x days', 'x to y days ago' ...
# e.g, [7,15,30,60] or [30]
# [] = No date ranges added
DATE_RANGE = [30]
# Text used in generated catalog for title section with other-than-ASCII leading letter
SYMBOLS = _('Symbols')
# basename output file basename
# creator dc:creator in OPF metadata
# description_clip limits size of NCX descriptions (Kindle only)
# includeSources Used in filter_excluded_genres to skip tags like '[SPL]'
# notification Used to check for cancel, report progress
# stylesheet CSS stylesheet
# title dc:title in OPF metadata, NCX periodical
# verbosity level of diagnostic printout
''' device-specific symbol (default empty star) '''
    @property
    def SYMBOL_EMPTY_RATING(self):
        # Delegate to the output profile so each device family supplies its
        # own empty-star character.
        return self.output_profile.empty_ratings_char
''' device-specific symbol (default filled star) '''
    @property
    def SYMBOL_FULL_RATING(self):
        # Delegate to the output profile so each device family supplies its
        # own filled-star character.
        return self.output_profile.ratings_char
''' device-specific symbol for reading progress '''
@property
def SYMBOL_PROGRESS_READ(self):
psr = '+'
if self.generate_for_kindle_mobi:
psr = '▪'
return psr
''' device-specific symbol for reading progress '''
@property
def SYMBOL_PROGRESS_UNREAD(self):
psu = '-'
if self.generate_for_kindle_mobi:
psu = '▫'
return psu
''' device-specific symbol for reading progress '''
    @property
    def SYMBOL_READING(self):
        # Kindle/MOBI gets a right-pointing triangle; other devices get a
        # non-breaking space placeholder.
        if self.generate_for_kindle_mobi:
            return '▷'
        else:
            return NBSP
    def __init__(self, db, _opts, plugin,
                    report_progress=DummyReporter(),
                    stylesheet="content/stylesheet.css",
                    init_resources=True):
        '''Set up all builder state, fetch the books to catalog and compute
        build parameters (steps, thumbnail size, caches).

        :param db: calibre database instance
        :param _opts: catalog generation options
        :param plugin: owning catalog plugin
        :param report_progress: progress/cancel callback
        :param stylesheet: CSS path relative to the catalog directory
        :param init_resources: when True, copy static resources immediately
        '''
        self.formatter = Formatter()
        self.db = db
        self.opts = _opts
        self.plugin = plugin
        self.reporter = report_progress
        self.stylesheet = stylesheet
        self.cache_dir = os.path.join(cache_dir(), 'catalog')
        self.catalog_path = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='')
        self.content_dir = os.path.join(self.catalog_path, "content")
        self.excluded_tags = self.get_excluded_tags()
        # Kindle handling differs between MOBI and AZW3 outputs
        self.generate_for_kindle_azw3 = True if (_opts.fmt == 'azw3' and
                                              _opts.output_profile and
                                              _opts.output_profile.startswith("kindle")) else False
        self.generate_for_kindle_mobi = True if (_opts.fmt == 'mobi' and
                                              _opts.output_profile and
                                              _opts.output_profile.startswith("kindle")) else False
        self.all_series = set()
        self.authors = None
        self.bookmarked_books = None
        self.bookmarked_books_by_date_read = None
        self.books_by_author = None
        self.books_by_date_range = None
        self.books_by_description = []
        self.books_by_month = None
        self.books_by_series = None
        self.books_by_title = None
        self.books_by_title_no_series_prefix = None
        self.books_to_catalog = None
        self.current_step = 0.0
        self.error = []
        self.generate_recently_read = False
        self.genres = []
        # Genre tags are filtered so the resulting filenames fit path limits
        self.genre_tags_dict = \
            self.filter_genre_tags(max_len=245 - len("%s/Genre_.html" % self.content_dir)) \
            if self.opts.generate_genres else None
        self.html_filelist_1 = []
        self.html_filelist_2 = []
        self.individual_authors = None
        self.merge_comments_rule = dict(zip(['field', 'position', 'hr'],
                                            _opts.merge_comments_rule.split(':')))
        self.ncx_root = None
        self.output_profile = self.get_output_profile(_opts)
        self.play_order = 1
        self.prefix_rules = self.get_prefix_rules()
        self.progress_int = 0.0
        self.progress_string = ''
        self.thumb_height = 0
        self.thumb_width = 0
        self.thumbs = None
        self.thumbs_path = os.path.join(self.cache_dir, "thumbs.zip")
        self.total_steps = 6.0
        self.use_series_prefix_in_titles_section = False
        # These calls have side effects and must run in this order
        self.dump_custom_fields()
        self.books_to_catalog = self.fetch_books_to_catalog()
        self.compute_total_steps()
        self.calculate_thumbnail_dimensions()
        self.confirm_thumbs_archive()
        self.load_section_templates()
        if init_resources:
            self.copy_catalog_resources()
""" key() functions """
def _kf_author_to_author_sort(self, author):
""" Compute author_sort value from author
Tokenize author string, return capitalized string with last token first
Args:
author (str): author, e.g. 'John Smith'
Return:
(str): 'Smith, john'
"""
tokens = author.split()
tokens = tokens[-1:] + tokens[:-1]
if len(tokens) > 1:
tokens[0] += ','
return ' '.join(tokens).capitalize()
    def _kf_books_by_author_sorter_author(self, book):
        """ Generate book sort key with computed author_sort.

        Generate a sort key of computed author_sort, title. Used to look for
        author_sort mismatches.
        Twiddle included to force series to sort after non-series books.
         'Smith, john Star Wars'
         'Smith, john ~Star Wars 0001.0000'

        Args:
         book (dict): book metadata

        Return:
         (str): sort key
        """
        if not book['series']:
            key = '{} {}'.format(self._kf_author_to_author_sort(book['author']),
                                 capitalize(book['title_sort']))
        else:
            # Zero-pad the integer part and keep the fraction so indices like
            # 1.5 sort between 1 and 2
            index = book['series_index']
            integer = int(index)
            fraction = index - integer
            series_index = '%04d%s' % (integer, str('%0.4f' % fraction).lstrip('0'))
            key = '{} ~{} {}'.format(self._kf_author_to_author_sort(book['author']),
                                     self.generate_sort_title(book['series']),
                                     series_index)
        return key
    def _kf_books_by_author_sorter_author_sort(self, book, longest_author_sort=60):
        """ Generate book sort key with supplied author_sort.

        Generate a sort key of author_sort, title.
        Bang, tilde included to force series to sort after non-series books.

        Args:
         book (dict): book metadata
         longest_author_sort (int): field width used to left-pad author_sort
             so keys compare column-aligned

        Return:
         (str): sort key
        """
        if not book['series']:
            # The %-interpolation builds a .format spec like '{:<60}!{!s}'
            fs = '{:<%d}!{!s}' % longest_author_sort
            key = fs.format(capitalize(book['author_sort']),
                            capitalize(book['title_sort']))
        else:
            index = book['series_index']
            integer = int(index)
            fraction = index - integer
            series_index = '%04d%s' % (integer, str('%0.4f' % fraction).lstrip('0'))
            fs = '{:<%d}~{!s}{!s}' % longest_author_sort
            key = fs.format(capitalize(book['author_sort']),
                            self.generate_sort_title(book['series']),
                            series_index)
        return key
def _kf_books_by_series_sorter(self, book):
index = book['series_index']
integer = int(index)
fraction = index - integer
series_index = '%04d%s' % (integer, str('%0.4f' % fraction).lstrip('0'))
key = '{} {}'.format(self.generate_sort_title(book['series']),
series_index)
return key
""" Methods """
    def build_sources(self):
        """ Generate catalog source files.

        Assemble OPF, HTML and NCX files reflecting catalog options.
        Generated source is OEB compliant.
        Called from gui2.convert.gui_conversion:gui_catalog()

        Args:

        Exceptions:
         AuthorSortMismatchException
         EmptyCatalogException

        Results:
         error: problems reported during build
        """
        # Data collection phase
        self.fetch_books_by_title()
        self.fetch_books_by_author()
        self.fetch_bookmarks()
        # HTML generation phase, one section per enabled option
        if self.opts.generate_descriptions:
            self.generate_thumbnails()
            self.generate_html_descriptions()
        if self.opts.generate_authors:
            self.generate_html_by_author()
        if self.opts.generate_titles:
            self.generate_html_by_title()
        if self.opts.generate_series:
            self.generate_html_by_series()
        if self.opts.generate_genres:
            self.generate_html_by_genres()
            # If this is the only Section, and there are no genres, bail
            if self.opts.section_list == ['Genres'] and not self.genres:
                error_msg = _("No genres to catalog.\n")
                if not self.opts.cli_environment:
                    error_msg += _("Check 'Excluded genres' regex in the E-book options.\n")
                self.opts.log.error(error_msg)
                self.error.append(_('No books available to catalog'))
                self.error.append(error_msg)
                raise EmptyCatalogException("No genres to catalog")
        if self.opts.generate_recently_added:
            self.generate_html_by_date_added()
            if self.generate_recently_read:
                self.generate_html_by_date_read()
        # OPF/NCX generation phase, mirroring the HTML sections above
        self.generate_opf()
        self.generate_ncx_header()
        if self.opts.generate_authors:
            self.generate_ncx_by_author(_("Authors"))
        if self.opts.generate_titles:
            self.generate_ncx_by_title(_("Titles"))
        if self.opts.generate_series:
            self.generate_ncx_by_series(ngettext('Series', 'Series', 2))
        if self.opts.generate_genres:
            self.generate_ncx_by_genre(_("Genres"))
        if self.opts.generate_recently_added:
            self.generate_ncx_by_date_added(_("Recently Added"))
            if self.generate_recently_read:
                self.generate_ncx_by_date_read(_("Recently Read"))
        if self.opts.generate_descriptions:
            self.generate_ncx_descriptions(_("Descriptions"))
        self.write_ncx()
    def calculate_thumbnail_dimensions(self):
        """ Calculate thumb dimensions based on device DPI.

        Using the specified output profile, calculate thumb_width
        in pixels, then set height to width * 1.33. Special-case for
        Kindle/MOBI, as rendering off by 2.

        Inputs:
         opts.thumb_width (str|float): specified thumb_width
         opts.output_profile.dpi (int): device DPI

        Outputs:
         thumb_width (float): calculated thumb_width
         thumb_height (float): calculated thumb_height

        NOTE(review): if no profile short_name matches, thumb dims stay 0 and
        the verbose log below references the last profile iterated — TODO
        confirm a match is always found for valid opts.output_profile.
        """
        for x in output_profiles():
            if x.short_name == self.opts.output_profile:
                # aspect ratio: 3:4
                self.thumb_width = x.dpi * float(self.opts.thumb_width)
                self.thumb_height = self.thumb_width * 1.33
                if 'kindle' in x.short_name and self.opts.fmt == 'mobi':
                    # Kindle DPI appears to be off by a factor of 2
                    self.thumb_width = self.thumb_width // 2
                    self.thumb_height = self.thumb_height // 2
                break
        if self.opts.verbose:
            self.opts.log(" Thumbnails:")
            self.opts.log(" DPI = %d; thumbnail dimensions: %d x %d" %
                          (x.dpi, self.thumb_width, self.thumb_height))
def compute_total_steps(self):
""" Calculate number of build steps to generate catalog.
Calculate total number of build steps based on enabled sections.
Inputs:
opts.generate_* (bool): enabled sections
Outputs:
total_steps (int): updated
"""
# Tweak build steps based on optional sections: 1 call for HTML, 1 for NCX
incremental_jobs = 0
if self.opts.generate_authors:
incremental_jobs += 2
if self.opts.generate_titles:
incremental_jobs += 2
if self.opts.generate_recently_added:
incremental_jobs += 2
if self.generate_recently_read:
incremental_jobs += 2
if self.opts.generate_series:
incremental_jobs += 2
if self.opts.generate_descriptions:
# +1 thumbs
incremental_jobs += 3
self.total_steps += incremental_jobs
    def confirm_thumbs_archive(self):
        """ Validate thumbs archive.

        Confirm existence of thumbs archive, or create if absent.
        Confirm stored thumb_width matches current opts.thumb_width,
        or invalidate archive.
        generate_thumbnails() writes current thumb_width to archive.

        Inputs:
         opts.thumb_width (float): requested thumb_width
         thumbs_path (file): existing thumbs archive

        Outputs:
         thumbs_path (file): new (non_existent or invalidated), or
         validated existing thumbs archive
        """
        if self.opts.generate_descriptions:
            if not os.path.exists(self.cache_dir):
                self.opts.log.info(" creating new thumb cache '%s'" % self.cache_dir)
                os.makedirs(self.cache_dir)
            if not os.path.exists(self.thumbs_path):
                self.opts.log.info(' creating thumbnail archive, thumb_width: %1.2f"' %
                                   float(self.opts.thumb_width))
                with ZipFile(self.thumbs_path, mode='w') as zfw:
                    zfw.writestr("Catalog Thumbs Archive", '')
            else:
                try:
                    with ZipFile(self.thumbs_path, mode='r') as zfr:
                        try:
                            cached_thumb_width = zfr.read('thumb_width')
                        except:
                            # Archive has no recorded width; treat as stale
                            cached_thumb_width = "-1"
                except:
                    # Archive unreadable/corrupt: discard it (best-effort cache)
                    os.remove(self.thumbs_path)
                    cached_thumb_width = '-1'
                if float(cached_thumb_width) != float(self.opts.thumb_width):
                    self.opts.log.warning(" invalidating cache at '%s'" % self.thumbs_path)
                    self.opts.log.warning(' thumb_width changed: %1.2f" => %1.2f"' %
                                          (float(cached_thumb_width), float(self.opts.thumb_width)))
                    with ZipFile(self.thumbs_path, mode='w') as zfw:
                        zfw.writestr("Catalog Thumbs Archive", '')
                else:
                    self.opts.log.info(' existing thumb cache at %s, cached_thumb_width: %1.2f"' %
                                       (self.thumbs_path, float(cached_thumb_width)))
    def convert_html_entities(self, s):
        """ Convert string containing HTML entities to its unicode equivalent.

        Convert a string containing HTML entities of the form '&amp;' or '&97;'
        to a normalized unicode string. E.g., 'AT&amp;T' converted to 'AT&T'.
        Thin wrapper around calibre's replace_entities().

        Args:
         s (str): str containing one or more HTML entities.

        Return:
         s (str): converted string
        """
        return replace_entities(s)
    def copy_catalog_resources(self):
        """ Copy resources from calibre source to self.catalog_path.

        Copy basic resources - default cover, stylesheet, and masthead (Kindle only)
        from calibre resource directory to self.catalog_path, a temporary directory
        for constructing the catalog. Files stored to specified destination dirs.

        Inputs:
         files_to_copy (files): resource files from calibre resources, which may be overridden locally

        Output:
         resource files copied to self.catalog_path/*
        """
        self.create_catalog_directory_structure()
        catalog_resources = P("catalog")
        # (subdir, filename) pairs; '' means the catalog root
        files_to_copy = [('', 'DefaultCover.jpg'),
                         ('content', 'stylesheet.css')]
        if self.generate_for_kindle_mobi:
            files_to_copy.extend([('images', 'mastheadImage.gif')])
        for file in files_to_copy:
            if file[0] == '':
                shutil.copy(os.path.join(catalog_resources, file[1]),
                            self.catalog_path)
            else:
                shutil.copy(os.path.join(catalog_resources, file[1]),
                            os.path.join(self.catalog_path, file[0]))
        if self.generate_for_kindle_mobi:
            try:
                self.generate_masthead_image(os.path.join(self.catalog_path,
                                             'images/mastheadImage.gif'))
            except:
                # Best effort: fall back to the stock masthead copied above
                pass
def create_catalog_directory_structure(self):
    """ Create subdirs in catalog output dir.

    Ensure self.catalog_path exists along with its 'content' and
    'images' subdirectories, creating any that are missing.

    Inputs:
        catalog_path (path): path to catalog output dir

    Output:
        /content, /images created
    """
    # exist_ok=True makes each call idempotent and removes the racy
    # isdir-then-makedirs pattern; intermediate parents are created too.
    os.makedirs(self.catalog_path, exist_ok=True)
    os.makedirs(os.path.join(self.catalog_path, 'content'), exist_ok=True)
    os.makedirs(os.path.join(self.catalog_path, 'images'), exist_ok=True)
def detect_author_sort_mismatches(self, books_to_test):
    """ Detect author_sort mismatches.

    Sort by author, look for inconsistencies in author_sort among
    similarly-named authors. Fatal for MOBI generation, a mere
    annoyance for EPUB.

    Inputs:
        books_by_author (list): list of books to test, possibly unsorted

    Output:
        (none)

    Exceptions:
        AuthorSortMismatchException: author_sort mismatch detected
    """
    books_by_author = sorted(list(books_to_test), key=self._kf_books_by_author_sorter_author)

    # Pairs of (friendly author, author_sort) in author order
    authors = [(record['author'], record['author_sort']) for record in books_by_author]
    current_author = authors[0]
    for (i, author) in enumerate(authors):
        # Compare each author tuple to the previous distinct one; a same
        # friendly name with a different sort value is the mismatch case
        if author != current_author and i:
            if author[0] == current_author[0]:
                if self.opts.fmt == 'mobi':
                    # Exit if building MOBI
                    error_msg = _("<p>Inconsistent author sort values for author<br/>" +
                                  f"'{author[0]!s}':</p>" +
                                  f"<p><center><b>{author[1]!s}</b> != <b>{current_author[1]!s}</b></center></p>" +
                                  "<p>Unable to build MOBI catalog.<br/>" +
                                  f"Select all books by '{author[0]!s}', apply correct Author Sort value in Edit Metadata dialog, then rebuild the catalog.\n<p>")  # noqa

                    self.opts.log.warn('\n*** Metadata error ***')
                    self.opts.log.warn(error_msg)

                    self.error.append('Author sort mismatch')
                    self.error.append(error_msg)
                    raise AuthorSortMismatchException("author_sort mismatch while building MOBI")
                else:
                    # Warning if building non-MOBI
                    if not self.error:
                        self.error.append('Author sort mismatch')

                    error_msg = _(f"Warning: Inconsistent author sort values for author '{author[0]!s}':\n" +
                                  f" {author[1]!s} != {current_author[1]!s}\n")
                    self.opts.log.warn('\n*** Metadata warning ***')
                    self.opts.log.warn(error_msg)
                    self.error.append(error_msg)
                    # Skip updating current_author so subsequent books by
                    # the same author are compared against the first sort value
                    continue

            current_author = author
def discover_prefix(self, record):
    """ Return a prefix for record.

    Evaluate record against self.prefix_rules. Return assigned prefix
    if matched.

    Args:
        record (dict): book metadata

    Return:
        prefix (str): matched a prefix_rule
        None: no match
    """
    def _log_prefix_rule_match_info(rule, record, matched):
        # Debug logging helper for a successful custom-field rule match
        self.opts.log.info(" %s '%s' by %s (%s: '%s' contains '%s')" %
                           (rule['prefix'], record['title'],
                            record['authors'][0], rule['name'],
                            self.db.metadata_for_field(rule['field'])['name'],
                            matched))

    # Compare the record to each rule looking for a match
    for rule in self.prefix_rules:
        # Literal comparison for Tags field
        if rule['field'].lower() == 'tags' or rule['field'] == _('Tags'):
            # Case-insensitive exact membership test against the book's tags
            if rule['pattern'].lower() in tuple(map(str.lower, record['tags'])):
                if self.DEBUG and self.opts.verbose:
                    self.opts.log.info(" %s '%s' by %s (%s: Tags includes '%s')" %
                                       (rule['prefix'], record['title'],
                                        record['authors'][0], rule['name'],
                                        rule['pattern']))
                return rule['prefix']

        # Regex match for custom field
        elif rule['field'].startswith('#'):
            field_contents = self.db.get_field(record['id'],
                                               rule['field'],
                                               index_is_id=True)

            # Treat empty string the same as an unset field
            if field_contents == '':
                field_contents = None

            # Handle condition where bools_are_tristate is False,
            # field is a bool and contents is None, which is displayed as No
            if (not self.db.new_api.pref('bools_are_tristate') and
                    self.db.metadata_for_field(rule['field'])['datatype'] == 'bool' and
                    field_contents is None):
                field_contents = _('False')

            if field_contents is not None:
                if self.db.metadata_for_field(rule['field'])['datatype'] == 'bool':
                    # For Yes/No fields, need to translate field_contents to
                    # locale version
                    field_contents = _(repr(field_contents))
                try:
                    if re.search(rule['pattern'], str(field_contents),
                                 re.IGNORECASE) is not None:
                        if self.DEBUG:
                            _log_prefix_rule_match_info(rule, record, field_contents)
                        return rule['prefix']
                except:
                    # Malformed user-supplied pattern: report and fall
                    # through to the next rule
                    if self.opts.verbose:
                        self.opts.log.error("pattern failed to compile: %s" % rule['pattern'])
                    pass
            elif field_contents is None and rule['pattern'] == 'None':
                # Rule explicitly matches unset fields
                if self.DEBUG:
                    _log_prefix_rule_match_info(rule, record, field_contents)
                return rule['prefix']

    return None
def dump_custom_fields(self):
    """
    Dump custom field mappings for debugging
    """
    # Only emit the dump in verbose mode.
    if not self.opts.verbose:
        return
    self.opts.log.info(" Custom fields:")
    for field_key in self.db.custom_field_keys():
        self.opts.log.info(" %-20s %-20s %s" %
                           (field_key,
                            "'%s'" % self.db.metadata_for_field(field_key)['name'],
                            self.db.metadata_for_field(field_key)['datatype']))
def establish_equivalencies(self, item_list, key=None):
    """ Return icu equivalent sort letter.

    Returns base sort letter for accented characters. Code provided by
    chaley, modified to force unaccented base letters for A, O & U when
    an accented version would otherwise be returned.

    Args:
        item_list (list): list of items, sorted by icu_sort
        key: optional dict key; when given, items are dicts and the sort
            value is item[key], otherwise the item itself is compared

    Return:
        cl_list (list): list of equivalent leading chars, 1:1 correspondence to item_list
    """
    # Hack to force the cataloged leading letter to be
    # an unadorned character if the accented version sorts before the unaccented
    exceptions = {
        'Ä': 'A',
        'Ö': 'O',
        'Ü': 'U'
    }

    if key is not None:
        sort_field = key

    cl_list = [None] * len(item_list)
    last_ordnum = 0

    for idx, item in enumerate(item_list):
        if key:
            c = item[sort_field]
        else:
            c = item

        ordnum, ordlen = collation_order(c)
        if ismacos and platform.mac_ver()[0] < '10.8':
            # Hackhackhackhackhack
            # icu returns bogus results with curly apostrophes, maybe others under OS X 10.6.x
            # When we see the magic combo of 0/-1 for ordnum/ordlen, special case the logic
            last_c = ''
            if ordnum == 0 and ordlen == -1:
                if icu_upper(c[0]) != last_c:
                    last_c = icu_upper(c[0])
                    if last_c in exceptions:
                        last_c = exceptions[str(last_c)]
                    last_ordnum = ordnum
                cl_list[idx] = last_c
            else:
                if last_ordnum != ordnum:
                    last_c = icu_upper(c[0:ordlen])
                    if last_c in exceptions:
                        last_c = exceptions[str(last_c)]
                    last_ordnum = ordnum
                else:
                    # Same collation bucket as the previous item: reuse its letter
                    last_c = cl_list[idx-1]
                cl_list[idx] = last_c

        else:
            if last_ordnum != ordnum:
                last_c = icu_upper(c[0:ordlen])
                if last_c in exceptions:
                    last_c = exceptions[str(last_c)]
                last_ordnum = ordnum
            else:
                # Same collation bucket as the previous item: reuse its letter
                last_c = cl_list[idx-1]
            cl_list[idx] = last_c

    if self.DEBUG and self.opts.verbose:
        print(" establish_equivalencies():")
        if key:
            for idx, item in enumerate(item_list):
                print(f" {cl_list[idx]} {item[sort_field]}")
        else:
            # Fix: this branch previously printed only the final item
            # because the print statement sat outside any loop.
            for idx, item in enumerate(item_list):
                print(f" {cl_list[idx]} {item}")
    return cl_list
def fetch_books_by_author(self):
    """ Generate a list of books sorted by author.

    For books with multiple authors, relist book with additional authors.
    Sort the database by author. Report author_sort inconsistencies as warning when
    building EPUB or MOBI, error when building MOBI. Collect a list of unique authors
    to self.authors.

    Inputs:
        self.books_to_catalog (list): database, sorted by title

    Outputs:
        books_by_author: database, sorted by author
        authors: list of book authors. Two credited authors are considered an
            individual entity
        error: author_sort mismatches

    Return:
        True: no errors
        False: author_sort mismatch detected while building MOBI
    """
    self.update_progress_full_step(_("Sorting database"))

    books_by_author = list(self.books_to_catalog)
    # Raises AuthorSortMismatchException for MOBI builds on mismatch
    self.detect_author_sort_mismatches(books_by_author)

    # Assumes books_by_title already populated
    # init books_by_description before relisting multiple authors
    if self.opts.generate_descriptions:
        books_by_description = list(books_by_author) if self.opts.sort_descriptions_by_author \
            else list(self.books_by_title)

    if self.opts.cross_reference_authors:
        books_by_author = self.relist_multiple_authors(books_by_author)

    # books_by_author = sorted(list(books_by_author), key=self._kf_books_by_author_sorter_author)

    # Determine the longest author_sort length before sorting
    asl = [i['author_sort'] for i in books_by_author]
    las = max(asl, key=len)

    if self.opts.generate_descriptions:
        self.books_by_description = sorted(books_by_description,
                                           key=lambda x: sort_key(self._kf_books_by_author_sorter_author_sort(x, len(las))))

    books_by_author = sorted(books_by_author,
                             key=lambda x: sort_key(self._kf_books_by_author_sorter_author_sort(x, len(las))))

    if self.DEBUG and self.opts.verbose:
        tl = [i['title'] for i in books_by_author]
        lt = max(tl, key=len)
        fs = '{:<6}{:<%d} {:<%d} {!s}' % (len(lt), len(las))
        print(fs.format('', 'Title', 'Author', 'Series'))
        for i in books_by_author:
            print(fs.format('', i['title'], i['author_sort'], i['series']))

    # Build the unique_authors set from existing data
    authors = [(record['author'], capitalize(record['author_sort'])) for record in books_by_author]

    # authors[] contains a list of all book authors, with multiple entries for multiple books by author
    # authors[]: (([0]:friendly [1]:sort))
    # unique_authors[]: (([0]:friendly [1]:sort [2]:book_count))
    books_by_current_author = 0
    current_author = authors[0]
    multiple_authors = False
    unique_authors = []
    individual_authors = set()
    for (i, author) in enumerate(authors):
        if author != current_author:
            # Note that current_author and author are tuples: (friendly, sort)
            multiple_authors = True

            # New author, save the previous author/sort/count
            unique_authors.append((current_author[0], icu_title(current_author[1]),
                                   books_by_current_author))
            current_author = author
            books_by_current_author = 1
        elif i == 0 and len(authors) == 1:
            # Allow for single-book lists
            unique_authors.append((current_author[0], icu_title(current_author[1]),
                                   books_by_current_author))
        else:
            books_by_current_author += 1
    else:
        # for-else: runs after the loop completes
        # Add final author to list or single-author dataset
        if (current_author == author and len(authors) > 1) or not multiple_authors:
            unique_authors.append((current_author[0], icu_title(current_author[1]),
                                   books_by_current_author))

    self.authors = list(unique_authors)
    self.books_by_author = books_by_author

    # Split joint credits ("A & B") into individual author names
    for ua in unique_authors:
        for ia in ua[0].replace(' & ', ' & ').split(' & '):
            individual_authors.add(ia)
    self.individual_authors = list(individual_authors)

    if self.DEBUG and self.opts.verbose:
        self.opts.log.info("\nfetch_books_by_author(): %d unique authors" % len(unique_authors))
        for author in unique_authors:
            self.opts.log.info((" %-50s %-25s %2d" % (author[0][0:45], author[1][0:20],
                                                      author[2])).encode('utf-8'))
        self.opts.log.info("\nfetch_books_by_author(): %d individual authors" % len(individual_authors))
        for author in sorted(individual_authors):
            self.opts.log.info("%s" % author)

    return True
def fetch_books_by_title(self):
    """ Generate a list of books sorted by title.

    Sort self.books_to_catalog by title_sort into self.books_by_title.

    Inputs:
        self.books_to_catalog (list): database

    Outputs:
        books_by_title: database, sorted by title

    Raises:
        EmptyCatalogException: no books survived the exclusion rules
    """
    self.update_progress_full_step(_("Sorting titles"))

    # Guard: nothing to catalog is a fatal condition
    if not self.books_to_catalog:
        error_msg = _("No books to catalog.\nCheck 'Excluded books' rules in the E-book options.\n")
        self.opts.log.error('*** ' + error_msg + ' ***')
        self.error.append(_('No books available to include in catalog'))
        self.error.append(error_msg)
        raise EmptyCatalogException(error_msg)

    # Re-sort based on title_sort
    self.books_by_title = sorted(self.books_to_catalog,
                                 key=lambda entry: sort_key(entry['title_sort'].upper()))

    if self.DEBUG and self.opts.verbose:
        self.opts.log.info("fetch_books_by_title(): %d books" % len(self.books_by_title))
        self.opts.log.info(" %-40s %-40s" % ('title', 'title_sort'))
        for entry in self.books_by_title:
            self.opts.log.info((" %-40s %-40s" % (entry['title'][0:40],
                                                  entry['title_sort'][0:40])).encode('utf-8'))
def fetch_books_to_catalog(self):
    """ Populate self.books_to_catalog from database

    Create self.books_to_catalog from filtered database.
    Keys:
        authors             massaged
        author_sort         record['author_sort'] or computed
        cover               massaged record['cover']
        date                massaged record['pubdate']
        description         massaged record['comments'] + merge_comments
        id                  record['id']
        formats             massaged record['formats']
        notes               from opts.header_note_source_field
        prefix              from self.discover_prefix()
        publisher           massaged record['publisher']
        rating              record['rating'] (0 if None)
        series              record['series'] or None
        series_index        record['series_index'] or 0.0
        short_description   truncated description
        tags                filtered record['tags']
        timestamp           record['timestamp']
        title               massaged record['title']
        title_sort          computed from record['title']
        uuid                record['uuid']

    Inputs:
        data (list): filtered list of book metadata dicts

    Outputs:
        (list) books_to_catalog

    Returns:
        True: Successful
        False: Empty data, (check filter restrictions)
    """
    def _populate_title(record):
        ''' populate this_title with massaged metadata '''
        this_title = {}

        this_title['id'] = record['id']
        this_title['uuid'] = record['uuid']

        this_title['title'] = self.convert_html_entities(record['title'])
        if record['series']:
            this_title['series'] = record['series']
            self.all_series.add(this_title['series'])
            this_title['series_index'] = record['series_index']
        else:
            this_title['series'] = None
            this_title['series_index'] = 0.0

        this_title['title_sort'] = self.generate_sort_title(this_title['title'])
        if 'authors' in record:
            this_title['authors'] = record['authors']
            # Synthesize author attribution from authors list
            if record['authors']:
                this_title['author'] = " & ".join(record['authors'])
            else:
                this_title['author'] = _('Unknown')
                this_title['authors'] = [this_title['author']]

        if 'author_sort' in record and record['author_sort'].strip():
            this_title['author_sort'] = record['author_sort']
        else:
            # No stored sort value: derive one from the friendly name
            this_title['author_sort'] = self._kf_author_to_author_sort(this_title['author'])

        if record['publisher']:
            this_title['publisher'] = record['publisher']

        this_title['rating'] = record['rating'] if record['rating'] else 0

        if is_date_undefined(record['pubdate']):
            this_title['date'] = None
        else:
            this_title['date'] = strftime('%B %Y', as_local_time(record['pubdate']).timetuple())

        this_title['timestamp'] = record['timestamp']

        if record['comments']:
            # Strip annotations
            a_offset = record['comments'].find('<div class="user_annotations">')
            ad_offset = record['comments'].find('<hr class="annotations_divider" />')
            if a_offset >= 0:
                record['comments'] = record['comments'][:a_offset]
            if ad_offset >= 0:
                record['comments'] = record['comments'][:ad_offset]

            this_title['description'] = comments_to_html(record['comments'])

            # Create short description
            paras = BeautifulSoup(this_title['description']).findAll('p')
            tokens = []
            for p in paras:
                for token in p.contents:
                    if token.string is not None:
                        tokens.append(token.string)
            this_title['short_description'] = self.generate_short_description(' '.join(tokens), dest="description")
        else:
            this_title['description'] = None
            this_title['short_description'] = None

        # Merge with custom field/value
        if self.merge_comments_rule['field']:
            this_title['description'] = self.merge_comments(this_title)

        if record['cover']:
            this_title['cover'] = record['cover']

        this_title['prefix'] = self.discover_prefix(record)

        this_title['tags'] = []
        if record['tags']:
            this_title['tags'] = self.filter_excluded_genres(record['tags'],
                                                             self.opts.exclude_genre)

        this_title['genres'] = []
        if self.opts.genre_source_field == _('Tags'):
            this_title['genres'] = this_title['tags']
        else:
            # Genres come from a custom column; normalize to a list
            record_genres = self.db.get_field(record['id'],
                                              self.opts.genre_source_field,
                                              index_is_id=True)
            if record_genres:
                if not isinstance(record_genres, list):
                    record_genres = [record_genres]
                this_title['genres'] = self.filter_excluded_genres(record_genres,
                                                                   self.opts.exclude_genre)

        if record['formats']:
            formats = []
            for format in record['formats']:
                formats.append(self.convert_html_entities(format))
            this_title['formats'] = formats

        # Add user notes to be displayed in header
        # Special case handling for datetime fields and lists
        if self.opts.header_note_source_field:
            field_md = self.db.metadata_for_field(self.opts.header_note_source_field)
            notes = self.db.get_field(record['id'],
                                      self.opts.header_note_source_field,
                                      index_is_id=True)
            if notes:
                if field_md['datatype'] == 'text':
                    if isinstance(notes, list):
                        notes = ' · '.join(notes)
                elif field_md['datatype'] == 'datetime':
                    notes = format_date(notes, 'dd MMM yyyy')
                this_title['notes'] = {'source': field_md['name'], 'content': notes}

        return this_title

    # Entry point
    self.opts.sort_by = 'title'

    # Build a search phrase excluding books carrying any excluded tag
    search_phrase = ''
    if self.excluded_tags:
        search_terms = []
        for tag in self.excluded_tags:
            search_terms.append('tags:"=%s"' % tag)
        search_phrase = "not (%s)" % " or ".join(search_terms)

    # If a list of ids are provided, don't use search_text
    if self.opts.ids:
        self.opts.search_text = search_phrase
    else:
        if self.opts.search_text:
            self.opts.search_text += " " + search_phrase
        else:
            self.opts.search_text = search_phrase

    # Fetch the database as a dictionary
    data = self.plugin.search_sort_db(self.db, self.opts)
    data = self.process_exclusions(data)

    if self.DEBUG:
        if self.prefix_rules:
            self.opts.log.info(" Added prefixes (bools_are_tristate: {}):".format(self.db.new_api.pref('bools_are_tristate')))
        else:
            self.opts.log.info(" No added prefixes")

    # Populate this_title{} from data[{},{}]
    titles = []
    for record in data:
        this_title = _populate_title(record)
        titles.append(this_title)
    return titles
def fetch_bookmarks(self):
    """ Interrogate connected Kindle for bookmarks.

    Discover bookmarks associated with books on Kindle downloaded by calibre.
    Used in Descriptions to show reading progress, Last Read section showing date
    last read. Kindle-specific, for AZW, MOBI, TAN and TXT formats.
    Uses the system default save_template specified in
    Preferences|Add/Save|Sending to device, not a customized one specified in
    the Kindle plugin.

    Inputs:
        (): bookmarks from connected Kindle

    Output:
        bookmarked_books (dict): dict of Bookmarks
    """
    from calibre.devices.kindle.bookmark import Bookmark
    from calibre.devices.usbms.device import Device
    from calibre.ebooks.metadata import MetaInformation

    # Format families mapped to their sidecar bookmark file extensions below
    MBP_FORMATS = ['azw', 'mobi', 'prc', 'txt']
    mbp_formats = set(MBP_FORMATS)
    PDR_FORMATS = ['pdf']
    pdr_formats = set(PDR_FORMATS)
    TAN_FORMATS = ['tpz', 'azw1']
    tan_formats = set(TAN_FORMATS)

    class BookmarkDevice(Device):
        # Minimal Device subclass: only needs a save_template so
        # create_upload_path() can compute the on-device book path
        def initialize(self, save_template):
            self._save_template = save_template
            self.SUPPORTS_SUB_DIRS = True

        def save_template(self):
            return self._save_template

    def _resolve_bookmark_paths(storage, path_map):
        # Map each book id to an existing bookmark file on one of the
        # device's storage volumes; drop ids with no bookmark file
        pop_list = []
        book_ext = {}
        for id in path_map:
            file_fmts = set()
            for fmt in path_map[id]['fmts']:
                file_fmts.add(fmt)

            # Choose the sidecar extension from the available book formats
            bookmark_extension = None
            if file_fmts.intersection(tan_formats):
                book_extension = list(file_fmts.intersection(tan_formats))[0]
                bookmark_extension = 'han'
            elif file_fmts.intersection(mbp_formats):
                book_extension = list(file_fmts.intersection(mbp_formats))[0]
                bookmark_extension = 'mbp'
            elif file_fmts.intersection(tan_formats):
                # NOTE(review): unreachable — duplicates the first branch,
                # so 'tan' is never assigned; kept as-is
                book_extension = list(file_fmts.intersection(tan_formats))[0]
                bookmark_extension = 'tan'
            elif file_fmts.intersection(pdr_formats):
                book_extension = list(file_fmts.intersection(pdr_formats))[0]
                bookmark_extension = 'pdr'

            if bookmark_extension:
                for vol in storage:
                    bkmk_path = path_map[id]['path'].replace(os.path.abspath('/<storage>'), vol)
                    bkmk_path = bkmk_path.replace('bookmark', bookmark_extension)
                    if os.path.exists(bkmk_path):
                        path_map[id] = bkmk_path
                        book_ext[id] = book_extension
                        break
                else:
                    # for-else: no volume held the bookmark file
                    pop_list.append(id)
            else:
                pop_list.append(id)

        # Remove non-existent bookmark templates
        for id in pop_list:
            path_map.pop(id)
        return path_map, book_ext

    self.bookmarked_books = {}
    if self.generate_recently_read:
        self.opts.log.info(" Collecting Kindle bookmarks matching catalog entries")

        d = BookmarkDevice(None)
        d.initialize(self.opts.connected_device['save_template'])

        bookmarks = {}
        for book in self.books_to_catalog:
            if 'formats' in book:
                path_map = {}
                id = book['id']
                # Strip the 'Series N: ' prefix added to series titles
                original_title = book['title'][book['title'].find(':') + 2:] if book['series'] \
                    else book['title']
                myMeta = MetaInformation(original_title,
                                         authors=book['authors'])
                myMeta.author_sort = book['author_sort']
                a_path = d.create_upload_path('/<storage>', myMeta, 'x.bookmark', create_dirs=False)
                path_map[id] = dict(path=a_path, fmts=[x.rpartition('.')[2] for x in book['formats']])

                path_map, book_ext = _resolve_bookmark_paths(self.opts.connected_device['storage'], path_map)

                if path_map:
                    bookmark_ext = path_map[id].rpartition('.')[2]
                    myBookmark = Bookmark(path_map[id], id, book_ext[id], bookmark_ext)
                    try:
                        book['percent_read'] = min(float(100 * myBookmark.last_read / myBookmark.book_length), 100)
                    except:
                        # Zero/undefined book length: treat as unread
                        book['percent_read'] = 0
                    # Render progress as a 10-dot bar
                    dots = int((book['percent_read'] + 5) // 10)
                    dot_string = self.SYMBOL_PROGRESS_READ * dots
                    empty_dots = self.SYMBOL_PROGRESS_UNREAD * (10 - dots)
                    book['reading_progress'] = f'{dot_string}{empty_dots}'
                    bookmarks[id] = ((myBookmark, book))

        self.bookmarked_books = bookmarks
def filter_genre_tags(self, max_len):
    """ Remove excluded tags from data set, return normalized genre list.

    Filter all db tags, removing excluded tags supplied in opts.
    Test for multiple tags resolving to same normalized form. Normalized
    tags are flattened to alphanumeric ascii_text.

    Args:
        max_len: maximum length of normalized tag to fit within OS constraints

    Return:
        genre_tags_dict (dict): dict of filtered, normalized tags in data set
    """
    def _format_tag_list(tags, indent=1, line_break=70, header='Tag list'):
        # Pretty-print a collection of tags, wrapped at line_break columns
        def _next_tag(sorted_tags):
            # Yield each tag with a trailing comma except the last
            for (i, tag) in enumerate(sorted_tags):
                if i < len(tags) - 1:
                    yield tag + ", "
                else:
                    yield tag

        ans = '%s%d %s:\n' % (' ' * indent, len(tags), header)
        ans += ' ' * (indent + 1)
        out_str = ''
        sorted_tags = sorted(tags, key=sort_key)
        for tag in _next_tag(sorted_tags):
            out_str += tag
            if len(out_str) >= line_break:
                ans += out_str + '\n'
                out_str = ' ' * (indent + 1)
        return ans + out_str

    def _normalize_tag(tag, max_len):
        """ Generate an XHTML-legal anchor string from tag.

        Parse tag for non-ascii, convert to unicode name.

        Args:
            tags (str): tag name possible containing symbols
            max_len (int): maximum length of tag

        Return:
            normalized (str): unicode names substituted for non-ascii chars,
            clipped to max_len
        """
        normalized = massaged = re.sub(r'\s', '', ascii_text(tag).lower())
        if re.search(r'\W', normalized):
            # Replace each non-word char with its unicode name
            normalized = ''
            for c in massaged:
                if re.search(r'\W', c):
                    normalized += self.generate_unicode_name(c)
                else:
                    normalized += c
        shortened = shorten_components_to(max_len, [normalized])[0]
        return shortened

    # Entry point
    normalized_tags = []
    friendly_tags = []
    excluded_tags = []

    # Fetch all possible genres from source field
    all_genre_tags = []
    if self.opts.genre_source_field == _('Tags'):
        all_genre_tags = self.db.all_tags()
    else:
        # Validate custom field is usable as a genre source
        field_md = self.db.metadata_for_field(self.opts.genre_source_field)
        if field_md is None or field_md['datatype'] not in ['enumeration', 'text']:
            all_custom_fields = self.db.custom_field_keys()
            eligible_custom_fields = []
            for cf in all_custom_fields:
                if self.db.metadata_for_field(cf)['datatype'] in ['enumeration', 'text']:
                    eligible_custom_fields.append(cf)
            self.opts.log.error("Custom genre_source_field must be either:\n"
                                " 'Comma separated text, like tags, shown in the browser',\n"
                                " 'Text, column shown in the tag browser', or\n"
                                " 'Text, but with a fixed set of permitted values'.")
            self.opts.log.error("Eligible custom fields: %s" % ', '.join(eligible_custom_fields))
            raise InvalidGenresSourceFieldException("invalid custom field specified for genre_source_field")
        all_genre_tags = list(self.db.all_custom(self.db.field_metadata.key_to_label(self.opts.genre_source_field)))

    all_genre_tags.sort()

    for tag in all_genre_tags:
        # Explicit exclusion list takes precedence
        if tag in self.excluded_tags:
            excluded_tags.append(tag)
            continue
        try:
            if re.search(self.opts.exclude_genre, tag):
                excluded_tags.append(tag)
                continue
        except:
            # Malformed pattern: log and keep the tag
            self.opts.log.error("\tfilterDbTags(): malformed --exclude-genre regex pattern: %s" % self.opts.exclude_genre)

        if tag == ' ':
            continue

        normalized_tags.append(_normalize_tag(tag, max_len))
        friendly_tags.append(tag)

    genre_tags_dict = dict(zip(friendly_tags, normalized_tags))

    # Test for multiple genres resolving to same normalized form
    normalized_set = set(normalized_tags)
    for normalized in normalized_set:
        if normalized_tags.count(normalized) > 1:
            self.opts.log.warn(" Warning: multiple tags resolving to genre '%s':" % normalized)
            for key in genre_tags_dict:
                if genre_tags_dict[key] == normalized:
                    self.opts.log.warn(" %s" % key)
    if self.opts.verbose:
        self.opts.log.info('%s' % _format_tag_list(genre_tags_dict, header="enabled genres"))
        self.opts.log.info('%s' % _format_tag_list(excluded_tags, header="excluded genres"))

    return genre_tags_dict
def filter_excluded_genres(self, tags, regex):
    """ Remove excluded tags from a tag list

    Run regex against list of tags, remove matching tags. Return filtered list.

    Args:
        tags (list): list of tags
        regex (str): pattern matching tags to exclude

    Return:
        tag_list (list): filtered list of tags; the input list is
        returned unchanged when the pattern is malformed
    """
    tag_list = []
    try:
        for tag in tags:
            tag = self.convert_html_entities(tag)
            # Drop tags matching the exclusion pattern
            if re.search(regex, tag):
                continue
            tag_list.append(tag)
    except re.error:
        # Narrowed from a bare except: only a malformed user-supplied
        # pattern should abort filtering and fall back to the raw input.
        self.opts.log.error("\tfilter_excluded_genres(): malformed --exclude-genre regex pattern: %s" % regex)
        return tags

    return tag_list
def format_ncx_text(self, description, dest=None):
    """ Massage NCX text for Kindle.

    Normalize HTML entities so the text renders properly in Kindle
    table-of-contents entries, then truncate to the length budget for
    *dest*.

    Args:
        description (str): string, possibly with HTML entities
        dest (kwarg): author, title or description

    Return:
        (str): massaged, possibly truncated description, or None when
        the input is blank or no dest was supplied
    """
    # Kindle TOC descriptions won't render certain characters; normalize
    # entities before anything else.
    cleaned = xml_replace_entities(str(description)).strip()
    if not (cleaned and dest):
        return None
    return self.generate_short_description(cleaned, dest=dest)
def insert_prefix(self, soup, parent_tag, pos, prefix_char):
    """ Generate HTML snippet with prefix character.

    Insert a <code> snippet for Kindle, <span> snippet for EPUB.
    Optimized to preserve first-column alignment for MOBI, EPUB.

    Args:
        soup (BeautifulSoup): document being built
        parent_tag (Tag): tag receiving the prefix snippet
        pos (int): insertion position within parent_tag
        prefix_char (str|None): prefix character; None falls back to a
            non-breaking space so column alignment is preserved
    """
    if self.opts.fmt == 'mobi':
        # MOBI: monospaced <code> keeps the first column aligned
        tag = soup.new_tag('code')
    else:
        tag = soup.new_tag('span')
        tag['class'] = 'prefix'
    tag.append(prefix_char or NBSP)
    parent_tag.insert(pos, tag)
def generate_author_anchor(self, author):
    """ Generate legal XHTML anchor.

    Convert author to legal XHTML (may contain unicode chars), stripping
    non-alphanumeric chars.

    Args:
        author (str): author name

    Return:
        (str): asciized version of author
    """
    # Transliterate to ASCII first, then drop every non-word character.
    ascii_author = ascii_text(author)
    return re.sub(r'\W', '', ascii_author)
def generate_format_args(self, book):
    """ Generate the format args for template substitution.

    self.load_section_templates imports string formatting templates of the form
    'by_*_template.py' for use in the various sections. The templates are designed to use
    format args, supplied by this method.

    Args:
        book (dict): book metadata

    Return:
        (dict): formatted args for templating
    """
    # Render whole-number series indexes without the trailing '.0'
    index_text = str(book['series_index'])
    if index_text.endswith('.0'):
        index_text = index_text[:-2]

    rating_text = self.generate_rating_string(book)
    pub_year = book['date'].split()[1] if book['date'] else ''

    return {
        'title': book['title'],
        'series': book['series'],
        'series_index': index_text,
        'rating': rating_text,
        'rating_parens': '(%s)' % rating_text if 'rating' in book else '',
        'pubyear': pub_year,
        'pubyear_parens': "(%s)" % pub_year if book['date'] else '',
    }
def generate_html_by_author(self):
    """ Generate content/ByAlphaAuthor.html.

    Loop through self.books_by_author, generate HTML
    with anchors for author and index letters.

    Input:
        books_by_author (list): books, sorted by author

    Output:
        content/ByAlphaAuthor.html (file)
    """
    friendly_name = _("Authors")
    self.update_progress_full_step("%s HTML" % friendly_name)

    soup = self.generate_html_empty_header(friendly_name)
    body = soup.find('body')
    btc = 0                 # body tag cursor

    divTag = soup.new_tag("div")
    dtc = 0                 # divTag cursor
    divOpeningTag = None    # first author group of a letter section
    dotc = 0                # divOpeningTag cursor
    divRunningTag = None    # subsequent author groups of a letter section
    drtc = 0                # divRunningTag cursor

    # Loop through books_by_author
    # Each author/books group goes in an openingTag div (first) or
    # a runningTag div (subsequent)
    book_count = 0
    current_author = ''
    current_letter = ''
    current_series = None

    # Establish initial letter equivalencies
    sort_equivalents = self.establish_equivalencies(self.books_by_author, key='author_sort')

    for idx, book in enumerate(self.books_by_author):
        book_count += 1
        if self.letter_or_symbol(sort_equivalents[idx]) != current_letter:
            # Start a new letter with Index letter
            # Flush any open divs from the previous letter section first
            if divOpeningTag is not None:
                divTag.insert(dtc, divOpeningTag)
                dtc += 1
                dotc = 0
            if divRunningTag is not None:
                divTag.insert(dtc, divRunningTag)
                dtc += 1
                drtc = 0
                divRunningTag = None

            author_count = 0
            divOpeningTag = soup.new_tag('div')
            if dtc > 0:
                divOpeningTag['class'] = "initial_letter"
            dotc = 0
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "author_title_letter_index"
            aTag = soup.new_tag("a")
            # current_letter = self.letter_or_symbol(book['author_sort'][0].upper())
            current_letter = self.letter_or_symbol(sort_equivalents[idx])
            if current_letter == self.SYMBOLS:
                aTag['id'] = self.SYMBOLS + '_authors'
                pIndexTag.insert(0, aTag)
                pIndexTag.insert(1, NavigableString(self.SYMBOLS))
            else:
                aTag['id'] = self.generate_unicode_name(current_letter) + '_authors'
                pIndexTag.insert(0, aTag)
                pIndexTag.insert(1, NavigableString(sort_equivalents[idx]))
            divOpeningTag.insert(dotc, pIndexTag)
            dotc += 1

        if book['author'] != current_author:
            # Start a new author
            current_author = book['author']
            author_count += 1
            if author_count >= 2:
                # Add divOpeningTag to divTag, kill divOpeningTag
                if divOpeningTag:
                    divTag.insert(dtc, divOpeningTag)
                    dtc += 1
                    divOpeningTag = None
                    dotc = 0

                # Create a divRunningTag for the next author
                if author_count > 2:
                    divTag.insert(dtc, divRunningTag)
                    dtc += 1

                divRunningTag = soup.new_tag('div')
                divRunningTag['class'] = "author_logical_group"
                drtc = 0

            non_series_books = 0
            current_series = None

            pAuthorTag = soup.new_tag("p")
            pAuthorTag['class'] = "author_index"
            aTag = soup.new_tag("a")
            aTag['id'] = "%s" % self.generate_author_anchor(current_author)
            aTag.insert(0, NavigableString(current_author))
            pAuthorTag.insert(0, aTag)
            if author_count == 1:
                divOpeningTag.insert(dotc, pAuthorTag)
                dotc += 1
            else:
                divRunningTag.insert(drtc, pAuthorTag)
                drtc += 1

        # Check for series
        if book['series'] and book['series'] != current_series:
            # Start a new series
            current_series = book['series']
            pSeriesTag = soup.new_tag('p')
            pSeriesTag['class'] = "series"
            if self.opts.fmt == 'mobi':
                pSeriesTag['class'] = "series_mobi"
            if self.opts.generate_series:
                aTag = soup.new_tag('a')
                aTag['href'] = "{}.html#{}".format('BySeries', self.generate_series_anchor(book['series']))
                aTag.insert(0, book['series'])
                pSeriesTag.insert(0, aTag)
            else:
                pSeriesTag.insert(0, NavigableString('%s' % book['series']))
            if author_count == 1:
                divOpeningTag.insert(dotc, pSeriesTag)
                dotc += 1
            elif divRunningTag is not None:
                divRunningTag.insert(drtc, pSeriesTag)
                drtc += 1
        if current_series and not book['series']:
            current_series = None

        # Add books
        pBookTag = soup.new_tag("p")
        pBookTag['class'] = "line_item"
        ptc = 0

        self.insert_prefix(soup, pBookTag, ptc, book['prefix'])
        ptc += 1

        spanTag = soup.new_tag("span")
        spanTag['class'] = "entry"
        stc = 0

        aTag = soup.new_tag("a")
        if self.opts.generate_descriptions:
            aTag['href'] = "book_%d.html" % (int(float(book['id'])))

        # Generate the title from the template
        args = self.generate_format_args(book)
        if current_series:
            # aTag.insert(0,'%s%s' % (escape(book['title'][len(book['series'])+1:]),pubyear))
            formatted_title = self.formatter.safe_format(
                self.by_authors_series_title_template, args,
                _('error in') + ' by_authors_series_title_template:',
                self.db.new_api.get_proxy_metadata(book['id']))
        else:
            # aTag.insert(0,'%s%s' % (escape(book['title']), pubyear))
            formatted_title = self.formatter.safe_format(
                self.by_authors_normal_title_template, args,
                _('error in') + ' by_authors_normal_title_template:', self.db.new_api.get_proxy_metadata(book['id']))
            non_series_books += 1
        aTag.insert(0, NavigableString(formatted_title))

        # NOTE(review): inserts at ptc (1) rather than stc (0); works
        # because inserting past the end of an empty tag appends
        spanTag.insert(ptc, aTag)
        stc += 1
        pBookTag.insert(ptc, spanTag)
        ptc += 1

        if author_count == 1:
            divOpeningTag.insert(dotc, pBookTag)
            dotc += 1
        elif divRunningTag:
            divRunningTag.insert(drtc, pBookTag)
            drtc += 1

    # loop ends here

    pTag = soup.new_tag("p")
    pTag['class'] = 'title'
    ptc = 0
    aTag = soup.new_tag('a')
    aTag['id'] = 'section_start'
    pTag.insert(ptc, aTag)
    ptc += 1

    if not self.generate_for_kindle_mobi:
        # Kindle don't need this because it shows section titles in Periodical format
        aTag = soup.new_tag("a")
        anchor_name = friendly_name.lower()
        aTag['id'] = anchor_name.replace(" ", "")
        pTag.insert(ptc, aTag)
        ptc += 1
        pTag.insert(ptc, NavigableString('%s' % (friendly_name)))

    body.insert(btc, pTag)
    btc += 1

    # Flush whichever div is still open after the loop
    if author_count == 1:
        divTag.insert(dtc, divOpeningTag)
        dtc += 1
    elif divRunningTag is not None:
        divTag.insert(dtc, divRunningTag)
        dtc += 1

    # Add the divTag to the body
    body.insert(btc, divTag)

    # Write the generated file to content_dir
    outfile_spec = "%s/ByAlphaAuthor.html" % (self.content_dir)
    with open(outfile_spec, 'wb') as outfile:
        outfile.write(prettify(soup).encode('utf-8'))

    self.html_filelist_1.append("content/ByAlphaAuthor.html")
def generate_html_by_date_added(self):
    """ Generate content/ByDateAdded.html.

    Loop through self.books_to_catalog sorted by reverse date, generate HTML.

    Input:
     books_by_title (list): books, sorted by title

    Output:
     content/ByDateAdded.html (file)
    """

    def _add_books_to_html_by_month(this_months_list, dtc):
        # Render one calendar month's books under a month-index anchor,
        # grouped by author and series. Returns the updated divTag child index.
        if len(this_months_list):
            # Determine the longest author_sort length before sorting
            asl = [i['author_sort'] for i in this_months_list]
            las = max(asl, key=len)
            this_months_list = sorted(this_months_list,
                key=lambda x: sort_key(self._kf_books_by_author_sorter_author_sort(x, len(las))))

            # Create a new month anchor
            date_string = strftime('%B %Y', current_date.timetuple())
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "date_index"
            aTag = soup.new_tag("a")
            aTag['id'] = f"bda_{current_date.year}-{current_date.month}"
            pIndexTag.insert(0, aTag)
            pIndexTag.insert(1, NavigableString(date_string))
            divTag.insert(dtc, pIndexTag)
            dtc += 1
            current_author = None
            current_series = None

            for new_entry in this_months_list:
                if new_entry['author'] != current_author:
                    # Start a new author
                    current_author = new_entry['author']
                    non_series_books = 0
                    current_series = None
                    pAuthorTag = soup.new_tag("p")
                    pAuthorTag['class'] = "author_index"
                    aTag = soup.new_tag("a")
                    if self.opts.generate_authors:
                        aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(current_author))
                    aTag.insert(0, NavigableString(current_author))
                    pAuthorTag.insert(0, aTag)
                    divTag.insert(dtc, pAuthorTag)
                    dtc += 1

                # Check for series
                if new_entry['series'] and new_entry['series'] != current_series:
                    # Start a new series
                    current_series = new_entry['series']
                    pSeriesTag = soup.new_tag('p')
                    pSeriesTag['class'] = "series"
                    if self.opts.fmt == 'mobi':
                        pSeriesTag['class'] = "series_mobi"
                    if self.opts.generate_series:
                        aTag = soup.new_tag('a')
                        aTag['href'] = "{}.html#{}".format('BySeries', self.generate_series_anchor(new_entry['series']))
                        aTag.insert(0, new_entry['series'])
                        pSeriesTag.insert(0, aTag)
                    else:
                        pSeriesTag.insert(0, NavigableString('%s' % new_entry['series']))
                    divTag.insert(dtc, pSeriesTag)
                    dtc += 1
                if current_series and not new_entry['series']:
                    current_series = None

                # Add books
                pBookTag = soup.new_tag("p")
                pBookTag['class'] = "line_item"
                ptc = 0

                self.insert_prefix(soup, pBookTag, ptc, new_entry['prefix'])
                ptc += 1

                spanTag = soup.new_tag("span")
                spanTag['class'] = "entry"
                stc = 0

                aTag = soup.new_tag("a")
                if self.opts.generate_descriptions:
                    aTag['href'] = "book_%d.html" % (int(float(new_entry['id'])))

                # Generate the title from the template.
                # BUGFIX: metadata must come from new_entry, not the enclosing
                # scope's 'book' loop variable, which points at a different
                # title when this helper runs.
                args = self.generate_format_args(new_entry)
                if current_series:
                    formatted_title = self.formatter.safe_format(
                        self.by_month_added_series_title_template, args,
                        _('error in') + ' by_month_added_series_title_template:',
                        self.db.new_api.get_proxy_metadata(new_entry['id']))
                else:
                    formatted_title = self.formatter.safe_format(
                        self.by_month_added_normal_title_template, args,
                        _('error in') + ' by_month_added_normal_title_template:',
                        self.db.new_api.get_proxy_metadata(new_entry['id']))
                    non_series_books += 1
                aTag.insert(0, NavigableString(formatted_title))

                spanTag.insert(stc, aTag)
                stc += 1
                pBookTag.insert(ptc, spanTag)
                ptc += 1

                divTag.insert(dtc, pBookTag)
                dtc += 1
        return dtc

    def _add_books_to_html_by_date_range(date_range_list, date_range, dtc):
        # Render one date-range bucket ('Last n days' / 'n to m days ago')
        # under a range-index anchor. Returns the updated divTag child index.
        if len(date_range_list):
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "date_index"
            aTag = soup.new_tag("a")
            aTag['id'] = "bda_%s" % date_range.replace(' ', '')
            pIndexTag.insert(0, aTag)
            pIndexTag.insert(1, NavigableString(date_range))
            divTag.insert(dtc, pIndexTag)
            dtc += 1

            for new_entry in date_range_list:
                # Add books
                pBookTag = soup.new_tag("p")
                pBookTag['class'] = "line_item"
                ptc = 0

                self.insert_prefix(soup, pBookTag, ptc, new_entry['prefix'])
                ptc += 1

                spanTag = soup.new_tag("span")
                spanTag['class'] = "entry"
                stc = 0

                aTag = soup.new_tag("a")
                if self.opts.generate_descriptions:
                    aTag['href'] = "book_%d.html" % (int(float(new_entry['id'])))

                # Generate the title from the template.
                # BUGFIX: use new_entry's id, not the enclosing 'book' variable
                # (which is the current item of the outer date-range scan).
                args = self.generate_format_args(new_entry)
                if new_entry['series']:
                    formatted_title = self.formatter.safe_format(
                        self.by_recently_added_series_title_template, args,
                        _('error in') + ' by_recently_added_series_title_template:',
                        self.db.new_api.get_proxy_metadata(new_entry['id']))
                else:
                    formatted_title = self.formatter.safe_format(
                        self.by_recently_added_normal_title_template, args,
                        _('error in') + ' by_recently_added_normal_title_template:',
                        self.db.new_api.get_proxy_metadata(new_entry['id']))
                aTag.insert(0, NavigableString(formatted_title))
                spanTag.insert(stc, aTag)
                stc += 1

                # Dot
                spanTag.insert(stc, NavigableString(" · "))
                stc += 1

                # Link to author
                emTag = soup.new_tag("em")
                aTag = soup.new_tag("a")
                if self.opts.generate_authors:
                    aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(new_entry['author']))
                aTag.insert(0, NavigableString(new_entry['author']))
                emTag.insert(0, aTag)
                spanTag.insert(stc, emTag)
                stc += 1

                pBookTag.insert(ptc, spanTag)
                ptc += 1

                divTag.insert(dtc, pBookTag)
                dtc += 1
        return dtc

    friendly_name = _("Recently Added")
    self.update_progress_full_step("%s HTML" % friendly_name)

    soup = self.generate_html_empty_header(friendly_name)
    body = soup.find('body')
    btc = 0
    pTag = soup.new_tag("p")
    pTag['class'] = 'title'
    ptc = 0
    aTag = soup.new_tag('a')
    aTag['id'] = 'section_start'
    pTag.insert(ptc, aTag)
    ptc += 1

    if not self.generate_for_kindle_mobi:
        # Kindle don't need this because it shows section titles in Periodical format
        aTag = soup.new_tag("a")
        anchor_name = friendly_name.lower()
        aTag['id'] = anchor_name.replace(" ", "")
        pTag.insert(ptc, aTag)
        ptc += 1
        pTag.insert(ptc, NavigableString('%s' % (friendly_name)))
    body.insert(btc, pTag)
    btc += 1

    divTag = soup.new_tag("div")
    dtc = 0

    # >>> Books by date range <<<
    if self.use_series_prefix_in_titles_section:
        self.books_by_date_range = sorted(self.books_to_catalog,
            key=lambda x: (x['timestamp'], x['timestamp']), reverse=True)
    else:
        nspt = deepcopy(self.books_to_catalog)
        self.books_by_date_range = sorted(nspt, key=lambda x: (x['timestamp'], x['timestamp']), reverse=True)

    date_range_list = []
    today_time = nowf().replace(hour=23, minute=59, second=59)
    for (i, date) in enumerate(self.DATE_RANGE):
        date_range_limit = self.DATE_RANGE[i]
        if i:
            date_range = '%d to %d days ago' % (self.DATE_RANGE[i - 1], self.DATE_RANGE[i])
        else:
            date_range = 'Last %d days' % (self.DATE_RANGE[i])
        for book in self.books_by_date_range:
            book_time = book['timestamp']
            delta = today_time - book_time
            if delta.days <= date_range_limit:
                date_range_list.append(book)
            else:
                break
        dtc = _add_books_to_html_by_date_range(date_range_list, date_range, dtc)
        date_range_list = [book]

    # >>>> Books by month <<<<
    # Sort titles case-insensitive for by month using series prefix
    self.books_by_month = sorted(self.books_to_catalog,
        key=lambda x: (x['timestamp'], x['timestamp']), reverse=True)

    # Loop through books by date, flushing each month's accumulator when the
    # (month, year) of the current book changes.
    current_date = datetime.date.fromordinal(1)
    this_months_list = []
    for book in self.books_by_month:
        if book['timestamp'].month != current_date.month or \
           book['timestamp'].year != current_date.year:
            dtc = _add_books_to_html_by_month(this_months_list, dtc)
            this_months_list = []
            current_date = book['timestamp'].date()
        this_months_list.append(book)

    # Add the last month's list
    _add_books_to_html_by_month(this_months_list, dtc)

    # Add the divTag to the body
    body.insert(btc, divTag)

    # Write the generated file to content_dir
    outfile_spec = "%s/ByDateAdded.html" % (self.content_dir)
    with open(outfile_spec, 'wb') as outfile:
        outfile.write(prettify(soup).encode('utf-8'))
    self.html_filelist_2.append("content/ByDateAdded.html")
def generate_html_by_date_read(self):
    """ Generate content/ByDateRead.html.

    Create self.bookmarked_books_by_date_read from self.bookmarked_books.
    Loop through self.bookmarked_books_by_date_read, generate HTML.

    Input:
     bookmarked_books_by_date_read (list)

    Output:
     content/ByDateRead.html (file)
    """

    def _add_books_to_html_by_day(todays_list, dtc):
        # Render one day's worth of read books under a day-index anchor.
        # Returns the updated divTag child index.
        if len(todays_list):
            # Create a new day anchor
            date_string = strftime('%A, %B %d', current_date.timetuple())
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "date_index"
            aTag = soup.new_tag("a")
            aTag['name'] = f"bdr_{current_date.year}-{current_date.month}-{current_date.day}"
            pIndexTag.insert(0, aTag)
            pIndexTag.insert(1, NavigableString(date_string))
            divTag.insert(dtc, pIndexTag)
            dtc += 1

            for new_entry in todays_list:
                pBookTag = soup.new_tag("p")
                pBookTag['class'] = "date_read"
                ptc = 0

                # Percent read
                pBookTag.insert(ptc, NavigableString(new_entry['reading_progress']))
                ptc += 1

                aTag = soup.new_tag("a")
                if self.opts.generate_descriptions:
                    aTag['href'] = "book_%d.html" % (int(float(new_entry['id'])))
                aTag.insert(0, NavigableString(new_entry['title']))
                pBookTag.insert(ptc, aTag)
                ptc += 1

                # Dot
                pBookTag.insert(ptc, NavigableString(" · "))
                ptc += 1

                # Link to author
                emTag = soup.new_tag("em")
                aTag = soup.new_tag("a")
                if self.opts.generate_authors:
                    aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(new_entry['author']))
                aTag.insert(0, NavigableString(new_entry['author']))
                emTag.insert(0, aTag)
                pBookTag.insert(ptc, emTag)
                ptc += 1

                divTag.insert(dtc, pBookTag)
                dtc += 1
        return dtc

    def _add_books_to_html_by_date_range(date_range_list, date_range, dtc):
        # Render one date-range bucket of read books with a dot-based progress
        # bar. NOTE(review): not called from this method's main body at
        # present — kept for parity with generate_html_by_date_added.
        if len(date_range_list):
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "date_index"
            aTag = soup.new_tag("a")
            aTag['name'] = "bdr_%s" % date_range.replace(' ', '')
            pIndexTag.insert(0, aTag)
            pIndexTag.insert(1, NavigableString(date_range))
            divTag.insert(dtc, pIndexTag)
            dtc += 1

            for new_entry in date_range_list:
                # Add books
                pBookTag = soup.new_tag("p")
                pBookTag['class'] = "date_read"
                ptc = 0

                # Percent read, rendered as filled/empty dots out of 10
                dots = int((new_entry['percent_read'] + 5) // 10)
                dot_string = self.SYMBOL_PROGRESS_READ * dots
                empty_dots = self.SYMBOL_PROGRESS_UNREAD * (10 - dots)
                pBookTag.insert(ptc, NavigableString(f'{dot_string}{empty_dots}'))
                ptc += 1

                aTag = soup.new_tag("a")
                if self.opts.generate_descriptions:
                    aTag['href'] = "book_%d.html" % (int(float(new_entry['id'])))
                aTag.insert(0, NavigableString(new_entry['title']))
                pBookTag.insert(ptc, aTag)
                ptc += 1

                # Dot
                pBookTag.insert(ptc, NavigableString(" · "))
                ptc += 1

                # Link to author
                emTag = soup.new_tag("em")
                aTag = soup.new_tag("a")
                if self.opts.generate_authors:
                    aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(new_entry['author']))
                aTag.insert(0, NavigableString(new_entry['author']))
                emTag.insert(0, aTag)
                pBookTag.insert(ptc, emTag)
                ptc += 1

                divTag.insert(dtc, pBookTag)
                dtc += 1
        return dtc

    friendly_name = _('Recently Read')
    self.update_progress_full_step("%s HTML" % friendly_name)

    if not self.bookmarked_books:
        return

    soup = self.generate_html_empty_header(friendly_name)
    body = soup.find('body')
    btc = 0

    # Insert section tag
    aTag = soup.new_tag('a')
    aTag['name'] = 'section_start'
    body.insert(btc, aTag)
    btc += 1

    # Insert the anchor
    aTag = soup.new_tag("a")
    anchor_name = friendly_name.lower()
    aTag['name'] = anchor_name.replace(" ", "")
    body.insert(btc, aTag)
    btc += 1

    divTag = soup.new_tag("div")
    dtc = 0

    # self.bookmarked_books: (Bookmark, book)
    bookmarked_books = []
    for bm_book in self.bookmarked_books:
        book = self.bookmarked_books[bm_book]
        book[1]['bookmark_timestamp'] = book[0].timestamp
        try:
            book[1]['percent_read'] = min(float(100 * book[0].last_read / book[0].book_length), 100)
        except Exception:
            # Zero or missing book_length (or non-numeric fields): treat as
            # unread rather than aborting the catalog run. Narrowed from a
            # bare except so KeyboardInterrupt/SystemExit still propagate.
            book[1]['percent_read'] = 0
        bookmarked_books.append(book[1])

    self.bookmarked_books_by_date_read = sorted(bookmarked_books,
        key=lambda x: (x['bookmark_timestamp'], x['bookmark_timestamp']), reverse=True)

    # >>>> Recently read by day <<<<
    current_date = datetime.date.fromordinal(1)
    todays_list = []
    for book in self.bookmarked_books_by_date_read:
        bookmark_time = utcfromtimestamp(book['bookmark_timestamp'])
        if bookmark_time.day != current_date.day or \
           bookmark_time.month != current_date.month or \
           bookmark_time.year != current_date.year:
            dtc = _add_books_to_html_by_day(todays_list, dtc)
            todays_list = []
            current_date = utcfromtimestamp(book['bookmark_timestamp']).date()
        todays_list.append(book)

    # Add the last day's list
    _add_books_to_html_by_day(todays_list, dtc)

    # Add the divTag to the body
    body.insert(btc, divTag)

    # Write the generated file to content_dir
    outfile_spec = "%s/ByDateRead.html" % (self.content_dir)
    with open(outfile_spec, 'wb') as outfile:
        outfile.write(prettify(soup).encode('utf-8'))
    self.html_filelist_2.append("content/ByDateRead.html")
def generate_html_by_genres(self):
    """ Generate individual HTML files per tag.

    Filter out excluded tags. For each tag qualifying as a genre,
    create a separate HTML file. Normalize tags to flatten synonymous tags.

    Inputs:
     self.genre_tags_dict (list): all genre tags

    Output:
     (files): HTML file per genre
    """
    self.update_progress_full_step(_("Genres HTML"))

    # Extract books matching filtered_tags
    genre_list = []
    for friendly_tag in sorted(self.genre_tags_dict, key=sort_key):
        # tag_list => { normalized_genre_tag : [{book},{},{}],
        #                normalized_genre_tag : [{book},{},{}] }
        tag_list = {}
        for book in self.books_by_author:
            # Scan each book for tag matching friendly_tag
            if 'genres' in book and friendly_tag in book['genres']:
                this_book = {}
                this_book['author'] = book['author']
                this_book['title'] = book['title']
                this_book['author_sort'] = capitalize(book['author_sort'])
                this_book['prefix'] = book['prefix']
                this_book['tags'] = book['tags']
                this_book['id'] = book['id']
                this_book['series'] = book['series']
                this_book['series_index'] = book['series_index']
                this_book['date'] = book['date']
                normalized_tag = self.genre_tags_dict[friendly_tag]
                genre_tag_list = [key for genre in genre_list for key in genre]
                if normalized_tag in genre_tag_list:
                    # Synonymous tag already collected: merge into the
                    # existing bucket, de-duplicating on (title, author).
                    # Uses 'existing_book' to avoid shadowing the enclosing
                    # loop's 'book' variable.
                    for existing_genre in genre_list:
                        for key in existing_genre:
                            if key == normalized_tag:
                                if not any(
                                        (existing_book['title'], existing_book['author']) ==
                                        (this_book['title'], this_book['author'])
                                        for existing_book in existing_genre[key]):
                                    existing_genre[key].append(this_book)
                else:
                    tag_list[normalized_tag] = [this_book]
        # Only record non-empty buckets: when every matching book was merged
        # into an earlier synonym's bucket, tag_list stays empty and would
        # otherwise inflate the genre count reported below.
        if tag_list:
            genre_list.append(tag_list)

    if self.opts.verbose:
        if len(genre_list):
            self.opts.log.info(" Genre summary: %d active genre tags used in generating catalog with %d titles" %
                                (len(genre_list), len(self.books_to_catalog)))
        for genre in genre_list:
            for key in genre:
                self.opts.log.info("  %s: %d %s" % (self.get_friendly_genre_tag(key),
                                    len(genre[key]),
                                    'titles' if len(genre[key]) > 1 else 'title'))

    # Write the results
    # genre_list = [ {friendly_tag:[{book},{book}]}, {friendly_tag:[{book},{book}]}, ...]
    master_genre_list = []
    for genre_tag_set in genre_list:
        for (index, genre) in enumerate(genre_tag_set):
            # Create sorted_authors[0] = friendly, [1] = author_sort for NCX creation
            authors = []
            for book in genre_tag_set[genre]:
                authors.append((book['author'], book['author_sort']))

            # authors[] contains a list of all book authors, with multiple entries for multiple books by author
            # Create unique_authors with a count of books per author as the third tuple element
            books_by_current_author = 1
            current_author = authors[0]
            unique_authors = []
            for (i, author) in enumerate(authors):
                if author != current_author and i:
                    unique_authors.append((current_author[0], current_author[1], books_by_current_author))
                    current_author = author
                    books_by_current_author = 1
                elif i == 0 and len(authors) == 1:
                    # Allow for single-book lists
                    unique_authors.append((current_author[0], current_author[1], books_by_current_author))
                else:
                    books_by_current_author += 1

            # Write the genre book list as an article
            outfile = f"{self.content_dir}/Genre_{genre}.html"
            titles_spanned = self.generate_html_by_genre(genre,
                                                         index == 0,
                                                         genre_tag_set[genre],
                                                         outfile)
            tag_file = "content/Genre_%s.html" % genre
            master_genre_list.append({
                'tag': genre,
                'file': tag_file,
                'authors': unique_authors,
                'books': genre_tag_set[genre],
                'titles_spanned': titles_spanned})
    self.genres = master_genre_list
def generate_html_by_genre(self, genre, section_head, books, outfile):
    """ Generate individual genre HTML file.

    Generate an individual genre HTML file. Called from generate_html_by_genres()

    Args:
     genre (str): genre name
     section_head (bool): True if starting section
     books (dict): list of books in genre
     outfile (str): full pathname to output file

    Results:
     (file): Genre HTML file written

    Returns:
     titles_spanned (list): [(first_author, first_book), (last_author, last_book)]
    """
    soup = self.generate_html_genre_header(genre)
    body = soup.find('body')

    btc = 0
    divTag = soup.new_tag('div')
    dtc = 0

    # Insert section tag if this is the section start - first article only
    if section_head:
        aTag = soup.new_tag('a')
        aTag['id'] = 'section_start'
        divTag.insert(dtc, aTag)
        dtc += 1

    # Create an anchor from the tag
    aTag = soup.new_tag('a')
    aTag['id'] = "Genre_%s" % genre
    divTag.insert(dtc, aTag)
    body.insert(btc, divTag)
    btc += 1

    titleTag = body.find(attrs={'class': 'title'})
    titleTag.insert(0, NavigableString('%s' % self.get_friendly_genre_tag(genre)))

    # Insert the books by author list
    divTag = body.find(attrs={'class': 'authors'})
    dtc = 0

    current_author = ''
    current_series = None
    for book in books:
        if book['author'] != current_author:
            # Start a new author with link
            current_author = book['author']
            non_series_books = 0
            current_series = None
            pAuthorTag = soup.new_tag("p")
            pAuthorTag['class'] = "author_index"
            aTag = soup.new_tag("a")
            if self.opts.generate_authors:
                aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(book['author']))
            aTag.insert(0, book['author'])
            pAuthorTag.insert(0, aTag)
            divTag.insert(dtc, pAuthorTag)
            dtc += 1

        # Check for series
        if book['series'] and book['series'] != current_series:
            # Start a new series
            current_series = book['series']
            pSeriesTag = soup.new_tag('p')
            pSeriesTag['class'] = "series"
            if self.opts.fmt == 'mobi':
                pSeriesTag['class'] = "series_mobi"
            if self.opts.generate_series:
                aTag = soup.new_tag('a')
                aTag['href'] = "{}.html#{}".format('BySeries', self.generate_series_anchor(book['series']))
                aTag.insert(0, book['series'])
                pSeriesTag.insert(0, aTag)
            else:
                pSeriesTag.insert(0, NavigableString('%s' % book['series']))
            divTag.insert(dtc, pSeriesTag)
            dtc += 1
        if current_series and not book['series']:
            current_series = None

        # Add books
        pBookTag = soup.new_tag("p")
        pBookTag['class'] = "line_item"
        ptc = 0

        self.insert_prefix(soup, pBookTag, ptc, book['prefix'])
        ptc += 1

        spanTag = soup.new_tag("span")
        spanTag['class'] = "entry"
        stc = 0

        # Add the book title
        aTag = soup.new_tag("a")
        if self.opts.generate_descriptions:
            aTag['href'] = "book_%d.html" % (int(float(book['id'])))

        # Generate the title from the template
        args = self.generate_format_args(book)
        if current_series:
            formatted_title = self.formatter.safe_format(
                self.by_genres_series_title_template, args,
                _('error in') + ' by_genres_series_title_template:',
                self.db.new_api.get_proxy_metadata(book['id']))
        else:
            formatted_title = self.formatter.safe_format(
                self.by_genres_normal_title_template, args,
                _('error in') + ' by_genres_normal_title_template:',
                self.db.new_api.get_proxy_metadata(book['id']))
            non_series_books += 1
        aTag.insert(0, NavigableString(formatted_title))

        spanTag.insert(stc, aTag)
        stc += 1
        pBookTag.insert(ptc, spanTag)
        ptc += 1

        divTag.insert(dtc, pBookTag)
        dtc += 1

    # Write the generated file to content_dir.
    # Use a distinct handle name so the outfile parameter is not shadowed.
    with open(outfile, 'wb') as f:
        f.write(prettify(soup).encode('utf-8'))

    if len(books) > 1:
        titles_spanned = [(books[0]['author'], books[0]['title']), (books[-1]['author'], books[-1]['title'])]
    else:
        titles_spanned = [(books[0]['author'], books[0]['title'])]

    return titles_spanned
def generate_html_by_series(self):
    """ Generate content/BySeries.html.

    Search database for books in series.

    Input:
     database

    Output:
     content/BySeries.html (file)
    """
    friendly_name = ngettext('Series', 'Series', 2)
    self.update_progress_full_step("%s HTML" % friendly_name)

    self.opts.sort_by = 'series'

    # *** Convert the existing database, resort by series/index ***
    self.books_by_series = [i for i in self.books_to_catalog if i['series']]
    self.books_by_series = sorted(self.books_by_series, key=lambda x: sort_key(self._kf_books_by_series_sorter(x)))

    if not self.books_by_series:
        # Nothing to do; disable the Series section for later stages
        self.opts.generate_series = False
        self.opts.log(" no series found in selected books, skipping Series section")
        return

    # Generate series_sort
    for book in self.books_by_series:
        book['series_sort'] = self.generate_sort_title(book['series'])

    # Establish initial letter equivalencies
    sort_equivalents = self.establish_equivalencies(self.books_by_series, key='series_sort')

    soup = self.generate_html_empty_header(friendly_name)
    body = soup.find('body')

    btc = 0
    divTag = soup.new_tag("div")
    dtc = 0
    current_letter = ""
    current_series = None

    # Loop through books_by_series
    series_count = 0
    for idx, book in enumerate(self.books_by_series):
        # Check for initial letter change
        if self.letter_or_symbol(sort_equivalents[idx]) != current_letter:
            # Start a new letter with Index letter
            current_letter = self.letter_or_symbol(sort_equivalents[idx])
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "series_letter_index"
            aTag = soup.new_tag("a")
            if current_letter == self.SYMBOLS:
                # All non-letter series sorts collapse into one SYMBOLS bucket
                aTag['id'] = self.SYMBOLS + "_series"
                pIndexTag.insert(0, aTag)
                pIndexTag.insert(1, NavigableString(self.SYMBOLS))
            else:
                aTag['id'] = self.generate_unicode_name(current_letter) + "_series"
                pIndexTag.insert(0, aTag)
                pIndexTag.insert(1, NavigableString(sort_equivalents[idx]))
            divTag.insert(dtc, pIndexTag)
            dtc += 1
        # Check for series change
        if book['series'] != current_series:
            # Start a new series
            series_count += 1
            current_series = book['series']
            pSeriesTag = soup.new_tag('p')
            pSeriesTag['class'] = "series"
            if self.opts.fmt == 'mobi':
                pSeriesTag['class'] = "series_mobi"
            aTag = soup.new_tag('a')
            aTag['id'] = self.generate_series_anchor(book['series'])
            pSeriesTag.insert(0, aTag)
            pSeriesTag.insert(1, NavigableString('%s' % book['series']))
            divTag.insert(dtc, pSeriesTag)
            dtc += 1

        # Add books
        pBookTag = soup.new_tag("p")
        pBookTag['class'] = "line_item"
        ptc = 0

        book['prefix'] = self.discover_prefix(book)
        self.insert_prefix(soup, pBookTag, ptc, book['prefix'])
        ptc += 1

        spanTag = soup.new_tag("span")
        spanTag['class'] = "entry"
        stc = 0

        aTag = soup.new_tag("a")
        if self.opts.generate_descriptions:
            aTag['href'] = "book_%d.html" % (int(float(book['id'])))
        # Use series, series index if avail else just title
        # aTag.insert(0,'%d. %s &middot; %s' % (book['series_index'],escape(book['title']), ' & '.join(book['authors'])))

        # Title is rendered via the user-customizable template
        args = self.generate_format_args(book)
        formatted_title = self.formatter.safe_format(
            self.by_series_title_template, args,
            _('error in') + ' by_series_title_template:',
            self.db.new_api.get_proxy_metadata(book['id']))
        aTag.insert(0, NavigableString(formatted_title))

        spanTag.insert(stc, aTag)
        stc += 1

        # &middot;
        spanTag.insert(stc, NavigableString(' &middot; '))
        stc += 1

        # Link to author
        aTag = soup.new_tag("a")
        if self.opts.generate_authors:
            aTag['href'] = "{}.html#{}".format("ByAlphaAuthor",
                                        self.generate_author_anchor(' & '.join(book['authors'])))
        aTag.insert(0, NavigableString(' & '.join(book['authors'])))
        spanTag.insert(stc, aTag)
        stc += 1

        pBookTag.insert(ptc, spanTag)
        ptc += 1

        divTag.insert(dtc, pBookTag)
        dtc += 1

    pTag = soup.new_tag("p")
    pTag['class'] = 'title'
    ptc = 0
    aTag = soup.new_tag('a')
    aTag['id'] = 'section_start'
    pTag.insert(ptc, aTag)
    ptc += 1

    if not self.generate_for_kindle_mobi:
        # Insert the <h2> tag with book_count at the head
        aTag = soup.new_tag("a")
        anchor_name = friendly_name.lower()
        aTag['id'] = anchor_name.replace(" ", "")
        pTag.insert(0, aTag)
        pTag.insert(1, NavigableString('%s' % friendly_name))
    body.insert(btc, pTag)
    btc += 1

    # Add the divTag to the body
    body.insert(btc, divTag)

    # Write the generated file to content_dir
    outfile_spec = "%s/BySeries.html" % (self.content_dir)
    with open(outfile_spec, 'wb') as outfile:
        outfile.write(prettify(soup).encode('utf-8'))
    self.html_filelist_1.append("content/BySeries.html")
def generate_html_by_title(self):
    """ Generate content/ByAlphaTitle.html.

    Generate HTML of books sorted by title.

    Input:
     books_by_title

    Output:
     content/ByAlphaTitle.html (file)
    """
    self.update_progress_full_step(_("Titles HTML"))

    soup = self.generate_html_empty_header("Books By Alpha Title")
    body = soup.find('body')
    btc = 0
    pTag = soup.new_tag("p")
    pTag['class'] = 'title'
    ptc = 0
    aTag = soup.new_tag('a')
    aTag['id'] = 'section_start'
    pTag.insert(ptc, aTag)
    ptc += 1

    if not self.generate_for_kindle_mobi:
        # Kindle don't need this because it shows section titles in Periodical format
        aTag = soup.new_tag("a")
        aTag['id'] = "bytitle"
        pTag.insert(ptc, aTag)
        ptc += 1
        pTag.insert(ptc, NavigableString(_('Titles')))

    body.insert(btc, pTag)
    btc += 1

    divTag = soup.new_tag("div")
    dtc = 0
    current_letter = ""

    # Re-sort title list without leading series/series_index
    # Incoming title <series> <series_index>: <title>
    if not self.use_series_prefix_in_titles_section:
        nspt = deepcopy(self.books_to_catalog)
        nspt = sorted(nspt, key=lambda x: sort_key(x['title_sort'].upper()))
        self.books_by_title_no_series_prefix = nspt

    # Establish initial letter equivalencies
    # NOTE(review): equivalencies are computed from self.books_by_title even
    # when the no-series-prefix list is rendered below — the two orderings
    # may diverge; confirm this is intended.
    sort_equivalents = self.establish_equivalencies(self.books_by_title, key='title_sort')

    # Loop through the books by title
    # Generate one divRunningTag per initial letter for the purposes of
    # minimizing widows and orphans on readers that can handle large
    # <divs> styled as inline-block
    title_list = self.books_by_title
    if not self.use_series_prefix_in_titles_section:
        title_list = self.books_by_title_no_series_prefix
    drtc = 0
    divRunningTag = None
    for idx, book in enumerate(title_list):
        if self.letter_or_symbol(sort_equivalents[idx]) != current_letter:
            # Start a new letter: flush the previous letter's running div
            if drtc and divRunningTag is not None:
                divTag.insert(dtc, divRunningTag)
                dtc += 1
            divRunningTag = soup.new_tag('div')
            if dtc > 0:
                # First letter block carries no class — NOTE(review): looks
                # deliberate (no page-break styling before any content);
                # confirm against the stylesheet.
                divRunningTag['class'] = "initial_letter"
            drtc = 0
            pIndexTag = soup.new_tag("p")
            pIndexTag['class'] = "author_title_letter_index"
            aTag = soup.new_tag("a")
            current_letter = self.letter_or_symbol(sort_equivalents[idx])
            if current_letter == self.SYMBOLS:
                aTag['id'] = self.SYMBOLS + "_titles"
                pIndexTag.insert(0, aTag)
                pIndexTag.insert(1, NavigableString(self.SYMBOLS))
            else:
                aTag['id'] = self.generate_unicode_name(current_letter) + "_titles"
                pIndexTag.insert(0, aTag)
                pIndexTag.insert(1, NavigableString(sort_equivalents[idx]))
            divRunningTag.insert(dtc, pIndexTag)
            drtc += 1

        # Add books
        pBookTag = soup.new_tag("p")
        pBookTag['class'] = "line_item"
        ptc = 0

        self.insert_prefix(soup, pBookTag, ptc, book['prefix'])
        ptc += 1

        spanTag = soup.new_tag("span")
        spanTag['class'] = "entry"
        stc = 0

        # Link to book
        aTag = soup.new_tag("a")
        if self.opts.generate_descriptions:
            aTag['href'] = "book_%d.html" % (int(float(book['id'])))

        # Generate the title from the template
        args = self.generate_format_args(book)
        if book['series']:
            formatted_title = self.formatter.safe_format(
                self.by_titles_series_title_template, args,
                _('error in') + ' by_titles_series_title_template:',
                self.db.new_api.get_proxy_metadata(book['id']))
        else:
            formatted_title = self.formatter.safe_format(
                self.by_titles_normal_title_template, args,
                _('error in') + ' by_titles_normal_title_template:',
                self.db.new_api.get_proxy_metadata(book['id']))
        aTag.insert(0, NavigableString(formatted_title))
        spanTag.insert(stc, aTag)
        stc += 1

        # Dot
        spanTag.insert(stc, NavigableString(" · "))
        stc += 1

        # Link to author
        emTag = soup.new_tag("em")
        aTag = soup.new_tag("a")
        if self.opts.generate_authors:
            aTag['href'] = "{}.html#{}".format("ByAlphaAuthor", self.generate_author_anchor(book['author']))
        aTag.insert(0, NavigableString(book['author']))
        emTag.insert(0, aTag)
        spanTag.insert(stc, emTag)
        stc += 1

        pBookTag.insert(ptc, spanTag)
        ptc += 1

        if divRunningTag is not None:
            divRunningTag.insert(drtc, pBookTag)
            drtc += 1

    # Add the last divRunningTag to divTag
    if divRunningTag is not None:
        divTag.insert(dtc, divRunningTag)
        dtc += 1

    # Add the divTag to the body
    body.insert(btc, divTag)
    btc += 1

    # Write the volume to content_dir
    outfile_spec = "%s/ByAlphaTitle.html" % (self.content_dir)
    with open(outfile_spec, 'wb') as outfile:
        outfile.write(prettify(soup).encode('utf-8'))
    self.html_filelist_1.append("content/ByAlphaTitle.html")
def generate_html_description_header(self, book):
    """ Generate the HTML Description header from template.

    Create HTML Description from book metadata and template.
    Called by generate_html_descriptions()

    Args:
     book (dict): book metadata

    Return:
     soup (BeautifulSoup): HTML Description for book
    """
    from calibre.ebooks.oeb.base import XHTML_NS

    def _generate_html():
        # Populate the user template (catalog/template.xhtml) with the
        # locals computed below; decode any bytestrings first so that
        # str.format() cannot mix bytes and str.
        args = dict(
                    author=escape(author),
                    author_prefix=escape(author_prefix),
                    comments=comments,
                    css=css,
                    formats=formats,
                    genres=genres,
                    note_content=note_content,
                    note_source=note_source,
                    pubdate=pubdate,
                    publisher=publisher,
                    pubmonth=pubmonth,
                    pubyear=pubyear,
                    rating=rating,
                    series=escape(series),
                    series_index=series_index,
                    thumb=thumb,
                    title=escape(title),
                    title_str=escape(title_str),
                    xmlns=XHTML_NS,
                    )
        for k, v in iteritems(args):
            if isbytestring(v):
                args[k] = v.decode('utf-8')
        generated_html = P('catalog/template.xhtml',
                data=True).decode('utf-8').format(**args)
        generated_html = xml_replace_entities(generated_html)
        return BeautifulSoup(generated_html)

    # Generate the template arguments
    css = P('catalog/stylesheet.css', data=True).decode('utf-8')
    title_str = title = book['title']
    series = ''
    series_index = ''
    if book['series']:
        series = book['series']
        series_index = str(book['series_index'])
        if series_index.endswith('.0'):
            # Show integral series indices without the trailing '.0'
            series_index = series_index[:-2]

    # Author, author_prefix (read|reading|none symbol or missing symbol)
    author = book['author']

    if book['prefix']:
        author_prefix = book['prefix'] + ' ' + _("by ")
    elif self.opts.connected_kindle and book['id'] in self.bookmarked_books:
        author_prefix = self.SYMBOL_READING + ' ' + _("by ")
    else:
        author_prefix = _("by ")

    # Genres: rendered as a ' · '-separated list of links (links only when
    # genre pages are being generated and the tag maps to a known genre)
    genres = ''
    if 'genres' in book:
        _soup = BeautifulSoup('')
        genresTag = _soup.new_tag('p')
        gtc = 0
        for (i, tag) in enumerate(sorted(book.get('genres', []))):
            aTag = _soup.new_tag('a')
            if self.opts.generate_genres:
                try:
                    aTag['href'] = "Genre_%s.html" % self.genre_tags_dict[tag]
                except KeyError:
                    # Tag was excluded from the genre pages; emit plain text
                    pass
            aTag.insert(0, NavigableString(tag))
            genresTag.insert(gtc, aTag)
            gtc += 1
            if i < len(book['genres']) - 1:
                genresTag.insert(gtc, NavigableString(' &middot; '))
                gtc += 1
        genres = genresTag.decode_contents()

    # Formats: uppercased extensions, ' · '-separated
    formats = []
    if 'formats' in book:
        for format in sorted(book['formats']):
            formats.append(format.rpartition('.')[2].upper())
        formats = ' &middot; '.join(formats)

    # Date of publication
    if book['date']:
        pubdate = book['date']
        try:
            pubmonth, pubyear = pubdate.split()
        except Exception:
            pubmonth = pubyear = ''
    else:
        pubdate = pubyear = pubmonth = ''

    # Thumb
    _soup = BeautifulSoup('<html>', selfClosingTags=['img'])
    thumb = _soup.new_tag("img")
    if 'cover' in book and book['cover']:
        thumb['src'] = "../images/thumbnail_%d.jpg" % int(book['id'])
    else:
        thumb['src'] = "../images/thumbnail_default.jpg"
    thumb['alt'] = "cover thumbnail"

    # Publisher
    publisher = ' '
    if 'publisher' in book:
        publisher = book['publisher']

    # Rating
    # NOTE(review): assumes book['rating'] is always present — confirm
    # upstream always populates it.
    stars = int(book['rating']) // 2
    rating = ''
    if stars:
        star_string = self.SYMBOL_FULL_RATING * stars
        empty_stars = self.SYMBOL_EMPTY_RATING * (5 - stars)
        rating = f'{star_string}{empty_stars} <br/>'

    # Notes
    note_source = ''
    note_content = ''
    if 'notes' in book:
        note_source = book['notes']['source']
        note_content = book['notes']['content']

    # Comments
    comments = ''
    if book.get('description'):
        comments = book['description']

    # >>>> Populate the template <<<<
    soup = _generate_html()

    # >>>> Post-process the template <<<<
    body = soup.find('body')
    btc = 0
    # Insert the title anchor for inbound links
    aTag = soup.new_tag("a")
    aTag['id'] = "book%d" % int(book['id'])
    divTag = soup.new_tag('div')
    divTag.insert(0, aTag)
    body.insert(btc, divTag)
    btc += 1

    # Insert the link to the series or remove <a class="series">
    aTag = body.find('a', attrs={'class': 'series_id'})
    if aTag:
        if book['series']:
            if self.opts.generate_series:
                aTag['href'] = "{}.html#{}".format('BySeries', self.generate_series_anchor(book['series']))
        else:
            aTag.extract()

    # Insert the author link
    aTag = body.find('a', attrs={'class': 'author'})
    if self.opts.generate_authors and aTag:
        aTag['href'] = "{}.html#{}".format("ByAlphaAuthor",
                                    self.generate_author_anchor(book['author']))

    if publisher == ' ':
        # No publisher: blank the cell with a non-breaking space
        publisherTag = body.find('td', attrs={'class': 'publisher'})
        if publisherTag:
            publisherTag.contents[0].replaceWith(NBSP)

    if not genres:
        genresTag = body.find('p', attrs={'class': 'genres'})
        if genresTag:
            genresTag.extract()

    if not formats:
        formatsTag = body.find('p', attrs={'class': 'formats'})
        if formatsTag:
            formatsTag.extract()

    if note_content == '':
        tdTag = body.find('td', attrs={'class': 'notes'})
        if tdTag:
            tdTag.contents[0].replaceWith(NBSP)

    # Replace any remaining placeholder cells with non-breaking spaces
    emptyTags = body.findAll('td', attrs={'class': 'empty'})
    for mt in emptyTags:
        newEmptyTag = soup.new_tag('td')
        newEmptyTag.insert(0, NBSP)
        mt.replaceWith(newEmptyTag)

    return soup
def generate_html_descriptions(self):
    """ Generate Description HTML for each book.

    Loop over the catalog's books and write one Description HTML
    file per book into content_dir.

    Inputs:
     books_by_title (list)

    Output:
     (files): Description HTML for each book
    """
    self.update_progress_full_step(_("Descriptions HTML"))

    total = len(self.books_by_title)
    for title_num, title in enumerate(self.books_by_title):
        # Micro-progress: fraction of titles processed so far
        self.update_progress_micro_step(
            "%s %d of %d" % (_("Description HTML"), title_num, total),
            float(title_num * 100 / total) / 100)

        # Build the page from the user-customizable template
        soup = self.generate_html_description_header(title)

        # Serialize the book entry into content_dir
        out_name = "%s/book_%d.html" % (self.content_dir, int(title['id']))
        with open(out_name, 'wb') as outfile:
            outfile.write(prettify(soup).encode('utf-8'))
def generate_html_empty_header(self, title):
    """ Return a boilerplate HTML header.

    Generate an HTML header with document title.

    Args:
     title (str): document title

    Return:
     soup (BeautifulSoup): HTML header with document title inserted
    """
    skeleton = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:calibre="http://calibre.kovidgoyal.net/2009/metadata">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<link rel="stylesheet" type="text/css" href="stylesheet.css" media="screen" />
<title></title>
</head>
<body>
</body>
</html>
'''
    soup = BeautifulSoup(skeleton)
    # Populate the empty <title> element with the supplied title
    title_tag = soup.find('title')
    title_tag.insert(0, NavigableString(title))
    return soup
def generate_html_genre_header(self, title):
    """ Generate HTML header with initial body content.

    Start with a generic HTML header, then add the <p class="title">
    and <div class="authors"> placeholders to the body.

    Args:
     title (str): document title

    Return:
     soup (BeautifulSoup): HTML with initial <p> and <div> tags
    """
    soup = self.generate_html_empty_header(title)
    body_tag = soup.find('body')

    # Placeholder for the genre title
    title_p = soup.new_tag('p')
    title_p['class'] = 'title'
    body_tag.insert(0, title_p)

    # Placeholder for the author links
    authors_div = soup.new_tag('div')
    authors_div['class'] = 'authors'
    body_tag.insert(1, authors_div)
    return soup
def generate_masthead_image(self, out_path):
    """ Generate a Kindle masthead image.

    Generate a Kindle masthead image, used with Kindle periodical format.

    Args:
     out_path (str): path to write generated masthead image

    Input:
     opts.catalog_title (str): Title to render
     masthead_font: User-specified font preference (MOBI output option)

    Output:
     out_path (file): masthead image (GIF)
    """
    from calibre.ebooks.conversion.config import load_defaults
    MI_WIDTH = 600
    MI_HEIGHT = 60

    font_path = default_font = P('fonts/liberation/LiberationSerif-Bold.ttf')
    recs = load_defaults('mobi_output')
    masthead_font_family = recs.get('masthead_font', 'Default')

    if masthead_font_family != 'Default':
        from calibre.utils.fonts.scanner import font_scanner
        faces = font_scanner.fonts_for_family(masthead_font_family)
        if faces:
            font_path = faces[0]['path']

    # Fall back to the bundled font if the user's choice is unreadable
    if not font_path or not os.access(font_path, os.R_OK):
        font_path = default_font

    from PIL import Image, ImageDraw, ImageFont

    img = Image.new('RGB', (MI_WIDTH, MI_HEIGHT), 'white')
    draw = ImageDraw.Draw(img)
    try:
        font = ImageFont.truetype(font_path, 48)
    except Exception:
        self.opts.log.error(" Failed to load user-specified font '%s'" % font_path)
        font = ImageFont.truetype(default_font, 48)

    # Pillow's draw.text() requires str, not encoded bytes
    text = self.opts.catalog_title
    try:
        # Pillow >= 8: textbbox; textsize was removed in Pillow 10
        bleft, btop, bright, bbottom = draw.textbbox((0, 0), text, font=font)
        width, height = bright - bleft, bbottom - btop
    except AttributeError:
        width, height = draw.textsize(text, font=font)

    # Center the title on the canvas, clamping to the top-left corner
    left = max(int((MI_WIDTH - width) / 2), 0)
    top = max(int((MI_HEIGHT - height) / 2), 0)
    draw.text((left, top), text, fill=(0, 0, 0), font=font)
    with open(out_path, 'wb') as f:
        img.save(f, 'GIF')
def generate_ncx_header(self):
    """ Generate the basic NCX file.

    Generate the initial NCX, which is added to depending on included Sections.

    Inputs:
     None

    Updated:
     play_order (int)

    Outputs:
     ncx_root (file): NCX foundation
    """
    self.update_progress_full_step(_("NCX header"))
    header = '''
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" xmlns:calibre="http://calibre.kovidgoyal.net/2009/metadata" version="2005-1" xml:lang="en">
<navMap/>
</ncx>
'''
    root = self.ncx_root = safe_xml_fromstring(header)
    # The empty <navMap/> element; sections are appended to it later
    navMapTag = root[0]
    if self.generate_for_kindle_mobi:
        # Build a top-level navPoint for Kindle periodicals
        navPointTag = makeelement('navPoint', navMapTag, class_='periodical', id='title', playOrder=self.play_order)
        self.play_order += 1
        # Masthead image reference used by the Kindle periodical format
        makeelement('{http://calibre.kovidgoyal.net/2009/metadata}meta-img', navPointTag, id="mastheadImage", src="images/mastheadImage.gif")
        navLabelTag = makeelement('navLabel', navPointTag)
        makeelement('text', navLabelTag).text = self.opts.catalog_title
        # The periodical's start page: the first enabled section wins, in
        # this fixed order of precedence (authors > titles > series >
        # genres > recently added > descriptions).
        if self.opts.generate_authors:
            makeelement('content', navPointTag, src="content/ByAlphaAuthor.html")
        elif self.opts.generate_titles:
            makeelement('content', navPointTag, src="content/ByAlphaTitle.html")
        elif self.opts.generate_series:
            makeelement('content', navPointTag, src="content/BySeries.html")
        elif self.opts.generate_genres:
            makeelement('content', navPointTag, src="%s" % self.genres[0]['file'])
        elif self.opts.generate_recently_added:
            makeelement('content', navPointTag, src="content/ByDateAdded.html")
        elif self.opts.generate_descriptions:
            # Descriptions only
            makeelement('content', navPointTag, src="content/book_%d.html" % int(self.books_by_description[0]['id']))
def generate_ncx_section_header(self, section_id, section_header, content_src):
    """ Add a section-level navPoint to the NCX tree.

    For Kindle periodicals the section hangs off the top-level periodical
    navPoint; otherwise it is attached directly to the navMap.

    Args:
     section_id (str): id attribute for the new navPoint
     section_header (str): visible section label
     content_src (str): href the section points at

    Return:
     (Element): the newly created navPoint
    """
    root = self.ncx_root
    xpath_expr = ('//*[local-name()="navPoint"]' if self.generate_for_kindle_mobi
                  else '//*[local-name()="navMap"]')
    parent = root.xpath(xpath_expr)[0]

    navPointTag = makeelement('navPoint', parent, id=section_id, playOrder=self.play_order)
    if self.generate_for_kindle_mobi:
        navPointTag.set('class', 'section')
    self.play_order += 1

    label = makeelement('navLabel', navPointTag)
    makeelement('text', label).text = section_header
    makeelement('content', navPointTag, src=content_src)
    return navPointTag
def generate_ncx_subsection(self, navPointTag, section_id, section_text, content_src, cm_tags=None):
    """ Add an article-level navPoint beneath a section navPoint.

    Args:
     navPointTag (Element): parent section navPoint
     section_id (str): id attribute for the new navPoint
     section_text (str): visible article label
     content_src (str): href the article points at
     cm_tags (dict|None): calibre:meta name -> text pairs (Kindle only)
    """
    # Avoid a shared mutable default argument; treat None as "no meta tags"
    if cm_tags is None:
        cm_tags = {}
    navPointVolumeTag = makeelement('navPoint', navPointTag, id=section_id, playOrder=self.play_order)
    if self.generate_for_kindle_mobi:
        navPointVolumeTag.set('class', "article")
    self.play_order += 1
    navLabelTag = makeelement("navLabel", navPointVolumeTag)
    makeelement("text", navLabelTag).text = section_text
    makeelement("content", navPointVolumeTag, src=content_src)

    if self.generate_for_kindle_mobi:
        # Kindle periodicals consume extra metadata via calibre:meta elements
        for name, text in cm_tags.items():
            makeelement('{http://calibre.kovidgoyal.net/2009/metadata}meta', navPointVolumeTag, name=name).text = text
def generate_ncx_descriptions(self, tocTitle):
    """ Add Descriptions to the basic NCX file.

    Generate the Descriptions NCX content, add to self.ncx_soup.

    Inputs:
     books_by_author (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    section_header = '%s [%d]' % (tocTitle, len(self.books_by_description))
    if self.generate_for_kindle_mobi:
        # Kindle section labels omit the book count
        section_header = tocTitle
    navPointTag = self.generate_ncx_section_header('bydescription-ID', section_header, "content/book_%d.html" % int(self.books_by_description[0]['id']))

    self.update_progress_full_step(_("NCX for descriptions"))

    # --- Construct the 'Descriptions' section ---
    # Add the section navPoint
    # Loop over the titles
    for book in self.books_by_description:
        sec_id = "book%dID" % int(book['id'])
        if book['series']:
            series_index = str(book['series_index'])
            if series_index.endswith('.0'):
                # Drop the trailing '.0' from whole-number series indices
                series_index = series_index[:-2]
            if self.generate_for_kindle_mobi:
                # Don't include Author for Kindle
                sec_text = self.format_ncx_text('%s (%s [%s])' %
                                (book['title'], book['series'], series_index), dest='title')
            else:
                # Include Author for non-Kindle
                sec_text = self.format_ncx_text('%s (%s [%s]) · %s ' %
                                (book['title'], book['series'], series_index, book['author']), dest='title')
        else:
            if self.generate_for_kindle_mobi:
                # Don't include Author for Kindle
                title_str = self.format_ncx_text('%s' % (book['title']), dest='title')
                if self.opts.connected_kindle and book['id'] in self.bookmarked_books:
                    # A trailing asterisk flags a bookmarked book. The string
                    # below is a retained sketch of a percent-read dot gauge
                    # that was never enabled (no-op expression statement).
                    '''
                    dots = int((book['percent_read'] + 5)/10)
                    dot_string = '+' * dots
                    empty_dots = '-' * (10 - dots)
                    title_str += ' %s%s' % (dot_string,empty_dots)
                    '''
                    title_str += '*'
                sec_text = title_str
            else:
                # Include Author for non-Kindle
                sec_text = self.format_ncx_text('%s · %s' %
                                (book['title'], book['author']), dest='title')

        content_src = "content/book_%d.html#book%d" % (int(book['id']), int(book['id']))

        cm_tags = {}
        if book['date']:
            # NOTE(review): assumes book['date'] splits into at least two
            # tokens (e.g. 'June 2010') and keeps only the year — confirm
            # the upstream date formatting.
            navStr = '{} | {}'.format(self.format_ncx_text(book['author'], dest='author'),
                    book['date'].split()[1])
        else:
            navStr = '%s' % (self.format_ncx_text(book['author'], dest='author'))
        if 'tags' in book and len(book['tags']):
            navStr = self.format_ncx_text(navStr + ' | ' + ' · '.join(sorted(book['tags'])), dest='author')
        cm_tags['author'] = navStr

        # Add the description tag
        if book['short_description']:
            cm_tags['description'] = self.format_ncx_text(book['short_description'], dest='description')
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_ncx_by_series(self, tocTitle):
    """ Add Series to the basic NCX file.

    Generate the Series NCX content, add to self.ncx_soup.

    Inputs:
     books_by_series (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    self.update_progress_full_step(_("NCX for Series"))

    def _add_to_series_by_letter(current_series_list):
        # Flatten the accumulated series names into one NCX description blob
        current_series_list = " • ".join(current_series_list)
        current_series_list = self.format_ncx_text(current_series_list, dest="description")
        series_by_letter.append(current_series_list)

    # --- Construct the 'Books By Series' section ---
    section_header = '%s [%d]' % (tocTitle, len(self.all_series))
    if self.generate_for_kindle_mobi:
        section_header = tocTitle
    output = "BySeries"
    navPointTag = self.generate_ncx_section_header('byseries-ID', section_header, "content/%s.html#section_start" % (output))

    series_by_letter = []
    # Establish initial letter equivalencies
    sort_equivalents = self.establish_equivalencies(self.books_by_series, key='series_sort')

    # Loop over the series titles, find start of each letter, add description_preview_count books
    # Special switch for using different title list
    title_list = self.books_by_series

    # Prime the pump
    current_letter = self.letter_or_symbol(sort_equivalents[0])
    title_letters = [current_letter]
    current_series_list = []
    current_series = ""
    for idx, book in enumerate(title_list):
        sort_title = self.generate_sort_title(book['series'])
        # NOTE(review): the result of this call is discarded — looks like
        # dead code left over from a refactor; confirm before removing.
        self.establish_equivalencies([sort_title])[0]
        if self.letter_or_symbol(sort_equivalents[idx]) != current_letter:
            # Save the old list
            _add_to_series_by_letter(current_series_list)
            # Start the new list
            current_letter = self.letter_or_symbol(sort_equivalents[idx])
            title_letters.append(current_letter)
            current_series = book['series']
            current_series_list = [book['series']]
        else:
            # Cap the preview at description_clip entries; skip repeats of
            # the same series name
            if len(current_series_list) < self.opts.description_clip and \
                    book['series'] != current_series:
                current_series = book['series']
                current_series_list.append(book['series'])

    # Add the last book list
    _add_to_series_by_letter(current_series_list)

    # Add *article* entries for each populated series title letter
    for (i, books) in enumerate(series_by_letter):
        sec_id = "%sSeries-ID" % (title_letters[i].upper())
        if len(title_letters[i]) > 1:
            fmt_string = _("Series beginning with %s")
        else:
            fmt_string = _("Series beginning with '%s'")
        # NOTE(review): both branches of this conditional expression yield
        # the same value; the ternary is redundant.
        sec_text = fmt_string % (title_letters[i] if len(title_letters[i]) > 1 else title_letters[i])
        if title_letters[i] == self.SYMBOLS:
            content_src = f"content/{output}.html#{self.SYMBOLS}_series"
        else:
            content_src = f"content/{output}.html#{self.generate_unicode_name(title_letters[i])}_series"
        cm_tags = {'description': self.format_ncx_text(books, dest='description')}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_ncx_by_title(self, tocTitle):
    """ Add Titles to the basic NCX file.

    Generate the Titles NCX content, add to self.ncx_soup.

    Inputs:
     books_by_title (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    self.update_progress_full_step(_("NCX for Titles"))

    def _add_to_books_by_letter(current_book_list):
        # Flatten the accumulated titles into one NCX description blob
        current_book_list = " • ".join(current_book_list)
        current_book_list = self.format_ncx_text(current_book_list, dest="description")
        books_by_letter.append(current_book_list)

    # --- Construct the 'Books By Title' section ---
    section_header = '%s [%d]' % (tocTitle, len(self.books_by_title))
    if self.generate_for_kindle_mobi:
        section_header = tocTitle
    output = "ByAlphaTitle"
    navPointTag = self.generate_ncx_section_header("byalphatitle-ID", section_header, "content/%s.html#section_start" % (output))

    books_by_letter = []
    # Establish initial letter equivalencies
    sort_equivalents = self.establish_equivalencies(self.books_by_title, key='title_sort')

    # Loop over the titles, find start of each letter, add description_preview_count books
    # Special switch for using different title list
    if self.use_series_prefix_in_titles_section:
        title_list = self.books_by_title
    else:
        title_list = self.books_by_title_no_series_prefix

    # Prime the list
    current_letter = self.letter_or_symbol(sort_equivalents[0])
    title_letters = [current_letter]
    current_book_list = []
    current_book = ""
    for idx, book in enumerate(title_list):
        # if self.letter_or_symbol(book['title_sort'][0]) != current_letter:
        if self.letter_or_symbol(sort_equivalents[idx]) != current_letter:
            # Save the old list
            _add_to_books_by_letter(current_book_list)
            # Start the new list
            # current_letter = self.letter_or_symbol(book['title_sort'][0])
            current_letter = self.letter_or_symbol(sort_equivalents[idx])
            title_letters.append(current_letter)
            current_book = book['title']
            current_book_list = [book['title']]
        else:
            # Cap the preview at description_clip entries; skip consecutive
            # duplicate titles
            if len(current_book_list) < self.opts.description_clip and \
                    book['title'] != current_book:
                current_book = book['title']
                current_book_list.append(book['title'])

    # Add the last book list
    _add_to_books_by_letter(current_book_list)

    # Add *article* entries for each populated title letter
    for (i, books) in enumerate(books_by_letter):
        sec_id = "%sTitles-ID" % (title_letters[i].upper())
        if len(title_letters[i]) > 1:
            fmt_string = _("Titles beginning with %s")
        else:
            fmt_string = _("Titles beginning with '%s'")
        # NOTE(review): both branches of this conditional expression yield
        # the same value; the ternary is redundant.
        sec_text = fmt_string % (title_letters[i] if len(title_letters[i]) > 1 else title_letters[i])
        if title_letters[i] == self.SYMBOLS:
            content_src = f"content/{output}.html#{self.SYMBOLS}_titles"
        else:
            content_src = f"content/{output}.html#{self.generate_unicode_name(title_letters[i])}_titles"
        cm_tags = {'description': self.format_ncx_text(books, dest='description')}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_ncx_by_author(self, tocTitle):
    """ Add Authors to the basic NCX file.

    Generate the Authors NCX content, add to self.ncx_soup.

    Inputs:
     authors (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    self.update_progress_full_step(_("NCX for Authors"))

    def _add_to_author_list(current_author_list, current_letter):
        # Flatten the accumulated author names into one NCX description blob
        current_author_list = " • ".join(current_author_list)
        current_author_list = self.format_ncx_text(current_author_list, dest="description")
        master_author_list.append((current_author_list, current_letter))

    HTML_file = "content/ByAlphaAuthor.html"

    # --- Construct the 'Books By Author' *section* ---
    file_ID = "%s" % tocTitle.lower()
    file_ID = file_ID.replace(" ", "")
    section_header = '%s [%d]' % (tocTitle, len(self.individual_authors))
    if self.generate_for_kindle_mobi:
        section_header = tocTitle
    navPointTag = self.generate_ncx_section_header("%s-ID" % file_ID, section_header, "%s#section_start" % HTML_file)

    # Create an NCX article entry for each populated author index letter
    # Loop over the sorted_authors list, find start of each letter,
    # add description_preview_count artists
    # self.authors[0]:friendly [1]:author_sort [2]:book_count
    # (<friendly name>, author_sort, book_count)

    # Need to extract a list of author_sort, generate sort_equivalents from that
    sort_equivalents = self.establish_equivalencies([x[1] for x in self.authors])

    master_author_list = []
    # Prime the pump
    current_letter = self.letter_or_symbol(sort_equivalents[0])
    current_author_list = []
    for idx, author in enumerate(self.authors):
        if self.letter_or_symbol(sort_equivalents[idx]) != current_letter:
            # Save the old list
            _add_to_author_list(current_author_list, current_letter)
            # Start the new list
            current_letter = self.letter_or_symbol(sort_equivalents[idx])
            current_author_list = [author[0]]
        else:
            # Cap the preview at description_clip entries
            if len(current_author_list) < self.opts.description_clip:
                current_author_list.append(author[0])

    # Add the last author list
    _add_to_author_list(current_author_list, current_letter)

    # Add *article* entries for each populated author initial letter
    # master_author_list{}: [0]:author list [1]:Initial letter
    for authors_by_letter in master_author_list:
        sec_id = "%sauthors-ID" % (authors_by_letter[1])
        if authors_by_letter[1] == self.SYMBOLS:
            fmt_string = _("Authors beginning with %s")
        else:
            fmt_string = _("Authors beginning with '%s'")
        sec_text = fmt_string % authors_by_letter[1]
        if authors_by_letter[1] == self.SYMBOLS:
            content_src = f"{HTML_file}#{authors_by_letter[1]}_authors"
        else:
            content_src = f"{HTML_file}#{self.generate_unicode_name(authors_by_letter[1])}_authors"
        cm_tags = {'description': authors_by_letter[0]}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_ncx_by_date_added(self, tocTitle):
    """ Add Recently Added to the basic NCX file.

    Generate the Recently Added NCX content, add to self.ncx_soup.

    Inputs:
     books_by_date_range (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    self.update_progress_full_step(_("NCX for Recently Added"))

    def _add_to_master_month_list(current_titles_list):
        # Flatten the accumulated titles into one (blob, date, count) tuple
        book_count = len(current_titles_list)
        current_titles_list = " • ".join(current_titles_list)
        current_titles_list = self.format_ncx_text(current_titles_list, dest='description')
        master_month_list.append((current_titles_list, current_date, book_count))

    def _add_to_master_date_range_list(current_titles_list):
        # Flatten the accumulated titles into one (blob, range, count) tuple
        book_count = len(current_titles_list)
        current_titles_list = " • ".join(current_titles_list)
        current_titles_list = self.format_ncx_text(current_titles_list, dest='description')
        master_date_range_list.append((current_titles_list, date_range, book_count))

    HTML_file = "content/ByDateAdded.html"

    # --- Construct the 'Recently Added' *section* ---
    file_ID = "%s" % tocTitle.lower()
    file_ID = file_ID.replace(" ", "")
    navPointTag = self.generate_ncx_section_header("%s-ID" % file_ID, tocTitle, "%s#section_start" % HTML_file)

    # Create an NCX article entry for each date range
    current_titles_list = []
    master_date_range_list = []
    today = datetime.datetime.now()
    today_time = datetime.datetime(today.year, today.month, today.day)
    for (i, date) in enumerate(self.DATE_RANGE):
        if i:
            date_range = '%d to %d days ago' % (self.DATE_RANGE[i - 1], self.DATE_RANGE[i])
        else:
            date_range = 'Last %d days' % (self.DATE_RANGE[i])
        date_range_limit = self.DATE_RANGE[i]
        # books_by_date_range is newest-first, so stop at the first book
        # older than the current range limit
        for book in self.books_by_date_range:
            book_time = datetime.datetime(book['timestamp'].year, book['timestamp'].month, book['timestamp'].day)
            if (today_time - book_time).days <= date_range_limit:
                # print "generate_ncx_by_date_added: %s added %d days ago" % (book['title'], (today_time-book_time).days)
                current_titles_list.append(book['title'])
            else:
                break
        if current_titles_list:
            _add_to_master_date_range_list(current_titles_list)
        # NOTE(review): reseeds the next range with the loop-leaked `book`
        # (the first one that fell outside this range) — presumably
        # intentional so that book is counted in the next bucket; confirm.
        current_titles_list = [book['title']]

    # Add *article* entries for each populated date range
    # master_date_range_list{}: [0]:titles list [1]:datestr
    for books_by_date_range in master_date_range_list:
        sec_id = "%s-ID" % books_by_date_range[1].replace(' ', '')
        sec_text = books_by_date_range[1]
        content_src = "{}#bda_{}".format(HTML_file,
                books_by_date_range[1].replace(' ', ''))
        navStr = '%d titles' % books_by_date_range[2] if books_by_date_range[2] > 1 else \
                '%d title' % books_by_date_range[2]
        cm_tags = {'description': books_by_date_range[0], 'author': navStr}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)

    # Create an NCX article entry for each populated month
    # Loop over the booksByDate list, find start of each month,
    # add description_preview_count titles
    # master_month_list(list,date,count)
    current_titles_list = []
    master_month_list = []
    current_date = self.books_by_month[0]['timestamp']

    for book in self.books_by_month:
        if book['timestamp'].month != current_date.month or \
                book['timestamp'].year != current_date.year:
            # Save the old lists
            _add_to_master_month_list(current_titles_list)
            # Start the new list
            current_date = book['timestamp'].date()
            current_titles_list = [book['title']]
        else:
            current_titles_list.append(book['title'])

    # Add the last month list
    _add_to_master_month_list(current_titles_list)

    # Add *article* entries for each populated month
    # master_months_list{}: [0]:titles list [1]:date
    for books_by_month in master_month_list:
        datestr = strftime('%B %Y', books_by_month[1].timetuple())
        sec_id = f"bda_{books_by_month[1].year}-{books_by_month[1].month}-ID"
        sec_text = datestr
        content_src = "{}#bda_{}-{}".format(HTML_file,
                books_by_month[1].year, books_by_month[1].month)
        navStr = '%d titles' % books_by_month[2] if books_by_month[2] > 1 else \
                '%d title' % books_by_month[2]
        cm_tags = {'description': books_by_month[0], 'author': navStr}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_ncx_by_date_read(self, tocTitle):
    """ Add By Date Read to the basic NCX file.

    Generate the By Date Read NCX content (Kindle only), add to self.ncx_soup.

    Inputs:
     bookmarked_books_by_date_read (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    def _add_to_master_day_list(current_titles_list):
        # Flatten the accumulated titles into one (blob, date, count) tuple
        book_count = len(current_titles_list)
        current_titles_list = " • ".join(current_titles_list)
        current_titles_list = self.format_ncx_text(current_titles_list, dest='description')
        master_day_list.append((current_titles_list, current_date, book_count))

    def _add_to_master_date_range_list(current_titles_list):
        # Flatten the accumulated titles into one (blob, range, count) tuple
        book_count = len(current_titles_list)
        current_titles_list = " • ".join(current_titles_list)
        current_titles_list = self.format_ncx_text(current_titles_list, dest='description')
        master_date_range_list.append((current_titles_list, date_range, book_count))

    self.update_progress_full_step(_("NCX for Recently Read"))

    # Nothing to do if no bookmarked books were found on the device
    if not self.bookmarked_books_by_date_read:
        return

    HTML_file = "content/ByDateRead.html"

    # --- Construct the 'Recently Read' *section* ---
    file_ID = "%s" % tocTitle.lower()
    file_ID = file_ID.replace(" ", "")
    navPointTag = self.generate_ncx_section_header("%s-ID" % file_ID, tocTitle, "%s#section_start" % HTML_file)

    # Create an NCX article entry for each date range
    current_titles_list = []
    master_date_range_list = []
    today = datetime.datetime.now()
    today_time = datetime.datetime(today.year, today.month, today.day)
    for (i, date) in enumerate(self.DATE_RANGE):
        if i:
            date_range = '%d to %d days ago' % (self.DATE_RANGE[i - 1], self.DATE_RANGE[i])
        else:
            date_range = 'Last %d days' % (self.DATE_RANGE[i])
        date_range_limit = self.DATE_RANGE[i]
        # List is newest-first; stop at the first bookmark older than the
        # current range limit
        for book in self.bookmarked_books_by_date_read:
            bookmark_time = utcfromtimestamp(book['bookmark_timestamp'])
            if (today_time - bookmark_time).days <= date_range_limit:
                # print "generate_ncx_by_date_added: %s added %d days ago" % (book['title'], (today_time-book_time).days)
                current_titles_list.append(book['title'])
            else:
                break
        if current_titles_list:
            _add_to_master_date_range_list(current_titles_list)
        # NOTE(review): reseeds the next range with the loop-leaked `book`
        # (the first one that fell outside this range) — mirrors
        # generate_ncx_by_date_added; confirm intent.
        current_titles_list = [book['title']]

    # Create an NCX article entry for each populated day
    # Loop over the booksByDate list, find start of each month,
    # add description_preview_count titles
    # master_month_list(list,date,count)
    current_titles_list = []
    master_day_list = []
    current_date = utcfromtimestamp(self.bookmarked_books_by_date_read[0]['bookmark_timestamp'])

    for book in self.bookmarked_books_by_date_read:
        bookmark_time = utcfromtimestamp(book['bookmark_timestamp'])
        if bookmark_time.day != current_date.day or \
                bookmark_time.month != current_date.month or \
                bookmark_time.year != current_date.year:
            # Save the old lists
            _add_to_master_day_list(current_titles_list)
            # Start the new list
            current_date = utcfromtimestamp(book['bookmark_timestamp']).date()
            current_titles_list = [book['title']]
        else:
            current_titles_list.append(book['title'])

    # Add the last day list
    _add_to_master_day_list(current_titles_list)

    # Add *article* entries for each populated day
    # master_day_list{}: [0]:titles list [1]:date
    for books_by_day in master_day_list:
        datestr = strftime('%A, %B %d', books_by_day[1].timetuple())
        sec_id = "bdr_{}-{}-{}ID".format(books_by_day[1].year,
                                            books_by_day[1].month,
                                            books_by_day[1].day)
        sec_text = datestr
        content_src = "{}#bdr_{}-{}-{}".format(HTML_file,
                                                books_by_day[1].year,
                                                books_by_day[1].month,
                                                books_by_day[1].day)
        navStr = '%d titles' % books_by_day[2] if books_by_day[2] > 1 else \
                '%d title' % books_by_day[2]
        cm_tags = {'description': books_by_day[0], 'author': navStr}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_ncx_by_genre(self, tocTitle):
    """ Add Genres to the basic NCX file.

    Generate the Genre NCX content, add to self.ncx_soup.

    Inputs:
     genres (list)

    Updated:
     play_order (int)

    Outputs:
     ncx_soup (file): updated
    """
    self.update_progress_full_step(_("NCX for genres"))

    if not len(self.genres):
        self.opts.log.warn(" No genres found\n"
                           " No Genre section added to Catalog")
        return

    # --- Construct the 'Books By Genre' *section* ---
    file_ID = "%s" % tocTitle.lower()
    file_ID = file_ID.replace(" ", "")
    section_header = '%s [%d]' % (tocTitle, len(self.genres))
    if self.generate_for_kindle_mobi:
        section_header = tocTitle
    navPointTag = self.generate_ncx_section_header("%s-ID" % file_ID, section_header, "content/Genre_%s.html#section_start" % self.genres[0]['tag'])

    for genre in self.genres:
        # Add an article for each genre
        sec_id = "genre-%s-ID" % genre['tag']

        # Map the normalized tag back to its friendly (display) form.
        # Previously this relied on the for-loop variable leaking out of the
        # search loop, which silently reused a stale friendly_tag when no
        # match was found; bind both names explicitly instead.
        friendly_tag = normalized_tag = None
        for candidate in self.genre_tags_dict:
            if self.genre_tags_dict[candidate] == genre['tag']:
                friendly_tag = candidate
                normalized_tag = genre['tag']
                break
        if friendly_tag is None:
            # No friendly form known; fall back to the normalized tag so the
            # NCX entry is still generated
            friendly_tag = normalized_tag = genre['tag']

        sec_text = self.format_ncx_text(NavigableString(friendly_tag), dest='description')
        content_src = f"content/Genre_{normalized_tag}.html#Genre_{normalized_tag}"

        # Author range spanned by this genre's titles
        if len(genre['titles_spanned']) > 1:
            author_range = "{} - {}".format(genre['titles_spanned'][0][0], genre['titles_spanned'][1][0])
        else:
            author_range = "%s" % (genre['titles_spanned'][0][0])

        # Sort titles for the description preview (the key was previously a
        # duplicated tuple of the same sort string — a no-op duplication)
        titles = sorted((title['title'] for title in genre['books']),
                        key=self.generate_sort_title)
        titles_list = self.generate_short_description(" • ".join(titles), dest="description")

        cm_tags = {'author': author_range, 'description': self.format_ncx_text(titles_list, dest='description')}
        self.generate_ncx_subsection(navPointTag, sec_id, sec_text, content_src, cm_tags)
def generate_opf(self):
    """ Generate the OPF file.

    Start with header template, construct manifest, spine and guide.

    Inputs:
     genres (list)
     html_filelist_1 (list)
     html_filelist_2 (list)
     thumbs (list)

    Updated:
     play_order (int)

    Outputs:
     opts.basename + '.opf' (file): written
    """
    self.update_progress_full_step(_("Generating OPF"))
    lang = get_lang() or 'en'
    # OPF 2.0 wants a two-letter language code where one exists
    if lang_as_iso639_1(lang):
        lang = lang_as_iso639_1(lang)

    header = '''\
<package xmlns="http://www.idpf.org/2007/opf" version="2.0" unique-identifier="calibre_id">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:calibre="http://calibre.kovidgoyal.net/2009/metadata" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<dc:title>{title}</dc:title>
<dc:creator>{creator}</dc:creator>
<dc:language>{lang}</dc:language>
<meta name="calibre:publication_type" content="{pt}"/>
</metadata>
<manifest></manifest>
<spine toc="ncx"></spine>
<guide></guide>
</package>
'''.format(
        title=prepare_string_for_xml(self.opts.catalog_title),
        creator=prepare_string_for_xml(self.opts.creator),
        lang=prepare_string_for_xml(lang),
        pt="periodical:default" if self.generate_for_kindle_mobi else ""
    )
    root = safe_xml_fromstring(header)
    manifest = root.xpath('//*[local-name()="manifest"]')[0]
    spine = root.xpath('//*[local-name()="spine"]')[0]
    guide = root.xpath('//*[local-name()="guide"]')[0]

    # Create the OPF tags
    def manifest_item(id, href, media_type, add_to_spine=False):
        # Add an <item> to the manifest; optionally an <itemref> to the spine
        ans = makeelement('item', manifest, id=id, href=href, media_type=media_type)
        if add_to_spine:
            makeelement('itemref', spine, idref=id)
        return ans

    manifest_item(id='ncx', href='%s.ncx' % self.opts.basename, media_type="application/x-dtbncx+xml")
    manifest_item(id='stylesheet', href=self.stylesheet, media_type='text/css')

    if self.generate_for_kindle_mobi:
        manifest_item('mastheadimage-image', "images/mastheadImage.gif", 'image/gif')

    # Write the thumbnail images, descriptions to the manifest
    if self.opts.generate_descriptions:
        for thumb in self.thumbs:
            end = thumb.find('.jpg')
            manifest_item("%s-image" % thumb[:end], "images/%s" % (thumb), 'image/jpeg')

    # Add html_files to manifest and spine
    for file in self.html_filelist_1:
        # By Author, By Title, By Series,
        start = file.find('/') + 1
        end = file.find('.')
        manifest_item(file[start:end].lower(), file, "application/xhtml+xml", add_to_spine=True)

    # Add genre files to manifest and spine
    for genre in self.genres:
        start = genre['file'].find('/') + 1
        end = genre['file'].find('.')
        manifest_item(genre['file'][start:end].lower(), genre['file'], "application/xhtml+xml", add_to_spine=True)

    for file in self.html_filelist_2:
        # By Date Added, By Date Read
        start = file.find('/') + 1
        end = file.find('.')
        manifest_item(file[start:end].lower(), file, "application/xhtml+xml", add_to_spine=True)

    for book in self.books_by_description:
        # manifest
        manifest_item("book%d" % int(book['id']), "content/book_%d.html" % int(book['id']), "application/xhtml+xml", add_to_spine=True)

    # Guide
    if self.generate_for_kindle_mobi:
        makeelement('reference', guide, type='masthead', title='masthead-image', href='images/mastheadImage.gif')

    # Write the OPF file
    # Tuple expression used purely for its side effects: both calls mutate
    # `root` in place (pretty-printing), nothing is returned
    pretty_opf(root), pretty_xml_tree(root)
    output = etree.tostring(root, encoding='utf-8')
    with open(f"{self.catalog_path}/{self.opts.basename}.opf", 'wb') as outfile:
        outfile.write(output.strip())
def generate_rating_string(self, book):
    """ Generate rating string for Descriptions.

    Starting with database rating (0-10), return 5 stars, with 0-5 filled,
    balance empty.

    Args:
     book (dict): book metadata

    Return:
     rating (str): 5 stars, 1-5 solid, balance empty. Empty str for no rating.
    """
    rating = ''
    try:
        if 'rating' in book:
            # Database stores 0-10 (half-star resolution); display uses 0-5
            stars = int(book['rating']) // 2
            if stars:
                star_string = self.SYMBOL_FULL_RATING * stars
                empty_stars = self.SYMBOL_EMPTY_RATING * (5 - stars)
                rating = f'{star_string}{empty_stars}'
    except (TypeError, ValueError):
        # Rating could be None (or otherwise non-numeric); fall through to ''
        pass
    return rating
def generate_series_anchor(self, series):
    """ Generate legal XHTML anchor for series names.

    Flatten series name to ascii_legal text.

    Args:
     series (str): series name

    Return:
     (str): asciized version of series name
    """
    # Generate a legal XHTML id/href string: strip non-word characters and
    # lowercase; symbol-led series get a 'symbol_' prefix instead of the
    # asciized name
    if self.letter_or_symbol(series) == self.SYMBOLS:
        stem = 'symbol_%s' % re.sub(r'\W', '', series).lower()
    else:
        stem = re.sub(r'\W', '', ascii_text(series)).lower()
    return '%s_series' % stem
def generate_short_description(self, description, dest=None):
    """ Generate a truncated version of the supplied string.

    Given a string and NCX destination, truncate string to length specified
    in self.opts.

    Args:
     description (str): string to truncate
     dest (str): NCX destination
       description  NCX summary
       title        NCX title
       author       NCX author

    Return:
     (str): truncated description (None if description is empty)

    Raises:
     RuntimeError: if dest is not a recognized destination
    """
    def _truncate(description, limit):
        # Accumulate whole words until the running length exceeds limit,
        # then terminate with an ellipsis. Each word carries a trailing
        # space, so a truncated result ends with ' ...'.
        short_description = ""
        for word in description.split():
            short_description += word + " "
            if len(short_description) > limit:
                short_description += "..."
                return short_description
        # Whitespace normalization brought the text under the limit
        return short_description

    if not description:
        return None

    if dest == 'title':
        # No truncation for titles, let the device deal with it
        return description
    elif dest == 'author':
        if self.opts.author_clip and len(description) < self.opts.author_clip:
            return description
        return _truncate(description, self.opts.author_clip)
    elif dest == 'description':
        if self.opts.description_clip and len(description) < self.opts.description_clip:
            return description
        return _truncate(description, self.opts.description_clip)
    else:
        print(" returning description with unspecified destination '%s'" % description)
        raise RuntimeError
def generate_sort_title(self, title):
    """ Generates a sort string from title.

    Based on trunk title_sort algorithm, but also accommodates series
    numbers by padding with leading zeroes to force proper numeric
    sorting. Option to sort numbers alphabetically, e.g. '1942' sorts
    as 'Nineteen forty two'.

    Args:
     title (str): title to normalize

    Return:
     (str): sort string
    """
    from calibre.ebooks.metadata import title_sort
    from calibre.library.catalogs.utils import NumberToText

    # Strip stop words
    title_words = title_sort(title).split()
    translated = []

    for (i, word) in enumerate(title_words):
        # Leading numbers optionally translated to text equivalent
        # Capitalize leading sort word
        if i == 0:
            # *** Keep this code in case we need to restore numbers_as_text ***
            # Dead branch kept deliberately for the disabled numbers_as_text
            # feature — do not remove without checking the option's history.
            if False:
                # if self.opts.numbers_as_text and re.match('[0-9]+',word[0]):
                translated.append(NumberToText(word).text.capitalize())
            else:
                if re.match('[0-9]+', word[0]):
                    # Zero-pad leading numbers to 10 columns so numeric
                    # titles sort numerically under a lexicographic sort.
                    word = word.replace(',', '')
                    suffix = re.search(r'[\D]', word)
                    if suffix:
                        word = f'{float(word[:suffix.start()]):10.0f}{word[suffix.start():]}'
                    else:
                        word = '%10.0f' % (float(word))

                # If leading char > 'A', insert symbol as leading forcing lower sort
                # '/' sorts below numbers, g
                if self.letter_or_symbol(word[0]) != word[0]:
                    if word[0] > 'A' or (ord('9') < ord(word[0]) < ord('A')):
                        translated.append('/')
                translated.append(capitalize(word))
        else:
            # Non-leading words: pad numbers the same way, no capitalization.
            if re.search('[0-9]+', word[0]):
                word = word.replace(',', '')
                suffix = re.search(r'[\D]', word)
                if suffix:
                    word = f'{float(word[:suffix.start()]):10.0f}{word[suffix.start():]}'
                else:
                    word = '%10.0f' % (float(word))
            translated.append(word)
    return ' '.join(translated)
def generate_thumbnail(self, title, image_dir, thumb_file):
    """ Create thumbnail of cover or return previously cached thumb.

    Test thumb archive for currently cached cover. Return cached version, or create
    and cache new version. Cache entries are keyed on uuid + CRC32 of the raw
    cover bytes, so a changed cover invalidates the cached thumb automatically.

    Args:
     title (dict): book metadata
     image_dir (str): directory to write thumb data to
     thumb_file (str): filename to save thumb as

    Output:
     (file): thumb written to /images
     (archive): current thumb archived under cover crc
    """
    from calibre.utils.img import scale_image

    def _open_archive(mode='r'):
        # Returns None on failure; callers must check.
        try:
            return ZipFile(self.thumbs_path, mode=mode, allowZip64=True)
        except:
            # occurs under windows if the file is opened by another
            # process
            pass

    # Generate crc for current cover
    with open(title['cover'], 'rb') as f:
        data = f.read()
    cover_crc = hex(zlib.crc32(data))

    # Test cache for uuid
    # NOTE(review): indentation reconstructed — the generate/cache steps are
    # nested under `if uuid:` (otherwise `zf` below would be unbound); this
    # implies no thumbnail is produced for a book without a uuid. Confirm
    # against upstream.
    uuid = title.get('uuid')
    if uuid:
        zf = _open_archive()
        if zf is not None:
            with zf:
                try:
                    zf.getinfo(uuid + cover_crc)
                except:
                    pass
                else:
                    # uuid found in cache with matching crc
                    thumb_data = zf.read(uuid + cover_crc)
                    with open(os.path.join(image_dir, thumb_file), 'wb') as f:
                        f.write(thumb_data)
                    return

        # Save thumb for catalog. If invalid data, error returns to generate_thumbnails()
        thumb_data = scale_image(data,
                width=self.thumb_width, height=self.thumb_height)[-1]
        with open(os.path.join(image_dir, thumb_file), 'wb') as f:
            f.write(thumb_data)

        # Save thumb to archive
        if zf is not None:
            # Ensure that the read succeeded
            # If we failed to open the zip file for reading,
            # we dont know if it contained the thumb or not
            zf = _open_archive('a')
            if zf is not None:
                with zf:
                    zf.writestr(uuid + cover_crc, thumb_data)
def generate_thumbnails(self):
    """ Generate a thumbnail cover for each book.

    Generate or retrieve a thumbnail for each cover. If nonexistent or faulty
    cover data, substitute default cover. Checks for updated default cover.
    At completion, writes self.opts.thumb_width to archive.

    Inputs:
     books_by_title (list): books to catalog

    Output:
     thumbs (list): list of referenced thumbnails
    """
    self.update_progress_full_step(_("Thumbnails"))
    thumbs = ['thumbnail_default.jpg']
    image_dir = "%s/images" % self.catalog_path

    for (i, title) in enumerate(self.books_by_title):
        # Update status
        self.update_progress_micro_step("%s %d of %d" %
            (_("Thumbnail"), i, len(self.books_by_title)),
                i / float(len(self.books_by_title)))

        thumb_file = 'thumbnail_%d.jpg' % int(title['id'])
        thumb_generated = True
        valid_cover = True

        try:
            self.generate_thumbnail(title, image_dir, thumb_file)
            thumbs.append("thumbnail_%d.jpg" % int(title['id']))
        except:
            # Any failure in generate_thumbnail falls back to the default
            # cover below; a present-but-unreadable cover is also reported
            # to the user via self.error.
            if 'cover' in title and os.path.exists(title['cover']):
                valid_cover = False
                self.opts.log.warn(" *** Invalid cover file for '%s'***" %
                                    (title['title']))
                if not self.error:
                    self.error.append('Invalid cover files')
                self.error.append("Warning: invalid cover file for '%s', default cover substituted.\n" % (title['title']))

            thumb_generated = False

        if not thumb_generated:
            self.opts.log.warn(" using default cover for '%s' (%d)" % (title['title'], title['id']))
            # Confirm thumb exists, default is current
            default_thumb_fp = os.path.join(image_dir, "thumbnail_default.jpg")
            cover = os.path.join(self.catalog_path, "DefaultCover.png")
            title['cover'] = cover

            if not os.path.exists(cover):
                shutil.copyfile(I('default_cover.png'), cover)

            if os.path.isfile(default_thumb_fp):
                # Check to see if default cover is newer than thumbnail
                # os.path.getmtime() = modified time
                # os.path.ctime() = creation time
                cover_timestamp = os.path.getmtime(cover)
                thumb_timestamp = os.path.getmtime(default_thumb_fp)
                if thumb_timestamp < cover_timestamp:
                    if self.DEBUG and self.opts.verbose:
                        self.opts.log.warn("updating thumbnail_default for %s" % title['title'])
                    self.generate_thumbnail(title, image_dir,
                                "thumbnail_default.jpg" if valid_cover else thumb_file)
            else:
                if self.DEBUG and self.opts.verbose:
                    self.opts.log.warn(" generating new thumbnail_default.jpg")
                self.generate_thumbnail(title, image_dir,
                            "thumbnail_default.jpg" if valid_cover else thumb_file)
            # Clear the book's cover property
            title['cover'] = None

    # Write thumb_width to the file, validating cache contents
    # Allows detection of aborted catalog builds
    try:
        with ZipFile(self.thumbs_path, mode='a') as zfw:
            zfw.writestr('thumb_width', self.opts.thumb_width)
    except Exception as err:
        raise ValueError('There was an error writing to the thumbnail cache: %s\n'
                         'Try deleting it. Underlying error: %s' % (force_unicode(self.thumbs_path), as_unicode(err)))

    self.thumbs = thumbs
def generate_unicode_name(self, c):
    """ Generate a legal XHTML anchor from unicode character(s).

    Args:
     c (unicode): character(s)

    Return:
     (str): legal XHTML anchor string built from the unicode name(s)
    """
    # Concatenate the formal Unicode names (no separator between characters,
    # matching the established anchor format), then join the space-separated
    # name words with underscores.
    combined = ''.join(unicodedata.name(str(ch)) for ch in c)
    return '_'.join(combined.split())
def get_excluded_tags(self):
    """ Get excluded_tags from opts.exclusion_rules.

    Collects (deduplicated) tags from every exclusion rule whose field is
    the localized 'Tags' name, then logs each book those tags will exclude.

    Inputs:
     opts.exclusion_rules (tuples): exclusion rules

    Return:
     excluded_tags (list): excluded tags
    """
    collected = set()
    for rule in self.opts.exclusion_rules:
        if rule[1] == _('Tags'):
            collected.update(rule[2].split(','))
    excluded_tags = list(collected)

    # Report excluded books
    if excluded_tags:
        self.opts.log.info(" Books excluded by tag:")
        data = self.db.get_data_as_dict(ids=self.opts.ids)
        for record in data:
            matched = list(set(record['tags']) & set(excluded_tags))
            if matched:
                for rule in self.opts.exclusion_rules:
                    if rule[1] == _('Tags') and rule[2] == str(matched[0]):
                        self.opts.log.info(" - '%s' by %s (Exclusion rule '%s')" %
                            (record['title'], record['authors'][0], rule[0]))
    return excluded_tags
def get_friendly_genre_tag(self, genre):
    """ Return the first friendly_tag matching genre.

    genre_tags_dict maps friendly tags to normalized genre names
    (populated in filter_genre_tags()); return the first key whose
    value equals *genre*, or None when nothing matches.

    Args:
     genre (str): genre to match

    Return:
     friendly_tag (str): friendly_tag matching genre
    """
    for friendly_tag, normalized in self.genre_tags_dict.items():
        if normalized == genre:
            return friendly_tag
def get_output_profile(self, _opts):
    """ Return profile matching opts.output_profile.

    Input:
     _opts (object): build options object

    Return:
     (profile): output profile whose short_name matches, or None
    """
    candidates = (p for p in output_profiles() if p.short_name == _opts.output_profile)
    return next(candidates, None)
def get_prefix_rules(self):
    """ Convert opts.prefix_rules to a list of dicts.

    The model for a prefix rule is
    ('<rule name>','<#source_field_lookup>','<pattern>','<prefix>').

    Input:
     opts.prefix_rules (tuples): (name, field, pattern, prefix)

    Return:
     (list): list of prefix_rules dicts
    """
    rules = []
    if self.opts.prefix_rules:
        try:
            for rule in self.opts.prefix_rules:
                rules.append({
                    'name': rule[0],
                    'field': rule[1],
                    'pattern': rule[2],
                    'prefix': rule[3],
                })
        except:
            # Malformed input is logged, then the original error propagates.
            self.opts.log.error("malformed prefix_rules: %s" % repr(self.opts.prefix_rules))
            raise
    return rules
def letter_or_symbol(self, char):
    """ Test asciized char for A-z.

    Args:
     char (chr): character to test

    Return:
     (str): char if its asciized form contains an ASCII letter, else SYMBOLS
    """
    has_letter = re.search('[a-zA-Z]', ascii_text(char)) is not None
    return char if has_letter else self.SYMBOLS
def load_section_templates(self):
    """ Add section templates to local namespace.

    Load section templates from the resource file. Lines of the form
    'by_<section>_template <value>' become attributes of self, later
    consumed by the per-section generators via generate_format_args().

    Inputs:
     (files): section template files from resource dir

    Results:
     (strs): section templates set as attributes on self
    """
    raw = P('catalog/section_list_templates.conf', data=True).decode('utf-8')
    for line in raw.splitlines():
        entry = line.lstrip()
        # Skip comments and anything that is not a 'by_*' definition.
        if entry.startswith('#') or not entry.startswith('by_'):
            continue
        key, val = entry.split(' ', 1)
        key = key.strip()
        val = val.strip()
        if key.endswith('_template'):
            setattr(self, key, val)
def merge_comments(self, record):
    """ Merge comments with custom column content.

    Merge comments from book metadata with user-specified custom column
    content, optionally before or after. Optionally insert <hr> between
    fields.

    Args:
     record (dict): book metadata

    Return:
     merged (str): comments merged with addendum
    """
    merged = ''
    if record['description']:
        addendum = self.db.get_field(record['id'],
                                    self.merge_comments_rule['field'],
                                    index_is_id=True)
        if addendum is None:
            addendum = ''
        elif isinstance(addendum, list):
            addendum = (', '.join(addendum))
        # NOTE(review): eval() of a stored rule value — presumably the string
        # 'True'/'False' from the GUI rule editor; a plain comparison would be
        # safer. Confirm the value domain before changing.
        include_hr = eval(self.merge_comments_rule['hr'])
        if self.merge_comments_rule['position'] == 'before':
            # Custom column content first, then (optionally <hr>) description.
            merged = addendum
            if include_hr:
                merged += '<hr class="merged_comments_divider"/>'
            else:
                merged += '\n'
            merged += record['description']
        else:
            # Description first, then (optionally <hr>) custom column content.
            merged = record['description']
            if include_hr:
                merged += '<hr class="merged_comments_divider"/>'
            else:
                merged += '\n'
            merged += addendum
    else:
        # Return only the custom field contents
        merged = self.db.get_field(record['id'],
                                    self.merge_comments_rule['field'],
                                    index_is_id=True)
        if isinstance(merged, list):
            merged = (', '.join(merged))

    return merged
def process_exclusions(self, data_set):
    """ Filter data_set based on exclusion_rules.

    Compare each book in data_set to each custom-column exclusion rule
    (rules whose field starts with '#'). Remove books matching exclusion
    criteria. Tag-based rules are handled elsewhere (get_excluded_tags()).

    Args:
     data_set (list): all candidate books

    Return:
     (list): filtered data_set (data_set unchanged if no custom rules)
    """
    filtered_data_set = []
    exclusion_pairs = []
    exclusion_set = []
    # Collect (field, pattern) pairs from custom-column rules only.
    for rule in self.opts.exclusion_rules:
        if rule[1].startswith('#') and rule[2] != '':
            field = rule[1]
            pat = rule[2]
            exclusion_pairs.append((field, pat))
        else:
            continue

    if exclusion_pairs:
        if self.opts.verbose:
            self.opts.log.info(" Books excluded by custom field contents:")

        # NOTE(review): nesting reconstructed from flattened source — the
        # break/else placement below matches upstream calibre; confirm.
        for record in data_set:
            for exclusion_pair in exclusion_pairs:
                field, pat = exclusion_pair
                field_contents = self.db.get_field(record['id'],
                                    field,
                                    index_is_id=True)

                if field_contents == '':
                    field_contents = None

                if (self.db.metadata_for_field(field)['datatype'] == 'bool' and
                        field_contents is None):
                    # Handle condition where field is a bool and contents is None,
                    # which is displayed as No
                    field_contents = _('False')

                if field_contents is not None:
                    if self.db.metadata_for_field(field)['datatype'] == 'bool':
                        # For Yes/No fields, need to translate field_contents to
                        # locale version
                        field_contents = _(repr(field_contents))

                    matched = re.search(pat, str(field_contents),
                            re.IGNORECASE)
                    if matched is not None:
                        if self.opts.verbose:
                            field_md = self.db.metadata_for_field(field)
                            for rule in self.opts.exclusion_rules:
                                if rule[1] == '#%s' % field_md['label']:
                                    self.opts.log.info(" - '%s' by %s (%s: '%s' contains '%s')" %
                                        (record['title'], record['authors'][0],
                                         rule[0],
                                         self.db.metadata_for_field(field)['name'],
                                         field_contents))
                        # Excluded: drop it if a previous rule had kept it,
                        # and stop evaluating further rules for this record.
                        exclusion_set.append(record)
                        if record in filtered_data_set:
                            filtered_data_set.remove(record)
                        break
                    else:
                        if record not in filtered_data_set:
                            filtered_data_set.append(record)
                elif field_contents is None and pat == 'None':
                    # Empty field explicitly matched by a 'None' pattern.
                    exclusion_set.append(record)
                    if record in filtered_data_set:
                        filtered_data_set.remove(record)
                else:
                    if (record not in filtered_data_set and
                            record not in exclusion_set):
                        filtered_data_set.append(record)
        return filtered_data_set
    else:
        return data_set
def relist_multiple_authors(self, books_by_author):
    """ Create multiple entries for books with multiple authors.

    Scan the list for books with more than one author and append one cloned
    copy per additional author, with the author list rotated so each
    co-author leads once.

    Args:
     books_by_author (list): book list possibly containing books
      with multiple authors

    Return:
     (list): books_by_author with additional cloned entries for books with
      multiple authors
    """
    # Snapshot the multi-author books first so clones are not reprocessed.
    multi = [entry for entry in books_by_author if len(entry['authors']) > 1]

    for entry in multi:
        rotation = list(entry['authors'])
        # One clone per additional author; the rotation is cumulative so
        # each co-author takes the lead position exactly once.
        for _shift in range(1, len(entry['authors'])):
            rotation.append(rotation.pop(0))
            clone = deepcopy(entry)
            clone['author'] = ' & '.join(rotation)
            clone['authors'] = list(rotation)
            clone['author_sort'] = ' & '.join(author_to_author_sort(auth) for auth in rotation)
            books_by_author.append(clone)

    return books_by_author
def update_progress_full_step(self, description):
    """ Update calibre's job status UI for a new full step.

    Args:
     description (str): text describing current step

    Result:
     (UI): Jobs UI updated via self.reporter; progress also logged
    """
    self.current_step += 1
    self.progress_string = description
    fraction = float((self.current_step - 1) / self.total_steps)
    # Never report exactly 0 — the UI treats that as "not started".
    self.progress_int = fraction if fraction else 0.01
    self.reporter(self.progress_int, self.progress_string)

    elapsed = str(datetime.timedelta(seconds=int(time.time() - self.opts.start_time)))
    if self.opts.cli_environment:
        message = f"{self.progress_int * 100:3.0f}% {self.progress_string}"
        if self.opts.verbose:
            message += " (%s)" % elapsed
    else:
        message = f"{self.progress_string} ({elapsed})"
    self.opts.log(message)
def update_progress_micro_step(self, description, micro_step_pct):
    """ Update calibre's job status UI within the current full step.

    Called from steps requiring more time:
     generate_html_descriptions()
     generate_thumbnails()

    Args:
     description (str): text describing microstep
     micro_step_pct (float): fraction (0-1) of the current full step done

    Results:
     (UI): Jobs UI updated via self.reporter
    """
    per_step = 100 / self.total_steps
    self.progress_string = description
    base = float((self.current_step - 1) / self.total_steps)
    partial = float((micro_step_pct * per_step) / 100)
    self.progress_int = base + partial
    self.reporter(self.progress_int, self.progress_string)
def write_ncx(self):
    """ Write accumulated ncx_soup to file.

    Inputs:
     catalog_path (str): path to generated catalog
     opts.basename (str): catalog basename

    Output:
     (file): basename.NCX written
    """
    self.update_progress_full_step(_("Saving NCX"))

    pretty_xml_tree(self.ncx_root)
    payload = etree.tostring(self.ncx_root, encoding='utf-8')
    target = f"{self.catalog_path}/{self.opts.basename}.ncx"
    with open(target, 'wb') as handle:
        handle.write(payload)
| 179,329 | Python | .py | 3,638 | 34.822705 | 174 | 0.539662 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,709 | epub_mobi.py | kovidgoyal_calibre/src/calibre/library/catalogs/epub_mobi.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import datetime
import os
import time
from collections import namedtuple
from calibre import strftime
from calibre.customize import CatalogPlugin
from calibre.customize.conversion import DummyReporter, OptionRecommendation
from calibre.library import current_library_name
from calibre.library.catalogs import AuthorSortMismatchException, EmptyCatalogException
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.localization import _, calibre_langcode_to_name, canonicalize_lang, get_lang
Option = namedtuple('Option', 'option, default, dest, action, help')
class EPUB_MOBI(CatalogPlugin):
    'EPUB catalog generator'

    name = 'Catalog_EPUB_MOBI'
    description = _('AZW3/EPUB/MOBI catalog generator')
    supported_platforms = ['windows', 'osx', 'linux']
    minimum_calibre_version = (0, 7, 40)
    author = 'Greg Riker'
    version = (1, 0, 0)
    file_types = {'azw3', 'epub', 'mobi'}

    # Coercion bounds (inches) applied to --thumb-width in run().
    # NOTE(review): the in-code comment in run() says "1.0\" - 2.0\"" but the
    # upper constant here is 3.0 — confirm which bound is intended.
    THUMB_SMALLEST = "1.0"
    THUMB_LARGEST = "3.0"

    cli_options = [Option('--catalog-title',  # {{{
                          default='My Books',
                          dest='catalog_title',
                          action=None,
                          help=_('Title of generated catalog used as title in metadata.\n'
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--cross-reference-authors',
                          default=False,
                          dest='cross_reference_authors',
                          action='store_true',
                          help=_("Create cross-references in Authors section for books with multiple authors.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--debug-pipeline',
                          default=None,
                          dest='debug_pipeline',
                          action=None,
                          help=_("Save the output from different stages of the conversion "
                                 "pipeline to the specified "
                                 "folder. Useful if you are unsure at which stage "
                                 "of the conversion process a bug is occurring.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--exclude-genre',
                          default=r'\[.+\]|^\+$',
                          dest='exclude_genre',
                          action=None,
                          help=_("Regex describing tags to exclude as genres.\n"
                                 "Default: '%default' excludes bracketed tags, e.g. '[Project Gutenberg]', and '+', the default tag for read books.\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--exclusion-rules',
                          default="(('Catalogs','Tags','Catalog'),)",
                          dest='exclusion_rules',
                          action=None,
                          help=_("Specifies the rules used to exclude books from the generated catalog.\n"
                                 "The model for an exclusion rule is either\n('<rule name>','Tags','<comma-separated list of tags>') or\n"
                                 "('<rule name>','<custom column>','<pattern>').\n"
                                 "For example:\n"
                                 "(('Archived books','#status','Archived'),)\n"
                                 "will exclude a book with a value of 'Archived' in the custom column 'status'.\n"
                                 "When multiple rules are defined, all rules will be applied.\n"
                                 "Default: \n" + '"' + '%default' + '"' + "\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--generate-authors',
                          default=False,
                          dest='generate_authors',
                          action='store_true',
                          help=_("Include 'Authors' section in catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--generate-descriptions',
                          default=False,
                          dest='generate_descriptions',
                          action='store_true',
                          help=_("Include 'Descriptions' section in catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--generate-genres',
                          default=False,
                          dest='generate_genres',
                          action='store_true',
                          help=_("Include 'Genres' section in catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--generate-titles',
                          default=False,
                          dest='generate_titles',
                          action='store_true',
                          help=_("Include 'Titles' section in catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--generate-series',
                          default=False,
                          dest='generate_series',
                          action='store_true',
                          help=_("Include 'Series' section in catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--generate-recently-added',
                          default=False,
                          dest='generate_recently_added',
                          action='store_true',
                          help=_("Include 'Recently Added' section in catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--genre-source-field',
                          default=_('Tags'),
                          dest='genre_source_field',
                          action=None,
                          help=_("Source field for 'Genres' section.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--header-note-source-field',
                          default='',
                          dest='header_note_source_field',
                          action=None,
                          help=_("Custom field containing note text to insert in Description header.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--merge-comments-rule',
                          default='::',
                          dest='merge_comments_rule',
                          action=None,
                          help=_("#<custom field>:[before|after]:[True|False] specifying:\n"
                                 " <custom field> Custom field containing notes to merge with comments\n"
                                 " [before|after] Placement of notes with respect to comments\n"
                                 " [True|False] - A horizontal rule is inserted between notes and comments\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--output-profile',
                          default=None,
                          dest='output_profile',
                          action=None,
                          help=_("Specifies the output profile. In some cases, an output profile is required to optimize"
                                 " the catalog for the device. For example, 'kindle' or 'kindle_dx' creates a structured"
                                 " Table of Contents with Sections and Articles.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--prefix-rules',
                          default="(('Read books','tags','+','\u2713'),('Wishlist item','tags','Wishlist','\u00d7'))",
                          dest='prefix_rules',
                          action=None,
                          help=_("Specifies the rules used to include prefixes indicating read books, wishlist items and other user-specified prefixes.\n"
                                 "The model for a prefix rule is ('<rule name>','<source field>','<pattern>','<prefix>').\n"
                                 "When multiple rules are defined, the first matching rule will be used.\n"
                                 "Default:\n" + '"' + '%default' + '"' + "\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--preset',
                          default=None,
                          dest='preset',
                          action=None,
                          help=_("Use a named preset created with the GUI catalog builder.\n"
                                 "A preset specifies all settings for building a catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--use-existing-cover',
                          default=False,
                          dest='use_existing_cover',
                          action='store_true',
                          help=_("Replace existing cover when generating the catalog.\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   Option('--thumb-width',
                          default='1.0',
                          dest='thumb_width',
                          action=None,
                          help=_("Size hint (in inches) for book covers in catalog.\n"
                                 "Range: 1.0 - 2.0\n"
                                 "Default: '%default'\n"
                                 "Applies to: AZW3, EPUB, MOBI output formats")),
                   ]
    # }}}

    def run(self, path_to_output, opts, db, notification=DummyReporter()):
        """Generate the catalog sources, then convert them to *path_to_output*.

        Normalizes and validates *opts* (preset expansion, output profile,
        section selection, thumb width, rule tuples), drives CatalogBuilder
        to produce the OPF/HTML sources, and finally runs the conversion
        pipeline (Plumber) to the requested format.

        Returns catalog.error (list of warnings for the GUI), or 1 for a
        missing preset, or a two-element message list when no sections are
        enabled in a GUI build.
        """
        from calibre.library.catalogs.epub_mobi_builder import CatalogBuilder
        from calibre.utils.config import JSONConfig
        from calibre.utils.logging import default_log as log

        # If preset specified from the cli, insert stored options from JSON file
        if hasattr(opts, 'preset') and opts.preset:
            available_presets = JSONConfig("catalog_presets")
            if opts.preset not in available_presets:
                if available_presets:
                    # NOTE(review): the '%' interpolation happens before _()
                    # here, so the format string is translated post-substitution.
                    print(_('Error: Preset "%s" not found.' % opts.preset))
                    print(_('Stored presets: %s' % ', '.join([p for p in sorted(available_presets.keys())])))
                else:
                    print(_('Error: No stored presets.'))
                return 1

            # Copy the relevant preset values to the opts object
            for item in available_presets[opts.preset]:
                if item not in ['exclusion_rules_tw', 'format', 'prefix_rules_tw']:
                    setattr(opts, item, available_presets[opts.preset][item])

            # Provide an unconnected device
            opts.connected_device = {
                'is_device_connected': False,
                'kind': None,
                'name': None,
                'save_template': None,
                'serial': None,
                'storage': None,
            }

            # Convert prefix_rules and exclusion_rules from JSON lists to tuples
            prs = []
            for rule in opts.prefix_rules:
                prs.append(tuple(rule))
            opts.prefix_rules = tuple(prs)

            ers = []
            for rule in opts.exclusion_rules:
                ers.append(tuple(rule))
            opts.exclusion_rules = tuple(ers)

        opts.log = log
        opts.fmt = self.fmt = path_to_output.rpartition('.')[2]

        # Add local options
        opts.creator = '{}, {} {}, {}'.format(strftime('%A'), strftime('%B'), strftime('%d').lstrip('0'), strftime('%Y'))
        opts.creator_sort_as = '{} {}'.format('calibre', strftime('%Y-%m-%d'))
        opts.connected_kindle = False

        # Finalize output_profile
        op = opts.output_profile
        if op is None:
            op = 'default'

        if opts.connected_device['name'] and 'kindle' in opts.connected_device['name'].lower():
            opts.connected_kindle = True
            # Serial prefixes B004/B005 identify Kindle DX hardware.
            if opts.connected_device['serial'] and \
                    opts.connected_device['serial'][:4] in ['B004', 'B005']:
                op = "kindle_dx"
            else:
                op = "kindle"

        # Clip lengths are tighter on non-DX kindles.
        opts.description_clip = 380 if op.endswith('dx') or 'kindle' not in op else 100
        opts.author_clip = 100 if op.endswith('dx') or 'kindle' not in op else 60
        opts.output_profile = op

        opts.basename = "Catalog"
        # GUI invocations attach a 'sync' attribute; its absence implies CLI.
        opts.cli_environment = not hasattr(opts, 'sync')

        # Hard-wired to always sort descriptions by author, with series after non-series
        opts.sort_descriptions_by_author = True

        build_log = []

        build_log.append("%s('%s'): Generating %s %sin %s environment, locale: '%s'" %
            (self.name,
             current_library_name(),
             self.fmt,
             'for %s ' % opts.output_profile if opts.output_profile else '',
             'CLI' if opts.cli_environment else 'GUI',
             calibre_langcode_to_name(canonicalize_lang(get_lang()), localize=False))
            )

        # If exclude_genre is blank, assume user wants all tags as genres
        if opts.exclude_genre.strip() == '':
            # opts.exclude_genre = '\[^.\]'
            # build_log.append(" converting empty exclude_genre to '\[^.\]'")
            # 'a^' is a regex that can never match, i.e. exclude nothing.
            opts.exclude_genre = 'a^'
            build_log.append(" converting empty exclude_genre to 'a^'")

        if opts.connected_device['is_device_connected'] and \
                opts.connected_device['kind'] == 'device':
            if opts.connected_device['serial']:
                # Mask all but the first four characters of the serial number.
                build_log.append(" connected_device: '%s' #%s%s " %
                    (opts.connected_device['name'],
                     opts.connected_device['serial'][0:4],
                     'x' * (len(opts.connected_device['serial']) - 4)))
                for storage in opts.connected_device['storage']:
                    if storage:
                        build_log.append(" mount point: %s" % storage)
            else:
                build_log.append(" connected_device: '%s'" % opts.connected_device['name'])
                try:
                    for storage in opts.connected_device['storage']:
                        if storage:
                            build_log.append(" mount point: %s" % storage)
                except:
                    build_log.append(" (no mount points)")
        else:
            build_log.append(" connected_device: '%s'" % opts.connected_device['name'])

        opts_dict = vars(opts)
        if opts_dict['ids']:
            build_log.append(" book count: %d" % len(opts_dict['ids']))

        # Determine which sections the user enabled.
        sections_list = []
        if opts.generate_authors:
            sections_list.append('Authors')
        if opts.generate_titles:
            sections_list.append('Titles')
        if opts.generate_series:
            sections_list.append('Series')
        if opts.generate_genres:
            sections_list.append('Genres')
        if opts.generate_recently_added:
            sections_list.append('Recently Added')
        if opts.generate_descriptions:
            sections_list.append('Descriptions')

        if not sections_list:
            if opts.cli_environment:
                # CLI: default to everything rather than failing.
                opts.log.warn('*** No Section switches specified, enabling all Sections ***')
                opts.generate_authors = True
                opts.generate_titles = True
                opts.generate_series = True
                opts.generate_genres = True
                opts.generate_recently_added = True
                opts.generate_descriptions = True
                sections_list = ['Authors', 'Titles', 'Series', 'Genres', 'Recently Added', 'Descriptions']
            else:
                opts.log.warn('\n*** No enabled Sections, terminating catalog generation ***')
                return ["No Included Sections", "No enabled Sections.\nCheck E-book options tab\n'Included sections'\n"]
        if opts.fmt == 'mobi' and sections_list == ['Descriptions']:
            # MOBI TOC structure requires at least one listing section.
            warning = _("\n*** Adding 'By authors' section required for MOBI output ***")
            opts.log.warn(warning)
            sections_list.insert(0, 'Authors')
            opts.generate_authors = True

        opts.log(" Sections: %s" % ', '.join(sections_list))
        opts.section_list = sections_list

        # Limit thumb_width to 1.0" - 2.0"
        try:
            if float(opts.thumb_width) < float(self.THUMB_SMALLEST):
                log.warning(f"coercing thumb_width from '{opts.thumb_width}' to '{self.THUMB_SMALLEST}'")
                opts.thumb_width = self.THUMB_SMALLEST
            if float(opts.thumb_width) > float(self.THUMB_LARGEST):
                log.warning(f"coercing thumb_width from '{opts.thumb_width}' to '{self.THUMB_LARGEST}'")
                opts.thumb_width = self.THUMB_LARGEST
            opts.thumb_width = "%.2f" % float(opts.thumb_width)
        except Exception:
            # Unparseable value: fall back to the smallest size.
            log.error(f"coercing thumb_width from '{opts.thumb_width}' to '{self.THUMB_SMALLEST}'")
            opts.thumb_width = "1.0"

        # eval prefix_rules if passed from command line
        # NOTE(review): eval() of a CLI-supplied string — long-standing
        # calibre behavior for tuple-literal options; input comes from the
        # local user's command line.
        if type(opts.prefix_rules) is not tuple:
            try:
                opts.prefix_rules = eval(opts.prefix_rules)
            except:
                log.error("malformed --prefix-rules: %s" % opts.prefix_rules)
                raise
            for rule in opts.prefix_rules:
                if len(rule) != 4:
                    log.error("incorrect number of args for --prefix-rules: %s" % repr(rule))

        # eval exclusion_rules if passed from command line
        if type(opts.exclusion_rules) is not tuple:
            try:
                opts.exclusion_rules = eval(opts.exclusion_rules)
            except:
                log.error("malformed --exclusion-rules: %s" % opts.exclusion_rules)
                raise
            for rule in opts.exclusion_rules:
                if len(rule) != 3:
                    log.error("incorrect number of args for --exclusion-rules: %s" % repr(rule))

        # Display opts
        keys = sorted(opts_dict.keys())
        build_log.append(" opts:")
        for key in keys:
            if key in ['catalog_title', 'author_clip', 'connected_kindle', 'creator',
                       'cross_reference_authors', 'description_clip', 'exclude_book_marker',
                       'exclude_genre', 'exclude_tags', 'exclusion_rules', 'fmt',
                       'genre_source_field', 'header_note_source_field', 'merge_comments_rule',
                       'output_profile', 'prefix_rules', 'preset', 'read_book_marker',
                       'search_text', 'sort_by', 'sort_descriptions_by_author', 'sync',
                       'thumb_width', 'use_existing_cover', 'wishlist_tag']:
                build_log.append(f"  {key}: {repr(opts_dict[key])}")
        if opts.verbose:
            log('\n'.join(line for line in build_log))

        # Capture start_time
        opts.start_time = time.time()
        self.opts = opts

        if opts.verbose:
            log.info(" Begin catalog source generation (%s)" %
                     str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))

        # Launch the Catalog builder
        catalog = CatalogBuilder(db, opts, self, report_progress=notification)

        try:
            catalog.build_sources()
            if opts.verbose:
                log.info(" Completed catalog source generation (%s)\n" %
                         str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))
        except (AuthorSortMismatchException, EmptyCatalogException) as e:
            # Known, user-facing failure modes: log and fall through to the
            # final return (no conversion attempted).
            log.error(" *** Terminated catalog generation: %s ***" % e)
        except:
            log.error(" unhandled exception in catalog generator")
            raise
        else:
            # Sources built OK: assemble conversion recommendations and run
            # the pipeline.
            recommendations = []
            recommendations.append(('remove_fake_margins', False,
                OptionRecommendation.HIGH))
            recommendations.append(('comments', '', OptionRecommendation.HIGH))

            """
            >>> Use to debug generated catalog code before pipeline conversion <<<
            """
            GENERATE_DEBUG_EPUB = False
            if GENERATE_DEBUG_EPUB:
                catalog_debug_path = os.path.join(os.path.expanduser('~'), 'Desktop', 'Catalog debug')
                setattr(opts, 'debug_pipeline', os.path.expanduser(catalog_debug_path))

            dp = getattr(opts, 'debug_pipeline', None)
            if dp is not None:
                recommendations.append(('debug_pipeline', dp,
                    OptionRecommendation.HIGH))

            if opts.output_profile and opts.output_profile.startswith("kindle"):
                recommendations.append(('output_profile', opts.output_profile,
                    OptionRecommendation.HIGH))
                recommendations.append(('book_producer', opts.output_profile,
                    OptionRecommendation.HIGH))
                if opts.fmt == 'mobi':
                    recommendations.append(('no_inline_toc', True,
                        OptionRecommendation.HIGH))

            recommendations.append(('verbose', 2,
                OptionRecommendation.HIGH))

            # Use existing cover or generate new cover
            cpath = None
            existing_cover = False
            try:
                # A previously generated catalog is stored as a book titled
                # with the catalog title and authored by 'calibre'.
                search_text = 'title:"{}" author:{}'.format(
                        opts.catalog_title.replace('"', '\\"'), 'calibre')
                matches = db.search(search_text, return_matches=True, sort_results=False)
                if matches:
                    cpath = db.cover(matches[0], index_is_id=True, as_path=True)
                    if cpath and os.path.exists(cpath):
                        existing_cover = True
            except:
                pass

            if self.opts.use_existing_cover and not existing_cover:
                log.warning("no existing catalog cover found")

            if self.opts.use_existing_cover and existing_cover:
                recommendations.append(('cover', cpath, OptionRecommendation.HIGH))
                log.info("using existing catalog cover")
            else:
                from calibre.ebooks.covers import calibre_cover2
                log.info("replacing catalog cover")
                new_cover_path = PersistentTemporaryFile(suffix='.jpg')
                new_cover = calibre_cover2(opts.catalog_title, 'calibre')
                new_cover_path.write(new_cover)
                new_cover_path.close()
                recommendations.append(('cover', new_cover_path.name, OptionRecommendation.HIGH))

            # Run ebook-convert
            from calibre.ebooks.conversion.plumber import Plumber
            plumber = Plumber(os.path.join(catalog.catalog_path, opts.basename + '.opf'),
                    path_to_output, log, report_progress=notification,
                    abort_after_input_dump=False)
            plumber.merge_ui_recommendations(recommendations)
            plumber.run()

            # Remove the temporary cover copy, if any was extracted.
            try:
                os.remove(cpath)
            except:
                pass

            if GENERATE_DEBUG_EPUB:
                from calibre.ebooks.epub import initialize_container
                from calibre.ebooks.tweak import zip_rebuilder
                from calibre.utils.zipfile import ZipFile
                input_path = os.path.join(catalog_debug_path, 'input')
                epub_shell = os.path.join(catalog_debug_path, 'epub_shell.zip')
                initialize_container(epub_shell, opf_name='content.opf')
                with ZipFile(epub_shell, 'r') as zf:
                    zf.extractall(path=input_path)
                os.remove(epub_shell)
                zip_rebuilder(input_path, os.path.join(catalog_debug_path, 'input.epub'))

            if opts.verbose:
                log.info(" Catalog creation complete (%s)\n" %
                         str(datetime.timedelta(seconds=int(time.time() - opts.start_time))))

        # returns to gui2.actions.catalog:catalog_generated()
        return catalog.error
| 25,222 | Python | .py | 454 | 37.563877 | 154 | 0.516184 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,710 | utils.py | kovidgoyal_calibre/src/calibre/library/catalogs/utils.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Greg Riker'
__docformat__ = 'restructuredtext en'
import re
from calibre import prints
from calibre.utils.logging import default_log as log
class NumberToText: # {{{
    '''
    Convert a numeric token into its English text representation.

    Examples:
        4.56 => four point fifty-six
        456  => four hundred fifty-six
        4:56 => four fifty-six

    The translated text is stored in ``self.text``. ``self.number_as_float``
    holds the numeric portion of the token (historically either a string or a
    float depending on the branch taken) and ``self.suffix`` any ordinal
    suffix such as 'th'.
    '''
    ORDINALS = ['zeroth','first','second','third','fourth','fifth','sixth','seventh','eighth','ninth']
    lessThanTwenty = ["<zero>","one","two","three","four","five","six","seven","eight","nine",
                      "ten","eleven","twelve","thirteen","fourteen","fifteen","sixteen","seventeen",
                      "eighteen","nineteen"]
    tens = ["<zero>","<tens>","twenty","thirty","forty","fifty","sixty","seventy","eighty","ninety"]
    hundreds = ["<zero>","one","two","three","four","five","six","seven","eight","nine"]

    def __init__(self, number, verbose=False):
        self.number = number
        self.number_as_float = 0.0
        self.text = ''
        self.verbose = verbose
        self.log = log
        self.numberTranslate()

    def stringFromInt(self, intToTranslate):
        '''Return the English text for a three-digit integer (0-999).'''
        tensComponentString = ""
        hundredsComponent = intToTranslate - (intToTranslate % 100)
        tensComponent = intToTranslate % 100
        # Build the hundreds component
        if hundredsComponent:
            hundredsComponentString = "%s hundred" % self.hundreds[hundredsComponent//100]
        else:
            hundredsComponentString = ""
        # Build the tens component
        if tensComponent < 20:
            tensComponentString = self.lessThanTwenty[tensComponent]
        else:
            # 20-99: a tens word, hyphenated with a ones word when needed
            tensPart = self.tens[tensComponent // 10]
            onesPart = self.lessThanTwenty[tensComponent % 10]
            if intToTranslate % 10:
                tensComponentString = f"{tensPart}-{onesPart}"
            else:
                tensComponentString = "%s" % tensPart
        # Concatenate the results
        result = ''
        if hundredsComponent and not tensComponent:
            result = hundredsComponentString
        elif not hundredsComponent and tensComponent:
            result = tensComponentString
        elif hundredsComponent and tensComponent:
            result = hundredsComponentString + " " + tensComponentString
        else:
            # intToTranslate was zero: nothing to translate
            prints(" NumberToText.stringFromInt(): empty result translating %d" % intToTranslate)
        return result

    def numberTranslate(self):
        '''Classify self.number (ordinal, time, percent, decimal, hyphenated,
        comma-grouped, hybrid or plain integer) and translate it into
        self.text.'''
        hundredsNumber = 0
        thousandsNumber = 0
        hundredsString = ""
        thousandsString = ""
        resultString = ""
        self.suffix = ''
        if self.verbose:
            self.log("numberTranslate(): %s" % self.number)
        # Special case ordinals
        # Bug fix: the suffix must be matched as an alternation. The previous
        # pattern '[st|nd|rd|th]' was a character class matching any single
        # one of those letters, which misrouted tokens such as '2s' into the
        # ordinal branch.
        if re.search('(st|nd|rd|th)',self.number):
            self.number = re.sub(',','',self.number)
            ordinal_suffix = re.search(r'[\D]', self.number)
            ordinal_number = re.sub(r'\D','',re.sub(',','',self.number))
            if self.verbose:
                self.log("Ordinal: %s" % ordinal_number)
            self.number_as_float = ordinal_number
            self.suffix = self.number[ordinal_suffix.start():]
            if int(ordinal_number) > 9:
                # Some typos (e.g., 'twentyth'), acceptable
                self.text = '%s' % (NumberToText(ordinal_number).text)
            else:
                self.text = '%s' % (self.ORDINALS[int(ordinal_number)])
        # Test for time
        elif re.search(':',self.number):
            if self.verbose:
                self.log("Time: %s" % self.number)
            self.number_as_float = re.sub(':','.',self.number)
            time_strings = self.number.split(":")
            hours = NumberToText(time_strings[0]).text
            minutes = NumberToText(time_strings[1]).text
            self.text = f'{hours.capitalize()}-{minutes}'
        # Test for %
        elif re.search('%', self.number):
            if self.verbose:
                self.log("Percent: %s" % self.number)
            self.number_as_float = self.number.split('%')[0]
            self.text = NumberToText(self.number.replace('%',' percent')).text
        # Test for decimal
        elif re.search('\\.',self.number):
            if self.verbose:
                self.log("Decimal: %s" % self.number)
            self.number_as_float = self.number
            decimal_strings = self.number.split(".")
            left = NumberToText(decimal_strings[0]).text
            right = NumberToText(decimal_strings[1]).text
            self.text = f'{left.capitalize()} point {right}'
        # Test for hyphenated
        elif re.search('-', self.number):
            if self.verbose:
                self.log("Hyphenated: %s" % self.number)
            self.number_as_float = self.number.split('-')[0]
            strings = self.number.split('-')
            # Translate whichever side is numeric, keep the other verbatim
            if re.search('[0-9]+', strings[0]):
                left = NumberToText(strings[0]).text
                right = strings[1]
            else:
                left = strings[0]
                right = NumberToText(strings[1]).text
            self.text = f'{left}-{right}'
        # Test for only commas and numbers
        elif re.search(',', self.number) and not re.search('[^0-9,]',self.number):
            if self.verbose:
                self.log("Comma(s): %s" % self.number)
            self.number_as_float = re.sub(',','',self.number)
            self.text = NumberToText(self.number_as_float).text
        # Test for hybrid e.g., 'K2, 2nd, 10@10'
        elif re.search('[\\D]+', self.number):
            if self.verbose:
                self.log("Hybrid: %s" % self.number)
            # Split the token into number/text
            number_position = re.search(r'\d',self.number).start()
            text_position = re.search(r'\D',self.number).start()
            if number_position < text_position:
                number = self.number[:text_position]
                text = self.number[text_position:]
                self.text = f'{NumberToText(number).text}{text}'
            else:
                text = self.number[:number_position]
                number = self.number[number_position:]
                self.text = f'{text}{NumberToText(number).text}'
        else:
            if self.verbose:
                self.log("Clean: %s" % self.number)
            try:
                # Bug fix: this used to assign to a misspelled attribute
                # ('float_as_number'), leaving self.number_as_float at 0.0
                # for plain integers.
                self.number_as_float = float(self.number)
                number = int(self.number)
            except (TypeError, ValueError):
                return
            if number > 10**9:
                self.text = "%d out of range" % number
                return
            if number == 10**9:
                self.text = "one billion"
            else :
                # Isolate the three-digit number groups
                millionsNumber = number//10**6
                thousandsNumber = (number - (millionsNumber * 10**6))//10**3
                hundredsNumber = number - (millionsNumber * 10**6) - (thousandsNumber * 10**3)
                if self.verbose:
                    print(f"Converting {millionsNumber} {thousandsNumber} {hundredsNumber}")
                # Convert hundredsNumber
                if hundredsNumber :
                    hundredsString = self.stringFromInt(hundredsNumber)
                # Convert thousandsNumber
                if thousandsNumber:
                    if number > 1099 and number < 2000:
                        # Years, e.g. 1984 => "nineteen eighty-four"
                        resultString = '{} {}'.format(self.lessThanTwenty[number//100],
                            self.stringFromInt(number % 100))
                        self.text = resultString.strip().capitalize()
                        return
                    else:
                        thousandsString = self.stringFromInt(thousandsNumber)
                # Convert millionsNumber
                if millionsNumber:
                    millionsString = self.stringFromInt(millionsNumber)
                # Concatenate the strings
                resultString = ''
                if millionsNumber:
                    resultString += "%s million " % millionsString
                if thousandsNumber:
                    resultString += "%s thousand " % thousandsString
                if hundredsNumber:
                    resultString += "%s" % hundredsString
                if not millionsNumber and not thousandsNumber and not hundredsNumber:
                    resultString = "zero"
                if self.verbose:
                    self.log('resultString: %s' % resultString)
                self.text = resultString.strip().capitalize()
# }}}
| 8,839 | Python | .py | 192 | 32.921875 | 102 | 0.545919 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,711 | __init__.py | kovidgoyal_calibre/src/calibre/library/catalogs/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
# All field names a catalog plugin can be asked to output; 'all' is a
# pseudo-field meaning "every field in this list".
FIELDS = ['all', 'title', 'title_sort', 'author_sort', 'authors', 'comments',
          'cover', 'formats','id', 'isbn', 'library_name','ondevice', 'pubdate', 'publisher',
          'rating', 'series_index', 'series', 'size', 'tags', 'timestamp',
          'uuid', 'languages', 'identifiers']
# Allowed fields for template
# (the subset of fields that may appear in user-supplied citation/naming templates)
TEMPLATE_ALLOWED_FIELDS = ['author_sort', 'authors', 'id', 'isbn', 'pubdate', 'title_sort',
    'publisher', 'series_index', 'series', 'tags', 'timestamp', 'title', 'uuid']
class AuthorSortMismatchException(Exception):
    '''Raised during catalog generation when author_sort values for the same
    author are inconsistent, preventing correct grouping.'''
    pass
class EmptyCatalogException(Exception):
    '''Raised during catalog generation when no books match the catalog
    criteria, so there is nothing to generate.'''
    pass
class InvalidGenresSourceFieldException(Exception):
    '''Raised when the field configured as the source for genres is not
    usable by the catalog generator.'''
    pass
| 817 | Python | .py | 17 | 43.647059 | 93 | 0.668782 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,712 | bibtex.py | kovidgoyal_calibre/src/calibre/library/catalogs/bibtex.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import codecs
import numbers
import os
import re
from collections import namedtuple
from calibre import strftime
from calibre.customize import CatalogPlugin
from calibre.customize.conversion import DummyReporter
from calibre.ebooks.metadata import format_isbn
from calibre.library.catalogs import FIELDS, TEMPLATE_ALLOWED_FIELDS
from calibre.utils.localization import _
from polyglot.builtins import string_or_bytes
class BIBTEX(CatalogPlugin):
    '''BIBTEX catalog generator.

    Exports the selected books from a calibre library as a BibTeX (.bib)
    bibliography. Options are exposed via cli_options both on the command
    line and in the catalog GUI.
    '''
    Option = namedtuple('Option', 'option, default, dest, action, help')
    name = 'Catalog_BIBTEX'
    description = _('BIBTEX catalog generator')
    supported_platforms = ['windows', 'osx', 'linux']
    author = 'Sengian'
    version = (1, 0, 0)
    file_types = {'bib'}
    cli_options = [
            Option('--fields',
                default='all',
                dest='fields',
                action=None,
                help=_('The fields to output when cataloging books in the '
                    'database. Should be a comma-separated list of fields.\n'
                    'Available fields: %(fields)s.\n'
                    'plus user-created custom fields.\n'
                    'Example: %(opt)s=title,authors,tags\n'
                    "Default: '%%default'\n"
                    "Applies to: BIBTEX output format")%dict(
                        fields=', '.join(FIELDS), opt='--fields')),
            Option('--sort-by',
                default='id',
                dest='sort_by',
                action=None,
                help=_('Output field to sort on.\n'
                    'Available fields: author_sort, id, rating, size, timestamp, title.\n'
                    "Default: '%default'\n"
                    "Applies to: BIBTEX output format")),
            Option('--create-citation',
                default='True',
                dest='impcit',
                action=None,
                help=_('Create a citation for BibTeX entries.\n'
                    'Boolean value: True, False\n'
                    "Default: '%default'\n"
                    "Applies to: BIBTEX output format")),
            Option('--add-files-path',
                default='True',
                dest='addfiles',
                action=None,
                help=_('Create a file entry if formats is selected for BibTeX entries.\n'
                    'Boolean value: True, False\n'
                    "Default: '%default'\n"
                    "Applies to: BIBTEX output format")),
            Option('--citation-template',
                default='{authors}{id}',
                dest='bib_cit',
                action=None,
                help=_('The template for citation creation from database fields.\n'
                    'Should be a template with {} enclosed fields.\n'
                    'Available fields: %s.\n'
                    "Default: '%%default'\n"
                    "Applies to: BIBTEX output format")%', '.join(TEMPLATE_ALLOWED_FIELDS)),
            Option('--choose-encoding',
                default='utf8',
                dest='bibfile_enc',
                action=None,
                help=_('BibTeX file encoding output.\n'
                    'Available types: utf8, cp1252, ascii.\n'
                    "Default: '%default'\n"
                    "Applies to: BIBTEX output format")),
            Option('--choose-encoding-configuration',
                default='strict',
                dest='bibfile_enctag',
                action=None,
                help=_('BibTeX file encoding flag.\n'
                    'Available types: strict, replace, ignore, backslashreplace.\n'
                    "Default: '%default'\n"
                    "Applies to: BIBTEX output format")),
            Option('--entry-type',
                default='book',
                dest='bib_entry',
                action=None,
                help=_('Entry type for BibTeX catalog.\n'
                    'Available types: book, misc, mixed.\n'
                    "Default: '%default'\n"
                    "Applies to: BIBTEX output format"))]

    def run(self, path_to_output, opts, db, notification=DummyReporter()):
        '''Generate the .bib file at path_to_output from the books selected
        by opts (ids or search text), validating/normalizing all CLI and GUI
        option values along the way.'''
        from calibre.library.save_to_disk import preprocess_template
        from calibre.utils.bibtex import BibTeX
        from calibre.utils.date import isoformat
        from calibre.utils.filenames import ascii_text
        from calibre.utils.html2text import html2text
        from calibre.utils.logging import default_log as log
        library_name = os.path.basename(db.library_path)

        def create_bibtex_entry(entry, fields, mode, template_citation,
                                    bibtexdict, db, citation_bibtex=True, calibre_files=True):
            '''Render one book as a BibTeX @book/@misc entry string.
            Returns '' when mode is strict 'book' and the entry lacks the
            required book fields.'''
            # Bibtex doesn't like UTF-8 but keep unicode until writing
            # Define starting chain or if book valid strict and not book return a Fail string
            bibtex_entry = []
            if mode != "misc" and check_entry_book_valid(entry) :
                bibtex_entry.append('@book{')
            elif mode != "book" :
                bibtex_entry.append('@misc{')
            else :
                # case strict book
                return ''
            if citation_bibtex :
                # Citation tag
                bibtex_entry.append(make_bibtex_citation(entry, template_citation,
                    bibtexdict))
                bibtex_entry = [' '.join(bibtex_entry)]
            for field in fields:
                if field.startswith('#'):
                    # custom column: fetch the value and flatten it to text
                    item = db.get_field(entry['id'],field,index_is_id=True)
                    m = db.new_api.field_metadata[field]
                    if isinstance(item, (bool, numbers.Number)):
                        item = repr(item)
                    elif m.get('is_multiple'):
                        item = m['is_multiple']['list_to_ui'].join(filter(None, item))
                elif field == 'title_sort':
                    item = entry['sort']
                elif field == 'library_name':
                    item = library_name
                else:
                    item = entry[field]
                # check if the field should be included (none or empty)
                if item is None:
                    continue
                try:
                    if len(item) == 0 :
                        continue
                except TypeError:
                    # item has no len() (e.g. a number/date): include it
                    pass
                if field == 'authors' :
                    bibtex_entry.append('author = "%s"' % bibtexdict.bibtex_author_format(item))
                elif field == 'id' :
                    bibtex_entry.append('calibreid = "%s"' % int(item))
                elif field == 'rating' :
                    bibtex_entry.append('rating = "%s"' % int(item))
                elif field == 'size' :
                    bibtex_entry.append(f'{field} = "{int(item)} octets"')
                elif field == 'tags' :
                    # A list to flatten
                    bibtex_entry.append('tags = "%s"' % bibtexdict.utf8ToBibtex(', '.join(item)))
                elif field == 'comments' :
                    # \n removal
                    item = item.replace('\r\n', ' ')
                    item = item.replace('\n', ' ')
                    # unmatched brace removal (users should use \leftbrace or \rightbrace for single braces)
                    item = bibtexdict.stripUnmatchedSyntax(item, '{', '}')
                    # html to text
                    try:
                        item = html2text(item)
                    except:
                        log.warn("Failed to convert comments to text")
                    bibtex_entry.append('note = "%s"' % bibtexdict.utf8ToBibtex(item))
                elif field == 'isbn' :
                    # Could be 9, 10 or 13 digits
                    bibtex_entry.append('isbn = "%s"' % format_isbn(item))
                elif field == 'formats' :
                    # Add file path if format is selected
                    formats = [format.rpartition('.')[2].lower() for format in item]
                    bibtex_entry.append('formats = "%s"' % ', '.join(formats))
                    if calibre_files:
                        files = [':{}:{}'.format(format, format.rpartition('.')[2].upper())
                                 for format in item]
                        bibtex_entry.append('file = "%s"' % ', '.join(files))
                elif field == 'series_index' :
                    bibtex_entry.append('volume = "%s"' % int(item))
                elif field == 'timestamp' :
                    bibtex_entry.append('timestamp = "%s"' % isoformat(item).partition('T')[0])
                elif field == 'pubdate' :
                    bibtex_entry.append('year = "%s"' % item.year)
                    bibtex_entry.append('month = "%s"' % bibtexdict.utf8ToBibtex(strftime("%b", item)))
                elif field.startswith('#') and isinstance(item, string_or_bytes):
                    bibtex_entry.append('custom_{} = "{}"'.format(field[1:],
                        bibtexdict.utf8ToBibtex(item)))
                elif isinstance(item, string_or_bytes):
                    # elif field in ['title', 'publisher', 'cover', 'uuid', 'ondevice',
                    #     'author_sort', 'series', 'title_sort'] :
                    bibtex_entry.append(f'{field} = "{bibtexdict.utf8ToBibtex(item)}"')
            bibtex_entry = ',\n '.join(bibtex_entry)
            bibtex_entry += ' }\n\n'
            return bibtex_entry

        def check_entry_book_valid(entry):
            # Check that the required fields are ok for a book entry
            for field in ['title', 'authors', 'publisher'] :
                if entry[field] is None or len(entry[field]) == 0 :
                    return False
            if entry['pubdate'] is None :
                return False
            else :
                return True

        def make_bibtex_citation(entry, template_citation, bibtexclass):
            '''Build the citation key for an entry from the user template,
            falling back to the ISBN digits or the calibre id.'''
            # define a function to replace the template entry by its value
            def tpl_replace(objtplname) :
                tpl_field = re.sub(r'[\{\}]', '', objtplname.group())
                if tpl_field in TEMPLATE_ALLOWED_FIELDS :
                    if tpl_field in ['pubdate', 'timestamp'] :
                        tpl_field = isoformat(entry[tpl_field]).partition('T')[0]
                    elif tpl_field in ['tags', 'authors'] :
                        tpl_field =entry[tpl_field][0]
                    elif tpl_field in ['id', 'series_index'] :
                        tpl_field = str(entry[tpl_field])
                    else :
                        tpl_field = entry[tpl_field]
                    return ascii_text(tpl_field)
                else:
                    return ''
            if len(template_citation) >0 :
                tpl_citation = bibtexclass.utf8ToBibtex(
                    bibtexclass.ValidateCitationKey(re.sub(r'\{[^{}]*\}',
                    tpl_replace, template_citation)))
                if len(tpl_citation) >0 :
                    return tpl_citation
            # NOTE(review): assumes entry['isbn'] is always a string (possibly
            # empty); a None value here would raise TypeError - verify upstream.
            if len(entry["isbn"]) > 0 :
                template_citation = '%s' % re.sub(r'[\D]','', entry["isbn"])
            else :
                template_citation = '%s' % str(entry["id"])
            return bibtexclass.ValidateCitationKey(template_citation)

        self.fmt = path_to_output.rpartition('.')[2]
        self.notification = notification
        # Combobox options
        bibfile_enc = ['utf8', 'cp1252', 'ascii']
        bibfile_enctag = ['strict', 'replace', 'ignore', 'backslashreplace']
        bib_entry = ['mixed', 'misc', 'book']
        # Needed because CLI return str vs int by widget
        try:
            bibfile_enc = bibfile_enc[opts.bibfile_enc]
            bibfile_enctag = bibfile_enctag[opts.bibfile_enctag]
            bib_entry = bib_entry[opts.bib_entry]
        except:
            # option values arrived as strings (CLI); validate each one
            if opts.bibfile_enc in bibfile_enc :
                bibfile_enc = opts.bibfile_enc
            else :
                log.warn("Incorrect --choose-encoding flag, revert to default")
                bibfile_enc = bibfile_enc[0]
            if opts.bibfile_enctag in bibfile_enctag :
                bibfile_enctag = opts.bibfile_enctag
            else :
                log.warn("Incorrect --choose-encoding-configuration flag, revert to default")
                bibfile_enctag = bibfile_enctag[0]
            if opts.bib_entry in bib_entry :
                bib_entry = opts.bib_entry
            else :
                log.warn("Incorrect --entry-type flag, revert to default")
                bib_entry = bib_entry[0]
        if opts.verbose:
            opts_dict = vars(opts)
            log(f"{self.name}(): Generating {self.fmt}")
            if opts.connected_device['is_device_connected']:
                log(" connected_device: %s" % opts.connected_device['name'])
            if opts_dict['search_text']:
                log(" --search='%s'" % opts_dict['search_text'])
            if opts_dict['ids']:
                log(" Book count: %d" % len(opts_dict['ids']))
                if opts_dict['search_text']:
                    log(" (--search ignored when a subset of the database is specified)")
            if opts_dict['fields']:
                if opts_dict['fields'] == 'all':
                    log(" Fields: %s" % ', '.join(FIELDS[1:]))
                else:
                    log(" Fields: %s" % opts_dict['fields'])
            log(f" Output file will be encoded in {bibfile_enc} with {bibfile_enctag} flag")
            log(" BibTeX entry type is {} with a citation like '{}' flag".format(bib_entry, opts_dict['bib_cit']))
        # If a list of ids are provided, don't use search_text
        if opts.ids:
            opts.search_text = None
        data = self.search_sort_db(db, opts)
        if not len(data):
            log.error("\nNo matching database entries for search criteria '%s'" % opts.search_text)
        # Get the requested output fields as a list
        fields = self.get_output_fields(db, opts)
        # NOTE(review): this repeats the identical empty-result warning logged
        # just above - looks like a copy/paste remnant.
        if not len(data):
            log.error("\nNo matching database entries for search criteria '%s'" % opts.search_text)
        # Initialize BibTeX class
        bibtexc = BibTeX()
        # Entries writing after Bibtex formatting (or not)
        if bibfile_enc != 'ascii' :
            bibtexc.ascii_bibtex = False
        else :
            bibtexc.ascii_bibtex = True
        # Check citation choice and go to default in case of bad CLI
        if isinstance(opts.impcit, string_or_bytes) :
            if opts.impcit == 'False' :
                citation_bibtex= False
            elif opts.impcit == 'True' :
                citation_bibtex= True
            else :
                log.warn("Incorrect --create-citation, revert to default")
                citation_bibtex= True
        else :
            citation_bibtex= opts.impcit
        # Check add file entry and go to default in case of bad CLI
        if isinstance(opts.addfiles, string_or_bytes) :
            if opts.addfiles == 'False' :
                addfiles_bibtex = False
            elif opts.addfiles == 'True' :
                addfiles_bibtex = True
            else :
                log.warn("Incorrect --add-files-path, revert to default")
                addfiles_bibtex= True
        else :
            addfiles_bibtex = opts.addfiles
        # Preprocess for error and light correction
        template_citation = preprocess_template(opts.bib_cit)
        # Open output and write entries
        with codecs.open(path_to_output, 'w', bibfile_enc, bibfile_enctag)\
            as outfile:
            # File header
            nb_entries = len(data)
            # check in book strict if all is ok else throw a warning into log
            if bib_entry == 'book' :
                nb_books = len(list(filter(check_entry_book_valid, data)))
                if nb_books < nb_entries :
                    log.warn("Only %d entries in %d are book compatible" % (nb_books, nb_entries))
                    nb_entries = nb_books
            # If connected device, add 'On Device' values to data
            if opts.connected_device['is_device_connected'] and 'ondevice' in fields:
                for entry in data:
                    entry['ondevice'] = db.catalog_plugin_on_device_temp_mapping[entry['id']]['ondevice']
            # outfile.write('%%%Calibre catalog\n%%%{0} entries in catalog\n\n'.format(nb_entries))
            outfile.write('@preamble{"This catalog of %d entries was generated by calibre on %s"}\n\n'
                % (nb_entries, strftime("%A, %d. %B %Y %H:%M")))
            for entry in data:
                outfile.write(create_bibtex_entry(entry, fields, bib_entry, template_citation,
                                                      bibtexc, db, citation_bibtex, addfiles_bibtex))
| 16,947 | Python | .py | 336 | 34.997024 | 114 | 0.522402 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,713 | qt_backend.py | kovidgoyal_calibre/src/calibre/scraper/qt_backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import json
import os
import sys
from contextlib import suppress
from threading import Thread
from time import monotonic
from typing import Any, TypedDict
from qt.core import (
QApplication,
QNetworkAccessManager,
QNetworkCookie,
QNetworkCookieJar,
QNetworkReply,
QNetworkRequest,
QObject,
QSslError,
Qt,
QTimer,
QUrl,
pyqtSignal,
sip,
)
from calibre.utils.random_ua import random_common_chrome_user_agent
# Fallback timeout (seconds) used when a request does not supply its own, and
# the elapsed-time floor for the minimum-transfer-rate check in
# too_slow_or_timed_out().
default_timeout: float = 60.  # seconds
def qurl_to_string(url: QUrl | str) -> str:
    # Normalize the input to a QUrl, then return its percent-encoded form
    # decoded to text.
    encoded = QUrl(url).toEncoded()
    return bytes(encoded).decode()
def qurl_to_key(url: QUrl | str) -> str:
    # Keys are the encoded URL minus any trailing slashes, so that
    # 'http://x/' and 'http://x' map to the same key.
    key = qurl_to_string(url)
    return key.rstrip('/')
# (name, value) header pairs, in the order they should be sent
Headers = list[tuple[str, str]]
class Request(TypedDict):
    '''A single download command, as decoded from a JSON line on stdin.'''
    id: int  # request identifier, echoed back in the 'finished' response
    url: str  # URL to fetch
    headers: Headers  # extra request headers
    data_path: str  # path of a file holding the request body ('' for none)
    method: str  # HTTP method name, e.g. 'get' or 'post'
    filename: str  # basename of the output file inside the output directory
    timeout: float  # per-request inactivity timeout in seconds
class CookieJar(QNetworkCookieJar):
    '''A cookie jar that additionally supports "domainless" cookies, which
    are attached to every request regardless of its URL.'''

    def __init__(self, parent=None):
        super().__init__(parent)
        # Cookies added without a domain; applied to all URLs in
        # cookiesForUrl().
        self.all_request_cookies = []

    def add_cookie(self, c: QNetworkCookie) -> None:
        # A cookie without a domain is remembered separately instead of being
        # inserted into the jar proper.
        if not c.domain():
            self.all_request_cookies.append(c)
            return
        self.insertCookie(c)

    def cookiesForUrl(self, url: QUrl) -> list[QNetworkCookie]:
        # Copy each domainless cookie and normalize it against the request
        # URL so it acquires the appropriate domain/path before being sent.
        extras = []
        for stored in self.all_request_cookies:
            cookie = QNetworkCookie(stored)
            cookie.normalize(url)
            extras.append(cookie)
        return super().cookiesForUrl(url) + extras
def too_slow_or_timed_out(timeout: float, last_activity_at: float, created_at: float, downloaded_bytes: int, now: float) -> bool:
    '''Return True if a transfer has been idle past its timeout, or has been
    running longer than default_timeout at under 10 bytes/sec.'''
    # Idle check: no activity for longer than the per-request timeout.
    if timeout and now > last_activity_at + timeout:
        return True
    # Rate check: after the default timeout has elapsed, require a minimal
    # average transfer rate.
    elapsed = now - created_at
    if elapsed > default_timeout:
        return downloaded_bytes / elapsed < 10
    return False
class DownloadRequest(QObject):
    '''Tracks a single in-flight QNetworkReply and converts its outcome into
    the JSON-serializable result dict that is sent back to the parent
    process.'''
    # True when the failure looks transient (timeout, connection reset, ...)
    # so the caller knows a retry might succeed.
    worth_retry: bool = False
    def __init__(self, url: str, output_path: str, reply: QNetworkReply, timeout: float, req_id: int, parent: 'FetchBackend'):
        super().__init__(parent)
        self.url, self.filename = url, os.path.basename(output_path)
        self.output_path = output_path
        self.reply = reply
        self.req_id: int = req_id
        # Both timestamps use monotonic() so timeout math is immune to
        # wall-clock changes.
        self.created_at = self.last_activity_at = monotonic()
        self.timeout = timeout
        self.bytes_received = 0
        # Queued connections: progress callbacks run on the event loop.
        self.reply.downloadProgress.connect(self.on_download_progress, type=Qt.ConnectionType.QueuedConnection)
        self.reply.uploadProgress.connect(self.on_upload_progress, type=Qt.ConnectionType.QueuedConnection)
        # self.reply.readyRead.connect(self.on_data_available)
    def on_download_progress(self, bytes_received: int, bytes_total: int) -> None:
        # Any progress counts as activity for the idle-timeout check.
        self.bytes_received = bytes_received
        self.last_activity_at = monotonic()
    def on_upload_progress(self, bytes_received: int, bytes_total: int) -> None:
        self.bytes_received = bytes_received
        self.last_activity_at = monotonic()
    def save_data(self) -> None:
        # Write whatever the reply has buffered to the output file.
        with open(self.output_path, 'wb') as f:
            ba = self.reply.readAll()
            f.write(memoryview(ba))
    def on_ssl_errors(self, err) -> None:
        pass
    def as_result(self) -> dict[str, str]:
        '''Persist the downloaded data and summarize the reply (final URL,
        headers, HTTP status, error info) as a plain dict.'''
        self.save_data()
        e = self.reply.error()
        result = {
            'action': 'finished', 'id': self.req_id, 'url': self.url, 'output': self.output_path,
            'final_url': qurl_to_string(self.reply.url()), 'headers': []
        }
        h = result['headers']
        for (k, v) in self.reply.rawHeaderPairs():
            h.append((bytes(k).decode('utf-8', 'replace'), bytes(v).decode('utf-8', 'replace')))
        if code := self.reply.attribute(QNetworkRequest.Attribute.HttpStatusCodeAttribute):
            result['http_code'] = code
        if msg := self.reply.attribute(QNetworkRequest.Attribute.HttpReasonPhraseAttribute):
            result['http_status_message'] = msg
        if e != QNetworkReply.NetworkError.NoError:
            # Classify transient network failures as retryable.
            if e in (
                QNetworkReply.NetworkError.TimeoutError,
                QNetworkReply.NetworkError.TemporaryNetworkFailureError,
                QNetworkReply.NetworkError.ConnectionRefusedError,
                QNetworkReply.NetworkError.RemoteHostClosedError,
                QNetworkReply.NetworkError.OperationCanceledError, # abort() called in overall timeout check
                QNetworkReply.NetworkError.SslHandshakeFailedError,
            ):
                self.worth_retry = True
            es = f'{e}: {self.reply.errorString()}'
            result['error'], result['worth_retry'] = es, self.worth_retry
        return result
    def too_slow_or_timed_out(self, now: float) -> bool:
        # Delegates to the module-level helper with this request's state.
        return too_slow_or_timed_out(self.timeout, self.last_activity_at, self.created_at, self.bytes_received, now)
class FetchBackend(QNetworkAccessManager):
    '''QNetworkAccessManager-based download backend. Commands arrive from a
    stdin-reader thread via the signals below (queued connections move the
    work onto the Qt event loop thread); results are written as JSON lines
    to stdout.'''
    request_download = pyqtSignal(object)
    input_finished = pyqtSignal(str)
    set_cookies = pyqtSignal(object)
    set_user_agent_signal = pyqtSignal(str)
    download_finished = pyqtSignal(object)
    # NOTE(review): cache_name is accepted but unused in this class.
    def __init__(self, output_dir: str = '', cache_name: str = '', parent: QObject = None, user_agent: str = '', verify_ssl_certificates: bool = True) -> None:
        super().__init__(parent)
        self.cookie_jar = CookieJar(self)
        self.verify_ssl_certificates = verify_ssl_certificates
        self.setCookieJar(self.cookie_jar)
        self.user_agent = user_agent or random_common_chrome_user_agent()
        self.setTransferTimeout(int(default_timeout * 1000))
        self.output_dir = output_dir or os.getcwd()
        # Exit the process on any unexpected exception (see excepthook).
        sys.excepthook = self.excepthook
        # Queued connections: these signals are emitted from the reader thread.
        self.request_download.connect(self.download, type=Qt.ConnectionType.QueuedConnection)
        self.set_cookies.connect(self._set_cookies, type=Qt.ConnectionType.QueuedConnection)
        self.set_user_agent_signal.connect(self.set_user_agent, type=Qt.ConnectionType.QueuedConnection)
        self.input_finished.connect(self.on_input_finished, type=Qt.ConnectionType.QueuedConnection)
        self.finished.connect(self.on_reply_finished, type=Qt.ConnectionType.QueuedConnection)
        self.sslErrors.connect(self.on_ssl_errors)
        self.live_requests: set[DownloadRequest] = set()
        # NOTE(review): appears unused here; CookieJar keeps its own list.
        self.all_request_cookies: list[QNetworkCookie] = []
        # Polls live requests every 50ms and aborts slow/stalled transfers.
        self.timeout_timer = t = QTimer(self)
        t.setInterval(50)
        t.timeout.connect(self.enforce_timeouts)
    def excepthook(self, cls: type, exc: Exception, tb) -> None:
        # Any non-KeyboardInterrupt exception is fatal for this worker.
        if not isinstance(exc, KeyboardInterrupt):
            sys.__excepthook__(cls, exc, tb)
        QApplication.instance().exit(1)
    def on_input_finished(self, error_msg: str) -> None:
        # The stdin reader finished: report a read error, then quit the loop.
        if error_msg:
            self.send_response({'action': 'input_error', 'error': error_msg})
        QApplication.instance().exit(1)
    def enforce_timeouts(self):
        now = monotonic()
        timed_out = tuple(dr for dr in self.live_requests if dr.too_slow_or_timed_out(now))
        for dr in timed_out:
            # abort() triggers finished() with OperationCanceledError.
            dr.reply.abort()
        if not self.live_requests:
            self.timeout_timer.stop()
    def current_user_agent(self) -> str:
        return self.user_agent
    def download(self, req: Request) -> None:
        '''Start the network operation described by req and begin tracking it.'''
        filename = os.path.basename(req['filename'])
        qurl = QUrl(req['url'])
        rq = QNetworkRequest(qurl)
        timeout = req['timeout']
        rq.setTransferTimeout(int(timeout * 1000))
        rq.setRawHeader(b'User-Agent', self.current_user_agent().encode())
        for (name, val) in req['headers']:
            # Repeated headers are folded into one comma-separated value.
            ex = rq.rawHeader(name)
            if len(ex):
                val = bytes(ex).decode() + ', ' + val
            rq.setRawHeader(name.encode(), val.encode())
        qmethod = req['method'].lower()
        data_path = req['data_path']
        data = None
        if data_path:
            with open(data_path, 'rb') as f:
                data = f.read()
        if qmethod == 'get':
            reply = self.get(rq, data)
        elif qmethod == 'post':
            reply = self.post(rq, data)
        elif qmethod == 'put':
            reply = self.put(rq, data)
        elif qmethod == 'head':
            reply = self.head(rq, data)
        elif qmethod == 'delete':
            reply = self.deleteRequest(rq)
        else:
            # Arbitrary HTTP verb
            reply = self.sendCustomRequest(rq, req['method'].encode(), data)
        dr = DownloadRequest(req['url'], os.path.join(self.output_dir, filename), reply, timeout, req['id'], self)
        self.live_requests.add(dr)
        if not self.timeout_timer.isActive():
            self.timeout_timer.start()
    def on_ssl_errors(self, reply: QNetworkReply, errors: list[QSslError]) -> None:
        if not self.verify_ssl_certificates:
            reply.ignoreSslErrors()
    def on_reply_finished(self, reply: QNetworkReply) -> None:
        reply.deleteLater()
        # Find the matching DownloadRequest, report it, and drop the reply
        # reference so it can be garbage collected.
        for x in tuple(self.live_requests):
            if x.reply is reply:
                self.live_requests.discard(x)
                self.report_finish(x)
                x.reply = None
                break
    def report_finish(self, dr: DownloadRequest) -> None:
        result = dr.as_result()
        self.download_finished.emit(result)
        self.send_response(result)
    def send_response(self, r: dict[str, str]) -> None:
        # JSON-line protocol on the real stdout (sys.stdout is redirected to
        # stderr in worker()).
        with suppress(OSError):
            print(json.dumps(r), flush=True, file=sys.__stdout__)
    def set_user_agent(self, new_val: str) -> None:
        self.user_agent = new_val
    def _set_cookie_from_header(self, cookie_string: str) -> None:
        # A raw Set-Cookie header value may contain several cookies.
        for c in QNetworkCookie.parseCookies(cookie_string.encode()):
            self.cookie_jar.add_cookie(c)
    def _set_cookies(self, cookies: list[dict[str, str]]) -> None:
        for c in cookies:
            if 'header' in c:
                self._set_cookie_from_header(c['header'])
            else:
                self.set_simple_cookie(c['name'], c['value'], c.get('domain'), c.get('path'))
    def set_simple_cookie(self, name: str, value: str, domain: str | None = None, path: str | None = '/'):
        # A cookie with no domain becomes a "send with every request" cookie
        # (see CookieJar.add_cookie).
        c = QNetworkCookie()
        c.setName(name.encode())
        c.setValue(value.encode())
        if domain is not None:
            c.setDomain(domain)
        if path is not None:
            c.setPath(path)
        self.cookie_jar.add_cookie(c)
def request_from_cmd(cmd: dict[str, Any], filename: str) -> Request:
    '''Build a Request dict from a decoded JSON command, filling in defaults
    for any missing optional keys.'''
    timeout = cmd.get('timeout')
    if timeout is None:
        timeout = default_timeout
    return {
        'id': int(cmd['id']),
        'url': cmd['url'],
        'headers': cmd.get('headers') or [],
        'data_path': cmd.get('data_path') or '',
        'method': cmd.get('method') or 'get',
        'filename': filename,
        'timeout': float(timeout),
    }
def read_commands(backend: FetchBackend, tdir: str) -> None:
error_msg = ''
try:
for line in sys.stdin:
cmd = json.loads(line)
ac = cmd['action']
if ac == 'download':
backend.request_download.emit(request_from_cmd(cmd, f'o{cmd["id"]}'))
elif ac == 'set_cookies':
backend.set_cookies.emit(cmd['cookies'])
elif ac == 'set_user_agent':
backend.set_user_agent_signal.emit(cmd['user_agent'])
elif ac == 'quit':
break
except Exception as err:
import traceback
traceback.print_exc()
error_msg = str(err)
backend.input_finished.emit(error_msg)
def worker(tdir: str, user_agent: str, verify_ssl_certificates: bool, backend_class: type = FetchBackend) -> None:
    '''Child-process entry point: run a Qt event loop servicing download
    commands that a background thread reads from stdin.'''
    app = QApplication.instance()
    # stdout carries the JSON response protocol; send stray prints to stderr.
    sys.stdout = sys.stderr
    backend = backend_class(parent=app, user_agent=user_agent, output_dir=tdir, verify_ssl_certificates=verify_ssl_certificates)
    try:
        read_thread = Thread(target=read_commands, args=(backend, tdir), daemon=True)
        read_thread.start()
        app.exec()
    finally:
        # Delete the C++ side of the backend before tearing down the app.
        sip.delete(backend)
        del app
def develop(url: str) -> None:
    # Manual test harness: download every URL given on the command line and
    # exit when all have finished.
    # NOTE(review): the *url* parameter is unused; the loop below rebinds the
    # name from sys.argv. Presumably intentional for ad-hoc testing.
    from calibre.gui2 import must_use_qt, setup_unix_signals
    must_use_qt()
    app = QApplication.instance()
    app.signal_received = lambda : app.exit(1)
    setup_unix_signals(app)
    backend = FetchBackend()
    num_left = 0
    def download_finished(dr: DownloadRequest):
        nonlocal num_left
        num_left -= 1
        # Quit the event loop once the last download reports in.
        if not num_left:
            backend.input_finished.emit('')
    backend.download_finished.connect(download_finished)
    # Calls download() directly rather than via the request_download signal,
    # since everything runs on the main thread here.
    for i, url in enumerate(sys.argv[1:]):
        backend.download(request_from_cmd({'url':url, 'id': i}, f'test-output-{i}'))
        num_left += 1
    app.exec()
if __name__ == '__main__':
develop(sys.argv[-1])
| 12,798 | Python | .py | 297 | 34.579125 | 159 | 0.627471 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,714 | qt.py | kovidgoyal_calibre/src/calibre/scraper/qt.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import atexit
import json
import os
import shutil
import subprocess
import time
import weakref
from contextlib import suppress
from io import BytesIO
from queue import Queue
from threading import RLock, Thread
from urllib.error import URLError
from urllib.parse import urlencode
from urllib.request import Request
from calibre.ptempfile import PersistentTemporaryDirectory
class FakeResponse:
    '''A lazy, file-like stand-in for an HTTP response.

    The actual result arrives later on self.queue (posted by the Browser
    dispatcher thread). The first access to any data/metadata blocks in
    _wait() until the result is available; after that the object behaves
    like a urllib response (read/seek/headers/status/geturl/...).
    '''

    def __init__(self):
        self.queue = Queue()
        self.done = False
        self.final_url = ''
        self._reason = ''
        self._status = None
        self._headers = []
        # Placeholder so read()/close() work even when no output file exists.
        self._data = BytesIO()

    def _wait(self):
        # Block until the worker result arrives, then populate this object.
        # Runs at most once; subsequent calls are no-ops.
        if self.done:
            return
        self.done = True
        res = self.queue.get()
        # The queue is single-use; drop it so the result cannot be re-read.
        del self.queue
        if res['action'] == 'input_error':
            raise Exception(res['error'])
        self.final_url = res['final_url']
        self._status = res.get('http_code')
        self._reason = res.get('http_status_message')
        if not self._reason:
            # Fall back to the standard reason phrase for the status code.
            from http.client import responses
            with suppress(KeyError):
                self._reason = responses[self._status]
        self._headers = res['headers']
        if 'error' in res:
            ex = URLError(res['error'])
            ex.worth_retry = bool(res.get('worth_retry'))
            raise ex
        # If the worker produced no output file, keep the empty BytesIO.
        with suppress(FileNotFoundError):
            self._data = open(res['output'], 'rb')

    def read(self, *a, **kw):
        self._wait()
        ans = self._data.read(*a, **kw)
        return ans

    def seek(self, *a, **kw):
        self._wait()
        return self._data.seek(*a, **kw)

    def tell(self, *a, **kw):
        return self._data.tell(*a, **kw)

    @property
    def url(self) -> str:
        # Final URL after any redirects.
        self._wait()
        return self.final_url

    @property
    def status(self) -> int | None:
        self._wait()
        return self._status
    # Alias matching http.client.HTTPResponse.code
    code = status

    @property
    def headers(self):
        # Response headers as an email.message.EmailMessage, mirroring urllib.
        self._wait()
        from email.message import EmailMessage
        ans = EmailMessage()
        for k, v in self._headers:
            ans[k] = v
        return ans

    @property
    def reason(self) -> str:
        self._wait()
        return self._reason or ''

    def getcode(self) -> int | None:
        return self.status

    def geturl(self):
        return self.url

    def getinfo(self):
        return self.headers

    def close(self):
        self._data.close()

    def __enter__(self):
        return self

    def __exit__(self, *a):
        self._data.close()
def shutdown_browser(bref):
    '''atexit hook: dereference the weakref and, if the Browser is still
    alive, shut its worker process down.'''
    browser = bref()
    if browser is not None:
        browser.shutdown()
class Browser:
    '''A mechanize-like browser that performs its fetches in a separate Qt
    worker process, communicating over a line-based JSON protocol on the
    worker's stdin/stdout. Thread-safe: all shared state is guarded by
    self.lock.
    '''

    def __init__(self, user_agent: str = '', headers: tuple[tuple[str, str], ...] = (), verify_ssl_certificates: bool = True, start_worker: bool = False):
        self.tdir = ''
        self.worker = self.dispatcher = None
        # Maps request id -> FakeResponse.queue, so the dispatcher thread can
        # deliver results to the right response object.
        self.dispatch_map = {}
        self.verify_ssl_certificates = verify_ssl_certificates
        self.id_counter = 0
        self.addheaders: list[tuple[str, str]] = list(headers)
        self.user_agent = user_agent
        self.lock = RLock()
        self.shutting_down = False
        # weakref so atexit does not keep this Browser alive.
        atexit.register(shutdown_browser, weakref.ref(self))
        if start_worker:
            self._ensure_state()

    def _open(self, url_or_request: Request, data=None, timeout=None, visit: bool = True):
        '''Queue a download in the worker and return a FakeResponse that
        blocks on first access until the result arrives.'''
        method = 'POST' if data else 'GET'
        headers = []
        if hasattr(url_or_request, 'get_method'):
            # A urllib.request.Request object
            r = url_or_request
            method = r.get_method()
            data = data or r.data
            headers = r.header_items()
            url = r.full_url
        else:
            url = url_or_request

        def has_header(x: str) -> bool:
            # Case-insensitive membership test against the per-request headers.
            x = x.lower()
            for (h, v) in headers:
                if h.lower() == x:
                    return True
            return False

        if isinstance(data, dict):
            # Form data: encode it and mark it as such.
            headers.append(('Content-Type', 'application/x-www-form-urlencoded'))
            data = urlencode(data)
        if isinstance(data, str):
            data = data.encode('utf-8')
            if not has_header('Content-Type'):
                headers.append(('Content-Type', 'text/plain'))
        if not self.is_method_ok(method):
            raise KeyError(f'The HTTP {method} request method is not supported')
        with self.lock:
            self._ensure_state()
            self.id_counter += 1
            cmd = {
                'action': 'download', 'id': self.id_counter, 'url': url, 'method': method, 'timeout': timeout,
                'headers': self.addheaders + headers, 'visit': visit,}
            if data:
                # Request bodies are passed to the worker via a file in tdir.
                with open(os.path.join(self.tdir, f'i{self.id_counter}'), 'wb') as f:
                    if hasattr(data, 'read'):
                        shutil.copyfileobj(data, f)
                    else:
                        f.write(data)
                cmd['data_path'] = f.name
                # Ensure a body always has some Content-Type.
                for k, v in cmd['headers']:
                    if k.lower() == 'content-type':
                        break
                else:
                    cmd['headers'].append(('Content-Type', 'application/x-www-form-urlencoded'))
            res = FakeResponse()
            self.dispatch_map[self.id_counter] = res.queue
            self._send_command(cmd)
        return res

    def open(self, url_or_request: Request, data=None, timeout=None):
        return self._open(url_or_request, data, timeout)

    def open_novisit(self, url_or_request: Request, data=None, timeout=None):
        # Same as open(), but the backend treats it as a non-visiting fetch.
        return self._open(url_or_request, data, timeout, visit=False)

    def is_method_ok(self, method: str) -> bool:
        # Hook for subclasses to restrict HTTP methods; all allowed here.
        return True

    def set_simple_cookie(self, name: str, value: str, domain: str | None = None, path: str | None = '/'):
        '''
        Set a simple cookie using a name and value. If domain is specified, the cookie is only sent with requests
        to matching domains, otherwise it is sent with all requests. The leading dot in domain is optional.
        Similarly, by default all paths match, to restrict to certain path use the path parameter.
        '''
        c = {'name': name, 'value': value, 'domain': domain, 'path': path}
        self._send_command({'action': 'set_cookies', 'cookies':[c]})
    set_cookie = set_simple_cookie

    def set_user_agent(self, val: str = '') -> None:
        self.user_agent = val
        self._send_command({'action': 'set_user_agent', 'user_agent': val})

    def clone_browser(self):
        # The worker process is shared; cloning just reuses this instance.
        return self

    def _send_command(self, cmd):
        # Serialize one command as a JSON line on the worker's stdin.
        with self.lock:
            self._ensure_state()
            self.worker.stdin.write(json.dumps(cmd).encode())
            self.worker.stdin.write(b'\n')
            self.worker.stdin.flush()

    def _ensure_state(self):
        # Lazily start the worker process and the dispatcher thread.
        with self.lock:
            if not self.tdir:
                self.tdir = PersistentTemporaryDirectory()
                self.worker = self.run_worker()
                self.dispatcher = Thread(target=self._dispatch, daemon=True)
                self.dispatcher.start()

    def run_worker(self) -> subprocess.Popen:
        return run_worker(self.tdir, self.user_agent, self.verify_ssl_certificates)

    def _dispatch(self):
        # Runs on the dispatcher thread: route worker results to the waiting
        # FakeResponse queues.
        try:
            for line in self.worker.stdout:
                cmd = json.loads(line)
                if cmd.get('action') == 'finished':
                    with self.lock:
                        q = self.dispatch_map.pop(cmd['id'])
                    q.put(cmd)
                else:
                    raise Exception(f'Unexpected response from backend fetch worker process: {cmd}')
        except Exception:
            if not self.shutting_down:
                import traceback
                traceback.print_exc()

    def shutdown(self):
        '''Stop the worker process (politely, then by kill), remove the temp
        directory and join the dispatcher thread. Safe to call repeatedly.'''
        self.shutting_down = True
        if self.worker:
            w, self.worker = self.worker, None
            with suppress(OSError):
                w.stdin.close()
            with suppress(OSError):
                w.stdout.close()
            # Give the worker a short grace period to exit on its own.
            give_up_at = time.monotonic() + 1.5
            while time.monotonic() < give_up_at and w.poll() is None:
                time.sleep(0.01)
            if w.poll() is None:
                w.kill()
        if self.tdir:
            with suppress(OSError):
                shutil.rmtree(self.tdir)
            self.tdir = ''
        if self.dispatcher:
            self.dispatcher.join()
            self.dispatcher = None

    def __del__(self):
        self.shutdown()
class WebEngineBrowser(Browser):
    '''Browser variant whose worker process fetches using the QtWebEngine
    (Chromium) backend instead of the plain Qt network backend.'''

    def run_worker(self) -> subprocess.Popen:
        # Same stdin/stdout protocol; only the backend function differs.
        return run_worker(
            self.tdir, self.user_agent, self.verify_ssl_certificates,
            function='webengine_worker')
def run_worker(tdir: str, user_agent: str, verify_ssl_certificates: bool, function: str = 'worker'):
    '''Launch a scraper worker child process that runs the named entry point
    from this module with the given arguments, connected via pipes.'''
    from calibre.utils.ipc.simple_worker import start_pipe_worker
    bootstrap = (
        f'from calibre.scraper.qt import {function}; '
        f'{function}({tdir!r}, {user_agent!r}, {verify_ssl_certificates!r})'
    )
    return start_pipe_worker(bootstrap)
def worker(*args):
    '''Child-process entry point: run the plain Qt networking fetch backend.'''
    from calibre.gui2 import must_use_qt
    must_use_qt()
    from .qt_backend import worker as backend_worker
    backend_worker(*args)
def webengine_worker(*args):
    '''Child-process entry point: run the QtWebEngine based fetch backend.'''
    from calibre.gui2 import must_use_qt
    must_use_qt()
    from .webengine_backend import worker as backend_worker
    backend_worker(*args)
def develop():
    '''Manual smoke test: fetch each URL given on the command line through a
    worker process and report the size of each response.'''
    import sys
    browser = Browser()
    try:
        for target in sys.argv[1:]:
            response = browser.open(target)
            print(target, len(response.read()))
    finally:
        # Dropping the last reference triggers Browser.__del__ -> shutdown().
        del browser


if __name__ == '__main__':
    develop()
| 9,640 | Python | .py | 254 | 28.251969 | 154 | 0.573082 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,715 | simple.py | kovidgoyal_calibre/src/calibre/scraper/simple.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2022, Kovid Goyal <kovid at kovidgoyal.net>
import sys
import weakref
from threading import Lock
# Weak references to every browser created by read_url(), so they can all be
# shut down at exit without keeping them alive.
overseers = []


def cleanup_overseers():
    '''Snapshot all live browsers, clear the registry, and return a callable
    that shuts every one of them down.'''
    live = [br for ref in overseers if (br := ref())]
    overseers.clear()

    def join_all():
        for browser in live:
            browser.shutdown()

    return join_all
read_url_lock = Lock()


def read_url(storage, url, timeout=60, as_html=True):
    '''Fetch url via a WebEngineBrowser shared through the storage list.

    The lock guards lazy creation of the shared browser; the browser itself
    is registered for shutdown at exit. Returns decoded HTML by default, or
    the raw bytes when as_html is False.
    '''
    with read_url_lock:
        if not storage:
            from calibre.scraper.qt import WebEngineBrowser
            storage.append(WebEngineBrowser())
            overseers.append(weakref.ref(storage[-1]))
        browser = storage[0]
    response = browser.open_novisit(url, timeout=timeout)
    raw_bytes = response.read()
    if as_html:
        from calibre.ebooks.chardet import xml_to_unicode
        return xml_to_unicode(raw_bytes, strip_encoding_pats=True)[0]
    return raw_bytes
if __name__ == '__main__':
    # Quick manual test: fetch the URL given as the last command line argument
    # and print it, making sure the worker process is cleaned up even when the
    # fetch fails.
    try:
        print(read_url([], sys.argv[-1]))
    finally:
        cleanup_overseers()()
| 1,039 | Python | .py | 31 | 27.774194 | 72 | 0.661986 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,716 | webengine_backend.py | kovidgoyal_calibre/src/calibre/scraper/webengine_backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import base64
import html
import json
import os
import secrets
import sys
from collections import deque
from contextlib import suppress
from http import HTTPStatus
from time import monotonic
from qt.core import QApplication, QByteArray, QNetworkCookie, QObject, Qt, QTimer, QUrl, pyqtSignal, sip
from qt.webengine import QWebEnginePage, QWebEngineProfile, QWebEngineScript, QWebEngineSettings
from calibre.scraper.qt_backend import Request, too_slow_or_timed_out
from calibre.scraper.qt_backend import worker as qt_worker
from calibre.utils.resources import get_path as P
from calibre.utils.webengine import create_script, insert_scripts, setup_profile
def create_base_profile(cache_name='', allow_js=False):
    '''Create a QWebEngineProfile hardened for scraping: random common Chrome
    user agent, plugins off, JavaScript optional, no clipboard/window access
    from JS and no JS access to local files.'''
    from calibre.utils.random_ua import random_common_chrome_user_agent
    app = QApplication.instance()
    if cache_name:
        profile = QWebEngineProfile(cache_name, app)
    else:
        profile = QWebEngineProfile(app)
    setup_profile(profile)
    profile.setHttpUserAgent(random_common_chrome_user_agent())
    profile.setHttpCacheMaximumSize(0)  # managed by webengine
    settings = profile.settings()
    settings.setUnknownUrlSchemePolicy(QWebEngineSettings.UnknownUrlSchemePolicy.DisallowUnknownUrlSchemes)
    for attribute, enabled in (
        (QWebEngineSettings.WebAttribute.PluginsEnabled, False),
        (QWebEngineSettings.WebAttribute.JavascriptEnabled, allow_js),
        (QWebEngineSettings.WebAttribute.JavascriptCanOpenWindows, False),
        (QWebEngineSettings.WebAttribute.JavascriptCanAccessClipboard, False),
        # ensure javascript cannot read from local files
        (QWebEngineSettings.WebAttribute.LocalContentCanAccessFileUrls, False),
        (QWebEngineSettings.WebAttribute.AllowWindowActivationFromJavaScript, False),
    ):
        settings.setAttribute(attribute, enabled)
    return profile
class DownloadRequest(QObject):
    '''Tracks one in-flight download performed by a Worker page, accumulating
    response metadata and body chunks, and producing the JSON result dict that
    is sent back to the parent process.'''

    # Set by Worker.abort_on_timeout(); the result is then reported as a
    # retryable timeout error.
    aborted_on_timeout: bool = False
    response_received = pyqtSignal(object)

    def __init__(self, url: str, output_path: str, timeout: float, req_id: int, parent: 'FetchBackend'):
        super().__init__(parent)
        self.url, self.filename = url, os.path.basename(output_path)
        self.output_path = output_path
        self.req_id: int = req_id
        self.created_at = self.last_activity_at = monotonic()
        self.timeout = timeout
        self.bytes_received = 0
        # Skeleton of the result dict, filled in as metadata/chunks arrive.
        self.result = {
            'action': 'finished', 'id': self.req_id, 'url': self.url, 'output': self.output_path,
            'headers': [], 'final_url': self.url, 'worth_retry': False,
        }

    def metadata_received(self, r: dict) -> None:
        '''Record response metadata (status, headers, final URL) reported by
        scraper.js for this fetch.'''
        if r['response_type'] != 'basic':
            # Bug fix: this previously interpolated r["type"], which is the
            # message type ('metadata_received'), not the response type.
            print(f'WARNING: response type for {self.url} indicates headers are restricted: {r["response_type"]}')
        # These statuses indicate transient conditions that merit a retry.
        self.result['worth_retry'] = r['status_code'] in (
            HTTPStatus.TOO_MANY_REQUESTS, HTTPStatus.REQUEST_TIMEOUT, HTTPStatus.SERVICE_UNAVAILABLE, HTTPStatus.GATEWAY_TIMEOUT)
        self.result['final_url'] = r['url']
        self.result['headers'] = r['headers']
        self.result['http_code'] = r['status_code']
        self.result['http_status_message'] = r['status_msg']

    def chunk_received(self, chunk: QByteArray) -> None:
        '''Append one body chunk to the output file, tracking progress.'''
        mv = memoryview(chunk)
        self.bytes_received += len(mv)
        # Open in append mode so successive chunks accumulate.
        with open(self.output_path, 'ab') as f:
            f.write(mv)

    def as_result(self, r: dict | None = None) -> dict:
        '''Finalize and return the result dict, folding in a timeout abort or
        an error message from scraper.js if present.

        Note: the default for r used to be a mutable {}; None is equivalent
        (both falsy) and avoids the shared-mutable-default pitfall.
        '''
        if self.aborted_on_timeout:
            self.result['error'] = 'Timed out'
            self.result['worth_retry'] = True
        elif r:
            self.result['error'] = r['error']
            self.result['worth_retry'] = True  # usually some kind of network error
        return self.result

    def too_slow_or_timed_out(self, now: float) -> bool:
        # Delegates to the shared policy used by the plain Qt backend as well.
        return too_slow_or_timed_out(self.timeout, self.last_activity_at, self.created_at, self.bytes_received, now)
class Worker(QWebEnginePage):
    '''A QWebEnginePage that performs one download at a time by loading a page
    containing the request as a JSON payload; scraper.js (injected into the
    profile) performs the fetch and reports progress via console messages.

    NOTE(review): self.token is assigned externally by
    FetchBackend.create_worker() before any page is loaded.
    '''

    working_on_request: DownloadRequest | None = None
    messages_dispatch = pyqtSignal(object)
    result_received = pyqtSignal(object)

    def __init__(self, profile, parent):
        super().__init__(profile, parent)
        # Queued signal hop so message handling runs on the GUI thread.
        self.messages_dispatch.connect(self.on_messages)

    # Silence/auto-answer JS dialogs so a page can never block the worker.
    def javaScriptAlert(self, url, msg):
        pass

    def javaScriptConfirm(self, url, msg):
        return True

    def javaScriptPrompt(self, url, msg, defval):
        return True, defval

    def javaScriptConsoleMessage(self, level: QWebEnginePage.JavaScriptConsoleMessageLevel, message: str, line_num: int, source_id: str) -> None:
        # scraper.js signals availability of messages by logging a line that
        # starts with the shared secret token.
        if source_id == 'userscript:scraper.js':
            if level == QWebEnginePage.JavaScriptConsoleMessageLevel.InfoMessageLevel and message.startswith(self.token):
                msg = json.loads(message.partition(' ')[2])
                t = msg.get('type')
                if t == 'messages_available':
                    # Pull the queued messages out of the page.
                    self.runjs('window.get_messages()', self.dispatch_messages)
            else:
                # Other console output from scraper.js: surface it for debugging.
                print(f'{source_id}:{line_num}:{message}')
            return

    def dispatch_messages(self, messages: list) -> None:
        # Callback from runJavaScript; guard against the page being deleted.
        if not sip.isdeleted(self):
            self.messages_dispatch.emit(messages)

    def runjs(self, js: str, callback = None) -> None:
        # Always run in the ApplicationWorld so page scripts cannot interfere.
        if callback is None:
            self.runJavaScript(js, QWebEngineScript.ScriptWorldId.ApplicationWorld)
        else:
            self.runJavaScript(js, QWebEngineScript.ScriptWorldId.ApplicationWorld, callback)

    def start_download(self, output_dir: str, req: Request, data: str) -> DownloadRequest:
        '''Load a synthetic page at the request URL carrying the request (and
        base64 body data) as an escaped JSON payload for scraper.js.'''
        filename = os.path.basename(req['filename'])
        payload = json.dumps({'req': req, 'data': data})
        # NOTE(review): the markup has '</head></body><div...' (no opening
        # <body>); browsers repair this, but it looks like a typo — confirm.
        content = f'''<!DOCTYPE html>
<html><head></head></body><div id="payload">{html.escape(payload)}</div></body></html>
'''
        self.setContent(content.encode(), 'text/html;charset=utf-8', QUrl(req['url']))
        self.working_on_request = DownloadRequest(req['url'], os.path.join(output_dir, filename), req['timeout'], req['id'], self.parent())
        return self.working_on_request

    def abort_on_timeout(self) -> None:
        # Invoked by FetchBackend.enforce_timeouts when this request stalls.
        if self.working_on_request is not None:
            self.working_on_request.aborted_on_timeout = True
            self.runjs(f'window.abort_download({self.working_on_request.req_id})')

    def on_messages(self, messages: list[dict]) -> None:
        '''Apply a batch of progress messages from scraper.js to the current
        DownloadRequest, emitting result_received when it completes/fails.'''
        if not messages:
            return
        if self.working_on_request is None:
            print('Got messages without request:', messages)
            return
        # Any message counts as activity for timeout accounting.
        self.working_on_request.last_activity_at = monotonic()
        for m in messages:
            t = m['type']
            if t == 'metadata_received':
                self.working_on_request.metadata_received(m)
            elif t == 'chunk_received':
                self.working_on_request.chunk_received(m['chunk'])
            elif t == 'finished':
                result = self.working_on_request.as_result()
                self.working_on_request = None
                self.result_received.emit(result)
            elif t == 'error':
                result = self.working_on_request.as_result(m)
                self.working_on_request = None
                self.result_received.emit(result)
class FetchBackend(QObject):
    '''QtWebEngine implementation of the fetch worker backend.

    Receives commands from read_commands() (via queued signals, so they run on
    the GUI thread), fans downloads out over a small pool of Worker pages, and
    writes JSON results to the real stdout for the parent process.
    '''

    request_download = pyqtSignal(object)
    input_finished = pyqtSignal(str)
    set_cookies = pyqtSignal(object)
    set_user_agent_signal = pyqtSignal(str)
    download_finished = pyqtSignal(object)

    def __init__(self, output_dir: str = '', cache_name: str = '', parent: QObject = None, user_agent: str = '', verify_ssl_certificates: bool = True) -> None:
        profile = create_base_profile(cache_name)
        # Shared secret used to recognize scraper.js console messages.
        self.token = secrets.token_hex()
        js = P('scraper.js', allow_user_override=False, data=True).decode('utf-8').replace('TOKEN', self.token)
        insert_scripts(profile, create_script('scraper.js', js))
        if user_agent:
            profile.setHttpUserAgent(user_agent)
        self.output_dir = output_dir or os.getcwd()
        self.profile = profile
        super().__init__(parent)
        self.workers: list[Worker] = []
        # Requests queued while all workers are busy, as (req, b64 data) pairs.
        self.pending_requests: deque[tuple[Request, str]] = deque()
        sys.excepthook = self.excepthook
        # Queued connections: emitters run on the stdin reader thread.
        self.request_download.connect(self.download, type=Qt.ConnectionType.QueuedConnection)
        self.set_cookies.connect(self._set_cookies, type=Qt.ConnectionType.QueuedConnection)
        self.set_user_agent_signal.connect(self.set_user_agent, type=Qt.ConnectionType.QueuedConnection)
        self.input_finished.connect(self.on_input_finished, type=Qt.ConnectionType.QueuedConnection)
        # Cookies with no domain restriction, re-applied to every request URL.
        self.all_request_cookies: list[QNetworkCookie] = []
        self.timeout_timer = t = QTimer(self)
        t.setInterval(50)
        t.timeout.connect(self.enforce_timeouts)

    def excepthook(self, cls: type, exc: Exception, tb) -> None:
        # Any uncaught exception must terminate the worker process.
        if not isinstance(exc, KeyboardInterrupt):
            sys.__excepthook__(cls, exc, tb)
        QApplication.instance().exit(1)

    def on_input_finished(self, error_msg: str) -> None:
        # stdin closed or errored: report the error (if any) and quit.
        if error_msg:
            self.send_response({'action': 'input_error', 'error': error_msg})
        QApplication.instance().exit(1)

    def enforce_timeouts(self):
        '''Periodic (50ms) check: abort stalled downloads; stop the timer when
        no worker is actively downloading.'''
        now = monotonic()
        has_workers = False
        for w in self.workers:
            if w.working_on_request is not None:
                if w.working_on_request.too_slow_or_timed_out(now):
                    w.abort_on_timeout()
                else:
                    has_workers = True
        if not has_workers:
            self.timeout_timer.stop()

    def download(self, req: Request) -> None:
        '''Dispatch one download: apply global cookies, read any request body,
        and hand the request to a free worker (creating up to 5) or queue it.'''
        qurl = QUrl(req['url'])
        cs = self.profile.cookieStore()
        for c in self.all_request_cookies:
            # Copy before normalizing so the stored cookie stays generic.
            c = QNetworkCookie(c)
            c.normalize(qurl)
            cs.setCookie(c)
        data_path = req['data_path']
        data = ''
        if data_path:
            # Body is shipped to scraper.js as base64 text.
            with open(data_path, 'rb') as f:
                data = base64.standard_b64encode(f.read()).decode()
        if not self.workers:
            self.workers.append(self.create_worker())
        for w in self.workers:
            if w.working_on_request is None:
                w.start_download(self.output_dir, req, data)
                self.timeout_timer.start()
                return
        if len(self.workers) < 5:
            self.workers.append(self.create_worker())
            self.workers[-1].start_download(self.output_dir, req, data)
            self.timeout_timer.start()
            return
        self.pending_requests.append((req, data))

    def create_worker(self) -> Worker:
        ans = Worker(self.profile, self)
        # Token plus separator, matching the 'TOKEN ' prefix logged by scraper.js.
        ans.token = self.token + ' '
        ans.result_received.connect(self.result_received)
        return ans

    def result_received(self, result: dict) -> None:
        # A worker finished: report the result and feed it the next queued job.
        self.send_response(result)
        self.download_finished.emit(result)
        if self.pending_requests:
            w = self.sender()
            req, data = self.pending_requests.popleft()
            w.start_download(self.output_dir, req, data)
            self.timeout_timer.start()

    def send_response(self, r: dict[str, str]) -> None:
        # Results go to the REAL stdout (sys.stdout was redirected to stderr).
        with suppress(OSError):
            print(json.dumps(r), flush=True, file=sys.__stdout__)

    def set_user_agent(self, new_val: str) -> None:
        self.profile.setHttpUserAgent(new_val)

    def add_cookie(self, c: QNetworkCookie) -> None:
        cs = self.profile.cookieStore()
        if c.domain():
            cs.setCookie(c)
        else:
            # No domain: remember it and apply it to every request URL later.
            self.all_request_cookies.append(c)

    def _set_cookie_from_header(self, cookie_string: str) -> None:
        for c in QNetworkCookie.parseCookies(cookie_string.encode()):
            self.add_cookie(c)

    def _set_cookies(self, cookies: list[dict[str, str]]) -> None:
        for c in cookies:
            if 'header' in c:
                self._set_cookie_from_header(c['header'])
            else:
                self.set_simple_cookie(c['name'], c['value'], c.get('domain'), c.get('path'))

    def set_simple_cookie(self, name: str, value: str, domain: str | None = None, path: str | None = '/'):
        # Build a cookie from its parts; unset fields are simply omitted.
        c = QNetworkCookie()
        c.setName(name.encode())
        c.setValue(value.encode())
        if domain is not None:
            c.setDomain(domain)
        if path is not None:
            c.setPath(path)
        self.add_cookie(c)
def worker(tdir: str, user_agent: str, verify_ssl_certificates: bool) -> None:
    '''Entry point for the webengine worker process: run the shared Qt fetch
    loop, but with the WebEngine-based FetchBackend.'''
    return qt_worker(tdir, user_agent, verify_ssl_certificates, FetchBackend)
def develop(*urls: str) -> None:
    '''Manual smoke test: fetch each URL with a WebEngineBrowser and print
    status, headers and body size.'''
    from calibre.scraper.qt import WebEngineBrowser
    browser = WebEngineBrowser()
    for target in urls:
        print(target)
        response = browser.open(target)
        print(f'{response.code} {response.reason}')
        print(response.headers)
        print(len(response.read()))


if __name__ == '__main__':
    develop(*sys.argv[1:])
| 12,936 | Python | .py | 269 | 39 | 159 | 0.641255 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,717 | test_fetch_backend.py | kovidgoyal_calibre/src/calibre/scraper/test_fetch_backend.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2024, Kovid Goyal <kovid at kovidgoyal.net>
import http.server
import json
import os
import unittest
from threading import Event, Thread
from calibre.constants import iswindows
from .qt import Browser, WebEngineBrowser
# Environment-derived flags controlling whether the scraper tests run at all.
is_ci = os.environ.get('CI', '').lower() == 'true'
is_sanitized = 'libasan' in os.environ.get('LD_PRELOAD', '')
if is_sanitized:
    skip = 'Skipping Scraper tests as ASAN is enabled'
elif 'SKIP_QT_BUILD_TEST' in os.environ:
    skip = 'Skipping Scraper tests as it causes crashes in macOS VM'
else:
    skip = ''
class Handler(http.server.BaseHTTPRequestHandler):
    '''Test HTTP handler that echoes request details back as JSON and lets the
    owning test simulate stalled servers via dont_send_response/dont_send_body.'''

    def __init__(self, test_obj, *a):
        # test_obj is the TestFetchBackend instance (shared mutable flags).
        self.test_obj = test_obj
        super().__init__(*a)

    def do_POST(self):
        if self.test_obj.dont_send_response:
            # Simulate a server that never answers (timeout testing).
            return
        self.do_response()

    def do_GET(self):
        if self.test_obj.dont_send_response:
            return
        if self.path == '/favicon.ico':
            self.send_response(http.HTTPStatus.NOT_FOUND)
            return
        if self.path == '/redirect':
            # Redirect so the client can be tested for following redirects.
            self.send_response(http.HTTPStatus.FOUND)
            self.send_header('Location', '/redirected')
            self.end_headers()
            self.flush_headers()
            return
        self.do_response()

    def do_response(self):
        '''Reply with a JSON echo of the request: path, headers (multi-valued),
        method, a running request count and any request body.'''
        h = {}
        for k, v in self.headers.items():
            h.setdefault(k, []).append(v)
        self.test_obj.request_count += 1
        ans = {
            'path': self.path,
            'headers': h,
            'request_count': self.test_obj.request_count,
            'method': self.command,
        }
        if 'Content-Length' in self.headers:
            ans['data'] = self.rfile.read(int(self.headers['Content-Length'])).decode()
        data = json.dumps(ans).encode()
        self.send_response(http.HTTPStatus.OK)
        self.send_header('Content-type', 'application/json')
        self.send_header('Content-Length', str(len(data)))
        self.send_header('Set-Cookie', 'sc=1')
        self.end_headers()
        if self.test_obj.dont_send_body:
            # Headers only: simulates a connection that stalls mid-response.
            self.flush_headers()
        else:
            self.wfile.write(data)

    def log_request(self, code='-', size='-'):
        # Keep test output quiet.
        pass
@unittest.skipIf(skip, skip)
class TestFetchBackend(unittest.TestCase):
    '''Exercises both Browser (plain Qt) and WebEngineBrowser against a local
    echo server: headers, cookies, redirects, timeouts and POST bodies.'''

    ae = unittest.TestCase.assertEqual

    def setUp(self):
        # Start the echo server on a daemon thread and wait for its port.
        self.server_started = Event()
        self.server_thread = Thread(target=self.run_server, daemon=True)
        self.server_thread.start()
        if not self.server_started.wait(15):
            raise Exception('Test server failed to start')
        self.request_count = 0
        self.dont_send_response = self.dont_send_body = False

    def tearDown(self):
        self.server.shutdown()
        self.server_thread.join(5)

    def test_recipe_browser_qt(self):
        self.do_recipe_browser_test(Browser)

    @unittest.skipIf(iswindows and is_ci, 'WebEngine browser test hangs on windows CI')
    def test_recipe_browser_webengine(self):
        self.do_recipe_browser_test(WebEngineBrowser)

    def do_recipe_browser_test(self, browser_class):
        '''Shared test body, parameterized on the browser implementation.'''
        from urllib.error import URLError
        from urllib.request import Request
        br = browser_class(user_agent='test-ua', headers=(('th', '1'),), start_worker=True)

        def u(path=''):
            # Absolute URL for a path on the local test server.
            return f'http://localhost:{self.port}{path}'

        def get(path='', headers=None, timeout=None, data=None):
            # Perform a request and return the server's JSON echo, augmented
            # with the final (post-redirect) URL.
            url = u(path)
            if headers:
                req = Request(url, headers=headers)
            else:
                req = url
            with br.open(req, data=data, timeout=timeout) as res:
                raw = res.read()
                ans = json.loads(raw)
                ans['final_url'] = res.geturl()
                return ans

        def test_with_timeout(no_response=True):
            # Stall the server and verify a retryable URLError is raised.
            self.dont_send_body = True
            if no_response:
                self.dont_send_response = True
            try:
                get(timeout=0.02)
            except URLError as e:
                self.assertTrue(e.worth_retry)
            else:
                raise AssertionError('Expected timeout not raised')
            self.dont_send_body = False
            self.dont_send_response = False

        def header(name, *expected):
            # Assert the (possibly multi-valued) header from the most recent
            # echo (closure variable r) equals the expected values.
            name = name.lower()
            ans = []
            for k, v in r['headers'].items():
                if k.lower() == name:
                    ans.extend(v)
            self.ae(expected, tuple(ans))

        def has_header(name):
            self.assertIn(name.lower(), [h.lower() for h in r['headers']])

        try:
            r = get()
            self.ae(r['method'], 'GET')
            self.ae(r['request_count'], 1)
            header('th', '1')
            header('User-Agent', 'test-ua')
            has_header('accept-encoding')
            r = get()
            self.ae(r['request_count'], 2)
            # Cookie set by the first response must be sent back.
            header('Cookie', 'sc=1')
            test_with_timeout(True)
            test_with_timeout(False)
            r = get('/redirect')
            self.ae(r['path'], '/redirected')
            header('th', '1')
            self.assertTrue(r['final_url'].endswith('/redirected'))
            header('User-Agent', 'test-ua')
            # Per-request headers merge with the browser-level ones.
            r = get(headers={'th': '2', 'tc': '1'})
            header('Th', '1, 2')
            header('Tc', '1')
            br.set_simple_cookie('cook', 'ie')
            br.set_user_agent('man in black')
            r = get()
            header('User-Agent', 'man in black')
            header('Cookie', 'sc=1; cook=ie')
            r = get(data=b'1234')
            self.ae(r['method'], 'POST')
            self.ae(r['data'], '1234')
            header('Content-Type', 'application/x-www-form-urlencoded')
        finally:
            br.shutdown()

    def run_server(self):
        # Runs on the server thread; publishes the bound port via setUp's Event.
        from http.server import ThreadingHTTPServer

        def create_handler(*a):
            ans = Handler(self, *a)
            return ans

        with ThreadingHTTPServer(("", 0), create_handler) as httpd:
            self.server = httpd
            self.port = httpd.server_address[1]
            self.server_started.set()
            httpd.serve_forever()
def find_tests():
    '''Return this module's unittest suite, as expected by calibre's test runner.'''
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(TestFetchBackend)
| 6,324 | Python | .py | 162 | 28.808642 | 91 | 0.568097 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,718 | msgfmt.py | kovidgoyal_calibre/src/calibre/translations/msgfmt.py | #! /usr/bin/env python
# Written by Martin v. Löwis <loewis@informatik.hu-berlin.de>
"""Generate binary message catalog from textual translation description.
This program converts a textual Uniforum-style message catalog (.po file) into
a binary GNU catalog (.mo file). This is essentially the same function as the
GNU msgfmt program, however, it is a simpler implementation. It handles
both plural forms and message contexts.
Usage: msgfmt.py [OPTIONS] filename.po
Options:
-o file
--output-file=file
Specify the output file to write to. If omitted, output will go to a
file named filename.mo (based off the input file name).
-h
--help
Print this message and exit.
-V
--version
Display version information and exit.
"""
import array
import ast
import getopt
import os
import struct
import sys
from email.parser import HeaderParser
__version__ = "1.2"

# Module-level accumulators shared by add(), generate() and make().
# MESSAGES maps msgid (bytes, with context/plural separators embedded) to
# msgstr (bytes) for the catalog currently being compiled.
MESSAGES = {}
# Counters reported by make_with_stats().
STATS = {'translated': 0, 'untranslated': 0, 'uniqified': 0}
# When True, duplicate translations are disambiguated by appending the msgid.
MAKE_UNIQUE = False
# Translations seen so far, used to detect duplicates across entries.
NON_UNIQUE = set()
def usage(code, msg=''):
    '''Print the module help text (plus an optional error message) to stderr,
    then terminate the process with the given exit code.'''
    out = sys.stderr
    print(__doc__, file=out)
    if msg:
        print(msg, file=out)
    sys.exit(code)
def add(ctxt, msgid, msgstr, fuzzy):
    "Add a non-fuzzy translation to the dictionary."
    # An entry is usable when it has a translation and is either non-fuzzy or
    # the header entry (empty msgid).
    usable = (not fuzzy or not msgid) and msgstr
    if not usable:
        if msgid:
            STATS['untranslated'] += 1
        return
    if msgid:
        STATS['translated'] += 1
    if ctxt is None:
        if msgstr in NON_UNIQUE:
            # Duplicate translation text; optionally disambiguate it by
            # appending the source msgid.
            STATS['uniqified'] += 1
            if MAKE_UNIQUE:
                msgstr += b' (' + msgid + b')'
        else:
            NON_UNIQUE.add(msgstr)
        MESSAGES[msgid] = msgstr
    else:
        # Contextual message: key is "ctxt EOT msgid" per the .mo convention.
        MESSAGES[b"%b\x04%b" % (ctxt, msgid)] = msgstr
def generate():
    '''Serialize the accumulated MESSAGES dict into the binary GNU .mo
    format and return it as bytes.

    Layout: a 7-integer header, two tables of (length, offset) pairs (one
    for msgids, one for msgstrs), then the NUL-terminated msgids followed
    by the NUL-terminated msgstrs. No hash table is emitted.
    '''
    # the keys are sorted in the .mo file
    keys = sorted(MESSAGES.keys())
    offsets = []
    ids = strs = b''
    for msgid in keys:  # renamed from 'id', which shadowed the builtin
        # For each string, we need size and file offset. Each string is NUL
        # terminated; the NUL does not count into the size.
        offsets.append((len(ids), len(msgid), len(strs), len(MESSAGES[msgid])))
        ids += msgid + b'\0'
        strs += MESSAGES[msgid] + b'\0'
    # The header is 7 32-bit unsigned integers. We don't use hash tables, so
    # the keys start right after the index tables.
    keystart = 7*4+16*len(keys)
    # and the values start after the keys
    valuestart = keystart + len(ids)
    koffsets = []
    voffsets = []
    # The string table first has the list of keys, then the list of values.
    # Each entry has first the size of the string, then the file offset.
    for o1, l1, o2, l2 in offsets:
        koffsets += [l1, o1+keystart]
        voffsets += [l2, o2+valuestart]
    offsets = koffsets + voffsets
    # (The previous dead 'output = ""' str assignment has been removed; the
    # output is bytes from the start.)
    output = struct.pack("Iiiiiii",
                         0x950412de,       # Magic
                         0,                # Version
                         len(keys),        # # of entries
                         7*4,              # start of key index
                         7*4+len(keys)*8,  # start of value index
                         0, 0)             # size and offset of hash table
    # array.tostring() was removed in Python 3.9; tobytes() exists on every
    # supported version, so no AttributeError fallback is needed.
    output += array.array("i", offsets).tobytes()
    output += ids
    output += strs
    return output
def make(filename, outfile):
    '''Parse the .po file named by filename (the .po suffix is added when
    missing) and write the compiled .mo catalog to outfile.

    outfile may be None (derive the .mo name from the input), a path, or a
    file-like object with a write() method. Entries are accumulated into the
    module-level MESSAGES/STATS via add(); the binary output comes from
    generate().
    '''
    # Section markers for the line-oriented state machine below.
    ID = 1
    STR = 2
    CTXT = 3
    unicode_prefix = 'u' if sys.version_info.major < 3 else ''
    # Compute .mo name from .po name and arguments
    if filename.endswith('.po'):
        infile = filename
    else:
        infile = filename + '.po'
    if outfile is None:
        outfile = os.path.splitext(infile)[0] + '.mo'
    try:
        with open(infile, 'rb') as f:
            lines = f.readlines()
    except OSError as msg:
        print(msg, file=sys.stderr)
        sys.exit(1)
    section = msgctxt = None
    fuzzy = 0
    msgid = msgstr = b''
    # Start off assuming Latin-1, so everything decodes without failure,
    # until we know the exact encoding
    encoding = 'latin-1'

    def check_encoding():
        # The catalog header (empty msgid) carries the charset declaration;
        # once found, switch decoding for the remaining lines.
        nonlocal encoding
        if not msgid and msgstr:
            # See whether there is an encoding declaration
            p = HeaderParser()
            charset = p.parsestr(msgstr.decode(encoding)).get_content_charset()
            if charset:
                encoding = charset

    # Parse the catalog
    lno = 0
    for l in lines:
        l = l.decode(encoding)
        lno += 1
        # If we get a comment line after a msgstr, this is a new entry
        if l[0] == '#' and section == STR:
            add(msgctxt, msgid, msgstr, fuzzy)
            check_encoding()
            section = msgctxt = None
            fuzzy = 0
        # Record a fuzzy mark
        if l[:2] == '#,' and 'fuzzy' in l:
            fuzzy = 1
        # Skip comments
        if l[0] == '#':
            continue
        # Now we are in a msgid or msgctxt section, output previous section
        if l.startswith('msgctxt'):
            if section == STR:
                add(msgctxt, msgid, msgstr, fuzzy)
                check_encoding()
            section = CTXT
            l = l[7:]
            msgctxt = b''
        elif l.startswith('msgid') and not l.startswith('msgid_plural'):
            if section == STR:
                add(msgctxt, msgid, msgstr, fuzzy)
            section = ID
            l = l[5:]
            msgid = msgstr = b''
            is_plural = False
        # This is a message with plural forms
        elif l.startswith('msgid_plural'):
            if section != ID:
                print('msgid_plural not preceded by msgid on %s:%d' % (infile, lno),
                      file=sys.stderr)
                sys.exit(1)
            l = l[12:]
            msgid += b'\0'  # separator of singular and plural
            is_plural = True
        # Now we are in a msgstr section
        elif l.startswith('msgstr'):
            section = STR
            if l.startswith('msgstr['):
                if not is_plural:
                    print('plural without msgid_plural on %s:%d' % (infile, lno),
                          file=sys.stderr)
                    sys.exit(1)
                l = l.split(']', 1)[1]
                if msgstr:
                    msgstr += b'\0'  # Separator of the various plural forms
            else:
                if is_plural:
                    print('indexed msgstr required for plural on %s:%d' % (infile, lno),
                          file=sys.stderr)
                    sys.exit(1)
                l = l[6:]
        # Skip empty lines
        l = l.strip()
        if not l:
            continue
        # Remaining content is a quoted string literal; evaluate and append
        # it to whichever of msgctxt/msgid/msgstr is being accumulated.
        l = ast.literal_eval(unicode_prefix + l)
        lb = l.encode(encoding)
        if section == CTXT:
            msgctxt += lb
        elif section == ID:
            msgid += lb
        elif section == STR:
            msgstr += lb
        else:
            print('Syntax error on %s:%d' % (infile, lno),
                  'before:', file=sys.stderr)
            print(l, file=sys.stderr)
            sys.exit(1)
    # Add last entry
    if section == STR:
        add(msgctxt, msgid, msgstr, fuzzy)
    # Compute output
    output = generate()
    try:
        if hasattr(outfile, 'write'):
            outfile.write(output)
        else:
            with open(outfile, "wb") as f:
                f.write(output)
    except OSError as msg:
        print(msg, file=sys.stderr)
def make_with_stats(filename, outfile):
    '''Compile a single .po file, resetting the module-level accumulators
    first, and return a copy of the resulting statistics dict.'''
    MESSAGES.clear()
    NON_UNIQUE.clear()
    STATS.update(translated=0, untranslated=0, uniqified=0)
    make(filename, outfile)
    return dict(STATS)
def run_batch(pairs):
    '''Yield the statistics for each (input .po, output .mo) job in pairs.'''
    for job in pairs:
        yield make_with_stats(*job)
def main():
    """Command line entry point for the .po -> .mo compiler.

    Two modes of operation:
      * ``STDIN`` batch mode: the first argv entry is the literal string
        ``STDIN``; a JSON list of (input, output) pairs is read from stdin
        and a JSON list of per-file statistics is written to stdout.
      * normal mode: getopt-style options followed by one or more input
        file names.
    """
    global MAKE_UNIQUE
    args = sys.argv[1:]
    # Bug fix: the original unconditionally evaluated args[0], so running
    # with no arguments raised IndexError instead of reaching the
    # "No input file given" message below.
    if args and args[0] == 'STDIN':
        MAKE_UNIQUE = args[1] == 'uniqify'
        import json
        results = tuple(run_batch(json.loads(sys.stdin.buffer.read())))
        sys.stdout.buffer.write(json.dumps(results).encode('utf-8'))
        sys.stdout.close()
        return
    try:
        opts, args = getopt.getopt(args, 'hVso:',
                                   ['help', 'version', 'statistics', 'output-file='])
    except getopt.error as msg:
        usage(1, msg)
    outfile = None
    output_stats = False
    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-V', '--version'):
            print("msgfmt.py", __version__, file=sys.stderr)
            sys.exit(0)
        elif opt in ('-o', '--output-file'):
            outfile = arg
        elif opt in ('-s', '--statistics'):
            output_stats = True
    # do it
    if not args:
        print('No input file given', file=sys.stderr)
        print("Try `msgfmt --help' for more information.", file=sys.stderr)
        return
    for filename in args:
        make_with_stats(filename, outfile)
    if output_stats:
        print(STATS['translated'], 'translated messages,', STATS['untranslated'], 'untranslated messages.')
# Run the command-line compiler when executed as a script.
if __name__ == '__main__':
    main()
| 9,298 | Python | .py | 264 | 26.068182 | 111 | 0.552839 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,719 | dynamic.py | kovidgoyal_calibre/src/calibre/translations/dynamic.py | '''
Dynamic language lookup of translations for user-visible strings.
'''
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
import io
from gettext import GNUTranslations
from zipfile import ZipFile
from calibre.utils.localization import get_lc_messages_path
from calibre.utils.resources import get_path
__all__ = ['translate']
_CACHE = {}
def translate(lang, text):
    '''
    Translate *text* into *lang* using the message catalog bundled in
    localization/locales.zip. Falls back to the builtin ``_`` (or the
    identity function) when no catalog for *lang* can be loaded. Results
    of catalog loading (including failures, as None) are cached per lang.
    '''
    try:
        catalog = _CACHE[lang]
    except KeyError:
        catalog = None
        messages_dir = get_lc_messages_path(lang)
        if messages_dir is not None:
            zip_path = get_path('localization/locales.zip',
                                allow_user_override=False)
            with ZipFile(zip_path, 'r') as archive:
                try:
                    raw = archive.read(messages_dir + '/messages.mo')
                except Exception:
                    pass
                else:
                    catalog = GNUTranslations(io.BytesIO(raw))
        _CACHE[lang] = catalog
    if catalog is None:
        # __builtins__ may be a module or a dict depending on context;
        # getattr with a default handles both.
        return getattr(__builtins__, '_', lambda x: x)(text)
    return catalog.gettext(text)
| 1,061 | Python | .py | 31 | 26.290323 | 69 | 0.604106 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,720 | mime.py | kovidgoyal_calibre/src/calibre/devices/mime.py | __license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre import guess_type
def _mt(path):
    '''Return the guessed MIME type of *path*, defaulting to
    application/octet-stream when no guess is possible.'''
    return guess_type(path)[0] or 'application/octet-stream'
def mime_type_ext(ext):
    '''Return the MIME type for a file extension; the leading dot is optional.'''
    dotted = ext if ext.startswith('.') else '.' + ext
    # Prefix a dummy file name so the extension can be type-guessed.
    return _mt('a' + dotted)
def mime_type_path(path):
    # Thin public wrapper so callers get the octet-stream fallback from _mt().
    return _mt(path)
| 419 | Python | .py | 15 | 23.8 | 58 | 0.63728 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,721 | errors.py | kovidgoyal_calibre/src/calibre/devices/errors.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""
Defines the errors that the device drivers generate.
G{classtree ProtocolError}
"""
class ProtocolError(Exception):
    """Root of the exception hierarchy for device communication errors."""

    def __init__(self, msg):
        super().__init__(msg)
class TimeoutError(ProtocolError):
    """Raised when communication with the device times out."""

    def __init__(self, func_name):
        message = ("There was a timeout while communicating with the device "
                   "in function: " + func_name)
        ProtocolError.__init__(self, message)
class DeviceError(ProtocolError):
    """Raised when the device cannot be found."""

    def __init__(self, msg=None):
        # Historical default message kept for backwards compatibility.
        default = "Unable to find SONY Reader. Is it connected?"
        ProtocolError.__init__(self, default if msg is None else msg)
class UserFeedback(DeviceError):
    """An error carrying a message, details and a severity level for display
    to the user."""

    # Severity levels
    INFO = 0
    WARN = WARNING = 1
    ERROR = 2

    def __init__(self, msg, details, level):
        # Deliberately bypass DeviceError.__init__ (and its default message).
        Exception.__init__(self, msg)
        self.msg = msg
        self.details = details
        self.level = level
class OpenFeedback(DeviceError):
    """Raised during open() to show feedback to the user."""

    def __init__(self, msg):
        self.feedback_msg = msg
        DeviceError.__init__(self, msg)

    def custom_dialog(self, parent):
        '''
        Override in a subclass to create and return a custom dialog to show
        the user instead of simply displaying feedback_msg.
        '''
        raise NotImplementedError()
class OpenActionNeeded(DeviceError):
    """Raised when user action is needed before the device can be opened;
    only_once_id identifies the action so it is requested at most once."""

    def __init__(self, device_name, msg, only_once_id):
        self.device_name = device_name
        self.feedback_msg = msg
        self.only_once_id = only_once_id
        DeviceError.__init__(self, msg)

    def custom_dialog(self, parent):
        # Subclasses must supply the dialog implementation.
        raise NotImplementedError()
class InitialConnectionError(OpenFeedback):
    """Errors detected after device detection but before open(), for example
    inside the is_connected() method."""
class OpenFailed(ProtocolError):
    """Raised when the device cannot be opened this time. No retry is done,
    but the device keeps being polled for future opens. An empty message
    suppresses the exception trace."""

    def __init__(self, msg):
        ProtocolError.__init__(self, msg)
        # Only surface a traceback when there is an actual message.
        self.show_me = bool(msg and msg.strip())
class DeviceBusy(ProtocolError):
    """Raised when the device is in use by another program."""

    def __init__(self, uerr=""):
        message = ("Device is in use by another application:"
                   + "\nUnderlying error:" + str(uerr))
        ProtocolError.__init__(self, message)
class DeviceLocked(ProtocolError):
    """Raised when the device has been locked."""

    def __init__(self):
        super().__init__("Device is locked")
class PacketError(ProtocolError):
    """Raised for errors while creating or interpreting packets."""
class FreeSpaceError(ProtocolError):
    """Raised when attempting to put files onto a device that is full."""
class ArgumentError(ProtocolError):
    """Raised when invalid arguments are passed to a public interface function."""
class PathError(ArgumentError):
    """Raised when the user supplies an incorrect or invalid path."""

    def __init__(self, msg, path=None):
        super().__init__(msg)
        # Offending path, when known, for callers that want to report it.
        self.path = path
class ControlError(ProtocolError):
    """Raised for errors in command/response pairs while communicating with
    the device."""

    def __init__(self, query=None, response=None, desc=None):
        self.query = query
        self.response = response
        self.desc = desc
        ProtocolError.__init__(self, desc)

    def __str__(self):
        # Prefer the full query/response dump, then the description,
        # then a generic message.
        if self.query and self.response:
            return "".join((
                "Got unexpected response:\n",
                "query:\n", str(self.query.query), "\n",
                "expected:\n", str(self.query.response), "\n",
                "actual:\n", str(self.response),
            ))
        if self.desc:
            return self.desc
        return "Unknown control error occurred"
class WrongDestinationError(PathError):
    '''The user chose the wrong destination to send books to, for example by
    trying to send books to a storage card that does not exist.'''
class BlacklistedDevice(OpenFailed):
    '''Raise from open() when the device being opened has been blacklisted by
    the user. Only used in drivers that manage device presence themselves,
    like the MTP driver.'''
| 4,386 | Python | .py | 106 | 34.660377 | 95 | 0.657764 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,722 | interface.py | kovidgoyal_calibre/src/calibre/devices/interface.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import os
from collections import namedtuple
from calibre import prints
from calibre.constants import iswindows
from calibre.customize import Plugin
class OpenPopupMessage:
    """A message the GUI shows as a non-modal popup when a device is opened.

    level is one of 'info'/'warn'/'error' style severity strings (default
    'info'); skip_dialog_skip_precheck controls the "show again" checkbox
    pre-check in the GUI dialog.
    """

    def __init__(self, title='', message='', level='info', skip_dialog_skip_precheck=True):
        self.title, self.message = title, message
        self.level = level
        self.skip_dialog_skip_precheck = skip_dialog_skip_precheck
class DevicePlugin(Plugin):
"""
Defines the interface that should be implemented by backends that
communicate with an e-book reader.
"""
type = _('Device interface')
#: Ordered list of supported formats
FORMATS = ["lrf", "rtf", "pdf", "txt"]
# If True, the config dialog will not show the formats box
HIDE_FORMATS_CONFIG_BOX = False
#: VENDOR_ID can be either an integer, a list of integers or a dictionary
#: If it is a dictionary, it must be a dictionary of dictionaries,
#: of the form::
#:
#: {
#: integer_vendor_id : { product_id : [list of BCDs], ... },
#: ...
#: }
#:
VENDOR_ID = 0x0000
#: An integer or a list of integers
PRODUCT_ID = 0x0000
#: BCD can be either None to not distinguish between devices based on BCD, or
#: it can be a list of the BCD numbers of all devices supported by this driver.
BCD = None
#: Height for thumbnails on the device
THUMBNAIL_HEIGHT = 68
#: Compression quality for thumbnails. Set this closer to 100 to have better
#: quality thumbnails with fewer compression artifacts. Of course, the
#: thumbnails get larger as well.
THUMBNAIL_COMPRESSION_QUALITY = 75
#: Set this to True if the device supports updating cover thumbnails during
#: sync_booklists. Setting it to true will ask device.py to refresh the
#: cover thumbnails during book matching
WANTS_UPDATED_THUMBNAILS = False
#: Whether the metadata on books can be set via the GUI.
CAN_SET_METADATA = ['title', 'authors', 'collections']
#: Whether the device can handle device_db metadata plugboards
CAN_DO_DEVICE_DB_PLUGBOARD = False
# Set this to None if the books on the device are files that the GUI can
# access in order to add the books from the device to the library
BACKLOADING_ERROR_MESSAGE = _('Cannot get files from this device')
#: Path separator for paths to books on device
path_sep = os.sep
#: Icon for this device
icon = 'reader.png'
# Encapsulates an annotation fetched from the device
UserAnnotation = namedtuple('Annotation','type, value')
#: GUI displays this as a message if not None in the status bar. Useful if opening can take a
#: long time
OPEN_FEEDBACK_MESSAGE = None
#: Set of extensions that are "virtual books" on the device
#: and therefore cannot be viewed/saved/added to library.
#: For example: ``frozenset(['kobo'])``
VIRTUAL_BOOK_EXTENSIONS = frozenset()
#: Message to display to user for virtual book extensions.
VIRTUAL_BOOK_EXTENSION_MESSAGE = None
#: Whether to nuke comments in the copy of the book sent to the device. If
#: not None this should be short string that the comments will be replaced
#: by.
NUKE_COMMENTS = None
#: If True indicates that this driver completely manages device detection,
#: ejecting and so forth. If you set this to True, you *must* implement the
#: detect_managed_devices and debug_managed_device_detection methods.
#: A driver with this set to true is responsible for detection of devices,
#: managing a blacklist of devices, a list of ejected devices and so forth.
#: calibre will periodically call the detect_managed_devices() method and
#: if it returns a detected device, calibre will call open(). open() will
#: be called every time a device is returned even if previous calls to open()
#: failed, therefore the driver must maintain its own blacklist of failed
#: devices. Similarly, when ejecting, calibre will call eject() and then
#: assuming the next call to detect_managed_devices() returns None, it will
#: call post_yank_cleanup().
MANAGES_DEVICE_PRESENCE = False
#: If set the True, calibre will call the :meth:`get_driveinfo()` method
#: after the books lists have been loaded to get the driveinfo.
SLOW_DRIVEINFO = False
#: If set to True, calibre will ask the user if they want to manage the
#: device with calibre, the first time it is detected. If you set this to
#: True you must implement :meth:`get_device_uid()` and
#: :meth:`ignore_connected_device()` and
#: :meth:`get_user_blacklisted_devices` and
#: :meth:`set_user_blacklisted_devices`
ASK_TO_ALLOW_CONNECT = False
#: Set this to a dictionary of the form {'title':title, 'msg':msg, 'det_msg':detailed_msg} to have calibre popup
#: a message to the user after some callbacks are run (currently only upload_books).
#: Be careful to not spam the user with too many messages. This variable is checked after *every* callback,
#: so only set it when you really need to.
user_feedback_after_callback = None
@classmethod
def get_gui_name(cls):
if hasattr(cls, 'gui_name'):
return cls.gui_name
if hasattr(cls, '__name__'):
return cls.__name__
return cls.name
@classmethod
def get_open_popup_message(self):
' GUI displays this as a non-modal popup. Should be an instance of OpenPopupMessage '
return None
# Device detection {{{
def test_bcd(self, bcdDevice, bcd):
if bcd is None or len(bcd) == 0:
return True
for c in bcd:
if c == bcdDevice:
return True
return False
    def is_usb_connected(self, devices_on_system, debug=False, only_presence=False):
        '''
        Return (True, device_info) if a device handled by this plugin is
        currently connected, otherwise (False, None).

        Matching proceeds vendor id -> product id -> BCD -> can_handle
        check, honouring the three supported VENDOR_ID shapes (scalar,
        sequence, or nested dict of {vid: {pid: [bcds]}}).

        :param devices_on_system: List of devices currently connected
        :param debug: If True, print each BCD-matched device before the
            can_handle check
        :param only_presence: accepted for interface compatibility but
            unused in this implementation
        '''
        vendors_on_system = {x[0] for x in devices_on_system}
        # VENDOR_ID may be a scalar, a sequence, or a dict (see class docs).
        vendors = set(self.VENDOR_ID) if hasattr(self.VENDOR_ID, '__len__') else {self.VENDOR_ID}
        if hasattr(self.VENDOR_ID, 'keys'):
            # Dict form: collect every product id across all vendors.
            products = []
            for ven in self.VENDOR_ID:
                products.extend(self.VENDOR_ID[ven].keys())
        else:
            products = self.PRODUCT_ID if hasattr(self.PRODUCT_ID, '__len__') else [self.PRODUCT_ID]
        # Windows uses a different capability check.
        ch = self.can_handle_windows if iswindows else self.can_handle
        for vid in vendors_on_system.intersection(vendors):
            for dev in devices_on_system:
                cvid, pid, bcd = dev[:3]
                if cvid == vid:
                    if pid in products:
                        if hasattr(self.VENDOR_ID, 'keys'):
                            try:
                                cbcd = self.VENDOR_ID[vid][pid]
                            except KeyError:
                                # Vendor vid does not have product pid, pid
                                # exists for some other vendor in this
                                # device
                                continue
                        else:
                            cbcd = self.BCD
                        if self.test_bcd(bcd, cbcd):
                            if debug:
                                prints(dev)
                            # Final driver-specific check before claiming
                            # the device.
                            if ch(dev, debug=debug):
                                return True, dev
        return False, None
def detect_managed_devices(self, devices_on_system, force_refresh=False):
'''
Called only if MANAGES_DEVICE_PRESENCE is True.
Scan for devices that this driver can handle. Should return a device
object if a device is found. This object will be passed to the open()
method as the connected_device. If no device is found, return None. The
returned object can be anything, calibre does not use it, it is only
passed to open().
This method is called periodically by the GUI, so make sure it is not
too resource intensive. Use a cache to avoid repeatedly scanning the
system.
:param devices_on_system: Set of USB devices found on the system.
:param force_refresh: If True and the driver uses a cache to prevent
repeated scanning, the cache must be flushed.
'''
raise NotImplementedError()
def debug_managed_device_detection(self, devices_on_system, output):
'''
Called only if MANAGES_DEVICE_PRESENCE is True.
Should write information about the devices detected on the system to
output, which is a file like object.
Should return True if a device was detected and successfully opened,
otherwise False.
'''
raise NotImplementedError()
# }}}
def reset(self, key='-1', log_packets=False, report_progress=None,
detected_device=None):
"""
:param key: The key to unlock the device
:param log_packets: If true the packet stream to/from the device is logged
:param report_progress: Function that is called with a % progress
(number between 0 and 100) for various tasks.
If it is called with -1 that means that the
task does not have any progress information
:param detected_device: Device information from the device scanner
"""
raise NotImplementedError()
def can_handle_windows(self, usbdevice, debug=False):
'''
Optional method to perform further checks on a device to see if this driver
is capable of handling it. If it is not it should return False. This method
is only called after the vendor, product ids and the bcd have matched, so
it can do some relatively time intensive checks. The default implementation
returns True. This method is called only on Windows. See also
:meth:`can_handle`.
Note that for devices based on USBMS this method by default delegates
to :meth:`can_handle`. So you only need to override :meth:`can_handle`
in your subclass of USBMS.
:param usbdevice: A usbdevice as returned by :func:`calibre.devices.winusb.scan_usb_devices`
'''
return True
def can_handle(self, device_info, debug=False):
'''
Unix version of :meth:`can_handle_windows`.
:param device_info: Is a tuple of (vid, pid, bcd, manufacturer, product,
serial number)
'''
return True
can_handle.is_base_class_implementation = True
def open(self, connected_device, library_uuid):
'''
Perform any device specific initialization. Called after the device is
detected but before any other functions that communicate with the device.
For example: For devices that present themselves as USB Mass storage
devices, this method would be responsible for mounting the device or
if the device has been automounted, for finding out where it has been
mounted. The method :meth:`calibre.devices.usbms.device.Device.open` has
an implementation of
this function that should serve as a good example for USB Mass storage
devices.
This method can raise an OpenFeedback exception to display a message to
the user.
:param connected_device: The device that we are trying to open. It is
a tuple of (vendor id, product id, bcd, manufacturer name, product
name, device serial number). However, some devices have no serial
number and on Windows only the first three fields are present, the
rest are None.
:param library_uuid: The UUID of the current calibre library. Can be
None if there is no library (for example when used from the command
line).
'''
raise NotImplementedError()
def eject(self):
'''
Un-mount / eject the device from the OS. This does not check if there
are pending GUI jobs that need to communicate with the device.
NOTE: That this method may not be called on the same thread as the rest
of the device methods.
'''
raise NotImplementedError()
def post_yank_cleanup(self):
'''
Called if the user yanks the device without ejecting it first.
'''
raise NotImplementedError()
def set_progress_reporter(self, report_progress):
'''
Set a function to report progress information.
:param report_progress: Function that is called with a % progress
(number between 0 and 100) for various tasks.
If it is called with -1 that means that the
task does not have any progress information
'''
raise NotImplementedError()
def get_device_information(self, end_session=True):
"""
Ask device for device information. See L{DeviceInfoQuery}.
:return: (device name, device version, software version on device, MIME type)
The tuple can optionally have a fifth element, which is a
drive information dictionary. See usbms.driver for an example.
"""
raise NotImplementedError()
def get_driveinfo(self):
'''
Return the driveinfo dictionary. Usually called from
get_device_information(), but if loading the driveinfo is slow for this
driver, then it should set SLOW_DRIVEINFO. In this case, this method
will be called by calibre after the book lists have been loaded. Note
that it is not called on the device thread, so the driver should cache
the drive info in the books() method and this function should return
the cached data.
'''
return {}
def card_prefix(self, end_session=True):
'''
Return a 2 element list of the prefix to paths on the cards.
If no card is present None is set for the card's prefix.
E.G.
('/place', '/place2')
(None, 'place2')
('place', None)
(None, None)
'''
raise NotImplementedError()
def total_space(self, end_session=True):
"""
Get total space available on the mountpoints:
1. Main memory
2. Memory Card A
3. Memory Card B
:return: A 3 element list with total space in bytes of (1, 2, 3). If a
particular device doesn't have any of these locations it should return 0.
"""
raise NotImplementedError()
def free_space(self, end_session=True):
"""
Get free space available on the mountpoints:
1. Main memory
2. Card A
3. Card B
:return: A 3 element list with free space in bytes of (1, 2, 3). If a
particular device doesn't have any of these locations it should return -1.
"""
raise NotImplementedError()
def books(self, oncard=None, end_session=True):
"""
Return a list of e-books on the device.
:param oncard: If 'carda' or 'cardb' return a list of e-books on the
specific storage card, otherwise return list of e-books
in main memory of device. If a card is specified and no
books are on the card return empty list.
:return: A BookList.
"""
raise NotImplementedError()
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
'''
Upload a list of books to the device. If a file already
exists on the device, it should be replaced.
This method should raise a :class:`FreeSpaceError` if there is not enough
free space on the device. The text of the FreeSpaceError must contain the
word "card" if ``on_card`` is not None otherwise it must contain the word "memory".
:param files: A list of paths
:param names: A list of file names that the books should have
once uploaded to the device. len(names) == len(files)
:param metadata: If not None, it is a list of :class:`Metadata` objects.
The idea is to use the metadata to determine where on the device to
put the book. len(metadata) == len(files). Apart from the regular
cover (path to cover), there may also be a thumbnail attribute, which should
be used in preference. The thumbnail attribute is of the form
(width, height, cover_data as jpeg).
:return: A list of 3-element tuples. The list is meant to be passed
to :meth:`add_books_to_metadata`.
'''
raise NotImplementedError()
@classmethod
def add_books_to_metadata(cls, locations, metadata, booklists):
'''
Add locations to the booklists. This function must not communicate with
the device.
:param locations: Result of a call to L{upload_books}
:param metadata: List of :class:`Metadata` objects, same as for
:meth:`upload_books`.
:param booklists: A tuple containing the result of calls to
(:meth:`books(oncard=None)`,
:meth:`books(oncard='carda')`,
:meth`books(oncard='cardb')`).
'''
raise NotImplementedError()
def delete_books(self, paths, end_session=True):
'''
Delete books at paths on device.
'''
raise NotImplementedError()
@classmethod
def remove_books_from_metadata(cls, paths, booklists):
'''
Remove books from the metadata list. This function must not communicate
with the device.
:param paths: paths to books on the device.
:param booklists: A tuple containing the result of calls to
(:meth:`books(oncard=None)`,
:meth:`books(oncard='carda')`,
:meth`books(oncard='cardb')`).
'''
raise NotImplementedError()
def sync_booklists(self, booklists, end_session=True):
'''
Update metadata on device.
:param booklists: A tuple containing the result of calls to
(:meth:`books(oncard=None)`,
:meth:`books(oncard='carda')`,
:meth`books(oncard='cardb')`).
'''
raise NotImplementedError()
def get_file(self, path, outfile, end_session=True):
'''
Read the file at ``path`` on the device and write it to outfile.
:param outfile: file object like ``sys.stdout`` or the result of an
:func:`open` call.
'''
raise NotImplementedError()
@classmethod
def config_widget(cls):
'''
Should return a QWidget. The QWidget contains the settings for the
device interface
'''
raise NotImplementedError()
@classmethod
def save_settings(cls, settings_widget):
'''
Should save settings to disk. Takes the widget created in
:meth:`config_widget` and saves all settings to disk.
'''
raise NotImplementedError()
@classmethod
def settings(cls):
'''
Should return an opts object. The opts object should have at least one
attribute `format_map` which is an ordered list of formats for the
device.
'''
raise NotImplementedError()
def set_plugboards(self, plugboards, pb_func):
'''
provide the driver the current set of plugboards and a function to
select a specific plugboard. This method is called immediately before
add_books and sync_booklists.
pb_func is a callable with the following signature::
def pb_func(device_name, format, plugboards)
You give it the current device name (either the class name or
DEVICE_PLUGBOARD_NAME), the format you are interested in (a 'real'
format or 'device_db'), and the plugboards (you were given those by
set_plugboards, the same place you got this method).
:return: None or a single plugboard instance.
'''
pass
def set_driveinfo_name(self, location_code, name):
'''
Set the device name in the driveinfo file to 'name'. This setting will
persist until the file is re-created or the name is changed again.
Non-disk devices should implement this method based on the location
codes returned by the get_device_information() method.
'''
pass
def prepare_addable_books(self, paths):
'''
Given a list of paths, returns another list of paths. These paths
point to addable versions of the books.
If there is an error preparing a book, then instead of a path, the
position in the returned list for that book should be a three tuple:
(original_path, the exception instance, traceback)
'''
return paths
def startup(self):
'''
Called when calibre is starting the device. Do any initialization
required. Note that multiple instances of the class can be instantiated,
and thus __init__ can be called multiple times, but only one instance
will have this method called. This method is called on the device
thread, not the GUI thread.
'''
pass
def shutdown(self):
'''
Called when calibre is shutting down, either for good or in preparation
to restart. Do any cleanup required. This method is called on the
device thread, not the GUI thread.
'''
pass
def get_device_uid(self):
'''
Must return a unique id for the currently connected device (this is
called immediately after a successful call to open()). You must
implement this method if you set ASK_TO_ALLOW_CONNECT = True
'''
raise NotImplementedError()
def ignore_connected_device(self, uid):
'''
Should ignore the device identified by uid (the result of a call to
get_device_uid()) in the future. You must implement this method if you
set ASK_TO_ALLOW_CONNECT = True. Note that this function is called
immediately after open(), so if open() caches some state, the driver
should reset that state.
'''
raise NotImplementedError()
def get_user_blacklisted_devices(self):
'''
Return map of device uid to friendly name for all devices that the user
has asked to be ignored.
'''
return {}
def set_user_blacklisted_devices(self, devices):
'''
Set the list of device uids that should be ignored by this driver.
'''
pass
def specialize_global_preferences(self, device_prefs):
'''
Implement this method if your device wants to override a particular
preference. You must ensure that all call sites that want a preference
that can be overridden use device_prefs['something'] instead
of prefs['something']. Your
method should call device_prefs.set_overrides(pref=val, pref=val, ...).
Currently used for:
metadata management (prefs['manage_device_metadata'])
'''
device_prefs.set_overrides()
def set_library_info(self, library_name, library_uuid, field_metadata):
'''
Implement this method if you want information about the current calibre
library. This method is called at startup and when the calibre library
changes while connected.
'''
pass
# Dynamic control interface.
# The following methods are probably called on the GUI thread. Any driver
# that implements these methods must take pains to be thread safe, because
# the device_manager might be using the driver at the same time that one of
# these methods is called.
def is_dynamically_controllable(self):
'''
Called by the device manager when starting plugins. If this method returns
a string, then a) it supports the device manager's dynamic control
interface, and b) that name is to be used when talking to the plugin.
This method can be called on the GUI thread. A driver that implements
this method must be thread safe.
'''
return None
def start_plugin(self):
'''
This method is called to start the plugin. The plugin should begin
to accept device connections however it does that. If the plugin is
already accepting connections, then do nothing.
This method can be called on the GUI thread. A driver that implements
this method must be thread safe.
'''
pass
def stop_plugin(self):
'''
This method is called to stop the plugin. The plugin should no longer
accept connections, and should cleanup behind itself. It is likely that
this method should call shutdown. If the plugin is already not accepting
connections, then do nothing.
This method can be called on the GUI thread. A driver that implements
this method must be thread safe.
'''
pass
def get_option(self, opt_string, default=None):
'''
Return the value of the option indicated by opt_string. This method can
be called when the plugin is not started. Return None if the option does
not exist.
This method can be called on the GUI thread. A driver that implements
this method must be thread safe.
'''
return default
def set_option(self, opt_string, opt_value):
'''
Set the value of the option indicated by opt_string. This method can
be called when the plugin is not started.
This method can be called on the GUI thread. A driver that implements
this method must be thread safe.
'''
pass
def is_running(self):
'''
Return True if the plugin is started, otherwise false
This method can be called on the GUI thread. A driver that implements
this method must be thread safe.
'''
return False
def synchronize_with_db(self, db, book_id, book_metadata, first_call):
'''
Called during book matching when a book on the device is matched with
a book in calibre's db. The method is responsible for synchronizing
data from the device to calibre's db (if needed).
The method must return a two-value tuple. The first value is a set of
calibre book ids changed if calibre's database was changed or None if the
database was not changed. If the first value is an empty set then the
metadata for the book on the device is updated with calibre's metadata
and given back to the device, but no GUI refresh of that book is done.
This is useful when the calibre data is correct but must be sent to the
device.
The second value is itself a 2-value tuple. The first value in the tuple
specifies whether a book format should be sent to the device. The intent
is to permit verifying that the book on the device is the same as the
book in calibre. This value must be None if no book is to be sent,
otherwise return the base file name on the device (a string like
foobar.epub). Be sure to include the extension in the name. The device
subsystem will construct a send_books job for all books with not- None
returned values. Note: other than to later retrieve the extension, the
name is ignored in cases where the device uses a template to generate
the file name, which most do. The second value in the returned tuple
indicated whether the format is future-dated. Return True if it is,
otherwise return False. calibre will display a dialog to the user
listing all future dated books.
Extremely important: this method is called on the GUI thread. It must
be threadsafe with respect to the device manager's thread.
book_id: the calibre id for the book in the database.
book_metadata: the Metadata object for the book coming from the device.
first_call: True if this is the first call during a sync, False otherwise
'''
return (None, (None, False))
class BookList(list):
    '''
    A list of books. Each Book object must have the fields

    #. title
    #. authors
    #. size (file size of the book)
    #. datetime (a UTC time tuple)
    #. path (path on the device to the book)
    #. thumbnail (can be None) thumbnail is either a str/bytes object with the
       image data or it should have an attribute image_path that stores an
       absolute (platform native) path to the image
    #. tags (a list of strings, can be empty).
    '''
    # NOTE(review): Python 2 slice-protocol relics; inert on Python 3 since
    # these dunders are no longer consulted. Likely removable, but confirm
    # no external code references them before deleting.
    __getslice__ = None
    __setslice__ = None
    def __init__(self, oncard, prefix, settings):
        # Base implementation keeps no per-list state; subclasses use
        # oncard/prefix/settings as needed.
        pass
    def supports_collections(self):
        ''' Return True if the device supports collections for this book list. '''
        raise NotImplementedError()
    def add_book(self, book, replace_metadata):
        '''
        Add the book to the booklist. Intent is to maintain any device-internal
        metadata. Return True if booklists must be sync'ed
        '''
        raise NotImplementedError()
    def remove_book(self, book):
        '''
        Remove a book from the booklist. Correct any device metadata at the
        same time
        '''
        raise NotImplementedError()
    def get_collections(self, collection_attributes):
        '''
        Return a dictionary of collections created from collection_attributes.
        Each entry in the dictionary is of the form collection name:[list of
        books]

        The list of books is sorted by book title, except for collections
        created from series, in which case series_index is used.

        :param collection_attributes: A list of attributes of the Book object
        '''
        raise NotImplementedError()
class CurrentlyConnectedDevice:
    '''Holds the driver of the device currently connected to calibre;
    ``device`` is None when no device is connected.'''

    def __init__(self):
        # The device manager assigns the active driver to _device.
        self._device = None

    @property
    def device(self):
        '''The driver object for the connected device, or None.'''
        return self._device
# A device driver can check if a device is currently connected to calibre using
# the following code::
# from calibre.device.interface import currently_connected_device
# if currently_connected_device.device is None:
# # no device connected
# The device attribute will be either None or the device driver object
# (DevicePlugin instance) for the currently connected device.
currently_connected_device = CurrentlyConnectedDevice()
| 31,230 | Python | .py | 648 | 38.635802 | 116 | 0.643259 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,723 | cli.py | kovidgoyal_calibre/src/calibre/devices/cli.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
"""
Provides a command-line interface to ebook devices.
For usage information run the script.
"""
import os
import sys
import time
from optparse import OptionParser
from calibre import __appname__, __version__, fsync, human_readable, prints
from calibre.customize.ui import device_plugins
from calibre.devices.errors import ArgumentError, DeviceError, DeviceLocked
from calibre.devices.scanner import DeviceScanner
from calibre.utils.config import device_prefs
from polyglot.io import PolyglotStringIO
MINIMUM_COL_WIDTH = 12 # : Minimum width of columns in ls output
class FileFormatter:
    '''Presentation helpers for a single file entry returned by a device driver.

    Wraps an object exposing ``is_dir``, ``is_readonly``, ``size``, ``ctime``,
    ``wtime``, ``name`` and ``path`` attributes and provides ls-style
    formatted views of them.
    '''

    def __init__(self, file):
        self.is_dir = file.is_dir
        self.is_readonly = file.is_readonly
        self.size = file.size
        self.ctime = file.ctime
        self.wtime = file.wtime
        self.name = file.name
        self.path = file.path

    @property
    def mode_string(self):
        """ The mode string for this file. There are only two modes read-only and read-write """
        mode, x = "-", "-"
        if self.is_dir:
            mode, x = "d", "x"
        if self.is_readonly:
            mode += "r-"+x+"r-"+x+"r-"+x
        else:
            mode += "rw"+x+"rw"+x+"rw"+x
        return mode

    @property
    def isdir_name(self):
        '''Return self.name + '/' if self is a directory'''
        name = self.name
        if self.is_dir:
            name += '/'
        return name

    @property
    def name_in_color(self):
        """ The name in ANSI text. Directories are blue, ebooks are green """
        cname = self.name
        blue, green, normal = "", "", ""
        # BUG FIX: ``term`` is never assigned in __init__, so ``self.term``
        # raised AttributeError whenever this property was read. Fall back
        # to uncolored output when no terminal object has been attached.
        term = getattr(self, 'term', None)
        if term:
            blue, green, normal = term.BLUE, term.GREEN, term.NORMAL
        if self.is_dir:
            cname = blue + self.name + normal
        else:
            ext = self.name[self.name.rfind("."):]
            if ext in (".pdf", ".rtf", ".lrf", ".lrx", ".txt"):
                cname = green + self.name + normal
        return cname

    @property
    def human_readable_size(self):
        """ File size in human readable form """
        return human_readable(self.size)

    @property
    def modification_time(self):
        """ Last modified time in the Linux ls -l format """
        return time.strftime("%Y-%m-%d %H:%M", time.localtime(self.wtime))

    @property
    def creation_time(self):
        # BUG FIX: docstring previously (wrongly) said "Last modified time"
        """ Creation time in the Linux ls -l format """
        return time.strftime("%Y-%m-%d %H:%M", time.localtime(self.ctime))
def info(dev):
    '''Print the identification details reported by the connected device.'''
    details = dev.get_device_information()
    print("Device name: ", details[0])
    print("Device version: ", details[1])
    print("Software version:", details[2])
    print("Mime type: ", details[3])
def ls(dev, path, recurse=False, human_readable_size=False, ll=False, cols=0):
    '''Return an ls-style text listing of *path* on the device *dev*.

    :param recurse: list subfolders recursively
    :param human_readable_size: show sizes like 10MB instead of raw byte counts
    :param ll: long listing (mode string, size, mtime), one file per line
    :param cols: terminal width, used to lay out the short listing in columns
    '''
    def col_split(l, cols):  # split list l into columns
        rows = len(l) // cols
        if len(l) % cols:
            rows += 1
        m = []
        for i in range(rows):
            m.append(l[i::rows])
        return m

    def row_widths(table):  # Calculate widths for each column in the row-wise table
        tcols = len(table[0])
        rowwidths = [0 for i in range(tcols)]
        for row in table:
            c = 0
            for item in row:
                rowwidths[c] = len(item) if len(item) > rowwidths[c] else rowwidths[c]
                c += 1
        return rowwidths

    output = PolyglotStringIO()
    # Normalize trailing slash (but keep the root path "/" intact)
    if path.endswith("/") and len(path) > 1:
        path = path[:-1]
    dirs = dev.list(path, recurse)
    for dir in dirs:
        if recurse:
            prints(dir[0] + ":", file=output)
        lsoutput, lscoloutput = [], []
        files = dir[1]
        maxlen = 0
        if ll:  # Calculate column width for size column
            for file in files:
                size = len(str(file.size))
                if human_readable_size:
                    file = FileFormatter(file)
                    size = len(file.human_readable_size)
                if size > maxlen:
                    maxlen = size
        for file in files:
            file = FileFormatter(file)
            name = file.name if ll else file.isdir_name
            lsoutput.append(name)
            lscoloutput.append(name)
            if ll:
                size = str(file.size)
                if human_readable_size:
                    size = file.human_readable_size
                prints(file.mode_string, ("%"+str(maxlen)+"s")%size, file.modification_time, name, file=output)
        if not ll and len(lsoutput) > 0:
            trytable = []
            # Search for the narrowest column width at which every name fits
            for colwidth in range(MINIMUM_COL_WIDTH, cols):
                trycols = int(cols//colwidth)
                trytable = col_split(lsoutput, trycols)
                works = True
                for row in trytable:
                    row_break = False
                    for item in row:
                        if len(item) > colwidth - 1:
                            works, row_break = False, True
                            break
                    if row_break:
                        break
                if works:
                    break
            rowwidths = row_widths(trytable)
            trytablecol = col_split(lscoloutput, len(trytable[0]))
            for r in range(len(trytable)):
                for c in range(len(trytable[r])):
                    padding = rowwidths[c] - len(trytable[r][c])
                    prints(trytablecol[r][c], "".ljust(padding), end=' ', file=output)
                prints(file=output)
        prints(file=output)
    listing = output.getvalue().rstrip() + "\n"
    output.close()
    return listing
def shutdown_plugins():
    '''Call shutdown() on every device plugin, ignoring individual failures so
    that one misbehaving plugin cannot prevent the rest from cleaning up.'''
    for d in device_plugins():
        try:
            d.shutdown()
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # SystemExit and KeyboardInterrupt during cleanup.
            pass
def main():
    '''Command line driver: parse the sub-command, find and open a connected
    device, then dispatch to the requested operation. Returns a process exit
    code (0 on success, 1 on error).'''
    from calibre.utils.terminal import geometry
    cols = geometry()[0]

    parser = OptionParser(usage="usage: %prog [options] command args\n\ncommand "+
            "is one of: info, books, df, ls, cp, mkdir, touch, cat, rm, eject, test_file\n\n"+
            "For help on a particular command: %prog command", version=__appname__+" version: " + __version__)
    parser.add_option("--log-packets", help="print out packet stream to stdout. "+
            "The numbers in the left column are byte offsets that allow the packet size to be read off easily.",
            dest="log_packets", action="store_true", default=False)
    parser.remove_option("-h")
    parser.disable_interspersed_args()  # Allow unrecognized options
    options, args = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        return 1

    command = args[0]
    args = args[1:]
    dev = None
    scanner = DeviceScanner()
    scanner.scan()
    connected_devices = []

    # Start up every device plugin and detect which (if any) have a device attached
    for d in device_plugins():
        try:
            d.startup()
        except:
            print('Startup failed for device plugin: %s'%d)
        if d.MANAGES_DEVICE_PRESENCE:
            # Plugin does its own detection (e.g. MTP); stop at the first hit
            cd = d.detect_managed_devices(scanner.devices)
            if cd is not None:
                connected_devices.append((cd, d))
                dev = d
                break
            continue
        ok, det = scanner.is_device_connected(d)
        if ok:
            dev = d
            dev.reset(log_packets=options.log_packets, detected_device=det)
            connected_devices.append((det, dev))

    if dev is None:
        print('Unable to find a connected ebook reader.', file=sys.stderr)
        shutdown_plugins()
        return 1

    # Open the first detected device that can actually be opened
    for det, d in connected_devices:
        try:
            d.open(det, None)
        except:
            continue
        else:
            dev = d
            d.specialize_global_preferences(device_prefs)
            break

    try:
        if command == "df":
            total = dev.total_space(end_session=False)
            free = dev.free_space()
            where = ("Memory", "Card A", "Card B")
            print("Filesystem\tSize \tUsed \tAvail \tUse%")
            for i in range(3):
                print("%-10s\t%s\t%s\t%s\t%s"%(where[i], human_readable(total[i]), human_readable(total[i]-free[i]), human_readable(free[i]),
                        str(0 if total[i]==0 else int(100*(total[i]-free[i])/(total[i]*1.)))+"%"))
        elif command == 'eject':
            dev.eject()
        elif command == "books":
            print("Books in main memory:")
            for book in dev.books():
                print(book)
            print("\nBooks on storage carda:")
            for book in dev.books(oncard='carda'):
                print(book)
            print("\nBooks on storage cardb:")
            for book in dev.books(oncard='cardb'):
                print(book)
        elif command == "mkdir":
            parser = OptionParser(usage="usage: %prog mkdir [options] path\nCreate a folder on the device\n\npath must begin with / or card:/")
            if len(args) != 1:
                parser.print_help()
                sys.exit(1)
            dev.mkdir(args[0])
        elif command == "ls":
            parser = OptionParser(usage="usage: %prog ls [options] path\nList files on the device\n\npath must begin with / or card:/")
            parser.add_option(
                    "-l", help="In addition to the name of each file, print the file type, permissions, and timestamp (the modification time, in the local timezone). Times are local.",  # noqa
                    dest="ll", action="store_true", default=False)
            parser.add_option("-R", help="Recursively list subfolders encountered. /dev and /proc are omitted",
                    dest="recurse", action="store_true", default=False)
            parser.remove_option("-h")
            parser.add_option("-h", "--human-readable", help="show sizes in human readable format", dest="hrs", action="store_true", default=False)
            options, args = parser.parse_args(args)
            if len(args) != 1:
                parser.print_help()
                return 1
            print(ls(dev, args[0], recurse=options.recurse, ll=options.ll, human_readable_size=options.hrs, cols=cols), end=' ')
        elif command == "info":
            info(dev)
        elif command == "cp":
            usage="usage: %prog cp [options] source destination\nCopy files to/from the device\n\n"+\
            "One of source or destination must be a path on the device. \n\nDevice paths have the form\n"+\
            "dev:mountpoint/my/path\n"+\
            "where mountpoint is one of / or carda: or cardb:/\n\n"+\
            "source must point to a file for which you have read permissions\n"+\
            "destination must point to a file or folder for which you have write permissions"
            parser = OptionParser(usage=usage)
            parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
                    help='Overwrite the destination file if it exists already.')
            options, args = parser.parse_args(args)
            if len(args) != 2:
                parser.print_help()
                return 1
            if args[0].startswith("dev:"):
                # device -> local copy
                outfile = args[1]
                path = args[0][4:]
                if path.endswith("/"):
                    path = path[:-1]
                if os.path.isdir(outfile):
                    outfile = os.path.join(outfile, path[path.rfind("/")+1:])
                try:
                    outfile = open(outfile, "wb")
                except OSError as e:
                    print(e, file=sys.stderr)
                    parser.print_help()
                    return 1
                dev.get_file(path, outfile)
                fsync(outfile)
                outfile.close()
            elif args[1].startswith("dev:"):
                # local -> device copy
                try:
                    infile = open(args[0], "rb")
                except OSError as e:
                    print(e, file=sys.stderr)
                    parser.print_help()
                    return 1
                dev.put_file(infile, args[1][4:], replace_file=options.force)
                infile.close()
            else:
                parser.print_help()
                return 1
        elif command == "cat":
            outfile = sys.stdout
            parser = OptionParser(
                    usage="usage: %prog cat path\nShow file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/")
            options, args = parser.parse_args(args)
            if len(args) != 1:
                parser.print_help()
                return 1
            if args[0].endswith("/"):
                path = args[0][:-1]
            else:
                path = args[0]
            outfile = sys.stdout
            dev.get_file(path, outfile)
        elif command == "rm":
            parser = OptionParser(usage="usage: %prog rm path\nDelete files from the device\n\npath should point to a file or empty folder on the device "+
                    "and must begin with / or card:/\n\n"+
                    "rm will DELETE the file. Be very CAREFUL")
            options, args = parser.parse_args(args)
            if len(args) != 1:
                parser.print_help()
                return 1
            dev.rm(args[0])
        elif command == "touch":
            parser = OptionParser(usage="usage: %prog touch path\nCreate an empty file on the device\n\npath should point to a file on the device and must begin with /,a:/ or b:/\n\n"+  # noqa
            "Unfortunately, I can't figure out how to update file times on the device, so if path already exists, touch does nothing")
            options, args = parser.parse_args(args)
            if len(args) != 1:
                parser.print_help()
                return 1
            dev.touch(args[0])
        elif command == 'test_file':
            parser = OptionParser(usage=("usage: %prog test_file path\n"
                'Open device, copy file specified by path to device and '
                'then eject device.'))
            options, args = parser.parse_args(args)
            if len(args) != 1:
                parser.print_help()
                return 1
            path = args[0]
            from calibre.ebooks.metadata.meta import get_metadata
            mi = get_metadata(open(path, 'rb'), path.rpartition('.')[-1].lower())
            print(dev.upload_books([args[0]], [os.path.basename(args[0])],
                    end_session=False, metadata=[mi]))
            dev.eject()
        else:
            parser.print_help()
            if getattr(dev, 'handle', False):
                dev.close()
            return 1
    except DeviceLocked:
        print("The device is locked. Use the --unlock option", file=sys.stderr)
    except (ArgumentError, DeviceError) as e:
        print(e, file=sys.stderr)
        return 1
    finally:
        # Always give plugins a chance to clean up, whatever command ran
        shutdown_plugins()

    return 0
if __name__ == '__main__':
    # NOTE(review): main()'s return code is not propagated via sys.exit() here.
    main()
| 15,091 | Python | .py | 352 | 30.943182 | 192 | 0.540533 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,724 | utils.py | kovidgoyal_calibre/src/calibre/devices/utils.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
import time
from functools import partial
from calibre.devices.errors import DeviceError, FreeSpaceError, WrongDestinationError
def sanity_check(on_card, files, card_prefixes, free_space):
    '''Validate the destination slot and available space before an upload.

    :param on_card: '' / None for main memory, or 'carda' / 'cardb'
    :param files: file paths (or objects with a ``name`` attribute) to be sent
    :param card_prefixes: (carda_prefix, cardb_prefix); falsy when card absent
    :param free_space: (main, carda, cardb) free bytes

    Raises WrongDestinationError for a missing card, DeviceError for an
    unknown slot and FreeSpaceError when the files will not fit.
    '''
    if on_card in ('carda', 'cardb'):
        idx = 0 if on_card == 'carda' else 1
        if not card_prefixes[idx]:
            raise WrongDestinationError(_(
                'The reader has no storage card %s. You may have changed '
                'the default send to device action. Right click on the "Send '
                'to device" button and reset the default action to be '
                '"Send to main memory".') % ('A' if idx == 0 else 'B'))
    elif on_card:
        raise DeviceError(_('Selected slot: %s is not supported.') % on_card)

    size = sum(os.path.getsize(getattr(f, 'name', f)) for f in files)

    # Leave a safety margin: 2MB in main memory, 1MB on cards
    if not on_card and size > free_space[0] - 2*1024*1024:
        raise FreeSpaceError(_("There is insufficient free space in main memory"))
    if on_card == 'carda' and size > free_space[1] - 1024*1024:
        raise FreeSpaceError(_("There is insufficient free space on the storage card"))
    if on_card == 'cardb' and size > free_space[2] - 1024*1024:
        raise FreeSpaceError(_("There is insufficient free space on the storage card"))
def build_template_regexp(template):
    '''Compile a filename template like ``{title} - {authors}`` into a regexp
    with named groups for the metadata fields it references.

    Only title, series, series_index, isbn and author get named groups (and
    only the first occurrence of each, to avoid duplicate group names); any
    other ``{field}`` matches anonymously. authors/author_sort are normalized
    to the ``author`` group. A trailing run of digits/underscores (filename
    disambiguation suffix) is permitted. Falls back to the default
    ``{title} - {authors}`` template if the given one fails to compile.
    '''
    def replfunc(match, seen=None):
        v = match.group(1)
        if v in ['authors', 'author_sort']:
            v = 'author'
        if v in ('title', 'series', 'series_index', 'isbn', 'author'):
            if v not in seen:
                seen.add(v)
                return '(?P<' + v + '>.+?)'
        return '(.+?)'

    seen_groups = set()
    f = partial(replfunc, seen=seen_groups)
    try:
        # Only the final path component of the template names the file
        template = template.rpartition('/')[2]
        return re.compile(re.sub('{([^}]*)}', f, template) + r'([_\d]*$)')
    except Exception:
        # BUG FIX: import calibre's prints lazily -- the old eager import at
        # the top of the function ran on every call although prints is only
        # needed on this failure path. Also narrowed the bare ``except`` so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        from calibre import prints
        prints('Failed to parse template: %r'%template)
        template = '{title} - {authors}'
        return re.compile(re.sub('{([^}]*)}', f, template) + r'([_\d]*$)')
def create_upload_path(mdata, fname, template, sanitize,
        prefix_path='',
        path_type=os.path,
        maxlen=250,
        use_subdirs=True,
        news_in_folder=True,
        filename_callback=lambda x, y:x,
        sanitize_path_components=lambda x: x
        ):
    '''Compute the path (under *prefix_path*) at which to store a book on the
    device, by expanding the save *template* against the book's metadata.

    :param mdata: Metadata object for the book
    :param fname: fallback file name used when the template expands to nothing
    :param template: save template, e.g. ``{title} - {authors}``
    :param sanitize: callable making a string safe for the device filesystem
    :param prefix_path: prefix joined onto the generated components
    :param path_type: path module used for join/splitext (os.path by default)
    :param maxlen: maximum length of the generated path
    :param use_subdirs: if False, only the final path component is kept
    :param news_in_folder: if True, news downloads are placed under ``News/``
    :param filename_callback: driver hook to adjust the final file name
    :param sanitize_path_components: driver hook to post-process the components
    '''
    from calibre.library.save_to_disk import config, get_components
    from calibre.utils.filenames import shorten_components_to

    # A tag starting with the localized 'News' or with '/' triggers the
    # special folder handling further below.
    special_tag = None
    if mdata.tags:
        for t in mdata.tags:
            if t.startswith(_('News')) or t.startswith('/'):
                special_tag = t
                break

    # News downloads are named by title and date instead of the user template
    if mdata.tags and _('News') in mdata.tags:
        try:
            p = mdata.pubdate
            date = (p.year, p.month, p.day)
        except:
            today = time.localtime()
            date = (today[0], today[1], today[2])
        template = "{title}_%d-%d-%d" % date

    fname = sanitize(fname)
    ext = path_type.splitext(fname)[1]

    opts = config().parse()
    if not isinstance(template, str):
        template = template.decode('utf-8')
    app_id = str(getattr(mdata, 'application_id', ''))
    id_ = mdata.get('id', fname)
    extra_components = get_components(template, mdata, id_,
            timefmt=opts.send_timefmt, length=maxlen-len(app_id)-1,
            sanitize_func=sanitize, last_has_extension=False)
    if not extra_components:
        # Template expanded to nothing: fall back to the given file name
        extra_components.append(sanitize(filename_callback(fname,
            mdata)))
    else:
        extra_components[-1] = sanitize(filename_callback(extra_components[-1]+ext, mdata))

    # Avoid hidden/underscore-leading final names on the device
    if extra_components[-1] and extra_components[-1][0] in ('.', '_'):
        extra_components[-1] = 'x' + extra_components[-1][1:]

    if special_tag is not None:
        # Replace the folder components with ones derived from the tag
        name = extra_components[-1]
        extra_components = []
        tag = special_tag
        if tag.startswith(_('News')):
            if news_in_folder:
                extra_components.append('News')
        else:
            for c in tag.split('/'):
                c = sanitize(c)
                if not c:
                    continue
                extra_components.append(c)
        extra_components.append(name)

    if not use_subdirs:
        extra_components = extra_components[-1:]

    def remove_trailing_periods(x):
        # Trailing periods are invalid on FAT filesystems
        ans = x
        while ans.endswith('.'):
            ans = ans[:-1].strip()
        if not ans:
            ans = 'x'
        return ans

    extra_components = list(map(remove_trailing_periods, extra_components))
    components = shorten_components_to(maxlen - len(prefix_path), extra_components)
    components = sanitize_path_components(components)
    if prefix_path:
        filepath = path_type.join(prefix_path, *components)
    else:
        filepath = path_type.join(*components)

    return filepath
| 5,275 | Python | .py | 126 | 33.34127 | 91 | 0.59922 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,725 | __init__.py | kovidgoyal_calibre/src/calibre/devices/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Device drivers.
'''
import pprint
import sys
import time
from functools import partial
# Translation tables between English day/month abbreviations (as used in
# RFC-2822 style dates) and the numbers understood by time.strptime/strftime.
DAY_MAP = dict(Sun=0, Mon=1, Tue=2, Wed=3, Thu=4, Fri=5, Sat=6)
MONTH_MAP = dict(Jan=1, Feb=2, Mar=3, Apr=4, May=5, Jun=6, Jul=7, Aug=8, Sep=9, Oct=10, Nov=11, Dec=12)
INVERSE_DAY_MAP = {number: name for name, number in DAY_MAP.items()}
INVERSE_MONTH_MAP = {number: name for name, number in MONTH_MAP.items()}


def strptime(src):
    '''Parse a date string like ``Thu, 01 Jan 1970 00:00:00 GMT`` into a
    time.struct_time, independent of the current locale, by translating the
    English abbreviations through the maps above.'''
    parts = src.strip().split()
    parts[0] = str(DAY_MAP[parts[0][:-1]]) + ','
    parts[2] = str(MONTH_MAP[parts[2]])
    return time.strptime(' '.join(parts), '%w, %d %m %Y %H:%M:%S %Z')


def strftime(epoch, zone=time.gmtime):
    '''Format an epoch timestamp as ``Thu, 01 Jan 1970 00:00:00 GMT``,
    independent of the current locale.'''
    parts = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone(epoch)).split()
    parts[0] = INVERSE_DAY_MAP[int(parts[0][:-1])] + ','
    parts[2] = INVERSE_MONTH_MAP[int(parts[2])]
    return ' '.join(parts)
def get_connected_device():
    '''Scan for connected ebook devices and return the first driver that can
    actually be opened, or None when nothing usable is found.'''
    from calibre.customize.ui import device_plugins
    from calibre.devices.scanner import DeviceScanner

    scanner = DeviceScanner()
    scanner.scan()
    dev = None
    candidates = []
    for plugin in device_plugins():
        connected, detected = scanner.is_device_connected(plugin)
        if connected:
            dev = plugin
            dev.reset(log_packets=False, detected_device=detected)
            candidates.append((detected, dev))

    if dev is None:
        print('Unable to find a connected ebook reader.', file=sys.stderr)
        return

    # Try to open each candidate; the first that opens wins
    for detected, plugin in candidates:
        try:
            plugin.open(detected, None)
        except:
            continue
        else:
            dev = plugin
            break
    return dev
def debug(ioreg_to_tmp=False, buf=None, plugins=None,
        disabled_plugins=None):
    '''
    If plugins is None, then this method calls startup and shutdown on the
    device plugins. So if you are using it in a context where startup could
    already have been called (for example in the main GUI), pass in the list of
    device plugins as the plugins parameter.
    '''
    import textwrap

    from calibre import prints
    from calibre.constants import debug, is_debugging, ismacos, iswindows
    from calibre.customize.ui import device_plugins, disabled_device_plugins
    from calibre.debug import print_basic_debug_info
    from calibre.devices.scanner import DeviceScanner
    from polyglot.io import PolyglotStringIO

    # Redirect stdout/stderr into buf so all diagnostics are captured;
    # restored in the finally block below.
    oldo, olde = sys.stdout, sys.stderr
    if buf is None:
        buf = PolyglotStringIO()
    sys.stdout = sys.stderr = buf
    out = partial(prints, file=buf)

    devplugins = device_plugins() if plugins is None else plugins
    devplugins = list(sorted(devplugins, key=lambda x: x.__class__.__name__))
    if plugins is None:
        for d in devplugins:
            try:
                d.startup()
            except:
                out('Startup failed for device plugin: %s'%d)

    if disabled_plugins is None:
        disabled_plugins = list(disabled_device_plugins())

    # Temporarily force debug mode on; restored in finally
    orig_debug = is_debugging()
    debug(True)
    try:
        print_basic_debug_info(out=buf)
        s = DeviceScanner()
        s.scan()
        devices = (s.devices)
        if not iswindows:
            # Show vendor/product/bcd ids as hex for readability
            devices = [list(x) for x in devices]
            for d in devices:
                for i in range(3):
                    d[i] = hex(d[i])
        out('USB devices on system:')
        out(pprint.pformat(devices))

        ioreg = None
        if ismacos:
            # On macOS also capture mount/ioreg output for the report
            from calibre.devices.usbms.device import Device
            mount = '\n'.join(repr(x) for x in Device.osx_run_mount().splitlines())
            drives = pprint.pformat(Device.osx_get_usb_drives())
            ioreg = 'Output from mount:\n'+mount+'\n\n'
            ioreg += 'Output from osx_get_usb_drives:\n'+drives+'\n\n'
            iro = Device.run_ioreg()
            try:
                ioreg += iro.decode('utf-8', 'replace')
            except UnicodeDecodeError:
                ioreg += repr(iro)
        connected_devices = []
        if disabled_plugins:
            out('\nDisabled plugins:', textwrap.fill(' '.join([x.__class__.__name__ for x in
                disabled_plugins])))
            out(' ')
        else:
            out('\nNo disabled plugins')
        # First give self-managing plugins (e.g. MTP) a chance to detect
        found_dev = False
        for dev in devplugins:
            if not dev.MANAGES_DEVICE_PRESENCE:
                continue
            out('Looking for devices of type:', dev.__class__.__name__)
            if dev.debug_managed_device_detection(s.devices, buf):
                found_dev = True
                break
            out(' ')

        if not found_dev:
            out('Looking for devices...')
            for dev in devplugins:
                if dev.MANAGES_DEVICE_PRESENCE:
                    continue
                connected, det = s.is_device_connected(dev, debug=True)
                if connected:
                    out('\t\tDetected possible device', dev.__class__.__name__)
                    connected_devices.append((dev, det))

            out(' ')
            errors = {}
            success = False
            out('Devices possibly connected:', end=' ')
            for dev, det in connected_devices:
                out(dev.name, end=', ')
            if not connected_devices:
                out('None', end='')
            out(' ')
            # Try to actually open each candidate, recording tracebacks on failure
            for dev, det in connected_devices:
                out('Trying to open', dev.name, '...', end=' ')
                dev.do_device_debug = True
                try:
                    dev.reset(detected_device=det)
                    dev.open(det, None)
                    out('OK')
                except:
                    import traceback
                    errors[dev] = traceback.format_exc()
                    out('failed')
                    continue
                dev.do_device_debug = False
                success = True
                if hasattr(dev, '_main_prefix'):
                    out('Main memory:', repr(dev._main_prefix))
                out('Total space:', dev.total_space())
                break

            if not success and errors:
                out('Opening of the following devices failed')
                for dev,msg in errors.items():
                    out(dev)
                    out(msg)
                out(' ')

            if ioreg is not None:
                ioreg = 'IOREG Output\n'+ioreg
                out(' ')
                if ioreg_to_tmp:
                    open('/tmp/ioreg.txt', 'w').write(ioreg)
                    out('Dont forget to send the contents of /tmp/ioreg.txt')
                    out('You can open it with the command: open /tmp/ioreg.txt')
                else:
                    out(ioreg)

        if hasattr(buf, 'getvalue'):
            return buf.getvalue()
    finally:
        debug(orig_debug)
        sys.stdout = oldo
        sys.stderr = olde
        if plugins is None:
            for d in devplugins:
                try:
                    d.shutdown()
                except:
                    pass
def device_info(ioreg_to_tmp=False, buf=None):
    '''Return a dict with the set of currently attached USB devices
    ('device_set') and, per device, the first three elements of its scanner
    tuple ('device_details').

    The two parameters are unused; they are kept for call compatibility
    with debug().
    '''
    from calibre.devices.scanner import DeviceScanner

    scanner = DeviceScanner()
    scanner.scan()
    device_set = set()
    device_details = {}
    for dev in (tuple(d) for d in scanner.devices):
        device_set.add(dev)
        device_details[dev] = dev[0:3]
    return {'device_set': device_set, 'device_details': device_details}
| 7,460 | Python | .py | 197 | 27.208122 | 103 | 0.54754 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,726 | udisks.py | kovidgoyal_calibre/src/calibre/devices/udisks.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import re
from contextlib import suppress
def node_mountpoint(node):
    '''Return the mount point of the device *node* (e.g. /dev/sdb1) by
    consulting /proc/mounts, or None if it is not mounted.'''
    if isinstance(node, str):
        node = node.encode('utf-8')

    def unescape(raw):
        # /proc/mounts octal-escapes space, tab, newline and backslash
        for escaped, plain in ((b'\\040', b' '), (b'\\011', b'\t'), (b'\\012', b'\n'), (b'\\0134', b'\\')):
            raw = raw.replace(escaped, plain)
        return raw.decode('utf-8')

    with open('/proc/mounts', 'rb') as src:
        for line in src.readlines():
            fields = line.split()
            if fields[0] == node:
                return unescape(fields[1])
    return None
def basic_mount_options():
    '''Mount options for removable media: writable, but with exec/suid/dev
    disabled, owned by the current (effective) user and group.'''
    return ['rw', 'noexec', 'nosuid', 'nodev', f'uid={os.geteuid()}', f'gid={os.getegid()}']
class UDisks:
    '''Minimal client for the UDisks2 D-Bus service (via jeepney), used to
    mount, unmount and eject removable storage. Use as a context manager so
    the system-bus connection is opened and closed correctly.'''

    BUS_NAME = 'org.freedesktop.UDisks2'
    BLOCK = f'{BUS_NAME}.Block'
    FILESYSTEM = f'{BUS_NAME}.Filesystem'
    DRIVE = f'{BUS_NAME}.Drive'
    PATH = '/org/freedesktop/UDisks2'

    def __enter__(self):
        from jeepney.io.blocking import open_dbus_connection
        self.connection = open_dbus_connection(bus='SYSTEM')
        return self

    def __exit__(self, *args):
        self.connection.close()
        del self.connection

    def address(self, path='', interface=None):
        # Build a DBusAddress for an object below the UDisks2 root
        from jeepney import DBusAddress
        path = os.path.join(self.PATH, path)
        return DBusAddress(path, bus_name=self.BUS_NAME, interface=interface)

    def send(self, msg):
        # Send a message and return the reply, raising on a D-Bus error reply
        from jeepney import DBusErrorResponse, MessageType
        reply = self.connection.send_and_get_reply(msg)
        if reply.header.message_type is MessageType.error:
            raise DBusErrorResponse(reply)
        return reply

    def introspect(self, object_path):
        # Return the introspection XML for an object below the UDisks2 root
        from jeepney import Introspectable
        r = self.send(Introspectable(f'{self.PATH}/{object_path}', self.BUS_NAME).Introspect())
        return r.body[0]

    def get_device_node_path(self, devname):
        # The Device property is a NUL-terminated byte string path; strip the NULs
        from jeepney import Properties
        p = Properties(self.address(f'block_devices/{devname}', self.BLOCK))
        r = self.send(p.get('Device'))
        return bytearray(r.body[0][1]).replace(b'\x00', b'').decode('utf-8')

    def iter_block_devices(self):
        # Yield (udisks name, device node path) for every known block device,
        # skipping any device that errors out
        xml = self.introspect('block_devices')
        for m in re.finditer(r'name=[\'"](.+?)[\'"]', xml):
            devname = m.group(1)
            with suppress(Exception):
                yield devname, self.get_device_node_path(devname)

    def device(self, device_node_path):
        '''Map a /dev node path to its UDisks2 block-device name, raising
        KeyError if UDisks2 does not know about it.'''
        device_node_path = os.path.realpath(device_node_path)
        devname = device_node_path.split('/')[-1]
        # First try the device name directly
        with suppress(Exception):
            if self.get_device_node_path(devname) == device_node_path:
                return devname
        # Enumerate all devices known to UDisks
        for q, devpath in self.iter_block_devices():
            if devpath == device_node_path:
                return q
        raise KeyError(f'{device_node_path} not known to UDisks2')

    def filesystem_operation_message(self, device_node_path, function_name, **kw):
        # Build a method-call message on the Filesystem interface of the device,
        # always suppressing interactive authorization prompts
        from jeepney import new_method_call
        devname = self.device(device_node_path)
        a = self.address(f'block_devices/{devname}', self.FILESYSTEM)
        kw['auth.no_user_interaction'] = ('b', True)
        return new_method_call(a, function_name, 'a{sv}', (kw,))

    def mount(self, device_node_path):
        # NOTE(review): on a successful Mount call this returns None; only the
        # already-mounted fallback path returns the mount point -- confirm
        # callers do not rely on the return value.
        msg = self.filesystem_operation_message(device_node_path, 'Mount', options=('s', ','.join(basic_mount_options())))
        try:
            self.send(msg)
        except Exception:
            # May be already mounted, check
            mp = node_mountpoint(str(device_node_path))
            if mp is None:
                raise
            return mp

    def unmount(self, device_node_path):
        # Force-unmount the filesystem on the given device node
        msg = self.filesystem_operation_message(device_node_path, 'Unmount', force=('b', True))
        self.send(msg)

    def drive_for_device(self, device_node_path):
        # Return the object path of the Drive that contains the given block device
        from jeepney import Properties
        devname = self.device(device_node_path)
        a = self.address(f'block_devices/{devname}', self.BLOCK)
        msg = Properties(a).get('Drive')
        r = self.send(msg)
        return r.body[0][1]

    def eject(self, device_node_path):
        # Eject the drive containing the given device node
        from jeepney import new_method_call
        drive = self.drive_for_device(device_node_path)
        a = self.address(drive, self.DRIVE)
        msg = new_method_call(a, 'Eject', 'a{sv}', ({
            'auth.no_user_interaction': ('b', True),
        },))
        self.send(msg)
def get_udisks():
    '''Return a UDisks client; use it as a context manager.'''
    return UDisks()


def mount(node_path):
    # Mount the filesystem on the given device node (e.g. /dev/sdb1)
    with get_udisks() as u:
        u.mount(node_path)


def eject(node_path):
    # Eject the drive containing the given device node
    with get_udisks() as u:
        u.eject(node_path)


def umount(node_path):
    # Unmount the filesystem on the given device node
    with get_udisks() as u:
        u.unmount(node_path)
def test_udisks():
    # Manual smoke test: mount, unmount and eject the device node passed
    # on the command line (requires a real removable device).
    import sys
    dev = sys.argv[1]
    print('Testing with node', dev)
    with get_udisks() as u:
        print('Using Udisks:', u.__class__.__name__)
        print('Mounted at:', u.mount(dev))
        print('Unmounting')
        u.unmount(dev)
        print('Ejecting:')
        u.eject(dev)


def develop():
    # Developer scratch function: resolve a hard-coded device node
    dev = '/dev/nvme0n1p3'
    with get_udisks() as u:
        print(u.device(dev))
        print(u.drive_for_device(dev))


if __name__ == '__main__':
    test_udisks()
| 5,402 | Python | .py | 134 | 32.298507 | 122 | 0.612471 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,727 | winusb.py | kovidgoyal_calibre/src/calibre/devices/winusb.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import os
import re
import string
from collections import defaultdict, namedtuple
from ctypes import (
POINTER,
WINFUNCTYPE,
Structure,
WinError,
addressof,
byref,
c_ubyte,
c_uint,
c_void_p,
c_wchar,
cast,
create_string_buffer,
create_unicode_buffer,
get_last_error,
memset,
sizeof,
string_at,
windll,
wstring_at,
)
from ctypes import c_uint64 as QWORD
from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, HWND, LPCWSTR, LPWSTR, UINT, ULONG, USHORT, WORD
from operator import itemgetter
from pprint import pformat, pprint
from calibre import as_unicode, prints
from polyglot.builtins import iteritems, itervalues
try:
import winreg
except ImportError:
import _winreg as winreg
# Data and function type definitions {{{
class GUID(Structure):
    '''ctypes representation of a Windows GUID (cf. guiddef.h).'''
    _fields_ = [
        ("data1", DWORD),
        ("data2", WORD),
        ("data3", WORD),
        ("data4", c_ubyte * 8)]

    def __init__(self, l, w1, w2, b1, b2, b3, b4, b5, b6, b7, b8):
        self.data1 = l
        self.data2 = w1
        self.data3 = w2
        for pos, byte in enumerate((b1, b2, b3, b4, b5, b6, b7, b8)):
            self.data4[pos] = byte

    def __str__(self):
        # Render in registry format: {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
        tail = ''.join('%02x' % b for b in self.data4)
        return '{%08x-%04x-%04x-%s-%s}' % (
            self.data1, self.data2, self.data3, tail[:4], tail[4:])
# Type aliases used by the SetupAPI/cfgmgr32 declarations below
CONFIGRET = DWORD
DEVINST = DWORD
LPDWORD = POINTER(DWORD)
LPVOID = c_void_p
REG_QWORD = 11  # registry value type: 64-bit number

# Pre-computed winioctl.h control codes for storage devices
IOCTL_STORAGE_MEDIA_REMOVAL = 0x2D4804
IOCTL_STORAGE_EJECT_MEDIA = 0x2D4808
IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x2D1080
def CTL_CODE(DeviceType, Function, Method, Access):
    '''Pack the four fields of a Windows I/O control code, mirroring the
    CTL_CODE macro from winioctl.h.'''
    code = DeviceType << 16
    code |= Access << 14
    code |= Function << 2
    return code | Method


def USB_CTL(id):
    # CTL_CODE(FILE_DEVICE_USB, (id), METHOD_BUFFERED, FILE_ANY_ACCESS)
    return CTL_CODE(0x22, id, 0, 0)


# usbioctl.h control codes. NOTE(review): ROOT_HUB_NAME and NODE_INFORMATION
# share function code 258 -- this matches the Windows headers, where
# IOCTL_USB_GET_ROOT_HUB_NAME reuses USB_GET_NODE_INFORMATION; confirm if changed.
IOCTL_USB_GET_ROOT_HUB_NAME = USB_CTL(258)
IOCTL_USB_GET_NODE_INFORMATION = USB_CTL(258)
IOCTL_USB_GET_NODE_CONNECTION_INFORMATION = USB_CTL(259)
IOCTL_USB_GET_NODE_CONNECTION_INFORMATION_EX = USB_CTL(274)
IOCTL_USB_GET_NODE_CONNECTION_DRIVERKEY_NAME = USB_CTL(264)
IOCTL_USB_GET_NODE_CONNECTION_NAME = USB_CTL(261)
IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION = USB_CTL(260)

# USB descriptor type ids and limits (usbspec.h)
USB_CONFIGURATION_DESCRIPTOR_TYPE = 2
USB_STRING_DESCRIPTOR_TYPE = 3
USB_INTERFACE_DESCRIPTOR_TYPE = 4
USB_REQUEST_GET_DESCRIPTOR = 0x06
MAXIMUM_USB_STRING_LENGTH = 255
# Friendly named-tuple view of a STORAGE_DEVICE_NUMBER result
StorageDeviceNumber = namedtuple('StorageDeviceNumber', 'type number partition_number')


class STORAGE_DEVICE_NUMBER(Structure):
    # ctypes mirror of the Win32 STORAGE_DEVICE_NUMBER structure, filled in
    # by IOCTL_STORAGE_GET_DEVICE_NUMBER
    _fields_ = [
        ('DeviceType', DWORD),
        ('DeviceNumber', ULONG),
        ('PartitionNumber', ULONG)
    ]

    def as_tuple(self):
        '''Repackage the raw fields as a StorageDeviceNumber named tuple.'''
        return StorageDeviceNumber(self.DeviceType, self.DeviceNumber, self.PartitionNumber)
class SP_DEVINFO_DATA(Structure):
    # SetupAPI SP_DEVINFO_DATA: identifies a device in a device information set
    _fields_ = [
        ('cbSize', DWORD),
        ('ClassGuid', GUID),
        ('DevInst', DEVINST),
        ('Reserved', POINTER(ULONG)),
    ]

    def __str__(self):
        return f"ClassGuid:{self.ClassGuid} DevInst:{self.DevInst}"


PSP_DEVINFO_DATA = POINTER(SP_DEVINFO_DATA)


class SP_DEVICE_INTERFACE_DATA(Structure):
    # SetupAPI SP_DEVICE_INTERFACE_DATA: identifies a device interface in a set
    _fields_ = [
        ('cbSize', DWORD),
        ('InterfaceClassGuid', GUID),
        ('Flags', DWORD),
        ('Reserved', POINTER(ULONG)),
    ]

    def __str__(self):
        return f"InterfaceClassGuid:{self.InterfaceClassGuid} Flags:{self.Flags}"


# Windows idiom for declaring trailing variable-length arrays in structs
ANYSIZE_ARRAY = 1


class SP_DEVICE_INTERFACE_DETAIL_DATA(Structure):
    # Variable-size struct: DevicePath is really a longer NUL-terminated
    # string; callers allocate a larger buffer and cast.
    _fields_ = [
        ("cbSize", DWORD),
        ("DevicePath", c_wchar*ANYSIZE_ARRAY)
    ]


UCHAR = c_ubyte
class USB_DEVICE_DESCRIPTOR(Structure):
    # Standard USB device descriptor (USB spec section 9.6.1).
    # iManufacturer/iProduct/iSerialNumber are string-descriptor indices.
    _fields_ = (
        ('bLength', UCHAR),
        ('bDescriptorType', UCHAR),
        ('bcdUSB', USHORT),
        ('bDeviceClass', UCHAR),
        ('bDeviceSubClass', UCHAR),
        ('bDeviceProtocol', UCHAR),
        ('bMaxPacketSize0', UCHAR),
        ('idVendor', USHORT),
        ('idProduct', USHORT),
        ('bcdDevice', USHORT),
        ('iManufacturer', UCHAR),
        ('iProduct', UCHAR),
        ('iSerialNumber', UCHAR),
        ('bNumConfigurations', UCHAR),
    )

    def __repr__(self):
        return 'USBDevice(class=0x%x sub_class=0x%x protocol=0x%x vendor_id=0x%x product_id=0x%x bcd=0x%x manufacturer=%d product=%d serial_number=%d)' % (
            self.bDeviceClass, self.bDeviceSubClass, self.bDeviceProtocol,
            self.idVendor, self.idProduct, self.bcdDevice, self.iManufacturer,
            self.iProduct, self.iSerialNumber)
class USB_ENDPOINT_DESCRIPTOR(Structure):
_fields_ = (
('bLength', UCHAR),
('bDescriptorType', UCHAR),
('bEndpointAddress', UCHAR),
('bmAttributes', UCHAR),
('wMaxPacketSize', USHORT),
('bInterval', UCHAR)
)
class USB_PIPE_INFO(Structure):
_fields_ = (
('EndpointDescriptor', USB_ENDPOINT_DESCRIPTOR),
('ScheduleOffset', ULONG),
)
class USB_NODE_CONNECTION_INFORMATION_EX(Structure):
_fields_ = (
('ConnectionIndex', ULONG),
('DeviceDescriptor', USB_DEVICE_DESCRIPTOR),
('CurrentConfigurationValue', UCHAR),
('Speed', UCHAR),
('DeviceIsHub', BOOL),
('DeviceAddress', USHORT),
('NumberOfOpenPipes', ULONG),
('ConnectionStatus', c_uint),
('PipeList', USB_PIPE_INFO*ANYSIZE_ARRAY),
)
class USB_STRING_DESCRIPTOR(Structure):
    # USB string descriptor header; String is a variable length UTF-16 payload
    _fields_ = (
        ('bLength', UCHAR),
        ('bType', UCHAR),
        ('String', UCHAR * ANYSIZE_ARRAY),
    )
class USB_DESCRIPTOR_REQUEST(Structure):
    # Payload for IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION: a USB setup
    # packet plus space for the returned descriptor data.

    class SetupPacket(Structure):
        # Standard USB SETUP packet; wValue is declared as two bytes so the
        # descriptor index and type can be set individually.
        _fields_ = (
            ('bmRequest', UCHAR),
            ('bRequest', UCHAR),
            ('wValue', UCHAR*2),
            ('wIndex', USHORT),
            ('wLength', USHORT),
        )

    _fields_ = (
        ('ConnectionIndex', ULONG),
        ('SetupPacket', SetupPacket),
        ('Data', USB_STRING_DESCRIPTOR),
    )
# Pointer typedefs for the structs above
PUSB_DESCRIPTOR_REQUEST = POINTER(USB_DESCRIPTOR_REQUEST)
PSP_DEVICE_INTERFACE_DETAIL_DATA = POINTER(SP_DEVICE_INTERFACE_DETAIL_DATA)
PSP_DEVICE_INTERFACE_DATA = POINTER(SP_DEVICE_INTERFACE_DATA)
INVALID_HANDLE_VALUE = c_void_p(-1).value
# CreateFile access/share/disposition flags (winnt.h)
GENERIC_READ = 0x80000000
GENERIC_WRITE = 0x40000000
FILE_SHARE_READ = 0x1
FILE_SHARE_WRITE = 0x2
OPEN_EXISTING = 0x3
# Device interface class GUIDs (winioctl.h / usbiodef.h)
GUID_DEVINTERFACE_VOLUME = GUID(0x53F5630D, 0xB6BF, 0x11D0, 0x94, 0xF2, 0x00, 0xA0, 0xC9, 0x1E, 0xFB, 0x8B)
GUID_DEVINTERFACE_DISK = GUID(0x53F56307, 0xB6BF, 0x11D0, 0x94, 0xF2, 0x00, 0xA0, 0xC9, 0x1E, 0xFB, 0x8B)
GUID_DEVINTERFACE_CDROM = GUID(0x53f56308, 0xb6bf, 0x11d0, 0x94, 0xf2, 0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b)
GUID_DEVINTERFACE_FLOPPY = GUID(0x53f56311, 0xb6bf, 0x11d0, 0x94, 0xf2, 0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b)
GUID_DEVINTERFACE_USB_DEVICE = GUID(0xA5DCBF10, 0x6530, 0x11D2, 0x90, 0x1F, 0x00, 0xC0, 0x4F, 0xB9, 0x51, 0xED)
GUID_DEVINTERFACE_USB_HUB = GUID(0xf18a0e88, 0xc30c, 0x11d0, 0x88, 0x15, 0x00, 0xa0, 0xc9, 0x06, 0xbe, 0xd8)
# GetDriveType() return values
DRIVE_UNKNOWN, DRIVE_NO_ROOT_DIR, DRIVE_REMOVABLE, DRIVE_FIXED, DRIVE_REMOTE, DRIVE_CDROM, DRIVE_RAMDISK = 0, 1, 2, 3, 4, 5, 6
# SetupDiGetClassDevs flags
DIGCF_PRESENT = 0x00000002
DIGCF_ALLCLASSES = 0x00000004
DIGCF_DEVICEINTERFACE = 0x00000010
# Win32 error codes handled explicitly by the helpers below
ERROR_INSUFFICIENT_BUFFER = 0x7a
ERROR_MORE_DATA = 234
ERROR_INVALID_DATA = 0xd
ERROR_GEN_FAILURE = 31
HDEVINFO = HANDLE
# Registry property codes for SetupDiGetDeviceRegistryProperty (setupapi.h)
SPDRP_DEVICEDESC = DWORD(0x00000000)
SPDRP_HARDWAREID = DWORD(0x00000001)
SPDRP_COMPATIBLEIDS = DWORD(0x00000002)
SPDRP_UNUSED0 = DWORD(0x00000003)
SPDRP_SERVICE = DWORD(0x00000004)
SPDRP_UNUSED1 = DWORD(0x00000005)
SPDRP_UNUSED2 = DWORD(0x00000006)
SPDRP_CLASS = DWORD(0x00000007)
SPDRP_CLASSGUID = DWORD(0x00000008)
SPDRP_DRIVER = DWORD(0x00000009)
SPDRP_CONFIGFLAGS = DWORD(0x0000000A)
SPDRP_MFG = DWORD(0x0000000B)
SPDRP_FRIENDLYNAME = DWORD(0x0000000C)
SPDRP_LOCATION_INFORMATION = DWORD(0x0000000D)
SPDRP_PHYSICAL_DEVICE_OBJECT_NAME = DWORD(0x0000000E)
SPDRP_CAPABILITIES = DWORD(0x0000000F)
SPDRP_UI_NUMBER = DWORD(0x00000010)
SPDRP_UPPERFILTERS = DWORD(0x00000011)
SPDRP_LOWERFILTERS = DWORD(0x00000012)
SPDRP_BUSTYPEGUID = DWORD(0x00000013)
SPDRP_LEGACYBUSTYPE = DWORD(0x00000014)
SPDRP_BUSNUMBER = DWORD(0x00000015)
SPDRP_ENUMERATOR_NAME = DWORD(0x00000016)
SPDRP_SECURITY = DWORD(0x00000017)
SPDRP_SECURITY_SDS = DWORD(0x00000018)
SPDRP_DEVTYPE = DWORD(0x00000019)
SPDRP_EXCLUSIVE = DWORD(0x0000001A)
SPDRP_CHARACTERISTICS = DWORD(0x0000001B)
SPDRP_ADDRESS = DWORD(0x0000001C)
SPDRP_UI_NUMBER_DESC_FORMAT = DWORD(0x0000001D)
SPDRP_DEVICE_POWER_DATA = DWORD(0x0000001E)
SPDRP_REMOVAL_POLICY = DWORD(0x0000001F)
SPDRP_REMOVAL_POLICY_HW_DEFAULT = DWORD(0x00000020)
SPDRP_REMOVAL_POLICY_OVERRIDE = DWORD(0x00000021)
SPDRP_INSTALL_STATE = DWORD(0x00000022)
SPDRP_LOCATION_PATHS = DWORD(0x00000023)
# Parse the cfgmgr32 CONFIGRET code table (copied verbatim from cfgmgr32.h)
# into two lookup maps: CR_CODES maps a symbolic name to its numeric code and
# CR_CODE_NAMES maps a code back to a name. Alias lines (a name defined in
# terms of a previously defined name) resolve through CR_CODES, and a later
# alias overwrites the entry for its code in CR_CODE_NAMES.
CR_CODES, CR_CODE_NAMES = {}, {}
for line in '''\
#define CR_SUCCESS 0x00000000
#define CR_DEFAULT 0x00000001
#define CR_OUT_OF_MEMORY 0x00000002
#define CR_INVALID_POINTER 0x00000003
#define CR_INVALID_FLAG 0x00000004
#define CR_INVALID_DEVNODE 0x00000005
#define CR_INVALID_DEVINST CR_INVALID_DEVNODE
#define CR_INVALID_RES_DES 0x00000006
#define CR_INVALID_LOG_CONF 0x00000007
#define CR_INVALID_ARBITRATOR 0x00000008
#define CR_INVALID_NODELIST 0x00000009
#define CR_DEVNODE_HAS_REQS 0x0000000A
#define CR_DEVINST_HAS_REQS CR_DEVNODE_HAS_REQS
#define CR_INVALID_RESOURCEID 0x0000000B
#define CR_DLVXD_NOT_FOUND 0x0000000C
#define CR_NO_SUCH_DEVNODE 0x0000000D
#define CR_NO_SUCH_DEVINST CR_NO_SUCH_DEVNODE
#define CR_NO_MORE_LOG_CONF 0x0000000E
#define CR_NO_MORE_RES_DES 0x0000000F
#define CR_ALREADY_SUCH_DEVNODE 0x00000010
#define CR_ALREADY_SUCH_DEVINST CR_ALREADY_SUCH_DEVNODE
#define CR_INVALID_RANGE_LIST 0x00000011
#define CR_INVALID_RANGE 0x00000012
#define CR_FAILURE 0x00000013
#define CR_NO_SUCH_LOGICAL_DEV 0x00000014
#define CR_CREATE_BLOCKED 0x00000015
#define CR_NOT_SYSTEM_VM 0x00000016
#define CR_REMOVE_VETOED 0x00000017
#define CR_APM_VETOED 0x00000018
#define CR_INVALID_LOAD_TYPE 0x00000019
#define CR_BUFFER_SMALL 0x0000001A
#define CR_NO_ARBITRATOR 0x0000001B
#define CR_NO_REGISTRY_HANDLE 0x0000001C
#define CR_REGISTRY_ERROR 0x0000001D
#define CR_INVALID_DEVICE_ID 0x0000001E
#define CR_INVALID_DATA 0x0000001F
#define CR_INVALID_API 0x00000020
#define CR_DEVLOADER_NOT_READY 0x00000021
#define CR_NEED_RESTART 0x00000022
#define CR_NO_MORE_HW_PROFILES 0x00000023
#define CR_DEVICE_NOT_THERE 0x00000024
#define CR_NO_SUCH_VALUE 0x00000025
#define CR_WRONG_TYPE 0x00000026
#define CR_INVALID_PRIORITY 0x00000027
#define CR_NOT_DISABLEABLE 0x00000028
#define CR_FREE_RESOURCES 0x00000029
#define CR_QUERY_VETOED 0x0000002A
#define CR_CANT_SHARE_IRQ 0x0000002B
#define CR_NO_DEPENDENT 0x0000002C
#define CR_SAME_RESOURCES 0x0000002D
#define CR_NO_SUCH_REGISTRY_KEY 0x0000002E
#define CR_INVALID_MACHINENAME 0x0000002F
#define CR_REMOTE_COMM_FAILURE 0x00000030
#define CR_MACHINE_UNAVAILABLE 0x00000031
#define CR_NO_CM_SERVICES 0x00000032
#define CR_ACCESS_DENIED 0x00000033
#define CR_CALL_NOT_IMPLEMENTED 0x00000034
#define CR_INVALID_PROPERTY 0x00000035
#define CR_DEVICE_INTERFACE_ACTIVE 0x00000036
#define CR_NO_SUCH_DEVICE_INTERFACE 0x00000037
#define CR_INVALID_REFERENCE_STRING 0x00000038
#define CR_INVALID_CONFLICT_LIST 0x00000039
#define CR_INVALID_INDEX 0x0000003A
#define CR_INVALID_STRUCTURE_SIZE 0x0000003B'''.splitlines():
    line = line.strip()
    if not line:
        continue
    _, name, code = line.split()
    value = int(code, 16) if code.startswith('0x') else CR_CODES[code]
    CR_CODES[name] = value
    CR_CODE_NAMES[value] = name
# Flags for the CM_Get_Device_Interface_List family of functions
CM_GET_DEVICE_INTERFACE_LIST_PRESENT = 0
CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1
CM_GET_DEVICE_INTERFACE_LIST_BITS = 1
# The native DLLs all the cwrap() bindings below resolve against
setupapi = windll.setupapi
cfgmgr = windll.CfgMgr32
kernel32 = windll.Kernel32
def cwrap(name, restype, *argtypes, **kw):
    '''Create a ctypes binding for a native Windows function.

    :param name: The exported function name. Names starting with ``CM``
                 default to cfgmgr32, everything else to setupapi; pass
                 ``lib=`` to override (e.g. kernel32).
    :param restype: ctypes return type.
    :param argtypes: ctypes argument types.
    :param errcheck: Optional ctypes errcheck callable installed on the
                     returned function object.
    :param use_last_error: Whether the prototype captures GetLastError()
                           (defaults to True).
    '''
    check = kw.pop('errcheck', None)
    capture_last_error = bool(kw.pop('use_last_error', True))
    proto = WINFUNCTYPE(restype, *argtypes, use_last_error=capture_last_error)
    default_lib = cfgmgr if name.startswith('CM') else setupapi
    bound = proto((name, kw.pop('lib', default_lib)))
    if kw:
        # Anything left over was a typo in the caller
        raise TypeError('Unknown keyword arguments: %r' % kw)
    if check is not None:
        bound.errcheck = check
    return bound
def handle_err_check(result, func, args):
    '''ctypes errcheck for APIs that signal failure with INVALID_HANDLE_VALUE.'''
    if result != INVALID_HANDLE_VALUE:
        return result
    raise WinError(get_last_error())
def bool_err_check(result, func, args):
    '''ctypes errcheck for APIs that return FALSE/0 on failure.'''
    if result:
        return result
    raise WinError(get_last_error())
def config_err_check(result, func, args):
    '''ctypes errcheck for cfgmgr32 functions, which return CONFIGRET codes
    instead of using GetLastError(). On success the original args are
    returned so output parameters remain accessible to the caller.'''
    if result == CR_CODES['CR_SUCCESS']:
        return args
    raise WinError(result, 'The cfgmgr32 function failed with err: %s' % CR_CODE_NAMES.get(result, result))
# kernel32 bindings (drive/volume queries and generic file/device I/O)
GetLogicalDrives = cwrap('GetLogicalDrives', DWORD, errcheck=bool_err_check, lib=kernel32)
GetDriveType = cwrap('GetDriveTypeW', UINT, LPCWSTR, lib=kernel32)
GetVolumeNameForVolumeMountPoint = cwrap('GetVolumeNameForVolumeMountPointW', BOOL, LPCWSTR, LPWSTR, DWORD, errcheck=bool_err_check, lib=kernel32)
GetVolumePathNamesForVolumeName = cwrap('GetVolumePathNamesForVolumeNameW', BOOL, LPCWSTR, LPWSTR, DWORD, LPDWORD, errcheck=bool_err_check, lib=kernel32)
GetVolumeInformation = cwrap(
    'GetVolumeInformationW', BOOL, LPCWSTR, LPWSTR, DWORD, POINTER(DWORD), POINTER(DWORD), POINTER(DWORD), LPWSTR, DWORD, errcheck=bool_err_check, lib=kernel32)
ExpandEnvironmentStrings = cwrap('ExpandEnvironmentStringsW', DWORD, LPCWSTR, LPWSTR, DWORD, errcheck=bool_err_check, lib=kernel32)
CreateFile = cwrap('CreateFileW', HANDLE, LPCWSTR, DWORD, DWORD, c_void_p, DWORD, DWORD, HANDLE, errcheck=handle_err_check, lib=kernel32)
DeviceIoControl = cwrap('DeviceIoControl', BOOL, HANDLE, DWORD, LPVOID, DWORD, LPVOID, DWORD, POINTER(DWORD), LPVOID, errcheck=bool_err_check, lib=kernel32)
CloseHandle = cwrap('CloseHandle', BOOL, HANDLE, errcheck=bool_err_check, lib=kernel32)
QueryDosDevice = cwrap('QueryDosDeviceW', DWORD, LPCWSTR, LPWSTR, DWORD, errcheck=bool_err_check, lib=kernel32)
# setupapi bindings (device information set enumeration). Note that
# SetupDiEnumDeviceInterfaces/SetupDiEnumDeviceInfo deliberately have no
# errcheck: returning FALSE is how they signal end of enumeration.
SetupDiGetClassDevs = cwrap('SetupDiGetClassDevsW', HDEVINFO, POINTER(GUID), LPCWSTR, HWND, DWORD, errcheck=handle_err_check)
SetupDiEnumDeviceInterfaces = cwrap('SetupDiEnumDeviceInterfaces', BOOL, HDEVINFO, PSP_DEVINFO_DATA, POINTER(GUID), DWORD, PSP_DEVICE_INTERFACE_DATA)
SetupDiDestroyDeviceInfoList = cwrap('SetupDiDestroyDeviceInfoList', BOOL, HDEVINFO, errcheck=bool_err_check)
SetupDiGetDeviceInterfaceDetail = cwrap(
    'SetupDiGetDeviceInterfaceDetailW', BOOL, HDEVINFO, PSP_DEVICE_INTERFACE_DATA, PSP_DEVICE_INTERFACE_DETAIL_DATA, DWORD, POINTER(DWORD), PSP_DEVINFO_DATA)
SetupDiEnumDeviceInfo = cwrap('SetupDiEnumDeviceInfo', BOOL, HDEVINFO, DWORD, PSP_DEVINFO_DATA)
SetupDiGetDeviceRegistryProperty = cwrap(
    'SetupDiGetDeviceRegistryPropertyW', BOOL, HDEVINFO, PSP_DEVINFO_DATA, DWORD, POINTER(DWORD), POINTER(BYTE), DWORD, POINTER(DWORD))
# cfgmgr32 bindings (device tree navigation and device ids)
CM_Get_Parent = cwrap('CM_Get_Parent', CONFIGRET, POINTER(DEVINST), DEVINST, ULONG, errcheck=config_err_check)
CM_Get_Child = cwrap('CM_Get_Child', CONFIGRET, POINTER(DEVINST), DEVINST, ULONG, errcheck=config_err_check)
CM_Get_Sibling = cwrap('CM_Get_Sibling', CONFIGRET, POINTER(DEVINST), DEVINST, ULONG, errcheck=config_err_check)
CM_Get_Device_ID_Size = cwrap('CM_Get_Device_ID_Size', CONFIGRET, POINTER(ULONG), DEVINST, ULONG)
CM_Get_Device_ID = cwrap('CM_Get_Device_IDW', CONFIGRET, DEVINST, LPWSTR, ULONG, ULONG)
# }}}
# Utility functions {{{
# Lazily compiled regex that extracts vendor id, product id and revision from
# a PnP device id like usb\vid_1949&pid_0004&rev_0100
_devid_pat = None


def devid_pat():
    '''Return the (cached) case-insensitive VID/PID/REV matching regex.'''
    global _devid_pat
    if _devid_pat is None:
        _devid_pat = re.compile(
            r'VID_([a-f0-9]{4})&PID_([a-f0-9]{4})&REV_([a-f0-9:]{4})', re.I)
    return _devid_pat
class DeviceSet:
    '''Wrapper around a SetupAPI device information set (HDEVINFO).

    Enumerates device interfaces and/or device nodes matching an interface
    class GUID and PnP enumerator. The native list is freed in __del__.
    '''

    def __init__(self, guid=GUID_DEVINTERFACE_VOLUME, enumerator=None, flags=DIGCF_PRESENT | DIGCF_DEVICEINTERFACE):
        # guid may be None to enumerate all classes (used with DIGCF_ALLCLASSES)
        self.guid_ref, self.enumerator, self.flags = (None if guid is None else byref(guid)), enumerator, flags
        self.dev_list = SetupDiGetClassDevs(self.guid_ref, self.enumerator, None, self.flags)

    def __del__(self):
        # Release the native device information set
        SetupDiDestroyDeviceInfoList(self.dev_list)
        del self.dev_list

    def interfaces(self, ignore_errors=False, yield_devlist=False):
        '''Yield (devinfo, devpath) — or (dev_list, devinfo, devpath) when
        yield_devlist is True — for every device interface in this set.
        Failures while querying interface details raise unless
        ignore_errors is True, in which case the interface is skipped.'''
        interface_data = SP_DEVICE_INTERFACE_DATA()
        interface_data.cbSize = sizeof(SP_DEVICE_INTERFACE_DATA)
        buf = None  # detail buffer is reused across iterations
        i = -1
        while True:
            i += 1
            # Returning False signals the end of the enumeration
            if not SetupDiEnumDeviceInterfaces(self.dev_list, None, self.guid_ref, i, byref(interface_data)):
                break
            try:
                buf, devinfo, devpath = get_device_interface_detail_data(self.dev_list, byref(interface_data), buf)
            except OSError:
                if ignore_errors:
                    continue
                raise
            if yield_devlist:
                yield self.dev_list, devinfo, devpath
            else:
                yield devinfo, devpath

    def devices(self):
        '''Yield (dev_list, devinfo) for every device node in this set.'''
        devinfo = SP_DEVINFO_DATA()
        devinfo.cbSize = sizeof(SP_DEVINFO_DATA)
        i = -1
        while True:
            i += 1
            if not SetupDiEnumDeviceInfo(self.dev_list, i, byref(devinfo)):
                break
            yield self.dev_list, devinfo
def iterchildren(parent_devinst):
    '''Yield the DEVINST of every direct child of parent_devinst.

    Uses CM_Get_Child for the first child and CM_Get_Sibling for the rest;
    both report CR_NO_SUCH_DEVINST when there are no more nodes.'''
    NO_MORE = CR_CODES['CR_NO_SUCH_DEVINST']
    node = DEVINST(0)
    get_next, source = CM_Get_Child, parent_devinst
    while True:
        try:
            get_next(byref(node), source, 0)
        except OSError as err:
            if err.winerror == NO_MORE:
                return
            raise
        yield node.value
        # After the first child, walk siblings of the node we just yielded
        get_next, source = CM_Get_Sibling, node
def iterdescendants(parent_devinst):
    '''Yield all descendants of parent_devinst in depth-first pre-order.'''
    pending = [iterchildren(parent_devinst)]
    while pending:
        try:
            node = next(pending[-1])
        except StopIteration:
            pending.pop()
            continue
        yield node
        pending.append(iterchildren(node))
def iterancestors(devinst):
    '''Yield ancestors of devinst, from immediate parent up to the root.'''
    NO_MORE = CR_CODES['CR_NO_SUCH_DEVINST']
    node = DEVINST(devinst)
    while True:
        try:
            CM_Get_Parent(byref(node), node, 0)
        except OSError as err:
            if err.winerror != NO_MORE:
                raise
            return
        yield node.value
def device_io_control(handle, which, inbuf, outbuf, initbuf):
    '''Call DeviceIoControl, doubling outbuf until the result fits.

    initbuf is invoked on inbuf before every attempt so the request is
    re-initialized after a resize. Returns (outbuf, bytes_returned).'''
    returned = DWORD(0)
    while True:
        initbuf(inbuf)
        try:
            DeviceIoControl(handle, which, inbuf, len(inbuf), outbuf, len(outbuf), byref(returned), None)
        except OSError as err:
            if err.winerror not in (ERROR_INSUFFICIENT_BUFFER, ERROR_MORE_DATA):
                raise
            outbuf = create_string_buffer(2*len(outbuf))
        else:
            return outbuf, returned
def get_storage_number(devpath):
    '''Return the STORAGE_DEVICE_NUMBER tuple for the device at devpath.

    Opens the device with no access rights (query only) and always closes
    the handle, even when the ioctl fails.'''
    info = STORAGE_DEVICE_NUMBER()
    returned = DWORD(0)
    handle = CreateFile(devpath, 0, FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, 0, None)
    try:
        DeviceIoControl(handle, IOCTL_STORAGE_GET_DEVICE_NUMBER, None, 0, byref(info), sizeof(STORAGE_DEVICE_NUMBER), byref(returned), None)
    finally:
        CloseHandle(handle)
    return info.as_tuple()
def get_device_id(devinst, buf=None):
    '''Return (device_id, buf) for a DEVINST, growing buf as needed.

    The buffer is returned so callers can reuse it for further queries.'''
    if buf is None:
        buf = create_unicode_buffer(512)
    while True:
        res = CM_Get_Device_ID(devinst, buf, len(buf), 0)
        if res == CR_CODES['CR_SUCCESS']:
            break
        if res == CR_CODES['CR_BUFFER_SMALL']:
            # Ask for the exact required size and retry
            needed = ULONG(0)
            CM_Get_Device_ID_Size(byref(needed), devinst, 0)
            buf = create_unicode_buffer(needed.value)
            continue
        raise WinError(res, 'The cfgmgr32 function failed with err: %s' % CR_CODE_NAMES.get(res, res))
    return wstring_at(buf), buf
def expand_environment_strings(src):
    '''Expand %VAR% style references in src using the Windows API.'''
    # First call with no buffer returns the required size in characters
    size = ExpandEnvironmentStrings(src, None, 0)
    while True:
        buf = create_unicode_buffer(size)
        needed = ExpandEnvironmentStrings(src, buf, len(buf))
        if needed <= size:
            return buf.value
        size = needed  # environment changed between calls; retry bigger
def convert_registry_data(raw, size, dtype):
    '''Convert `size` bytes of raw registry data of type `dtype` into the
    corresponding Python value (None, bytes, str, tuple of str, or int).

    :raises ValueError: for registry types this code does not handle.'''
    if dtype == winreg.REG_NONE:
        return None
    if dtype == winreg.REG_BINARY:
        return string_at(raw, size)
    if dtype in (winreg.REG_SZ, winreg.REG_EXPAND_SZ, winreg.REG_MULTI_SZ):
        # Registry strings are UTF-16 (2 bytes per code unit), NUL padded
        text = wstring_at(raw, size // 2).rstrip('\0')
        if dtype == winreg.REG_MULTI_SZ:
            return tuple(text.split('\0'))
        if dtype == winreg.REG_EXPAND_SZ:
            return expand_environment_strings(text)
        return text
    if dtype == winreg.REG_DWORD:
        return cast(raw, LPDWORD).contents.value if size else 0
    if dtype == REG_QWORD:
        return cast(raw, POINTER(QWORD)).contents.value if size else 0
    raise ValueError('Unsupported data type: %r' % dtype)
def get_device_registry_property(dev_list, p_devinfo, property_type=SPDRP_HARDWAREID, buf=None):
    '''Read a device registry property (default: the hardware id list).

    Returns (buf, value) where value is the converted property (see
    convert_registry_data) or None when the property is absent
    (ERROR_INVALID_DATA). The buffer is returned for reuse by the caller.'''
    if buf is None:
        buf = create_string_buffer(1024)
    data_type = DWORD(0)
    required_size = DWORD(0)
    ans = None
    while True:
        if not SetupDiGetDeviceRegistryProperty(dev_list, p_devinfo, property_type, byref(data_type), cast(buf, POINTER(BYTE)), len(buf), byref(required_size)):
            err = get_last_error()
            if err == ERROR_INSUFFICIENT_BUFFER:
                # Grow to the exact required size and retry
                buf = create_string_buffer(required_size.value)
                continue
            if err == ERROR_INVALID_DATA:
                # Property does not exist for this device; report None
                break
            raise WinError(err)
        ans = convert_registry_data(buf, required_size.value, data_type.value)
        break
    return buf, ans
def get_device_interface_detail_data(dev_list, p_interface_data, buf=None):
    '''Return (buf, devinfo, devpath) for a device interface.

    devpath is the wide string DevicePath extracted from the variable-sized
    SP_DEVICE_INTERFACE_DETAIL_DATA written into buf; devinfo is the
    associated SP_DEVINFO_DATA. buf is returned for reuse.'''
    if buf is None:
        buf = create_string_buffer(512)
    detail = cast(buf, PSP_DEVICE_INTERFACE_DETAIL_DATA)
    # See http://stackoverflow.com/questions/10728644/properly-declare-sp-device-interface-detail-data-for-pinvoke
    # for why cbSize needs to be hardcoded below
    detail.contents.cbSize = 8
    required_size = DWORD(0)
    devinfo = SP_DEVINFO_DATA()
    devinfo.cbSize = sizeof(devinfo)
    while True:
        if not SetupDiGetDeviceInterfaceDetail(dev_list, p_interface_data, detail, len(buf), byref(required_size), byref(devinfo)):
            err = get_last_error()
            if err == ERROR_INSUFFICIENT_BUFFER:
                # Re-create the buffer (with headroom) and re-apply the
                # hardcoded cbSize, since the cast is into the new memory
                buf = create_string_buffer(required_size.value + 50)
                detail = cast(buf, PSP_DEVICE_INTERFACE_DETAIL_DATA)
                detail.contents.cbSize = 8
                continue
            raise WinError(err)
        break
    # DevicePath starts right after the cbSize (DWORD) member
    return buf, devinfo, wstring_at(addressof(buf) + sizeof(SP_DEVICE_INTERFACE_DETAIL_DATA._fields_[0][1]))
def get_volume_information(drive_letter):
    '''Query volume name, filesystem, serial number, max component length and
    filesystem capability flags for a drive (e.g. ``'C'``). The capability
    flags are exposed as individual boolean entries in the returned dict.'''
    if not drive_letter.endswith('\\'):
        drive_letter += ':\\'
    fs_name = create_unicode_buffer(255)
    vol_name = create_unicode_buffer(500)
    serial, max_component, flags = DWORD(0), DWORD(0), DWORD(0)
    GetVolumeInformation(drive_letter, vol_name, len(vol_name), byref(serial),
                         byref(max_component), byref(flags), fs_name, len(fs_name))
    ans = {
        'name': vol_name.value,
        'filesystem': fs_name.value,
        'serial_number': serial.value,
        'max_component_length': max_component.value,
    }
    flag_bits = (
        ('FILE_CASE_PRESERVED_NAMES', 0x00000002), ('FILE_CASE_SENSITIVE_SEARCH', 0x00000001),
        ('FILE_FILE_COMPRESSION', 0x00000010), ('FILE_NAMED_STREAMS', 0x00040000),
        ('FILE_PERSISTENT_ACLS', 0x00000008), ('FILE_READ_ONLY_VOLUME', 0x00080000),
        ('FILE_SEQUENTIAL_WRITE_ONCE', 0x00100000), ('FILE_SUPPORTS_ENCRYPTION', 0x00020000),
        ('FILE_SUPPORTS_EXTENDED_ATTRIBUTES', 0x00800000), ('FILE_SUPPORTS_HARD_LINKS', 0x00400000),
        ('FILE_SUPPORTS_OBJECT_IDS', 0x00010000), ('FILE_SUPPORTS_OPEN_BY_FILE_ID', 0x01000000),
        ('FILE_SUPPORTS_REPARSE_POINTS', 0x00000080), ('FILE_SUPPORTS_SPARSE_FILES', 0x00000040),
        ('FILE_SUPPORTS_TRANSACTIONS', 0x00200000), ('FILE_SUPPORTS_USN_JOURNAL', 0x02000000),
        ('FILE_UNICODE_ON_DISK', 0x00000004), ('FILE_VOLUME_IS_COMPRESSED', 0x00008000),
        ('FILE_VOLUME_QUOTAS', 0x00000020),
    )
    fval = flags.value
    for fname, bit in flag_bits:
        ans[fname] = bool(fval & bit)
    return ans
def get_volume_pathnames(volume_id, buf=None):
    '''Return (buf, paths) where paths is the list of mount points for the
    volume with the given volume GUID path. buf is returned for reuse.'''
    if buf is None:
        buf = create_unicode_buffer(512)
    needed = DWORD(0)
    while True:
        try:
            GetVolumePathNamesForVolumeName(volume_id, buf, len(buf), byref(needed))
        except OSError as err:
            if err.winerror != ERROR_MORE_DATA:
                raise
            buf = create_unicode_buffer(needed.value + 10)
        else:
            break
    # The result is a NUL separated (and NUL terminated) list of paths
    joined = wstring_at(buf, needed.value)
    return buf, [p for p in joined.split('\0') if p]
# }}}
# def scan_usb_devices(): {{{
_USBDevice = namedtuple('USBDevice', 'vendor_id product_id bcd devid devinst')


class USBDevice(_USBDevice):
    '''Record for one enumerated USB device node. vendor_id/product_id/bcd
    are ints (or None when the device id could not be parsed); devid is the
    lowercased PnP device id and devinst the cfgmgr32 DEVINST handle.'''

    def __repr__(self):
        def fmt(val):
            return 'None' if val is None else '0x%x' % val
        return 'USBDevice(vendor_id={} product_id={} bcd={} devid={} devinst={})'.format(
            fmt(self.vendor_id), fmt(self.product_id), fmt(self.bcd), self.devid, self.devinst)
def parse_hex(x):
    '''Parse a hex field from a device id. Some device firmware emits ``:``
    characters inside the REV field; each is mapped to the hex digit ``a``
    (long-standing behavior) so the value still parses.'''
    cleaned = x.replace(':', 'a')
    return int(cleaned, 16)
def iterusbdevices():
    '''Yield a USBDevice for every USB device node currently present.

    Devices whose id does not match the VID/PID/REV pattern (or whose
    fields fail to parse) are still yielded, with None numeric fields.'''
    pat = devid_pat()
    buf = None
    dev_set = DeviceSet(guid=None, enumerator='USB', flags=DIGCF_PRESENT | DIGCF_ALLCLASSES)
    for dev_list, devinfo in dev_set.devices():
        buf, devid = get_device_registry_property(dev_list, byref(devinfo), buf=buf)
        if not devid:
            continue
        devid = devid[0].lower()
        vid = pid = bcd = None
        m = pat.search(devid)
        if m is not None:
            try:
                vid, pid, bcd = map(parse_hex, m.group(1, 2, 3))
            except Exception:
                vid = pid = bcd = None
        yield USBDevice(vid, pid, bcd, devid, devinfo.DevInst)
def scan_usb_devices():
    '''Return a snapshot (tuple) of all currently connected USB devices.'''
    devices = iterusbdevices()
    return tuple(devices)
# }}}
def get_drive_letters_for_device(usbdev, storage_number_map=None, debug=False):  # {{{
    '''
    Get the drive letters for a connected device. The drive letters are sorted
    by storage number, which (I think) corresponds to the order they are
    exported by the firmware.

    :param usbdevice: As returned by :function:`scan_usb_devices`
    '''
    ans = {'pnp_id_map': {}, 'drive_letters':[], 'readonly_drives':set(), 'sort_map':{}}
    sn_map = get_storage_number_map(debug=debug) if storage_number_map is None else storage_number_map
    if debug:
        prints('Storage number map:')
        prints(pformat(sn_map))
    if not sn_map:
        return ans
    # Split off a trailing '&mi_NN' interface suffix, if any; its presence
    # means usbdev is one interface of a composite device
    devid, mi = (usbdev.devid or '').rpartition('&')[::2]
    if mi.startswith('mi_'):
        # Composite device: merge results from every interface that shares
        # the same base device id
        if debug:
            prints('Iterating over all devices of composite device:', devid)
        dl = ans['drive_letters']
        for c in iterusbdevices():
            if c.devid and c.devid.startswith(devid):
                a = get_drive_letters_for_device_single(c, sn_map, debug=debug)
                if debug:
                    prints('Drive letters for:', c.devid, ':', a['drive_letters'])
                for m in ('pnp_id_map', 'sort_map'):
                    ans[m].update(a[m])
                ans['readonly_drives'] |= a['readonly_drives']
                for x in a['drive_letters']:
                    if x not in dl:
                        dl.append(x)
        # Keep the merged letters in (storage number, partition) order
        ans['drive_letters'].sort(key=ans['sort_map'].get)
        return ans
    else:
        return get_drive_letters_for_device_single(usbdev, sn_map, debug=debug)
def get_drive_letters_for_device_single(usbdev, storage_number_map, debug=False):
    '''Drive letters for one (non-composite) USB device node.

    Walks the disk interfaces that are PnP descendants of usbdev, maps each
    one to drive letters via its storage number, and records readonly status.
    Returns the same dict shape as get_drive_letters_for_device.'''
    ans = {'pnp_id_map': {}, 'drive_letters':[], 'readonly_drives':set(), 'sort_map':{}}
    descendants = frozenset(iterdescendants(usbdev.devinst))
    for devinfo, devpath in DeviceSet(GUID_DEVINTERFACE_DISK).interfaces():
        if devinfo.DevInst in descendants:
            # NOTE: devid is only computed (and only referenced) when
            # debug is True; the prints below are all debug-guarded
            if debug:
                try:
                    devid = get_device_id(devinfo.DevInst)[0]
                except Exception:
                    devid = 'Unknown'
            try:
                storage_number = get_storage_number(devpath)
            except OSError as err:
                if debug:
                    prints(f'Failed to get storage number for: {devid} with error: {as_unicode(err)}')
                continue
            if debug:
                prints(f'Storage number for {devid}: {storage_number}')
            if storage_number:
                # (device type, device number) keys the partition list
                partitions = storage_number_map.get(storage_number[:2])
                drive_letters = []
                for partition_number, dl in partitions or ():
                    drive_letters.append(dl)
                    ans['sort_map'][dl] = storage_number.number, partition_number
                if drive_letters:
                    for dl in drive_letters:
                        ans['pnp_id_map'][dl] = devpath
                        ans['drive_letters'].append(dl)
    ans['drive_letters'].sort(key=ans['sort_map'].get)
    for dl in ans['drive_letters']:
        try:
            if is_readonly(dl):
                ans['readonly_drives'].add(dl)
        except OSError as err:
            if debug:
                prints(f'Failed to get readonly status for drive: {dl} with error: {as_unicode(err)}')
    return ans
def get_storage_number_map(drive_types=(DRIVE_REMOVABLE, DRIVE_FIXED), debug=False):
    '''Map (device type, device number) -> sorted [(partition, letter), ...]
    for all drives on the system whose type is in drive_types.'''
    mask = GetLogicalDrives()
    ans = defaultdict(list)
    for idx, letter in enumerate(string.ascii_uppercase):
        if not (mask & (1 << idx)):
            continue  # no drive mounted at this letter
        if GetDriveType(letter + ':' + os.sep) not in drive_types:
            continue
        try:
            sn = get_storage_number('\\\\.\\' + letter + ':')
        except OSError as err:
            if debug:
                prints(f'Failed to get storage number for drive: {letter} with error: {as_unicode(err)}')
            continue
        ans[sn[:2]].append((sn[2], letter))
    for partitions in ans.values():
        partitions.sort(key=itemgetter(0))
    return dict(ans)
def get_storage_number_map_alt(debug=False):
    ' Alternate implementation that works without needing to call GetDriveType() (which causes floppy drives to seek) '
    wbuf = create_unicode_buffer(512)
    ans = defaultdict(list)
    for devinfo, devpath in DeviceSet().interfaces():
        # GetVolumeNameForVolumeMountPoint requires a trailing separator
        if not devpath.endswith(os.sep):
            devpath += os.sep
        try:
            GetVolumeNameForVolumeMountPoint(devpath, wbuf, len(wbuf))
        except OSError as err:
            if debug:
                prints(f'Failed to get volume id for drive: {devpath} with error: {as_unicode(err)}')
            continue
        vname = wbuf.value
        try:
            wbuf, names = get_volume_pathnames(vname, buf=wbuf)
        except OSError as err:
            if debug:
                prints(f'Failed to get mountpoints for volume {devpath} with error: {as_unicode(err)}')
            continue
        # Find the first mountpoint that is a plain drive letter (e.g. 'E:\')
        # — the for/else skips volumes mounted only at directory paths
        for name in names:
            name = name.upper()
            if len(name) == 3 and name.endswith(':\\') and name[0] in string.ascii_uppercase:
                break
        else:
            if debug:
                prints(f'Ignoring volume {devpath} as it has no assigned drive letter. Mountpoints: {names}')
            continue
        try:
            sn = get_storage_number('\\\\.\\' + name[0] + ':')
            ans[sn[:2]].append((sn[2], name[0]))
        except OSError as err:
            if debug:
                prints(f'Failed to get storage number for drive: {name[0]} with error: {as_unicode(err)}')
            continue
    # Sort each device's partitions by partition number
    for val in itervalues(ans):
        val.sort(key=itemgetter(0))
    return dict(ans)
# }}}
def is_usb_device_connected(vendor_id, product_id):  # {{{
    '''True iff a USB device with the given vendor and product id is present.'''
    return any(
        dev.vendor_id == vendor_id and dev.product_id == product_id
        for dev in iterusbdevices())
# }}}
def get_usb_info(usbdev, debug=False):  # {{{
    '''
    The USB info (manufacturer/product names and serial number) Requires communication with the hub the device is connected to.

    :param usbdev: A usb device as returned by :function:`scan_usb_devices`
    '''
    ans = {}
    # Strings must be read through the parent hub, so find the closest
    # ancestor of usbdev that is a USB hub interface
    hub_map = {devinfo.DevInst:path for devinfo, path in DeviceSet(guid=GUID_DEVINTERFACE_USB_HUB).interfaces()}
    for parent in iterancestors(usbdev.devinst):
        parent_path = hub_map.get(parent)
        if parent_path is not None:
            break
    else:
        if debug:
            prints('Cannot get USB info as parent of device is not a HUB or device has no parent (was probably disconnected)')
        return ans
    # The hub port number is exposed as the SPDRP_ADDRESS registry property
    for devlist, devinfo in DeviceSet(guid=GUID_DEVINTERFACE_USB_DEVICE).devices():
        if devinfo.DevInst == usbdev.devinst:
            device_port = get_device_registry_property(devlist, byref(devinfo), SPDRP_ADDRESS)[1]
            break
    else:
        return ans
    if not device_port:
        if debug:
            prints('Cannot get usb info as the SPDRP_ADDRESS property is not present in the registry (can happen with broken USB hub drivers)')
        return ans
    handle = CreateFile(parent_path, GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, None, OPEN_EXISTING, 0, None)
    try:
        buf, dd = get_device_descriptor(handle, device_port)
        # Sanity check: make sure the device on that port is still the one
        # we were asked about before reading its string descriptors
        if dd.idVendor == usbdev.vendor_id and dd.idProduct == usbdev.product_id and dd.bcdDevice == usbdev.bcd:
            # Dont need to read language since we only care about english names
            # buf, langs = get_device_languages(handle, device_port)
            # print(111, langs)
            for index, name in ((dd.iManufacturer, 'manufacturer'), (dd.iProduct, 'product'), (dd.iSerialNumber, 'serial_number')):
                if index:  # a zero index means the string is not present
                    try:
                        buf, ans[name] = get_device_string(handle, device_port, index, buf=buf)
                    except OSError as err:
                        if debug:
                            # Note that I have observed that this fails
                            # randomly after some time of my Kindle being
                            # connected. Disconnecting and reconnecting causes
                            # it to start working again.
                            prints('Failed to read %s from device, with error: [%d] %s' % (name, err.winerror, as_unicode(err)))
    finally:
        CloseHandle(handle)
    return ans
def alloc_descriptor_buf(buf):
    '''Return a zeroed buffer large enough for a USB descriptor request,
    allocating a fresh one when buf is None, otherwise zeroing and reusing it.'''
    if buf is None:
        return create_string_buffer(sizeof(USB_DESCRIPTOR_REQUEST) + 700)
    memset(buf, 0, len(buf))
    return buf
def get_device_descriptor(hub_handle, device_port, buf=None):
    '''Read the USB device descriptor of the device attached at device_port
    of the hub opened as hub_handle. Returns (buf, descriptor_copy).'''
    buf = alloc_descriptor_buf(buf)

    def fill(b):
        # Only the connection index needs to be set in the request
        cast(b, POINTER(USB_NODE_CONNECTION_INFORMATION_EX)).contents.ConnectionIndex = device_port

    buf, _returned = device_io_control(hub_handle, IOCTL_USB_GET_NODE_CONNECTION_INFORMATION_EX, buf, buf, fill)
    info = cast(buf, POINTER(USB_NODE_CONNECTION_INFORMATION_EX)).contents
    return buf, USB_DEVICE_DESCRIPTOR.from_buffer_copy(info.DeviceDescriptor)
def get_device_string(hub_handle, device_port, index, buf=None, lang=0x409):
    '''Read USB string descriptor number `index` (manufacturer/product/serial)
    from the device on `device_port` of the hub, in language `lang`
    (0x409 = US English). Returns (buf, decoded_string).'''
    buf = alloc_descriptor_buf(buf)

    def initbuf(b):
        # Build a standard GET_DESCRIPTOR setup packet for a string descriptor
        p = cast(b, PUSB_DESCRIPTOR_REQUEST).contents
        p.ConnectionIndex = device_port
        sp = p.SetupPacket
        sp.bmRequest, sp.bRequest = 0x80, USB_REQUEST_GET_DESCRIPTOR
        # wValue low byte = descriptor index, high byte = descriptor type
        sp.wValue[0], sp.wValue[1] = index, USB_STRING_DESCRIPTOR_TYPE
        sp.wIndex = lang
        sp.wLength = MAXIMUM_USB_STRING_LENGTH + 2

    buf, bytes_returned = device_io_control(hub_handle, IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, buf, buf, initbuf)
    data = cast(buf, PUSB_DESCRIPTOR_REQUEST).contents.Data
    sz, dtype = data.bLength, data.bType
    if dtype != 0x03:
        # 0x03 is the USB string descriptor type
        raise OSError(errno.EINVAL, 'Invalid datatype for string descriptor: 0x%x' % dtype)
    # The payload is UTF-16; strip trailing NULs from the decoded text
    return buf, wstring_at(addressof(data.String), sz // 2).rstrip('\0')
def get_device_languages(hub_handle, device_port, buf=None):
    ' Get the languages supported by the device for strings '
    buf = alloc_descriptor_buf(buf)

    def initbuf(b):
        # String descriptor 0 (wValue[0] left at 0) lists supported LANGIDs
        p = cast(b, PUSB_DESCRIPTOR_REQUEST).contents
        p.ConnectionIndex = device_port
        sp = p.SetupPacket
        sp.bmRequest, sp.bRequest = 0x80, USB_REQUEST_GET_DESCRIPTOR
        sp.wValue[1] = USB_STRING_DESCRIPTOR_TYPE
        sp.wLength = MAXIMUM_USB_STRING_LENGTH + 2

    buf, bytes_returned = device_io_control(hub_handle, IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, buf, buf, initbuf)
    data = cast(buf, PUSB_DESCRIPTOR_REQUEST).contents.Data
    sz, dtype = data.bLength, data.bType
    if dtype != 0x03:
        raise OSError(errno.EINVAL, 'Invalid datatype for string descriptor: 0x%x' % dtype)
    # Reinterpret the payload as an array of 16-bit LANGID values,
    # dropping any zero entries
    data = cast(data.String, POINTER(USHORT*(sz//2)))
    return buf, list(filter(None, data.contents))
# }}}
def is_readonly(drive_letter):  # {{{
    '''True iff the volume mounted at drive_letter reports itself read-only.'''
    info = get_volume_information(drive_letter)
    return info['FILE_READ_ONLY_VOLUME']
# }}}
def develop():  # {{{
    '''Interactive debugging helper: scan USB devices, run every device
    plugin's detection against them and print drives/USB info for matches.'''
    from calibre.customize.ui import device_plugins
    usb_devices = scan_usb_devices()
    drive_letters = set()
    pprint(usb_devices)
    print()
    devplugins = list(sorted(device_plugins(), key=lambda x: x.__class__.__name__))
    for dev in devplugins:
        dev.startup()
    for dev in devplugins:
        if dev.MANAGES_DEVICE_PRESENCE:
            # Such plugins do their own detection; skip them here
            continue
        connected, usbdev = dev.is_usb_connected(usb_devices, debug=True)
        if connected:
            print('\n')
            print(f'Potentially connected device: {dev.get_gui_name()} at {usbdev}')
            print()
            print('Drives for this device:')
            data = get_drive_letters_for_device(usbdev, debug=True)
            pprint(data)
            drive_letters |= set(data['drive_letters'])
            print()
            print('Is device connected:', is_usb_device_connected(*usbdev[:2]))
            print()
            print('Device USB data:', get_usb_info(usbdev, debug=True))
def drives_for(vendor_id, product_id=None):
    '''Debug helper: print drive letters and USB info for every connected
    device matching vendor_id (and product_id when given).'''
    devices = scan_usb_devices()
    pprint(devices)
    for usbdev in devices:
        if usbdev.vendor_id != vendor_id:
            continue
        if product_id is not None and usbdev.product_id != product_id:
            continue
        print(f'Drives for: {usbdev}')
        pprint(get_drive_letters_for_device(usbdev, debug=True))
        print('USB info:', get_usb_info(usbdev, debug=True))


if __name__ == '__main__':
    develop()
# }}}
| 39,902 | Python | .py | 895 | 37.335196 | 160 | 0.644848 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,728 | scanner.py | kovidgoyal_calibre/src/calibre/devices/scanner.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Device scanner that fetches the list of USB devices on the system in a
platform dependent manner.
'''
import os
import sys
import time
from collections import namedtuple
from threading import Lock
from calibre import as_unicode, prints
from calibre.constants import isfreebsd, islinux, ismacos, isnetbsd, iswindows
osx_scanner = linux_scanner = freebsd_scanner = netbsd_scanner = None
if iswindows:
    drive_ok_lock = Lock()

    def drive_is_ok(letter, max_tries=10, debug=False):
        '''Return True once free-space information for the drive can be read.

        Windows may keep a newly connected drive locked briefly; retry up to
        max_tries times with a short sleep between attempts, reporting the
        failure (when debug) only on the final attempt.'''
        from calibre_extensions import winutil
        with drive_ok_lock:
            last = max_tries - 1
            for attempt in range(max_tries):
                try:
                    winutil.get_disk_free_space(letter+':\\')
                    return True
                except Exception as e:
                    if debug and attempt >= last:
                        prints('Unable to get free space for drive:', letter)
                        prints(as_unicode(e))
                    time.sleep(0.2)
            return False
_USBDevice = namedtuple('USBDevice',
        'vendor_id product_id bcd manufacturer product serial')


class USBDevice(_USBDevice):
    '''Immutable USB device record. busnum/devnum are mutable extras that
    default to -1 and are filled in by platform scanners when available.'''

    def __new__(cls, *args, **kwargs):
        inst = super().__new__(cls, *args)
        inst.busnum = inst.devnum = -1
        return inst

    def __repr__(self):
        return ('USBDevice(busnum=%s, devnum=%s, '
                'vendor_id=0x%04x, product_id=0x%04x, bcd=0x%04x, '
                'manufacturer=%s, product=%s, serial=%s)')%(
                self.busnum, self.devnum, self.vendor_id, self.product_id,
                self.bcd, self.manufacturer, self.product, self.serial)

    __str__ = __repr__
    __unicode__ = __repr__
class LibUSBScanner:
    '''USB scanner backed by the calibre libusb extension. Works on any
    platform with libusb; used directly on FreeBSD and as a fallback.'''

    def __call__(self):
        # Import lazily so merely constructing the scanner never loads libusb
        if not hasattr(self, 'libusb'):
            from calibre_extensions import libusb
            self.libusb = libusb
        ans = set()
        seen = set()
        for fingerprint, ids in self.libusb.get_devices():
            seen.add(fingerprint)
            man = ids.get('manufacturer', None)
            prod = ids.get('product', None)
            serial = ids.get('serial', None)
            # fingerprint[:2] is (busnum, devnum); the rest feeds USBDevice
            dev = fingerprint[2:] + (man, prod, serial)
            dev = USBDevice(*dev)
            dev.busnum, dev.devnum = fingerprint[:2]
            ans.add(dev)
        # Drop cache entries for devices no longer present
        extra = set(self.libusb.cache) - seen
        for x in extra:
            self.libusb.cache.pop(x, None)
        return ans

    def check_for_mem_leak(self):
        # Developer utility: repeatedly scan and report memory growth
        import gc

        from calibre.utils.mem import memory
        memory()
        for num in (1, 10, 100):
            start = memory()
            for i in range(num):
                self()
            for i in range(3):
                gc.collect()
            print('Mem consumption increased by:', memory() - start, 'MB', end=' ')
            print('after', num, 'repeats')
class LinuxScanner:
    '''USB scanner that reads device metadata directly from sysfs.

    USB hubs are skipped; devices whose vendor/product/bcd ids cannot be
    parsed are ignored, while missing string fields default to ''.'''

    SYSFS_PATH = os.environ.get('SYSFS_PATH', '/sys')

    def __init__(self):
        self.base = os.path.join(self.SYSFS_PATH, 'subsystem', 'usb', 'devices')
        if not os.path.exists(self.base):
            # Older sysfs layout
            self.base = os.path.join(self.SYSFS_PATH, 'bus', 'usb', 'devices')
        self.ok = os.path.exists(self.base)

    def __call__(self):
        if not self.ok:
            raise RuntimeError('DeviceScanner requires the /sys filesystem to work.')

        def read(path):
            with open(path, 'rb') as f:
                return f.read().strip()

        found = set()
        for entry in os.listdir(self.base):
            base = os.path.join(self.base, entry)
            try:
                # Skip USB HUBs (device class 09)
                if read(os.path.join(base, 'bDeviceClass')) == b'09':
                    continue
            except Exception:
                continue
            fields = []
            # The numeric ids are mandatory: give up on this device if any
            # of them is missing or unparseable
            parsed = True
            for fname in ('idVendor', 'idProduct', 'bcdDevice'):
                try:
                    fields.append(int(b'0x' + read(os.path.join(base, fname)), 16))
                except Exception:
                    parsed = False
                    break
            if not parsed:
                continue
            # String fields are optional and default to the empty string
            for fname in ('manufacturer', 'product', 'serial'):
                try:
                    fields.append(read(os.path.join(base, fname)).decode('utf-8'))
                except Exception:
                    fields.append('')
            dev = USBDevice(*fields)
            for attr in ('busnum', 'devnum'):
                try:
                    setattr(dev, attr, int(read(os.path.join(base, attr))))
                except Exception:
                    pass
            found.add(dev)
        return found
# Instantiate the platform scanners at import time. The libusb scanner is
# always created and serves as the fallback (see DeviceScanner.__init__).
if islinux:
    linux_scanner = LinuxScanner()
libusb_scanner = LibUSBScanner()
if isfreebsd:
    freebsd_scanner = libusb_scanner
''' NetBSD support currently not written yet '''
if isnetbsd:
    netbsd_scanner = None
class DeviceScanner:
    '''Queries the operating system for connected USB devices using the
    scanner appropriate for the current platform.'''

    def __init__(self, *args):
        if iswindows:
            from calibre.devices.winusb import scan_usb_devices as win_scanner
            chosen = win_scanner
        elif ismacos:
            chosen = osx_scanner
        elif isfreebsd:
            chosen = freebsd_scanner
        elif isnetbsd:
            chosen = netbsd_scanner
        elif islinux:
            chosen = linux_scanner
        else:
            chosen = libusb_scanner
        # Platforms without a native scanner (e.g. NetBSD) fall back to libusb.
        self.scanner = libusb_scanner if chosen is None else chosen
        self.devices = []

    def scan(self):
        '''Fetch list of connected USB devices from operating system'''
        self.devices = self.scanner()

    def is_device_connected(self, device, debug=False, only_presence=False):
        ''' If only_presence is True don't perform any expensive checks '''
        return device.is_usb_connected(
            self.devices, debug=debug, only_presence=only_presence)
def test_for_mem_leak():
    '''
    Diagnostic harness: run scan() in increasing batches and report memory
    growth plus Python object-count differences after each batch.
    '''
    import gc
    from calibre.utils.mem import diff_hists, gc_histogram, memory
    # Disable automatic collection so the explicit collect() calls below
    # fully control when garbage is reclaimed between measurements.
    gc.disable()
    scanner = DeviceScanner()
    scanner.scan()
    memory()  # load the psutil library
    for i in range(3):
        gc.collect()
    for reps in (1, 10, 100, 1000):
        # Settle the heap before taking the baseline histogram/memory.
        for i in range(3):
            gc.collect()
        h1 = gc_histogram()
        startmem = memory()
        for i in range(reps):
            scanner.scan()
        for i in range(3):
            gc.collect()
        usedmem = memory(startmem)
        prints('Memory used in %d repetitions of scan(): %.5f KB'%(reps,
            1024*usedmem))
        prints('Differences in python object counts:')
        diff_hists(h1, gc_histogram())
        prints()
def main(args=sys.argv):
    '''Command-line entry point: run the scanner memory-leak check.'''
    test_for_mem_leak()
    return 0
if __name__ == '__main__':
    sys.exit(main())
| 7,246 | Python | .py | 194 | 26.994845 | 85 | 0.557712 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,729 | misc.py | kovidgoyal_calibre/src/calibre/devices/misc.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from calibre import fsync
from calibre.devices.usbms.driver import USBMS
class PALMPRE(USBMS):
    '''Driver for the Palm Pre phone exposed as a USB mass-storage device.'''
    name = 'Palm Pre Device Interface'
    gui_name = 'Palm Pre'
    description = _('Communicate with the Palm Pre')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'prc', 'pdb', 'txt']
    # USB ids used by the device scanner to recognise this device
    VENDOR_ID = [0x0830]
    PRODUCT_ID = [0x8004, 0x8002, 0x0101, 0x8042]
    BCD = [0x0316]
    VENDOR_NAME = 'PALM'
    WINDOWS_MAIN_MEM = ['PRE', 'PALM_DEVICE']
    EBOOK_DIR_MAIN = 'E-books'
class AVANT(USBMS):
    '''Driver for the bq Avant reader (USB mass storage).'''
    name = 'Booq Avant Device Interface'
    gui_name = 'bq Avant'
    description = _('Communicate with the Bq Avant')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'fb2', 'html', 'rtf', 'pdf', 'txt']
    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0xa4a5]
    BCD = [0x0319]
    VENDOR_NAME = 'E-BOOK'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'READER'
    # Books go into the root of the storage
    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = True
class SWEEX(USBMS):
    '''Driver covering several rebadged variants of the same hardware.'''
    # Identical to the Promedia
    name = 'Sweex Device Interface'
    gui_name = 'Sweex/Kogan/Q600/Wink'
    description = _('Communicate with the Sweex/Kogan/Q600/Wink')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'html', 'rtf', 'chm', 'pdf', 'txt']
    # Multiple id sets, one per rebadged variant
    VENDOR_ID = [0x0525, 0x177f]
    PRODUCT_ID = [0xa4a5, 0x300]
    BCD = [0x0319, 0x110, 0x325]
    VENDOR_NAME = ['SWEEX', 'LINUX']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOKREADER', 'FILE-STOR_GADGET']
    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = True
class PDNOVEL(USBMS):
    '''Driver for the Pandigital Novel; covers are written next to the book file.'''
    name = 'Pandigital Novel device interface'
    gui_name = 'PD Novel'
    description = _('Communicate with the Pandigital Novel')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'linux', 'osx']
    FORMATS = ['epub', 'pdf']
    VENDOR_ID = [0x18d1]
    PRODUCT_ID = [0xb004, 0xa004]
    BCD = [0x224]
    VENDOR_NAME = 'ANDROID'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '__UMS_COMPOSITE'
    THUMBNAIL_HEIGHT = 130
    EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'eBooks'
    SUPPORTS_SUB_DIRS = False
    DELETE_EXTS = ['.jpg', '.jpeg', '.png']

    def upload_cover(self, path, filename, metadata, filepath):
        '''Write the book's thumbnail as <filename>.jpg beside the book.'''
        thumb = getattr(metadata, 'thumbnail', None)
        if not thumb or not thumb[2]:
            return
        cover_path = os.path.join(path, filename) + '.jpg'
        with open(cover_path, 'wb') as out:
            out.write(thumb[2])
            # Flush to physical media while the handle is still open.
            fsync(out)
class PDNOVEL_KOBO(PDNOVEL):
    '''Kobo edition of the Pandigital Novel: covers live in a hidden
    .thumbnail folder instead of beside the book file.'''
    name = 'Pandigital Kobo device interface'
    gui_name = 'PD Novel (Kobo)'
    description = _('Communicate with the Pandigital Novel')
    BCD = [0x222]
    EBOOK_DIR_MAIN = 'eBooks'

    def upload_cover(self, path, filename, metadata, filepath):
        '''Write the thumbnail to <path>/.thumbnail/<filename>.jpg.'''
        thumb = getattr(metadata, 'thumbnail', None)
        if not thumb or not thumb[2]:
            return
        thumb_dir = os.path.join(path, '.thumbnail')
        if not os.path.exists(thumb_dir):
            os.makedirs(thumb_dir)
        with open(os.path.join(thumb_dir, filename + '.jpg'), 'wb') as out:
            out.write(thumb[2])
            fsync(out)
class VELOCITYMICRO(USBMS):
    '''Driver for the VelocityMicro reader (Android-based mass storage).'''
    name = 'VelocityMicro device interface'
    gui_name = 'VelocityMicro'
    description = _('Communicate with the VelocityMicro')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'linux', 'osx']
    FORMATS = ['epub', 'pdb', 'txt', 'html', 'pdf']
    VENDOR_ID = [0x18d1]
    PRODUCT_ID = [0xb015]
    BCD = [0x224]
    VENDOR_NAME = 'ANDROID'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '__UMS_COMPOSITE'
    EBOOK_DIR_MAIN = 'eBooks'
    SUPPORTS_SUB_DIRS = False
class GEMEI(USBMS):
    '''Driver for the Gemei GM2000 reader.'''
    name = 'Gemei Device Interface'
    gui_name = 'GM2000'
    description = _('Communicate with the GM2000')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'chm', 'html', 'pdb', 'pdf', 'txt']
    VENDOR_ID = [0x07c4]
    PRODUCT_ID = [0xa4a5]
    # None means match any device revision
    BCD = None
    VENDOR_NAME = 'CHINA'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'CHIP'
    EBOOK_DIR_MAIN = 'eBooks'
    SUPPORTS_SUB_DIRS = True
class LUMIREAD(USBMS):
    '''Driver for the Acer Lumiread; covers are mirrored into a parallel
    "covers" directory tree next to the "books" tree.'''
    name = 'Acer Lumiread Device Interface'
    gui_name = 'Lumiread'
    description = _('Communicate with the Acer Lumiread')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf', 'mobi', 'chm', 'txt', 'doc', 'docx', 'rtf']
    VENDOR_ID = [0x1025]
    PRODUCT_ID = [0x048d]
    BCD = [0x323]
    EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'books'
    SUPPORTS_SUB_DIRS = True
    THUMBNAIL_HEIGHT = 200
    VENDOR_NAME = 'ACER'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'LUMIREAD_600'

    def upload_cover(self, path, filename, metadata, filepath):
        '''Write the thumbnail as <book path>.jpg under the covers tree.'''
        thumb = metadata.thumbnail
        if not thumb or not thumb[-1]:
            return
        # Replace only the first 'books' path component with 'covers'.
        cover_path = filepath.replace('/', os.sep).replace(
            os.sep + 'books' + os.sep, os.sep + 'covers' + os.sep, 1)
        parent = os.path.dirname(cover_path)
        if not os.path.exists(parent):
            os.makedirs(parent)
        with open(cover_path + '.jpg', 'wb') as out:
            out.write(thumb[-1])
            fsync(out)
class ALURATEK_COLOR(USBMS):
    '''Driver for the Aluratek Color and several rebadged variants.'''
    name = 'Aluratek Color Device Interface'
    gui_name = 'Aluratek Color'
    description = _('Communicate with the Aluratek Color')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'fb2', 'txt', 'pdf']
    VENDOR_ID = [0x1f3a]
    PRODUCT_ID = [0x1000]
    BCD = [0x0002]
    EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'books'
    # Many rebadged devices share these ids; match any of the names below
    VENDOR_NAME = ['USB_2.0', 'EZREADER', 'C4+', 'WOXTER', 'KIANO', 'SAD', 'LARK']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['USB_FLASH_DRIVER', '.', 'TOUCH', 'SCRIBA_190', 'BOOKYLIGHT', 'SAPIENS_V2', 'EBOOK']
    SCAN_FROM_ROOT = True
    SUPPORTS_SUB_DIRS_FOR_SCAN = True
class TREKSTOR(USBMS):
    '''Driver for the TrekStor family of e-book readers.'''
    name = 'Trekstor E-book player device interface'
    gui_name = 'Trekstor'
    description = _('Communicate with the Trekstor')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'txt', 'pdf']
    VENDOR_ID = [0x1e68]
    PRODUCT_ID = [0x0041, 0x0042, 0x0052, 0x004e, 0x0056,
        0x0067, # This is for the Pyrus Mini
        0x006f, # This is for the Pyrus Maxi
        0x003e, # This is for the EBOOK_PLAYER_5M https://bugs.launchpad.net/bugs/792091
        0x05c, # This is for the 4ink https://www.mobileread.com/forums/showthread.php?t=191318
        0x006c, # This is for the 4ink https://www.mobileread.com/forums/showthread.php?t=218273
        0x006d, # Another Pyrus? https://www.mobileread.com/forums/showthread.php?t=231982
        0x73, # This is for the Pyrus 2 LED https://bugs.launchpad.net/bugs/1376018
    ]
    BCD = [0x0002, 0x100, 0x0222, 0x2]
    EBOOK_DIR_MAIN = 'Ebooks'
    VENDOR_NAME = 'TREKSTOR'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_PLAYER_7',
        'EBOOK_PLAYER_5M', 'EBOOK-READER_3.0', 'EREADER_PYRUS', 'PYRUS_MINI', 'PYRUS_MAXI', 'PYRUS_2_LED']
    SUPPORTS_SUB_DIRS = True
    SUPPORTS_SUB_DIRS_DEFAULT = False
class EEEREADER(USBMS):
    '''Driver for the Asus EEE Reader / EEE Note.'''
    name = 'Asus EEE Reader device interface'
    gui_name = 'EEE Reader'
    description = _('Communicate with the EEE Reader')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'fb2', 'txt', 'pdf']
    VENDOR_ID = [0x0b05]
    PRODUCT_ID = [0x178f, 0x17a1]
    BCD = [0x0319]
    EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'Book'
    VENDOR_NAME = ['LINUX', 'ASUS']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['FILE-STOR_GADGET', 'EEE_NOTE']
class ADAM(USBMS):
    '''Driver for the Notion Ink Adam tablet.'''
    name = 'Notion Ink Adam device interface'
    gui_name = 'Adam'
    description = _('Communicate with the Adam tablet')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf', 'doc']
    VENDOR_ID = [0x0955]
    PRODUCT_ID = [0x7100]
    BCD = [0x9999]
    EBOOK_DIR_MAIN = 'eBooks'
    VENDOR_NAME = 'NI'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['ADAM']
    SUPPORTS_SUB_DIRS = True
class NEXTBOOK(USBMS):
    '''Driver for the Nextbook reader.'''
    name = 'Nextbook device interface'
    gui_name = 'Nextbook'
    description = _('Communicate with the Nextbook Reader')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'fb2', 'txt', 'pdf']
    VENDOR_ID = [0x05e3]
    PRODUCT_ID = [0x0726]
    BCD = [0x021a]
    EBOOK_DIR_MAIN = ''
    VENDOR_NAME = ['NEXT2', 'BK7005']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['1.0.14', 'PLAYER']
    SUPPORTS_SUB_DIRS = True
    THUMBNAIL_HEIGHT = 120
    # The string below is deliberately disabled cover-upload code, kept for
    # reference. It is a no-op string statement, not executed.
    '''
    def upload_cover(self, path, filename, metadata, filepath):
        if metadata.thumbnail and metadata.thumbnail[-1]:
            path = path.replace('/', os.sep)
            is_main = path.startswith(self._main_prefix)
            prefix = None
            if is_main:
                prefix = self._main_prefix
            else:
                if self._card_a_prefix and \
                    path.startswith(self._card_a_prefix):
                    prefix = self._card_a_prefix
                elif self._card_b_prefix and \
                        path.startswith(self._card_b_prefix):
                    prefix = self._card_b_prefix
            if prefix is None:
                prints('WARNING: Failed to find prefix for:', filepath)
                return
            thumbnail_dir = os.path.join(prefix, '.Cover')
            relpath = os.path.relpath(filepath, prefix)
            if relpath.startswith('..\\'):
                relpath = relpath[3:]
            thumbnail_dir = os.path.join(thumbnail_dir, relpath)
            if not os.path.exists(thumbnail_dir):
                os.makedirs(thumbnail_dir)
            with open(os.path.join(thumbnail_dir, filename+'.jpg'), 'wb') as f:
                f.write(metadata.thumbnail[-1])
                fsync(f)
    '''
class MOOVYBOOK(USBMS):
    '''Driver for the Moovybook reader; uploads go into 'Books' while
    scanning uses the storage root.'''
    name = 'Moovybook device interface'
    gui_name = 'Moovybook'
    description = _('Communicate with the Moovybook Reader')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'txt', 'pdf']
    VENDOR_ID = [0x1cae]
    PRODUCT_ID = [0x9b08]
    BCD = [0x02]
    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = True

    def get_main_ebook_dir(self, for_upload=False):
        '''Return the folder books are uploaded to, or scanned from.'''
        if for_upload:
            return 'Books'
        return self.EBOOK_DIR_MAIN
class COBY(USBMS):
    '''Driver for the COBY MP977 media player.'''
    name = 'COBY MP977 device interface'
    gui_name = 'COBY'
    description = _('Communicate with the COBY')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf']
    VENDOR_ID = [0x1e74]
    PRODUCT_ID = [0x7121]
    BCD = [0x02]
    VENDOR_NAME = 'USB_2.0'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'MP977_DRIVER'
    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = False

    def get_carda_ebook_dir(self, for_upload=False):
        '''Uploads to the card go into 'eBooks'; scanning uses the default.'''
        return 'eBooks' if for_upload else self.EBOOK_DIR_CARD_A
class EX124G(USBMS):
    '''Driver for the Motorola Ex124G phone.'''
    name = 'Motorola Ex124G device interface'
    gui_name = 'Ex124G'
    description = _('Communicate with the Ex124G')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['mobi', 'prc', 'azw']
    VENDOR_ID = [0x0e8d]
    PRODUCT_ID = [0x0002]
    BCD = [0x0100]
    VENDOR_NAME = 'MOTOROLA'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '_PHONE'
    EBOOK_DIR_MAIN = 'eBooks'
    SUPPORTS_SUB_DIRS = False

    def get_carda_ebook_dir(self, for_upload=False):
        '''Uploads to the card go into 'eBooks'; scanning uses the default.'''
        return 'eBooks' if for_upload else self.EBOOK_DIR_CARD_A
class WAYTEQ(USBMS):
    '''Driver for the WayteQ xBook and the rebadged SPC Dickens. The device
    reports its main memory and card in the opposite order from what calibre
    expects, hence the drive/name swapping helpers below.'''
    name = 'WayteQ device interface'
    gui_name = 'WayteQ xBook'
    description = _('Communicate with the WayteQ and SPC Dickens Readers')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'txt', 'pdf', 'html', 'rtf', 'chm', 'djvu', 'doc']
    VENDOR_ID = [0x05e3, 0x05e8]
    PRODUCT_ID = [0x0726, 0x0728]
    BCD = [0x0222]
    EBOOK_DIR_MAIN = 'Documents'
    SCAN_FROM_ROOT = True
    VENDOR_NAME = ['ROCKCHIP', 'CBR']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['RK28_SDK_DEMO', 'EINK_EBOOK_READE']
    SUPPORTS_SUB_DIRS = True

    def get_gui_name(self):
        '''The SPC Dickens variant is identified by its distinct vendor id.'''
        try:
            is_spc = self.detected_device.idVendor == 0x05e8
        except Exception:
            is_spc = False
        return 'SPC Dickens' if is_spc else self.gui_name

    def get_carda_ebook_dir(self, for_upload=False):
        '''Uploads to the card go into 'Documents'; scanning uses the default.'''
        return 'Documents' if for_upload else self.EBOOK_DIR_CARD_A

    def windows_sort_drives(self, drives):
        '''Swap main memory and card, which the device reports reversed.'''
        if len(drives) >= 2:
            main = drives.get('main', None)
            card = drives.get('carda', None)
            if main and card:
                drives['main'], drives['carda'] = card, main
        return drives

    def linux_swap_drives(self, drives):
        # See https://bugs.launchpad.net/bugs/1151901
        if len(drives) < 2 or not drives[0] or not drives[1]:
            return drives
        swapped = list(drives)
        swapped[0], swapped[1] = swapped[1], swapped[0]
        return tuple(swapped)

    def osx_sort_names(self, names):
        '''Same main/card swap as on the other platforms.'''
        if len(names) >= 2:
            main = names.get('main', None)
            card = names.get('carda', None)
            if main is not None and card is not None:
                names['main'], names['carda'] = card, main
        return names
class WOXTER(USBMS):
    '''Driver for the Woxter Scriba reader.'''
    name = 'Woxter Scriba device interface'
    gui_name = 'Woxter Scriba'
    description = _('Communicate with the Woxter Scriba reader')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'fb2', 'txt', 'pdf', 'html', 'rtf', 'djvu', 'doc']
    VENDOR_ID = [0x2207]
    PRODUCT_ID = [0x2818]
    BCD = [0x0100]
    EBOOK_DIR_MAIN = 'Books'
    SCAN_FROM_ROOT = True
    SUPPORTS_SUB_DIRS = True
    VENDOR_NAME = ['ROCKCHIP', 'TEXET']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EREADER', 'TB-146SE']
class POCKETBOOK626(USBMS):
    '''Driver for PocketBook Touch Lux 2 / Inkpad X readers in mass-storage mode.'''
    name = 'PocketBook Touch Lux 2'
    gui_name = 'PocketBook'
    description = _('Communicate with the PocketBook Touch Lux 2 and Inkpad X readers')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats ('pdf' was listed twice; deduplicated)
    FORMATS = ['epub', 'pdf', 'fb2', 'txt', 'html', 'djvu', 'doc', 'docx', 'rtf', 'chm']
    VENDOR_ID = [0xfffe]
    PRODUCT_ID = [0x0001]
    BCD = [0x0230, 0x101]
    EBOOK_DIR_MAIN = 'Books'
    SCAN_FROM_ROOT = True
    SUPPORTS_SUB_DIRS = True
    VENDOR_NAME = ['USB_2.0']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['USB_FLASH_DRIVER']
class SONYDPTS1(USBMS):
    '''Driver for the Sony DPT-S1 digital paper device (PDF only).'''
    name = 'SONY DPT-S1'
    gui_name = 'SONY DPT'
    description = _('Communicate with the SONY DPT-S1')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['pdf']
    VENDOR_ID = [0x054c]
    PRODUCT_ID = [0x0854]
    BCD = [0x0226]
    EBOOK_DIR_MAIN = 'Books'
    SCAN_FROM_ROOT = True
    SUPPORTS_SUB_DIRS = True
    VENDOR_NAME = ['SONY']
    # Main memory and SD card expose distinct volume names
    WINDOWS_MAIN_MEM = ['DPT-S1']
    WINDOWS_CARD_A_MEM = ['DPT-S1__SD']
class CERVANTES(USBMS):
    '''Driver for the bq Cervantes family of readers.'''
    name = 'Bq Cervantes Device Interface'
    gui_name = 'Bq Cervantes'
    description = _('Communicate with the Bq Cervantes')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    FORMATS = ['epub', 'fb2', 'mobi', 'doc', 'rtf', 'pdf', 'txt']
    VENDOR_ID = [0x2a47]
    # One product id per Cervantes model
    PRODUCT_ID = [0xad79, 0xad78, 0xad77, 0xad75]
    BCD = [0x0110]
    EBOOK_DIR_MAIN = 'Books'
    SCAN_FROM_ROOT = True
    SUPPORTS_SUB_DIRS = True
| 17,559 | Python | .py | 429 | 34.048951 | 129 | 0.591436 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,730 | __init__.py | kovidgoyal_calibre/src/calibre/devices/smart_device_app/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,731 | driver.py | kovidgoyal_calibre/src/calibre/devices/smart_device_app/driver.py | #!/usr/bin/env python
'''
Created on 29 Jun 2012
@author: charles
'''
import hashlib
import json
import os
import posixpath
import random
import select
import socket
import sys
import threading
import time
import traceback
from collections import defaultdict
from errno import EAGAIN, EINTR
from functools import wraps
from threading import Thread
from calibre import prints
from calibre.constants import DEBUG, cache_dir, numeric_version
from calibre.devices.errors import ControlError, InitialConnectionError, OpenFailed, OpenFeedback, PacketError, TimeoutError, UserFeedback
from calibre.devices.interface import DevicePlugin, currently_connected_device
from calibre.devices.usbms.books import Book, CollectionsBookList
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.devices.usbms.driver import USBMS
from calibre.devices.utils import build_template_regexp, sanity_check
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.ebooks.metadata import title_sort
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.library import current_library_name
from calibre.ptempfile import PersistentTemporaryFile
from calibre.utils.config_base import tweaks
from calibre.utils.filenames import ascii_filename as sanitize
from calibre.utils.filenames import shorten_components_to
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.mdns import get_all_ips
from calibre.utils.mdns import publish as publish_zeroconf
from calibre.utils.mdns import unpublish as unpublish_zeroconf
from calibre.utils.socket_inheritance import set_socket_inherit
from polyglot import queue
from polyglot.builtins import as_bytes, iteritems, itervalues
def synchronous(tlockname):
    '''
    Decorator factory: wrap a method so that its body executes while holding
    the lock stored on the instance under the attribute name *tlockname*.
    '''
    def decorate(method):
        @wraps(method)
        def locked_call(self, *args, **kwargs):
            # Look the lock up at call time so it can be created in __init__.
            with self.__getattribute__(tlockname):
                return method(self, *args, **kwargs)
        return locked_call
    return decorate
class ConnectionListener(Thread):
    '''
    Daemon thread that services the driver's sockets: it answers UDP
    broadcast probes from wireless device apps, accepts incoming TCP
    connections and hands them to the driver via connection_queue, and
    tells a newly connecting client that calibre is busy when a device
    is already connected.
    '''

    def __init__(self, driver):
        Thread.__init__(self)
        self.daemon = True
        self.driver = driver
        self.keep_running = True
        self.all_ip_addresses = dict()

    def stop(self):
        # run() checks this flag once per second and exits cleanly.
        self.keep_running = False

    def _close_socket(self, the_socket):
        '''Shut down and close a socket, tolerating half-open states.'''
        try:
            the_socket.shutdown(socket.SHUT_RDWR)
        except:
            # the shutdown can fail if the socket isn't fully connected. Ignore it
            pass
        the_socket.close()

    def run(self):
        device_socket = None
        get_all_ips(reinitialize=True)
        while self.keep_running:
            try:
                time.sleep(1)
            except:
                # Happens during interpreter shutdown
                break
            if not self.keep_running:
                break
            if not self.all_ip_addresses:
                self.all_ip_addresses = get_all_ips()
                if self.all_ip_addresses:
                    self.driver._debug("All IP addresses", self.all_ip_addresses)
            if not self.driver.connection_queue.empty():
                d = currently_connected_device.device
                if d is not None:
                    # A device is already connected: reject the queued
                    # connection with a CALIBRE_BUSY message.
                    self.driver._debug('queue not serviced', d.get_gui_name())
                    try:
                        sock = self.driver.connection_queue.get_nowait()
                        s = self.driver._json_encode(
                            self.driver.opcodes['CALIBRE_BUSY'],
                            {'otherDevice': d.get_gui_name()})
                        # BUG FIX: the busy reply must be sent on the socket
                        # just dequeued ('sock'), not on the stale/None
                        # 'device_socket', which crashed this thread.
                        self.driver._send_byte_string(sock, (b'%d' % len(s)) + as_bytes(s))
                        sock.close()
                    except queue.Empty:
                        pass
            if getattr(self.driver, 'broadcast_socket', None) is not None:
                # Drain all pending broadcast probes without blocking.
                while True:
                    ans = select.select((self.driver.broadcast_socket,), (), (), 0)
                    if len(ans[0]) > 0:
                        try:
                            packet = self.driver.broadcast_socket.recvfrom(100)
                            remote = packet[1]
                            content_server_port = ''
                            try:
                                from calibre.srv.opts import server_config
                                content_server_port = str(server_config().port)
                            except Exception:
                                pass
                            # Reply with our identity plus the content-server
                            # and wireless-driver port numbers.
                            message = (self.driver.ZEROCONF_CLIENT_STRING + ' (on ' +
                                    str(socket.gethostname().partition('.')[0]) +
                                    ');' + content_server_port +
                                    ',' + str(self.driver.port)).encode('utf-8')
                            self.driver._debug('received broadcast', packet, message)
                            self.driver.broadcast_socket.sendto(message, remote)
                        except:
                            pass
                    else:
                        break
            if self.driver.connection_queue.empty() and \
                    getattr(self.driver, 'listen_socket', None) is not None:
                ans = select.select((self.driver.listen_socket,), (), (), 0)
                if len(ans[0]) > 0:
                    # timeout in 100 ms to detect rare case where the socket goes
                    # away between the select and the accept
                    try:
                        self.driver._debug('attempt to open device socket')
                        device_socket = None
                        self.driver.listen_socket.settimeout(0.100)
                        device_socket, ign = eintr_retry_call(
                            self.driver.listen_socket.accept)
                        set_socket_inherit(device_socket, False)
                        self.driver.listen_socket.settimeout(None)
                        device_socket.settimeout(None)
                        try:
                            self.driver.connection_queue.put_nowait(device_socket)
                        except queue.Full:
                            self._close_socket(device_socket)
                            device_socket = None
                            self.driver._debug('driver is not answering')
                    except socket.timeout:
                        pass
                    except OSError:
                        x = sys.exc_info()[1]
                        self.driver._debug('unexpected socket exception', x.args[0])
                        # Guard: accept() may have failed before a socket
                        # existed, in which case there is nothing to close.
                        if device_socket is not None:
                            self._close_socket(device_socket)
                            device_socket = None
                        # raise
class SDBook(Book):
    '''Book whose path always uses forward slashes, as the wireless
    protocol requires.'''

    def __init__(self, prefix, lpath, size=None, other=None):
        Book.__init__(self, prefix, lpath, size=size, other=other)
        raw_path = getattr(self, 'path', lpath)
        self.path = raw_path.replace('\\', '/')
class SMART_DEVICE_APP(DeviceConfig, DevicePlugin):
name = 'SmartDevice App Interface'
gui_name = _('Wireless device')
gui_name_template = '%s: %s'
icon = 'devices/tablet.png'
description = _('Communicate with Smart Device apps')
supported_platforms = ['windows', 'osx', 'linux']
author = 'Charles Haley'
version = (0, 0, 1)
# Invalid USB vendor information so the scanner will never match
VENDOR_ID = [0xffff]
PRODUCT_ID = [0xffff]
BCD = [0xffff]
FORMATS = list(BOOK_EXTENSIONS)
ALL_FORMATS = list(BOOK_EXTENSIONS)
HIDE_FORMATS_CONFIG_BOX = True
USER_CAN_ADD_NEW_FORMATS = False
DEVICE_PLUGBOARD_NAME = 'SMART_DEVICE_APP'
CAN_SET_METADATA = []
CAN_DO_DEVICE_DB_PLUGBOARD = False
SUPPORTS_SUB_DIRS = True
MUST_READ_METADATA = True
NEWS_IN_FOLDER = True
SUPPORTS_USE_AUTHOR_SORT = False
WANTS_UPDATED_THUMBNAILS = True
MANAGES_DEVICE_PRESENCE = True
# Guess about the max length on windows. This number will be reduced by
# the length of the path on the client, and by the fudge factor below. We
# use this on all platforms because the device might be connected to windows
# in the future.
MAX_PATH_LEN = 250
# guess of length of MTP name. The length of the full path to the folder
# on the device is added to this. That path includes the device's mount point
# making this number effectively around 10 to 15 larger.
PATH_FUDGE_FACTOR = 40
THUMBNAIL_HEIGHT = 160
DEFAULT_THUMBNAIL_HEIGHT = 160
THUMBNAIL_COMPRESSION_QUALITY = 75
DEFAULT_THUMBNAIL_COMPRESSION_QUALITY = 75
PREFIX = ''
BACKLOADING_ERROR_MESSAGE = None
SAVE_TEMPLATE = '{title} - {authors} ({id})'
# Some network protocol constants
BASE_PACKET_LEN = 4096
PROTOCOL_VERSION = 1
MAX_UNSUCCESSFUL_CONNECTS = 5
SEND_NOOP_EVERY_NTH_PROBE = 5
DISCONNECT_AFTER_N_SECONDS = 30*60 # 30 minutes
PURGE_CACHE_ENTRIES_DAYS = 30
CURRENT_CC_VERSION = 128
ZEROCONF_CLIENT_STRING = 'calibre wireless device client'
# A few "random" port numbers to use for detecting clients using broadcast
# The clients are expected to broadcast a UDP 'hi there' on all of these
# ports when they attempt to connect. Calibre will respond with the port
# number the client should use. This scheme backs up mdns. And yes, we
# must hope that no other application on the machine is using one of these
# ports in datagram mode.
# If you change the ports here, all clients will also need to change.
BROADCAST_PORTS = [54982, 48123, 39001, 44044, 59678]
opcodes = {
'NOOP' : 12,
'OK' : 0,
'BOOK_DONE' : 11,
'CALIBRE_BUSY' : 18,
'SET_LIBRARY_INFO' : 19,
'DELETE_BOOK' : 13,
'DISPLAY_MESSAGE' : 17,
'ERROR' : 20,
'FREE_SPACE' : 5,
'GET_BOOK_FILE_SEGMENT' : 14,
'GET_BOOK_METADATA' : 15,
'GET_BOOK_COUNT' : 6,
'GET_DEVICE_INFORMATION' : 3,
'GET_INITIALIZATION_INFO': 9,
'SEND_BOOKLISTS' : 7,
'SEND_BOOK' : 8,
'SEND_BOOK_METADATA' : 16,
'SET_CALIBRE_DEVICE_INFO': 1,
'SET_CALIBRE_DEVICE_NAME': 2,
'TOTAL_SPACE' : 4,
}
reverse_opcodes = {v: k for k, v in iteritems(opcodes)}
MESSAGE_PASSWORD_ERROR = 1
MESSAGE_UPDATE_NEEDED = 2
MESSAGE_SHOW_TOAST = 3
ALL_BY_TITLE = _('All by title')
ALL_BY_AUTHOR = _('All by author')
ALL_BY_SOMETHING = _('All by something')
EXTRA_CUSTOMIZATION_MESSAGE = [
_('Enable connections at startup') + ':::<p>' +
_('Check this box to allow connections when calibre starts') + '</p>',
'',
_('Security password') + ':::<p>' +
_('Enter a password that the device app must use to connect to calibre') + '</p>',
'',
_('Use fixed network port') + ':::<p>' +
_('If checked, use the port number in the "Port" box, otherwise '
'the driver will pick a random port') + '</p>',
_('Port number: ') + ':::<p>' +
_('Enter the port number the driver is to use if the "fixed port" box is checked') + '</p>',
_('Print extra debug information') + ':::<p>' +
_('Check this box if requested when reporting problems') + '</p>',
'',
_('Comma separated list of metadata fields '
'to turn into collections on the device.') + ':::<p>' +
_('Possibilities include: series, tags, authors, etc' +
'. Three special collections are available: %(abt)s:%(abtv)s, '
'%(aba)s:%(abav)s, and %(abs)s:%(absv)s. Add '
'these values to the list to enable them. The collections will be '
'given the name provided after the ":" character.')%dict(
abt='abt', abtv=ALL_BY_TITLE, aba='aba', abav=ALL_BY_AUTHOR,
abs='abs', absv=ALL_BY_SOMETHING),
'',
_('Enable the no-activity timeout') + ':::<p>' +
_('If this box is checked, calibre will automatically disconnect if '
'a connected device does nothing for %d minutes. Unchecking this '
' box disables this timeout, so calibre will never automatically '
'disconnect.')%(DISCONNECT_AFTER_N_SECONDS/60,) + '</p>',
_('Use this IP address') + ':::<p>' +
_('Use this option if you want to force the driver to listen on a '
'particular IP address. The driver will listen only on the '
'entered address, and this address will be the one advertised '
'over mDNS (BonJour).') + '</p>',
_('Replace books with same calibre ID') + ':::<p>' +
_('Use this option to overwrite a book on the device if that book '
'has the same calibre identifier as the book being sent. The file name of the '
'book will not change even if the save template produces a '
'different result. Using this option in most cases prevents '
'having multiple copies of a book on the device.') + '</p>',
_('Cover thumbnail compression quality') + ':::<p>' +
_('Use this option to control the size and quality of the cover '
'file sent to the device. It must be between 50 and 99. '
'The larger the number the higher quality the cover, but also '
'the larger the file. For example, changing this from 70 to 90 '
'results in a much better cover that is approximately 2.5 '
'times as big. To see the changes you must force calibre '
'to resend metadata to the device, either by changing '
'the metadata for the book (updating the last modification '
'time) or resending the book itself.') + '</p>',
_('Use metadata cache') + ':::<p>' +
_('Setting this option allows calibre to keep a copy of metadata '
'on the device, speeding up device connections. Unsetting this '
'option disables keeping the copy, forcing the device to send '
'metadata to calibre on every connect. Unset this option if '
'you think that the cache might not be operating correctly.') + '</p>',
'',
_('Additional file extensions to send to the device') + ':::<p>' +
_('This is a comma-separated list of format file extensions you want '
'to be able to send to the device. For example, you might have '
'audio books in your library with the extension "m4b" that you '
'want to listen to on your device. Don\'t worry about the "extra '
'enabled extensions" warning.'),
_('Ignore device free space') + ':::<p>' +
_("Check this box to ignore the amount of free space reported by your "
"devices. This might be needed if you store books on an SD card and "
"the device doesn't have much free main memory.") + '</p>',
]
EXTRA_CUSTOMIZATION_DEFAULT = [
False, '',
'', '',
False, '9090',
False, '',
'', '',
False, '',
True, '75',
True, '',
'', False,
]
OPT_AUTOSTART = 0
OPT_PASSWORD = 2
OPT_USE_PORT = 4
OPT_PORT_NUMBER = 5
OPT_EXTRA_DEBUG = 6
OPT_COLLECTIONS = 8
OPT_AUTODISCONNECT = 10
OPT_FORCE_IP_ADDRESS = 11
OPT_OVERWRITE_BOOKS_UUID = 12
OPT_COMPRESSION_QUALITY = 13
OPT_USE_METADATA_CACHE = 14
OPT_EXTRA_EXTENSIONS = 16
OPT_IGNORE_FREESPACE = 17
OPTNAME_TO_NUMBER_MAP = {
'password': OPT_PASSWORD,
'autostart': OPT_AUTOSTART,
'use_fixed_port': OPT_USE_PORT,
'port_number': OPT_PORT_NUMBER,
'force_ip_address': OPT_FORCE_IP_ADDRESS,
'thumbnail_compression_quality': OPT_COMPRESSION_QUALITY,
}
def __init__(self, path):
self.sync_lock = threading.RLock()
self.noop_counter = 0
self.debug_start_time = time.time()
self.debug_time = time.time()
self.is_connected = False
# Don't call this method from the GUI unless you are sure that there is no
# network traffic in progress. Otherwise the gui might hang waiting for the
# network timeout
    def _debug(self, *args):
        '''
        Print a timestamped debug line naming the calling method, followed by
        *args*. Long string values inside dict arguments are elided. No-op
        unless calibre is running in debug mode.
        '''
        # manual synchronization so we don't lose the calling method name
        import inspect
        with self.sync_lock:
            if not DEBUG:
                return
            # Elapsed time since driver start and since the previous debug line.
            total_elapsed = time.time() - self.debug_start_time
            elapsed = time.time() - self.debug_time
            print('SMART_DEV (%7.2f:%7.3f) %s'%(total_elapsed, elapsed,
                inspect.stack()[1][3]), end='')
            for a in args:
                try:
                    if isinstance(a, dict):
                        # Avoid dumping huge payloads (e.g. book content) to the log.
                        printable = {}
                        for k,v in iteritems(a):
                            if isinstance(v, (bytes, str)) and len(v) > 50:
                                printable[k] = 'too long'
                            else:
                                printable[k] = v
                        prints('', printable, end='')
                    else:
                        prints('', a, end='')
                except:
                    # Best effort: never let logging itself raise.
                    prints('', 'value too long', end='')
            print()
            self.debug_time = time.time()
# local utilities
# copied from USBMS. Perhaps this could be a classmethod in usbms?
def _update_driveinfo_record(self, dinfo, prefix, location_code, name=None):
import uuid
from calibre.utils.date import isoformat, now
if not isinstance(dinfo, dict):
dinfo = {}
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = str(uuid.uuid4())
if dinfo.get('device_name') is None:
dinfo['device_name'] = self.get_gui_name()
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join([str(i) for i in numeric_version])
dinfo['date_last_connected'] = isoformat(now())
dinfo['prefix'] = self.PREFIX
return dinfo
    # copied with changes from USBMS.Device. In particular, we needed to
    # remove the 'path' argument and all its uses. Also removed the calls to
    # filename_callback and sanitize_path_components
    def _create_upload_path(self, mdata, fname, create_dirs=True):
        '''
        Compute the device-side path (lpath) to store a book at.

        Honors the "overwrite by UUID" option, optional UUID-based file names,
        the device's per-extension path-length limits, the save template, and
        special handling for News/periodical tags. Returns a posix-style
        relative path.
        '''
        fname = sanitize(fname)
        ext = os.path.splitext(fname)[1]
        try:
            # If we have already seen this book's UUID, use the existing path
            if self.settings().extra_customization[self.OPT_OVERWRITE_BOOKS_UUID]:
                existing_book = self._uuid_in_cache(mdata.uuid, ext)
                if (existing_book and existing_book.lpath and
                        self.known_metadata.get(existing_book.lpath, None)):
                    return existing_book.lpath
            # If the device asked for it, try to use the UUID as the file name.
            # Fall back to the template-based name if the UUID doesn't exist.
            if self.client_wants_uuid_file_names and mdata.uuid:
                return (mdata.uuid + ext)
        except:
            pass
        dotless_ext = ext[1:] if len(ext) > 0 else ext
        # reserve room for the extension-specific overhead the client reported
        maxlen = (self.MAX_PATH_LEN - (self.PATH_FUDGE_FACTOR +
                  self.exts_path_lengths.get(dotless_ext, self.PATH_FUDGE_FACTOR)))
        special_tag = None
        if mdata.tags:
            for t in mdata.tags:
                if t.startswith(_('News')) or t.startswith('/'):
                    special_tag = t
                    break
        settings = self.settings()
        template = self.save_template()
        if mdata.tags and _('News') in mdata.tags:
            # periodicals get a date-stamped name instead of the template
            try:
                p = mdata.pubdate
                date = (p.year, p.month, p.day)
            except:
                today = time.localtime()
                date = (today[0], today[1], today[2])
            template = "{title}_%d-%d-%d" % date
        use_subdirs = self.SUPPORTS_SUB_DIRS and settings.use_subdirs
        from calibre.library.save_to_disk import config, get_components
        opts = config().parse()
        if not isinstance(template, str):
            template = template.decode('utf-8')
        app_id = str(getattr(mdata, 'application_id', ''))
        id_ = mdata.get('id', fname)
        extra_components = get_components(template, mdata, id_,
                timefmt=opts.send_timefmt, length=maxlen-len(app_id)-1,
                last_has_extension=False)
        if not extra_components:
            extra_components.append(sanitize(fname))
        else:
            extra_components[-1] = sanitize(extra_components[-1]+ext)
        if extra_components[-1] and extra_components[-1][0] in ('.', '_'):
            # avoid names the device might treat as hidden/special
            extra_components[-1] = 'x' + extra_components[-1][1:]
        if special_tag is not None:
            # News/periodical or '/'-prefixed tags dictate the folder layout
            name = extra_components[-1]
            extra_components = []
            tag = special_tag
            if tag.startswith(_('News')):
                if self.NEWS_IN_FOLDER:
                    extra_components.append('News')
            else:
                for c in tag.split('/'):
                    c = sanitize(c)
                    if not c:
                        continue
                    extra_components.append(c)
            extra_components.append(name)
        if not use_subdirs:
            # Leave this stuff here in case we later decide to use subdirs
            extra_components = extra_components[-1:]
        def remove_trailing_periods(x):
            # trailing '.' is illegal/ambiguous on some filesystems
            ans = x
            while ans.endswith('.'):
                ans = ans[:-1].strip()
            if not ans:
                ans = 'x'
            return ans
        extra_components = list(map(remove_trailing_periods, extra_components))
        components = shorten_components_to(maxlen, extra_components)
        filepath = posixpath.join(*components)
        self._debug('lengths', dotless_ext, maxlen,
                    self.exts_path_lengths.get(dotless_ext, self.PATH_FUDGE_FACTOR),
                    len(filepath))
        return filepath
def _strip_prefix(self, path):
if self.PREFIX and path.startswith(self.PREFIX):
return path[len(self.PREFIX):]
return path
    # JSON booklist encode & decode
    # If the argument is a booklist or contains a book, use the metadata json
    # codec to first convert it to a string dict
    def _json_encode(self, op, arg):
        '''
        Encode the opcode *op* and argument dict *arg* as the wire-format JSON
        string ``[op, args]``. Book/Metadata values are serialized through the
        metadata JSON codec and annotated with a precomputed '_series_sort_'
        key so the client need not know calibre's title-sort rules.
        '''
        res = {}
        for k,v in iteritems(arg):
            if isinstance(v, (Book, Metadata)):
                res[k] = self.json_codec.encode_book_metadata(v)
                series = v.get('series', None)
                if series:
                    tsorder = tweaks['save_template_title_series_sorting']
                    series = title_sort(series, order=tsorder)
                else:
                    series = ''
                self._debug('series sort = ', series)
                res[k]['_series_sort_'] = series
            else:
                res[k] = v
        from calibre.utils.config import to_json
        return json.dumps([op, res], default=to_json)
    # Network functions
    def _read_binary_from_net(self, length):
        '''
        Read up to *length* bytes from the device socket. Any socket error
        closes the connection before being re-raised.
        '''
        try:
            v = self.device_socket.recv(length)
            return v
        except:
            self._close_device_socket()
            raise
    def _read_string_from_net(self):
        '''
        Read one length-prefixed JSON message from the device.

        The wire format is a decimal byte-count immediately followed by a JSON
        array starting with '['. Returns the bytes from '[' onward (the JSON
        payload), or b'' if the socket died mid-read. The b'0' seed keeps
        int() happy even when the very first byte received is '['.
        '''
        data = b'0'
        while True:
            dex = data.find(b'[')
            if dex >= 0:
                break
            # recv seems to return a pointer into some internal buffer.
            # Things get trashed if we don't make a copy of the data.
            v = self._read_binary_from_net(2)
            if len(v) == 0:
                return b''  # documentation says the socket is broken permanently.
            data += v
        total_len = int(data[:dex])
        data = data[dex:]
        pos = len(data)
        while pos < total_len:
            v = self._read_binary_from_net(total_len - pos)
            if len(v) == 0:
                return b''  # documentation says the socket is broken permanently.
            data += v
            pos += len(v)
        return data
    def _send_byte_string(self, sock, s):
        '''
        Send all of the bytes *s* on *sock*, retrying partial sends.
        EAGAIN/EINTR are retried after a short sleep; any other socket error
        closes the device connection and re-raises. Raises PacketError if *s*
        is not bytes.
        '''
        if not isinstance(s, bytes):
            self._debug('given a non-byte string!')
            self._close_device_socket()
            raise PacketError("Internal error: found a string that isn't bytes")
        sent_len = 0
        total_len = len(s)
        while sent_len < total_len:
            try:
                if sent_len == 0:
                    amt_sent = sock.send(s)
                else:
                    amt_sent = sock.send(s[sent_len:])
                if amt_sent <= 0:
                    raise OSError('Bad write on socket')
                sent_len += amt_sent
            except OSError as e:
                self._debug('socket error', e, e.errno)
                if e.args[0] != EAGAIN and e.args[0] != EINTR:
                    self._close_device_socket()
                    raise
                time.sleep(0.1)  # lets not hammer the OS too hard
            except:
                self._close_device_socket()
                raise
    # This must be protected by a lock because it is called from the GUI thread
    # (the sync stuff) and the device manager thread
    @synchronous('sync_lock')
    def _call_client(self, op, arg, print_debug_info=True, wait_for_response=True):
        '''
        Send opcode *op* with argument dict *arg* to the client and, when
        *wait_for_response* is True, return the decoded (opcode, result)
        reply; otherwise return (None, None). Any real opcode resets the
        NOOP counter used for idle disconnect. Socket failures close the
        connection and raise TimeoutError / ControlError.
        '''
        if op != 'NOOP':
            self.noop_counter = 0
        extra_debug = self.settings().extra_customization[self.OPT_EXTRA_DEBUG]
        if print_debug_info or extra_debug:
            if extra_debug:
                self._debug(op, 'wfr', wait_for_response, arg)
            else:
                self._debug(op, 'wfr', wait_for_response)
        if self.device_socket is None:
            return None, None
        try:
            s = self._json_encode(self.opcodes[op], arg)
            if print_debug_info and extra_debug:
                self._debug('send string', s)
            # wire format: decimal length prefix followed by the JSON payload
            self._send_byte_string(self.device_socket, (b'%d' % len(s)) + as_bytes(s))
            if not wait_for_response:
                return None, None
            return self._receive_from_client(print_debug_info=print_debug_info)
        except socket.timeout:
            self._debug('timeout communicating with device')
            self._close_device_socket()
            raise TimeoutError('Device did not respond in reasonable time')
        except OSError:
            self._debug('device went away')
            self._close_device_socket()
            raise ControlError(desc='Device closed the network connection')
        except:
            self._debug('other exception')
            traceback.print_exc()
            self._close_device_socket()
            raise
        # defensive: every path above returns or raises, so this is normally unreachable
        raise ControlError(desc='Device responded with incorrect information')
    def _receive_from_client(self, print_debug_info=True):
        '''
        Read and decode one JSON reply from the client, returning
        (opcode-name, result-dict). An empty reply falls through to the
        trailing ControlError. Socket failures close the connection and
        raise TimeoutError / ControlError.
        '''
        from calibre.utils.config import from_json
        extra_debug = self.settings().extra_customization[self.OPT_EXTRA_DEBUG]
        try:
            v = self._read_string_from_net()
            if print_debug_info and extra_debug:
                self._debug('received string', v)
            if v:
                v = json.loads(v, object_hook=from_json)
                if print_debug_info and extra_debug:
                    self._debug('receive after decode')  # , v)
                return (self.reverse_opcodes[v[0]], v[1])
            self._debug('protocol error -- empty json string')
        except socket.timeout:
            self._debug('timeout communicating with device')
            self._close_device_socket()
            raise TimeoutError('Device did not respond in reasonable time')
        except OSError:
            self._debug('device went away')
            self._close_device_socket()
            raise ControlError(desc='Device closed the network connection')
        except:
            self._debug('other exception')
            traceback.print_exc()
            self._close_device_socket()
            raise
        # reached only when the client sent an empty string
        raise ControlError(desc='Device responded with incorrect information')
    # Write a file to the device as a series of binary strings.
    def _put_file(self, infile, lpath, book_metadata, this_book, total_books):
        '''
        Announce a book with SEND_BOOK, then stream its bytes to the client
        in max_book_packet_len chunks. *infile* may be a path or a readable
        file object. Returns (length, lpath); the client may rewrite lpath
        when it supports OK-to-sendbook. The -1 failure return is currently
        dead code (*failed* is never set).
        '''
        close_ = False
        if not hasattr(infile, 'read'):
            infile, close_ = open(infile, 'rb'), True
        infile.seek(0, os.SEEK_END)
        length = infile.tell()
        book_metadata.size = length
        infile.seek(0)
        opcode, result = self._call_client('SEND_BOOK', {'lpath': lpath, 'length': length,
                               'metadata': book_metadata, 'thisBook': this_book,
                               'totalBooks': total_books,
                               'willStreamBooks': True,
                               'willStreamBinary' : True,
                               'wantsSendOkToSendbook' : self.can_send_ok_to_sendbook,
                               'canSupportLpathChanges': True},
                          print_debug_info=False,
                          wait_for_response=self.can_send_ok_to_sendbook)
        if self.can_send_ok_to_sendbook:
            if opcode == 'ERROR':
                raise UserFeedback(msg='Sending book %s to device failed' % lpath,
                                   details=result.get('message', ''),
                                   level=UserFeedback.ERROR)
                return  # unreachable: kept from the original for reference
            # the client may have renamed the book; record its lpath
            lpath = result.get('lpath', lpath)
            book_metadata.lpath = lpath
        self._set_known_metadata(book_metadata)
        pos = 0
        failed = False
        with infile:
            while True:
                b = infile.read(self.max_book_packet_len)
                blen = len(b)
                if not b:
                    break
                self._send_byte_string(self.device_socket, b)
                pos += blen
        self.time = None
        if close_:
            # redundant after the with-block, but harmless
            infile.close()
        return (-1, None) if failed else (length, lpath)
    def _metadata_in_cache(self, uuid, ext_or_lpath, lastmod):
        '''
        Return a deep copy of the cached metadata for (uuid, ext-or-lpath)
        when its last_modified matches *lastmod*; otherwise None. Refreshes
        the entry's last_used timestamp on a hit. *lastmod* may be a string
        (the literal 'None' means "no date" and always misses).
        '''
        from calibre.utils.date import now, parse_date
        try:
            key = self._make_metadata_cache_key(uuid, ext_or_lpath)
            if isinstance(lastmod, str):
                if lastmod == 'None':
                    return None
                lastmod = parse_date(lastmod)
            if key in self.device_book_cache and self.device_book_cache[key]['book'].last_modified == lastmod:
                self.device_book_cache[key]['last_used'] = now()
                return self.device_book_cache[key]['book'].deepcopy(lambda : SDBook('', ''))
        except:
            traceback.print_exc()
        return None
    def _metadata_already_on_device(self, book):
        '''
        Return a truthy value when the device already has *book*'s metadata
        (matching uuid, last_modified, and thumbnail height), falsy otherwise
        or on any error.
        '''
        try:
            v = self.known_metadata.get(book.lpath, None)
            if v is not None:
                # Metadata is the same if the uuids match, if the last_modified dates
                # match, and if the height of the thumbnails is the same. The last
                # is there to allow a device to demand a different thumbnail size
                if (v.get('uuid', None) == book.get('uuid', None) and
                        v.get('last_modified', None) == book.get('last_modified', None)):
                    v_thumb = v.get('thumbnail', None)
                    b_thumb = book.get('thumbnail', None)
                    if bool(v_thumb) != bool(b_thumb):
                        return False
                    # thumbnail[1] is compared; presumably the height — confirm
                    return not v_thumb or v_thumb[1] == b_thumb[1]
        except:
            traceback.print_exc()
        return False
def _uuid_in_cache(self, uuid, ext):
try:
for b in itervalues(self.device_book_cache):
metadata = b['book']
if metadata.get('uuid', '') != uuid:
continue
if metadata.get('lpath', '').endswith(ext):
return metadata
except:
traceback.print_exc()
return None
    def _read_metadata_cache(self):
        '''
        Load the on-disk metadata cache for this device into
        self.device_book_cache / self.known_metadata. Removes obsolete
        cache files from older driver versions. The cache file is a series
        of records, each an 8-byte length line followed by that many bytes
        of JSON. Any failure wipes the in-memory cache and deletes the file.
        '''
        self._debug('device uuid', self.device_uuid)
        from calibre.utils.config import from_json
        try:
            # old pickle-format cache from previous versions: delete it
            old_cache_file_name = os.path.join(cache_dir(),
                           'device_drivers_' + self.__class__.__name__ +
                                '_metadata_cache.pickle')
            if os.path.exists(old_cache_file_name):
                os.remove(old_cache_file_name)
        except:
            pass
        try:
            # old per-class json cache: also obsolete
            old_cache_file_name = os.path.join(cache_dir(),
                           'device_drivers_' + self.__class__.__name__ +
                                '_metadata_cache.json')
            if os.path.exists(old_cache_file_name):
                os.remove(old_cache_file_name)
        except:
            pass
        cache_file_name = os.path.join(cache_dir(),
                           'wireless_device_' + self.device_uuid +
                                '_metadata_cache.json')
        self.device_book_cache = defaultdict(dict)
        self.known_metadata = {}
        try:
            count = 0
            if os.path.exists(cache_file_name):
                with open(cache_file_name, mode='rb') as fd:
                    while True:
                        rec_len = fd.readline()
                        # record header is exactly 7 digits + newline
                        if len(rec_len) != 8:
                            break
                        raw = fd.read(int(rec_len))
                        book = json.loads(raw.decode('utf-8'), object_hook=from_json)
                        key = list(book.keys())[0]
                        metadata = self.json_codec.raw_to_book(book[key]['book'],
                                                            SDBook, self.PREFIX)
                        book[key]['book'] = metadata
                        self.device_book_cache.update(book)
                        lpath = metadata.get('lpath')
                        self.known_metadata[lpath] = metadata
                        count += 1
            self._debug('loaded', count, 'cache items')
        except:
            traceback.print_exc()
            # corrupt cache: start fresh and remove the bad file
            self.device_book_cache = defaultdict(dict)
            self.known_metadata = {}
            try:
                if os.path.exists(cache_file_name):
                    os.remove(cache_file_name)
            except:
                traceback.print_exc()
    def _write_metadata_cache(self):
        '''
        Persist self.device_book_cache to disk, purging entries unused for
        more than PURGE_CACHE_ENTRIES_DAYS. Writes to a .tmp file and then
        atomically renames it over the .json cache. Errors are logged and
        swallowed — the cache is best-effort.
        '''
        self._debug()
        from calibre.utils.date import now
        now_ = now()
        from calibre.utils.config import to_json
        try:
            purged = 0
            count = 0
            prefix = os.path.join(cache_dir(),
                        'wireless_device_' + self.device_uuid + '_metadata_cache')
            with open(prefix + '.tmp', mode='wb') as fd:
                for key,book in iteritems(self.device_book_cache):
                    if (now_ - book['last_used']).days > self.PURGE_CACHE_ENTRIES_DAYS:
                        purged += 1
                        continue
                    json_metadata = defaultdict(dict)
                    json_metadata[key]['book'] = self.json_codec.encode_book_metadata(book['book'])
                    json_metadata[key]['last_used'] = book['last_used']
                    result = as_bytes(json.dumps(json_metadata, indent=2, default=to_json))
                    # 7-digit length header; +1 accounts for the trailing newline
                    fd.write(("%0.7d\n"%(len(result)+1)).encode('ascii'))
                    fd.write(result)
                    fd.write(b'\n')
                    count += 1
            self._debug('wrote', count, 'entries, purged', purged, 'entries')
            from calibre.utils.filenames import atomic_rename
            atomic_rename(fd.name, prefix + '.json')
        except:
            traceback.print_exc()
def _make_metadata_cache_key(self, uuid, lpath_or_ext):
key = None
if uuid and lpath_or_ext:
key = uuid + lpath_or_ext
return key
    def _set_known_metadata(self, book, remove=False):
        '''
        Record (or with remove=True, forget) *book* in both the lpath-keyed
        known_metadata dict and the uuid-keyed device_book_cache. The cache
        key uses lpath or extension depending on what the client reported
        (client_cache_uses_lpaths). Stores a deep copy so later edits to
        *book* do not mutate the cache.
        '''
        from calibre.utils.date import now
        lpath = book.lpath
        ext = os.path.splitext(lpath)[1]
        uuid = book.get('uuid', None)
        if self.client_cache_uses_lpaths:
            key = self._make_metadata_cache_key(uuid, lpath)
        else:
            key = self._make_metadata_cache_key(uuid, ext)
        if remove:
            self.known_metadata.pop(lpath, None)
            if key:
                self.device_book_cache.pop(key, None)
        else:
            # Check if we have another UUID with the same lpath. If so, remove it
            # Must try both the extension and the lpath because of the cache change
            existing_uuid = self.known_metadata.get(lpath, {}).get('uuid', None)
            if existing_uuid and existing_uuid != uuid:
                self.device_book_cache.pop(self._make_metadata_cache_key(existing_uuid, ext), None)
                self.device_book_cache.pop(self._make_metadata_cache_key(existing_uuid, lpath), None)
            new_book = book.deepcopy()
            self.known_metadata[lpath] = new_book
            if key:
                self.device_book_cache[key]['book'] = new_book
                self.device_book_cache[key]['last_used'] = now()
    # Force close a socket. The shutdown permits the close even if data transfer
    # is in progress
    def _close_socket(self, the_socket):
        '''Shut down (both directions) and close *the_socket*.'''
        try:
            the_socket.shutdown(socket.SHUT_RDWR)
        except:
            # the shutdown can fail if the socket isn't fully connected. Ignore it
            pass
        the_socket.close()
    def _close_device_socket(self):
        '''
        Close the per-device socket (if open), flush the metadata cache to
        disk, and mark the driver as disconnected.
        '''
        if self.device_socket is not None:
            try:
                self._close_socket(self.device_socket)
            except:
                pass
            self.device_socket = None
            # persist what we know before losing the connection
            self._write_metadata_cache()
        self.is_connected = False
    def _attach_to_port(self, sock, port):
        '''
        Bind *sock* to *port* on the (optionally user-forced) IP address.
        Returns the port on success, 0 when the bind failed with a socket
        error; any other exception propagates.
        '''
        try:
            ip_addr = self.settings().extra_customization[self.OPT_FORCE_IP_ADDRESS]
            self._debug('try ip address "'+ ip_addr + '"', 'on port', port)
            if ip_addr:
                sock.bind((ip_addr, port))
            else:
                sock.bind(('', port))
        except OSError:
            self._debug('socket error on port', port)
            port = 0
        except:
            self._debug('Unknown exception while attaching port to socket')
            traceback.print_exc()
            raise
        return port
    def _close_listen_socket(self):
        '''Close the listening socket (and broadcast socket, if any) and mark disconnected.'''
        self._close_socket(self.listen_socket)
        self.listen_socket = None
        self.is_connected = False
        if getattr(self, 'broadcast_socket', None) is not None:
            self._close_socket(self.broadcast_socket)
            self.broadcast_socket = None
    def _read_file_metadata(self, temp_file_name):
        '''
        Return the Metadata read from the ebook file at *temp_file_name*,
        using quick (cover-less) metadata reading and the driver's save
        template to guess fields from the file name.
        '''
        from calibre.customize.ui import quick_metadata
        from calibre.ebooks.metadata.meta import get_metadata
        ext = temp_file_name.rpartition('.')[-1].lower()
        with open(temp_file_name, 'rb') as stream:
            with quick_metadata:
                return get_metadata(stream, stream_type=ext,
                        force_read_metadata=True,
                        pattern=build_template_regexp(self.save_template()))
    # The public interface methods.
    @synchronous('sync_lock')
    def detect_managed_devices(self, devices_on_system, force_refresh=False):
        '''
        Poll for a connected wireless device. When already connected, probe
        liveness (cheap select first, periodic NOOP otherwise) and enforce
        the idle auto-disconnect option. When not connected, accept a pending
        connection from the listener queue, rejecting peers that have failed
        too many times. Returns self when connected, else None.
        '''
        if getattr(self, 'listen_socket', None) is None:
            self.is_connected = False
        if self.is_connected:
            self.noop_counter += 1
            if (self.noop_counter > self.SEND_NOOP_EVERY_NTH_PROBE and
                    (self.noop_counter % self.SEND_NOOP_EVERY_NTH_PROBE) != 1):
                try:
                    ans = select.select((self.device_socket,), (), (), 0)
                    if len(ans[0]) == 0:
                        return self
                    # The socket indicates that something is there. Given the
                    # protocol, this can only be a disconnect notification. Fall
                    # through and actually try to talk to the client.
                    # This will usually toss an exception if the socket is gone.
                except:
                    pass
            if (self.settings().extra_customization[self.OPT_AUTODISCONNECT] and
                    self.noop_counter > self.DISCONNECT_AFTER_N_SECONDS):
                self._close_device_socket()
                self._debug('timeout -- disconnected')
            else:
                try:
                    if self._call_client('NOOP', dict())[0] is None:
                        self._close_device_socket()
                except:
                    self._close_device_socket()
            return self if self.is_connected else None
        if getattr(self, 'listen_socket', None) is not None:
            try:
                ans = self.connection_queue.get_nowait()
                self.device_socket = ans
                self.is_connected = True
                try:
                    peer = self.device_socket.getpeername()[0]
                    attempts = self.connection_attempts.get(peer, 0)
                    if attempts >= self.MAX_UNSUCCESSFUL_CONNECTS:
                        self._debug('too many connection attempts from', peer)
                        self._close_device_socket()
                        raise InitialConnectionError(_('Too many connection attempts from %s') % peer)
                    else:
                        self.connection_attempts[peer] = attempts + 1
                except InitialConnectionError:
                    raise
                except:
                    pass
            except queue.Empty:
                self.is_connected = False
            return self if self.is_connected else None
        return None
    @synchronous('sync_lock')
    def debug_managed_device_detection(self, devices_on_system, output):
        '''
        Write connection-debugging information to *output*. Returns True when
        a wireless device is connected, False otherwise.
        '''
        from functools import partial
        p = partial(prints, file=output)
        if self.is_connected:
            p("A wireless device is connected")
            return True
        all_ip_addresses = get_all_ips()
        if all_ip_addresses:
            p("All IP addresses", all_ip_addresses)
        else:
            p("No IP addresses found")
        p("No device is connected")
        return False
    @synchronous('sync_lock')
    def open(self, connected_device, library_uuid):
        '''
        Perform the GET_INITIALIZATION_INFO handshake with an already-accepted
        client: negotiate protocol version, capabilities, accepted formats,
        thumbnail geometry, sync columns, and (optionally) a SHA1
        challenge/response password check. Returns True on success; False on
        a protocol error (after closing the socket); raises OpenFeedback for
        too-old clients and OpenFailed for a bad password.
        '''
        from calibre.utils.date import isoformat, now
        self._debug()
        if not self.is_connected:
            # We have been called to retry the connection. Give up immediately
            raise ControlError(desc='Attempt to open a closed device')
        self.current_library_uuid = library_uuid
        self.current_library_name = current_library_name()
        self.device_uuid = ''
        try:
            password = self.settings().extra_customization[self.OPT_PASSWORD]
            if password:
                # challenge/response: client must return sha1(password + challenge)
                challenge = isoformat(now())
                hasher = hashlib.sha1()
                hasher.update(password.encode('UTF-8'))
                hasher.update(challenge.encode('UTF-8'))
                hash_digest = hasher.hexdigest()
            else:
                challenge = ''
                hash_digest = ''
            formats = self.ALL_FORMATS[:]
            extras = [f.lower() for f in
                      self.settings().extra_customization[self.OPT_EXTRA_EXTENSIONS].split(',') if f]
            formats.extend(extras)
            opcode, result = self._call_client('GET_INITIALIZATION_INFO',
                    {'serverProtocolVersion': self.PROTOCOL_VERSION,
                     'validExtensions': formats,
                     'passwordChallenge': challenge,
                     'currentLibraryName': self.current_library_name,
                     'currentLibraryUUID': library_uuid,
                     'pubdateFormat': tweaks['gui_pubdate_display_format'],
                     'timestampFormat': tweaks['gui_timestamp_display_format'],
                     'lastModifiedFormat': tweaks['gui_last_modified_display_format'],
                     'calibre_version': numeric_version,
                     'canSupportUpdateBooks': True,
                     'canSupportLpathChanges': True})
            if opcode != 'OK':
                # Something wrong with the return. Close the socket
                # and continue.
                self._debug('Protocol error - Opcode not OK')
                self._close_device_socket()
                return False
            if not result.get('versionOK', False):
                # protocol mismatch
                self._debug('Protocol error - protocol version mismatch')
                self._close_device_socket()
                return False
            if result.get('maxBookContentPacketLen', 0) <= 0:
                # protocol mismatch
                self._debug('Protocol error - bogus book packet length')
                self._close_device_socket()
                return False
            # Set up to recheck the sync columns
            self.have_checked_sync_columns = False
            client_can_stream_books = result.get('canStreamBooks', False)
            self._debug('Device can stream books', client_can_stream_books)
            client_can_stream_metadata = result.get('canStreamMetadata', False)
            self._debug('Device can stream metadata', client_can_stream_metadata)
            client_can_receive_book_binary = result.get('canReceiveBookBinary', False)
            self._debug('Device can receive book binary', client_can_receive_book_binary)
            client_can_delete_multiple = result.get('canDeleteMultipleBooks', False)
            self._debug('Device can delete multiple books', client_can_delete_multiple)
            if not (client_can_stream_books and
                    client_can_stream_metadata and
                    client_can_receive_book_binary and
                    client_can_delete_multiple):
                # all four capabilities are mandatory for the current protocol
                self._debug('Software on device too old')
                self._close_device_socket()
                raise OpenFeedback(_('The app on your device is too old and is no '
                                     'longer supported. Update it to a newer version.'))
            self.client_can_use_metadata_cache = result.get('canUseCachedMetadata', False)
            self._debug('Device can use cached metadata', self.client_can_use_metadata_cache)
            self.client_cache_uses_lpaths = result.get('cacheUsesLpaths', False)
            self._debug('Cache uses lpaths', self.client_cache_uses_lpaths)
            self.can_send_ok_to_sendbook = result.get('canSendOkToSendbook', False)
            self._debug('Can send OK to sendbook', self.can_send_ok_to_sendbook)
            self.can_accept_library_info = result.get('canAcceptLibraryInfo', False)
            self._debug('Can accept library info', self.can_accept_library_info)
            self.will_ask_for_update_books = result.get('willAskForUpdateBooks', False)
            self._debug('Will ask for update books', self.will_ask_for_update_books)
            self.set_temp_mark_when_syncing_read = \
                                result.get('setTempMarkWhenReadInfoSynced', False)
            self._debug('Will set temp mark when syncing read',
                        self.set_temp_mark_when_syncing_read)
            if not self.settings().extra_customization[self.OPT_USE_METADATA_CACHE]:
                self.client_can_use_metadata_cache = False
                self._debug('metadata caching disabled by option')
            self.client_device_kind = result.get('deviceKind', '')
            self._debug('Client device kind', self.client_device_kind)
            self.client_device_name = result.get('deviceName', self.client_device_kind)
            self._debug('Client device name', self.client_device_name)
            self.client_app_name = result.get('appName', "")
            self._debug('Client app name', self.client_app_name)
            self.app_version_number = result.get('ccVersionNumber', '0')
            self._debug('App version #:', self.app_version_number)
            try:
                # nag old Calibre Companion versions to update (best effort)
                if (self.client_app_name == 'CalibreCompanion' and
                        self.app_version_number < self.CURRENT_CC_VERSION):
                    self._debug('Telling client to update')
                    self._call_client("DISPLAY_MESSAGE",
                            {'messageKind': self.MESSAGE_UPDATE_NEEDED,
                             'lastestKnownAppVersion': self.CURRENT_CC_VERSION})
            except:
                pass
            self.max_book_packet_len = result.get('maxBookContentPacketLen',
                                                  self.BASE_PACKET_LEN)
            self._debug('max_book_packet_len', self.max_book_packet_len)
            exts = result.get('acceptedExtensions', None)
            if exts is None or not isinstance(exts, list) or len(exts) == 0:
                self._debug('Protocol error - bogus accepted extensions')
                self._close_device_socket()
                return False
            self.client_wants_uuid_file_names = result.get('useUuidFileNames', False)
            self._debug('Device wants UUID file names', self.client_wants_uuid_file_names)
            config = self._configProxy()
            config['format_map'] = exts
            self._debug('selected formats', config['format_map'])
            self.exts_path_lengths = result.get('extensionPathLengths', {})
            self._debug('extension path lengths', self.exts_path_lengths)
            self.THUMBNAIL_HEIGHT = result.get('coverHeight', self.DEFAULT_THUMBNAIL_HEIGHT)
            self._debug('cover height', self.THUMBNAIL_HEIGHT)
            if 'coverWidth' in result:
                # Setting this field forces the aspect ratio
                self.THUMBNAIL_WIDTH = result.get('coverWidth',
                                      (self.DEFAULT_THUMBNAIL_HEIGHT/3) * 4)
                self._debug('cover width', self.THUMBNAIL_WIDTH)
            elif hasattr(self, 'THUMBNAIL_WIDTH'):
                delattr(self, 'THUMBNAIL_WIDTH')
            self.is_read_sync_col = result.get('isReadSyncCol', None)
            self._debug('Device is_read sync col', self.is_read_sync_col)
            self.is_read_date_sync_col = result.get('isReadDateSyncCol', None)
            self._debug('Device is_read_date sync col', self.is_read_date_sync_col)
            if password:
                returned_hash = result.get('passwordHash', None)
                if result.get('passwordHash', None) is None:
                    # protocol mismatch
                    self._debug('Protocol error - missing password hash')
                    self._close_device_socket()
                    return False
                if returned_hash != hash_digest:
                    # bad password
                    self._debug('password mismatch')
                    try:
                        self._call_client("DISPLAY_MESSAGE",
                                {'messageKind': self.MESSAGE_PASSWORD_ERROR,
                                 'currentLibraryName': self.current_library_name,
                                 'currentLibraryUUID': library_uuid})
                    except:
                        pass
                    self._close_device_socket()
                    # Don't bother with a message. The user will be informed on
                    # the device.
                    raise OpenFailed('')
            try:
                # successful open resets the peer's failed-connection counter
                peer = self.device_socket.getpeername()[0]
                self.connection_attempts[peer] = 0
            except:
                pass
            return True
        except socket.timeout:
            self._close_device_socket()
        except OSError:
            x = sys.exc_info()[1]
            self._debug('unexpected socket exception', x.args[0])
            self._close_device_socket()
            raise
        return False
def get_gui_name(self):
if getattr(self, 'client_device_name', None):
return self.gui_name_template%(self.gui_name, self.client_device_name)
if getattr(self, 'client_device_kind', None):
return self.gui_name_template%(self.gui_name, self.client_device_kind)
return self.gui_name
    def config_widget(self):
        '''Return the Qt configuration widget for this driver's settings.'''
        from calibre.gui2.device_drivers.configwidget import ConfigWidget
        cw = ConfigWidget(self.settings(), self.FORMATS, self.SUPPORTS_SUB_DIRS,
                          self.MUST_READ_METADATA, self.SUPPORTS_USE_AUTHOR_SORT,
                          self.EXTRA_CUSTOMIZATION_MESSAGE, self)
        return cw
    @synchronous('sync_lock')
    def get_device_information(self, end_session=True):
        '''
        Fetch the client's drive info, refresh and push it back, load the
        metadata cache, and return (gui name, device version, sw version, '',
        {'main': driveinfo}).

        NOTE(review): the failure path returns a 4-tuple (no driveinfo dict)
        while success returns a 5-tuple — callers must tolerate both; confirm.
        '''
        self._debug()
        self.report_progress(1.0, _('Get device information...'))
        opcode, result = self._call_client('GET_DEVICE_INFORMATION', dict())
        if opcode == 'OK':
            self.driveinfo = result['device_info']
            self._update_driveinfo_record(self.driveinfo, self.PREFIX, 'main')
            self.device_uuid = self.driveinfo['device_store_uuid']
            self._call_client('SET_CALIBRE_DEVICE_INFO', self.driveinfo)
            self._read_metadata_cache()
            return (self.get_gui_name(), result['device_version'],
                    result['version'], '', {'main':self.driveinfo})
        return (self.get_gui_name(), '', '', '')
@synchronous('sync_lock')
def set_driveinfo_name(self, location_code, name):
self._update_driveinfo_record(self.driveinfo, "main", name)
self._call_client('SET_CALIBRE_DEVICE_NAME',
{'location_code': 'main', 'name':name})
    @synchronous('sync_lock')
    def reset(self, key='-1', log_packets=False, report_progress=None,
              detected_device=None) :
        '''Driver-API reset hook: only (re)installs the progress reporter.'''
        self._debug()
        self.set_progress_reporter(report_progress)
@synchronous('sync_lock')
def set_progress_reporter(self, report_progress):
self._debug()
self.report_progress = report_progress
if self.report_progress is None:
self.report_progress = lambda x, y: x
    @synchronous('sync_lock')
    def card_prefix(self, end_session=True):
        '''Wireless devices expose no storage cards: always (None, None).'''
        self._debug()
        return (None, None)
    @synchronous('sync_lock')
    def total_space(self, end_session=True):
        '''
        Return (main, cardA, cardB) total space; only main is meaningful.
        Returns (0, 0, 0) when the client reply is not OK.
        '''
        self._debug()
        opcode, result = self._call_client('TOTAL_SPACE', {})
        if opcode == 'OK':
            return (result['total_space_on_device'], 0, 0)
        # protocol error if we get here
        return (0, 0, 0)
    @synchronous('sync_lock')
    def free_space(self, end_session=True):
        '''
        Return (main, cardA, cardB) free space; only main is meaningful.
        Returns (0, 0, 0) when the client reply is not OK.
        '''
        self._debug()
        opcode, result = self._call_client('FREE_SPACE', {})
        if opcode == 'OK':
            self._debug('free space:', result['free_space_on_device'])
            return (result['free_space_on_device'], 0, 0)
        # protocol error if we get here
        return (0, 0, 0)
    @synchronous('sync_lock')
    def books(self, oncard=None, end_session=True):
        '''
        Build and return the device booklist.

        When the client supports cached metadata it first streams a light
        record per book (uuid/lpath/last_modified); cache hits are used
        directly and only misses are requested in full (by priKey). Books the
        client flags as '_new_book_' have their metadata re-read from the
        actual file. Card slots do not exist, so a non-None *oncard* returns
        an empty booklist.
        '''
        self._debug(oncard)
        if oncard is not None:
            return CollectionsBookList(None, None, None)
        opcode, result = self._call_client('GET_BOOK_COUNT',
                            {'canStream':True,
                             'canScan':True,
                             'willUseCachedMetadata': self.client_can_use_metadata_cache,
                             'supportsSync': (bool(self.is_read_sync_col) or
                                              bool(self.is_read_date_sync_col)),
                             'canSupportBookFormatSync': True})
        bl = CollectionsBookList(None, self.PREFIX, self.settings)
        if opcode == 'OK':
            count = result['count']
            will_use_cache = self.client_can_use_metadata_cache
            if will_use_cache:
                books_on_device = []
                self._debug('caching. count=', count)
                for i in range(0, count):
                    opcode, result = self._receive_from_client(print_debug_info=False)
                    books_on_device.append(result)
                self._debug('received all books. count=', count)
                books_to_send = []
                lpaths_on_device = set()
                for r in books_on_device:
                    if r.get('lpath', None):
                        book = self._metadata_in_cache(r['uuid'], r['lpath'],
                                                       r['last_modified'])
                    else:
                        book = self._metadata_in_cache(r['uuid'], r['extension'],
                                                       r['last_modified'])
                    if book:
                        if self.client_cache_uses_lpaths:
                            lpaths_on_device.add(r.get('lpath'))
                        bl.add_book_extended(book, replace_metadata=True,
                                check_for_duplicates=not self.client_cache_uses_lpaths)
                        # carry the client's per-book sync state onto the cached copy
                        book.set('_is_read_', r.get('_is_read_', None))
                        book.set('_sync_type_', r.get('_sync_type_', None))
                        book.set('_last_read_date_', r.get('_last_read_date_', None))
                        book.set('_format_mtime_', r.get('_format_mtime_', None))
                    else:
                        books_to_send.append(r['priKey'])
                self._debug('processed cache. count=', len(books_on_device))
                count_of_cache_items_deleted = 0
                if self.client_cache_uses_lpaths:
                    # drop cache entries for books no longer on the device
                    for lpath in tuple(self.known_metadata):
                        if lpath not in lpaths_on_device:
                            try:
                                uuid = self.known_metadata[lpath].get('uuid', None)
                                if uuid is not None:
                                    key = self._make_metadata_cache_key(uuid, lpath)
                                    self.device_book_cache.pop(key, None)
                                    self.known_metadata.pop(lpath, None)
                                    count_of_cache_items_deleted += 1
                            except:
                                self._debug('Exception while deleting book from caches', lpath)
                                traceback.print_exc()
                    self._debug('removed', count_of_cache_items_deleted, 'books from caches')
                count = len(books_to_send)
                self._debug('caching. Need count from device', count)
                # tell the client how many (and which) full records to stream
                self._call_client('NOOP', {'count': count},
                                  print_debug_info=False, wait_for_response=False)
                for priKey in books_to_send:
                    self._call_client('NOOP', {'priKey':priKey},
                                  print_debug_info=False, wait_for_response=False)
            for i in range(0, count):
                if (i % 100) == 0:
                    self._debug('getting book metadata. Done', i, 'of', count)
                opcode, result = self._receive_from_client(print_debug_info=False)
                if opcode == 'OK':
                    try:
                        if '_series_sort_' in result:
                            del result['_series_sort_']
                        book = self.json_codec.raw_to_book(result, SDBook, self.PREFIX)
                        book.set('_is_read_', result.get('_is_read_', None))
                        book.set('_sync_type_', result.get('_sync_type_', None))
                        book.set('_last_read_date_', result.get('_last_read_date_', None))
                        bl.add_book_extended(book, replace_metadata=True,
                                check_for_duplicates=not self.client_cache_uses_lpaths)
                        if '_new_book_' in result:
                            book.set('_new_book_', True)
                        else:
                            self._set_known_metadata(book)
                    except:
                        self._debug('exception retrieving metadata for book', result.get('title', 'Unknown'))
                        traceback.print_exc()
                else:
                    raise ControlError(desc='book metadata not returned')
            total = 0
            for book in bl:
                if book.get('_new_book_', None):
                    total += 1
            count = 0
            for book in bl:
                if book.get('_new_book_', None):
                    # new on the device: fetch the file and read real metadata
                    paths = [book.lpath]
                    self._set_known_metadata(book, remove=True)
                    self.prepare_addable_books(paths, this_book=count, total_books=total)
                    book.smart_update(self._read_file_metadata(paths[0]))
                    del book._new_book_
                    count += 1
        self._debug('finished getting book metadata')
        return bl
    @synchronous('sync_lock')
    def sync_booklists(self, booklists, end_session=True):
        '''
        Push collection membership and changed book metadata to the client.
        Only books that are new, forced (_force_send_metadata_), or whose
        metadata differs from what the device already has are sent. Finishes
        by writing the metadata cache so a disconnect loses little.
        '''
        colattrs = [x.strip() for x in
                self.settings().extra_customization[self.OPT_COLLECTIONS].split(',')]
        self._debug('collection attributes', colattrs)
        coldict = {}
        if colattrs:
            collections = booklists[0].get_collections(colattrs)
            for k,v in iteritems(collections):
                lpaths = []
                for book in v:
                    lpaths.append(book.lpath)
                coldict[k] = lpaths
        # If we ever do device_db plugboards, this is where it will go. We will
        # probably need to send two booklists, one with calibre's data that is
        # given back by "books", and one that has been plugboarded.
        books_to_send = []
        for book in booklists[0]:
            if (book.get('_force_send_metadata_', None) or
                    not self._metadata_already_on_device(book)):
                books_to_send.append(book)
        count = len(books_to_send)
        self._call_client('SEND_BOOKLISTS', {'count': count,
                     'collections': coldict,
                     'willStreamMetadata': True,
                     'supportsSync': (bool(self.is_read_sync_col) or
                                      bool(self.is_read_date_sync_col))},
                          wait_for_response=False)
        if count:
            for i,book in enumerate(books_to_send):
                self._debug('sending metadata for book', book.lpath, book.title)
                self._set_known_metadata(book)
                opcode, result = self._call_client(
                        'SEND_BOOK_METADATA',
                        {'index': i, 'count': count, 'data': book,
                         'supportsSync': (bool(self.is_read_sync_col) or
                                          bool(self.is_read_date_sync_col))},
                        print_debug_info=False,
                        wait_for_response=False)
                if not self.have_bad_sync_columns:
                    # Update the local copy of the device's read info just in case
                    # the device is re-synced. This emulates what happens on the device
                    # when the metadata is received.
                    try:
                        if bool(self.is_read_sync_col):
                            book.set('_is_read_', book.get(self.is_read_sync_col, None))
                    except:
                        self._debug('failed to set local copy of _is_read_')
                        traceback.print_exc()
                    try:
                        if bool(self.is_read_date_sync_col):
                            book.set('_last_read_date_',
                                     book.get(self.is_read_date_sync_col, None))
                    except:
                        self._debug('failed to set local copy of _last_read_date_')
                        traceback.print_exc()
        # Write the cache here so that if we are interrupted on disconnect then the
        # almost-latest info will be available.
        self._write_metadata_cache()
    @synchronous('sync_lock')
    def eject(self):
        # Politely disconnect: tell the client app we are ejecting (a NOOP
        # with the 'ejecting' flag), then close the device socket.
        self._debug()
        self._call_client('NOOP', {'ejecting': True})
        self._close_device_socket()
    @synchronous('sync_lock')
    def post_yank_cleanup(self):
        # Nothing to clean up for a wireless device; log the call only.
        self._debug()
@synchronous('sync_lock')
def upload_books(self, files, names, on_card=None, end_session=True,
metadata=None):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(names)
else:
self._debug()
if not self.settings().extra_customization[self.OPT_IGNORE_FREESPACE]:
sanity_check(on_card='', files=files, card_prefixes=[],
free_space=self.free_space())
paths = []
names = iter(names)
metadata = iter(metadata)
for i, infile in enumerate(files):
mdata, fname = next(metadata), next(names)
lpath = self._create_upload_path(mdata, fname, create_dirs=False)
self._debug('lpath', lpath)
if not hasattr(infile, 'read'):
infile = USBMS.normalize_path(infile)
book = SDBook(self.PREFIX, lpath, other=mdata)
length, lpath = self._put_file(infile, lpath, book, i, len(files))
if length < 0:
raise ControlError(desc='Sending book %s to device failed' % lpath)
paths.append((lpath, length))
# No need to deal with covers. The client will get the thumbnails
# in the mi structure
self.report_progress((i + 1) / float(len(files)), _('Transferring books to device...'))
self.report_progress(1.0, _('Transferring books to device...'))
self._debug('finished uploading %d books' % (len(files)))
return paths
@synchronous('sync_lock')
def add_books_to_metadata(self, locations, metadata, booklists):
self._debug('adding metadata for %d books' % (len(metadata)))
metadata = iter(metadata)
for i, location in enumerate(locations):
self.report_progress((i + 1) / float(len(locations)),
_('Adding books to device metadata listing...'))
info = next(metadata)
lpath = location[0]
length = location[1]
lpath = self._strip_prefix(lpath)
book = SDBook(self.PREFIX, lpath, other=info)
if book.size is None:
book.size = length
b = booklists[0].add_book(book, replace_metadata=True)
if b:
b._new_book = True
from calibre.utils.date import isoformat, now
b.set('_format_mtime_', isoformat(now()))
self.report_progress(1.0, _('Adding books to device metadata listing...'))
self._debug('finished adding metadata')
@synchronous('sync_lock')
def delete_books(self, paths, end_session=True):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(paths)
else:
self._debug()
new_paths = []
for path in paths:
new_paths.append(self._strip_prefix(path))
opcode, result = self._call_client('DELETE_BOOK', {'lpaths': new_paths})
for i in range(0, len(new_paths)):
opcode, result = self._receive_from_client(False)
self._debug('removed book with UUID', result['uuid'])
self._debug('removed', len(new_paths), 'books')
@synchronous('sync_lock')
def remove_books_from_metadata(self, paths, booklists):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(paths)
else:
self._debug()
for i, path in enumerate(paths):
path = self._strip_prefix(path)
self.report_progress((i + 1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
if path == book.path:
bl.remove_book(book)
self._set_known_metadata(book, remove=True)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
self._debug('finished removing metadata for %d books' % (len(paths)))
@synchronous('sync_lock')
def get_file(self, path, outfile, end_session=True, this_book=None, total_books=None):
if self.settings().extra_customization[self.OPT_EXTRA_DEBUG]:
self._debug(path)
else:
self._debug()
eof = False
position = 0
while not eof:
opcode, result = self._call_client('GET_BOOK_FILE_SEGMENT',
{'lpath' : path, 'position': position,
'thisBook': this_book, 'totalBooks': total_books,
'canStream':True, 'canStreamBinary': True},
print_debug_info=False)
if opcode == 'OK':
length = result.get('fileLength')
remaining = length
while remaining > 0:
v = self._read_binary_from_net(min(remaining, self.max_book_packet_len))
outfile.write(v)
remaining -= len(v)
eof = True
else:
raise ControlError(desc='request for book data failed')
    @synchronous('sync_lock')
    def prepare_addable_books(self, paths, this_book=None, total_books=None):
        # Download each requested book into a persistent temporary file so
        # calibre can import it. Each entry in ``paths`` is replaced with
        # the temp file's path, while the temp file object's ``name``
        # attribute is pointed back at the original device path so callers
        # can recover the source location.
        for idx, path in enumerate(paths):
            (ign, ext) = os.path.splitext(path)
            with PersistentTemporaryFile(suffix=ext) as tf:
                self.get_file(path, tf, this_book=this_book, total_books=total_books)
                paths[idx] = tf.name
                tf.name = path
        return paths
    @synchronous('sync_lock')
    def set_plugboards(self, plugboards, pb_func):
        # Remember the plugboards (metadata transformation rules) and the
        # function used to apply them when metadata is sent to the device.
        self._debug()
        self.plugboards = plugboards
        self.plugboard_func = pb_func
@synchronous('sync_lock')
def set_library_info(self, library_name, library_uuid, field_metadata):
self._debug(library_name, library_uuid)
if self.can_accept_library_info:
other_info = {}
from calibre.ebooks.metadata.sources.prefs import msprefs
other_info['id_link_rules'] = msprefs.get('id_link_rules', {})
self._call_client('SET_LIBRARY_INFO',
{'libraryName' : library_name,
'libraryUuid': library_uuid,
'fieldMetadata': field_metadata.all_metadata(),
'otherInfo': other_info},
print_debug_info=True)
    @synchronous('sync_lock')
    def specialize_global_preferences(self, device_prefs):
        # While this device is connected, force metadata management to
        # happen on connect regardless of the user's global setting.
        device_prefs.set_overrides(manage_device_metadata='on_connect')
    def _show_message(self, message):
        # Ask the client app to display ``message`` as a transient toast.
        self._call_client("DISPLAY_MESSAGE",
                {'messageKind': self.MESSAGE_SHOW_TOAST,
                 'message': message})
    def _check_if_format_send_needed(self, db, id_, book):
        # Decide whether the book's format file must be re-sent to the
        # device. Returns a 2-tuple: the basename of the format to send
        # when calibre's copy is newer than the device's (else None), and
        # True in the second slot only when the calibre format's mtime is
        # in the future (in which case nothing is sent and a warning toast
        # is shown once per connection).
        if not self.will_ask_for_update_books:
            return (None, False)
        from calibre.utils.date import isoformat, parse_date
        try:
            # No recorded arrival time on the device side means we cannot
            # compare; do nothing.
            if not hasattr(book, '_format_mtime_'):
                return (None, False)
            ext = posixpath.splitext(book.lpath)[1][1:]
            fmt_metadata = db.new_api.format_metadata(id_, ext)
            if fmt_metadata:
                calibre_mtime = fmt_metadata['mtime']
                if calibre_mtime > self.now:
                    # Future-dated format: warn once, skip this book.
                    if not self.have_sent_future_dated_book_message:
                        self.have_sent_future_dated_book_message = True
                        self._show_message(_('You have book formats in your library '
                                             'with dates in the future. See calibre '
                                             'for details'))
                    return (None, True)
                cc_mtime = parse_date(book.get('_format_mtime_'), as_utc=True)
                self._debug(book.title, 'cal_mtime', calibre_mtime, 'cc_mtime', cc_mtime)
                if cc_mtime < calibre_mtime:
                    # calibre's copy is newer: record the new send time and
                    # return the file to transfer.
                    book.set('_format_mtime_', isoformat(self.now))
                    return (posixpath.basename(book.lpath), False)
        except:
            self._debug('exception checking if must send format', book.title)
            traceback.print_exc()
        return (None, False)
    @synchronous('sync_lock')
    def synchronize_with_db(self, db, id_, book, first_call):
        '''
        Synchronize the read/read-date sync columns between the device's
        copy of ``book`` and calibre's database record ``id_``. Returns a
        2-tuple: a set of changed book ids (or None when nothing was
        synced) and the result of _check_if_format_send_needed().
        ``first_call`` is True for the first book of a connection and
        resets per-connection state.
        '''
        from calibre.utils.date import is_date_undefined, now, parse_date
        if first_call:
            self.have_sent_future_dated_book_message = False
            self.now = now()
        if self.have_bad_sync_columns or not (self.is_read_sync_col or
                                              self.is_read_date_sync_col):
            # Not syncing or sync columns are invalid
            return (None, self._check_if_format_send_needed(db, id_, book))
        # Check the validity of the columns once per connection. We do it
        # here because we have access to the db to get field_metadata
        if not self.have_checked_sync_columns:
            fm = db.field_metadata.custom_field_metadata()
            if self.is_read_sync_col:
                if self.is_read_sync_col not in fm:
                    self._debug('is_read_sync_col not in field_metadata')
                    self._show_message(_("The read sync column %s is "
                                         "not in calibre's library")%self.is_read_sync_col)
                    self.have_bad_sync_columns = True
                elif fm[self.is_read_sync_col]['datatype'] != 'bool':
                    self._debug('is_read_sync_col not bool type')
                    self._show_message(_("The read sync column %s is "
                                         "not a Yes/No column")%self.is_read_sync_col)
                    self.have_bad_sync_columns = True
            if self.is_read_date_sync_col:
                if self.is_read_date_sync_col not in fm:
                    self._debug('is_read_date_sync_col not in field_metadata')
                    self._show_message(_("The read date sync column %s is "
                                         "not in calibre's library")%self.is_read_date_sync_col)
                    self.have_bad_sync_columns = True
                elif fm[self.is_read_date_sync_col]['datatype'] != 'datetime':
                    self._debug('is_read_date_sync_col not date type')
                    self._show_message(_("The read date sync column %s is "
                                         "not a date column")%self.is_read_date_sync_col)
                    self.have_bad_sync_columns = True
            self.have_checked_sync_columns = True
            if self.have_bad_sync_columns:
                return (None, self._check_if_format_send_needed(db, id_, book))
            # if we are marking synced books, clear all the current marks
            if self.set_temp_mark_when_syncing_read:
                self._debug('clearing temp marks')
                db.set_marked_ids(())
        sync_type = book.get('_sync_type_', None)
        # We need to check if our attributes are in the book. If they are not
        # then this is metadata coming from calibre to the device for the first
        # time, in which case we must not sync it.
        if hasattr(book, '_is_read_'):
            is_read = book.get('_is_read_', None)
            has_is_read = True
        else:
            has_is_read = False
        if hasattr(book, '_last_read_date_'):
            # parse_date returns UNDEFINED_DATE if the value is None
            is_read_date = parse_date(book.get('_last_read_date_', None))
            if is_date_undefined(is_read_date):
                is_read_date = None
            has_is_read_date = True
        else:
            has_is_read_date = False
        force_return_changed_books = False
        changed_books = set()
        if sync_type == 3:
            # The book metadata was built by the device from metadata in the
            # book file itself. It must not be synced, because the metadata is
            # almost surely wrong. However, the fact that we got here means that
            # book matching has succeeded. Arrange that calibre's metadata is
            # sent back to the device. This isn't strictly necessary as sending
            # back the info will be arranged in other ways.
            self._debug('Book with device-generated metadata', book.get('title', 'huh?'))
            book.set('_force_send_metadata_', True)
            force_return_changed_books = True
        elif sync_type == 2:
            # This is a special case where the user just set a sync column. In
            # this case the device value wins if it is not None, otherwise the
            # calibre value wins.
            # Check is_read
            if has_is_read and self.is_read_sync_col:
                try:
                    calibre_val = db.new_api.field_for(self.is_read_sync_col,
                                                       id_, default_value=None)
                    if is_read is not None:
                        # The CC value wins. Check if it is different from calibre's
                        # value to avoid updating the db to the same value
                        if is_read != calibre_val:
                            self._debug('special update calibre to is_read',
                                    book.get('title', 'huh?'), 'to', is_read, calibre_val)
                            changed_books = db.new_api.set_field(self.is_read_sync_col,
                                                                 {id_: is_read})
                            if self.set_temp_mark_when_syncing_read:
                                db.data.toggle_marked_ids({id_})
                    elif calibre_val is not None:
                        # Calibre value wins. Force the metadata for the
                        # book to be sent to the device even if the mod
                        # dates haven't changed.
                        self._debug('special update is_read to calibre value',
                                    book.get('title', 'huh?'), 'to', calibre_val)
                        book.set('_force_send_metadata_', True)
                        force_return_changed_books = True
                except:
                    self._debug('exception special syncing is_read', self.is_read_sync_col)
                    traceback.print_exc()
            # Check is_read_date.
            if has_is_read_date and self.is_read_date_sync_col:
                try:
                    # The db method returns None for undefined dates.
                    calibre_val = db.new_api.field_for(self.is_read_date_sync_col,
                                                       id_, default_value=None)
                    if is_read_date is not None:
                        if is_read_date != calibre_val:
                            self._debug('special update calibre to is_read_date',
                                        book.get('title', 'huh?'), 'to', is_read_date, calibre_val)
                            changed_books |= db.new_api.set_field(self.is_read_date_sync_col,
                                                                  {id_: is_read_date})
                            if self.set_temp_mark_when_syncing_read:
                                db.data.toggle_marked_ids({id_})
                    elif calibre_val is not None:
                        self._debug('special update is_read_date to calibre value',
                                    book.get('title', 'huh?'), 'to', calibre_val)
                        book.set('_force_send_metadata_', True)
                        force_return_changed_books = True
                except:
                    self._debug('exception special syncing is_read_date',
                                self.is_read_sync_col)
                    traceback.print_exc()
        else:
            # This is the standard sync case. If the CC value has changed, it
            # wins, otherwise the calibre value is synced to CC in the normal
            # fashion (mod date)
            if has_is_read and self.is_read_sync_col:
                try:
                    orig_is_read = book.get(self.is_read_sync_col, None)
                    if is_read != orig_is_read:
                        # The value in the device's is_read checkbox is not the
                        # same as the last one that came to the device from
                        # calibre during the last connect, meaning that the user
                        # changed it. Write the one from the device to calibre's
                        # db.
                        self._debug('standard update is_read', book.get('title', 'huh?'),
                                    'to', is_read, 'was', orig_is_read)
                        changed_books = db.new_api.set_field(self.is_read_sync_col,
                                                             {id_: is_read})
                        if self.set_temp_mark_when_syncing_read:
                            db.data.toggle_marked_ids({id_})
                except:
                    self._debug('exception standard syncing is_read', self.is_read_sync_col)
                    traceback.print_exc()
            if has_is_read_date and self.is_read_date_sync_col:
                try:
                    orig_is_read_date = book.get(self.is_read_date_sync_col, None)
                    if is_date_undefined(orig_is_read_date):
                        orig_is_read_date = None
                    if is_read_date != orig_is_read_date:
                        self._debug('standard update is_read_date', book.get('title', 'huh?'),
                                    'to', is_read_date, 'was', orig_is_read_date)
                        changed_books |= db.new_api.set_field(self.is_read_date_sync_col,
                                                              {id_: is_read_date})
                        if self.set_temp_mark_when_syncing_read:
                            db.data.toggle_marked_ids({id_})
                except:
                    self._debug('Exception standard syncing is_read_date',
                                self.is_read_date_sync_col)
                    traceback.print_exc()
        if changed_books or force_return_changed_books:
            # One of the two values was synced, giving a (perhaps empty) list of
            # changed books. Return that.
            return (changed_books, self._check_if_format_send_needed(db, id_, book))
        # Nothing was synced. The user might have changed the value in calibre.
        # If so, that value will be sent to the device in the normal way. Note
        # that because any updated value has already been synced and so will
        # also be sent, the device should put the calibre value into its
        # checkbox (or whatever it uses)
        return (None, self._check_if_format_send_needed(db, id_, book))
    @synchronous('sync_lock')
    def startup(self):
        # Real initialization happens lazily in _startup_on_demand(), when
        # the plugin is actually started.
        self.listen_socket = None
        self.is_connected = False
    def _startup_on_demand(self):
        # Full driver startup: reset per-connection state, validate the
        # thumbnail compression quality option, create and bind the TCP
        # listen socket (fixed or random port), register with zeroconf,
        # open the UDP broadcast socket, and start the connection listener
        # thread. Returns None on success, otherwise a message describing
        # the failure.
        if getattr(self, 'listen_socket', None) is not None:
            # we are already running
            return
        message = None
        # The driver is not running so must be started. It needs to protect itself
        # from access by the device thread before it is fully setup. Thus the lock.
        with self.sync_lock:
            if len(self.opcodes) != len(self.reverse_opcodes):
                self._debug(self.opcodes, self.reverse_opcodes)
            self.is_connected = False
            self.listen_socket = None
            self.device_socket = None
            self.json_codec = JsonCodec()
            self.known_metadata = {}
            self.device_book_cache = defaultdict(dict)
            self.debug_time = time.time()
            self.debug_start_time = time.time()
            self.max_book_packet_len = 0
            self.noop_counter = 0
            self.connection_attempts = {}
            self.client_wants_uuid_file_names = False
            self.is_read_sync_col = None
            self.is_read_date_sync_col = None
            self.have_checked_sync_columns = False
            self.have_bad_sync_columns = False
            self.have_sent_future_dated_book_message = False
            self.now = None
            # Validate the user-supplied compression quality; fall back to
            # a safe value and rewrite the stored option when invalid.
            compression_quality_ok = True
            try:
                cq = int(self.settings().extra_customization[self.OPT_COMPRESSION_QUALITY])
                if cq < 50 or cq > 99:
                    compression_quality_ok = False
                else:
                    self.THUMBNAIL_COMPRESSION_QUALITY = cq
            except:
                compression_quality_ok = False
            if not compression_quality_ok:
                self.THUMBNAIL_COMPRESSION_QUALITY = 70
                message = _('Bad compression quality setting. It must be a number '
                            'between 50 and 99. Forced to be %d.')%self.DEFAULT_THUMBNAIL_COMPRESSION_QUALITY
                self._debug(message)
                self.set_option('thumbnail_compression_quality',
                                str(self.DEFAULT_THUMBNAIL_COMPRESSION_QUALITY))
            try:
                self.listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                set_socket_inherit(self.listen_socket, False)
            except:
                traceback.print_exc()
                message = 'creation of listen socket failed'
                self._debug(message)
                return message
            i = 0
            if self.settings().extra_customization[self.OPT_USE_PORT]:
                # User requested a fixed port.
                try:
                    opt_port = int(self.settings().extra_customization[self.OPT_PORT_NUMBER])
                except:
                    message = _('Invalid port in options: %s')% \
                                self.settings().extra_customization[self.OPT_PORT_NUMBER]
                    self._debug(message)
                    self._close_listen_socket()
                    return message
                port = self._attach_to_port(self.listen_socket, opt_port)
                if port == 0:
                    message = _('Failed to connect to port %d. Try a different value.')%opt_port
                    self._debug(message)
                    self._close_listen_socket()
                    return message
            else:
                while i < 100:  # try 9090 then up to 99 random port numbers
                    i += 1
                    port = self._attach_to_port(self.listen_socket,
                                    9090 if i == 1 else random.randint(8192, 65525))
                    if port != 0:
                        break
                if port == 0:
                    message = _('Failed to allocate a random port')
                    self._debug(message)
                    self._close_listen_socket()
                    return message
            try:
                self.listen_socket.listen(1)
            except:
                message = 'listen on port %d failed' % port
                self._debug(message)
                self._close_listen_socket()
                return message
            try:
                ip_addr = self.settings().extra_customization[self.OPT_FORCE_IP_ADDRESS]
                publish_zeroconf('calibre smart device client',
                                 '_calibresmartdeviceapp._tcp', port, {},
                                 use_ip_address=ip_addr, strict=False)
            except:
                # Zeroconf failure is non-fatal: clients can still connect
                # by entering the IP address manually.
                self._debug('registration with bonjour failed')
                traceback.print_exc()
            self._debug('listening on port', port)
            self.port = port
            # Now try to open a UDP socket to receive broadcasts on
            try:
                self.broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                set_socket_inherit(self.broadcast_socket, False)
            except:
                message = 'creation of broadcast socket failed. This is not fatal.'
                self._debug(message)
                self.broadcast_socket = None
            else:
                for p in self.BROADCAST_PORTS:
                    port = self._attach_to_port(self.broadcast_socket, p)
                    if port != 0:
                        self._debug('broadcast socket listening on port', port)
                        break
                if port == 0:
                    self._close_socket(self.broadcast_socket)
                    self.broadcast_socket = None
                    message = 'attaching port to broadcast socket failed. This is not fatal.'
                    self._debug(message)
            self.connection_queue = queue.Queue(1)
            self.connection_listener = ConnectionListener(self)
            self.connection_listener.start()
        return message
    def _shutdown(self):
        # Stop the driver: close the device socket, stop the listener
        # thread, deregister from zeroconf and close the listen socket.
        # Force close any socket open by a device. This will cause any IO on the
        # socket to fail, eventually releasing the transaction lock.
        self._close_device_socket()
        # Now lockup so we can shutdown the control socket and unpublish mDNS
        with self.sync_lock:
            if getattr(self, 'listen_socket', None) is not None:
                self.connection_listener.stop()
                try:
                    unpublish_zeroconf('calibre smart device client',
                                       '_calibresmartdeviceapp._tcp', self.port, {})
                except:
                    self._debug('deregistration with bonjour failed')
                    traceback.print_exc()
                self._close_listen_socket()
    # Methods for dynamic control. Do not call _debug in these methods, as it
    # uses the sync lock.

    def is_dynamically_controllable(self):
        # The name under which this plugin can be started/stopped at runtime.
        return 'smartdevice'
    def start_plugin(self):
        # Bring up the listener; returns an error message string on failure,
        # None on success (see _startup_on_demand).
        return self._startup_on_demand()
    def stop_plugin(self):
        # Tear down sockets, the listener thread and the mDNS registration.
        self._shutdown()
def get_option(self, opt_string, default=None):
opt = self.OPTNAME_TO_NUMBER_MAP.get(opt_string)
if opt is not None:
return self.settings().extra_customization[opt]
return default
def set_option(self, opt_string, value):
opt = self.OPTNAME_TO_NUMBER_MAP.get(opt_string)
if opt is not None:
config = self._configProxy()
ec = config['extra_customization']
ec[opt] = value
config['extra_customization'] = ec
    def is_running(self):
        # True whenever the listen socket exists, i.e. the plugin started.
        return getattr(self, 'listen_socket', None) is not None
| 92,144 | Python | .py | 1,835 | 35.249591 | 138 | 0.538753 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,732 | kobotouch_config.py | kovidgoyal_calibre/src/calibre/devices/kobo/kobotouch_config.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015-2019, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import textwrap
from qt.core import QCheckBox, QDialog, QDialogButtonBox, QGridLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton, QVBoxLayout, QWidget
from calibre.gui2 import error_dialog
from calibre.gui2.device_drivers.tabbed_device_config import DeviceConfigTab, DeviceOptionsGroupBox, TabbedDeviceConfig
from calibre.gui2.dialogs.template_dialog import TemplateDialog
from calibre.gui2.dialogs.template_line_editor import TemplateLineEditor
from calibre.gui2.widgets2 import ColorButton
from calibre.prints import debug_print
def wrap_msg(msg):
    """Strip surrounding whitespace and re-wrap the text at 100 columns."""
    cleaned = msg.strip()
    return textwrap.fill(cleaned, 100)
def setToolTipFor(widget, tt):
    # Apply a whitespace-normalised, wrapped tooltip to a Qt widget.
    widget.setToolTip(wrap_msg(tt))
def create_checkbox(title, tt, state):
    # Build a QCheckBox with a wrapped tooltip and an initial checked state.
    cb = QCheckBox(title)
    cb.setToolTip(wrap_msg(tt))
    cb.setChecked(bool(state))
    return cb
class KOBOTOUCHConfig(TabbedDeviceConfig):
    '''
    Two-tab configuration dialog widget for the KOBOTOUCH driver. Tab 1
    holds collections, cover and upload options; tab 2 holds metadata,
    device-list and advanced options. commit() gathers everything into
    the settings dict that is persisted for the driver.
    '''

    def __init__(self, device_settings, all_formats, supports_subdirs,
                 must_read_metadata, supports_use_author_sort,
                 extra_customization_message, device, extra_customization_choices=None, parent=None):
        super().__init__(device_settings, all_formats, supports_subdirs,
                         must_read_metadata, supports_use_author_sort,
                         extra_customization_message, device, extra_customization_choices, parent)

        self.device_settings = device_settings
        self.all_formats = all_formats
        self.supports_subdirs = supports_subdirs
        self.must_read_metadata = must_read_metadata
        self.supports_use_author_sort = supports_use_author_sort
        self.extra_customization_message = extra_customization_message
        self.extra_customization_choices = extra_customization_choices

        self.tab1 = Tab1Config(self, self.device)
        self.tab2 = Tab2Config(self, self.device)

        self.addDeviceTab(self.tab1, _("Collections, covers && uploads"))
        self.addDeviceTab(self.tab2, _('Metadata, on device && advanced'))

    def get_pref(self, key):
        # Delegate preference lookups to the device driver.
        return self.device.get_pref(key)

    @property
    def device(self):
        # _device is stored by the base class; calling it resolves the stored
        # reference (presumably a weakref) — confirm in TabbedDeviceConfig.
        return self._device()

    def validate(self):
        # Combine base-class validation with tab 2's own validation.
        validated = super().validate()
        validated &= self.tab2.validate()
        return validated

    @property
    def book_uploads_options(self):
        return self.tab1.book_uploads_options

    @property
    def collections_options(self):
        return self.tab1.collections_options

    @property
    def cover_options(self):
        return self.tab1.covers_options

    @property
    def device_list_options(self):
        return self.tab2.device_list_options

    @property
    def advanced_options(self):
        return self.tab2.advanced_options

    @property
    def metadata_options(self):
        return self.tab2.metadata_options

    def commit(self):
        '''Collect the state of every option widget into the settings dict.'''
        debug_print("KOBOTOUCHConfig::commit: start")
        p = super().commit()

        p['manage_collections'] = self.manage_collections
        p['create_collections'] = self.create_collections
        p['use_collections_columns'] = self.use_collections_columns
        p['collections_columns'] = self.collections_columns
        p['use_collections_template'] = self.use_collections_template
        p['collections_template'] = self.collections_template
        p['ignore_collections_names'] = self.ignore_collections_names
        p['delete_empty_collections'] = self.delete_empty_collections

        p['upload_covers'] = self.upload_covers
        p['keep_cover_aspect'] = self.keep_cover_aspect
        p['upload_grayscale'] = self.upload_grayscale
        p['dithered_covers'] = self.dithered_covers
        p['letterbox_fs_covers'] = self.letterbox_fs_covers
        p['letterbox_fs_covers_color'] = self.letterbox_fs_covers_color
        p['png_covers'] = self.png_covers

        p['show_recommendations'] = self.show_recommendations
        p['show_previews'] = self.show_previews
        p['show_archived_books'] = self.show_archived_books

        p['update_device_metadata'] = self.update_device_metadata
        p['update_series'] = self.update_series
        p['force_series_id'] = self.force_series_id
        p['update_core_metadata'] = self.update_core_metadata
        p['update_purchased_kepubs'] = self.update_purchased_kepubs
        p['subtitle_template'] = self.subtitle_template
        p['update_subtitle'] = self.update_subtitle
        p['update_bookstats'] = self.update_bookstats
        p['bookstats_wordcount_template'] = self.bookstats_wordcount_template
        p['bookstats_pagecount_template'] = self.bookstats_pagecount_template
        p['bookstats_timetoread_upper_template'] = self.bookstats_timetoread_upper_template
        p['bookstats_timetoread_lower_template'] = self.bookstats_timetoread_lower_template

        p['modify_css'] = self.modify_css
        p['override_kobo_replace_existing'] = self.override_kobo_replace_existing

        p['support_newer_firmware'] = self.support_newer_firmware
        p['debugging_title'] = self.debugging_title
        # Record the driver version the settings were written with.
        p['driver_version'] = '.'.join([str(i) for i in self.device.version])

        return p
class Tab1Config(DeviceConfigTab):  # {{{
    # First tab: collections, covers and book-upload option groups stacked
    # vertically.

    def __init__(self, parent, device):
        super().__init__(parent)

        self.l = QVBoxLayout(self)
        self.setLayout(self.l)

        self.collections_options = CollectionsGroupBox(self, device)
        self.l.addWidget(self.collections_options)
        self.addDeviceWidget(self.collections_options)

        self.covers_options = CoversGroupBox(self, device)
        self.l.addWidget(self.covers_options)
        self.addDeviceWidget(self.covers_options)

        self.book_uploads_options = BookUploadsGroupBox(self, device)
        self.l.addWidget(self.book_uploads_options)
        self.addDeviceWidget(self.book_uploads_options)

        self.l.addStretch()
# }}}
class Tab2Config(DeviceConfigTab):  # {{{
    # Second tab: metadata, device-list and advanced option groups.

    def __init__(self, parent, device):
        super().__init__(parent)

        self.l = QVBoxLayout(self)
        self.setLayout(self.l)

        self.metadata_options = MetadataGroupBox(self, device)
        self.l.addWidget(self.metadata_options)
        self.addDeviceWidget(self.metadata_options)

        self.device_list_options = DeviceListGroupBox(self, device)
        self.l.addWidget(self.device_list_options)
        self.addDeviceWidget(self.device_list_options)

        self.advanced_options = AdvancedGroupBox(self, device)
        self.l.addWidget(self.advanced_options)
        self.addDeviceWidget(self.advanced_options)

        self.l.addStretch()

    def validate(self):
        # Only the metadata group has state that can be invalid (templates).
        return self.metadata_options.validate()
# }}}
class BookUploadsGroupBox(DeviceOptionsGroupBox):
    # Options applied when sending books to the device: CSS modification
    # and the replace-vs-new-book firmware workaround.

    def __init__(self, parent, device):
        super().__init__(parent, device)
        self.setTitle(_("Uploading of books"))

        self.options_layout = QGridLayout()
        self.options_layout.setObjectName("options_layout")
        self.setLayout(self.options_layout)

        self.modify_css_checkbox = create_checkbox(
            _("Modify CSS"),
            _('This allows addition of user CSS rules and removal of some CSS. '
              'When sending a book, the driver adds the contents of {0} to all stylesheets in the EPUB. '
              'This file is searched for in the root folder of the main memory of the device. '
              'As well as this, if the file contains settings for the "orphans" or "widows", '
              'these are removed for all styles in the original stylesheet.').format(device.KOBO_EXTRA_CSSFILE),
            device.get_pref('modify_css')
        )
        self.override_kobo_replace_existing_checkbox = create_checkbox(
            _("Do not treat replacements as new books"),
            _('When a new book is side-loaded, the Kobo firmware imports details of the book into the internal database. '
              'Even if the book is a replacement for an existing book, the Kobo will remove the book from the database and then treat it as a new book. '
              'This means that the reading status, bookmarks and collections for the book will be lost. '
              'This option overrides firmware behavior and attempts to prevent a book that has been resent from being treated as a new book. '
              'If you prefer to have replacements treated as new books, turn this option off.'
              ),
            device.get_pref('override_kobo_replace_existing')
        )

        self.options_layout.addWidget(self.modify_css_checkbox, 0, 0, 1, 2)
        self.options_layout.addWidget(self.override_kobo_replace_existing_checkbox, 1, 0, 1, 2)

    @property
    def modify_css(self):
        return self.modify_css_checkbox.isChecked()

    @property
    def override_kobo_replace_existing(self):
        return self.override_kobo_replace_existing_checkbox.isChecked()
class CollectionsGroupBox(DeviceOptionsGroupBox):
    # Collection (bookshelf) management options. The group box itself is
    # checkable: its checked state is the 'manage_collections' preference.

    def __init__(self, parent, device):
        super().__init__(parent, device)
        self.setTitle(_("Collections"))

        self.options_layout = QGridLayout()
        self.options_layout.setObjectName("options_layout")
        self.setLayout(self.options_layout)
        self.setCheckable(True)
        self.setChecked(device.get_pref('manage_collections'))
        self.setToolTip(wrap_msg(_('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.')))

        self.use_collections_columns_checkbox = create_checkbox(
            _("Collections columns:"),
            _('Use a column to generate collections.'),
            device.get_pref('use_collections_columns')
        )
        self.collections_columns_edit = QLineEdit(self)
        self.collections_columns_edit.setToolTip(_('The Kobo from firmware V2.0.0 supports bookshelves.'
                                                   ' These are created on the Kobo. '
                                                   'Specify a tags type column for automatic management.'))
        self.collections_columns_edit.setText(device.get_pref('collections_columns'))

        self.use_collections_template_checkbox = create_checkbox(
            _("Collections template:"),
            _('Use a template to generate collections.'),
            device.get_pref('use_collections_template')
        )
        self.collections_template_edit = TemplateConfig(
            device.get_pref('collections_template'),
            tooltip=_("Enter a template to generate collections."
                      " The result of the template will be combined with the values from Collections column."
                      " The template should return a list of collection names separated by ':@:' (without quotes)."
                      )
        )

        self.create_collections_checkbox = create_checkbox(
            _("Create collections"),
            _('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.'),
            device.get_pref('create_collections')
        )
        self.delete_empty_collections_checkbox = create_checkbox(
            _('Delete empty bookshelves'),
            _('Delete any empty bookshelves from the Kobo when syncing is finished. This is only for firmware V2.0.0 or later.'),
            device.get_pref('delete_empty_collections')
        )

        self.ignore_collections_names_label = QLabel(_('Ignore collections:'))
        self.ignore_collections_names_edit = QLineEdit(self)
        self.ignore_collections_names_edit.setToolTip(_('List the names of collections to be ignored by '
                'the collection management. The collections listed '
                'will not be changed. Names are separated by commas.'))
        self.ignore_collections_names_edit.setText(device.get_pref('ignore_collections_names'))

        self.options_layout.addWidget(self.use_collections_columns_checkbox, 1, 0, 1, 1)
        self.options_layout.addWidget(self.collections_columns_edit, 1, 1, 1, 1)
        self.options_layout.addWidget(self.use_collections_template_checkbox, 2, 0, 1, 1)
        self.options_layout.addWidget(self.collections_template_edit, 2, 1, 1, 1)
        self.options_layout.addWidget(self.create_collections_checkbox, 3, 0, 1, 2)
        self.options_layout.addWidget(self.delete_empty_collections_checkbox, 4, 0, 1, 2)
        self.options_layout.addWidget(self.ignore_collections_names_label, 5, 0, 1, 1)
        self.options_layout.addWidget(self.ignore_collections_names_edit, 5, 1, 1, 1)

        # Keep the edit widgets enabled only while their checkbox is on.
        self.use_collections_columns_checkbox.clicked.connect(self.use_collections_columns_checkbox_clicked)
        self.use_collections_template_checkbox.clicked.connect(self.use_collections_template_checkbox_clicked)
        self.use_collections_columns_checkbox_clicked(device.get_pref('use_collections_columns'))
        self.use_collections_template_checkbox_clicked(device.get_pref('use_collections_template'))

    def use_collections_columns_checkbox_clicked(self, checked):
        self.collections_columns_edit.setEnabled(checked)

    def use_collections_template_checkbox_clicked(self, checked):
        self.collections_template_edit.setEnabled(checked)

    @property
    def manage_collections(self):
        return self.isChecked()

    @property
    def use_collections_columns(self):
        return self.use_collections_columns_checkbox.isChecked()

    @property
    def collections_columns(self):
        return self.collections_columns_edit.text().strip()

    @property
    def use_collections_template(self):
        return self.use_collections_template_checkbox.isChecked()

    @property
    def collections_template(self):
        return self.collections_template_edit.template

    @property
    def create_collections(self):
        return self.create_collections_checkbox.isChecked()

    @property
    def delete_empty_collections(self):
        return self.delete_empty_collections_checkbox.isChecked()

    @property
    def ignore_collections_names(self):
        return self.ignore_collections_names_edit.text().strip()
class CoversGroupBox(DeviceOptionsGroupBox):
    '''Options controlling whether and how cover images are uploaded to the
    device. The group box itself is checkable; unchecking it disables cover
    upload entirely. Several options are chained: dithering and PNG output
    only make sense with grayscale covers, and letterboxing only with a
    preserved aspect ratio, so those checkboxes enable/disable each other.'''
    def __init__(self, parent, device):
        super().__init__(parent, device)
        self.setTitle(_("Upload covers"))
        self.options_layout = QGridLayout()
        self.options_layout.setObjectName("options_layout")
        self.setLayout(self.options_layout)
        self.setCheckable(True)
        self.setChecked(device.get_pref('upload_covers'))
        self.setToolTip(wrap_msg(_('Upload cover images from the calibre library when sending books to the device.')))
        self.upload_grayscale_checkbox = create_checkbox(
                             _('Upload black and white covers'),
                             _('Convert covers to grayscale when uploading.'),
                             device.get_pref('upload_grayscale')
                             )
        self.dithered_covers_checkbox = create_checkbox(
                             _('Upload dithered covers'),
                             _('Dither cover images to the appropriate 16c grayscale palette for an eInk screen.'
                               ' This usually ensures greater accuracy and avoids banding, making sleep covers look better.'
                               ' On FW >= 4.11, Nickel itself may sometimes do a decent job of it.'
                               ' Has no effect without "Upload black and white covers"!'),
                             device.get_pref('dithered_covers')
                             )
        # Make it visually depend on B&W being enabled!
        # c.f., https://stackoverflow.com/q/36281103
        self.dithered_covers_checkbox.setEnabled(device.get_pref('upload_grayscale'))
        self.upload_grayscale_checkbox.toggled.connect(self.dithered_covers_checkbox.setEnabled)
        # Unticking grayscale also unticks (not just disables) dithering.
        self.upload_grayscale_checkbox.toggled.connect(
            lambda checked: not checked and self.dithered_covers_checkbox.setChecked(False))
        self.keep_cover_aspect_checkbox = create_checkbox(
                             _('Keep cover aspect ratio'),
                             _('When uploading covers, do not change the aspect ratio when resizing for the device.'
                               ' This is for firmware versions 2.3.1 and later.'),
                             device.get_pref('keep_cover_aspect'))
        self.letterbox_fs_covers_checkbox = create_checkbox(
                             _('Letterbox full-screen covers'),
                             _('Do it on our end, instead of letting Nickel handle it.'
                               ' Provides pixel-perfect results on devices where Nickel does not do extra processing.'
                               ' Obviously has no effect without "Keep cover aspect ratio".'
                               ' This is probably undesirable if you disable the "Show book covers full screen"'
                               ' setting on your device.'),
                             device.get_pref('letterbox_fs_covers'))
        # Color used to fill the letterbox bars around the cover.
        self.letterbox_fs_covers_color_button = ColorButton(self.options_layout)
        self.letterbox_fs_covers_color_button.setToolTip(_('Choose the color to use when letterboxing the cover.'
                                                           ' The default color is black (#000000)'
                                                           )
                                                         )
        self.letterbox_fs_covers_color_button.color = device.get_pref('letterbox_fs_covers_color')
        # Make it visually depend on AR being enabled!
        self.letterbox_fs_covers_checkbox.setEnabled(device.get_pref('keep_cover_aspect'))
        self.letterbox_fs_covers_color_button.setEnabled(device.get_pref('keep_cover_aspect') and device.get_pref('letterbox_fs_covers'))
        self.keep_cover_aspect_checkbox.toggled.connect(self.letterbox_fs_covers_checkbox.setEnabled)
        self.keep_cover_aspect_checkbox.toggled.connect(
            lambda checked: not checked and self.letterbox_fs_covers_checkbox.setChecked(False))
        self.letterbox_fs_covers_checkbox.toggled.connect(self.letterbox_fs_covers_color_button.setEnabled)
        self.png_covers_checkbox = create_checkbox(
                             _('Save covers as PNG'),
                             _('Use the PNG image format instead of JPG.'
                               ' Higher quality, especially with "Upload dithered covers" enabled,'
                               ' which will also help generate potentially smaller files.'
                               ' Behavior completely unknown on old (< 3.x) Kobo firmwares,'
                               ' known to behave on FW >= 4.8.'
                               ' Has no effect without "Upload black and white covers"!'),
                             device.get_pref('png_covers'))
        # Make it visually depend on B&W being enabled, to avoid storing ridiculously large color PNGs.
        self.png_covers_checkbox.setEnabled(device.get_pref('upload_grayscale'))
        self.upload_grayscale_checkbox.toggled.connect(self.png_covers_checkbox.setEnabled)
        self.upload_grayscale_checkbox.toggled.connect(
            lambda checked: not checked and self.png_covers_checkbox.setChecked(False))
        # Layout: aspect-ratio options on row 0/1, grayscale options on row 2/3.
        self.options_layout.addWidget(self.keep_cover_aspect_checkbox, 0, 0, 1, 1)
        self.options_layout.addWidget(self.letterbox_fs_covers_checkbox, 0, 1, 1, 2)
        self.options_layout.addWidget(self.letterbox_fs_covers_color_button, 1, 1, 1, 1)
        self.options_layout.addWidget(self.upload_grayscale_checkbox, 2, 0, 1, 1)
        self.options_layout.addWidget(self.dithered_covers_checkbox, 2, 1, 1, 2)
        self.options_layout.addWidget(self.png_covers_checkbox, 3, 1, 1, 2)
        self.options_layout.setColumnStretch(0, 0)
        self.options_layout.setColumnStretch(1, 0)
        self.options_layout.setColumnStretch(2, 1)
    @property
    def upload_covers(self):
        '''True when cover upload is enabled at all (the group box is checked).'''
        return self.isChecked()
    @property
    def upload_grayscale(self):
        return self.upload_grayscale_checkbox.isChecked()
    @property
    def dithered_covers(self):
        return self.dithered_covers_checkbox.isChecked()
    @property
    def keep_cover_aspect(self):
        return self.keep_cover_aspect_checkbox.isChecked()
    @property
    def letterbox_fs_covers(self):
        return self.letterbox_fs_covers_checkbox.isChecked()
    @property
    def letterbox_fs_covers_color(self):
        return self.letterbox_fs_covers_color_button.color
    @property
    def png_covers(self):
        return self.png_covers_checkbox.isChecked()
class DeviceListGroupBox(DeviceOptionsGroupBox):
    '''Options controlling which classes of device-side entries
    (recommendations, archived books, previews) calibre shows in its
    device book list.'''
    def __init__(self, parent, device):
        super().__init__(parent, device)
        self.setTitle(_("Show as on device"))
        self.options_layout = QGridLayout()
        self.options_layout.setObjectName("options_layout")
        self.setLayout(self.options_layout)
        self.show_recommendations_checkbox = create_checkbox(
                             _("Show recommendations"),
                             _('Kobo shows recommendations on the device. In some cases these have '
                               'files but in other cases they are just pointers to the web site to buy. '
                               'Enable if you wish to see/delete them.'),
                             device.get_pref('show_recommendations')
                             )
        self.show_archived_books_checkbox = create_checkbox(
                             _("Show archived books"),
                             _('Archived books are listed on the device but need to be downloaded to read.'
                               ' Use this option to show these books and match them with books in the calibre library.'),
                             device.get_pref('show_archived_books')
                             )
        self.show_previews_checkbox = create_checkbox(
                             _('Show previews'),
                             _('Kobo previews are included on the Touch and some other versions.'
                               ' By default, they are no longer displayed as there is no good reason to '
                               'see them. Enable if you wish to see/delete them.'),
                             device.get_pref('show_previews')
                             )
        # One checkbox per row, single column.
        self.options_layout.addWidget(self.show_recommendations_checkbox, 0, 0, 1, 1)
        self.options_layout.addWidget(self.show_archived_books_checkbox, 1, 0, 1, 1)
        self.options_layout.addWidget(self.show_previews_checkbox, 2, 0, 1, 1)
    @property
    def show_recommendations(self):
        return self.show_recommendations_checkbox.isChecked()
    @property
    def show_archived_books(self):
        return self.show_archived_books_checkbox.isChecked()
    @property
    def show_previews(self):
        return self.show_previews_checkbox.isChecked()
class AdvancedGroupBox(DeviceOptionsGroupBox):
    '''"Here be dragons" options: opting in to newer, untested firmware
    support and setting a partial book title used to focus debug output.'''
    def __init__(self, parent, device):
        super().__init__(parent, device, _("Advanced options"))
        # self.setTitle(_("Advanced Options"))
        self.options_layout = QGridLayout()
        self.options_layout.setObjectName("options_layout")
        self.setLayout(self.options_layout)
        self.support_newer_firmware_checkbox = create_checkbox(
                            _("Attempt to support newer firmware"),
                            _('Kobo routinely updates the firmware and the '
                              'database version. With this option calibre will attempt '
                              'to perform full read-write functionality - Here be Dragons!! '
                              'Enable only if you are comfortable with restoring your kobo '
                              'to factory defaults and testing software. '
                              'This driver supports firmware V2.x.x and DBVersion up to ') + str(
                                  device.supported_dbversion), device.get_pref('support_newer_firmware')
                            )
        # NOTE(review): this checkbox is created but never added to
        # options_layout and never read back; the label/line-edit pair below
        # is what actually carries the debugging title. Looks like leftover
        # code - confirm before removing.
        self.debugging_title_checkbox = create_checkbox(
                             _("Title to test when debugging"),
                             _('Part of title of a book that can be used when doing some tests for debugging. '
                               'The test is to see if the string is contained in the title of a book. '
                               'The better the match, the less extraneous output.'),
                             device.get_pref('debugging_title')
                             )
        self.debugging_title_label = QLabel(_('Title to test when debugging:'))
        self.debugging_title_edit = QLineEdit(self)
        self.debugging_title_edit.setToolTip(_('Part of title of a book that can be used when doing some tests for debugging. '
                    'The test is to see if the string is contained in the title of a book. '
                    'The better the match, the less extraneous output.'))
        self.debugging_title_edit.setText(device.get_pref('debugging_title'))
        self.debugging_title_label.setBuddy(self.debugging_title_edit)
        self.options_layout.addWidget(self.support_newer_firmware_checkbox, 0, 0, 1, 2)
        self.options_layout.addWidget(self.debugging_title_label, 1, 0, 1, 1)
        self.options_layout.addWidget(self.debugging_title_edit, 1, 1, 1, 1)
    @property
    def support_newer_firmware(self):
        return self.support_newer_firmware_checkbox.isChecked()
    @property
    def debugging_title(self):
        return self.debugging_title_edit.text().strip()
class MetadataGroupBox(DeviceOptionsGroupBox):
    '''Options for updating metadata in the device database when the device
    connects. The group box is checkable (master switch). Sub-options form a
    dependency tree: series options are mutually exclusive with "core"
    metadata updates, and the subtitle/book-stats template editors are only
    live when core metadata updates are enabled.

    Fix vs. original: removed the dead ``edit_template`` method, a copy-paste
    from ``TemplateConfig`` that referenced ``self.template`` and ``self.t``,
    neither of which exists on this class (calling it always raised
    AttributeError).'''
    def __init__(self, parent, device):
        super().__init__(parent, device)
        self.setTitle(_("Update metadata on the device"))
        self.options_layout = QGridLayout()
        self.options_layout.setObjectName("options_layout")
        self.setLayout(self.options_layout)
        self.setCheckable(True)
        self.setChecked(device.get_pref('update_device_metadata'))
        self.setToolTip(wrap_msg(_('Update the metadata on the device when it is connected. '
                       'Be careful when doing this as it will take time and could make the initial connection take a long time.')))
        self.update_series_checkbox = create_checkbox(
                             _("Set series information"),
                             _('The book lists on the Kobo devices can display series information. '
                               'This is not read by the device from the sideloaded books. '
                               'Series information can only be added to the device after the '
                               'book has been processed by the device. '
                               'Enable if you wish to set series information.'),
                             device.get_pref('update_series')
                             )
        self.force_series_id_checkbox = create_checkbox(
                             _("Force series ID"),
                             _('Kobo devices use a SeriesID to distinguish between different series. '
                               'Purchased books have a SeriesID assigned by Kobo. Sideloaded books '
                               'have a SeriesID assigned by calibre, which is usually different. '
                               'This causes a series to be shown twice on the Kobo device. '
                               'Enable if you wish to force all the SeriesID for books '
                               'in a series to the same value.'),
                             device.get_pref('force_series_id')
                             )
        self.update_core_metadata_checkbox = create_checkbox(
                             _("Update metadata on Book Details pages"),
                             _('This will update the metadata in the device database when the device is connected. '
                               'The metadata updated is displayed on the device in the library and the Book details page. '
                               'This is the title, authors, comments/synopsis, series name and number, publisher and published Date, ISBN and language. '
                               'If a metadata plugboard exists for the device and book format, this will be used to set the metadata.'
                               ),
                             device.get_pref('update_core_metadata')
                             )
        self.update_purchased_kepubs_checkbox = create_checkbox(
                             _("Update purchased books"),
                             _('Update books purchased from Kobo and downloaded to the device.'
                               ),
                             device.get_pref('update_purchased_kepubs')
                             )
        self.update_subtitle_checkbox = create_checkbox(
                             _("Subtitle"),
                             _('Update the subtitle on the device using a template.'),
                             device.get_pref('update_subtitle')
                             )
        self.subtitle_template_edit = TemplateConfig(
                            device.get_pref('subtitle_template'),
                            tooltip=_("Enter a template to use to set the subtitle. "
                                      "If the template is empty, the subtitle will be cleared."
                                      )
                            )
        self.update_bookstats_checkbox = create_checkbox(
                             _("Book stats"),
                             _('Update the book stats '),
                             device.get_pref('update_bookstats')
                             )
        self.bookstats_wordcount_template_edit = TemplateConfig(
                            device.get_pref('bookstats_wordcount_template'),
                            label=_("Words:"),
                            tooltip=_("Enter a template to use to set the word count for the book. "
                                      "If the template is empty, the word count will be cleared."
                                      )
                            )
        self.bookstats_pagecount_template_edit = TemplateConfig(
                            device.get_pref('bookstats_pagecount_template'),
                            label=_("Pages:"),
                            tooltip=_("Enter a template to use to set the page count for the book. "
                                      "If the template is empty, the page count will be cleared."
                                      )
                            )
        self.bookstats_timetoread_label = QLabel(_('Hours to read estimates:'))
        self.bookstats_timetoread_upper_template_edit = TemplateConfig(
                            device.get_pref('bookstats_timetoread_upper_template'),
                            label=_("Upper:"),
                            tooltip=_("Enter a template to use to set the upper estimate of the time to read for the book. "
                                      "The estimate is in hours. "
                                      "If the template is empty, the time will be cleared."
                                      )
                            )
        self.bookstats_timetoread_lower_template_edit = TemplateConfig(
                            device.get_pref('bookstats_timetoread_lower_template'),
                            label=_("Lower:"),
                            tooltip=_("Enter a template to use to set the lower estimate of the time to read for the book. "
                                      "The estimate is in hours. "
                                      "If the template is empty, the time will be cleared."
                                      )
                            )
        # Lay out rows top to bottom: series, core metadata, subtitle,
        # book stats (counts then time-to-read), purchased kepubs.
        line = 0
        hbl = QHBoxLayout()
        hbl.setContentsMargins(0, 0, 0, 0)
        hbl.addWidget(self.update_series_checkbox)
        hbl.addWidget(self.force_series_id_checkbox)
        hbl.addStretch(1)
        self.options_layout.addLayout(hbl, line, 0, 1, 4)
        line += 1
        self.options_layout.addWidget(self.update_core_metadata_checkbox, line, 0, 1, 4)
        line += 1
        self.options_layout.addWidget(self.update_subtitle_checkbox, line, 0, 1, 2)
        self.options_layout.addWidget(self.subtitle_template_edit, line, 2, 1, 2)
        line += 1
        self.options_layout.addWidget(self.update_bookstats_checkbox, line, 0, 1, 2)
        self.options_layout.addWidget(self.bookstats_wordcount_template_edit, line, 2, 1, 1)
        self.options_layout.addWidget(self.bookstats_pagecount_template_edit, line, 3, 1, 1)
        line += 1
        self.options_layout.addWidget(self.bookstats_timetoread_label, line, 1, 1, 1)
        self.options_layout.addWidget(self.bookstats_timetoread_lower_template_edit, line, 2, 1, 1)
        self.options_layout.addWidget(self.bookstats_timetoread_upper_template_edit, line, 3, 1, 1)
        line += 1
        self.options_layout.addWidget(self.update_purchased_kepubs_checkbox, line, 0, 1, 4)
        # Wire up the dependency tree and apply the initial enabled states.
        self.force_series_id_checkbox.setEnabled(self.update_series)
        self.update_core_metadata_checkbox.clicked.connect(self.update_core_metadata_checkbox_clicked)
        self.update_series_checkbox.clicked.connect(self.update_series_checkbox_clicked)
        self.update_subtitle_checkbox.clicked.connect(self.update_subtitle_checkbox_clicked)
        self.update_bookstats_checkbox.clicked.connect(self.update_bookstats_checkbox_clicked)
        self.update_core_metadata_checkbox_clicked(device.get_pref('update_core_metadata'))
        self.update_subtitle_checkbox_clicked(device.get_pref('update_subtitle'))
        self.update_bookstats_checkbox_clicked(device.get_pref('update_bookstats'))
    def update_series_checkbox_clicked(self, checked):
        # "Force series ID" is meaningless unless series updates are on.
        self.force_series_id_checkbox.setEnabled(checked)
        if not checked:
            self.force_series_id_checkbox.setChecked(False)
    def update_core_metadata_checkbox_clicked(self, checked):
        # Core-metadata updates supersede the plain series option, and gate
        # the subtitle/book-stats sub-options.
        self.update_series_checkbox.setEnabled(not checked)
        self.force_series_id_checkbox.setEnabled(self.update_series)
        self.subtitle_template_edit.setEnabled(checked)
        self.update_subtitle_checkbox.setEnabled(checked)
        self.update_bookstats_checkbox.setEnabled(checked)
        self.update_subtitle_checkbox_clicked(self.update_subtitle)
        self.update_bookstats_checkbox_clicked(self.update_bookstats)
        self.update_purchased_kepubs_checkbox.setEnabled(checked)
    def update_subtitle_checkbox_clicked(self, checked):
        self.subtitle_template_edit.setEnabled(checked and self.update_core_metadata)
    def update_bookstats_checkbox_clicked(self, checked):
        # All book-stats template editors share one enablement rule.
        enable = checked and self.update_bookstats and self.update_core_metadata
        for widget in (
                self.bookstats_timetoread_label,
                self.bookstats_wordcount_template_edit,
                self.bookstats_pagecount_template_edit,
                self.bookstats_timetoread_upper_template_edit,
                self.bookstats_timetoread_lower_template_edit,
        ):
            widget.setEnabled(enable)
    def validate(self):
        '''Validate all active templates; each TemplateConfig pops up its own
        error dialog. Return False on the first invalid template.'''
        if self.update_subtitle and not self.subtitle_template_edit.validate():
            return False
        if self.update_bookstats:
            for template_edit in (
                    self.bookstats_pagecount_template_edit,
                    self.bookstats_wordcount_template_edit,
                    self.bookstats_timetoread_upper_template_edit,
                    self.bookstats_timetoread_lower_template_edit,
            ):
                if not template_edit.validate():
                    return False
        return True
    @property
    def update_series(self):
        return self.update_series_checkbox.isChecked()
    @property
    def force_series_id(self):
        '''Only honored while series updates are enabled.'''
        return self.update_series and self.force_series_id_checkbox.isChecked()
    @property
    def update_core_metadata(self):
        return self.update_core_metadata_checkbox.isChecked()
    @property
    def update_purchased_kepubs(self):
        return self.update_purchased_kepubs_checkbox.isChecked()
    @property
    def update_device_metadata(self):
        '''Master switch: the group box's own checked state.'''
        return self.isChecked()
    @property
    def subtitle_template(self):
        return self.subtitle_template_edit.template
    @property
    def update_subtitle(self):
        return self.update_subtitle_checkbox.isChecked()
    @property
    def update_bookstats(self):
        return self.update_bookstats_checkbox.isChecked()
    @property
    def bookstats_pagecount_template(self):
        return self.bookstats_pagecount_template_edit.template
    @property
    def bookstats_wordcount_template(self):
        return self.bookstats_wordcount_template_edit.template
    @property
    def bookstats_timetoread_lower_template(self):
        return self.bookstats_timetoread_lower_template_edit.template
    @property
    def bookstats_timetoread_upper_template(self):
        return self.bookstats_timetoread_upper_template_edit.template
class TemplateConfig(QWidget):  # {{{
    '''A compact template-entry row: an optional label, a template line edit
    and a button that opens the full template editor dialog.

    Read or write the template text through the ``template`` property. The
    short attribute names ``l``/``t``/``b`` (layout, text editor, button) are
    kept for compatibility with existing code.'''
    def __init__(self, val, label=None, tooltip=None):
        super().__init__()
        self.l = layout = QGridLayout(self)
        self.setLayout(layout)
        column = 0
        if label is not None:
            layout.addWidget(QLabel(label), 0, column, 1, 1)
            column += 1
        # Line edit holding the template text.
        self.t = editor = TemplateLineEditor(self)
        editor.setText(val or '')
        editor.setCursorPosition(0)
        self.setMinimumWidth(300)
        layout.addWidget(editor, 0, column, 1, 1)
        column += 1
        # Button that launches the full template editor dialog.
        self.b = button = QPushButton(_('&Template editor'))
        layout.addWidget(button, 0, column, 1, 1)
        button.clicked.connect(self.edit_template)
        self.setToolTip(wrap_msg(tooltip))
    @property
    def template(self):
        '''Current template text, stripped of surrounding whitespace.'''
        return str(self.t.text()).strip()
    @template.setter
    def template(self, template):
        self.t.setText(template)
    def edit_template(self):
        '''Open the template editor dialog; on accept, copy the edited
        template back into the line edit.'''
        dlg = TemplateDialog(self, self.template)
        dlg.setWindowTitle(_('Edit template'))
        if dlg.exec():
            self.t.setText(dlg.rule[1])
    def validate(self):
        '''Syntax-check the template. Shows an error dialog and returns
        False when invalid; returns True otherwise.'''
        from calibre.utils.formatter import validation_formatter
        tmpl = self.template
        try:
            validation_formatter.validate(tmpl)
        except Exception as err:
            error_dialog(self, _('Invalid template'),
                         '<p>'+_('The template "%s" is invalid:')%tmpl +
                         '<br>'+str(err), show=True)
            return False
        return True
# }}}
if __name__ == '__main__':
    # Ad-hoc manual test harness: instantiate the KoboTouch driver without a
    # connected device and display its configuration widget in a dialog.
    # Intended to be run from a calibre development environment.
    from calibre.devices.kobo.driver import KOBOTOUCH
    from calibre.devices.scanner import DeviceScanner
    from calibre.gui2 import Application
    s = DeviceScanner()
    s.scan()
    app = Application([])  # must exist (and stay referenced) before any widget is created
    debug_print("KOBOTOUCH:", KOBOTOUCH)
    dev = KOBOTOUCH(None)
    # dev.startup()
    # cd = dev.detect_managed_devices(s.devices)
    # dev.open(cd, 'test')
    cw = dev.config_widget()
    d = QDialog()
    d.l = QVBoxLayout()
    d.setLayout(d.l)
    d.l.addWidget(cw)
    bb = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok|QDialogButtonBox.StandardButton.Cancel)
    d.l.addWidget(bb)
    bb.accepted.connect(d.accept)
    bb.rejected.connect(d.reject)
    if d.exec() == QDialog.DialogCode.Accepted:
        cw.commit()  # persist the chosen settings to the driver's prefs
    dev.shutdown()
| 40,655 | Python | .py | 701 | 44.455064 | 155 | 0.618762 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,733 | books.py | kovidgoyal_calibre/src/calibre/devices/kobo/books.py | __license__ = 'GPL v3'
__copyright__ = '2010-2012, , Timothy Legge <timlegge at gmail.com> and David Forrester <davidfor@internode.on.net>'
__docformat__ = 'restructuredtext en'
import os
import time
from calibre import isbytestring
from calibre.constants import DEBUG, preferred_encoding
from calibre.devices.usbms.books import Book as Book_
from calibre.devices.usbms.books import CollectionsBookList
from calibre.ebooks.metadata import author_to_author_sort
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.prints import debug_print
from calibre.utils.config_base import prefs
class Book(Book_):
    '''A single book on a Kobo device.

    Extends the generic USBMS ``Book`` with Kobo-specific fields read from the
    device database: the ContentID, series/subtitle/book-stats metadata, and
    current shelf (collection) membership.

    Fix vs. original: the publication-date parsing used a pyramid of bare
    ``except:`` clauses, which also swallow KeyboardInterrupt/SystemExit. The
    same fallback chain is now a flat, best-effort loop using
    ``except Exception``, with identical parser order and final default.'''
    def __init__(self, prefix, lpath, title=None, authors=None, mime=None, date=None, ContentType=None,
                 thumbnail_name=None, size=None, other=None):
        from calibre.utils.date import parse_date
        # debug_print('Book::__init__ - title=', title)
        show_debug = title is not None and title.lower().find("xxxxx") >= 0
        if other is not None:
            other.title = title
            other.published_date = date
        if show_debug:
            debug_print("Book::__init__ - title=", title, 'authors=', authors)
            debug_print("Book::__init__ - other=", other)
        super().__init__(prefix, lpath, size, other)
        if title is not None and len(title) > 0:
            self.title = title
        if authors is not None and len(authors) > 0:
            self.authors_from_string(authors)
            if self.author_sort is None or self.author_sort == "Unknown":
                self.author_sort = author_to_author_sort(authors)
        self.mime = mime
        self.size = size  # will be set later if None
        # ContentType '6' is a real book entry. The date string on the device
        # comes in several formats, so try parsers from most to least
        # specific, then fall back to the file's ctime and finally to "now".
        # Each attempt is best-effort: failures are expected and ignored.
        if ContentType == '6' and date is not None:
            for parser in (
                    lambda d: time.strptime(d, "%Y-%m-%dT%H:%M:%S.%f"),
                    lambda d: time.strptime(d.split('+')[0], "%Y-%m-%dT%H:%M:%S"),
                    lambda d: time.strptime(d.split('+')[0], "%Y-%m-%d"),
                    lambda d: parse_date(d, assume_utc=True).timetuple(),
                    lambda d: time.gmtime(os.path.getctime(self.path)),
            ):
                try:
                    self.datetime = parser(date)
                    break
                except Exception:
                    continue
            else:
                self.datetime = time.gmtime()
        # Kobo-specific metadata, filled in later from the device database.
        self.kobo_metadata = Metadata(title, self.authors)
        self.contentID = None
        self.current_shelves = []
        self.kobo_collections = []
        self.can_put_on_shelves = True
        self.kobo_series = None
        self.kobo_series_number = None  # Kobo stores the series number as string. And it can have a leading "#".
        self.kobo_series_id = None
        self.kobo_subtitle = None
        self.kobo_bookstats = {}
        if thumbnail_name is not None:
            self.thumbnail = ImageWrapper(thumbnail_name)
        if show_debug:
            debug_print("Book::__init__ end - self=", self)
            debug_print("Book::__init__ end - title=", title, 'authors=', authors)
    @property
    def is_sideloaded(self):
        # If we don't have a content Id, we don't know what type it is.
        return self.contentID and self.contentID.startswith("file")
    @property
    def has_kobo_series(self):
        return self.kobo_series is not None
    @property
    def is_purchased_kepub(self):
        # Purchased books have ContentIDs assigned by Kobo, not file:// paths.
        return self.contentID and not self.contentID.startswith("file")
    def __str__(self):
        '''
        A string representation of this object, suitable for printing to
        console
        '''
        ans = ["Kobo metadata:"]
        def fmt(x, y):
            # Aligned "name: value" line for the summary below.
            ans.append('%-20s: %s'%(str(x), str(y)))
        if self.contentID:
            fmt('Content ID', self.contentID)
        if self.kobo_series:
            fmt('Kobo Series', self.kobo_series + ' #%s'%self.kobo_series_number)
        if self.kobo_series_id:
            fmt('Kobo Series ID', self.kobo_series_id)
        if self.kobo_subtitle:
            fmt('Subtitle', self.kobo_subtitle)
        if self.mime:
            fmt('MimeType', self.mime)
        ans.append(str(self.kobo_metadata))
        ans = '\n'.join(ans)
        return super().__str__() + "\n" + ans
class ImageWrapper:
    '''Minimal container holding the path to a book's cover thumbnail image.

    Used by ``Book`` to attach a thumbnail without loading the image data.
    '''
    def __init__(self, image_path):
        self.image_path = image_path
    def __repr__(self):
        # Added for debuggability; the original class had no repr.
        return f'{type(self).__name__}({self.image_path!r})'
class KTCollectionsBookList(CollectionsBookList):
    '''Book list for Kobo devices that can build device collection ("shelf")
    membership from library metadata columns and/or a calibre template.
    Also supports "device managed" collections that are preserved as-is.'''
    def __init__(self, oncard, prefix, settings):
        super().__init__(oncard, prefix, settings)
        self.set_device_managed_collections([])
    def get_collections(self, collection_attributes, collections_template=None, template_globals=None):
        '''Return a mapping of collection name -> iterable of books.

        :param collection_attributes: lookup names of metadata fields whose
            values become collection names
        :param collections_template: optional template producing additional
            collection names, multiple names separated by ':@:'
        :param template_globals: global variables passed to the template
        '''
        debug_print("KTCollectionsBookList:get_collections - start - collection_attributes=", collection_attributes)
        collections = {}
        # Normalize the attribute lookup names to lower case.
        ca = []
        for c in collection_attributes:
            ca.append(c.lower())
        collection_attributes = ca
        debug_print("KTCollectionsBookList:get_collections - collection_attributes=", collection_attributes)
        for book in self:
            tsval = book.get('title_sort', book.title)
            if tsval is None:
                tsval = book.title
            show_debug = self.is_debugging_title(tsval) or tsval is None
            if show_debug:  # or len(book.device_collections) > 0:
                debug_print('KTCollectionsBookList:get_collections - tsval=', tsval, "book.title=", book.title, "book.title_sort=", book.title_sort)
                debug_print('KTCollectionsBookList:get_collections - book.device_collections=', book.device_collections)
                # debug_print(book)
            # Make sure we can identify this book via the lpath
            lpath = getattr(book, 'lpath', None)
            if lpath is None:
                continue
            # If the book is not in the current library, we don't want to use the metadata for the collections
            # or it is a book that cannot be put in a collection (such as recommendations or previews)
            if book.application_id is None or not book.can_put_on_shelves:
                # debug_print("KTCollectionsBookList:get_collections - Book not in current library or cannot be put in a collection")
                continue
            # Decide how we will build the collections. The default: leave the
            # book in all existing collections. Do not add any new ones.
            attrs = ['device_collections']
            if getattr(book, '_new_book', False):
                debug_print("KTCollectionsBookList:get_collections - sending new book")
                if prefs['manage_device_metadata'] == 'manual':
                    # Ensure that the book is in all the book's existing
                    # collections plus all metadata collections
                    attrs += collection_attributes
                else:
                    # For new books, both 'on_send' and 'on_connect' do the same
                    # thing. The book's existing collections are ignored. Put
                    # the book in collections defined by its metadata.
                    attrs = list(collection_attributes)
            elif prefs['manage_device_metadata'] == 'on_connect':
                # For existing books, modify the collections only if the user
                # specified 'on_connect'
                attrs = list(collection_attributes)
            # Device managed collections are kept exactly as the device has
            # them; membership is copied over before the list is cleared.
            for cat_name in self.device_managed_collections:
                if cat_name in book.device_collections:
                    if cat_name not in collections:
                        collections[cat_name] = {}
                        if show_debug:
                            debug_print("KTCollectionsBookList:get_collections - Device Managed Collection:", cat_name)
                    if lpath not in collections[cat_name]:
                        collections[cat_name][lpath] = book
                        if show_debug:
                            debug_print("KTCollectionsBookList:get_collections - Device Managed Collection -added book to cat_name", cat_name)
            book.device_collections = []
            if show_debug:
                debug_print("KTCollectionsBookList:get_collections - attrs=", attrs)
            if collections_template is not None:
                # Sentinel handled specially in the loop below.
                attrs.append('%template%')
            for attr in attrs:
                fm = None
                attr = attr.strip()
                if show_debug:
                    debug_print("KTCollectionsBookList:get_collections - attr='%s'"%attr)
                # If attr is device_collections, then we cannot use
                # format_field, because we don't know the fields where the
                # values came from.
                if attr == 'device_collections':
                    doing_dc = True
                    val = book.device_collections  # is a list
                    if show_debug:
                        debug_print("KTCollectionsBookList:get_collections - adding book.device_collections", book.device_collections)
                elif attr == '%template%':
                    doing_dc = False
                    val = ''
                    if collections_template is not None:
                        # Evaluate the template; multiple collection names are
                        # separated by the ':@:' marker.
                        nv = SafeFormat().safe_format(collections_template, book,
                                                      'KOBO', book, global_vars=template_globals)
                        if show_debug:
                            debug_print("KTCollectionsBookList:get_collections collections_template - result", nv)
                        if nv:
                            val = [v.strip() for v in nv.split(':@:') if v.strip()]
                else:
                    doing_dc = False
                    ign, val, orig_val, fm = book.format_field_extended(attr)
                    # NOTE(review): this immediately overwrites the formatted
                    # value from format_field_extended with the raw attribute
                    # value; orig_val/fm are still used below. Confirm this is
                    # intentional before changing.
                    val = book.get(attr, None)
                    if show_debug:
                        debug_print("KTCollectionsBookList:get_collections - not device_collections")
                        debug_print('      ign=', ign, ', val=', val, ' orig_val=', orig_val, 'fm=', fm)
                        debug_print('      val=', val)
                if not val:
                    continue
                if isbytestring(val):
                    val = val.decode(preferred_encoding, 'replace')
                # Coerce val into a list of candidate collection names based
                # on the field's datatype.
                if isinstance(val, (list, tuple)):
                    val = list(val)
                # debug_print("KTCollectionsBookList:get_collections - val is list=", val)
                elif fm is not None and fm['datatype'] == 'series':
                    val = [orig_val]
                elif fm is not None and fm['datatype'] == 'rating':
                    val = [str(orig_val / 2.0)]
                elif fm is not None and fm['datatype'] == 'text' and fm['is_multiple']:
                    if isinstance(orig_val, (list, tuple)):
                        val = orig_val
                    else:
                        val = [orig_val]
                    if show_debug:
                        debug_print("KTCollectionsBookList:get_collections - val is text and multiple", val)
                elif fm is not None and fm['datatype'] == 'composite' and fm['is_multiple']:
                    if show_debug:
                        debug_print("KTCollectionsBookList:get_collections - val is compositeand multiple", val)
                    val = [v.strip() for v in
                           val.split(fm['is_multiple']['ui_to_list'])]
                else:
                    val = [val]
                if show_debug:
                    debug_print("KTCollectionsBookList:get_collections - val=", val)
                for category in val:
                    # debug_print("KTCollectionsBookList:get_collections - category=", category)
                    if doing_dc:
                        pass  # No need to do anything with device_collections
                    # Hierarchical tags/custom values like "[Group]" are skipped.
                    elif fm is not None and fm['is_custom']:  # is a custom field
                        if fm['datatype'] == 'text' and len(category) > 1 and \
                                category[0] == '[' and category[-1] == ']':
                            continue
                    else:  # is a standard field
                        if attr == 'tags' and len(category) > 1 and \
                                category[0] == '[' and category[-1] == ']':
                            continue
                    # The category should not be None, but, it has happened.
                    if not category:
                        continue
                    cat_name = str(category).strip(' ,')
                    if cat_name not in collections:
                        collections[cat_name] = {}
                        if show_debug:
                            debug_print("KTCollectionsBookList:get_collections - created collection for cat_name", cat_name)
                    if lpath not in collections[cat_name]:
                        collections[cat_name][lpath] = book
                        if show_debug:
                            debug_print("KTCollectionsBookList:get_collections - added book to collection for cat_name", cat_name)
                    if show_debug:
                        debug_print("KTCollectionsBookList:get_collections - cat_name", cat_name)
        # Sort collections
        result = {}
        for category, lpaths in collections.items():
            result[category] = lpaths.values()
        # debug_print("KTCollectionsBookList:get_collections - result=", result.keys())
        debug_print("KTCollectionsBookList:get_collections - end")
        return result
    def set_device_managed_collections(self, collection_names):
        '''Set the list of collection names that are owned by the device and
        must be preserved rather than rebuilt from metadata.'''
        self.device_managed_collections = collection_names
    def set_debugging_title(self, title):
        '''Set the partial title used to focus debug output on one book.'''
        self.debugging_title = title
    def is_debugging_title(self, title):
        '''True when debug output should be produced for this title (i.e. the
        configured debugging title is a substring of it). Always False unless
        calibre is running in debug mode.'''
        if not DEBUG:
            return False
        # debug_print("KTCollectionsBookList:is_debugging - title=", title, "self.debugging_title=", self.debugging_title)
        is_debugging = self.debugging_title is not None and len(self.debugging_title) > 0 and title is not None and (
            title.lower().find(self.debugging_title.lower()) >= 0 or len(title) == 0)
        # debug_print("KTCollectionsBookList:is_debugging - is_debugging=", is_debugging)
        return is_debugging
| 14,656 | Python | .py | 268 | 39.126866 | 148 | 0.550418 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,734 | __init__.py | kovidgoyal_calibre/src/calibre/devices/kobo/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,735 | bookmark.py | kovidgoyal_calibre/src/calibre/devices/kobo/bookmark.py | __license__ = 'GPL v3'
__copyright__ = '2011, Timothy Legge <timlegge@gmail.com> and Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from calibre.prints import debug_print
class Bookmark(): # {{{
'''
A simple class fetching bookmark data
kobo-specific
'''
    def __init__(self, db_connection, contentId, path, id, book_format, bookmark_extension):
        '''Collect reading-position and annotation data for one book.

        :param db_connection: open sqlite3 connection to the device database
        :param contentId: Kobo ContentID identifying the book on the device
        :param path: filesystem path of the book file on the device
        :param id: calibre's id for the book
        :param book_format: format of the book (e.g. epub)
        :param bookmark_extension: file extension used for bookmark files
        '''
        self.book_format = book_format
        self.bookmark_extension = bookmark_extension
        self.book_length = 0            # Not Used
        self.id = id
        self.last_read = 0
        self.last_read_location = 0     # Not Used
        self.path = path
        self.timestamp = 0
        self.user_notes = None
        self.db_connection = db_connection
        self.contentId = contentId
        self.percent_read = 0
        # Kepubs either end in .kepub.epub or have no file extension at all.
        self.kepub = (self.contentId.endswith('.kepub.epub') or not os.path.splitext(self.contentId)[1])
        # Populate timestamp/user_notes and (unused) book length immediately.
        self.get_bookmark_data()
        self.get_book_length()          # Not Used
def get_bookmark_data(self):
''' Return the timestamp and last_read_location '''
user_notes = {}
self.timestamp = os.path.getmtime(self.path)
cursor = self.db_connection.cursor()
book_query_values = (self.contentId,)
kepub_chapter_query = (
'SELECT c.ContentID, c.BookTitle, c.Title, c.VolumeIndex, c.___NumPages, c.MimeType '
'FROM content c '
'WHERE ContentType = 899 '
'AND c.BookID = ? '
'ORDER BY c.VolumeIndex'
)
bookmark_query = (
'SELECT bm.BookmarkID, bm.ContentID, bm.Text, bm.Annotation, '
'bm.ChapterProgress, bm.StartContainerChildIndex, bm.StartOffset, '
'c.BookTitle, c.Title, c.volumeIndex, c.MimeType '
'FROM Bookmark bm LEFT OUTER JOIN Content c ON c.ContentID = bm.ContentID '
'WHERE bm.Hidden = "false" AND bm.volumeid = ? '
'ORDER BY bm.ContentID, bm.chapterprogress'
)
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapters: contentId={self.contentId}")
cursor.execute(kepub_chapter_query, book_query_values)
kepub_chapters = {}
if self.kepub:
try:
for chapter_row in cursor:
chapter_contentID = chapter_row['ContentID']
chapter_contentID = chapter_contentID[:chapter_contentID.rfind('-')]
kepub_chapters[chapter_contentID] = {
'chapter_title': chapter_row['Title'],
'chapter_index': chapter_row['VolumeIndex']
}
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapter: kepub chapters={kepub_chapters}")
except:
debug_print("Kobo::Bookmark::get_bookmark_data - No chapters found")
cursor.execute(bookmark_query, book_query_values)
previous_chapter = 0
bm_count = 0
for row in cursor:
current_chapter = row['VolumeIndex'] if row['VolumeIndex'] is not None else 0
chapter_title = row['Title']
# For kepubs on newer firmware, the title needs to come from an 899 row.
if self.kepub:
chapter_contentID = row['ContentID']
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter chapter_contentID='{chapter_contentID}'")
filename_index = chapter_contentID.find('!')
book_contentID_part = chapter_contentID[:filename_index]
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter book_contentID_part='{book_contentID_part}'")
file_contentID_part = chapter_contentID[filename_index + 1:]
filename_index = file_contentID_part.find('!')
opf_reference = file_contentID_part[:filename_index]
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter opf_reference='{opf_reference}'")
file_contentID_part = file_contentID_part[filename_index + 1:]
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub: chapter file_contentID_part='{file_contentID_part}'")
# from urllib import quote
# file_contentID_part = quote(file_contentID_part)
chapter_contentID = book_contentID_part + "!" + opf_reference + "!" + file_contentID_part
debug_print(f"Kobo::Bookmark::get_bookmark_data - getting kepub chapter chapter_contentID='{chapter_contentID}'")
kepub_chapter = kepub_chapters.get(chapter_contentID, None)
if kepub_chapter is not None:
chapter_title = kepub_chapter['chapter_title']
current_chapter = kepub_chapter['chapter_index']
else:
chapter_title = ''
current_chapter = 0
if previous_chapter == current_chapter:
bm_count = bm_count + 1
else:
bm_count = 0
text = row['Text']
annotation = row['Annotation']
# A dog ear (bent upper right corner) is a bookmark
if row['StartContainerChildIndex'] == row['StartOffset'] == 0: # StartContainerChildIndex = StartOffset = 0
e_type = 'Bookmark'
text = row['Title']
# highlight is text with no annotation
elif text is not None and (annotation is None or annotation == ""):
e_type = 'Highlight'
elif text and annotation:
e_type = 'Annotation'
else:
e_type = 'Unknown annotation type'
note_id = current_chapter * 1000 + bm_count
# book_title = row[8]
chapter_progress = min(round(float(100*row['ChapterProgress']),2),100)
user_notes[note_id] = dict(id=self.id,
displayed_location=note_id,
type=e_type,
text=text,
annotation=annotation,
chapter=current_chapter,
chapter_title=chapter_title,
chapter_progress=chapter_progress)
previous_chapter = current_chapter
# debug_print("e_type:" , e_type, '\t', 'loc: ', note_id, 'text: ', text,
# 'annotation: ', annotation, 'chapter_title: ', chapter_title,
# 'chapter_progress: ', chapter_progress, 'date: ')
cursor.execute('SELECT DateLastRead, ___PercentRead, ReadStatus '
'FROM content '
'WHERE bookid IS NULL '
'AND ReadStatus > 0 '
'AND ContentID = ? '
'ORDER BY DateLastRead, ReadStatus',
book_query_values)
for row in cursor:
self.last_read = row['DateLastRead']
self.percent_read = 100 if (row['ReadStatus'] == 2) else row['___PercentRead']
# print row[1]
cursor.close()
# self.last_read_location = self.last_read - self.pdf_page_offset
self.user_notes = user_notes
def get_book_length(self):
# TL self.book_length = 0
# TL self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
pass
def __str__(self):
'''
A string representation of this object, suitable for printing to
console
'''
ans = ["Kobo bookmark:"]
def fmt(x, y):
ans.append('%-20s: %s'%(str(x), str(y)))
if self.contentId:
fmt('ContentID', self.contentId)
if self.last_read:
fmt('Last Read', self.last_read)
if self.timestamp:
fmt('Timestamp', self.timestamp)
if self.percent_read:
fmt('Percent Read', self.percent_read)
if self.user_notes:
fmt('User Notes', self.user_notes)
ans = '\n'.join(ans) + "\n"
return ans
# }}}
| 8,345 | Python | .py | 163 | 37.165644 | 134 | 0.550638 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,736 | driver.py | kovidgoyal_calibre/src/calibre/devices/kobo/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010-2019, Timothy Legge <timlegge@gmail.com>, Kovid Goyal <kovid@kovidgoyal.net> and David Forrester <davidfor@internode.on.net>'
__docformat__ = 'restructuredtext en'
'''
Driver for Kobo eReaders. Supports all e-ink devices.
Originally developed by Timothy Legge <timlegge@gmail.com>.
Extended to support Touch firmware 2.0.0 and later and newer devices by David Forrester <davidfor@internode.on.net>
Additional maintenance performed by Peter Thomas <peterjt@gmail.com>
'''
import os
import re
import shutil
import time
from contextlib import closing
from datetime import datetime
from calibre import fsync, prints, strftime
from calibre.constants import DEBUG
from calibre.devices.kobo.books import Book, ImageWrapper, KTCollectionsBookList
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.books import BookList, CollectionsBookList
from calibre.devices.usbms.driver import USBMS
from calibre.ebooks.metadata import authors_to_string
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.utils import normalize_languages
from calibre.prints import debug_print
from calibre.ptempfile import PersistentTemporaryFile, better_mktemp
from calibre.utils.config_base import prefs
from calibre.utils.date import parse_date
from polyglot.builtins import iteritems, itervalues, string_or_bytes
EPUB_EXT = '.epub'  # standard EPUB file extension
KEPUB_EXT = '.kepub'  # Kobo's enhanced EPUB format extension
KOBO_ROOT_DIR_NAME = ".kobo"  # hidden directory at the device root holding the database and caches
DEFAULT_COVER_LETTERBOX_COLOR = '#000000'  # default letterbox colour for cover images
# Implementation of QtQHash for strings. This doesn't seem to be in the Python implementation.
def qhash(inputstr):
    '''
    Hash a str or bytes value the way Qt's qHash() does, so the result
    matches the hashes the Kobo firmware stores. Any other input type
    yields -1.
    '''
    if isinstance(inputstr, str):
        data = inputstr.encode("utf8")
    elif isinstance(inputstr, bytes):
        data = inputstr
    else:
        return -1
    result = 0x00000000
    for octet in bytearray(data):
        result = (result << 4) + octet
        result ^= (result & 0xf0000000) >> 23
        result &= 0x0fffffff
    return result
def any_in(haystack, *needles):
    '''Return True if at least one of *needles* occurs in *haystack*.'''
    # any() with a generator short-circuits on the first hit, exactly like
    # the explicit loop it replaces. (It also returns False rather than the
    # original's implicit None on no match; callers only test truthiness.)
    return any(n in haystack for n in needles)
class DummyCSSPreProcessor:
    '''No-op stand-in for the OEB CSS preprocessor: returns its input unchanged.'''

    def __call__(self, data, add_namespace=False):
        # The signature mirrors the real preprocessor; the flag is ignored.
        return data
class KOBO(USBMS):
name = 'Kobo Reader Device Interface'
gui_name = 'Kobo Reader'
description = _('Communicate with the original Kobo Reader and the Kobo WiFi.')
author = 'Timothy Legge and David Forrester'
version = (2, 6, 0)
dbversion = 0
fwversion = (0,0,0)
_device_version_info = None
# The firmware for these devices is not being updated. But the Kobo desktop application
# will update the database if the device is connected. The database structure is completely
# backwardly compatible.
supported_dbversion = 170
has_kepubs = False
supported_platforms = ['windows', 'osx', 'linux']
booklist_class = CollectionsBookList
book_class = Book
# Ordered list of supported formats
FORMATS = ['kepub', 'epub', 'pdf', 'txt', 'cbz', 'cbr']
CAN_SET_METADATA = ['collections']
VENDOR_ID = [0x2237]
BCD = [0x0110, 0x0323, 0x0326]
ORIGINAL_PRODUCT_ID = [0x4165]
WIFI_PRODUCT_ID = [0x4161, 0x4162]
PRODUCT_ID = ORIGINAL_PRODUCT_ID + WIFI_PRODUCT_ID
VENDOR_NAME = ['KOBO_INC', 'KOBO']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['.KOBOEREADER', 'EREADER']
EBOOK_DIR_MAIN = ''
SUPPORTS_SUB_DIRS = True
SUPPORTS_ANNOTATIONS = True
# "kepubs" do not have an extension. The name looks like a GUID. Using an empty string seems to work.
VIRTUAL_BOOK_EXTENSIONS = frozenset(('kobo', ''))
EXTRA_CUSTOMIZATION_MESSAGE = [
_('The Kobo supports several collections including ')+ 'Read, Closed, Im_Reading. ' + _(
'Create tags for automatic management'),
_('Upload covers for books (newer readers)') + ':::'+_(
'Normally, the Kobo readers get the cover image from the'
' e-book file itself. With this option, calibre will send a '
'separate cover image to the reader, useful if you '
'have modified the cover.'),
_('Upload black and white covers'),
_('Show expired books') + ':::'+_(
'A bug in an earlier version left non kepubs book records'
' in the database. With this option calibre will show the '
'expired records and allow you to delete them with '
'the new delete logic.'),
_('Show previews') + ':::'+_(
'Kobo previews are included on the Touch and some other versions.'
' By default, they are no longer displayed as there is no good reason to '
'see them. Enable if you wish to see/delete them.'),
_('Show recommendations') + ':::'+_(
'Kobo now shows recommendations on the device. In some cases these have '
'files but in other cases they are just pointers to the web site to buy. '
'Enable if you wish to see/delete them.'),
_('Attempt to support newer firmware') + ':::'+_(
'Kobo routinely updates the firmware and the '
'database version. With this option calibre will attempt '
'to perform full read-write functionality - Here be Dragons!! '
'Enable only if you are comfortable with restoring your kobo '
'to Factory defaults and testing software'),
]
EXTRA_CUSTOMIZATION_DEFAULT = [
', '.join(['tags']),
True,
True,
True,
False,
False,
False
]
OPT_COLLECTIONS = 0
OPT_UPLOAD_COVERS = 1
OPT_UPLOAD_GRAYSCALE_COVERS = 2
OPT_SHOW_EXPIRED_BOOK_RECORDS = 3
OPT_SHOW_PREVIEWS = 4
OPT_SHOW_RECOMMENDATIONS = 5
OPT_SUPPORT_NEWER_FIRMWARE = 6
def __init__(self, *args, **kwargs):
USBMS.__init__(self, *args, **kwargs)
self.plugboards = self.plugboard_func = None
def initialize(self):
USBMS.initialize(self)
self.dbversion = 7
self._device_version_info = None
    def eject(self):
        # Drop the cached version-file contents before ejecting: the next
        # connected device may be a different one.
        self._device_version_info = None
        super().eject()
def device_database_path(self):
return os.path.join(self._main_prefix, KOBO_ROOT_DIR_NAME, 'KoboReader.sqlite')
def device_database_connection(self, use_row_factory=False):
import apsw
db_connection = apsw.Connection(self.device_database_path())
if use_row_factory:
db_connection.setrowtrace(self.row_factory)
return db_connection
def row_factory(self, cursor, row):
return {k[0]: row[i] for i, k in enumerate(cursor.getdescription())}
def get_database_version(self, connection):
cursor = connection.cursor()
cursor.execute('SELECT version FROM dbversion')
try:
result = next(cursor)
dbversion = result['version']
except StopIteration:
dbversion = 0
return dbversion
    def device_version_info(self):
        '''
        Return the comma-separated fields of the device's ".kobo/version"
        file as a list, caching the result until the device is ejected.

        The list includes the serial number (first field), the firmware
        version (third field) and the model id (last field), as consumed by
        device_serial_no(), get_firmware_version() and get_device_model_id().
        Returns None when the version file does not exist.
        '''
        debug_print("device_version_info - start")
        if not self._device_version_info:
            version_file = os.path.join(self._main_prefix, KOBO_ROOT_DIR_NAME, "version")
            debug_print(f"device_version_info - version_file={version_file}")
            if os.path.isfile(version_file):
                debug_print("device_version_info - have opened version_file")
                with open(version_file) as vf:
                    self._device_version_info = vf.read().strip().split(",")
                debug_print("device_version_info - self._device_version_info=", self._device_version_info)
        return self._device_version_info
def device_serial_no(self):
try:
return self.device_version_info()[0]
except Exception as e:
debug_print(f"Kobo::device_serial_no - didn't get serial number from file' - Exception: {e}")
return ''
def get_firmware_version(self):
# Determine the firmware version
try:
fwversion = self.device_version_info()[2]
fwversion = tuple(int(x) for x in fwversion.split('.'))
except Exception as e:
debug_print(f"Kobo::get_firmware_version - didn't get firmware version from file' - Exception: {e}")
fwversion = (0,0,0)
return fwversion
def get_device_model_id(self):
try:
# Unique model id has format '00000000-0000-0000-0000-000000000388'
# So far (Apr2024) only the last 3 digits have ever been used
return self.device_version_info()[-1]
except Exception as e:
debug_print(f"Kobo::get_device_model_id - didn't get model id from file' - Exception: {e}")
return ''
def sanitize_path_components(self, components):
invalid_filename_chars_re = re.compile(r'[\/\\\?%\*:;\|\"\'><\$!]', re.IGNORECASE | re.UNICODE)
return [invalid_filename_chars_re.sub('_', x) for x in components]
    def books(self, oncard=None, end_session=True):
        '''
        Build and return the booklist for the requested storage location by
        merging the cached metadata file with the rows of the device
        database, attaching reading-status collections and cover thumbnails,
        and syncing the cache back when anything changed.

        :param oncard: None for main memory, 'carda'/'cardb' for the cards.
        :param end_session: unused here, part of the device interface.
        '''
        from calibre.ebooks.metadata.meta import path_to_ext
        dummy_bl = BookList(None, None, None)
        # Bail out early with an empty list for absent/unknown cards.
        if oncard == 'carda' and not self._card_a_prefix:
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl
        elif oncard == 'cardb' and not self._card_b_prefix:
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl
        elif oncard and oncard != 'carda' and oncard != 'cardb':
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl
        prefix = self._card_a_prefix if oncard == 'carda' else \
                 self._card_b_prefix if oncard == 'cardb' \
                 else self._main_prefix
        # Firmware 1.0 and 1.4 predate kepub support.
        self.fwversion = self.get_firmware_version()
        if not (self.fwversion == (1,0) or self.fwversion == (1,4)):
            self.has_kepubs = True
        debug_print('Version of driver: ', self.version, 'Has kepubs:', self.has_kepubs)
        debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs)
        self.booklist_class.rebuild_collections = self.rebuild_collections
        # get the metadata cache
        bl = self.booklist_class(oncard, prefix, self.settings)
        need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
        # make a dict cache of paths so the lookup in the loop below is faster.
        bl_cache = {}
        for idx,b in enumerate(bl):
            bl_cache[b.lpath] = idx
        def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility):
            # Merge one database row into the booklist; returns True when
            # the cached metadata needs to be re-synced.
            changed = False
            try:
                lpath = path.partition(self.normalize_path(prefix))[2]
                if lpath.startswith(os.sep):
                    lpath = lpath[len(os.sep):]
                lpath = lpath.replace('\\', '/')
                # debug_print("LPATH: ", lpath, "  -  Title:  " , title)
                playlist_map = {}
                if lpath not in playlist_map:
                    playlist_map[lpath] = []
                # Map the device reading status onto pseudo-collections.
                if readstatus == 1:
                    playlist_map[lpath].append('Im_Reading')
                elif readstatus == 2:
                    playlist_map[lpath].append('Read')
                elif readstatus == 3:
                    playlist_map[lpath].append('Closed')
                # Related to a bug in the Kobo firmware that leaves an expired row for deleted books
                # this shows an expired Collection so the user can decide to delete the book
                if expired == 3:
                    playlist_map[lpath].append('Expired')
                # A SHORTLIST is supported on the touch but the data field is there on most earlier models
                if favouritesindex == 1:
                    playlist_map[lpath].append('Shortlist')
                # Label Previews
                if accessibility == 6:
                    playlist_map[lpath].append('Preview')
                elif accessibility == 4:
                    playlist_map[lpath].append('Recommendation')
                path = self.normalize_path(path)
                # print "Normalized FileName: " + path
                idx = bl_cache.get(lpath, None)
                if idx is not None:
                    # Book already known from the cache; mark it as seen.
                    bl_cache[lpath] = None
                    if ImageID is not None:
                        imagename = self.normalize_path(self._main_prefix + KOBO_ROOT_DIR_NAME + '/images/' + ImageID + ' - NickelBookCover.parsed')
                        if not os.path.exists(imagename):
                            # Try the Touch version if the image does not exist
                            imagename = self.normalize_path(self._main_prefix + KOBO_ROOT_DIR_NAME + '/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed')
                        # print "Image name Normalized: " + imagename
                        if not os.path.exists(imagename):
                            debug_print("Strange - The image name does not exist - title: ", title)
                        if imagename is not None:
                            bl[idx].thumbnail = ImageWrapper(imagename)
                    if (ContentType != '6' and MimeType != 'Shortcover'):
                        if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
                            if self.update_metadata_item(bl[idx]):
                                # print 'update_metadata_item returned true'
                                changed = True
                        else:
                            debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
                    # NOTE(review): playlist_map[lpath] is a list, so this
                    # membership test compares a whole list against the
                    # entries of device_collections - verify the intent.
                    if lpath in playlist_map and \
                        playlist_map[lpath] not in bl[idx].device_collections:
                        bl[idx].device_collections = playlist_map.get(lpath,[])
                else:
                    # New book not present in the cache.
                    if ContentType == '6' and MimeType == 'Shortcover':
                        book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
                    else:
                        try:
                            if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
                                book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
                            else:
                                debug_print(" Strange: The file: ", prefix, lpath, " does not exist!")
                                title = "FILE MISSING: " + title
                                book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
                        except:
                            debug_print("prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
                                        "mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID)
                            raise
                    # print 'Update booklist'
                    book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else []
                    if bl.add_book(book, replace_metadata=False):
                        changed = True
            except: # Probably a path encoding error
                import traceback
                traceback.print_exc()
            return changed
        with closing(self.device_database_connection(use_row_factory=True)) as connection:
            self.dbversion = self.get_database_version(connection)
            debug_print("Database Version: ", self.dbversion)
            cursor = connection.cursor()
            opts = self.settings()
            # The available columns depend on the schema version; older
            # schemas get constant placeholders so the row shape is uniform.
            if self.dbversion >= 33:
                query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, IsDownloaded from content where '
                    'BookID is Null %(previews)s %(recommendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(
                        expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')',
                        previews=' and Accessibility <> 6' if not self.show_previews else '',
                        recommendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] is False else '')
            elif self.dbversion >= 16 and self.dbversion < 33:
                query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where '
                    'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
                    if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
            elif self.dbversion < 16 and self.dbversion >= 14:
                query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where '
                    'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
                    if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
            elif self.dbversion < 14 and self.dbversion >= 8:
                query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where '
                    'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)'
                    if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')')
            else:
                query = ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, '
                    '"-1" as Accessibility, "1" as IsDownloaded from content where BookID is Null')
            try:
                cursor.execute(query)
            except Exception as e:
                err = str(e)
                if not (any_in(err, '___ExpirationStatus', 'FavouritesIndex', 'Accessibility', 'IsDownloaded')):
                    raise
                # Fall back to a minimal query when the schema is missing
                # one of the newer columns the version check predicted.
                query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as '
                    'FavouritesIndex, "-1" as Accessibility from content where '
                    'BookID is Null')
                cursor.execute(query)
            changed = False
            for row in cursor:
                # self.report_progress((i+1) / float(numrows), _('Getting list of books on device...'))
                if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].startswith("file:///usr/local/Kobo/help/"):
                    # These are internal to the Kobo device and do not exist
                    continue
                path = self.path_from_contentid(row['ContentID'], row['ContentType'], row['MimeType'], oncard)
                mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/epub+zip'
                # debug_print("mime:", mime)
                if oncard != 'carda' and oncard != 'cardb' and not row['ContentID'].startswith("file:///mnt/sd/"):
                    prefix = self._main_prefix
                elif oncard == 'carda' and row['ContentID'].startswith("file:///mnt/sd/"):
                    prefix = self._card_a_prefix
                # NOTE(review): the query selects 'ImageID' but this reads
                # row['ImageId'] - dict keys from row_factory are
                # case-sensitive, so verify this key matches what apsw
                # reports for the column name.
                changed = update_booklist(self._main_prefix, path,
                                          row['Title'], row['Attribution'], mime, row['DateCreated'], row['ContentType'],
                                          row['ImageId'], row['ReadStatus'], row['MimeType'], row['___ExpirationStatus'],
                                          row['FavouritesIndex'], row['Accessibility']
                                          )
                if changed:
                    need_sync = True
            cursor.close()
        # Remove books that are no longer in the filesystem. Cache contains
        # indices into the booklist if book not in filesystem, None otherwise
        # Do the operation in reverse order so indices remain valid
        for idx in sorted(itervalues(bl_cache), reverse=True, key=lambda x: x or -1):
            if idx is not None:
                need_sync = True
                del bl[idx]
        # print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
        #      (len(bl_cache), len(bl), need_sync)
        if need_sync: # self.count_found_in_bl != len(bl) or need_sync:
            # Write the refreshed cache back for the location we scanned.
            if oncard == 'cardb':
                self.sync_booklists((None, None, bl))
            elif oncard == 'carda':
                self.sync_booklists((None, bl, None))
            else:
                self.sync_booklists((bl, None, None))
        self.report_progress(1.0, _('Getting list of books on device...'))
        return bl
def filename_callback(self, path, mi):
# debug_print("Kobo:filename_callback:Path - {0}".format(path))
idx = path.rfind('.')
ext = path[idx:]
if ext == KEPUB_EXT:
path = path + EPUB_EXT
# debug_print("Kobo:filename_callback:New path - {0}".format(path))
return path
    def delete_via_sql(self, ContentID, ContentType):
        '''
        Remove a book's rows from the device database and return its ImageID
        so the caller can delete the cached cover images afterwards.
        '''
        # Delete Order:
        #    1) shortcover_page
        #    2) volume_shorcover
        #    2) content
        debug_print('delete_via_sql: ContentID: ', ContentID, 'ContentType: ', ContentType)
        with closing(self.device_database_connection()) as connection:
            cursor = connection.cursor()
            t = (ContentID,)
            cursor.execute('select ImageID from content where ContentID = ?', t)
            ImageID = None
            for row in cursor:
                # First get the ImageID to delete the images
                ImageID = row[0]
            cursor.close()
            cursor = connection.cursor()
            if ContentType == 6 and self.dbversion < 8:
                # Delete the shortcover_pages first
                cursor.execute('delete from shortcover_page where shortcoverid in (select ContentID from content where BookID = ?)', t)
            # Delete the volume_shortcovers second
            cursor.execute('delete from volume_shortcovers where volumeid = ?', t)
            # Delete the rows from content_keys
            if self.dbversion >= 8:
                cursor.execute('delete from content_keys where volumeid = ?', t)
            # Delete the chapters associated with the book next
            t = (ContentID,)
            # Kobo does not delete the Book row (ie the row where the BookID is Null)
            # The next server sync should remove the row
            cursor.execute('delete from content where BookID = ?', t)
            if ContentType == 6:
                # Reset the book row instead of deleting it; fall back to
                # progressively older column sets when the schema lacks the
                # newer columns ('no such column' errors).
                try:
                    cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 '
                        'where BookID is Null and ContentID =?',t)
                except Exception as e:
                    if 'no such column' not in str(e):
                        raise
                    try:
                        cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 '
                            'where BookID is Null and ContentID =?',t)
                    except Exception as e:
                        if 'no such column' not in str(e):
                            raise
                        cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\' '
                            'where BookID is Null and ContentID =?',t)
            else:
                cursor.execute('delete from content where BookID is Null and ContentID =?',t)
            cursor.close()
            if ImageID is None:
                print("Error condition ImageID was not found")
                print("You likely tried to delete a book that the kobo has not yet added to the database")
        # If all this succeeds we need to delete the images files via the ImageID
        return ImageID
def delete_images(self, ImageID, book_path):
if ImageID is not None:
path_prefix = KOBO_ROOT_DIR_NAME + '/images/'
path = self._main_prefix + path_prefix + ImageID
file_endings = (' - iPhoneThumbnail.parsed', ' - bbMediumGridList.parsed', ' - NickelBookCover.parsed', ' - N3_LIBRARY_FULL.parsed',
' - N3_LIBRARY_GRID.parsed', ' - N3_LIBRARY_LIST.parsed', ' - N3_SOCIAL_CURRENTREAD.parsed', ' - N3_FULL.parsed',)
for ending in file_endings:
fpath = path + ending
fpath = self.normalize_path(fpath)
if os.path.exists(fpath):
# print 'Image File Exists: ' + fpath
os.unlink(fpath)
    def delete_books(self, paths, end_session=True):
        '''
        Delete the books at *paths* from the device: database rows, cached
        cover images, the book file itself and any sidecar files.
        '''
        if self.modify_database_check("delete_books") is False:
            return
        for i, path in enumerate(paths):
            self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
            path = self.normalize_path(path)
            # print "Delete file normalized path: " + path
            extension = os.path.splitext(path)[1]
            ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(path)
            ContentID = self.contentid_from_path(path, ContentType)
            # Remove the database rows first; this also yields the ImageID
            # needed to clean up the cached cover images.
            ImageID = self.delete_via_sql(ContentID, ContentType)
            # print " We would now delete the Images for" + ImageID
            self.delete_images(ImageID, path)
            if os.path.exists(path):
                # Delete the ebook
                # print "Delete the ebook: " + path
                os.unlink(path)
                filepath = os.path.splitext(path)[0]
                # Remove sidecar files both with and without the book's extension.
                for ext in self.DELETE_EXTS:
                    if os.path.exists(filepath + ext):
                        # print "Filename: " + filename
                        os.unlink(filepath + ext)
                    if os.path.exists(path + ext):
                        # print "Filename: " + filename
                        os.unlink(path + ext)
                if self.SUPPORTS_SUB_DIRS:
                    try:
                        # Prune now-empty parent directories.
                        # print "removed"
                        os.removedirs(os.path.dirname(path))
                    except Exception:
                        pass
        self.report_progress(1.0, _('Removing books from device...'))
def remove_books_from_metadata(self, paths, booklists):
if self.modify_database_check("remove_books_from_metatata") is False:
return
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...'))
for bl in booklists:
for book in bl:
# print "Book Path: " + book.path
if path.endswith(book.path):
# print " Remove: " + book.path
bl.remove_book(book)
self.report_progress(1.0, _('Removing books from device metadata listing...'))
    def add_books_to_metadata(self, locations, metadata, booklists):
        '''
        Record freshly uploaded books in the appropriate in-memory booklist.

        :param locations: (path, location_code, ...) tuples for each upload.
        :param metadata: iterable of Metadata objects, parallel to locations.
        :param booklists: (main, carda, cardb) booklists to update.
        '''
        debug_print("KoboTouch::add_books_to_metadata - start. metadata=%s" % metadata[0])
        metadata = iter(metadata)
        for i, location in enumerate(locations):
            self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...'))
            info = next(metadata)
            debug_print("KoboTouch::add_books_to_metadata - info=%s" % info)
            blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0
            # Extract the correct prefix from the pathname. To do this correctly,
            # we must ensure that both the prefix and the path are normalized
            # so that the comparison will work. Book's __init__ will fix up
            # lpath, so we don't need to worry about that here.
            path = self.normalize_path(location[0])
            # NOTE(review): if self._main_prefix is falsy, 'prefix' is never
            # bound before the 'if not prefix' tests below - confirm
            # _main_prefix is always set when this is called.
            if self._main_prefix:
                prefix = self._main_prefix if \
                    path.startswith(self.normalize_path(self._main_prefix)) else None
            if not prefix and self._card_a_prefix:
                prefix = self._card_a_prefix if \
                    path.startswith(self.normalize_path(self._card_a_prefix)) else None
            if not prefix and self._card_b_prefix:
                prefix = self._card_b_prefix if \
                    path.startswith(self.normalize_path(self._card_b_prefix)) else None
            if prefix is None:
                prints('in add_books_to_metadata. Prefix is None!', path,
                        self._main_prefix)
                continue
            # print "Add book to metadata: "
            # print "prefix: " + prefix
            lpath = path.partition(prefix)[2]
            if lpath.startswith('/') or lpath.startswith('\\'):
                lpath = lpath[1:]
            # print "path: " + lpath
            book = self.book_class(prefix, lpath, info.title, other=info)
            if book.size is None or book.size == 0:
                book.size = os.stat(self.normalize_path(path)).st_size
            b = booklists[blist].add_book(book, replace_metadata=True)
            if b:
                debug_print("KoboTouch::add_books_to_metadata - have a new book - book=%s" % book)
                b._new_book = True
        self.report_progress(1.0, _('Adding books to device metadata listing...'))
    def contentid_from_path(self, path, ContentType):
        '''
        Convert a local filesystem path into the ContentID form stored in
        the device database, according to the given ContentType.
        '''
        if ContentType == 6:
            extension = os.path.splitext(path)[1]
            if extension == '.kobo':
                ContentID = os.path.splitext(path)[0]
                # Remove the prefix on the file.  it could be either
                ContentID = ContentID.replace(self._main_prefix, '')
            else:
                ContentID = path
                # Strip the kepub storage directory from the path.
                ContentID = ContentID.replace(self._main_prefix + self.normalize_path(KOBO_ROOT_DIR_NAME + '/kepub/'), '')
            if self._card_a_prefix is not None:
                ContentID = ContentID.replace(self._card_a_prefix, '')
        elif ContentType == 999: # HTML Files
            ContentID = path
            ContentID = ContentID.replace(self._main_prefix, "/mnt/onboard/")
            if self._card_a_prefix is not None:
                ContentID = ContentID.replace(self._card_a_prefix, "/mnt/sd/")
        else: # ContentType = 16
            ContentID = path
            ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
            if self._card_a_prefix is not None:
                ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
        # ContentIDs always use forward slashes, regardless of host OS.
        ContentID = ContentID.replace("\\", '/')
        return ContentID
def get_content_type_from_path(self, path):
# Strictly speaking the ContentType could be 6 or 10
# however newspapers have the same storage format
ContentType = 901
if path.find('kepub') >= 0:
ContentType = 6
return ContentType
def get_content_type_from_extension(self, extension):
if extension == '.kobo':
# Kobo books do not have book files. They do have some images though
# print "kobo book"
ContentType = 6
elif extension == '.pdf' or extension == '.epub':
# print "ePub or pdf"
ContentType = 16
elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html':
# print "txt"
if self.fwversion == (1,0) or self.fwversion == (1,4) or self.fwversion == (1,7,4):
ContentType = 999
else:
ContentType = 901
else: # if extension == '.html' or extension == '.txt':
ContentType = 901 # Yet another hack: to get around Kobo changing how ContentID is stored
return ContentType
    def path_from_contentid(self, ContentID, ContentType, MimeType, oncard):
        '''
        Convert a database ContentID back into a local filesystem path,
        the inverse of contentid_from_path().
        '''
        path = ContentID
        if oncard == 'cardb':
            print('path from_contentid cardb')
        elif oncard == 'carda':
            path = path.replace("file:///mnt/sd/", self._card_a_prefix)
            # print "SD Card: " + path
        else:
            if ContentType == "6" and MimeType == 'Shortcover':
                # This is a hack as the kobo files do not exist
                # but the path is required to make a unique id
                # for calibre's reference
                path = self._main_prefix + path + '.kobo'
                # print "Path: " + path
            elif (ContentType == "6" or ContentType == "10") and (
                    MimeType == 'application/x-kobo-epub+zip' or (
                        MimeType == 'application/epub+zip' and self.isTolinoDevice())
            ):
                # kepubs either live under the user-visible root or in the
                # hidden .kobo/kepub/ store.
                if path.startswith("file:///mnt/onboard/"):
                    path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
                else:
                    path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
                # print "Internal: " + path
            else:
                # if path.startswith("file:///mnt/onboard/"):
                path = path.replace("file:///mnt/onboard/", self._main_prefix)
                path = path.replace("/mnt/onboard/", self._main_prefix)
                # print "Internal: " + path
        return path
def modify_database_check(self, function):
    """Return True when it is safe to modify the device database.

    Checks whether the database version is supported and, when it is not,
    whether the user has explicitly opted in to editing databases created
    by newer firmware.  *function* names the calling operation (currently
    unused except as caller context).  Raises UserFeedback (WARN) when the
    database is too new and the override option is not set.
    """
    if self.dbversion > self.supported_dbversion:
        # Unsupported database
        opts = self.settings()
        if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]:
            debug_print('The database has been upgraded past supported version')
            self.report_progress(1.0, _('Removing books from device...'))
            from calibre.devices.errors import UserFeedback
            raise UserFeedback(_("Kobo database version unsupported - See details"),
                _('Your Kobo is running an updated firmware/database version.'
                ' As calibre does not know about this updated firmware,'
                ' database editing is disabled, to prevent corruption.'
                ' You can still send books to your Kobo with calibre, '
                ' but deleting books and managing collections is disabled.'
                ' If you are willing to experiment and know how to reset'
                ' your Kobo to Factory defaults, you can override this'
                ' check by right clicking the device icon in calibre and'
                ' selecting "Configure this device" and then the '
                ' "Attempt to support newer firmware" option.'
                ' Doing so may require you to perform a Factory reset of'
                ' your Kobo.') + ((
                '\nDevice database version: %s.'
                '\nDevice firmware version: %s') % (self.dbversion, self.display_fwversion))
                , UserFeedback.WARN)
            return False
        else:
            # The user chose to edit the database anyway
            return True
    else:
        # Supported database version
        return True
def get_file(self, path, *args, **kwargs):
    """Copy a book file off the device.

    '.kobo' entries are rows in the device's sqlite database, not files,
    so they cannot be exported; UserFeedback (WARN) is raised for them.
    Everything else is handled by the generic USBMS implementation.
    """
    tpath = self.munge_path(path)
    extension = os.path.splitext(tpath)[1]
    if extension == '.kobo':
        from calibre.devices.errors import UserFeedback
        raise UserFeedback(_("Not Implemented"),
            _('".kobo" files do not exist on the device as books; '
            'instead they are rows in the sqlite database. '
            'Currently they cannot be exported or viewed.'),
            UserFeedback.WARN)
    return USBMS.get_file(self, path, *args, **kwargs)
@classmethod
def book_from_path(cls, prefix, lpath, title, authors, mime, date, ContentType, ImageID):
    """Build a book object for the file at ``prefix/lpath``.

    Metadata is read from the file itself when the driver is configured
    to do so (or must do so); otherwise it is guessed from the file name
    via the configured filename template.  The supplied title/authors and
    Kobo-specific ContentType/ImageID are attached to the resulting book.
    """
    # debug_print("KOBO:book_from_path - title=%s"%title)
    from calibre.ebooks.metadata import MetaInformation
    if cls.read_metadata or cls.MUST_READ_METADATA:
        mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath)))
    else:
        from calibre.ebooks.metadata.meta import metadata_from_filename
        mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)),
                                    cls.build_template_regexp())
    if mi is None:
        # Last resort: bare file name as title, unknown author.
        mi = MetaInformation(os.path.splitext(os.path.basename(lpath))[0],
                             [_('Unknown')])
    size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size
    book = cls.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=size, other=mi)
    return book
def get_device_paths(self):
    """Return a mapping of source id (0=main, 1=card A, 2=card B) to the
    path of the metadata.calibre cache file on each mounted storage."""
    paths = {}
    for name, source_id in (('main', 0), ('card_a', 1), ('card_b', 2)):
        prefix = getattr(self, '_%s_prefix' % name)
        if prefix is not None and os.path.exists(prefix):
            paths[source_id] = os.path.join(prefix, 'metadata.calibre')
    return paths
def reset_readstatus(self, connection, oncard):
    """Reset the ReadStatus of every book on the given storage to 0
    (unread) and mark them as not-yet-opened."""
    cursor = connection.cursor()
    # Reset Im_Reading list in the database
    if oncard == 'carda':
        query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
    elif oncard != 'carda' and oncard != 'cardb':
        query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
    # NOTE(review): when oncard == 'cardb' neither branch assigns `query`
    # and the execute below would raise NameError; callers never pass
    # 'cardb' here in practice — confirm before relying on it.
    try:
        cursor.execute(query)
    except:
        debug_print(' Database Exception: Unable to reset ReadStatus list')
        raise
    finally:
        cursor.close()
def set_readstatus(self, connection, ContentID, ReadStatus):
    """Set the reading status of a single book row in the Content table.

    ReadStatus 0 marks the book unread and clears DateLastRead; any other
    value keeps an existing DateLastRead or stamps one when missing.  No
    write is performed when the stored status already matches.
    """
    debug_print("Kobo::set_readstatus - ContentID=%s, ReadStatus=%d" % (ContentID, ReadStatus))
    cursor = connection.cursor()
    t = (ContentID,)
    cursor.execute('select DateLastRead, ReadStatus from Content where BookID is Null and ContentID = ?', t)
    try:
        result = next(cursor)
        datelastread = result['DateLastRead']
        current_ReadStatus = result['ReadStatus']
    except StopIteration:
        # No matching row: behave as if the book were unread.
        datelastread = None
        current_ReadStatus = 0
    if not ReadStatus == current_ReadStatus:
        if ReadStatus == 0:
            datelastread = None
        else:
            # NOTE(review): 'CURRENT_TIMESTAMP' is bound as a literal string
            # parameter, not evaluated as the SQL function — confirm intended.
            datelastread = 'CURRENT_TIMESTAMP' if datelastread is None else datelastread
        t = (ReadStatus, datelastread, ContentID,)
        try:
            debug_print("Kobo::set_readstatus - Making change - ContentID=%s, ReadStatus=%d, DateLastRead=%s" % (ContentID, ReadStatus, datelastread))
            cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? where BookID is Null and ContentID = ?', t)
        except:
            debug_print(' Database Exception: Unable to update ReadStatus')
            raise
    cursor.close()
def reset_favouritesindex(self, connection, oncard):
    """Remove every book on the given storage from the Shortlist by
    setting FavouritesIndex back to -1."""
    # Reset FavouritesIndex list in the database
    if oncard == 'carda':
        query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\''
    elif oncard != 'carda' and oncard != 'cardb':
        query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\''
    # NOTE(review): as in reset_readstatus, oncard == 'cardb' leaves `query`
    # unbound and would raise NameError; 'cardb' is never passed in practice.
    cursor = connection.cursor()
    try:
        cursor.execute(query)
    except Exception as e:
        debug_print(' Database Exception: Unable to reset Shortlist list')
        # Older schemas have no FavouritesIndex column; that case is benign.
        if 'no such column' not in str(e):
            raise
    finally:
        cursor.close()
def set_favouritesindex(self, connection, ContentID):
    """Add the book with *ContentID* to the Shortlist (FavouritesIndex=1).

    A missing FavouritesIndex column (older database schemas) is
    tolerated; any other database error is re-raised.
    """
    cursor = connection.cursor()
    t = (ContentID,)
    try:
        cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t)
    except Exception as e:
        debug_print(' Database Exception: Unable set book as Shortlist')
        if 'no such column' not in str(e):
            raise
    finally:
        cursor.close()
def update_device_database_collections(self, booklists, collections_attributes, oncard):
    """Rewrite the device database so its ReadStatus lists and Shortlist
    match the collections calculated from *booklists*.

    Only the category names in ``supportedcategories`` are acted on; the
    provided *collections_attributes* is currently overridden with
    ['tags'] because no other source is supported.  When no collections
    exist at all, every book's ReadStatus and Shortlist entry is reset.
    """
    debug_print("Kobo:update_device_database_collections - oncard='%s'"%oncard)
    if self.modify_database_check("update_device_database_collections") is False:
        return
    # Only process categories in this list
    supportedcategories = {
        "Im_Reading":1,
        "Read":2,
        "Closed":3,
        "Shortlist":4,
        # "Preview":99, # Unsupported as we don't want to change it
    }
    # Define lists for the ReadStatus
    readstatuslist = {
        "Im_Reading":1,
        "Read":2,
        "Closed":3,
    }
    accessibilitylist = {
        "Preview":6,
        "Recommendation":4,
    }
    # debug_print('Starting update_device_database_collections', collections_attributes)
    # Force collections_attributes to be 'tags' as no other is currently supported
    # debug_print('KOBO: overriding the provided collections_attributes:', collections_attributes)
    collections_attributes = ['tags']
    collections = booklists.get_collections(collections_attributes)
    # debug_print('Kobo:update_device_database_collections - Collections:', collections)
    # Create a connection to the sqlite database
    # Needs to be outside books collection as in the case of removing
    # the last book from the collection the list of books is empty
    # and the removal of the last book would not occur
    with closing(self.device_database_connection()) as connection:
        if collections:
            # Need to reset the collections outside the particular loops
            # otherwise the last item will not be removed
            self.reset_readstatus(connection, oncard)
            if self.dbversion >= 14:
                self.reset_favouritesindex(connection, oncard)
            # Process any collections that exist
            for category, books in collections.items():
                if category in supportedcategories:
                    # debug_print("Category: ", category, " id = ", readstatuslist.get(category))
                    for book in books:
                        # debug_print(' Title:', book.title, 'category: ', category)
                        if category not in book.device_collections:
                            book.device_collections.append(category)
                        extension = os.path.splitext(book.path)[1]
                        ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(book.path)
                        ContentID = self.contentid_from_path(book.path, ContentType)
                        if category in tuple(readstatuslist):
                            # Manage ReadStatus
                            self.set_readstatus(connection, ContentID, readstatuslist.get(category))
                        elif category == 'Shortlist' and self.dbversion >= 14:
                            # Manage FavouritesIndex/Shortlist
                            self.set_favouritesindex(connection, ContentID)
                        elif category in tuple(accessibilitylist):
                            # Do not manage the Accessibility List
                            pass
        else:  # No collections
            # Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
            debug_print("No Collections - resetting ReadStatus")
            self.reset_readstatus(connection, oncard)
            if self.dbversion >= 14:
                debug_print("No Collections - resetting FavouritesIndex")
                self.reset_favouritesindex(connection, oncard)
    # debug_print('Finished update_device_database_collections', collections_attributes)
def get_collections_attributes(self):
    """Return the configured collection source columns, lowercased and stripped."""
    return [column.lower().strip() for column in self.collections_columns.split(',')]
@property
def collections_columns(self):
    """The user-configured, comma-separated list of columns to build collections from."""
    return self.settings().extra_customization[self.OPT_COLLECTIONS]
@property
def read_metadata(self):
    """Whether metadata should be read from the book files themselves."""
    opts = self.settings()
    return opts.read_metadata
@property
def show_previews(self):
    """True when the stored option value is the literal False.

    NOTE(review): the identity comparison (`is False`) means any other
    falsy value yields False here — confirm the stored option's sense.
    """
    value = self.settings().extra_customization[self.OPT_SHOW_PREVIEWS]
    return value is False
@property
def display_fwversion(self):
    """The firmware version as a dotted string, or '' when unknown."""
    if self.fwversion is None:
        return ''
    return '.'.join(str(part) for part in self.fwversion)
def sync_booklists(self, booklists, end_session=True):
    """Write the booklists back to the device and update the device
    database collections for each mounted storage location."""
    debug_print('KOBO:sync_booklists - start')
    paths = self.get_device_paths()
    # debug_print('KOBO:sync_booklists - booklists:', booklists)
    blists = {}
    # Keep only the booklists for storage locations that actually exist.
    for i in paths:
        try:
            if booklists[i] is not None:
                # debug_print('Booklist: ', i)
                blists[i] = booklists[i]
        except IndexError:
            pass
    collections = self.get_collections_attributes()
    # debug_print('KOBO: collection fields:', collections)
    for i, blist in blists.items():
        # Source id 0 is main memory; anything else is treated as card A.
        if i == 0:
            oncard = 'main'
        else:
            oncard = 'carda'
        self.update_device_database_collections(blist, collections, oncard)
    USBMS.sync_booklists(self, booklists, end_session=end_session)
    debug_print('KOBO:sync_booklists - end')
def rebuild_collections(self, booklist, oncard):
    """Rebuild the device collections for *booklist*, passing no
    collection attributes (the update override supplies its own)."""
    self.update_device_database_collections(booklist, [], oncard)
def upload_cover(self, path, filename, metadata, filepath):
    '''
    Upload book cover to the device. Default implementation does nothing.

    :param path: The full path to the folder where the associated book is located.
    :param filename: The name of the book file without the extension.
    :param metadata: metadata belonging to the book. Use metadata.thumbnail
    for cover
    :param filepath: The full path to the ebook file
    '''
    opts = self.settings()
    if not opts.extra_customization[self.OPT_UPLOAD_COVERS]:
        # Building thumbnails disabled
        debug_print('KOBO: not uploading cover')
        return
    # Upload in grayscale when the device option says so.
    if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]:
        uploadgrayscale = False
    else:
        uploadgrayscale = True
    debug_print('KOBO: uploading cover')
    try:
        self._upload_cover(path, filename, metadata, filepath, uploadgrayscale)
    except:
        # Cover upload is best-effort: failures are logged, never raised.
        debug_print('FAILED to upload cover', filepath)
def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale):
    """Write the book's cover into the device's images folder, once per
    image variant the firmware expects.

    Only image files that already exist on the device are overwritten, so
    covers are refreshed rather than created from scratch.  Bails out
    silently when the book has no cover or no database row.
    """
    from calibre.utils.img import save_cover_data_to
    if metadata.cover:
        cover = self.normalize_path(metadata.cover.replace('/', os.sep))
        if os.path.exists(cover):
            # Get ContentID for Selected Book
            extension = os.path.splitext(filepath)[1]
            ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath)
            ContentID = self.contentid_from_path(filepath, ContentType)
            with closing(self.device_database_connection()) as connection:
                cursor = connection.cursor()
                t = (ContentID,)
                cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
                try:
                    result = next(cursor)
                    # debug_print("ImageId: ", result[0])
                    ImageID = result[0]
                except StopIteration:
                    debug_print("No rows exist in the database - cannot upload")
                    return
                finally:
                    cursor.close()
                if ImageID is not None:
                    path_prefix = KOBO_ROOT_DIR_NAME + '/images/'
                    path = self._main_prefix + path_prefix + ImageID
                    # Image variants: file-name suffix -> (width, height).
                    file_endings = {' - iPhoneThumbnail.parsed':(103,150),
                            ' - bbMediumGridList.parsed':(93,135),
                            ' - NickelBookCover.parsed':(500,725),
                            ' - N3_LIBRARY_FULL.parsed':(355,530),
                            ' - N3_LIBRARY_GRID.parsed':(149,233),
                            ' - N3_LIBRARY_LIST.parsed':(60,90),
                            ' - N3_FULL.parsed':(600,800),
                            ' - N3_SOCIAL_CURRENTREAD.parsed':(120,186)}
                    for ending, resize in file_endings.items():
                        fpath = path + ending
                        fpath = self.normalize_path(fpath.replace('/', os.sep))
                        if os.path.exists(fpath):
                            with open(cover, 'rb') as f:
                                data = f.read()
                            # Return the data resized and grayscaled if
                            # required
                            data = save_cover_data_to(data, grayscale=uploadgrayscale, resize_to=resize, minify_to=resize)
                            with open(fpath, 'wb') as f:
                                f.write(data)
                                fsync(f)
                else:
                    debug_print("ImageID could not be retrieved from the database")
def prepare_addable_books(self, paths):
    '''
    The Kobo supports an encrypted epub referred to as a kepub
    Unfortunately Kobo decided to put the files on the device
    with no file extension. I just hope that decision causes
    them as much grief as it does me :-)

    This has to make a temporary copy of the book files with a
    epub extension to allow calibre's normal processing to
    deal with the file appropriately
    '''
    for idx, path in enumerate(paths):
        if path.find('kepub') >= 0:
            # Copy the kepub to a persistent temp file with a .epub
            # extension and substitute that path in the returned list.
            with closing(open(path, 'rb')) as r:
                tf = PersistentTemporaryFile(suffix='.epub')
                shutil.copyfileobj(r, tf)
                # tf.write(r.read())
                paths[idx] = tf.name
    return paths
@classmethod
def config_widget(self):
    # Build the tabbed configuration dialog for this driver.
    # NOTE(review): declared @classmethod but the first parameter is named
    # 'self'; it actually receives the class object.
    # TODO: Cleanup the following
    self.current_friendly_name = self.gui_name
    from calibre.gui2.device_drivers.tabbed_device_config import TabbedDeviceConfig
    return TabbedDeviceConfig(self.settings(), self.FORMATS, self.SUPPORTS_SUB_DIRS,
                self.MUST_READ_METADATA, self.SUPPORTS_USE_AUTHOR_SORT,
                self.EXTRA_CUSTOMIZATION_MESSAGE, self,
                extra_customization_choices=self.EXTRA_CUSTOMIZATION_CHOICES)
def migrate_old_settings(self, old_settings):
    """Convert a legacy extra_customization-based settings object into the
    keyword settings dictionary used by the newer configuration system."""
    # Positions of the legacy options inside extra_customization.
    (opt_collections, opt_upload_covers, opt_upload_grayscale,
     opt_show_expired, opt_show_previews, opt_show_recommendations,
     opt_support_newer_firmware) = range(7)
    extra = old_settings.extra_customization
    return {
        'format_map': old_settings.format_map,
        'save_template': old_settings.save_template,
        'use_subdirs': old_settings.use_subdirs,
        'read_metadata': old_settings.read_metadata,
        'use_author_sort': old_settings.use_author_sort,
        'extra_customization': extra,
        'collections_columns': extra[opt_collections],
        'upload_covers': extra[opt_upload_covers],
        'upload_grayscale': extra[opt_upload_grayscale],
        'show_expired_books': extra[opt_show_expired],
        'show_previews': extra[opt_show_previews],
        'show_recommendations': extra[opt_show_recommendations],
        'support_newer_firmware': extra[opt_support_newer_firmware],
    }
def create_annotations_path(self, mdata, device_path=None):
    """Return where annotations for *mdata* should be written.

    A caller-supplied *device_path* wins; otherwise fall back to the
    generic USBMS behaviour.
    """
    return device_path if device_path else USBMS.create_annotations_path(self, mdata)
def get_annotations(self, path_map):
    """Fetch reading annotations (bookmarks/highlights) for the books in
    *path_map* from the device database.

    Returns a dict mapping book id to a UserAnnotation wrapping a
    Bookmark; only books with an epub format and an existing path on the
    device are included.
    """
    from calibre.devices.kobo.bookmark import Bookmark
    EPUB_FORMATS = ['epub']
    epub_formats = set(EPUB_FORMATS)
    def get_storage():
        # All mounted storage locations that can contain books.
        storage = []
        if self._main_prefix:
            storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN))
        if self._card_a_prefix:
            storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A))
        if self._card_b_prefix:
            storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B))
        return storage
    def resolve_bookmark_paths(storage, path_map):
        # Keep only books with an epub format whose path exists; rewrite
        # path_map values from dicts to plain paths.
        pop_list = []
        book_ext = {}
        for book_id in path_map:
            file_fmts = set()
            for fmt in path_map[book_id]['fmts']:
                file_fmts.add(fmt)
            bookmark_extension = None
            if file_fmts.intersection(epub_formats):
                book_extension = list(file_fmts.intersection(epub_formats))[0]
                bookmark_extension = 'epub'
            if bookmark_extension:
                for vol in storage:
                    # NOTE(review): `vol` is unused and `bkmk_path = bkmk_path`
                    # is a no-op; the loop just retries the same path per
                    # storage location — confirm this is intentional.
                    bkmk_path = path_map[book_id]['path']
                    bkmk_path = bkmk_path
                    if os.path.exists(bkmk_path):
                        path_map[book_id] = bkmk_path
                        book_ext[book_id] = book_extension
                        break
                else:
                    pop_list.append(book_id)
            else:
                pop_list.append(book_id)
        # Remove non-existent bookmark templates
        for book_id in pop_list:
            path_map.pop(book_id)
        return path_map, book_ext
    storage = get_storage()
    path_map, book_ext = resolve_bookmark_paths(storage, path_map)
    bookmarked_books = {}
    with closing(self.device_database_connection(use_row_factory=True)) as connection:
        for book_id in path_map:
            extension = os.path.splitext(path_map[book_id])[1]
            ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(path_map[book_id])
            ContentID = self.contentid_from_path(path_map[book_id], ContentType)
            debug_print("get_annotations - ContentID: ", ContentID, "ContentType: ", ContentType)
            bookmark_ext = extension
            myBookmark = Bookmark(connection, ContentID, path_map[book_id], book_id, book_ext[book_id], bookmark_ext)
            bookmarked_books[book_id] = self.UserAnnotation(type='kobo_bookmark', value=myBookmark)
    # This returns as job.result in gui2.ui.annotations_fetched(self,job)
    return bookmarked_books
def generate_annotation_html(self, bookmark):
    """Render a Bookmark's last-read position and user notes as an HTML
    fragment (<div class="user_annotations">...</div>) for the book's
    comments field."""
    import calendar
    from calibre.ebooks.BeautifulSoup import BeautifulSoup
    # Returns <div class="user_annotations"> ... </div>
    # last_read_location = bookmark.last_read_location
    # timestamp = bookmark.timestamp
    percent_read = bookmark.percent_read
    debug_print("Kobo::generate_annotation_html - last_read: ", bookmark.last_read)
    if bookmark.last_read is not None:
        # Try the known timestamp formats in turn; fall back to "now".
        try:
            last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S"))))
        except:
            try:
                # NOTE(review): time.strptime does not support %f (that is a
                # datetime-only directive), so this branch always raises and
                # falls through — confirm whether datetime.strptime was meant.
                last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f"))))
            except:
                try:
                    last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%SZ"))))
                except:
                    last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
    else:
        # self.datetime = time.gmtime()
        last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
    # debug_print("Percent read: ", percent_read)
    ka_soup = BeautifulSoup()
    dtc = 0
    divTag = ka_soup.new_tag('div')
    divTag['class'] = 'user_annotations'
    # Add the last-read location
    # NOTE(review): both branches below build identical markup; the
    # book_format distinction is currently redundant.
    if bookmark.book_format == 'epub':
        markup = _("<hr /><b>Book last read:</b> %(time)s<br /><b>Percentage read:</b> %(pr)d%%<hr />") % dict(
            time=last_read,
            # loc=last_read_location,
            pr=percent_read)
    else:
        markup = _("<hr /><b>Book last read:</b> %(time)s<br /><b>Percentage read:</b> %(pr)d%%<hr />") % dict(
            time=last_read,
            # loc=last_read_location,
            pr=percent_read)
    spanTag = BeautifulSoup('<span style="font-weight:normal">' + markup + '</span>').find('span')
    divTag.insert(dtc, spanTag)
    dtc += 1
    divTag.insert(dtc, ka_soup.new_tag('br'))
    dtc += 1
    if bookmark.user_notes:
        user_notes = bookmark.user_notes
        annotations = []
        # Add the annotations sorted by location
        for location in sorted(user_notes):
            if user_notes[location]['type'] == 'Bookmark':
                annotations.append(
                    _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b>'
                      '<br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br />%(annotation)s<br /><hr />') % dict(
                        chapter=user_notes[location]['chapter'],
                        dl=user_notes[location]['displayed_location'],
                        typ=user_notes[location]['type'],
                        chapter_title=user_notes[location]['chapter_title'],
                        chapter_progress=user_notes[location]['chapter_progress'],
                        annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else ""))
            elif user_notes[location]['type'] == 'Highlight':
                annotations.append(
                    _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br />'
                      '<b>Chapter progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><hr />') % dict(
                        chapter=user_notes[location]['chapter'],
                        dl=user_notes[location]['displayed_location'],
                        typ=user_notes[location]['type'],
                        chapter_title=user_notes[location]['chapter_title'],
                        chapter_progress=user_notes[location]['chapter_progress'],
                        text=user_notes[location]['text']))
            elif user_notes[location]['type'] == 'Annotation':
                annotations.append(
                    _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br />'
                      '<b>%(typ)s</b><br /><b>Chapter progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br />'
                      '<b>Notes:</b> %(annotation)s<br /><hr />') % dict(
                        chapter=user_notes[location]['chapter'],
                        dl=user_notes[location]['displayed_location'],
                        typ=user_notes[location]['type'],
                        chapter_title=user_notes[location]['chapter_title'],
                        chapter_progress=user_notes[location]['chapter_progress'],
                        text=user_notes[location]['text'],
                        annotation=user_notes[location]['annotation']))
            else:
                # NOTE(review): this fallback branch is byte-identical to the
                # 'Annotation' branch above.
                annotations.append(
                    _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br />'
                      '<b>%(typ)s</b><br /><b>Chapter progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br />'
                      '<b>Notes:</b> %(annotation)s<br /><hr />') % dict(
                        chapter=user_notes[location]['chapter'],
                        dl=user_notes[location]['displayed_location'],
                        typ=user_notes[location]['type'],
                        chapter_title=user_notes[location]['chapter_title'],
                        chapter_progress=user_notes[location]['chapter_progress'],
                        text=user_notes[location]['text'],
                        annotation=user_notes[location]['annotation']))
        for annotation in annotations:
            annot = BeautifulSoup('<span>' + annotation + '</span>').find('span')
            divTag.insert(dtc, annot)
            dtc += 1
    ka_soup.insert(0,divTag)
    return ka_soup
def add_annotation_to_library(self, db, db_id, annotation):
    """Merge annotations read from the device into the library book's
    comments, stripping any previously inserted annotation block first.

    Books tagged Catalog or Clippings are skipped.
    """
    from calibre.ebooks.BeautifulSoup import prettify
    bm = annotation
    ignore_tags = {'Catalog', 'Clippings'}
    if bm.type == 'kobo_bookmark' and bm.value.last_read:
        mi = db.get_metadata(db_id, index_is_id=True)
        debug_print("KOBO:add_annotation_to_library - Title: ", mi.title)
        user_notes_soup = self.generate_annotation_html(bm.value)
        if mi.comments:
            a_offset = mi.comments.find('<div class="user_annotations">')
            ad_offset = mi.comments.find('<hr class="annotations_divider" />')
            # Truncate off any annotation block added by a previous sync.
            if a_offset >= 0:
                mi.comments = mi.comments[:a_offset]
            if ad_offset >= 0:
                mi.comments = mi.comments[:ad_offset]
        if set(mi.tags).intersection(ignore_tags):
            return
        if mi.comments:
            # Separate existing comments from the annotations with a divider.
            hrTag = user_notes_soup.new_tag('hr')
            hrTag['class'] = 'annotations_divider'
            user_notes_soup.insert(0, hrTag)
            mi.comments += prettify(user_notes_soup)
        else:
            mi.comments = prettify(user_notes_soup)
        # Update library comments
        db.set_comment(db_id, mi.comments)
        # Add bookmark file to db_id
        # NOTE: As it is, this copied the book from the device back to the library. That meant it replaced the
        # existing file. Taking this out for that reason, but some books have a ANNOT file that could be
        # copied.
        # db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
        # bm.value.path, index_is_id=True)
class KOBOTOUCH(KOBO):
name = 'KoboTouch'
gui_name = 'Kobo eReader'
author = 'David Forrester'
description = _(
'Communicate with the Kobo Touch, Glo, Mini, Aura HD,'
' Aura H2O, Glo HD, Touch 2, Aura ONE, Aura Edition 2,'
' Aura H2O Edition 2, Clara HD, Forma, Libra H2O, Elipsa,'
' Sage, Libra 2, Clara 2E,'
' Clara BW, Clara Colour, Libra Colour'
' as well as tolino shine 5, shine color and'
' vision color eReaders.'
' Based on the existing Kobo driver by %s.') % KOBO.author
# icon = 'devices/kobotouch.jpg'
supported_dbversion = 190
min_supported_dbversion = 53
min_dbversion_series = 65
min_dbversion_externalid = 65
min_dbversion_archive = 71
min_dbversion_images_on_sdcard = 77
min_dbversion_activity = 77
min_dbversion_keywords = 82
min_dbversion_seriesid = 136
min_dbversion_bookstats = 168
min_dbversion_real_bools = 188 # newer (tolino) 5.x fw uses 0 and 1 as boolean values
# Starting with firmware version 3.19.x, the last number appears to be is a
# build number. A number will be recorded here but it can be safely ignored
# when testing the firmware version.
max_supported_fwversion = (5, 3, 195056)
# The following document firmware versions where new function or devices were added.
# Not all are used, but this feels a good place to record it.
min_fwversion_shelves = (2, 0, 0)
min_fwversion_images_on_sdcard = (2, 4, 1)
min_fwversion_images_tree = (2, 9, 0) # Cover images stored in tree under .kobo-images
min_aurah2o_fwversion = (3, 7, 0)
min_reviews_fwversion = (3, 12, 0)
min_glohd_fwversion = (3, 14, 0)
min_auraone_fwversion = (3, 20, 7280)
min_fwversion_overdrive = (4, 0, 7523)
min_clarahd_fwversion = (4, 8, 11090)
min_forma_fwversion = (4, 11, 11879)
min_librah20_fwversion = (4, 16, 13337) # "Reviewers" release.
min_fwversion_epub_location = (4, 17, 13651) # ePub reading location without full contentid.
min_fwversion_dropbox = (4, 18, 13737) # The Forma only at this point.
min_fwversion_serieslist = (4, 20, 14601) # Series list needs the SeriesID to be set.
min_nia_fwversion = (4, 22, 15202)
min_elipsa_fwversion = (4, 28, 17820)
min_libra2_fwversion = (4, 29, 18730)
min_sage_fwversion = (4, 29, 18730)
min_clara2e_fwversion = (4, 33, 19759)
min_fwversion_audiobooks = (4, 29, 18730)
min_fwversion_bookstats = (4, 32, 19501)
min_clarabw_fwversion = (4, 39, 22801) # not sure whether needed
min_claracolor_fwversion = (4, 39, 22801) # not sure whether needed
min_libracolor_fwversion = (4, 39, 22801) # not sure whether needed
has_kepubs = True
device_model_id = ''
booklist_class = KTCollectionsBookList
book_class = Book
kobo_series_dict = {}
MAX_PATH_LEN = 185 # 250 - (len(" - N3_LIBRARY_SHELF.parsed") + len("F:\.kobo\images\"))
KOBO_EXTRA_CSSFILE = 'kobo_extra.css'
EXTRA_CUSTOMIZATION_MESSAGE = []
EXTRA_CUSTOMIZATION_DEFAULT = []
OSX_MAIN_MEM_VOL_PAT = re.compile(r'/KOBOeReader')
opts = None
TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ"
AURA_PRODUCT_ID = [0x4203]
AURA_EDITION2_PRODUCT_ID = [0x4226]
AURA_HD_PRODUCT_ID = [0x4193]
AURA_H2O_PRODUCT_ID = [0x4213]
AURA_H2O_EDITION2_PRODUCT_ID = [0x4227]
AURA_ONE_PRODUCT_ID = [0x4225]
CLARA_HD_PRODUCT_ID = [0x4228]
CLARA_2E_PRODUCT_ID = [0x4235]
ELIPSA_PRODUCT_ID = [0x4233]
ELIPSA_2E_PRODUCT_ID = [0x4236]
FORMA_PRODUCT_ID = [0x4229]
GLO_PRODUCT_ID = [0x4173]
GLO_HD_PRODUCT_ID = [0x4223]
LIBRA_H2O_PRODUCT_ID = [0x4232]
LIBRA2_PRODUCT_ID = [0x4234]
MINI_PRODUCT_ID = [0x4183]
NIA_PRODUCT_ID = [0x4230]
SAGE_PRODUCT_ID = [0x4231]
TOUCH_PRODUCT_ID = [0x4163]
TOUCH2_PRODUCT_ID = [0x4224]
LIBRA_COLOR_PRODUCT_ID = [0x4237] # This is shared by Kobo Libra Color, Clara Color and Clara BW
# as well as tolino shine 5, shine color and vision color. Sigh.
# Kobo says the following will be used in future firmware (end 2024/2025)
CLARA_COLOR_PRODUCT_ID = [0x4238]
CLARA_BW_PRODUCT_ID = [0x4239]
TOLINO_VISION_COLOR_PRODUCT_ID = [0x5237]
TOLINO_SHINE_COLOR_PRODUCT_ID = [0x5238]
TOLINO_SHINE_5thGEN_PRODUCT_ID = [0x5239]
PRODUCT_ID = AURA_PRODUCT_ID + AURA_EDITION2_PRODUCT_ID + \
AURA_HD_PRODUCT_ID + AURA_H2O_PRODUCT_ID + AURA_H2O_EDITION2_PRODUCT_ID + \
GLO_PRODUCT_ID + GLO_HD_PRODUCT_ID + \
MINI_PRODUCT_ID + TOUCH_PRODUCT_ID + TOUCH2_PRODUCT_ID + \
AURA_ONE_PRODUCT_ID + CLARA_HD_PRODUCT_ID + FORMA_PRODUCT_ID + LIBRA_H2O_PRODUCT_ID + \
NIA_PRODUCT_ID + ELIPSA_PRODUCT_ID + \
SAGE_PRODUCT_ID + LIBRA2_PRODUCT_ID + CLARA_2E_PRODUCT_ID + ELIPSA_2E_PRODUCT_ID + \
LIBRA_COLOR_PRODUCT_ID + CLARA_COLOR_PRODUCT_ID + CLARA_BW_PRODUCT_ID + \
TOLINO_VISION_COLOR_PRODUCT_ID + TOLINO_SHINE_COLOR_PRODUCT_ID + TOLINO_SHINE_5thGEN_PRODUCT_ID
BCD = [0x0110, 0x0326, 0x401, 0x409]
KOBO_AUDIOBOOKS_MIMETYPES = ['application/octet-stream', 'application/x-kobo-mp3z']
# Image file name endings. Made up of: image size, min_dbversion, max_dbversion, isFullSize,
# Note: "200" has been used just as a much larger number than the current versions. It is just a lazy
# way of making it open ended.
# NOTE: Values pulled from Nickel by @geek1011,
# c.f., this handy recap: https://github.com/shermp/Kobo-UNCaGED/issues/16#issuecomment-494229994
# Only the N3_FULL values differ, as they should match the screen's effective resolution.
# Note that all Kobo devices share a common AR at roughly 0.75,
# so results should be similar, no matter the exact device.
# Common to all Kobo models
COMMON_COVER_FILE_ENDINGS = {
# Used for Details screen before FW2.8.1, then for current book tile on home screen
' - N3_LIBRARY_FULL.parsed': [(355,530),0, 200,False,],
# Used for library lists
' - N3_LIBRARY_GRID.parsed': [(149,223),0, 200,False,],
# Used for library lists
' - N3_LIBRARY_LIST.parsed': [(60,90),0, 53,False,],
# Used for Details screen from FW2.8.1
' - AndroidBookLoadTablet_Aspect.parsed': [(355,530), 82, 100,False,],
}
# Legacy 6" devices
LEGACY_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
' - N3_FULL.parsed': [(600,800),0, 200,True,],
}
# Glo
GLO_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
' - N3_FULL.parsed': [(758,1024),0, 200,True,],
}
# Aura
AURA_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
# NOTE: The Aura's bezel covers 10 pixels at the bottom.
# Kobo officially advertised the screen resolution with those chopped off.
' - N3_FULL.parsed': [(758,1014),0, 200,True,],
}
# Glo HD, Clara HD, Clara 2E, Clara BW, Clara Colour share resolution, so the image sizes should be the same.
GLO_HD_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
' - N3_FULL.parsed': [(1072,1448), 0, 200,True,],
}
AURA_HD_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
' - N3_FULL.parsed': [(1080,1440), 0, 200,True,],
}
AURA_H2O_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
# NOTE: The H2O's bezel covers 11 pixels at the top.
# Unlike on the Aura, Nickel fails to account for this when generating covers.
# c.f., https://github.com/shermp/Kobo-UNCaGED/pull/17#discussion_r286209827
' - N3_FULL.parsed': [(1080,1429), 0, 200,True,],
}
# Aura ONE and Elipsa have the same resolution.
AURA_ONE_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
' - N3_FULL.parsed': [(1404,1872), 0, 200,True,],
}
FORMA_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
# NOTE: Nickel currently fails to honor the real screen resolution when generating covers,
# choosing instead to follow the Aura One codepath.
' - N3_FULL.parsed': [(1440,1920), 0, 200,True,],
}
LIBRA_H2O_COVER_FILE_ENDINGS = {
# Used for screensaver, home screen
' - N3_FULL.parsed': [(1264,1680), 0, 200,True,],
}
TOLINO_SHINE_COVER_FILE_ENDINGS = {
# There's probably only one ending used
'': [(1072,1448), 0, 200,True,],
}
TOLINO_VISION_COVER_FILE_ENDINGS = {
# There's probably only one ending used
'': [(1264,1680), 0, 200,True,],
}
# Following are the sizes used with pre2.1.4 firmware
# COVER_FILE_ENDINGS = {
# ' - N3_LIBRARY_FULL.parsed':[(355,530),0, 99,], # Used for Details screen
# ' - N3_LIBRARY_FULL.parsed':[(600,800),0, 99,],
# ' - N3_LIBRARY_GRID.parsed':[(149,223),0, 99,], # Used for library lists
# ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,],
# ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,],
# ' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver if "Full screen" is checked.
# }
def __init__(self, *args, **kwargs):
    # Delegate to the base KOBO driver, then clear plugboard state
    # (presumably assigned later by calibre's device handling — confirm).
    KOBO.__init__(self, *args, **kwargs)
    self.plugboards = self.plugboard_func = None
def initialize(self):
    super().initialize()
    # Cache of bookshelf names; presumably populated later from the
    # device database — confirm.
    self.bookshelvelist = []
def get_device_information(self, end_session=True):
    # Determine the device name first so the base implementation reports it.
    self.set_device_name()
    return super().get_device_information(end_session)
def open_linux(self):
    super().open_linux()
    # Main memory and the SD card may be mounted in either order; fix up.
    self.swap_drives_if_needed()
def open_osx(self):
    """Open the device on macOS, logging mount information when debugging."""
    # Just dump some info to the logs.
    super().open_osx()
    # Wrap some debugging output in a try/except so that it is unlikely to break things completely.
    try:
        if DEBUG:
            from calibre_extensions.usbobserver import get_mounted_filesystems
            mount_map = get_mounted_filesystems()
            debug_print('KoboTouch::open_osx - mount_map=', mount_map)
            debug_print('KoboTouch::open_osx - self._main_prefix=', self._main_prefix)
            debug_print('KoboTouch::open_osx - self._card_a_prefix=', self._card_a_prefix)
            debug_print('KoboTouch::open_osx - self._card_b_prefix=', self._card_b_prefix)
    except:
        pass
    self.swap_drives_if_needed()
def swap_drives_if_needed(self):
    """Ensure the drive containing the .kobo folder is treated as main memory.

    When both a main prefix and a card A prefix were detected but the
    detected main drive is not actually the device's main memory, swap them.
    """
    if self._card_a_prefix is None:
        return
    if not self.is_main_drive(self._main_prefix):
        self._main_prefix, self._card_a_prefix = self._card_a_prefix, self._main_prefix
def windows_sort_drives(self, drives):
    '''Windows hook: delegate to sort_drives() so main/carda end up in the right order.'''
    return self.sort_drives(drives)
def sort_drives(self, drives):
    """Ensure the drive holding the '.kobo' directory is mapped as 'main'.

    Swaps the 'main' and 'carda' entries in-place when the main slot does
    not look like the device's main memory; returns the same dict.
    """
    if len(drives) < 2:
        return drives

    main_drive = drives.get('main', None)
    card_drive = drives.get('carda', None)
    if main_drive and card_drive and not self.is_main_drive(main_drive):
        drives['main'], drives['carda'] = card_drive, main_drive
        debug_print('KoboTouch::sort_drives - swapped drives - main={}, carda={}'.format(drives['main'], drives['carda']))
    return drives
def is_main_drive(self, drive):
    """A drive is the device's main memory iff it contains a '.kobo' directory."""
    kobo_dir = os.path.join(drive, '.kobo')
    debug_print('KoboTouch::is_main_drive - drive={}, path={}'.format(drive, kobo_dir))
    return os.path.exists(self.normalize_path(kobo_dir))
def is_false_value(self, x) -> bool:
    """Interpret a device-database value as boolean false.

    Older databases store booleans as the strings 'true'/'false'; newer
    ones store real booleans, so plain falsiness is used for non-strings.
    """
    return x == 'false' if isinstance(x, str) else not x
def is_true_value(self, x) -> bool:
    """Interpret a device-database value as boolean true.

    Counterpart of is_false_value(): the string 'true' or any truthy
    non-string object counts as true.
    """
    return x == 'true' if isinstance(x, str) else bool(x)
@property
def needs_real_bools(self) -> bool:
    # Tolino devices with a new enough database schema store boolean columns
    # as real SQLite booleans instead of the historical 'true'/'false'
    # strings; bool_for_query() keys off this property.
    return self.dbversion >= self.min_dbversion_real_bools and self.isTolinoDevice()
def bool_for_query(self, x: bool = False) -> str:
    """Render a Python bool as a SQL literal for the device database.

    Devices with real boolean columns get bare true/false; older schemas
    get the quoted strings 'true'/'false'.
    """
    literal = 'true' if x else 'false'
    if self.needs_real_bools:
        return literal
    return f"'{literal}'"
def books(self, oncard=None, end_session=True):
    '''Build the booklist for the device (or for one of its cards).

    Reads the device database, merges each row into the cached metadata
    booklist, labels every book with pseudo-collections (Im_Reading, Read,
    Deleted, ...) plus the device bookshelves, prunes books that are no
    longer on the filesystem, and syncs the booklist back to the device.

    Fixes vs. previous revision: added the missing comma in the fallback
    SQL query (its implicit string concatenation produced the invalid
    column list "NULL AS Language -1 AS Accessibility"); narrowed two bare
    "except:" clauses to "except Exception:".
    '''
    debug_print("KoboTouch:books - oncard='%s'"%oncard)
    self.debugging_title = self.get_debugging_title()

    dummy_bl = self.booklist_class(None, None, None)

    # Bail out early with an empty booklist when asked for a card we do not have.
    if oncard == 'carda' and not self._card_a_prefix:
        self.report_progress(1.0, _('Getting list of books on device...'))
        debug_print("KoboTouch:books - Asked to process 'carda', but do not have one!")
        return dummy_bl
    elif oncard == 'cardb' and not self._card_b_prefix:
        self.report_progress(1.0, _('Getting list of books on device...'))
        debug_print("KoboTouch:books - Asked to process 'cardb', but do not have one!")
        return dummy_bl
    elif oncard and oncard != 'carda' and oncard != 'cardb':
        self.report_progress(1.0, _('Getting list of books on device...'))
        debug_print("KoboTouch:books - unknown card")
        return dummy_bl

    prefix = self._card_a_prefix if oncard == 'carda' else \
        self._card_b_prefix if oncard == 'cardb' \
        else self._main_prefix
    debug_print("KoboTouch:books - oncard='%s', prefix='%s'"%(oncard, prefix))

    self.fwversion = self.get_firmware_version()

    debug_print('Kobo device: %s' % self.gui_name)
    debug_print('Version of driver:', self.version, 'Has kepubs:', self.has_kepubs)
    debug_print('Version of firmware:', self.fwversion, 'Has kepubs:', self.has_kepubs)
    debug_print('Firmware supports cover image tree:', self.fwversion >= self.min_fwversion_images_tree)

    self.booklist_class.rebuild_collections = self.rebuild_collections

    # get the metadata cache
    bl = self.booklist_class(oncard, prefix, self.settings)

    opts = self.settings()
    debug_print("KoboTouch:books - opts.extra_customization=", opts.extra_customization)
    debug_print("KoboTouch:books - driver options=", self)
    debug_print("KoboTouch:books - prefs['manage_device_metadata']=", prefs['manage_device_metadata'])
    debugging_title = self.debugging_title
    debug_print("KoboTouch:books - set_debugging_title to '%s'" % debugging_title)
    bl.set_debugging_title(debugging_title)
    debug_print("KoboTouch:books - length bl=%d"%len(bl))
    need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE)
    debug_print("KoboTouch:books - length bl after sync=%d"%len(bl))

    # make a dict cache of paths so the lookup in the loop below is faster.
    bl_cache = {}
    for idx, b in enumerate(bl):
        bl_cache[b.lpath] = idx

    def update_booklist(prefix, path, ContentID, ContentType, MimeType, ImageID,
                        title, authors, DateCreated, Description, Publisher,
                        series, seriesnumber, SeriesID, SeriesNumberFloat,
                        ISBN, Language, Subtitle,
                        readstatus, expired, favouritesindex, accessibility, isdownloaded,
                        userid, bookshelves, book_stats=None
                        ):
        '''Merge one database row into bl; return True if the booklist changed.'''
        show_debug = self.is_debugging_title(title)
        # show_debug = authors == 'L. Frank Baum'
        if show_debug:
            debug_print("KoboTouch:update_booklist - title='%s'"%title, "ContentType=%s"%ContentType, "isdownloaded=", isdownloaded)
            debug_print(
                "  prefix=%s, DateCreated=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s"%
                (prefix, DateCreated, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,))
        changed = False
        try:
            # ContentID is an absolute device path; strip the drive prefix to
            # get the library-relative lpath used as the booklist key.
            lpath = path.partition(self.normalize_path(prefix))[2]
            if lpath.startswith(os.sep):
                lpath = lpath[len(os.sep):]
            lpath = lpath.replace('\\', '/')
            # debug_print("KoboTouch:update_booklist - LPATH: ", lpath, " - Title: ", title)

            playlist_map = {}

            if lpath not in playlist_map:
                playlist_map[lpath] = []

            allow_shelves = True
            if readstatus == 1:
                playlist_map[lpath].append('Im_Reading')
            elif readstatus == 2:
                playlist_map[lpath].append('Read')
            elif readstatus == 3:
                playlist_map[lpath].append('Closed')

            # Related to a bug in the Kobo firmware that leaves an expired row for deleted books
            # this shows an expired Collection so the user can decide to delete the book
            if expired == 3:
                playlist_map[lpath].append('Expired')
                allow_shelves = False
            # A SHORTLIST is supported on the touch but the data field is there on most earlier models
            if favouritesindex == 1:
                playlist_map[lpath].append('Shortlist')

            # Audiobooks are identified by their MimeType
            if MimeType in self.KOBO_AUDIOBOOKS_MIMETYPES:
                playlist_map[lpath].append('Audiobook')

            # The following is in flux:
            # - FW2.0.0, DBVersion 53,55 accessibility == 1
            # - FW2.1.2 beta, DBVersion == 56, accessibility == -1:
            # So, the following should be OK
            if self.is_false_value(isdownloaded):
                if self.dbversion < 56 and accessibility <= 1 or self.dbversion >= 56 and accessibility == -1:
                    playlist_map[lpath].append('Deleted')
                    allow_shelves = False
                    if show_debug:
                        debug_print("KoboTouch:update_booklist - have a deleted book")
                elif self.supports_kobo_archive() and (accessibility == 1 or accessibility == 2):
                    playlist_map[lpath].append('Archived')
                    allow_shelves = True

            # Label Previews and Recommendations
            if accessibility == 6:
                if userid == '':
                    playlist_map[lpath].append('Recommendation')
                    allow_shelves = False
                else:
                    playlist_map[lpath].append('Preview')
                    allow_shelves = False
            elif accessibility == 4:        # Pre 2.x.x firmware
                playlist_map[lpath].append('Recommendation')
                allow_shelves = False
            elif accessibility == 8:        # From 4.22 but was probably there earlier.
                playlist_map[lpath].append('Kobo Plus')
                allow_shelves = True
            elif accessibility == 9:        # From 4.0 on Aura One
                playlist_map[lpath].append('OverDrive')
                allow_shelves = True

            # Remember the device-generated collections separately from the
            # user's bookshelves, which are only added when allowed.
            kobo_collections = playlist_map[lpath][:]
            if allow_shelves:
                # debug_print('KoboTouch:update_booklist - allowing shelves - title=%s' % title)
                if len(bookshelves) > 0:
                    playlist_map[lpath].extend(bookshelves)

            if show_debug:
                debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)

            path = self.normalize_path(path)
            # print "Normalized FileName: " + path

            # Collect the Kobo metadata
            authors_list = [a.strip() for a in authors.split("&")] if authors is not None else [_('Unknown')]
            kobo_metadata = Metadata(title, authors_list)
            kobo_metadata.series = series
            kobo_metadata.series_index = seriesnumber
            kobo_metadata.comments = Description
            kobo_metadata.publisher = Publisher
            kobo_metadata.language = Language
            kobo_metadata.isbn = ISBN
            if DateCreated is not None:
                try:
                    kobo_metadata.pubdate = parse_date(DateCreated, assume_utc=True)
                except Exception:
                    try:
                        kobo_metadata.pubdate = datetime.strptime(DateCreated, "%Y-%m-%dT%H:%M:%S.%fZ")
                    except Exception:
                        debug_print("KoboTouch:update_booklist - Cannot convert date - DateCreated='%s'"%DateCreated)

            idx = bl_cache.get(lpath, None)
            if idx is not None:  # and not (accessibility == 1 and isdownloaded == 'false'):
                # Book is already in the cached booklist: update it in place.
                if show_debug:
                    self.debug_index = idx
                    debug_print("KoboTouch:update_booklist - idx=%d"%idx)
                    debug_print("KoboTouch:update_booklist - lpath=%s"%lpath)
                    debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections)
                    debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map)
                    debug_print('KoboTouch:update_booklist - bookshelves=', bookshelves)
                    debug_print('KoboTouch:update_booklist - kobo_collections=', kobo_collections)
                    debug_print('KoboTouch:update_booklist - series="%s"' % bl[idx].series)
                    debug_print('KoboTouch:update_booklist - the book=', bl[idx])
                    debug_print('KoboTouch:update_booklist - the authors=', bl[idx].authors)
                    debug_print('KoboTouch:update_booklist - application_id=', bl[idx].application_id)
                    debug_print('KoboTouch:update_booklist - size=', bl[idx].size)
                # Mark as seen: entries still non-None after the loop are
                # books that are no longer on the device and get pruned.
                bl_cache[lpath] = None

                if ImageID is not None:
                    imagename = self.imagefilename_from_imageID(prefix, ImageID)
                    if imagename is not None:
                        bl[idx].thumbnail = ImageWrapper(imagename)
                if (ContentType == '6' and MimeType != 'application/x-kobo-epub+zip'):
                    if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
                        if self.update_metadata_item(bl[idx]):
                            # debug_print("KoboTouch:update_booklist - update_metadata_item returned true")
                            changed = True
                    else:
                        debug_print("  Strange: The file: ", prefix, lpath, " does not exist!")
                        debug_print("KoboTouch:update_booklist - book size=", bl[idx].size)

                if show_debug:
                    debug_print("KoboTouch:update_booklist - ContentID='%s'"%ContentID)
                bl[idx].contentID = ContentID
                bl[idx].kobo_metadata = kobo_metadata
                bl[idx].kobo_series = series
                bl[idx].kobo_series_number = seriesnumber
                bl[idx].kobo_series_id = SeriesID
                bl[idx].kobo_series_number_float = SeriesNumberFloat
                bl[idx].kobo_subtitle = Subtitle
                bl[idx].kobo_bookstats = book_stats
                bl[idx].can_put_on_shelves = allow_shelves
                bl[idx].mime = MimeType

                if not bl[idx].is_sideloaded and bl[idx].has_kobo_series and SeriesID is not None:
                    if show_debug:
                        debug_print('KoboTouch:update_booklist - Have purchased kepub with series, saving SeriesID=', SeriesID)
                    self.kobo_series_dict[series] = SeriesID

                if lpath in playlist_map:
                    bl[idx].device_collections = playlist_map.get(lpath,[])
                    bl[idx].current_shelves = bookshelves
                    bl[idx].kobo_collections = kobo_collections

                if show_debug:
                    debug_print('KoboTouch:update_booklist - updated bl[idx].device_collections=', bl[idx].device_collections)
                    debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map, 'changed=', changed)
                    # debug_print('KoboTouch:update_booklist - book=', bl[idx])
                    debug_print("KoboTouch:update_booklist - book class=%s"%bl[idx].__class__)
                    debug_print("KoboTouch:update_booklist - book title=%s"%bl[idx].title)
            else:
                # Book was not in the cached booklist: create it.
                if show_debug:
                    debug_print('KoboTouch:update_booklist - idx is none')
                try:
                    if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))):
                        book = self.book_from_path(prefix, lpath, title, authors, MimeType, DateCreated, ContentType, ImageID)
                    else:
                        if isdownloaded == 'true':  # A recommendation or preview is OK to not have a file
                            debug_print("  Strange: The file: ", prefix, lpath, " does not exist!")
                            title = "FILE MISSING: " + title
                        book = self.book_class(prefix, lpath, title, authors, MimeType, DateCreated, ContentType, ImageID, size=0)
                        if show_debug:
                            debug_print('KoboTouch:update_booklist - book file does not exist. ContentID="%s"'%ContentID)
                except Exception as e:
                    debug_print("KoboTouch:update_booklist - exception creating book: '%s'"%str(e))
                    debug_print("    prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors,
                                "MimeType: ", MimeType, "DateCreated: ", DateCreated, "ContentType: ", ContentType, "ImageID: ", ImageID)
                    raise

                if show_debug:
                    debug_print('KoboTouch:update_booklist - class:', book.__class__)
                    # debug_print('    resolution:', book.__class__.__mro__)
                    debug_print("  contentid: '%s'"%book.contentID)
                    debug_print("  title:'%s'"%book.title)
                    debug_print("  the book:", book)
                    debug_print("  author_sort:'%s'"%book.author_sort)
                    debug_print("  bookshelves:", bookshelves)
                    debug_print("  kobo_collections:", kobo_collections)

                # print 'Update booklist'
                book.device_collections = playlist_map.get(lpath,[])  # if lpath in playlist_map else []
                book.current_shelves = bookshelves
                book.kobo_collections = kobo_collections
                book.contentID = ContentID
                book.kobo_metadata = kobo_metadata
                book.kobo_series = series
                book.kobo_series_number = seriesnumber
                book.kobo_series_id = SeriesID
                book.kobo_series_number_float = SeriesNumberFloat
                book.kobo_subtitle = Subtitle
                book.kobo_bookstats = book_stats
                book.can_put_on_shelves = allow_shelves
                # debug_print('KoboTouch:update_booklist - title=', title, 'book.device_collections', book.device_collections)

                if not book.is_sideloaded and book.has_kobo_series and SeriesID is not None:
                    if show_debug:
                        debug_print('KoboTouch:update_booklist - Have purchased kepub with series, saving SeriesID=', SeriesID)
                    self.kobo_series_dict[series] = SeriesID

                if bl.add_book(book, replace_metadata=False):
                    changed = True
                if show_debug:
                    debug_print('  book.device_collections', book.device_collections)
                    debug_print('  book.title', book.title)
        except Exception:  # Probably a path encoding error
            import traceback
            traceback.print_exc()
        return changed

    def get_bookshelvesforbook(connection, ContentID):
        '''Return the names of the device bookshelves this book is on.'''
        # debug_print("KoboTouch:get_bookshelvesforbook - " + ContentID)
        bookshelves = []
        if not self.supports_bookshelves:
            return bookshelves

        cursor = connection.cursor()
        query = "select ShelfName " \
                "from ShelfContent " \
                "where ContentId = ? " \
                f"and _IsDeleted = {self.bool_for_query(False)} " \
                "and ShelfName is not null"  # This should never be null, but it is protection against an error cause by a sync to the Kobo server
        values = (ContentID, )
        cursor.execute(query, values)
        for row in cursor:
            bookshelves.append(row['ShelfName'])

        cursor.close()
        # debug_print("KoboTouch:get_bookshelvesforbook - count bookshelves=" + str(count_bookshelves))
        return bookshelves

    self.debug_index = 0

    with closing(self.device_database_connection(use_row_factory=True)) as connection:
        debug_print("KoboTouch:books - reading device database")

        self.dbversion = self.get_database_version(connection)
        debug_print("Database Version: ", self.dbversion)

        self.bookshelvelist = self.get_bookshelflist(connection)
        debug_print("KoboTouch:books - shelf list:", self.bookshelvelist)

        # Build the column list to match what this database version provides;
        # missing columns are faked with constant aliases.
        columns = 'Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ImageId, ReadStatus, Description, Publisher '
        if self.dbversion >= 16:
            columns += ', ___ExpirationStatus, FavouritesIndex, Accessibility'
        else:
            columns += ', -1 as ___ExpirationStatus, -1 as FavouritesIndex, -1 as Accessibility'
        if self.dbversion >= 33:
            columns += ', Language, IsDownloaded'
        else:
            columns += ', NULL AS Language, "1" AS IsDownloaded'
        if self.dbversion >= 46:
            columns += ', ISBN'
        else:
            columns += ', NULL AS ISBN'
        if self.supports_series():
            columns += ", Series, SeriesNumber, ___UserID, ExternalId, Subtitle"
        else:
            columns += ', null as Series, null as SeriesNumber, ___UserID, null as ExternalId, null as Subtitle'
        if self.supports_series_list:
            columns += ", SeriesID, SeriesNumberFloat"
        else:
            columns += ', null as SeriesID, null as SeriesNumberFloat'
        if self.supports_bookstats:
            columns += ", StorePages, StoreWordCount, StoreTimeToReadLowerEstimate, StoreTimeToReadUpperEstimate"
        else:
            columns += ', null as StorePages, null as StoreWordCount, null as StoreTimeToReadLowerEstimate, null as StoreTimeToReadUpperEstimate'

        where_clause = ''
        if self.supports_kobo_archive() or self.supports_overdrive():
            where_clause = (" WHERE BookID IS NULL "
                            " AND ((Accessibility = -1 AND IsDownloaded in ('true', 1 )) "  # Sideloaded books
                            " OR (Accessibility IN (%(downloaded_accessibility)s) %(expiry)s) "  # Purchased books
                            " %(previews)s %(recommendations)s ) "  # Previews or Recommendations
                            ) % \
                dict(
                    expiry="" if self.show_archived_books else "and IsDownloaded in ('true', 1)",
                    previews=" OR (Accessibility in (6) AND ___UserID <> '')" if self.show_previews else "",
                    recommendations=" OR (Accessibility IN (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else "",
                    downloaded_accessibility="1,2,8,9" if self.supports_overdrive() else "1,2"
                )
        elif self.supports_series():
            where_clause = (" WHERE BookID IS NULL "
                            " AND ((Accessibility = -1 AND IsDownloaded IN ('true', 1)) or (Accessibility IN (1,2)) %(previews)s %(recommendations)s )"
                            " AND NOT ((___ExpirationStatus=3 OR ___ExpirationStatus is Null) %(expiry)s)"
                            ) % \
                dict(
                    expiry=" AND ContentType = 6" if self.show_archived_books else "",
                    previews=" or (Accessibility IN (6) AND ___UserID <> '')" if self.show_previews else "",
                    recommendations=" or (Accessibility in (-1, 4, 6) AND ___UserId = '')" if self.show_recommendations else ""
                )
        elif self.dbversion >= 33:
            where_clause = (' WHERE BookID IS NULL %(previews)s %(recommendations)s AND NOT'
                            ' ((___ExpirationStatus=3 or ___ExpirationStatus IS NULL) %(expiry)s)'
                            ) % \
                dict(
                    expiry=' AND ContentType = 6' if self.show_archived_books else '',
                    previews=' AND Accessibility <> 6' if not self.show_previews else '',
                    recommendations=' AND IsDownloaded IN (\'true\', 1)' if not self.show_recommendations else ''
                )
        elif self.dbversion >= 16:
            where_clause = (' WHERE BookID IS NULL '
                            'AND NOT ((___ExpirationStatus=3 OR ___ExpirationStatus IS Null) %(expiry)s)'
                            ) % \
                dict(expiry=' and ContentType = 6' if self.show_archived_books else '')
        else:
            where_clause = ' WHERE BookID IS NULL'

        # Note: The card condition should not need the contentId test for the SD
        # card. But the ExternalId does not get set for sideloaded kepubs on the
        # SD card.
        card_condition = ''
        if self.has_externalid():
            card_condition = " AND (externalId IS NOT NULL AND externalId <> '' OR contentId LIKE 'file:///mnt/sd/%')" if oncard == 'carda' else (
                " AND (externalId IS NULL OR externalId = '') AND contentId NOT LIKE 'file:///mnt/sd/%'")
        else:
            card_condition = " AND contentId LIKE 'file:///mnt/sd/%'" if oncard == 'carda' else " AND contentId NOT LIKE'file:///mnt/sd/%'"

        query = 'SELECT ' + columns + ' FROM content ' + where_clause + card_condition
        debug_print("KoboTouch:books - query=", query)

        cursor = connection.cursor()
        try:
            cursor.execute(query)
        except Exception as e:
            err = str(e)
            if not (any_in(err, '___ExpirationStatus', 'FavouritesIndex', 'Accessibility', 'IsDownloaded', 'Series', 'ExternalId')):
                raise
            # Fallback for very old databases missing the newer columns.
            # FIX: added the comma after "NULL AS Language" - the previous
            # implicit string concatenation produced the invalid SQL column
            # list "NULL AS Language -1 AS Accessibility".
            # NOTE(review): this fallback still selects fewer columns than the
            # row-processing loop below reads (e.g. Description, Publisher,
            # SeriesID); confirm against a pre-v16 database if ever hit.
            query= ('SELECT Title, Attribution, DateCreated, ContentID, MimeType, ContentType, '
                    'ImageId, ReadStatus, -1 AS ___ExpirationStatus, "-1" AS FavouritesIndex, '
                    'null AS ISBN, NULL AS Language, '
                    '-1 AS Accessibility, 1 AS IsDownloaded, NULL AS Series, NULL AS SeriesNumber, null as Subtitle '
                    'FROM content '
                    'WHERE BookID IS NULL'
                    )
            cursor.execute(query)

        changed = False
        i = 0
        for row in cursor:
            i += 1
            # self.report_progress((i) / float(books_on_device), _('Getting list of books on device...'))
            show_debug = self.is_debugging_title(row['Title'])
            if show_debug:
                debug_print("KoboTouch:books - looping on database - row=%d" % i)
                debug_print("KoboTouch:books - title='%s'"%row['Title'], "authors=", row['Attribution'])
                debug_print("KoboTouch:books - row=", row)
            if not hasattr(row['ContentID'], 'startswith') or row['ContentID'].lower().startswith(
                    "file:///usr/local/kobo/help/") or row['ContentID'].lower().startswith("/usr/local/kobo/help/"):
                # These are internal to the Kobo device and do not exist
                continue
            # NOTE(review): this condition can never be true (a truthy value
            # with zero length), so externalId is always row['ExternalId']
            # as-is; the intent was probably to map '' to None. Left unchanged
            # because downstream code tests externalId for truthiness anyway.
            externalId = None if row['ExternalId'] and len(row['ExternalId']) == 0 else row['ExternalId']
            path = self.path_from_contentid(row['ContentID'], row['ContentType'], row['MimeType'], oncard, externalId)
            if show_debug:
                debug_print("KoboTouch:books - path='%s'"%path, " ContentID='%s'"%row['ContentID'], " externalId=%s" % externalId)

            bookshelves = get_bookshelvesforbook(connection, row['ContentID'])

            prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
            changed = update_booklist(prefix, path, row['ContentID'], row['ContentType'], row['MimeType'], row['ImageId'],
                                      row['Title'], row['Attribution'], row['DateCreated'], row['Description'], row['Publisher'],
                                      row['Series'], row['SeriesNumber'], row['SeriesID'], row['SeriesNumberFloat'],
                                      row['ISBN'], row['Language'], row['Subtitle'],
                                      row['ReadStatus'], row['___ExpirationStatus'],
                                      int(row['FavouritesIndex']), row['Accessibility'], row['IsDownloaded'],
                                      row['___UserID'], bookshelves,
                                      book_stats={
                                          'StorePages': row['StorePages'],
                                          'StoreWordCount': row['StoreWordCount'],
                                          'StoreTimeToReadLowerEstimate': row['StoreTimeToReadLowerEstimate'],
                                          'StoreTimeToReadUpperEstimate': row['StoreTimeToReadUpperEstimate']
                                      }
                                      )

            if changed:
                need_sync = True

        cursor.close()

        if prefs['manage_device_metadata'] != 'on_connect':
            self.dump_bookshelves(connection)
        else:
            debug_print("KoboTouch:books - automatically managing metadata")
    debug_print("KoboTouch:books - self.kobo_series_dict=", self.kobo_series_dict)
    # Remove books that are no longer in the filesystem. Cache contains
    # indices into the booklist if book not in filesystem, None otherwise
    # Do the operation in reverse order so indices remain valid
    for idx in sorted(itervalues(bl_cache), reverse=True, key=lambda x: x or -1):
        if idx is not None:
            if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))) or not bl[idx].contentID:
                need_sync = True
                del bl[idx]
            else:
                debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title)

    # print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \
    #       (len(bl_cache), len(bl), need_sync)
    # Bypassing the KOBO sync_booklists as that does things we don't need to do
    # Also forcing sync to see if this solves issues with updating shelves and matching books.
    if need_sync or True:  # self.count_found_in_bl != len(bl) or need_sync:
        debug_print("KoboTouch:books - about to sync_booklists")
        if oncard == 'cardb':
            USBMS.sync_booklists(self, (None, None, bl))
        elif oncard == 'carda':
            USBMS.sync_booklists(self, (None, bl, None))
        else:
            USBMS.sync_booklists(self, (bl, None, None))
        debug_print("KoboTouch:books - have done sync_booklists")

    self.report_progress(1.0, _('Getting list of books on device...'))
    debug_print("KoboTouch:books - end - oncard='%s'"%oncard)
    return bl
@classmethod
def book_from_path(cls, prefix, lpath, title, authors, mime, date, ContentType, ImageID):
    """Create the Book object for the file at lpath.

    Kobo audiobooks are directories rather than single files, so the base
    class reports size 0 for them; in that case the sizes of all files
    inside the directory are summed instead.
    """
    debug_print("KoboTouch:book_from_path - title=%s"%title)
    book = super().book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)

    # Kobo Audiobooks are directories with files in them.
    if mime in cls.KOBO_AUDIOBOOKS_MIMETYPES and book.size == 0:
        audiobook_dir = cls.normalize_path(os.path.join(prefix, lpath))
        # debug_print("KoboTouch:book_from_path - audiobook=", audiobook_dir)
        book.size += sum(
            entry.stat().st_size for entry in os.scandir(audiobook_dir) if entry.is_file())
        debug_print("KoboTouch:book_from_path - book.size=", book.size)

    return book
def path_from_contentid(self, ContentID, ContentType, MimeType, oncard, externalId=None):
    '''Map a database ContentID to a filesystem path on the device.

    Plain sideloaded books are handled by the base class; this override
    adds the special locations used for kepubs with an ExternalId and for
    audiobooks (Kobo 'application/octet-stream', Tolino 'audio/mpeg').
    '''
    path = ContentID

    # Only kepubs with an externalId and audiobooks need the handling below.
    if not (externalId or MimeType == 'application/octet-stream' or (self.isTolinoDevice() and MimeType == 'audio/mpeg')):
        return super().path_from_contentid(ContentID, ContentType, MimeType, oncard)

    if oncard == 'cardb':
        print('path from_contentid cardb')
    else:
        if (ContentType == "6" or ContentType == "10"):
            if (MimeType == 'application/octet-stream'):  # Audiobooks purchased from Kobo are in a different location.
                path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/audiobook/' + path
            elif (MimeType == 'audio/mpeg' and self.isTolinoDevice()):
                # Tolino audiobooks share the same tree as Kobo audiobooks.
                path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/audiobook/' + path
            elif path.startswith("file:///mnt/onboard/"):
                # Sideloaded book in main memory: replace the URL prefix.
                path = self._main_prefix + path.replace("file:///mnt/onboard/", '')
            elif path.startswith("file:///mnt/sd/"):
                # Sideloaded book on the SD card.
                path = self._card_a_prefix + path.replace("file:///mnt/sd/", '')
            elif externalId:
                # Kepub stored on the SD card.
                path = self._card_a_prefix + 'koboExtStorage/kepub/' + path
            else:
                # Kepub stored under the Kobo root directory in main memory.
                path = self._main_prefix + KOBO_ROOT_DIR_NAME + '/kepub/' + path
        else:  # Should never get here, but, just in case...
            # if path.startswith("file:///mnt/onboard/"):
            path = path.replace("file:///mnt/onboard/", self._main_prefix)
            path = path.replace("file:///mnt/sd/", self._card_a_prefix)
            path = path.replace("/mnt/onboard/", self._main_prefix)
            # print "Internal: " + path

    return path
def imagefilename_from_imageID(self, prefix, ImageID):
    """Return the path of the first existing cover image file for ImageID, or None."""
    show_debug = self.is_debugging_title(ImageID)

    if len(ImageID) > 0:
        base = self.images_path(prefix, ImageID)
        for ending in self.cover_file_endings():
            candidate = base + ending
            if os.path.exists(candidate):
                if show_debug:
                    debug_print("KoboTouch:imagefilename_from_imageID - have cover image fpath=%s" % (candidate))
                return candidate

    if show_debug:
        debug_print("KoboTouch:imagefilename_from_imageID - no cover image found - ImageID=%s" % (ImageID))
    return None
def get_extra_css(self):
    '''Load the user's Kobo extra CSS file from the device, if any.

    Returns the parsed stylesheet (or None) and records which features it
    uses in self.extra_css_options so _modify_stylesheet() knows what to
    strip from each book's own CSS.
    '''
    extra_sheet = None
    from css_parser.css import CSSRule

    if self.modifying_css():
        extra_css_path = os.path.join(self._main_prefix, self.KOBO_EXTRA_CSSFILE)
        if os.path.exists(extra_css_path):
            from css_parser import parseFile as cssparseFile
            try:
                extra_sheet = cssparseFile(extra_css_path)
                debug_print(f"KoboTouch:get_extra_css: Using extra CSS in {extra_css_path} ({len(extra_sheet.cssRules)} rules)")
                if len(extra_sheet.cssRules) ==0:
                    debug_print("KoboTouch:get_extra_css: Extra CSS file has no valid rules. CSS will not be modified.")
                    extra_sheet = None
            except Exception as e:
                debug_print(f"KoboTouch:get_extra_css: Problem parsing extra CSS file {extra_css_path}")
                debug_print(f"KoboTouch:get_extra_css: Exception {e}")

    # create dictionary of features enabled in kobo extra css
    self.extra_css_options = {}
    if extra_sheet:
        # search extra_css for @page rule
        self.extra_css_options['has_atpage'] = len(self.get_extra_css_rules(extra_sheet, CSSRule.PAGE_RULE)) > 0

        # search extra_css for style rule(s) containing widows or orphans
        self.extra_css_options['has_widows_orphans'] = len(self.get_extra_css_rules_widow_orphan(extra_sheet)) > 0

    debug_print('KoboTouch:get_extra_css - CSS options:', self.extra_css_options)
    return extra_sheet
def get_extra_css_rules(self, sheet, css_rule):
    '''Return a list of the rules of type css_rule from the given stylesheet.

    Simplified from an identity comprehension ([r for r in it]) to a plain
    list() call over the same iterable.
    '''
    return list(sheet.cssRules.rulesOfType(css_rule))
def get_extra_css_rules_widow_orphan(self, sheet):
    """Return the style rules in sheet that set 'widows' and/or 'orphans'."""
    from css_parser.css import CSSRule
    style_rules = self.get_extra_css_rules(sheet, CSSRule.STYLE_RULE)
    return [rule for rule in style_rules
            if rule.style['widows'] or rule.style['orphans']]
def upload_books(self, files, names, on_card=None, end_session=True,
                 metadata=None):
    '''Upload books to the device, applying the extra CSS tweaks first if enabled.

    After the base-class upload, and only on database version >= 53:
    deletes leftover database rows for books previously deleted on the
    device, optionally fixes up stored file sizes, and removes cover
    images when cover upload is disabled.
    '''
    debug_print('KoboTouch:upload_books - %d books'%(len(files)))
    debug_print('KoboTouch:upload_books - files=', files)

    if self.modifying_epub():
        self.extra_sheet = self.get_extra_css()
        i = 0
        for file, n, mi in zip(files, names, metadata):
            debug_print("KoboTouch:upload_books: Processing book: {} by {}".format(mi.title, " and ".join(mi.authors)))
            debug_print(f"KoboTouch:upload_books: file={file}, name={n}")
            self.report_progress(i / float(len(files)), "Processing book: {} by {}".format(mi.title, " and ".join(mi.authors)))
            mi.kte_calibre_name = n
            self._modify_epub(file, mi)
            i += 1
        self.report_progress(0, 'Working...')

    result = super().upload_books(files, names, on_card, end_session, metadata)
    # debug_print('KoboTouch:upload_books - result=', result)

    if self.dbversion >= 53:
        try:
            with closing(self.device_database_connection()) as connection:
                cursor = connection.cursor()
                # Remove any stale row for a book that was deleted on the
                # device (Accessibility = 1, not downloaded) and is now
                # being re-sent.
                cleanup_query = f"DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = {self.bool_for_query(False)}"
                for fname, cycle in result:
                    show_debug = self.is_debugging_title(fname)
                    contentID = self.contentid_from_path(fname, 6)
                    if show_debug:
                        debug_print('KoboTouch:upload_books: fname=', fname)
                        debug_print('KoboTouch:upload_books: contentID=', contentID)

                    cleanup_values = (contentID,)
                    # debug_print('KoboTouch:upload_books: Delete record left if deleted on Touch')
                    cursor.execute(cleanup_query, cleanup_values)

                    if self.override_kobo_replace_existing:
                        self.set_filesize_in_device_database(connection, contentID, fname)

                    if not self.upload_covers:
                        # Covers are not being uploaded, so remove any stale ones.
                        imageID = self.imageid_from_contentid(contentID)
                        self.delete_images(imageID, fname)

                cursor.close()
        except Exception as e:
            debug_print('KoboTouch:upload_books - Exception: %s'%str(e))

    return result
def _modify_epub(self, book_file, metadata, container=None):
    '''Apply the Kobo extra CSS to every stylesheet in the book.

    Returns True if processing completed (including the no-op case of no
    extra stylesheet), False if a container could not be created.
    '''
    debug_print(f"KoboTouch:_modify_epub:Processing {metadata.author_sort} - {metadata.title}")

    # Currently only modifying CSS, so if no stylesheet, don't do anything
    if not self.extra_sheet:
        debug_print("KoboTouch:_modify_epub: no CSS file")
        return True

    container, commit_container = self.create_container(book_file, metadata, container)
    if not container:
        return False

    from calibre.ebooks.oeb.base import OEB_STYLES
    is_dirty = False
    for cssname, mt in iteritems(container.mime_map):
        if mt in OEB_STYLES:
            newsheet = container.parsed(cssname)
            oldrules = len(newsheet.cssRules)

            # future css mods may be epub/kepub specific, so pass file extension arg
            fileext = os.path.splitext(book_file)[-1].lower()
            debug_print(f"KoboTouch:_modify_epub: Modifying {cssname}")
            if self._modify_stylesheet(newsheet, fileext):
                debug_print(f"KoboTouch:_modify_epub:CSS rules {oldrules} -> {len(newsheet.cssRules)} ({cssname})")
                container.dirty(cssname)
                is_dirty = True

    if commit_container:
        debug_print("KoboTouch:_modify_epub: committing container.")
        self.commit_container(container, is_dirty)

    return True
def _modify_stylesheet(self, sheet, fileext, is_dirty=False):
    '''Merge the Kobo extra CSS into one of the book's stylesheets.

    Removes conflicting @page and widows/orphans declarations from the
    book's own CSS before appending every rule from the extra stylesheet.
    Returns True if the sheet was changed (always, since rules are appended).
    '''
    from css_parser.css import CSSRule

    # if fileext in (EPUB_EXT, KEPUB_EXT):

    # if kobo extra css contains a @page rule
    # remove any existing @page rules in epub css
    if self.extra_css_options.get('has_atpage', False):
        page_rules = self.get_extra_css_rules(sheet, CSSRule.PAGE_RULE)
        if len(page_rules) > 0:
            debug_print("KoboTouch:_modify_stylesheet: Removing existing @page rules")
            for rule in page_rules:
                rule.style = ''
            is_dirty = True

    # if kobo extra css contains any widow/orphan style rules
    # remove any existing widow/orphan settings in epub css
    if self.extra_css_options.get('has_widows_orphans', False):
        widow_orphan_rules = self.get_extra_css_rules_widow_orphan(sheet)
        if len(widow_orphan_rules) > 0:
            debug_print("KoboTouch:_modify_stylesheet: Removing existing widows/orphans attribs")
            for rule in widow_orphan_rules:
                rule.style.removeProperty('widows')
                rule.style.removeProperty('orphans')
            is_dirty = True

    # append all rules from kobo extra css
    debug_print("KoboTouch:_modify_stylesheet: Append all kobo extra css rules")
    for extra_rule in self.extra_sheet.cssRules:
        sheet.insertRule(extra_rule)
    is_dirty = True

    return is_dirty
def create_container(self, book_file, metadata, container=None):
    """Return (container, commit_container) for book_file.

    A container passed in by the caller is returned unchanged with
    commit_container=False (the caller owns it). Otherwise a new one is
    created from the book file and commit_container=True signals that
    _modify_epub() must commit and clean it up. If creation fails, the
    container returned is None.
    """
    if container:
        debug_print("KoboTouch:create_container: received container")
        return container, False

    try:
        from calibre.ebooks.oeb.polish.container import get_container
        debug_print("KoboTouch:create_container: try to create new container")
        container = get_container(book_file)
        container.css_preprocessor = DummyCSSPreProcessor()
    except Exception as e:
        debug_print(f"KoboTouch:create_container: exception from get_container {metadata.author_sort} - {metadata.title}")
        debug_print(f"KoboTouch:create_container: exception is: {e}")
    return container, True
def commit_container(self, container, is_dirty=True):
    '''Commit the container if it was modified, then remove its temp files.

    The temp-directory removal always runs (and swallows errors) so that
    exploded epub/kepub directories do not accumulate in TEMP.
    '''
    # commit container if changes have been made
    if is_dirty:
        debug_print("KoboTouch:commit_container: commit container.")
        container.commit()

    # Clean-up-AYGO prevents build-up of TEMP exploded epub/kepub files
    debug_print("KoboTouch:commit_container: removing container temp files.")
    try:
        shutil.rmtree(container.root)
    except Exception:
        pass
def delete_via_sql(self, ContentID, ContentType):
    '''
    Delete the database rows for a book being removed from the device.

    The base class handles the content table itself and returns the ImageID
    needed to delete the cover files. On newer databases (dbversion >= 53)
    this override also purges the per-book rows the firmware keeps in
    auxiliary tables: bookmarks, bookshelf membership, content settings,
    ratings and (when the table exists) home-screen Activity tiles.

    :param ContentID: device ContentID of the book.
    :param ContentType: device ContentType of the book.
    :return: the ImageID to use for deleting cover images; derived from the
        ContentID when the base class could not supply one.
    '''
    imageId = super().delete_via_sql(ContentID, ContentType)

    if self.dbversion >= 53:
        debug_print('KoboTouch:delete_via_sql: ContentID="%s"'%ContentID, 'ContentType="%s"'%ContentType)
        try:
            with closing(self.device_database_connection()) as connection:
                debug_print('KoboTouch:delete_via_sql: have database connection')
                cursor = connection.cursor()
                debug_print('KoboTouch:delete_via_sql: have cursor')
                t = (ContentID,)

                # Delete the Bookmarks
                debug_print('KoboTouch:delete_via_sql: Delete from Bookmark')
                cursor.execute('DELETE FROM Bookmark WHERE VolumeID = ?', t)

                # Delete from the Bookshelf
                debug_print('KoboTouch:delete_via_sql: Delete from the Bookshelf')
                cursor.execute('delete from ShelfContent where ContentID = ?', t)

                # ContentType 6 is now for all books.
                debug_print('KoboTouch:delete_via_sql: BookID is Null')
                cursor.execute('delete from content where BookID is Null and ContentID =?',t)

                # Remove the content_settings entry
                debug_print('KoboTouch:delete_via_sql: delete from content_settings')
                cursor.execute('delete from content_settings where ContentID =?',t)

                # Remove the ratings entry
                debug_print('KoboTouch:delete_via_sql: delete from ratings')
                cursor.execute('delete from ratings where ContentID =?',t)

                # Remove any entries for the Activity table - removes tile from new home page
                if self.has_activity_table():
                    debug_print('KoboTouch:delete_via_sql: delete from Activity')
                    cursor.execute('delete from Activity where Id =?', t)

                cursor.close()
                debug_print('KoboTouch:delete_via_sql: finished SQL')
            debug_print('KoboTouch:delete_via_sql: After SQL, no exception')
        except Exception as e:
            # Deliberately best effort: a failure here must not abort the
            # overall deletion of the book's files.
            debug_print('KoboTouch:delete_via_sql - Database Exception: %s'%str(e))

    debug_print('KoboTouch:delete_via_sql: imageId="%s"'%imageId)
    if imageId is None:
        imageId = self.imageid_from_contentid(ContentID)

    return imageId
def delete_images(self, ImageID, book_path):
    """Delete every cover image file generated for *ImageID*, then prune
    any image directories that are left empty."""
    debug_print("KoboTouch:delete_images - ImageID=", ImageID)
    if ImageID is None:
        return
    path = self.images_path(book_path, ImageID)
    debug_print("KoboTouch:delete_images - path=%s" % path)
    # One image file may exist per configured cover ending.
    for ending in self.cover_file_endings().keys():
        fpath = self.normalize_path(path + ending)
        debug_print("KoboTouch:delete_images - fpath=%s" % fpath)
        if os.path.exists(fpath):
            debug_print("KoboTouch:delete_images - Image File Exists")
            os.unlink(fpath)
    # Prune now-empty directories; removedirs stops (raises) at the first
    # non-empty directory, which we silently accept.
    try:
        os.removedirs(os.path.dirname(path))
    except Exception:
        pass
def contentid_from_path(self, path, ContentType):
    '''
    Translate a filesystem *path* on the device into the ContentID used by
    the Kobo database.

    For ContentType 6: '.kobo' files are identified by their bare path with
    the prefix and extension stripped; extensionless entries have the kepub
    directory prefix stripped; anything else becomes a file:// URL rooted
    at the internal or SD card mount point. Other content types always get
    the file:// URL form. Backslashes are normalised to forward slashes.
    '''
    show_debug = self.is_debugging_title(path) and True
    if show_debug:
        debug_print("KoboTouch:contentid_from_path - path='%s'"%path, "ContentType='%s'"%ContentType)
        debug_print("KoboTouch:contentid_from_path - self._main_prefix='%s'"%self._main_prefix, "self._card_a_prefix='%s'"%self._card_a_prefix)
    if ContentType == 6:
        extension = os.path.splitext(path)[1]
        if extension == '.kobo':
            # Store-format books: path without prefix or extension.
            ContentID = os.path.splitext(path)[0].replace(self._main_prefix, '')
        elif not extension:
            # Kepub entries live under the kepub directory; strip it too.
            ContentID = path.replace(self._main_prefix + self.normalize_path(KOBO_ROOT_DIR_NAME + '/kepub/'), '')
        else:
            # Sideloaded books are addressed by file:// URL.
            ContentID = path.replace(self._main_prefix, "file:///mnt/onboard/")
            if show_debug:
                debug_print("KoboTouch:contentid_from_path - 1 ContentID='%s'"%ContentID)
            if self._card_a_prefix is not None:
                ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
    else:  # ContentType = 16
        debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, "path='%s'"%path)
        ContentID = path.replace(self._main_prefix, "file:///mnt/onboard/")
        if self._card_a_prefix is not None:
            ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
    ContentID = ContentID.replace("\\", '/')
    if show_debug:
        debug_print("KoboTouch:contentid_from_path - end - ContentID='%s'"%ContentID)
    return ContentID
def get_content_type_from_path(self, path):
    """Return the Kobo ContentType for the book at *path*.

    Firmware 1.9.17 and newer records every sideloaded book as
    ContentType 6; only older firmware needs the legacy per-path logic
    of the base class.
    """
    if self.fwversion < (1, 9, 17):
        return super().get_content_type_from_path(path)
    return 6
def get_content_type_from_extension(self, extension):
    """Return the Kobo ContentType for a file *extension*.

    With new firmware (1.9.17+), ContentType is 6 for all types of
    sideloaded books; older firmware falls back to the extension mapping
    in the base class.
    """
    debug_print("KoboTouch:get_content_type_from_extension - start")
    if self.fwversion < (1, 9, 17):
        return super().get_content_type_from_extension(extension)
    return 6
def set_plugboards(self, plugboards, pb_func):
    """Remember the metadata plugboards and the function used to pick the
    applicable plugboard when sending books."""
    self.plugboards, self.plugboard_func = plugboards, pb_func
def update_device_database_collections(self, booklists, collections_attributes, oncard):
    '''
    Synchronise the device database with calibre's collections.

    For each collection derived from *collections_attributes* (and/or the
    collections template), this updates bookshelf membership, ReadStatus and
    the FavouritesIndex/Shortlist as appropriate for the firmware and
    database version. Afterwards, series and/or core metadata are written to
    the content table when the driver options request it, books are removed
    from shelves they no longer belong on, and empty bookshelves are
    cleaned up.

    :param booklists: the device booklist(s) to process.
    :param collections_attributes: calibre metadata fields from which
        collections are built.
    :param oncard: which storage is being processed (e.g. None or 'carda').
    '''
    debug_print("KoboTouch:update_device_database_collections - oncard='%s'"%oncard)
    debug_print("KoboTouch:update_device_database_collections - device='%s'" % self)
    if self.modify_database_check("update_device_database_collections") is False:
        return

    # Only process categories in this list
    supportedcategories = {
        "Im_Reading": 1,
        "Read": 2,
        "Closed": 3,
        "Shortlist": 4,
        "Archived": 5,
    }

    # Define lists for the ReadStatus
    readstatuslist = {
        "Im_Reading":1,
        "Read":2,
        "Closed":3,
    }

    accessibilitylist = {
        "Deleted":1,
        "OverDrive":9,
        "Preview":6,
        "Recommendation":4,
    }
    # debug_print('KoboTouch:update_device_database_collections - collections_attributes=', collections_attributes)

    # Snapshot the driver options controlling this run.
    create_collections = self.create_collections
    delete_empty_collections = self.delete_empty_collections
    update_series_details = self.update_series_details
    update_core_metadata = self.update_core_metadata
    update_purchased_kepubs = self.update_purchased_kepubs
    debugging_title = self.get_debugging_title()
    debug_print("KoboTouch:update_device_database_collections - set_debugging_title to '%s'" % debugging_title)
    booklists.set_debugging_title(debugging_title)
    booklists.set_device_managed_collections(self.ignore_collections_names)

    have_bookshelf_attributes = len(collections_attributes) > 0 or self.use_collections_template

    collections = booklists.get_collections(collections_attributes,
        collections_template=self.collections_template,
        template_globals={
            'serial_number': self.device_serial_no(),
            'firmware_version': self.fwversion,
            'display_firmware_version': self.display_fwversion,
            'dbversion': self.dbversion,
        }
    ) if have_bookshelf_attributes else None
    # debug_print('KoboTouch:update_device_database_collections - Collections:', collections)

    # Create a connection to the sqlite database
    # Needs to be outside books collection as in the case of removing
    # the last book from the collection the list of books is empty
    # and the removal of the last book would not occur
    with closing(self.device_database_connection(use_row_factory=True)) as connection:
        if self.manage_collections:
            if collections is not None:
                # debug_print("KoboTouch:update_device_database_collections - length collections=" + str(len(collections)))

                # Need to reset the collections outside the particular loops
                # otherwise the last item will not be removed
                if self.dbversion < 53:
                    debug_print("KoboTouch:update_device_database_collections - calling reset_readstatus")
                    self.reset_readstatus(connection, oncard)
                if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
                    debug_print("KoboTouch:update_device_database_collections - calling reset_favouritesindex")
                    self.reset_favouritesindex(connection, oncard)

                # debug_print("KoboTouch:update_device_database_collections - length collections=", len(collections))
                # debug_print("KoboTouch:update_device_database_collections - self.bookshelvelist=", self.bookshelvelist)
                # Process any collections that exist
                for category, books in collections.items():
                    debug_print("KoboTouch:update_device_database_collections - category='%s' books=%d"%(category, len(books)))
                    # Only create a real bookshelf for names without reserved meaning.
                    if create_collections and not (category in supportedcategories or category in readstatuslist or category in accessibilitylist):
                        self.check_for_bookshelf(connection, category)
                    # if category in self.bookshelvelist:
                    #     debug_print("Category: ", category, " id = ", readstatuslist.get(category))
                    for book in books:
                        # debug_print('    Title:', book.title, 'category: ', category)
                        show_debug = self.is_debugging_title(book.title)
                        if show_debug:
                            debug_print(' Title="%s"'%book.title, 'category="%s"'%category)
                            # debug_print(book)
                            debug_print(' class=%s'%book.__class__)
                            debug_print(' book.contentID="%s"'%book.contentID)
                            debug_print(' book.application_id="%s"'%book.application_id)

                        # Only books that exist in the calibre library are managed.
                        if book.application_id is None:
                            continue

                        category_added = False

                        if book.contentID is None:
                            # Derive the ContentID from the path when it is not yet known.
                            debug_print(' Do not know ContentID - Title="%s", Authors="%s", path="%s"'%(book.title, book.author, book.path))
                            extension = os.path.splitext(book.path)[1]
                            ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(book.path)
                            book.contentID = self.contentid_from_path(book.path, ContentType)

                        if category in self.ignore_collections_names:
                            debug_print(' Ignoring collection=%s' % category)
                            category_added = True
                        elif category in self.bookshelvelist and self.supports_bookshelves:
                            if show_debug:
                                debug_print(' length book.device_collections=%d'%len(book.device_collections))
                            if category not in book.device_collections:
                                if show_debug:
                                    debug_print(' Setting bookshelf on device')
                                self.set_bookshelf(connection, book, category)
                                category_added = True
                        elif category in readstatuslist:
                            debug_print("KoboTouch:update_device_database_collections - about to set_readstatus - category='%s'"%(category, ))
                            # Manage ReadStatus
                            self.set_readstatus(connection, book.contentID, readstatuslist.get(category))
                            category_added = True
                        elif category == 'Shortlist' and self.dbversion >= 14:
                            if show_debug:
                                debug_print(' Have an older version shortlist - %s'%book.title)
                            # Manage FavouritesIndex/Shortlist
                            if not self.supports_bookshelves:
                                if show_debug:
                                    debug_print(' and about to set it - %s'%book.title)
                                self.set_favouritesindex(connection, book.contentID)
                                category_added = True
                        elif category in accessibilitylist:
                            # Do not manage the Accessibility List
                            pass

                        if category_added and category not in book.device_collections:
                            if show_debug:
                                debug_print(' adding category to book.device_collections', book.device_collections)
                            book.device_collections.append(category)
                        else:
                            if show_debug:
                                debug_print(' category not added to book.device_collections', book.device_collections)
                    debug_print("KoboTouch:update_device_database_collections - end for category='%s'"%category)

            elif have_bookshelf_attributes:  # No collections but have set the shelf option
                # Since no collections exist the ReadStatus needs to be reset to 0 (Unread)
                debug_print("No Collections - resetting ReadStatus")
                if self.dbversion < 53:
                    self.reset_readstatus(connection, oncard)
                if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves:
                    debug_print("No Collections - resetting FavouritesIndex")
                    self.reset_favouritesindex(connection, oncard)

        # Set the series info and cleanup the bookshelves only if the firmware supports them and the user has set the options.
        if (self.supports_bookshelves and self.manage_collections or self.supports_series()) and (
                have_bookshelf_attributes or update_series_details or update_core_metadata):
            debug_print("KoboTouch:update_device_database_collections - managing bookshelves and series.")

            self.series_set = 0
            self.core_metadata_set = 0
            books_in_library = 0
            for book in booklists:
                # debug_print("KoboTouch:update_device_database_collections - book.title=%s, book.contentID=%s" % (book.title, book.contentID))
                if book.application_id is not None and book.contentID is not None:
                    books_in_library += 1
                    show_debug = self.is_debugging_title(book.title)
                    if show_debug:
                        debug_print("KoboTouch:update_device_database_collections - book.title=%s" % book.title)
                        debug_print(
                            "KoboTouch:update_device_database_collections - contentId=%s,"
                            "update_core_metadata=%s,update_purchased_kepubs=%s, book.is_sideloaded=%s" % (
                                book.contentID, update_core_metadata, update_purchased_kepubs, book.is_sideloaded))
                    # Full metadata is only pushed for sideloaded books unless the
                    # user asked to also update purchased books.
                    if update_core_metadata and (update_purchased_kepubs or book.is_sideloaded):
                        if show_debug:
                            debug_print("KoboTouch:update_device_database_collections - calling set_core_metadata")
                        self.set_core_metadata(connection, book)
                    elif update_series_details:
                        if show_debug:
                            debug_print("KoboTouch:update_device_database_collections - calling set_core_metadata - series only")
                        self.set_core_metadata(connection, book, series_only=True)
                    if self.manage_collections and have_bookshelf_attributes:
                        if show_debug:
                            debug_print("KoboTouch:update_device_database_collections - about to remove a book from shelves book.title=%s" % book.title)
                        self.remove_book_from_device_bookshelves(connection, book)
                        book.device_collections.extend(book.kobo_collections)
            if not prefs['manage_device_metadata'] == 'manual' and delete_empty_collections:
                debug_print("KoboTouch:update_device_database_collections - about to clear empty bookshelves")
                self.delete_empty_bookshelves(connection)
            debug_print("KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d" % (self.series_set, books_in_library))
            debug_print("KoboTouch:update_device_database_collections - Number of core metadata set=%d Number of books=%d" % (
                self.core_metadata_set, books_in_library))

        self.dump_bookshelves(connection)

    debug_print('KoboTouch:update_device_database_collections - Finished ')
def rebuild_collections(self, booklist, oncard):
    """Rebuild all device collections for *booklist* using the currently
    configured collection attributes."""
    debug_print("KoboTouch:rebuild_collections")
    attributes = self.get_collections_attributes()
    debug_print('KoboTouch:rebuild_collections: collection fields:', attributes)
    self.update_device_database_collections(booklist, attributes, oncard)
def upload_cover(self, path, filename, metadata, filepath):
    '''
    Upload book cover to the device.

    The real work happens in _upload_cover(); this wrapper only checks
    whether covers should be uploaded at all for the current options and
    storage location, and shields the caller from any failure.

    :param path: The full path to the folder where the associated book is located.
    :param filename: The name of the book file without the extension.
    :param metadata: metadata belonging to the book. Use metadata.thumbnail
        for cover
    :param filepath: The full path to the ebook file
    '''
    debug_print("KoboTouch:upload_cover - path='%s' filename='%s' "%(path, filename))
    debug_print(" filepath='%s' "%(filepath))

    if not self.upload_covers:
        # Building thumbnails disabled
        return

    # Only upload covers to SD card if that is supported
    on_card = self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix))
    if on_card and not self.supports_covers_on_sdcard():
        return

    try:
        self._upload_cover(
            path, filename, metadata, filepath,
            self.upload_grayscale, self.dithered_covers,
            self.keep_cover_aspect, self.letterbox_fs_covers, self.png_covers,
            letterbox_color=self.letterbox_fs_covers_color)
    except Exception as e:
        # Cover upload failures must never abort the book upload itself.
        debug_print('KoboTouch: FAILED to upload cover=%s Exception=%s'%(filepath, str(e)))
def imageid_from_contentid(self, ContentID):
    """Derive the ImageID used for cover files from a book's ContentID.

    The ImageID is the ContentID with the filesystem-unfriendly characters
    '/', space, ':' and '.' each replaced by an underscore.
    """
    return ContentID.translate(str.maketrans('/ :.', '____'))
def images_path(self, path, imageId=None):
    '''
    Compute the on-device folder (and, when *imageId* is given, file base
    path) where the cover images for a book are stored.

    Books on a supported SD card get their images on the card; everything
    else goes to main memory. Devices with an image tree spread the files
    over a two-level directory structure derived from a hash of the
    ImageID.
    '''
    on_sdcard = self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and self.supports_covers_on_sdcard()
    if on_sdcard:
        prefix = 'koboExtStorage/images-cache/' if self.supports_images_tree() else 'koboExtStorage/images/'
        path = os.path.join(self._card_a_prefix, prefix)
    else:
        if self.supports_images_tree() or (not self.supports_images_tree() and self.isTolinoDevice()):
            prefix = '.kobo-images/'
        else:
            prefix = KOBO_ROOT_DIR_NAME + '/images/'
        path = os.path.join(self._main_prefix, prefix)

    if self.supports_images_tree() and imageId:
        # Bucket directories come from the low 16 bits of the id hash.
        hashed = qhash(imageId)
        dir1 = hashed & 0xff
        dir2 = (hashed & 0xff00) >> 8
        path = os.path.join(path, "%s" % dir1, "%s" % dir2)

    if imageId:
        path = os.path.join(path, imageId)
    return path
def _calculate_kobo_cover_size(self, library_size, kobo_size, expand, keep_cover_aspect, letterbox):
# Remember the canvas size
canvas_size = kobo_size
# NOTE: Loosely based on Qt's QSize::scaled implementation
if keep_cover_aspect:
# NOTE: Unlike Qt, we round to avoid accumulating errors,
# as ImageOps will then floor via fit_image
aspect_ratio = library_size[0] / library_size[1]
rescaled_width = int(round(kobo_size[1] * aspect_ratio))
if expand:
use_height = (rescaled_width >= kobo_size[0])
else:
use_height = (rescaled_width <= kobo_size[0])
if use_height:
kobo_size = (rescaled_width, kobo_size[1])
else:
kobo_size = (kobo_size[0], int(round(kobo_size[0] / aspect_ratio)))
# Did we actually want to letterbox?
if not letterbox:
canvas_size = kobo_size
return (kobo_size, canvas_size)
def _create_cover_data(
    self, cover_data, resize_to, minify_to, kobo_size,
    upload_grayscale=False, dithered_covers=False, keep_cover_aspect=False, is_full_size=False, letterbox=False, png_covers=False, quality=90,
    letterbox_color=DEFAULT_COVER_LETTERBOX_COLOR
):
    '''
    Generate the device cover image from the library cover data.

    This is a thin wrapper around save_cover_data_to() so subclasses can
    override the conversion; several parameters (kobo_size,
    keep_cover_aspect, is_full_size) are unused here but passed so an
    override has the full context available.

    :param cover_data: original cover data.
    :param resize_to: (width, height) to resize to; None means no resizing.
    :param minify_to: maximum canvas size for the resized cover.
    :param kobo_size: size of the cover slot on the device (unused here).
    :param upload_grayscale: convert the cover to grayscale.
    :param dithered_covers: quantize to the e-ink 16-colour grayscale palette.
    :param keep_cover_aspect: keep the library cover's aspect ratio (unused here).
    :param is_full_size: True when generating the full size cover (unused here).
    :param letterbox: letterbox the resized image onto the canvas.
    :param png_covers: encode as PNG instead of JPEG.
    :param quality: output encoding quality (compression level for PNG).
    :param letterbox_color: colour used for letterboxing.
    '''
    from calibre.utils.img import save_cover_data_to
    output_format = "png" if png_covers else "jpeg"
    return save_cover_data_to(
        cover_data, resize_to=resize_to, compression_quality=quality, minify_to=minify_to, grayscale=upload_grayscale, eink=dithered_covers,
        letterbox=letterbox, data_fmt=output_format, letterbox_color=letterbox_color)
def _upload_cover(
    self, path, filename, metadata, filepath, upload_grayscale,
    dithered_covers=False, keep_cover_aspect=False, letterbox_fs_covers=False, png_covers=False,
    letterbox_color=DEFAULT_COVER_LETTERBOX_COLOR
):
    '''
    Do the actual work of generating and writing a book's cover images.

    Looks up the book's ImageID in the device database (or derives one from
    the ContentID when no row exists yet), then for every cover ending that
    applies to the current database version resizes/letterboxes/grayscales
    the library cover as configured and writes it into the device's image
    folder. Exceptions propagate to the caller (upload_cover() logs them).
    '''
    from calibre.utils.img import optimize_png
    from calibre.utils.imghdr import identify
    debug_print("KoboTouch:_upload_cover - filename='%s' upload_grayscale='%s' dithered_covers='%s' "%(filename, upload_grayscale, dithered_covers))

    if not metadata.cover:
        return

    show_debug = self.is_debugging_title(filename)
    if show_debug:
        debug_print("KoboTouch:_upload_cover - path='%s'"%path, "filename='%s'"%filename)
        debug_print(" filepath='%s'"%filepath)
    cover = self.normalize_path(metadata.cover.replace('/', os.sep))

    if not os.path.exists(cover):
        debug_print("KoboTouch:_upload_cover - Cover file does not exist in library")
        return

    # Get ContentID for Selected Book
    extension = os.path.splitext(filepath)[1]
    ContentType = self.get_content_type_from_extension(extension) if extension else self.get_content_type_from_path(filepath)
    ContentID = self.contentid_from_path(filepath, ContentType)

    try:
        with closing(self.device_database_connection()) as connection:
            cursor = connection.cursor()
            t = (ContentID,)
            cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t)
            try:
                result = next(cursor)
                ImageID = result[0]
            except StopIteration:
                # Book is not in the database yet; derive the id ourselves.
                ImageID = self.imageid_from_contentid(ContentID)
                debug_print("KoboTouch:_upload_cover - No rows exist in the database - generated ImageID='%s'" % ImageID)
            cursor.close()

        if ImageID is not None:
            path = self.images_path(path, ImageID)

            if show_debug:
                debug_print("KoboTouch:_upload_cover - About to loop over cover endings")

            image_dir = os.path.dirname(os.path.abspath(path))
            if not os.path.exists(image_dir):
                debug_print("KoboTouch:_upload_cover - Image folder does not exist. Creating path='%s'" % (image_dir))
                os.makedirs(image_dir)

            with open(cover, 'rb') as f:
                cover_data = f.read()

            fmt, width, height = identify(cover_data)
            library_cover_size = (width, height)

            for ending, cover_options in self.cover_file_endings().items():
                kobo_size, min_dbversion, max_dbversion, is_full_size = cover_options
                if show_debug:
                    debug_print("KoboTouch:_upload_cover - library_cover_size=%s -> kobo_size=%s, min_dbversion=%d max_dbversion=%d, is_full_size=%s" % (
                        library_cover_size, kobo_size, min_dbversion, max_dbversion, is_full_size))

                # Skip endings that do not apply to this database version.
                if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion:
                    if show_debug:
                        debug_print("KoboTouch:_upload_cover - creating cover for ending='%s'"%ending)  # , "library_cover_size'%s'"%library_cover_size)
                    fpath = path + ending
                    fpath = self.normalize_path(fpath.replace('/', os.sep))

                    # Never letterbox thumbnails, that's ugly. But for fullscreen covers, honor the setting.
                    letterbox = letterbox_fs_covers and is_full_size

                    # NOTE: Full size means we have to fit *inside* the
                    # given boundaries. Thumbnails, on the other hand, are
                    # *expanded* around those boundaries.
                    # In Qt, it'd mean full-screen covers are resized
                    # using Qt::KeepAspectRatio, while thumbnails are
                    # resized using Qt::KeepAspectRatioByExpanding
                    # (i.e., QSize's boundedTo() vs. expandedTo(). See also IM's '^' geometry token, for the same "expand" behavior.)
                    # Note that Nickel itself will generate bounded thumbnails, while it will download expanded thumbnails for store-bought KePubs...
                    # We chose to emulate the KePub behavior.
                    resize_to, expand_to = self._calculate_kobo_cover_size(library_cover_size, kobo_size, not is_full_size, keep_cover_aspect, letterbox)
                    if show_debug:
                        debug_print(
                            "KoboTouch:_calculate_kobo_cover_size - expand_to=%s"
                            " (vs. kobo_size=%s) & resize_to=%s, keep_cover_aspect=%s & letterbox_fs_covers=%s, png_covers=%s" % (
                                expand_to, kobo_size, resize_to, keep_cover_aspect, letterbox_fs_covers, png_covers))

                    # NOTE: To speed things up, we enforce a lower
                    # compression level for png_covers, as the final
                    # optipng pass will then select a higher compression
                    # level anyway,
                    # so the compression level from that first pass
                    # is irrelevant, and only takes up precious time
                    # ;).
                    quality = 10 if png_covers else 90

                    # Return the data resized and properly grayscaled/dithered/letterboxed if requested
                    data = self._create_cover_data(
                        cover_data, resize_to, expand_to, kobo_size, upload_grayscale,
                        dithered_covers, keep_cover_aspect, is_full_size, letterbox, png_covers, quality,
                        letterbox_color=letterbox_color)

                    # NOTE: If we're writing a PNG file, go through a quick
                    # optipng pass to make sure it's encoded properly, as
                    # Qt doesn't afford us enough control to do it right...
                    # Unfortunately, optipng doesn't support reading
                    # pipes, so this gets a bit clunky as we have to go
                    # through a temporary file...
                    if png_covers:
                        tmp_cover = better_mktemp()
                        with open(tmp_cover, 'wb') as f:
                            f.write(data)
                        optimize_png(tmp_cover, level=1)
                        # Crossing FS boundaries, can't rename, have to copy + delete :/
                        shutil.copy2(tmp_cover, fpath)
                        os.remove(tmp_cover)
                    else:
                        with open(fpath, 'wb') as f:
                            f.write(data)
                            fsync(f)
    except Exception as e:
        err = str(e)
        debug_print("KoboTouch:_upload_cover - Exception string: %s"%err)
        raise
def remove_book_from_device_bookshelves(self, connection, book):
    '''
    Take *book* off every shelf it should no longer be on: the shelves it
    is currently on, minus the collections calibre says it belongs to,
    minus the device-managed collections we must not touch.
    '''
    show_debug = self.is_debugging_title(book.title)  # or True
    shelves_to_leave = set(book.current_shelves) - set(book.device_collections) - set(self.ignore_collections_names)

    if show_debug:
        debug_print('KoboTouch:remove_book_from_device_bookshelves - book.application_id="%s"'%book.application_id)
        debug_print('KoboTouch:remove_book_from_device_bookshelves - book.contentID="%s"'%book.contentID)
        debug_print('KoboTouch:remove_book_from_device_bookshelves - book.device_collections=', book.device_collections)
        debug_print('KoboTouch:remove_book_from_device_bookshelves - book.current_shelves=', book.current_shelves)
        debug_print('KoboTouch:remove_book_from_device_bookshelves - remove_shelf_list=', shelves_to_leave)

    if not shelves_to_leave:
        return

    query = 'DELETE FROM ShelfContent WHERE ContentId = ?'
    values = [book.contentID,]
    if book.device_collections:
        # Keep the rows for shelves the book legitimately belongs to.
        placeholders = ','.join('?' for unused in book.device_collections)
        query += ' and ShelfName not in (%s)' % placeholders
        values.extend(book.device_collections)

    if show_debug:
        debug_print('KoboTouch:remove_book_from_device_bookshelves query="%s"'%query)
        debug_print('KoboTouch:remove_book_from_device_bookshelves values="%s"'%values)

    cursor = connection.cursor()
    cursor.execute(query, values)
    cursor.close()
def set_filesize_in_device_database(self, connection, contentID, fpath):
    '''
    Keep the ___FileSize column for *contentID* in step with the actual
    size of the file at *fpath*. Only updates when a content row already
    exists, the file exists, and the stored size differs.
    '''
    show_debug = self.is_debugging_title(fpath)
    if show_debug:
        debug_print('KoboTouch:set_filesize_in_device_database contentID="%s"'%contentID)

    test_query = ('SELECT ___FileSize '
                  'FROM content '
                  'WHERE ContentID = ? '
                  ' AND ContentType = 6')
    updatequery = ('UPDATE content '
                   'SET ___FileSize = ? '
                   'WHERE ContentId = ? '
                   'AND ContentType = 6')

    cursor = connection.cursor()
    cursor.execute(test_query, (contentID, ))
    try:
        result = next(cursor)
    except StopIteration:
        result = None

    if result is None:
        # No row yet: the book is new on the device; nothing to update.
        if show_debug:
            debug_print(' Did not find a record - new book on device')
    elif os.path.exists(fpath):
        file_size = os.stat(self.normalize_path(fpath)).st_size
        if show_debug:
            debug_print(' Found a record - will update - ___FileSize=', result[0], ' file_size=', file_size)
        if file_size != int(result[0]):
            cursor.execute(updatequery, (file_size, contentID, ))
            if show_debug:
                debug_print(' Size updated.')

    cursor.close()

    # debug_print("KoboTouch:set_filesize_in_device_database - end")
def delete_empty_bookshelves(self, connection):
    '''
    Remove bookshelves on the device that no longer contain any books.

    Shelves that were never synced are deleted outright; shelves that have
    been synced are only soft-deleted (_IsDeleted set) so the next sync can
    propagate the removal. The built-in Shortlist/Wishlist shelves, shelves
    of Type 'SystemTag' and any shelves in the driver's ignore list are
    always preserved. Orphaned home-screen Activity rows are purged when
    the device has an Activity table.
    '''
    debug_print("KoboTouch:delete_empty_bookshelves - start")
    ignore_collections_placeholder = ''
    ignore_collections_values = []
    if self.ignore_collections_names:
        # Build ',?,?,...' to extend the IN (...) lists below.
        placeholder = ',?'
        ignore_collections_placeholder = ''.join(placeholder for unused in self.ignore_collections_names)
        ignore_collections_values.extend(self.ignore_collections_names)
    debug_print("KoboTouch:delete_empty_bookshelves - ignore_collections_in=", ignore_collections_placeholder)
    debug_print("KoboTouch:delete_empty_bookshelves - ignore_collections=", ignore_collections_values)

    true, false = self.bool_for_query(True), self.bool_for_query(False)
    delete_query = ("DELETE FROM Shelf "
                    f"WHERE Shelf._IsSynced = {false} "
                    "AND Shelf.InternalName not in ('Shortlist', 'Wishlist'" + ignore_collections_placeholder + ") "
                    "AND (Type IS NULL OR Type <> 'SystemTag') "  # Collections are created with Type of NULL and change after a sync.
                    "AND NOT EXISTS "
                    "(SELECT 1 FROM ShelfContent c "
                    "WHERE Shelf.Name = c.ShelfName "
                    f"AND c._IsDeleted <> {true})")
    debug_print("KoboTouch:delete_empty_bookshelves - delete_query=", delete_query)

    update_query = ("UPDATE Shelf "
                    f"SET _IsDeleted = {true} "
                    f"WHERE Shelf._IsSynced = {true} "
                    "AND Shelf.InternalName not in ('Shortlist', 'Wishlist'" + ignore_collections_placeholder + ") "
                    "AND (Type IS NULL OR Type <> 'SystemTag') "
                    "AND NOT EXISTS "
                    "(SELECT 1 FROM ShelfContent c "
                    "WHERE Shelf.Name = c.ShelfName "
                    f"AND c._IsDeleted <> {true})")
    debug_print("KoboTouch:delete_empty_bookshelves - update_query=", update_query)

    delete_activity_query = ("DELETE FROM Activity "
                             "WHERE Type = 'Shelf' "
                             "AND NOT EXISTS "
                             "(SELECT 1 FROM Shelf "
                             "WHERE Shelf.Name = Activity.Id "
                             f"AND Shelf._IsDeleted = {false})"
                             )
    debug_print("KoboTouch:delete_empty_bookshelves - delete_activity_query=", delete_activity_query)

    cursor = connection.cursor()
    cursor.execute(delete_query, ignore_collections_values)
    cursor.execute(update_query, ignore_collections_values)
    if self.has_activity_table():
        cursor.execute(delete_activity_query)
    cursor.close()

    debug_print("KoboTouch:delete_empty_bookshelves - end")
def get_bookshelflist(self, connection):
    '''Return the names of all non-deleted bookshelves on the device.

    Devices/firmware without bookshelf support yield an empty list.
    '''
    if not self.supports_bookshelves:
        return []
    cursor = connection.cursor()
    cursor.execute(f'SELECT Name FROM Shelf WHERE _IsDeleted = {self.bool_for_query(False)}')
    shelf_names = [row['Name'] for row in cursor]
    cursor.close()
    return shelf_names
def set_bookshelf(self, connection, book, shelfName):
    '''
    Ensure *book* is a member of the bookshelf *shelfName*.

    Does nothing when the book is already known to be on the shelf. If a
    soft-deleted ShelfContent row exists for the book/shelf pair it is
    undeleted; otherwise a new row is inserted.
    '''
    show_debug = self.is_debugging_title(book.title)
    if show_debug:
        debug_print('KoboTouch:set_bookshelf book.ContentID="%s"'%book.contentID)
        debug_print('KoboTouch:set_bookshelf book.current_shelves="%s"'%book.current_shelves)

    if shelfName in book.current_shelves:
        if show_debug:
            debug_print(' book already on shelf.')
        return

    test_query = 'SELECT _IsDeleted FROM ShelfContent WHERE ShelfName = ? and ContentId = ?'
    test_values = (shelfName, book.contentID, )
    false = self.bool_for_query(False)
    addquery = f'INSERT INTO ShelfContent ("ShelfName","ContentId","DateModified","_IsDeleted","_IsSynced") VALUES (?, ?, ?, {false}, {false})'
    add_values = (shelfName, book.contentID, time.strftime(self.TIMESTAMP_STRING, time.gmtime()), )
    updatequery = f'UPDATE ShelfContent SET _IsDeleted = {false} WHERE ShelfName = ? and ContentId = ?'
    update_values = (shelfName, book.contentID, )

    cursor = connection.cursor()
    cursor.execute(test_query, test_values)
    try:
        result = next(cursor)
    except StopIteration:
        result = None
    if result is None:
        # No membership row at all: insert one.
        if show_debug:
            debug_print(' Did not find a record - adding')
        cursor.execute(addquery, add_values)
    elif self.is_true_value(result['_IsDeleted']):
        # Soft-deleted row exists: just undelete it.
        if show_debug:
            debug_print(' Found a record - updating - result=', result)
        cursor.execute(updatequery, update_values)

    cursor.close()

    # debug_print("KoboTouch:set_bookshelf - end")
def check_for_bookshelf(self, connection, bookshelf_name):
    '''
    Make sure a bookshelf named *bookshelf_name* exists on the device.

    Inserts a new Shelf row with the columns appropriate for the database
    version (dbversion >= 64 adds the Id and Type columns), or undeletes a
    soft-deleted shelf of that name. Finally refreshes
    self.bookshelvelist from the database.
    '''
    show_debug = self.is_debugging_title(bookshelf_name)
    if show_debug:
        debug_print('KoboTouch:check_for_bookshelf bookshelf_name="%s"'%bookshelf_name)
    test_query = 'SELECT InternalName, Name, _IsDeleted FROM Shelf WHERE Name = ?'
    test_values = (bookshelf_name, )
    addquery = 'INSERT INTO "main"."Shelf"'
    # Older databases store booleans as the strings 'true'/'false'.
    if self.needs_real_bools:
        add_values = (time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
                      bookshelf_name,
                      time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
                      bookshelf_name,
                      False,
                      True,
                      False,
                      )
    else:
        add_values = (time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
                      bookshelf_name,
                      time.strftime(self.TIMESTAMP_STRING, time.gmtime()),
                      bookshelf_name,
                      "false",
                      "true",
                      "false",
                      )
    shelf_type = "UserTag"  # if self.supports_reading_list else None
    if self.dbversion < 64:
        addquery += ' ("CreationDate","InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced")'\
            ' VALUES (?, ?, ?, ?, ?, ?, ?)'
    else:
        addquery += ' ("CreationDate", "InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced", "Id", "Type")'\
            ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)'
        add_values = add_values +(bookshelf_name, shelf_type)

    if show_debug:
        debug_print('KoboTouch:check_for_bookshelf addquery=', addquery)
        debug_print('KoboTouch:check_for_bookshelf add_values=', add_values)
    updatequery = f'UPDATE Shelf SET _IsDeleted = {self.bool_for_query(False)} WHERE Name = ?'

    cursor = connection.cursor()
    cursor.execute(test_query, test_values)
    try:
        result = next(cursor)
    except StopIteration:
        result = None
    if result is None:
        if show_debug:
            debug_print(' Did not find a record - adding shelf "%s"' % bookshelf_name)
        cursor.execute(addquery, add_values)
    elif self.is_true_value(result['_IsDeleted']):
        debug_print("KoboTouch:check_for_bookshelf - Shelf '{}' is deleted - undeleting. result['_IsDeleted']='{}'".format(
            bookshelf_name, str(result['_IsDeleted'])))
        cursor.execute(updatequery, test_values)

    cursor.close()

    # Update the bookshelf list.
    self.bookshelvelist = self.get_bookshelflist(connection)

    # debug_print("KoboTouch:set_bookshelf - end")
def remove_from_bookshelves(self, connection, oncard, ContentID=None, bookshelves=None):
    '''
    Delete rows from the ShelfContent table of the device database.

    :param connection: Open connection to the device database.
    :param oncard: 'carda'/'cardb'/other. When no ContentID is given, selects
        which storage location's rows are removed (SD card vs. main memory).
    :param ContentID: When not None, only rows for this one book are removed.
    :param bookshelves: Optional list of shelf names; restricts the delete to
        those shelves.
    '''
    debug_print('KoboTouch:remove_from_bookshelf ContentID=', ContentID)
    if not self.supports_bookshelves:
        return
    query = 'DELETE FROM ShelfContent'

    values = []
    if ContentID is not None:
        query += ' WHERE ContentId = ?'
        values.append(ContentID)
    else:
        if oncard == 'carda':
            query += ' WHERE ContentID like \'file:///mnt/sd/%\''
        elif oncard != 'carda' and oncard != 'cardb':
            query += ' WHERE ContentID not like \'file:///mnt/sd/%\''

    if bookshelves:
        placeholder = '?'
        placeholders = ','.join(placeholder for unused in bookshelves)
        query += ' and ShelfName in (%s)' % placeholders
        # Fix: bind each shelf name individually. The previous
        # values.append(bookshelves) passed the list itself as a single
        # parameter, giving sqlite an unsupported type and a binding count
        # that did not match the number of '?' placeholders.
        values.extend(bookshelves)
    debug_print('KoboTouch:remove_from_bookshelf query=', query)
    debug_print('KoboTouch:remove_from_bookshelf values=', values)

    cursor = connection.cursor()
    cursor.execute(query, values)
    cursor.close()

    debug_print("KoboTouch:remove_from_bookshelf - end")
# No longer used, but keep for a little bit.
def set_series(self, connection, book):
    '''
    Write the series name and number for one book into the device database.

    Deprecated: superseded by set_core_metadata(..., series_only=True).

    :param connection: open connection to the device database.
    :param book: the Book object whose series info is written; book.kobo_series
        and book.kobo_series_number hold what is currently on the device.
    '''
    show_debug = self.is_debugging_title(book.title)
    if show_debug:
        debug_print('KoboTouch:set_series book.kobo_series="%s"'%book.kobo_series)
        debug_print('KoboTouch:set_series book.series="%s"'%book.series)
        debug_print('KoboTouch:set_series book.series_index=', book.series_index)

    # Skip the update when the device already has the same series/number.
    if book.series == book.kobo_series:
        kobo_series_number = None
        if book.kobo_series_number is not None:
            try:
                kobo_series_number = float(book.kobo_series_number)
            except:
                # Device value is not numeric; treat as "different".
                kobo_series_number = None
        if kobo_series_number == book.series_index:
            if show_debug:
                debug_print('KoboTouch:set_series - series info the same - not changing')
            return

    # NOTE: 'SeriesNumber==?' uses a double equals sign. SQLite tokenises
    # '==' identically to '=', so this works, but it is an accident of the
    # SQLite grammar rather than intent.
    update_query = 'UPDATE content SET Series=?, SeriesNumber==? where BookID is Null and ContentID = ?'
    if book.series is None:
        update_values = (None, None, book.contentID, )
    elif book.series_index is None:  # This should never happen, but...
        update_values = (book.series, None, book.contentID, )
    else:
        # '%g' drops a trailing '.0' for whole-number series indices.
        update_values = (book.series, "%g"%book.series_index, book.contentID, )

    cursor = connection.cursor()
    try:
        if show_debug:
            debug_print('KoboTouch:set_series - about to set - parameters:', update_values)
        cursor.execute(update_query, update_values)
        self.series_set += 1
    except:
        debug_print('    Database Exception: Unable to set series info')
        raise
    finally:
        cursor.close()

    if show_debug:
        debug_print("KoboTouch:set_series - end")
def set_core_metadata(self, connection, book, series_only=False):
    '''
    Update the device database row for one book with the metadata from the
    calibre library: series info and, unless series_only is True, also title,
    authors, publisher, publication date, comments, ISBN, language, subtitle
    and the book-stats columns. Only columns whose value actually differs
    from what is already on the device are written.

    :param connection: open connection to the device database.
    :param book: Book object carrying both the calibre metadata and the
        current device values (book.kobo_metadata, book.kobo_series_number, ...).
    :param series_only: when True, only the series columns are updated.
    '''
    # debug_print('KoboTouch:set_core_metadata book="%s"' % book.title)
    show_debug = self.is_debugging_title(book.title)
    if show_debug:
        debug_print(f'KoboTouch:set_core_metadata book="{book}"\n'
                    f'series_only="{series_only}"\n'
                    f'force_series_id="{self.force_series_id}"')

    def generate_update_from_template(book, update_values, set_clause, column_name, new_value=None, template=None, current_value=None):
        # Append (value, column) to the pending update when the templated
        # value differs from what is currently in the device database.
        # NOTE(review): assumes new_value is a string whenever template is
        # non-empty; a None new_value with a non-empty template would raise.
        if template is None or template == '':
            new_value = None
        else:
            new_value = new_value if len(new_value.strip()) else None
            if new_value is not None and new_value.startswith("PLUGBOARD TEMPLATE ERROR"):
                debug_print("KoboTouch:generate_update_from_template template error - template='%s'" % template)
                debug_print("KoboTouch:generate_update_from_template - new_value=", new_value)

        # debug_print(
        #     f"KoboTouch:generate_update_from_template - {book.title} - column_name='{column_name}',"
        #     f" current_value='{current_value}', new_value='{new_value}'")
        if (new_value is not None and
            (current_value is None or new_value != current_value)) or \
                (new_value is None and current_value is not None):
            update_values.append(new_value)
            set_clause.append(column_name)

    plugboard = None
    if self.plugboard_func and not series_only:
        # Pick the plugboard for the book's format; kepubs (or extensionless
        # content ids) use the 'kepub' plugboard.
        if book.contentID.endswith('.kepub.epub') or not os.path.splitext(book.contentID)[1]:
            extension = 'kepub'
        else:
            extension = os.path.splitext(book.contentID)[1][1:]
        plugboard = self.plugboard_func(self.__class__.__name__, extension, self.plugboards)

        # If the book is a kepub, and there is no kepub plugboard, use the epub plugboard if it exists.
        if not plugboard and extension == 'kepub':
            plugboard = self.plugboard_func(self.__class__.__name__, 'epub', self.plugboards)

    if plugboard is not None:
        newmi = book.deepcopy_metadata()
        newmi.template_to_attribute(book, plugboard)
    else:
        newmi = book

    update_query = 'UPDATE content SET '
    update_values = []
    set_clause = []
    changes_found = False
    kobo_metadata = book.kobo_metadata

    if show_debug:
        debug_print(f'KoboTouch:set_core_metadata newmi.series="{newmi.series}"')
        debug_print(f'KoboTouch:set_core_metadata kobo_metadata.series="{kobo_metadata.series}"')
        debug_print(f'KoboTouch:set_core_metadata newmi.series_index="{newmi.series_index}"')
        debug_print(f'KoboTouch:set_core_metadata kobo_metadata.series_index="{kobo_metadata.series_index}"')
        debug_print(f'KoboTouch:set_core_metadata book.kobo_series_number="{book.kobo_series_number}"')

    if newmi.series is not None:
        new_series = newmi.series
        try:
            # '%g' drops the trailing '.0' for whole-number indices.
            new_series_number = "%g" % newmi.series_index
        except Exception:  # narrowed from bare 'except:' - don't swallow KeyboardInterrupt
            new_series_number = None
    else:
        new_series = None
        new_series_number = None

    series_changed = not (new_series == kobo_metadata.series)
    series_number_changed = not (new_series_number == book.kobo_series_number)
    if show_debug:
        debug_print(f'KoboTouch:set_core_metadata new_series="{new_series}"')
        debug_print(f'KoboTouch:set_core_metadata new_series_number="{new_series_number}"')
        debug_print(f'KoboTouch:set_core_metadata series_number_changed="{series_number_changed}"')
        debug_print(f'KoboTouch:set_core_metadata series_changed="{series_changed}"')

    if series_changed or series_number_changed:
        update_values.append(new_series)
        set_clause.append('Series')
        update_values.append(new_series_number)
        set_clause.append('SeriesNumber')
    if self.force_series_id:
        # Force SeriesID to the series name itself.
        update_values.append(new_series)
        set_clause.append('SeriesID')
        update_values.append(newmi.series_index)
        set_clause.append('SeriesNumberFloat')
    elif self.supports_series_list and book.is_sideloaded:
        series_id = self.kobo_series_dict.get(new_series, new_series)
        try:
            kobo_series_id = book.kobo_series_id
            kobo_series_number_float = book.kobo_series_number_float
        except Exception:  # This should mean the book was sent to the device during the current session.
            kobo_series_id = None
            kobo_series_number_float = None

        if series_changed or series_number_changed \
                or kobo_series_id != series_id \
                or kobo_series_number_float != newmi.series_index:
            update_values.append(series_id)
            set_clause.append('SeriesID')
            update_values.append(newmi.series_index)
            set_clause.append('SeriesNumberFloat')
            if show_debug:
                debug_print(f"KoboTouch:set_core_metadata Setting SeriesID - new_series='{new_series}', series_id='{series_id}'")

    if not series_only:
        # Resolve the configured book-stats/subtitle templates into book attributes.
        pb = []
        if self.subtitle_template is not None:
            pb.append((self.subtitle_template, 'subtitle'))
        if self.bookstats_pagecount_template is not None:
            pb.append((self.bookstats_pagecount_template, 'bookstats_pagecount'))
        if self.bookstats_wordcount_template is not None:
            pb.append((self.bookstats_wordcount_template, 'bookstats_wordcount'))
        if self.bookstats_timetoread_upper_template is not None:
            pb.append((self.bookstats_timetoread_upper_template, 'bookstats_timetoread_upper'))
        if self.bookstats_timetoread_lower_template is not None:
            pb.append((self.bookstats_timetoread_lower_template, 'bookstats_timetoread_lower'))
        if show_debug:
            debug_print(f"KoboTouch:set_core_metadata templates being used - pb='{pb}'")
        book.template_to_attribute(book, pb)

        if not (newmi.title == kobo_metadata.title):
            update_values.append(newmi.title)
            set_clause.append('Title')

        if not (authors_to_string(newmi.authors) == authors_to_string(kobo_metadata.authors)):
            update_values.append(authors_to_string(newmi.authors))
            set_clause.append('Attribution')

        if not (newmi.publisher == kobo_metadata.publisher):
            update_values.append(newmi.publisher)
            set_clause.append('Publisher')

        if not (newmi.pubdate == kobo_metadata.pubdate):
            pubdate_string = strftime(self.TIMESTAMP_STRING, newmi.pubdate) if newmi.pubdate else None
            update_values.append(pubdate_string)
            set_clause.append('DateCreated')

        if not (newmi.comments == kobo_metadata.comments):
            update_values.append(newmi.comments)
            set_clause.append('Description')

        if not (newmi.isbn == kobo_metadata.isbn):
            update_values.append(newmi.isbn)
            set_clause.append('ISBN')

        library_language = normalize_languages(kobo_metadata.languages, newmi.languages)
        library_language = library_language[0] if library_language is not None and len(library_language) > 0 else None
        if not (library_language == kobo_metadata.language):
            update_values.append(library_language)
            set_clause.append('Language')

        if self.update_subtitle:
            if self.subtitle_template is None or self.subtitle_template == '':
                new_subtitle = None
            else:
                new_subtitle = book.subtitle if len(book.subtitle.strip()) else None
                if new_subtitle is not None and new_subtitle.startswith("PLUGBOARD TEMPLATE ERROR"):
                    debug_print("KoboTouch:set_core_metadata subtitle template error - self.subtitle_template='%s'" % self.subtitle_template)
                    debug_print("KoboTouch:set_core_metadata - new_subtitle=", new_subtitle)

            if (new_subtitle is not None and (book.kobo_subtitle is None or book.subtitle != book.kobo_subtitle)) or \
                (new_subtitle is None and book.kobo_subtitle is not None):
                update_values.append(new_subtitle)
                set_clause.append('Subtitle')

        if self.update_bookstats:
            if self.bookstats_pagecount_template is not None:
                current_bookstats_pagecount = book.kobo_bookstats.get('StorePages', None)
                generate_update_from_template(book, update_values, set_clause,
                                              column_name='StorePages',
                                              template=self.bookstats_pagecount_template,
                                              new_value=book.bookstats_pagecount,
                                              current_value=current_bookstats_pagecount
                                              )
            if self.bookstats_wordcount_template is not None:
                current_bookstats_wordcount = book.kobo_bookstats.get('StoreWordCount', None)
                generate_update_from_template(book, update_values, set_clause,
                                              column_name='StoreWordCount',
                                              template=self.bookstats_wordcount_template,
                                              new_value=book.bookstats_wordcount,
                                              current_value=current_bookstats_wordcount
                                              )
            if self.bookstats_timetoread_upper_template is not None:
                current_bookstats_timetoread_upper = book.kobo_bookstats.get('StoreTimeToReadUpperEstimate', None)
                generate_update_from_template(book, update_values, set_clause,
                                              column_name='StoreTimeToReadUpperEstimate',
                                              template=self.bookstats_timetoread_upper_template,
                                              new_value=book.bookstats_timetoread_upper,
                                              current_value=current_bookstats_timetoread_upper
                                              )
            if self.bookstats_timetoread_lower_template is not None:
                current_bookstats_timetoread_lower = book.kobo_bookstats.get('StoreTimeToReadLowerEstimate', None)
                generate_update_from_template(book, update_values, set_clause,
                                              column_name='StoreTimeToReadLowerEstimate',
                                              template=self.bookstats_timetoread_lower_template,
                                              new_value=book.bookstats_timetoread_lower,
                                              current_value=current_bookstats_timetoread_lower
                                              )

    if len(set_clause) > 0:
        update_query += ', '.join([col_name + ' = ?' for col_name in set_clause])
        changes_found = True
    if show_debug:
        debug_print('KoboTouch:set_core_metadata set_clause="%s"' % set_clause)
        debug_print('KoboTouch:set_core_metadata update_values="%s"' % update_values)
        # Fix: this line previously said update_values but printed update_query.
        debug_print('KoboTouch:set_core_metadata update_query="%s"' % update_query)

    if changes_found:
        update_query += ' WHERE ContentID = ? AND BookID IS NULL'
        update_values.append(book.contentID)
        cursor = connection.cursor()
        try:
            if show_debug:
                debug_print('KoboTouch:set_core_metadata - about to set - parameters:', update_values)
                debug_print('KoboTouch:set_core_metadata - about to set - update_query:', update_query)
            cursor.execute(update_query, update_values)
            self.core_metadata_set += 1
        except Exception:  # narrowed from bare 'except:'; the exception is re-raised below
            debug_print('    Database Exception: Unable to set the core metadata')
            debug_print(f'    Query was: {update_query}')
            debug_print(f'    Values were: {update_values}')
            raise
        finally:
            cursor.close()

    if show_debug:
        debug_print("KoboTouch:set_core_metadata - end")
@classmethod
def config_widget(cls):
    '''Return the configuration widget shown in calibre's device preferences dialog.'''
    # TODO: Cleanup the following
    cls.current_friendly_name = cls.gui_name

    # Imported lazily to avoid pulling Qt code in when no GUI is needed.
    from calibre.devices.kobo.kobotouch_config import KOBOTOUCHConfig
    return KOBOTOUCHConfig(cls.settings(), cls.FORMATS,
                           cls.SUPPORTS_SUB_DIRS, cls.MUST_READ_METADATA,
                           cls.SUPPORTS_USE_AUTHOR_SORT, cls.EXTRA_CUSTOMIZATION_MESSAGE,
                           cls, extra_customization_choices=cls.EXTRA_CUSTOMIZATION_CHOICES
                           )
@classmethod
def get_pref(cls, key):
    ''' Get the setting named key. First looks for a device specific setting.
    If that is not found looks for a device default and if that is not
    found uses the global default.

    Returns None for unknown keys (typically old extra_customization names).
    '''
    # debug_print("KoboTouch::get_prefs - key=", key, "cls=", cls)
    if not cls.opts:
        cls.opts = cls.settings()
    try:
        return getattr(cls.opts, key)
    # Fix: catch only the missing-attribute case instead of a bare 'except:',
    # which also hid KeyboardInterrupt and genuine programming errors.
    except AttributeError:
        debug_print("KoboTouch::get_prefs - probably an extra_customization:", key)
    return None
@classmethod
def save_settings(cls, config_widget):
    '''Persist the options edited in the config widget.'''
    # Drop the cached options; the next get_pref()/settings() call re-reads them.
    cls.opts = None
    config_widget.commit()
@classmethod
def save_template(cls):
    '''Return the configured save (filename) template for this device.'''
    opts = cls.settings()
    return opts.save_template
@classmethod
def _config(cls):
    '''Extend the parent driver's config object with the KoboTouch options.'''
    c = super()._config()

    # (option name, default value) pairs, registered in a single pass.
    option_defaults = (
        ('manage_collections', True),
        ('use_collections_columns', True),
        ('collections_columns', ''),
        ('use_collections_template', False),
        ('collections_template', ''),
        ('create_collections', False),
        ('delete_empty_collections', False),
        ('ignore_collections_names', ''),

        ('upload_covers', False),
        ('dithered_covers', False),
        ('keep_cover_aspect', False),
        ('upload_grayscale', False),
        ('letterbox_fs_covers', False),
        ('letterbox_fs_covers_color', DEFAULT_COVER_LETTERBOX_COLOR),
        ('png_covers', False),

        ('show_archived_books', False),
        ('show_previews', False),
        ('show_recommendations', False),

        ('update_series', True),
        ('force_series_id', False),
        ('update_core_metadata', False),
        ('update_purchased_kepubs', False),
        ('update_device_metadata', True),
        ('update_subtitle', False),
        ('subtitle_template', None),
        ('update_bookstats', False),
        ('bookstats_wordcount_template', None),
        ('bookstats_pagecount_template', None),
        ('bookstats_timetoread_upper_template', None),
        ('bookstats_timetoread_lower_template', None),

        ('modify_css', False),
        # Overriding the replace behaviour is how the driver has always worked.
        ('override_kobo_replace_existing', True),

        ('support_newer_firmware', False),
        ('debugging_title', ''),
        # Mainly for debugging purposes, but might use if need to migrate between versions.
        ('driver_version', ''),
    )
    for name, default in option_defaults:
        c.add_opt(name, default=default)

    return c
@classmethod
def settings(cls):
    '''Parse the driver options, migrate legacy settings if present,
    cache the result on the class and return it.'''
    parsed = cls._config().parse()
    if parsed.extra_customization:
        # Old-style list based settings: convert to the named options.
        parsed = cls.migrate_old_settings(parsed)
    cls.opts = parsed
    return parsed
# --- Device identification predicates ---------------------------------------
# Each helper reports whether the connected device is a particular model,
# either via its USB product id or (for newer devices that share a product
# id) via the trailing digits of the device model id.

def is2024Device(self):
    # The 2024 hardware generation shares the Libra Colour USB product id.
    return self.detected_device.idProduct in self.LIBRA_COLOR_PRODUCT_ID

def isColorDevice(self):
    # may be useful at some point
    return self.isClaraColor() or self.isLibraColor()

def isAura(self):
    return self.detected_device.idProduct in self.AURA_PRODUCT_ID

def isAuraEdition2(self):
    return self.detected_device.idProduct in self.AURA_EDITION2_PRODUCT_ID

def isAuraHD(self):
    return self.detected_device.idProduct in self.AURA_HD_PRODUCT_ID

def isAuraH2O(self):
    return self.detected_device.idProduct in self.AURA_H2O_PRODUCT_ID

def isAuraH2OEdition2(self):
    return self.detected_device.idProduct in self.AURA_H2O_EDITION2_PRODUCT_ID

def isAuraOne(self):
    return self.detected_device.idProduct in self.AURA_ONE_PRODUCT_ID

def isClaraHD(self):
    return self.detected_device.idProduct in self.CLARA_HD_PRODUCT_ID

def isClara2E(self):
    return self.detected_device.idProduct in self.CLARA_2E_PRODUCT_ID

def isClaraBW(self):
    # Model id suffix check first: the product id is shared with other 2024 devices.
    return self.device_model_id.endswith('391') or self.detected_device.idProduct in self.CLARA_BW_PRODUCT_ID

def isClaraColor(self):
    return self.device_model_id.endswith('393') or self.detected_device.idProduct in self.CLARA_COLOR_PRODUCT_ID

def isElipsa2E(self):
    return self.detected_device.idProduct in self.ELIPSA_2E_PRODUCT_ID

def isElipsa(self):
    return self.detected_device.idProduct in self.ELIPSA_PRODUCT_ID

def isForma(self):
    return self.detected_device.idProduct in self.FORMA_PRODUCT_ID

def isGlo(self):
    return self.detected_device.idProduct in self.GLO_PRODUCT_ID

def isGloHD(self):
    return self.detected_device.idProduct in self.GLO_HD_PRODUCT_ID

def isLibraH2O(self):
    return self.detected_device.idProduct in self.LIBRA_H2O_PRODUCT_ID

def isLibra2(self):
    return self.detected_device.idProduct in self.LIBRA2_PRODUCT_ID

def isLibraColor(self):
    # Identified purely by the model id suffix.
    return self.device_model_id.endswith('390')

def isMini(self):
    return self.detected_device.idProduct in self.MINI_PRODUCT_ID

def isNia(self):
    return self.detected_device.idProduct in self.NIA_PRODUCT_ID

def isSage(self):
    return self.detected_device.idProduct in self.SAGE_PRODUCT_ID

def isShine5(self):
    return self.device_model_id.endswith('691')

def isShineColor(self):
    return self.device_model_id.endswith('693')

def isTouch(self):
    return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID

def isTouch2(self):
    return self.detected_device.idProduct in self.TOUCH2_PRODUCT_ID

def isVisionColor(self):
    return self.device_model_id.endswith('690')

def isTolinoDevice(self):
    # Tolino models are detected only via the model id suffix helpers above.
    return self.isShine5() or self.isShineColor() or self.isVisionColor()
def cover_file_endings(self):
    '''
    Return the mapping of cover-file name endings appropriate for the
    detected device model, merged on top of the endings common to all
    Kobo devices (tolino devices use only their own single ending).
    '''
    # (predicate, endings) pairs, checked in order; first match wins.
    selectors = (
        (self.isAura, self.AURA_COVER_FILE_ENDINGS),
        (self.isAuraEdition2, self.GLO_COVER_FILE_ENDINGS),
        (self.isAuraHD, self.AURA_HD_COVER_FILE_ENDINGS),
        (self.isAuraH2O, self.AURA_H2O_COVER_FILE_ENDINGS),
        (self.isAuraH2OEdition2, self.AURA_HD_COVER_FILE_ENDINGS),
        (self.isAuraOne, self.AURA_ONE_COVER_FILE_ENDINGS),
        (self.isClaraHD, self.GLO_HD_COVER_FILE_ENDINGS),
        (self.isClara2E, self.GLO_HD_COVER_FILE_ENDINGS),
        (self.isClaraBW, self.GLO_HD_COVER_FILE_ENDINGS),
        (self.isClaraColor, self.GLO_HD_COVER_FILE_ENDINGS),
        (self.isElipsa, self.AURA_ONE_COVER_FILE_ENDINGS),
        (self.isElipsa2E, self.GLO_HD_COVER_FILE_ENDINGS),
        (self.isForma, self.FORMA_COVER_FILE_ENDINGS),
        (self.isGlo, self.GLO_COVER_FILE_ENDINGS),
        (self.isGloHD, self.GLO_HD_COVER_FILE_ENDINGS),
        (self.isLibraH2O, self.LIBRA_H2O_COVER_FILE_ENDINGS),
        (self.isLibra2, self.LIBRA_H2O_COVER_FILE_ENDINGS),
        (self.isLibraColor, self.LIBRA_H2O_COVER_FILE_ENDINGS),
        (self.isMini, self.LEGACY_COVER_FILE_ENDINGS),
        (self.isNia, self.GLO_COVER_FILE_ENDINGS),
        (self.isSage, self.FORMA_COVER_FILE_ENDINGS),
        (self.isShine5, self.TOLINO_SHINE_COVER_FILE_ENDINGS),
        (self.isShineColor, self.TOLINO_SHINE_COVER_FILE_ENDINGS),
        (self.isTouch, self.LEGACY_COVER_FILE_ENDINGS),
        (self.isTouch2, self.LEGACY_COVER_FILE_ENDINGS),
        (self.isVisionColor, self.TOLINO_VISION_COVER_FILE_ENDINGS),
    )
    for matches, endings in selectors:
        if matches():
            device_endings = endings
            break
    else:
        device_endings = self.LEGACY_COVER_FILE_ENDINGS

    # Merge on top of the common dictionary (c.f., https://stackoverflow.com/q/38987)
    # But the tolino devices have only one cover file ending.
    if self.isTolinoDevice():
        all_endings = device_endings.copy()
    else:
        all_endings = self.COMMON_COVER_FILE_ENDINGS.copy()
        all_endings.update(device_endings)
    return all_endings
def set_device_name(self):
    '''
    Work out the friendly name of the detected device, store it as the
    class gui_name and return it. Also initialises self.device_model_id
    (needed by the model-id based predicates) for the 2024 devices.
    '''
    self.device_model_id = ''
    if self.is2024Device():
        self.device_model_id = self.get_device_model_id()

    # (predicate, friendly name) pairs, checked in order; first match wins.
    model_names = (
        (self.isAura, 'Kobo Aura'),
        (self.isAuraEdition2, 'Kobo Aura Edition 2'),
        (self.isAuraHD, 'Kobo Aura HD'),
        (self.isAuraH2O, 'Kobo Aura H2O'),
        (self.isAuraH2OEdition2, 'Kobo Aura H2O Edition 2'),
        (self.isAuraOne, 'Kobo Aura ONE'),
        (self.isClaraHD, 'Kobo Clara HD'),
        (self.isClara2E, 'Kobo Clara 2E'),
        (self.isClaraBW, 'Kobo Clara BW'),
        (self.isClaraColor, 'Kobo Clara Colour'),
        (self.isElipsa, 'Kobo Elipsa'),
        (self.isElipsa2E, 'Kobo Elipsa 2E'),
        (self.isForma, 'Kobo Forma'),
        (self.isGlo, 'Kobo Glo'),
        (self.isGloHD, 'Kobo Glo HD'),
        (self.isLibraH2O, 'Kobo Libra H2O'),
        (self.isLibra2, 'Kobo Libra 2'),
        (self.isLibraColor, 'Kobo Libra Colour'),
        (self.isMini, 'Kobo Mini'),
        (self.isNia, 'Kobo Nia'),
        (self.isSage, 'Kobo Sage'),
        (self.isShine5, 'Tolino Shine 5'),
        (self.isShineColor, 'Tolino Shine Color'),
        (self.isTouch, 'Kobo Touch'),
        (self.isTouch2, 'Kobo Touch 2'),
        (self.isVisionColor, 'Tolino Vision Color'),
    )
    # Fall back to the current gui_name when no model matches.
    device_name = self.gui_name
    for matches, name in model_names:
        if matches():
            device_name = name
            break

    self.__class__.gui_name = device_name
    return device_name
@property
def manage_collections(self):
    # Master switch for all collection handling.
    return self.get_pref('manage_collections')

@property
def create_collections(self):
    # Collections are only created when managing them is enabled, the device
    # supports bookshelves, and at least one source (columns or template)
    # is configured.
    return self.manage_collections and self.supports_bookshelves and self.get_pref('create_collections') \
        and (len(self.collections_columns) > 0 or len(self.collections_template) > 0)

@property
def use_collections_columns(self):
    return self.get_pref('use_collections_columns') and self.manage_collections

@property
def collections_columns(self):
    # '' when the column source is disabled.
    return self.get_pref('collections_columns') if self.use_collections_columns else ''

@property
def use_collections_template(self):
    return self.get_pref('use_collections_template') and self.manage_collections

@property
def collections_template(self):
    # '' when the template source is disabled.
    return self.get_pref('collections_template') if self.use_collections_template else ''
def get_collections_attributes(self):
    '''Return the configured collections columns as a list of
    lower-cased, whitespace-stripped lookup names.'''
    raw = self.collections_columns
    if not raw:
        return []
    return [column.lower().strip() for column in raw.split(',')]
@property
def delete_empty_collections(self):
    return self.manage_collections and self.get_pref('delete_empty_collections')

@property
def ignore_collections_names(self):
    # Cache the list parsed from the comma separated option string on the
    # options object, so the split only happens once.
    if not hasattr(self.opts, '_ignore_collections_names'):
        icn = self.get_pref('ignore_collections_names')
        self.opts._ignore_collections_names = [x.strip() for x in icn.split(',')] if icn else []
    return self.opts._ignore_collections_names

@property
def create_bookshelves(self):
    # Only for backwards compatibility
    return self.manage_collections

@property
def delete_empty_shelves(self):
    # Only for backwards compatibility
    return self.delete_empty_collections
@property
def upload_covers(self):
    # Master switch for cover uploading; the options below are gated on it.
    return self.get_pref('upload_covers')

@property
def keep_cover_aspect(self):
    return self.upload_covers and self.get_pref('keep_cover_aspect')

@property
def upload_grayscale(self):
    return self.upload_covers and self.get_pref('upload_grayscale')

@property
def dithered_covers(self):
    # Dithering only applies to grayscale uploads.
    return self.upload_grayscale and self.get_pref('dithered_covers')

@property
def letterbox_fs_covers(self):
    # Letterboxing only makes sense when the aspect ratio is preserved.
    return self.keep_cover_aspect and self.get_pref('letterbox_fs_covers')

@property
def letterbox_fs_covers_color(self):
    return self.get_pref('letterbox_fs_covers_color')

@property
def png_covers(self):
    return self.upload_grayscale and self.get_pref('png_covers')
def modifying_epub(self):
    # EPUBs are only modified on upload when CSS modification is enabled.
    return self.modifying_css()

def modifying_css(self):
    return self.get_pref('modify_css')
@property
def override_kobo_replace_existing(self):
    return self.get_pref('override_kobo_replace_existing')

@property
def update_device_metadata(self):
    # Master switch for writing metadata into the device database.
    return self.get_pref('update_device_metadata')

@property
def update_series_details(self):
    return self.update_device_metadata and self.get_pref('update_series') and self.supports_series()

@property
def force_series_id(self):
    return self.update_device_metadata and self.get_pref('force_series_id') and self.supports_series()

@property
def update_subtitle(self):
    # Subtitle was added to the database at the same time as the series support.
    return self.update_device_metadata and self.supports_series() and self.get_pref('update_subtitle')

@property
def subtitle_template(self):
    # The stripped subtitle template, or None when subtitle updating is off.
    if not self.update_subtitle:
        return None
    subtitle_template = self.get_pref('subtitle_template')
    subtitle_template = subtitle_template.strip() if subtitle_template is not None else None
    return subtitle_template

@property
def update_bookstats(self):
    # Gated on the firmware/database versions that have the book stats columns.
    return self.update_device_metadata and self.supports_bookstats and self.get_pref('update_bookstats')

@property
def bookstats_wordcount_template(self):
    # Stripped template, or None when book stats updating is off.
    if not self.update_bookstats:
        return None
    bookstats_wordcount_template = self.get_pref('bookstats_wordcount_template')
    bookstats_wordcount_template = bookstats_wordcount_template.strip() if bookstats_wordcount_template is not None else None
    return bookstats_wordcount_template

@property
def bookstats_pagecount_template(self):
    # Stripped template, or None when book stats updating is off.
    if not self.update_bookstats:
        return None
    bookstats_pagecount_template = self.get_pref('bookstats_pagecount_template')
    bookstats_pagecount_template = bookstats_pagecount_template.strip() if bookstats_pagecount_template is not None else None
    return bookstats_pagecount_template

@property
def bookstats_timetoread_lower_template(self):
    # Stripped template, or None when book stats updating is off.
    if not self.update_bookstats:
        return None
    bookstats_timetoread_lower_template = self.get_pref('bookstats_timetoread_lower_template')
    bookstats_timetoread_lower_template = bookstats_timetoread_lower_template.strip() if bookstats_timetoread_lower_template is not None else None
    return bookstats_timetoread_lower_template

@property
def bookstats_timetoread_upper_template(self):
    # Stripped template, or None when book stats updating is off.
    if not self.update_bookstats:
        return None
    bookstats_timetoread_upper_template = self.get_pref('bookstats_timetoread_upper_template')
    bookstats_timetoread_upper_template = bookstats_timetoread_upper_template.strip() if bookstats_timetoread_upper_template is not None else None
    return bookstats_timetoread_upper_template

@property
def update_core_metadata(self):
    return self.update_device_metadata and self.get_pref('update_core_metadata')

@property
def update_purchased_kepubs(self):
    # Whether purchased (non sideloaded) kepubs may also have metadata updated.
    return self.update_device_metadata and self.get_pref('update_purchased_kepubs')
@classmethod
def get_debugging_title(cls):
    '''Return the title used to trigger extra debug output.

    Always a string: falsy preference values are normalised to '' so the
    settings are not reread on every call.
    '''
    return cls.get_pref('debugging_title') or ''
# --- Feature gates: each compares the device's database and/or firmware
# version against the minimum version that introduced the feature.

@property
def supports_bookshelves(self):
    return self.dbversion >= self.min_supported_dbversion

@property
def show_archived_books(self):
    return self.get_pref('show_archived_books')

@property
def show_previews(self):
    return self.get_pref('show_previews')

@property
def show_recommendations(self):
    return self.get_pref('show_recommendations')

@property
def read_metadata(self):
    return self.get_pref('read_metadata')

def supports_series(self):
    return self.dbversion >= self.min_dbversion_series

@property
def supports_bookstats(self):
    return self.fwversion >= self.min_fwversion_bookstats and self.dbversion >= self.min_dbversion_bookstats

@property
def supports_series_list(self):
    return self.dbversion >= self.min_dbversion_seriesid and self.fwversion >= self.min_fwversion_serieslist

@property
def supports_audiobooks(self):
    return self.fwversion >= self.min_fwversion_audiobooks

def supports_kobo_archive(self):
    return self.dbversion >= self.min_dbversion_archive

def supports_overdrive(self):
    return self.fwversion >= self.min_fwversion_overdrive

def supports_covers_on_sdcard(self):
    return self.dbversion >= self.min_dbversion_images_on_sdcard and self.fwversion >= self.min_fwversion_images_on_sdcard

def supports_images_tree(self):
    # Tolino devices never use the image directory tree.
    return self.fwversion >= self.min_fwversion_images_tree and not self.isTolinoDevice()

def has_externalid(self):
    return self.dbversion >= self.min_dbversion_externalid

def has_activity_table(self):
    return self.dbversion >= self.min_dbversion_activity
def modify_database_check(self, function):
    '''
    Return True when it is safe to modify the device database.

    Checks whether the database version is supported and, when it is not,
    whether the user has opted in to supporting the newer firmware. When
    unsupported and not overridden, raises UserFeedback to warn the user.
    '''
    # Checks to see whether the database version is supported
    # and whether the user has chosen to support the firmware version
    if self.dbversion > self.supported_dbversion or self.is_supported_fwversion:
        # Unsupported database
        if not self.get_pref('support_newer_firmware'):
            debug_print('The database has been upgraded past supported version')
            self.report_progress(1.0, _('Removing books from device...'))
            from calibre.devices.errors import UserFeedback
            raise UserFeedback(_("Kobo database version unsupported - See details"),
                _('Your Kobo is running an updated firmware/database version.'
                ' As calibre does not know about this updated firmware,'
                ' database editing is disabled, to prevent corruption.'
                ' You can still send books to your Kobo with calibre, '
                ' but deleting books and managing collections is disabled.'
                ' If you are willing to experiment and know how to reset'
                ' your Kobo to Factory defaults, you can override this'
                ' check by right clicking the device icon in calibre and'
                ' selecting "Configure this device" and then the'
                ' "Attempt to support newer firmware" option.'
                ' Doing so may require you to perform a Factory reset of'
                ' your Kobo.'
                ) +
                '\n\n' +
                _('Discussion of any new Kobo firmware can be found in the'
                ' Kobo forum at MobileRead. This is at %s.'
                ) % 'https://www.mobileread.com/forums/forumdisplay.php?f=223' + '\n' +
                (
                    '\nDevice database version: %s.'
                    '\nDevice firmware version: %s'
                ) % (self.dbversion, self.display_fwversion),
                UserFeedback.WARN
                )

            return False
        else:
            # The user chose to edit the database anyway
            return True
    else:
        # Supported database version
        return True
@property
def is_supported_fwversion(self):
    # NOTE: despite the name, this is True when the firmware is NEWER than
    # the latest version this driver supports (see modify_database_check).
    # Starting with firmware version 3.19.x, the last number appears to be is a
    # build number. It can be safely ignored when testing the firmware version.
    debug_print("KoboTouch::is_supported_fwversion - self.fwversion[:2]", self.fwversion[:2])
    return self.fwversion[:2] > self.max_supported_fwversion
@classmethod
def migrate_old_settings(cls, settings):
    '''
    Convert legacy positional ``extra_customization`` values into named
    preferences on *settings*.

    Older driver versions stored every option in a single ordered list;
    this maps each known index to its named setting and leaves only the
    remaining (subclass-specific) entries in
    ``settings.extra_customization``.
    '''
    debug_print("KoboTouch::migrate_old_settings - start")
    debug_print("KoboTouch::migrate_old_settings - settings.extra_customization=", settings.extra_customization)
    debug_print("KoboTouch::migrate_old_settings - For class=", cls.name)

    # Recreate the index each legacy option had in the old ordered list.
    count_options = 0
    OPT_COLLECTIONS = count_options
    count_options += 1
    OPT_CREATE_BOOKSHELVES = count_options
    count_options += 1
    OPT_DELETE_BOOKSHELVES = count_options
    count_options += 1
    OPT_UPLOAD_COVERS = count_options
    count_options += 1
    OPT_UPLOAD_GRAYSCALE_COVERS = count_options
    count_options += 1
    OPT_KEEP_COVER_ASPECT_RATIO = count_options
    count_options += 1
    OPT_SHOW_ARCHIVED_BOOK_RECORDS = count_options
    count_options += 1
    OPT_SHOW_PREVIEWS = count_options
    count_options += 1
    OPT_SHOW_RECOMMENDATIONS = count_options
    count_options += 1
    OPT_UPDATE_SERIES_DETAILS = count_options
    count_options += 1
    OPT_MODIFY_CSS = count_options
    count_options += 1
    OPT_SUPPORT_NEWER_FIRMWARE = count_options
    count_options += 1
    OPT_DEBUGGING_TITLE = count_options

    # Always migrate options if for the KoboTouch class.
    # For a subclass, only migrate the KoboTouch options if they haven't
    # already been migrated. This is based on the total number of options.
    if cls == KOBOTOUCH or len(settings.extra_customization) >= count_options:
        config = cls._config()
        debug_print("KoboTouch::migrate_old_settings - config.preferences=", config.preferences)
        debug_print("KoboTouch::migrate_old_settings - settings need to be migrated")
        settings.manage_collections = True
        settings.collections_columns = settings.extra_customization[OPT_COLLECTIONS]
        debug_print("KoboTouch::migrate_old_settings - settings.collections_columns=", settings.collections_columns)
        settings.create_collections = settings.extra_customization[OPT_CREATE_BOOKSHELVES]
        settings.delete_empty_collections = settings.extra_customization[OPT_DELETE_BOOKSHELVES]
        settings.upload_covers = settings.extra_customization[OPT_UPLOAD_COVERS]
        settings.keep_cover_aspect = settings.extra_customization[OPT_KEEP_COVER_ASPECT_RATIO]
        settings.upload_grayscale = settings.extra_customization[OPT_UPLOAD_GRAYSCALE_COVERS]
        settings.show_archived_books = settings.extra_customization[OPT_SHOW_ARCHIVED_BOOK_RECORDS]
        settings.show_previews = settings.extra_customization[OPT_SHOW_PREVIEWS]
        settings.show_recommendations = settings.extra_customization[OPT_SHOW_RECOMMENDATIONS]

        # If the configuration hasn't been changed for a long time, the last
        # few options will be out of sync. The last two options are always
        # the support newer firmware and the debugging title. Set series and
        # Modify CSS were the last two new options. The debugging title is a
        # string, so looking for that.
        start_subclass_extra_options = OPT_MODIFY_CSS
        debugging_title = ''
        if isinstance(settings.extra_customization[OPT_MODIFY_CSS], string_or_bytes):
            # Config predates both the update_series and modify_css options.
            debug_print("KoboTouch::migrate_old_settings - Don't have update_series option")
            settings.update_series = config.get_option('update_series').default
            settings.modify_css = config.get_option('modify_css').default
            settings.support_newer_firmware = settings.extra_customization[OPT_UPDATE_SERIES_DETAILS]
            debugging_title = settings.extra_customization[OPT_MODIFY_CSS]
            start_subclass_extra_options = OPT_MODIFY_CSS + 1
        elif isinstance(settings.extra_customization[OPT_SUPPORT_NEWER_FIRMWARE], string_or_bytes):
            # Config predates the modify_css option only.
            debug_print("KoboTouch::migrate_old_settings - Don't have modify_css option")
            settings.update_series = settings.extra_customization[OPT_UPDATE_SERIES_DETAILS]
            settings.modify_css = config.get_option('modify_css').default
            settings.support_newer_firmware = settings.extra_customization[OPT_MODIFY_CSS]
            debugging_title = settings.extra_customization[OPT_SUPPORT_NEWER_FIRMWARE]
            start_subclass_extra_options = OPT_SUPPORT_NEWER_FIRMWARE + 1
        else:
            debug_print("KoboTouch::migrate_old_settings - Have all options")
            settings.update_series = settings.extra_customization[OPT_UPDATE_SERIES_DETAILS]
            settings.modify_css = settings.extra_customization[OPT_MODIFY_CSS]
            settings.support_newer_firmware = settings.extra_customization[OPT_SUPPORT_NEWER_FIRMWARE]
            debugging_title = settings.extra_customization[OPT_DEBUGGING_TITLE]
            start_subclass_extra_options = OPT_DEBUGGING_TITLE + 1

        settings.debugging_title = debugging_title if isinstance(debugging_title, string_or_bytes) else ''
        settings.update_device_metadata = settings.update_series
        # Whatever is left belongs to the subclass.
        settings.extra_customization = settings.extra_customization[start_subclass_extra_options:]

    return settings
def is_debugging_title(self, title):
    '''Return True if *title* matches the user-configured debugging title.'''
    if not DEBUG:
        return False
    # debug_print("KoboTouch:is_debugging - title=", title)
    # This condition is only true when debugging_title is a falsy value
    # other than '' (i.e. None/unset); '' itself means "explicitly no
    # debugging title" and is left alone.
    if not self.debugging_title and not self.debugging_title == '':
        self.debugging_title = self.get_debugging_title()
    try:
        # Precedence is (non-empty-config and substring-match) or
        # empty-book-title; an empty title always counts as debugging.
        is_debugging = len(self.debugging_title) > 0 and title.lower().find(self.debugging_title.lower()) >= 0 or len(title) == 0
    except:
        # NOTE(review): bare except kept; title may not be a string here.
        debug_print(("KoboTouch::is_debugging_title - Exception checking debugging title for title '{}'.").format(title))
        is_debugging = False

    return is_debugging
def dump_bookshelves(self, connection):
    '''
    Debug helper: print every row of the Shelf and ShelfContent tables.

    Note the deliberate ``and False`` in the guard below: this routine is
    permanently disabled and must be edited to re-enable it while
    debugging shelf handling.
    '''
    if not (DEBUG and self.supports_bookshelves and False):
        return

    debug_print('KoboTouch:dump_bookshelves - start')
    shelf_query = 'SELECT * FROM Shelf'
    shelfcontent_query = 'SELECT * FROM ShelfContent'
    placeholder = '%s'

    cursor = connection.cursor()

    prints('\nBookshelves on device:')
    cursor.execute(shelf_query)
    i = 0
    for row in cursor:
        # Build one '%s, %s, ...' placeholder per column for printing.
        placeholders = ', '.join(placeholder for unused in row)
        prints(placeholders%row)
        i += 1
    if i == 0:
        prints("No shelves found!!")
    else:
        prints("Number of shelves=%d"%i)

    prints('\nBooks on shelves on device:')
    cursor.execute(shelfcontent_query)
    i = 0
    for row in cursor:
        placeholders = ', '.join(placeholder for unused in row)
        prints(placeholders%row)
        i += 1
    if i == 0:
        prints("No books are on any shelves!!")
    else:
        prints("Number of shelved books=%d"%i)

    cursor.close()
    debug_print('KoboTouch:dump_bookshelves - end')
def __str__(self, *args, **kwargs):
    '''Readable summary of the driver name and its current preferences.'''
    pref_parts = []
    for pref in self._config().preferences:
        pref_parts.append(f'{pref.name}: {self.get_pref(pref.name)}')
    options = ', '.join(pref_parts)
    return f"Driver:{self.name}, Options - {options}"
if __name__ == '__main__':
    # Manual smoke test: start the driver, scan for devices and print what
    # was found. The device open/detect steps are left commented out so the
    # script can run without real hardware attached.
    dev = KOBOTOUCH(None)
    dev.startup()
    try:
        dev.initialize()
        from calibre.devices.scanner import DeviceScanner
        scanner = DeviceScanner()
        scanner.scan()
        devs = scanner.devices
        # debug_print("unit test: devs.__class__=", devs.__class__)
        # debug_print("unit test: devs.__class__=", devs.__class__.__name__)
        debug_print("unit test: devs=", devs)
        debug_print("unit test: dev=", dev)
        # cd = dev.detect_managed_devices(devs)
        # if cd is None:
        #     raise ValueError('Failed to detect KOBOTOUCH device')
        dev.set_progress_reporter(prints)
        # dev.open(cd, None)
        # dev.filesystem_cache.dump()
        print('Prefix for main memory:', dev.dbversion)
    finally:
        # Always shut the driver down, even if initialization failed.
        dev.shutdown()
| 211,055 | Python | .py | 3,647 | 43.275569 | 160 | 0.577482 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,737 | driver.py | kovidgoyal_calibre/src/calibre/devices/binatone/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for the Binatone Readme
'''
from calibre.devices.usbms.driver import USBMS
class README(USBMS):

    '''Driver for the Binatone Readme e-book reader.'''

    name = 'Binatone Readme Device Interface'
    gui_name = 'Binatone Readme'
    description = _('Communicate with the Binatone Readme e-book reader.')
    author = 'John Schember'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    # Be sure these have an entry in calibre.devices.mime
    FORMATS = ['txt']

    VENDOR_ID = [0x04fc]
    PRODUCT_ID = [0x5563]
    BCD = [0x0100]

    VENDOR_NAME = ''
    WINDOWS_MAIN_MEM = 'MASS_STORAGE'
    WINDOWS_CARD_A_MEM = 'MASS_STORAGE'

    MAIN_MEMORY_VOLUME_LABEL = 'Readme Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Readme Storage Card'

    SUPPORTS_SUB_DIRS = True

    def linux_swap_drives(self, drives):
        '''Swap the first two detected drives (main memory and card are
        reported in the opposite order on linux).'''
        if len(drives) < 2:
            return drives
        drives = list(drives)
        # Idiomatic tuple swap instead of a temp variable.
        drives[0], drives[1] = drives[1], drives[0]
        return tuple(drives)

    def windows_sort_drives(self, drives):
        '''Swap the 'main' and 'carda' entries so drive letters map to the
        correct storage locations on Windows.'''
        if len(drives) < 2:
            return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'], drives['carda'] = carda, main
        return drives
| 1,486 | Python | .py | 42 | 29.047619 | 77 | 0.611732 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,738 | driver.py | kovidgoyal_calibre/src/calibre/devices/nook/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Barnes & Noble's Nook
'''
import io
import os
from calibre import fsync, prints
from calibre.constants import DEBUG
from calibre.devices.usbms.driver import USBMS
from calibre.utils.resources import get_image_path as I
class NOOK(USBMS):

    name = 'Nook Device Interface'
    gui_name = _('The Nook')
    description = _('Communicate with the Nook e-book reader.')
    author = 'John Schember'
    icon = 'devices/nook.png'
    supported_platforms = ['windows', 'linux', 'osx']

    # Ordered list of supported formats
    FORMATS = ['epub', 'pdb', 'pdf']

    VENDOR_ID = [0x2080, 0x18d1]  # 0x18d1 is for softrooted nook
    PRODUCT_ID = [0x001]
    BCD = [0x322]

    VENDOR_NAME = 'B&N'
    WINDOWS_MAIN_MEM = 'NOOK'
    WINDOWS_CARD_A_MEM = 'NOOK'

    OSX_MAIN_MEM = 'B&N nook Media'
    OSX_CARD_A_MEM = OSX_MAIN_MEM

    MAIN_MEMORY_VOLUME_LABEL = 'Nook Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Nook Storage Card'

    EBOOK_DIR_MAIN = 'my documents'
    THUMBNAIL_HEIGHT = 144
    DELETE_EXTS = ['.jpg']
    SUPPORTS_SUB_DIRS = True

    def upload_cover(self, path, filename, metadata, filepath):
        '''
        Write a 96x144 JPEG cover next to the uploaded book.

        If the metadata carries a thumbnail it is used as-is; otherwise a
        placeholder is composed from library.png with the title and
        author(s) drawn on it.
        '''
        from PIL import Image, ImageDraw

        coverdata = getattr(metadata, 'thumbnail', None)
        if coverdata and coverdata[2]:
            # thumbnail is a (width, height, data) tuple; use the raw data.
            cover = Image.open(io.BytesIO(coverdata[2]))
        else:
            # BUGFIX: this compositing belongs in the no-thumbnail branch
            # (previously dedented, so the tuple above reached BytesIO),
            # and the paste box must use integer division - PIL rejects
            # float coordinates. Also close the placeholder image file.
            with open(I('library.png'), 'rb') as f:
                coverdata = f.read()
            cover = Image.new('RGB', (96, 144), 'black')
            im = Image.open(io.BytesIO(coverdata))
            im.thumbnail((96, 144), Image.Resampling.LANCZOS)
            x, y = im.size
            cover.paste(im, ((96-x)//2, (144-y)//2))
            draw = ImageDraw.Draw(cover)
            # NOTE(review): metadata.get('authors') may return a list -
            # confirm whether it needs joining before encode().
            draw.text((1, 15), metadata.get('title', _('Unknown')).encode('ascii', 'ignore'))
            draw.text((1, 115), metadata.get('authors', _('Unknown')).encode('ascii', 'ignore'))

        data = io.BytesIO()
        cover.save(data, 'JPEG')
        coverdata = data.getvalue()
        with open('%s.jpg' % os.path.join(path, filename), 'wb') as coverfile:
            coverfile.write(coverdata)
            fsync(coverfile)

    def sanitize_path_components(self, components):
        # The device chokes on '#' and '%' in file names.
        return [x.replace('#', '_').replace('%', '_') for x in components]
class NOOK_COLOR(NOOK):
    name = 'Nook Color Device Interface'
    description = _('Communicate with the Nook Color, TSR, Glowlight and Tablet e-book readers.')

    PRODUCT_ID = [
        0x002, 0x003, 0x004,
        0x005,  # Nook HD+
        0x007,  # Glowlight from 2013
        # 0xa,  # Glowlight from 2016 is MTP based device
        0xb,  # Glowlight from 2017
        0xc,  # Glowlight from 2019
        0xd,  # Glowlight from 2021
        0xe,  # Glowlight from 2022
        # 0xf,  # Glowlight from 2023 is MTP based device
    ]
    BCD = [0x216, 0x9999, 0x409, 0x440]

    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EBOOK_DISK', 'NOOK_TABLET',
            'NOOK_SIMPLETOUCH', 'NOOK_GLOWLIGHT']
    EBOOK_DIR_MAIN = 'My Files'
    SCAN_FROM_ROOT = True
    NEWS_IN_FOLDER = False

    def upload_cover(self, path, filename, metadata, filepath):
        # Cover upload is deliberately a no-op on these models.
        pass

    def post_open_callback(self):
        # Newer Glowlight models keep books under a NOOK/ folder rather
        # than the top-level 'My Files'; choose by product id.
        product_id = self.device_being_opened[1]
        if DEBUG:
            prints('Opened NOOK with product id:', product_id)
        if product_id >= 0xb:
            self.EBOOK_DIR_MAIN = 'NOOK/Books' if product_id >= 0xd else 'NOOK/My Files'
            if DEBUG:
                prints(f'Setting Nook upload directory to {self.EBOOK_DIR_MAIN}')
            try:
                os.makedirs(os.path.join(self._main_prefix, *self.EBOOK_DIR_MAIN.split('/')), exist_ok=True)
            except OSError:
                # Could not create the folder; fall back to the bare NOOK dir.
                self.EBOOK_DIR_MAIN = 'NOOK'

    def get_carda_ebook_dir(self, for_upload=False):
        # Uploads to the card use the same folder layout as main memory;
        # scanning uses the card root.
        if for_upload:
            return self.EBOOK_DIR_MAIN
        return ''

    def create_upload_path(self, path, mdata, fname, create_dirs=True):
        # Periodicals go into Magazines/, everything else into Books/.
        is_news = mdata.tags and _('News') in mdata.tags
        subdir = 'Magazines' if is_news else 'Books'
        path = os.path.join(path, subdir)
        return USBMS.create_upload_path(self, path, mdata, fname,
                create_dirs=create_dirs)
| 4,408 | Python | .py | 102 | 35.431373 | 108 | 0.604956 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,739 | __init__.py | kovidgoyal_calibre/src/calibre/devices/paladin/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,740 | driver.py | kovidgoyal_calibre/src/calibre/devices/paladin/driver.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
'''
Device driver for the Paladin devices
'''
import os
import sys
import time
from contextlib import closing
from calibre.devices.errors import DeviceError
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.books import BookList, CollectionsBookList
from calibre.devices.usbms.driver import USBMS
from calibre.prints import debug_print
DBPATH = 'paladin/database/books.db'
class ImageWrapper:
    '''Minimal holder for the path of a cover image on the device.'''

    def __init__(self, image_path):
        # Consumers read this attribute directly.
        self.image_path = image_path
class PALADIN(USBMS):

    '''Driver for Paladin e-book readers (sqlite-backed device library).'''

    name = 'Paladin Device Interface'
    gui_name = 'Paladin eLibrary'
    description = _('Communicate with the Paladin readers')
    author = 'David Hobley'
    supported_platforms = ['windows', 'osx', 'linux']
    path_sep = '/'
    booklist_class = CollectionsBookList

    FORMATS = ['epub', 'pdf']
    CAN_SET_METADATA = ['collections']
    CAN_DO_DEVICE_DB_PLUGBOARD = True

    VENDOR_ID = [0x2207]  #: Onyx Vendor Id (currently)
    PRODUCT_ID = [0x0010]
    BCD = None

    SUPPORTS_SUB_DIRS = True
    SUPPORTS_USE_AUTHOR_SORT = True
    MUST_READ_METADATA = True
    EBOOK_DIR_MAIN = 'paladin/books'

    EXTRA_CUSTOMIZATION_MESSAGE = [
        _(
            'Comma separated list of metadata fields '
            'to turn into collections on the device. Possibilities include: '
        ) + 'series, tags, authors',
    ]
    EXTRA_CUSTOMIZATION_DEFAULT = [
        ', '.join(['series', 'tags']),
    ]

    # Index into extra_customization for the collections field list.
    OPT_COLLECTIONS = 0

    # Plugboard state, populated via set_plugboards().
    plugboards = None
    plugboard_func = None

    # Clock offset (ms) between device and computer, detected in books().
    device_offset = None
def books(self, oncard=None, end_session=True):
    '''
    Return the list of books on the device (or on card A).

    File discovery is delegated to USBMS.books(); each book is then
    augmented with collection membership and thumbnail path read from the
    device's sqlite database. Also estimates the clock offset between the
    device and this computer from the books' added dates.
    '''
    import apsw
    dummy_bl = BookList(None, None, None)

    if (
            (oncard == 'carda' and not self._card_a_prefix) or
            (oncard and oncard != 'carda')
    ):
        # Asked for a card that is absent (or an unknown card name).
        self.report_progress(1.0, _('Getting list of books on device...'))
        return dummy_bl

    prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix

    # Let parent driver get the books
    self.booklist_class.rebuild_collections = self.rebuild_collections
    bl = USBMS.books(self, oncard=oncard, end_session=end_session)

    dbpath = self.normalize_path(prefix + DBPATH)
    debug_print("SQLite DB Path: " + dbpath)

    with closing(apsw.Connection(dbpath)) as connection:
        cursor = connection.cursor()

        # Query collections
        query = '''
            SELECT books._id, tags.tagname
                FROM booktags
                LEFT OUTER JOIN books
                LEFT OUTER JOIN tags
                WHERE booktags.book_id = books._id AND
                booktags.tag_id = tags._id
            '''
        cursor.execute(query)

        bl_collections = {}
        for i, row in enumerate(cursor):
            bl_collections.setdefault(row[0], [])
            bl_collections[row[0]].append(row[1])

        # collect information on offsets, but assume any
        # offset we already calculated is correct
        if self.device_offset is None:
            query = 'SELECT filename, addeddate FROM books'
            cursor.execute(query)

            time_offsets = {}
            for i, row in enumerate(cursor):
                try:
                    comp_date = int(os.path.getmtime(self.normalize_path(prefix + row[0])) * 1000)
                except (OSError, TypeError):
                    # In case the db has incorrect path info
                    continue
                device_date = int(row[1])
                offset = device_date - comp_date
                time_offsets.setdefault(offset, 0)
                time_offsets[offset] = time_offsets[offset] + 1

            try:
                # The most frequently seen offset is assumed to be real.
                device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
                debug_print("Device Offset: %d ms"%device_offset)
                self.device_offset = device_offset
            except ValueError:
                debug_print("No Books To Detect Device Offset.")

        for idx, book in enumerate(bl):
            query = 'SELECT _id, thumbnail FROM books WHERE filename = ?'
            t = (book.lpath,)
            cursor.execute(query, t)

            for i, row in enumerate(cursor):
                book.device_collections = bl_collections.get(row[0], None)
                thumbnail = row[1]
                if thumbnail is not None:
                    thumbnail = self.normalize_path(prefix + thumbnail)
                    book.thumbnail = ImageWrapper(thumbnail)

        cursor.close()

    return bl
def set_plugboards(self, plugboards, pb_func):
    '''Record the plugboard table and lookup function for later use when
    writing metadata to the device database.'''
    self.plugboard_func = pb_func
    self.plugboards = plugboards
def sync_booklists(self, booklists, end_session=True):
    '''Write both booklists into the device database, then let USBMS do
    its normal metadata-cache sync.'''
    debug_print('PALADIN: starting sync_booklists')

    opts = self.settings()
    raw = opts.extra_customization
    if raw:
        collections = [field.strip() for field in raw[self.OPT_COLLECTIONS].split(',')]
    else:
        collections = []
    debug_print('PALADIN: collection fields:', collections)

    main_bl, card_bl = booklists[0], booklists[1] if len(booklists) > 1 else None
    if main_bl is not None:
        self.update_device_database(main_bl, collections, None)
    if card_bl is not None:
        self.update_device_database(card_bl, collections, 'carda')

    USBMS.sync_booklists(self, booklists, end_session=end_session)
    debug_print('PALADIN: finished sync_booklists')
def update_device_database(self, booklist, collections_attributes, oncard):
    '''
    Push book and collection metadata from *booklist* into the device's
    sqlite database for the given storage location (None for main memory,
    'carda' for the SD card).
    '''
    import apsw

    debug_print('PALADIN: starting update_device_database')

    plugboard = None
    if self.plugboard_func:
        plugboard = self.plugboard_func(self.__class__.__name__,
                'device_db', self.plugboards)
        debug_print("PALADIN: Using Plugboard", plugboard)

    prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
    if prefix is None:
        # Reader has no sd card inserted
        return
    source_id = 1 if oncard == 'carda' else 0

    dbpath = self.normalize_path(prefix + DBPATH)
    debug_print("SQLite DB Path: " + dbpath)

    collections = booklist.get_collections(collections_attributes)

    with closing(apsw.Connection(dbpath)) as connection:
        self.remove_orphaned_records(connection, dbpath)
        self.update_device_books(connection, booklist, source_id,
                plugboard, dbpath)
        self.update_device_collections(connection, booklist, collections, source_id, dbpath)

    debug_print('PALADIN: finished update_device_database')
def remove_orphaned_records(self, connection, dbpath):
    '''Delete booktags rows that reference a missing book or tag; any
    database error is reported as a corrupted-database DeviceError.'''
    try:
        cursor = connection.cursor()

        debug_print("Removing Orphaned Collection Records")

        # Purge any collections references that point into the abyss
        query = 'DELETE FROM booktags WHERE book_id NOT IN (SELECT _id FROM books)'
        cursor.execute(query)
        query = 'DELETE FROM booktags WHERE tag_id NOT IN (SELECT _id FROM tags)'
        cursor.execute(query)

        debug_print("Removing Orphaned Book Records")

        cursor.close()
    except Exception:
        import traceback
        tb = traceback.format_exc()
        raise DeviceError((('The Paladin database is corrupted. '
                ' Delete the file %s on your reader and then disconnect '
                ' reconnect it. If you are using an SD card, you '
                ' should delete the file on the card as well. Note that '
                ' deleting this file will cause your reader to forget '
                ' any notes/highlights, etc.')%dbpath)+' Underlying error:'
                '\n'+tb)
def get_database_min_id(self, source_id):
    '''
    Lowest record id permitted for the given storage location.

    Card A (source_id == 1) ids start at 2**32 so they can never collide
    with ids used by main memory (source_id == 0).
    '''
    return 4294967296 if source_id == 1 else 0
def set_database_sequence_id(self, connection, table, sequence_id):
    '''Persist *sequence_id* as the AUTOINCREMENT counter for *table* in
    sqlite_sequence, creating the row if it does not exist yet.'''
    cursor = connection.cursor()

    # First try updating an existing row for this table...
    cursor.execute('UPDATE sqlite_sequence SET seq = ? WHERE name = ?',
            (sequence_id, table))
    # ...then insert one only when no row was present.
    cursor.execute('INSERT INTO sqlite_sequence (name, seq) '
                   'SELECT ?, ? '
                   'WHERE NOT EXISTS (SELECT 1 FROM sqlite_sequence WHERE name = ?)',
            (table, sequence_id, table))

    cursor.close()
def read_device_books(self, connection, source_id, dbpath):
    '''
    Return a mapping of book filename -> book id from the device database.

    Ids must lie at or above a per-storage minimum (the card uses a high
    id range so it never collides with main memory). Any book whose id is
    below the minimum is renumbered, its booktags references are updated
    to follow, and the sqlite autoincrement counter is bumped past the
    highest id in use.
    '''
    sequence_min = self.get_database_min_id(source_id)
    sequence_max = sequence_min
    sequence_dirty = 0

    debug_print("Book Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
    try:
        cursor = connection.cursor()

        # Get existing books
        query = 'SELECT filename, _id FROM books'
        cursor.execute(query)
    except Exception:
        import traceback
        tb = traceback.format_exc()
        raise DeviceError((('The Paladin database is corrupted. '
                ' Delete the file %s on your reader and then disconnect '
                ' reconnect it. If you are using an SD card, you '
                ' should delete the file on the card as well. Note that '
                ' deleting this file will cause your reader to forget '
                ' any notes/highlights, etc.')%dbpath)+' Underlying error:'
                '\n'+tb)

    # Get the books themselves, but keep track of any that are less than the minimum.
    # Record what the max id being used is as well.
    db_books = {}
    for i, row in enumerate(cursor):
        if not hasattr(row[0], 'replace'):
            # Skip rows whose filename is not a string.
            continue
        lpath = row[0].replace('\\', '/')
        db_books[lpath] = row[1]
        if row[1] < sequence_min:
            sequence_dirty = 1
        else:
            sequence_max = max(sequence_max, row[1])

    # If the database is 'dirty', then we should fix up the Ids and the sequence number
    if sequence_dirty == 1:
        debug_print("Book Sequence Dirty for Source Id: %d"%source_id)
        sequence_max = sequence_max + 1
        for book, bookId in db_books.items():
            if bookId < sequence_min:
                # Record the new Id and write it to the DB
                db_books[book] = sequence_max
                sequence_max = sequence_max + 1

                # Fix the Books DB
                query = 'UPDATE books SET _id = ? WHERE filename = ?'
                t = (db_books[book], book,)
                cursor.execute(query, t)

                # Fix any references so that they point back to the right book.
                # BUGFIX: booktags references books via its book_id column;
                # the old code updated tag_id here, corrupting the links
                # between collections and their books.
                t = (db_books[book], bookId,)
                query = 'UPDATE booktags SET book_id = ? WHERE book_id = ?'
                cursor.execute(query, t)

        self.set_database_sequence_id(connection, 'books', sequence_max)
        debug_print("Book Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))

    cursor.close()
    return db_books
def update_device_books(self, connection, booklist, source_id, plugboard,
        dbpath):
    '''
    Insert or update a row in the device's books table for every book in
    *booklist*, and delete rows (plus their tags) for books that are no
    longer present on the device.
    '''
    from calibre.ebooks.metadata import authors_to_sort_string, authors_to_string
    from calibre.ebooks.metadata.meta import path_to_ext

    opts = self.settings()
    db_books = self.read_device_books(connection, source_id, dbpath)
    cursor = connection.cursor()

    for book in booklist:
        # Run through plugboard if needed
        if plugboard is not None:
            newmi = book.deepcopy_metadata()
            newmi.template_to_attribute(book, plugboard)
        else:
            newmi = book

        # Get Metadata We Want
        lpath = book.lpath
        try:
            if opts.use_author_sort:
                if newmi.author_sort:
                    author = newmi.author_sort
                else:
                    author = authors_to_sort_string(newmi.authors)
            else:
                author = authors_to_string(newmi.authors)
        except Exception:
            author = _('Unknown')
        title = newmi.title or _('Unknown')

        # Get modified date
        # If there was a detected offset, use that. Otherwise use UTC (same as Sony software)
        modified_date = os.path.getmtime(book.path) * 1000
        if self.device_offset is not None:
            modified_date = modified_date + self.device_offset

        if lpath not in db_books:
            query = '''
                INSERT INTO books
                (bookname, authorname, description, addeddate, seriesname, seriesorder, filename, mimetype)
                values (?,?,?,?,?,?,?,?)
                '''
            t = (title, author, book.get('comments', None), int(time.time() * 1000),
                    book.get('series', None), book.get('series_index', sys.maxsize), lpath,
                    book.mime or mime_type_ext(path_to_ext(lpath)))
            cursor.execute(query, t)
            book.bookId = connection.last_insert_rowid()
            debug_print('Inserted New Book: (%u) '%book.bookId + book.title)
        else:
            query = '''
                UPDATE books
                SET bookname = ?, authorname = ?, addeddate = ?
                WHERE filename = ?
                '''
            t = (title, author, modified_date, lpath)
            cursor.execute(query, t)
            book.bookId = db_books[lpath]
            # Mark this entry as still present so it is not deleted below.
            db_books[lpath] = None

    # Any entry still holding an id is a book no longer on the device:
    # remove its tag links and then the book row itself.
    for book, bookId in db_books.items():
        if bookId is not None:
            # Remove From Collections
            query = 'DELETE FROM tags WHERE _id in (select tag_id from booktags where book_id = ?)'
            t = (bookId,)
            cursor.execute(query, t)
            # Remove from Books
            query = 'DELETE FROM books where _id = ?'
            t = (bookId,)
            cursor.execute(query, t)
            debug_print('Deleted Book:' + book)

    cursor.close()
def read_device_collections(self, connection, source_id, dbpath):
    '''
    Return a mapping of tag name -> tag id from the device database.

    As with read_device_books(), ids below the per-storage minimum are
    renumbered (together with their booktags references), and then the
    booktags rowids themselves are renumbered the same way.
    '''
    sequence_min = self.get_database_min_id(source_id)
    sequence_max = sequence_min
    sequence_dirty = 0

    debug_print("Collection Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
    try:
        cursor = connection.cursor()

        # Get existing collections
        query = 'SELECT _id, tagname FROM tags'
        cursor.execute(query)
    except Exception:
        import traceback
        tb = traceback.format_exc()
        raise DeviceError((('The Paladin database is corrupted. '
                ' Delete the file %s on your reader and then disconnect '
                ' reconnect it. If you are using an SD card, you '
                ' should delete the file on the card as well. Note that '
                ' deleting this file will cause your reader to forget '
                ' any notes/highlights, etc.')%dbpath)+' Underlying error:'
                '\n'+tb)

    db_collections = {}
    for i, row in enumerate(cursor):
        db_collections[row[1]] = row[0]
        if row[0] < sequence_min:
            sequence_dirty = 1
        else:
            sequence_max = max(sequence_max, row[0])

    # If the database is 'dirty', then we should fix up the Ids and the sequence number
    if sequence_dirty == 1:
        debug_print("Collection Sequence Dirty for Source Id: %d"%source_id)
        sequence_max = sequence_max + 1
        for collection, collectionId in db_collections.items():
            if collectionId < sequence_min:
                # Record the new Id and write it to the DB
                db_collections[collection] = sequence_max
                sequence_max = sequence_max + 1

                # Fix the collection DB
                query = 'UPDATE tags SET _id = ? WHERE tagname = ?'
                t = (db_collections[collection], collection, )
                cursor.execute(query, t)

                # Fix any references in existing collections
                query = 'UPDATE booktags SET tag_id = ? WHERE tag_id = ?'
                t = (db_collections[collection], collectionId,)
                cursor.execute(query, t)

        self.set_database_sequence_id(connection, 'tags', sequence_max)
        debug_print("Collection Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))

    # Fix up the collections table now...
    sequence_dirty = 0
    sequence_max = sequence_min

    debug_print("Collections Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
    query = 'SELECT _id FROM booktags'
    cursor.execute(query)

    db_collection_pairs = []
    for i, row in enumerate(cursor):
        db_collection_pairs.append(row[0])
        if row[0] < sequence_min:
            sequence_dirty = 1
        else:
            sequence_max = max(sequence_max, row[0])

    if sequence_dirty == 1:
        debug_print("Collections Sequence Dirty for Source Id: %d"%source_id)
        sequence_max = sequence_max + 1
        for pairId in db_collection_pairs:
            if pairId < sequence_min:
                # Record the new Id and write it to the DB
                query = 'UPDATE booktags SET _id = ? WHERE _id = ?'
                t = (sequence_max, pairId,)
                cursor.execute(query, t)
                sequence_max = sequence_max + 1

        self.set_database_sequence_id(connection, 'booktags', sequence_max)
        debug_print("Collections Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))

    cursor.close()
    return db_collections
def update_device_collections(self, connection, booklist, collections,
        source_id, dbpath):
    '''
    Synchronise the device's tags/booktags tables with *collections*, a
    mapping of collection name -> books. Collections that are no longer
    wanted are removed entirely, as are stale book links.
    '''
    if collections:
        db_collections = self.read_device_collections(connection, source_id, dbpath)
        cursor = connection.cursor()

        for collection, books in collections.items():
            if collection not in db_collections:
                query = 'INSERT INTO tags (tagname) VALUES (?)'
                t = (collection,)
                cursor.execute(query, t)
                db_collections[collection] = connection.last_insert_rowid()
                debug_print('Inserted New Collection: (%u) '%db_collections[collection] + collection)

            # Get existing books in collection
            query = '''
                SELECT books.filename, book_id
                FROM booktags
                LEFT OUTER JOIN books
                WHERE tag_id = ? AND books._id = booktags.book_id
                '''
            t = (db_collections[collection],)
            cursor.execute(query, t)

            db_books = {}
            for i, row in enumerate(cursor):
                db_books[row[0]] = row[1]

            for idx, book in enumerate(books):
                if collection not in book.device_collections:
                    book.device_collections.append(collection)
                if db_books.get(book.lpath, None) is None:
                    query = '''
                        INSERT INTO booktags (tag_id, book_id) values (?,?)
                        '''
                    t = (db_collections[collection], book.bookId)
                    cursor.execute(query, t)
                    debug_print('Inserted Book Into Collection: ' +
                            book.title + ' -> ' + collection)
                # Mark this link as still wanted so it is kept below.
                db_books[book.lpath] = None

            # Any remaining id is a stale link: unlink the book.
            for bookPath, bookId in db_books.items():
                if bookId is not None:
                    query = ('DELETE FROM booktags '
                            'WHERE book_id = ? AND tag_id = ? ')
                    t = (bookId, db_collections[collection],)
                    cursor.execute(query, t)
                    debug_print('Deleted Book From Collection: ' + bookPath + ' -> ' + collection)

            # Mark the collection itself as still in use.
            db_collections[collection] = None

        # Collections still holding an id are no longer wanted: drop
        # their book links and then the collection rows themselves.
        for collection, collectionId in db_collections.items():
            if collectionId is not None:
                # Remove Books from Collection
                query = ('DELETE FROM booktags '
                        'WHERE tag_id = ?')
                t = (collectionId,)
                cursor.execute(query, t)
                # Remove Collection
                query = ('DELETE FROM tags '
                        'WHERE _id = ?')
                t = (collectionId,)
                cursor.execute(query, t)
                debug_print('Deleted Collection: ' + repr(collection))

        cursor.close()
def rebuild_collections(self, booklist, oncard):
    '''Regenerate the device collections for *booklist* from the fields
    the user configured in extra_customization.'''
    debug_print('PALADIN: starting rebuild_collections')

    opts = self.settings()
    raw = opts.extra_customization
    if raw:
        collections = [field.strip() for field in raw[self.OPT_COLLECTIONS].split(',')]
    else:
        collections = []
    debug_print('PALADIN: collection fields:', collections)

    self.update_device_database(booklist, collections, oncard)

    debug_print('PALADIN: finished rebuild_collections')
| 22,184 | Python | .py | 453 | 34.827815 | 107 | 0.553239 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,741 | __init__.py | kovidgoyal_calibre/src/calibre/devices/folder_device/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,742 | driver.py | kovidgoyal_calibre/src/calibre/devices/folder_device/driver.py | '''
Created on 15 May 2010
@author: charles
'''
import os
from calibre.devices.usbms.driver import USBMS, BookList
from calibre.ebooks import BOOK_EXTENSIONS
# This class is added to the standard device plugin chain, so that it can
# be configured. It has invalid vendor_id etc, so it will never match a
# device. The 'real' FOLDER_DEVICE will use the config from it.
class FOLDER_DEVICE_FOR_CONFIG(USBMS):

    '''Configuration-only stand-in for FOLDER_DEVICE; never matches hardware.'''

    name = 'Folder Device Interface'
    gui_name = 'Folder Device'
    description = _('Use an arbitrary folder as a device.')
    author = 'John Schember/Charles Haley'
    supported_platforms = ['windows', 'osx', 'linux']
    FORMATS = list(BOOK_EXTENSIONS) + ['ppt', 'pptx']
    # Deliberately invalid USB ids so this plugin can never match a real device.
    VENDOR_ID = [0xffff]
    PRODUCT_ID = [0xffff]
    BCD = [0xffff]
    DEVICE_PLUGBOARD_NAME = 'FOLDER_DEVICE'
    SUPPORTS_SUB_DIRS = True
class FOLDER_DEVICE(USBMS):

    '''Device driver that treats an ordinary folder as an e-book reader.

    The device is "connected" by constructing an instance with a folder
    path; it is never detected from the USB bus.
    '''

    type = _('Device interface')

    name = 'Folder Device Interface'
    gui_name = 'Folder Device'
    description = _('Use an arbitrary folder as a device.')
    author = 'John Schember/Charles Haley'
    supported_platforms = ['windows', 'osx', 'linux']
    FORMATS = FOLDER_DEVICE_FOR_CONFIG.FORMATS

    # Deliberately invalid USB ids: never matched by the USB scanner.
    VENDOR_ID = [0xffff]
    PRODUCT_ID = [0xffff]
    BCD = [0xffff]
    DEVICE_PLUGBOARD_NAME = 'FOLDER_DEVICE'

    THUMBNAIL_HEIGHT = 68  # Height for thumbnails on device

    CAN_SET_METADATA = ['title', 'authors']
    SUPPORTS_SUB_DIRS = True

    #: Icon for this device
    icon = 'devices/folder.png'
    METADATA_CACHE = '.metadata.calibre'
    DRIVEINFO = '.driveinfo.calibre'

    _main_prefix = ''
    _card_a_prefix = None
    _card_b_prefix = None

    is_connected = False

    def __init__(self, path):
        '''*path* must be an existing folder; it becomes main memory.'''
        if not os.path.isdir(path):
            raise OSError('Path is not a folder')
        path = USBMS.normalize_path(path)
        if path.endswith(os.sep):
            self._main_prefix = path
        else:
            self._main_prefix = path + os.sep
        self.booklist_class = BookList
        self.is_connected = True

    def reset(self, key='-1', log_packets=False, report_progress=None,
            detected_device=None):
        # Nothing to reset for a plain folder.
        pass

    def unmount_device(self):
        self._main_prefix = ''
        self.is_connected = False

    def is_usb_connected(self, devices_on_system, debug=False,
            only_presence=False):
        # Connection state is managed manually, not via USB scanning.
        return self.is_connected, self

    def open(self, connected_device, library_uuid):
        self.current_library_uuid = library_uuid
        if not self._main_prefix:
            return False
        return True

    def set_progress_reporter(self, report_progress):
        self.report_progress = report_progress

    def card_prefix(self, end_session=True):
        # A folder device has no memory cards.
        return (None, None)

    def eject(self):
        self.is_connected = False

    @classmethod
    def settings(cls):
        # BUGFIX(naming): the first parameter of a classmethod is the
        # class; it was previously misnamed 'self'.
        return FOLDER_DEVICE_FOR_CONFIG._config().parse()

    @classmethod
    def config_widget(cls):
        return FOLDER_DEVICE_FOR_CONFIG.config_widget()

    @classmethod
    def save_settings(cls, config_widget):
        return FOLDER_DEVICE_FOR_CONFIG.save_settings(config_widget)
27,743 | __init__.py | kovidgoyal_calibre/src/calibre/devices/hanlin/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2009, Tijmen Ruizendaal <tijmen at mybebook.com>'
| 92 | Python | .py | 2 | 45 | 66 | 0.666667 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,744 | driver.py | kovidgoyal_calibre/src/calibre/devices/hanlin/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Tijmen Ruizendaal <tijmen at mybebook.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Hanlin
'''
import re
from calibre.devices.usbms.driver import USBMS
class HANLINV3(USBMS):

    '''
    Driver for Hanlin V3 e-book readers.

    The platform hooks below reorder/swap the detected main-memory and
    storage-card drives, since the device can report them in either order.
    '''

    name = 'Hanlin V3 driver'
    gui_name = 'Hanlin V3'
    description = _('Communicate with Hanlin V3 e-book readers.')
    author = 'Tijmen Ruizendaal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'fb2', 'lit', 'prc', 'pdf', 'rtf', 'txt']

    # USB ids used to match the device.
    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0x8803, 0x6803]
    BCD = [0x312]

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = 'FILE-STOR_GADGET'
    WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'

    OSX_MAIN_MEM = 'Linux File-Stor Gadget Media'
    OSX_CARD_A_MEM = 'Linux File-Stor Gadget Media'

    MAIN_MEMORY_VOLUME_LABEL = 'Hanlin V3 Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Hanlin V3 Storage Card'

    SUPPORTS_SUB_DIRS = True

    def osx_sort_names(self, names):
        '''
        Normalize the macOS media names: if the entry detected as ``carda``
        carries a higher trailing number than ``main``, swap the two; if
        only a card was detected, promote it to ``main``.
        '''
        main = names.get('main', None)
        card = names.get('carda', None)

        # A bare ``except:`` here would also swallow KeyboardInterrupt and
        # SystemExit; catch only what extracting the number can raise
        # (no digits found, non-string name).
        try:
            main_num = int(re.findall(r'\d+', main)[0]) if main else None
        except (IndexError, ValueError, TypeError):
            main_num = None
        try:
            card_num = int(re.findall(r'\d+', card)[0]) if card else None
        except (IndexError, ValueError, TypeError):
            card_num = None

        if card_num is not None and main_num is not None and card_num > main_num:
            names['main'] = card
            names['carda'] = main

        if card and not main:
            names['main'] = card
            names['carda'] = None

        return names

    def linux_swap_drives(self, drives):
        '''Swap the first two detected drives (main memory <-> card).'''
        if len(drives) < 2 or not drives[0] or not drives[1]:
            return drives
        drives = list(drives)
        drives[0], drives[1] = drives[1], drives[0]
        return tuple(drives)

    def windows_sort_drives(self, drives):
        '''On Windows, swap main and carda when both were detected.'''
        if len(drives) < 2:
            return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'] = carda
            drives['carda'] = main
        return drives
class SPECTRA(HANLINV3):
    # Variant of the Hanlin V3 driver for the Spectra reader: same drive
    # detection/sorting behaviour (inherited from HANLINV3), but a
    # different USB product id and a wider list of supported formats.
    name = 'Spectra'
    gui_name = 'Spectra'
    # USB product id; vendor id and BCD are inherited from HANLINV3.
    PRODUCT_ID = [0xa4a5]
    # Ordered list of supported formats.
    FORMATS = ['epub', 'mobi', 'fb2', 'lit', 'prc', 'chm', 'djvu', 'pdf', 'rtf', 'txt']
    SUPPORTS_SUB_DIRS = True
class HANLINV5(HANLINV3):
    # Driver for the Hanlin V5: inherits all behaviour from HANLINV3 and
    # only overrides identification strings, USB ids and volume labels.
    name = 'Hanlin V5 driver'
    gui_name = 'Hanlin V5'
    description = _('Communicate with Hanlin V5 e-book readers.')
    # USB ids used to match the device.
    VENDOR_ID = [0x0492]
    PRODUCT_ID = [0x8813]
    BCD = [0x319]
    # macOS media names for the internal memory and the storage card.
    OSX_MAIN_MEM = 'Hanlin V5 Internal Memory'
    OSX_CARD_MEM = 'Hanlin V5 Storage Card'
    MAIN_MEMORY_VOLUME_LABEL = 'Hanlin V5 Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Hanlin V5 Storage Card'
    # Command used to force-unmount the device on macOS.
    OSX_EJECT_COMMAND = ['diskutil', 'unmount', 'force']
class BOOX(HANLINV3):

    '''
    Driver for the BOOX e-book reader. Builds on HANLINV3 but uses its own
    USB ids and lets the user configure the destination folder list via
    the extra-customization setting.
    '''

    name = 'BOOX driver'
    gui_name = 'BOOX'
    description = _('Communicate with the BOOX e-book reader.')
    author = 'Jesus Manuel Marinho Valcarce'
    supported_platforms = ['windows', 'osx', 'linux']
    METADATA_CACHE = '.metadata.calibre'
    DRIVEINFO = '.driveinfo.calibre'
    icon = 'devices/boox.png'

    # Ordered list of supported formats
    FORMATS = ['epub', 'fb2', 'djvu', 'pdf', 'html', 'txt', 'rtf', 'mobi',
               'prc', 'chm', 'doc']

    # USB ids used to match the device.
    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0xa4a5]
    BCD = [0x322, 0x323, 0x326]

    MAIN_MEMORY_VOLUME_LABEL = 'BOOX Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'BOOX Storage Card'

    EBOOK_DIR_MAIN = ['MyBooks']
    EXTRA_CUSTOMIZATION_MESSAGE = _('Comma separated list of folders to '
            'send e-books to on the device. The first one that exists will '
            'be used.')
    EXTRA_CUSTOMIZATION_DEFAULT = ', '.join(EBOOK_DIR_MAIN)

    # EBOOK_DIR_CARD_A = 'MyBooks' ## Am quite sure we need this.

    def post_open_callback(self):
        # Let the user-configured folder list (comma separated) override
        # the class default; fall back to the default when unset.
        configured = self.settings().extra_customization
        if configured:
            folders = [entry.strip() for entry in configured.split(',')]
        else:
            folders = self.EBOOK_DIR_MAIN
        self.EBOOK_DIR_MAIN = folders

    def windows_sort_drives(self, drives):
        # The BOOX reports its drives in the expected order already.
        return drives

    def osx_sort_names(self, names):
        # No macOS-specific renaming needed for this device.
        return names

    def linux_swap_drives(self, drives):
        # No swapping needed on Linux for this device.
        return drives
| 4,549 | Python | .py | 117 | 31.752137 | 87 | 0.590661 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,745 | driver.py | kovidgoyal_calibre/src/calibre/devices/nuut2/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Device driver for the Nuut2
'''
from calibre.devices.usbms.driver import USBMS
class NUUT2(USBMS):
    # Driver for the NeoLux Nuut2 reader; plain USB mass-storage device
    # handled entirely by the generic USBMS machinery.
    name = 'Nuut2 Device Interface'
    gui_name = 'NeoLux Nuut2'
    description = _('Communicate with the Nuut2 e-book reader.')
    author = _('Kovid Goyal')
    supported_platforms = ['windows', 'osx', 'linux']
    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf', 'txt']
    # Formats that may carry DRM on this device.
    DRM_FORMATS = ['epub']
    # USB ids used to match the device.
    VENDOR_ID = [0x140e]
    PRODUCT_ID = [0xb055]
    BCD = [0x318]
    VENDOR_NAME = 'NEOLUX'
    WINDOWS_MAIN_MEM = 'NUUT2'
    OSX_MAIN_MEM = 'NEXTPPRS MASS STORAGE Media'
    MAIN_MEMORY_VOLUME_LABEL = 'NUUT2 Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'NUUT2 Storage Card'
    # Books are sent into this folder on the device.
    EBOOK_DIR_MAIN = 'books'
    SUPPORTS_SUB_DIRS = True
| 941 | Python | .py | 26 | 32 | 67 | 0.630531 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,746 | t4b.py | kovidgoyal_calibre/src/calibre/devices/cybook/t4b.py | __license__ = 'GPL v3'
__copyright__ = '2013, Jellby <jellby at yahoo.com>'
'''
Write a t4b file to disk.
'''
from io import BytesIO
DEFAULT_T4B_DATA = b'\x74\x34\x62\x70\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc4\x00\x16\xdf\xff\xff\xf7\x6d\xff\xff\xfd\x7a\xff\xff\xff\xe7\x77\x76\xff\xf6\x77\x77\x8d\xff\xff\xe7\x77\x78\xbf\xff\xff\xf7\x77\x77\x77\x7d\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe4\x03\x78\x61\x07\xff\xff\x90\x04\xff\xff\xfc\x05\xff\xff\xff\xd5\x30\x35\xff\xf0\x13\x32\x00\x5f\xff\xd0\x03\x32\x01\xbf\xff\xe0\x00\x00\x00\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x50\x8f\xff\xff\x75\xff\xff\x40\x30\xef\xff\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xf5\x0
d\xff\xd0\x4f\xff\xd0\x0f\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfd\x05\xff\xff\xff\xfe\xff\xfe\x03\xa0\x8f\xff\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xfb\x0b\xff\xd0\x4f\xff\xf7\x0d\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf6\x0e\xff\xff\xff\xff\xff\xfa\x0b\xe0\x3f\xff\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xf5\x0e\xff\xd0\x4f\xff\xf8\x0e\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf1\x1f\xff\xff\xff\xff\xff\xf2\x0f\xf3\x0d\xff\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x39\x88\x30\xaf\xff\xd0\x4f\xff\xf2\x1f\xff\xe0\x18\x88\x88\x8d\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf0\x3f\xff\xff\xff\xff\xff\xd0\x6f\xfc\x09\xff\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x01\x11\x00\x2c\xff\xd0\x3a\xa8\x20\xcf\xff\xe0\x00\x00\x00\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf0\x2f\xff\xff\xff\xff\xff\x60\xaf\xff\x11\xff\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xfd\x10\xef\xd0\x00\x00\x1e\xff\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf2\x0f\xff\xff\xff\xff\xfe\x20\x12\x22\x00\xcf\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xff\x90\x9f\xd0\x3d\xd8\x09\xff\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\x0b\xff\xff\xff\xff\xfc\x03\x88\x88\x60\x4f\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xff\xa0\x8f\xd0\x4f\xff\x40\xcf\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\xef\xff\xff\xfb\xf7\x0d\xff\xff\xf1\x1e\xfc\x06\xff\xff\xff\xff\xa0\x9f\xff\xf0\x6f\xff\xff\x60\xcf\xd0\x4f\xff\xf3\x0d\xff\xe0\x3f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x80\x2c\xff\xfa\x05\xf0\x2f\xff\xff\xf6\x0b\xfc\x03\x88\x88\x88\xff\xb0\xaf\xff\xf0\x5d\xcc\xa3\x05\xff\xd0\x4f\xff\xfe\x11\xef\xe0\x18\x88\x88\x8d\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\x10\x01\x00\x3b\xb0\x9f\xff\xff\xfd\x06\xfc\x00\x00\x00\x00\xd0\x00\x00\xff\xf0\x00\x00\x02\x7f\xff\xe1\x5f\xff\xff\xc0\x5f\xe1\x00\x00\x00\x0b\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xed\xa9\xbd\xff\xed\xff\xff\xff\xff\xde\xff\xdd\xdd\xdd\xdd\xfd\xdd\xdd\xff\xfd\xdd\xdd\xee\xff\xff\xfd\xef\xff\xff\xfe\xdf\xfd\xdd\xdd\xdd\xdf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xed\xdb\x86\x8e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xb7\x42\x00\x00\x0b\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfd\x00\x00\x00\x00\x09\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfd\x00\x00\x01\x11\x17\xef\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xee\xee\xee\xee\xee\xee\xee\xee\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\x22\x45\x78\x9b\x95\xef\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xff\xff\xff\xfe\xb8\x77\x78\x88\x88\x88\x87\x87\x89\xdf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\x57\x9a\xaa\xaa\x94\xdf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfb\x00\x00\x11\x11\x22\x12\x11\x11\x10\x7e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x76\xaa\xaa\xab\xa4\xcf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x33\x33\x33\x33\x32\x21\x6e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xa5\xaa\xa9\x99\xa5\xaf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x33\x44\x44\x44\x33\x21\x6d\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xb4\xaa\xa9\xa9\xb6\x8f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x23\x34\x44\x54\x55\x44\x44\x31\x6d\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc5\x9a\x99\x9a\xb8\x7e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x23\x34\x44\x44\x55\x45\x44\x31\x6d\xff\xff\xff\xff\xff\xff\xff\xff\xee\xdd\xdd\xee\xee\xc5\x9a\x88\x8a\xa9\x6e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x44\x44\x44\x44\x31\x6d\xff\xff\xff\xff\xff\xff\xff\xfe\xdc\xbb\xab\xcc\xca\x84\x8a\xa9\x99\xaa\x5d\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x44\x44\x44\x44\x31\x6d\xff\xff\xff\xff\xff\xff\xff\xed\xa9\x99\x78\x87\x78\x84\x7a\xaa\x89\x9a\x6c\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x23\x34\x44\x45\x55\x55\x44\x31\x6d\xff\xff\xff\xff\xff\xff\xff\xec\x98\x88\x76\x98\x88\x74\x7a\xaa\xa7\x9a\x6b\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x23\x33\x34\x44\x54\x44\x44\x31\x6d\xff\xff\xff\xff\xff\xff\xff\xdb\x98\x88\x76\x98\x88\x74\x5a\xaa\x89\x9b\x7a\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x45\x55\x54\x43\x31\x6d\xff\xf
f\xff\xff\xff\xff\xfe\xdb\x98\x88\x76\x98\x88\x84\x4a\xaa\x97\xbb\x79\xef\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x34\x44\x44\x44\x44\x31\x5b\xcc\xcc\xcc\xcc\xcc\xcc\xcb\xba\x98\x88\x76\x88\x88\x85\x39\xa9\x8a\xab\x97\xef\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x33\x34\x44\x44\x43\x31\x34\x44\x44\x55\x55\x55\x55\x44\x46\x98\x88\x76\x78\x88\x85\x28\xa8\x9a\xab\xa5\xef\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf9\x00\x11\x11\x11\x12\x22\x22\x11\x10\x00\x01\x22\x23\x33\x33\x33\x31\x11\x88\x88\x76\x68\x88\x85\x27\xa9\xa9\xab\xa5\xdf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x33\x33\x44\x43\x33\x21\x00\x12\x33\x44\x55\x55\x65\x42\x11\x78\x88\x76\x68\x88\x85\x36\xaa\xaa\xab\xb5\xcf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x34\x44\x54\x44\x44\x21\x01\x23\x44\x55\x66\x66\x66\x53\x21\x78\x88\x86\x68\x88\x85\x45\xaa\xaa\xbb\xb6\xaf\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x34\x44\x44\x44\x33\x21\x01\x23\x45\x56\x67\x77\x77\x54\x21\x78\x88\x86\x68\x88\x85\x54\xaa\xab\xbb\xb7\x8f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x44\x44\x44\x43\x21\x01\x23\x57\x8a\xaa\xaa\x99\x74\x21\x79\x88\x86\x68\x88\x75\x44\x9a\xab\xbb\xb9\x6e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x34\x44\x44\x55\x43\x21\x01\x24\x56\x78\x89\x99\x88\x74\x21\x69\x88\x87\x68\x88\x75\x53\x9a\xab\xbb\xba\x5e\xff\xff\xff\xff\xff\xed\xa7\x9e\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x34\x44\x54\x44\x21\x01\x23\x45\x56\x67\x77\x77\x64\x21\x69\x88\x87\x68\x88\x65\x53\x8a\xaa\xaa\xba\x5d\xff\xff\xff\xed\xb9\x75\x54\x5b\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x84\x66\x48\x54\x43\x21\x01\x23\x45\x56\x67\x77\x76\x64\x21\x6a\x88\x87\x68\x88\x66\x54\x7a\xaa\x9a\xbb\x6b\xff\xe
e\xb9\x66\x66\x78\x75\x69\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x64\x54\x47\x54\x43\x21\x01\x23\x45\x66\x77\x67\x67\x64\x21\x6a\x88\x87\x68\x88\x76\x54\x6a\xbb\xba\xbb\x79\xca\x75\x56\x77\x88\x88\x85\x68\xef\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x23\x33\xba\xba\xac\x44\x43\x21\x01\x23\x45\x56\x66\x66\x76\x64\x21\x6a\x88\x87\x67\x88\x66\x54\x5a\xbb\xa9\xbb\x83\x45\x67\x78\x64\x78\x88\x86\x66\xdf\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x87\x77\x79\x54\x43\x21\x01\x23\x45\x56\x66\x67\x77\x54\x21\x6a\x88\x87\x67\x88\x66\x54\x4a\xbb\xab\xbb\x93\x47\x88\x88\x66\x38\x88\x98\x66\xbf\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x44\x44\x54\x44\x21\x01\x23\x45\x56\x66\x67\x77\x64\x21\x6a\x88\x88\x66\x98\x66\x55\x3a\xab\xba\xab\xa4\x47\x88\x87\x68\x43\x89\x99\x57\x8f\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x32\x34\x3a\x44\x43\x21\x01\x23\x45\x56\x66\x67\x66\x64\x31\x69\x88\x88\x65\x98\x66\x55\x39\xbb\xba\xbc\xa4\x47\x88\x87\x77\x85\x37\xaa\x78\x7e\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\xad\x76\xc5\x44\x33\x21\x01\x23\x44\x56\x66\x77\x76\x54\x21\x69\x98\x88\x65\x98\x66\x65\x38\xbb\xb9\xbb\xb5\x46\x77\x86\x68\x87\x78\x9a\x87\x7c\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x84\x7b\x44\x44\x33\x21\x01\x23\x44\x56\x67\x76\x76\x63\x21\x79\x98\x88\x76\x87\x66\x65\x46\xaa\x99\xab\xb5\x45\x88\x76\x67\x77\x78\x9a\x96\x8a\xff\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x63\x54\x46\x54\x43\x21\x01\x23\x44\x55\x66\x67\x76\x64\x21\x79\x98\x88\x76\x77\x66\x65\x44\xab\xbb\xaa\xb7\x35\x88\x87\x77\x89\x99\x9a\x96\x89\xef\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\xbb\xba\xbc\x44\x43\x21\x01\x23\x44\x56\x67\x66\x76\x63\x21\x79\x98\x88\x76\x68\x66\x65\x53\xab\xba\xaa\xb8\x35\x78\x88\x89\x99\x99\x9a\xa7\x88\xef\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x54\x44\x45\x44\x34\x21\x01\x23\x44\x56\x66\x86\x66\x54\x21\x79\x98\x88\x76\x68\x66\x65\x63\x9a\xba\xab\xb9\x35\x68\x88\x99\x99\x9a\x9a\xa8\x77\xcf\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x3
3\x43\x43\x44\x43\x21\x01\x23\x44\x55\x66\x76\x66\x54\x21\x7a\x98\x88\x76\x68\x66\x65\x63\x8a\xab\xa9\xba\x35\x58\x88\x99\x99\x99\x9a\xa9\x67\xaf\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x69\x2a\xd9\x34\x33\x21\x01\x23\x44\x55\x67\x67\x66\x53\x21\x7a\x98\x88\x76\x67\x66\x66\x54\x7a\xa9\x9a\xbb\x44\x48\x99\x99\x89\x99\xaa\xaa\x68\x8e\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x97\x98\x47\x44\x34\x21\x01\x23\x44\x56\x66\x76\x66\x53\x21\x7a\x98\x88\x76\x67\x66\x66\x55\x6a\xbb\x9a\xbb\x54\x57\x99\x98\x79\x9a\xaa\xaa\x87\x7d\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x53\x44\x36\x54\x44\x21\x00\x23\x44\x55\x66\x66\x66\x53\x21\x7a\x98\x87\x76\x67\x66\x66\x55\x5a\xaa\xaa\xbb\x73\x66\x99\x66\x59\x77\xaa\xaa\x97\x8b\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\xcc\xcc\xcd\x44\x33\x21\x01\x23\x34\x55\x66\x66\x66\x53\x21\x7b\x98\x88\x76\x66\x66\x66\x65\x4a\xaa\xab\xab\x83\x66\x88\x55\x48\x84\x5a\xaa\xa6\x89\xff\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x44\x34\x45\x44\x33\x21\x01\x23\x44\x55\x56\x66\x66\x53\x21\x7b\x98\x88\x76\x66\x66\x66\x56\x49\x9a\xaa\x9b\x94\x55\x88\x66\x78\x86\x98\xaa\xa7\x88\xef\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x33\x33\x44\x44\x33\x21\x00\x22\x34\x55\x66\x66\x66\x53\x21\x8b\xa7\x88\x86\x66\x66\x66\x56\x38\xa7\x99\xaa\x95\x56\x79\x99\x97\x76\x96\xaa\xa8\x88\xdf\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x33\x76\x77\x78\x44\x33\x21\x01\x22\x34\x45\x54\x66\x66\x53\x21\x8b\xa7\x78\x86\x56\x66\x66\x55\x48\xaa\xaa\xaa\xa5\x56\x69\x99\x99\x85\x79\x7a\xa9\x68\xbf\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x33\xaa\xaa\xab\x34\x33\x21\x00\x23\x34\x45\x6b\x66\x66\x53\x21\x8b\xa8\x78\x76\x56\x76\x66\x65\x47\xaa\xa9\x9a\xa6\x66\x59\x99\x99\xa6\x6a\x6a\xaa\x68\x9f\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x43\x33\x33\x34\x33\x21\x00\x22\x34\x45\x68\x86\x66\x53\x21\x8c\xa8\x77\x76\x56\x76\x66\x66\x45\xaa\xa9\xab\xb6\x85\x58\x99\x99\x99\x6a\x69\xaa\x67\x7e\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x33\x34\x49\x75\x33\x21\x00\x22\x34\x45\x66\x66\x65\x53\x21\x8c\xa8\x77\x66\x55\x7
6\x66\x66\x54\xa9\x99\x9b\xa7\x76\x66\x99\x99\xa6\x69\x88\xaa\x87\x6c\xff\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x33\x33\x37\x34\x33\x21\x00\x12\x34\x45\x59\x86\x66\x53\x21\x8c\xb9\x77\x66\x55\x76\x66\x65\x63\x9a\xaa\xba\xb8\x67\x66\x99\x99\x97\x68\x96\xaa\xa6\x7a\xff\xff\xff\xff\xff\xff\xfa\x11\x22\x23\x54\x44\x46\x44\x43\x21\x00\x22\x34\x58\x59\xa5\x65\x53\x21\x8c\xb9\x77\x76\x56\x76\x66\x65\x63\x8a\xaa\xaa\xb9\x58\x65\x89\x99\x99\x86\xa3\xaa\xa6\x88\xef\xff\xff\xff\xff\xff\xfa\x11\x12\x23\xab\xbb\xbc\x33\x33\x21\x00\x12\x34\x46\x66\x66\x55\x43\x21\x8c\xb9\x77\x76\x56\x76\x66\x65\x64\x7a\xaa\x9a\xba\x49\x66\x89\x99\x99\x95\xa4\x5a\xa7\x87\xdf\xff\xff\xff\xff\xff\xfa\x11\x12\x22\x42\x33\x35\x44\x33\x21\x00\x12\x34\x48\x86\x85\x55\x43\x21\x8c\xb9\x67\x77\x56\x67\x66\x66\x65\x6a\xaa\x9a\xba\x4a\x67\x79\x99\x86\x76\x86\x27\xa8\x67\xcf\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x33\x33\x35\x33\x33\x21\x00\x12\x33\x54\x88\x56\x65\x43\x21\x8d\xba\x66\x77\x56\x57\x66\x66\x65\x5a\xab\xaa\xbb\x59\x76\x59\x99\x99\x97\x98\x24\xa9\x67\xaf\xff\xff\xff\xff\xff\xfa\x11\x12\x22\x21\x36\x9b\x33\x33\x21\x00\x12\x34\x45\x56\x76\x55\x43\x21\x8d\xca\x76\x77\x66\x57\x66\x65\x65\x5a\xaa\x9a\xbb\x78\x96\x58\x99\x99\x88\x9a\x44\xa9\x67\x8e\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x6a\xb7\x44\x43\x33\x21\x00\x12\x34\x44\x55\x85\x55\x43\x21\x8d\xca\x77\x67\x66\x67\x66\x66\x66\x49\xaa\xaa\xab\x87\xc5\x57\x98\x98\x87\x68\x85\x99\x76\x6d\xff\xff\xff\xff\xff\xfa\x11\x12\x23\x96\x33\x43\x33\x33\x21\x00\x12\x33\x44\x65\x55\x55\x43\x21\x8d\xca\x87\x76\x66\x67\x66\x66\x56\x59\xa9\x99\xab\x96\xc6\x65\x98\x89\x99\x68\x98\x99\x86\x6b\xff\xff\xff\xff\xff\xfa\x11\x12\x22\x44\x75\x23\x33\x33\x21\x00\x12\x33\x44\x6a\x65\x55\x43\x21\x8d\xca\x87\x76\x66\x67\x66\x66\x66\x58\xa8\x9a\x9b\xa5\xc9\x65\x88\x89\x99\x77\x98\x99\x95\x68\xff\xff\xff\xff\xff\xfa\x11\x12\x22\x22\x24\x79\x33\x33\x21\x00\x12\x34\x44\x56\x75\x55\x43\x21\x8d\xda\x87\x77\x66\x67\x66\x66\x66\x47\xa9\x9a\xbb\xa6\xcb\x65\x78\x88\x88\x85\x68\x99\x96\x66\xef\xf
f\xff\xff\xff\xfa\x11\x12\x23\x11\x33\x34\x33\x32\x21\x00\x12\x33\x44\x57\x65\x55\x43\x21\x8d\xda\x87\x77\x65\x76\x76\x66\x67\x56\xaa\xbb\xaa\xa6\xbc\x66\x68\x88\x88\x85\x88\x89\x97\x66\xcf\xff\xff\xff\xff\xfa\x11\x12\x23\x95\x23\x36\x33\x33\x21\x00\x12\x33\x44\x58\x65\x55\x43\x21\x8d\xda\x96\x77\x65\x77\x66\x66\x57\x55\xaa\xaa\xaa\xb6\xad\x66\x58\x88\x88\x85\x78\x88\x98\x66\xaf\xff\xff\xff\xff\xfa\x11\x12\x22\x52\x33\x35\x33\x33\x21\x00\x12\x33\x44\x55\x65\x55\x42\x21\x8e\xdb\x97\x67\x75\x77\x66\x66\x56\x64\xaa\xbb\xaa\xa7\x8e\x85\x58\x87\x88\x87\x88\x88\x88\x56\x8f\xff\xff\xff\xff\xfa\x11\x12\x22\x42\x23\x35\x43\x32\x21\x00\x12\x33\x44\x48\x85\x55\x43\x21\x8e\xdb\x97\x67\x75\x77\x66\x66\x66\x73\x9b\xaa\xaa\xb8\x6e\xb4\x57\x87\x87\x77\x78\x88\x88\x56\x7e\xff\xff\xff\xff\xfa\x11\x12\x22\x42\x33\x28\x33\x32\x21\x00\x12\x33\x44\x55\x55\x55\x43\x21\x8e\xdb\x97\x76\x76\x67\x76\x66\x66\x74\x8a\xba\xaa\xb9\x5d\xd5\x55\x77\x77\x77\x47\x88\x88\x65\x5d\xff\xff\xff\xff\xfa\x10\x12\x22\x63\x11\x78\x33\x32\x20\x00\x12\x33\x34\x47\x84\x55\x42\x21\x8e\xec\x97\x77\x66\x67\x87\x76\x66\x74\x7a\xaa\xaa\xba\x4d\xe7\x54\x77\x77\x77\x56\x87\x88\x74\x5a\xff\xff\xff\xff\xfa\x11\x12\x22\x28\x9a\x83\x33\x32\x20\x00\x12\x33\x44\x55\x55\x55\x42\x21\x9e\xec\x97\x77\x66\x67\x76\x66\x66\x76\x6a\xaa\x99\xba\x4b\xfa\x54\x67\x77\x77\x65\x77\x77\x84\x57\xef\xff\xff\xff\xfa\x11\x12\x22\x22\x33\x33\x33\x22\x10\x00\x12\x33\x33\x56\x45\x54\x42\x11\x9e\xec\x97\x77\x75\x67\x76\x66\x65\x67\x5a\xaa\x99\xba\x59\xfc\x54\x57\x77\x76\x65\x77\x77\x75\x55\xdf\xff\xff\xff\xfa\x11\x12\x22\x22\x33\x33\x33\x22\x20\x00\x12\x33\x35\x45\x65\x54\x42\x21\x9e\xec\xa7\x77\x76\x67\x86\x66\x65\x68\x4a\xa9\x8a\xbb\x77\xfd\x65\x56\x76\x76\x64\x67\x77\x76\x55\xbf\xff\xff\xff\xfa\x11\x12\x22\x22\x32\x33\x23\x22\x20\x00\x12\x23\x45\x55\x54\x44\x42\x11\x9e\xec\xa7\x67\x76\x68\x77\x66\x66\x68\x4a\xb9\x88\xaa\x96\xef\x75\x46\x66\x66\x65\x45\x77\x77\x45\x9f\xff\xff\xff\xfa\x10\x12\x22\x22\x22\x33\x32\x22\x20\x00\x12\x23\x33\x44\x54\x5
4\x42\x21\x8e\xed\xa8\x76\x76\x58\x77\x66\x66\x67\x59\xba\xa8\xa9\xa5\xef\x94\x46\x66\x66\x66\x46\x67\x77\x45\x7e\xff\xff\xff\xfa\x11\x12\x22\x22\x33\x33\x23\x32\x21\x00\x12\x23\x34\x54\x44\x44\x42\x11\x8e\xfd\xb8\x76\x67\x67\x68\x77\x66\x67\x68\xaa\xa9\x7b\xa5\xcf\xc3\x45\x66\x66\x66\x67\x66\x67\x55\x5d\xff\xff\xff\xf9\x00\x00\x11\x11\x11\x11\x11\x11\x10\x00\x12\x23\x34\x34\x44\x44\x42\x11\x8e\xfd\xb8\x77\x66\x67\x77\x66\x66\x67\x77\xaa\xa8\xa9\xb6\xbf\xe5\x44\x66\x66\x66\x66\x66\x67\x54\x4b\xff\xff\xff\xf9\x00\x01\x11\x11\x11\x11\x11\x11\x10\x00\x12\x23\x33\x44\x44\x44\x32\x11\x8e\xfd\xb8\x77\x76\x67\x77\x66\x66\x57\x76\xba\xa8\x7a\xb6\xaf\xf8\x43\x66\x66\x66\x66\x66\x66\x63\x48\xff\xff\xff\xfa\x10\x11\x22\x22\x22\x22\x32\x22\x10\x00\x12\x23\x34\x44\x44\x44\x32\x11\x8e\xfd\xb9\x77\x76\x57\x87\x66\x66\x66\x85\xbb\xa8\x8b\xa7\x9f\xfb\x43\x56\x66\x66\x66\x66\x66\x64\x45\xef\xff\xff\xfa\x10\x12\x22\x22\x22\x32\x22\x22\x10\x00\x12\x23\x33\x44\x44\x44\x32\x11\x8e\xfe\xb9\x67\x77\x57\x87\x76\x66\x66\x84\xab\x89\xaa\xb8\x8e\xfd\x54\x46\x66\x55\x55\x55\x66\x65\x43\xdf\xff\xff\xfa\x10\x12\x22\x22\x22\x32\x22\x22\x20\x00\x12\x23\x33\x44\x44\x44\x32\x11\x8e\xfe\xca\x66\x77\x57\x87\x77\x66\x67\x84\xab\x9a\xa9\xb9\x6e\xfe\x64\x35\x66\x55\x66\x65\x56\x66\x34\x9f\xff\xff\xfa\x10\x11\x12\x22\x22\x33\x22\x22\x10\x00\x11\x23\x34\x44\x55\x44\x32\x11\x8e\xfe\xca\x76\x77\x66\x87\x76\x66\x57\x84\x9b\x9a\x9a\xaa\x5d\xff\x84\x35\x66\x56\x66\x55\x45\x66\x44\x7f\xff\xff\xfa\x00\x11\x22\x22\x22\x22\x32\x22\x10\x00\x12\x34\x67\x78\x88\x76\x53\x11\x8e\xfe\xca\x76\x67\x66\x88\x66\x66\x56\x85\x7b\xaa\xba\xaa\x4c\xff\xb4\x45\x66\x55\x55\x45\x45\x66\x44\x5d\xff\xff\xfa\x10\x11\x22\x22\x33\x33\x23\x32\x10\x00\x12\x23\x34\x44\x44\x44\x42\x11\x8e\xfe\xca\x77\x66\x66\x88\x66\x66\x66\x87\x6b\xba\xab\xbb\x4b\xff\xd4\x44\x65\x54\x44\x44\x45\x55\x43\x4c\xff\xff\xfa\x00\x11\x22\x22\x22\x22\x22\x22\x10\x00\x11\x22\x33\x34\x44\x44\x32\x11\x8d\xed\xb9\x87\x76\x66\x78\x76\x66\x66\x78\x4b\xba\x98\x65\x18\xff\xe
6\x33\x55\x54\x44\x55\x55\x54\x42\x5d\xff\xff\xea\x00\x11\x11\x22\x22\x22\x22\x21\x10\x00\x11\x12\x22\x22\x33\x32\x21\x10\x6a\xba\x98\x87\x77\x66\x66\x77\x76\x66\x77\x23\x11\x11\x11\x05\xcd\xd8\x33\x55\x55\x55\x54\x44\x34\x7b\xdf\xee\xee\xd8\x00\x01\x11\x11\x11\x11\x11\x11\x00\x00\x00\x11\x11\x12\x22\x12\x11\x00\x59\x99\x87\x87\x77\x66\x66\x76\x76\x66\x77\x10\x11\x11\x11\x02\xaa\xa9\x33\x45\x55\x44\x44\x58\xbd\xee\xff\xdd\xcc\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x11\x11\x11\x10\x00\x69\x99\x87\x97\x77\x76\x76\x76\x65\x55\x67\x20\x10\x00\x00\x05\x99\x98\x43\x24\x33\x46\x89\xbc\xcd\xde\xee\xdc\xcc\xba\x42\x22\x22\x22\x22\x22\x22\x22\x22\x42\x11\x22\x22\x22\x22\x22\x21\x24\x79\x99\x88\x97\x66\x77\x77\x86\x67\x77\x88\x50\x00\x23\x46\x88\x99\x99\x63\x25\x78\x9a\xab\xbc\xcd\xde\xee\xee\xed\xdc\xbb\xaa\xa9\x99\x99\x99\x88\x88\x88\x88\x77\x77\x77\x77\x77\x77\x77\x78\x99\x99\x98\x88\x88\x88\x88\x88\x88\x88\x99\x87\x78\x99\x99\xaa\xaa\xaa\xa9\xaa\xbb\xcc\xcd\xdd\xee\xef\xff\xff\xff\xff\xee\xee\xed\xdd\xdd\xdc\xcc\xcc\xcc\xbb\xbb\xbb\xbb\xbb\xbb\xba\xaa\xaa\xab\xba\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb\xcc\xcc\xcc\xcd\xdd\xdd\xde\xee\xee\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xee\xee\xee\xee\xee\xee\xee\xee\xee\xee\xee\xed\xdd\xdd\xdd\xee\xee\xee\xee\xee\xee\xee\xee\xee\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff' # noqa
def reduce_color(c):
return max(0, min(255, c))//16
def write_t4b(t4bfile, coverdata=None):
'''
t4bfile is a file handle ready to write binary data to disk.
coverdata is a string representation of a JPEG file.
'''
from PIL import Image
if coverdata is not None:
coverdata = BytesIO(coverdata)
cover = Image.open(coverdata).convert("L")
cover.thumbnail((96, 144), Image.Resampling.LANCZOS)
t4bcover = Image.new('L', (96, 144), 'white')
x, y = cover.size
t4bcover.paste(cover, ((96-x)//2, (144-y)//2))
pxs = t4bcover.getdata()
t4bfile.write(b't4bp')
data = (16 * reduce_color(pxs[i]) + reduce_color(pxs[i+1])
for i in range(0, len(pxs), 2))
t4bfile.write(bytes(bytearray(data)))
else:
t4bfile.write(DEFAULT_T4B_DATA)
| 28,699 | Python | .py | 29 | 983.310345 | 27,694 | 0.745203 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,747 | t2b.py | kovidgoyal_calibre/src/calibre/devices/cybook/t2b.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
'''
Write a t2b file to disk.
'''
import io
from polyglot.builtins import int_to_byte
DEFAULT_T2B_DATA = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x0f\xff\xff\xff\xf0\xff\x0f\xc3\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf8\x00\x00\xff\xff\xff\xf0\xff\x0f\xc3\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xe0\xff\xf0\xff\xff\xff\xf0\xff\xff\xc3\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc3\xff\xff\xff\xff\xff\xf0\xff\xff\xc3\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x07\xff\xff\xfc\x00?\xf0\xff\x0f\xc3\x00?\xf0\xc0\xfe\x00?\xff\xff\xff\xff\xff\xff\xff\x0f\xff\xff\xf0<\x0f\xf0\xff\x0f\xc0,\x0f\xf0\x0e\xf0,\x0f\xff\xff\xff\xff\xff\xff\xff\x0f\xff\xff\xff\xff\xc3\xf0\xff\x0f\xc0\xff\x0f\xf0\xff\xf0\xff\xc7\xff\xff\xff\xff\xff\xff\xff\x0f\xff\xff\xff\xff\xc3\xf0\xff\x0f\xc3\xff\xc3\xf0\xff\xc3\xff\xc3\xff\xff\xff\xff\xff\xff\xff\x0f\xff\xff\xff\x00\x03\xf0\xff\x0f\xc3\xff\xc3\xf0\xff\xc3\xff\xc3\xff\xff\xff\xff\xff\xff\xff\x0f\xff\xff\xf0\x1f\xc3\xf0\xff\x0f\xc3\xff\xc3\xf0\xff\xc0\x00\x03\xff\xff\xff\xff\xff\xff\xff\x0b\xff\xff\xf0\xff\xc3\xf0\xff\x0f\xc3\xff\xc3\xf0\xff\xc3\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc3\xff\xff\xf3\xff\xc3\xf0\xff\x0f\xc3\xff\xc3\xf0\xff\xc3\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc0\xff\xfc\xf0\xff\x03\xf0\xff\x0f\xc0\xff\x0f\xf0\xff\xf0\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf0\x0f\x00\xf08\x03\xf0\xff\x0f\xc0,\x0f\xf0\xff\xf0\x1f\x03\xff\xff\xff\xff\xff\xff\xff\xff\x00\x0f\xfc\x00\xc3\xf0\xff\x0f\xc3\x00?\xf0\xff\xff\x00\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x
ff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf0\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf0\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x03\xfe\x94\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc0\x00\x00\x00\x0f\xff\xff\xff\xff\xff\xff\xfc\x7f\xfe\x94\xff\xff\xff\xff\xff\xff\xff\xff\xfc\x0f\xff\xfe\xa9@\xff\xff\xff\xff\xff\xff\xfc?
\xfe\xa4\xff\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xe9P\xff\xff\xff\xff\xff\xff\xfe/\xfe\xa8\xff\xff\xff\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xf9T\xff\xff\xff\xff\xf0@\x00+\xfa\xa8?\xff\xff\xff\xff\xff\xff\xff\xfc\xbf\xff\xff\xf9T\xff\xff\xff\xff\xcb\xe4}*\xaa\xaa?\xff\xff\xff\xff\xff\xff\xff\xfc\xbf\xff\xff\xe9T\xff\xff\xff\xff\xc7\xe4\xfd\x1a\xaa\xaa?\xff\xff\xff\xff\xff\xff\xff\xfc\xaf\xea\xaa\xa6\xa4\xff@\x00\x0f\xc3\xe8\xfe\x1a\xaa\xaa?\xff\xff\xff\xff\xff\xff\xff\xfcj\x95UZ\xa4\x00\x7f\xfe\x90\x03\xe8\xfe\n\xaa\xaa?\xff\xff\xff\xff\xff\xff\xff\xfcj\x95UZ\xa4?\xff\xff\xa5C\xe8\xfe\x06\xaa\xaa?\xff\xff\xff\xff\xff\xff\xff\xfcj\x95UZ\xa4?\xff\xff\xeaC\xe8\xbe\x06\xaa\xaa\x0f\xff\xff\xff\xff\xff\xff\xff\xfcj\x95UZ\xa4/\xff\xff\xea\x82\xe8j\x06\xaa\xaa\x0f\xff\xff\xff\xff\xff\xff\xff\xfcj\x95UZ\xa4/\xff\xff\xaa\x82\xe8*F\xaa\xaa\x8f\xff\xff\xff\xff\xff\xff\xff\xfcj\x95UZ\xa4+\xff\xfe\xaa\x82\xe8*\x86\xaa\xaa\x8f\xff\xff\x80\xff\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xe8*\x86\xaa\xaa\x8f\xf0\x00T?\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xe8*\x81\xaa\xaa\x8c\x03\xff\x95?\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xe8*\x81\xaa\xaa\x80\xbf\xff\x95?\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xe8*\x81\xaa\xaa\x9b\xff\xff\x95\x0f\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xe8\x1a\x81\xaa\xaa\x9a\xff\xfe\x95\x0f\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xe8\n\x81\xaa\xaa\xa6\xbf\xfeUO\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xa8\n\x91j\xaa\xa5\xaa\xa9ZO\xff\xff\xff\xfcj\x95UV\xa4\x1a\xfa\xaa\xaa\x82\xa8\n\xa0j\xaa\xa5Z\x95ZO\xff\xff\xff\xfcj\x95UV\xa4*\xfa\xaa\xaa\x82\xa9\n\xa0j\xaa\xa5UUZC\xff\xff\xff\xfcj\x95UV\xa4*\xfa\xaa\xaa\x82\xaa\n\xa0j\xaa\xa4UUZS\xff\xff\xff\xfcZ\x95UV\xa4*\xfa\xaa\xaa\x82\xaa\n\xa0j\xaa\xa4UUZS\xff\xff\xff\xfcZ\x95UU\xa4*\xfa\xaa\xaa\x82\xaa\n\xa0j\xaa\xa8UUVS\xff\xff\xff\xfcZ\x95UU\xa4*\xea\xaa\xaa\x82\xaa\x06\xa0Z\xaa\xa8UUV\x93\xff\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x81\xaa\x02\xa0\x1a\xaa\xa8UUV\x9
0\xff\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x80\xaa\x02\xa0\x1a\xaa\xa8\x15UU\x94\xff\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x80\xaa"\xa0\x1a\xaa\xa8\x15UU\x94\xff\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x80\xaa2\xa4\x16\xaa\xa8\x15UU\x94\xff\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x80\xaa2\xa8\x16\xa6\xa9\x15UU\x94\xff\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x80\xaa2\xa8\x16\xa6\xa9\x05UUT?\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x84\xaa2\xa8\x16\xaa\xaa\x05UUU?\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x88\xaa2\xa8\x06\xaa\xaa\x05UUU?\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa1\xa8\xc5\xaa\xaa\x05UUU?\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa0\xa8E\xa9\xaa\x05UUU/\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa<\xa8\x05\xa9\xaaAUUU\x0f\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa<\xa8\x05\xa9\xaaAUUUO\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa<\xa9\x05\xaa\xaaAUUUO\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x1c\xaa\x01\xaa\xaa\x81UUUO\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x0c\xaa\x01\xaa\xaa\x81UUUO\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x0c\xaa1j\xaa\x80UUUC\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x0cj1jj\x90UUUS\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x0c*1jj\x90UUUS\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaaL*1jj\xa0UUUS\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x8f* 
j\xaa\xa0\x15UUS\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x8f*@j\xaa\xa0\x15UUP\xff\xff\xfcZ\x95UU\xa4*\xaa\xaa\xaa\x8c\xaa\x8f*\x8cZ\xaa\xa1\x15UUT\xff\xff\xfcZ\x95UU\xa4j\xaa\xaa\xaa\x8c\xaa\x8f*\x8cZ\x9a\xa0\x15UUT\xff\xff\xfcZ\x95UU\xa4j\xaa\xaa\xaa\x8c\xaa\x8f*\x8cZ\x9a\xa0\x15UUT\xff\xff\xfcZ\x95UU\xa4j\xaa\xaa\xaa\x8c\xaa\x8f\x1a\x8cZ\x9a\xa4\x15UUT?\xff\xfcZ\x95UU\x94j\xaa\xaa\xaa\x8cj\x8f\n\x8cVj\xa4\x05UU\xa4?\xff\xfcVUUU\xa4j\xaa\xaa\xaa\x8cj\x8fJ\x8c\x16\xaa\xa8\xc5UZ\xa5?\xff\xfcUUUV\xa4j\xaa\xaa\xaa\x8cj\x8f\xca\x8f\x16\xaa\xa8\xc5V\xaa\xa5?\xff\xfcUj\xaa\xaa\xa4j\xaa\xaa\xaa\x8cj\x8f\xca\x8f\x1a\xaa\xa8\x05Z\xaaU?\xff\xfcV\xaa\xaa\xaa\xa5j\xaa\xaa\xaa\x8e*\x8f\xca\x83\x1a\xaa\xa4\x01eUU?\xff\xfcZ\xaa\xaa\xaa\xa5j\xaa\xaa\xaa\x8f*\x8f\xca\x83\x1a\xa5U\x01U\x00\x00\x0f\xff\xfcUUUUUZ\xaa\xaa\xaaO%\x8f\xc6\x93\x15\x00\x001@\x0f\xff\xff\xff\xfcP\x00\x00\x00\x15\x00\x00\x00\x00\x0f\x00\x07\xc0\x03\x00\xff\xff0\x1f\xff\xff\xff\xff\xfc\x00\xff\xff\xf8\x00?\xff\xff\xff\x0f?\xc7\xc3\xf7\x0f\xff\xff\xf1\xff\xff\xff\xff\xff\xfc\xff\xff\xff\xff\xf4\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xf
f\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff' # noqa
def reduce_color(c):
if c <= 64:
return 0
elif c > 64 and c <= 128:
return 1
elif c > 128 and c <= 192:
return 2
else:
return 3
def i2b(n):
return "".join([str((n >> y) & 1) for y in range(1, -1, -1)])
def write_t2b(t2bfile, coverdata=None):
'''
t2bfile is a file handle ready to write binary data to disk.
coverdata is a string representation of a JPEG file.
'''
from PIL import Image
if coverdata is not None:
coverdata = io.BytesIO(coverdata)
cover = Image.open(coverdata).convert("L")
cover.thumbnail((96, 144), Image.Resampling.LANCZOS)
t2bcover = Image.new('L', (96, 144), 'white')
x, y = cover.size
t2bcover.paste(cover, ((96-x)//2, (144-y)//2))
px = []
pxs = t2bcover.getdata()
for i in range(len(pxs)):
px.append(pxs[i])
if len(px) >= 4:
binstr = i2b(reduce_color(px[0])) + i2b(reduce_color(px[1])) + i2b(reduce_color(px[2])) + i2b(reduce_color(px[3]))
t2bfile.write(int_to_byte(int(binstr, 2)))
px = []
else:
t2bfile.write(DEFAULT_T2B_DATA)
| 13,916 | Python | .py | 42 | 324.547619 | 12,546 | 0.73368 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,748 | driver.py | kovidgoyal_calibre/src/calibre/devices/cybook/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Bookeen's Cybook Gen 3 and Opus and Orizon
'''
import os
import re
import calibre.devices.cybook.t2b as t2b
import calibre.devices.cybook.t4b as t4b
from calibre import fsync
from calibre.constants import isunix
from calibre.devices.usbms.driver import USBMS
class CYBOOK(USBMS):
name = 'Cybook Gen 3 / Opus Device Interface'
gui_name = 'Cybook Gen 3/Opus'
description = _('Communicate with the Cybook Gen 3/Opus e-book reader.')
author = 'John Schember'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
# Be sure these have an entry in calibre.devices.mime
FORMATS = ['epub', 'mobi', 'prc', 'html', 'pdf', 'rtf', 'txt']
VENDOR_ID = [0x0bda, 0x3034]
PRODUCT_ID = [0x0703, 0x1795]
BCD = [0x110, 0x132]
VENDOR_NAME = 'BOOKEEN'
WINDOWS_MAIN_MEM = re.compile(r'CYBOOK_(OPUS|GEN3)__-FD')
WINDOWS_CARD_A_MEM = re.compile('CYBOOK_(OPUS|GEN3)__-SD')
OSX_MAIN_MEM_VOL_PAT = re.compile(r'/Cybook')
EBOOK_DIR_MAIN = 'eBooks'
EBOOK_DIR_CARD_A = 'eBooks'
THUMBNAIL_HEIGHT = 144
DELETE_EXTS = ['.mbp', '.dat', '.bin', '_6090.t2b', '.thn']
SUPPORTS_SUB_DIRS = True
def upload_cover(self, path, filename, metadata, filepath):
coverdata = getattr(metadata, 'thumbnail', None)
if coverdata and coverdata[2]:
coverdata = coverdata[2]
else:
coverdata = None
with open('%s_6090.t2b' % os.path.join(path, filename), 'wb') as t2bfile:
t2b.write_t2b(t2bfile, coverdata)
fsync(t2bfile)
@classmethod
def can_handle(cls, device_info, debug=False):
if isunix:
return device_info[3] == 'Bookeen' and (device_info[4] == 'Cybook Gen3' or device_info[4] == 'Cybook Opus')
return True
class ORIZON(CYBOOK):
name = 'Cybook Orizon Device Interface'
gui_name = 'Orizon'
description = _('Communicate with the Cybook Orizon e-book reader.')
BCD = [0x319]
FORMATS = ['epub', 'html', 'pdf', 'rtf', 'txt']
VENDOR_NAME = ['BOOKEEN', 'LINUX']
WINDOWS_MAIN_MEM = re.compile(r'(CYBOOK_ORIZON__-FD)|(FILE-STOR_GADGET)')
WINDOWS_CARD_A_MEM = re.compile('(CYBOOK_ORIZON__-SD)|(FILE-STOR_GADGET)')
EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'Digital Editions'
EXTRA_CUSTOMIZATION_MESSAGE = [
_('Card A folder') + ':::<p>' + _(
'Enter the folder where the books are to be stored when sent to the '
'memory card. This folder is prepended to any send to device template') + '</p>',
]
EXTRA_CUSTOMIZATION_DEFAULT = [EBOOK_DIR_CARD_A]
def upload_cover(self, path, filename, metadata, filepath):
coverdata = getattr(metadata, 'thumbnail', None)
if coverdata and coverdata[2]:
coverdata = coverdata[2]
else:
coverdata = None
with open('%s.thn' % filepath, 'wb') as thnfile:
t4b.write_t4b(thnfile, coverdata)
fsync(thnfile)
def post_open_callback(self):
opts = self.settings()
folder = opts.extra_customization[0]
if not folder:
folder = ''
self.EBOOK_DIR_CARD_A = folder
@classmethod
def can_handle(cls, device_info, debug=False):
if isunix:
return device_info[3] == 'Bookeen' and device_info[4] == 'Cybook Orizon'
return True
def get_carda_ebook_dir(self, for_upload=False):
if not for_upload:
return ''
return self.EBOOK_DIR_CARD_A
class MUSE(CYBOOK):
name = 'Cybook Muse Device Interface'
gui_name = 'Muse'
description = _('Communicate with the Cybook Muse e-book reader.')
author = 'Kovid Goyal'
FORMATS = ['epub', 'html', 'fb2', 'txt', 'pdf', 'djvu']
VENDOR_ID = [0x0525]
PRODUCT_ID = [0xa4a5]
BCD = [0x0230]
VENDOR_NAME = 'USB_2.0'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB_FLASH_DRIVER'
EBOOK_DIR_MAIN = 'Books'
SCAN_FROM_ROOT = True
@classmethod
def can_handle(cls, device_info, debug=False):
if isunix:
return device_info[3] == 'Bookeen' and device_info[4] in ('Cybook', 'Lev', 'Nolimbook', 'Letto', 'Nolim', 'Saga', 'NolimbookXL')
return True
class DIVA(CYBOOK):
name = 'Bookeen Diva HD Device Interface'
gui_name = 'Diva HD'
description = _('Communicate with the Bookeen Diva HD e-book reader.')
author = 'Kovid Goyal'
VENDOR_ID = [0x1d6b]
PRODUCT_ID = [0x0104]
BCD = [0x100]
FORMATS = ['epub', 'html', 'fb2', 'txt', 'pdf']
EBOOK_DIR_MAIN = 'Books'
SCAN_FROM_ROOT = True
@classmethod
def can_handle(cls, device_info, debug=False):
return True
| 4,984 | Python | .py | 120 | 35.05 | 140 | 0.612308 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,749 | __init__.py | kovidgoyal_calibre/src/calibre/devices/blackberry/__init__.py | __license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 119 | Python | .py | 3 | 38.666667 | 58 | 0.681034 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,750 | driver.py | kovidgoyal_calibre/src/calibre/devices/blackberry/driver.py | __license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.devices.usbms.driver import USBMS
class BLACKBERRY(USBMS):
name = 'Blackberry Device Interface'
gui_name = 'Blackberry'
description = _('Communicate with the Blackberry smart phone.')
author = _('Kovid Goyal')
supported_platforms = ['windows', 'linux', 'osx']
# Ordered list of supported formats
FORMATS = ['mobi', 'prc']
VENDOR_ID = [0x0fca]
PRODUCT_ID = [0x8004, 0x0004]
BCD = [0x0200, 0x0107, 0x0210, 0x0201, 0x0211, 0x0220, 0x232]
VENDOR_NAME = 'RIM'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['BLACKBERRY_SD', 'BLACKBERRY']
MAIN_MEMORY_VOLUME_LABEL = 'Blackberry SD Card'
EBOOK_DIR_MAIN = 'eBooks'
SUPPORTS_SUB_DIRS = True
class PLAYBOOK(USBMS):
name = 'Blackberry Playbook Interface'
gui_name = 'Playbook'
description = _('Communicate with the Blackberry Playbook.')
author = _('Kovid Goyal')
supported_platforms = ['windows', 'linux', 'osx']
# Ordered list of supported formats
FORMATS = ['epub']
VENDOR_ID = [0x0fca]
PRODUCT_ID = [0x8010]
BCD = [0x1]
VENDOR_NAME = 'GENERIC-'
WINDOWS_MAIN_MEM = 'MULTI-CARD'
MAIN_MEMORY_VOLUME_LABEL = 'Blackberry'
EBOOK_DIR_MAIN = 'media/books'
SUPPORTS_SUB_DIRS = True
| 1,465 | Python | .py | 36 | 35.888889 | 75 | 0.630312 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,751 | __init__.py | kovidgoyal_calibre/src/calibre/devices/eslick/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,752 | driver.py | kovidgoyal_calibre/src/calibre/devices/eslick/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.devices.usbms.driver import USBMS
class ESLICK(USBMS):
name = 'ESlick Device Interface'
gui_name = 'Foxit ESlick'
description = _('Communicate with the ESlick e-book reader.')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'pdb', 'pdf', 'txt']
VENDOR_ID = [0x04cc]
PRODUCT_ID = [0x1a64]
BCD = [0x0110]
VENDOR_NAME = 'FOXIT'
WINDOWS_MAIN_MEM = 'ESLICK_USB_DEVIC'
WINDOWS_CARD_A_MEM = 'ESLICK_USB_DEVIC'
# OSX_MAIN_MEM = 'Kindle Internal Storage Media'
# OSX_CARD_A_MEM = 'Kindle Card Storage Media'
MAIN_MEMORY_VOLUME_LABEL = 'ESlick Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'ESlick Storage Card'
SUPPORTS_SUB_DIRS = True
@classmethod
def can_handle(cls, dev, debug=False):
return (dev[3], dev[4]) != ('philips', 'Philips d')
class EBK52(ESLICK):
name = 'EBK-52 Device Interface'
gui_name = 'Sigmatek EBK'
description = _('Communicate with the Sigmatek e-book reader.')
FORMATS = ['epub', 'fb2', 'pdf', 'txt']
VENDOR_NAME = ''
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EBOOK_READER'
MAIN_MEMORY_VOLUME_LABEL = 'Sigmatek Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'Sigmatek Storage Card'
@classmethod
def can_handle(cls, dev, debug=False):
return (dev[3], dev[4]) == ('philips', 'Philips d')
| 1,649 | Python | .py | 39 | 37.25641 | 70 | 0.631844 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,753 | driver.py | kovidgoyal_calibre/src/calibre/devices/irexdr/driver.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for IRex Digiatal Reader
'''
from calibre.devices.usbms.driver import USBMS
class IREXDR1000(USBMS):
name = 'IRex Digital Reader 1000 Device Interface'
description = _('Communicate with the IRex Digital Reader 1000 e-book '
'reader.')
author = 'John Schember'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
# Be sure these have an entry in calibre.devices.mime
FORMATS = ['epub', 'mobi', 'prc', 'html', 'pdf', 'djvu', 'txt']
VENDOR_ID = [0x1e6b]
PRODUCT_ID = [0x001]
BCD = [0x322]
VENDOR_NAME = 'IREX'
WINDOWS_MAIN_MEM = 'DR1000'
OSX_MAIN_MEM = 'iRex DR1000 Media'
MAIN_MEMORY_VOLUME_LABEL = 'IRex Digital Reader 1000 Main Memory'
EBOOK_DIR_MAIN = ''
DELETE_EXTS = ['.mbp']
SUPPORTS_SUB_DIRS = True
class IREXDR800(IREXDR1000):
name = 'IRex Digital Reader 800 Device Interface'
description = _('Communicate with the IRex Digital Reader 800')
PRODUCT_ID = [0x002]
WINDOWS_MAIN_MEM = 'DR800'
FORMATS = ['epub', 'pdb', 'html', 'pdf', 'txt']
EBOOK_DIR_MAIN = ''
DELETE_EXTS = []
SUPPORTS_SUB_DIRS = True
| 1,354 | Python | .py | 35 | 34.2 | 78 | 0.636015 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,754 | __init__.py | kovidgoyal_calibre/src/calibre/devices/teclast/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,755 | driver.py | kovidgoyal_calibre/src/calibre/devices/teclast/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.devices.usbms.driver import USBMS
class TECLAST_K3(USBMS):
name = 'Teclast K3/K5 Device Interface'
gui_name = 'K3/K5'
description = _('Communicate with the Teclast K3/K5 reader.')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'fb2', 'doc', 'pdf', 'txt']
VENDOR_ID = [0x071b]
PRODUCT_ID = [0x3203]
BCD = [0x0000, 0x0100]
VENDOR_NAME = ['TECLAST', 'IMAGIN', 'RK28XX', 'PER3274B', 'BEBOOK',
'RK2728', 'MR700', 'CYBER', 'E-BOOK', 'EBOOK']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = [
'DIGITAL_PLAYER', 'TL-K5', 'EREADER', 'USB-MSC', 'PER3274B', 'BEBOOK', 'USER', 'BOOK', 'E71B',
]
MAIN_MEMORY_VOLUME_LABEL = 'K3 Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'K3 Storage Card'
EBOOK_DIR_MAIN = ''
EBOOK_DIR_CARD_A = ''
SUPPORTS_SUB_DIRS = True
class NEWSMY(TECLAST_K3):
name = 'Newsmy device interface'
gui_name = 'Newsmy'
description = _('Communicate with the Newsmy reader.')
FORMATS = ['epub', 'fb2', 'pdb', 'html', 'pdf', 'txt', 'skt']
VENDOR_NAME = ''
WINDOWS_MAIN_MEM = 'NEWSMY'
WINDOWS_CARD_A_MEM = 'USBDISK____SD'
class ARCHOS7O(TECLAST_K3):
name = 'Archos 7O device interface'
gui_name = 'Archos'
description = _('Communicate with the Archos reader.')
FORMATS = ['epub', 'mobi', 'fb2', 'rtf', 'ap', 'html', 'pdf', 'txt']
VENDOR_NAME = 'ARCHOS'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB-MSC'
class PICO(NEWSMY):
name = 'Pico device interface'
gui_name = 'Pico'
description = _('Communicate with the Pico reader.')
VENDOR_NAME = ['TECLAST', 'IMAGIN', 'LASER-', 'LASER', '']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['USBDISK__USER', 'EB720', 'EBOOK-EB720']
EBOOK_DIR_MAIN = 'Books'
FORMATS = ['EPUB', 'FB2', 'TXT', 'LRC', 'PDB', 'PDF', 'HTML', 'WTXT']
SCAN_FROM_ROOT = True
class IPAPYRUS(TECLAST_K3):
name = 'iPapyrus device interface'
gui_name = 'iPapyrus'
description = _('Communicate with the iPapyrus reader.')
FORMATS = ['epub', 'pdf', 'txt']
VENDOR_NAME = ['E_READER', 'EBOOKREA', 'ICARUS']
WINDOWS_MAIN_MEM = ''
class SOVOS(TECLAST_K3):
name = 'Sovos device interface'
gui_name = 'Sovos'
description = _('Communicate with the Sovos reader.')
FORMATS = ['epub', 'fb2', 'pdf', 'txt']
VENDOR_NAME = 'RK28XX'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'USB-MSC'
class SUNSTECH_EB700(TECLAST_K3):
name = 'Sunstech EB700 device interface'
gui_name = 'EB700'
description = _('Communicate with the Sunstech EB700 reader.')
FORMATS = ['epub', 'fb2', 'pdf', 'pdb', 'txt']
VENDOR_NAME = 'SUNEB700'
WINDOWS_MAIN_MEM = 'USB-MSC'
class STASH(TECLAST_K3):
name = 'Stash device interface'
gui_name = 'Stash'
description = _('Communicate with the Stash W950 reader.')
FORMATS = ['epub', 'fb2', 'lrc', 'pdb', 'html', 'fb2', 'wtxt',
'txt', 'pdf']
VENDOR_NAME = 'STASH'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'W950'
class WEXLER(TECLAST_K3):
name = 'Wexler device interface'
gui_name = 'Wexler'
description = _('Communicate with the Wexler reader.')
FORMATS = ['epub', 'fb2', 'pdf', 'txt']
VENDOR_NAME = 'WEXLER'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'T7001'
| 3,621 | Python | .py | 85 | 37.317647 | 102 | 0.606529 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,756 | driver.py | kovidgoyal_calibre/src/calibre/devices/hanvon/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Hanvon devices
'''
import os
import re
from calibre import fsync
from calibre.devices.usbms.driver import USBMS
def is_alex(device_info):
return device_info[3] == 'Linux 2.6.28 with pxa3xx_u2d' and \
device_info[4] == 'Seleucia Disk'
class N516(USBMS):
name = 'N516 driver'
gui_name = 'N516'
description = _('Communicate with the Hanvon N520 e-book reader.')
author = 'John Schember'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['epub', 'prc', 'mobi', 'html', 'pdf', 'txt']
VENDOR_ID = [0x0525]
PRODUCT_ID = [0xa4a5]
BCD = [0x323, 0x326, 0x327]
VENDOR_NAME = 'INGENIC'
WINDOWS_MAIN_MEM = '_FILE-STOR_GADGE'
MAIN_MEMORY_VOLUME_LABEL = 'N520 Internal Memory'
EBOOK_DIR_MAIN = 'e_book'
SUPPORTS_SUB_DIRS = True
def can_handle(self, device_info, debug=False):
return not is_alex(device_info)
class KIBANO(N516):
name = 'Kibano driver'
gui_name = 'Kibano'
description = _('Communicate with the Kibano e-book reader.')
FORMATS = ['epub', 'pdf', 'txt']
BCD = [0x323]
VENDOR_NAME = 'EBOOK'
# We use EXTERNAL_SD_CARD for main mem as some devices have not working
# main memories
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['INTERNAL_SD_CARD',
'EXTERNAL_SD_CARD']
class THEBOOK(N516):
name = 'The Book driver'
gui_name = 'The Book'
description = _('Communicate with The Book reader.')
author = 'Kovid Goyal'
BCD = [0x399]
MAIN_MEMORY_VOLUME_LABEL = 'The Book Main Memory'
EBOOK_DIR_MAIN = 'My books'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['_FILE-STOR_GADGE',
'FILE-STOR_GADGET']
class LIBREAIR(N516):
name = 'Libre Air Driver'
gui_name = 'Libre Air'
description = _('Communicate with the Libre Air reader.')
author = 'Kovid Goyal'
FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'rtf', 'txt', 'pdf']
BCD = [0x399]
VENDOR_NAME = ['ALURATEK', 'LINUX']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'
EBOOK_DIR_MAIN = 'Books'
class ALEX(N516):
name = 'Alex driver'
gui_name = 'SpringDesign Alex'
description = _('Communicate with the SpringDesign Alex e-book reader.')
author = 'Kovid Goyal'
FORMATS = ['epub', 'fb2', 'pdf']
VENDOR_NAME = 'ALEX'
WINDOWS_MAIN_MEM = 'READER'
MAIN_MEMORY_VOLUME_LABEL = 'Alex Internal Memory'
EBOOK_DIR_MAIN = 'eBooks'
SUPPORTS_SUB_DIRS = False
THUMBNAIL_HEIGHT = 120
def can_handle(self, device_info, debug=False):
return is_alex(device_info)
def alex_cpath(self, file_abspath):
base = os.path.dirname(file_abspath)
name = os.path.splitext(os.path.basename(file_abspath))[0] + '.png'
return os.path.join(base, 'covers', name)
def upload_cover(self, path, filename, metadata, filepath):
from calibre.ebooks.covers import calibre_cover2
from calibre.utils.img import scale_image
coverdata = getattr(metadata, 'thumbnail', None)
if coverdata and coverdata[2]:
cover = coverdata[2]
else:
cover = calibre_cover2(metadata.get('title', _('Unknown')),
metadata.get('authors', _('Unknown')))
cover = scale_image(cover, width=self.THUMBNAIL_HEIGHT,
height=self.THUMBNAIL_HEIGHT, as_png=True)[-1]
cpath = self.alex_cpath(os.path.join(path, filename))
cdir = os.path.dirname(cpath)
if not os.path.exists(cdir):
os.makedirs(cdir)
with open(cpath, 'wb') as coverfile:
coverfile.write(cover)
fsync(coverfile)
def delete_books(self, paths, end_session=True):
for i, path in enumerate(paths):
self.report_progress((i+1) / float(len(paths)), _('Removing books from device...'))
path = self.normalize_path(path)
if os.path.exists(path):
# Delete the ebook
os.unlink(path)
try:
cpath = self.alex_cpath(path)
if os.path.exists(cpath):
os.remove(cpath)
except:
pass
self.report_progress(1.0, _('Removing books from device...'))
class AZBOOKA(ALEX):
    # Variant of the ALEX driver for the Azbooka reader: same USB hardware
    # family, different identification strings and on-device layout.
    name = 'Azbooka driver'
    gui_name = 'Azbooka'
    description = _('Communicate with the Azbooka')

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'
    MAIN_MEMORY_VOLUME_LABEL = 'Azbooka Internal Memory'

    EBOOK_DIR_MAIN = ''
    SUPPORTS_SUB_DIRS = True

    def can_handle(self, device_info, debug=False):
        # Inverse of the ALEX check: claim the device only when it is not
        # an Alex (the two share USB ids).
        return not is_alex(device_info)

    def upload_cover(self, path, filename, metadata, filepath):
        # The Azbooka does not use cover sidecar files; disable the ALEX
        # cover upload.
        pass
class EB511(USBMS):
    # Fully declarative USBMS driver: only the USB ids and on-device layout
    # differ from the generic base class.
    name = 'Elonex EB 511 driver'
    gui_name = 'EB 511'
    description = _('Communicate with the Elonex EB 511 e-book reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    FORMATS = ['epub', 'html', 'pdf', 'txt']

    VENDOR_ID = [0x45e]
    PRODUCT_ID = [0xffff]
    BCD = [0x0]

    MAIN_MEMORY_VOLUME_LABEL = 'EB 511 Internal Memory'

    EBOOK_DIR_MAIN = 'e_book'
    SUPPORTS_SUB_DIRS = True

    # Pattern used to recognize the mounted volume on macOS
    OSX_MAIN_MEM_VOL_PAT = re.compile(r'/eReader')
class ODYSSEY(N516):
    '''Driver for the Bookeen Cybook Odyssey.'''
    name = 'Cybook Odyssey driver'
    gui_name = 'Odyssey'
    description = _('Communicate with the Cybook Odyssey e-book reader.')

    BCD = [0x316]
    VENDOR_NAME = ['LINUX', 'BOOKEEN']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['FILE-STOR_GADGET', 'FLASH_DISK']

    FORMATS = ['epub', 'fb2', 'html', 'pdf', 'txt']

    EBOOK_DIR_MAIN = 'Digital Editions'

    def get_main_ebook_dir(self, for_upload=False):
        # New books go into 'Digital Editions'; scanning for existing books
        # starts at the storage root.
        return self.EBOOK_DIR_MAIN if for_upload else ''
| 6,181 | Python | .py | 152 | 33.605263 | 95 | 0.605224 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,757 | driver.py | kovidgoyal_calibre/src/calibre/devices/sne/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Bookeen's Cybook Gen 3
'''
from calibre.devices.usbms.driver import USBMS
class SNE(USBMS):
    # Declarative driver for the Samsung SNE readers; all behavior is
    # inherited from the USBMS base class.
    name = 'Samsung SNE Device Interface'
    gui_name = 'Samsung SNE'
    description = _('Communicate with the Samsung SNE e-book reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    # Be sure these have an entry in calibre.devices.mime
    FORMATS = ['epub', 'pdf', 'txt']

    VENDOR_ID = [0x04e8]
    PRODUCT_ID = [0x2051, 0x2053, 0x2054]
    BCD = [0x0323]

    VENDOR_NAME = 'SAMSUNG'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['SNE-60', 'E65']

    MAIN_MEMORY_VOLUME_LABEL = 'SNE Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'SNE Storage Card'

    EBOOK_DIR_MAIN = EBOOK_DIR_CARD_A = 'Books'
    SUPPORTS_SUB_DIRS = True
27,758 | test.py | kovidgoyal_calibre/src/calibre/devices/mtp/test.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import gc
import io
import unittest
from calibre.constants import islinux, iswindows
from calibre.devices.mtp.driver import MTP_DEVICE
from calibre.devices.scanner import DeviceScanner
from calibre.utils.icu import lower
class ProgressCallback:
    '''Callable that records how many times it was invoked and whether it
    ever received a completion notification (pos == total).'''

    def __init__(self):
        self.count = 0           # number of invocations so far
        self.end_called = False  # saw pos == total at least once

    def __call__(self, pos, total):
        self.count += 1
        self.end_called = self.end_called or pos == total
class TestDeviceInteraction(unittest.TestCase):

    '''Integration tests that exercise a real, connected MTP device.

    Tests skip themselves when no managed device is detected or the device
    lacks enough free space (see check_setup()).
    '''

    @classmethod
    def setUpClass(cls):
        cls.dev = cls.storage = None
        cls.dev = MTP_DEVICE(None)
        cls.dev.startup()
        cls.scanner = DeviceScanner()
        cls.scanner.scan()
        cd = cls.dev.detect_managed_devices(cls.scanner.devices)
        if cd is None:
            # No device: release resources; tests will skip
            cls.dev.shutdown()
            cls.dev = None
            return
        cls.dev.open(cd, 'test_library')
        if cls.dev.free_space()[0] < 10*(1024**2):
            return
        # Bare attribute access forces the filesystem cache to be built
        cls.dev.filesystem_cache
        cls.storage = cls.dev.filesystem_cache.entries[0]

    @classmethod
    def tearDownClass(cls):
        if cls.dev is not None:
            cls.dev.shutdown()
            cls.dev = None

    def setUp(self):
        # Objects created on the device during a test; removed in tearDown
        self.cleanup = []

    def tearDown(self):
        # Delete children before parents (reverse creation order)
        for obj in reversed(self.cleanup):
            self.dev.delete_file_or_folder(obj)

    def check_setup(self):
        # Skip (rather than fail) when no usable device is attached
        if self.dev is None:
            self.skipTest('No MTP device detected')
        if self.storage is None:
            self.skipTest('The connected device does not have enough free space')

    def test_folder_operations(self):
        ''' Test the creation of folders, duplicate folders and sub folders '''
        self.check_setup()

        # Create a folder
        name = 'zzz-test-folder'
        folder = self.dev.create_folder(self.storage, name)
        self.cleanup.append(folder)
        self.assertTrue(folder.is_folder)
        self.assertEqual(folder.parent_id, self.storage.object_id)
        self.assertEqual(folder.storage_id, self.storage.object_id)
        self.assertEqual(lower(name), lower(folder.name))

        # Create a sub-folder
        name = 'sub-folder'
        subfolder = self.dev.create_folder(folder, name)
        self.assertTrue(subfolder.is_folder)
        self.assertEqual(subfolder.parent_id, folder.object_id)
        self.assertEqual(subfolder.storage_id, self.storage.object_id)
        self.assertEqual(lower(name), lower(subfolder.name))
        self.cleanup.append(subfolder)

        # Check that creating an existing folder returns that folder (case
        # insensitively)
        self.assertIs(subfolder, self.dev.create_folder(folder,
            'SUB-FOLDER'),
            msg='Creating an existing folder did not return the existing folder')

        # Check that creating folders as children of files is not allowed
        root_file = [f for f in self.dev.filesystem_cache.entries[0].files if
                not f.is_folder]
        if root_file:
            with self.assertRaises(ValueError):
                self.dev.create_folder(root_file[0], 'sub-folder')

    def test_file_transfer(self):
        ''' Test transferring files to and from the device '''
        self.check_setup()

        # Create a folder
        name = 'zzz-test-folder'
        folder = self.dev.create_folder(self.storage, name)
        self.cleanup.append(folder)
        self.assertTrue(folder.is_folder)
        self.assertEqual(folder.parent_id, self.storage.object_id)

        # Check simple file put/get
        size = 1024**2
        raw = io.BytesIO(b'a'*size)
        raw.seek(0)
        name = 'test-file.txt'
        pc = ProgressCallback()
        f = self.dev.put_file(folder, name, raw, size, callback=pc)
        self.cleanup.append(f)
        self.assertEqual(f.name, name)
        self.assertEqual(f.size, size)
        self.assertEqual(f.parent_id, folder.object_id)
        self.assertEqual(f.storage_id, folder.storage_id)
        self.assertTrue(pc.end_called,
                msg='Progress callback not called with equal values (put_file)')
        self.assertTrue(pc.count > 1,
                msg='Progress callback only called once (put_file)')

        raw2 = io.BytesIO()
        pc = ProgressCallback()
        self.dev.get_mtp_file(f, raw2, callback=pc)
        self.assertEqual(raw.getvalue(), raw2.getvalue())
        self.assertTrue(pc.end_called,
                msg='Progress callback not called with equal values (get_file)')
        self.assertTrue(pc.count > 1,
                msg='Progress callback only called once (get_file)')

        # Check file replacement
        raw = io.BytesIO(b'abcd')
        raw.seek(0)
        size = 4
        f = self.dev.put_file(folder, name, raw, size)
        self.cleanup.append(f)
        self.assertEqual(f.name, name)
        self.assertEqual(f.size, size)
        self.assertEqual(f.parent_id, folder.object_id)
        self.assertEqual(f.storage_id, folder.storage_id)

        # Check that we get an error with replace=False
        raw.seek(0)
        with self.assertRaises(ValueError):
            self.dev.put_file(folder, name, raw, size, replace=False)

        # Check that we can put a file into the root
        raw.seek(0)
        name = 'zzz-test-file.txt'
        f = self.dev.put_file(self.storage, name, raw, size)
        self.cleanup.append(f)
        self.assertEqual(f.name, name)
        self.assertEqual(f.size, size)
        self.assertEqual(f.parent_id, self.storage.object_id)
        self.assertEqual(f.storage_id, self.storage.storage_id)

        raw2 = io.BytesIO()
        self.dev.get_mtp_file(f, raw2)
        self.assertEqual(raw.getvalue(), raw2.getvalue())

    def measure_memory_usage(self, repetitions, func, *args, **kwargs):
        # Run func repeatedly with GC disabled and report the change in
        # process memory consumption (after a few forced collections)
        from calibre.utils.mem import memory
        gc.disable()
        try:
            start_mem = memory()
            for i in range(repetitions):
                func(*args, **kwargs)
            for i in range(3):
                gc.collect()
            end_mem = memory()
        finally:
            gc.enable()
        return end_mem - start_mem

    def check_memory(self, once, many, msg, factor=2):
        # Memory used by many runs should not grow much beyond a single run
        msg += ' for once: %g for many: %g'%(once, many)
        if once > 0:
            self.assertTrue(many <= once*factor, msg=msg)
        else:
            self.assertTrue(many <= 0.01, msg=msg)

    @unittest.skipUnless(iswindows or islinux, 'Can only test for leaks on windows and linux')
    def test_memory_leaks(self):
        ''' Test for memory leaks in the C module '''
        self.check_setup()

        # Test device scanning
        used_by_one = self.measure_memory_usage(1,
                self.dev.detect_managed_devices, self.scanner.devices,
                force_refresh=True)

        used_by_many = self.measure_memory_usage(100,
                self.dev.detect_managed_devices, self.scanner.devices,
                force_refresh=True)

        self.check_memory(used_by_one, used_by_many,
                'Memory consumption during device scan')

        # Test file transfer
        size = 1024*100
        raw = io.BytesIO(b'a'*size)
        raw.seek(0)
        name = 'zzz-test-file.txt'

        def send_file(storage, name, raw, size):
            # One put_file round-trip; pc is deleted to avoid keeping refs
            raw.seek(0)
            pc = ProgressCallback()
            f = self.dev.put_file(storage, name, raw, size, callback=pc)
            self.cleanup.append(f)
            del pc

        used_once = self.measure_memory_usage(1, send_file, self.storage, name,
                raw, size)
        used_many = self.measure_memory_usage(20, send_file, self.storage, name,
                raw, size)

        self.check_memory(used_once, used_many,
                'Memory consumption during put_file:')

        def get_file(f):
            # One get_mtp_file round-trip into a throwaway buffer
            raw = io.BytesIO()
            pc = ProgressCallback()
            self.dev.get_mtp_file(f, raw, callback=pc)
            raw.truncate(0)
            del raw
            del pc

        f = self.storage.file_named(name)
        used_once = self.measure_memory_usage(1, get_file, f)
        used_many = self.measure_memory_usage(20, get_file, f)
        self.check_memory(used_once, used_many,
                'Memory consumption during get_file:')

        # Test get_filesystem
        used_by_one = self.measure_memory_usage(1,
                self.dev.dev.get_filesystem, self.storage.object_id,
                lambda x, l:True)

        used_by_many = self.measure_memory_usage(5,
                self.dev.dev.get_filesystem, self.storage.object_id,
                lambda x, l: True)

        self.check_memory(used_by_one, used_by_many,
                'Memory consumption during get_filesystem')
def tests():
    '''Build the test suite for the MTP device interaction tests.'''
    loader = unittest.TestLoader()
    # Swap in the next line to run only the memory-leak test:
    # return loader.loadTestsFromName('test.TestDeviceInteraction.test_memory_leaks')
    return loader.loadTestsFromTestCase(TestDeviceInteraction)
def run():
    '''Run the full suite with per-test verbose output.'''
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(tests())


if __name__ == '__main__':
    run()
| 9,228 | Python | .py | 219 | 32.182648 | 94 | 0.609865 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,759 | filesystem_cache.py | kovidgoyal_calibre/src/calibre/devices/mtp/filesystem_cache.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import json
import sys
import time
import weakref
from collections import defaultdict, deque
from datetime import datetime
from itertools import chain
from operator import attrgetter
from typing import Dict, Tuple
from calibre import force_unicode, human_readable, prints
from calibre.constants import iswindows
from calibre.ebooks import BOOK_EXTENSIONS
from calibre.utils.date import as_utc, local_tz
from calibre.utils.icu import lower, sort_key
bexts = frozenset(BOOK_EXTENSIONS) - {'mbp', 'tan', 'rar', 'zip', 'xml'}
class ListEntry:
    '''Flat, directory-listing style snapshot of a FileOrFolder entry.'''

    def __init__(self, entry: 'FileOrFolder'):
        mtime = time.mktime(entry.last_modified.timetuple())
        self.name = entry.name
        self.size = entry.size
        self.path = '/'.join(entry.full_path)
        self.is_dir = entry.is_folder
        self.is_readonly = not entry.can_delete
        # MTP exposes no separate creation time, so both timestamps carry
        # the last-modified time.
        self.ctime = self.wtime = mtime
class FileOrFolder:

    '''A single node (storage root, folder or file) in the device
    filesystem tree.

    Non-storage nodes register themselves in the owning FilesystemCache's
    per-storage id map; only a weak reference to the cache is held to avoid
    a reference cycle.
    '''

    def __init__(self, entry, fs_cache: 'FilesystemCache', is_storage: bool = False):
        self.object_id = entry['id']
        self.is_storage = is_storage
        self.is_folder = entry['is_folder']
        self.storage_id = entry['storage_id']
        # self.parent_id is None for storage objects
        self.parent_id = entry.get('parent_id', None)
        self.persistent_id = entry.get('persistent_id', self.object_id)
        n = entry.get('name', None)
        if not n:
            # Synthesize a stable placeholder name for nameless objects
            if self.is_storage:
                prefix = 'Storage'
            else:
                prefix = 'Folder' if self.is_folder else 'File'
            n = f'{prefix}-{self.persistent_id}'
        self.name = force_unicode(n, 'utf-8')
        self.size = entry.get('size', 0)
        md = entry.get('modified', 0)
        # 'modified' may be a broken-down time tuple or an epoch timestamp;
        # fall back to the epoch on any bad value
        try:
            if isinstance(md, tuple):
                self.last_modified = datetime(*(list(md)+[local_tz]))
            else:
                self.last_modified = datetime.fromtimestamp(md, local_tz)
        except Exception:
            self.last_modified = datetime.fromtimestamp(0, local_tz)
        self.last_mod_string = self.last_modified.strftime('%Y/%m/%d %H:%M')
        self.last_modified = as_utc(self.last_modified)

        if self.storage_id not in fs_cache.all_storage_ids:
            raise ValueError('Storage id %s not valid for %s, valid values: %s'%(self.storage_id,
                entry, fs_cache.all_storage_ids))

        self.is_hidden = entry.get('is_hidden', False)
        self.is_system = entry.get('is_system', False)
        self.can_delete = entry.get('can_delete', True)

        self.files = []
        self.folders = []
        if not self.is_storage:
            # storage ids can overlap filesystem object ids on libmtp. See https://bugs.launchpad.net/bugs/2072384
            # so only store actual filesystem object ids in id_map
            fs_cache.id_maps[self.storage_id][self.object_id] = self
        if iswindows:
            # On windows parent_id == storage_id for objects in root. Set
            # it 0 so the rest of the logic works as on libmtp.
            # See https://bugs.launchpad.net/bugs/2073323
            if self.storage_id == self.parent_id:
                self.parent_id = 0
        self.fs_cache = weakref.ref(fs_cache)
        self.deleted = False

        if self.is_storage:
            self.storage_prefix = 'mtp:::%s:::'%self.persistent_id

        # Ignore non ebook files and AppleDouble files
        self.is_ebook = (not self.is_folder and not self.is_storage and
                self.name.rpartition('.')[-1].lower() in bexts and not self.name.startswith('._'))

    def __repr__(self):
        if self.is_storage:
            name = 'Storage'
        else:
            name = 'Folder' if self.is_folder else 'File'
        try:
            path = str(self.full_path)
        except Exception:
            path = ''
        datum = 'size=%s'%(self.size)
        if self.is_folder or self.is_storage:
            datum = 'children=%s'%(len(self.files) + len(self.folders))
        return '%s(id=%s, storage_id=%s, %s, path=%s, modified=%s)'%(name, self.object_id,
                self.storage_id, datum, path, self.last_mod_string)

    __str__ = __repr__
    __unicode__ = __repr__

    @property
    def empty(self):
        # True when this node has no children
        return not self.files and not self.folders

    @property
    def id_map(self) -> Dict[int, 'FileOrFolder']:
        # Object-id -> node mapping for this node's storage
        return self.fs_cache().id_maps[self.storage_id]

    @property
    def parent(self):
        if self.parent_id:
            return self.id_map[self.parent_id]
        if self.is_storage or self.parent_id is None:
            return None
        # parent_id == 0: the parent is the storage root itself
        return self.fs_cache().storage(self.storage_id)

    @property
    def in_root(self):
        # True for objects that sit directly in the storage root
        return self.parent_id is not None and self.parent_id == 0

    @property
    def storage(self):
        return self.fs_cache().storage(self.storage_id)

    @property
    def full_path(self) -> Tuple[str, ...]:
        # Names from the storage root down to this node, inclusive
        parts = deque()
        parts.append(self.name)
        p = self.parent
        while p is not None:
            parts.appendleft(p.name)
            p = p.parent
        return tuple(parts)

    def __iter__(self):
        # Iterate children: folders first, then files
        yield from self.folders
        yield from self.files

    def add_child(self, entry):
        # Wrap a raw entry dict and attach it as a child of this node
        ans = FileOrFolder(entry, self.fs_cache())
        t = self.folders if ans.is_folder else self.files
        t.append(ans)
        return ans

    def remove_child(self, entry):
        # Detach entry from this node and from the id map; mark it deleted
        for x in (self.files, self.folders):
            try:
                x.remove(entry)
            except ValueError:
                pass
        self.id_map.pop(entry.object_id, None)
        entry.deleted = True

    def dump(self, prefix='', out=sys.stdout):
        '''Pretty-print this subtree to *out*, one indented line per node.'''
        c = '+' if self.is_folder else '-'
        data = ('%s children'%(sum(map(len, (self.files, self.folders))))
                if self.is_folder else human_readable(self.size))
        data += ' modified=%s'%self.last_mod_string
        line = '%s%s %s [id:%s %s]'%(prefix, c, self.name, self.object_id, data)
        prints(line, file=out)
        for c in (self.folders, self.files):
            for e in sorted(c, key=lambda x:sort_key(x.name)):
                e.dump(prefix=prefix+'  ', out=out)

    def list(self, recurse=False):
        '''Yield (path, entries) pairs in the style of a directory listing.'''
        if not self.is_folder:
            parent = self.parent
            yield '/'.join(parent.full_path[1:]), ListEntry(self)
            return
        entries = [ListEntry(x) for x in chain(self.folders, self.files)]
        yield '/'.join(self.full_path[1:]), entries
        if recurse:
            for x in self.folders:
                yield from x.list(recurse=True)

    def folder_named(self, name):
        # Case-insensitive lookup among immediate sub-folders
        name = lower(name)
        for e in self.folders:
            if e.name and lower(e.name) == name:
                return e
        return None

    def file_named(self, name):
        # Case-insensitive lookup among immediate files
        name = lower(name)
        for e in self.files:
            if e.name and lower(e.name) == name:
                return e
        return None

    def find_path(self, path):
        '''
        Find a path in this folder, where path is a
        tuple of folder and file names like ('eBooks', 'newest',
        'calibre.epub'). Finding is case-insensitive.
        '''
        parent = self
        components = list(path)
        while components:
            child = components[0]
            components = components[1:]
            c = parent.folder_named(child)
            if c is None:
                c = parent.file_named(child)
            if c is None:
                return None
            parent = c
        return parent

    @property
    def mtp_relpath(self):
        # Lower-cased path relative to the storage root (storage name dropped)
        return tuple(x.lower() for x in self.full_path[1:])

    @property
    def mtp_id_path(self):
        # Stable textual identifier embedding the object id and full path
        return 'mtp:::' + json.dumps(self.object_id) + ':::' + '/'.join(self.full_path)
class FilesystemCache:

    '''In-memory model of the device filesystem: one FileOrFolder tree per
    storage, plus per-storage id maps for O(1) lookup by object id.'''

    def __init__(self, all_storage, entries):
        self.entries = []
        self.id_maps = defaultdict(dict)
        # Set before constructing nodes: FileOrFolder validates against it
        self.all_storage_ids = tuple(x['id'] for x in all_storage)

        for storage in all_storage:
            storage['storage_id'] = storage['id']
            e = FileOrFolder(storage, self, is_storage=True)
            self.entries.append(e)

        self.entries.sort(key=attrgetter('object_id'))
        self.all_storage_ids = tuple(x.storage_id for x in self.entries)

        for entry in entries:
            FileOrFolder(entry, self)

        # Link every node into its parent's children lists
        for id_map in self.id_maps.values():
            for item in id_map.values():
                try:
                    p = item.parent
                except KeyError:
                    # Parent does not exist, set the parent to be the storage
                    # object
                    item.parent_id = 0
                    p = item.parent
                if p is not None:
                    t = p.folders if item.is_folder else p.files
                    t.append(item)

    def dump(self, out=sys.stdout):
        for e in self.entries:
            e.dump(out=out)

    def storage(self, storage_id):
        # Returns the storage node, or None when no storage matches
        for e in self.entries:
            if e.storage_id == storage_id:
                return e

    def iterebooks(self, storage_id):
        # Yield all e-book files on the given storage
        id_map = self.id_maps[storage_id]
        for x in id_map.values():
            if x.is_ebook:
                if x.in_root and x.name.lower().endswith('.txt'):
                    continue  # Ignore .txt files in the root
                yield x

    def __len__(self):
        # Total node count: one per storage plus every filesystem object
        ans = len(self.id_maps)
        for id_map in self.id_maps.values():
            ans += len(id_map)
        return ans

    def resolve_mtp_id_path(self, path):
        '''Map an 'mtp:::<id>:::<path>' string back to its FileOrFolder.

        Raises ValueError for malformed paths or unknown objects.
        '''
        if not path.startswith('mtp:::'):
            raise ValueError('%s is not a valid MTP path'%path)
        parts = path.split(':::', 2)
        if len(parts) < 3:
            raise ValueError('%s is not a valid MTP path'%path)
        try:
            object_id = json.loads(parts[1])
        except Exception:
            raise ValueError('%s is not a valid MTP path'%path)
        id_map = {}
        path = parts[2]
        # The first path component names the storage the object lives on
        storage_name = path.partition('/')[0]
        for entry in self.entries:
            if entry.name == storage_name:
                id_map = self.id_maps[entry.storage_id]
                break
        try:
            return id_map[object_id]
        except KeyError:
            raise ValueError('No object found with MTP path: %s'%path)
| 10,511 | Python | .py | 261 | 30.249042 | 114 | 0.573851 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,760 | books.py | kovidgoyal_calibre/src/calibre/devices/mtp/books.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from calibre.devices.interface import BookList as BL
from calibre.ebooks.metadata import title_sort
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.json_codec import JsonCodec
from calibre.utils.date import utcnow
from calibre.utils.icu import lower as icu_lower
class BookList(BL):

    '''List of books on a single MTP storage.'''

    def __init__(self, storage_id):
        self.storage_id = storage_id

    def supports_collections(self):
        # MTP devices have no notion of collections
        return False

    def add_book(self, book, replace_metadata=True):
        '''Add *book* and return it; when an equal book already exists,
        optionally merge metadata into it and return the existing entry
        (or None when replace_metadata is False).'''
        try:
            pos = self.index(book)
        except (ValueError, IndexError):
            # Not present yet: append and hand back the new entry
            self.append(book)
            return book
        if replace_metadata:
            self[pos].smart_update(book, replace_metadata=True)
            return self[pos]
        return None

    def remove_book(self, book):
        self.remove(book)
class Book(Metadata):

    '''Metadata for a single book on an MTP device, identified by its
    storage id and its lower-cased path relative to the storage root.'''

    def __init__(self, storage_id, lpath, other=None):
        Metadata.__init__(self, _('Unknown'), other=other)
        self.storage_id, self.lpath = storage_id, lpath
        self.lpath = self.path = self.lpath.replace(os.sep, '/')
        self.mtp_relpath = tuple(icu_lower(x) for x in self.lpath.split('/'))
        self.datetime = utcnow().timetuple()
        # Fixed apparent typo: was `self.thumbail`, an attribute nothing in
        # this module reads; the rest of the MTP driver uses 'thumbnail'.
        # TODO(review): confirm no external code relied on 'thumbail'.
        self.thumbnail = None

    def matches_file(self, mtp_file):
        # A book corresponds to an on-device file when both the storage id
        # and the case-folded relative path agree
        return (self.storage_id == mtp_file.storage_id and
                self.mtp_relpath == mtp_file.mtp_relpath)

    def __eq__(self, other):
        return (isinstance(other, self.__class__) and (self.storage_id ==
            other.storage_id and self.mtp_relpath == other.mtp_relpath))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Consistent with __eq__: identity is (storage_id, mtp_relpath)
        return hash((self.storage_id, self.mtp_relpath))

    @property
    def title_sorter(self):
        '''Title sort key, computed from the title when no usable
        title_sort is present.'''
        ans = getattr(self, 'title_sort', None)
        if not ans or self.is_null('title_sort') or ans == _('Unknown'):
            ans = ''
        return ans or title_sort(self.title or '')
class JSONCodec(JsonCodec):
    # Alias kept so the MTP driver can import its metadata codec from this
    # module; behavior is entirely inherited from JsonCodec.
    pass
| 2,225 | Python | .py | 56 | 32.517857 | 77 | 0.638436 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,761 | defaults.py | kovidgoyal_calibre/src/calibre/devices/mtp/defaults.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
import traceback
from calibre.constants import iswindows
from polyglot.builtins import iteritems
# The Supernote exposes no writable root, so calibre's bookkeeping files
# and sent books live under Document/.
supernote_settings = {
    'calibre_file_paths': {'metadata':'Document/metadata.calibre', 'driveinfo':'Document/driveinfo.calibre'},
    'send_to': ['Document', 'Documents'],
}


class DeviceDefaults:

    '''Per-device default settings, matched by USB vendor/product id.'''

    def __init__(self):
        self.rules = (
            # Amazon devices
            ({'vendor':0x1949}, {
                'format_map': ['azw3', 'mobi', 'azw',
                               'azw1', 'azw4', 'kfx', 'pdf'],
                'send_to': ['documents', 'kindle', 'books'],
                }
            ),
            # B&N devices
            ({'vendor':0x2080}, {
                'format_map': ['epub', 'pdf'],
                # NOOK does not allow writing files into root
                'calibre_file_paths': {'metadata':'NOOK/metadata.calibre', 'driveinfo':'NOOK/driveinfo.calibre'},
                'send_to': ['NOOK/My Books', 'NOOK/My Files', 'NOOK', 'Calibre_Companion', 'Books', 'eBooks/import', 'eBooks', 'sdcard/ebooks'],
                }
            ),
            # Supernote A5 and A5X and A6X2
            ({'vendor': 0x2207, 'product': 0x0031}, supernote_settings),
            ({'vendor': 0x2207, 'product': 0x0011}, supernote_settings),
            ({'vendor': 0x2207, 'product': 0x0007}, supernote_settings),  # A6X2
            ({'vendor': 0x2207, 'product': 0x0017}, supernote_settings),  # A6X2
        )

    def __call__(self, device, driver):
        '''Return (settings, vid, pid) for *device*.

        On Windows *device* is a PnP id string the vid/pid are parsed from;
        elsewhere it is an object with vendor_id/product_id attributes.
        An empty settings dict is returned when no rule matches.
        '''
        if iswindows:
            vid = pid = 0xffff
            m = re.search(r'(?i)vid_([0-9a-fA-F]+)&pid_([0-9a-fA-F]+)', device)
            if m is not None:
                try:
                    vid, pid = int(m.group(1), 16), int(m.group(2), 16)
                except Exception:
                    # Narrowed from a bare except so ^C/SystemExit are not
                    # swallowed; malformed ids fall back to 0xffff
                    traceback.print_exc()
        else:
            vid, pid = device.vendor_id, device.product_id
        for tests, settings in self.rules:
            matches = True
            for k, v in tests.items():
                if k == 'vendor' and v != vid:
                    matches = False
                    break
                if k == 'product' and v != pid:
                    matches = False
                    break
            if matches:
                return settings, vid, pid
        return {}, vid, pid
| 2,590 | Python | .py | 61 | 29.016393 | 148 | 0.484909 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,762 | __init__.py | kovidgoyal_calibre/src/calibre/devices/mtp/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 149 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,763 | base.py | kovidgoyal_calibre/src/calibre/devices/mtp/base.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from functools import wraps
from calibre import prints
from calibre.constants import DEBUG
from calibre.devices.interface import DevicePlugin
def debug(*args, **kwargs):
    # Emit MTP-prefixed diagnostics, but only in debug runs
    if not DEBUG:
        return
    prints('MTP:', *args, **kwargs)
def synchronous(func):
    '''Decorator that serializes calls to *func* under the owning
    instance's self.lock.'''
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        with self.lock:
            return func(self, *args, **kwargs)
    return wrapper
class MTPDeviceBase(DevicePlugin):
    # Plugin metadata
    name = 'MTP Device Interface'
    gui_name = _('MTP device')
    icon = 'devices/tablet.png'
    description = _('Communicate with MTP devices')
    author = 'Kovid Goyal'
    version = (1, 0, 0)

    def __init__(self, *args, **kwargs):
        DevicePlugin.__init__(self, *args, **kwargs)
        self.progress_reporter = None
        self.current_friendly_name = None
        # No-op progress reporter until a real one is installed
        self.report_progress = lambda x, y: None
        self.current_serial_num = None

    def reset(self, key='-1', log_packets=False, report_progress=None,
            detected_device=None):
        # MTP devices have no per-connection state to reset
        pass

    def set_progress_reporter(self, report_progress):
        self.report_progress = report_progress

    def get_gui_name(self):
        # Prefer the device-reported friendly name when available
        return getattr(self, 'current_friendly_name', self.gui_name)

    def is_usb_connected(self, devices_on_system, debug=False,
            only_presence=False):
        # We manage device presence ourselves, so this method should always
        # return False
        return False

    def build_template_regexp(self):
        from calibre.devices.utils import build_template_regexp
        return build_template_regexp(self.save_template)

    def is_customizable(self):
        return True
| 1,808 | Python | .py | 47 | 32.170213 | 75 | 0.672592 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,764 | driver.py | kovidgoyal_calibre/src/calibre/devices/mtp/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import importlib
import json
import os
import posixpath
import sys
import traceback
from io import BytesIO
from typing import Sequence
from calibre import prints
from calibre.constants import iswindows, numeric_version
from calibre.devices.errors import PathError
from calibre.devices.mtp.base import debug
from calibre.devices.mtp.defaults import DeviceDefaults
from calibre.devices.mtp.filesystem_cache import FileOrFolder
from calibre.ptempfile import PersistentTemporaryDirectory, SpooledTemporaryFile
from calibre.utils.filenames import shorten_components_to
from calibre.utils.icu import lower as icu_lower
from polyglot.builtins import as_bytes, iteritems, itervalues
# Select the platform-specific low-level MTP implementation; the concrete
# MTP_DEVICE class below derives from it.
BASE = importlib.import_module('calibre.devices.mtp.%s.driver'%(
        'windows' if iswindows else 'unix')).MTP_DEVICE
DEFAULT_THUMBNAIL_HEIGHT = 320
class MTPInvalidSendPathError(PathError):
    '''Raised when a book would be sent into a folder the user ignores.'''

    def __init__(self, folder):
        self.folder = folder
        PathError.__init__(self, 'Trying to send to ignored folder: %s'%folder)
class MTP_DEVICE(BASE):

    '''calibre device driver for MTP devices, layered on top of the
    platform-specific BASE implementation.'''

    # Locations of calibre's bookkeeping files on the device
    METADATA_CACHE = 'metadata.calibre'
    DRIVEINFO = 'driveinfo.calibre'

    CAN_SET_METADATA = []
    NEWS_IN_FOLDER = True
    MAX_PATH_LEN = 230
    THUMBNAIL_HEIGHT = DEFAULT_THUMBNAIL_HEIGHT
    # NOTE(review): duplicate of the assignment above; harmless but redundant
    CAN_SET_METADATA = []
    BACKLOADING_ERROR_MESSAGE = None
    MANAGES_DEVICE_PRESENCE = True
    FORMATS = ['epub', 'azw3', 'mobi', 'pdf']
    DEVICE_PLUGBOARD_NAME = 'MTP_DEVICE'
    SLOW_DRIVEINFO = True
    ASK_TO_ALLOW_CONNECT = True
    def __init__(self, *args, **kwargs):
        BASE.__init__(self, *args, **kwargs)
        self.plugboards = self.plugboard_func = None
        self._prefs = None
        self.device_defaults = DeviceDefaults()
        self.current_device_defaults = {}
        self.current_vid = self.current_pid = -1
        # Default file locations; may be overridden per-device in open()
        self.calibre_file_paths = {'metadata':self.METADATA_CACHE, 'driveinfo':self.DRIVEINFO}
        self.highlight_ignored_folders = False
    @property
    def prefs(self):
        '''Lazily created JSONConfig ('mtp_devices') holding user
        preferences shared by all MTP devices.'''
        from calibre.utils.config import JSONConfig
        if self._prefs is None:
            self._prefs = p = JSONConfig('mtp_devices')
            p.defaults['format_map'] = self.FORMATS
            # Candidate folders, in priority order, for sending books
            p.defaults['send_to'] = [
                'Calibre_Companion', 'Books', 'eBooks/import', 'eBooks',
                'wordplayer/calibretransfer', 'sdcard/ebooks',
                'Android/data/com.amazon.kindle/files', 'kindle', 'NOOK', 'Documents',
            ]
            p.defaults['send_template'] = '{title} - {authors}'
            p.defaults['blacklist'] = []
            p.defaults['history'] = {}
            p.defaults['rules'] = []
            p.defaults['ignored_folders'] = {}

        return self._prefs
    @property
    def is_kindle(self) -> bool:
        # 0x1949 is Amazon's USB vendor id
        return self.current_vid == 0x1949
def is_folder_ignored(self, storage_or_storage_id, path,
ignored_folders=None):
storage_id = str(getattr(storage_or_storage_id, 'object_id',
storage_or_storage_id))
lpath = tuple(icu_lower(name) for name in path)
if ignored_folders is None:
ignored_folders = self.get_pref('ignored_folders')
if storage_id in ignored_folders:
# Use the users ignored folders settings
return '/'.join(lpath) in {icu_lower(x) for x in ignored_folders[storage_id]}
if self.is_kindle and lpath and lpath[-1].endswith('.sdr'):
return True
# Implement the default ignore policy
# Top level ignores
if lpath[0] in {
'alarms', 'dcim', 'movies', 'music', 'notifications',
'pictures', 'ringtones', 'samsung', 'sony', 'htc', 'bluetooth', 'fonts',
'games', 'lost.dir', 'video', 'whatsapp', 'image', 'com.zinio.mobile.android.reader'}:
return True
if lpath[0].startswith('.') and lpath[0] != '.tolino':
# apparently the Tolino for some reason uses a hidden folder for its library, sigh.
return True
if lpath[0] == 'system' and not self.is_kindle:
# on Kindles we need the system folder for the amazon cover bug workaround
return True
if len(lpath) > 1 and lpath[0] == 'android':
# Ignore everything in Android apart from a few select folders
if lpath[1] != 'data':
return True
if len(lpath) > 2 and lpath[2] != 'com.amazon.kindle':
return True
return False
def configure_for_kindle_app(self):
proxy = self.prefs
with proxy:
proxy['format_map'] = ['azw3', 'mobi', 'azw', 'azw1', 'azw4', 'pdf']
proxy['send_template'] = '{title} - {authors}'
orig = list(proxy['send_to'])
for folder in ('kindle', 'Android/data/com.amazon.kindle/files'):
if folder in orig:
orig.remove(folder)
orig.insert(0, folder)
proxy['send_to'] = orig
def configure_for_generic_epub_app(self):
with self.prefs:
for x in ('format_map', 'send_template', 'send_to'):
del self.prefs[x]
    def open(self, device, library_uuid):
        '''Open a connection to *device*, record it in the connection
        history and load its per-device default settings.'''
        from calibre.utils.date import isoformat, utcnow
        self.current_library_uuid = library_uuid
        self.location_paths = None
        self.driveinfo = {}
        BASE.open(self, device, library_uuid)
        h = self.prefs['history']
        if self.current_serial_num:
            # Remember friendly name and timestamp of this connection
            h[self.current_serial_num] = (self.current_friendly_name,
                    isoformat(utcnow()))
            self.prefs['history'] = h

        self.current_device_defaults, self.current_vid, self.current_pid = self.device_defaults(device, self)
        self.calibre_file_paths = self.current_device_defaults.get(
            'calibre_file_paths', {'metadata':self.METADATA_CACHE, 'driveinfo':self.DRIVEINFO})
        self.THUMBNAIL_HEIGHT = DEFAULT_THUMBNAIL_HEIGHT
        if self.is_kindle:
            self.THUMBNAIL_HEIGHT = 500  # see kindle/driver.py
            try:
                self.sync_kindle_thumbnails()
            except Exception:
                import traceback
                traceback.print_exc()
    def list(self, path, recurse=False):
        '''List entries under *path* ('/...' addresses main memory,
        'card:/...' addresses card A), optionally recursing into
        subfolders.

        NOTE(review): a path with neither prefix leaves the storage id
        undefined and would raise NameError; callers appear to always
        pass one of the two forms — confirm.
        '''
        if path.startswith('/'):
            q = self._main_id
            path = path[1:]
        elif path.startswith('card:/'):
            q = self._carda_id
            path = path[6:]
        for storage in self.filesystem_cache.entries:
            if storage.storage_id == q:
                if path:
                    path = path.replace(os.sep, '/')
                    parts = path.split('/')
                    if parts:
                        storage = storage.find_path(parts)
                        if storage is None:
                            return []
                return list(storage.list(recurse))
        return []
    def get_device_uid(self):
        # The serial number uniquely identifies the connected device
        return self.current_serial_num
def ignore_connected_device(self, uid):
bl = self.prefs['blacklist']
if uid not in bl:
bl.append(uid)
self.prefs['blacklist'] = bl
if self.is_mtp_device_connected:
self.eject()
def put_calibre_file(self, storage, key, stream, size):
path = self.calibre_file_paths[key].split('/')
parent = self.ensure_parent(storage, path)
self.put_file(parent, path[-1], stream, size)
# Device information {{{
def _update_drive_info(self, storage, location_code, name=None):
import uuid
from calibre.utils.config import from_json, to_json
from calibre.utils.date import isoformat, now
f = storage.find_path(self.calibre_file_paths['driveinfo'].split('/'))
dinfo = {}
if f is not None:
try:
stream = self.get_mtp_file(f)
dinfo = json.load(stream, object_hook=from_json)
except:
prints('Failed to load existing driveinfo.calibre file, with error:')
traceback.print_exc()
dinfo = {}
if dinfo.get('device_store_uuid', None) is None:
dinfo['device_store_uuid'] = str(uuid.uuid4())
if dinfo.get('device_name', None) is None:
dinfo['device_name'] = self.current_friendly_name
if name is not None:
dinfo['device_name'] = name
dinfo['location_code'] = location_code
dinfo['last_library_uuid'] = getattr(self, 'current_library_uuid', None)
dinfo['calibre_version'] = '.'.join(str(i) for i in numeric_version)
dinfo['date_last_connected'] = isoformat(now())
dinfo['mtp_prefix'] = storage.storage_prefix
raw = as_bytes(json.dumps(dinfo, default=to_json))
self.put_calibre_file(storage, 'driveinfo', BytesIO(raw), len(raw))
self.driveinfo[location_code] = dinfo
def get_driveinfo(self):
    '''Return the cached driveinfo map, loading/creating the
    driveinfo.calibre file on every present storage on first use.'''
    if not self.driveinfo:
        self.driveinfo = {}
        for sid, location_code in ((self._main_id, 'main'), (self._carda_id,
            'A'), (self._cardb_id, 'B')):
            if sid is None:
                continue
            self._update_drive_info(self.filesystem_cache.storage(sid), location_code)
    return self.driveinfo
def get_device_information(self, end_session=True):
    '''Return the basic device information tuple with the driveinfo map
    appended as its final element.'''
    self.report_progress(1.0, _('Get device information...'))
    dinfo = self.get_basic_device_information()
    return tuple(list(dinfo) + [self.driveinfo])
def card_prefix(self, end_session=True):
    '''Return the storage ids of the two possible memory cards; None for
    slots that are absent on the connected device.'''
    carda, cardb = self._carda_id, self._cardb_id
    return (carda, cardb)
def set_driveinfo_name(self, location_code, name):
    '''Set the user-visible device name stored in the driveinfo file of
    the given location ('main', 'A' or 'B'); no-op for absent storage.'''
    sid = {'main':self._main_id, 'A':self._carda_id,
            'B':self._cardb_id}.get(location_code, None)
    if sid is None:
        return
    self._update_drive_info(self.filesystem_cache.storage(sid),
            location_code, name=name)
# }}}
# Get list of books from device, with metadata {{{
def filesystem_callback(self, msg):
    # Progress callback invoked repeatedly while the device filesystem
    # metadata is being scanned
    self.report_progress(0, msg)
def books(self, oncard=None, end_session=True):
    '''Return a BookList of the e-books on the selected storage, using the
    on-device metadata cache where entries are still valid (same size) and
    re-reading metadata from the files otherwise. Rewrites the cache on
    the device if anything changed.'''
    from calibre.devices.mtp.books import Book, BookList, JSONCodec
    self.report_progress(0, _('Listing files, this can take a while'))
    self.get_driveinfo()  # Ensure driveinfo is loaded
    sid = {'carda':self._carda_id, 'cardb':self._cardb_id}.get(oncard,
            self._main_id)
    if sid is None:
        return BookList(None)

    bl = BookList(sid)
    # If True then there is a mismatch between the ebooks on the device and
    # the metadata cache
    need_sync = False
    all_books = list(self.filesystem_cache.iterebooks(sid))
    steps = len(all_books) + 2
    count = 0

    self.report_progress(0, _('Reading e-book metadata'))
    # Read the cache if it exists
    storage = self.filesystem_cache.storage(sid)
    cache = storage.find_path(self.calibre_file_paths['metadata'].split('/'))
    if cache is not None:
        json_codec = JSONCodec()
        try:
            stream = self.get_mtp_file(cache)
            json_codec.decode_from_file(stream, bl, Book, sid)
        except:
            # Unreadable cache: fall through and rebuild it from scratch
            need_sync = True

    # Map of on-device relative path -> index in the cached booklist
    relpath_cache = {b.mtp_relpath:i for i, b in enumerate(bl)}

    for mtp_file in all_books:
        count += 1
        relpath = mtp_file.mtp_relpath
        idx = relpath_cache.get(relpath, None)
        if idx is not None:
            cached_metadata = bl[idx]
            del relpath_cache[relpath]
            if cached_metadata.size == mtp_file.size:
                # Same size: assume the cached metadata is still valid
                cached_metadata.datetime = mtp_file.last_modified.timetuple()
                cached_metadata.path = mtp_file.mtp_id_path
                debug('Using cached metadata for',
                        '/'.join(mtp_file.full_path))
                continue  # No need to update metadata
            book = cached_metadata
        else:
            book = Book(sid, '/'.join(relpath))
            bl.append(book)

        need_sync = True

        self.report_progress(count/steps, _('Reading metadata from %s')%
                ('/'.join(relpath)))
        try:
            book.smart_update(self.read_file_metadata(mtp_file))
            debug('Read metadata for', '/'.join(mtp_file.full_path))
        except:
            prints('Failed to read metadata from',
                    '/'.join(mtp_file.full_path))
            traceback.print_exc()
        book.size = mtp_file.size
        book.datetime = mtp_file.last_modified.timetuple()
        book.path = mtp_file.mtp_id_path

    # Remove books in the cache that no longer exist
    for idx in sorted(itervalues(relpath_cache), reverse=True):
        del bl[idx]
        need_sync = True

    if need_sync:
        self.report_progress(count/steps, _('Updating metadata cache on device'))
        self.write_metadata_cache(storage, bl)

    self.report_progress(1, _('Finished reading metadata from device'))
    return bl
def read_file_metadata(self, mtp_file):
    '''Download mtp_file from the device and extract e-book metadata from
    the resulting stream using calibre's metadata readers.'''
    from calibre.customize.ui import quick_metadata
    from calibre.ebooks.metadata.meta import get_metadata
    ext = mtp_file.name.rpartition('.')[-1].lower()
    stream = self.get_mtp_file(mtp_file)
    with quick_metadata:
        return get_metadata(stream, stream_type=ext,
                force_read_metadata=True,
                pattern=self.build_template_regexp())
def write_metadata_cache(self, storage, bl):
    '''Serialize the booklist bl as JSON and store it on the device as
    calibre's metadata cache file for that storage.'''
    from calibre.devices.mtp.books import JSONCodec

    if bl.storage_id != storage.storage_id:
        # Just a sanity check, should never happen
        return

    json_codec = JSONCodec()
    stream = SpooledTemporaryFile(10*(1024**2))
    json_codec.encode_to_file(stream, bl)
    size = stream.tell()
    stream.seek(0)
    self.put_calibre_file(storage, 'metadata', stream, size)
def sync_booklists(self, booklists, end_session=True):
    '''Write the on-device metadata cache for every booklist that belongs
    to a storage present on the connected device.'''
    debug('sync_booklists() called')
    for bl in booklists:
        if getattr(bl, 'storage_id', None) is None:
            continue
        storage = self.filesystem_cache.storage(bl.storage_id)
        if storage is None:
            continue
        self.write_metadata_cache(storage, bl)
    debug('sync_booklists() ended')
# }}}
# Get files from the device {{{
def get_file(self, path, outfile, end_session=True):
    '''Copy the file at the given MTP id path into the outfile stream.'''
    f = self.filesystem_cache.resolve_mtp_id_path(path)
    self.get_mtp_file(f, outfile)
def prepare_addable_books(self, paths):
    '''Download the books at the given MTP id paths into a temporary
    directory so they can be added back into the calibre library.

    :return: A list with one entry per path: the local file path on
        success, or a (path, exception, traceback) tuple on failure.
    '''
    tdir = PersistentTemporaryDirectory('_prepare_mtp')
    ans = []
    for path in paths:
        try:
            f = self.filesystem_cache.resolve_mtp_id_path(path)
        except Exception as e:
            ans.append((path, e, traceback.format_exc()))
            continue
        base = os.path.join(tdir, '%s'%f.object_id)
        os.mkdir(base)
        name = f.name
        if iswindows:
            plen = len(base)
            # Keep the full local path within Windows path-length limits
            name = ''.join(shorten_components_to(245-plen, [name]))
        with open(os.path.join(base, name), 'wb') as out:
            try:
                self.get_mtp_file(f, out)
            except Exception as e:
                ans.append((path, e, traceback.format_exc()))
            else:
                ans.append(out.name)
    return ans
# }}}
# Sending files to the device {{{
def set_plugboards(self, plugboards, pb_func):
    '''Record the metadata plugboards and the lookup function used to
    apply them when sending books to the device.'''
    self.plugboard_func = pb_func
    self.plugboards = plugboards
def create_upload_path(self, path, mdata, fname, routing):
    '''Compute the on-device path at which to store an e-book.

    :param path: Default base folder on the device.
    :param mdata: Metadata object for the book being sent.
    :param fname: File name of the e-book (used for its extension).
    :param routing: Mapping of file extension -> base folder, which
        overrides the default base folder for that format.
    :return: Tuple of path components for the upload destination.
    '''
    from calibre.devices.utils import create_upload_path
    from calibre.utils.filenames import ascii_filename as sanitize
    ext = fname.rpartition('.')[-1].lower()
    # Format-specific routing rules override the default base folder
    path = routing.get(ext, path)

    filepath = create_upload_path(mdata, fname, self.save_template, sanitize,
            prefix_path=path,
            path_type=posixpath,
            maxlen=self.MAX_PATH_LEN,
            use_subdirs='/' in self.save_template,
            news_in_folder=self.NEWS_IN_FOLDER,
    )
    # tuple(split) directly; the generator wrapper was redundant
    return tuple(filepath.split('/'))
def prefix_for_location(self, on_card):
    '''Return the base folder for sending books to the given location
    (None for main memory, 'carda'/'cardb' for the cards): the first entry
    of the send_to preference that exists on that storage, falling back to
    'Books'. Results are computed once and cached in self.location_paths.'''
    if self.location_paths is None:
        self.location_paths = {}
        for sid, loc in ((self._main_id, None), (self._carda_id, 'carda'),
                (self._cardb_id, 'cardb')):
            if sid is not None:
                storage = self.filesystem_cache.storage(sid)
                prefixes = self.get_pref('send_to')
                p = None
                for path in prefixes:
                    path = path.replace(os.sep, '/')
                    if storage.find_path(path.split('/')) is not None:
                        p = path
                        break
                if p is None:
                    p = 'Books'
                self.location_paths[loc] = p

    return self.location_paths[on_card]
def ensure_parent(self, storage, path):
    '''Ensure all folders in path, except the final component (the file
    name), exist under storage, creating them as needed.

    :param storage: Root folder object to create the hierarchy under.
    :param path: Sequence of path components; the last one is ignored.
    :return: The folder object corresponding to path[:-1].
    '''
    parent = storage
    # Walk every component except the last, creating missing folders.
    # (Replaces a manual head/tail loop that re-sliced the list on every
    # iteration.) create_folder() returns existing folders unchanged.
    for name in path[:-1]:
        parent = self.create_folder(parent, name)
    return parent
def upload_books(self, files, names, on_card=None, end_session=True,
        metadata=None):
    '''Send the given files to the device, uploading covers where
    supported, and return a list of (mtp_file, booklist_index) tuples for
    later use by add_books_to_metadata().

    :param files: List of local file paths or open streams.
    :param names: Corresponding file names to use on the device.
    :param on_card: None, 'carda' or 'cardb' selecting the destination.
    :param metadata: Corresponding metadata objects.
    :raises MTPInvalidSendPathError: if the computed destination lies in a
        folder the user has configured to be ignored.
    '''
    debug('upload_books() called')
    from calibre.devices.utils import sanity_check
    sanity_check(on_card, files, self.card_prefix(), self.free_space())
    prefix = self.prefix_for_location(on_card)
    sid = {'carda':self._carda_id, 'cardb':self._cardb_id}.get(on_card,
            self._main_id)
    bl_idx = {'carda':1, 'cardb':2}.get(on_card, 0)
    storage = self.filesystem_cache.storage(sid)

    ans = []
    self.report_progress(0, _('Transferring books to device...'))
    i, total = 0, len(files)

    # Per-format folder routing rules from the user's preferences
    routing = {fmt:dest for fmt,dest in self.get_pref('rules')}

    for infile, fname, mi in zip(files, names, metadata):
        path = self.create_upload_path(prefix, mi, fname, routing)
        if path and self.is_folder_ignored(storage, path):
            raise MTPInvalidSendPathError('/'.join(path))
        parent = self.ensure_parent(storage, path)
        if hasattr(infile, 'read'):
            # Stream input: measure its size, preserving the read position
            pos = infile.tell()
            infile.seek(0, 2)
            sz = infile.tell()
            infile.seek(pos)
            stream = infile
            close = False
        else:
            sz = os.path.getsize(infile)
            stream = open(infile, 'rb')
            close = True
        relpath = parent.mtp_relpath + (path[-1].lower(),)
        try:
            mtp_file = self.put_file(parent, path[-1], stream, sz)
            try:
                self.upload_cover(parent, relpath, storage, mi, stream)
            except Exception:
                # Cover upload failure must not abort the book transfer
                import traceback
                traceback.print_exc()
        finally:
            if close:
                stream.close()
        ans.append((mtp_file, bl_idx))
        i += 1
        self.report_progress(i/total, _('Transferred %s to device')%mi.title)

    self.report_progress(1, _('Transfer to device finished...'))
    debug('upload_books() ended')
    return ans
def upload_cover(self, parent_folder: FileOrFolder, relpath_of_ebook_on_device: Sequence[str], storage: FileOrFolder, mi, ebook_file_as_stream):
    '''Upload a cover image for a just-sent book. Only Kindles currently
    get cover handling (see upload_kindle_thumbnail); a no-op otherwise.'''
    if not self.is_kindle:
        return
    self.upload_kindle_thumbnail(parent_folder, relpath_of_ebook_on_device, storage, mi, ebook_file_as_stream)
# Kindle cover thumbnail handling {{{
def upload_kindle_thumbnail(self, parent_folder: FileOrFolder, relpath_of_ebook_on_device: Sequence[str], storage: FileOrFolder, mi, ebook_file_as_stream):
    '''Upload the book's cover thumbnail into the Kindle's
    system/thumbnails folder, and also keep a copy plus an index entry
    (mapping the book's relpath hash to the thumbnail name) under
    amazon-cover-bug, so the thumbnail can later be restored after
    Amazon's software overwrites it (see sync_kindle_thumbnails).'''
    coverdata = getattr(mi, 'thumbnail', None)
    if not coverdata or not coverdata[2]:
        return
    from calibre.devices.kindle.driver import thumbnail_filename
    tfname = thumbnail_filename(ebook_file_as_stream)
    if not tfname:
        return
    thumbpath = 'system', 'thumbnails', tfname
    cover_stream = BytesIO(coverdata[2])
    sz = len(coverdata[2])
    try:
        parent = self.ensure_parent(storage, thumbpath)
    except Exception as err:
        # A missing/uncreatable system folder is not fatal to the send
        print(f'Failed to upload cover thumbnail to system/thumbnails with error: {err}', file=sys.stderr)
        return
    self.put_file(parent, tfname, cover_stream, sz)
    cover_stream.seek(0)
    cache_path = 'amazon-cover-bug', tfname
    parent = self.ensure_parent(storage, cache_path)
    self.put_file(parent, tfname, cover_stream, sz)
    # mapping from ebook relpath to thumbnail filename
    from hashlib import sha1
    index_name = sha1('/'.join(relpath_of_ebook_on_device).encode()).hexdigest()
    data = tfname.encode()
    self.put_file(parent, index_name, BytesIO(data), len(data))
def delete_kindle_cover_thumbnail_for(self, storage: FileOrFolder, mtp_relpath: Sequence[str]) -> None:
    '''Remove the cover thumbnail, its amazon-cover-bug backup copy and
    the relpath->thumbnail index entry for the book at mtp_relpath.'''
    from hashlib import sha1
    # Index files are named after the SHA1 of the book's relpath, matching
    # upload_kindle_thumbnail()
    index_name = sha1('/'.join(mtp_relpath).encode()).hexdigest()
    index = storage.find_path(('amazon-cover-bug', index_name))
    if index is not None:
        data = BytesIO()
        self.get_mtp_file(index, data)
        tfname = data.getvalue().decode().strip()
        if tfname:
            thumbnail = storage.find_path(('system', 'thumbnails', tfname))
            if thumbnail is not None:
                self.delete_file_or_folder(thumbnail)
            cache = storage.find_path(('amazon-cover-bug', tfname))
            if cache is not None:
                self.delete_file_or_folder(cache)
        self.delete_file_or_folder(index)
def sync_kindle_thumbnails(self):
    '''Restore any cover thumbnails clobbered by Amazon's software, on
    every storage of the connected device.'''
    for storage in self.filesystem_cache.entries:
        self._sync_kindle_thumbnails(storage)
def _sync_kindle_thumbnails(self, storage):
    '''Copy thumbnails from the amazon-cover-bug cache back over entries
    in system/thumbnails whose size differs (i.e. that Amazon's software
    replaced).'''
    system_thumbnails_dir = storage.find_path(('system', 'thumbnails'))
    amazon_cover_bug_cache_dir = storage.find_path(('amazon-cover-bug',))
    if system_thumbnails_dir is None or amazon_cover_bug_cache_dir is None:
        return
    debug('Syncing cover thumbnails to workaround amazon cover bug')
    system_thumbnails = {x.name: x for x in system_thumbnails_dir.files}
    count = 0
    for f in amazon_cover_bug_cache_dir.files:
        s = system_thumbnails.get(f.name)
        # A size mismatch means the on-device thumbnail was replaced
        if s is not None and s.size != f.size:
            count += 1
            data = BytesIO()
            self.get_mtp_file(f, data)
            data.seek(0)
            sz = len(data.getvalue())
            self.put_file(system_thumbnails_dir, f.name, data, sz)
    debug(f'Restored {count} cover thumbnails that were destroyed by Amazon')
# }}}
def add_books_to_metadata(self, mtp_files, metadata, booklists):
    '''Add entries for newly uploaded books, as returned by
    upload_books(), to the in-memory booklists.'''
    debug('add_books_to_metadata() called')
    from calibre.devices.mtp.books import Book

    i, total = 0, len(mtp_files)
    self.report_progress(0, _('Adding books to device metadata listing...'))
    for x, mi in zip(mtp_files, metadata):
        mtp_file, bl_idx = x
        bl = booklists[bl_idx]
        book = Book(mtp_file.storage_id, '/'.join(mtp_file.mtp_relpath),
                other=mi)
        book = bl.add_book(book, replace_metadata=True)
        if book is not None:
            book.size = mtp_file.size
            book.datetime = mtp_file.last_modified.timetuple()
            book.path = mtp_file.mtp_id_path
        i += 1
        self.report_progress(i/total, _('Added %s')%mi.title)
    self.report_progress(1, _('Adding complete'))
    debug('add_books_to_metadata() ended')
# }}}
# Removing books from the device {{{
def recursive_delete(self, obj):
    '''Delete obj from the device, then delete any resulting empty,
    deletable, non-system ancestor folders.

    :param obj: The file/folder object to delete.
    '''
    parent = self.delete_file_or_folder(obj)
    if parent.empty and parent.can_delete and not parent.is_system:
        try:
            self.recursive_delete(parent)
        except Exception:
            # Failing to remove a now-empty ancestor is harmless; log and
            # continue. Was a bare except:, which would also have
            # swallowed KeyboardInterrupt/SystemExit.
            prints('Failed to delete parent: %s, ignoring'%(
                '/'.join(parent.full_path)))
def delete_books(self, paths, end_session=True):
    '''Delete the books at the given MTP id paths (plus any resulting
    empty parent folders); on Kindles also remove their cover
    thumbnails.'''
    self.report_progress(0, _('Deleting books from device...'))

    for i, path in enumerate(paths):
        f = self.filesystem_cache.resolve_mtp_id_path(path)
        # Capture relpath/storage before deletion invalidates the object
        fpath = f.mtp_relpath
        storage = f.storage
        self.recursive_delete(f)
        if self.is_kindle:
            self.delete_kindle_cover_thumbnail_for(storage, fpath)
        self.report_progress((i+1) / float(len(paths)),
                _('Deleted %s')%path)
    self.report_progress(1, _('All books deleted'))
def remove_books_from_metadata(self, paths, booklists):
    '''Remove the books identified by the given device paths from the
    in-memory booklists.

    :param paths: MTP id paths of the books that were deleted.
    :param booklists: Sequence of BookList objects to prune.
    '''
    self.report_progress(0, _('Removing books from metadata'))
    for i, path in enumerate(paths):
        found = False
        for bl in booklists:
            if found:
                break
            for book in bl:
                if book.path == path:
                    bl.remove_book(book)
                    found = True
                    break
        self.report_progress((i+1)/len(paths), _('Removed %s')%path)
    self.report_progress(1, _('All books removed'))
# }}}
# Settings {{{
def get_pref(self, key):
    ''' Get the setting named key. First looks for a device specific setting.
    If that is not found looks for a device default and if that is not
    found uses the global default.'''
    device_defaults = self.current_device_defaults if self.is_mtp_device_connected else {}
    per_device = self.prefs.get('device-%s' % self.current_serial_num, {})
    fallback = device_defaults.get(key, self.prefs[key])
    return per_device.get(key, fallback)
def config_widget(self):
    '''Return the GUI configuration widget for this driver.'''
    from calibre.gui2.device_drivers.mtp_config import MTPConfig
    return MTPConfig(self, highlight_ignored_folders=self.highlight_ignored_folders)
def save_settings(self, cw):
    '''Commit the settings from the configuration widget cw.'''
    cw.commit()
def settings(self):
    '''Return an options object exposing a format_map attribute, as
    expected by the generic device interface.'''
    fmt_map = self.get_pref('format_map')

    class _Opts:
        pass

    opts = _Opts()
    opts.format_map = fmt_map
    return opts
@property
def save_template(self):
    # Template used to construct the on-device path for sent books
    return self.get_pref('send_template')
def get_user_blacklisted_devices(self):
    '''Return a map of device serial number -> friendly name for every
    device the user has blacklisted.'''
    bl = frozenset(self.prefs['blacklist'])
    # history maps serial number -> (friendly name, ...); a dict
    # comprehension replaces the manual loop and the py2 iteritems shim
    return {dev: x[0] for dev, x in self.prefs['history'].items() if dev in bl}
def set_user_blacklisted_devices(self, devs):
    '''Persist the collection of blacklisted device serial numbers.'''
    self.prefs['blacklist'] = [*devs]
# }}}
def main():
    '''Manual end-to-end test for the MTP driver: detect a connected
    device, upload a small random file, read it back, verify the contents
    and delete it again. Requires real hardware.'''
    import io
    dev = MTP_DEVICE(None)
    dev.startup()
    try:
        from calibre.devices.scanner import DeviceScanner
        scanner = DeviceScanner()
        scanner.scan()
        devs = scanner.devices
        cd = dev.detect_managed_devices(devs)
        if cd is None:
            raise ValueError('Failed to detect MTP device')
        dev.set_progress_reporter(prints)
        dev.open(cd, None)
        dev.filesystem_cache.dump()
        print('Prefix for main mem:', dev.prefix_for_location(None), flush=True)
        raw = os.urandom(32 * 1024)
        folder = dev.create_folder(dev.filesystem_cache.entries[0], 'developing-mtp-driver')
        f = dev.put_file(folder, 'developing-mtp-driver.bin', io.BytesIO(raw), len(raw))
        print('Put file:', f, flush=True)
        buf = io.BytesIO()
        dev.get_file(f.mtp_id_path, buf)
        if buf.getvalue() != raw:
            raise ValueError('Getting previously put file did not return expected data')
        print('Successfully got previously put file', flush=True)
        dev.recursive_delete(f)
    finally:
        dev.shutdown()
if __name__ == '__main__':
    # Manual smoke test: run this module directly with a device connected
    main()
| 28,623 | Python | .py | 630 | 34.047619 | 159 | 0.582802 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,765 | sysfs.py | kovidgoyal_calibre/src/calibre/devices/mtp/unix/sysfs.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import glob
import os
class MTPDetect:
    '''Detect whether a USB device is an MTP device by reading sysfs on
    Linux, which avoids having to probe the device itself.'''

    # Root of the sysfs mount; overridable for testing via the environment
    SYSFS_PATH = os.environ.get('SYSFS_PATH', '/sys')

    def __init__(self):
        self.base = os.path.join(self.SYSFS_PATH, 'subsystem', 'usb', 'devices')
        if not os.path.exists(self.base):
            # Older sysfs layout
            self.base = os.path.join(self.SYSFS_PATH, 'bus', 'usb', 'devices')
        # If neither layout exists, detection is disabled
        self.ok = os.path.exists(self.base)

    def __call__(self, dev, debug=None):
        '''
        Check if the device has an interface named "MTP" using sysfs, which
        avoids probing the device.
        '''
        if not self.ok:
            return False

        def read(x):
            # Return file contents as bytes, or None if unreadable
            try:
                with open(x, 'rb') as f:
                    return f.read()
            except OSError:
                pass

        # Interface files for every device on the same bus as dev
        ipath = os.path.join(self.base, '{0}-*/{0}-*/interface'.format(dev.busnum))
        for x in glob.glob(ipath):
            raw = read(x)
            if not raw or raw.strip() != b'MTP':
                continue
            # Interface name matches; confirm the device number matches too
            raw = read(os.path.join(os.path.dirname(os.path.dirname(x)),
                'devnum'))
            try:
                if raw and int(raw) == dev.devnum:
                    if debug is not None:
                        debug('Unknown device {} claims to be an MTP device'
                                .format(dev))
                    return True
            except (ValueError, TypeError):
                continue
        return False
| 1,602 | Python | .py | 42 | 26.261905 | 83 | 0.510652 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,766 | __init__.py | kovidgoyal_calibre/src/calibre/devices/mtp/unix/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
libmtp based drivers for MTP devices on Unix like platforms.
'''
| 218 | Python | .py | 7 | 29.428571 | 60 | 0.703883 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,767 | driver.py | kovidgoyal_calibre/src/calibre/devices/mtp/unix/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import operator
import pprint
import sys
import time
import traceback
from collections import namedtuple
from functools import partial
from threading import RLock
from calibre import as_unicode, force_unicode, prints
from calibre.constants import islinux, ismacos
from calibre.devices.errors import BlacklistedDevice, DeviceError, OpenActionNeeded, OpenFailed
from calibre.devices.mtp.base import MTPDeviceBase, debug, synchronous
from calibre.ptempfile import SpooledTemporaryFile
# Identity tuple used to recognise a physical device across USB scans
MTPDevice = namedtuple(
    'MTPDevice',
    'busnum devnum vendor_id product_id bcd serial manufacturer product')

# Sentinel object used where None is a legitimate value
null = object()


def fingerprint(d):
    '''Return a hashable MTPDevice tuple identifying the scanned USB device d.'''
    return MTPDevice(d.busnum, d.devnum, d.vendor_id, d.product_id, d.bcd,
                     d.serial, d.manufacturer, d.product)


# Apple's USB vendor id; Apple devices are never treated as MTP
APPLE = 0x05ac
class MTP_DEVICE(MTPDeviceBase):
    '''libmtp based MTP driver for Unix-like platforms. All device access
    is serialized via @synchronous (which uses self.lock).'''

    supported_platforms = ['freebsd', 'linux', 'osx']

    def __init__(self, *args, **kwargs):
        MTPDeviceBase.__init__(self, *args, **kwargs)
        self.libmtp = None
        self.known_devices = None
        # Cache of fingerprint -> is-MTP verdicts to avoid re-probing
        self.detect_cache = {}

        self.dev = None
        self._filesystem_cache = None
        self.lock = RLock()
        self.blacklisted_devices = set()
        self.ejected_devices = set()
        self.currently_connected_dev = None
        self._is_device_mtp = None
        if islinux:
            from calibre.devices.mtp.unix.sysfs import MTPDetect
            self._is_device_mtp = MTPDetect()
        if ismacos and 'osx' in self.supported_platforms:
            from calibre_extensions import usbobserver
            self.usbobserver = usbobserver
            self._is_device_mtp = self.osx_is_device_mtp

    def is_device_mtp(self, d, debug=None):
        ''' Returns True iff the _is_device_mtp check returns True and libmtp
        is able to probe the device successfully. '''
        if self._is_device_mtp is None:
            return False
        return (self._is_device_mtp(d, debug=debug) and
                self.libmtp.is_mtp_device(d.busnum, d.devnum))

    def osx_is_device_mtp(self, d, debug=None):
        '''macOS platform check: ask the IOKit registry (via the
        usbobserver extension) whether d claims to be an MTP device.'''
        if not d.serial:
            ans = False
        else:
            try:
                ans = self.usbobserver.is_mtp_device(d.vendor_id, d.product_id, d.bcd, d.serial)
            except Exception:
                if debug is not None:
                    import traceback
                    traceback.print_stack()
                return False
        if debug is not None and ans:
            debug(f'Device {d} claims to be an MTP device in the IOKit registry')
        return bool(ans)

    def set_debug_level(self, lvl):
        # Pass-through to libmtp's debug logging control
        self.libmtp.set_debug_level(lvl)

    @synchronous
    def detect_managed_devices(self, devices_on_system, force_refresh=False):
        '''Return the fingerprint of a connected MTP device, or None.
        Results of per-device probes are cached in self.detect_cache.'''
        if self.libmtp is None:
            return None
        # First remove blacklisted devices.
        devs = set()
        for d in devices_on_system:
            fp = fingerprint(d)
            if fp not in self.blacklisted_devices and fp.vendor_id != APPLE:
                # Do not try to open Apple devices
                devs.add(fp)

        # Clean up ejected devices
        self.ejected_devices = devs.intersection(self.ejected_devices)

        # Check if the currently connected device is still present
        if self.currently_connected_dev is not None:
            return (self.currently_connected_dev if
                    self.currently_connected_dev in devs else None)

        # Remove ejected devices
        devs = devs - self.ejected_devices

        # Now check for MTP devices
        if force_refresh:
            self.detect_cache = {}
        cache = self.detect_cache
        for d in devs:
            ans = cache.get(d, None)
            if ans is None:
                ans = (
                    (d.vendor_id, d.product_id) in self.known_devices or
                    self.is_device_mtp(d))
                cache[d] = ans
            if ans:
                return d

        return None

    @synchronous
    def debug_managed_device_detection(self, devices_on_system, output):
        '''Verbose detection used by the "debug device detection" GUI
        feature; writes a report to output and returns True on success.'''
        if self.currently_connected_dev is not None:
            return True
        p = partial(prints, file=output)
        if self.libmtp is None:
            err = 'startup() not called on this device driver'
            p(err)
            return False
        devs = [d for d in devices_on_system if
            ((d.vendor_id, d.product_id) in self.known_devices or
             self.is_device_mtp(d, debug=p)) and d.vendor_id != APPLE]
        if not devs:
            p('No MTP devices connected to system')
            return False
        p('MTP devices connected:')
        for d in devs:
            p(d)
        for d in devs:
            p('\nTrying to open:', d)
            try:
                self.open(d, 'debug')
            except BlacklistedDevice:
                p('This device has been blacklisted by the user')
                continue
            except:
                p('Opening device failed:')
                p(traceback.format_exc())
                return False
            else:
                p('Opened', self.current_friendly_name, 'successfully')
                p('Storage info:')
                p(pprint.pformat(self.dev.storage_info))
                self.post_yank_cleanup()
                return True
        return False

    @synchronous
    def create_device(self, connected_device):
        '''Construct a libmtp Device object for the scanned USB device.'''
        d = connected_device
        man, prod = d.manufacturer, d.product
        man = force_unicode(man, 'utf-8') if isinstance(man, bytes) else man
        prod = force_unicode(prod, 'utf-8') if isinstance(prod, bytes) else prod
        return self.libmtp.Device(d.busnum, d.devnum, d.vendor_id,
                d.product_id, man, prod, d.serial)

    @synchronous
    def eject(self):
        # Mark the device as ejected so detection ignores it until replug
        if self.currently_connected_dev is None:
            return
        self.ejected_devices.add(self.currently_connected_dev)
        self.post_yank_cleanup()

    @synchronous
    def post_yank_cleanup(self):
        # Reset all per-connection state after unplug/eject
        self.dev = self._filesystem_cache = self.current_friendly_name = None
        self.currently_connected_dev = None
        self.current_serial_num = None

    @property
    def is_mtp_device_connected(self):
        return self.currently_connected_dev is not None

    @synchronous
    def startup(self):
        '''Load the libmtp extension; detection stays disabled if it is
        unavailable.'''
        try:
            from calibre_extensions import libmtp
        except Exception as err:
            print('Failed to load libmtp, MTP device detection disabled')
            print(err)
            self.libmtp = None
        else:
            self.libmtp = libmtp
            self.known_devices = frozenset(self.libmtp.known_devices())
            # Mirror libmtp's LIBMTP_* constants onto this instance
            for x in vars(self.libmtp):
                if x.startswith('LIBMTP'):
                    setattr(self, x, getattr(self.libmtp, x))

    @synchronous
    def shutdown(self):
        self.dev = self._filesystem_cache = None

    def format_errorstack(self, errs):
        # errs is a sequence of (error code, message) pairs from libmtp
        return '\n'.join('%d:%s'%(code, as_unicode(msg)) for code, msg in errs)

    @synchronous
    def open(self, connected_device, library_uuid):
        '''Open the device for use, populating storage ids and the
        friendly name.

        :raises OpenFailed: if the device cannot be opened or has no
            writable storage (the device is also blacklisted for this
            session).
        :raises OpenActionNeeded: if the device is waiting for the user to
            allow the connection (common on Android).
        :raises BlacklistedDevice: if the user has blacklisted the device.
        '''
        self.dev = self._filesystem_cache = None
        try:
            self.dev = self.create_device(connected_device)
        except Exception as e:
            self.blacklisted_devices.add(connected_device)
            raise OpenFailed('Failed to open %s: Error: %s'%(
                    connected_device, as_unicode(e)))
        try:
            storage = sorted(self.dev.storage_info, key=operator.itemgetter('id'))
        except self.libmtp.MTPError as e:
            if "The device has no storage information." in str(e):
                # This happens on newer Android devices while waiting for
                # the user to allow access. Apparently what happens is
                # that when the user clicks allow, the device disconnects
                # and re-connects as a new device.
                name = self.dev.friendly_name or ''
                if not name:
                    if connected_device.manufacturer:
                        name = connected_device.manufacturer
                    if connected_device.product:
                        name = name and (name + ' ')
                        name += connected_device.product
                    name = name or _('Unnamed device')
                raise OpenActionNeeded(name, _(
                    'The device {0} is not allowing connections.'
                    ' Unlock the screen on the {0}, tap "Allow" on any connection popup message you see,'
                    ' then either wait a minute or restart calibre. You might'
                    ' also have to change the mode of the USB connection on the {0}'
                    ' to "Media Transfer mode (MTP)" or similar.'
                ).format(name), (name, self.dev.serial_number))
            raise
        # Only writable storage is usable
        storage = [x for x in storage if x.get('rw', False)]
        if not storage:
            self.blacklisted_devices.add(connected_device)
            raise OpenFailed('No storage found for device %s'%(connected_device,))
        snum = self.dev.serial_number
        if snum in self.prefs.get('blacklist', []):
            self.blacklisted_devices.add(connected_device)
            self.dev = None
            raise BlacklistedDevice(
                'The %s device has been blacklisted by the user'%(connected_device,))
        # First storage is main memory; up to two more become the "cards"
        self._main_id = storage[0]['id']
        self._carda_id = self._cardb_id = None
        if len(storage) > 1:
            self._carda_id = storage[1]['id']
        if len(storage) > 2:
            self._cardb_id = storage[2]['id']
        self.current_friendly_name = self.dev.friendly_name
        if not self.current_friendly_name:
            self.current_friendly_name = self.dev.model_name or _('Unknown MTP device')
        self.current_serial_num = snum
        self.currently_connected_dev = connected_device

    @synchronous
    def device_debug_info(self):
        '''Return a human-readable report about the opened device.'''
        ans = self.get_gui_name()
        ans += '\nSerial number: %s'%self.current_serial_num
        ans += '\nManufacturer: %s'%self.dev.manufacturer_name
        ans += '\nModel: %s'%self.dev.model_name
        ans += '\nids: %s'%(self.dev.ids,)
        ans += '\nDevice version: %s'%self.dev.device_version
        ans += '\nStorage:\n'
        storage = sorted(self.dev.storage_info, key=operator.itemgetter('id'))
        ans += pprint.pformat(storage)
        return ans

    def _filesystem_callback(self, fs_map, entry, level):
        '''Callback invoked by libmtp for every filesystem object found;
        returns False to make libmtp skip ignored folders.'''
        name = entry.get('name', '')
        self.filesystem_callback(_('Found object: %s')%name)
        fs_map[entry.get('id', null)] = entry
        # Reconstruct the object's full path by walking parent ids
        path = [name]
        pid = entry.get('parent_id', 0)
        while pid != 0 and pid in fs_map:
            parent = fs_map[pid]
            path.append(parent.get('name', ''))
            pid = parent.get('parent_id', 0)
            if fs_map.get(pid, None) is parent:
                break  # An object is its own parent
        path = tuple(reversed(path))
        ok = not self.is_folder_ignored(self._currently_getting_sid, path)
        if not ok:
            debug('Ignored object: %s' % '/'.join(path))
        return ok

    @property
    def filesystem_cache(self):
        '''Lazily scan and cache the full device filesystem metadata.
        Expensive on first access; raises DeviceError if nothing could be
        read.'''
        if self._filesystem_cache is None:
            st = time.time()
            debug('Loading filesystem metadata...')
            from calibre.devices.mtp.filesystem_cache import FilesystemCache
            with self.lock:
                storage, all_items, all_errs = [], [], []
                for sid, capacity in zip([self._main_id, self._carda_id,
                        self._cardb_id], self.total_space()):
                    if sid is None:
                        continue
                    name = _('Unknown')
                    for x in self.dev.storage_info:
                        if x['id'] == sid:
                            name = x['name']
                            break
                    storage.append({'id':sid, 'size':capacity,
                        'is_folder':True, 'name':name, 'can_delete':False,
                        'is_system':True})
                    self._currently_getting_sid = str(sid)
                    items, errs = self.dev.get_filesystem(sid,
                            partial(self._filesystem_callback, {}))
                    all_items.extend(items), all_errs.extend(errs)
                if not all_items and all_errs:
                    raise DeviceError(
                            'Failed to read filesystem from %s with errors: %s'
                            %(self.current_friendly_name,
                                self.format_errorstack(all_errs)))
                if all_errs:
                    # Partial success: report the errors but carry on
                    prints('There were some errors while getting the '
                            ' filesystem from %s: %s'%(
                                self.current_friendly_name,
                                self.format_errorstack(all_errs)))
                self._filesystem_cache = FilesystemCache(storage, all_items)
            debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
                time.time()-st, len(self._filesystem_cache)))
        return self._filesystem_cache

    @synchronous
    def get_basic_device_information(self):
        # (name, version, software version, extra info) as expected by the
        # generic device interface
        d = self.dev
        return (self.current_friendly_name, d.device_version, d.device_version, '')

    @synchronous
    def total_space(self, end_session=True):
        '''Return total capacity in bytes for (main, carda, cardb).'''
        ans = [0, 0, 0]
        for s in self.dev.storage_info:
            i = {self._main_id:0, self._carda_id:1,
                    self._cardb_id:2}.get(s['id'], None)
            if i is not None:
                ans[i] = s['capacity']
        return tuple(ans)

    @synchronous
    def free_space(self, end_session=True):
        '''Return free space in bytes for (main, carda, cardb), after
        refreshing the storage info from the device.'''
        self.dev.update_storage_info()
        ans = [0, 0, 0]
        for s in self.dev.storage_info:
            i = {self._main_id:0, self._carda_id:1,
                    self._cardb_id:2}.get(s['id'], None)
            if i is not None:
                ans[i] = s['freespace_bytes']
        return tuple(ans)

    @synchronous
    def create_folder(self, parent, name):
        '''Create (or return an existing) folder called name inside
        parent. Raises DeviceError on failure.'''
        if not parent.is_folder:
            raise ValueError('%s is not a folder'%(parent.full_path,))
        e = parent.folder_named(name)
        if e is not None:
            return e
        sid, pid = parent.storage_id, parent.object_id
        if pid == sid:
            # Storage root: libmtp expects parent id 0
            pid = 0
        ans, errs = self.dev.create_folder(sid, pid, name)
        if ans is None:
            raise DeviceError(
                    'Failed to create folder named %s in %s with error: %s'%
                    (name, parent.full_path, self.format_errorstack(errs)))
        return parent.add_child(ans)

    @synchronous
    def put_file(self, parent, name, stream, size, callback=None, replace=True):
        '''Upload size bytes from stream as a file called name inside
        parent, optionally replacing an existing file. Returns the new
        file object; raises DeviceError on failure.'''
        e = parent.folder_named(name)
        if e is not None:
            raise ValueError('Cannot upload file, %s already has a folder named: %s'%(
                parent.full_path, e.name))
        e = parent.file_named(name)
        if e is not None:
            if not replace:
                raise ValueError('Cannot upload file %s, it already exists'%(
                    e.full_path,))
            self.delete_file_or_folder(e)
        sid, pid = parent.storage_id, parent.object_id
        if pid == sid:
            # Storage root: libmtp expects this sentinel parent id
            pid = 0xFFFFFFFF
        ans, errs = self.dev.put_file(sid, pid, name, stream, size, callback)
        if ans is None:
            raise DeviceError('Failed to upload file named: %s to %s: %s'
                    %(name, parent.full_path, self.format_errorstack(errs)))
        return parent.add_child(ans)

    @synchronous
    def get_mtp_file(self, f, stream=None, callback=None):
        '''Download file object f into stream (a temporary file is created
        if stream is None) and return the stream rewound to the start.'''
        if f.is_folder:
            # NOTE(review): message typo — 'if' should read 'is'
            raise ValueError('%s if a folder'%(f.full_path,))
        set_name = stream is None
        if stream is None:
            stream = SpooledTemporaryFile(5*1024*1024, '_wpd_receive_file.dat')
        ok, errs = self.dev.get_file(f.object_id, stream, callback)
        if not ok:
            raise DeviceError('Failed to get file: %s with errors: %s'%(
                f.full_path, self.format_errorstack(errs)))
        stream.seek(0)
        if set_name:
            stream.name = f.name
        return stream

    @synchronous
    def delete_file_or_folder(self, obj):
        '''Delete the file or (empty) folder obj from the device and
        return its parent. System objects, non-deletable objects and
        non-empty folders raise ValueError.'''
        if obj.deleted:
            return
        if not obj.can_delete:
            raise ValueError('Cannot delete %s as deletion not allowed'%
                    (obj.full_path,))
        if obj.is_system:
            raise ValueError('Cannot delete %s as it is a system object'%
                    (obj.full_path,))
        if obj.files or obj.folders:
            raise ValueError('Cannot delete %s as it is not empty'%
                    (obj.full_path,))
        parent = obj.parent
        ok, errs = self.dev.delete_object(obj.object_id)
        if not ok:
            raise DeviceError('Failed to delete %s with error: %s'%
                    (obj.full_path, self.format_errorstack(errs)))
        parent.remove_child(obj)
        return parent
def develop():
    '''Developer helper: scan for a connected MTP device, open it and dump
    its storage info and filesystem cache. Requires real hardware.'''
    from calibre.devices.scanner import DeviceScanner
    scanner = DeviceScanner()
    scanner.scan()
    dev = MTP_DEVICE(None)
    dev.startup()
    try:
        cd = dev.detect_managed_devices(scanner.devices)
        if cd is None:
            raise RuntimeError('No MTP device found')
        dev.open(cd, 'develop')
        pprint.pprint(dev.dev.storage_info)
        dev.filesystem_cache  # force the (slow) filesystem scan
    finally:
        dev.shutdown()
if __name__ == '__main__':
    # Manual smoke test: detect and probe an attached MTP device with full
    # libmtp debug output, printing the report to stdout.
    dev = MTP_DEVICE(None)
    dev.startup()
    from calibre.devices.scanner import DeviceScanner
    scanner = DeviceScanner()
    scanner.scan()
    devs = scanner.devices
    dev.debug_managed_device_detection(devs, sys.stdout)
    dev.set_debug_level(dev.LIBMTP_DEBUG_ALL)
    dev.shutdown()
| 17,984 | Python | .py | 418 | 31.538278 | 105 | 0.574626 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,768 | update.py | kovidgoyal_calibre/src/calibre/devices/mtp/unix/upstream/update.py | #!/usr/bin/env python3
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import shutil
import subprocess
# Refresh the vendored libmtp device tables from upstream: clone a shallow
# copy of the libmtp repository and copy its device-id headers next to this
# script.
base = os.path.dirname(os.path.abspath(__file__))
os.chdir('/tmp')
# Start from a clean checkout
if os.path.exists('libmtp'):
    shutil.rmtree('libmtp')
# NOTE(review): the unauthenticated git:// protocol is obsolete on many
# hosts; confirm the SourceForge remote still serves it, else switch to
# https://
subprocess.check_call(['git', 'clone', '--depth=1', 'git://git.code.sf.net/p/libmtp/code',
    'libmtp'])
for x in ('src/music-players.h', 'src/device-flags.h'):
    shutil.copyfile('libmtp/'+x, os.path.join(base, os.path.basename(x)))
| 574 | Python | .py | 15 | 34.866667 | 90 | 0.66426 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,769 | __init__.py | kovidgoyal_calibre/src/calibre/devices/mtp/windows/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 152 | Python | .py | 4 | 35.75 | 61 | 0.678322 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,770 | driver.py | kovidgoyal_calibre/src/calibre/devices/mtp/windows/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import threading
import time
import traceback
from functools import partial, wraps
from itertools import chain
from calibre import as_unicode, force_unicode, prints
from calibre.constants import __appname__, isxp, numeric_version
from calibre.devices.errors import BlacklistedDevice, DeviceError, OpenFailed
from calibre.devices.mtp.base import MTPDeviceBase, debug
from calibre.ptempfile import SpooledTemporaryFile
from polyglot.builtins import iteritems, itervalues
null = object()
class ThreadingViolation(Exception):
    '''
    Raised when an MTP driver method is invoked from any thread other
    than the one that called startup().
    '''

    def __init__(self):
        msg = ('You cannot use the MTP driver from a thread other than the '
               ' thread in which startup() was called')
        Exception.__init__(self, msg)
def same_thread(func):
    '''
    Decorator that enforces single-threaded use of the MTP driver: the
    wrapped method may only run on the thread stored in self.start_thread
    (the thread that called startup()), otherwise ThreadingViolation is
    raised.
    '''
    @wraps(func)
    def check_thread(self, *args, **kwargs):
        caller = threading.current_thread()
        if caller is not self.start_thread:
            raise ThreadingViolation()
        return func(self, *args, **kwargs)
    return check_thread
class MTP_DEVICE(MTPDeviceBase):
    '''
    Windows MTP driver, implemented on top of the Windows Portable
    Devices (WPD) API exposed by the calibre_extensions.wpd C extension.

    All device-touching methods are decorated with @same_thread and must
    run on the thread that called startup(). State is tracked per
    PNP id: blacklisted_devices (user-rejected or unopenable),
    ejected_devices (ejected this session), detected_devices
    (pnp_id -> device-info dict, or None/False while info is pending or
    errored).
    '''

    supported_platforms = ['windows']

    def __init__(self, *args, **kwargs):
        MTPDeviceBase.__init__(self, *args, **kwargs)
        self.dev = None  # the currently open wpd.Device, if any
        self.blacklisted_devices = set()
        self.ejected_devices = set()
        self.currently_connected_pnp_id = None
        self.detected_devices = {}
        self.previous_devices_on_system = frozenset()
        self.last_refresh_devices_time = time.time()
        self.wpd = self.wpd_error = None
        # storage ids for main memory and the two optional cards
        self._main_id = self._carda_id = self._cardb_id = None
        self.start_thread = None
        self._filesystem_cache = None
        self.eject_dev_on_next_scan = False
        self.current_device_data = {}

    def startup(self):
        # Bind the driver to the calling thread and initialize WPD. Any
        # failure is recorded in self.wpd_error rather than raised.
        self.start_thread = threading.current_thread()
        if isxp:
            self.wpd = None
            self.wpd_error = _('MTP devices are not supported on Windows XP')
        else:
            try:
                from calibre_extensions import wpd
                self.wpd = wpd
            except Exception as err:
                self.wpd = None
                self.wpd_error = as_unicode(err)
        if self.wpd is not None:
            try:
                self.wpd.init(__appname__, *(numeric_version[:3]))
            except self.wpd.NoWPD:
                self.wpd_error = _(
                    'The Windows Portable Devices service is not available'
                    ' on your computer. You may need to install Windows'
                    ' Media Player 11 or newer and/or restart your computer')
            except Exception as e:
                self.wpd_error = as_unicode(e)

    @same_thread
    def shutdown(self):
        # Drop all device state and uninitialize the WPD extension.
        self.dev = self._filesystem_cache = self.start_thread = None
        if self.wpd is not None:
            self.wpd.uninit()

    @same_thread
    def detect_managed_devices(self, devices_on_system, force_refresh=False):
        # Return the PNP id of a suitable connected MTP device, or None.
        # The WPD device list is only re-enumerated when the USB device set
        # changed, on force_refresh, or at most every 10 seconds.
        if self.wpd is None:
            return None
        if self.eject_dev_on_next_scan:
            # Deferred eject requested by eject(); perform it now, on the
            # correct thread.
            self.eject_dev_on_next_scan = False
            if self.currently_connected_pnp_id is not None:
                self.do_eject()

        devices_on_system = frozenset(devices_on_system)
        if (force_refresh or
                devices_on_system != self.previous_devices_on_system or
                time.time() - self.last_refresh_devices_time > 10):
            self.previous_devices_on_system = devices_on_system
            self.last_refresh_devices_time = time.time()
            try:
                pnp_ids = frozenset(self.wpd.enumerate_devices())
            except:
                return None

            # Keep cached info for devices that are still present
            self.detected_devices = {dev:self.detected_devices.get(dev, None)
                    for dev in pnp_ids}

        # Get device data for detected devices. If there is an error, we will
        # try again for that device the next time this method is called.
        for dev in tuple(self.detected_devices):
            data = self.detected_devices.get(dev, None)
            if data is None or data is False:
                try:
                    data = self.wpd.device_info(dev)
                except Exception as e:
                    prints('Failed to get device info for device:', dev,
                            as_unicode(e))
                    # False -> failed once, retry next scan; {} -> failed twice
                    data = {} if data is False else False
                self.detected_devices[dev] = data

        # Remove devices that have been disconnected from ejected
        # devices and blacklisted devices
        self.ejected_devices = set(self.detected_devices).intersection(
                self.ejected_devices)
        self.blacklisted_devices = set(self.detected_devices).intersection(
                self.blacklisted_devices)

        if self.currently_connected_pnp_id is not None:
            return (self.currently_connected_pnp_id if
                    self.currently_connected_pnp_id in self.detected_devices
                    else None)

        for dev, data in iteritems(self.detected_devices):
            if dev in self.blacklisted_devices or dev in self.ejected_devices:
                # Ignore blacklisted and ejected devices
                continue
            if data and self.is_suitable_wpd_device(data):
                return dev

        return None

    @same_thread
    def debug_managed_device_detection(self, devices_on_system, output):
        # Verbose detection pass used by the "debug device detection"
        # feature: prints what was found (and why devices were skipped) to
        # *output*, and returns True iff a device could be opened.
        import pprint
        p = partial(prints, file=output)
        if self.currently_connected_pnp_id is not None:
            return True
        if self.wpd_error:
            p('Cannot detect MTP devices')
            p(force_unicode(self.wpd_error))
            return False
        try:
            pnp_ids = frozenset(self.wpd.enumerate_devices())
        except:
            p("Failed to get list of PNP ids on system")
            p(traceback.format_exc())
            return False

        if not pnp_ids:
            p('The Windows WPD service says there are no portable devices connected')
            return False

        p('List of WPD PNP ids:')
        p(pprint.pformat(list(pnp_ids)))

        for pnp_id in pnp_ids:
            try:
                data = self.wpd.device_info(pnp_id)
            except:
                p('Failed to get data for device:', pnp_id)
                p(traceback.format_exc())
                continue
            protocol = data.get('protocol', '').lower()
            if not protocol.startswith('mtp:'):
                continue
            p('MTP device:', pnp_id)
            p(pprint.pformat(data))
            if not self.is_suitable_wpd_device(data):
                p('Not a suitable MTP device, ignoring\n')
                continue
            p('\nTrying to open:', pnp_id)
            try:
                self.open(pnp_id, 'debug-detection')
            except BlacklistedDevice:
                p('This device has been blacklisted by the user')
                continue
            except:
                p('Open failed:')
                p(traceback.format_exc())
                continue
            break
        if self.currently_connected_pnp_id:
            p('Opened', self.current_friendly_name, 'successfully')
            p('Device info:')
            p(pprint.pformat(self.dev.data))
            self.post_yank_cleanup()
            return True
        p('No suitable MTP devices found')
        return False

    def is_suitable_wpd_device(self, devdata):
        # A device qualifies if it speaks MTP and exposes at least one
        # writable, non-camera (non-DCF), non-ROM storage.

        # Check that protocol is MTP
        protocol = devdata.get('protocol', '').lower()
        if not protocol.startswith('mtp:'):
            return False

        # Check that the device has some read-write storage
        if not devdata.get('has_storage', False):
            return False
        has_rw_storage = False
        for s in devdata.get('storage', []):
            if s.get('filesystem', None) == 'DCF':
                # DCF filesystem indicates a camera or an iPhone
                # See https://bugs.launchpad.net/calibre/+bug/1054562
                continue
            if s.get('type', 'unknown_unknown').split('_')[-1] == 'rom':
                continue  # Read only storage
            if s.get('rw', False):
                has_rw_storage = True
                break
        if not has_rw_storage:
            return False

        return True

    def _filesystem_callback(self, fs_map, obj, level):
        # Called by the C extension for every object while enumerating a
        # storage's filesystem. Returns False to prune the subtree of
        # non-folders and of folders the user has chosen to ignore.
        name = obj.get('name', '')
        self.filesystem_callback(_('Found object: %s')%name)
        if not obj.get('is_folder', False):
            return False
        fs_map[obj.get('id', null)] = obj
        # Reconstruct the folder's full path from the parent chain seen so far
        path = [name]
        pid = obj.get('parent_id', 0)
        while pid != 0 and pid in fs_map:
            parent = fs_map[pid]
            path.append(parent.get('name', ''))
            pid = parent.get('parent_id', 0)
            if fs_map.get(pid, None) is parent:
                break  # An object is its own parent
        path = tuple(reversed(path))
        ok = not self.is_folder_ignored(self._currently_getting_sid, path)
        if not ok:
            debug('Ignored object: %s' % '/'.join(path))
        return ok

    @property
    def filesystem_cache(self):
        # Lazily build (and cache) the FilesystemCache for all storages of
        # the currently open device. Invalidated on eject/yank/open.
        if self._filesystem_cache is None:
            debug('Loading filesystem metadata...')
            st = time.time()
            from calibre.devices.mtp.filesystem_cache import FilesystemCache
            ts = self.total_space()
            all_storage = []
            items = []
            for storage_id, capacity in zip([self._main_id, self._carda_id,
                self._cardb_id], ts):
                if storage_id is None:
                    continue
                name = _('Unknown')
                for s in self.dev.data['storage']:
                    if s['id'] == storage_id:
                        name = s['name']
                        break
                storage = {'id':storage_id, 'size':capacity, 'name':name,
                        'is_folder':True, 'can_delete':False, 'is_system':True}
                self._currently_getting_sid = str(storage_id)
                id_map = self.dev.get_filesystem(storage_id, partial(
                        self._filesystem_callback, {}))
                for x in itervalues(id_map):
                    x['storage_id'] = storage_id
                all_storage.append(storage)
                items.append(itervalues(id_map))
            self._filesystem_cache = FilesystemCache(all_storage, chain(*items))
            debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
                time.time()-st, len(self._filesystem_cache)))
        return self._filesystem_cache

    @same_thread
    def do_eject(self):
        # Actually eject: remember the device as ejected for this session
        # and forget all open-device state.
        if self.currently_connected_pnp_id is None:
            return
        self.ejected_devices.add(self.currently_connected_pnp_id)
        self.currently_connected_pnp_id = self.current_friendly_name = None
        self._main_id = self._carda_id = self._cardb_id = None
        self.dev = self._filesystem_cache = None

    @same_thread
    def post_yank_cleanup(self):
        # The device was unplugged: reset state but do NOT mark it ejected,
        # so it is picked up again when re-plugged.
        self.currently_connected_pnp_id = self.current_friendly_name = None
        self._main_id = self._carda_id = self._cardb_id = None
        self.dev = self._filesystem_cache = None
        self.current_serial_num = None

    @property
    def is_mtp_device_connected(self):
        return self.currently_connected_pnp_id is not None

    def eject(self):
        # May be called from any thread; the real eject is deferred to the
        # next detect_managed_devices() call on the driver thread.
        if self.currently_connected_pnp_id is None:
            return
        self.eject_dev_on_next_scan = True
        self.current_serial_num = None

    @same_thread
    def open(self, connected_device, library_uuid):
        # Open the device identified by its PNP id. Retries once after a
        # 2 second pause; on failure or user blacklist the device is added
        # to blacklisted_devices so it is not retried this session.
        self.dev = self._filesystem_cache = None
        try:
            self.dev = self.wpd.Device(connected_device)
        except self.wpd.WPDError:
            time.sleep(2)
            try:
                self.dev = self.wpd.Device(connected_device)
            except self.wpd.WPDError as e:
                self.blacklisted_devices.add(connected_device)
                raise OpenFailed('Failed to open %s with error: %s'%(
                    connected_device, as_unicode(e)))
        devdata = self.dev.data
        storage = [s for s in devdata.get('storage', []) if s.get('rw', False)]
        if not storage:
            self.blacklisted_devices.add(connected_device)
            raise OpenFailed('No storage found for device %s'%(connected_device,))
        snum = devdata.get('serial_number', None)
        if snum in self.prefs.get('blacklist', []):
            self.blacklisted_devices.add(connected_device)
            self.dev = None
            raise BlacklistedDevice(
                'The %s device has been blacklisted by the user'%(connected_device,))

        # Stable ordering: lowest storage id becomes main memory; 'zzzzz'
        # sorts id-less entries last.
        storage.sort(key=lambda x:x.get('id', 'zzzzz'))
        self._main_id = storage[0]['id']
        if len(storage) > 1:
            self._carda_id = storage[1]['id']
        if len(storage) > 2:
            self._cardb_id = storage[2]['id']
        self.current_friendly_name = devdata.get('friendly_name', '')
        if not self.current_friendly_name:
            self.current_friendly_name = devdata.get('model_name',
                    _('Unknown MTP device'))
        self.currently_connected_pnp_id = connected_device
        self.current_serial_num = snum
        self.current_device_data = devdata.copy()

    def device_debug_info(self):
        import pprint
        return pprint.pformat(self.current_device_data)

    @same_thread
    def get_basic_device_information(self):
        d = self.dev.data
        dv = d.get('device_version', '')
        return (self.current_friendly_name, dv, dv, '')

    @same_thread
    def total_space(self, end_session=True):
        # Returns (main, carda, cardb) capacities in bytes; 0 for absent
        # storages.
        ans = [0, 0, 0]
        dd = self.dev.data
        for s in dd.get('storage', []):
            i = {self._main_id:0, self._carda_id:1,
                    self._cardb_id:2}.get(s.get('id', -1), None)
            if i is not None:
                ans[i] = s['capacity']
        return tuple(ans)

    @same_thread
    def free_space(self, end_session=True):
        # Same layout as total_space(); re-reads device data first so the
        # numbers are current.
        self.dev.update_data()
        ans = [0, 0, 0]
        dd = self.dev.data
        for s in dd.get('storage', []):
            i = {self._main_id:0, self._carda_id:1,
                    self._cardb_id:2}.get(s.get('id', -1), None)
            if i is not None:
                ans[i] = s['free_space']
        return tuple(ans)

    @same_thread
    def get_mtp_file(self, f, stream=None, callback=None):
        # Download file object *f* into *stream* (a SpooledTemporaryFile is
        # created when none is given). Retries once on WPDFileBusy.
        if f.is_folder:
            # NOTE(review): message typo — should read '%s is a folder'
            raise ValueError('%s if a folder'%(f.full_path,))
        set_name = stream is None
        if stream is None:
            stream = SpooledTemporaryFile(5*1024*1024, '_wpd_receive_file.dat')
        try:
            try:
                self.dev.get_file(f.object_id, stream, callback)
            except self.wpd.WPDFileBusy:
                time.sleep(2)
                self.dev.get_file(f.object_id, stream, callback)
        except Exception as e:
            raise DeviceError('Failed to fetch the file %s with error: %s'%
                    (f.full_path, as_unicode(e)))
        stream.seek(0)
        if set_name:
            stream.name = f.name
        return stream

    @same_thread
    def create_folder(self, parent, name):
        # Create (or return the existing) child folder *name* under
        # *parent*, updating the in-memory filesystem cache.
        if not parent.is_folder:
            raise ValueError('%s is not a folder'%(parent.full_path,))
        e = parent.folder_named(name)
        if e is not None:
            return e
        try:
            ans = self.dev.create_folder(parent.object_id, name)
        except Exception as err:
            raise OSError(f'Failed to create the folder: {name} in {parent.full_path} with error: {err}') from err
        ans['storage_id'] = parent.storage_id
        return parent.add_child(ans)

    @same_thread
    def delete_file_or_folder(self, obj):
        # Delete a single (empty, deletable, non-system) object and remove
        # it from the cache. Returns the parent node.
        if obj.deleted:
            return
        if not obj.can_delete:
            raise ValueError('Cannot delete %s as deletion not allowed'%
                    (obj.full_path,))
        if obj.is_system:
            raise ValueError('Cannot delete %s as it is a system object'%
                    (obj.full_path,))
        if obj.files or obj.folders:
            raise ValueError('Cannot delete %s as it is not empty'%
                    (obj.full_path,))
        parent = obj.parent
        self.dev.delete_object(obj.object_id)
        parent.remove_child(obj)
        return parent

    @same_thread
    def put_file(self, parent, name, stream, size, callback=None, replace=True):
        # Upload *stream* (of *size* bytes) as *name* under folder *parent*.
        # An existing file is replaced only when replace=True; a name clash
        # with a folder is always an error.
        e = parent.folder_named(name)
        if e is not None:
            raise ValueError('Cannot upload file, %s already has a folder named: %s'%(
                parent.full_path, e.name))
        e = parent.file_named(name)
        if e is not None:
            if not replace:
                raise ValueError('Cannot upload file %s, it already exists'%(
                    e.full_path,))
            self.delete_file_or_folder(e)
        sid, pid = parent.storage_id, parent.object_id
        ans = self.dev.put_file(pid, name, stream, size, callback)
        ans['storage_id'] = sid
        return parent.add_child(ans)
| 17,202 | Python | .py | 402 | 31.310945 | 114 | 0.572503 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,771 | apnx.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx.py | __license__ = 'GPL v3'
__copyright__ = '2011, John Schember <john at nachtimwald.com>, refactored: 2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
from typing import Dict, Optional
'''
Generates and writes an APNX page mapping file.
'''
import struct
from calibre import fsync, prints
from calibre.constants import DEBUG
from calibre.devices.kindle.apnx_page_generator.generators.accurate_page_generator import AccuratePageGenerator
from calibre.devices.kindle.apnx_page_generator.generators.exact_page_generator import ExactPageGenerator
from calibre.devices.kindle.apnx_page_generator.generators.fast_page_generator import FastPageGenerator
from calibre.devices.kindle.apnx_page_generator.generators.pagebreak_page_generator import PagebreakPageGenerator
from calibre.devices.kindle.apnx_page_generator.i_page_generator import IPageGenerator
from calibre.devices.kindle.apnx_page_generator.pages import Pages
from calibre.ebooks.mobi.reader.headers import MetadataHeader
from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.utils.logging import default_log
from polyglot.builtins import as_bytes, as_unicode
class APNXBuilder:
    """
    Create an APNX (Amazon page-number sidecar) file using a pseudo page
    mapping generated from the book's content.
    """

    # Registry of available page-mapping algorithms, keyed by the name each
    # generator reports. This is shared class-level state and must be treated
    # as read-only at runtime.
    generators: Dict[str, IPageGenerator] = {
        FastPageGenerator.instance.name(): FastPageGenerator.instance,
        AccuratePageGenerator.instance.name(): AccuratePageGenerator.instance,
        PagebreakPageGenerator.instance.name(): PagebreakPageGenerator.instance,
        # ExactPageGenerator.instance.name(): ExactPageGenerator.instance,
    }

    def write_apnx(self, mobi_file_path: str, apnx_path: str, method: Optional[str] = None, page_count: int = 0):
        """
        Generate a page mapping for the book at *mobi_file_path* and write it
        as an APNX file to *apnx_path*.

        If you want a fixed number of pages (such as from a custom column) then
        pass in a value to page_count, otherwise a count will be estimated
        using either the fast or accurate algorithm.

        :param method: name of a registered generator; unknown/None falls
            back to the fast generator.
        :raises Exception: if no pages could be generated.
        """
        apnx_meta = self.get_apnx_meta(mobi_file_path)

        if page_count:
            generator: IPageGenerator = ExactPageGenerator.instance
        else:
            # BUG FIX: this used dict.setdefault(), which *mutates* the shared
            # class-level registry, permanently inserting unknown (or None)
            # method names as keys. dict.get() performs the same lookup with
            # the same fallback, without the side effect.
            generator: IPageGenerator = self.generators.get(method, FastPageGenerator.instance)

        pages = generator.generate(mobi_file_path, page_count)
        if pages.number_of_pages == 0:
            raise Exception(_('Could not generate page mapping.'))
        # Generate the APNX file from the page mapping.
        apnx = self.generate_apnx(pages, apnx_meta)

        # Write the APNX.
        with open(apnx_path, 'wb') as apnxf:
            apnxf.write(apnx)
            fsync(apnxf)

    @staticmethod
    def get_apnx_meta(mobi_file_path) -> Dict[str, str]:
        """
        Extract the metadata needed for the APNX header (ASIN, cdetype,
        format, and the PDB name) from the MOBI/KF8 file.

        :raises Exception: if the file is not a valid MOBI file.
        """
        import uuid
        apnx_meta = {
            'guid': str(uuid.uuid4()).replace('-', '')[:8],
            'asin': '',
            'cdetype': 'EBOK',
            'format': 'MOBI_7',
            'acr': ''
        }
        with open(mobi_file_path, 'rb') as mf:
            ident = PdbHeaderReader(mf).identity()
            if as_bytes(ident) != b'BOOKMOBI':
                # Check that this is really a MOBI file.
                raise Exception(_('Not a valid MOBI file. Reports identity of %s') % ident)
            apnx_meta['acr'] = as_unicode(PdbHeaderReader(mf).name(), errors='replace')
        # We'll need the PDB name, the MOBI version, and some metadata to make FW 3.4 happy with KF8 files...
        with open(mobi_file_path, 'rb') as mf:
            mh = MetadataHeader(mf, default_log)
            if mh.mobi_version == 8:
                apnx_meta['format'] = 'MOBI_8'
            else:
                apnx_meta['format'] = 'MOBI_7'
            if mh.exth is None or not mh.exth.cdetype:
                apnx_meta['cdetype'] = 'EBOK'
            else:
                apnx_meta['cdetype'] = str(mh.exth.cdetype)
            if mh.exth is None or not mh.exth.uuid:
                apnx_meta['asin'] = ''
            else:
                apnx_meta['asin'] = str(mh.exth.uuid)
        return apnx_meta

    @staticmethod
    def generate_apnx(pages: Pages, apnx_meta) -> bytes:
        """
        Serialize the page mapping plus metadata into the binary APNX
        format: a big-endian header, a JSON content header, a JSON page
        header, then one uint32 byte-offset per page.
        """
        apnx = b''

        if DEBUG:
            prints('APNX META: guid:', apnx_meta['guid'])
            prints('APNX META: ASIN:', apnx_meta['asin'])
            prints('APNX META: CDE:', apnx_meta['cdetype'])
            prints('APNX META: format:', apnx_meta['format'])
            prints('APNX META: Name:', apnx_meta['acr'])

        # Updated header if we have a KF8 file...
        if apnx_meta['format'] == 'MOBI_8':
            content_header = '{"contentGuid":"%(guid)s","asin":"%(asin)s","cdeType":"%(cdetype)s","format":"%(format)s","fileRevisionId":"1","acr":"%(acr)s"}' % apnx_meta  # noqa
        else:
            # My 5.1.x Touch & 3.4 K3 seem to handle the 'extended' header fine for
            # legacy mobi files, too. But, since they still handle this one too, let's
            # try not to break old devices, and keep using the simple header ;).
            content_header = '{"contentGuid":"%(guid)s","asin":"%(asin)s","cdeType":"%(cdetype)s","fileRevisionId":"1"}' % apnx_meta
        page_header = '{"asin":"%(asin)s","pageMap":"' % apnx_meta
        page_header += pages.page_maps + '"}'
        if DEBUG:
            prints('APNX Content Header:', content_header)
        content_header = as_bytes(content_header)
        page_header = as_bytes(page_header)

        apnx += struct.pack('>I', 65537)
        apnx += struct.pack('>I', 12 + len(content_header))
        apnx += struct.pack('>I', len(content_header))
        apnx += content_header
        apnx += struct.pack('>H', 1)
        apnx += struct.pack('>H', len(page_header))
        apnx += struct.pack('>H', pages.number_of_pages)
        apnx += struct.pack('>H', 32)
        apnx += page_header

        # Write page values to APNX.
        for location in pages.page_locations:
            apnx += struct.pack('>I', location)

        return apnx
| 5,951 | Python | .py | 118 | 41.415254 | 178 | 0.630954 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,772 | bookmark.py | kovidgoyal_calibre/src/calibre/devices/kindle/bookmark.py | __license__ = 'GPL v3'
__docformat__ = 'restructuredtext en'
import io
import os
from struct import unpack
class Bookmark(): # {{{
    '''
    A simple class fetching bookmark data
    Kindle-specific

    Parses a Kindle sidecar file (.mbp for MOBI family, .tan for Topaz,
    .pdr for PDF) found next to the book, extracting the last-read
    position and a dict of user annotations keyed by location.
    '''

    def __init__(self, path, id, book_format, bookmark_extension):
        # path: full path to the sidecar file; id: calibre book id;
        # book_format/bookmark_extension: file extensions without the dot.
        self.book_format = book_format
        self.bookmark_extension = bookmark_extension
        self.book_length = 0
        self.id = id
        self.last_read = 0
        self.last_read_location = 0
        self.path = path
        self.timestamp = 0
        self.user_notes = None

        self.get_bookmark_data()
        self.get_book_length()
        try:
            self.percent_read = min(float(100*self.last_read / self.book_length),100)
        except:
            # book_length may be 0 (unparseable book) -> ZeroDivisionError
            self.percent_read = 0

    def record(self, n):
        # Return a StreamSlicer over PDB record *n* of the book file read by
        # get_book_length(); relies on self.data/self.stream/self.nrecs.
        from calibre.ebooks.metadata.mobi import StreamSlicer
        if n >= self.nrecs:
            raise ValueError('non-existent record %r' % n)
        offoff = 78 + (8 * n)
        start, = unpack('>I', self.data[offoff + 0:offoff + 4])
        stop = None
        if n < (self.nrecs - 1):
            stop, = unpack('>I', self.data[offoff + 8:offoff + 12])
        return StreamSlicer(self.stream, start, stop)

    def get_bookmark_data(self):
        ''' Return the timestamp and last_read_location '''
        from calibre.ebooks.metadata.mobi import StreamSlicer
        user_notes = {}
        if self.bookmark_extension == 'mbp':
            # MOBI sidecar: fixed binary header, then 'DATA' records
            # (annotations) followed by 'BKMK' records (bookmarks).
            # MAGIC_MOBI_CONSTANT converts byte offsets to displayed pages.
            MAGIC_MOBI_CONSTANT = 150
            with open(self.path,'rb') as f:
                stream = io.BytesIO(f.read())
                data = StreamSlicer(stream)
                self.timestamp, = unpack('>I', data[0x24:0x28])
                bpar_offset, = unpack('>I', data[0x4e:0x52])
                lrlo = bpar_offset + 0x0c
                self.last_read = int(unpack('>I', data[lrlo:lrlo+4])[0])
                self.last_read_location = self.last_read // MAGIC_MOBI_CONSTANT + 1
                entries, = unpack('>I', data[0x4a:0x4e])

                # Store the annotations/locations
                bpl = bpar_offset + 4
                bpar_len, = unpack('>I', data[bpl:bpl+4])
                bpar_len += 8
                # print "bpar_len: 0x%x" % bpar_len
                eo = bpar_offset + bpar_len

                # Walk bookmark entries
                # print " --- %s --- " % self.path
                current_entry = 1
                sig = data[eo:eo+4]
                previous_block = None

                while sig == b'DATA':
                    # A Note is encoded as an empty DATA record followed by a
                    # text block; a Highlight as an EBAR header followed by a
                    # text block — hence the previous_block tracking.
                    text = None
                    entry_type = None
                    rec_len, = unpack('>I', data[eo+4:eo+8])
                    if rec_len == 0:
                        current_block = "empty_data"
                    elif data[eo+8:eo+12] == b"EBAR":
                        current_block = "data_header"
                        # entry_type = "data_header"
                        location, = unpack('>I', data[eo+0x34:eo+0x38])
                        # print "data_header location: %d" % location
                    else:
                        current_block = "text_block"
                        if previous_block == 'empty_data':
                            entry_type = 'Note'
                        elif previous_block == 'data_header':
                            entry_type = 'Highlight'
                        text = data[eo+8:eo+8+rec_len].decode('utf-16-be')

                    if entry_type:
                        displayed_location = location // MAGIC_MOBI_CONSTANT + 1
                        user_notes[location] = dict(id=self.id,
                                                    displayed_location=displayed_location,
                                                    type=entry_type,
                                                    text=text)

                    eo += rec_len + 8
                    current_entry += 1
                    previous_block = current_block
                    sig = data[eo:eo+4]

                while sig == b'BKMK':
                    # Fix start location for Highlights using BKMK data
                    end_loc, = unpack('>I', data[eo+0x10:eo+0x14])

                    if end_loc in user_notes and \
                       (user_notes[end_loc]['type'] == 'Highlight' or
                        user_notes[end_loc]['type'] == 'Note'):
                        # Switch location to start (0x08:0x0c)
                        start, = unpack('>I', data[eo+8:eo+12])
                        user_notes[start] = user_notes[end_loc]
                        '''
                        print " %s: swapping 0x%x (%d) to 0x%x (%d)" % (user_notes[end_loc]['type'],
                                                                        end_loc,
                                                                        end_loc/MAGIC_MOBI_CONSTANT + 1,
                                                                        start,
                                                                        start//MAGIC_MOBI_CONSTANT + 1)
                        '''
                        user_notes[start]['displayed_location'] = start // MAGIC_MOBI_CONSTANT + 1
                        user_notes.pop(end_loc)
                    else:
                        # If a bookmark coincides with a user annotation, the locs could
                        # be the same - cheat by nudging -1
                        # Skip bookmark for last_read_location
                        if end_loc != self.last_read:
                            # print " adding Bookmark at 0x%x (%d)" % (end_loc, end_loc/MAGIC_MOBI_CONSTANT + 1)
                            displayed_location = end_loc // MAGIC_MOBI_CONSTANT + 1
                            user_notes[end_loc - 1] = dict(id=self.id,
                                                           displayed_location=displayed_location,
                                                           type='Bookmark',
                                                           text=None)
                    rec_len, = unpack('>I', data[eo+4:eo+8])
                    eo += rec_len + 8
                    sig = data[eo:eo+4]

        elif self.bookmark_extension == 'tan':
            # Topaz sidecar: binary entry table; highlight text itself lives
            # in "My Clippings.txt" and has to be matched back heuristically.
            from calibre.ebooks.metadata.topaz import get_metadata as get_topaz_metadata

            def get_topaz_highlight(displayed_location):
                # Parse My Clippings.txt for a matching highlight
                # Search looks for book title match, highlight match, and location match
                # Author is not matched
                # This will find the first instance of a clipping only
                book_fs = self.path.replace('.%s' % self.bookmark_extension,'.%s' % self.book_format)
                with open(book_fs,'rb') as f2:
                    stream = io.BytesIO(f2.read())
                    mi = get_topaz_metadata(stream)
                my_clippings = self.path
                split = my_clippings.find('documents') + len('documents/')
                my_clippings = my_clippings[:split] + "My Clippings.txt"
                try:
                    with open(my_clippings, encoding='utf-8', errors='replace') as f2:
                        marker_found = 0
                        text = ''
                        search_str1 = '%s' % (mi.title)
                        search_str2 = '- Highlight Loc. %d' % (displayed_location)
                        for line in f2:
                            if marker_found == 0:
                                if line.startswith(search_str1):
                                    marker_found = 1
                            elif marker_found == 1:
                                if line.startswith(search_str2):
                                    marker_found = 2
                            elif marker_found == 2:
                                if line.startswith('=========='):
                                    break
                                text += line.strip()
                        else:
                            # for/else: ran off the end without a '=====' separator
                            raise Exception('error')
                except:
                    text = '(Unable to extract highlight text from My Clippings.txt)'
                return text

            # Topaz locations are fractional page units
            MAGIC_TOPAZ_CONSTANT = 33.33
            self.timestamp = os.path.getmtime(self.path)
            with open(self.path,'rb') as f:
                stream = io.BytesIO(f.read())
                data = StreamSlicer(stream)
                self.last_read = int(unpack('>I', data[5:9])[0])
                self.last_read_location = self.last_read/MAGIC_TOPAZ_CONSTANT + 1
                entries, = unpack('>I', data[9:13])
                current_entry = 0
                e_base = 0x0d
                while current_entry < entries:
                    location, = unpack('>I', data[e_base+2:e_base+6])
                    text = None
                    text_len, = unpack('>I', data[e_base+0xA:e_base+0xE])
                    e_type, = unpack('>B', data[e_base+1])
                    if e_type == 0:
                        e_type = 'Bookmark'
                    elif e_type == 1:
                        e_type = 'Highlight'
                        text = get_topaz_highlight(location/MAGIC_TOPAZ_CONSTANT + 1)
                    elif e_type == 2:
                        e_type = 'Note'
                        text = data[e_base+0x10:e_base+0x10+text_len]
                    else:
                        e_type = 'Unknown annotation type'

                    displayed_location = location/MAGIC_TOPAZ_CONSTANT + 1
                    user_notes[location] = dict(id=self.id,
                                                displayed_location=displayed_location,
                                                type=e_type,
                                                text=text)
                    # 0xFFFFFFFF marks "no text payload" for this entry
                    if text_len == 0xFFFFFFFF:
                        e_base = e_base + 14
                    else:
                        e_base = e_base + 14 + 2 + text_len
                    current_entry += 1
                # Drop the synthetic entry that duplicates last_read
                for location in user_notes:
                    if location == self.last_read:
                        user_notes.pop(location)
                        break

        elif self.bookmark_extension == 'pdr':
            # PDF sidecar: bookmarks only; the printed page label is used as
            # the location, with pdf_page_offset bridging label vs PDF page.
            self.timestamp = os.path.getmtime(self.path)
            with open(self.path,'rb') as f:
                stream = io.BytesIO(f.read())
                data = StreamSlicer(stream)
                self.last_read = int(unpack('>I', data[5:9])[0])
                entries, = unpack('>I', data[9:13])
                current_entry = 0
                e_base = 0x0d
                self.pdf_page_offset = 0
                while current_entry < entries:
                    '''
                    location, = unpack('>I', data[e_base+2:e_base+6])
                    text = None
                    text_len, = unpack('>I', data[e_base+0xA:e_base+0xE])
                    e_type, = unpack('>B', data[e_base+1])
                    if e_type == 0:
                        e_type = 'Bookmark'
                    elif e_type == 1:
                        e_type = 'Highlight'
                        text = get_topaz_highlight(location/MAGIC_TOPAZ_CONSTANT + 1)
                    elif e_type == 2:
                        e_type = 'Note'
                        text = data[e_base+0x10:e_base+0x10+text_len]
                    else:
                        e_type = 'Unknown annotation type'

                    if self.book_format in ['tpz','azw1']:
                        displayed_location = location/MAGIC_TOPAZ_CONSTANT + 1
                    elif self.book_format == 'pdf':
                        # *** This needs implementation
                        displayed_location = location
                    user_notes[location] = dict(id=self.id,
                                                displayed_location=displayed_location,
                                                type=e_type,
                                                text=text)
                    if text_len == 0xFFFFFFFF:
                        e_base = e_base + 14
                    else:
                        e_base = e_base + 14 + 2 + text_len
                    current_entry += 1
                    '''
                    # Use label as page number
                    pdf_location, = unpack('>I', data[e_base+1:e_base+5])
                    label_len, = unpack('>H', data[e_base+5:e_base+7])
                    location = int(data[e_base+7:e_base+7+label_len])
                    displayed_location = location
                    e_type = 'Bookmark'
                    text = None
                    user_notes[location] = dict(id=self.id,
                                                displayed_location=displayed_location,
                                                type=e_type,
                                                text=text)
                    self.pdf_page_offset = pdf_location - location
                    e_base += (7 + label_len)
                    current_entry += 1

                self.last_read_location = self.last_read - self.pdf_page_offset

        else:
            print("unsupported bookmark_extension: %s" % self.bookmark_extension)
        self.user_notes = user_notes

    def get_book_length(self):
        # Determine the total length of the book (for percent_read) by
        # reading the matching book file next to the sidecar. Best-effort:
        # on any parse error book_length stays 0.
        from calibre.ebooks.metadata.mobi import StreamSlicer
        book_fs = self.path.replace('.%s' % self.bookmark_extension,'.%s' % self.book_format)

        self.book_length = 0
        if self.bookmark_extension == 'mbp':
            # Read the book len from the header
            try:
                with open(book_fs,'rb') as f:
                    self.stream = io.BytesIO(f.read())
                    self.data = StreamSlicer(self.stream)
                    self.nrecs, = unpack('>H', self.data[76:78])
                    record0 = self.record(0)
                    self.book_length = int(unpack('>I', record0[0x04:0x08])[0])
            except:
                pass
        elif self.bookmark_extension == 'tan':
            # Read bookLength from metadata
            from calibre.ebooks.metadata.topaz import MetadataUpdater
            try:
                with open(book_fs,'rb') as f:
                    mu = MetadataUpdater(f)
                    self.book_length = mu.book_length
            except:
                pass
        else:
            print("unsupported bookmark_extension: %s" % self.bookmark_extension)

# }}}
| 14,437 | Python | .py | 283 | 30.067138 | 112 | 0.435417 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,773 | driver.py | kovidgoyal_calibre/src/calibre/devices/kindle/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com>'
__docformat__ = 'restructuredtext en'
from calibre.devices.kindle.apnx import APNXBuilder
'''
Device driver for Amazon's Kindle
'''
import errno
import hashlib
import json
import os
import re
from calibre import fsync, prints, strftime
from calibre.constants import DEBUG, filesystem_encoding
from calibre.devices.interface import OpenPopupMessage
from calibre.devices.kindle.bookmark import Bookmark
from calibre.devices.usbms.driver import USBMS
from calibre.utils.date import utcfromtimestamp
from polyglot.builtins import as_bytes, as_unicode
'''
Notes on collections:
A collections cache is stored at system/collections.json
The cache is read only, changes made to it are overwritten (it is regenerated)
on device disconnect
A log of collection creation/manipulation is available at
system/userannotationlog
collections.json refers to books via a SHA1 hash of the absolute path to the
book (prefix is /mnt/us on my Kindle). The SHA1 hash may or may not be prefixed
by some characters, use the last 40 characters. For books from Amazon, the ASIN
is used instead.
Changing the metadata and resending the file doesn't seem to affect collections
Adding a book to a collection on the Kindle does not change the book file at all
(i.e. it is binary identical). Therefore collection information is not stored in
file metadata.
'''
def thumbnail_filename(stream) -> str:
    '''
    Compute the Kindle sidecar cover-thumbnail filename for the book in
    *stream*, derived from its uuid and cdetype. Returns '' when either
    value cannot be read.
    '''
    from calibre.ebooks.metadata.kfx import CONTAINER_MAGIC, read_book_key_kfx
    from calibre.ebooks.mobi.reader.headers import MetadataHeader
    from calibre.utils.logging import default_log
    stream.seek(0)
    magic = stream.read(4)
    stream.seek(0)
    uuid = cdetype = None
    if magic == CONTAINER_MAGIC:
        # KFX container
        uuid, cdetype = read_book_key_kfx(stream)
    else:
        # MOBI/AZW family: pull the values from the EXTH header
        header = MetadataHeader(stream, default_log)
        exth = header.exth
        if exth is not None:
            uuid, cdetype = exth.uuid, exth.cdetype
    if uuid and cdetype:
        return f'thumbnail_{uuid}_{cdetype}_portrait.jpg'
    return ''
def get_files_in(path):
    '''
    Yield ``(name, stat_result)`` for every regular file directly inside
    *path*. Symlinks are not followed, so symlinked entries are skipped,
    matching the original ``os.lstat``-based behavior.
    '''
    # os.scandir() exists on all Python >= 3.5; this file already uses
    # f-strings (3.6+), so the old hasattr()/os.listdir() fallback was
    # unreachable dead code and has been removed.
    for dir_entry in os.scandir(path):
        if dir_entry.is_file(follow_symlinks=False):
            yield dir_entry.name, dir_entry.stat(follow_symlinks=False)
class KINDLE(USBMS):
name = 'Kindle Device Interface'
gui_name = 'Amazon Kindle'
icon = 'devices/kindle.png'
description = _('Communicate with the Kindle e-book reader.')
author = 'John Schember'
supported_platforms = ['windows', 'osx', 'linux']
# Ordered list of supported formats
FORMATS = ['azw', 'mobi', 'prc', 'azw1', 'tpz', 'txt']
VENDOR_ID = [0x1949]
PRODUCT_ID = [0x0001]
BCD = [0x399]
VENDOR_NAME = 'KINDLE'
WINDOWS_MAIN_MEM = 'INTERNAL_STORAGE'
WINDOWS_CARD_A_MEM = 'CARD_STORAGE'
OSX_MAIN_MEM = 'Kindle Internal Storage Media'
OSX_CARD_A_MEM = 'Kindle Card Storage Media'
MAIN_MEMORY_VOLUME_LABEL = 'Kindle Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'Kindle Storage Card'
EBOOK_DIR_MAIN = 'documents'
EBOOK_DIR_CARD_A = 'documents'
DELETE_EXTS = ['.mbp', '.tan', '.pdr', '.ea', '.apnx', '.phl']
SUPPORTS_SUB_DIRS = True
SUPPORTS_ANNOTATIONS = True
WIRELESS_FILE_NAME_PATTERN = re.compile(
r'(?P<title>[^-]+)-asin_(?P<asin>[a-zA-Z\d]{10,})-type_(?P<type>\w{4})-v_(?P<index>\d+).*')
VIRTUAL_BOOK_EXTENSIONS = frozenset({'kfx'})
VIRTUAL_BOOK_EXTENSION_MESSAGE = _(
'The following books are in KFX format. KFX is a virtual book format, and cannot'
' be transferred from the device. Instead, you should go to your "Manage my'
' content and devices" page on the Amazon homepage and download the book to your computer from there.'
' That will give you a regular AZW3 file that you can add to calibre normally.'
' Click "Show details" to see the list of books.'
)
@classmethod
def get_open_popup_message(cls):
from calibre.utils.localization import localize_website_link
return OpenPopupMessage(title=_('WARNING: E-book covers'), message=_(
'Amazon has <b>broken display of covers</b> for books sent to the Kindle by USB cable. To workaround it,'
' you have to either keep your Kindle in Airplane mode, or:'
'<ol><li>Send the books to the Kindle</li><li>Disconnect the Kindle and wait for the covers to be deleted'
' by Amazon</li><li>Reconnect the Kindle and calibre will restore the covers.</li></ol> After this the'
' covers for those books should stay put. <a href="{}">Click here</a> for details.').format(localize_website_link(
'https://manual.calibre-ebook.com/faq.html#covers-for-books-i'
'-send-to-my-e-ink-kindle-show-up-momentarily-and-then-are-replaced-by-a-generic-cover')
))
def is_allowed_book_file(self, filename, path, prefix):
lpath = os.path.join(path, filename).partition(self.normalize_path(prefix))[2].replace('\\', '/')
return '.sdr/' not in lpath
    @classmethod
    def metadata_from_path(cls, path):
        """Read metadata for the book file at *path*.

        KFX files get special handling: a plain KFX is parsed directly, while a
        DRMed KFX container (magic header ``\\xeaDRMION\\xee``) has its metadata
        read from the ``.sdr`` sidecar folder instead. Any failure falls back to
        the generic format-based metadata reader. Finally, if the title looks
        like an Amazon wireless-delivery filename, a readable title is
        recovered from the filename itself.
        """
        if path.endswith('.kfx'):
            from calibre.ebooks.metadata.kfx import read_metadata_kfx
            try:
                kfx_path = path
                with open(kfx_path, 'rb') as f:
                    # Magic bytes identifying a DRMed KFX container
                    if f.read(8) != b'\xeaDRMION\xee':
                        f.seek(0)
                        mi = read_metadata_kfx(f)
                    else:
                        # DRMed book: metadata lives in the .sdr sidecar folder
                        kfx_path = os.path.join(path.rpartition('.')[0] + '.sdr', 'assets', 'metadata.kfx')
                        with open(kfx_path, 'rb') as mf:
                            mi = read_metadata_kfx(mf)
            except Exception:
                import traceback
                traceback.print_exc()
                if DEBUG:
                    prints('failed kfx path:', kfx_path)
                # Fall back to the generic reader on any KFX parsing error
                mi = cls.metadata_from_formats([path])
        else:
            mi = cls.metadata_from_formats([path])
        if mi.title == _('Unknown') or ('-asin' in mi.title and '-type' in mi.title):
            # Title looks like an Amazon wireless download name; recover the
            # human-readable title from the filename pattern.
            path = as_unicode(path, filesystem_encoding, 'replace')
            match = cls.WIRELESS_FILE_NAME_PATTERN.match(os.path.basename(path))
            if match is not None:
                mi.title = match.group('title')
        return mi
    def get_annotations(self, path_map):
        """Collect annotation (bookmark) files from the device.

        *path_map* maps book ids to dicts with a bookmark-template 'path' and
        the available 'fmts'. Returns a dict of id -> UserAnnotation, plus an
        optional 'clippings' entry for 'My Clippings.txt'. The result becomes
        job.result consumed by gui2.ui.annotations_fetched().
        """
        # Which book formats map to which sidecar annotation extension
        MBP_FORMATS = ['azw', 'mobi', 'prc', 'txt']
        mbp_formats = set(MBP_FORMATS)
        PDR_FORMATS = ['pdf']
        pdr_formats = set(PDR_FORMATS)
        TAN_FORMATS = ['tpz', 'azw1']
        tan_formats = set(TAN_FORMATS)

        def get_storage():
            # List of ebook directories on all mounted device volumes
            storage = []
            if self._main_prefix:
                storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN))
            if self._card_a_prefix:
                storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A))
            if self._card_b_prefix:
                storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B))
            return storage

        def resolve_bookmark_paths(storage, path_map):
            # Rewrite each bookmark template path to an existing sidecar file
            # on one of the volumes; drop entries with no bookmark file.
            pop_list = []
            book_ext = {}
            for id in path_map:
                file_fmts = set()
                for fmt in path_map[id]['fmts']:
                    file_fmts.add(fmt)
                bookmark_extension = None
                # Pick the sidecar extension matching the book's format family
                if file_fmts.intersection(mbp_formats):
                    book_extension = list(file_fmts.intersection(mbp_formats))[0]
                    bookmark_extension = 'mbp'
                elif file_fmts.intersection(tan_formats):
                    book_extension = list(file_fmts.intersection(tan_formats))[0]
                    bookmark_extension = 'tan'
                elif file_fmts.intersection(pdr_formats):
                    book_extension = list(file_fmts.intersection(pdr_formats))[0]
                    bookmark_extension = 'pdr'
                if bookmark_extension:
                    for vol in storage:
                        # Substitute the real volume for the '/<storage>'
                        # placeholder, and the real extension for 'bookmark'
                        bkmk_path = path_map[id]['path'].replace(os.path.abspath('/<storage>'),vol)
                        bkmk_path = bkmk_path.replace('bookmark',bookmark_extension)
                        if os.path.exists(bkmk_path):
                            path_map[id] = bkmk_path
                            book_ext[id] = book_extension
                            break
                    else:
                        # No volume had the bookmark file
                        pop_list.append(id)
                else:
                    pop_list.append(id)
            # Remove non-existent bookmark templates
            for id in pop_list:
                path_map.pop(id)
            return path_map, book_ext

        def get_my_clippings(storage, bookmarked_books):
            # add an entry for 'My Clippings.txt'
            for vol in storage:
                mc_path = os.path.join(vol,'My Clippings.txt')
                if os.path.exists(mc_path):
                    return mc_path
            return None

        storage = get_storage()
        path_map, book_ext = resolve_bookmark_paths(storage, path_map)
        bookmarked_books = {}
        for id in path_map:
            bookmark_ext = path_map[id].rpartition('.')[2]
            myBookmark = Bookmark(path_map[id], id, book_ext[id], bookmark_ext)
            bookmarked_books[id] = self.UserAnnotation(type='kindle_bookmark', value=myBookmark)
        mc_path = get_my_clippings(storage, bookmarked_books)
        if mc_path:
            timestamp = utcfromtimestamp(os.path.getmtime(mc_path))
            bookmarked_books['clippings'] = self.UserAnnotation(type='kindle_clippings',
                                                                value=dict(path=mc_path, timestamp=timestamp))
        # This returns as job.result in gui2.ui.annotations_fetched(self,job)
        return bookmarked_books
    def generate_annotation_html(self, bookmark):
        """Render a Bookmark object as HTML for display in book comments.

        Returns a BeautifulSoup document whose root is
        <div class="user_annotations"> containing the last-read location
        followed by the individual notes/highlights sorted by location.
        """
        from calibre.ebooks.BeautifulSoup import BeautifulSoup
        # Returns <div class="user_annotations"> ... </div>
        last_read_location = bookmark.last_read_location
        timestamp = utcfromtimestamp(bookmark.timestamp)
        percent_read = bookmark.percent_read
        ka_soup = BeautifulSoup()
        dtc = 0  # insertion index into divTag
        divTag = ka_soup.new_tag('div')
        divTag['class'] = 'user_annotations'
        # Add the last-read location; PDFs use page numbers, everything else
        # uses Kindle "Location" numbers.
        if bookmark.book_format == 'pdf':
            markup = _("%(time)s<br />Last page read: %(loc)d (%(pr)d%%)") % dict(
                time=strftime('%x', timestamp.timetuple()),
                loc=last_read_location,
                pr=percent_read)
        else:
            markup = _("%(time)s<br />Last page read: Location %(loc)d (%(pr)d%%)") % dict(
                time=strftime('%x', timestamp.timetuple()),
                loc=last_read_location,
                pr=percent_read)
        spanTag = BeautifulSoup('<span style="font-weight:bold">' + markup + '</span>').find('span')
        divTag.insert(dtc, spanTag)
        dtc += 1
        divTag.insert(dtc, ka_soup.new_tag('br'))
        dtc += 1
        if bookmark.user_notes:
            user_notes = bookmark.user_notes
            annotations = []
            # Add the annotations sorted by location
            # Italicize highlighted text
            for location in sorted(user_notes):
                if user_notes[location]['text']:
                    annotations.append(
                        _('<b>Location %(dl)d • %(typ)s</b><br />%(text)s<br />') % dict(
                            dl=user_notes[location]['displayed_location'],
                            typ=user_notes[location]['type'],
                            text=(user_notes[location]['text'] if
                                  user_notes[location]['type'] == 'Note' else
                                  '<i>%s</i>' % user_notes[location]['text'])))
                else:
                    # Entry with no text (e.g. a plain bookmark)
                    if bookmark.book_format == 'pdf':
                        annotations.append(
                            _('<b>Page %(dl)d • %(typ)s</b><br />') % dict(
                                dl=user_notes[location]['displayed_location'],
                                typ=user_notes[location]['type']))
                    else:
                        annotations.append(
                            _('<b>Location %(dl)d • %(typ)s</b><br />') % dict(
                                dl=user_notes[location]['displayed_location'],
                                typ=user_notes[location]['type']))
            for annotation in annotations:
                annot = BeautifulSoup('<span>' + annotation + '</span>').find('span')
                divTag.insert(dtc, annot)
                dtc += 1
        ka_soup.insert(0,divTag)
        return ka_soup
    def add_annotation_to_library(self, db, db_id, annotation):
        """Merge a device annotation into the calibre library.

        'kindle_bookmark' annotations are rendered to HTML and appended to the
        book's comments (replacing any previous annotation block), and the raw
        bookmark file is stored as a format of the book. 'kindle_clippings'
        updates (or creates) the special 'My Clippings' book.
        """
        from calibre.ebooks.BeautifulSoup import prettify
        from calibre.ebooks.metadata import MetaInformation
        bm = annotation
        # Books with these tags never receive annotations
        ignore_tags = {'Catalog', 'Clippings'}
        if bm.type == 'kindle_bookmark':
            mi = db.get_metadata(db_id, index_is_id=True)
            user_notes_soup = self.generate_annotation_html(bm.value)
            if mi.comments:
                # Strip any previously-added annotation block from the comments
                a_offset = mi.comments.find('<div class="user_annotations">')
                ad_offset = mi.comments.find('<hr class="annotations_divider" />')
                if a_offset >= 0:
                    mi.comments = mi.comments[:a_offset]
                if ad_offset >= 0:
                    mi.comments = mi.comments[:ad_offset]
            if set(mi.tags).intersection(ignore_tags):
                return
            if mi.comments:
                # Separate existing comments from the annotations with a rule
                hrTag = user_notes_soup.new_tag('hr')
                hrTag['class'] = 'annotations_divider'
                user_notes_soup.insert(0, hrTag)
                mi.comments += prettify(user_notes_soup)
            else:
                mi.comments = prettify(user_notes_soup)
            # Update library comments
            db.set_comment(db_id, mi.comments)
            # Add bookmark file to db_id
            db.add_format_with_hooks(db_id, bm.value.bookmark_extension,
                                     bm.value.path, index_is_id=True)
        elif bm.type == 'kindle_clippings':
            # Find 'My Clippings' author=Kindle in database, or add
            last_update = 'Last modified %s' % strftime('%x %X',bm.value['timestamp'].timetuple())
            mc_id = list(db.data.search_getting_ids('title:"My Clippings"', '', sort_results=False))
            if mc_id:
                db.add_format_with_hooks(mc_id[0], 'TXT', bm.value['path'],
                                         index_is_id=True)
                mi = db.get_metadata(mc_id[0], index_is_id=True)
                mi.comments = last_update
                db.set_metadata(mc_id[0], mi)
            else:
                mi = MetaInformation('My Clippings', authors=['Kindle'])
                mi.tags = ['Clippings']
                mi.comments = last_update
                db.add_books([bm.value['path']], ['txt'], [mi])
class KINDLE2(KINDLE):
    """Driver for USB-mass-storage Kindles from the Kindle 2 onwards.

    Extends the base KINDLE driver with collections support, APNX page-number
    generation, and workarounds for Amazon's cover-thumbnail deletion bug.
    """

    name = 'Kindle 2/3/4/Touch/PaperWhite/Voyage Device Interface'
    description = _('Communicate with the Kindle 2/3/4/Touch/Paperwhite/Voyage e-book reader.')
    FORMATS = ['azw', 'mobi', 'azw3', 'prc', 'azw1', 'tpz', 'azw4', 'kfx', 'pobi', 'pdf', 'txt']
    DELETE_EXTS = KINDLE.DELETE_EXTS + ['.mbp1', '.mbs', '.sdr', '.han']
    # On the Touch, there's also .asc files, but not using the same basename
    # (for X-Ray & End Actions), azw3f & azw3r files, but all of them are in
    # the .sdr sidecar folder
    PRODUCT_ID = [0x0002, 0x0004, 0x0324]
    BCD = [0x0100, 0x0310, 0x401, 0x409]
    # SUPPORTS_SUB_DIRS = False # Apparently the Paperwhite doesn't like files placed in subdirectories
    # SUPPORTS_SUB_DIRS_FOR_SCAN = True

    EXTRA_CUSTOMIZATION_MESSAGE = [
        _('Send page number information when sending books') + ':::' + _(
            'The Kindle 3 and newer versions can use page number information'
            ' in MOBI files. With this option, calibre will calculate and send'
            ' this information to the Kindle when uploading MOBI files by'
            ' USB. Note that the page numbers do not correspond to any paper'
            ' book.'),
        _('Page count calculation method') + ':::' + '<p>' + _(
            'There are multiple ways to generate the page number information.'
            ' If a page count is given then the book will be divided into that many pages.'
            ' Otherwise the number of pages will be approximated using one of the following'
            ' methods.<ul>'
            ' <li>fast: 2300 characters of uncompressed text per page.\n\n'
            ' <li>accurate: Based on the number of chapters, paragraphs, and visible lines in the book.'
            ' This method is designed to simulate an average paperback book where there are 32 lines per'
            ' page and a maximum of 70 characters per line.\n\n'
            ' <li>pagebreak: The "pagebreak" method uses the presence of <mbp:pagebreak> tags within'
            ' the book to determine pages.</ul>'
            'Methods other than "fast" are going to be much slower.'
            ' Further, if "pagebreak" fails to determine a page count accurate will be used, and if '
            ' "accurate" fails fast will be used.'),
        _('Custom column name to retrieve page counts from') + ':::' + _(
            'If you have a custom column in your library that you use to'
            ' store the page count of books, you can have calibre use that'
            ' information, instead of calculating a page count. Specify the'
            ' name of the custom column here, for example, #pages.'),
        _('Custom column name to retrieve calculation method from') + ':::' + _(
            'If you have a custom column in your library that you use to'
            ' store the preferred method for calculating the number of pages'
            ' for a book, you can have calibre use that method instead of the'
            ' default one selected above. Specify the name of the custom'
            ' column here, for example, #pagemethod. The custom column should have the '
            ' values: fast, accurate or pagebreak.'),
        _('Overwrite existing APNX on device') + ':::' + _(
            'Uncheck this option to allow an APNX file existing on the device'
            ' to have priority over the version which calibre would send.'
            ' Since APNX files are usually deleted when a book is removed from'
            ' the Kindle, this is mostly useful when resending a book to the'
            ' device which is already on the device (e.g. after making a'
            ' modification).'),
    ]
    EXTRA_CUSTOMIZATION_DEFAULT = [
        True,
        'fast',
        '',
        '',
        True,
    ]
    # Indices into EXTRA_CUSTOMIZATION_* above
    OPT_APNX = 0
    OPT_APNX_METHOD = 1
    OPT_APNX_CUST_COL = 2
    OPT_APNX_METHOD_COL = 3
    OPT_APNX_OVERWRITE = 4
    EXTRA_CUSTOMIZATION_CHOICES = {OPT_APNX_METHOD: set(APNXBuilder.generators.keys())}

    # x330 on the PaperWhite
    # x262 on the Touch. Doesn't choke on x330, though.
    # x470 on the Voyage, checked that it works on PW, Touch checked by eschwartz.
    # x500 on the Oasis 2017. checked that it works on the PW3
    THUMBNAIL_HEIGHT = 500

    @classmethod
    def migrate_extra_customization(cls, vals):
        # Convert a legacy boolean APNX-method setting to its string form.
        if isinstance(vals[cls.OPT_APNX_METHOD], bool):
            # Previously this option used to be a bool
            vals[cls.OPT_APNX_METHOD] = 'accurate' if vals[cls.OPT_APNX_METHOD] else 'fast'
        return vals

    def formats_to_scan_for(self):
        # Scan for azw3/kfx in addition to the formats the base class knows.
        ans = USBMS.formats_to_scan_for(self) | {'azw3', 'kfx'}
        return ans

    def books(self, oncard=None, end_session=True):
        """List books on the device, annotated with collection membership
        read from system/collections.json (best-effort)."""
        bl = USBMS.books(self, oncard=oncard, end_session=end_session)
        # Read collections information
        collections = os.path.join(self._main_prefix, 'system', 'collections.json')
        if os.access(collections, os.R_OK):
            try:
                self.kindle_update_booklist(bl, collections)
            except:
                import traceback
                traceback.print_exc()
        return bl

    def kindle_update_booklist(self, bl, collections):
        """Set book.device_collections from the Kindle's collections.json,
        which keys items by a hash ending in the sha1 of the device path."""
        with open(collections, 'rb') as f:
            collections = f.read()
        collections = json.loads(collections)
        path_map = {}
        for name, val in collections.items():
            col = name.split('@')[0]
            items = val.get('items', [])
            for x in items:
                # Last 40 chars are the sha1 hex digest of the book's path
                x = x[-40:]
                if x not in path_map:
                    path_map[x] = set()
                path_map[x].add(col)
        if path_map:
            for book in bl:
                # Device paths are rooted at /mnt/us on the Kindle itself
                path = '/mnt/us/'+book.lpath
                h = hashlib.sha1(as_bytes(path)).hexdigest()
                if h in path_map:
                    book.device_collections = list(sorted(path_map[h]))

    def post_open_callback(self):
        # Restore covers destroyed by Amazon (see sync_cover_thumbnails)
        try:
            self.sync_cover_thumbnails()
        except Exception:
            import traceback
            traceback.print_exc()

        # Detect if the product family needs .apnx files uploaded to sidecar folder
        product_id = self.device_being_opened[1]
        self.sidecar_apnx = False
        if product_id > 0x3:
            # Check if we need to put the apnx into a sidecar dir
            for _, dirnames, _ in os.walk(self._main_prefix):
                for x in dirnames:
                    if x.endswith('.sdr'):
                        self.sidecar_apnx = True
                        return

    def upload_cover(self, path, filename, metadata, filepath):
        '''
        Upload sidecar files: cover thumbnails and page count
        '''
        # Upload the cover thumbnail
        try:
            self.upload_kindle_thumbnail(metadata, filepath)
        except:
            import traceback
            traceback.print_exc()
        # Upload the apnx file
        self.upload_apnx(path, filename, metadata, filepath)

    def amazon_system_thumbnails_dir(self):
        # Directory where the Kindle firmware keeps cover thumbnails
        return os.path.join(self._main_prefix, 'system', 'thumbnails')

    def thumbpath_from_filepath(self, filepath):
        """Return the device thumbnail path for the book at *filepath*, or
        None if the thumbnails directory is missing or the book has no
        recognizable thumbnail name."""
        thumb_dir = self.amazon_system_thumbnails_dir()
        if os.path.exists(thumb_dir):
            with open(filepath, 'rb') as f:
                tfname = thumbnail_filename(f)
            if tfname:
                return os.path.join(thumb_dir, tfname)

    def amazon_cover_bug_cache_dir(self):
        # see https://www.mobileread.com/forums/showthread.php?t=329945
        return os.path.join(self._main_prefix, 'amazon-cover-bug')

    def upload_kindle_thumbnail(self, metadata, filepath):
        """Write the book's cover thumbnail to the device, both to the
        system thumbnails folder and to the cover-bug cache folder."""
        coverdata = getattr(metadata, 'thumbnail', None)
        if not coverdata or not coverdata[2]:
            return
        tp = self.thumbpath_from_filepath(filepath)
        if tp:
            with open(tp, 'wb') as f:
                f.write(coverdata[2])
                fsync(f)
            cache_dir = self.amazon_cover_bug_cache_dir()
            try:
                os.mkdir(cache_dir)
            except OSError:
                pass
            with open(os.path.join(cache_dir, os.path.basename(tp)), 'wb') as f:
                f.write(coverdata[2])
                fsync(f)

    def sync_cover_thumbnails(self):
        import shutil
        # See https://www.mobileread.com/forums/showthread.php?t=329945
        # for why this is needed
        if DEBUG:
            prints('Syncing cover thumbnails to workaround amazon cover bug')
        dest_dir = self.amazon_system_thumbnails_dir()
        src_dir = self.amazon_cover_bug_cache_dir()
        if not os.path.exists(dest_dir) or not os.path.exists(src_dir):
            return
        count = 0
        for name, src_stat_result in get_files_in(src_dir):
            dest_path = os.path.join(dest_dir, name)
            try:
                dest_stat_result = os.lstat(dest_path)
            except OSError:
                needs_sync = True
            else:
                # Size mismatch means Amazon replaced it with a generic cover
                needs_sync = src_stat_result.st_size != dest_stat_result.st_size
            if needs_sync:
                count += 1
                if DEBUG:
                    prints('Restoring cover thumbnail:', name)
                with open(os.path.join(src_dir, name), 'rb') as src, open(dest_path, 'wb') as dest:
                    shutil.copyfileobj(src, dest)
                    fsync(dest)
        if DEBUG:
            prints(f'Restored {count} cover thumbnails that were destroyed by Amazon')

    def delete_single_book(self, path):
        # Also delete the cover thumbnail (device copy and cached copy)
        try:
            tp1 = self.thumbpath_from_filepath(path)
            if tp1:
                tp2 = os.path.join(self.amazon_cover_bug_cache_dir(), os.path.basename(tp1))
                for tp in (tp1, tp2):
                    try:
                        os.remove(tp)
                    except OSError as err:
                        if err.errno != errno.ENOENT:
                            prints(f'Failed to delete thumbnail for {path!r} at {tp!r} with error: {err}')
        except Exception:
            import traceback
            traceback.print_exc()
        USBMS.delete_single_book(self, path)

    def upload_apnx(self, path, filename, metadata, filepath):
        """Generate and upload an APNX page-number file for the book, honoring
        the user's method/page-count/overwrite customizations."""
        from calibre.devices.kindle.apnx import APNXBuilder

        opts = self.settings()
        if not opts.extra_customization[self.OPT_APNX]:
            return
        if os.path.splitext(filepath.lower())[1] not in ('.azw', '.mobi',
                                                         '.prc', '.azw3'):
            return

        # Create the sidecar folder if necessary
        if (self.sidecar_apnx):
            path = os.path.join(os.path.dirname(filepath), filename+".sdr")
            if not os.path.exists(path):
                os.makedirs(path)

        cust_col_name = opts.extra_customization[self.OPT_APNX_CUST_COL]
        custom_page_count = 0
        if cust_col_name:
            try:
                custom_page_count = int(metadata.get(cust_col_name, 0))
            except:
                pass

        apnx_path = '%s.apnx' % os.path.join(path, filename)
        apnx_builder = APNXBuilder()
        # Check to see if there is an existing apnx file on Kindle we should keep.
        if opts.extra_customization[self.OPT_APNX_OVERWRITE] or not os.path.exists(apnx_path):
            try:
                method = opts.extra_customization[self.OPT_APNX_METHOD]
                cust_col_name = opts.extra_customization[self.OPT_APNX_METHOD_COL]
                if cust_col_name:
                    try:
                        temp = str(metadata.get(cust_col_name)).lower()
                        if temp in self.EXTRA_CUSTOMIZATION_CHOICES[self.OPT_APNX_METHOD]:
                            method = temp
                        else:
                            print("Invalid method choice for this book (%r), ignoring." % temp)
                    except:
                        print('Could not retrieve override method choice, using default.')
                apnx_builder.write_apnx(filepath, apnx_path, method=method, page_count=custom_page_count)
            except:
                print('Failed to generate APNX')
                import traceback
                traceback.print_exc()
class KINDLE_DX(KINDLE2):
    """Driver for the Kindle DX, which has no cover-thumbnail store, so the
    KINDLE2 thumbnail upload/cleanup is disabled."""

    name = 'Kindle DX Device Interface'
    description = _('Communicate with the Kindle DX e-book reader.')

    FORMATS = ['azw', 'mobi', 'prc', 'azw1', 'tpz', 'azw4', 'pobi', 'pdf', 'txt']
    PRODUCT_ID = [0x0003]
    BCD = [0x0100]

    def upload_kindle_thumbnail(self, metadata, filepath):
        # No system/thumbnails folder on the DX
        pass

    def delete_single_book(self, path):
        # Skip KINDLE2's thumbnail cleanup; go straight to the base deletion
        USBMS.delete_single_book(self, path)
class KINDLE_FIRE(KINDLE2):
    """Driver for the Kindle Fire tablet. Like the DX, it has no
    cover-thumbnail store, so the KINDLE2 thumbnail handling is disabled."""

    name = 'Kindle Fire Device Interface'
    description = _('Communicate with the Kindle Fire')
    gui_name = 'Fire'

    FORMATS = ['azw3', 'azw', 'mobi', 'prc', 'azw1', 'tpz', 'azw4', 'kfx', 'pobi', 'pdf', 'txt']

    PRODUCT_ID = [0x0006]
    BCD = [0x216, 0x100]

    # Fire uses a capitalized Documents folder and is scanned from the root
    EBOOK_DIR_MAIN = 'Documents'
    SUPPORTS_SUB_DIRS = False
    SCAN_FROM_ROOT = True
    SUPPORTS_SUB_DIRS_FOR_SCAN = True
    VENDOR_NAME = 'AMAZON'
    WINDOWS_MAIN_MEM = 'KINDLE'

    def upload_kindle_thumbnail(self, metadata, filepath):
        # No system/thumbnails folder on the Fire
        pass

    def delete_single_book(self, path):
        # Skip KINDLE2's thumbnail cleanup; go straight to the base deletion
        USBMS.delete_single_book(self, path)
| 28,887 | Python | .py | 591 | 36.769882 | 126 | 0.575299 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,774 | page_number_type.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/page_number_type.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
import enum
class PageNumberTypes(enum.Enum):
    """Single-character codes for the numbering style of a page range in an
    APNX page map: arabic numerals, roman numerals, or free-form labels."""

    Arabic = "a"
    Roman = "r"
    Custom = "c"
| 225 | Python | .py | 8 | 25.25 | 64 | 0.658879 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,775 | i_page_generator.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/i_page_generator.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
import struct
from abc import ABCMeta, abstractmethod
from typing import Optional
from calibre.devices.kindle.apnx_page_generator.pages import Pages
from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.utils.logging import default_log
from polyglot.builtins import as_bytes
class IPageGenerator(metaclass=ABCMeta):
    """Strategy interface for APNX page-location generators.

    Subclasses implement ``_generate`` (the primary algorithm) and
    ``_generate_fallback`` (used when the primary one fails or yields no
    pages). ``generate`` wires the two together.
    """

    @abstractmethod
    def _generate(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        pass

    @abstractmethod
    def _generate_fallback(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        pass

    def generate(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        """Run the primary generator, falling back when it errors or produces
        zero pages. FastPageGenerator has no fallback, so its errors propagate."""
        try:
            pages = self._generate(mobi_file_path, real_count)
            if pages.number_of_pages > 0:
                return pages
            return self._generate_fallback(mobi_file_path, real_count)
        except Exception as exc:
            if self.__class__.__name__ == "FastPageGenerator":
                raise exc
            return self._generate_fallback(mobi_file_path, real_count)

    @abstractmethod
    def name(self) -> str:
        pass
def mobi_html(mobi_file_path: str) -> bytes:
    """Return the lower-cased raw HTML of an (un-DRMed) MOBI file as bytes.

    Raises for DRMed books, since their text cannot be extracted.
    """
    from calibre.ebooks.mobi.reader.mobi6 import MobiReader
    reader = MobiReader(mobi_file_path, default_log)
    if reader.book_header.encryption_type != 0:
        raise Exception("DRMed book")
    reader.extract_text()
    return as_bytes(reader.mobi_html.lower())
def mobi_html_length(mobi_file_path: str) -> int:
    """Return the uncompressed text length of a MOBI file, read from bytes
    4-8 (big-endian uint32) of PalmDB record 0 — no text decompression."""
    with open(mobi_file_path, 'rb') as stream:
        header = PdbHeaderReader(stream)
        record0 = header.section_data(0)
    return struct.unpack('>I', record0[4:8])[0]
| 1,778 | Python | .py | 42 | 35.904762 | 90 | 0.679234 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,776 | page_group.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/page_group.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
from typing import List, Tuple, Union
from calibre.devices.kindle.apnx_page_generator.page_number_type import PageNumberTypes
class PageGroup:
    """A contiguous run of page locations sharing one numbering style.

    The constructor accepts either a single location or a list of locations
    (poor-man's overloading). Custom-numbered groups additionally carry one
    label per location.
    """

    def __init__(self, page_locations: Union[int, List[int]], page_number_type: PageNumberTypes, first_value: int,
                 page_labels: Union[str, List[str], None] = None):
        """Create a group starting at page number *first_value*.

        :param page_locations: byte offset(s) of the page starts
        :param page_number_type: numbering style for every page in the group
        :param first_value: page number of the first location (ignored for Custom)
        :param page_labels: required for Custom groups: one non-empty label
            per location (a bare string is allowed for a single location)
        """
        # isinstance instead of `__class__ == int`: idiomatic, and accepts
        # int subclasses as well
        if isinstance(page_locations, int):
            self.page_locations: List[int] = [page_locations]
        else:
            self.page_locations: List[int] = page_locations
        self.__page_number_type: PageNumberTypes = page_number_type
        self.__first_value = first_value
        if page_number_type == PageNumberTypes.Custom:
            assert page_labels is not None
            if isinstance(page_labels, str):
                # A single label is only valid for a single location
                assert len(self.page_locations) == 1 and len(page_labels) > 0
                self.__page_number_labels: List[str] = [page_labels]
            else:
                assert len(page_labels) == len(self.page_locations)
                assert all(len(label) > 0 for label in page_labels)
                self.__page_number_labels: List[str] = page_labels

    def append(self, page_location: Union[int, Tuple[int, str]]) -> None:
        """Add one page: a bare offset for numbered groups, an
        ``(offset, label)`` pair for Custom groups."""
        if isinstance(page_location, int):
            assert self.__page_number_type != PageNumberTypes.Custom
            self.page_locations.append(page_location)
        else:
            assert self.__page_number_type == PageNumberTypes.Custom
            self.page_locations.append(page_location[0])
            self.__page_number_labels.append(page_location[1])

    @property
    def page_number_types(self) -> PageNumberTypes:
        """Numbering style shared by every page in this group."""
        return self.__page_number_type

    @property
    def number_of_pages(self) -> int:
        """Count of pages in this group."""
        return len(self.page_locations)

    @property
    def last_value(self) -> int:
        """Page number of the last page in this group."""
        return self.__first_value + len(self.page_locations) - 1

    def get_page_map(self, starting_location: int) -> str:
        """Serialize the group as an APNX page-map entry
        ``(start,type,value-or-labels)``."""
        if self.__page_number_type != PageNumberTypes.Custom:
            values = str(self.__first_value)
        else:
            values = "|".join(self.__page_number_labels)
        return "({},{},{})".format(starting_location, self.__page_number_type.value, values)
| 2,400 | Python | .py | 48 | 40.625 | 114 | 0.626547 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,777 | pages.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/pages.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
import itertools
from typing import List, Optional
from calibre.devices.kindle.apnx_page_generator.page_group import PageGroup
from calibre.devices.kindle.apnx_page_generator.page_number_type import PageNumberTypes
class Pages:
    """An ordered collection of :class:`PageGroup` objects describing all the
    page locations of a book, serializable to an APNX page map."""

    def __init__(self, page_locations: Optional[List[int]] = None):
        """With a list of locations, start with a single arabic-numbered group
        beginning at page 1; with None, start empty."""
        # isinstance instead of `__class__ == list` (idiomatic; also accepts
        # list subclasses). None still yields an empty collection.
        if isinstance(page_locations, list):
            self.__pages_groups: List[PageGroup] = [PageGroup(page_locations, PageNumberTypes.Arabic, 1)]
        else:
            self.__pages_groups: List[PageGroup] = []

    def append(self, page_location: PageGroup) -> None:
        """Add a page group to the end of the collection."""
        self.__pages_groups.append(page_location)

    @property
    def last_group(self) -> PageGroup:
        """The most recently added group (IndexError when empty)."""
        return self.__pages_groups[-1]

    @property
    def page_maps(self) -> str:
        """APNX page-map string: each group's entry, comma-separated, with
        running 1-based start positions."""
        location = 1
        entries = []
        for group in self.__pages_groups:
            entries.append(group.get_page_map(location))
            location += group.number_of_pages
        return ",".join(entries)

    @property
    def page_locations(self) -> List[int]:
        """All page byte offsets, flattened across groups in order."""
        return list(itertools.chain.from_iterable(group.page_locations for group in self.__pages_groups))

    @property
    def number_of_pages(self) -> int:
        """Total page count across all groups."""
        return sum(len(group.page_locations) for group in self.__pages_groups)
| 1,436 | Python | .py | 33 | 36.848485 | 112 | 0.665948 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,778 | pagebreak_page_generator.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/generators/pagebreak_page_generator.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
import re
from typing import Optional
from calibre.devices.kindle.apnx_page_generator.generators.fast_page_generator import FastPageGenerator
from calibre.devices.kindle.apnx_page_generator.i_page_generator import IPageGenerator, mobi_html
from calibre.devices.kindle.apnx_page_generator.pages import Pages
class PagebreakPageGenerator(IPageGenerator):
    """Derives page boundaries from explicit pagebreak tags in the book's
    HTML, falling back to the fast generator when none are found."""

    def name(self) -> str:
        return "pagebreak"

    def _generate_fallback(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        return FastPageGenerator.instance.generate(mobi_file_path, real_count)

    def _generate(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        """ Determine pages based on the presence of <*pagebreak*/>. """
        html = mobi_html(mobi_file_path)
        breaks = [match.end() for match in re.finditer(b'<[^>]*pagebreak[^>]*>', html)]
        return Pages(breaks)


PagebreakPageGenerator.instance = PagebreakPageGenerator()
| 1,111 | Python | .py | 21 | 47.619048 | 103 | 0.721296 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,779 | exact_page_generator.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/generators/exact_page_generator.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
from typing import Optional
from calibre.devices.kindle.apnx_page_generator.generators.fast_page_generator import FastPageGenerator
from calibre.devices.kindle.apnx_page_generator.i_page_generator import IPageGenerator, mobi_html_length
from calibre.devices.kindle.apnx_page_generator.pages import Pages
class ExactPageGenerator(IPageGenerator):
    """Divides the book into exactly *real_count* pages (e.g. a page count
    taken from a custom library column)."""

    instance = None

    def name(self) -> str:
        return "exact"

    def _generate_fallback(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        return FastPageGenerator.instance.generate(mobi_file_path, real_count)

    def _generate(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        """
        Given a specified page count (such as from a custom column),
        create our array of pages for the apnx file by dividing by
        the content size of the book.

        A None or zero *real_count* raises (TypeError/ZeroDivisionError),
        which generate() turns into a fallback to the fast generator.
        """
        text_length = mobi_html_length(mobi_file_path)
        # BUGFIX: when real_count exceeds text_length the integer division
        # yields 0, and the original `while count < text_length: count += 0`
        # loop never terminated. Clamp the stride to at least 1; the slice
        # below still caps the result at real_count pages.
        chars_per_page = max(1, int(text_length // real_count))
        pages = list(range(0, text_length, chars_per_page))
        if len(pages) > real_count:
            # Rounding created extra page entries
            pages = pages[:real_count]
        return Pages(pages)


ExactPageGenerator.instance = ExactPageGenerator()
| 1,461 | Python | .py | 31 | 40 | 104 | 0.689266 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,780 | accurate_page_generator.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/generators/accurate_page_generator.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
from typing import Optional
from calibre.devices.kindle.apnx_page_generator.generators.fast_page_generator import FastPageGenerator
from calibre.devices.kindle.apnx_page_generator.i_page_generator import IPageGenerator, mobi_html
from calibre.devices.kindle.apnx_page_generator.pages import Pages
class AccuratePageGenerator(IPageGenerator):
    # Singleton instance, assigned after the class definition below.
    instance = None

    def name(self) -> str:
        return "accurate"

    def _generate_fallback(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        return FastPageGenerator.instance.generate(mobi_file_path, real_count)

    def _generate(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        """
        A more accurate but much more resource intensive and slower
        method to calculate the page length.

        Parses the uncompressed text. In an average paper back book
        There are 32 lines per page and a maximum of 70 characters
        per line.

        Each paragraph starts a new line and every 70 characters
        (minus markup) in a paragraph starts a new line. The
        position after every 32 lines will be marked as a new
        page.

        This can be make more accurate by accounting for
        <div class="mbp_pagebreak" /> as a new page marker.
        And <br> elements as an empty line.
        """
        pages = []
        html = mobi_html(mobi_file_path)

        # States of the hand-rolled HTML scanner below
        in_tag = False       # currently inside <...>
        in_p = False         # currently inside a <p> element
        check_p = False      # just saw '<'; next chars decide if it is a p tag
        closing = False      # saw '</'; the tag being checked is a closing one
        p_char_count = 0     # visible characters seen on the current line

        # Get positions of every line
        # A line is either a paragraph starting
        # or every 70 characters in a paragraph.
        lines = []
        pos = -1

        # We want this to be as fast as possible so we
        # are going to do one pass across the text. re
        # and string functions will parse the text each
        # time they are called.
        #
        # We can use .lower() here because we are
        # not modifying the text. In this case the case
        # doesn't matter just the absolute character and
        # the position within the stream.
        data = bytearray(html)
        slash, p, lt, gt = map(ord, '/p<>')
        for c in data:
            pos += 1

            # Check if we are starting or stopping a p tag.
            if check_p:
                if c == slash:
                    closing = True
                    continue
                elif c == p:
                    if closing:
                        in_p = False
                    else:
                        in_p = True
                        # Record the position of the '<' that opened the tag
                        lines.append(pos - 2)
                    check_p = False
                    closing = False
                    continue

            if c == lt:
                in_tag = True
                check_p = True
                continue
            elif c == gt:
                in_tag = False
                check_p = False
                continue

            if in_p and not in_tag:
                p_char_count += 1
                if p_char_count == 70:
                    # 70 visible characters fill a line; start a new one
                    lines.append(pos)
                    p_char_count = 0

        # Every 32 lines is a new page (32 lines/page per the docstring; the
        # original comment here said 30, which did not match the code)
        for i in range(0, len(lines), 32):
            pages.append(lines[i])

        return Pages(pages)


AccuratePageGenerator.instance = AccuratePageGenerator()
| 3,448 | Python | .py | 86 | 28.94186 | 103 | 0.571087 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,781 | fast_page_generator.py | kovidgoyal_calibre/src/calibre/devices/kindle/apnx_page_generator/generators/fast_page_generator.py | __license__ = 'GPL v3'
__copyright__ = '2022, Vaso Peras-Likodric <vaso at vipl.in.rs>'
__docformat__ = 'restructuredtext en'
from typing import Optional
from calibre.devices.kindle.apnx_page_generator.i_page_generator import IPageGenerator, mobi_html_length
from calibre.devices.kindle.apnx_page_generator.pages import Pages
class FastPageGenerator(IPageGenerator):
    """Cheapest page estimator: one page per 2300 bytes of uncompressed text.
    This is the last-resort generator, so it has no fallback of its own."""

    def name(self) -> str:
        return "fast"

    def _generate_fallback(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        raise Exception("Fast calculation impossible.")

    def _generate(self, mobi_file_path: str, real_count: Optional[int]) -> Pages:
        """
        2300 characters of uncompressed text per page. This is
        not meant to map 1 to 1 to a print book but to be a
        close enough measure.

        A test book was chosen and the characters were counted
        on one page. This number was round to 2240 then 60
        characters of markup were added to the total giving
        2300.

        Uncompressed text length is used because it's easily
        accessible in MOBI files (part of the header). Also,
        It's faster to work off of the length then to
        decompress and parse the actual text.
        """
        text_length = mobi_html_length(mobi_file_path)
        # One marker every 2300 bytes, starting at offset 0
        return Pages(list(range(0, text_length, 2300)))


FastPageGenerator.instance = FastPageGenerator()
| 1,517 | Python | .py | 33 | 38.575758 | 104 | 0.681416 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,782 | driver.py | kovidgoyal_calibre/src/calibre/devices/iliad/driver.py | __license__ = 'GPL 3'
__copyright__ = '2009, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for IRex Iliad
'''
from calibre.devices.usbms.driver import USBMS
class ILIAD(USBMS):
    """USB-mass-storage driver for the IRex Iliad e-book reader; pure
    configuration, all behavior comes from USBMS."""

    name = 'IRex Iliad Device Interface'
    description = _('Communicate with the IRex Iliad e-book reader.')
    author = 'John Schember'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    # Be sure these have an entry in calibre.devices.mime
    FORMATS = ['mobi', 'prc', 'html', 'pdf', 'txt']

    # USB identification used to detect the device
    VENDOR_ID = [0x04cc]
    PRODUCT_ID = [0x1a64]
    BCD = [0x100]

    VENDOR_NAME = 'IREX'
    WINDOWS_MAIN_MEM = 'ILIAD'
    # OSX_MAIN_MEM = ''

    MAIN_MEMORY_VOLUME_LABEL = 'IRex Iliad Main Memory'

    # Books are stored under books/; .mbp sidecars deleted with the book
    EBOOK_DIR_MAIN = 'books'
    DELETE_EXTS = ['.mbp']
    SUPPORTS_SUB_DIRS = True
| 907 | Python | .py | 25 | 32.12 | 72 | 0.633754 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,783 | driver.py | kovidgoyal_calibre/src/calibre/devices/edge/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal'
__docformat__ = 'restructuredtext en'
'''
Device driver for Barns and Nobel's Nook
'''
from calibre.devices.usbms.driver import USBMS
class EDGE(USBMS):
    '''
    USB mass-storage driver for the Entourage Edge. Configuration-only
    subclass of USBMS; the device presents itself as a generic
    Android/Linux USB gadget (see VENDOR_NAME / WINDOWS_MAIN_MEM).
    '''

    name = 'Edge Device Interface'
    gui_name = _('Entourage Edge')
    description = _('Communicate with the Entourage Edge.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'linux', 'osx']

    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf']

    # USB ids used for device detection
    VENDOR_ID = [0x0bb4]
    PRODUCT_ID = [0x0c02]
    BCD = [0x0223]

    VENDOR_NAME = ['ANDROID', 'LINUX']
    WINDOWS_MAIN_MEM = ['__FILE-STOR_GADG', 'FILE-CD_GADGET']
    WINDOWS_CARD_A_MEM = ['__FILE-STOR_GADG', 'FILE-CD_GADGET']

    MAIN_MEMORY_VOLUME_LABEL = 'Edge Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Edge Storage Card'

    # Books are uploaded into this folder on the device
    EBOOK_DIR_MAIN = 'download'
    SUPPORTS_SUB_DIRS = True
27,784 | __init__.py | kovidgoyal_calibre/src/calibre/devices/prst1/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,785 | driver.py | kovidgoyal_calibre/src/calibre/devices/prst1/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Device driver for the SONY T1 devices
'''
import os
import re
import time
from contextlib import closing
from datetime import date
from calibre import fsync
from calibre.constants import islinux
from calibre.devices.errors import DeviceError
from calibre.devices.mime import mime_type_ext
from calibre.devices.usbms.books import BookList, CollectionsBookList
from calibre.devices.usbms.device import USBDevice
from calibre.devices.usbms.driver import USBMS
from calibre.ebooks.metadata import authors_to_sort_string, authors_to_string
from calibre.prints import debug_print
from polyglot.builtins import long_type
DBPATH = 'Sony_Reader/database/books.db'
THUMBPATH = 'Sony_Reader/database/cache/books/%s/thumbnail/main_thumbnail.jpg'
class ImageWrapper:
    '''
    Lightweight marker object holding the path to a thumbnail image that
    already exists on the device. books() attaches it to books so that
    upload_book_cover() can tell device-resident thumbnails apart from
    in-library cover data (which is a sequence of bytes, not this type).
    '''

    def __init__(self, image_path):
        self.image_path = image_path

    def __repr__(self):
        # Aids debugging; no caller depends on the repr format.
        return f'{type(self).__name__}({self.image_path!r})'
class PRST1(USBMS):
    '''
    Driver for the SONY PRS-T1 and newer readers. Unlike a plain USBMS
    device, these readers keep book/collection metadata in an SQLite
    database on the device, which this driver reads and updates directly.
    '''

    name = 'SONY PRST1 and newer Device Interface'
    gui_name = 'SONY Reader'
    description = _('Communicate with the PRST1 and newer SONY e-book readers')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']
    path_sep = '/'
    booklist_class = CollectionsBookList

    FORMATS = ['epub', 'pdf', 'txt', 'book', 'zbf']  # The last two are used in japan
    CAN_SET_METADATA = ['collections']
    CAN_DO_DEVICE_DB_PLUGBOARD = True

    VENDOR_ID = [0x054c]  #: SONY Vendor Id
    PRODUCT_ID = [0x05c2]
    BCD = [0x226]

    VENDOR_NAME = 'SONY'
    # Windows PNP id patterns for main memory and the SD card drive
    WINDOWS_MAIN_MEM = re.compile(
        r'(PRS-T(1|2|2N|3)&)'
    )
    WINDOWS_CARD_A_MEM = re.compile(
        r'(PRS-T(1|2|2N|3)_{1,2}SD&)'
    )

    MAIN_MEMORY_VOLUME_LABEL = 'SONY Reader Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'SONY Reader Storage Card'

    THUMBNAIL_HEIGHT = 144
    SUPPORTS_SUB_DIRS = True
    SUPPORTS_USE_AUTHOR_SORT = True
    MUST_READ_METADATA = True
    # Books are uploaded into this folder on the device
    EBOOK_DIR_MAIN = 'Sony_Reader/media/books'

    # User-visible option strings; the position in this list matches the
    # OPT_* index constants defined below.
    EXTRA_CUSTOMIZATION_MESSAGE = [
        _('Comma separated list of metadata fields '
          'to turn into collections on the device. Possibilities include: ')+
          'series, tags, authors',
        _('Upload separate cover thumbnails for books') +
        ':::'+_('Normally, the SONY readers get the cover image from the'
                ' e-book file itself. With this option, calibre will send a '
                'separate cover image to the reader, useful if you are '
                'sending DRMed books in which you cannot change the cover.'),
        _('Refresh separate covers when using automatic management') +
        ':::' +
        _('Set this option to have separate book covers uploaded '
          'every time you connect your device. Unset this option if '
          'you have so many books on the reader that performance is '
          'unacceptable.'),
        _('Preserve cover aspect ratio when building thumbnails') +
        ':::' +
        _('Set this option if you want the cover thumbnails to have '
          'the same aspect ratio (width to height) as the cover. '
          'Unset it if you want the thumbnail to be the maximum size, '
          'ignoring aspect ratio.'),
        _('Use SONY Author Format (First Author Only)') +
        ':::' +
        _('Set this option if you want the author on the Sony to '
          'appear the same way the T1 sets it. This means it will '
          'only show the first author for books with multiple authors. '
          'Leave this disabled if you use Metadata plugboards.')
    ]
    EXTRA_CUSTOMIZATION_DEFAULT = [
        ', '.join(['series', 'tags']),
        True,
        False,
        True,
        False,
    ]

    # Indices into the extra_customization settings list
    OPT_COLLECTIONS = 0
    OPT_UPLOAD_COVERS = 1
    OPT_REFRESH_COVERS = 2
    OPT_PRESERVE_ASPECT_RATIO = 3
    OPT_USE_SONY_AUTHORS = 4

    # Set via set_plugboards() before metadata is written to the device DB
    plugboards = None
    plugboard_func = None
def post_open_callback(self):
# Set the thumbnail width to the theoretical max if the user has asked
# that we do not preserve aspect ratio
ec = self.settings().extra_customization
if not ec[self.OPT_PRESERVE_ASPECT_RATIO]:
self.THUMBNAIL_WIDTH = 108
self.WANTS_UPDATED_THUMBNAILS = ec[self.OPT_REFRESH_COVERS]
# Make sure the date offset is set to none, we'll calculate it in books.
self.device_offset = None
def windows_filter_pnp_id(self, pnp_id):
return '_LAUNCHER' in pnp_id or '_SETTING' in pnp_id
def get_carda_ebook_dir(self, for_upload=False):
if for_upload:
return self.EBOOK_DIR_MAIN
return self.EBOOK_DIR_CARD_A
def get_main_ebook_dir(self, for_upload=False):
if for_upload:
return self.EBOOK_DIR_MAIN
return ''
def can_handle(self, devinfo, debug=False):
if islinux:
dev = USBDevice(devinfo)
main, carda, cardb = self.find_device_nodes(detected_device=dev)
if main is None and carda is None and cardb is None:
if debug:
print('\tPRS-T1: Appears to be in non data mode'
' or was ejected, ignoring')
return False
return True
    def books(self, oncard=None, end_session=True):
        '''
        Return the booklist for ``oncard`` (None or 'carda'). The USBMS base
        class scans the filesystem; each book is then augmented with its
        collections and thumbnail path from the device's SQLite database.
        The first call also estimates the clock offset between the device
        and the host (stored in self.device_offset).
        '''
        import sqlite3 as sqlite

        dummy_bl = BookList(None, None, None)

        if (
            (oncard == 'carda' and not self._card_a_prefix) or
            (oncard and oncard != 'carda')
        ):
            # Card requested but absent, or an unsupported card slot
            self.report_progress(1.0, _('Getting list of books on device...'))
            return dummy_bl

        prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix

        # Let parent driver get the books
        self.booklist_class.rebuild_collections = self.rebuild_collections
        bl = USBMS.books(self, oncard=oncard, end_session=end_session)

        dbpath = self.normalize_path(prefix + DBPATH)
        debug_print("SQLite DB Path: " + dbpath)

        with closing(sqlite.connect(dbpath)) as connection:
            # Replace undecodable characters in the db instead of erroring out
            connection.text_factory = lambda x: x if isinstance(x, str) else x.decode('utf-8', 'replace')

            cursor = connection.cursor()

            # Query collections
            query = '''
                SELECT books._id, collection.title
                    FROM collections
                    LEFT OUTER JOIN books
                    LEFT OUTER JOIN collection
                    WHERE collections.content_id = books._id AND
                    collections.collection_id = collection._id
                '''
            cursor.execute(query)

            # Map book row id -> list of collection titles
            bl_collections = {}
            for i, row in enumerate(cursor):
                bl_collections.setdefault(row[0], [])
                bl_collections[row[0]].append(row[1])

            # collect information on offsets, but assume any
            # offset we already calculated is correct
            if self.device_offset is None:
                query = 'SELECT file_path, modified_date FROM books'
                cursor.execute(query)

                # Histogram of (device mtime - host mtime) per book
                time_offsets = {}
                for i, row in enumerate(cursor):
                    try:
                        comp_date = int(os.path.getmtime(self.normalize_path(prefix + row[0])) * 1000)
                    except (OSError, TypeError):
                        # In case the db has incorrect path info
                        continue
                    device_date = int(row[1])
                    offset = device_date - comp_date
                    time_offsets.setdefault(offset, 0)
                    time_offsets[offset] = time_offsets[offset] + 1

                try:
                    # The most frequently observed offset wins
                    device_offset = max(time_offsets, key=lambda a: time_offsets.get(a))
                    debug_print("Device Offset: %d ms"%device_offset)
                    self.device_offset = device_offset
                except ValueError:
                    # max() on an empty dict: no books on the device
                    debug_print("No Books To Detect Device Offset.")

            # Attach collections and thumbnail paths to each book
            for idx, book in enumerate(bl):
                query = 'SELECT _id, thumbnail FROM books WHERE file_path = ?'
                t = (book.lpath,)
                cursor.execute(query, t)

                for i, row in enumerate(cursor):
                    book.device_collections = bl_collections.get(row[0], None)
                    thumbnail = row[1]
                    if thumbnail is not None:
                        thumbnail = self.normalize_path(prefix + thumbnail)
                        book.thumbnail = ImageWrapper(thumbnail)

            cursor.close()

        return bl
    def set_plugboards(self, plugboards, pb_func):
        # Store metadata plugboards for later use when writing to the
        # device DB (see update_device_database()).
        self.plugboards = plugboards
        self.plugboard_func = pb_func
    def sync_booklists(self, booklists, end_session=True):
        '''
        Write both booklists (main memory, card A) to the device: first
        update the device database, then let USBMS write its metadata cache.
        '''
        debug_print('PRST1: starting sync_booklists')

        opts = self.settings()
        # Metadata fields to turn into device collections, from user settings
        if opts.extra_customization:
            collections = [x.strip() for x in
                    opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
        else:
            collections = []
        debug_print('PRST1: collection fields:', collections)

        if booklists[0] is not None:
            self.update_device_database(booklists[0], collections, None)
        if len(booklists) > 1 and booklists[1] is not None:
            self.update_device_database(booklists[1], collections, 'carda')

        USBMS.sync_booklists(self, booklists, end_session=end_session)
        debug_print('PRST1: finished sync_booklists')
    def update_device_database(self, booklist, collections_attributes, oncard):
        '''
        Bring the device DB for ``oncard`` (None or 'carda') in line with
        ``booklist``: prune orphaned rows, then update the books and
        collections tables. ``collections_attributes`` names the metadata
        fields that are turned into device collections.
        '''
        import sqlite3 as sqlite

        debug_print('PRST1: starting update_device_database')

        plugboard = None
        if self.plugboard_func:
            plugboard = self.plugboard_func(self.__class__.__name__,
                    'device_db', self.plugboards)
            debug_print("PRST1: Using Plugboard", plugboard)

        prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix
        if prefix is None:
            # Reader has no sd card inserted
            return
        # SD-card rows use source_id 1, main-memory rows use 0
        source_id = 1 if oncard == 'carda' else 0

        dbpath = self.normalize_path(prefix + DBPATH)
        debug_print("SQLite DB Path: " + dbpath)

        collections = booklist.get_collections(collections_attributes)

        with closing(sqlite.connect(dbpath)) as connection:
            self.remove_orphaned_records(connection, dbpath)
            self.update_device_books(connection, booklist, source_id,
                    plugboard, dbpath)
            self.update_device_collections(connection, booklist, collections, source_id, dbpath)

        debug_print('PRST1: finished update_device_database')
    def remove_orphaned_records(self, connection, dbpath):
        '''
        Delete rows in the device DB that reference books which no longer
        exist in the books table. Raises DeviceError when the database is
        corrupted.
        '''
        from sqlite3 import DatabaseError

        try:
            cursor = connection.cursor()

            debug_print("Removing Orphaned Collection Records")

            # Purge any collections references that point into the abyss
            query = 'DELETE FROM collections WHERE content_id NOT IN (SELECT _id FROM books)'
            cursor.execute(query)
            query = 'DELETE FROM collections WHERE collection_id NOT IN (SELECT _id FROM collection)'
            cursor.execute(query)

            debug_print("Removing Orphaned Book Records")

            # Purge any references to books not in this database
            # Idea is to prevent any spill-over where these wind up applying to some other book
            query = 'DELETE FROM %s WHERE content_id NOT IN (SELECT _id FROM books)'
            cursor.execute(query%'annotation')
            cursor.execute(query%'bookmark')
            cursor.execute(query%'current_position')
            cursor.execute(query%'freehand')
            cursor.execute(query%'history')
            cursor.execute(query%'layout_cache')
            cursor.execute(query%'preference')

            cursor.close()
        except DatabaseError:
            import traceback
            tb = traceback.format_exc()
            raise DeviceError((('The SONY database is corrupted. '
                    ' Delete the file %s on your reader and then disconnect '
                    ' reconnect it. If you are using an SD card, you '
                    ' should delete the file on the card as well. Note that '
                    ' deleting this file will cause your reader to forget '
                    ' any notes/highlights, etc.')%dbpath)+' Underlying error:'
                    '\n'+tb)
def get_lastrowid(self, cursor):
# SQLite3 + Python has a fun issue on 32-bit systems with integer overflows.
# Issue a SQL query instead, getting the value as a string, and then converting to a long python int manually.
query = 'SELECT last_insert_rowid()'
cursor.execute(query)
row = cursor.fetchone()
return long_type(row[0])
def get_database_min_id(self, source_id):
sequence_min = 0
if source_id == 1:
sequence_min = 4294967296
return sequence_min
    def set_database_sequence_id(self, connection, table, sequence_id):
        '''
        Record ``sequence_id`` as the autoincrement counter for ``table`` in
        sqlite_sequence, inserting the row when it does not exist yet.
        '''
        cursor = connection.cursor()

        # Update the sequence Id if it exists
        query = 'UPDATE sqlite_sequence SET seq = ? WHERE name = ?'
        t = (sequence_id, table,)
        cursor.execute(query, t)

        # Insert the sequence Id if it doesn't
        query = ('INSERT INTO sqlite_sequence (name, seq) '
                'SELECT ?, ? '
                'WHERE NOT EXISTS (SELECT 1 FROM sqlite_sequence WHERE name = ?)')
        cursor.execute(query, (table, sequence_id, table,))
        cursor.close()
def read_device_books(self, connection, source_id, dbpath):
from sqlite3 import DatabaseError
sequence_min = self.get_database_min_id(source_id)
sequence_max = sequence_min
sequence_dirty = 0
debug_print("Book Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))
try:
cursor = connection.cursor()
# Get existing books
query = 'SELECT file_path, _id FROM books'
cursor.execute(query)
except DatabaseError:
import traceback
tb = traceback.format_exc()
raise DeviceError((('The SONY database is corrupted. '
' Delete the file %s on your reader and then disconnect '
' reconnect it. If you are using an SD card, you '
' should delete the file on the card as well. Note that '
' deleting this file will cause your reader to forget '
' any notes/highlights, etc.')%dbpath)+' Underlying error:'
'\n'+tb)
# Get the books themselves, but keep track of any that are less than the minimum.
# Record what the max id being used is as well.
db_books = {}
for i, row in enumerate(cursor):
if not hasattr(row[0], 'replace'):
continue
lpath = row[0].replace('\\', '/')
db_books[lpath] = row[1]
if row[1] < sequence_min:
sequence_dirty = 1
else:
sequence_max = max(sequence_max, row[1])
# If the database is 'dirty', then we should fix up the Ids and the sequence number
if sequence_dirty == 1:
debug_print("Book Sequence Dirty for Source Id: %d"%source_id)
sequence_max = sequence_max + 1
for book, bookId in db_books.items():
if bookId < sequence_min:
# Record the new Id and write it to the DB
db_books[book] = sequence_max
sequence_max = sequence_max + 1
# Fix the Books DB
query = 'UPDATE books SET _id = ? WHERE file_path = ?'
t = (db_books[book], book,)
cursor.execute(query, t)
# Fix any references so that they point back to the right book
t = (db_books[book], bookId,)
query = 'UPDATE collections SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE annotation SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE bookmark SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE current_position SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE deleted_markups SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE dic_histories SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE freehand SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE history SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE layout_cache SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
query = 'UPDATE preference SET content_id = ? WHERE content_id = ?'
cursor.execute(query, t)
self.set_database_sequence_id(connection, 'books', sequence_max)
debug_print("Book Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))
cursor.close()
return db_books
def update_device_books(self, connection, booklist, source_id, plugboard,
dbpath):
from calibre.ebooks.metadata.meta import path_to_ext
opts = self.settings()
upload_covers = opts.extra_customization[self.OPT_UPLOAD_COVERS]
refresh_covers = opts.extra_customization[self.OPT_REFRESH_COVERS]
use_sony_authors = opts.extra_customization[self.OPT_USE_SONY_AUTHORS]
db_books = self.read_device_books(connection, source_id, dbpath)
cursor = connection.cursor()
for book in booklist:
# Run through plugboard if needed
if plugboard is not None:
newmi = book.deepcopy_metadata()
newmi.template_to_attribute(book, plugboard)
else:
newmi = book
# Get Metadata We Want
lpath = book.lpath
try:
if opts.use_author_sort:
if newmi.author_sort:
author = newmi.author_sort
else:
author = authors_to_sort_string(newmi.authors)
else:
if use_sony_authors:
author = newmi.authors[0]
else:
author = authors_to_string(newmi.authors)
except:
author = _('Unknown')
title = newmi.title or _('Unknown')
# Get modified date
# If there was a detected offset, use that. Otherwise use UTC (same as Sony software)
modified_date = os.path.getmtime(book.path) * 1000
if self.device_offset is not None:
modified_date = modified_date + self.device_offset
if lpath not in db_books:
query = '''
INSERT INTO books
(title, author, source_id, added_date, modified_date,
file_path, file_name, file_size, mime_type, corrupted,
prevent_delete)
values (?,?,?,?,?,?,?,?,?,0,0)
'''
t = (title, author, source_id, int(time.time() * 1000),
modified_date, lpath,
os.path.basename(lpath), book.size, book.mime or mime_type_ext(path_to_ext(lpath)))
cursor.execute(query, t)
book.bookId = self.get_lastrowid(cursor)
if upload_covers:
self.upload_book_cover(connection, book, source_id)
debug_print('Inserted New Book: (%u) '%book.bookId + book.title)
else:
query = '''
UPDATE books
SET title = ?, author = ?, modified_date = ?, file_size = ?
WHERE file_path = ?
'''
t = (title, author, modified_date, book.size, lpath)
cursor.execute(query, t)
book.bookId = db_books[lpath]
if refresh_covers:
self.upload_book_cover(connection, book, source_id)
db_books[lpath] = None
if self.is_sony_periodical(book):
self.periodicalize_book(connection, book)
for book, bookId in db_books.items():
if bookId is not None:
# Remove From Collections
query = 'DELETE FROM collections WHERE content_id = ?'
t = (bookId,)
cursor.execute(query, t)
# Remove from Books
query = 'DELETE FROM books where _id = ?'
t = (bookId,)
cursor.execute(query, t)
debug_print('Deleted Book:' + book)
connection.commit()
cursor.close()
    def read_device_collections(self, connection, source_id, dbpath):
        '''
        Return a dict mapping collection title -> row id from the device DB.
        As a side effect, repairs collection ids and collections-link ids
        that fall below the minimum for ``source_id`` and updates the sqlite
        sequence counters. Raises DeviceError when the database is corrupted.
        '''
        from sqlite3 import DatabaseError

        sequence_min = self.get_database_min_id(source_id)
        sequence_max = sequence_min
        sequence_dirty = 0

        debug_print("Collection Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))

        try:
            cursor = connection.cursor()

            # Get existing collections
            query = 'SELECT _id, title FROM collection'
            cursor.execute(query)
        except DatabaseError:
            import traceback
            tb = traceback.format_exc()
            raise DeviceError((('The SONY database is corrupted. '
                    ' Delete the file %s on your reader and then disconnect '
                    ' reconnect it. If you are using an SD card, you '
                    ' should delete the file on the card as well. Note that '
                    ' deleting this file will cause your reader to forget '
                    ' any notes/highlights, etc.')%dbpath)+' Underlying error:'
                    '\n'+tb)

        db_collections = {}
        for i, row in enumerate(cursor):
            db_collections[row[1]] = row[0]
            if row[0] < sequence_min:
                sequence_dirty = 1
            else:
                sequence_max = max(sequence_max, row[0])

        # If the database is 'dirty', then we should fix up the Ids and the sequence number
        if sequence_dirty == 1:
            debug_print("Collection Sequence Dirty for Source Id: %d"%source_id)
            sequence_max = sequence_max + 1
            for collection, collectionId in db_collections.items():
                if collectionId < sequence_min:
                    # Record the new Id and write it to the DB
                    db_collections[collection] = sequence_max
                    sequence_max = sequence_max + 1

                    # Fix the collection DB
                    query = 'UPDATE collection SET _id = ? WHERE title = ?'
                    t = (db_collections[collection], collection, )
                    cursor.execute(query, t)

                    # Fix any references in existing collections
                    query = 'UPDATE collections SET collection_id = ? WHERE collection_id = ?'
                    t = (db_collections[collection], collectionId,)
                    cursor.execute(query, t)

            self.set_database_sequence_id(connection, 'collection', sequence_max)
            debug_print("Collection Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))

        # Fix up the collections table now...
        sequence_dirty = 0
        sequence_max = sequence_min

        debug_print("Collections Sequence Min: %d, Source Id: %d"%(sequence_min,source_id))

        query = 'SELECT _id FROM collections'
        cursor.execute(query)

        db_collection_pairs = []
        for i, row in enumerate(cursor):
            db_collection_pairs.append(row[0])
            if row[0] < sequence_min:
                sequence_dirty = 1
            else:
                sequence_max = max(sequence_max, row[0])

        if sequence_dirty == 1:
            debug_print("Collections Sequence Dirty for Source Id: %d"%source_id)
            sequence_max = sequence_max + 1
            for pairId in db_collection_pairs:
                if pairId < sequence_min:
                    # Record the new Id and write it to the DB
                    query = 'UPDATE collections SET _id = ? WHERE _id = ?'
                    t = (sequence_max, pairId,)
                    cursor.execute(query, t)
                    sequence_max = sequence_max + 1

            self.set_database_sequence_id(connection, 'collections', sequence_max)
            debug_print("Collections Sequence Max: %d, Source Id: %d"%(sequence_max,source_id))

        cursor.close()
        return db_collections
    def update_device_collections(self, connection, booklist, collections,
            source_id, dbpath):
        '''
        Sync the device's collection table and collections (membership)
        table with the calibre-side ``collections`` mapping of
        title -> books. Collections and membership rows no longer present
        on the calibre side are deleted. Does nothing when ``collections``
        is empty.
        '''
        if collections:
            # Existing device collections, title -> row id
            db_collections = self.read_device_collections(connection, source_id, dbpath)

            cursor = connection.cursor()

            for collection, books in collections.items():
                if collection not in db_collections:
                    query = 'INSERT INTO collection (title, source_id) VALUES (?,?)'
                    t = (collection, source_id)
                    cursor.execute(query, t)
                    db_collections[collection] = self.get_lastrowid(cursor)
                    debug_print('Inserted New Collection: (%u) '%db_collections[collection] + collection)

                # Get existing books in collection
                query = '''
                SELECT books.file_path, content_id
                FROM collections
                LEFT OUTER JOIN books
                WHERE collection_id = ? AND books._id = collections.content_id
                '''
                t = (db_collections[collection],)
                cursor.execute(query, t)

                db_books = {}
                for i, row in enumerate(cursor):
                    db_books[row[0]] = row[1]

                for idx, book in enumerate(books):
                    if collection not in book.device_collections:
                        book.device_collections.append(collection)
                    if db_books.get(book.lpath, None) is None:
                        query = '''
                        INSERT INTO collections (collection_id, content_id,
                        added_order) values (?,?,?)
                        '''
                        t = (db_collections[collection], book.bookId, idx)
                        cursor.execute(query, t)
                        debug_print('Inserted Book Into Collection: ' +
                                book.title + ' -> ' + collection)
                    else:
                        # Already a member: just refresh its sort order
                        query = '''
                        UPDATE collections
                        SET added_order = ?
                        WHERE content_id = ? AND collection_id = ?
                        '''
                        t = (idx, book.bookId, db_collections[collection])
                        cursor.execute(query, t)

                    # Mark as seen so it is not deleted below
                    db_books[book.lpath] = None

                for bookPath, bookId in db_books.items():
                    if bookId is not None:
                        query = ('DELETE FROM collections '
                                'WHERE content_id = ? AND collection_id = ? ')
                        t = (bookId, db_collections[collection],)
                        cursor.execute(query, t)
                        debug_print('Deleted Book From Collection: ' + bookPath + ' -> ' + collection)

                # Mark the collection as seen so it is not deleted below
                db_collections[collection] = None

            for collection, collectionId in db_collections.items():
                if collectionId is not None:
                    # Remove Books from Collection
                    query = ('DELETE FROM collections '
                            'WHERE collection_id = ?')
                    t = (collectionId,)
                    cursor.execute(query, t)

                    # Remove Collection
                    query = ('DELETE FROM collection '
                            'WHERE _id = ?')
                    t = (collectionId,)
                    cursor.execute(query, t)
                    debug_print('Deleted Collection: ' + repr(collection))

            connection.commit()
            cursor.close()
def rebuild_collections(self, booklist, oncard):
debug_print('PRST1: starting rebuild_collections')
opts = self.settings()
if opts.extra_customization:
collections = [x.strip() for x in
opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
else:
collections = []
debug_print('PRST1: collection fields:', collections)
self.update_device_database(booklist, collections, oncard)
debug_print('PRS-T1: finished rebuild_collections')
    def upload_cover(self, path, filename, metadata, filepath):
        '''
        Driver hook called after a book is sent: look up the book's row id
        in the device DB and, if found, upload its cover thumbnail.
        '''
        import sqlite3 as sqlite

        debug_print('PRS-T1: uploading cover')

        # Work out whether the book went to main memory or the SD card
        if filepath.startswith(self._main_prefix):
            prefix = self._main_prefix
            source_id = 0
        else:
            prefix = self._card_a_prefix
            source_id = 1

        # Device-relative path with forward slashes, as stored in the DB
        metadata.lpath = filepath.partition(prefix)[2]
        metadata.lpath = metadata.lpath.replace('\\', '/')
        dbpath = self.normalize_path(prefix + DBPATH)
        debug_print("SQLite DB Path: " + dbpath)

        with closing(sqlite.connect(dbpath)) as connection:
            cursor = connection.cursor()

            query = 'SELECT _id FROM books WHERE file_path = ?'
            t = (metadata.lpath,)
            cursor.execute(query, t)

            for i, row in enumerate(cursor):
                metadata.bookId = row[0]

            cursor.close()

            if getattr(metadata, 'bookId', None) is not None:
                debug_print('PRS-T1: refreshing cover for book being sent')
                self.upload_book_cover(connection, metadata, source_id)

        debug_print('PRS-T1: done uploading cover')
def upload_book_cover(self, connection, book, source_id):
debug_print('PRST1: Uploading/Refreshing Cover for ' + book.title)
if (not book.thumbnail or isinstance(book.thumbnail, ImageWrapper) or
not book.thumbnail[-1]):
# If the thumbnail is an ImageWrapper instance, it refers to a book
# not in the calibre library
return
cursor = connection.cursor()
thumbnail_path = THUMBPATH%book.bookId
prefix = self._main_prefix if source_id == 0 else self._card_a_prefix
thumbnail_file_path = os.path.join(prefix, *thumbnail_path.split('/'))
thumbnail_dir_path = os.path.dirname(thumbnail_file_path)
if not os.path.exists(thumbnail_dir_path):
os.makedirs(thumbnail_dir_path)
with open(thumbnail_file_path, 'wb') as f:
f.write(book.thumbnail[-1])
fsync(f)
query = 'UPDATE books SET thumbnail = ? WHERE _id = ?'
t = (thumbnail_path, book.bookId,)
cursor.execute(query, t)
connection.commit()
cursor.close()
def is_sony_periodical(self, book):
if _('News') not in book.tags:
return False
if not book.lpath.lower().endswith('.epub'):
return False
if book.pubdate is None or book.pubdate.date() < date(2010, 10, 17):
return False
return True
def periodicalize_book(self, connection, book):
if not self.is_sony_periodical(book):
return
name = None
if '[' in book.title:
name = book.title.split('[')[0].strip()
if len(name) < 4:
name = None
if not name:
try:
name = [t for t in book.tags if t != _('News')][0]
except:
name = None
if not name:
name = book.title
pubdate = None
try:
pubdate = int(time.mktime(book.pubdate.timetuple()) * 1000)
except:
pass
cursor = connection.cursor()
periodical_schema = \
"'http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0'"
# Setting this to the SONY periodical schema apparently causes errors
# with some periodicals, therefore set it to null, since the special
# periodical navigation doesn't work anyway.
periodical_schema = None
query = '''
UPDATE books
SET conforms_to = ?,
periodical_name = ?,
description = ?,
publication_date = ?
WHERE _id = ?
'''
t = (periodical_schema, name, None, pubdate, book.bookId,)
cursor.execute(query, t)
connection.commit()
cursor.close()
| 33,588 | Python | .py | 688 | 35.328488 | 118 | 0.564049 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,786 | driver.py | kovidgoyal_calibre/src/calibre/devices/eb600/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Device driver for the Netronix EB600
Windows PNP strings:
('USBSTOR\\DISK&VEN_NETRONIX&PROD_EBOOK&REV_062E\\6&1A275569&0&EB6001009
2W00000&0', 2, u'F:\\')
('USBSTOR\\DISK&VEN_NETRONIX&PROD_EBOOK&REV_062E\\6&1A275569&0&EB6001009
2W00000&1', 3, u'G:\\')
'''
import re
from calibre.devices.usbms.driver import USBMS
class EB600(USBMS):
    '''
    Base driver for the Netronix EB600. Many readers in this file reuse the
    same reference design and subclass this driver, overriding only USB
    ids, volume labels and folder names.
    '''

    name = 'Netronix EB600 Device Interface'
    gui_name = 'Netronix EB600'
    description = _('Communicate with the EB600 e-book reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'prc', 'chm', 'djvu', 'html', 'rtf', 'txt',
        'pdf']
    DRM_FORMATS = ['prc', 'mobi', 'html', 'pdf', 'txt']

    # USB ids used for device detection
    VENDOR_ID = [0x1f85]
    PRODUCT_ID = [0x1688]
    BCD = [0x110]

    VENDOR_NAME = ['NETRONIX', 'WOLDER', 'MD86371']
    WINDOWS_MAIN_MEM = ['EBOOK', 'MIBUK_GAMMA_6.2', 'MD86371']
    WINDOWS_CARD_A_MEM = ['EBOOK', 'MD86371']

    OSX_MAIN_MEM = 'EB600 Internal Storage Media'
    OSX_CARD_A_MEM = 'EB600 Card Storage Media'

    MAIN_MEMORY_VOLUME_LABEL = 'EB600 Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'EB600 Storage Card'

    # Books are stored at the root of both main memory and the card
    EBOOK_DIR_MAIN = ''
    EBOOK_DIR_CARD_A = ''
    SUPPORTS_SUB_DIRS = True
class TOLINO(EB600):
    '''
    Driver for the tolino shine / vision family, which reuses the EB600
    hardware ids. Some newer devices report a Kobo vendor id but still run
    the tolino software.
    '''

    name = 'Tolino Shine Device Interface'
    gui_name = 'Tolino Shine'
    description = _('Communicate with the Tolino Shine and Vision readers')
    FORMATS = ['epub', 'pdf', 'txt']

    # Product ids for the various tolino models
    EPOS_PRODUCT_ID = [0x6053]
    VISION6_PRODUCT_ID = [0x8000]
    OTHER_TOLINO_PRODUCT_ID = [0x6033, 0x6052]
    PRODUCT_ID = EB600.PRODUCT_ID + OTHER_TOLINO_PRODUCT_ID + EPOS_PRODUCT_ID + VISION6_PRODUCT_ID
    KOBO_VENDOR_ID = [0x4173]  # Some newer Tolino devices have the Kobo Vendor ID. But, they still use different software.
    VENDOR_ID = EB600.VENDOR_ID + KOBO_VENDOR_ID
    BCD = [0x226, 0x9999]

    VENDOR_NAME = ['DEUTSCHE', 'LINUX']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['_TELEKOMTOLINO', 'FILE-CD_GADGET']

    EBOOK_DIR_MAIN = ''

    EXTRA_CUSTOMIZATION_MESSAGE = [
        _('Swap main and card A') +
        ':::' +
        _('Check this box if the device\'s main memory is being seen as card a and the card '
          'is being seen as main memory. Some tolino devices may need this option.'),
    ]

    EXTRA_CUSTOMIZATION_DEFAULT = [
        True,
    ]

    # Index of the swap option in extra_customization
    OPT_SWAP_MEMORY = 0

    def get_device_information(self, end_session=True):
        # Refresh gui_name (epos / vision 6 variants) before reporting
        self.set_device_name()
        return super().get_device_information(end_session)

    # There are apparently two versions of this device, one with swapped
    # drives and one without, see https://bugs.launchpad.net/bugs/1240504
    def linux_swap_drives(self, drives):
        e = self.settings().extra_customization
        if len(drives) < 2 or not drives[0] or not drives[1] or not e[self.OPT_SWAP_MEMORY]:
            return drives
        drives = list(drives)
        t = drives[0]
        drives[0] = drives[1]
        drives[1] = t
        return tuple(drives)

    def windows_sort_drives(self, drives):
        # Swap main memory and card A when the user enabled the option
        e = self.settings().extra_customization
        if len(drives) < 2 or not e[self.OPT_SWAP_MEMORY]:
            return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'] = carda
            drives['carda'] = main
        return drives

    def osx_sort_names(self, names):
        # Same swap as windows_sort_drives, applied to OS X volume names
        e = self.settings().extra_customization
        if len(names) < 2 or not e[self.OPT_SWAP_MEMORY]:
            return names
        main = names.get('main', None)
        card = names.get('carda', None)
        if main is not None and card is not None:
            names['main'] = card
            names['carda'] = main
        return names

    def post_open_callback(self):
        # The tolino vision only handles books inside the Books folder
        product_id, bcd = self.device_being_opened[1], self.device_being_opened[2]
        is_tolino = product_id in (0x6033, 0x6052, 0x6053) or (product_id == 0x1688 and bcd == 0x226)
        self.ebook_dir_for_upload = 'Books' if is_tolino else ''

    def get_main_ebook_dir(self, for_upload=False):
        # Upload folder is decided per-device in post_open_callback()
        if for_upload:
            return getattr(self, 'ebook_dir_for_upload', self.EBOOK_DIR_MAIN)
        return self.EBOOK_DIR_MAIN

    def isEpos(self):
        return self.detected_device.idProduct in self.EPOS_PRODUCT_ID

    def isVision6(self):
        return self.detected_device.idProduct in self.VISION6_PRODUCT_ID

    def set_device_name(self):
        # Pick a more specific GUI name for epos / vision 6 variants; note
        # this mutates the class attribute, not just this instance.
        device_name = self.gui_name
        if self.isEpos():
            device_name = 'tolino epos'
        elif self.isVision6():
            device_name = 'tolino vision 6'
        self.__class__.gui_name = device_name
        return device_name
class COOL_ER(EB600):
    # Cool-er: EB600 variant with its own USB strings; books live in
    # the 'my docs' folder.

    name = 'Cool-er device interface'
    gui_name = 'Cool-er'

    FORMATS = ['epub', 'mobi', 'prc', 'pdf', 'txt']

    VENDOR_NAME = 'COOL-ER'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'EREADER'

    OSX_MAIN_MEM = 'COOL-ER eReader Media'

    EBOOK_DIR_MAIN = 'my docs'
class SHINEBOOK(EB600):
    # ShineBook: shares the EB600 USB ids, so can_handle() disambiguates
    # via the string the device reports in devinfo.

    name = 'ShineBook device Interface'
    gui_name = 'ShineBook'

    FORMATS = ['epub', 'prc', 'rtf', 'pdf', 'txt']

    VENDOR_NAME = 'LONGSHIN'
    WINDOWS_MAIN_MEM = 'ESHINEBOOK'
    MAIN_MEMORY_VOLUME_LABEL = 'ShineBook Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'ShineBook Storage Card'

    @classmethod
    def can_handle(cls, dev, debug=False):
        # dev[4] is presumably the product string from the device
        # descriptor -- TODO confirm against the device scanner
        return dev[4] == 'ShineBook'
class POCKETBOOK360(EB600):
    # Device info on OS X
    # (8069L, 5768L, 272L, u'', u'', u'1.00')
    name = 'PocketBook 360 Device Interface'

    gui_name = 'PocketBook 360'
    VENDOR_ID = [0x1f85, 0x525]
    PRODUCT_ID = [0x1688, 0xa4a5]
    BCD = [0x110]

    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm', 'txt']

    VENDOR_NAME = ['PHILIPS', '__POCKET', 'POCKETBO']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['MASS_STORAGE', 'BOOK_USB_STORAGE',
            'OK_POCKET_611_61', 'OK_POCKET_360+61']

    OSX_MAIN_MEM = OSX_CARD_A_MEM = 'Philips Mass Storage Media'
    OSX_MAIN_MEM_VOL_PAT = re.compile(r'/Pocket')

    @classmethod
    def can_handle(cls, dev, debug=False):
        # Matches the descriptor shown in the comment above: version string
        # '1.00' with the two preceding string fields empty.
        return dev[-1] == '1.00' and not dev[-2] and not dev[-3]
class POCKETBOOKHD(EB600):
    '''PocketBook Touch HD.'''

    name = 'Pocket Touch HD Device Interface'
    gui_name = 'PocketBook HD'

    PRODUCT_ID = [0x6a42]
    BCD = [0x9999]

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'docx', 'doc', 'pdf', 'djvu',
               'rtf', 'chm', 'txt']
class GER2(EB600):
    '''Ganaxa GeR2, a PDF-only EB600 variant.'''

    name = 'Ganaxa GeR2 Device Interface'
    gui_name = 'Ganaxa GeR2'

    FORMATS = ['pdf']

    VENDOR_ID = [0x3034]
    PRODUCT_ID = [0x1795]
    BCD = [0x132]

    VENDOR_NAME = 'GANAXA'
    # Bug fix: this attribute was misspelt WINDOWS_MAIN_MEN, so the main
    # memory id below was never consulted when matching Windows drives.
    WINDOWS_MAIN_MEM = 'GER2_________-FD'
    WINDOWS_CARD_A_MEM = 'GER2_________-SD'
class ITALICA(EB600):
    '''Italica branded EB600.'''

    name = 'Italica Device Interface'
    gui_name = 'Italica'
    icon = 'devices/italica.png'

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'rtf', 'fb2', 'html', 'prc', 'mobi', 'pdf', 'txt']

    VENDOR_NAME = 'ITALICA'
    WINDOWS_MAIN_MEM = 'EREADER'
    WINDOWS_CARD_A_MEM = WINDOWS_MAIN_MEM
    OSX_MAIN_MEM = 'Italica eReader Media'
    OSX_CARD_A_MEM = OSX_MAIN_MEM

    MAIN_MEMORY_VOLUME_LABEL = 'Italica Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Italica Storage Card'
class ECLICTO(EB600):
    '''eClicto branded EB600.'''

    name = 'eClicto Device Interface'
    gui_name = 'eClicto'

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'pdf', 'htm', 'html', 'txt']

    VENDOR_NAME = 'ECLICTO'
    WINDOWS_MAIN_MEM = 'EBOOK'
    WINDOWS_CARD_A_MEM = 'EBOOK'

    # Books live in 'Text' on main memory, at the card root otherwise.
    EBOOK_DIR_MAIN = 'Text'
    EBOOK_DIR_CARD_A = ''
class DBOOK(EB600):
    '''Airis Dbook branded EB600.'''

    name = 'Airis Dbook Device Interface'
    gui_name = 'Airis Dbook'

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'html', 'pdf', 'rtf', 'txt']

    VENDOR_NAME = 'INFINITY'
    WINDOWS_MAIN_MEM = 'AIRIS_DBOOK'
    WINDOWS_CARD_A_MEM = 'AIRIS_DBOOK'
class INVESBOOK(EB600):
    '''Inves Book 600 branded EB600.'''

    name = 'Inves Book Device Interface'
    gui_name = 'Inves Book 600'

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'html', 'pdf', 'rtf', 'txt']

    BCD = [0x110, 0x323]
    VENDOR_NAME = ['INVES_E6', 'INVES-WI', 'POCKETBO']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = [
        '00INVES_E600', 'INVES-WIBOOK', 'OK_POCKET_611_61']
class BOOQ(EB600):
    '''bq Reader branded EB600.'''

    name = 'Booq Device Interface'
    gui_name = 'bq Reader'

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'pdf', 'doc', 'rtf', 'txt', 'html']

    VENDOR_NAME = ['NETRONIX', '36LBOOKS']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['EB600', 'ELEQTOR']
class MENTOR(EB600):
    '''Astak Mentor branded EB600.'''

    name = 'Astak Mentor EB600'
    gui_name = 'Mentor'
    description = _('Communicate with the Astak Mentor EB600')

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'mobi', 'prc', 'pdf', 'txt']

    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'MENTOR'
class ELONEX(EB600):
    '''Elonex 600EB branded EB600.'''

    name = 'Elonex 600EB'
    gui_name = 'Elonex'

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'pdf', 'txt', 'html']

    VENDOR_NAME = 'ELONEX'
    WINDOWS_MAIN_MEM = 'EBOOK'
    WINDOWS_CARD_A_MEM = 'EBOOK'

    @classmethod
    def can_handle(cls, dev, debug=False):
        # Match on the manufacturer/product strings of the device tuple.
        return (dev[3], dev[4]) == ('Elonex', 'eBook')
class POCKETBOOK301(USBMS):
    '''PocketBook 301.'''

    name = 'PocketBook 301 Device Interface'
    description = _('Communicate with the PocketBook 301 Reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm', 'txt']
    SUPPORTS_SUB_DIRS = True

    MAIN_MEMORY_VOLUME_LABEL = 'PocketBook 301 Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'PocketBook 301 Storage Card'

    VENDOR_ID = [0x1]
    PRODUCT_ID = [0x301]
    BCD = [0x132]
class POCKETBOOK602(USBMS):
    '''PocketBook Pro 515/602/603/902/903/912 family.'''

    name = 'PocketBook Pro 602/902 Device Interface'
    gui_name = 'PocketBook'
    description = _('Communicate with the PocketBook 515/602/603/902/903/Pro 912 reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm',
               'doc', 'tcr', 'txt']

    EBOOK_DIR_MAIN = 'books'
    SUPPORTS_SUB_DIRS = True
    SCAN_FROM_ROOT = True

    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0xa4a5]
    BCD = [0x0324, 0x0330, 0x0399]

    VENDOR_NAME = ['', 'LINUX']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = [
        'PB602', 'PB603', 'PB902', 'PB903', 'Pocket912', 'PB',
        'FILE-STOR_GADGET']
class POCKETBOOK622(POCKETBOOK602):
    '''PocketBook 622/623 — same protocol as the 602, different USB ids.'''

    name = 'PocketBook 622 Device Interface'
    description = _('Communicate with the PocketBook 622 and 623 readers.')

    # Books go at the root of the device.
    EBOOK_DIR_MAIN = ''

    VENDOR_ID = [0x0489]
    PRODUCT_ID = [0xe107, 0xcff1]
    BCD = [0x0326]

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'
class POCKETBOOK360P(POCKETBOOK602):
    '''PocketBook 360 Plus — same protocol as the 602.'''

    name = 'PocketBook 360+ Device Interface'
    description = _('Communicate with the PocketBook 360+ reader.')

    BCD = [0x0323]
    # Books go at the root of the device.
    EBOOK_DIR_MAIN = ''

    VENDOR_NAME = '__POCKET'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = 'BOOK_USB_STORAGE'
class POCKETBOOK701(USBMS):
    '''PocketBook 701 (Android based).'''

    name = 'PocketBook 701 Device Interface'
    gui_name = 'PocketBook'
    description = _('Communicate with the PocketBook 701')
    author = _('Kovid Goyal')
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm',
               'doc', 'tcr', 'txt']

    EBOOK_DIR_MAIN = 'books'
    SUPPORTS_SUB_DIRS = True

    VENDOR_ID = [0x18d1]
    PRODUCT_ID = [0xa004]
    BCD = [0x0224]

    VENDOR_NAME = 'ANDROID'
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = '__UMS_COMPOSITE'

    def windows_sort_drives(self, drives):
        # The 701 reports its card before internal memory on Windows;
        # swap the two drive letters back when both are present.
        if len(drives) >= 2:
            main, card = drives.get('main', None), drives.get('carda', None)
            if main and card:
                drives['main'], drives['carda'] = card, main
        return drives
class POCKETBOOK740(USBMS):
    '''PocketBook 740.'''

    name = 'PocketBook 740 Device Interface'
    gui_name = 'PocketBook'
    description = _('Communicate with the PocketBook 740')
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'prc', 'mobi', 'pdf', 'djvu', 'rtf', 'chm',
               'doc', 'tcr', 'txt']

    EBOOK_DIR_MAIN = 'books'
    SUPPORTS_SUB_DIRS = True
    SCAN_FROM_ROOT = True

    VENDOR_ID = [0x18d1]
    PRODUCT_ID = [0x0001]
    BCD = [0x0101]
class PI2(EB600):
    '''Infibeam Pi2 variant of the EB600.'''

    name = 'Infibeam Pi2 Device Interface'
    gui_name = 'Infibeam Pi2'
    author = 'Michael Scalet'
    description = _('Communicate with the Infibeam Pi2 reader.')
    version = (1,0,1)

    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'prc', 'html', 'htm', 'doc', 'pdf', 'rtf',
               'txt']

    VENDOR_NAME = 'INFIBEAM'
    WINDOWS_MAIN_MEM = 'INFIBEAM_PI'
    WINDOWS_CARD_A_MEM = 'INFIBEAM_PI'

    # Sidecar files the device creates that should be removed with the book.
    DELETE_EXTS = ['.rec']
| 13,271 | Python | .py | 321 | 35.193146 | 124 | 0.612256 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,787 | driver.py | kovidgoyal_calibre/src/calibre/devices/user_defined/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.devices.usbms.driver import USBMS
class USER_DEFINED(USBMS):
    '''
    Generic driver whose USB ids and destination folders are filled in from
    the user's plugin customization at connection time (see
    do_delayed_plugin_initialization()).
    '''

    name = 'User Defined USB driver'
    gui_name = 'User Defined USB Device'
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['epub', 'mobi', 'pdf']

    # Placeholder ids, overwritten by the customized values below.
    VENDOR_ID = 0xFFFF
    PRODUCT_ID = 0xFFFF
    BCD = None

    EBOOK_DIR_MAIN = ''
    EBOOK_DIR_CARD_A = ''

    VENDOR_NAME = []
    WINDOWS_MAIN_MEM = ''
    WINDOWS_CARD_A_MEM = ''

    OSX_MAIN_MEM = 'Device Main Memory'

    MAIN_MEMORY_VOLUME_LABEL = 'Device Main Memory'

    SUPPORTS_SUB_DIRS = True

    EXTRA_CUSTOMIZATION_MESSAGE = [
        _('USB Vendor ID (in hex)') + ':::<p>' + _(
            'Get this ID using Preferences -> Misc -> Get information to '
            'set up the user-defined device') + '</p>',
        _('USB Product ID (in hex)')+ ':::<p>' + _(
            'Get this ID using Preferences -> Misc -> Get information to '
            'set up the user-defined device') + '</p>',
        _('USB Revision ID (in hex)')+ ':::<p>' + _(
            'Get this ID using Preferences -> Misc -> Get information to '
            'set up the user-defined device') + '</p>',
        '',
        _('Unused (leave blank)') + ':::<p>' + _(
            'This field is no longer used, leave it blank.') + '</p>',
        _('Unused (leave blank)') + ':::<p>' + _(
            'This field is no longer used, leave it blank.') + '</p>',
        _('Unused (leave blank)') + ':::<p>' + _(
            'This field is no longer used, leave it blank.') + '</p>',
        _('Unused (leave blank)') + ':::<p>' + _(
            'This field is no longer used, leave it blank.') + '</p>',
        _('Main memory folder') + ':::<p>' + _(
            'Enter the folder where the books are to be stored. This folder '
            'is prepended to any send_to_device template') + '</p>',
        _('Card A folder') + ':::<p>' + _(
            'Enter the folder where the books are to be stored. This folder '
            'is prepended to any send_to_device template') + '</p>',
        _('Swap main and card A') + ':::<p>' + _(
            'Check this box if the device\'s main memory is being seen as '
            'card a and the card is being seen as main memory') + '</p>',
    ]

    EXTRA_CUSTOMIZATION_DEFAULT = [
        '0xffff',
        '0xffff',
        '0xffff',
        None,
        '',
        '',
        '',
        '',
        '',
        '',
        False,
    ]

    # Indices into extra_customization for the settings above.
    OPT_USB_VENDOR_ID = 0
    OPT_USB_PRODUCT_ID = 1
    OPT_USB_REVISION_ID = 2
    # OPT 3 isn't used
    OPT_USB_WINDOWS_MM_VEN_ID = 4
    OPT_USB_WINDOWS_MM_ID = 5
    OPT_USB_WINDOWS_CA_VEN_ID = 6
    OPT_USB_WINDOWS_CA_ID = 7
    OPT_MAIN_MEM_FOLDER = 8
    OPT_CARD_A_FOLDER = 9
    OPT_SWAP_MAIN_AND_CARD = 10

    def initialize(self):
        # The USB ids come from user settings, which are not available at
        # plugin load time; defer filling them in.
        self.plugin_needs_delayed_initialization = True
        USBMS.initialize(self)

    def do_delayed_plugin_initialization(self):
        '''
        Copy the user customized USB ids and folders onto this driver.
        '''
        try:
            e = self.settings().extra_customization
            if e[self.OPT_USB_VENDOR_ID]:
                self.VENDOR_ID = int(e[self.OPT_USB_VENDOR_ID], 16)
                self.PRODUCT_ID = int(e[self.OPT_USB_PRODUCT_ID], 16)
                self.BCD = [int(e[self.OPT_USB_REVISION_ID], 16)]
            # Bug fix: VENDOR_NAME is a class-level list shared by every
            # initialization, so guard *both* appends against duplicates
            # (previously only the card-A branch had the check, letting the
            # main-memory vendor id accumulate on every re-initialization).
            if e[self.OPT_USB_WINDOWS_MM_VEN_ID] and \
                    e[self.OPT_USB_WINDOWS_MM_VEN_ID] not in self.VENDOR_NAME:
                self.VENDOR_NAME.append(e[self.OPT_USB_WINDOWS_MM_VEN_ID])
            if e[self.OPT_USB_WINDOWS_CA_VEN_ID] and \
                    e[self.OPT_USB_WINDOWS_CA_VEN_ID] not in self.VENDOR_NAME:
                self.VENDOR_NAME.append(e[self.OPT_USB_WINDOWS_CA_VEN_ID])
            self.WINDOWS_MAIN_MEM = e[self.OPT_USB_WINDOWS_MM_ID] + '&'
            self.WINDOWS_CARD_A_MEM = e[self.OPT_USB_WINDOWS_CA_ID] + '&'
            self.EBOOK_DIR_MAIN = e[self.OPT_MAIN_MEM_FOLDER]
            self.EBOOK_DIR_CARD_A = e[self.OPT_CARD_A_FOLDER]
        except Exception:
            import traceback
            traceback.print_exc()
        self.plugin_needs_delayed_initialization = False

    def windows_sort_drives(self, drives):
        '''Swap main and card A drive letters if the user asked for it.'''
        if len(drives) < 2:
            return drives
        e = self.settings().extra_customization
        if not e[self.OPT_SWAP_MAIN_AND_CARD]:
            return drives
        main = drives.get('main', None)
        carda = drives.get('carda', None)
        if main and carda:
            drives['main'] = carda
            drives['carda'] = main
        return drives

    def linux_swap_drives(self, drives):
        '''Swap main and card A device nodes if the user asked for it.'''
        if len(drives) < 2 or not drives[0] or not drives[1]:
            return drives
        e = self.settings().extra_customization
        if not e[self.OPT_SWAP_MAIN_AND_CARD]:
            return drives
        drives = list(drives)
        t = drives[0]
        drives[0] = drives[1]
        drives[1] = t
        return tuple(drives)

    def osx_sort_names(self, names):
        '''Swap main and card A volume names if the user asked for it.'''
        if len(names) < 2:
            return names
        e = self.settings().extra_customization
        if not e[self.OPT_SWAP_MAIN_AND_CARD]:
            return names
        main = names.get('main', None)
        card = names.get('carda', None)
        if main is not None and card is not None:
            names['main'] = card
            names['carda'] = main
        return names
| 5,663 | Python | .py | 133 | 32.443609 | 82 | 0.522773 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,788 | driver.py | kovidgoyal_calibre/src/calibre/devices/nokia/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009-2014, John Schember <john at nachtimwald.com> and Andres Gomez <agomez at igalia.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Nokia's internet tablet devices
'''
from calibre.devices.usbms.driver import USBMS
class N770(USBMS):
    '''Nokia 770 internet tablet.'''

    name = 'Nokia 770 Device Interface'
    gui_name = 'Nokia 770'
    description = _('Communicate with the Nokia 770 Internet Tablet.')
    author = 'John Schember and Andres Gomez'
    supported_platforms = ['windows', 'linux', 'osx']

    # Supported formats, most preferred first.
    FORMATS = ['mobi', 'prc', 'epub', 'pdf', 'html', 'zip', 'fb2', 'chm',
               'pdb', 'tcr', 'txt', 'rtf']

    VENDOR_ID = [0x421]
    PRODUCT_ID = [0x431]
    BCD = [0x308]

    VENDOR_NAME = 'NOKIA'
    WINDOWS_MAIN_MEM = '770'

    MAIN_MEMORY_VOLUME_LABEL = 'Nokia 770 Main Memory'

    EBOOK_DIR_MAIN = 'My Ebooks'
    SUPPORTS_SUB_DIRS = True
class N810(N770):
    '''Later Nokia Maemo/MeeGo tablets and phones; same layout as the 770.'''

    name = 'Nokia N800/N810/N900/N950/N9 Device Interface'
    gui_name = 'Nokia N800/N810/N900/N950/N9'
    description = _('Communicate with the Nokia N800/N810/N900/N950/N9 Maemo/MeeGo devices.')

    PRODUCT_ID = [0x4c3, 0x96, 0x1c7, 0x3d1, 0x518]
    BCD = [0x316]

    WINDOWS_MAIN_MEM = ['N800', 'N810', 'N900', 'NOKIA_N950', 'NOKIA_N9']
    MAIN_MEMORY_VOLUME_LABEL = 'Nokia Maemo/MeeGo device Main Memory'
class E71X(USBMS):
    '''Nokia E71X S60 phone.'''

    name = 'Nokia E71X device interface'
    gui_name = 'Nokia E71X'
    description = _('Communicate with the Nokia E71X')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'linux', 'osx']

    VENDOR_ID = [0x421]
    PRODUCT_ID = [0x1a0]
    BCD = [0x100]

    # Supported formats, most preferred first.
    FORMATS = ['mobi', 'prc']

    EBOOK_DIR_MAIN = 'eBooks'
    SUPPORTS_SUB_DIRS = True

    VENDOR_NAME = 'NOKIA'
    WINDOWS_MAIN_MEM = 'S60'
class E52(USBMS):
    '''Nokia E52 S60 phone.'''

    name = 'Nokia E52 device interface'
    gui_name = 'Nokia E52'
    description = _('Communicate with the Nokia E52')
    author = 'David Ignjic'
    supported_platforms = ['windows', 'linux', 'osx']

    VENDOR_ID = [0x421]
    PRODUCT_ID = [0x1CD, 0x273, 0x00aa]
    BCD = [0x100]

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'mobi', 'prc', 'txt']

    EBOOK_DIR_MAIN = 'eBooks'
    SUPPORTS_SUB_DIRS = True

    VENDOR_NAME = 'NOKIA'
    WINDOWS_MAIN_MEM = ['S60', 'E71']
| 2,413 | Python | .py | 60 | 35.4 | 108 | 0.614888 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,789 | __init__.py | kovidgoyal_calibre/src/calibre/devices/android/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,790 | driver.py | kovidgoyal_calibre/src/calibre/devices/android/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import io
import os
from calibre import fsync
from calibre.devices.usbms.driver import USBMS
from calibre.utils.resources import get_image_path as I
from polyglot.builtins import string_or_bytes
HTC_BCDS = [0x100, 0x0222, 0x0224, 0x0226, 0x227, 0x228, 0x229, 0x0231, 0x9999]
class ANDROID(USBMS):

    name = 'Android driver'
    gui_name = 'Android phone'
    description = _('Communicate with Android phones.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['epub', 'pdf']

    # USB vendor id -> {product id: [accepted bcdDevice revisions]}.
    # A connected device is treated as an Android phone/tablet only if its
    # (vendor, product, revision) triple appears here.
    VENDOR_ID = {
        # HTC
        0x0bb4 : {0xc02 : HTC_BCDS,
                  0xc01 : HTC_BCDS,
                  0xff9 : HTC_BCDS,
                  0xc86 : HTC_BCDS,
                  0xc87 : HTC_BCDS,
                  0xc8d : HTC_BCDS,
                  0xc91 : HTC_BCDS,
                  0xc92 : HTC_BCDS,
                  0xc97 : HTC_BCDS,
                  0xc99 : HTC_BCDS,
                  0xca2 : HTC_BCDS,
                  0xca3 : HTC_BCDS,
                  0xca4 : HTC_BCDS,
                  0xca9 : HTC_BCDS,
                  0xcac : HTC_BCDS,
                  0xcba : HTC_BCDS,
                  0xccf : HTC_BCDS,
                  0xcd6 : HTC_BCDS,
                  0xce5 : HTC_BCDS,
                  0xcec : HTC_BCDS,
                  0x0cf5 : HTC_BCDS,
                  0x2910 : HTC_BCDS,
                  0xe77 : HTC_BCDS,
                  0x0001 : [0x255],
                  },

        # Eken
        0x040d : {0x8510 : [0x0001], 0x0851 : [0x1]},

        # Trekstor
        0x1e68 : {
            0x006a : [0x0231],
            0x0062 : [0x222],  # Surftab ventos https://bugs.launchpad.net/bugs/1204885
        },

        # Motorola
        0x22b8 : {
            0x41d9 : [0x216], 0x2d61 : [0x100], 0x2d67 : [0x100],
            0x2de8 : [0x229],
            0x41db : [0x216], 0x4285 : [0x216], 0x42a3 : [0x216],
            0x4286 : [0x216], 0x42b3 : [0x216], 0x42b4 : [0x216],
            0x7086 : [0x0226], 0x70a8: [0x9999], 0x42c4 : [0x216],
            0x70c6 : [0x226], 0x70c7: [0x226],
            0x4316 : [0x216],
            0x4317 : [0x216],
            0x42d6 : [0x216],
            0x42d7 : [0x216],
            0x42f7 : [0x216],
            0x4365 : [0x216],
            0x4366 : [0x216],
            0x4371 : [0x216],
        },
        # Freescale
        0x15a2 : {
            0x0c01 : [0x226]
        },

        # Alcatel
        0x05c6 : {
            0x9018 : [0x0226],
        },

        # Sony Ericsson
        0xfce : {
            0xa173 : [0x216],
            0xd12e : [0x0100],
            0xe156 : [0x226],
            0xe15d : [0x226],
            0xe14f : [0x0226],
            0x614f : [0x0226, 0x100],
            0x6156 : [0x0226, 0x100],
        },

        # Google
        0x18d1 : {
            0x0001 : [0x0222, 0x0223, 0x230, 0x255, 0x9999],
            0x0002 : [0x9999],
            0x0003 : [0x0230, 0x9999],
            0x4e11 : [0x0100, 0x226, 0x227],
            0x4e12 : [0x0100, 0x226, 0x227],
            0x4e21 : [0x0100, 0x226, 0x227, 0x231],
            0x4e22 : [0x0100, 0x226, 0x227, 0x231],
            0xb058 : [0x0222, 0x226, 0x227],
            0x0ff9 : [0x0226],
            0xc91 : HTC_BCDS,
            0xdddd : [0x216],
            0x0d01 : [0x9999],
            0x0d02 : [0x9999],
            0x2d01 : [0x9999],
            0xdeed : [0x231, 0x226],
        },

        # Samsung
        0x04e8 : {0x681d : [0x0222, 0x0223, 0x0224, 0x0400],
                  0x681c : [0x0222, 0x0223, 0x0224, 0x0400],
                  0x6640 : [0x0100],
                  0x685b : [0x0400, 0x0226],
                  0x685e : [0x0400, 0x226],
                  0x6860 : [0x0400],
                  0x6863 : [0x226],
                  0x6877 : [0x0400],
                  0x689e : [0x0400],
                  0xdeed : [0x0222],
                  0x1234 : [0x0400],
                  },

        # Viewsonic/Vizio
        0x0489 : {
            0xc000 : [0x0226],
            0xc001 : [0x0226],
            0xc004 : [0x0226],
            0x8801 : [0x0226, 0x0227],
            0xe115 : [0x0216],  # PocketBook A10
        },

        # Another Viewsonic
        0x0bb0 : {
            0x2a2b : [0x0226, 0x0227],
        },

        # Acer
        0x502 : {0x3203 : [0x0100, 0x224]},

        # Dell
        0x413c : {0xb007 : [0x0100, 0x0224, 0x0226]},

        # LG
        0x1004 : {
            0x61c5 : [0x100, 0x226, 0x227, 0x229, 0x9999],
            0x61cc : [0x226, 0x227, 0x9999, 0x100],
            0x61ce : [0x226, 0x227, 0x9999, 0x100],
            0x618e : [0x226, 0x227, 0x9999, 0x100],
            0x6205 : [0x226, 0x227, 0x9999, 0x100],
            0x6234 : [0x231],
        },

        # Archos
        0x0e79 : {
            0x1400 : [0x0222, 0x0216],
            0x1408 : [0x0222, 0x0216],
            0x1411 : [0x216],
            0x1417 : [0x0216],
            0x1419 : [0x0216],
            0x1420 : [0x0216],
            0x1422 : [0x0216]
        },

        # Huawei
        # Disabled as this USB id is used by various USB flash drives
        # 0x45e : { 0x00e1 : [0x007], },

        # T-Mobile
        0x0408 : {0x03ba : [0x0109], },

        # Xperia
        0x13d3 : {0x3304 : [0x0001, 0x0002]},

        # ZTE
        0x19d2 : {0x1353 : [0x226], 0x1351 : [0x227]},

        # Advent
        0x0955 : {0x7100 : [0x9999]},  # This is the same as the Notion Ink Adam

        # Kobo
        0x2237: {0x2208 : [0x0226]},

        # Lenovo
        0x17ef : {
            0x7421 : [0x0216],
            0x741b : [0x9999],
            0x7640 : [0x0255],
        },

        # Pantech
        0x10a9 : {0x6050 : [0x227]},

        # Prestigio and Teclast
        0x2207 : {0 : [0x222], 0x10 : [0x222]},

        # OPPO
        0x22d9 : {0x2768: [0x228]},

    }

    # Candidate destination folders; the first that exists on the device is
    # used (user customizable below).
    EBOOK_DIR_MAIN = ['eBooks/import', 'wordplayer/calibretransfer', 'Books',
                      'sdcard/ebooks']

    EXTRA_CUSTOMIZATION_MESSAGE = [_('Comma separated list of folders to '
            'send e-books to on the device\'s <b>main memory</b>. The first one that exists will '
            'be used'),
            _('Comma separated list of folders to '
            'send e-books to on the device\'s <b>storage cards</b>. The first one that exists will '
            'be used')
    ]

    EXTRA_CUSTOMIZATION_DEFAULT = [', '.join(EBOOK_DIR_MAIN), '']

    VENDOR_NAME = ['HTC', 'MOTOROLA', 'GOOGLE_', 'ANDROID', 'ACER',
            'GT-I5700', 'SAMSUNG', 'DELL', 'LINUX', 'GOOGLE', 'ARCHOS',
            'TELECHIP', 'HUAWEI', 'T-MOBILE', 'SEMC', 'LGE', 'NVIDIA',
            'GENERIC-', 'ZTE', 'MID', 'QUALCOMM', 'PANDIGIT', 'HYSTON',
            'VIZIO', 'GOOGLE', 'FREESCAL', 'KOBO_INC', 'LENOVO', 'ROCKCHIP',
            'POCKET', 'ONDA_MID', 'ZENITHIN', 'INGENIC', 'PMID701C', 'PD',
            'PMP5097C', 'MASS', 'NOVO7', 'ZEKI', 'COBY', 'SXZ', 'USB_2.0',
            'COBY_MID', 'VS', 'AINOL', 'TOPWISE', 'PAD703', 'NEXT8D12',
            'MEDIATEK', 'KEENHI', 'TECLAST', 'SURFTAB', 'XENTA', 'OBREEY_S',
            'SURFTAB_', 'ONYX-INT', 'IMCOSYS', 'SURFPAD3', 'GRAMMATA',
            ]

    WINDOWS_MAIN_MEM = ['ANDROID_PHONE', 'A855', 'A853', 'A953', 'INC.NEXUS_ONE',
            '__UMS_COMPOSITE', '_MB200', 'MASS_STORAGE', '_-_CARD', 'SGH-I897',
            'GT-I9000', 'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID',
            'SCH-I500_CARD', 'SPH-D700_CARD', 'MB810', 'GT-P1000', 'DESIRE',
            'SGH-T849', '_MB300', 'A70S', 'S_ANDROID', 'A101IT', 'A70H',
            'IDEOS_TABLET', 'MYTOUCH_4G', 'UMS_COMPOSITE', 'SCH-I800_CARD',
            '7', 'A956', 'A955', 'A43', 'ANDROID_PLATFORM', 'TEGRA_2',
            'MB860', 'MULTI-CARD', 'MID7015A', 'INCREDIBLE', 'A7EB', 'STREAK',
            'MB525', 'ANDROID2.3', 'SGH-I997', 'GT-I5800_CARD', 'MB612',
            'GT-S5830_CARD', 'GT-S5570_CARD', 'MB870', 'MID7015A',
            'ALPANDIGITAL', 'ANDROID_MID', 'VTAB1008', 'EMX51_BBG_ANDROI',
            'UMS', '.K080', 'P990', 'LTE', 'MB853', 'GT-S5660_CARD', 'A107',
            'GT-I9003_CARD', 'XT912', 'FILE-CD_GADGET', 'RK29_SDK', 'MB855',
            'XT910', 'BOOK_A10', 'USB_2.0_DRIVER', 'I9100T', 'P999DW',
            'KTABLET_PC', 'INGENIC', 'GT-I9001_CARD', 'USB_2.0',
            'GT-S5830L_CARD', 'UNIVERSE', 'XT875', 'PRO', '.KOBO_VOX',
            'THINKPAD_TABLET', 'SGH-T989', 'YP-G70', 'STORAGE_DEVICE',
            'ADVANCED', 'SGH-I727', 'USB_FLASH_DRIVER', 'ANDROID',
            'S5830I_CARD', 'MID7042', 'LINK-CREATE', '7035', 'VIEWPAD_7E',
            'NOVO7', 'MB526', '_USB#WYK7MSF8KE', 'TABLET_PC', 'F', 'MT65XX_MS',
            'ICS', 'E400', '__FILE-STOR_GADG', 'ST80208-1', 'GT-S5660M_CARD', 'XT894', '_USB',
            'PROD_TAB13-201', 'URFPAD2', 'MID1126', 'ST10216-1', 'S5360L_CARD', 'IDEATAB_A1000-F',
            'LBOOX', 'LTAGUS', 'IMCOV6L', '_101', 'LPAPYRE_624', 'S.L.',
            ]

    WINDOWS_CARD_A_MEM = ['ANDROID_PHONE', 'GT-I9000_CARD', 'SGH-I897',
            'FILE-STOR_GADGET', 'SGH-T959_CARD', 'SGH-T959', 'SAMSUNG_ANDROID', 'GT-P1000_CARD',
            'A70S', 'A101IT', '7', 'INCREDIBLE', 'A7EB', 'SGH-T849_CARD',
            '__UMS_COMPOSITE', 'SGH-I997_CARD', 'MB870', 'ALPANDIGITAL',
            'ANDROID_MID', 'P990_SD_CARD', '.K080', 'LTE_CARD', 'MB853',
            'A1-07___C0541A4F', 'XT912', 'MB855', 'XT910', 'BOOK_A10_CARD',
            'USB_2.0_DRIVER', 'I9100T', 'P999DW_SD_CARD', 'KTABLET_PC',
            'FILE-CD_GADGET', 'GT-I9001_CARD', 'USB_2.0', 'XT875',
            'UMS_COMPOSITE', 'PRO', '.KOBO_VOX', 'SGH-T989_CARD', 'SGH-I727',
            'USB_FLASH_DRIVER', 'ANDROID', 'MID7042', '7035', 'VIEWPAD_7E',
            'NOVO7', 'ADVANCED', 'TABLET_PC', 'F', 'E400_SD_CARD', 'ST80208-1', 'XT894',
            '_USB', 'PROD_TAB13-201', 'URFPAD2', 'MID1126', 'ANDROID_PLATFORM',
            'ST10216-1', 'LBOOX', 'LTAGUS', 'IMCOV6L',
            ]

    OSX_MAIN_MEM = 'Android Device Main Memory'

    MAIN_MEMORY_VOLUME_LABEL = 'Android Device Main Memory'

    SUPPORTS_SUB_DIRS = True

    def post_open_callback(self):
        # Resolve the (possibly user customized) comma separated folder
        # lists into python lists, once, after the device is opened.
        opts = self.settings()
        opts = opts.extra_customization
        if not opts:
            opts = [self.EBOOK_DIR_MAIN, '']

        def strtolist(x):
            # 'a, b' -> ['a', 'b']; lists are passed through unchanged.
            if isinstance(x, string_or_bytes):
                x = [y.strip() for y in x.split(',')]
            return x or []

        opts = [strtolist(x) for x in opts]
        self._android_main_ebook_dir = opts[0]
        self._android_card_ebook_dir = opts[1]

    def get_main_ebook_dir(self, for_upload=False):
        '''Return the list of candidate book folders on main memory.'''
        dirs = self._android_main_ebook_dir
        if not for_upload:
            def aldiko_tweak(x):
                # When scanning (not uploading), Aldiko's import folder is
                # replaced by its library folder.
                return 'eBooks' if x == 'eBooks/import' else x
            dirs = list(map(aldiko_tweak, dirs))
        return dirs

    def get_carda_ebook_dir(self, for_upload=False):
        '''Return the card folder(s); when scanning, the card root is used.'''
        if not for_upload:
            return ''
        return self._android_card_ebook_dir

    def get_cardb_ebook_dir(self, for_upload=False):
        # NOTE(review): for_upload is not forwarded, so card B always uses
        # the scanning behavior of get_carda_ebook_dir() — confirm intended.
        return self.get_carda_ebook_dir()

    def windows_sort_drives(self, drives):
        '''Swap main/card drive letters for the Archos 5, which reports them reversed.'''
        try:
            vid, pid, bcd = self.device_being_opened[:3]
        except:
            vid, pid, bcd = -1, -1, -1
        if (vid, pid, bcd) == (0x0e79, 0x1408, 0x0222):
            letter_a = drives.get('carda', None)
            if letter_a is not None:
                drives['carda'] = drives['main']
                drives['main'] = letter_a
        return drives

    @classmethod
    def configure_for_kindle_app(cls):
        '''Preset: send Kindle formats into a 'kindle' folder, no subfolders.'''
        proxy = cls._configProxy()
        proxy['format_map'] = ['azw3', 'mobi', 'azw', 'azw1', 'azw4', 'pdf']
        proxy['use_subdirs'] = False
        proxy['extra_customization'] = [
            ','.join(['kindle']+cls.EBOOK_DIR_MAIN), '']

    @classmethod
    def configure_for_generic_epub_app(cls):
        '''Reset the kindle-app preset back to the driver defaults.'''
        proxy = cls._configProxy()
        del proxy['format_map']
        del proxy['use_subdirs']
        del proxy['extra_customization']
class S60(USBMS):
    '''S60 phones, targeted at the zxreader FB2 reading app.'''

    name = 'S60 driver'
    gui_name = 'S60 phone'
    description = _('Communicate with S60 phones.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    VENDOR_ID = [0x421]
    PRODUCT_ID = [0x156]
    BCD = [0x100]

    # For use with zxreader
    FORMATS = ['fb2']
    EBOOK_DIR_MAIN = 'FB2 Books'

    VENDOR_NAME = 'NOKIA'
    WINDOWS_MAIN_MEM = 'S60'
class WEBOS(USBMS):
    '''HP/Palm WebOS tablets (used with the Kindle app).'''

    name = 'WebOS driver'
    gui_name = 'WebOS Tablet'
    description = _('Communicate with WebOS tablets.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    FORMATS = ['mobi', 'azw', 'prc']

    VENDOR_ID = [0x0830]
    PRODUCT_ID = [0x8074, 0x8072]
    BCD = [0x0327]

    EBOOK_DIR_MAIN = '.palmkindle'
    VENDOR_NAME = 'HP'
    WINDOWS_MAIN_MEM = 'WEBOS-DEVICE'

    THUMBNAIL_HEIGHT = 160
    THUMBNAIL_WIDTH = 120

    def upload_cover(self, path, filename, metadata, filepath):
        '''
        Write medium (120x160) and small (52x69) JPEG covers for the book
        into the device's coverCache folder.
        '''
        from PIL import Image, ImageDraw

        coverdata = getattr(metadata, 'thumbnail', None)
        if coverdata and coverdata[2]:
            cover = Image.open(io.BytesIO(coverdata[2]))
        else:
            coverdata = open(I('library.png'), 'rb').read()
            cover = Image.new('RGB', (120,160), 'black')

        im = Image.open(io.BytesIO(coverdata))
        im.thumbnail((120, 160), Image.Resampling.LANCZOS)
        x, y = im.size
        # Bug fix: use integer division — PIL paste coordinates must be
        # ints and '/' produces floats under Python 3 (the small-cover
        # branch below already used '//').
        cover.paste(im, ((120-x)//2, (160-y)//2))

        draw = ImageDraw.Draw(cover)
        draw.text((1, 10), metadata.get('title', _('Unknown')).encode('ascii', 'ignore'))
        draw.text((1, 140), metadata.get('authors', _('Unknown'))[0].encode('ascii', 'ignore'))

        data = io.BytesIO()
        cover.save(data, 'JPEG')
        coverdata = data.getvalue()

        with open(os.path.join(path, 'coverCache', filename + '-medium.jpg'), 'wb') as coverfile:
            coverfile.write(coverdata)
            fsync(coverfile)

        coverdata = getattr(metadata, 'thumbnail', None)
        if coverdata and coverdata[2]:
            cover = Image.open(io.BytesIO(coverdata[2]))
        else:
            coverdata = open(I('library.png'), 'rb').read()
            cover = Image.new('RGB', (52,69), 'black')

        im = Image.open(io.BytesIO(coverdata))
        im.thumbnail((52, 69), Image.Resampling.LANCZOS)
        x, y = im.size
        cover.paste(im, ((52-x)//2, (69-y)//2))

        cover2 = cover.resize((52, 69), Image.Resampling.LANCZOS).convert('RGB')

        data = io.BytesIO()
        cover2.save(data, 'JPEG')
        coverdata = data.getvalue()

        with open(os.path.join(path, 'coverCache', filename + '-small.jpg'), 'wb') as coverfile:
            coverfile.write(coverdata)
            fsync(coverfile)
| 15,803 | Python | .py | 358 | 31.139665 | 100 | 0.488551 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,791 | __init__.py | kovidgoyal_calibre/src/calibre/devices/iriver/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,792 | driver.py | kovidgoyal_calibre/src/calibre/devices/iriver/driver.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re
from calibre.devices.usbms.driver import USBMS
class IRIVER_STORY(USBMS):
    '''Iriver Story family of readers.'''

    name = 'Iriver Story Device Interface'
    gui_name = 'Iriver Story'
    description = _('Communicate with the Iriver Story reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'fb2', 'pdf', 'djvu', 'txt']

    VENDOR_ID = [0x1006]
    PRODUCT_ID = [0x4023, 0x4024, 0x4025, 0x4034, 0x4037]
    BCD = [0x0323, 0x0326, 0x226]

    VENDOR_NAME = 'IRIVER'
    # The Windows memory ids are matched as regular expressions over the
    # various model strings the Story family reports.
    WINDOWS_MAIN_MEM = re.compile(r'(%s)&' % '|'.join(
        ('STORY', 'STORY_EB05', 'STORY_WI-FI', 'STORY_EB07', 'STORY_EB12')))
    WINDOWS_CARD_A_MEM = re.compile(r'(%s)&' % '|'.join(
        ('STORY', 'STORY_SD', 'STORY_EB12_SD')))

    MAIN_MEMORY_VOLUME_LABEL = 'Story Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Story Storage Card'

    EBOOK_DIR_MAIN = 'Book'
    SUPPORTS_SUB_DIRS = True
| 1,332 | Python | .py | 29 | 40.758621 | 76 | 0.626357 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,793 | driver.py | kovidgoyal_calibre/src/calibre/devices/jetbook/driver.py | __license__ = 'GPL v3'
__copyright__ = '2009, James Ralston <jralston at mindspring.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for Ectaco Jetbook firmware >= JL04_v030e
'''
import os
import re
from calibre.constants import filesystem_encoding
from calibre.devices.usbms.driver import USBMS
from calibre.ebooks.metadata import string_to_authors
class JETBOOK(USBMS):
    '''Ectaco JetBook (firmware >= JL04_v030e).'''

    name = 'Ectaco JetBook Device Interface'
    description = _('Communicate with the JetBook e-book reader.')
    author = 'James Ralston'
    supported_platforms = ['windows', 'osx', 'linux']

    # Ordered list of supported formats
    # Be sure these have an entry in calibre.devices.mime
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'txt', 'rtf', 'pdf']

    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0xa4a5]
    BCD = [0x314]

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = 'EBOOK'
    WINDOWS_CARD_A_MEM = 'EBOOK'

    OSX_MAIN_MEM = 'Linux ebook Media'
    OSX_CARD_A_MEM = 'Linux ebook Media'

    MAIN_MEMORY_VOLUME_LABEL = 'Jetbook Main Memory'
    STORAGE_CARD_VOLUME_LABEL = 'Jetbook Storage Card'

    EBOOK_DIR_MAIN = "Books"
    EBOOK_DIR_CARD_A = "Books"
    SUPPORTS_SUB_DIRS = True

    # File names of the form '<authors>#<title>.ext' carry metadata for
    # formats the device cannot extract it from.
    JETBOOK_FILE_NAME_PATTERN = re.compile(
        r'(?P<authors>.+)#(?P<title>.+)'
    )

    def filename_callback(self, fname, mi):
        '''
        Encode author/title into the file name for formats that have no
        embedded metadata the device can read.
        '''
        fileext = os.path.splitext(os.path.basename(fname))[1]
        # Bug fix: os.path.splitext() returns the extension *with* its
        # leading dot, so the previous comparison against ('txt', 'pdf',
        # 'fb2') never matched and the rename below was dead code.
        if fileext.lower() not in ('.txt', '.pdf', '.fb2'):
            return fname

        title = mi.title if mi.title else 'Unknown'
        title = title.replace(' ', '_')
        au = mi.format_authors()
        if not au:
            au = 'Unknown'
        return f'{au}#{title}{fileext}'

    @classmethod
    def metadata_from_path(cls, path):
        '''
        Recover author/title from an '<authors>#<title>' file name when the
        format itself yielded no usable metadata.
        '''
        def check_unicode(txt):
            # Normalize filesystem bytes to text and undo the space escaping
            # applied by filename_callback().
            if not isinstance(txt, str):
                txt = txt.decode(filesystem_encoding, 'replace')
            txt = txt.replace('_', ' ')
            return txt

        mi = cls.metadata_from_formats([path])

        if (mi.title==_('Unknown') or mi.authors==[_('Unknown')]) \
                and '#' in mi.title:
            fn = os.path.splitext(os.path.basename(path))[0]
            match = cls.JETBOOK_FILE_NAME_PATTERN.match(fn)
            if match is not None:
                mi.title = check_unicode(match.group('title'))
                authors = string_to_authors(match.group('authors'))
                mi.authors = list(map(check_unicode, authors))

        return mi
class MIBUK(USBMS):
    '''MiBuk Wolder reader (Windows only).'''

    name = 'MiBuk Wolder Device Interface'
    description = _('Communicate with the MiBuk Wolder reader.')
    author = 'Kovid Goyal'
    supported_platforms = ['windows']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'txt', 'rtf', 'pdf']

    VENDOR_ID = [0x0525]
    PRODUCT_ID = [0xa4a5]
    BCD = [0x314, 0x319]

    SUPPORTS_SUB_DIRS = True

    VENDOR_NAME = ['LINUX', 'FILE_BAC']
    WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['WOLDERMIBUK', 'KED_STORAGE_GADG']
class JETBOOK_MINI(USBMS):
    '''
    Ectaco JetBook Mini. Reports itself as:
    ['0x4b8', '0x507', '0x100', 'ECTACO',
     'ECTACO ATA/ATAPI Bridge (Bulk-Only)', 'Rev.0.20']
    '''

    FORMATS = ['fb2', 'txt']

    gui_name = 'JetBook Mini'
    name = 'JetBook Mini Device Interface'
    description = _('Communicate with the JetBook Mini reader.')
    author = 'Kovid Goyal'

    VENDOR_ID = [0x4b8]
    PRODUCT_ID = [0x507]
    BCD = [0x100]

    VENDOR_NAME = 'ECTACO'
    WINDOWS_MAIN_MEM = ''  # Matches PROD_
    MAIN_MEMORY_VOLUME_LABEL = 'Jetbook Mini'

    SUPPORTS_SUB_DIRS = True
class JETBOOK_COLOR(USBMS):
    '''
    Ectaco JetBook Color. Reports itself as:
    (u'0x951', u'0x160b', u'0x0', u'Freescale',
     u'Mass Storage Device', u'0802270905553')
    '''

    FORMATS = ['epub', 'mobi', 'prc', 'fb2', 'rtf', 'txt', 'pdf', 'djvu']

    gui_name = 'JetBook Color'
    name = 'JetBook Color Device Interface'
    description = _('Communicate with the JetBook Color reader.')
    author = 'Kovid Goyal'

    VENDOR_ID = [0x951]
    PRODUCT_ID = [0x160b]
    BCD = [0x0]

    EBOOK_DIR_MAIN = 'My Books'
    SUPPORTS_SUB_DIRS = True
| 4,185 | Python | .py | 114 | 30.333333 | 79 | 0.594943 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,794 | driver.py | kovidgoyal_calibre/src/calibre/devices/boeye/driver.py | __license__ = 'GPL v3'
__copyright__ = '2011, Ken <ken at szboeye.com>'
__docformat__ = 'restructuredtext en'
'''
Device driver for BOEYE serial readers
'''
from calibre.devices.usbms.driver import USBMS
class BOEYE_BEX(USBMS):
    '''BOEYE BEX series readers (main memory only).'''

    name = 'BOEYE BEX reader driver'
    gui_name = 'BOEYE BEX'
    description = _('Communicate with BOEYE BEX Serial e-book readers.')
    author = 'szboeye'
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'mobi', 'fb2', 'lit', 'prc', 'pdf', 'rtf', 'txt',
               'djvu', 'doc', 'chm', 'html', 'zip', 'pdb']

    VENDOR_ID = [0x0085]
    PRODUCT_ID = [0x600]

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = 'FILE-STOR_GADGET'
    OSX_MAIN_MEM = 'Linux File-Stor Gadget Media'

    MAIN_MEMORY_VOLUME_LABEL = 'BOEYE BEX Storage Card'

    EBOOK_DIR_MAIN = 'Documents'
    SUPPORTS_SUB_DIRS = True
class BOEYE_BDX(USBMS):
    '''BOEYE BDX series readers (main memory plus storage card).'''

    name = 'BOEYE BDX reader driver'
    gui_name = 'BOEYE BDX'
    description = _('Communicate with BOEYE BDX serial e-book readers.')
    author = 'szboeye'
    supported_platforms = ['windows', 'osx', 'linux']

    # Supported formats, most preferred first.
    FORMATS = ['epub', 'mobi', 'fb2', 'lit', 'prc', 'pdf', 'rtf', 'txt',
               'djvu', 'doc', 'chm', 'html', 'zip', 'pdb']

    VENDOR_ID = [0x0085]
    PRODUCT_ID = [0x800]

    VENDOR_NAME = 'LINUX'
    WINDOWS_MAIN_MEM = 'FILE-STOR_GADGET'
    WINDOWS_CARD_A_MEM = 'FILE-STOR_GADGET'
    OSX_MAIN_MEM = 'Linux File-Stor Gadget Media'
    OSX_CARD_A_MEM = 'Linux File-Stor Gadget Media'

    MAIN_MEMORY_VOLUME_LABEL = 'BOEYE BDX Internal Memory'
    STORAGE_CARD_VOLUME_LABEL = 'BOEYE BDX Storage Card'

    EBOOK_DIR_MAIN = 'Documents'
    EBOOK_DIR_CARD_A = 'Documents'
    SUPPORTS_SUB_DIRS = True
| 1,715 | Python | .py | 41 | 37.292683 | 117 | 0.639107 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,795 | sony_cache.py | kovidgoyal_calibre/src/calibre/devices/prs505/sony_cache.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import time
from datetime import date
from calibre import fsync, guess_type, isbytestring, prints
from calibre.constants import DEBUG, preferred_encoding
from calibre.devices.errors import DeviceError
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.metadata import authors_to_sort_string, authors_to_string, title_sort
from calibre.prints import debug_print
from polyglot.binary import from_base64_bytes
'''
cacheExt.xml
Periodical identifier sample from a PRS-650:
<?xml version="1.0" encoding="UTF-8"?>
<cacheExt xmlns="http://www.sony.com/xmlns/product/prs/device/1">
<text conformsTo="http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0" periodicalName="The Atlantic"
description="Current affairs and politics focused on the US" publicationDate="Tue, 19 Oct 2010 00:00:00 GMT"
path="database/media/books/calibre/Atlantic [Mon, 18 Oct 2010], The - calibre_1701.epub">
<thumbnail width="167" height="217">main_thumbnail.jpg</thumbnail>
</text>
</cacheExt>
'''
# Utility functions {{{
EMPTY_CARD_CACHE = b'''\
<?xml version="1.0" encoding="UTF-8"?>
<cache xmlns="http://www.kinoma.com/FskCache/1">
</cache>
'''
EMPTY_EXT_CACHE = b'''\
<?xml version="1.0" encoding="UTF-8"?>
<cacheExt xmlns="http://www.sony.com/xmlns/product/prs/device/1">
</cacheExt>
'''
MIME_MAP = {
"lrf" : "application/x-sony-bbeb",
'lrx' : 'application/x-sony-bbeb',
"rtf" : "application/rtf",
"pdf" : "application/pdf",
"txt" : "text/plain" ,
'epub': 'application/epub+zip',
}
DAY_MAP = dict(Sun=0, Mon=1, Tue=2, Wed=3, Thu=4, Fri=5, Sat=6)
MONTH_MAP = dict(Jan=1, Feb=2, Mar=3, Apr=4, May=5, Jun=6, Jul=7, Aug=8, Sep=9, Oct=10, Nov=11, Dec=12)
INVERSE_DAY_MAP = dict(zip(DAY_MAP.values(), DAY_MAP.keys()))
INVERSE_MONTH_MAP = dict(zip(MONTH_MAP.values(), MONTH_MAP.keys()))
def strptime(src):
src = src.strip()
src = src.split()
src[0] = str(DAY_MAP[src[0][:-1]])+','
src[2] = str(MONTH_MAP[src[2]])
return time.strptime(' '.join(src), '%w, %d %m %Y %H:%M:%S %Z')
def strftime(epoch, zone=time.localtime):
try:
src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone(epoch)).split()
except:
src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone()).split()
src[0] = INVERSE_DAY_MAP[int(src[0][:-1])]+','
src[2] = INVERSE_MONTH_MAP[int(src[2])]
return ' '.join(src)
def uuid():
from uuid import uuid4
return str(uuid4()).replace('-', '', 1).upper()
# }}}
class XMLCache:
def __init__(self, paths, ext_paths, prefixes, use_author_sort):
from calibre.utils.xml_parse import safe_xml_fromstring
if DEBUG:
debug_print('Building XMLCache...', paths)
self.paths = paths
self.prefixes = prefixes
self.use_author_sort = use_author_sort
# Parse XML files {{{
self.roots = {}
for source_id, path in paths.items():
if source_id == 0:
if not os.path.exists(path):
raise DeviceError(('The SONY XML cache %r does not exist. Try'
' disconnecting and reconnecting your reader.')%repr(path))
with open(path, 'rb') as f:
raw = f.read()
else:
raw = EMPTY_CARD_CACHE
if os.access(path, os.R_OK):
with open(path, 'rb') as f:
raw = f.read()
self.roots[source_id] = safe_xml_fromstring(
xml_to_unicode(raw, strip_encoding_pats=True, assume_utf8=True, verbose=DEBUG)[0]
)
if self.roots[source_id] is None:
raise Exception(('The SONY database at %r is corrupted. Try '
' disconnecting and reconnecting your reader.')%path)
self.ext_paths, self.ext_roots = {}, {}
for source_id, path in ext_paths.items():
if not os.path.exists(path):
try:
with open(path, 'wb') as f:
f.write(EMPTY_EXT_CACHE)
fsync(f)
except:
pass
if os.access(path, os.W_OK):
try:
with open(path, 'rb') as f:
self.ext_roots[source_id] = safe_xml_fromstring(
xml_to_unicode(f.read(), strip_encoding_pats=True, assume_utf8=True, verbose=DEBUG)[0]
)
self.ext_paths[source_id] = path
except:
pass
# }}}
recs = self.roots[0].xpath('//*[local-name()="records"]')
if not recs:
raise DeviceError('The SONY XML database is corrupted (no'
' <records>). Try disconnecting an reconnecting'
' your reader.')
self.record_roots = {}
self.record_roots.update(self.roots)
self.record_roots[0] = recs[0]
self.detect_namespaces()
debug_print('Done building XMLCache...')
# Playlist management {{{
def purge_broken_playlist_items(self, root):
id_map = self.build_id_map(root)
for pl in root.xpath('//*[local-name()="playlist"]'):
seen = set()
for item in list(pl):
id_ = item.get('id', None)
if id_ is None or id_ in seen or id_map.get(id_, None) is None:
if DEBUG:
if id_ is None:
cause = 'invalid id'
elif id_ in seen:
cause = 'duplicate item'
else:
cause = 'id not found'
prints('Purging broken playlist item:',
id_, 'from playlist:', pl.get('title', None),
'because:', cause)
item.getparent().remove(item)
continue
seen.add(id_)
def prune_empty_playlists(self):
for i, root in self.record_roots.items():
self.purge_broken_playlist_items(root)
for playlist in root.xpath('//*[local-name()="playlist"]'):
if len(playlist) == 0 or not playlist.get('title', None):
if DEBUG:
debug_print('Removing playlist id:', playlist.get('id', None),
playlist.get('title', None))
playlist.getparent().remove(playlist)
def ensure_unique_playlist_titles(self):
for i, root in self.record_roots.items():
seen = set()
for playlist in root.xpath('//*[local-name()="playlist"]'):
title = playlist.get('title', None)
if title is None:
title = _('Unnamed')
playlist.set('title', title)
if title in seen:
for i in range(2, 1000):
if title+str(i) not in seen:
title = title+str(i)
playlist.set('title', title)
seen.add(title)
break
else:
seen.add(title)
def build_id_playlist_map(self, bl_index):
'''
Return a map of the collections in books: {lpaths: [collection names]}
'''
debug_print('Start build_id_playlist_map')
self.ensure_unique_playlist_titles()
self.prune_empty_playlists()
debug_print('after cleaning playlists')
root = self.record_roots[bl_index]
if root is None:
return
id_map = self.build_id_map(root)
playlist_map = {}
# foreach playlist, get the lpaths for the ids in it, then add to dict
for playlist in root.xpath('//*[local-name()="playlist"]'):
name = playlist.get('title')
if name is None:
debug_print('build_id_playlist_map: unnamed playlist!')
continue
for item in playlist:
# translate each id into its lpath
id_ = item.get('id', None)
if id_ is None:
debug_print('build_id_playlist_map: id_ is None!')
continue
bk = id_map.get(id_, None)
if bk is None:
debug_print('build_id_playlist_map: book is None!', id_)
continue
lpath = bk.get('path', None)
if lpath is None:
debug_print('build_id_playlist_map: lpath is None!', id_)
continue
if lpath not in playlist_map:
playlist_map[lpath] = []
playlist_map[lpath].append(name)
debug_print('Finish build_id_playlist_map. Found', len(playlist_map))
return playlist_map
def reset_existing_playlists_map(self):
'''
Call this method before calling get_or_create_playlist in the context of
a given job. Call it again after deleting any playlists. The current
implementation adds all new playlists before deleting any, so that
constraint is respected.
'''
self._playlist_to_playlist_id_map = {}
def get_or_create_playlist(self, bl_idx, title):
# maintain a private map of playlists to their ids. Don't check if it
# exists, because reset_existing_playlist_map must be called before it
# is used to ensure that deleted playlists are taken into account
root = self.record_roots[bl_idx]
if bl_idx not in self._playlist_to_playlist_id_map:
self._playlist_to_playlist_id_map[bl_idx] = {}
for playlist in root.xpath('//*[local-name()="playlist"]'):
pl_title = playlist.get('title', None)
if pl_title is not None:
self._playlist_to_playlist_id_map[bl_idx][pl_title] = playlist
if title in self._playlist_to_playlist_id_map[bl_idx]:
return self._playlist_to_playlist_id_map[bl_idx][title]
debug_print('Creating playlist:', title)
ans = root.makeelement('{%s}playlist'%self.namespaces[bl_idx],
nsmap=root.nsmap, attrib={
'uuid' : uuid(),
'title': title,
'id' : str(self.max_id(root)+1),
'sourceid': '1'
})
root.append(ans)
self._playlist_to_playlist_id_map[bl_idx][title] = ans
return ans
# }}}
def fix_ids(self): # {{{
debug_print('Running fix_ids()')
def ensure_numeric_ids(root):
idmap = {}
for x in root.xpath('child::*[@id]'):
id_ = x.get('id')
try:
id_ = int(id_)
except:
x.set('id', '-1')
idmap[id_] = '-1'
if DEBUG and idmap:
debug_print('Found non numeric ids:')
debug_print(list(idmap.keys()))
return idmap
def remap_playlist_references(root, idmap):
for playlist in root.xpath('//*[local-name()="playlist"]'):
for item in playlist.xpath(
'descendant::*[@id and local-name()="item"]'):
id_ = item.get('id')
if id_ in idmap:
item.set('id', idmap[id_])
if DEBUG:
debug_print('Remapping id %s to %s'%(id_, idmap[id_]))
def ensure_media_xml_base_ids(root):
for num, tag in enumerate(('library', 'watchSpecial')):
for x in root.xpath('//*[local-name()="%s"]'%tag):
x.set('id', str(num))
def rebase_ids(root, base, sourceid, pl_sourceid):
'Rebase all ids and also make them consecutive'
for item in root.xpath('//*[@sourceid]'):
sid = pl_sourceid if item.tag.endswith('playlist') else sourceid
item.set('sourceid', str(sid))
# Only rebase ids of nodes that are immediate children of the
# record root (that way playlist/itemnodes are unaffected
items = root.xpath('child::*[@id]')
items.sort(key=lambda x: int(x.get('id')))
idmap = {}
for i, item in enumerate(items):
old = int(item.get('id'))
new = base + i
if old != new:
item.set('id', str(new))
idmap[str(old)] = str(new)
return idmap
self.prune_empty_playlists()
for i in sorted(self.roots.keys()):
root = self.record_roots[i]
if i == 0:
ensure_media_xml_base_ids(root)
idmap = ensure_numeric_ids(root)
if len(idmap) > 0:
debug_print('fix_ids: found some non-numeric ids')
remap_playlist_references(root, idmap)
if i == 0:
sourceid, playlist_sid = 1, 0
base = 0
else:
previous = i-1
if previous not in self.roots:
previous = 0
max_id = self.max_id(self.roots[previous])
sourceid = playlist_sid = max_id + 1
base = max_id + 2
idmap = rebase_ids(root, base, sourceid, playlist_sid)
remap_playlist_references(root, idmap)
last_bl = max(self.roots.keys())
max_id = self.max_id(self.roots[last_bl])
self.roots[0].set('nextID', str(max_id+1))
debug_print('Finished running fix_ids()')
# }}}
# Update JSON from XML {{{
def update_booklist(self, bl, bl_index):
if bl_index not in self.record_roots:
return
debug_print('Updating JSON cache:', bl_index)
playlist_map = self.build_id_playlist_map(bl_index)
root = self.record_roots[bl_index]
lpath_map = self.build_lpath_map(root)
for book in bl:
record = lpath_map.get(book.lpath, None)
if record is not None:
for thumbnail in record.xpath(
'descendant::*[local-name()="thumbnail"]'):
for img in thumbnail.xpath(
'descendant::*[local-name()="jpeg"]|'
'descendant::*[local-name()="png"]'):
if img.text:
try:
raw = from_base64_bytes(img.text.strip())
except Exception:
continue
book.thumbnail = raw
break
break
book.device_collections = playlist_map.get(book.lpath, [])
debug_print('Finished updating JSON cache:', bl_index)
# }}}
# Update XML from JSON {{{
def update(self, booklists, collections_attributes, plugboard):
debug_print('Starting update', collections_attributes)
use_tz_var = False
for i, booklist in booklists.items():
playlist_map = self.build_id_playlist_map(i)
debug_print('Updating XML Cache:', i)
root = self.record_roots[i]
lpath_map = self.build_lpath_map(root)
ext_root = self.ext_roots[i] if i in self.ext_roots else None
ext_lpath_map = None
if ext_root is not None:
ext_lpath_map = self.build_lpath_map(ext_root)
gtz_count = ltz_count = 0
use_tz_var = False
for book in booklist:
path = os.path.join(self.prefixes[i], *(book.lpath.split('/')))
record = lpath_map.get(book.lpath, None)
created = False
if record is None:
created = True
record = self.create_text_record(root, i, book.lpath)
if plugboard is not None:
newmi = book.deepcopy_metadata()
newmi.template_to_attribute(book, plugboard)
newmi.set('_new_book', getattr(book, '_new_book', False))
book.set('_pb_title_sort',
newmi.get('title_sort', newmi.get('title', None)))
book.set('_pb_author_sort', newmi.get('author_sort', ''))
else:
newmi = book
(gtz_count, ltz_count, use_tz_var) = \
self.update_text_record(record, newmi, path, i,
gtz_count, ltz_count, use_tz_var)
# Ensure the collections in the XML database are recorded for
# this book
if book.device_collections is None:
book.device_collections = []
book.device_collections = playlist_map.get(book.lpath, [])
if created and ext_root is not None and \
ext_lpath_map.get(book.lpath, None) is None:
ext_record = self.create_ext_text_record(ext_root, i,
book.lpath, book.thumbnail)
self.periodicalize_book(book, ext_record)
debug_print('Timezone votes: %d GMT, %d LTZ, use_tz_var=%s'%
(gtz_count, ltz_count, use_tz_var))
self.update_playlists(i, root, booklist, collections_attributes)
# Update the device collections because update playlist could have added
# some new ones.
debug_print('In update/ Starting refresh of device_collections')
for i, booklist in booklists.items():
playlist_map = self.build_id_playlist_map(i)
for book in booklist:
book.device_collections = playlist_map.get(book.lpath, [])
self.fix_ids()
debug_print('Finished update')
def is_sony_periodical(self, book):
if _('News') not in book.tags:
return False
if not book.lpath.lower().endswith('.epub'):
return False
if book.pubdate.date() < date(2010, 10, 17):
return False
return True
def periodicalize_book(self, book, record):
if not self.is_sony_periodical(book):
return
record.set('conformsTo',
"http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0")
record.set('description', '')
name = None
if '[' in book.title:
name = book.title.split('[')[0].strip()
if len(name) < 4:
name = None
if not name:
try:
name = [t for t in book.tags if t != _('News')][0]
except:
name = None
if not name:
name = book.title
record.set('periodicalName', name)
try:
pubdate = strftime(book.pubdate.utctimetuple(),
zone=lambda x : x)
record.set('publicationDate', pubdate)
except:
pass
def rebuild_collections(self, booklist, bl_index):
if bl_index not in self.record_roots:
return
root = self.record_roots[bl_index]
self.update_playlists(bl_index, root, booklist, [])
self.fix_ids()
def update_playlists(self, bl_index, root, booklist, collections_attributes):
debug_print('Starting update_playlists', collections_attributes, bl_index)
self.reset_existing_playlists_map()
collections = booklist.get_collections(collections_attributes)
lpath_map = self.build_lpath_map(root)
debug_print('update_playlists: finished building maps')
for category, books in collections.items():
records = [lpath_map.get(b.lpath, None) for b in books]
# Remove any books that were not found, although this
# *should* never happen
if DEBUG and None in records:
debug_print('WARNING: Some elements in the JSON cache were not'
' found in the XML cache')
records = [x for x in records if x is not None]
# Ensure each book has an ID.
for rec in records:
if rec.get('id', None) is None:
rec.set('id', str(self.max_id(root)+1))
ids = [x.get('id', None) for x in records]
# Given that we set the ids, there shouldn't be any None's. But
# better to be safe...
if None in ids:
debug_print('WARNING: Some <text> elements do not have ids')
ids = [x for x in ids if x is not None]
playlist = self.get_or_create_playlist(bl_index, category)
# Get the books currently in the playlist. We will need them to be
# sure to put back any books that were manually added.
playlist_ids = []
for item in playlist:
id_ = item.get('id', None)
if id_ is not None:
playlist_ids.append(id_)
# Empty the playlist. We do this so that the playlist will have the
# order specified by get_collections
for item in list(playlist):
playlist.remove(item)
# Get a list of ids not known by get_collections
extra_ids = [x for x in playlist_ids if x not in ids]
# Rebuild the collection in the order specified by get_collections. Then
# add the ids that get_collections didn't know about.
for id_ in ids + extra_ids:
item = playlist.makeelement(
'{%s}item'%self.namespaces[bl_index],
nsmap=playlist.nsmap, attrib={'id':id_})
playlist.append(item)
# Delete playlist entries not in collections
for playlist in root.xpath('//*[local-name()="playlist"]'):
title = playlist.get('title', None)
if title not in collections:
if DEBUG:
debug_print('Deleting playlist:', playlist.get('title', ''))
playlist.getparent().remove(playlist)
continue
books = collections[title]
records = [lpath_map.get(b.lpath, None) for b in books]
records = [x for x in records if x is not None]
ids = [x.get('id', None) for x in records]
ids = [x for x in ids if x is not None]
for item in list(playlist):
if item.get('id', None) not in ids:
if DEBUG:
debug_print('Deleting item:', item.get('id', ''),
'from playlist:', playlist.get('title', ''))
playlist.remove(item)
debug_print('Finishing update_playlists')
def create_text_record(self, root, bl_id, lpath):
namespace = self.namespaces[bl_id]
id_ = self.max_id(root)+1
attrib = {
'page':'0', 'part':'0','pageOffset':'0','scale':'0',
'id':str(id_), 'sourceid':'1', 'path':lpath}
ans = root.makeelement('{%s}text'%namespace, attrib=attrib, nsmap=root.nsmap)
root.append(ans)
return ans
def create_ext_text_record(self, root, bl_id, lpath, thumbnail):
namespace = root.nsmap[None]
attrib = {'path': lpath}
ans = root.makeelement('{%s}text'%namespace, attrib=attrib,
nsmap=root.nsmap)
ans.tail = '\n'
if len(root) > 0:
root[-1].tail = '\n\t'
else:
root.text = '\n\t'
root.append(ans)
if thumbnail and thumbnail[-1]:
ans.text = '\n' + '\t\t'
t = root.makeelement('{%s}thumbnail'%namespace,
attrib={'width':str(thumbnail[0]), 'height':str(thumbnail[1])},
nsmap=root.nsmap)
t.text = 'main_thumbnail.jpg'
ans.append(t)
t.tail = '\n\t'
return ans
def update_text_record(self, record, book, path, bl_index,
gtz_count, ltz_count, use_tz_var):
'''
Update the Sony database from the book. This is done if the timestamp in
the db differs from the timestamp on the file.
'''
# It seems that a Sony device can sometimes know what timezone it is in,
# and apparently converts the dates to GMT when it writes them to its
# DB. We can detect that a device is timezone-aware because there is a
# 'tz' variable in the Sony DB, which we can set to "0" to tell the
# device to ignore its own timezone when comparing mtime to the date in
# the DB.
# Unfortunately, if there is no tz variable in the DB, then we can't
# tell when the device applies a timezone conversion. We use a horrible
# heuristic to work around this problem. First, set dates only for new
# books, trying to avoid upsetting the sony. Second, voting: if a book
# is not new, compare its Sony DB date against localtime and gmtime.
# Count the matches. When we must set a date, use the one with the most
# matches. Use localtime if the case of a tie, and hope it is right.
try:
timestamp = os.path.getmtime(path)
except:
debug_print('Failed to get timestamp for:', path)
timestamp = time.time()
rec_date = record.get('date', None)
def clean(x):
if isbytestring(x):
x = x.decode(preferred_encoding, 'replace')
x.replace('\0', '')
return x
def record_set(k, v):
try:
record.set(k, clean(v))
except:
# v is not suitable for XML, ignore
pass
if not getattr(book, '_new_book', False): # book is not new
if record.get('tz', None) is not None:
use_tz_var = True
if strftime(timestamp, zone=time.gmtime) == rec_date:
gtz_count += 1
elif strftime(timestamp, zone=time.localtime) == rec_date:
ltz_count += 1
else: # book is new. Set the time using the current votes
if use_tz_var:
tz = time.localtime
record.set('tz', '0')
debug_print("Use localtime TZ and tz='0' for new book", book.lpath)
elif ltz_count >= gtz_count:
tz = time.localtime
debug_print("Use localtime TZ for new book", book.lpath)
else:
tz = time.gmtime
debug_print("Use GMT TZ for new book", book.lpath)
date = strftime(timestamp, zone=tz)
record.set('date', clean(date))
try:
record.set('size', clean(str(os.stat(path).st_size)))
except:
record.set('size', '0')
title = book.title if book.title else _('Unknown')
record_set('title', title)
ts = book.title_sort
if not ts:
ts = title_sort(title)
record_set('titleSorter', ts)
if self.use_author_sort:
if book.author_sort:
aus = book.author_sort
else:
debug_print('Author_sort is None for book', book.lpath)
aus = authors_to_sort_string(book.authors)
record_set('author', aus)
else:
record_set('author', authors_to_string(book.authors))
ext = os.path.splitext(path)[1]
if ext:
ext = ext[1:].lower()
mime = MIME_MAP.get(ext, None)
if mime is None:
mime = guess_type('a.'+ext)[0]
if mime is not None:
record.set('mime', clean(mime))
if 'sourceid' not in record.attrib:
record.set('sourceid', '1')
if 'id' not in record.attrib:
num = self.max_id(record.getroottree().getroot())
record.set('id', str(num+1))
return (gtz_count, ltz_count, use_tz_var)
# }}}
# Writing the XML files {{{
def cleanup_whitespace(self, bl_index):
root = self.record_roots[bl_index]
level = 2 if bl_index == 0 else 1
if len(root) > 0:
root.text = '\n'+'\t'*level
for child in root:
child.tail = '\n'+'\t'*level
if len(child) > 0:
child.text = '\n'+'\t'*(level+1)
for gc in child:
gc.tail = '\n'+'\t'*(level+1)
next(child.iterchildren(reversed=True)).tail = '\n'+'\t'*level
next(root.iterchildren(reversed=True)).tail = '\n'+'\t'*(level-1)
def move_playlists_to_bottom(self):
for root in self.record_roots.values():
seen = []
for pl in root.xpath('//*[local-name()="playlist"]'):
pl.getparent().remove(pl)
seen.append(pl)
for pl in seen:
root.append(pl)
def write(self):
from lxml import etree
for i, path in self.paths.items():
self.move_playlists_to_bottom()
self.cleanup_whitespace(i)
raw = etree.tostring(self.roots[i], encoding='UTF-8',
xml_declaration=True)
raw = raw.replace(b"<?xml version='1.0' encoding='UTF-8'?>",
b'<?xml version="1.0" encoding="UTF-8"?>')
with open(path, 'wb') as f:
f.write(raw)
fsync(f)
for i, path in self.ext_paths.items():
try:
raw = etree.tostring(self.ext_roots[i], encoding='UTF-8',
xml_declaration=True)
except:
continue
raw = raw.replace(b"<?xml version='1.0' encoding='UTF-8'?>",
b'<?xml version="1.0" encoding="UTF-8"?>')
with open(path, 'wb') as f:
f.write(raw)
fsync(f)
# }}}
# Utility methods {{{
def build_lpath_map(self, root):
m = {}
for bk in root.xpath('//*[local-name()="text"]'):
m[bk.get('path')] = bk
return m
def build_id_map(self, root):
m = {}
for bk in root.xpath('//*[local-name()="text"]'):
m[bk.get('id')] = bk
return m
def book_by_lpath(self, lpath, root):
matches = root.xpath('//*[local-name()="text" and @path="%s"]'%lpath)
if matches:
return matches[0]
def max_id(self, root):
ans = -1
for x in root.xpath('//*[@id]'):
id_ = x.get('id')
try:
num = int(id_)
if num > ans:
ans = num
except:
continue
return ans
def detect_namespaces(self):
self.nsmaps = {}
for i, root in self.roots.items():
self.nsmaps[i] = root.nsmap
self.namespaces = {}
for i in self.roots:
for c in ('library', 'text', 'image', 'playlist', 'thumbnail',
'watchSpecial'):
matches = self.record_roots[i].xpath('//*[local-name()="%s"]'%c)
if matches:
e = matches[0]
self.namespaces[i] = e.nsmap[e.prefix]
break
if i not in self.namespaces:
ns = self.nsmaps[i].get(None, None)
for prefix in self.nsmaps[i]:
if prefix is not None:
ns = self.nsmaps[i][prefix]
break
self.namespaces[i] = ns
# }}}
| 32,106 | Python | .py | 712 | 31.620787 | 115 | 0.523253 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,796 | __init__.py | kovidgoyal_calibre/src/calibre/devices/prs505/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
MEDIA_XML = 'database/cache/media.xml'
MEDIA_EXT = 'database/cache/cacheExt.xml'
CACHE_XML = 'Sony Reader/database/cache.xml'
CACHE_EXT = 'Sony Reader/database/cacheExt.xml'
MEDIA_THUMBNAIL = 'database/thumbnail'
CACHE_THUMBNAIL = 'Sony Reader/thumbnail'
| 345 | Python | .py | 8 | 41.75 | 61 | 0.751497 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,797 | driver.py | kovidgoyal_calibre/src/calibre/devices/prs505/driver.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Device driver for the SONY devices
'''
import os
import re
import time
from calibre import __appname__, fsync, prints
from calibre.devices.prs505 import CACHE_EXT, CACHE_THUMBNAIL, CACHE_XML, MEDIA_EXT, MEDIA_THUMBNAIL, MEDIA_XML
from calibre.devices.usbms.books import CollectionsBookList
from calibre.devices.usbms.driver import USBMS
from calibre.prints import debug_print
class PRS505(USBMS):
name = 'SONY Device Interface'
gui_name = 'SONY Reader'
description = _('Communicate with Sony e-book readers older than the'
' PRST1.')
author = 'Kovid Goyal'
supported_platforms = ['windows', 'osx', 'linux']
path_sep = '/'
booklist_class = CollectionsBookList
FORMATS = ['epub', 'lrf', 'lrx', 'rtf', 'pdf', 'txt', 'zbf']
CAN_SET_METADATA = ['title', 'authors', 'collections']
CAN_DO_DEVICE_DB_PLUGBOARD = True
VENDOR_ID = [0x054c] #: SONY Vendor Id
PRODUCT_ID = [0x031e]
BCD = [0x229, 0x1000, 0x22a, 0x31a]
VENDOR_NAME = 'SONY'
WINDOWS_MAIN_MEM = re.compile(
r'(PRS-(505|500|300))|'
r'(PRS-((700[#/])|((6|9|3)(0|5)0&)))'
)
WINDOWS_CARD_A_MEM = re.compile(
r'(PRS-(505|500)[#/]\S+:MS)|'
r'(PRS-((700[/#]\S+:)|((6|9)(0|5)0[#_]))MS)'
)
WINDOWS_CARD_B_MEM = re.compile(
r'(PRS-(505|500)[#/]\S+:SD)|'
r'(PRS-((700[/#]\S+:)|((6|9)(0|5)0[#_]))SD)'
)
MAIN_MEMORY_VOLUME_LABEL = 'Sony Reader Main Memory'
STORAGE_CARD_VOLUME_LABEL = 'Sony Reader Storage Card'
CARD_PATH_PREFIX = __appname__
SUPPORTS_SUB_DIRS = True
MUST_READ_METADATA = True
NUKE_COMMENTS = _('Comments have been removed as the SONY reader'
' chokes on them')
SUPPORTS_USE_AUTHOR_SORT = True
EBOOK_DIR_MAIN = 'database/media/books'
SCAN_FROM_ROOT = False
ALL_BY_TITLE = _('All by title')
ALL_BY_AUTHOR = _('All by author')
EXTRA_CUSTOMIZATION_MESSAGE = [
_('Comma separated list of metadata fields '
'to turn into collections on the device. Possibilities include: '
'%(coll)s. Two special collections are available: '
'%(abt)s:%(abtv)s and %(aba)s:%(abav)s. Add '
'these values to the list to enable them. The collections will be '
'given the name provided after the ":" character.')%dict(
abt='abt', abtv=ALL_BY_TITLE, aba='aba', abav=ALL_BY_AUTHOR, coll='series, tags, authors'),
_('Upload separate cover thumbnails for books (newer readers)') + ':::'+
_('Normally, the SONY readers get the cover image from the'
' e-book file itself. With this option, calibre will send a '
'separate cover image to the reader, useful if you are '
'sending DRMed books in which you cannot change the cover.'
' WARNING: This option should only be used with newer '
'SONY readers: 350, 650, 950 and newer.'),
_('Refresh separate covers when using automatic management (newer readers)') + ':::' +
_('Set this option to have separate book covers uploaded '
'every time you connect your device. Unset this option if '
'you have so many books on the reader that performance is '
'unacceptable.'),
_('Preserve cover aspect ratio when building thumbnails') + ':::' +
_('Set this option if you want the cover thumbnails to have '
'the same aspect ratio (width to height) as the cover. '
'Unset it if you want the thumbnail to be the maximum size, '
'ignoring aspect ratio.'),
_('Search for books in all folders') + ':::' +
_('Setting this option tells calibre to look for books in all '
'folders on the device and its cards. This permits calibre to '
'find books put on the device by other software and by '
'wireless download.')
]
EXTRA_CUSTOMIZATION_DEFAULT = [
', '.join(['series', 'tags']),
False,
False,
True,
True
]
OPT_COLLECTIONS = 0
OPT_UPLOAD_COVERS = 1
OPT_REFRESH_COVERS = 2
OPT_PRESERVE_ASPECT_RATIO = 3
OPT_SCAN_FROM_ROOT = 4
plugboard = None
plugboard_func = None
THUMBNAIL_HEIGHT = 217
MAX_PATH_LEN = 201 # 250 - (max(len(CACHE_THUMBNAIL), len(MEDIA_THUMBNAIL)) +
# len('main_thumbnail.jpg') + 1)
def windows_filter_pnp_id(self, pnp_id):
return '_LAUNCHER' in pnp_id
def post_open_callback(self):
def write_cache(prefix):
try:
cachep = os.path.join(prefix, *(CACHE_XML.split('/')))
if not os.path.exists(cachep):
dname = os.path.dirname(cachep)
if not os.path.exists(dname):
try:
os.makedirs(dname, mode=0o777)
except:
time.sleep(5)
os.makedirs(dname, mode=0o777)
with open(cachep, 'wb') as f:
f.write(b'''<?xml version="1.0" encoding="UTF-8"?>
<cache xmlns="http://www.kinoma.com/FskCache/1">
</cache>
''')
fsync(f)
return True
except:
import traceback
traceback.print_exc()
return False
# Make sure we don't have the launcher partition
# as one of the cards
if self._card_a_prefix is not None:
if not write_cache(self._card_a_prefix):
self._card_a_prefix = None
if self._card_b_prefix is not None:
if not write_cache(self._card_b_prefix):
self._card_b_prefix = None
self.booklist_class.rebuild_collections = self.rebuild_collections
# Set the thumbnail width to the theoretical max if the user has asked
# that we do not preserve aspect ratio
if not self.settings().extra_customization[self.OPT_PRESERVE_ASPECT_RATIO]:
self.THUMBNAIL_WIDTH = 168
# Set WANTS_UPDATED_THUMBNAILS if the user has asked that thumbnails be
# updated on every connect
self.WANTS_UPDATED_THUMBNAILS = \
self.settings().extra_customization[self.OPT_REFRESH_COVERS]
self.SCAN_FROM_ROOT = self.settings().extra_customization[self.OPT_SCAN_FROM_ROOT]
def filename_callback(self, fname, mi):
if getattr(mi, 'application_id', None) is not None:
base = fname.rpartition('.')[0]
suffix = '_%s'%mi.application_id
if not base.endswith(suffix):
fname = base + suffix + '.' + fname.rpartition('.')[-1]
return fname
def initialize_XML_cache(self):
from calibre.devices.prs505.sony_cache import XMLCache
paths, prefixes, ext_paths = {}, {}, {}
for prefix, path, ext_path, source_id in [
('main', MEDIA_XML, MEDIA_EXT, 0),
('card_a', CACHE_XML, CACHE_EXT, 1),
('card_b', CACHE_XML, CACHE_EXT, 2)
]:
prefix = getattr(self, '_%s_prefix'%prefix)
if prefix is not None and os.path.exists(prefix):
paths[source_id] = os.path.join(prefix, *(path.split('/')))
ext_paths[source_id] = os.path.join(prefix, *(ext_path.split('/')))
prefixes[source_id] = prefix
d = os.path.dirname(paths[source_id])
if not os.path.exists(d):
os.makedirs(d)
return XMLCache(paths, ext_paths, prefixes, self.settings().use_author_sort)
def books(self, oncard=None, end_session=True):
debug_print('PRS505: starting fetching books for card', oncard)
bl = USBMS.books(self, oncard=oncard, end_session=end_session)
c = self.initialize_XML_cache()
c.update_booklist(bl, {'carda':1, 'cardb':2}.get(oncard, 0))
debug_print('PRS505: finished fetching books for card', oncard)
return bl
def sync_booklists(self, booklists, end_session=True):
debug_print('PRS505: started sync_booklists')
c = self.initialize_XML_cache()
blists = {}
for i in c.paths:
try:
if booklists[i] is not None:
blists[i] = booklists[i]
except IndexError:
pass
opts = self.settings()
if opts.extra_customization:
collections = [x.strip() for x in
opts.extra_customization[self.OPT_COLLECTIONS].split(',')]
else:
collections = []
debug_print('PRS505: collection fields:', collections)
pb = None
if self.plugboard_func:
pb = self.plugboard_func(self.__class__.__name__,
'device_db', self.plugboards)
debug_print('PRS505: use plugboards', pb)
c.update(blists, collections, pb)
c.write()
if opts.extra_customization[self.OPT_REFRESH_COVERS]:
debug_print('PRS505: uploading covers in sync_booklists')
for idx,bl in blists.items():
prefix = self._card_a_prefix if idx == 1 else \
self._card_b_prefix if idx == 2 else self._main_prefix
for book in bl:
try:
p = os.path.join(prefix, book.lpath)
self._upload_cover(os.path.dirname(p),
os.path.splitext(os.path.basename(p))[0],
book, p)
except:
debug_print('FAILED to upload cover',
prefix, book.lpath)
else:
debug_print('PRS505: NOT uploading covers in sync_booklists')
USBMS.sync_booklists(self, booklists, end_session=end_session)
debug_print('PRS505: finished sync_booklists')
def rebuild_collections(self, booklist, oncard):
debug_print('PRS505: started rebuild_collections on card', oncard)
c = self.initialize_XML_cache()
c.rebuild_collections(booklist, {'carda':1, 'cardb':2}.get(oncard, 0))
c.write()
debug_print('PRS505: finished rebuild_collections')
def set_plugboards(self, plugboards, pb_func):
self.plugboards = plugboards
self.plugboard_func = pb_func
def upload_cover(self, path, filename, metadata, filepath):
opts = self.settings()
if not opts.extra_customization[self.OPT_UPLOAD_COVERS]:
# Building thumbnails disabled
debug_print('PRS505: not uploading cover')
return
debug_print('PRS505: uploading cover')
try:
self._upload_cover(path, filename, metadata, filepath)
except:
debug_print('FAILED to upload cover', filepath)
    def _upload_cover(self, path, filename, metadata, filepath):
        '''
        Write the book's thumbnail into the device's thumbnail directory
        structure, mirroring the book's relative path. Does nothing if the
        metadata has no thumbnail.
        '''
        # metadata.thumbnail[-1] is presumably the raw JPEG data — TODO confirm
        if metadata.thumbnail and metadata.thumbnail[-1]:
            path = path.replace('/', os.sep)
            is_main = path.startswith(self._main_prefix)
            # Main memory and cards use different thumbnail roots
            thumbnail_dir = MEDIA_THUMBNAIL if is_main else CACHE_THUMBNAIL
            prefix = None
            if is_main:
                prefix = self._main_prefix
            else:
                if self._card_a_prefix and \
                    path.startswith(self._card_a_prefix):
                    prefix = self._card_a_prefix
                elif self._card_b_prefix and \
                        path.startswith(self._card_b_prefix):
                    prefix = self._card_b_prefix
            if prefix is None:
                prints('WARNING: Failed to find prefix for:', filepath)
                return
            thumbnail_dir = os.path.join(prefix, *thumbnail_dir.split('/'))
            relpath = os.path.relpath(filepath, prefix)
            # NOTE(review): only the Windows '..\\' form is stripped here; a
            # POSIX '../' prefix would pass through — confirm intended.
            if relpath.startswith('..\\'):
                relpath = relpath[3:]
            thumbnail_dir = os.path.join(thumbnail_dir, relpath)
            if not os.path.exists(thumbnail_dir):
                os.makedirs(thumbnail_dir)
            cpath = os.path.join(thumbnail_dir, 'main_thumbnail.jpg')
            with open(cpath, 'wb') as f:
                f.write(metadata.thumbnail[-1])
            debug_print('Cover uploaded to: %r'%cpath)
| 12,762 | Python | .py | 267 | 35.558052 | 111 | 0.561913 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,798 | hal.py | kovidgoyal_calibre/src/calibre/devices/usbms/hal.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2021, Kovid Goyal <kovid at kovidgoyal.net>
import time
from jeepney import DBusAddress, DBusErrorResponse, MessageType, Properties, new_method_call
from jeepney.io.blocking import open_dbus_connection
from calibre.constants import DEBUG
class HAL:

    '''Thin wrapper around the freedesktop HAL system D-Bus service, used on
    FreeBSD to enumerate, mount and unmount USB mass storage volumes.'''

    def __init__(self):
        self.bus = open_dbus_connection('SYSTEM')

    def send(self, msg):
        '''Send *msg* on the bus and return the first element of the reply
        body, raising DBusErrorResponse on a D-Bus error reply.'''
        reply = self.bus.send_and_get_reply(msg)
        if reply.header.message_type is MessageType.error:
            raise DBusErrorResponse(reply)
        return reply.body[0]

    def call(self, addr, method, sig='', *args):
        '''Invoke *method* on *addr*. *sig* is the D-Bus signature describing
        *args*; omit it for methods that take no arguments.'''
        if sig:
            return self.send(new_method_call(addr, method, sig, args))
        return self.send(new_method_call(addr, method))

    def prop(self, addr, name):
        '''Read the property *name* from *addr*.'''
        return self.send(Properties(addr).get(name))

    def addr(self, path, interface):
        '''Return a DBusAddress for *path* on the HAL bus name using the
        org.freedesktop.Hal.*interface* interface.'''
        return DBusAddress(path, bus_name='org.freedesktop.Hal', interface=f'org.freedesktop.Hal.{interface}')

    def get_volume(self, vpath):
        '''Return a dict describing the filesystem volume at HAL object path
        *vpath* (device node, device/volume addresses, label), or None if it
        is not a mountable filesystem volume.'''
        vdevif = self.addr(vpath, 'Device')
        if not self.prop(vdevif, 'block.is_volume') or self.prop(vdevif, 'volume.fsusage') != 'filesystem':
            return
        volif = self.addr(vpath, 'Volume')
        pdevif = self.addr(self.prop(volif, 'info.parent'), 'Device')
        return {'node': self.prop(pdevif, 'block.device'),
                'dev': vdevif,
                'vol': volif,
                'label': self.prop(vdevif, 'volume.label')}

    def get_volumes(self, d):
        '''Return all filesystem volumes belonging to the detected USB device
        *d*, sorted by device node.'''
        vols = []
        manager = self.addr('/org/freedesktop/Hal/Manager', 'Manager')
        paths = self.call(manager, 'FindDeviceStringMatch', 'ss', 'usb.serial', d.serial)
        for path in paths:
            objif = self.addr(path, 'Device')
            # Extra paranoia: re-check all identifying properties, not just
            # the serial number
            try:
                if d.idVendor == self.prop(objif, 'usb.vendor_id') and \
                        d.idProduct == self.prop(objif, 'usb.product_id') and \
                        d.manufacturer == self.prop(objif, 'usb.vendor') and \
                        d.product == self.prop(objif, 'usb.product') and \
                        d.serial == self.prop(objif, 'usb.serial'):
                    midpath = self.call(manager, 'FindDeviceStringMatch', 'ss', 'info.parent', path)
                    dpaths = self.call(manager, 'FindDeviceStringMatch', 'ss', 'storage.originating_device', path
                            ) + self.call(manager, 'FindDeviceStringMatch', 'ss', 'storage.originating_device', midpath[0])
                    for dpath in dpaths:
                        try:
                            # FIX: the 'ss' signature was missing here, so
                            # call() treated 'block.storage_device' as the
                            # signature and sent a malformed message.
                            vpaths = self.call(manager, 'FindDeviceStringMatch', 'ss', 'block.storage_device', dpath)
                            for vpath in vpaths:
                                try:
                                    vol = self.get_volume(vpath)
                                    if vol is not None:
                                        vols.append(vol)
                                except DBusErrorResponse as e:
                                    print(e)
                                    continue
                        except DBusErrorResponse as e:
                            print(e)
                            continue
            except DBusErrorResponse:
                continue
        vols.sort(key=lambda x: x['node'])
        return vols

    def get_mount_point(self, vol):
        '''Mount *vol* if needed and return its mount point. Waits up to
        ~100 seconds for the mount to appear before giving up.'''
        if not self.prop(vol['dev'], 'volume.is_mounted'):
            fstype = self.prop(vol['dev'], 'volume.fstype')
            self.call(vol['vol'], 'Mount', 'ssas', 'Calibre-'+vol['label'], fstype, [])
            loops = 0
            while not self.prop(vol['dev'], 'volume.is_mounted'):
                time.sleep(1)
                loops += 1
                if loops > 100:
                    raise Exception("ERROR: Timeout waiting for mount to complete")
        return self.prop(vol['dev'], 'volume.mount_point')

    def mount_volumes(self, volumes):
        '''Mount up to three of *volumes* (main, card A, card B in order) and
        return (any_mounted, mapping of prefix/vol attributes).'''
        mtd = 0
        ans = {
            '_main_prefix': None, '_main_vol': None,
            '_card_a_prefix': None, '_card_a_vol': None,
            '_card_b_prefix': None, '_card_b_vol': None,
        }
        for vol in volumes:
            try:
                mp = self.get_mount_point(vol)
            except Exception as e:
                # FIX: was missing the f-string prefix, printing the template
                # literally instead of the volume label
                print(f"Failed to mount: {vol['label']}", e)
                continue
            # Mount Point becomes Mount Path
            mp += '/'
            if DEBUG:
                print("FBSD: mounted", vol['label'], "on", mp)
            if mtd == 0:
                ans['_main_prefix'], ans['_main_vol'] = mp, vol['vol']
                if DEBUG:
                    print("FBSD: main = ", mp)
            elif mtd == 1:
                ans['_card_a_prefix'], ans['_card_a_vol'] = mp, vol['vol']
                if DEBUG:
                    print("FBSD: card a = ", mp)
            elif mtd == 2:
                ans['_card_b_prefix'], ans['_card_b_vol'] = mp, vol['vol']
                if DEBUG:
                    print("FBSD: card b = ", mp)
                break
            mtd += 1
        return mtd > 0, ans

    def unmount(self, vol):
        '''Best-effort unmount of the volume address *vol*.'''
        try:
            self.call(vol, 'Unmount', 'as', [])
        except DBusErrorResponse as e:
            print('Unable to eject ', e)
def get_hal():
    '''Return the process-wide HAL connection, creating it on first use.'''
    try:
        return get_hal.ans
    except AttributeError:
        # First call: open the connection and memoize it on the function
        get_hal.ans = HAL()
        return get_hal.ans
| 5,469 | Python | .py | 118 | 31.90678 | 134 | 0.510128 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,799 | device.py | kovidgoyal_calibre/src/calibre/devices/usbms/device.py | __license__ = 'GPL v3'
__copyright__ = '2009, John Schember <john at nachtimwald.com> ' \
'2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Generic device driver. This is not a complete stand alone driver. It is
intended to be subclassed with the relevant parts implemented for a particular
device. This class handles device detection.
'''
import glob
import os
import re
import subprocess
import sys
import time
from collections import namedtuple
from itertools import repeat
from calibre import prints
from calibre.constants import is_debugging, isfreebsd, islinux, ismacos, iswindows
from calibre.devices.errors import DeviceError
from calibre.devices.interface import DevicePlugin
from calibre.devices.usbms.deviceconfig import DeviceConfig
from calibre.utils.filenames import ascii_filename as sanitize
from polyglot.builtins import iteritems, string_or_bytes
if ismacos:
    # macOS >= 10.11.4 mangles names in the IOKit registry; this pattern is
    # used by USBDevice.match_strings() to sanitize names before comparing
    osx_sanitize_name_pat = re.compile(r'[.-]')
if iswindows:
    # Cache of queried USB string descriptors keyed by the scanner's device
    # object, since fetching them requires talking to the device
    usb_info_cache = {}
def eject_exe():
    '''Return the full path to the bundled calibre-eject.exe helper.'''
    if hasattr(sys, 'new_app_layout'):
        base = sys.extensions_location
    else:
        base = os.path.dirname(sys.executable)
    return os.path.join(base, 'calibre-eject.exe')
class USBDevice:

    '''Lightweight record of a detected USB device's identifying data, with
    helpers to match it against values read back from the OS.'''

    def __init__(self, dev):
        self.idVendor, self.idProduct, self.bcdDevice = dev[0], dev[1], dev[2]
        if iswindows:
            # Querying the string descriptors requires communicating with the
            # device; that is done lazily in can_handle_windows() if needed.
            self.manufacturer = self.serial = self.product = None
        else:
            self.manufacturer, self.product, self.serial = dev[3], dev[4], dev[5]

    def match_serial(self, serial):
        '''Truthy iff this device has a serial number equal to *serial*.'''
        return self.serial and self.serial == serial

    def match_numbers(self, vid, pid, bcd):
        '''True iff vendor id, product id and BCD revision all match.'''
        return (self.idVendor, self.idProduct, self.bcdDevice) == (vid, pid, bcd)

    def match_strings(self, vid, pid, bcd, man, prod):
        '''Match on the numeric ids plus manufacturer/product strings,
        tolerating the name mangling Apple introduced in macOS 10.11.4, see
        https://www.mobileread.com/forums/showthread.php?t=273213
        '''
        if not self.match_numbers(vid, pid, bcd):
            return False
        if man == self.manufacturer and prod == self.product:
            return True
        sanitized = osx_sanitize_name_pat.sub
        return (sanitized('_', self.manufacturer or '') == man and
                sanitized('_', self.product or '') == prod)
class Device(DeviceConfig, DevicePlugin):

    '''
    This class provides logic common to all drivers for devices that export themselves
    as USB Mass Storage devices. Provides implementations for mounting/ejecting
    of USBMS devices on all platforms.
    '''

    #: USB ids this driver matches; subclasses override with real values
    VENDOR_ID = 0x0
    PRODUCT_ID = 0x0
    BCD = None
    #: Optional vendor name (or list of names) used in Windows PnP matching
    VENDOR_NAME = None
    #: String identifying the main memory of the device in the Windows PnP id
    #: strings
    #: This can be None, string, list of strings or compiled regex
    WINDOWS_MAIN_MEM = None
    #: String identifying the first card of the device in the Windows PnP id
    #: strings
    #: This can be None, string, list of strings or compiled regex
    WINDOWS_CARD_A_MEM = None
    #: String identifying the second card of the device in the Windows PnP id
    #: strings
    #: This can be None, string, list of strings or compiled regex
    WINDOWS_CARD_B_MEM = None
    #: Used by the new driver detection to disambiguate main memory from
    #: storage cards. Should be a regular expression that matches the
    #: main memory mount point assigned by macOS
    OSX_MAIN_MEM_VOL_PAT = None
    #: Command line used to eject volumes on macOS (drive path is appended)
    OSX_EJECT_COMMAND = ['diskutil', 'eject']
    #: Volume labels shown to the user for the storage locations
    MAIN_MEMORY_VOLUME_LABEL = ''
    STORAGE_CARD_VOLUME_LABEL = ''
    STORAGE_CARD2_VOLUME_LABEL = None
    #: Folders (relative to each storage root) where books are stored
    EBOOK_DIR_MAIN = ''
    EBOOK_DIR_CARD_A = ''
    EBOOK_DIR_CARD_B = ''
    #: Sidecar file extensions to delete along with a book
    DELETE_EXTS = []
    # USB disk-based devices can see the book files on the device, so can
    # copy these back to the library
    BACKLOADING_ERROR_MESSAGE = None
    #: The maximum length of paths created on the device
    MAX_PATH_LEN = 250
    #: Put news in its own folder
    NEWS_IN_FOLDER = True
def reset(self, key='-1', log_packets=False, report_progress=None,
detected_device=None):
self._main_prefix = self._card_a_prefix = self._card_b_prefix = None
self.detected_device = None if detected_device is None else USBDevice(detected_device)
self.set_progress_reporter(report_progress)
def set_progress_reporter(self, report_progress):
self.report_progress = report_progress
self.report_progress = report_progress
if self.report_progress is None:
self.report_progress = lambda x, y: x
def card_prefix(self, end_session=True):
return (self._card_a_prefix, self._card_b_prefix)
    @classmethod
    def _windows_space(cls, prefix):
        '''
        Return (total, available) bytes for the Windows drive at *prefix*,
        or (0, 0) if *prefix* is falsy. Retries once if the disk reports
        not-ready.
        '''
        if not prefix:
            return 0, 0
        # Strip the trailing path separator, e.g. 'K:\\' -> 'K:' — presumably
        # what get_disk_free_space expects; TODO confirm
        prefix = prefix[:-1]
        from calibre_extensions import winutil
        try:
            available_space, total_space, free_space = winutil.get_disk_free_space(prefix)
        except OSError as err:
            if err.winerror == winutil.ERROR_NOT_READY:
                # Disk not ready
                time.sleep(3)
                available_space, total_space, free_space = winutil.get_disk_free_space(prefix)
            else:
                raise
        return total_space, available_space
def total_space(self, end_session=True):
msz = casz = cbsz = 0
if not iswindows:
if self._main_prefix is not None:
stats = os.statvfs(self._main_prefix)
msz = stats.f_frsize * (stats.f_blocks + stats.f_bavail - stats.f_bfree)
if self._card_a_prefix is not None:
stats = os.statvfs(self._card_a_prefix)
casz = stats.f_frsize * (stats.f_blocks + stats.f_bavail - stats.f_bfree)
if self._card_b_prefix is not None:
stats = os.statvfs(self._card_b_prefix)
cbsz = stats.f_frsize * (stats.f_blocks + stats.f_bavail - stats.f_bfree)
else:
msz = self._windows_space(self._main_prefix)[0]
casz = self._windows_space(self._card_a_prefix)[0]
cbsz = self._windows_space(self._card_b_prefix)[0]
return (msz, casz, cbsz)
def free_space(self, end_session=True):
msz = casz = cbsz = 0
if not iswindows:
if self._main_prefix is not None:
stats = os.statvfs(self._main_prefix)
msz = stats.f_frsize * stats.f_bavail
if self._card_a_prefix is not None:
stats = os.statvfs(self._card_a_prefix)
casz = stats.f_frsize * stats.f_bavail
if self._card_b_prefix is not None:
stats = os.statvfs(self._card_b_prefix)
cbsz = stats.f_frsize * stats.f_bavail
else:
msz = self._windows_space(self._main_prefix)[1]
casz = self._windows_space(self._card_a_prefix)[1]
cbsz = self._windows_space(self._card_b_prefix)[1]
return (msz, casz, cbsz)
    def windows_filter_pnp_id(self, pnp_id):
        # Subclass hook: return True to ignore the drive whose Windows PnP id
        # is *pnp_id* during drive enumeration (see open_windows()).
        return False
    def windows_sort_drives(self, drives):
        '''
        Called to disambiguate main memory and storage card for devices that
        do not distinguish between them on the basis of `WINDOWS_CARD_NAME`.
        For example: The EB600

        *drives* maps 'main'/'carda'/'cardb' to drive roots; the default
        implementation returns it unchanged.
        '''
        return drives
    def can_handle_windows(self, usbdevice, debug=False):
        '''
        Windows variant of can_handle(): query the device's USB string
        descriptors (cached) and delegate to the generic can_handle().
        '''
        if hasattr(self.can_handle, 'is_base_class_implementation'):
            # No custom can_handle implementation
            return True
        # Delegate to the unix can_handle function, creating a unix like
        # USBDevice object
        from calibre.devices.winusb import get_usb_info
        dev = usb_info_cache.get(usbdevice)
        if dev is None:
            try:
                data = get_usb_info(usbdevice, debug=debug)
            except Exception:
                # Retry once after a short delay before giving up and using
                # empty strings
                time.sleep(0.1)
                try:
                    data = get_usb_info(usbdevice, debug=debug)
                except Exception:
                    data = {}
            dev = usb_info_cache[usbdevice] = namedtuple(
                'USBDevice', 'vendor_id product_id bcd manufacturer product serial')(
                usbdevice.vendor_id, usbdevice.product_id, usbdevice.bcd,
                data.get('manufacturer') or '', data.get('product') or '', data.get('serial_number') or '')
        if debug:
            prints(f'USB Info for device: {dev}')
        return self.can_handle(dev, debug=debug)
    def open_windows(self):
        '''
        Find the drive letters Windows assigned to this device, filter out
        unusable ones, and record them as main/card A/card B prefixes.
        '''
        from calibre.devices.scanner import drive_is_ok
        from calibre.devices.winusb import get_drive_letters_for_device
        usbdev = self.device_being_opened
        debug = is_debugging() or getattr(self, 'do_device_debug', False)
        try:
            dlmap = get_drive_letters_for_device(usbdev, debug=debug)
        except Exception:
            dlmap = {}
        if not dlmap.get('drive_letters'):
            # Give Windows time to assign drive letters, then retry once
            time.sleep(7)
            dlmap = get_drive_letters_for_device(usbdev, debug=debug)
        if debug:
            from pprint import pformat
            prints(f'Drive letters for {usbdev}')
            prints(pformat(dlmap))
        # Drop drives that are readonly, filtered by PnP id, or unreadable
        filtered = set()
        for dl in dlmap['drive_letters']:
            pnp_id = dlmap['pnp_id_map'][dl].upper()
            if dl in dlmap['readonly_drives']:
                filtered.add(dl)
                if debug:
                    prints('Ignoring the drive %s as it is readonly' % dl)
            elif self.windows_filter_pnp_id(pnp_id):
                filtered.add(dl)
                if debug:
                    prints(f'Ignoring the drive {dl} because of a PNP filter on {pnp_id}')
            elif not drive_is_ok(dl, debug=debug):
                filtered.add(dl)
                if debug:
                    prints('Ignoring the drive %s because failed to get free space for it' % dl)
        dlmap['drive_letters'] = [dl for dl in dlmap['drive_letters'] if dl not in filtered]
        if not dlmap['drive_letters']:
            raise DeviceError(_('Unable to detect any disk drives for the device: %s. Try rebooting') % self.get_gui_name())
        # First surviving letter is main memory, then card A, then card B
        drives = {}
        for drive_letter, which in zip(dlmap['drive_letters'], 'main carda cardb'.split()):
            drives[which] = drive_letter + ':\\'
        drives = self.windows_sort_drives(drives)
        self._main_prefix = drives.get('main')
        self._card_a_prefix = drives.get('carda', None)
        self._card_b_prefix = drives.get('cardb', None)
@classmethod
def run_ioreg(cls, raw=None):
if raw is not None:
return raw
ioreg = '/usr/sbin/ioreg'
if not os.access(ioreg, os.X_OK):
ioreg = 'ioreg'
cmd = (ioreg+' -w 0 -S -c IOMedia').split()
for i in range(3):
try:
return subprocess.Popen(cmd,
stdout=subprocess.PIPE).communicate()[0]
except OSError: # Probably an interrupted system call
if i == 2:
raise
time.sleep(2)
    def osx_sort_names(self, names):
        # Subclass hook: reorder the detected BSD drive-name mapping before
        # mount points are resolved in open_osx(). Default: unchanged.
        return names
@classmethod
def osx_run_mount(cls):
for i in range(3):
try:
return subprocess.Popen('mount',
stdout=subprocess.PIPE).communicate()[0]
except OSError: # Probably an interrupted system call
if i == 2:
raise
time.sleep(2)
    @classmethod
    def osx_get_usb_drives(cls):
        # Enumerate USB drives via the native usbobserver extension
        from calibre_extensions.usbobserver import get_usb_drives
        return get_usb_drives()
    def _osx_bsd_names(self):
        '''
        Return a mapping of 'main'/'carda'/'cardb' to the BSD device paths
        belonging to the detected device, matching by serial number first,
        then by strings, then by numeric ids only.
        '''
        drives = self.osx_get_usb_drives()
        matches = []
        d = self.detected_device
        if d.serial:
            for path, vid, pid, bcd, ven, prod, serial in drives:
                if d.match_serial(serial):
                    matches.append(path)
        if not matches and d.manufacturer and d.product:
            for path, vid, pid, bcd, man, prod, serial in drives:
                if d.match_strings(vid, pid, bcd, man, prod):
                    matches.append(path)
        if not matches:
            # Since Apple started mangling the names stored in the IOKit
            # registry, we cannot trust match_strings() so fallback to matching
            # on just numbers. See http://www.mobileread.com/forums/showthread.php?t=273213
            for path, vid, pid, bcd, man, prod, serial in drives:
                if d.match_numbers(vid, pid, bcd):
                    matches.append(path)
        if not matches:
            from pprint import pformat
            raise DeviceError(
                f'Could not detect BSD names for {self.name}. Try rebooting.\nOutput from osx_get_usb_drives():\n{pformat(drives)}')
        # Parses e.g. 'disk2s1' into disk number 2, partition number 1
        pat = re.compile(r'(?P<m>\d+)([a-z]+(?P<p>\d+)){0,1}')
        def nums(x):
            'Return (disk num, partition number)'
            m = pat.search(x)
            if m is None:
                return (10000, -1)
            g = m.groupdict()
            if g['p'] is None:
                g['p'] = 0
            return list(map(int, (g.get('m'), g.get('p'))))
        def cmp_key(x):
            '''
            Sorting based on the following scheme:
                - disks without partitions are first
                  - sub sorted based on disk number
                - disks with partitions are sorted first on
                  disk number, then on partition number
            '''
            x = x.rpartition('/')[-1]
            disk_num, part_num = nums(x)
            has_part = 1 if part_num > 0 else 0
            return has_part, disk_num, part_num
        matches.sort(key=cmp_key)
        # After sorting: first match is main memory, next two are the cards
        drives = {'main':matches[0]}
        if len(matches) > 1:
            drives['carda'] = matches[1]
        if len(matches) > 2:
            drives['cardb'] = matches[2]
        return drives
    def osx_bsd_names(self):
        '''
        Retry wrapper around _osx_bsd_names(): retries up to three times,
        returning early once more than one drive is seen, otherwise returning
        whatever was found on the last attempt.
        '''
        drives = {}
        for i in range(3):
            try:
                drives = self._osx_bsd_names()
                if len(drives) > 1:  # wait for device to settle and SD card (if any) to become available
                    return drives
            except Exception:
                # Only sleep on failure; a successful-but-single result
                # retries immediately
                if i == 2:
                    raise
                time.sleep(3)
        return drives
    def open_osx(self):
        '''
        Map the detected BSD device names to their mount points and record
        them as main/card A/card B prefixes (each ending in os.sep).
        '''
        from calibre_extensions.usbobserver import get_mounted_filesystems
        bsd_drives = self.osx_bsd_names()
        drives = self.osx_sort_names(bsd_drives.copy())
        mount_map = get_mounted_filesystems()
        # macOS 13 Ventura uses a weird scheme for mounted FAT devices of the
        # form fat://basename_of_bsd_name/basename_of_mountpoint
        # see https://www.mobileread.com/forums/showthread.php?t=347294
        for dev_node in tuple(mount_map):
            if ':' in dev_node and '//' in dev_node:
                val = mount_map[dev_node]
                dev_node = dev_node.split('/')[-2]
                dev_node = f'/dev/{dev_node}'
                if dev_node not in mount_map:
                    mount_map[dev_node] = val
        drives = {k: mount_map.get(v) for k, v in iteritems(drives)}
        if is_debugging():
            print()
            from pprint import pprint
            pprint({'bsd_drives': bsd_drives, 'mount_map': mount_map, 'drives': drives})
        # Shuffle entries down so that 'main' is always populated if any
        # location mounted at all
        if drives.get('carda') is None and drives.get('cardb') is not None:
            drives['carda'] = drives.pop('cardb')
        if drives.get('main') is None and drives.get('carda') is not None:
            drives['main'] = drives.pop('carda')
        if drives.get('carda') is None and drives.get('cardb') is not None:
            drives['carda'] = drives.pop('cardb')
        if drives.get('main') is None:
            raise DeviceError(_('Unable to detect the %s mount point. Try rebooting.')%self.__class__.__name__)
        # If the driver declares a pattern for the main-memory mount point,
        # swap main with whichever card actually matches it
        pat = self.OSX_MAIN_MEM_VOL_PAT
        if pat is not None and len(drives) > 1 and 'main' in drives:
            if pat.search(drives['main']) is None:
                main = drives['main']
                for x in ('carda', 'cardb'):
                    if x in drives and pat.search(drives[x]):
                        drives['main'] = drives.pop(x)
                        drives[x] = main
                        break
        self._main_prefix = drives['main']+os.sep
        def get_card_prefix(c):
            ans = drives.get(c, None)
            if ans is not None:
                ans += os.sep
            return ans
        self._card_a_prefix = get_card_prefix('carda')
        self._card_b_prefix = get_card_prefix('cardb')
    def find_device_nodes(self, detected_device=None):
        '''
        Walk /sys/devices looking for the USB device matching the detected
        vendor/product/bcd ids, then return up to three /dev/ block-device
        nodes (largest partition per disk) as [main, carda, cardb]; missing
        entries are None.
        '''
        def walk(base):
            # Recursively yield (path, isfile) for readable, non-symlink
            # entries under base
            base = os.path.abspath(os.path.realpath(base))
            for x in os.listdir(base):
                p = os.path.join(base, x)
                if os.path.islink(p) or not os.access(p, os.R_OK):
                    continue
                isfile = os.path.isfile(p)
                yield p, isfile
                if not isfile:
                    yield from walk(p)
        def raw2num(raw):
            # sysfs id files contain bare hex without the 0x prefix
            raw = raw.lower()
            if not raw.startswith('0x'):
                raw = '0x' + raw
            return int(raw, 16)
        # Find device node based on vendor, product and bcd
        d, j = os.path.dirname, os.path.join
        usb_dir = None
        if detected_device is None:
            detected_device = self.detected_device
        def test(val, attr):
            q = getattr(detected_device, attr)
            return q == val
        def getnum(usb_dir):
            def rc(q):
                with open(j(usb_dir, q), 'rb') as f:
                    return raw2num(f.read().decode('utf-8'))
            return rc
        for x, isfile in walk('/sys/devices'):
            if isfile and x.endswith('idVendor'):
                usb_dir = d(x)
                for y in ('idProduct', 'idVendor', 'bcdDevice'):
                    if not os.access(j(usb_dir, y), os.R_OK):
                        usb_dir = None
                        break
                if usb_dir is None:
                    continue
                ven, prod, bcd = map(getnum(usb_dir), ('idVendor', 'idProduct', 'bcdDevice'))
                if not (test(ven, 'idVendor') and test(prod, 'idProduct') and
                        test(bcd, 'bcdDevice')):
                    usb_dir = None
                    continue
                else:
                    break
        if usb_dir is None:
            raise DeviceError(_('Unable to detect the %s disk drive.')
                    %self.__class__.__name__)
        # Collect block device nodes under the matched USB directory; a node
        # is usable (ok[node] True) only if its size file reads as > 0
        devnodes, ok = [], {}
        for x, isfile in walk(usb_dir):
            if not isfile and '/block/' in x:
                parts = x.split('/')
                idx = parts.index('block')
                if idx == len(parts)-2:
                    sz = j(x, 'size')
                    node = parts[idx+1]
                    try:
                        with open(sz, 'rb') as szf:
                            exists = int(szf.read().decode('utf-8')) > 0
                            if exists:
                                node = self.find_largest_partition(x)
                                ok[node] = True
                            else:
                                ok[node] = False
                    except:
                        ok[node] = False
                    if is_debugging() and not ok[node]:
                        print(f'\nIgnoring the node: {node} as could not read size from: {sz}')
                    devnodes.append(node)
        # Pad to at least three entries, then keep usable nodes sorted by name
        devnodes += list(repeat(None, 3))
        ans = ['/dev/'+x if ok.get(x, False) else None for x in devnodes]
        ans.sort(key=lambda x: x[5:] if x else 'zzzzz')
        return self.linux_swap_drives(ans[:3])
    def linux_swap_drives(self, drives):
        # Subclass hook: reorder the [main, carda, cardb] device node list
        # returned by find_device_nodes(). Default: unchanged.
        return drives
    def node_mountpoint(self, node):
        # Ask UDisks where the device *node* is mounted; returns None when it
        # is not mounted
        from calibre.devices.udisks import node_mountpoint
        return node_mountpoint(node)
def find_largest_partition(self, path):
node = path.split('/')[-1]
nodes = []
for x in glob.glob(path+'/'+node+'*'):
sz = x + '/size'
if not os.access(sz, os.R_OK):
continue
try:
with open(sz, 'rb') as szf:
sz = int(szf.read().decode('utf-8'))
except:
continue
if sz > 0:
nodes.append((x.split('/')[-1], sz))
nodes.sort(key=lambda x: x[1])
if not nodes:
return node
return nodes[-1][0]
    def open_linux(self):
        '''
        Mount the detected device nodes via UDisks and record the mount
        points (each ending in '/') as main/card A/card B prefixes, then
        discard any read-only locations.
        '''
        def mount(node, type):
            # Return (mountpoint, 0) on success or (None, errcode) on failure;
            # reuses an existing mount when the node is already mounted
            mp = self.node_mountpoint(node)
            if mp is not None:
                return mp, 0
            def do_mount(node):
                try:
                    from calibre.devices.udisks import mount
                    mount(node)
                    return 0
                except:
                    print('Udisks mount call failed:')
                    import traceback
                    traceback.print_exc()
                    return 1
            ret = do_mount(node)
            if ret != 0:
                return None, ret
            return self.node_mountpoint(node)+'/', 0
        main, carda, cardb = self.find_device_nodes()
        if main is None:
            raise DeviceError(_('Unable to detect the %s disk drive. Either '
                'the device has already been ejected, or your '
                'kernel is exporting a deprecated version of SYSFS.')
                    %self.__class__.__name__)
        if is_debugging():
            print('\nFound device nodes:', main, carda, cardb)
        self._linux_mount_map = {}
        mp, ret = mount(main, 'main')
        if mp is None:
            raise DeviceError(
                    _('Unable to mount main memory (Error code: %d)')%ret)
        if not mp.endswith('/'):
            mp += '/'
        self._linux_mount_map[main] = mp
        self._main_prefix = mp
        self._linux_main_device_node = main
        # Card mounts are best-effort: a failure is reported but not fatal
        cards = [(carda, '_card_a_prefix', 'carda'),
                 (cardb, '_card_b_prefix', 'cardb')]
        for card, prefix, typ in cards:
            if card is None:
                continue
            mp, ret = mount(card, typ)
            if mp is None:
                print('Unable to mount card (Error code: %d)'%ret, file=sys.stderr)
            else:
                if not mp.endswith('/'):
                    mp += '/'
                setattr(self, prefix, mp)
                self._linux_mount_map[card] = mp
        self.filter_read_only_mount_points()
    def filter_read_only_mount_points(self):
        '''
        Drop any mounted location that is read-only (detected by attempting
        to create a test file), promoting a writable card to main memory if
        needed. Raises DeviceError if nothing writable remains.
        '''
        def is_readonly(mp):
            # A location is writable iff we can create (and remove) a file in it
            if mp is None:
                return True
            path = os.path.join(mp, 'calibre_readonly_test')
            ro = True
            try:
                with open(path, 'wb'):
                    ro = False
            except:
                pass
            else:
                try:
                    os.remove(path)
                except:
                    pass
            if is_debugging() and ro:
                print('\nThe mountpoint', mp, 'is readonly, ignoring it')
            return ro
        for mp in ('_main_prefix', '_card_a_prefix', '_card_b_prefix'):
            if is_readonly(getattr(self, mp, None)):
                setattr(self, mp, None)
        # Promote the first writable card to main memory if main is gone
        if self._main_prefix is None:
            for p in ('_card_a_prefix', '_card_b_prefix'):
                nmp = getattr(self, p, None)
                if nmp is not None:
                    self._main_prefix = nmp
                    setattr(self, p, None)
                    break
        if self._main_prefix is None:
            raise DeviceError(_('The main memory of %s is read only. '
                'This usually happens because of file system errors.')
                    %self.__class__.__name__)
        # Keep card slots contiguous: card B alone becomes card A
        if self._card_a_prefix is None and self._card_b_prefix is not None:
            self._card_a_prefix = self._card_b_prefix
            self._card_b_prefix = None
# ------------------------------------------------------
#
# open for FreeBSD
# find the device node or nodes that match the S/N we already have from the scanner
# and attempt to mount each one
# 1. get list of devices in /dev with matching s/n etc.
# 2. get list of volumes associated with each
# 3. attempt to mount each one using Hal
# 4. when finished, we have a list of mount points and associated dbus nodes
#
    def open_freebsd(self):
        '''
        FreeBSD: find this device's volumes via HAL (matched by serial
        number) and mount them, storing the resulting prefix/vol attributes
        on self.
        '''
        # There should be some way to access the -v arg...
        verbose = False
        # this gives us access to the S/N, etc. of the reader that the scanner has found
        # and the match routines for some of that data, like s/n, vendor ID, etc.
        d=self.detected_device
        if not d.serial:
            raise DeviceError("Device has no S/N. Can't continue")
        from .hal import get_hal
        hal = get_hal()
        vols = hal.get_volumes(d)
        if verbose:
            print("FBSD: ", vols)
        ok, mv = hal.mount_volumes(vols)
        if not ok:
            raise DeviceError(_('Unable to mount the device'))
        # mv maps attribute names (_main_prefix, _main_vol, ...) to values
        for k, v in mv.items():
            setattr(self, k, v)
#
# ------------------------------------------------------
#
# this one is pretty simple:
# just umount each of the previously
# mounted filesystems, using the stored volume object
#
def eject_freebsd(self):
from .hal import get_hal
hal = get_hal()
if self._main_prefix:
hal.unmount(self._main_vol)
if self._card_a_prefix:
hal.unmount(self._card_a_vol)
if self._card_b_prefix:
hal.unmount(self._card_b_vol)
self._main_prefix = self._main_vol = None
self._card_a_prefix = self._card_a_vol = None
self._card_b_prefix = self._card_b_vol = None
# ------------------------------------------------------
    def open(self, connected_device, library_uuid):
        '''
        Open the connected device: wait for the OS to settle, then dispatch
        to the platform-specific open_*() method (with one retry on
        DeviceError for the platforms that need it).
        '''
        # Give the OS time to finish enumerating/mounting the device
        time.sleep(5)
        self._main_prefix = self._card_a_prefix = self._card_b_prefix = None
        self.device_being_opened = connected_device
        try:
            if islinux:
                try:
                    self.open_linux()
                except DeviceError:
                    time.sleep(7)
                    self.open_linux()
            if isfreebsd:
                self._main_vol = self._card_a_vol = self._card_b_vol = None
                try:
                    self.open_freebsd()
                except DeviceError:
                    time.sleep(2)
                    self.open_freebsd()
            if iswindows:
                self.open_windows()
            if ismacos:
                try:
                    self.open_osx()
                except DeviceError:
                    time.sleep(7)
                    self.open_osx()
            self.current_library_uuid = library_uuid
            self.post_open_callback()
        finally:
            self.device_being_opened = None
    def post_open_callback(self):
        # Hook called at the end of open(); subclasses may override to do
        # device-specific initialization once all prefixes are set.
        pass
    def eject_windows(self):
        '''
        Eject all of this device's drives by running the calibre-eject.exe
        helper in a background thread (so the GUI is not blocked).
        '''
        from threading import Thread
        drives = []
        for x in ('_main_prefix', '_card_a_prefix', '_card_b_prefix'):
            x = getattr(self, x, None)
            if x is not None:
                # The helper takes bare drive letters, e.g. 'K'
                drives.append(x[0].upper())
        def do_it(drives):
            subprocess.Popen([eject_exe()] + drives, creationflags=subprocess.CREATE_NO_WINDOW).wait()
        t = Thread(target=do_it, args=[drives])
        t.daemon = True
        t.start()
        # Keep a reference to the worker thread on self (name-mangled to
        # _Device__save_win_eject_thread) — presumably so callers can find it;
        # TODO confirm why it is saved
        self.__save_win_eject_thread = t
def eject_osx(self):
for x in ('_main_prefix', '_card_a_prefix', '_card_b_prefix'):
x = getattr(self, x, None)
if x is not None:
try:
subprocess.Popen(self.OSX_EJECT_COMMAND + [x])
except:
pass
def eject_linux(self):
from calibre.devices.udisks import eject, umount
drives = [d for d in self.find_device_nodes() if d]
for d in drives:
try:
umount(d)
except:
pass
for d in drives:
try:
eject(d)
except Exception as e:
print('Udisks eject call for:', d, 'failed:')
print('\t', e)
def eject(self):
if islinux:
try:
self.eject_linux()
except:
pass
if isfreebsd:
try:
self.eject_freebsd()
except:
pass
if iswindows:
try:
self.eject_windows()
except:
pass
if ismacos:
try:
self.eject_osx()
except:
pass
self._main_prefix = self._card_a_prefix = self._card_b_prefix = None
    def linux_post_yank(self):
        # Forget cached node -> mountpoint entries after the device was
        # yanked (unplugged without ejecting)
        self._linux_mount_map = {}
def post_yank_cleanup(self):
if islinux:
try:
self.linux_post_yank()
except:
import traceback
traceback.print_exc()
self._main_prefix = self._card_a_prefix = self._card_b_prefix = None
    def get_main_ebook_dir(self, for_upload=False):
        # Folder (relative to the main memory root) where books are stored
        return self.EBOOK_DIR_MAIN
    def get_carda_ebook_dir(self, for_upload=False):
        # Folder (relative to the card A root) where books are stored
        return self.EBOOK_DIR_CARD_A
    def get_cardb_ebook_dir(self, for_upload=False):
        # Folder (relative to the card B root) where books are stored
        return self.EBOOK_DIR_CARD_B
    def _sanity_check(self, on_card, files):
        '''
        Validate an upload request (delegated to devices.utils.sanity_check)
        and return the destination directory on the requested storage
        location for *files*.
        '''
        from calibre.devices.utils import sanity_check
        sanity_check(on_card, files, self.card_prefix(), self.free_space())
        def get_dest_dir(prefix, candidates):
            # Pick the first candidate folder that already exists under
            # prefix, falling back to the first candidate overall
            if isinstance(candidates, string_or_bytes):
                candidates = [candidates]
            if not candidates:
                candidates = ['']
            candidates = [
                ((os.path.join(prefix, *(x.split('/')))) if x else prefix)
                for x in candidates]
            existing = [x for x in candidates if os.path.exists(x)]
            if not existing:
                existing = candidates
            return existing[0]
        if on_card == 'carda':
            candidates = self.get_carda_ebook_dir(for_upload=True)
            path = get_dest_dir(self._card_a_prefix, candidates)
        elif on_card == 'cardb':
            candidates = self.get_cardb_ebook_dir(for_upload=True)
            path = get_dest_dir(self._card_b_prefix, candidates)
        else:
            candidates = self.get_main_ebook_dir(for_upload=True)
            path = get_dest_dir(self._main_prefix, candidates)
        return path
    def sanitize_callback(self, path):
        '''
        Callback to allow individual device drivers to override the path sanitization
        used by :meth:`create_upload_path`. Default: ASCII-sanitize the path.
        '''
        return sanitize(path)
    def filename_callback(self, default, mi):
        '''
        Callback to allow drivers to change the default file name
        set by :meth:`create_upload_path`. Default: return it unchanged.
        '''
        return default
    def sanitize_path_components(self, components):
        '''
        Perform any device specific sanitization on the path components
        for files to be uploaded to the device. Default: unchanged.
        '''
        return components
    def get_annotations(self, path_map):
        '''
        Resolve path_map to annotation_map of files found on the device.
        Default: no annotations.
        '''
        return {}
    def add_annotation_to_library(self, db, db_id, annotation):
        '''
        Add an annotation to the calibre library. Default: no-op; subclasses
        that support annotations override this.
        '''
        pass
    def create_upload_path(self, path, mdata, fname, create_dirs=True):
        '''
        Compute (and optionally create the directories for) the on-device
        path where the file *fname* with metadata *mdata* should be stored,
        using the driver's save template and sanitization hooks.
        '''
        from calibre.devices.utils import create_upload_path
        settings = self.settings()
        filepath = create_upload_path(mdata, fname, self.save_template(), self.sanitize_callback,
                prefix_path=os.path.abspath(path),
                maxlen=self.MAX_PATH_LEN,
                use_subdirs=self.SUPPORTS_SUB_DIRS and settings.use_subdirs,
                news_in_folder=self.NEWS_IN_FOLDER,
                filename_callback=self.filename_callback,
                sanitize_path_components=self.sanitize_path_components
                )
        filedir = os.path.dirname(filepath)
        if create_dirs and not os.path.exists(filedir):
            os.makedirs(filedir)
        return filepath
    def create_annotations_path(self, mdata, device_path=None):
        # Annotation paths are computed under a synthetic '/<storage>' root
        # with a fixed '.bookmark' file name; no directories are created here
        return self.create_upload_path(os.path.abspath('/<storage>'), mdata, 'x.bookmark', create_dirs=False)
| 32,985 | Python | .py | 783 | 30.337165 | 132 | 0.548521 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |