id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
27,200 | simple_worker.py | kovidgoyal_calibre/src/calibre/utils/ipc/simple_worker.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import importlib
import os
import time
import traceback
from multiprocessing import Pipe
from threading import Thread
from calibre.constants import iswindows
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.ipc.launch import Worker
from calibre.utils.monotonic import monotonic
from polyglot.builtins import environ_item, string_or_bytes
if iswindows:
from multiprocessing.connection import PipeConnection as Connection
else:
from multiprocessing.connection import Connection
class WorkerError(Exception):
    '''Raised when a worker process fails or cannot be communicated with.'''

    def __init__(self, msg, orig_tb='', log_path=None):
        super().__init__(msg)
        # Formatted traceback captured in the worker process, if any
        self.orig_tb = orig_tb
        # Path to the worker's log file, filled in by callers such as fork_job
        self.log_path = log_path
class ConnectedWorker(Thread):
    '''Daemon thread that sends *args* to a worker over *conn* and waits for the reply.'''

    def __init__(self, conn, args):
        super().__init__()
        self.daemon = True
        self.conn = conn
        self.args = args
        self.accepted = False   # set once the thread begins talking to the worker
        self.tb = None          # formatted traceback if communication failed
        self.res = None         # the worker's reply, once received

    def run(self):
        self.accepted = True
        connection = self.conn
        with connection:
            try:
                eintr_retry_call(connection.send, self.args)
                self.res = eintr_retry_call(connection.recv)
            except BaseException:
                self.tb = traceback.format_exc()
class OffloadWorker:
    '''Wrapper around a long-lived worker process that executes functions on demand.'''

    def __init__(self, conn, worker):
        self.conn = conn
        self.worker = worker
        # Thread used to kill the worker process asynchronously at shutdown
        self.kill_thread = kt = Thread(target=self.worker.kill)
        kt.daemon = True

    def __call__(self, module, func, *args, **kwargs):
        # Ask the worker to run module.func(*args, **kwargs) and wait for the reply
        eintr_retry_call(self.conn.send, (module, func, args, kwargs))
        return eintr_retry_call(self.conn.recv)

    def shutdown(self):
        # A payload of None tells the worker loop to exit
        try:
            eintr_retry_call(self.conn.send, None)
        except OSError:
            pass
        except:
            import traceback
            traceback.print_exc()
        finally:
            self.conn = None
            try:
                os.remove(self.worker.log_path)
            except:
                pass
            self.kill_thread.start()

    def is_alive(self):
        return self.worker.is_alive or self.kill_thread.is_alive()
def communicate(ans, worker, conn, args, timeout=300, heartbeat=None,
        abort=None):
    # Send ``args`` to the worker over ``conn`` and store its reply in
    # ``ans['result']``. Raises WorkerError on connection failure, job
    # failure or a hang. ``worker`` is only used for liveness checks;
    # killing it is left to the caller (fork_job/run_job).
    cw = ConnectedWorker(conn, args)
    cw.start()
    st = monotonic()
    check_heartbeat = callable(heartbeat)
    while worker.is_alive and cw.is_alive():
        cw.join(0.01)
        delta = monotonic() - st
        # If the communication thread has not even started within 10 seconds
        # (or ``timeout``, if smaller), give up on connecting
        if not cw.accepted and delta > min(10, timeout):
            break
        # With a heartbeat callable, the worker is considered hung when it
        # returns False; otherwise a simple elapsed-time timeout applies
        hung = not heartbeat() if check_heartbeat else delta > timeout
        if hung:
            raise WorkerError('Worker appears to have hung')
        if abort is not None and abort.is_set():
            # The worker process will be killed by fork_job, after we return
            return
    if not cw.accepted:
        if not cw.tb:
            raise WorkerError('Failed to connect to worker process')
        raise WorkerError('Failed to connect to worker process', cw.tb)
    if cw.tb:
        raise WorkerError('Failed to communicate with worker process', cw.tb)
    if cw.res is None:
        raise WorkerError('Something strange happened. The worker process was aborted without an exception.')
    if cw.res.get('tb', None):
        # The job itself raised inside the worker
        raise WorkerError('Worker failed', cw.res['tb'])
    ans['result'] = cw.res['result']
def create_worker(env, priority='normal', cwd=None, func='main'):
    '''Launch a worker process running ``func`` from this module.

    Returns ``(conn, worker)`` where ``conn`` is our end of a duplex Pipe
    connected to the worker and ``worker`` is the launched Worker object.
    '''
    env = dict(env)
    child_end, parent_end = Pipe()
    with child_end:
        env.update({
            'CALIBRE_WORKER_FD': str(child_end.fileno()),
            'CALIBRE_SIMPLE_WORKER': environ_item('calibre.utils.ipc.simple_worker:%s' % func),
        })
        worker = Worker(env)
        worker(cwd=cwd, priority=priority, pass_fds=(child_end.fileno(),))
    return parent_end, worker
def start_pipe_worker(command, env=None, priority='normal', **process_args):
    # Launch a calibre-parallel process that executes the python statements
    # in ``command`` (--pipe-worker mode), with stdin/stdout connected to
    # pipes. Extra keyword args are passed through to subprocess.Popen.
    # Returns the Popen object.
    import subprocess
    w = Worker(env or {})
    args = {'stdout':subprocess.PIPE, 'stdin':subprocess.PIPE, 'env':w.env, 'close_fds': True}
    args.update(process_args)
    pass_fds = None
    try:
        if iswindows:
            priority = {
                'high' : subprocess.HIGH_PRIORITY_CLASS,
                'normal' : subprocess.NORMAL_PRIORITY_CLASS,
                'low' : subprocess.IDLE_PRIORITY_CLASS}[priority]
            args['creationflags'] = subprocess.CREATE_NO_WINDOW|priority
            pass_fds = args.pop('pass_fds', None)
            if pass_fds:
                # On Windows, handles are passed via the startupinfo handle
                # list and must be marked inheritable for the launch
                for fd in pass_fds:
                    os.set_handle_inheritable(fd, True)
                args['startupinfo'] = subprocess.STARTUPINFO(lpAttributeList={'handle_list':pass_fds})
        else:
            # On Unix, priority is applied by the worker itself via niceness
            niceness = {'normal' : 0, 'low' : 10, 'high' : 20}[priority]
            args['env']['CALIBRE_WORKER_NICENESS'] = str(niceness)
        exe = w.executable
        cmd = [exe] if isinstance(exe, string_or_bytes) else exe
        p = subprocess.Popen(cmd + ['--pipe-worker', command], **args)
    finally:
        # Restore non-inheritability whether or not the launch succeeded
        if iswindows and pass_fds:
            for fd in pass_fds:
                os.set_handle_inheritable(fd, False)
    return p
def two_part_fork_job(env=None, priority='normal', cwd=None):
    # First part of fork_job(): launch the worker process immediately and
    # return a run_job() callable that performs the actual job later. This
    # lets callers overlap worker startup with other work. The launched
    # Worker is exposed as run_job.worker.
    env = env or {}
    conn, w = create_worker(env, priority, cwd)

    def run_job(
        mod_name, func_name, args=(), kwargs=None, timeout=300,  # seconds
        no_output=False, heartbeat=None, abort=None, module_is_source_code=False
    ):
        ans = {'result':None, 'stdout_stderr':None}
        kwargs = kwargs or {}
        try:
            communicate(ans, w, conn, (mod_name, func_name, args, kwargs,
                module_is_source_code), timeout=timeout, heartbeat=heartbeat,
                abort=abort)
        except WorkerError as e:
            if not no_output:
                e.log_path = w.log_path
            raise
        finally:
            # Kill the worker in the background: at this point it has either
            # finished, failed, hung or been aborted
            t = Thread(target=w.kill)
            t.daemon=True
            t.start()
            if no_output:
                try:
                    os.remove(w.log_path)
                except:
                    pass
        if not no_output:
            ans['stdout_stderr'] = w.log_path
        return ans

    run_job.worker = w
    return run_job
def fork_job(mod_name, func_name, args=(), kwargs=None, timeout=300,  # seconds
        cwd=None, priority='normal', env={}, no_output=False, heartbeat=None,
        abort=None, module_is_source_code=False):
    '''
    Run a job in a worker process. A job is simply a function that will be
    called with the supplied arguments, in the worker process.
    The result of the function will be returned.
    If an error occurs a WorkerError is raised.

    :param mod_name: Module to import in the worker process
    :param func_name: Function to call in the worker process from the imported
        module
    :param args: Positional arguments to pass to the function
    :param kwargs: Keyword arguments to pass to the function
    :param timeout: Seconds to wait for the worker to finish; if exceeded the
        worker is killed and a WorkerError is raised
    :param cwd: Working directory for the worker process. Best avoided unless
        the path is pure ASCII.
    :param priority: The process priority for the worker process
    :param env: Extra environment variables to set for the worker process
    :param no_output: If True, the worker's stdout and stderr are discarded
    :param heartbeat: Optional callable (no arguments, returns True/False)
        used instead of the simple timeout to decide whether the worker has
        hung; once it returns False the worker is killed and a WorkerError
        is raised
    :param abort: Optional Event. As soon as abort.is_set() returns True the
        worker process is killed; no error is raised.
    :param module_is_source_code: If True, ``mod_name`` is treated as python
        source to execute as a module rather than a module name to import.
        Useful for running dynamically generated python.
    :return: Dictionary with the keys ``result`` (the function's picklable
        return value) and, unless no_output is True, ``stdout_stderr`` (path
        to a file holding the worker's output)
    '''
    run_job = two_part_fork_job(env, priority, cwd)
    return run_job(
        mod_name, func_name, args=args, kwargs=kwargs, timeout=timeout,
        no_output=no_output, heartbeat=heartbeat, abort=abort,
        module_is_source_code=module_is_source_code)
def offload_worker(env={}, priority='normal', cwd=None):
    '''Create an OffloadWorker backed by a freshly launched worker process.'''
    connection, worker = create_worker(env=env, priority=priority, cwd=cwd, func='offload')
    return OffloadWorker(connection, worker)
def compile_code(src):
    '''Execute python source ``src`` as a module and return its namespace dict.

    ``src`` may be bytes, in which case a PEP 263 coding declaration in the
    first 200 bytes is honoured when decoding (defaulting to UTF-8).
    '''
    import io
    import re
    if not isinstance(src, str):
        m = re.search(br'coding[:=]\s*([-\w.]+)', src[:200])
        encoding = m.group(1).decode('utf-8') if m else 'utf-8'
        src = src.decode(encoding)
    # Python complains if there is a coding declaration in a unicode string
    src = re.sub(r'^#.*coding\s*[:=]\s*([-\w.]+)', '#', src, flags=re.MULTILINE)
    # Normalize all newline conventions to \n
    src = io.StringIO(src, newline=None).getvalue()
    namespace = {
        'time':time, 're':re, 'os':os, 'io':io,
    }
    exec(src, namespace)
    return namespace
def main():
    # The entry point for the simple worker process: receive one job
    # description over the inherited pipe, run it, and send back either
    # {'result': ...} or {'tb': traceback-text}.
    with Connection(int(os.environ['CALIBRE_WORKER_FD'])) as conn:
        args = eintr_retry_call(conn.recv)
        try:
            mod, func, args, kwargs, module_is_source_code = args
            if module_is_source_code:
                importlib.import_module('calibre.customize.ui')  # Load plugins
                mod = compile_code(mod)
                func = mod[func]
            else:
                try:
                    mod = importlib.import_module(mod)
                except ImportError:
                    # The module may be provided by a plugin, so load
                    # plugins and retry
                    importlib.import_module('calibre.customize.ui')  # Load plugins
                    mod = importlib.import_module(mod)
                func = getattr(mod, func)
            res = {'result':func(*args, **kwargs)}
        except:
            # Report any failure back to the parent as a formatted traceback
            res = {'tb': traceback.format_exc()}

        try:
            conn.send(res)
        except:
            # Maybe EINTR
            conn.send(res)
def offload():
    # The entry point for the offload worker process: serve
    # (module, func, args, kwargs) requests over the control connection in a
    # loop until a None payload arrives. Resolved functions are cached
    # across requests.
    func_cache = {}
    with Connection(int(os.environ['CALIBRE_WORKER_FD'])) as conn:
        while True:
            args = eintr_retry_call(conn.recv)
            if args is None:
                # Shutdown requested
                break
            res = {'result':None, 'tb':None}
            try:
                mod, func, args, kwargs = args
                if mod is None:
                    # No-op request: reply with the empty result
                    eintr_retry_call(conn.send, res)
                    continue
                f = func_cache.get((mod, func), None)
                if f is None:
                    try:
                        m = importlib.import_module(mod)
                    except ImportError:
                        importlib.import_module('calibre.customize.ui')  # Load plugins
                        m = importlib.import_module(mod)
                    func_cache[(mod, func)] = f = getattr(m, func)
                res['result'] = f(*args, **kwargs)
            except:
                import traceback
                res['tb'] = traceback.format_exc()

            eintr_retry_call(conn.send, res)
| 11,775 | Python | .py | 281 | 32.619217 | 109 | 0.61116 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,201 | worker.py | kovidgoyal_calibre/src/calibre/utils/ipc/worker.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import importlib
import os
import sys
from threading import Thread
from zipimport import ZipImportError
from calibre import prints
from calibre.constants import ismacos, iswindows
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.serialize import pickle_dumps
from polyglot.binary import from_hex_unicode
from polyglot.queue import Queue
if iswindows:
from multiprocessing.connection import PipeConnection as Connection
else:
from multiprocessing.connection import Connection
# Registry of named jobs runnable in a worker process. Maps job name ->
# (module, function, notification) where ``notification`` is the name of the
# keyword argument used to deliver progress callbacks to the function, or
# None if the function does not report progress.
PARALLEL_FUNCS = {
    'lrfviewer'    :
    ('calibre.gui2.lrf_renderer.main', 'main', None),

    'ebook-viewer'    :
    ('calibre.gui_launch', 'ebook_viewer', None),

    'ebook-edit'    :
    ('calibre.gui_launch', 'gui_ebook_edit', None),

    'store-dialog'    :
    ('calibre.gui_launch', 'store_dialog', None),

    'toc-dialog'    :
    ('calibre.gui_launch', 'toc_dialog', None),

    'webengine-dialog'    :
    ('calibre.gui_launch', 'webengine_dialog', None),

    'render_pages'    :
    ('calibre.ebooks.comic.input', 'render_pages', 'notification'),

    'gui_convert'     :
    ('calibre.gui2.convert.gui_conversion', 'gui_convert', 'notification'),

    'gui_convert_recipe'     :
    ('calibre.gui2.convert.gui_conversion', 'gui_convert_recipe', 'notification'),

    'gui_polish'     :
    ('calibre.ebooks.oeb.polish.main', 'gui_polish', None),

    'gui_convert_override'     :
    ('calibre.gui2.convert.gui_conversion', 'gui_convert_override', 'notification'),

    'gui_catalog'     :
    ('calibre.gui2.convert.gui_conversion', 'gui_catalog', 'notification'),

    'arbitrary' :
    ('calibre.utils.ipc.worker', 'arbitrary', None),

    'arbitrary_n' :
    ('calibre.utils.ipc.worker', 'arbitrary_n', 'notification'),
}
class Progress(Thread):
    '''Daemon thread that forwards progress notifications to the parent.

    Worker code calls the instance like a function to queue a
    (percent, message) pair; the thread drains the queue and sends each pair
    over the connection. Queueing None terminates the thread.
    '''

    def __init__(self, conn):
        super().__init__()
        self.daemon = True
        self.conn = conn
        self.queue = Queue()

    def __call__(self, percent, msg=''):
        self.queue.put((percent, msg))

    def run(self):
        while True:
            item = self.queue.get()
            if item is None:
                break
            try:
                eintr_retry_call(self.conn.send, item)
            except:
                break
def arbitrary(module_name, func_name, args, kwargs={}):
    '''
    An entry point that allows arbitrary functions to be run in a parallel
    process, useful for plugin developers that want to run jobs in a
    parallel process.

    Create a ParallelJob with the module and function names for the real
    entry point; args and kwargs must be serializable, so use only basic
    types. For example::

        from calibre.gui2 import Dispatcher
        gui.job_manager.run_job(Dispatcher(job_done), 'arbitrary',
                args=('calibre_plugins.myplugin.worker', 'do_work',
                    ('arg1' 'arg2', 'arg3')),
                description='Change the world')

    The function job_done will be called on completion; see the code in
    gui2.actions.catalog for an example of using run_job and Dispatcher.

    :param module_name: Fully qualified name of the module containing the
        function to run, e.g. calibre_plugins.myplugin.worker
    :param func_name: Name of the function to run
    :param args: Positional arguments (list or tuple) for the function
    :param kwargs: Keyword arguments for the function
    '''
    if module_name.startswith('calibre_plugins'):
        # Initialize the plugin loader by doing this dummy import
        from calibre.customize.ui import find_plugin
        find_plugin
    mod = importlib.import_module(module_name)
    target = getattr(mod, func_name)
    return target(*args, **kwargs)
def arbitrary_n(module_name, func_name, args, kwargs={},
        notification=lambda x, y: y):
    '''
    Same as :func:`arbitrary` above, except that func_name must support a
    keyword argument "notification". This will be a function that accepts two
    arguments. func_name should call it periodically with progress information.
    The first argument is a float between 0 and 1 that represent percent
    completed and the second is a string with a message (it can be an empty
    string).
    '''
    if module_name.startswith('calibre_plugins'):
        # Initialize the plugin loader by doing this dummy import
        from calibre.customize.ui import find_plugin
        find_plugin
    module = importlib.import_module(module_name)
    func = getattr(module, func_name)
    # Copy before inserting the notification callback: assigning into the
    # shared mutable default (kwargs={}) would leak state across calls
    kwargs = dict(kwargs)
    kwargs['notification'] = notification
    return func(*args, **kwargs)
def get_func(name):
    # Resolve a PARALLEL_FUNCS entry to (function object, notification
    # keyword name or None)
    module, func, notification = PARALLEL_FUNCS[name]
    try:
        module = importlib.import_module(module)
    except ZipImportError:
        # Something windows weird happened, try clearing the zip import cache
        # in case the zipfile was changed from under us
        from zipimport import _zip_directory_cache as zdc
        zdc.clear()
        module = importlib.import_module(module)
    func = getattr(module, func)
    return func, notification
def main():
    # Entry point for processes launched via calibre-parallel. Dispatches
    # between several modes based on argv and environment variables:
    # multiprocessing fork support (windows), simple worker, pipe worker,
    # and the standard job-queue worker.
    if iswindows:
        if '--multiprocessing-fork' in sys.argv:
            # We are using the multiprocessing module on windows to launch a
            # worker process
            from multiprocessing import freeze_support
            freeze_support()
            return 0

    if ismacos and 'CALIBRE_WORKER_FD' not in os.environ and 'CALIBRE_SIMPLE_WORKER' not in os.environ and '--pipe-worker' not in sys.argv:
        # On some OS X computers launchd apparently tries to
        # launch the last run process from the bundle
        # so launch the gui as usual
        from calibre.gui2.main import main as gui_main
        return gui_main(['calibre'])

    # Apply any niceness requested by the parent (set in launch.py)
    niceness = os.environ.pop('CALIBRE_WORKER_NICENESS', None)
    if niceness:
        try:
            os.nice(int(niceness))
        except Exception:
            pass

    csw = os.environ.pop('CALIBRE_SIMPLE_WORKER', None)
    if csw:
        # Simple worker mode: run the module:func named in the env var
        mod, _, func = csw.partition(':')
        mod = importlib.import_module(mod)
        func = getattr(mod, func)
        func()
        return

    if '--pipe-worker' in sys.argv:
        # Pipe worker mode: execute the python statements passed as the
        # last command line argument
        try:
            exec(sys.argv[-1])
        except Exception:
            print('Failed to run pipe worker with command:', sys.argv[-1])
            sys.stdout.flush()
            raise
        return

    # Standard worker mode: receive one named job over the control
    # connection, run it, and write the pickled result to the file named by
    # CALIBRE_WORKER_RESULT
    fd = int(os.environ['CALIBRE_WORKER_FD'])
    resultf = from_hex_unicode(os.environ['CALIBRE_WORKER_RESULT'])
    with Connection(fd) as conn:
        name, args, kwargs, desc = eintr_retry_call(conn.recv)
        if desc:
            prints(desc)
            sys.stdout.flush()
        func, notification = get_func(name)
        notifier = Progress(conn)
        if notification:
            # The job function reports progress via the ``notification``
            # keyword argument; forward those reports over the connection
            kwargs[notification] = notifier
            notifier.start()

        result = func(*args, **kwargs)
        if result is not None:
            os.makedirs(os.path.dirname(resultf), exist_ok=True)
            with open(resultf, 'wb') as f:
                f.write(pickle_dumps(result))

        notifier.queue.put(None)

    try:
        sys.stdout.flush()
    except OSError:
        pass  # Happens sometimes on OS X for GUI processes (EPIPE)
    try:
        sys.stderr.flush()
    except OSError:
        pass  # Happens sometimes on OS X for GUI processes (EPIPE)
    return 0
# Script entry point: run the worker and propagate its exit code
if __name__ == '__main__':
    sys.exit(main())
| 7,651 | Python | .py | 192 | 32.895833 | 139 | 0.658172 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,202 | __init__.py | kovidgoyal_calibre/src/calibre/utils/ipc/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import errno
import os
from calibre import force_unicode
from calibre.constants import filesystem_encoding, get_windows_username, islinux, iswindows
from calibre.utils.filenames import ascii_filename
from polyglot.functools import lru_cache
VADDRESS = None
def eintr_retry_call(func, *args, **kwargs):
    '''Invoke ``func(*args, **kwargs)``, transparently retrying on EINTR.'''
    while True:
        try:
            return func(*args, **kwargs)
        except OSError as err:
            if getattr(err, 'errno', None) != errno.EINTR:
                raise
            # Interrupted system call: simply try again
@lru_cache()
def socket_address(which):
    # Return a platform-appropriate single-instance socket address for the
    # given component name ('gui', 'viewer', ...), made unique per OS user
    # so multiple users on one machine do not collide. Cached since the
    # answer never changes within a process.
    if iswindows:
        # Windows named pipe, suffixed with a sanitized user name
        ans = r'\\.\pipe\Calibre' + which
        try:
            user = get_windows_username()
        except Exception:
            user = None
        if user:
            user = ascii_filename(user).replace(' ', '_')
            if user:
                ans += '-' + user[:100] + 'x'
    else:
        user = force_unicode(os.environ.get('USER') or os.path.basename(os.path.expanduser('~')), filesystem_encoding)
        sock_name = '{}-calibre-{}.socket'.format(ascii_filename(user).replace(' ', '_'), which)
        if islinux:
            # Leading NUL selects the abstract (filesystem-less) unix
            # socket namespace
            ans = '\0' + sock_name
        else:
            from tempfile import gettempdir
            tmp = force_unicode(gettempdir(), filesystem_encoding)
            ans = os.path.join(tmp, sock_name)
    return ans
def gui_socket_address():
    '''Address of the single-instance socket used by the main calibre GUI.'''
    which = 'GUI' if iswindows else 'gui'
    return socket_address(which)
def viewer_socket_address():
    '''Address of the single-instance socket used by the ebook viewer.'''
    which = 'Viewer' if iswindows else 'viewer'
    return socket_address(which)
| 1,648 | Python | .py | 45 | 29.377778 | 118 | 0.622642 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,203 | launch.py | kovidgoyal_calibre/src/calibre/utils/ipc/launch.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import subprocess
import sys
import time
from calibre.constants import isfrozen, ismacos, iswindows
from calibre.ptempfile import PersistentTemporaryFile, base_dir
from calibre.utils.config import prefs
from calibre.utils.serialize import msgpack_dumps
from polyglot.binary import as_hex_unicode
from polyglot.builtins import environ_item, native_string_type, string_or_bytes
if iswindows:
    # Pre-open the NUL device: used as stdout for GUI workers launched from
    # pythonw, where the standard streams may be invalid (see Worker.__call__)
    try:
        windows_null_file = open(os.devnull, 'wb')
    except:
        raise RuntimeError('NUL file missing in windows. This indicates a'
                ' corrupted windows. You should contact Microsoft'
                ' for assistance and/or follow the steps described here: https://bytes.com/topic/net/answers/264804-compile-error-null-device-missing')
def renice(niceness):
    '''Best-effort adjustment of this process's scheduling priority.

    Failures are deliberately ignored; however, only the expected failure
    modes are swallowed, instead of the original bare ``except:`` which also
    trapped KeyboardInterrupt and SystemExit.
    '''
    try:
        os.nice(niceness)
    except (AttributeError, OSError):
        # AttributeError: os.nice does not exist on Windows
        # OSError/PermissionError: insufficient privileges to renice
        pass
def macos_viewer_bundle_path():
    '''Path to the MacOS directory inside the ebook-viewer app bundle.'''
    contents = os.path.dirname(sys.executables_location)
    return os.path.join(contents, 'ebook-viewer.app/Contents/MacOS/')
def macos_edit_book_bundle_path():
    '''Path to the MacOS directory inside the nested ebook-edit app bundle.'''
    contents = os.path.dirname(sys.executables_location)
    return os.path.join(contents, 'ebook-viewer.app/Contents/ebook-edit.app/Contents/MacOS/')
def macos_headless_bundle_path():
    '''Path to the MacOS directory inside the nested headless app bundle.'''
    contents = os.path.dirname(sys.executables_location)
    return os.path.join(contents, 'ebook-viewer.app/Contents/ebook-edit.app/Contents/headless.app/Contents/MacOS/')
def exe_path(exe_name):
    '''Return the command used to launch the helper ``exe_name``.

    Depending on how calibre is being run (from setup.py, a local source
    checkout, a frozen build, ...) this is either an argv prefix list or a
    single path/name string.
    '''
    if hasattr(sys, 'running_from_setup'):
        return [sys.executable, os.path.join(sys.setup_dir, 'run-calibre-worker.py')]
    if getattr(sys, 'run_local', False):
        return [sys.executable, sys.run_local, exe_name]
    name = exe_name
    if iswindows:
        suffix = name+'.exe' if isfrozen else 'Scripts\\%s.exe'%name
        return os.path.join(os.path.dirname(sys.executable), suffix)
    if ismacos or isfrozen:
        return os.path.join(sys.executables_location, name)
    if hasattr(sys, 'executables_location'):
        candidate = os.path.join(sys.executables_location, name)
        if os.access(candidate, os.X_OK):
            return candidate
    # Fall back to relying on PATH lookup
    return name
class Worker:
    '''
    Platform independent object for launching child processes. All processes
    have the environment variable :envvar:`CALIBRE_WORKER` set.

    Useful attributes: ``is_alive``, ``returncode``, ``pid``
    Useful methods: ``kill``

    To launch child simply call the Worker object. By default, the child's
    output is redirected to an on disk file, the path to which is returned by
    the call.
    '''

    exe_name = 'calibre-parallel'

    @property
    def executable(self):
        # Path to the worker executable; on frozen macOS builds the headless
        # nested bundle is used
        if ismacos and not hasattr(sys, 'running_from_setup'):
            return os.path.join(macos_headless_bundle_path(), self.exe_name)
        return exe_path(self.exe_name)

    @property
    def gui_executable(self):
        # Executable for GUI jobs; on frozen macOS builds the viewer/editor
        # have their own app bundles so the dock icon/name are correct
        if ismacos and not hasattr(sys, 'running_from_setup'):
            if self.job_name == 'ebook-viewer':
                return os.path.join(macos_viewer_bundle_path(), self.exe_name)
            if self.job_name == 'ebook-edit':
                return os.path.join(macos_edit_book_bundle_path(), self.exe_name)
            return os.path.join(sys.executables_location, self.exe_name)

        return self.executable

    @property
    def env(self):
        # Environment for the child: a copy of the current environment plus
        # the worker marker, the temp dir, and any extra variables supplied
        # at construction time
        env = os.environ.copy()
        env[native_string_type('CALIBRE_WORKER')] = environ_item('1')
        td = as_hex_unicode(msgpack_dumps(base_dir()))
        env[native_string_type('CALIBRE_WORKER_TEMP_DIR')] = environ_item(td)
        env.update(self._env)
        return env

    @property
    def is_alive(self):
        # True only after launch and while the child has not exited
        return hasattr(self, 'child') and self.child.poll() is None

    @property
    def returncode(self):
        # Child's exit code, or None if not launched / still running
        if not hasattr(self, 'child'):
            return None
        self.child.poll()
        return self.child.returncode

    @property
    def pid(self):
        if not hasattr(self, 'child'):
            return None
        return getattr(self.child, 'pid', None)

    def close_log_file(self):
        # Best effort: the log file may never have been opened
        try:
            self._file.close()
        except:
            pass

    def kill(self):
        # Terminate the child: politely on Unix (terminate() with a 2 second
        # grace period, then kill()), immediately on Windows
        self.close_log_file()
        try:
            if self.is_alive:
                if iswindows:
                    return self.child.kill()
                try:
                    self.child.terminate()
                    st = time.time()
                    while self.is_alive and time.time()-st < 2:
                        time.sleep(0.2)
                finally:
                    if self.is_alive:
                        self.child.kill()
        except:
            pass

    def __init__(self, env=None, gui=False, job_name=None):
        # env: extra environment variables for the child
        # gui: use the GUI executable variant when launching
        # job_name: name of the job, used to pick the right macOS app bundle
        self.gui = gui
        self.job_name = job_name
        self._env = (env or {}).copy()

    def __call__(self, redirect_output=True, cwd=None, priority=None, pass_fds=()):
        '''
        If redirect_output is True, output from the child is redirected
        to a file on disk and this method returns the path to that file.
        '''
        exe = self.gui_executable if self.gui else self.executable
        env = self.env
        try:
            origwd = cwd or os.path.abspath(os.getcwd())
        except OSError:
            # cwd no longer exists
            origwd = cwd or os.path.expanduser('~')
        env[native_string_type('ORIGWD')] = environ_item(as_hex_unicode(msgpack_dumps(origwd)))
        _cwd = cwd
        if priority is None:
            priority = prefs['worker_process_priority']
        cmd = [exe] if isinstance(exe, string_or_bytes) else exe
        args = {
            'env' : env,
            'cwd' : _cwd,
        }
        if iswindows:
            # Map the symbolic priority to a Windows priority class
            priority = {
                'high' : subprocess.HIGH_PRIORITY_CLASS,
                'normal' : subprocess.NORMAL_PRIORITY_CLASS,
                'low' : subprocess.IDLE_PRIORITY_CLASS}[priority]
            args['creationflags'] = subprocess.CREATE_NO_WINDOW|priority
        else:
            # On Unix, the worker renices itself based on this env var
            niceness = {
                'normal' : 0,
                'low'    : 10,
                'high'   : 20,
            }[priority]
            args['env']['CALIBRE_WORKER_NICENESS'] = str(niceness)
        ret = None
        if redirect_output:
            self._file = PersistentTemporaryFile('_worker_redirect.log')
            args['stdout'] = self._file._fd
            args['stderr'] = subprocess.STDOUT
            if iswindows:
                args['stdin'] = subprocess.PIPE
            ret = self._file.name

        if iswindows and 'stdin' not in args:
            # On windows when using the pythonw interpreter,
            # stdout, stderr and stdin may not be valid
            args['stdin'] = subprocess.PIPE
            args['stdout'] = windows_null_file
            args['stderr'] = subprocess.STDOUT

        args['close_fds'] = True
        try:
            if pass_fds:
                if iswindows:
                    # Handles on Windows are passed via the startupinfo
                    # handle list and must be temporarily inheritable
                    for fd in pass_fds:
                        os.set_handle_inheritable(fd, True)
                    args['startupinfo'] = subprocess.STARTUPINFO(lpAttributeList={'handle_list':pass_fds})
                else:
                    args['pass_fds'] = pass_fds
            self.child = subprocess.Popen(cmd, **args)
        finally:
            if iswindows and pass_fds:
                for fd in pass_fds:
                    os.set_handle_inheritable(fd, False)
        if 'stdin' in args:
            self.child.stdin.close()

        self.log_path = ret
        return ret
| 7,685 | Python | .py | 191 | 30.314136 | 151 | 0.595602 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,204 | job.py | kovidgoyal_calibre/src/calibre/utils/ipc/job.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import io
import time
from itertools import count
from calibre import prints
from calibre.constants import DEBUG
from calibre.utils.localization import _
from polyglot.builtins import cmp
from polyglot.queue import Empty, Queue
job_counter = count()
class BaseJob:
    # A job tracked by the job manager: holds description, run state,
    # progress, result and completion callbacks.

    # Run states
    WAITING = 0
    RUNNING = 1
    FINISHED = 2

    def __init__(self, description, done=lambda x: x):
        self.id = next(job_counter)
        self.description = description
        self.done = done              # completion callback, invoked once
        self.done2 = None             # secondary completion callback (set by the server)
        self.killed = False
        self.failed = False
        self.kill_on_start = False    # if set, the job is aborted instead of started
        self.start_time = None
        self.result = None
        self.duration = None          # a non-None duration marks the job finished
        self.log_path = None
        self.notifications = Queue()  # queue of (fraction, message) progress updates
        self._run_state = self.WAITING
        self.percent = 0
        self._message = None
        self._status_text = _('Waiting...')
        self._done_called = False
        self.core_usage = 1
        self.timed_out = False

    def update(self, consume_notifications=True):
        # Refresh run state and status text from the job's attributes and
        # fire completion callbacks exactly once. Returns True if any
        # progress notification was consumed.
        if self.duration is not None:
            self._run_state = self.FINISHED
            self.percent = 100
            if self.killed:
                if self.timed_out:
                    self._status_text = _('Aborted, taking too long')
                else:
                    self._status_text = _('Stopped')
            else:
                self._status_text = _('Error') if self.failed else _('Finished')
            if DEBUG:
                try:
                    prints('Job:', self.id, self.description, 'finished')
                    prints('\t'.join(self.details.splitlines(True)))
                except:
                    pass
            if not self._done_called:
                self._done_called = True
                # Callbacks must never break job bookkeeping
                try:
                    self.done(self)
                except:
                    pass
                try:
                    if callable(self.done2):
                        self.done2(self)
                except:
                    pass
        elif self.start_time is not None:
            self._run_state = self.RUNNING
            self._status_text = _('Working...')

        if consume_notifications:
            return self.consume_notifications()
        return False

    def consume_notifications(self):
        # Drain all queued progress updates, keeping only the most recent
        got_notification = False
        while self.notifications is not None:
            try:
                self.percent, self._message = self.notifications.get_nowait()
                self.percent *= 100.  # workers report a 0-1 fraction
                got_notification = True
            except Empty:
                break
        return got_notification

    @property
    def status_text(self):
        if self._run_state == self.FINISHED or not self._message:
            return self._status_text
        return self._message

    @property
    def run_state(self):
        return self._run_state

    @property
    def running_time(self):
        if self.duration is not None:
            return self.duration
        if self.start_time is not None:
            return time.time() - self.start_time
        return None

    @property
    def is_finished(self):
        return self._run_state == self.FINISHED

    @property
    def is_started(self):
        return self._run_state != self.WAITING

    @property
    def is_running(self):
        return self.is_started and not self.is_finished

    # Jobs compare by identity; the rich comparisons below define the order
    # used by the jobs view (see compare_to_other)
    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        return self is other

    def __ne__(self, other):
        return self is not other

    def __lt__(self, other):
        return self.compare_to_other(other) < 0

    def __le__(self, other):
        return self.compare_to_other(other) <= 0

    def __gt__(self, other):
        return self.compare_to_other(other) > 0

    def __ge__(self, other):
        return self.compare_to_other(other) >= 0

    def compare_to_other(self, other):
        # Sort order: unfinished before finished; among waiting jobs newest
        # (highest id) first; among running jobs most recently started first
        if self.is_finished != other.is_finished:
            return 1 if self.is_finished else -1

        if self.start_time is None:
            if other.start_time is None:  # Both waiting
                return cmp(other.id, self.id)
            return 1

        if other.start_time is None:
            return -1

        # Both running
        return cmp((other.start_time, id(other)), (self.start_time, id(self)))

    @property
    def log_file(self):
        # Binary file object with the job's log contents
        if self.log_path:
            return open(self.log_path, 'rb')
        return io.BytesIO(_('No details available.').encode('utf-8',
            'replace'))

    @property
    def details(self):
        return self.log_file.read().decode('utf-8', 'replace')
class ParallelJob(BaseJob):
    '''A BaseJob carrying the name/args/kwargs of a PARALLEL_FUNCS entry.'''

    def __init__(self, name, description, done, args=[], kwargs={}):
        self.name = name
        self.args = args
        self.kwargs = kwargs
        BaseJob.__init__(self, description, done)
| 5,074 | Python | .py | 143 | 25.685315 | 80 | 0.557076 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,205 | server.py | kovidgoyal_calibre/src/calibre/utils/ipc/server.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import sys
import tempfile
import time
from collections import deque
from itertools import count
from math import ceil
from multiprocessing import Pipe
from threading import Thread
from calibre import detect_ncpus as cpu_count
from calibre import force_unicode
from calibre.constants import DEBUG
from calibre.ptempfile import base_dir
from calibre.utils.ipc import eintr_retry_call
from calibre.utils.ipc.launch import Worker
from calibre.utils.ipc.worker import PARALLEL_FUNCS
from calibre.utils.serialize import pickle_loads
from polyglot.binary import as_hex_unicode
from polyglot.builtins import environ_item, string_or_bytes
from polyglot.queue import Empty, Queue
server_counter = count()
_name_counter = count()
class ConnectedWorker(Thread):
    # Wraps a launched worker process plus the control connection to it.
    # Runs as a daemon thread that forwards progress notifications from the
    # worker into self.notifications.

    def __init__(self, worker, conn, rfile):
        Thread.__init__(self)
        self.daemon = True
        self.conn = conn
        self.worker = worker
        self.notifications = Queue()
        self._returncode = 'dummy'  # sentinel: real return code not yet known
        self.killed = False
        self.log_path = worker.log_path
        self.rfile = rfile          # file the worker writes its pickled result to
        self.close_log_file = getattr(worker, 'close_log_file', None)

    def start_job(self, job):
        # Jobs whose PARALLEL_FUNCS entry has a notification slot keep the
        # connection open and stream progress; others close it immediately
        notification = PARALLEL_FUNCS[job.name][-1] is not None
        eintr_retry_call(self.conn.send, (job.name, job.args, job.kwargs, job.description))
        if notification:
            self.start()
        else:
            self.conn.close()
        self.job = job

    def run(self):
        # Pump notifications until the connection dies (worker exit counts)
        while True:
            try:
                x = eintr_retry_call(self.conn.recv)
                self.notifications.put(x)
            except BaseException:
                break
        try:
            self.conn.close()
        except BaseException:
            pass

    def kill(self):
        self.killed = True
        try:
            self.worker.kill()
        except BaseException:
            pass

    @property
    def is_alive(self):
        return not self.killed and self.worker.is_alive

    @property
    def returncode(self):
        # Cached exit code of the worker process; a worker we killed
        # ourselves is reported as failed (1) even before it is reaped
        if self._returncode != 'dummy':
            return self._returncode
        r = self.worker.returncode
        if self.killed and r is None:
            self._returncode = 1
            return 1
        if r is not None:
            self._returncode = r
        return r
class CriticalError(Exception):
    '''Raised when a worker process could not be launched at all.'''
class Server(Thread):
def __init__(self, notify_on_job_done=lambda x: x, pool_size=None,
        limit=sys.maxsize, enforce_cpu_limit=True):
    # Job server thread: runs queued jobs in worker processes.
    # notify_on_job_done is called (via job.done2) with each job as it
    # finishes; with enforce_cpu_limit the pool defaults to the CPU count.
    Thread.__init__(self)
    self.daemon = True
    self.id = next(server_counter) + 1
    if enforce_cpu_limit:
        limit = min(limit, cpu_count())
    self.pool_size = limit if pool_size is None else pool_size
    self.notify_on_job_done = notify_on_job_done
    self.add_jobs_queue, self.changed_jobs_queue = Queue(), Queue()
    self.kill_queue = Queue()
    self.waiting_jobs = []
    self.workers = deque()
    self.launched_worker_counter = count()
    next(self.launched_worker_counter)  # worker ids start at 1
    self.start()
def launch_worker(self, gui=False, redirect_output=None, job_name=None):
    # Launch a worker process and return its ConnectedWorker. A unique
    # result file is pre-created for the worker to write its pickled
    # result into. Raises CriticalError if the process cannot be started.
    start = time.monotonic()
    id = next(self.launched_worker_counter)
    fd, rfile = tempfile.mkstemp(prefix='ipc_result_%d_%d_'%(self.id, id),
            dir=base_dir(), suffix='.pickle')
    os.close(fd)
    if redirect_output is None:
        redirect_output = not gui  # GUI workers keep their own stdio
    cw = self.do_launch(gui, redirect_output, rfile, job_name=job_name)
    if isinstance(cw, string_or_bytes):
        # do_launch returns a formatted traceback string on failure
        raise CriticalError('Failed to launch worker process:\n'+force_unicode(cw))
    if DEBUG:
        print(f'Worker Launch took: {time.monotonic() - start:.2f} seconds')
    return cw
def do_launch(self, gui, redirect_output, rfile, job_name=None):
a, b = Pipe()
with a:
env = {
'CALIBRE_WORKER_FD': str(a.fileno()),
'CALIBRE_WORKER_RESULT' : environ_item(as_hex_unicode(rfile))
}
w = Worker(env, gui=gui, job_name=job_name)
try:
w(pass_fds=(a.fileno(),), redirect_output=redirect_output)
except BaseException:
try:
w.kill()
except:
pass
b.close()
import traceback
return traceback.format_exc()
return ConnectedWorker(w, b, rfile)
def add_job(self, job):
job.done2 = self.notify_on_job_done
self.add_jobs_queue.put(job)
def run_job(self, job, gui=True, redirect_output=False):
w = self.launch_worker(gui=gui, redirect_output=redirect_output, job_name=getattr(job, 'name', None))
w.start_job(job)
def run(self):
while True:
try:
job = self.add_jobs_queue.get(True, 0.2)
if job is None:
break
self.waiting_jobs.insert(0, job)
except Empty:
pass
# Get notifications from worker process
for worker in self.workers:
while True:
try:
n = worker.notifications.get_nowait()
worker.job.notifications.put(n)
self.changed_jobs_queue.put(worker.job)
except Empty:
break
# Remove finished jobs
for worker in [w for w in self.workers if not w.is_alive]:
try:
worker.close_log_file()
except:
pass
self.workers.remove(worker)
job = worker.job
if worker.returncode != 0:
job.failed = True
job.returncode = worker.returncode
elif os.path.exists(worker.rfile):
try:
with open(worker.rfile, 'rb') as f:
job.result = pickle_loads(f.read())
os.remove(worker.rfile)
except:
pass
job.duration = time.time() - job.start_time
self.changed_jobs_queue.put(job)
# Start waiting jobs
sj = self.suitable_waiting_job()
if sj is not None:
job = self.waiting_jobs.pop(sj)
job.start_time = time.time()
if job.kill_on_start:
job.duration = 0.0
job.returncode = 1
job.killed = job.failed = True
job.result = None
else:
worker = self.launch_worker()
worker.start_job(job)
self.workers.append(worker)
job.log_path = worker.log_path
self.changed_jobs_queue.put(job)
while True:
try:
j = self.kill_queue.get_nowait()
self._kill_job(j)
except Empty:
break
def suitable_waiting_job(self):
available_workers = self.pool_size - len(self.workers)
for worker in self.workers:
job = worker.job
if job.core_usage == -1:
available_workers = 0
elif job.core_usage > 1:
available_workers -= job.core_usage - 1
if available_workers < 1:
return None
for i, job in enumerate(self.waiting_jobs):
if job.core_usage == -1:
if available_workers >= self.pool_size:
return i
elif job.core_usage <= available_workers:
return i
def kill_job(self, job):
self.kill_queue.put(job)
def killall(self):
for worker in self.workers:
self.kill_queue.put(worker.job)
def _kill_job(self, job):
if job.start_time is None:
job.kill_on_start = True
return
for worker in self.workers:
if job is worker.job:
worker.kill()
job.killed = True
break
def split(self, tasks):
'''
Split a list into a list of sub lists, with the number of sub lists being
no more than the number of workers this server supports. Each sublist contains
2-tuples of the form (i, x) where x is an element from the original list
and i is the index of the element x in the original list.
'''
ans, count, pos = [], 0, 0
delta = int(ceil(len(tasks)/float(self.pool_size)))
while count < len(tasks):
section = []
for t in tasks[pos:pos+delta]:
section.append((count, t))
count += 1
ans.append(section)
pos += delta
return ans
def close(self):
try:
self.add_jobs_queue.put(None)
except:
pass
try:
self.listener.close()
except:
pass
time.sleep(0.2)
for worker in list(self.workers):
try:
worker.kill()
except:
pass
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
| 9,579 | Python | .py | 261 | 25.08046 | 109 | 0.545484 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,206 | description.py | kovidgoyal_calibre/src/calibre/utils/opensearch/description.py | __license__ = 'GPL 3'
__copyright__ = '''
2011, John Schember <john@nachtimwald.com>,
2006, Ed Summers <ehs@pobox.com>
'''
__docformat__ = 'restructuredtext en'
from contextlib import closing
from calibre import browser
from calibre.utils.opensearch.url import URL
from calibre.utils.xml_parse import safe_xml_fromstring
class Description:
    '''
    A class for representing OpenSearch Description files.
    '''

    def __init__(self, url=""):
        '''
        The constructor which may pass an optional url to load from.

        d = Description("http://www.example.com/description")
        '''
        if url:
            self.load(url)

    def load(self, url):
        '''
        For loading up a description object from a url. Normally
        you'll probably just want to pass a URL into the constructor.
        '''
        br = browser()
        with closing(br.open(url, timeout=15)) as f:
            doc = safe_xml_fromstring(f.read())

        # version 1.1 has repeating Url elements.
        self.urls = []
        for element in doc.xpath('//*[local-name() = "Url"]'):
            template = element.get('template')
            type = element.get('type')
            if template and type:
                url = URL()
                url.template = template
                url.type = type
                self.urls.append(url)
        # Stanza catalogs.
        for element in doc.xpath('//*[local-name() = "link"]'):
            if element.get('rel') != 'search':
                continue
            href = element.get('href')
            type = element.get('type')
            if href and type:
                url = URL()
                url.template = href
                url.type = type
                self.urls.append(url)

        # this is version 1.0 specific.
        self.url = ''
        if not self.urls:
            self.url = ''.join(doc.xpath('//*[local-name() = "Url"][1]//text()'))
        self.format = ''.join(doc.xpath('//*[local-name() = "Format"][1]//text()'))

        self.shortname = ''.join(doc.xpath('//*[local-name() = "ShortName"][1]//text()'))
        self.longname = ''.join(doc.xpath('//*[local-name() = "LongName"][1]//text()'))
        self.description = ''.join(doc.xpath('//*[local-name() = "Description"][1]//text()'))
        self.image = ''.join(doc.xpath('//*[local-name() = "Image"][1]//text()'))
        # NOTE: the attribute name (sameplesearch) is misspelled, but it is
        # kept unchanged for backwards compatibility with existing callers.
        self.sameplesearch = ''.join(doc.xpath('//*[local-name() = "SampleSearch"][1]//text()'))
        self.developer = ''.join(doc.xpath('//*[local-name() = "Developer"][1]//text()'))
        # BUG FIX: this xpath previously started with '/*' which only matches
        # a top level Contact element; use '//*' like every other field.
        self.contact = ''.join(doc.xpath('//*[local-name() = "Contact"][1]//text()'))
        self.attribution = ''.join(doc.xpath('//*[local-name() = "Attribution"][1]//text()'))
        self.syndicationright = ''.join(doc.xpath('//*[local-name() = "SyndicationRight"][1]//text()'))

        tag_text = ' '.join(doc.xpath('//*[local-name() = "Tags"]//text()'))
        if tag_text is not None:
            self.tags = tag_text.split(' ')

        self.adultcontent = doc.xpath('boolean(//*[local-name() = "AdultContent" and contains(., "true")])')

    def get_url_by_type(self, type):
        '''
        Walks available urls and returns them by type. Only
        appropriate in opensearch v1.1 where there can be multiple
        query targets. Returns none if no such type is found.

        url = description.get_url_by_type('application/rss+xml')
        '''
        for url in self.urls:
            if url.type == type:
                return url
        return None

    def get_best_template(self):
        '''
        OK, best is a value judgement, but so be it. You'll get
        back either the atom, rss or first template available. This
        method handles the main difference between opensearch v1.0 and v1.1
        '''
        # version 1.0
        if self.url:
            return self.url
        # atom
        if self.get_url_by_type('application/atom+xml'):
            return self.get_url_by_type('application/atom+xml').template
        # rss
        if self.get_url_by_type('application/rss+xml'):
            return self.get_url_by_type('application/rss+xml').template
        # other possible rss type
        # BUG FIX: this previously called the non-existent get_url_by_Type()
        # (wrong capitalization), raising AttributeError for text/xml feeds.
        if self.get_url_by_type('text/xml'):
            return self.get_url_by_type('text/xml').template
        # otherwise just the first one
        if len(self.urls) > 0:
            return self.urls[0].template
        # out of luck
        return None
| 4,454 | Python | .py | 102 | 34.156863 | 108 | 0.564635 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,207 | url.py | kovidgoyal_calibre/src/calibre/utils/opensearch/url.py | __license__ = 'GPL 3'
__copyright__ = '2006, Ed Summers <ehs@pobox.com>'
__docformat__ = 'restructuredtext en'
class URL:
    '''
    Class for representing a URL in an opensearch v1.1 query
    '''

    def __init__(self, type='', template='', method='GET'):
        self.type = type
        self.template = template
        # BUG FIX: the method argument was previously ignored and the
        # attribute hard-coded to 'GET'; honor the argument (default 'GET').
        self.method = method
        self.params = []
| 373 | Python | .py | 12 | 25.833333 | 60 | 0.592179 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,208 | __init__.py | kovidgoyal_calibre/src/calibre/utils/opensearch/__init__.py | '''
Based on the OpenSearch Python module by Ed Summers <ehs@pobox.com> from
https://github.com/edsu/opensearch .
This module is heavily modified and does not implement all the features from
the original. The ability for the module to perform a search and retrieve
search results has been removed. The original module used a modified version
of the Universal feed parser from http://feedparser.org/ . The use of
FeedParser made getting search results very slow. There is also a bug in the
modified FeedParser that causes the system to run out of file descriptors.
Instead of fixing the modified feed parser it was decided to remove it and
manually parse the feeds in a set of type specific classes. This is much
faster and, since we know the feed format in advance, simpler than using
FeedParser. Also, replacing the modified FeedParser with the newest version
of FeedParser caused some feeds to be parsed incorrectly and result in a loss
of data.
The module was also rewritten to use lxml instead of MiniDom.
Usage:
description = Description(open_search_url)
url_template = description.get_best_template()
if not url_template:
return
query = Query(url_template)
# set up initial values.
query.searchTerms = search_terms
# Note the count is ignored by some feeds.
query.count = max_results
search_url = query.url()
'''
| 1,330 | Python | .py | 28 | 46.035714 | 77 | 0.808198 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,209 | query.py | kovidgoyal_calibre/src/calibre/utils/opensearch/query.py | __license__ = 'GPL 3'
__copyright__ = '2006, Ed Summers <ehs@pobox.com>'
__docformat__ = 'restructuredtext en'
from polyglot.urllib import parse_qs, urlencode, urlparse, urlunparse
class Query:
    '''
    Represents an opensearch query. Really this class is just a helper for
    substituting concrete values into the macros of a url template.

    format = 'http://beta.indeed.com/opensearch?q={searchTerms}&start={startIndex}&limit={count}'
    q = Query(format)
    q.searchTerms = 'zx81'
    q.startIndex = 1
    q.count = 25
    print(q.url())
    '''
    standard_macros = ['searchTerms', 'count', 'startIndex', 'startPage',
            'language', 'outputEncoding', 'inputEncoding']
    def __init__(self, format):
        '''
        Create a query object from the url template obtained from an
        opensearch Description.
        '''
        self.format = format
        # Split the template url into its components and pull the query
        # string apart into a {field: [value]} mapping.
        self.url_parts = urlparse(format)
        self.query_string = parse_qs(self.url_parts[4])
        # Map each recognized opensearch macro to the service specific query
        # field carrying it, e.g. q={searchTerms} maps searchTerms -> q.
        # TODO eventually optional/required params should be
        # distinguished somehow (the ones with/without trailing ?)
        self.macro_map = {}
        for field, values in self.query_string.items():
            candidate = values[0]
            for ch in '{}?':
                candidate = candidate.replace(ch, '')
            if candidate in Query.standard_macros:
                self.macro_map[candidate] = field
    def url(self):
        # Substitute every macro that has been set on this instance into the
        # query string; macros that were never assigned are dropped entirely.
        filled = dict(self.query_string)
        for macro, field in self.macro_map.items():
            try:
                filled[field] = [getattr(self, macro)]
            except AttributeError:
                del filled[field]
        # Recompose the url with the substituted query string.
        parts = list(self.url_parts)
        parts[4] = urlencode(filled, 1)
        return urlunparse(tuple(parts))
    def has_macro(self, macro):
        return macro in self.macro_map
| 2,429 | Python | .py | 55 | 35.490909 | 97 | 0.629237 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,210 | __init__.py | kovidgoyal_calibre/src/calibre/utils/rcc/__init__.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2022, Kovid Goyal <kovid at kovidgoyal.net>
import io
import os
import sys
import tempfile
from posixpath import normpath
from qt.core import QFile, QIODevice
from calibre_extensions import rcc_backend
def compile_qrc(output_path, *qrc_file_paths):
    '''
    Compile one or more .qrc files into a single binary Qt resource file at
    output_path, using calibre's bundled rcc backend. Error output from rcc
    goes to stderr. Raises on failure; a failed write removes the partial
    output file.
    '''
    rcc = rcc_backend.RCCResourceLibrary()
    err_device = QFile()
    try:
        fd = sys.__stderr__.fileno()
    except io.UnsupportedOperation:
        fd = 2  # stderr may not expose a real file descriptor
    if not err_device.open(fd, QIODevice.OpenModeFlag.WriteOnly | QIODevice.OpenModeFlag.Text):
        raise ValueError('Failed to open STDERR for writing')
    if not qrc_file_paths:
        raise TypeError('Must specify at least one .qrc file')
    rcc.setInputFiles(list(qrc_file_paths))
    if not rcc.readFiles(False, err_device):
        raise ValueError('Failed to read qrc files')
    with open(output_path, 'wb') as f:
        # Wrap the already-open fd in a QFile so rcc can write through it
        out = QFile(output_path)
        if not out.open(f.fileno(), QIODevice.OpenModeFlag.WriteOnly):
            raise RuntimeError(f'Failed to open {output_path} for writing')
        ok = rcc.output(out, QFile(), err_device)
    if not ok:
        # Don't leave a truncated/partial resource file behind
        os.remove(output_path)
        raise ValueError('Failed to write output')
def index_theme(name, inherits=''):
    '''
    Return the text of an index.theme file describing an icon theme called
    *name*, optionally inheriting from the theme *inherits*.
    '''
    minimum, nominal, maximum = 16, 128, 512
    header = ['[Icon Theme]', f'Name={name}', 'Comment=Icons for calibre']
    if inherits:
        header.append(f'Inherits={inherits}')
    header.append('')
    sections = []
    for subdir in ('images',):
        sections.extend((f'[{subdir}]', f'Size={nominal}',
                         f'MinSize={minimum}', f'MaxSize={maximum}', ''))
    return '\n'.join(header + sections)
def safe_link(src, dest):
    '''Hard-link src to dest, replacing dest if it already exists.'''
    for attempt in range(2):
        try:
            os.link(src, dest)
            return
        except FileExistsError:
            if attempt:
                raise  # dest reappeared after removal; give up
            os.remove(dest)
def compile_icon_dir_as_themes(
    path_to_dir, output_path, theme_name='calibre-default', inherits='',
    for_theme='any', prefix='/icons',
):
    '''
    Walk the icon images in path_to_dir and compile them into a binary Qt
    resource file at output_path, laid out as freedesktop icon theme(s).
    When for_theme == 'any', -for-dark-theme/-for-light-theme variants go
    into separate <theme_name>-dark/-light themes that inherit theme_name;
    when it is 'dark' or 'light' only the matching variants are kept.
    '''
    with tempfile.TemporaryDirectory(dir=path_to_dir) as tdir, open(os.path.join(tdir, 'icons.qrc'), 'w') as qrc:
        print('<RCC>', file=qrc)
        print(f'  <qresource prefix="{prefix}">', file=qrc)

        def file(name):
            # Register a (theme-relative) file entry in the qrc manifest
            name = name.replace('\\', '/')
            print(f'    <file>{normpath(name)}</file>', file=qrc)
        specific_themes = []
        if for_theme == 'any':
            specific_themes = [theme_name + '-dark', theme_name + '-light']
        for q in [theme_name] + specific_themes:
            os.mkdir(os.path.join(tdir, q))
            for sd in ['images']:
                os.makedirs(os.path.join(tdir, q, sd))
        # Write the index.theme for the base theme and any dark/light themes
        with open(os.path.join(tdir, theme_name, 'index.theme'), 'w') as f:
            f.write(index_theme(theme_name, inherits))
        file(f'{theme_name}/index.theme')
        for q in specific_themes:
            with open(os.path.join(tdir, q, 'index.theme'), 'w') as f:
                f.write(index_theme(q, inherits=theme_name))
            file(f'{q}/index.theme')

        def handle_image(image_path):
            # Hard-link one image into the appropriate theme dir, flattening
            # sub-directories into a '__'-separated filename prefix.
            image_name = os.path.basename(image_path)
            rp = os.path.relpath(os.path.dirname(image_path), path_to_dir).replace('\\', '/').strip('/').replace('/', '__')
            if rp == '.':
                rp = ''
            else:
                rp += '__'
            base, ext = os.path.splitext(image_name)
            theme_dir = theme_name
            dest_name = image_name
            if ext.lower() not in ('.png',):
                # Only PNGs become icons; metadata.json rides along as-is
                if image_name == 'metadata.json':
                    dest = theme_dir, dest_name
                    safe_link(image_path, os.path.join(tdir, *dest))
                    file('/'.join(dest))
                return
            if base.endswith('-for-dark-theme'):
                if for_theme == 'any':
                    theme_dir += '-dark'
                elif for_theme == 'light':
                    return  # dark-only variant not wanted in a light theme
                dest_name = dest_name.replace('-for-dark-theme', '')
            elif base.endswith('-for-light-theme'):
                if for_theme == 'any':
                    theme_dir += '-light'
                elif for_theme == 'dark':
                    return  # light-only variant not wanted in a dark theme
                dest_name = dest_name.replace('-for-light-theme', '')
            dest = theme_dir, 'images', (rp + dest_name)
            safe_link(image_path, os.path.join(tdir, *dest))
            file('/'.join(dest))

        for dirpath, dirnames, filenames in os.walk(path_to_dir):
            # Skip texture assets and our own staging directory
            if 'textures' in dirnames:
                dirnames.remove('textures')
            if os.path.basename(tdir) in dirnames:
                dirnames.remove(os.path.basename(tdir))
            for f in filenames:
                handle_image(os.path.join(dirpath, f))
        print('  </qresource>', file=qrc)
        print('</RCC>', file=qrc)
        qrc.close()
        # input(tdir)
        compile_qrc(output_path, qrc.name)
| 4,868 | Python | .py | 114 | 32.570175 | 123 | 0.563859 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,211 | emf.py | kovidgoyal_calibre/src/calibre/utils/wmf/emf.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from collections import namedtuple
from struct import unpack_from
from calibre.utils.wmf import create_bmp_from_dib, to_png
from polyglot.builtins import iteritems
# Record types {{{
# See: http://msdn.microsoft.com/en-us/library/cc231166.aspx
RECORD_TYPES = {
    'EMR_BITBLT' : 0x4C,
    'EMR_STRETCHBLT' : 0x4D,
    'EMR_MASKBLT' : 0x4E,
    'EMR_PLGBLT' : 0x4F,
    'EMR_SETDIBITSTODEVICE' : 0x50,
    'EMR_STRETCHDIBITS' : 0x51,
    'EMR_ALPHABLEND' : 0x72,
    'EMR_TRANSPARENTBLT' : 0x74,
    'EOF' : 0xe,
    'HEADER' : 0x1,
}
# Reverse map: record type id -> record name, used for log/debug messages
RECORD_RMAP = {v:k for k, v in iteritems(RECORD_TYPES)}

# Fields of the EMR_STRETCHDIBITS record body.
# See http://msdn.microsoft.com/en-us/library/cc230601.aspx
StretchDiBits = namedtuple(
    'StretchDiBits', 'left top right bottom x_dest y_dest x_src y_src cx_src'
    ' cy_src bmp_hdr_offset bmp_header_size bmp_bits_offset'
    ' bmp_bits_size usage op dest_width dest_height')
# }}}
class EMF:
    '''Minimal EMF parser that collects embedded DIB raster images.'''

    def __init__(self, raw, verbose=0):
        self.pos = 0
        self.found_eof = False
        self.verbose = verbose
        # Dispatch table: record type id -> bound handler method, defaulting
        # to handle_unknown for record types we do not care about.
        self.func_map = {}
        for name, rtype in iteritems(RECORD_TYPES):
            handler = 'handle_' + name.replace('EMR_', '').lower()
            self.func_map[rtype] = getattr(self, handler, self.handle_unknown)
        self.bitmaps = []
        while not self.found_eof and self.pos < len(raw):
            self.read_record(raw)
        self.has_raster_image = bool(self.bitmaps)

    def handle_unknown(self, rtype, size, raw):
        if self.verbose:
            label = RECORD_RMAP.get(rtype, hex(rtype).upper())
            print('Ignoring unknown record:', label)

    def handle_header(self, rtype, size, raw):
        pass

    def handle_stretchdibits(self, rtype, size, raw):
        # Record body starts after the 8 byte (type, size) record prefix
        rec = StretchDiBits(*unpack_from(b'<18I', raw, 8))
        dib_header = raw[rec.bmp_hdr_offset:rec.bmp_hdr_offset + rec.bmp_header_size]
        dib_bits = raw[rec.bmp_bits_offset:rec.bmp_bits_offset + rec.bmp_bits_size]
        self.bitmaps.append(create_bmp_from_dib(dib_header + dib_bits))

    def handle_eof(self, rtype, size, raw):
        self.found_eof = True

    def read_record(self, raw):
        # Every EMF record starts with little-endian (type, total size)
        rtype, size = unpack_from(b'<II', raw, self.pos)
        payload = raw[self.pos:self.pos + size]
        self.pos += size
        self.func_map.get(rtype, self.handle_unknown)(rtype, size, payload)

    def to_png(self):
        # Convert the largest collected bitmap (by byte size) to PNG
        by_size = sorted(self.bitmaps, key=len)
        return to_png(by_size[-1])
def emf_unwrap(raw, verbose=0):
    '''
    Return the largest embedded raster image in the EMF.
    The returned data is in PNG format.
    '''
    parsed = EMF(raw, verbose=verbose)
    if not parsed.has_raster_image:
        raise ValueError('No raster image found in the EMF')
    return parsed.to_png()
if __name__ == '__main__':
    # Debug harness: parse the EMF named on the command line and dump its
    # first bitmap as BMP and the largest one as PNG to /t/.
    with open(sys.argv[-1], 'rb') as f:
        raw = f.read()
    emf = EMF(raw, verbose=4)
    open('/t/test.bmp', 'wb').write(emf.bitmaps[0])
    open('/t/test.png', 'wb').write(emf.to_png())
| 3,004 | Python | .py | 76 | 33.802632 | 145 | 0.641801 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,212 | __init__.py | kovidgoyal_calibre/src/calibre/utils/wmf/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct
class Unavailable(Exception):
    # NOTE(review): raised by callers elsewhere; nothing in the code visible
    # here raises it.
    pass


class NoRaster(Exception):
    # NOTE(review): presumably signals a metafile containing no raster image;
    # nothing in the code visible here raises it.
    pass
class DIBHeader:
    '''
    Parse a DIB (device independent bitmap) header, supporting both the
    40 byte BITMAPINFOHEADER and the 12 byte BITMAPCOREHEADER forms.
    See http://en.wikipedia.org/wiki/BMP_file_format
    '''

    def __init__(self, raw):
        hsize = struct.unpack(b'<I', raw[:4])[0]
        if hsize == 40:
            self._unpack_fields(raw, hsize, b'<IiiHHIIIIII', (
                'header_size', 'width', 'height', 'color_planes',
                'bits_per_pixel', 'compression', 'image_size',
                'hres', 'vres', 'ncols', 'nimpcols'))
        elif hsize == 12:
            self._unpack_fields(raw, hsize, b'<IHHHH', (
                'header_size', 'width', 'height', 'color_planes',
                'bits_per_pixel'))
        else:
            raise ValueError('Unsupported DIB header type of size: %d'%hsize)
        # compression == 3 (BI_BITFIELDS) means three 4-byte channel masks
        # follow the header; the 12 byte core header has no compression field.
        self.bitmasks_size = 12 if getattr(self, 'compression', 0) == 3 else 0
        self.color_table_size = 0
        if self.bits_per_pixel != 24:
            # See http://support.microsoft.com/kb/q81498/
            # for all the gory Micro and soft details
            self.color_table_size = getattr(self, 'ncols', 0) * 4

    def _unpack_fields(self, raw, hsize, fmt, names):
        # Decode the header struct and expose each field as an attribute
        for attr, value in zip(names, struct.unpack(fmt, raw[:hsize])):
            setattr(self, attr, value)
def create_bmp_from_dib(raw):
    '''Prepend a 14 byte BMP file header to raw DIB data, yielding a
    standalone .bmp image.'''
    info = DIBHeader(raw)
    # Pixel data starts after the DIB header, optional bitmasks and palette
    pixel_array_offset = info.header_size + info.bitmasks_size + info.color_table_size
    total_size = len(raw) + 14
    file_header = (b'BM' + struct.pack(b'<I', total_size) + b'\0'*4 +
                   struct.pack(b'<I', pixel_array_offset))
    return file_header + raw
def to_png(bmp):
    '''Convert BMP image data to PNG using Qt; raises ValueError if the data
    cannot be decoded.'''
    from qt.core import QBuffer, QByteArray, QImage, QIODevice
    img = QImage()
    if not img.loadFromData(bmp):
        raise ValueError('Invalid image data')
    serialized = QByteArray()
    buf = QBuffer(serialized)
    buf.open(QIODevice.OpenModeFlag.WriteOnly)
    img.save(buf, 'png')
    return serialized.data()
| 2,116 | Python | .py | 55 | 29.909091 | 78 | 0.575061 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,213 | parse.py | kovidgoyal_calibre/src/calibre/utils/wmf/parse.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct
import sys
from calibre.utils.wmf import create_bmp_from_dib, to_png
class WMFHeader:
    '''
    Parser for the 18 byte WMF file header. For header documentation, see
    http://www.skynet.ie/~caolan/publink/libwmf/libwmf/doc/ora-wmf.html
    '''

    def __init__(self, data, log, verbose):
        self.log, self.verbose = log, verbose
        file_type, header_size, windows_version = struct.unpack_from('<HHH', data)
        if header_size != 9:
            raise ValueError('Not a WMF file')
        # The file size field counts 2-byte words, not bytes
        file_size, num_of_objects = struct.unpack_from('<IH', data, 6)
        if 2 * file_size != len(data):
            raise ValueError('WMF file header specifies incorrect file size')
        self.records_start_at = 2 * header_size
class WMF:
    # Minimal WMF parser: walks the record stream and collects embedded DIB
    # bitmaps (converted to BMP); vector drawing records are only named, not
    # rendered.
    def __init__(self, log=None, verbose=0):
        if log is None:
            from calibre.utils.logging import default_log as log
        self.log = log
        self.verbose = verbose
        # State populated while parsing records
        self.map_mode = None
        self.window_origin = None
        self.window_extent = None
        self.bitmaps = []  # BMP data for every raster record found
        # Map of WMF function number -> function name. Names matching a
        # method on this class get dispatched to it in __call__().
        self.function_map = { # {{{
            30: 'SaveDC',
            53: 'RealizePalette',
            55: 'SetPalEntries',
            79: 'StartPage',
            80: 'EndPage',
            82: 'AbortDoc',
            94: 'EndDoc',
            258: 'SetBkMode',
            259: 'SetMapMode',
            260: 'SetROP2',
            261: 'SetRelabs',
            262: 'SetPolyFillMode',
            263: 'SetStretchBltMode',
            264: 'SetTextCharExtra',
            295: 'RestoreDC',
            298: 'InvertRegion',
            299: 'PaintRegion',
            300: 'SelectClipRegion',
            301: 'SelectObject',
            302: 'SetTextAlign',
            313: 'ResizePalette',
            332: 'ResetDc',
            333: 'StartDoc',
            496: 'DeleteObject',
            513: 'SetBkColor',
            521: 'SetTextColor',
            522: 'SetTextJustification',
            523: 'SetWindowOrg',
            524: 'SetWindowExt',
            525: 'SetViewportOrg',
            526: 'SetViewportExt',
            527: 'OffsetWindowOrg',
            529: 'OffsetViewportOrg',
            531: 'LineTo',
            532: 'MoveTo',
            544: 'OffsetClipRgn',
            552: 'FillRegion',
            561: 'SetMapperFlags',
            564: 'SelectPalette',
            1040: 'ScaleWindowExt',
            1042: 'ScaleViewportExt',
            1045: 'ExcludeClipRect',
            1046: 'IntersectClipRect',
            1048: 'Ellipse',
            1049: 'FloodFill',
            1051: 'Rectangle',
            1055: 'SetPixel',
            1065: 'FrameRegion',
            1352: 'ExtFloodFill',
            1564: 'RoundRect',
            1565: 'PatBlt',
            2071: 'Arc',
            2074: 'Pie',
            2096: 'Chord',
            3379: 'SetDibToDev',
            247: 'CreatePalette',
            248: 'CreateBrush',
            322: 'DibCreatePatternBrush',
            505: 'CreatePatternBrush',
            762: 'CreatePenIndirect',
            763: 'CreateFontIndirect',
            764: 'CreateBrushIndirect',
            765: 'CreateBitmapIndirect',
            804: 'Polygon',
            805: 'Polyline',
            1078: 'AnimatePalette',
            1313: 'TextOut',
            1336: 'PolyPolygon',
            1574: 'Escape',
            1583: 'DrawText',
            1790: 'CreateBitmap',
            1791: 'CreateRegion',
            2338: 'BitBlt',
            2368: 'DibBitblt',
            2610: 'ExtTextOut',
            2851: 'StretchBlt',
            2881: 'DibStretchBlt',
            3907: 'StretchDIBits'
        } # }}}
    def __call__(self, stream_or_data):
        # Parse WMF data (bytes or a file-like object): split the stream into
        # (function, params) records, then dispatch each named record to the
        # same-named method on this class, if one exists.
        data = stream_or_data
        if hasattr(data, 'read'):
            data = data.read()
        self.log.filter_level = self.log.DEBUG
        self.header = WMFHeader(data, self.log, self.verbose)
        offset = self.header.records_start_at
        hsize = struct.calcsize('<IH')
        self.records = []
        while offset < len(data)-6:
            # Each record: 4-byte size (in words) followed by 2-byte function
            size, func = struct.unpack_from('<IH', data, offset)
            size *= 2  # Convert to bytes
            offset += hsize
            params = b''
            delta = size - hsize
            if delta > 0:
                params = data[offset:offset+delta]
                offset += delta
            func = self.function_map.get(func, func)
            if self.verbose > 3:
                self.log.debug('WMF Record:', size, func)
            self.records.append((func, params))
        for rec in self.records:
            if not hasattr(rec[0], 'split'):
                # Unknown record: rec[0] is still the raw integer id
                continue
            f = getattr(self, rec[0], None)
            if callable(f):
                f(rec[1])
            elif self.verbose > 2:
                self.log.debug('Ignoring record:', rec[0])
        self.has_raster_image = len(self.bitmaps) > 0
    def SetMapMode(self, params):
        # Record the mapping mode; payload is a single 16 bit value
        if len(params) == 2:
            self.map_mode = struct.unpack('<H', params)[0]
        else:
            self.log.warn('Invalid SetMapMode param')
    def SetWindowOrg(self, params):
        # Window origin comes in 16, 32 or 64 bit coordinate pairs
        if len(params) == 4:
            self.window_origin = struct.unpack('<HH', params)
        elif len(params) == 8:
            self.window_origin = struct.unpack('<II', params)
        elif len(params) == 16:
            self.window_origin = struct.unpack('<LL', params)
        else:
            self.log.warn('Invalid SetWindowOrg param', repr(params))
    def SetWindowExt(self, params):
        # Window extent comes in 16, 32 or 64 bit coordinate pairs
        if len(params) == 4:
            self.window_extent = struct.unpack('<HH', params)
        elif len(params) == 8:
            self.window_extent = struct.unpack('<II', params)
        elif len(params) == 16:
            self.window_extent = struct.unpack('<LL', params)
        else:
            self.log.warn('Invalid SetWindowExt param', repr(params))
    def DibStretchBlt(self, raw):
        # The raster record we care about: fixed-size placement fields
        # followed by raw DIB data, which we convert to a standalone BMP.
        offset = 0
        fmt = '<IHHHHHHHH'
        raster_op, src_height, src_width, y_src, x_src, dest_height, \
                dest_width, y_dest, x_dest = struct.unpack_from('<IHHHHHHHH', raw, offset)
        offset += struct.calcsize(fmt)
        bmp_data = raw[offset:]
        bmp = create_bmp_from_dib(bmp_data)
        self.bitmaps.append(bmp)
    def to_png(self):
        # Convert the largest (by byte size) collected bitmap to PNG
        bmps = list(sorted(self.bitmaps, key=lambda x: len(x)))
        bmp = bmps[-1]
        return to_png(bmp)
def wmf_unwrap(wmf_data, verbose=0):
    '''
    Return the largest embedded raster image in the WMF.
    The returned data is in PNG format.
    '''
    parser = WMF(verbose=verbose)
    parser(wmf_data)
    if not parser.has_raster_image:
        raise ValueError('No raster image found in the WMF')
    return parser.to_png()
if __name__ == '__main__':
    # Debug harness: parse the WMF named on the command line and dump its
    # first bitmap as BMP and the largest one as PNG to /t/.
    wmf = WMF(verbose=4)
    wmf(open(sys.argv[-1], 'rb'))
    open('/t/test.bmp', 'wb').write(wmf.bitmaps[0])
    open('/t/test.png', 'wb').write(wmf.to_png())
| 7,460 | Python | .py | 197 | 25.639594 | 86 | 0.509888 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,214 | __init__.py | kovidgoyal_calibre/src/calibre/utils/podofo/__init__.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2009, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
import sys
from calibre.constants import preferred_encoding
from calibre.ebooks.metadata import authors_to_string
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.ipc.simple_worker import WorkerError, fork_job
def get_podofo():
    # The podofo wrapper is a compiled extension; import it lazily at call
    # time rather than at module import.
    from calibre_extensions import podofo
    return podofo
def prep(val):
    '''Normalize a metadata value to a stripped unicode string; falsy values
    become the empty string, bytes are decoded with the preferred encoding.'''
    if not val:
        return ''
    if isinstance(val, str):
        return val.strip()
    return val.decode(preferred_encoding, 'replace').strip()
def set_metadata(stream, mi):
    '''
    Write the metadata from the Metadata object mi into the PDF contained in
    stream (a seekable file-like object opened for read/write), in place.

    The actual PDF manipulation runs in a separate worker process via
    fork_job; stream's contents are replaced only when the worker reports a
    change and produced a plausibly valid output file.
    '''
    with TemporaryDirectory('_podofo_set_metadata') as tdir:
        with open(os.path.join(tdir, 'input.pdf'), 'wb') as f:
            shutil.copyfileobj(stream, f)
        from calibre.ebooks.metadata.xmp import metadata_to_xmp_packet
        xmp_packet = metadata_to_xmp_packet(mi)
        try:
            result = fork_job('calibre.utils.podofo', 'set_metadata_', (tdir,
                mi.title, mi.authors, mi.book_producer, mi.tags, xmp_packet))
            touched = result['result']
        except WorkerError as e:
            raise Exception('Failed to set PDF metadata in (%s): %s'%(mi.title, e.orig_tb))
        if touched:
            with open(os.path.join(tdir, 'output.pdf'), 'rb') as f:
                f.seek(0, 2)
                # Sanity check: ignore a suspiciously tiny (corrupt) output
                if f.tell() > 100:
                    f.seek(0)
                    stream.seek(0)
                    stream.truncate()
                    shutil.copyfileobj(f, stream)
                    stream.flush()
        stream.seek(0)
def set_metadata_implementation(pdf_doc, title, authors, bkp, tags, xmp_packet):
    '''
    Apply the given metadata fields to an open PDFDoc, skipping values that
    are empty or already up to date. Returns True if anything was modified.
    '''
    changed = False
    new_title = prep(title)
    if new_title and new_title != pdf_doc.title:
        pdf_doc.title = new_title
        changed = True
    new_author = prep(authors_to_string(authors))
    if new_author and new_author != pdf_doc.author:
        pdf_doc.author = new_author
        changed = True
    # The book producer is recorded as both creator and producer
    producer = prep(bkp)
    for field in ('creator', 'producer'):
        if producer and producer != getattr(pdf_doc, field):
            setattr(pdf_doc, field, producer)
            changed = True
    # Keywords and XMP are best-effort: failures here must not abort the
    # whole metadata update.
    try:
        new_tags = prep(', '.join(x.strip() for x in tags if x.strip()))
        if new_tags != pdf_doc.keywords:
            pdf_doc.keywords = new_tags
            changed = True
    except Exception:
        pass
    try:
        current_xmp_packet = pdf_doc.get_xmp_metadata()
        if current_xmp_packet:
            from calibre.ebooks.metadata.xmp import merge_xmp_packet
            xmp_packet = merge_xmp_packet(current_xmp_packet, xmp_packet)
        pdf_doc.set_xmp_metadata(xmp_packet)
        changed = True
    except Exception:
        pass
    return changed
def set_metadata_(tdir, title, authors, bkp, tags, xmp_packet):
    '''
    Worker-process entry point for set_metadata(): reads tdir/input.pdf,
    applies the metadata and, if anything changed, writes tdir/output.pdf.
    Returns whether the PDF was modified.
    '''
    podofo = get_podofo()
    os.chdir(tdir)  # operate on relative paths inside the temp dir
    p = podofo.PDFDoc()
    p.open('input.pdf')
    touched = set_metadata_implementation(p, title, authors, bkp, tags, xmp_packet)
    if touched:
        p.save('output.pdf')
    return touched
def get_xmp_metadata(path):
    '''Return the XMP metadata packet of the PDF at path.'''
    doc = get_podofo().PDFDoc()
    with open(path, 'rb') as f:
        doc.load(f.read())
    return doc.get_xmp_metadata()
def get_outline(path=None):
    '''Return the outline (table of contents) entries of the PDF at path,
    defaulting to the last command line argument.'''
    if path is None:
        path = sys.argv[-1]
    doc = get_podofo().PDFDoc()
    with open(path, 'rb') as f:
        doc.load(f.read())
    return doc.get_outline()['children']
def get_image_count(path):
    '''Return the number of images in the PDF at path.'''
    doc = get_podofo().PDFDoc()
    with open(path, 'rb') as f:
        doc.load(f.read())
    return doc.image_count()
def list_fonts(pdf_doc):
    '''Return a mapping of font Reference -> font description dict for every
    font in the document.'''
    return {font['Reference']: font for font in pdf_doc.list_fonts()}
def remove_unused_fonts(pdf_doc):
    # Thin wrapper over the compiled podofo implementation.
    return pdf_doc.remove_unused_fonts()
def test_remove_unused_fonts(src):
    # Manual test helper: strip unused fonts from the PDF at src and write
    # the result next to it with a '-removed.pdf' suffix.
    podofo = get_podofo()
    p = podofo.PDFDoc()
    p.open(src)
    remove_unused_fonts(p)
    dest = src.rpartition('.')[0] + '-removed.pdf'
    p.save(dest)
    print('Modified pdf saved to:', dest)
def dedup_type3_fonts(pdf_doc):
    # Thin wrapper over the compiled podofo implementation; returns the
    # number of glyphs removed (see test_dedup_type3_fonts below).
    return pdf_doc.dedup_type3_fonts()
def test_dedup_type3_fonts(src):
    # Manual test helper: deduplicate Type3 glyphs in the PDF at src and
    # write the result next to it with a '-removed.pdf' suffix.
    podofo = get_podofo()
    p = podofo.PDFDoc()
    p.open(src)
    num = dedup_type3_fonts(p)
    dest = src.rpartition('.')[0] + '-removed.pdf'
    p.save(dest)
    print(f'Modified pdf with {num} glyphs removed saved to:', dest)
def add_image_page(pdf_doc, image_data, page_size=None, page_num=1, preserve_aspect_ratio=True):
    '''
    Insert image_data as a full page at position page_num in pdf_doc.
    page_size is a (left, top, width, height) tuple in points, defaulting
    to A4.
    '''
    if page_size is None:
        from qt.core import QPageSize
        rect = QPageSize(QPageSize.PageSizeId.A4).rect(QPageSize.Unit.Point)
        page_size = rect.left(), rect.top(), rect.width(), rect.height()
    # The backend takes the page geometry twice (page box and image box)
    geometry = tuple(page_size) + tuple(page_size)
    pdf_doc.add_image_page(image_data, *geometry, page_num, preserve_aspect_ratio)
def test_add_image_page(image='/t/t.jpg', dest='/t/t.pdf', **kw):
    '''
    Manual test helper: create a PDF at dest containing the given image.
    Extra keyword arguments are passed through to add_image_page().
    '''
    # BUG FIX: the image file was previously opened without ever being
    # closed; use a context manager so the handle is released promptly.
    with open(image, 'rb') as f:
        image_data = f.read()
    podofo = get_podofo()
    p = podofo.PDFDoc()
    add_image_page(p, image_data, **kw)
    p.save(dest)
def test_list_fonts(src):
    # Manual test helper: pretty-print the font reference map of the PDF
    # at src.
    podofo = get_podofo()
    p = podofo.PDFDoc()
    with open(src, 'rb') as f:
        raw = f.read()
    p.load(raw)
    import pprint
    pprint.pprint(list_fonts(p))
def test_save_to(src, dest):
    # Manual test helper: load src fully into memory, then stream it back
    # out via save_to_fileobj() into dest.
    podofo = get_podofo()
    p = podofo.PDFDoc()
    with open(src, 'rb') as f:
        raw = f.read()
    p.load(raw)
    with open(dest, 'wb') as out:
        p.save_to_fileobj(out)
        print('Wrote PDF of size:', out.tell())
def test_roundtrip(src, dest):
    # Manual test helper: open src and immediately re-save it to dest, to
    # exercise the parse/serialize round trip.
    podofo = get_podofo()
    p = podofo.PDFDoc()
    p.open(src)
    p.save(dest)
def test_podofo():
import tempfile
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.xmp import metadata_to_xmp_packet
# {{{
raw = b"%PDF-1.1\n%\xe2\xe3\xcf\xd3\n1 0 obj<</Type/Catalog/Metadata 6 0 R/Pages 2 0 R>>\nendobj\n2 0 obj<</Type/Pages/Count 1/Kids[ 3 0 R]/MediaBox[ 0 0 300 144]>>\nendobj\n3 0 obj<</Type/Page/Contents 4 0 R/Parent 2 0 R/Resources<</Font<</F1<</Type/Font/BaseFont/Times-Roman/Subtype/Type1>>>>>>>>\nendobj\n4 0 obj<</Length 55>>\nstream\n BT\n /F1 18 Tf\n 0 0 Td\n (Hello World) Tj\n ET\nendstream\nendobj\n5 0 obj<</Author(\xfe\xff\x00U\x00n\x00k\x00n\x00o\x00w\x00n)/CreationDate(D:20140919134038+05'00')/Producer(PoDoFo - http://podofo.sf.net)/Title(\xfe\xff\x00n\x00e\x00w\x00t)>>\nendobj\n6 0 obj<</Type/Metadata/Filter/FlateDecode/Length 584/Subtype/XML>>\nstream\nx\x9c\xed\x98\xcd\xb2\x930\x14\xc7\xf7}\n&.\x1d\x1ahoGa\x80\x8e\xb6\xe3x\x17ua\xaf\xe3\xd2\t\xc9i\x1b\x0b\x81&a\xc0\xfbj.|$_\xc1\xd0r\xe9\xb7V\x9d\xbb\x83\x15\x9c\x9c\xff\xff\x97\x8fs\xb2 \x18W9\xa1k\xd0V\x0cK.B\xf4\xf3\xfb\x0fdq\x16\xa2\xcf\xa3\x993\xcb'\xb0\xe2\xef\x1f%\xcc\x1f?<\xd0\xc75\xf5\x18\x1aG\xbd\xa0\xf2\xab4OA\x13\xabJ\x13\xa1\xfc*D\x84e1\xf8\xe6\xbd\x0ec\x14\xf5,+\x90l\xe1\x7f\x9c\xbek\x92\xccW\x88VZ\xe7>\xc6eY\xf6\xcba?\x93K\xecz\x9e\x87\x9d\x01\x1e\x0cl\x93a\xaboB\x93\xca\x16\xea\xc5\xd6\xa3q\x99\x82\xa2\x92\xe7\x9ag\xa2qc\xb45\xcb\x0b\x99l\xad\x18\xc5\x90@\nB+\xec\xf6]\x8c\xacZK\xe2\xac\xd0!j\xec\x8c!\xa3>\xdb\xfb=\x85\x1b\xd2\x9bD\xef#M,\xe15\xd4O\x88X\x86\xa8\xb2\x19,H\x91h\x14\x05x7z`\x81O<\x02|\x99VOBs\x9d\xc0\x7f\xe0\x05\x94\xfa\xd6)\x1c\xb1jx^\xc4\tW+\x90'\x13xK\x96\xf8Hy\x96X\xabU\x11\x7f\x05\xaa\xff\xa4=I\xab\x95T\x02\xd1\xd9)u\x0e\x9b\x0b\xcb\x8e>\x89\xb5\xc8Jqm\x91\x07\xaa-\xee\xc8{\x972=\xdd\xfa+\xe5d\xea\xb9\xad'\xa1\xfa\xdbj\xee\xd3,\xc5\x15\xc9M-9\xa6\x96\xdaD\xce6Wr\xd3\x1c\xdf3S~|\xc1A\xe2MA\x92F{\xb1\x0eM\xba?3\xdd\xc2\x88&S\xa2!\x1a8\xee\x9d\xedx\xb6\xeb=\xb8C\xff\xce\xf1\x87\xaf\xfb\xde\xe0\xd5\xc8\xf3^:#\x7f\xe8\x04\xf8L\xf2\x0fK\xcd%W\xe9\xbey\xea/\xa5\x89`D\xb2m\x17\t\x92\x822\xb7\x02(\x1c\x13\xc5)\x1e\x9c-\x01\xff\x1e\xc0\x16\xd5\xe5\r\xaaG\xcc\x8e\x0c\xff\xca\x8e\x92\
x84\xc7\x12&\x93\xd6\xb3\x89\xd8\x10g\xd9\xfai\xe7\xedv\xde6-\x94\xceR\x9bfI\x91\n\x85\x8e}nu9\x91\xcd\xefo\xc6+\x90\x1c\x94\xcd\x05\x83\xea\xca\xd17\x16\xbb\xb6\xfc\xa22\xa9\x9bn\xbe0p\xfd\x88wAs\xc3\x9a+\x19\xb7w\xf2a#=\xdf\xd3A:H\x07\xe9 \x1d\xa4\x83t\x90\x0e\xd2A:H\x07yNH/h\x7f\xd6\x80`!*\xd18\xfa\x05\x94\x80P\xb0\nendstream\nendobj\nxref\n0 7\n0000000000 65535 f \n0000000015 00000 n \n0000000074 00000 n \n0000000148 00000 n \n0000000280 00000 n \n0000000382 00000 n \n0000000522 00000 n \ntrailer\n<</ID[<4D028D512DEBEFD964756764AD8FF726><4D028D512DEBEFD964756764AD8FF726>]/Info 5 0 R/Root 1 0 R/Size 7>>\nstartxref\n1199\n%%EOF\n" # noqa
# }}}
mi = Metadata('title1', ['xmp_author'])
podofo = get_podofo()
p = podofo.PDFDoc()
p.load(raw)
p.title = 'info title'
p.author = 'info author'
p.keywords = 'a, b'
if p.version != '1.1':
raise ValueError('Incorrect PDF version')
xmp_packet = metadata_to_xmp_packet(mi)
# print(p.get_xmp_metadata().decode())
p.set_xmp_metadata(xmp_packet)
# print(p.get_xmp_metadata().decode())
with tempfile.NamedTemporaryFile(suffix='.pdf', delete=False) as f:
p.save_to_fileobj(f)
f.seek(0)
fraw = f.read()
wraw = p.write()
if fraw != wraw:
raise ValueError("write() and save_to_fileobj() resulted in different output")
try:
p = podofo.PDFDoc()
p.open(f.name)
if (p.title, p.author, p.keywords) != ('info title', 'info author', 'a, b'):
raise ValueError('podofo failed to set title and author in Info dict {} != {}'.format(
(p.title, p.author, p.keywords), ('info title', 'info author', 'a, b')))
xmp = p.get_xmp_metadata().decode()
if 'xmp_author' not in xmp:
raise ValueError('Failed to set XML block, received:\n' + xmp)
del p
finally:
os.remove(f.name)
a = podofo.PDFDoc()
a.load(raw)
b = podofo.PDFDoc()
b.load(raw)
a.append(b)
if a.page_count() != 2 * b.page_count():
raise ValueError('Appending failed')
def develop(path=sys.argv[-1]):
    # Developer entry point. NOTE: the default path is captured from
    # sys.argv at import time, by design for command line use.
    doc = get_podofo().PDFDoc()
    doc.open(path)
    doc.title = 'test'
# Allow running this module directly as a development aid.
if __name__ == '__main__':
    develop()
| 10,108 | Python | .py | 218 | 39.706422 | 2,663 | 0.647214 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,215 | linux.py | kovidgoyal_calibre/src/calibre/utils/open_with/linux.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import re
import shlex
from collections import defaultdict
from calibre import force_unicode, guess_type, prints, walk
from calibre.constants import cache_dir, filesystem_encoding
from calibre.utils.icu import numeric_sort_key as sort_key
from calibre.utils.localization import canonicalize_lang, get_lang
from calibre.utils.serialize import msgpack_dumps, msgpack_loads
from polyglot.builtins import iteritems, itervalues, string_or_bytes
def parse_localized_key(key):
    '''Split a desktop-file key like ``Name[de]`` into ``(name, locale)``.

    For an unlocalized key the locale is None.
    '''
    base, _, remainder = key.partition('[')
    if not remainder:
        return base, None
    # remainder still carries the trailing ']'
    return base, remainder[:-1]
def unquote_exec(val):
    '''Tokenize a desktop-file Exec= value into an argv list.'''
    # Collapse desktop-file escaped backslashes before shell-style splitting.
    return shlex.split(val.replace(r'\\', '\\'))
def known_localized_items():
    # Returns a fresh dict each call: callers mutate the per-key locale maps.
    return {key: {} for key in ('Name', 'GenericName', 'Comment', 'Icon')}
def parse_desktop_file(path):
    '''Parse a freedesktop.org .desktop file into a dict, or return None.

    Only the [Desktop Entry] group is read. Returns None for hidden entries,
    non-Application types, undecodable files, or entries missing any of the
    required Exec/MimeType/Name keys.
    '''
    gpat = re.compile(r'^\[(.+?)\]\s*$')  # group headers like [Desktop Entry]
    kpat = re.compile(r'^([-a-zA-Z0-9\[\]@_.]+)\s*=\s*(.+)$')  # key=value lines
    try:
        with open(path, 'rb') as f:
            raw = f.read().decode('utf-8')
    except (OSError, UnicodeDecodeError):
        return
    group = None
    ans = {}
    ans['desktop_file_path'] = path
    localized_items = known_localized_items()
    for line in raw.splitlines():
        m = gpat.match(line)
        if m is not None:
            # A new group begins; once [Desktop Entry] has been processed
            # there is nothing more of interest in the file.
            if group == 'Desktop Entry':
                break
            group = m.group(1)
            continue
        if group == 'Desktop Entry':
            m = kpat.match(line)
            if m is not None:
                k, v = m.group(1), m.group(2)
                if k == 'Hidden' and v == 'true':
                    return
                if k == 'Type' and v != 'Application':
                    return
                if k == 'Exec':
                    cmdline = unquote_exec(v)
                    # Accept the command only if it is relative (resolved via
                    # PATH at launch time) or an executable absolute path.
                    if cmdline and (not os.path.isabs(cmdline[0]) or os.access(cmdline[0], os.X_OK)):
                        ans[k] = cmdline
                elif k == 'MimeType':
                    ans[k] = frozenset(x.strip() for x in v.split(';'))
                elif k in localized_items or '[' in k:
                    # Localized key such as Name[de]: collect all language
                    # variants in a sub-dict keyed by locale (None = default).
                    name, lang = parse_localized_key(k)
                    vals = localized_items.setdefault(name, {})
                    vals[lang] = v
                    if name in ans:
                        vals[None] = ans.pop(name)
                else:
                    ans[k] = v
    for k, vals in localized_items.items():
        if vals:
            ans[k] = dict(vals)
    if 'Exec' in ans and 'MimeType' in ans and 'Name' in ans:
        return ans
# Cached icon-name -> path map; populated on first call to find_icons().
icon_data = None
def find_icons():
    '''Scan the XDG icon directories and return a mapping of icon name to the
    path of the best (largest) available icon file.

    The result is cached in memory for the process lifetime and on disk in a
    msgpack file keyed by the modification time of each theme directory.
    '''
    global icon_data
    if icon_data is not None:
        return icon_data
    base_dirs = [(os.environ.get('XDG_DATA_HOME') or os.path.expanduser('~/.local/share')) + '/icons']
    base_dirs += [os.path.expanduser('~/.icons')]
    base_dirs += [
        os.path.join(b, 'icons') for b in os.environ.get(
            'XDG_DATA_DIRS', '/usr/local/share:/usr/share').split(os.pathsep)] + [
        '/usr/share/pixmaps']
    ans = defaultdict(list)
    # Matches the size component of icon theme paths, e.g. /48x48/ or /scalable/
    sz_pat = re.compile(r'/((?:\d+x\d+)|scalable)/')
    cache_file = os.path.join(cache_dir(), 'icon-theme-cache.calibre_msgpack')
    exts = {'.svg', '.png', '.xpm'}
    def read_icon_theme_dir(dirpath):
        # Returns {icon_name: (-size, path)} for the largest icon of each name
        # in this theme directory. The size is negated so that a plain
        # ascending sort puts the largest icon first.
        ans = defaultdict(list)
        for path in walk(dirpath):
            bn = os.path.basename(path)
            name, ext = os.path.splitext(bn)
            if ext in exts:
                sz = sz_pat.findall(path)
                if sz:
                    sz = sz[-1]
                    if sz == 'scalable':
                        # Treat scalable (SVG) icons as effectively unbounded.
                        sz = 100000
                    else:
                        sz = int(sz.partition('x')[0])
                    idx = len(ans[name])
                    # idx is a tie-breaker to keep the sort stable/deterministic.
                    ans[name].append((-sz, idx, sz, path))
        for icons in itervalues(ans):
            icons.sort(key=list)
        return {k:(-v[0][2], v[0][3]) for k, v in iteritems(ans)}
    try:
        with open(cache_file, 'rb') as f:
            cache = f.read()
        cache = msgpack_loads(cache)
        mtimes, cache = defaultdict(int, cache['mtimes']), defaultdict(dict, cache['data'])
    except Exception:
        # Missing or corrupt cache: start from scratch.
        mtimes, cache = defaultdict(int), defaultdict(dict)
    seen_dirs = set()
    changed = False
    for loc in base_dirs:
        try:
            subdirs = os.listdir(loc)
        except OSError:
            continue
        for dname in subdirs:
            d = os.path.join(loc, dname)
            if os.path.isdir(d):
                try:
                    mtime = os.stat(d).st_mtime
                except OSError:
                    continue
                seen_dirs.add(d)
                if mtime != mtimes[d]:
                    # Directory changed since the last scan: re-read it.
                    changed = True
                    try:
                        cache[d] = read_icon_theme_dir(d)
                    except Exception:
                        prints('Failed to read icon theme dir: %r with error:' % d)
                        import traceback
                        traceback.print_exc()
                    mtimes[d] = mtime
                for name, data in iteritems(cache[d]):
                    ans[name].append(data)
    # Forget cache entries for directories that no longer exist.
    for removed in set(mtimes) - seen_dirs:
        mtimes.pop(removed), cache.pop(removed)
        changed = True
    if changed:
        data = msgpack_dumps({'data':cache, 'mtimes':mtimes})
        try:
            with open(cache_file, 'wb') as f:
                f.write(data)
        except Exception:
            import traceback
            traceback.print_exc()
    for icons in itervalues(ans):
        icons.sort(key=list)
    # Keep only the path of the best (largest) icon for each name.
    icon_data = {k:v[0][1] for k, v in iteritems(ans)}
    return icon_data
def localize_string(data):
    '''Pick the best entry from a {locale_key: text} map for the UI language,
    falling back to the unlocalized (None-keyed) value.'''
    wanted = canonicalize_lang(get_lang())
    def matches(key):
        if key is None:
            return False
        # Compare only the base language, ignoring region/encoding/modifier.
        base = re.split(r'[_.@]', key)[0]
        return canonicalize_lang(base) == wanted
    for key in data:
        if matches(key):
            return data[key]
    return data.get(None) or ''
def process_desktop_file(data):
    '''Resolve the Icon entry to a concrete file path and localize the
    display strings, mutating and returning ``data``.'''
    icon_name = data.get('Icon', {}).get(None)
    if icon_name and not os.path.isabs(icon_name):
        # Named (non-absolute) icons are resolved via the icon theme dirs.
        resolved = find_icons().get(icon_name)
        if resolved:
            data['Icon'] = resolved
        else:
            data.pop('Icon')
    if not isinstance(data.get('Icon'), string_or_bytes):
        data.pop('Icon', None)
    for field in ('Name', 'GenericName', 'Comment'):
        value = data.get(field)
        if value:
            data[field] = localize_string(value)
    return data
def find_programs(extensions):
    '''Find installed applications that can open files with the given extensions.

    Scans the XDG data directories for .desktop files whose MimeType
    declaration intersects the MIME types guessed from the extensions.
    Returns a list of processed desktop entries sorted by display name.
    '''
    extensions = {ext.lower() for ext in extensions}
    data_dirs = [os.environ.get('XDG_DATA_HOME') or os.path.expanduser('~/.local/share')]
    data_dirs += (os.environ.get('XDG_DATA_DIRS') or '/usr/local/share/:/usr/share/').split(os.pathsep)
    data_dirs = [force_unicode(x, filesystem_encoding).rstrip(os.sep) for x in data_dirs]
    data_dirs = [x for x in data_dirs if x and os.path.isdir(x)]
    desktop_files = {}
    mime_types = {guess_type('file.' + ext)[0] for ext in extensions}
    ans = []
    for base in data_dirs:
        for f in walk(os.path.join(base, 'applications')):
            if f.endswith('.desktop'):
                bn = os.path.basename(f)
                # Keep the first occurrence of each basename: earlier XDG data
                # dirs take precedence. The original tested `f not in
                # desktop_files`, which is always true for a dict keyed by
                # basename, so later dirs silently overrode earlier ones.
                if bn not in desktop_files:
                    desktop_files[bn] = f
    for bn, path in iteritems(desktop_files):
        try:
            data = parse_desktop_file(path)
        except Exception:
            import traceback
            traceback.print_exc()
            continue
        if data is not None and mime_types.intersection(data['MimeType']):
            ans.append(process_desktop_file(data))
    ans.sort(key=lambda d:sort_key(d.get('Name')))
    return ans
def entry_sort_key(entry):
    '''Key function for sorting desktop entries by their display name.'''
    display_name = entry['Name']
    return sort_key(display_name)
def entry_to_cmdline(entry, path):
    '''Build the argv list used to open ``path`` with this desktop entry.

    Expands the Exec field codes from the Desktop Entry specification
    (%f, %u, %c, %k, %%, ...); codes with no defined replacement are left
    untouched. Note: %i expansion mutates entry['Exec'] in place.
    '''
    path = os.path.abspath(path)
    replacements = {
        'f': path, 'F': path, 'u': 'file://' + path, 'U': 'file://' + path,
        '%': '%', 'c': entry.get('Name', ''), 'k': entry.get('desktop_file_path', ''),
    }
    field_code = re.compile(r'%[fFuUdDnNickvm%]')
    def expand(match):
        code = match.group()[-1]
        replacement = replacements.get(code)
        return match.group() if replacement is None else replacement
    cmd = entry['Exec']
    if '%i' in cmd:
        # %i expands to "--icon <icon>" (or to nothing when no icon is set).
        idx = cmd.index('%i')
        icon = entry.get('Icon')
        cmd[idx:idx + 1] = ['--icon', icon] if icon else []
    return cmd[:1] + [field_code.sub(expand, arg) for arg in cmd[1:]]
| 8,579 | Python | .py | 225 | 28.066667 | 103 | 0.54057 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,216 | windows.py | kovidgoyal_calibre/src/calibre/utils/open_with/windows.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import os
import re
import sys
from qt.core import QBuffer, QByteArray, QIODevice, QPixmap, Qt
from calibre.gui2 import must_use_qt
from calibre.utils.winreg.default_programs import split_commandline
from calibre_extensions import progress_indicator, winutil
# Default edge length (in pixels) for icons extracted from Windows resources.
ICON_SIZE = 256
def hicon_to_pixmap(hicon):
    # Convert a Windows HICON handle into a QPixmap via the C extension.
    return QPixmap.fromImage(progress_indicator.image_from_hicon(int(hicon)))
def pixmap_to_data(pixmap):
    '''Serialize a QPixmap to PNG bytes.'''
    storage = QByteArray()
    buf = QBuffer(storage)
    buf.open(QIODevice.OpenModeFlag.WriteOnly)
    pixmap.save(buf, 'PNG')
    return bytes(bytearray(storage.data()))
def load_icon_resource_as_pixmap(icon_resource, size=ICON_SIZE):
    '''Load a Windows icon resource string of the form "path,index" and
    return the variant closest to ``size`` as a QPixmap, or None on failure.'''
    if not icon_resource:
        return
    # icon_resource looks like: C:\path\to\file.exe,-123
    parts = tuple(filter(None, re.split(r',([-0-9]+$)', icon_resource)))
    if len(parts) != 2:
        return
    module, index = parts
    index = int(index)
    if module.startswith('"') and module.endswith('"'):
        module = split_commandline(module)[0]
    hmodule = winutil.load_library(module, winutil.LOAD_LIBRARY_AS_DATAFILE | winutil.LOAD_LIBRARY_AS_IMAGE_RESOURCE)
    icons = winutil.load_icons(hmodule, index)
    pixmaps = []
    must_use_qt()
    for icon_data, icon_handle in icons:
        pixmap = QPixmap()
        pixmap.loadFromData(icon_data)
        if pixmap.isNull() and bool(icon_handle):
            # Raw icon data failed to load; fall back to converting the HICON.
            pixmap = hicon_to_pixmap(icon_handle)
        if pixmap.isNull():
            continue
        pixmaps.append(pixmap)
    if not pixmaps:
        return
    def area(p):
        return p.width() * p.height()
    pixmaps.sort(key=area)
    q = size * size
    # Prefer the smallest icon whose area covers the requested size,
    # downscaling it when it is larger than needed...
    for pmap in pixmaps:
        if area(pmap) >= q:
            if area(pmap) == q:
                return pmap
            return pmap.scaled(
                int(size), int(size), aspectRatioMode=Qt.AspectRatioMode.KeepAspectRatio, transformMode=Qt.TransformationMode.SmoothTransformation)
    # ...otherwise upscale the largest available icon.
    return pixmaps[-1].scaled(
        int(size), int(size), aspectRatioMode=Qt.AspectRatioMode.KeepAspectRatio, transformMode=Qt.TransformationMode.SmoothTransformation)
def load_icon_resource(icon_resource, as_data=False, size=ICON_SIZE):
    '''Load an icon resource, optionally as PNG bytes instead of a QPixmap.'''
    pixmap = load_icon_resource_as_pixmap(icon_resource, size=size)
    if pixmap is None:
        return
    return pixmap_to_data(pixmap) if as_data else pixmap
def load_icon_for_file(path: str, as_data=False, size=ICON_SIZE):
    '''Return the Windows shell icon for ``path`` (pixmap or PNG bytes),
    or None when no icon could be obtained.'''
    try:
        hicon = winutil.get_icon_for_file(os.path.abspath(path))
    except Exception:
        return
    must_use_qt()
    pmap = hicon_to_pixmap(hicon)
    if pmap.isNull():
        return
    if pmap.width() != size:
        pmap = pmap.scaled(
            int(size), int(size), aspectRatioMode=Qt.AspectRatioMode.KeepAspectRatio,
            transformMode=Qt.TransformationMode.SmoothTransformation)
    return pixmap_to_data(pmap) if as_data else pmap
def load_icon_for_cmdline(cmdline: str, as_data=False, size=ICON_SIZE):
    # The icon comes from the executable, i.e. the first commandline token.
    exe = split_commandline(cmdline)[0]
    return load_icon_for_file(exe, as_data=as_data, size=size)
def display_image(png_data):
    '''Write PNG bytes to stdout using the terminal graphics protocol
    (base64 payload in 4096-byte chunks, m=1 while more chunks follow).'''
    from base64 import standard_b64encode

    def encode_cmd(cmd, payload=None):
        header = ','.join(f'{k}={v}' for k, v in cmd.items())
        pieces = [b'\033_G', header.encode('ascii')]
        if payload:
            pieces.append(b';')
            pieces.append(payload)
        pieces.append(b'\033\\')
        return b''.join(pieces)

    def write_chunked(cmd, data):
        data = standard_b64encode(data)
        while data:
            chunk, data = data[:4096], data[4096:]
            cmd['m'] = 1 if data else 0
            sys.stdout.buffer.write(encode_cmd(cmd, chunk))
            sys.stdout.buffer.flush()
            # Only the first escape code carries the full command keys.
            cmd.clear()
        sys.stdout.flush()

    write_chunked({'a': 'T', 'f': 100}, png_data)
def test():
    # Developer helper: show the icon for the resource given on the command line.
    display_image(load_icon_resource(sys.argv[-1], as_data=True))
def test_shell():
    # Developer helper: show the shell icon for the file given on the command line.
    display_image(load_icon_for_file(sys.argv[-1], as_data=True))
| 4,089 | Python | .py | 105 | 32.028571 | 147 | 0.651833 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,217 | osx.py | kovidgoyal_calibre/src/calibre/utils/open_with/osx.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import mimetypes
import os
import re
import subprocess
from collections import defaultdict
from plistlib import loads
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.icu import numeric_sort_key
from polyglot.builtins import iteritems, string_or_bytes
application_locations = ('/Applications', '~/Applications', '~/Desktop')
# Public UTI MAP {{{
def generate_public_uti_map():
from html5_parser import parse
from lxml import etree
from polyglot.urllib import urlopen
raw = urlopen(
'https://developer.apple.com/library/ios/documentation/Miscellaneous/Reference/UTIRef/Articles/System-DeclaredUniformTypeIdentifiers.html').read()
root = parse(raw)
tables = root.xpath('//table')[0::2]
data = {}
for table in tables:
for tr in table.xpath('descendant::tr')[1:]:
td = tr.xpath('descendant::td')
identifier = etree.tostring(td[0], method='text', encoding='unicode').strip()
tags = etree.tostring(td[2], method='text', encoding='unicode').strip()
identifier = identifier.split()[0].replace('\u200b', '')
exts = [x.strip()[1:].lower() for x in tags.split(',') if x.strip().startswith('.')]
for ext in exts:
data[ext] = identifier
lines = ['PUBLIC_UTI_MAP = {']
for ext in sorted(data):
r = ("'" + ext + "':").ljust(16)
lines.append((' ' * 4) + r + "'" + data[ext] + "',")
lines.append('}')
with open(__file__, 'r+b') as f:
raw = f.read()
f.seek(0)
nraw = re.sub(r'^PUBLIC_UTI_MAP = .+?}', '\n'.join(lines), raw, flags=re.MULTILINE | re.DOTALL)
f.truncate(), f.write(nraw)
# Generated by generate_public_uti_map()
PUBLIC_UTI_MAP = {
'3g2': 'public.3gpp2',
'3gp': 'public.3gpp',
'3gp2': 'public.3gpp2',
'3gpp': 'public.3gpp',
'ai': 'com.adobe.illustrator.ai-image',
'aif': 'public.aiff-audio',
'aifc': 'public.aifc-audio',
'aiff': 'public.aiff-audio',
'app': 'com.apple.application-bundle',
'applescript': 'com.apple.applescript.text',
'asf': 'com.microsoft.advanced-systems-format',
'asx': 'com.microsoft.advanced-stream-redirector',
'au': 'public.ulaw-audio',
'avi': 'public.avi',
'bin': 'com.apple.macbinary-archive',
'bmp': 'com.microsoft.bmp',
'bundle': 'com.apple.bundle',
'c': 'public.c-source',
'c++': 'public.c-plus-plus-source',
'caf': 'com.apple.coreaudio-format',
'cc': 'public.c-plus-plus-source',
'class': 'com.sun.java-class',
'command': 'public.shell-script',
'cp': 'public.c-plus-plus-source',
'cpio': 'public.cpio-archive',
'cpp': 'public.c-plus-plus-source',
'csh': 'public.csh-script',
'cxx': 'public.c-plus-plus-source',
'defs': 'public.mig-source',
'dfont': 'com.apple.truetype-datafork-suitcase-font',
'dll': 'com.microsoft.windows-dynamic-link-library',
'doc': 'com.microsoft.word.doc',
'efx': 'com.js.efx-fax',
'eps': 'com.adobe.encapsulated-postscript',
'exe': 'com.microsoft.windows-executable',
'exp': 'com.apple.symbol-export',
'exr': 'com.ilm.openexr-image',
'fpx': 'com.kodak.flashpix.image',
'framework': 'com.apple.framework',
'gif': 'com.compuserve.gif',
'gtar': 'org.gnu.gnu-tar-archive',
'gz': 'org.gnu.gnu-zip-archive',
'gzip': 'org.gnu.gnu-zip-archive',
'h': 'public.c-header',
'h++': 'public.c-plus-plus-header',
'hpp': 'public.c-plus-plus-header',
'hqx': 'com.apple.binhex-archive',
'htm': 'public.html',
'html': 'public.html',
'hxx': 'public.c-plus-plus-header',
'icc': 'com.apple.colorsync-profile',
'icm': 'com.apple.colorsync-profile',
'icns': 'com.apple.icns',
'ico': 'com.microsoft.ico',
'jar': 'com.sun.java-archive',
'jav': 'com.sun.java-source',
'java': 'com.sun.java-source',
'javascript': 'com.netscape.javascript-source',
'jfx': 'com.j2.jfx-fax',
'jnlp': 'com.sun.java-web-start',
'jp2': 'public.jpeg-2000',
'jpeg': 'public.jpeg',
'jpg': 'public.jpeg',
'js': 'com.netscape.javascript-source',
'jscript': 'com.netscape.javascript-source',
'key': 'com.apple.keynote.key',
'kth': 'com.apple.keynote.kth',
'm': 'public.objective-c-source',
'm15': 'public.mpeg',
'm4a': 'public.mpeg-4-audio',
'm4b': 'com.apple.protected-mpeg-4-audio',
'm4p': 'com.apple.protected-mpeg-4-audio',
'm75': 'public.mpeg',
'mdimporter': 'com.apple.metadata-importer',
'mig': 'public.mig-source',
'mm': 'public.objective-c-plus-plus-source',
'mov': 'com.apple.quicktime-movie',
'mp3': 'public.mp3',
'mp4': 'public.mpeg-4',
'mpeg': 'public.mpeg',
'mpg': 'public.mpeg',
'o': 'public.object-code',
'otf': 'public.opentype-font',
'pct': 'com.apple.pict',
'pdf': 'com.adobe.pdf',
'pf': 'com.apple.colorsync-profile',
'pfa': 'com.adobe.postscript.pfa-font',
'pfb': 'com.adobe.postscript-pfb-font',
'ph3': 'public.php-script',
'ph4': 'public.php-script',
'php': 'public.php-script',
'php3': 'public.php-script',
'php4': 'public.php-script',
'phtml': 'public.php-script',
'pic': 'com.apple.pict',
'pict': 'com.apple.pict',
'pl': 'public.perl-script',
'plugin': 'com.apple.plugin',
'pm': 'public.perl-script',
'png': 'public.png',
'pntg': 'com.apple.macpaint-image',
'ppt': 'com.microsoft.powerpoint.ppt',
'ps': 'com.adobe.postscript',
'psd': 'com.adobe.photoshop-image',
'py': 'public.python-script',
'qif': 'com.apple.quicktime-image',
'qt': 'com.apple.quicktime-movie',
'qtif': 'com.apple.quicktime-image',
'qtz': 'com.apple.quartz-composer-composition',
'r': 'com.apple.rez-source',
'ra': 'com.real.realaudio',
'ram': 'com.real.realaudio',
'rb': 'public.ruby-script',
'rbw': 'public.ruby-script',
'rm': 'com.real.realmedia',
'rtf': 'public.rtf',
'rtfd': 'com.apple.rtfd',
's': 'public.assembly-source',
'scpt': 'com.apple.applescript.script',
'sd2': 'com.digidesign.sd2-audio',
'sgi': 'com.sgi.sgi-image',
'sh': 'public.shell-script',
'sit': 'com.allume.stuffit-archive',
'sitx': 'com.allume.stuffit-archive',
'smil': 'com.real.smil',
'snd': 'public.ulaw-audio',
'suit': 'com.apple.font-suitcase',
'tar': 'public.tar-archive',
'tga': 'com.truevision.tga-image',
'tgz': 'org.gnu.gnu-zip-tar-archive',
'tif': 'public.tiff',
'tiff': 'public.tiff',
'ttc': 'public.truetype-collection-font',
'ttf': 'public.truetype-ttf-font',
'txt': 'public.plain-text',
'ulw': 'public.ulaw-audio',
'vcard': 'public.vcard',
'vcf': 'public.vcard',
'vfw': 'public.avi',
'wav': 'com.microsoft.waveform-audio',
'wave': 'com.microsoft.waveform-audio',
'wax': 'com.microsoft.windows-media-wax',
'wdgt': 'com.apple.dashboard-widget',
'wm': 'com.microsoft.windows-media-wm',
'wma': 'com.microsoft.windows-media-wma',
'wmp': 'com.microsoft.windows-media-wmp',
'wmv': 'com.microsoft.windows-media-wmv',
'wmx': 'com.microsoft.windows-media-wmx',
'wvx': 'com.microsoft.windows-media-wvx',
'xbm': 'public.xbitmap-image',
'xls': 'com.microsoft.excel.xls',
'xml': 'public.xml',
'zip': 'com.pkware.zip-archive',
}
# Reverse map: UTI identifier -> set of file extensions declared for it.
_rmap = defaultdict(set)
for _ext, _uti in iteritems(PUBLIC_UTI_MAP):
    _rmap[_uti].add(_ext)
PUBLIC_UTI_RMAP = dict(_rmap)
# }}}
def find_applications_in(base):
    '''Yield the paths of all .app bundles under ``base``, recursing into
    non-bundle subdirectories (but not into bundles themselves).'''
    try:
        entries = os.listdir(base)
    except OSError:
        # Unreadable or missing directory: nothing to yield.
        return
    for entry in entries:
        full = os.path.join(base, entry)
        if not os.path.isdir(full):
            continue
        if entry.lower().endswith('.app'):
            yield full
        else:
            yield from find_applications_in(full)
def find_applications():
    '''Yield every .app bundle found in the standard application locations.'''
    for location in application_locations:
        yield from find_applications_in(os.path.expanduser(location))
def get_extensions_from_utis(utis, plist):
    '''Return the set of file extensions corresponding to the given UTIs,
    using both the type declarations in ``plist`` and the built-in
    PUBLIC_UTI_RMAP table.'''
    declared_utis = defaultdict(set)
    for key in ('UTExportedTypeDeclarations', 'UTImportedTypeDeclarations'):
        for decl in plist.get(key, ()):
            if isinstance(decl, dict):
                uti = decl.get('UTTypeIdentifier')
                if isinstance(uti, string_or_bytes):
                    spec = decl.get('UTTypeTagSpecification')
                    if isinstance(spec, dict):
                        ext = spec.get('public.filename-extension')
                        if ext:
                            declared_utis[uti] |= set(ext)
                        types = spec.get('public.mime-type')
                        if types:
                            # Also map declared MIME types back to extensions.
                            for mt in types:
                                for ext in mimetypes.guess_all_extensions(mt, strict=False):
                                    # Drop the leading dot and lowercase.
                                    declared_utis[uti].add(ext.lower()[1:])
    ans = set()
    for uti in utis:
        ans |= declared_utis[uti]
        ans |= PUBLIC_UTI_RMAP.get(uti, set())
    return ans
def get_bundle_data(path):
    '''Read an .app bundle's Info.plist and return a dict with name, path,
    optional icon_file/identifier, and the set of file extensions the
    application handles. Returns None when the plist cannot be read.'''
    path = os.path.abspath(path)
    info = os.path.join(path, 'Contents', 'Info.plist')
    ans = {
        'name': os.path.splitext(os.path.basename(path))[0],
        'path': path,
    }
    try:
        with open(info, 'rb') as f:
            plist = loads(f.read())
    except Exception:
        import traceback
        traceback.print_exc()
        return None
    ans['name'] = plist.get('CFBundleDisplayName') or plist.get('CFBundleName') or ans['name']
    icfile = plist.get('CFBundleIconFile')
    if icfile:
        icfile = os.path.join(path, 'Contents', 'Resources', icfile)
        if not os.path.exists(icfile):
            # The .icns extension is commonly omitted in Info.plist.
            icfile += '.icns'
        if os.path.exists(icfile):
            ans['icon_file'] = icfile
    bid = plist.get('CFBundleIdentifier')
    if bid:
        ans['identifier'] = bid
    ans['extensions'] = extensions = set()
    for dtype in plist.get('CFBundleDocumentTypes', ()):
        utis = frozenset(dtype.get('LSItemContentTypes', ()))
        if utis:
            # Modern bundles declare UTIs; map them back to extensions.
            extensions |= get_extensions_from_utis(utis, plist)
        else:
            # Legacy bundles declare extensions / MIME types directly.
            for ext in dtype.get('CFBundleTypeExtensions', ()):
                if isinstance(ext, string_or_bytes):
                    extensions.add(ext.lower())
            for mt in dtype.get('CFBundleTypeMIMETypes', ()):
                if isinstance(mt, string_or_bytes):
                    for ext in mimetypes.guess_all_extensions(mt, strict=False):
                        extensions.add(ext.lower())
    return ans
def find_programs(extensions):
    '''Return bundle data for installed applications handling ``extensions``.'''
    wanted = frozenset(extensions)
    found = []
    for bundle_path in find_applications():
        try:
            info = get_bundle_data(bundle_path)
        except Exception:
            import traceback
            traceback.print_exc()
            continue
        if info and info['extensions'].intersection(wanted):
            found.append(info)
    return found
def get_icon(path, pixmap_to_data=None, as_data=False, size=64):
    '''Extract an image of at least ``size`` px from a macOS .icns file,
    using the ``iconutil`` command line tool to unpack the icon set.

    NOTE(review): when as_data is True, a pixmap_to_data callable must be
    supplied by the caller -- the None default would raise TypeError.
    '''
    if not path:
        return
    with TemporaryDirectory() as tdir:
        iconset = os.path.join(tdir, 'output.iconset')
        try:
            subprocess.check_call(['iconutil', '-c', 'iconset', '-o', 'output.iconset', path], cwd=tdir)
        except subprocess.CalledProcessError:
            return
        try:
            names = os.listdir(iconset)
        except OSError:
            return
        if not names:
            return
        from qt.core import QImage, Qt
        names.sort(key=numeric_sort_key)
        # Pick the first (smallest) image whose declared size covers ``size``.
        for name in names:
            m = re.search(r'(\d+)x\d+', name)
            if m is not None and int(m.group(1)) >= size:
                ans = QImage(os.path.join(iconset, name))
                if not ans.isNull():
                    break
        else:
            return
        ans = ans.scaled(size, size, transformMode=Qt.TransformationMode.SmoothTransformation)
        if as_data:
            ans = pixmap_to_data(ans)
        return ans
def entry_to_cmdline(entry, path):
    '''Build the argv list used to open ``path`` with this application entry.'''
    app_path = entry['path']
    if os.path.isdir(app_path):
        # .app bundles are directories and must be launched via `open -a`.
        return ['open', '-a', app_path, path]
    if 'identifier' in entry:
        # Launch by bundle identifier when we have one.
        return ['open', '-b', entry['identifier'], path]
    return [app_path, path]
| 13,678 | Python | .py | 329 | 34.392097 | 154 | 0.539368 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,218 | hyphenate.py | kovidgoyal_calibre/src/calibre/utils/hyphenation/hyphenate.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import os
import regex
from calibre.utils.hyphenation.dictionaries import dictionary_name_for_locale, path_to_dictionary
from polyglot.functools import lru_cache
REGEX_FLAGS = regex.VERSION1 | regex.WORD | regex.FULLCASE | regex.UNICODE
@lru_cache()
def dictionary_for_locale(locale):
    # Load (and memoize) the hyphen dictionary for a locale; returns None when
    # no dictionary is available for it.
    name = dictionary_name_for_locale(locale)
    if name is not None:
        from calibre_extensions import hyphen
        path = path_to_dictionary(name)
        # O_BINARY exists only on Windows; elsewhere the getattr yields 0.
        fd = os.open(path, getattr(os, 'O_BINARY', 0) | os.O_RDONLY)
        # NOTE(review): fd appears to be handed over to the extension --
        # confirm hyphen.load_dictionary takes ownership and closes it.
        return hyphen.load_dictionary(fd)
def add_soft_hyphens(word, dictionary, hyphen_char='\u00ad'):
    '''Insert ``hyphen_char`` at the hyphenation points of ``word``.

    The word is returned unchanged when it is too long or too short, already
    contains '=' (the hyphen library's break marker), or would require
    non-standard hyphenation (replacements).
    '''
    word = str(word)
    if len(word) > 99 or '=' in word:
        return word
    q = word
    # Strip any soft hyphens already present before measuring/hyphenating.
    q = q.replace(hyphen_char, '')
    if len(q) < 4:
        return word
    lq = q.lower() # the hyphen library needs lowercase words to work
    from calibre_extensions import hyphen
    try:
        ans = hyphen.simple_hyphenate(dictionary, lq)
    except ValueError:
        # Can happen if the word requires non-standard hyphenation (i.e.
        # replacements)
        return word
    parts = ans.split('=')
    if len(parts) == 1:
        return word
    if lq != q:
        # Re-apply the original casing: slice the original word into pieces of
        # the same lengths as the lowercased hyphenated parts.
        aparts = []
        pos = 0
        for p in parts:
            lp = len(p)
            aparts.append(q[pos:pos+lp])
            pos += lp
        parts = aparts
    return hyphen_char.join(parts)
# HTML tags whose text content must never be hyphenated (code, media,
# form controls, metadata, embedded markup, etc.).
tags_not_to_hyphenate = frozenset((
    'video', 'audio', 'script', 'code', 'pre', 'img', 'br', 'samp', 'kbd',
    'var', 'abbr', 'acronym', 'sub', 'sup', 'button', 'option', 'label',
    'textarea', 'input', 'math', 'svg', 'style', 'title', 'head'
))
def barename(x):
    '''Strip the XML namespace from a tag name ("{uri}tag" -> "tag").'''
    _, sep, rest = x.partition('}')
    return rest if sep else x
def words_pat():
    '''Return the (cached) compiled pattern matching a single word.'''
    try:
        return words_pat.ans
    except AttributeError:
        # Compile once on first use and stash it on the function object.
        words_pat.ans = regex.compile(r'\w+', REGEX_FLAGS)
        return words_pat.ans
def add_soft_hyphens_to_words(words, dictionary, hyphen_char='\u00ad'):
    '''Hyphenate every word in ``words``, preserving all non-word text.'''
    out = []
    last_end = 0
    for match in words_pat().finditer(words):
        start, end = match.span()
        if start > last_end:
            # Inter-word text (spaces, punctuation) passes through unchanged.
            out.append(words[last_end:start])
        out.append(add_soft_hyphens(match.group(), dictionary, hyphen_char))
        last_end = end
    out.append(words[last_end:])
    return ''.join(out)
def add_to_tag(stack, elem, locale, hyphen_char):
    '''Hyphenate the text directly inside ``elem`` and queue its children.

    The effective language is taken from the element's lang or xml:lang
    attribute, falling back to ``locale`` inherited from the parent.
    '''
    name = barename(elem.tag)
    if name in tags_not_to_hyphenate:
        return
    tl = elem.get('lang') or elem.get('{http://www.w3.org/XML/1998/namespace}lang') or locale
    dictionary = dictionary_for_locale(tl)
    if dictionary is not None and elem.text and not elem.text.isspace():
        elem.text = add_soft_hyphens_to_words(elem.text, dictionary, hyphen_char)
    for child in elem:
        # A child's tail text belongs to this element's language scope.
        if dictionary is not None and child.tail and not child.tail.isspace():
            child.tail = add_soft_hyphens_to_words(child.tail, dictionary, hyphen_char)
        # Skip non-element nodes (comments/PIs have a callable .tag in lxml).
        if not callable(getattr(child, 'tag', None)):
            stack.append((child, tl))
def add_soft_hyphens_to_html(root, locale='en', hyphen_char='\u00ad'):
    '''Insert soft hyphens into all hyphenatable text in an HTML tree.'''
    # Explicit stack instead of recursion; each entry carries the language
    # in effect for that element.
    pending = [(root, locale)]
    while pending:
        node, lang = pending.pop()
        add_to_tag(pending, node, lang, hyphen_char)
def remove_soft_hyphens_from_html(root, hyphen_char='\u00ad'):
    '''Strip every occurrence of ``hyphen_char`` from the tree's text and tails.'''
    for node in root.iterdescendants():
        tail = node.tail
        if tail:
            node.tail = tail.replace(hyphen_char, '')
        text = getattr(node, 'text', None)
        if text:
            node.text = text.replace(hyphen_char, '')
| 3,599 | Python | .py | 92 | 32.652174 | 97 | 0.632032 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,219 | test_hyphenation.py | kovidgoyal_calibre/src/calibre/utils/hyphenation/test_hyphenation.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import os
import shutil
import unittest
from lxml import etree
from calibre.ebooks.oeb.polish.parsing import parse_html5
from calibre.ptempfile import PersistentTemporaryDirectory
from calibre.utils.hyphenation.dictionaries import dictionary_name_for_locale, get_cache_path, is_cache_up_to_date, path_to_dictionary
from calibre.utils.hyphenation.hyphenate import add_soft_hyphens, add_soft_hyphens_to_html, add_soft_hyphens_to_words, dictionary_for_locale
class TestHyphenation(unittest.TestCase):
    '''Tests for hyphenation dictionary lookup and soft-hyphen insertion.'''
    # Shorthand for assertEqual used throughout.
    ae = unittest.TestCase.assertEqual
    def setUp(self):
        # Redirect the dictionary cache to a fresh temp dir and clear all
        # memoized state so every test starts cold.
        tdir = PersistentTemporaryDirectory()
        path_to_dictionary.cache_dir = tdir
        dictionary_name_for_locale.cache_clear()
        dictionary_for_locale.cache_clear()
        get_cache_path.cache_clear()
        is_cache_up_to_date.updated = False
    def tearDown(self):
        dictionary_name_for_locale.cache_clear()
        dictionary_for_locale.cache_clear()
        get_cache_path.cache_clear()
        is_cache_up_to_date.updated = False
        try:
            shutil.rmtree(path_to_dictionary.cache_dir)
        except OSError:
            pass
        path_to_dictionary.cache_dir = None
    def test_locale_to_hyphen_dictionary(self):
        # Locale strings must resolve to the expected bundled .dic names.
        def t(x, expected=None):
            self.ae(
                dictionary_name_for_locale(x),
                f'hyph_{expected}.dic' if expected else None
            )
        t('en', 'en_US')
        t('en_IN', 'en_GB')
        t('de', 'de_DE')
        t('es', 'es')
        t('nl', 'nl_NL')
        t('fr', 'fr')
        t('XXX')
        cache = [False]
        def cache_callback():
            cache[0] = True
        dp = path_to_dictionary(dictionary_name_for_locale('en'), cache_callback)
        self.assertTrue(
            os.path.exists(dp), f'The dictionary {dp} does not exist'
        )
        # First access triggers extraction (the callback fires)...
        self.assertTrue(cache[0])
        cache[0] = False
        self.assertTrue(
            os.path.exists(path_to_dictionary(dictionary_name_for_locale('es'), cache_callback))
        )
        # ...subsequent accesses are served from the existing cache.
        self.assertFalse(cache[0])
    def test_add_soft_hyphens(self):
        def t(word, expected):
            self.ae(add_soft_hyphens(word, dictionary, '='), expected)
        dictionary = dictionary_for_locale('hu')
        # Word with no simple hyphenation result is returned unchanged.
        t('asszonnyal', 'asszonnyal')
        dictionary = dictionary_for_locale('en')
        t('beautiful', 'beau=ti=ful')
        # Casing must be preserved through the lowercase hyphenation pass.
        t('BeauTiful', 'Beau=Ti=ful')
        def w(words, expected):
            self.ae(add_soft_hyphens_to_words(words, dictionary, '='), expected)
        w(' A\n beautiful day. ', ' A\n beau=ti=ful day. ')
    def test_hyphenate_html(self):
        # lang attributes (including nested overrides) select the dictionary.
        root = parse_html5('''
<p>beautiful, <span lang="sv"><!-- x -->tillata\n<span lang="en">Expand</span></span> "latitude!''',
            line_numbers=False)
        add_soft_hyphens_to_html(root, hyphen_char='=')
        raw = etree.tostring(root, method='text', encoding='unicode')
        self.ae(raw, 'beau=ti=ful, tilla=ta\nEx=pand "lat=i=tude!')
def find_tests():
    # Entry point used by calibre's test-runner discovery.
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(TestHyphenation)
| 3,185 | Python | .py | 75 | 34.226667 | 140 | 0.636216 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,220 | dictionaries.py | kovidgoyal_calibre/src/calibre/utils/hyphenation/dictionaries.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import errno
import json
import os
import tarfile
from io import BytesIO
from calibre.constants import cache_dir
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.localization import lang_as_iso639_1
from calibre.utils.lock import ExclusiveFile
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems
from polyglot.functools import lru_cache
@lru_cache(maxsize=2)
def locale_map():
    '''
    Return the map of lower-cased locale names to hyphenation dictionary
    names, loaded from the bundled locales.json resource.
    '''
    # lru_cache replaces the previous hand-rolled function-attribute cache,
    # matching how the other cached helpers in this module are written
    return {k.lower(): v for k, v in iteritems(json.loads(P('hyphenation/locales.json', data=True)))}
@lru_cache()
def dictionary_name_for_locale(loc):
    '''
    Return the name of the hyphenation dictionary for the locale ``loc``,
    or None when no suitable dictionary exists. Falls back from the full
    locale to language+territory, then to the bare language.
    '''
    lmap = locale_map()
    key = loc.lower().replace('-', '_')
    if key in lmap:
        return lmap[key]
    parts = key.split('_')
    if len(parts) > 2:
        key = '_'.join(parts[:2])
        if key in lmap:
            return lmap[key]
    key = lang_as_iso639_1(parts[0])
    if not key:
        return
    if key in lmap:
        return lmap[key]
    # Preferred default territories for the most common languages
    defaults = {'en': 'en_us', 'de': 'de_de', 'es': 'es_es'}
    if key in defaults:
        return lmap[defaults[key]]
    # Otherwise any territory variant of the language will do
    prefix = key + '_'
    for candidate in lmap:
        if candidate.startswith(prefix):
            return lmap[candidate]
@lru_cache(maxsize=2)
def expected_hash():
    # The checksum manifest bundled with the dictionaries archive; it is
    # compared against the copy stamped into the extracted cache to detect
    # a stale extraction.
    return P('hyphenation/sha1sum', data=True, allow_user_override=False)
def extract_dicts(cache_path):
    ''' Extract the bundled dictionaries.tar.xz into ``cache_path``,
    replacing any previous extraction atomically via directory renames. '''
    dict_tarball = P('hyphenation/dictionaries.tar.xz', allow_user_override=False)
    with TemporaryDirectory(dir=cache_path) as tdir:
        try:
            # Prefer the bundled lzma unpacker when it is available
            from calibre_lzma.xz import decompress
        except ImportError:
            # tarfile handles .tar.xz natively
            tf = tarfile.open(dict_tarball)
        else:
            buf = BytesIO()
            with open(dict_tarball, 'rb') as f:
                data = f.read()
            decompress(data, outfile=buf)
            buf.seek(0)
            tf = tarfile.TarFile(fileobj=buf)
        with tf:
            try:
                # The 'data' filter (python 3.12+) guards against malicious
                # archive members
                tf.extractall(tdir, filter='data')
            except TypeError:
                # Older python without the filter parameter
                tf.extractall(tdir)
        with open(os.path.join(tdir, 'sha1sum'), 'wb') as f:
            # Stamp the extraction so is_cache_up_to_date() can verify it
            f.write(expected_hash())
        dest = os.path.join(cache_path, 'f')
        with TemporaryDirectory(dir=cache_path) as trash:
            try:
                # Move any previous extraction out of the way; it is removed
                # together with the trash directory
                os.rename(dest, os.path.join(trash, 'f'))
            except OSError as err:
                if err.errno != errno.ENOENT:
                    raise
            os.rename(tdir, dest)
    is_cache_up_to_date.updated = True
def is_cache_up_to_date(cache_path):
    # Fast path: once the cache has been verified in this process, skip the
    # file read on subsequent calls.
    if getattr(is_cache_up_to_date, 'updated', False):
        return True
    sentinel = os.path.join(cache_path, 'f', 'sha1sum')
    try:
        with open(sentinel, 'rb') as f:
            if f.read() == expected_hash():
                is_cache_up_to_date.updated = True
                return True
    except OSError:
        pass
    return False
@lru_cache()
def get_cache_path(cd):
    '''
    Return the hyphenation cache directory inside the cache base directory
    ``cd``, creating it if necessary.
    '''
    cache_path = os.path.join(cd, 'hyphenation')
    # exist_ok replaces the old makedirs + swallow-EEXIST dance
    os.makedirs(cache_path, exist_ok=True)
    return cache_path
def path_to_dictionary(dictionary_name, cache_callback=None):
    '''
    Return the filesystem path to the named hyphenation dictionary,
    extracting the dictionary archive into the cache first, if needed.
    ``cache_callback`` is invoked only when an extraction actually happened.
    '''
    base = getattr(path_to_dictionary, 'cache_dir', None) or cache_dir()
    cache_path = get_cache_path(base)
    # Serialize extraction across processes
    with ExclusiveFile(os.path.join(cache_path, 'lock')):
        if not is_cache_up_to_date(cache_path):
            extract_dicts(cache_path)
            if cache_callback is not None:
                cache_callback()
    return os.path.join(cache_path, 'f', dictionary_name)
| 3,718 | Python | .py | 108 | 26.898148 | 121 | 0.613693 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,221 | win_fonts.py | kovidgoyal_calibre/src/calibre/utils/fonts/win_fonts.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import atexit
import os
import sys
from itertools import product
from calibre import isbytestring, prints
from calibre.constants import filesystem_encoding
from calibre.utils.fonts.utils import get_font_characteristics, get_font_names, is_truetype_font
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems
class WinFonts:

    '''
    Font access on Windows, built on the winfonts C extension. In addition
    to the fonts enumerated from the OS, the bundled Liberation faces are
    exposed as "application" font families.
    '''

    def __init__(self, winfonts):
        self.w = winfonts

        # Windows requires font files to be executable for them to be loaded,
        # so instead we use this hack: serve the bundled Liberation faces
        # directly from calibre's resources.
        self.app_font_families = {}
        for f in ('Serif', 'Sans', 'Mono'):
            base = 'fonts/liberation/Liberation%s-%s.ttf'
            self.app_font_families['Liberation %s'%f] = m = {}
            for weight, is_italic in product((self.w.FW_NORMAL, self.w.FW_BOLD), (False, True)):
                name = {(self.w.FW_NORMAL, False):'Regular',
                        (self.w.FW_NORMAL, True):'Italic',
                        (self.w.FW_BOLD, False):'Bold',
                        (self.w.FW_BOLD, True):'BoldItalic'}[(weight, is_italic)]
                m[(weight, is_italic)] = base%(f, name)

    def font_families(self):
        ''' Return the sorted list of all available font family names. '''
        names = set()
        for font in self.w.enum_font_families():
            if (
                    font['is_truetype'] and
                    # Fonts with names starting with @ are designed for
                    # vertical text
                    not font['name'].startswith('@')
            ):
                names.add(font['name'])
        return sorted(names.union(frozenset(self.app_font_families)))

    def get_normalized_name(self, is_italic, weight):
        ''' Map (is_italic, weight) to one of: normal, bold, italic, bi. '''
        if is_italic:
            ft = 'bi' if weight == self.w.FW_BOLD else 'italic'
        else:
            ft = 'bold' if weight == self.w.FW_BOLD else 'normal'
        return ft

    def fonts_for_family(self, family, normalize=True):
        '''
        Return a map of the faces of ``family``. If normalize is True the
        keys are normal/bold/italic/bi, otherwise (is_italic, weight//10).
        Values are (extension, full name, raw font data) tuples.
        '''
        family = str(family)
        ans = {}
        for weight, is_italic in product((self.w.FW_NORMAL, self.w.FW_BOLD), (False, True)):
            if family in self.app_font_families:
                m = self.app_font_families[family]
                path = m.get((weight, is_italic), None)
                if path is None:
                    continue
                data = P(path, data=True)
            else:
                try:
                    data = self.w.font_data(family, is_italic, weight)
                except Exception as e:
                    prints('Failed to get font data for font: %s [%s] with error: %s'%
                            (family, self.get_normalized_name(is_italic, weight), e))
                    continue

            ok, sig = is_truetype_font(data)
            if not ok:
                prints('Not a supported font, sfnt_version: %r'%sig)
                continue
            ext = 'otf' if sig == b'OTTO' else 'ttf'

            try:
                weight, is_italic, is_bold, is_regular = get_font_characteristics(data)[:4]
            except Exception as e:
                prints('Failed to get font characteristic for font: %s [%s]'
                        ' with error: %s'%(family,
                            self.get_normalized_name(is_italic, weight), e))
                continue

            # Initialize the names before reading the name table, so that a
            # parse failure cannot leave them undefined (previously the bare
            # except here led to a NameError further down)
            family_name = sub_family_name = full_name = None
            try:
                family_name, sub_family_name, full_name = get_font_names(data)
            except Exception:
                pass

            if normalize:
                ft = {(True, True):'bi', (True, False):'italic', (False,
                    True):'bold', (False, False):'normal'}[(is_italic, is_bold)]
            else:
                ft = (1 if is_italic else 0, weight//10)

            if not (family_name or full_name):
                family_name = family
            name = full_name or family + ' ' + (sub_family_name or '')

            try:
                name.encode('ascii')
            except ValueError:
                # Name is not pure ASCII, fall back to family + subfamily
                try:
                    sub_family_name.encode('ascii')
                    subf = sub_family_name
                except Exception:
                    subf = ''
                name = family + ((' ' + subf) if subf else '')

            ans[ft] = (ext, name, data)

        return ans

    def add_system_font(self, path):
        '''
        WARNING: The file you are adding must have execute permissions or
        windows will fail to add it. (ls -l in cygwin to check)
        '''
        if isbytestring(path):
            path = path.decode(filesystem_encoding)
        path = os.path.abspath(path)
        ret = self.w.add_system_font(path)
        if ret > 0:
            # Make sure the font is unregistered when calibre exits
            atexit.register(self.remove_system_font, path)
        return ret

    def remove_system_font(self, path):
        ''' Remove a font previously registered via add_system_font(). '''
        return self.w.remove_system_font(path)
def load_winfonts():
    # Load the compiled winfonts extension and wrap it in the WinFonts API
    from calibre_extensions import winfonts
    return WinFonts(winfonts)
def test_ttf_reading():
    # Manual test: dump the OS/2 characteristics of the font files given on
    # the command line.
    for path in sys.argv[1:]:
        with open(path, 'rb') as src:
            raw = src.read()
        print(os.path.basename(path))
        get_font_characteristics(raw)
        print()
def test():
    # Manual smoke test: load the winfonts extension (preferring a freshly
    # built plugin from the source tree, if present) and dump every family
    # and its faces.
    base = os.path.abspath(__file__)
    d = os.path.dirname
    pluginsd = os.path.join(d(d(d(base))), 'plugins')
    if os.path.exists(os.path.join(pluginsd, 'winfonts.pyd')):
        sys.path.insert(0, pluginsd)
        import winfonts
        w = WinFonts(winfonts)
    else:
        w = load_winfonts()
    print(w.w)
    families = w.font_families()
    print(families)
    for family in families:
        prints(family + ':')
        for font, data in iteritems(w.fonts_for_family(family)):
            prints('  ', font, data[0], data[1], len(data[2]))
        print()
if __name__ == '__main__':
    # Run the manual smoke test when executed directly
    test()
| 6,065 | Python | .py | 147 | 29.47619 | 96 | 0.537978 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,222 | utils.py | kovidgoyal_calibre/src/calibre/utils/fonts/utils.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import struct
from collections import defaultdict
from io import BytesIO
from calibre.utils.resources import get_path as P
from polyglot.builtins import as_bytes, iteritems, itervalues
class UnsupportedFont(ValueError):
    ''' Raised when font data is not in a format this module can parse. '''
    pass
def get_printable_characters(text):
    ''' Return the printable characters of ``text`` after NFC
    normalization, i.e. with control, separator and mark characters
    (Unicode categories C, Z and M) removed. '''
    import unicodedata
    normalized = unicodedata.normalize('NFC', text)
    keep = []
    for ch in normalized:
        if unicodedata.category(ch)[0] not in 'CZM':
            keep.append(ch)
    return ''.join(keep)
def is_truetype_font(raw):
    ''' Return (ok, sfnt_version); ok is True iff ``raw`` starts with a
    TrueType or OpenType/CFF sfnt version tag. '''
    tag = raw[:4]
    ok = tag == b'\x00\x01\x00\x00' or tag == b'OTTO'
    return ok, tag
def get_tables(raw):
    ''' Iterate over the table directory of the sfnt font ``raw``, yielding
    (tag, table bytes, record offset, table offset, checksum) tuples. '''
    num_tables = struct.unpack_from(b'>H', raw, 4)[0]
    # Table records begin right after the 12 byte sfnt header
    pos = 12
    for _ in range(num_tables):
        tag, checksum, toff, tlen = struct.unpack_from(b'>4s3L', raw, pos)
        yield tag, raw[toff:toff + tlen], pos, toff, checksum
        pos += 16  # each table record is 16 bytes
def get_table(raw, name):
    ''' Get the raw table bytes for the specified table in the font.
    Returns (table, record offset, table offset, checksum), or four Nones
    when the table is absent. The lookup is case-insensitive. '''
    target = as_bytes(name.lower())
    for tag, table, rec_offset, tbl_offset, checksum in get_tables(raw):
        if tag.lower() == target:
            return table, rec_offset, tbl_offset, checksum
    return None, None, None, None
def get_font_characteristics_from_ttlib_os2_table(t, return_all=False):
    '''
    Read the font characteristics from a fontTools OS/2 table object.
    Same return value as get_font_characteristics().
    '''
    version = t.version
    char_width = t.xAvgCharWidth
    weight, width = t.usWeightClass, t.usWidthClass
    fs_type = t.fsType
    subscript_x_size, subscript_y_size = t.ySubscriptXSize, t.ySubscriptYSize
    subscript_x_offset, subscript_y_offset = t.ySubscriptXOffset, t.ySubscriptYOffset
    superscript_x_size, superscript_y_size = t.ySuperscriptXSize, t.ySuperscriptYSize
    superscript_x_offset, superscript_y_offset = t.ySuperscriptXOffset, t.ySuperscriptYOffset
    strikeout_size, strikeout_position = t.yStrikeoutSize, t.yStrikeoutPosition
    family_class = t.sFamilyClass
    selection = t.fsSelection
    # fsSelection flag bits, per the OpenType OS/2 specification
    is_italic = bool(selection & (1 << 0))
    is_bold = bool(selection & (1 << 5))
    is_regular = bool(selection & (1 << 6))
    is_wws = bool(selection & (1 << 8))
    is_oblique = bool(selection & (1 << 9))
    p = t.panose
    panose = (p.bFamilyType, p.bSerifStyle, p.bWeight, p.bProportion, p.bContrast,
              p.bStrokeVariation, p.bArmStyle, p.bLetterForm, p.bMidline, p.bXHeight)
    if return_all:
        return (version, char_width, weight, width, fs_type, subscript_x_size,
                subscript_y_size, subscript_x_offset, subscript_y_offset,
                superscript_x_size, superscript_y_size, superscript_x_offset,
                superscript_y_offset, strikeout_size, strikeout_position,
                family_class, panose, selection, is_italic, is_bold, is_regular)
    return weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws, version
def get_font_characteristics(raw, raw_is_table=False, return_all=False):
    '''
    Return (weight, is_italic, is_bold, is_regular, fs_type, panose, width,
    is_oblique, is_wws, os2_version). These
    values are taken from the OS/2 table of the font. See
    http://www.microsoft.com/typography/otspec/os2.htm for details
    '''
    if hasattr(raw, 'getUnicodeRanges'):
        # raw is a fontTools OS/2 table object rather than raw bytes
        return get_font_characteristics_from_ttlib_os2_table(raw, return_all)
    if raw_is_table:
        os2_table = raw
    else:
        os2_table = get_table(raw, 'os/2')[0]
        if os2_table is None:
            raise UnsupportedFont('Not a supported font, has no OS/2 table')
    # version, xAvgCharWidth, usWeightClass, usWidthClass, fsType, the
    # sub/superscript and strikeout metrics and sFamilyClass
    common_fields = b'>Hh3H11h'
    (version, char_width, weight, width, fs_type, subscript_x_size,
            subscript_y_size, subscript_x_offset, subscript_y_offset,
            superscript_x_size, superscript_y_size, superscript_x_offset,
            superscript_y_offset, strikeout_size, strikeout_position,
            family_class) = struct.unpack_from(common_fields, os2_table)
    offset = struct.calcsize(common_fields)
    # 10 PANOSE classification bytes
    panose = struct.unpack_from(b'>10B', os2_table, offset)
    offset += 10
    # Unicode range coverage bits (read but unused)
    (range1, range2, range3, range4) = struct.unpack_from(b'>4L', os2_table, offset)
    offset += struct.calcsize(b'>4L')
    vendor_id = os2_table[offset:offset+4]
    vendor_id  # no-op reference: documents that the 4 vendor id bytes are skipped
    offset += 4
    selection, = struct.unpack_from(b'>H', os2_table, offset)
    # fsSelection flag bits
    is_italic = (selection & (1 << 0)) != 0
    is_bold = (selection & (1 << 5)) != 0
    is_regular = (selection & (1 << 6)) != 0
    is_wws = (selection & (1 << 8)) != 0
    is_oblique = (selection & (1 << 9)) != 0
    if return_all:
        return (version, char_width, weight, width, fs_type, subscript_x_size,
            subscript_y_size, subscript_x_offset, subscript_y_offset,
            superscript_x_size, superscript_y_size, superscript_x_offset,
            superscript_y_offset, strikeout_size, strikeout_position,
            family_class, panose, selection, is_italic, is_bold, is_regular)

    return weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws, version
def panose_to_css_generic_family(panose):
    ''' Map a PANOSE classification tuple to a CSS generic font family. '''
    # PANOSE digit 4 (proportion) == 9 means monospaced
    if panose[3] == 9:
        return 'monospace'
    # Family kind: 3 == script, 4 == decorative
    kind = panose[0]
    if kind == 3:
        return 'cursive'
    if kind == 4:
        return 'fantasy'
    # Serif style digits 11-13 are the sans-serif styles
    return 'sans-serif' if panose[1] in (11, 12, 13) else 'serif'
def decode_name_record(recs):
    '''
    Return the best English name from the given name table records, or None.
    See http://www.microsoft.com/typography/otspec/name.htm for details.
    '''
    if not recs:
        return None
    unicode_names, windows_names, mac_names = {}, {}, {}
    for platform_id, encoding_id, language_id, src in recs:
        if language_id > 0x8000:
            # Language-tag records, not real language ids
            continue
        if platform_id == 0:  # Unicode platform
            if encoding_id < 4:
                try:
                    unicode_names[language_id] = src.decode('utf-16-be')
                except ValueError:
                    continue
        elif platform_id == 1:  # Macintosh platform
            try:
                mac_names[language_id] = src.decode('utf-8')
            except ValueError:
                continue
        elif platform_id == 2:  # deprecated ISO platform
            codec = {0:'ascii', 1:'utf-16-be', 2:'iso-8859-1'}.get(encoding_id)
            if codec is None:
                continue
            try:
                unicode_names[language_id] = src.decode(codec)
            except ValueError:
                continue
        elif platform_id == 3:  # Windows platform
            width = {1:16, 10:32}.get(encoding_id)
            if width is None:
                continue
            try:
                windows_names[language_id] = src.decode('utf-%d-be'%width)
            except ValueError:
                continue

    # Prefer the Windows US English name
    if 1033 in windows_names:
        return windows_names[1033]
    # Then any other Windows English variant
    for lang in (3081, 10249, 4105, 9225, 16393, 6153, 8201, 17417, 5129,
            13321, 18441, 7177, 11273, 2057, 12297):
        if lang in windows_names:
            return windows_names[lang]
    # Then the Macintosh default-language name
    if 0 in mac_names:
        return mac_names[0]
    # Finally, any Unicode-platform name at all
    for val in unicode_names.values():
        return val
    return None
def _get_font_names(raw, raw_is_table=False):
if raw_is_table:
table = raw
else:
table = get_table(raw, 'name')[0]
if table is None:
raise UnsupportedFont('Not a supported font, has no name table')
table_type, count, string_offset = struct.unpack_from(b'>3H', table)
records = defaultdict(list)
for i in range(count):
try:
platform_id, encoding_id, language_id, name_id, length, offset = \
struct.unpack_from(b'>6H', table, 6+i*12)
except struct.error:
break
offset += string_offset
src = table[offset:offset+length]
records[name_id].append((platform_id, encoding_id, language_id,
src))
return records
def get_font_name_records_from_ttlib_names_table(names_table):
    # Convert a fontTools name table into the same record map produced by
    # _get_font_names()
    records = defaultdict(list)
    for entry in names_table.names:
        key = entry.nameID
        records[key].append((entry.platformID, entry.platEncID, entry.langID, entry.string))
    return records
def get_font_names_from_ttlib_names_table(names_table):
    # Decode the seven standard name ids (family, subfamily, full name,
    # preferred family/subfamily, WWS family/subfamily) from a fontTools
    # name table
    records = get_font_name_records_from_ttlib_names_table(names_table)
    return tuple(decode_name_record(records[nid]) for nid in (1, 2, 4, 16, 17, 21, 22))
def get_font_names(raw, raw_is_table=False):
    ''' Return (family_name, subfamily_name, full_name) for the font. '''
    records = _get_font_names(raw, raw_is_table)
    return (decode_name_record(records[1]), decode_name_record(records[2]),
            decode_name_record(records[4]))
def get_font_names2(raw, raw_is_table=False):
    ''' Like get_font_names(), but additionally returns the preferred and
    WWS family/subfamily names. '''
    records = _get_font_names(raw, raw_is_table)
    return tuple(decode_name_record(records[nid]) for nid in (1, 2, 4, 16, 17, 21, 22))
def get_all_font_names(raw, raw_is_table=False):
    ''' Return a dict of all family/subfamily/full/postscript names found in
    the font's name table; empty or undecodable names are omitted. '''
    records = _get_font_names(raw, raw_is_table)
    ans = {}
    for name, num in iteritems({'family_name':1, 'subfamily_name':2, 'full_name':4,
            'preferred_family_name':16, 'preferred_subfamily_name':17,
            'wws_family_name':21, 'wws_subfamily_name':22}):
        try:
            ans[name] = decode_name_record(records[num])
        except (IndexError, KeyError, ValueError):
            continue
        if not ans[name]:
            del ans[name]
    # The postscript name (id 6) is read directly: only the standard
    # Macintosh Roman (1,0,0) and Windows US English (3,1,1033) records
    # are considered, per the OpenType name table spec
    for platform_id, encoding_id, language_id, src in records[6]:
        if (platform_id, encoding_id, language_id) == (1, 0, 0):
            try:
                ans['postscript_name'] = src.decode('utf-8')
                break
            except ValueError:
                continue
        elif (platform_id, encoding_id, language_id) == (3, 1, 1033):
            try:
                ans['postscript_name'] = src.decode('utf-16-be')
                break
            except ValueError:
                continue
    return ans
def checksum_of_block(raw):
    ''' Sum of the big-endian 32-bit words of ``raw`` (zero padded to a
    multiple of four bytes), modulo 2**32. '''
    pad = 4 - len(raw) % 4
    padded = raw + b'\0'*pad
    words = struct.unpack(b'>%dI' % (len(padded)//4), padded)
    return sum(words) % (1 << 32)
def verify_checksums(raw):
    ''' Verify the checksum of every table and of the entire font, raising
    ValueError on any mismatch. '''
    head_table = None
    for table_tag, table, table_index, table_offset, table_checksum in get_tables(raw):
        if table_tag.lower() == b'head':
            # The head table checksum is computed with checkSumAdjustment
            # zeroed, so it is handled separately below
            version, fontrev, checksum_adj = struct.unpack_from(b'>ffL', table)
            head_table = table
            offset = table_offset
            checksum = table_checksum
        elif checksum_of_block(table) != table_checksum:
            raise ValueError('The %r table has an incorrect checksum'%table_tag)
    if head_table is not None:
        table = head_table
        # Zero out the checkSumAdjustment field (bytes 8-11) before
        # checksumming, both for the table and the whole font
        table = table[:8] + struct.pack(b'>I', 0) + table[12:]
        raw = raw[:offset] + table + raw[offset+len(table):]
        # Check the checksum of the head table
        if checksum_of_block(table) != checksum:
            raise ValueError('Checksum of head table not correct')
        # Check the checksum of the entire font
        checksum = checksum_of_block(raw)
        q = (0xB1B0AFBA - checksum) & 0xffffffff
        if q != checksum_adj:
            raise ValueError('Checksum of entire font incorrect')
def set_checksum_adjustment(f):
    # Rewrite the checkSumAdjustment field of the head table (8 bytes into
    # the table), per the OpenType spec: 0xB1B0AFBA minus the checksum of
    # the whole font computed with the field zeroed.
    pos = get_table(f.getvalue(), 'head')[2] + 8
    f.seek(pos)
    f.write(struct.pack(b'>I', 0))
    total = checksum_of_block(f.getvalue())
    adjustment = (0xB1B0AFBA - total) & 0xffffffff
    f.seek(pos)
    f.write(struct.pack(b'>I', adjustment))
def set_table_checksum(f, name):
    # Recompute the named table's checksum and, if stale, rewrite it in the
    # table directory record (the checksum lives 4 bytes into the record).
    table, rec_offset, table_offset, stored = get_table(f.getvalue(), name)
    fresh = checksum_of_block(table)
    if fresh != stored:
        f.seek(rec_offset + 4)
        f.write(struct.pack(b'>I', fresh))
def remove_embed_restriction(raw):
    ''' Return the font data with the OS/2 fsType embedding restriction
    cleared (set to installable). Table and font checksums are updated and
    re-verified before returning. '''
    ok, sig = is_truetype_font(raw)
    if not ok:
        raise UnsupportedFont('Not a supported font, sfnt_version: %r'%sig)
    table, table_index, table_offset = get_table(raw, 'os/2')[:3]
    if table is None:
        raise UnsupportedFont('Not a supported font, has no OS/2 table')
    # fsType is the fifth field of the OS/2 table, after version,
    # xAvgCharWidth, usWeightClass and usWidthClass
    fs_type_offset = struct.calcsize(b'>HhHH')
    fs_type = struct.unpack_from(b'>H', table, fs_type_offset)[0]
    if fs_type == 0:
        # Already installable, nothing to do
        return raw
    f = BytesIO(raw)
    f.seek(fs_type_offset + table_offset)
    f.write(struct.pack(b'>H', 0))
    set_table_checksum(f, 'os/2')
    set_checksum_adjustment(f)
    raw = f.getvalue()
    verify_checksums(raw)
    return raw
def is_font_embeddable(raw):
    # https://www.microsoft.com/typography/otspec/os2.htm#fst
    ok, sig = is_truetype_font(raw)
    if not ok:
        raise UnsupportedFont('Not a supported font, sfnt_version: %r'%sig)
    table, table_index, table_offset = get_table(raw, 'os/2')[:3]
    if table is None:
        raise UnsupportedFont('Not a supported font, has no OS/2 table')
    fs_type_offset = struct.calcsize(b'>HhHH')
    fs_type = struct.unpack_from(b'>H', table, fs_type_offset)[0]
    # NOTE(review): the bit tests below look like the OpenType fsType flags
    # (0x8 editable embedding, 0x200 bitmap-embedding-only); confirm the
    # intent of the & 1 test against the spec linked above
    if fs_type == 0 or fs_type & 0x8:
        return True, fs_type
    if fs_type & 1:
        return False, fs_type
    if fs_type & 0x200:
        return False, fs_type
    return True, fs_type
def read_bmp_prefix(table, bmp):
    ''' Parse the header and segment arrays of the format 4 cmap subtable
    located at offset ``bmp`` inside ``table``. '''
    length, language, segcount = struct.unpack_from(b'>3H', table, bmp+2)
    array_len = segcount //2
    offset = bmp + 7*2  # skip the seven uint16 header fields
    array_sz = 2*array_len
    array = b'>%dH'%array_len
    end_count = struct.unpack_from(array, table, offset)
    offset += array_sz + 2  # the +2 skips the reservedPad field
    start_count = struct.unpack_from(array, table, offset)
    offset += array_sz
    # idDelta is signed, hence the h format
    id_delta = struct.unpack_from(array.replace(b'H', b'h'), table, offset)
    offset += array_sz
    range_offset = struct.unpack_from(array, table, offset)
    if length + bmp < offset + array_sz:
        raise ValueError('cmap subtable length is too small')
    # Everything after the idRangeOffset array is the glyph id array
    glyph_id_len = (length + bmp - (offset + array_sz))//2
    glyph_id_map = struct.unpack_from(b'>%dH'%glyph_id_len, table, offset +
            array_sz)
    return (start_count, end_count, range_offset, id_delta, glyph_id_len,
            glyph_id_map, array_len)
def get_bmp_glyph_ids(table, bmp, codes):
    ''' Yield the glyph id for each code point in ``codes``, looked up in
    the format 4 cmap subtable at offset ``bmp``. Unmapped code points
    yield 0 (.notdef). '''
    (start_count, end_count, range_offset, id_delta, glyph_id_len,
            glyph_id_map, array_len) = read_bmp_prefix(table, bmp)
    for code in codes:
        found = False
        # Find the first segment whose endCount >= code
        for i, ec in enumerate(end_count):
            if ec >= code:
                sc = start_count[i]
                if sc <= code:
                    found = True
                    ro = range_offset[i]
                    if ro == 0:
                        # Simple delta mapping
                        glyph_id = id_delta[i] + code
                    else:
                        # Indirect mapping through the glyph id array; the
                        # index arithmetic reproduces the spec's pointer trick
                        idx = ro//2 + (code - sc) + i - array_len
                        glyph_id = glyph_id_map[idx]
                        if glyph_id != 0:
                            glyph_id += id_delta[i]
                    yield glyph_id % 0x10000
                break
        if not found:
            yield 0
def get_glyph_ids(raw, text, raw_is_table=False):
    ''' Yield the glyph id for each character of ``text``, using the
    format 4 (Windows, BMP) cmap subtable of the font. '''
    if not isinstance(text, str):
        raise TypeError('%r is not a unicode object'%text)
    table = raw if raw_is_table else get_table(raw, 'cmap')[0]
    if table is None:
        raise UnsupportedFont('Not a supported font, has no cmap table')
    version, num_tables = struct.unpack_from(b'>HH', table)
    bmp_table = None
    for i in range(num_tables):
        platform_id, encoding_id, offset = struct.unpack_from(b'>HHL', table, 4 + i*8)
        # Look for the Microsoft Unicode BMP encoding in format 4
        if platform_id == 3 and encoding_id == 1:
            if struct.unpack_from(b'>H', table, offset)[0] == 4:
                bmp_table = offset
                break
    if bmp_table is None:
        raise UnsupportedFont('Not a supported font, has no format 4 cmap table')
    yield from get_bmp_glyph_ids(table, bmp_table, map(ord, text))
def supports_text(raw, text, has_only_printable_chars=False):
    ''' Return True iff the font has a glyph for every printable character
    of ``text``. Any error while parsing the font counts as unsupported. '''
    if not isinstance(text, str):
        raise TypeError('%r is not a unicode object'%text)
    if not has_only_printable_chars:
        text = get_printable_characters(text)
    try:
        return all(gid != 0 for gid in get_glyph_ids(raw, text))
    except:
        return False
def get_font_for_text(text, candidate_font_data=None):
    ''' Return font data able to render ``text``. The candidate data is
    returned unchanged when it already supports the text; otherwise the
    system fonts are scanned for a suitable face. '''
    usable = candidate_font_data is not None and supports_text(candidate_font_data, text)
    if not usable:
        from calibre.utils.fonts.scanner import font_scanner
        family, faces = font_scanner.find_font_for_text(text)
        if faces:
            with open(faces[0]['path'], 'rb') as f:
                candidate_font_data = f.read()
    return candidate_font_data
def test_glyph_ids():
    # Sanity check: the pure python cmap parser must agree with FreeType
    # on a font that covers multiple scripts.
    from calibre.utils.fonts.free_type import FreeType
    data = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
    font = FreeType().load_font(data)
    text = '诶йab'
    if tuple(font.glyph_ids(text)) != tuple(get_glyph_ids(data, text)):
        raise Exception('My code and FreeType differ on the glyph ids')
def test_supports_text():
    # calibreSymbols has glyphs for the symbol characters but not for latin
    # letters, so both answers are known in advance.
    data = P('fonts/calibreSymbols.otf', data=True)
    covered, uncovered = '.★½⯨', 'abc'
    if not supports_text(data, covered):
        raise RuntimeError('Incorrectly returning that text is not supported')
    if supports_text(data, uncovered):
        raise RuntimeError('Incorrectly claiming that text is supported')
def test_find_font():
    # Manual test: report which font family is found for text in different
    # scripts. The lookups are now performed in the correct order:
    # previously the Arabic text was assigned AFTER its lookup, so the
    # "Arabic" result was actually the Chinese one.
    from calibre.utils.fonts.scanner import font_scanner
    chinese = '诶比西迪'
    family = font_scanner.find_font_for_text(chinese)[0]
    print('Family for Chinese text:', family)
    arabic = 'لوحة المفاتيح العربية'
    family = font_scanner.find_font_for_text(arabic)[0]
    print('Family for Arabic text:', family)
def test():
    # Run all the self tests in this module; requires calibre resources and
    # scannable system fonts, so this is a manual test.
    test_glyph_ids()
    test_supports_text()
    test_find_font()
def main():
    # Manual test: dump names, characteristics and checksum status for the
    # font files given on the command line.
    import os
    import sys
    for path in sys.argv[1:]:
        print(os.path.basename(path))
        with open(path, 'rb') as src:
            raw = src.read()
        print(get_font_names(raw))
        characs = get_font_characteristics(raw)
        print(characs)
        print(panose_to_css_generic_family(characs[5]))
        verify_checksums(raw)
        remove_embed_restriction(raw)
if __name__ == '__main__':
    # Manual testing entry point
    main()
| 19,589 | Python | .py | 455 | 34.964835 | 154 | 0.628053 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,223 | metadata.py | kovidgoyal_calibre/src/calibre/utils/fonts/metadata.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import namedtuple
from io import BytesIO
from calibre.utils.fonts.utils import get_font_characteristics, get_font_names_from_ttlib_names_table
class UnsupportedFont(ValueError):
    ''' Raised when fontTools cannot parse the font data, or a required
    table is missing. '''
    pass
# Characteristics read from the font's OS/2 table,
# see FontMetadata._read_characteristics()
FontCharacteristics = namedtuple('FontCharacteristics',
    'weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws, os2_version')
# Names read from the font's name table, see FontMetadata._read_names()
FontNames = namedtuple('FontNames',
    'family_name, subfamily_name, full_name, preferred_family_name, preferred_subfamily_name, wws_family_name, wws_subfamily_name')
class FontMetadata:

    '''
    Parse font data with fontTools and expose both the raw name/OS/2
    information (self.names, self.characteristics) and the derived CSS
    properties: font_family, font_weight, font_style, font_stretch.
    '''

    def __init__(self, bytes_or_stream):
        from fontTools.subset import Subsetter, load_font
        if not hasattr(bytes_or_stream, 'read'):
            bytes_or_stream = BytesIO(bytes_or_stream)
        f = bytes_or_stream
        f.seek(0)
        s = Subsetter()
        try:
            font = load_font(f, s.options, dontLoadGlyphNames=True)
        except Exception as e:
            raise UnsupportedFont(str(e)) from e
        self.is_otf = font.sfntVersion == 'OTTO'
        self._read_names(font)
        self._read_characteristics(font)
        f.seek(0)

        self.font_family = self.names.family_name
        # Map the numeric OS/2 weight onto its CSS keyword where possible
        wt = self.characteristics.weight
        if wt == 400:
            wt = 'normal'
        elif wt == 700:
            wt = 'bold'
        else:
            wt = str(wt)
        self.font_weight = wt

        # usWidthClass runs 1-9 in the same order as the CSS keywords
        self.font_stretch = ('ultra-condensed', 'extra-condensed',
                'condensed', 'semi-condensed', 'normal', 'semi-expanded',
                'expanded', 'extra-expanded', 'ultra-expanded')[
                    self.characteristics.width-1]
        if self.characteristics.is_oblique:
            self.font_style = 'oblique'
        elif self.characteristics.is_italic:
            self.font_style = 'italic'
        else:
            self.font_style = 'normal'

    def _read_names(self, font):
        ''' Populate self.names from the font's name table. '''
        try:
            name_table = font['name']
        except KeyError as e:
            # Chain the cause, consistent with __init__
            raise UnsupportedFont('This font has no name table') from e
        self.names = FontNames(*get_font_names_from_ttlib_names_table(name_table))

    def _read_characteristics(self, font):
        ''' Populate self.characteristics from the font's OS/2 table. '''
        try:
            os2_table = font['OS/2']
        except KeyError as e:
            raise UnsupportedFont('This font has no OS/2 table') from e
        vals = get_font_characteristics(os2_table, raw_is_table=True)
        self.characteristics = FontCharacteristics(*vals)

    def to_dict(self):
        ''' Return all the metadata as a single flat dict, suitable for
        serialization. '''
        ans = {
                'is_otf':self.is_otf,
                'font-family':self.font_family,
                'font-weight':self.font_weight,
                'font-style':self.font_style,
                'font-stretch':self.font_stretch
        }
        for f in self.names._fields:
            ans[f] = getattr(self.names, f)
        for f in self.characteristics._fields:
            ans[f] = getattr(self.characteristics, f)
        return ans
if __name__ == '__main__':
    # Manual test: print the metadata of the font file given on the command line
    import sys
    with open(sys.argv[-1], 'rb') as f:
        fm = FontMetadata(f)
        import pprint
        pprint.pprint(fm.to_dict())
| 3,243 | Python | .py | 80 | 31.3 | 131 | 0.608779 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,224 | __init__.py | kovidgoyal_calibre/src/calibre/utils/fonts/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,225 | free_type.py | kovidgoyal_calibre/src/calibre/utils/fonts/free_type.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import threading
from functools import wraps
from calibre_extensions.freetype import FreeType as _FreeType
class ThreadingViolation(Exception):

    ''' Raised when a freetype wrapper is used from a thread other than the
    one it was created in. '''

    def __init__(self):
        Exception.__init__(self,
                'You cannot use the freetype plugin from a thread other than the '
                ' thread in which startup() was called')
def same_thread(func):
    # Decorator: raise ThreadingViolation when a method is invoked from a
    # thread other than the one that created the wrapping object.
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if self.start_thread is not threading.current_thread():
            raise ThreadingViolation()
        return func(self, *args, **kwargs)
    return wrapper
class Face:

    '''
    Wrapper around a freetype Face object. May only be used from the thread
    in which it was created (enforced by @same_thread).
    '''

    def __init__(self, face):
        self.start_thread = threading.current_thread()
        self.face = face
        for x in ('family_name', 'style_name'):
            val = getattr(self.face, x)
            try:
                val = val.decode('utf-8')
            except UnicodeDecodeError:
                # repr() already returns str on python 3; the old code
                # called .decode() on it, which raised AttributeError
                val = repr(val)
            setattr(self, x, val)

    @same_thread
    def supports_text(self, text, has_non_printable_chars=True):
        '''
        Returns True if all the characters in text have glyphs in this font.
        '''
        if not isinstance(text, str):
            raise TypeError('%r is not a unicode object'%text)
        if has_non_printable_chars:
            from calibre.utils.fonts.utils import get_printable_characters
            text = get_printable_characters(text)
        chars = tuple(frozenset(map(ord, text)))
        return self.face.supports_text(chars)

    @same_thread
    def glyph_ids(self, text):
        ''' Yield the glyph id for each character of ``text``. '''
        if not isinstance(text, str):
            raise TypeError('%r is not a unicode object'%text)
        for char in text:
            yield self.face.glyph_id(ord(char))
class FreeType:

    ''' Thin wrapper over the freetype C extension. Not thread safe: use
    only from the thread in which it was created. '''

    def __init__(self):
        # Remember the creating thread for the @same_thread checks
        self.start_thread = threading.current_thread()
        self.ft = _FreeType()

    @same_thread
    def load_font(self, data):
        # data: the raw bytes of a font file
        return Face(self.ft.load_font(data))
| 2,132 | Python | .py | 55 | 30.6 | 82 | 0.624575 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,226 | scanner.py | kovidgoyal_calibre/src/calibre/utils/fonts/scanner.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
from collections import defaultdict
from threading import Thread
from calibre import as_unicode, prints, walk
from calibre.constants import DEBUG, config_dir, filesystem_encoding, ismacos, iswindows, isworker
from calibre.utils.fonts.metadata import FontMetadata, UnsupportedFont
from calibre.utils.icu import lower as icu_lower
from calibre.utils.icu import sort_key
from calibre.utils.resources import get_path as P
from polyglot.builtins import itervalues
class NoFonts(ValueError):
    ''' Raised when no fonts can be found for a requested family. '''
    pass
# Font dirs {{{
def default_font_dirs():
    # Hard-coded fallback list, used when fontconfig is unavailable
    home = os.path.expanduser
    return [
        '/opt/share/fonts',
        '/usr/share/fonts',
        '/usr/local/share/fonts',
        home('~/.local/share/fonts'),
        home('~/.fonts')
    ]
def fc_list():
    '''
    Query fontconfig via ctypes for the list of directories it scans for
    fonts, falling back to default_font_dirs() whenever the library or any
    of the needed symbols is unavailable or the answer looks bogus.
    '''
    import ctypes
    from ctypes.util import find_library

    lib = find_library('fontconfig')
    if lib is None:
        return default_font_dirs()
    try:
        lib = ctypes.CDLL(lib)
    except Exception:
        return default_font_dirs()

    # Resolve the three fontconfig entry points we need; the duplicated
    # try blocks of the original are consolidated here
    try:
        get_font_dirs = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)(('FcConfigGetFontDirs', lib))
        next_dir = ctypes.CFUNCTYPE(ctypes.c_char_p, ctypes.c_void_p)(('FcStrListNext', lib))
        end = ctypes.CFUNCTYPE(None, ctypes.c_void_p)(('FcStrListDone', lib))
    except AttributeError:
        return default_font_dirs()

    str_list = get_font_dirs(ctypes.c_void_p())
    if not str_list:
        return default_font_dirs()
    ans = []
    while True:
        d = next_dir(str_list)
        if not d:
            break
        # Note: the redundant "if d:" re-check of the original was dead
        # code and has been removed
        try:
            ans.append(d.decode(filesystem_encoding))
        except ValueError:
            prints('Ignoring undecodeable font path: %r' % d)
    end(str_list)
    if len(ans) < 3:
        # Suspiciously short list, distrust it
        return default_font_dirs()

    # Since directory scanning is recursive, keep only topmost directories:
    # drop any path that has an ancestor already in the list
    parents, visited = [], set()
    for f in ans:
        path = os.path.normpath(os.path.abspath(os.path.realpath(f)))
        if path == '/':
            continue
        head, tail = os.path.split(path)
        while head and tail:
            if head in visited:
                break
            head, tail = os.path.split(head)
        else:
            parents.append(path)
        visited.add(path)
    return parents
def font_dirs():
    ''' Return the list of directories to scan for fonts on this platform. '''
    if iswindows:
        from calibre_extensions import winutil
        paths = {os.path.normcase(r'C:\Windows\Fonts')}
        for which in (winutil.CSIDL_FONTS, winutil.CSIDL_LOCAL_APPDATA, winutil.CSIDL_APPDATA):
            try:
                path = winutil.special_folder_path(which)
            except ValueError:
                continue
            if which != winutil.CSIDL_FONTS:
                # Per-user fonts live under the AppData folders
                path = os.path.join(path, r'Microsoft\Windows\Fonts')
            paths.add(os.path.normcase(path))
        return list(paths)
    if ismacos:
        return [
            '/Library/Fonts',
            '/System/Library/Fonts',
            '/usr/share/fonts',
            '/var/root/Library/Fonts',
            os.path.expanduser('~/.fonts'),
            os.path.expanduser('~/Library/Fonts'),
        ]
    # Linux/Unix: ask fontconfig
    return fc_list()
# }}}
# Build font family maps {{{
def font_priority(font):
    '''
    Sort key that tries to ensure the "Regular" face is the first font for a
    given family: 0 for a fully normal face named Regular, 1 for a fully
    normal face, 2 for a Regular-named face, and larger values the further
    the face is from normal style/stretch/weight.
    '''
    num_normal = sum(
        font[key] == 'normal'
        for key in ('font-style', 'font-stretch', 'font-weight'))
    subfamily = (font['wws_subfamily_name'] or
            font['preferred_subfamily_name'] or font['subfamily_name'])
    is_regular = subfamily == 'Regular'
    if num_normal == 3:
        return 0 if is_regular else 1
    if is_regular:
        return 2
    return 3 + (3 - num_normal)
def path_significance(path, folders):
    '''
    Return the index of the first folder in ``folders`` that is a prefix of
    ``path`` (after normalization), or -1 if none matches. Lower indices are
    considered more significant by callers.
    '''
    normalized = os.path.normcase(os.path.abspath(path))
    for significance, folder in enumerate(folders):
        if normalized.startswith(folder):
            return significance
    return -1
def build_families(cached_fonts, folders, family_attr='font-family'):
    '''
    Group the cached font dicts into families keyed by lower-cased family
    name. Duplicate faces (same family/weight/stretch/style fingerprint) are
    resolved in favour of the copy from a more significant font directory,
    and each family's face list is sorted so the "Regular" face comes first.

    :return: (family map, tuple of family display names sorted by sort_key)
    '''
    families = defaultdict(list)
    for font in itervalues(cached_fonts):
        if not font:
            continue
        family_key = icu_lower(font.get(family_attr) or '')
        if family_key:
            families[family_key].append(font)

    for faces in itervalues(families):
        # Look for duplicate font files and choose the copy that is from a
        # more significant font directory (prefer user directories over
        # system directories).
        chosen = {}
        discarded = []
        for face in faces:
            fingerprint = (icu_lower(face['font-family']), face['font-weight'],
                    face['font-stretch'], face['font-style'])
            existing = chosen.get(fingerprint)
            if existing is None:
                chosen[fingerprint] = face
            elif path_significance(face['path'], folders) >= path_significance(existing['path'], folders):
                discarded.append(existing)
                chosen[fingerprint] = face
            else:
                discarded.append(face)
        for face in discarded:
            faces.remove(face)
        faces.sort(key=font_priority)

    font_family_map = dict.copy(families)
    font_families = tuple(sorted((faces[0]['font-family'] for faces in
        itervalues(font_family_map)), key=sort_key))
    return font_family_map, font_families
# }}}
class FontScanner(Thread):

    '''
    Scan the system for font files in a background thread, caching the parsed
    metadata of each font file (keyed by path, size and mtime) in a
    JSONConfig so that unchanged files are not re-parsed on subsequent runs.
    Query methods join() the scan thread first, so they block until the scan
    has finished.
    '''

    # Bump to invalidate caches written by older versions of this class
    CACHE_VERSION = 2

    def __init__(self, folders=None, allowed_extensions=frozenset(('ttf', 'otf'))):
        '''
        :param folders: Optional iterable of extra folders to scan, in
            addition to the platform font folders and calibre's own font
            folders. (Replaces the previous mutable default argument ``[]``;
            passing a list behaves exactly as before.)
        :param allowed_extensions: Lower-case file extensions (without the
            dot) that are treated as font files.
        '''
        super().__init__(daemon=True)
        # Avoid the mutable-default-argument anti-pattern: build a fresh list
        extra_folders = list(folders) if folders else []
        self.folders = extra_folders + font_dirs() + [os.path.join(config_dir, 'fonts'),
                P('fonts/liberation')]
        # Normalized so that path_significance() ranks paths consistently
        self.folders = [os.path.normcase(os.path.abspath(f)) for f in
                self.folders]
        self.font_families = ()
        self.allowed_extensions = allowed_extensions

    # API {{{
    def find_font_families(self):
        '''Return the tuple of all discovered font family names.'''
        self.join()
        return self.font_families

    def fonts_for_family(self, family):
        '''
        Return a list of the faces belonging to the specified family. The first
        face is the "Regular" face of family. Each face is a dictionary with
        many keys, the most important of which are: path, font-family,
        font-weight, font-style, font-stretch. The font-* properties follow the
        CSS 3 Fonts specification. Raises NoFonts if the family is unknown.
        '''
        self.join()
        try:
            return self.font_family_map[icu_lower(family)]
        except KeyError:
            raise NoFonts('No fonts found for the family: %r'%family)

    def legacy_fonts_for_family(self, family):
        '''
        Return a simple set of regular, bold, italic and bold-italic faces for
        the specified family. Returns a dictionary with each element being a
        2-tuple of (path to font, full font name) and the keys being: normal,
        bold, italic, bi. Keys with no matching face are absent.
        '''
        ans = {}
        try:
            faces = self.fonts_for_family(family)
        except NoFonts:
            return ans
        for i, face in enumerate(faces):
            if i == 0:
                # fonts_for_family() sorts the "Regular" face first
                key = 'normal'
            elif face['font-style'] in {'italic', 'oblique'}:
                key = 'bi' if face['font-weight'] == 'bold' else 'italic'
            elif face['font-weight'] == 'bold':
                key = 'bold'
            else:
                continue
            ans[key] = (face['path'], face['full_name'])
        return ans

    def get_font_data(self, font_or_path):
        '''Return the raw bytes of a font file, given its path or face dict.'''
        path = font_or_path
        if isinstance(font_or_path, dict):
            path = font_or_path['path']
        with open(path, 'rb') as f:
            return f.read()

    def find_font_for_text(self, text, allowed_families={'serif', 'sans-serif'},
            preferred_families=('serif', 'sans-serif', 'monospace', 'cursive', 'fantasy')):
        '''
        Find a font on the system capable of rendering the given text.

        Returns a font family (as given by fonts_for_family()) that has a
        "normal" font and that can render the supplied text. If no such font
        exists, returns None.

        :return: (family name, faces) or None, None
        '''
        from calibre.utils.fonts.utils import get_printable_characters, panose_to_css_generic_family, supports_text
        if not isinstance(text, str):
            raise TypeError('%r is not unicode'%text)
        text = get_printable_characters(text)
        found = {}

        def filter_faces(font):
            # A face qualifies only if its character map covers all of text
            try:
                raw = self.get_font_data(font)
                return supports_text(raw, text)
            except:
                pass
            return False

        for family in self.find_font_families():
            faces = list(filter(filter_faces, self.fonts_for_family(family)))
            if not faces:
                continue
            generic_family = panose_to_css_generic_family(faces[0]['panose'])
            if generic_family in allowed_families or generic_family == preferred_families[0]:
                return (family, faces)
            elif generic_family not in found:
                found[generic_family] = (family, faces)

        for f in preferred_families:
            if f in found:
                return found[f]
        return None, None
    # }}}

    def reload_cache(self):
        '''(Re)load the on-disk cache, discarding it on a version mismatch.'''
        if not hasattr(self, 'cache'):
            from calibre.utils.config import JSONConfig
            self.cache = JSONConfig('fonts/scanner_cache')
        else:
            self.cache.refresh()
        if self.cache.get('version', None) != self.CACHE_VERSION:
            self.cache.clear()
        self.cached_fonts = self.cache.get('fonts', {})

    def run(self):
        self.do_scan()

    def do_scan(self):
        '''Walk all font folders, re-using cached metadata where possible.'''
        self.reload_cache()

        if isworker:
            # Dont scan font files in worker processes, use whatever is
            # cached. Font files typically dont change frequently enough to
            # justify a rescan in a worker process.
            self.build_families()
            return

        cached_fonts = self.cached_fonts.copy()
        self.cached_fonts.clear()
        for folder in self.folders:
            if not os.path.isdir(folder):
                continue
            try:
                files = tuple(walk(folder))
            except OSError as e:
                if DEBUG:
                    prints('Failed to walk font folder:', folder,
                            as_unicode(e))
                continue
            for candidate in files:
                if (candidate.rpartition('.')[-1].lower() not in self.allowed_extensions or not os.path.isfile(candidate)):
                    continue
                candidate = os.path.normcase(os.path.abspath(candidate))
                try:
                    s = os.stat(candidate)
                except OSError:
                    continue
                fileid = f'{candidate}||{s.st_size}:{s.st_mtime}'
                if fileid in cached_fonts:
                    # Use previously cached metadata, since the file size and
                    # last modified timestamp have not changed.
                    self.cached_fonts[fileid] = cached_fonts[fileid]
                    continue
                try:
                    self.read_font_metadata(candidate, fileid)
                except Exception as e:
                    if DEBUG:
                        prints('Failed to read metadata from font file:',
                                candidate, as_unicode(e))
                    continue

        if frozenset(cached_fonts) != frozenset(self.cached_fonts):
            # Write out the cache only if some font files have changed
            self.write_cache()

        self.build_families()

    def build_families(self):
        self.font_family_map, self.font_families = build_families(self.cached_fonts, self.folders)

    def write_cache(self):
        # writing to the cache is atomic thanks to JSONConfig
        with self.cache:
            self.cache['version'] = self.CACHE_VERSION
            self.cache['fonts'] = self.cached_fonts

    def force_rescan(self):
        '''Empty the cache so the next scan re-parses every font file.'''
        self.cached_fonts = {}
        self.write_cache()

    def read_font_metadata(self, path, fileid):
        '''Parse the font at path, storing its metadata dict under fileid.'''
        with open(path, 'rb') as f:
            try:
                fm = FontMetadata(f)
            except UnsupportedFont:
                # Remember the failure so the file is not re-parsed each scan
                self.cached_fonts[fileid] = {}
            else:
                data = fm.to_dict()
                data['path'] = path
                self.cached_fonts[fileid] = data

    def dump_fonts(self):
        '''Debugging aid: print every family and its faces to stdout.'''
        self.join()
        for family in self.font_families:
            prints(family)
            for font in self.fonts_for_family(family):
                prints('\t%s: %s'%(font['full_name'], font['path']))
                prints(end='\t')
                for key in ('font-stretch', 'font-weight', 'font-style'):
                    prints('%s: %s'%(key, font[key]), end=' ')
                prints()
                prints('\tSub-family:', font['wws_subfamily_name'] or
                        font['preferred_subfamily_name'] or
                        font['subfamily_name'])
                prints()
            prints()
# Module-level singleton: the scan starts in the background at import time so
# results are ready (or nearly so) by the time they are first queried.
font_scanner = FontScanner()
font_scanner.start()
def force_rescan():
    '''Discard the on-disk font cache and synchronously rescan all folders.'''
    scanner = font_scanner
    scanner.join()  # wait for any in-progress background scan to finish
    scanner.force_rescan()
    scanner.run()  # re-run the scan in the calling thread
if __name__ == '__main__':
    # Debugging entry point: print all discovered families and their faces
    font_scanner.dump_fonts()
| 13,881 | Python | .py | 356 | 28.542135 | 123 | 0.572596 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,227 | subset.py | kovidgoyal_calibre/src/calibre/utils/fonts/subset.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2023, Kovid Goyal <kovid at kovidgoyal.net>
import os
import sys
from logging.handlers import QueueHandler
from queue import Empty, SimpleQueue
def subset(input_file_object_or_path, output_file_object_or_path, container_type, chars_or_text=''):
    '''
    Subset a font down to the glyphs needed for chars_or_text (a space is
    always included), writing the result to output_file_object_or_path.
    Fonts in a WOFF container are written out as WOFF2.

    :return: list of messages logged by fontTools during subsetting
    '''
    from fontTools.subset import Subsetter, load_font, log, save_font
    log_messages = SimpleQueue()
    log_handler = QueueHandler(log_messages)
    log.addHandler(log_handler)
    try:
        subsetter = Subsetter()
        subsetter.options.layout_features.append('*')
        subsetter.options.recommended_glyphs = True
        container_type = container_type.lower()
        if 'woff' in container_type:
            subsetter.options.flavor = 'woff2'
        font = load_font(input_file_object_or_path, subsetter.options, dontLoadGlyphNames=False)
        wanted_codepoints = {ord(ch) for ch in chars_or_text}
        wanted_codepoints.add(ord(' '))
        subsetter.populate(unicodes=wanted_codepoints)
        subsetter.subset(font)
        save_font(font, output_file_object_or_path, subsetter.options)
    finally:
        # Always detach our handler and drain any captured log records
        log.removeHandler(log_handler)
        msgs = []
        try:
            while True:
                msgs.append(log_messages.get_nowait().getMessage())
        except Empty:
            pass
    return msgs
if __name__ == '__main__':
    import tempfile
    # Command-line test driver: subset the font file given as the last
    # argument down to a small sample string and report the size reduction.
    src = sys.argv[-1]
    with open(os.path.join(tempfile.gettempdir(), os.path.basename(src)), 'wb') as output:
        print('\n'.join(subset(src, output, os.path.splitext(sys.argv[-1])[1][1:], 'abcdefghijk')))
        a, b = os.path.getsize(src), os.path.getsize(output.name)
        print(f'Input: {a} Output: {b}')
        print('Written to:', output.name)
| 1,635 | Python | .py | 41 | 33.487805 | 100 | 0.657646 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,228 | errors.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/errors.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
class UnsupportedFont(ValueError):
    '''Raised when a font file uses a format/feature this code cannot parse.'''
    pass
class NoGlyphs(ValueError):
    # Presumably raised when an operation (e.g. subsetting) matches no glyphs
    # in the font -- not raised in this module; confirm at the raise sites.
    pass
| 235 | Python | .py | 8 | 26.5 | 61 | 0.718182 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,229 | merge.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/merge.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
from collections import OrderedDict
from functools import partial
class GlyphSizeMismatch(ValueError):
    # Intended to signal differing sizes for the same glyph during merging.
    # Currently unused in this module: the merge below only logs mismatches.
    pass
def merge_truetype_fonts_for_pdf(fonts, log=None):
    '''
    Merge several TrueType fonts into one containing the union of their
    glyphs, for embedding in a PDF. The first font in ``fonts`` is modified
    in place and returned. When the same glyph id appears in more than one
    font, the first occurrence wins; mismatches in glyph size or metrics
    between copies are reported via ``log`` but tolerated.

    :param fonts: non-empty sequence of Sfnt font objects
    :param log: optional callable for mismatch messages; ignored when None
    '''
    # Bug fix: the original called log() unconditionally in the metrics
    # mismatch branches, crashing with TypeError when log was None (only the
    # size mismatch branch was guarded). Using a no-op default makes every
    # branch safe.
    if log is None:
        log = lambda *args, **kwargs: None  # noqa: E731

    all_glyphs = {}  # glyph_id -> raw glyf data from the first font having it
    ans = fonts[0]
    hmetrics_map = {}
    vmetrics_map = {}

    for font in fonts:
        loca = font[b'loca']
        glyf = font[b'glyf']
        num_glyphs = font[b'maxp'].num_glyphs
        loca.load_offsets(font[b'head'], font[b'maxp'])
        # Horizontal/vertical metrics tables are optional
        try:
            hhea = font[b'hhea']
        except KeyError:
            hhea = None
        else:
            hhea.read_data(font[b'hmtx'], num_glyphs)
        try:
            vhea = font[b'vhea']
        except KeyError:
            vhea = None
        else:
            vhea.read_data(font[b'vmtx'], num_glyphs)
        for glyph_id in range(len(loca.offset_map) - 1):
            offset, sz = loca.glyph_location(glyph_id)
            prev_glyph_data = all_glyphs.get(glyph_id)
            if not prev_glyph_data:
                all_glyphs[glyph_id] = glyf.glyph_data(offset, sz, as_raw=True)
                if hhea is not None:
                    hmetrics_map[glyph_id] = hhea.metrics_for(glyph_id)
                if vhea is not None:
                    vmetrics_map[glyph_id] = vhea.metrics_for(glyph_id)
            elif sz > 0:
                # Glyph already seen in an earlier font: data and metrics
                # should agree; report (but do not raise on) discrepancies.
                if abs(sz - len(prev_glyph_data)) > 8:
                    log(f'Size mismatch for glyph id: {glyph_id} prev_sz: {len(prev_glyph_data)} sz: {sz}')
                if hhea is not None:
                    m = hhea.metrics_for(glyph_id)
                    old_val = hmetrics_map.get(glyph_id)
                    if old_val is None:
                        hmetrics_map[glyph_id] = m
                    elif m != old_val:
                        log(f'Metrics mismatch for glyph id: {glyph_id} prev: {hmetrics_map[glyph_id]} cur: {m}')
                if vhea is not None:
                    m = vhea.metrics_for(glyph_id)
                    old_val = vmetrics_map.get(glyph_id)
                    if old_val is None:
                        vmetrics_map[glyph_id] = m
                    elif m != vmetrics_map[glyph_id]:
                        log(f'Vertical metrics mismatch for glyph id: {glyph_id} prev: {vmetrics_map[glyph_id]} cur: {m}')

    glyf = ans[b'glyf']
    head = ans[b'head']
    loca = ans[b'loca']
    maxp = ans[b'maxp']

    # Rebuild glyf/loca/head/maxp in the first font from the merged glyph set
    gmap = OrderedDict()
    for glyph_id in sorted(all_glyphs):
        gmap[glyph_id] = partial(all_glyphs.__getitem__, glyph_id)
    offset_map = glyf.update(gmap)
    loca.update(offset_map)
    head.index_to_loc_format = 0 if loca.fmt == 'H' else 1
    head.update()
    maxp.num_glyphs = len(loca.offset_map) - 1
    maxp.update()
    if hmetrics_map and b'hhea' in ans:
        ans[b'hhea'].update(hmetrics_map, ans[b'hmtx'])
    if vmetrics_map and b'vhea' in ans:
        ans[b'vhea'].update(vmetrics_map, ans[b'vmtx'])
    # Drop tables that reference glyph ids and are not needed for PDF embedding
    for name in 'hdmx GPOS GSUB'.split():
        ans.pop(name.encode('ascii'), None)
    return ans
| 3,256 | Python | .py | 76 | 31.381579 | 133 | 0.54686 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,230 | container.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/container.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2012, Kovid Goyal <kovid at kovidgoyal.net>
from collections import OrderedDict
from io import BytesIO
from struct import calcsize, pack
from calibre.utils.fonts.sfnt import UnknownTable, align_block, max_power_of_two
from calibre.utils.fonts.sfnt.cff.table import CFFTable
from calibre.utils.fonts.sfnt.cmap import CmapTable
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from calibre.utils.fonts.sfnt.glyf import GlyfTable
from calibre.utils.fonts.sfnt.gsub import GSUBTable
from calibre.utils.fonts.sfnt.head import HeadTable, HorizontalHeader, OS2Table, PostTable, VerticalHeader
from calibre.utils.fonts.sfnt.kern import KernTable
from calibre.utils.fonts.sfnt.loca import LocaTable
from calibre.utils.fonts.sfnt.maxp import MaxpTable
from calibre.utils.fonts.utils import checksum_of_block, get_tables, verify_checksums
from calibre.utils.resources import get_path as P
# OpenType spec: http://www.microsoft.com/typography/otspec/otff.htm
class Sfnt:

    '''
    In-memory representation of a font in the sfnt container format
    (TrueType / OpenType). Tables are exposed via mapping-style access keyed
    by their four-byte tags (e.g. ``sfnt[b'head']``). Tables with an entry in
    TABLE_MAP are parsed into dedicated objects; all others are wrapped in
    UnknownTable and carried through unchanged. Calling the instance
    serializes it back to raw font bytes.
    '''

    # Tags that get a dedicated parser class; everything else -> UnknownTable
    TABLE_MAP = {
        b'head' : HeadTable,
        b'hhea' : HorizontalHeader,
        b'vhea' : VerticalHeader,
        b'maxp' : MaxpTable,
        b'loca' : LocaTable,
        b'glyf' : GlyfTable,
        b'cmap' : CmapTable,
        b'CFF ' : CFFTable,
        b'kern' : KernTable,
        b'GSUB' : GSUBTable,
        b'OS/2' : OS2Table,
        b'post' : PostTable,
    }

    def __init__(self, raw_or_get_table):
        '''
        :param raw_or_get_table: either the raw bytes of an entire font file,
            or a callable that returns the raw bytes of a table given its
            four byte tag (empty/falsy result means the table is absent)
        :raises UnsupportedFont: if the sfnt version is unknown or no tables
            are found
        '''
        self.tables = {}
        if isinstance(raw_or_get_table, bytes):
            raw = raw_or_get_table
            self.sfnt_version = raw[:4]
            if self.sfnt_version not in {b'\x00\x01\x00\x00', b'OTTO', b'true',
                    b'type1'}:
                raise UnsupportedFont('Font has unknown sfnt version: %r'%self.sfnt_version)
            for table_tag, table, table_index, table_offset, table_checksum in get_tables(raw):
                self.tables[table_tag] = self.TABLE_MAP.get(
                    table_tag, UnknownTable)(table)
        else:
            # Pull every table we know about out of the supplied callable
            for table_tag in {
                    b'cmap', b'hhea', b'head', b'hmtx', b'maxp', b'name', b'OS/2',
                    b'post', b'cvt ', b'fpgm', b'glyf', b'loca', b'prep', b'CFF ',
                    b'VORG', b'EBDT', b'EBLC', b'EBSC', b'BASE', b'GSUB', b'GPOS',
                    b'GDEF', b'JSTF', b'gasp', b'hdmx', b'kern', b'LTSH', b'PCLT',
                    b'VDMX', b'vhea', b'vmtx', b'MATH'}:
                table = bytes(raw_or_get_table(table_tag))
                if table:
                    self.tables[table_tag] = self.TABLE_MAP.get(
                        table_tag, UnknownTable)(table)
        if not self.tables:
            raise UnsupportedFont('This font has no tables')
        # Fonts with TrueType outlines carry a glyf table; otherwise assume
        # CFF outlines and use the OpenType 'OTTO' version tag
        self.sfnt_version = (b'\0\x01\0\0' if b'glyf' in self.tables
                                else b'OTTO')

    def __getitem__(self, key):
        return self.tables[key]

    def __contains__(self, key):
        return key in self.tables

    def __delitem__(self, key):
        del self.tables[key]

    def __iter__(self):
        '''Iterate over the table tags in order.'''
        yield from sorted(self.tables)
        # Although the optimal order is not alphabetical, the OTF spec says
        # they should be alphabetical, so we stick with that. See
        # http://partners.adobe.com/public/developer/opentype/index_recs.html
        # for optimal order.
        # keys = list(self.tables)
        # order = {x:i for i, x in enumerate((b'head', b'hhea', b'maxp', b'OS/2',
        #     b'hmtx', b'LTSH', b'VDMX', b'hdmx', b'cmap', b'fpgm', b'prep',
        #     b'cvt ', b'loca', b'glyf', b'CFF ', b'kern', b'name', b'post',
        #     b'gasp', b'PCLT', b'DSIG'))}
        # keys.sort(key=lambda x:order.get(x, 1000))
        # for x in keys:
        #     yield x

    def pop(self, key, default=None):
        return self.tables.pop(key, default)

    def get(self, key, default=None):
        return self.tables.get(key, default)

    def sizes(self):
        '''Return an OrderedDict mapping table tag -> size in bytes.'''
        ans = OrderedDict()
        for tag in self:
            ans[tag] = len(self[tag])
        return ans

    def get_all_font_names(self):
        '''Return FontNames parsed from the name table, or None if absent.'''
        from calibre.utils.fonts.metadata import FontNames, get_font_names2
        name_table = self.get(b'name')
        if name_table is not None:
            return FontNames(*get_font_names2(name_table.raw, raw_is_table=True))

    def __call__(self, stream=None):
        '''
        Serialize this font back into sfnt container format.

        :param stream: optional seekable binary stream to write into; a
            BytesIO is created when None
        :return: (raw bytes of the serialized font, OrderedDict of
            table tag -> table size)
        '''
        stream = BytesIO() if stream is None else stream

        def spack(*args):
            stream.write(pack(*args))

        stream.seek(0)

        # Write header: version tag plus binary-search helper fields derived
        # from the number of tables
        num_tables = len(self.tables)
        ln2 = max_power_of_two(num_tables)
        srange = (2**ln2) * 16
        spack(b'>4s4H',
            self.sfnt_version, num_tables, srange, ln2, num_tables * 16 - srange)

        # Write tables
        head_offset = None
        table_data = []
        offset = stream.tell() + (calcsize(b'>4s3L') * num_tables)
        sizes = OrderedDict()
        for tag in self:
            table = self.tables[tag]
            raw = table()
            table_len = len(raw)
            if tag == b'head':
                # The checkSumAdjustment field (bytes 8-11 of head) must be
                # zero while checksums are computed; it is filled in below
                head_offset = offset
                raw = raw[:8] + b'\0\0\0\0' + raw[12:]
            raw = align_block(raw)
            checksum = checksum_of_block(raw)
            spack(b'>4s3L', tag, checksum, offset, table_len)
            offset += len(raw)
            table_data.append(raw)
            sizes[tag] = table_len

        for x in table_data:
            stream.write(x)

        # Whole-font checksum adjustment written into the head table
        checksum = checksum_of_block(stream.getvalue())
        q = (0xB1B0AFBA - checksum) & 0xffffffff
        stream.seek(head_offset + 8)
        spack(b'>L', q)

        return stream.getvalue(), sizes
def test_roundtrip(ff=None):
    '''Parse a font, serialize it again and verify the result is unchanged.'''
    if ff is None:
        # Default to a font bundled with calibre
        data = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
    else:
        with open(ff, 'rb') as f:
            data = f.read()
    serialized = Sfnt(data)()[0]
    verify_checksums(serialized)
    if data[:12] != serialized[:12]:
        raise ValueError('Roundtripping failed, font header not the same')
    if len(data) != len(serialized):
        raise ValueError('Roundtripping failed, size different (%d vs. %d)'%
                (len(data), len(serialized)))
if __name__ == '__main__':
    import sys
    # Round-trip the font file passed on the command line
    test_roundtrip(sys.argv[-1])
| 6,328 | Python | .py | 146 | 33.89726 | 106 | 0.58791 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,231 | loca.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/loca.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import array
import sys
from itertools import repeat
from operator import itemgetter
from calibre.utils.fonts.sfnt import UnknownTable
from polyglot.builtins import iteritems
def four_byte_type_code():
    '''
    Return the array typecode ('I' or 'L') whose items are exactly 4 bytes
    wide on this platform, or None if neither is (should not happen on any
    supported platform).
    '''
    return next((code for code in 'IL' if array.array(code).itemsize == 4), None)
def read_array(data, fmt='H'):
    '''
    Interpret big-endian binary data as an array of the given typecode,
    byte-swapping on little-endian hosts so values come out correct.
    '''
    values = array.array(fmt, data)
    if sys.byteorder == 'little':
        values.byteswap()
    return values
class LocaTable(UnknownTable):

    '''
    The loca table: maps glyph ids to locations of their data inside the
    glyf table. Offsets are stored either as 16 bit half-offsets (typecode
    'H') or full 32 bit offsets, selected by head.index_to_loc_format.
    '''

    def load_offsets(self, head_table, maxp_table):
        '''Parse self.raw into self.offset_map (real byte offsets into glyf).'''
        fmt = 'H' if head_table.index_to_loc_format == 0 else four_byte_type_code()
        locs = read_array(self.raw, fmt)
        self.offset_map = locs.tolist()
        if fmt == 'H':
            # In the short format, stored values are half the actual offset
            self.offset_map = [2*i for i in self.offset_map]
        self.fmt = fmt

    def glyph_location(self, glyph_id):
        '''Return (offset, size) of the glyph's data inside the glyf table.'''
        offset = self.offset_map[glyph_id]
        next_offset = self.offset_map[glyph_id+1]
        return offset, next_offset - offset

    def update(self, resolved_glyph_map):
        '''
        Update this table to contain pointers only to the glyphs in
        resolved_glyph_map which must be a map of glyph_ids to (offset, sz)
        Note that the loca table is generated for all glyphs from 0 to the
        largest glyph that is either in resolved_glyph_map or was present
        originally. The pointers to glyphs that have no data will be set to
        zero. This preserves glyph ids.
        '''
        current_max_glyph_id = len(self.offset_map) - 2
        max_glyph_id = max(resolved_glyph_map or (0,))
        max_glyph_id = max(max_glyph_id, current_max_glyph_id)
        self.offset_map = list(repeat(0, max_glyph_id + 2))
        glyphs = [(glyph_id, x[0], x[1]) for glyph_id, x in
                iteritems(resolved_glyph_map)]
        glyphs.sort(key=itemgetter(1))
        for glyph_id, offset, sz in glyphs:
            self.offset_map[glyph_id] = offset
            self.offset_map[glyph_id+1] = offset + sz
        # Fix all zero entries to be the same as the previous entry, which
        # means that if the ith entry is zero, the i-1 glyph is not present.
        for i in range(1, len(self.offset_map)):
            if self.offset_map[i] == 0:
                self.offset_map[i] = self.offset_map[i-1]

        vals = self.offset_map
        max_offset = max(vals) if vals else 0
        # Use the compact 16 bit format when every (halved) offset fits
        if max_offset < 0x20000 and all(l % 2 == 0 for l in vals):
            self.fmt = 'H'
            vals = array.array(self.fmt, (i // 2 for i in vals))
        else:
            self.fmt = four_byte_type_code()
            vals = array.array(self.fmt, vals)
        # Values are stored big-endian on disk
        if sys.byteorder != "big":
            vals.byteswap()
        self.raw = vals.tobytes()
    # Subsetting rewrites the offsets exactly the same way as a merge update
    subset = update

    def dump_glyphs(self, sfnt):
        '''Debugging aid: print the id and size of every non-empty glyph.'''
        if not hasattr(self, 'offset_map'):
            self.load_offsets(sfnt[b'head'], sfnt[b'maxp'])
        for i in range(len(self.offset_map)-1):
            off, noff = self.offset_map[i], self.offset_map[i+1]
            if noff != off:
                print('Glyph id:', i, 'size:', noff-off)
| 3,222 | Python | .py | 75 | 34.546667 | 83 | 0.608432 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,232 | maxp.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/maxp.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from struct import pack, unpack_from
from calibre.utils.fonts.sfnt import FixedProperty, UnknownTable
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
class MaxpTable(UnknownTable):

    '''
    The maxp table: holds the number of glyphs and (in version 1.0 tables)
    various maxima needed by the rasterizer. Only versions <= 1.0 are
    supported.
    '''

    # 16.16 fixed-point view of the raw _version field
    version = FixedProperty('_version')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Version 0.5 tables contain only the version and glyph count
        self._fmt = b'>lH'
        self._version, self.num_glyphs = unpack_from(self._fmt, self.raw)
        self.fields = ('_version', 'num_glyphs')
        if self.version > 1.0:
            raise UnsupportedFont('This font has a maxp table with version: %s'
                    %self.version)
        if self.version == 1.0:
            # Version 1.0 adds the full set of 16 bit maxima fields
            self.fields = ('_version', 'num_glyphs', 'max_points',
                    'max_contours', 'max_composite_points',
                    'max_composite_contours', 'max_zones',
                    'max_twilight_points', 'max_storage', 'max_function_defs',
                    'max_instruction_defs', 'max_stack_elements',
                    'max_size_of_instructions', 'max_component_elements',
                    'max_component_depth')
            self._fmt = b'>lH' + b'H'*(len(self.fields)-2)

        vals = unpack_from(self._fmt, self.raw)
        for f, val in zip(self.fields, vals):
            setattr(self, f, val)

    def update(self):
        '''Re-serialize the (possibly modified) field values into self.raw.'''
        vals = [getattr(self, f) for f in self.fields]
        self.raw = pack(self._fmt, *vals)
| 1,569 | Python | .py | 32 | 38.53125 | 79 | 0.58623 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,233 | metrics.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/metrics.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from calibre.utils.fonts.sfnt.container import UnsupportedFont
from calibre.utils.fonts.utils import get_all_font_names
class FontMetrics:

    '''
    Get various metrics for the specified sfnt. All the metrics are returned in
    units of pixels. To calculate a metric you have to specify the font size
    (in pixels) and the horizontal stretch factor (between 0.0 and 1.0).
    '''

    def __init__(self, sfnt):
        # All of these tables are required to compute the metrics below
        for table in (b'head', b'hhea', b'hmtx', b'cmap', b'OS/2', b'post',
                b'name', b'maxp'):
            if table not in sfnt:
                raise UnsupportedFont('This font has no %s table'%table)
        self.sfnt = sfnt

        self.head = self.sfnt[b'head']
        hhea = self.sfnt[b'hhea']
        hhea.read_data(self.sfnt[b'hmtx'], self.sfnt[b'maxp'].num_glyphs)
        self.ascent = hhea.ascender
        self.descent = hhea.descender
        # Bounding box in font units: (x_min, y_min, x_max, y_max)
        self.bbox = (self.head.x_min, self.head.y_min, self.head.x_max,
                self.head.y_max)
        self._advance_widths = hhea.advance_widths
        self.cmap = self.sfnt[b'cmap']
        self.units_per_em = self.head.units_per_em
        self.os2 = self.sfnt[b'OS/2']
        self.os2.read_data()
        self.post = self.sfnt[b'post']
        self.post.read_data()
        self.names = get_all_font_names(self.sfnt[b'name'].raw, raw_is_table=True)
        self.is_otf = 'CFF ' in self.sfnt.tables
        # Hash of the raw name table; used as this object's identity
        self._sig = hash(self.sfnt[b'name'].raw)

        # Metrics for embedding in PDF, scaled to the PDF glyph space
        # convention of 1000 units per em
        pdf_scale = self.pdf_scale = lambda x:int(round(x*1000./self.units_per_em))
        self.pdf_ascent, self.pdf_descent = map(pdf_scale,
                (self.os2.typo_ascender, self.os2.typo_descender))
        self.pdf_bbox = tuple(map(pdf_scale, self.bbox))
        self.pdf_capheight = pdf_scale(getattr(self.os2, 'cap_height',
                                            self.os2.typo_ascender))
        self.pdf_avg_width = pdf_scale(self.os2.average_char_width)
        # Heuristic stem width estimate derived from the OS/2 weight class
        self.pdf_stemv = 50 + int((self.os2.weight_class / 65.0) ** 2)

    def __hash__(self):
        # Based on the name table, so metrics for the same font hash equal
        return self._sig

    @property
    def postscript_name(self):
        '''Best-effort PostScript name, with spaces replaced by hyphens.'''
        if 'postscript_name' in self.names:
            return self.names['postscript_name'].replace(' ', '-')
        try:
            return self.names['full_name'].replace(' ', '-')
        except KeyError:
            return self.names['family_name'].replace(' ', '-')

    def underline_thickness(self, pixel_size=12.0):
        'Thickness for lines (in pixels) at the specified size'
        yscale = pixel_size / self.units_per_em
        return self.post.underline_thickness * yscale

    def underline_position(self, pixel_size=12.0):
        'Distance from the baseline to the underline, in pixels'
        yscale = pixel_size / self.units_per_em
        return self.post.underline_position * yscale

    def overline_position(self, pixel_size=12.0):
        # Placed just above the ascender (+2 font units)
        yscale = pixel_size / self.units_per_em
        return (self.ascent + 2) * yscale

    def strikeout_size(self, pixel_size=12.0):
        'The width of the strikeout line, in pixels'
        yscale = pixel_size / self.units_per_em
        return yscale * self.os2.strikeout_size

    def strikeout_position(self, pixel_size=12.0):
        'The displacement from the baseline to top of the strikeout line, in pixels'
        yscale = pixel_size / self.units_per_em
        return yscale * self.os2.strikeout_position

    def advance_widths(self, string, pixel_size=12.0, stretch=1.0):
        '''
        Return the advance widths (in pixels) for all glyphs corresponding to
        the characters in string at the specified pixel_size and stretch factor.
        '''
        if not isinstance(string, str):
            raise ValueError('Must supply a unicode object')
        chars = tuple(map(ord, string))
        cmap = self.cmap.get_character_map(chars)
        glyph_ids = (cmap[c] for c in chars)
        pixel_size_x = stretch * pixel_size
        xscale = pixel_size_x / self.units_per_em
        return tuple(i*xscale for i in self.glyph_widths(glyph_ids))

    def glyph_widths(self, glyph_ids):
        # Glyph ids beyond the hmtx table reuse the last advance width, as
        # the table stores a single width for all trailing glyphs
        last = len(self._advance_widths)
        return tuple(self._advance_widths[i if i < last else -1] for i in
            glyph_ids)

    def width(self, string, pixel_size=12.0, stretch=1.0):
        'The width of the string at the specified pixel size and stretch, in pixels'
        return sum(self.advance_widths(string, pixel_size, stretch))
if __name__ == '__main__':
    import sys

    from calibre.utils.fonts.sfnt.container import Sfnt
    # Print the PDF-embedding metrics for the font file given on the command line
    with open(sys.argv[-1], 'rb') as f:
        raw = f.read()
    sfnt = Sfnt(raw)
    m = FontMetrics(sfnt)
    print('Ascent:', m.pdf_ascent)
    print('Descent:', m.pdf_descent)
    print('PDF BBox:', m.pdf_bbox)
    print('CapHeight:', m.pdf_capheight)
    print('AvgWidth:', m.pdf_avg_width)
    print('ItalicAngle', m.post.italic_angle)
    print('StemV', m.pdf_stemv)
| 5,050 | Python | .py | 106 | 38.971698 | 84 | 0.626371 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,234 | __init__.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from datetime import datetime, timedelta
def align_block(raw, multiple=4, pad=b'\0'):
    '''
    Return raw, padded with enough pad bytes to make its length an exact
    multiple of ``multiple`` (already-aligned input is returned unchanged).
    '''
    remainder = len(raw) % multiple
    return raw if remainder == 0 else raw + pad * (multiple - remainder)
class UnknownTable:

    '''
    An sfnt table with no dedicated parser: the raw bytes are stored and
    returned unchanged on serialization. Also serves as the base class for
    all parsed table types.
    '''

    def __init__(self, raw):
        self.raw = raw

    def __call__(self):
        # Serialize: the stored bytes are emitted as-is
        return self.raw

    def __len__(self):
        return len(self.raw)
class DateTimeProperty:

    '''
    Descriptor exposing an integer attribute holding "seconds since the sfnt
    epoch (1904-01-01)" as a datetime object.
    '''

    _EPOCH = datetime(1904, 1, 1)

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, type=None):
        return self._EPOCH + timedelta(seconds=getattr(obj, self.name))

    def __set__(self, obj, val):
        delta = val - self._EPOCH
        setattr(obj, self.name, int(delta.total_seconds()))
class FixedProperty:

    '''
    Descriptor exposing an integer attribute holding a 16.16 fixed point
    value as a float. Reading divides the raw value by 0x10000; writing
    multiplies, rounds and stores.

    Bug fix: __set__ previously returned the converted value instead of
    storing it with setattr (compare DateTimeProperty.__set__), so
    assignments through the property were silently dropped.
    '''

    def __init__(self, name):
        self.name = name

    def __get__(self, obj, type=None):
        val = getattr(obj, self.name)
        return val / 0x10000

    def __set__(self, obj, val):
        setattr(obj, self.name, int(round(val * 0x10000)))
def max_power_of_two(x):
    """
    Return the highest exponent of two, so that
    (2 ** exponent) <= x
    """
    # For x >= 1 this is floor(log2(x)); for x == 0 it clamps to 0
    return max(x.bit_length() - 1, 0)
def load_font(stream_or_path):
    '''Load an sfnt font from a file-like object or from raw bytes.'''
    data = stream_or_path
    if hasattr(data, 'read'):
        data = data.read()
    from calibre.utils.fonts.sfnt.container import Sfnt
    return Sfnt(data)
| 1,642 | Python | .py | 54 | 24.574074 | 78 | 0.603327 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,235 | kern.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/kern.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from struct import calcsize, pack, unpack_from
from struct import error as struct_error
from calibre.utils.fonts.sfnt import FixedProperty, UnknownTable, max_power_of_two
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
class KernTable(UnknownTable):

    '''
    The kern table: pair kerning data. Both header variants are handled --
    version 0 with 16 bit header fields and version 1.0 (0x10000 as 16.16
    fixed point) with 32 bit header fields.
    '''

    version = FixedProperty('_version')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._version, self.num_tables = unpack_from(b'>HH', self.raw)
        if self._version == 1 and len(self.raw) >= 8:
            # A leading 16 bit value of 1 means this is really a version 1.0
            # table whose header fields are 32 bit; re-read accordingly
            self._version, self.num_tables = unpack_from(b'>LL', self.raw)
        self.headerfmt = b'>HH' if self._version == 0 else b'>LL'

    def restrict_to_glyphs(self, glyph_ids):
        '''
        Drop all format 0 kerning pairs that reference glyphs outside
        glyph_ids, rewriting self.raw in place. Subtables in formats other
        than 0 are kept unchanged.
        '''
        if self._version not in {0, 0x10000}:
            raise UnsupportedFont('kern table has version: %x'%self._version)
        offset = 4 if (self._version == 0) else 8
        tables = []
        for i in range(self.num_tables):
            # Subtable headers differ between the two table versions
            if self._version == 0:
                version, length, coverage = unpack_from(b'>3H', self.raw, offset)
                table_format = version
            else:
                length, coverage = unpack_from(b'>LH', self.raw, offset)
                table_format = coverage & 0xff
            raw = self.raw[offset:offset+length]
            if table_format == 0:
                raw = self.restrict_format_0(raw, glyph_ids)
                if not raw:
                    # All pairs were filtered out; drop the subtable entirely
                    continue
            tables.append(raw)
            offset += length
        self.raw = pack(self.headerfmt, self._version, len(tables)) + b''.join(tables)

    def restrict_format_0(self, raw, glyph_ids):
        '''
        Return the raw format 0 subtable filtered down to pairs where both
        glyphs are in glyph_ids, with the binary-search header fields
        recomputed. Returns b'' if no pairs remain.
        '''
        if self._version == 0:
            version, length, coverage, npairs = unpack_from(b'>4H', raw)
            headerfmt = b'>3H'
        else:
            length, coverage, tuple_index, npairs = unpack_from(b'>L3H', raw)
            headerfmt = b'>L2H'
        # Pair records start after the subtable header plus the four 16 bit
        # search-optimization fields
        offset = calcsize(headerfmt + b'4H')
        entries = []
        entrysz = calcsize(b'>2Hh')
        for i in range(npairs):
            try:
                left, right, value = unpack_from(b'>2Hh', raw, offset)
            except struct_error:
                offset = len(raw)
                break  # Buggy kern table
            if left in glyph_ids and right in glyph_ids:
                entries.append(pack(b'>2Hh', left, right, value))
            offset += entrysz
        if offset != len(raw):
            raise UnsupportedFont('This font has extra data at the end of'
                    ' a Format 0 kern subtable')
        npairs = len(entries)
        if npairs == 0:
            return b''
        # Recompute the binary-search helper fields for the new pair count
        # (each pair record is 6 bytes)
        entry_selector = max_power_of_two(npairs)
        search_range = (2 ** entry_selector) * 6
        range_shift = (npairs - (2 ** entry_selector)) * 6
        entries = b''.join(entries)
        length = calcsize(headerfmt + b'4H') + len(entries)
        if self._version == 0:
            header = pack(headerfmt, version, length, coverage)
        else:
            header = pack(headerfmt, length, coverage, tuple_index)
        return header + pack(b'>4H', npairs, search_range, entry_selector,
                range_shift) + entries
| 3,304 | Python | .py | 72 | 35.236111 | 86 | 0.575381 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,236 | cmap.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/cmap.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
# Note that the code for creating a BMP table (cmap format 4) is taken with
# thanks from the fonttools project (BSD licensed).
from collections import OrderedDict
from struct import calcsize, pack, unpack_from
from calibre.utils.fonts.sfnt import UnknownTable, max_power_of_two
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from calibre.utils.fonts.utils import read_bmp_prefix
def split_range(start_code, end_code, cmap):  # {{{
    '''Partition the character range [start_code, end_code] into subranges
    so that a cmap format 4 subtable can store it compactly: runs whose
    glyph ids are consecutive can be encoded with just an idDelta, while
    the rest need explicit glyphIndexArray entries. Returns (starts, ends)
    where ends has one more element than starts (the first start is
    implicitly start_code). Algorithm adapted from fonttools (BSD).'''
    if start_code == end_code:
        return [], [end_code]

    # Collect the maximal runs inside the range whose glyph ids are
    # consecutive.
    runs = []
    run_start = None
    prev_code, prev_gid = start_code, cmap[start_code]
    for code in range(start_code + 1, end_code + 1):
        gid = cmap[code]
        if gid == prev_gid + 1:
            if run_start is None:
                run_start = prev_code
        elif run_start is not None:
            runs.append((run_start, prev_code))
            run_start = None
        prev_code, prev_gid = code, gid
    if run_start is not None:
        runs.append((run_start, prev_code))
    assert prev_code == end_code

    # Drop runs that would not actually shrink the data: a new segment
    # costs 8 bytes, staying in the current one costs 2 bytes per char.
    kept = []
    for b, e in runs:
        if b == start_code and e == end_code:
            break  # a single run covers the whole range: no split needed
        # A run touching either boundary adds one segment; an interior
        # run adds two.
        threshold = 4 if (b == start_code or e == end_code) else 8
        if e - b + 1 > threshold:
            kept.append((b, e))
    if not kept:
        return [], [end_code]

    # Pad out to the full range and fill the holes (the stretches with
    # non consecutive glyph ids) so the segments tile the range exactly.
    if kept[0][0] != start_code:
        kept.insert(0, (start_code, kept[0][0] - 1))
    if kept[-1][1] != end_code:
        kept.append((kept[-1][1] + 1, end_code))
    i = 1
    while i < len(kept):
        prev_end = kept[i - 1][1]
        if prev_end + 1 != kept[i][0]:
            kept.insert(i, (prev_end + 1, kept[i][0] - 1))
            i += 1
        i += 1

    starts = [b for b, e in kept]
    ends = [e for b, e in kept]
    starts.pop(0)
    assert len(starts) + 1 == len(ends)
    return starts, ends
# }}}
def set_id_delta(index, start_code):  # {{{
    '''Compute the idDelta for a cmap format 4 segment whose first glyph
    index is *index* and whose first character code is *start_code*.

    idDelta is a signed 16 bit value applied modulo 0x10000:
        final_gid = (gid + idDelta) % 0x10000
    so a raw difference outside [-0x7FFF, 0x7FFF] can be wrapped around
    0x10000 and still reconstruct the same glyph id at lookup time.'''
    delta = index - start_code
    if delta > 0x7FFF:
        delta -= 0x10000
    elif delta < -0x7FFF:
        delta += 0x10000
    return delta
# }}}
class BMPTable:

    '''A parsed cmap format 4 (Windows BMP) subtable: maps character codes
    to glyph ids via sorted segments.'''

    def __init__(self, raw):
        self.raw = raw
        # read_bmp_prefix unpacks the segment arrays from the raw table
        (self.start_count, self.end_count, self.range_offset, self.id_delta,
         self.glyph_id_len, self.glyph_id_map, self.array_len) = \
            read_bmp_prefix(raw, 0)

    def _glyph_id_for(self, code):
        # Segments are sorted by end code, so the first segment whose end
        # is >= code is the only possible match.
        for seg, end in enumerate(self.end_count):
            if end < code:
                continue
            start = self.start_count[seg]
            if start > code:
                break  # code falls in the gap before this segment
            ro = self.range_offset[seg]
            if ro == 0:
                gid = self.id_delta[seg] + code
            else:
                # idRangeOffset points (in uint16 units, relative to its
                # own position) into the glyphIndexArray
                pos = ro // 2 + (code - start) + seg - self.array_len
                gid = self.glyph_id_map[pos]
                if gid != 0:
                    gid += self.id_delta[seg]
            return gid % 0x10000
        return 0  # unmapped: .notdef

    def get_glyph_ids(self, codes):
        # Yields one glyph id per code (0 for unmapped codes)
        for code in codes:
            yield self._glyph_id_for(code)

    def get_glyph_map(self, glyph_ids):
        # Invert the mapping: {char_code: glyph_id} for every code whose
        # glyph is in glyph_ids. The first segment claiming a code wins.
        ans = {}
        for seg, end in enumerate(self.end_count):
            start = self.start_count[seg]
            ro = self.range_offset[seg]
            for code in range(start, end + 1):
                if ro == 0:
                    gid = self.id_delta[seg] + code
                else:
                    pos = ro // 2 + (code - start) + seg - self.array_len
                    gid = self.glyph_id_map[pos]
                    if gid != 0:
                        gid += self.id_delta[seg]
                gid %= 0x10000
                if gid in glyph_ids and code not in ans:
                    ans[code] = gid
        return ans
class CmapTable(UnknownTable):

    '''The cmap table: maps character codes to glyph ids. Only the Windows
    BMP (platform 3, encoding 1) format 4 subtable is parsed; all other
    subtables are ignored.'''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.version, self.num_tables = unpack_from(b'>HH', self.raw)

        self.tables = {}

        # Read the encoding records: (platform id, encoding id, offset)
        offset = 4
        sz = calcsize(b'>HHL')
        recs = []
        for i in range(self.num_tables):
            platform, encoding, table_offset = unpack_from(b'>HHL', self.raw,
                    offset)
            offset += sz
            recs.append((platform, encoding, table_offset))

        self.bmp_table = None

        for i in range(len(recs)):
            platform, encoding, offset = recs[i]
            # NOTE(review): each subtable is assumed to end where the next
            # record's offset begins, i.e. subtables are stored in
            # increasing offset order without sharing -- TODO confirm this
            # holds for all fonts this code sees
            try:
                next_offset = recs[i+1][-1]
            except IndexError:
                next_offset = len(self.raw)
            table = self.raw[offset:next_offset]
            if table:
                fmt = unpack_from(b'>H', table)[0]
                if platform == 3 and encoding == 1 and fmt == 4:
                    self.bmp_table = BMPTable(table)

    def get_character_map(self, chars):
        '''
        Get a mapping of character codes to glyph ids in the font.

        Characters with no glyph (glyph id 0, .notdef) are omitted from
        the returned OrderedDict, which is sorted by character code.
        '''
        if self.bmp_table is None:
            raise UnsupportedFont('This font has no Windows BMP cmap subtable.'
                    ' Most likely a special purpose font.')
        chars = sorted(set(chars))
        ans = OrderedDict()
        for i, glyph_id in enumerate(self.bmp_table.get_glyph_ids(chars)):
            if glyph_id > 0:
                ans[chars[i]] = glyph_id
        return ans

    def get_glyph_map(self, glyph_ids):
        '''
        Get a mapping of character codes to glyph ids for the specified glyph
        ids.
        '''
        if self.bmp_table is None:
            raise UnsupportedFont('This font has no Windows BMP cmap subtable.'
                    ' Most likely a special purpose font.')
        glyph_ids = frozenset(glyph_ids)
        return self.bmp_table.get_glyph_map(glyph_ids)

    def set_character_map(self, cmap):
        '''Replace this table with one containing a single format 4 (BMP)
        subtable that encodes the {char_code: glyph_id} mapping cmap.'''
        self.version, self.num_tables = 0, 1
        fmt = b'>7H'
        codes = sorted(cmap)

        # Build the segment start/end code lists. Runs of consecutive
        # character codes are split further (via split_range) wherever
        # that yields a smaller encoding.
        if not codes:
            start_code = [0xffff]
            end_code = [0xffff]
        else:
            last_code = codes[0]
            end_code = []
            start_code = [last_code]
            for code in codes[1:]:
                if code == last_code + 1:
                    last_code = code
                    continue
                start, end = split_range(start_code[-1], last_code, cmap)
                start_code.extend(start)
                end_code.extend(end)
                start_code.append(code)
                last_code = code
            end_code.append(last_code)
            # The spec requires a terminating segment for code 0xffff
            start_code.append(0xffff)
            end_code.append(0xffff)

        # For each segment, either store an idDelta (contiguous glyph ids)
        # or fall back to explicit glyphIndexArray entries. The
        # idRangeOffset is the byte distance from its own slot in the
        # table to the entries it points at.
        id_delta = []
        id_range_offset = []
        glyph_index_array = []
        for i in range(len(end_code)-1):  # skip the closing codes (0xffff)
            indices = tuple(cmap[char_code] for char_code in range(start_code[i], end_code[i] + 1))
            if indices == tuple(range(indices[0], indices[0] + len(indices))):
                # indices is a contiguous list
                id_delta_temp = set_id_delta(indices[0], start_code[i])
                if id_delta_temp > 0x7FFF or id_delta_temp < -0x7FFF:
                    id_delta.append(0)
                    id_range_offset.append(2 * (len(end_code) + len(glyph_index_array) - i))
                    glyph_index_array.extend(indices)
                else:
                    id_delta.append(id_delta_temp)
                    id_range_offset.append(0)
            else:
                id_delta.append(0)
                id_range_offset.append(2 * (len(end_code) + len(glyph_index_array) - i))
                glyph_index_array.extend(indices)
        id_delta.append(1)  # 0xffff + 1 == 0. So this end code maps to .notdef
        id_range_offset.append(0)

        # Binary search helper fields required by the format 4 header
        seg_count = len(end_code)
        max_exponent = max_power_of_two(seg_count)
        search_range = 2 * (2 ** max_exponent)
        entry_selector = max_exponent
        range_shift = 2 * seg_count - search_range

        # endCode[], reservedPad, startCode[], idDelta[], idRangeOffset[],
        # glyphIndexArray[]
        char_code_array = end_code + [0] + start_code
        char_code_array = pack(b'>%dH'%len(char_code_array), *char_code_array)
        id_delta_array = pack(b'>%dh'%len(id_delta), *id_delta)
        rest_array = id_range_offset + glyph_index_array
        rest_array = pack(b'>%dH'%len(rest_array), *rest_array)
        data = char_code_array + id_delta_array + rest_array
        length = calcsize(fmt) + len(data)
        header = pack(fmt, 4, length, 0, 2*seg_count, search_range, entry_selector, range_shift)
        self.bmp_table = header + data

        # Table header plus a single encoding record for platform 3
        # (Windows), encoding 1 (BMP)
        fmt = b'>4HL'
        offset = calcsize(fmt)
        self.raw = pack(fmt, self.version, self.num_tables, 3, 1, offset) + self.bmp_table
| 10,641 | Python | .py | 249 | 31.686747 | 99 | 0.551464 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,237 | glyf.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/glyf.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
from struct import unpack_from
from calibre.utils.fonts.sfnt import UnknownTable
from polyglot.builtins import iteritems
# Flag bits for the component records of a composite glyph in the 'glyf'
# table (see the OpenType specification, "Composite Glyph Description")
ARG_1_AND_2_ARE_WORDS = 0x0001  # if set args are words otherwise they are bytes
ARGS_ARE_XY_VALUES = 0x0002  # if set args are xy values, otherwise they are points
ROUND_XY_TO_GRID = 0x0004  # for the xy values if above is true
WE_HAVE_A_SCALE = 0x0008  # Sx = Sy, otherwise scale == 1.0
NON_OVERLAPPING = 0x0010  # set to same value for all components (obsolete!)
MORE_COMPONENTS = 0x0020  # indicates at least one more glyph after this one
WE_HAVE_AN_X_AND_Y_SCALE = 0x0040  # Sx, Sy
WE_HAVE_A_TWO_BY_TWO = 0x0080  # t00, t01, t10, t11
WE_HAVE_INSTRUCTIONS = 0x0100  # instructions follow
USE_MY_METRICS = 0x0200  # apply these metrics to parent glyph
OVERLAP_COMPOUND = 0x0400  # used by Apple in GX fonts
SCALED_COMPONENT_OFFSET = 0x0800  # composite designed to have the component offset scaled (designed for Apple)
UNSCALED_COMPONENT_OFFSET = 0x1000  # composite designed not to have the component offset scaled (designed for MS)
class SimpleGlyph:

    '''A single (non composite) glyph record from the glyf table.'''

    def __init__(self, num_of_countours, raw):
        self.num_of_countours = num_of_countours
        self.raw = raw
        # Glyph ids referenced by this glyph. A simple glyph references
        # none; the CompositeGlyph subclass fills this in.
        self.glyph_indices = []
        self.is_composite = False

    def __len__(self):
        # Size of the raw record in bytes
        return len(self.raw)

    def __call__(self):
        # Calling a glyph yields its raw bytes
        return self.raw
class CompositeGlyph(SimpleGlyph):

    '''A composite glyph: parses its component records to collect the
    glyph ids it references (into self.glyph_indices).'''

    def __init__(self, num_of_countours, raw):
        super().__init__(num_of_countours, raw)
        self.is_composite = True

        # Component records start after the 10 byte glyph header; walk
        # them until a record without the MORE_COMPONENTS bit is seen.
        pos = 10
        flags = MORE_COMPONENTS
        while flags & MORE_COMPONENTS:
            flags, glyph_index = unpack_from(b'>HH', raw, pos)
            self.glyph_indices.append(glyph_index)
            pos += 4
            # Skip the two component arguments (words or bytes) ...
            pos += 4 if flags & ARG_1_AND_2_ARE_WORDS else 2
            # ... and the optional transformation values
            if flags & WE_HAVE_A_SCALE:
                pos += 2
            elif flags & WE_HAVE_AN_X_AND_Y_SCALE:
                pos += 4
            elif flags & WE_HAVE_A_TWO_BY_TWO:
                pos += 8
class GlyfTable(UnknownTable):

    def glyph_data(self, offset, length, as_raw=False):
        '''Return the glyph record stored at [offset, offset+length) either
        as raw bytes or parsed into a SimpleGlyph/CompositeGlyph.'''
        data = self.raw[offset:offset+length]
        if as_raw:
            return data
        # An empty record is the empty glyph; treat it as simple with no
        # contours. A negative contour count marks a composite glyph.
        ncontours = unpack_from(b'>h', data)[0] if data else 0
        cls = SimpleGlyph if ncontours >= 0 else CompositeGlyph
        return cls(ncontours, data)

    def update(self, sorted_glyph_map):
        '''Rebuild the table from sorted_glyph_map ({glyph_id: glyph},
        ordered by glyph id) and return {glyph_id: (offset, length)} for
        the new layout. Records are padded to four byte boundaries.'''
        locations = OrderedDict()
        pos = 0
        chunks = []
        for glyph_id, glyph in sorted_glyph_map.items():
            data = glyph()
            rem = len(data) % 4
            if rem:
                data += b'\0' * (4 - rem)
            locations[glyph_id] = pos, len(data)
            pos += len(data)
            chunks.append(data)
        self.raw = b''.join(chunks)
        return locations
| 3,376 | Python | .py | 76 | 36.552632 | 115 | 0.610908 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,238 | head.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/head.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import array
from struct import calcsize, pack, unpack_from
from calibre.utils.fonts.sfnt import DateTimeProperty, FixedProperty, UnknownTable
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from calibre.utils.fonts.sfnt.loca import read_array
class HeadTable(UnknownTable):

    created = DateTimeProperty('_created')
    modified = DateTimeProperty('_modified')
    version_number = FixedProperty('_version_number')
    font_revision = FixedProperty('_font_revision')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # (field name, struct code) pairs describing the fixed layout of
        # the head table
        field_types = (
            '_version_number' , 'l',
            '_font_revision' , 'l',
            'checksum_adjustment' , 'L',
            'magic_number' , 'L',
            'flags' , 'H',
            'units_per_em' , 'H',
            '_created' , 'q',
            '_modified' , 'q',
            'x_min' , 'h',
            'y_min' , 'h',
            'x_max' , 'h',
            'y_max' , 'h',
            'mac_style' , 'H',
            'lowest_rec_ppem' , 'H',
            'font_direction_hint' , 'h',
            'index_to_loc_format' , 'h',
            'glyph_data_format' , 'h'
        )

        self._fmt = ('>' + ''.join(field_types[1::2])).encode('ascii')
        self._fields = field_types[0::2]

        # Unpack every field into an attribute of the same name
        for name, value in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, name, value)

    def update(self):
        '''Re-serialize the current attribute values into self.raw.'''
        self.raw = pack(self._fmt, *(getattr(self, name) for name in self._fields))
def read_metrics(raw, num_of_metrics, num_of_glyphs, table_name):
    '''Parse an hmtx/vmtx style table: num_of_metrics (advance, bearing)
    records, optionally followed by bare bearings for the remaining
    glyphs (which all share the last advance). Returns (advances,
    bearings) as arrays.'''
    needed = 4 * num_of_metrics
    if len(raw) < needed:
        raise UnsupportedFont(f'The {table_name} table has insufficient data')
    records = raw[:needed]
    # Records interleave uint16 advances with int16 bearings
    advances = read_array(records)[0::2]
    bearings = read_array(records, 'h')[1::2]
    if num_of_glyphs > num_of_metrics:
        tail = raw[needed:]
        needed = 2 * (num_of_glyphs - num_of_metrics)
        if len(tail) < needed:
            raise UnsupportedFont(f'The {table_name} table has insufficient data for trailing bearings')
        bearings += read_array(tail, 'h')
    return advances, bearings
def update_metrics_table(metrics_map, mtx_table):
    '''Serialize metrics_map ({glyph_id: (advance, bearing)}) into
    mtx_table.raw, with records ordered by glyph id. Returns the advances
    (uint16 array) and bearings (int16 array) in the same order.'''
    advances = array.array('H')
    bearings = array.array('h')
    packed = []
    for gid in sorted(metrics_map):
        advance, bearing = metrics_map[gid]
        advances.append(advance)
        bearings.append(bearing)
        packed.append(pack('>Hh', advance, bearing))
    mtx_table.raw = b''.join(packed)
    return advances, bearings
class HorizontalHeader(UnknownTable):

    version_number = FixedProperty('_version_number')

    # (field name, struct code) pairs describing the hhea table layout
    field_types = (
        '_version_number' , 'l',
        'ascender', 'h',
        'descender', 'h',
        'line_gap', 'h',
        'advance_width_max', 'H',
        'min_left_side_bearing', 'h',
        'min_right_side_bearing', 'h',
        'x_max_extent', 'h',
        'caret_slope_rise', 'h',
        'caret_slop_run', 'h',
        'caret_offset', 'h',
        'r1', 'h',
        'r2', 'h',
        'r3', 'h',
        'r4', 'h',
        'metric_data_format', 'h',
        'number_of_h_metrics', 'H',
    )

    def read_data(self, hmtx, num_glyphs):
        '''Parse this table and the associated hmtx table.'''
        self._fmt = ('>' + ''.join(self.field_types[1::2])).encode('ascii')
        self._fields = self.field_types[0::2]
        for name, value in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, name, value)
        self.advance_widths, self.left_side_bearings = read_metrics(
            hmtx.raw, self.number_of_h_metrics, num_glyphs, 'hmtx')

    def metrics_for(self, glyph_id):
        '''Return (advance_width, left_side_bearing) for glyph_id. Glyphs
        past the end of the advance array share the last advance.'''
        lsb = self.left_side_bearings[glyph_id]
        idx = glyph_id if glyph_id < len(self.advance_widths) else -1
        return self.advance_widths[idx], lsb

    def update(self, metrics_map, mtx_table):
        '''Rewrite the hmtx table from metrics_map and refresh this
        table's summary fields accordingly.'''
        advances, bearings = update_metrics_table(metrics_map, mtx_table)
        self.advance_widths = advances
        self.left_side_bearings = bearings
        self.number_of_h_metrics = len(metrics_map)
        self.advance_width_max = max(advances or (0,))
        self.min_left_side_bearing = min(bearings or (0,))
        values = (getattr(self, name) for name in self._fields)
        self.raw = pack('>' + ''.join(self.field_types[1::2]), *values)
class VerticalHeader(UnknownTable):

    version_number = FixedProperty('_version_number')

    # (field name, struct code) pairs describing the vhea table layout
    field_types = (
        '_version_number' , 'l',
        'ascender', 'h',
        'descender', 'h',
        'line_gap', 'h',
        'advance_height_max', 'H',
        'min_top_side_bearing', 'h',
        'min_bottom_side_bearing', 'h',
        'y_max_extent', 'h',
        'caret_slope_rise', 'h',
        'caret_slop_run', 'h',
        'caret_offset', 'h',
        'r1', 'h',
        'r2', 'h',
        'r3', 'h',
        'r4', 'h',
        'metric_data_format', 'h',
        'number_of_v_metrics', 'H',
    )

    def read_data(self, vmtx, num_glyphs):
        '''Parse this table and the associated vmtx table.'''
        self._fmt = ('>' + ''.join(self.field_types[1::2])).encode('ascii')
        self._fields = self.field_types[0::2]
        for name, value in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, name, value)
        self.advance_heights, self.top_side_bearings = read_metrics(
            vmtx.raw, self.number_of_v_metrics, num_glyphs, 'vmtx')

    def metrics_for(self, glyph_id):
        '''Return (advance_height, top_side_bearing) for glyph_id. Glyphs
        past the end of the advance array share the last advance.'''
        tsb = self.top_side_bearings[glyph_id]
        idx = glyph_id if glyph_id < len(self.advance_heights) else -1
        return self.advance_heights[idx], tsb

    def update(self, metrics_map, mtx_table):
        '''Rewrite the vmtx table from metrics_map and refresh this
        table's summary fields accordingly.'''
        advances, bearings = update_metrics_table(metrics_map, mtx_table)
        self.advance_heights = advances
        self.top_side_bearings = bearings
        self.number_of_v_metrics = len(metrics_map)
        self.advance_height_max = max(advances or (0,))
        self.min_top_side_bearing = min(bearings or (0,))
        values = (getattr(self, name) for name in self._fields)
        self.raw = pack('>' + ''.join(self.field_types[1::2]), *values)
class OS2Table(UnknownTable):

    def read_data(self):
        '''Parse the OS/2 table into attributes (idempotent).

        Bug fix: the guard used to test for the attribute 'char_width',
        which is never set (the parsed field is 'average_char_width'), so
        the table was re-parsed on every call.
        '''
        if hasattr(self, 'average_char_width'):
            return
        ver, = unpack_from(b'>H', self.raw)
        # (field name, struct code) pairs; versions > 1 add extra fields
        field_types = [
            'version' , 'H',
            'average_char_width', 'h',
            'weight_class', 'H',
            'width_class', 'H',
            'fs_type', 'H',
            'subscript_x_size', 'h',
            'subscript_y_size', 'h',
            'subscript_x_offset', 'h',
            'subscript_y_offset', 'h',
            'superscript_x_size', 'h',
            'superscript_y_size', 'h',
            'superscript_x_offset', 'h',
            'superscript_y_offset', 'h',
            'strikeout_size', 'h',
            'strikeout_position', 'h',
            'family_class', 'h',
            'panose', '10s',
            'ranges', '16s',
            'vendor_id', '4s',
            'selection', 'H',
            'first_char_index', 'H',
            'last_char_index', 'H',
            'typo_ascender', 'h',
            'typo_descender', 'h',
            'typo_line_gap', 'h',
            'win_ascent', 'H',
            'win_descent', 'H',
        ]
        if ver > 1:
            field_types += [
                'code_page_range', '8s',
                'x_height', 'h',
                'cap_height', 'h',
                'default_char', 'H',
                'break_char', 'H',
                'max_context', 'H',
            ]

        self._fmt = ('>%s'%(''.join(field_types[1::2]))).encode('ascii')
        self._fields = field_types[0::2]
        for f, val in zip(self._fields, unpack_from(self._fmt, self.raw)):
            setattr(self, f, val)

    def zero_fstype(self):
        '''Clear the embedding restrictions (fsType) field in place.'''
        # fsType sits immediately after version, xAvgCharWidth,
        # usWeightClass and usWidthClass
        prefix = calcsize(b'>HhHH')
        self.raw = self.raw[:prefix] + b'\0\0' + self.raw[prefix+2:]
        self.fs_type = 0
class PostTable(UnknownTable):

    version_number = FixedProperty('_version')
    italic_angle = FixedProperty('_italic_angle')

    def read_data(self):
        '''Parse the fixed size prefix of the post table (idempotent).'''
        if hasattr(self, 'underline_position'):
            return
        values = unpack_from(b'>llhh', self.raw)
        self._version, self._italic_angle = values[0], values[1]
        self.underline_position, self.underline_thickness = values[2], values[3]
| 8,301 | Python | .py | 209 | 30.205742 | 123 | 0.538137 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,239 | subset.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/subset.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import traceback
from collections import OrderedDict
from functools import partial
from operator import itemgetter
from calibre.utils.fonts.sfnt.container import Sfnt
from calibre.utils.fonts.sfnt.errors import NoGlyphs, UnsupportedFont
from calibre.utils.icu import ord_string, safe_chr
from calibre.utils.resources import get_path as P
from polyglot.builtins import iteritems, itervalues
# TrueType outlines {{{
def resolve_glyphs(loca, glyf, character_map, extra_glyphs):
    '''Return an OrderedDict mapping glyph id -> glyph record for every
    glyph needed by character_map and extra_glyphs, including the glyphs
    referenced by composite glyphs and the .notdef glyph (id 0). The
    result is sorted by glyph id.'''
    pending = set(character_map.values()) | extra_glyphs
    pending.add(0)  # We always want the .notdef glyph
    resolved = {}
    while pending:
        gid = pending.pop()
        try:
            offset, length = loca.glyph_location(gid)
        except (IndexError, ValueError, KeyError, TypeError):
            # Glyph id has no entry in the loca table, skip it
            continue
        glyph = glyf.glyph_data(offset, length)
        resolved[gid] = glyph
        # Composite glyphs pull in their component glyphs as well
        pending.update(g for g in glyph.glyph_indices if g not in resolved)
    return OrderedDict(sorted(resolved.items(), key=itemgetter(0)))
def subset_truetype(sfnt, character_map, extra_glyphs):
    '''Subset a font with TrueType outlines in place: keep only the glyphs
    reachable from character_map/extra_glyphs, rebuilding the glyf, loca,
    head and maxp tables. character_map is also pruned in place to drop
    codes whose glyphs could not be resolved.'''
    loca = sfnt[b'loca']
    glyf = sfnt[b'glyf']

    try:
        head, maxp = sfnt[b'head'], sfnt[b'maxp']
    except KeyError:
        raise UnsupportedFont('This font does not contain head and/or maxp tables')
    loca.load_offsets(head, maxp)

    resolved_glyphs = resolve_glyphs(loca, glyf, character_map, extra_glyphs)
    # Glyph 0 (.notdef) is always resolved, so "only glyph 0" means none
    # of the requested characters exist in this font
    if not resolved_glyphs or set(resolved_glyphs) == {0}:
        raise NoGlyphs('This font has no glyphs for the specified character '
                'set, subsetting it is pointless')

    # Keep only character codes that have resolved glyphs
    for code, glyph_id in tuple(iteritems(character_map)):
        if glyph_id not in resolved_glyphs:
            del character_map[code]

    # Update the glyf table
    glyph_offset_map = glyf.update(resolved_glyphs)

    # Update the loca table and record its (short or long) offset format
    # in the head table
    loca.subset(glyph_offset_map)
    head.index_to_loc_format = 0 if loca.fmt == 'H' else 1
    head.update()
    maxp.num_glyphs = len(loca.offset_map) - 1
# }}}
def subset_postscript(sfnt, character_map, extra_glyphs):
    '''Subset a font with PostScript (CFF) outlines in place.'''
    cff = sfnt[b'CFF ']
    cff.decompile()
    cff.subset(character_map, extra_glyphs)
def do_warn(warnings, *args):
    '''Emit warning text line by line, followed by a blank line. Lines are
    appended to the *warnings* list if one is given, otherwise printed to
    stdout.'''
    emit = print if warnings is None else warnings.append
    for arg in args:
        for line in arg.splitlines():
            emit(line)
    emit('')
def pdf_subset(sfnt, glyphs):
    '''Subset sfnt in place for embedding in a PDF: keep only the
    specified glyph ids (no character map is needed since PDF references
    glyphs directly).'''
    for tag in tuple(sfnt.tables):
        if tag not in {b'hhea', b'head', b'hmtx', b'maxp',
            b'OS/2', b'post', b'cvt ', b'fpgm', b'glyf', b'loca',
            b'prep', b'CFF ', b'VORG'}:
            # Remove non core tables since they are unused in PDF rendering
            del sfnt[tag]
    if b'loca' in sfnt and b'glyf' in sfnt:
        # TrueType Outlines
        subset_truetype(sfnt, {}, glyphs)
    elif b'CFF ' in sfnt:
        # PostScript Outlines
        subset_postscript(sfnt, {}, glyphs)
    else:
        raise UnsupportedFont('This font does not contain TrueType '
                'or PostScript outlines')
def safe_ord(x):
    # Return the first code point of str(x), using icu's ord_string
    # instead of the builtin ord()
    return ord_string(str(x))[0]
def subset(raw, individual_chars, ranges=(), warnings=None):
    '''Subset the font in *raw*, keeping only the glyphs needed for
    individual_chars plus the inclusive character ranges in *ranges*.
    Warnings are appended to the *warnings* list if given, else printed.
    Returns (subset_font_bytes, old_table_sizes, new_table_sizes).'''
    warn = partial(do_warn, warnings)

    # Build the set of requested code points
    chars = set()
    for ic in individual_chars:
        try:
            chars.add(safe_ord(ic))
        except ValueError:
            continue
    for r in ranges:
        chars |= set(range(safe_ord(r[0]), safe_ord(r[1])+1))

    # Always add the space character for ease of use from the command line
    if safe_ord(' ') not in chars:
        chars.add(safe_ord(' '))

    sfnt = Sfnt(raw)
    old_sizes = sfnt.sizes()
    # Remove the Digital Signature table since it is useless in a subset
    # font anyway
    sfnt.pop(b'DSIG', None)

    # Remove non core tables as they aren't likely to be used by renderers
    # anyway
    core_tables = {b'cmap', b'hhea', b'head', b'hmtx', b'maxp', b'name',
        b'OS/2', b'post', b'cvt ', b'fpgm', b'glyf', b'loca', b'prep',
        b'CFF ', b'VORG', b'EBDT', b'EBLC', b'EBSC', b'BASE', b'GSUB',
        b'GPOS', b'GDEF', b'JSTF', b'gasp', b'hdmx', b'kern', b'LTSH',
        b'PCLT', b'VDMX', b'vhea', b'vmtx', b'MATH'}
    for tag in list(sfnt):
        if tag not in core_tables:
            del sfnt[tag]

    try:
        cmap = sfnt[b'cmap']
    except KeyError:
        raise UnsupportedFont('This font has no cmap table')

    # Get mapping of chars to glyph ids for all specified chars
    character_map = cmap.get_character_map(chars)

    extra_glyphs = set()

    if b'GSUB' in sfnt:
        # Parse all substitution rules to ensure that glyphs that can be
        # substituted for the specified set of glyphs are not removed
        gsub = sfnt[b'GSUB']
        try:
            gsub.decompile()
            extra_glyphs = gsub.all_substitutions(itervalues(character_map))
        except UnsupportedFont as e:
            warn('Usupported GSUB table: %s'%e)
        except Exception:
            warn('Failed to decompile GSUB table:', traceback.format_exc())

    if b'loca' in sfnt and b'glyf' in sfnt:
        # TrueType Outlines
        subset_truetype(sfnt, character_map, extra_glyphs)
    elif b'CFF ' in sfnt:
        # PostScript Outlines
        subset_postscript(sfnt, character_map, extra_glyphs)
    else:
        raise UnsupportedFont('This font does not contain TrueType '
                'or PostScript outlines')

    # Restrict the cmap table to only contain entries for the resolved glyphs
    cmap.set_character_map(character_map)

    if b'kern' in sfnt:
        # Kerning is optional: a broken or unsupported kern table should
        # not abort the subsetting
        try:
            sfnt[b'kern'].restrict_to_glyphs(frozenset(itervalues(character_map)))
        except UnsupportedFont as e:
            warn('kern table unsupported, ignoring: %s'%e)
        except Exception:
            warn('Subsetting of kern table failed, ignoring:',
                    traceback.format_exc())

    raw, new_sizes = sfnt()
    return raw, old_sizes, new_sizes
# CLI {{{
def option_parser():
    '''Build the command line parser for the subset-font tool.'''
    import textwrap

    from calibre.utils.config import OptionParser
    parser = OptionParser(usage=textwrap.dedent('''\
        %prog [options] input_font_file output_font_file characters_to_keep
        Subset the specified font, keeping only the glyphs for the characters in
        characters_to_keep. characters_to_keep is a comma separated list of characters of
        the form: a,b,c,A-Z,0-9,xyz
        You can specify ranges in the list of characters, as shown above.
        '''))
    parser.add_option('-c', '--codes', default=False, action='store_true',
            help='If specified, the list of characters is interpreted as '
            'numeric unicode codes instead of characters. So to specify the '
            'characters a,b you would use 97,98 or U+0061,U+0062')
    parser.prog = 'subset-font'
    return parser
def print_stats(old_stats, new_stats):
    '''Print a per-table size comparison between the original font
    (old_stats) and the subset font (new_stats), both mappings of table
    tag to size in bytes.'''
    from calibre import prints
    prints('========= Table comparison (original vs. subset) =========')
    prints('Table', ' ', '%10s'%'Size', ' ', 'Percent', ' ', '%10s'%'New Size',
            ' New Percent')
    prints('='*80)
    old_total = sum(itervalues(old_stats))
    new_total = sum(itervalues(new_stats))
    # Largest original tables first
    tables = sorted(old_stats, key=lambda x:old_stats[x],
        reverse=True)
    for table in tables:
        osz = old_stats[table]
        op = osz/old_total * 100
        # Tables removed by the subsetting have no entry in new_stats
        nsz = new_stats.get(table, 0)
        np = nsz/new_total * 100
        suffix = ' | same size'
        if nsz != osz:
            suffix = ' | reduced to %.1f %%'%(nsz/osz * 100)
        prints('%4s'%table, ' ', '%10s'%osz, ' ', '%5.1f %%'%op, ' ',
                '%10s'%nsz, ' ', '%5.1f %%'%np, suffix)
    prints('='*80)
def main(args):
    '''Command line entry point: parse arguments, subset the font and
    report the results. args is the full argv (program name included).'''
    import sys
    import time

    from calibre import prints
    parser = option_parser()
    opts, args = parser.parse_args(args)
    if len(args) < 4 or len(args) > 4:
        parser.print_help()
        raise SystemExit(1)
    iff, off, chars = args[1:]
    with open(iff, 'rb') as f:
        orig = f.read()

    chars = [x for x in chars.split(',')]
    individual, ranges = set(), set()

    def not_single(c):
        # Abort if c is not exactly one character
        if len(c) > 1:
            prints(c, 'is not a single character', file=sys.stderr)
            raise SystemExit(1)

    def conv_code(c):
        # Convert a numeric code (decimal, U+xxxx or 0Xxxxx) to a character
        if c.upper()[:2] in ('U+', '0X'):
            c = int(c[2:], 16)
        return safe_chr(int(c))

    # Split the specification into individual characters and
    # (first, last) ranges
    for c in chars:
        if '-' in c:
            parts = tuple(x.strip() for x in c.split('-'))
            if len(parts) != 2:
                prints('Invalid range:', c, file=sys.stderr)
                raise SystemExit(1)
            if opts.codes:
                parts = tuple(map(conv_code, parts))
            for i in parts:
                not_single(i)
            ranges.add(parts)
        else:
            if opts.codes:
                c = conv_code(c)
            not_single(c)
            individual.add(c)
    st = time.time()
    sf, old_stats, new_stats = subset(orig, individual, ranges)
    taken = time.time() - st
    reduced = (len(sf)/len(orig)) * 100

    def sz(x):
        return '%gKB'%(len(x)/1024.)
    print_stats(old_stats, new_stats)
    prints('Original size:', sz(orig), 'Subset size:', sz(sf), 'Reduced to: %g%%'%(reduced))
    prints('Subsetting took %g seconds'%taken)
    with open(off, 'wb') as f:
        f.write(sf)
    prints('Subset font written to:', off)
if __name__ == '__main__':
    # Set up the calibre development environment, if running from source
    try:
        import init_calibre
        init_calibre
    except ImportError:
        pass
    import sys
    main(sys.argv)
# }}}
# Tests {{{
def test_mem():
    '''Crude leak check: run subset() many times and report the residual
    memory growth per call.'''
    import gc

    from calibre.utils.mem import memory
    gc.collect()
    start_mem = memory()
    raw = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
    calls = 1000
    for i in range(calls):
        subset(raw, (), (('a', 'z'),))
    del raw
    for i in range(3):
        gc.collect()
    print('Leaked memory per call:', (memory() - start_mem)/calls*1024, 'KB')
def test():
    '''Smoke test: subsetting a bundled font to three characters should
    shrink it to well under a third of the original size.'''
    raw = P('fonts/liberation/LiberationSerif-Regular.ttf', data=True)
    sf, old_stats, new_stats = subset(raw, {'a', 'b', 'c'}, ())
    if len(sf) > 0.3 * len(raw):
        raise Exception('Subsetting failed')
def all():
    '''Try subsetting every font on the system, reporting failures,
    unsupported fonts, warnings and the average size reduction.

    Bug fix: the average-reduction report used to divide by len(averages)
    unconditionally, raising ZeroDivisionError when no font at all was
    successfully subset.
    '''
    from calibre.utils.fonts.scanner import font_scanner
    failed = []
    unsupported = []
    warnings = {}
    total = 0
    averages = []
    for family in font_scanner.find_font_families():
        for font in font_scanner.fonts_for_family(family):
            raw = font_scanner.get_font_data(font)
            print('Subsetting', font['full_name'], end='\t')
            total += 1
            try:
                w = []
                sf, old_stats, new_stats = subset(raw, {'a', 'b', 'c'},
                        (), w)
                if w:
                    warnings[font['full_name'] + ' (%s)'%font['path']] = w
            except NoGlyphs:
                print('No glyphs!')
                continue
            except UnsupportedFont as e:
                unsupported.append((font['full_name'], font['path'], str(e)))
                print('Unsupported!')
                continue
            except Exception as e:
                print('Failed!')
                failed.append((font['full_name'], font['path'], str(e)))
            else:
                averages.append(sum(itervalues(new_stats))/sum(itervalues(old_stats)) * 100)
                print('Reduced to:', '%.1f'%averages[-1] , '%')

    if unsupported:
        print('\n\nUnsupported:')
        for name, path, err in unsupported:
            print(name, path, err)
        print()
    if warnings:
        print('\n\nWarnings:')
        for name, w in iteritems(warnings):
            if w:
                print(name)
                print('', '\n\t'.join(w), sep='\t')
    if failed:
        print('\n\nFailures:')
        for name, path, err in failed:
            print(name, path, err)
        print()

    # Avoid ZeroDivisionError when nothing was successfully subset
    if averages:
        print('Average reduction to: %.1f%%'%(sum(averages)/len(averages)))
    print('Total:', total, 'Unsupported:', len(unsupported), 'Failed:',
            len(failed), 'Warnings:', len(warnings))
# }}}
| 12,567 | Python | .py | 321 | 30.800623 | 93 | 0.59325 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,240 | common.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/common.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import OrderedDict, namedtuple
from struct import calcsize, unpack_from
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from polyglot.builtins import iteritems
class Unpackable:

    '''A cursor over a byte string: successive unpack() calls read
    big-endian structures and advance the position past them.'''

    def __init__(self, raw, offset):
        self.raw, self.offset = raw, offset
        # Where this structure begins, for resolving relative offsets
        self.start_pos = offset

    def unpack(self, fmt, single_special=True):
        '''Read *fmt* (big-endian, without the leading '>') at the current
        position and advance past it. A single-element result is returned
        unwrapped unless single_special is False.

        Bug fix: the advance used to be calcsize(fmt) with native
        alignment, which can exceed the number of bytes actually consumed
        by the standard-layout ('>') unpack for padded formats; use the
        standard-layout size instead.
        '''
        fmt = fmt.encode('ascii') if not isinstance(fmt, bytes) else fmt
        ans = unpack_from(b'>'+fmt, self.raw, self.offset)
        if single_special and len(ans) == 1:
            ans = ans[0]
        self.offset += calcsize(b'>'+fmt)
        return ans
class SimpleListTable(list):
    '''A table that is a list of subtables, stored as a count followed by
    per-subtable offsets relative to the start of this table.'''

    child_class = None

    def __init__(self, raw, offset):
        list.__init__(self)
        data = Unpackable(raw, offset)
        self.read_extra_header(data)
        for i in range(data.unpack('H')):
            child_offset = data.unpack('H')
            self.append(self.child_class(raw, data.start_pos + child_offset))
        self.read_extra_footer(data)

    def read_extra_header(self, data):
        # Hook for subclasses: parse fields that precede the count
        pass

    def read_extra_footer(self, data):
        # Hook for subclasses: parse fields that follow the offsets
        pass
class ListTable(OrderedDict):
    '''A table that is an ordered mapping of 4-byte tag to subtable,
    stored as a count followed by (tag, offset) records with offsets
    relative to the start of this table.'''

    child_class = None

    def __init__(self, raw, offset):
        OrderedDict.__init__(self)
        data = Unpackable(raw, offset)
        self.read_extra_header(data)
        for i in range(data.unpack('H')):
            tag, child_offset = data.unpack('4sH')
            self[tag] = self.child_class(raw, data.start_pos + child_offset)
        self.read_extra_footer(data)

    def read_extra_header(self, data):
        # Hook for subclasses: parse fields that precede the count
        pass

    def read_extra_footer(self, data):
        # Hook for subclasses: parse fields that follow the records
        pass

    def dump(self, prefix=''):
        # Debug helper: print this table and its children, indented
        print(prefix, self.__class__.__name__, sep='')
        child_prefix = prefix + '  '
        for tag, child in self.items():
            print(child_prefix, tag, sep='')
            child.dump(prefix=child_prefix + '  ')
class IndexTable(list):
    '''A table that is a list of uint16 indices, stored as a count
    followed by the indices themselves.'''

    def __init__(self, raw, offset):
        data = Unpackable(raw, offset)
        self.read_extra_header(data)
        for _ in range(data.unpack('H')):
            self.append(data.unpack('H'))

    def read_extra_header(self, data):
        # Hook for subclasses: parse fields that precede the count
        pass

    def dump(self, prefix=''):
        # Debug helper
        print(prefix, self.__class__.__name__, sep='')
class LanguageSystemTable(IndexTable):

    # A list of feature indices, preceded by lookupOrder (must be 0, it
    # is reserved) and the required feature index
    def read_extra_header(self, data):
        self.lookup_order, self.required_feature_index = data.unpack('2H')
        if self.lookup_order != 0:
            raise UnsupportedFont('This LanguageSystemTable has an unknown'
                    ' lookup order: 0x%x'%self.lookup_order)
class ScriptTable(ListTable):
    '''Maps language system tags to LanguageSystemTable instances, with a
    special b'default' entry for the default language system (or None).'''

    child_class = LanguageSystemTable

    def __init__(self, raw, offset):
        ListTable.__init__(self, raw, offset)

    def read_extra_header(self, data):
        # The default language system offset precedes the record count;
        # zero means there is no default
        base = data.offset
        default_offset = data.unpack('H')
        if default_offset:
            self[b'default'] = LanguageSystemTable(data.raw, base + default_offset)
        else:
            self[b'default'] = None
class ScriptListTable(ListTable):
    # Maps script tags (e.g. b'latn') to ScriptTable instances
    child_class = ScriptTable
class FeatureTable(IndexTable):

    def read_extra_header(self, data):
        '''A FeatureTable is a list of lookup indices preceded by a
        FeatureParams offset.

        FeatureParams is supposed to be NULL for most features, but some
        fonts (e.g. Source Code Pro) set it to a non NULL value, so it is
        read and deliberately not validated. (This replaces an unreachable
        validation branch that was disabled with `if False`.)
        '''
        self.feature_params = data.unpack('H')
class FeatureListTable(ListTable):
    # Maps feature tags (e.g. b'liga') to FeatureTable instances
    child_class = FeatureTable
class LookupTable(SimpleListTable):

    # A list of lookup subtables; the subtable class depends on
    # self.lookup_type and is chosen by the subclass via set_child_class()
    def read_extra_header(self, data):
        self.lookup_type, self.lookup_flag = data.unpack('2H')
        self.set_child_class()

    def set_child_class(self):
        # Subclasses must map self.lookup_type to self.child_class
        raise NotImplementedError()

    def read_extra_footer(self, data):
        # Flag bit 0x0010 (USE_MARK_FILTERING_SET) adds a trailing mark
        # filtering set index after the subtable offsets
        if self.lookup_flag & 0x0010:
            self.mark_filtering_set = data.unpack('H')
def ExtensionSubstitution(raw, offset, subtable_map={}):
    '''Resolve an extension substitution record: read the real lookup type
    and a 32 bit offset, then parse the referenced subtable using the
    class from subtable_map. (subtable_map is only read, so the mutable
    default argument is harmless here.)'''
    data = Unpackable(raw, offset)
    subst_format, extension_lookup_type, delta = data.unpack('2HL')
    if subst_format != 1:
        raise UnsupportedFont('ExtensionSubstitution has unknown format: 0x%x'%subst_format)
    return subtable_map[extension_lookup_type](raw, delta + data.start_pos)
# One contiguous range of covered glyphs: glyph ids start..end map to
# coverage indices start_coverage_index..start_coverage_index+(end-start)
CoverageRange = namedtuple('CoverageRange', 'start end start_coverage_index')
class Coverage:

    '''A Coverage table: declares which glyphs a lookup applies to and
    assigns each covered glyph a coverage index.'''

    def __init__(self, raw, offset, parent_table_name):
        data = Unpackable(raw, offset)
        self.format, count = data.unpack('2H')

        if self.format not in {1, 2}:
            raise UnsupportedFont('Unknown Coverage format: 0x%x in %s'%(
                self.format, parent_table_name))
        if self.format == 1:
            # Format 1: an explicit glyph id list; the coverage index is
            # the position in the list
            self.glyph_ids = data.unpack('%dH'%count, single_special=False)
            self.glyph_ids_map = {gid: i for i, gid in
                                  enumerate(self.glyph_ids)}
        else:
            # Format 2: glyph id ranges, each with a starting coverage index
            self.ranges = []
            flat = data.unpack('%dH'%(3*count), single_special=False)
            for i in range(count):
                self.ranges.append(CoverageRange(*flat[3*i:3*i + 3]))

    def coverage_indices(self, glyph_ids):
        '''Return map of glyph_id -> coverage index. Map contains only those
        glyph_ids that are covered by this table and that are present in
        glyph_ids.'''
        ans = OrderedDict()
        if self.format == 1:
            for gid in glyph_ids:
                idx = self.glyph_ids_map.get(gid, None)
                if idx is not None:
                    ans[gid] = idx
        else:
            for gid in glyph_ids:
                for start, end, start_coverage_index in self.ranges:
                    if start <= gid <= end:
                        ans[gid] = start_coverage_index + (gid - start)
        return ans
class UnknownLookupSubTable:
    '''Base class for GSUB/GPOS lookup subtables. Subclasses declare the
    binary formats they understand in ``formats`` and parse their payload
    in ``initialize()``.'''
    formats = {}
    def __init__(self, raw, offset):
        data = Unpackable(raw, offset)
        self.format = data.unpack('H')
        if self.format not in self.formats:
            raise UnsupportedFont('Unknown format for Lookup Subtable %s: 0x%x'%(
                self.__class__.__name__, self.format))
        if self.has_initial_coverage:
            # Coverage offsets are relative to the start of the subtable
            coverage_offset = data.unpack('H') + data.start_pos
            self.coverage = Coverage(raw, coverage_offset, self.__class__.__name__)
        self.initialize(data)
    @property
    def has_initial_coverage(self):
        # Overridden by subclasses whose format 3 stores coverage elsewhere
        return True
    def all_substitutions(self, glyph_ids):
        ''' Return a set of all glyph ids that could be substituted for any
        subset of the specified glyph ids (which must be a set)'''
        raise NotImplementedError()
    def read_sets(self, data, read_item=None, set_is_index=False):
        # Parse the common two-level "offset list -> per-set item offsets"
        # layout used by several GSUB subtables. Returns a list, indexed by
        # coverage index, of lists of items. Note that both `count` and the
        # loop variable `offset` are deliberately reused at the inner level.
        count = data.unpack('H')
        sets = data.unpack('%dH'%count, single_special=False)
        coverage_to_items_map = []
        for offset in sets:
            # Read items in the set
            data.offset = start_pos = offset + data.start_pos
            count = data.unpack('H')
            item_offsets = data.unpack('%dH'%count, single_special=False)
            items = []
            for offset in item_offsets:
                data.offset = offset + start_pos
                if set_is_index:
                    # NOTE(review): this stores the raw item offset rather
                    # than data read from it, yet callers treat the items as
                    # glyph ids - confirm this is the intended behavior
                    items.append(offset)
                else:
                    items.append(read_item(data))
            coverage_to_items_map.append(items)
        return coverage_to_items_map
| 7,712 | Python | .py | 179 | 33.837989 | 92 | 0.615869 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,241 | gsub.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/gsub.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from functools import partial
from struct import unpack_from
from calibre.utils.fonts.sfnt import FixedProperty, UnknownTable
from calibre.utils.fonts.sfnt.common import ExtensionSubstitution, FeatureListTable, LookupTable, ScriptListTable, SimpleListTable, UnknownLookupSubTable
from calibre.utils.fonts.sfnt.errors import UnsupportedFont
from polyglot.builtins import iteritems, itervalues
class SingleSubstitution(UnknownLookupSubTable):

    '''GSUB lookup type 1: replace one glyph with another.'''

    formats = {1, 2}

    def initialize(self, data):
        if self.format == 1:
            # Format 1: a single signed delta applied to every covered glyph
            self.delta = data.unpack('h')
        else:
            # Format 2: one explicit substitute glyph per covered glyph
            num = data.unpack('H')
            self.substitutes = data.unpack('%dH'%num, single_special=False)

    def all_substitutions(self, glyph_ids):
        covered = self.coverage.coverage_indices(glyph_ids)
        if self.format == 1:
            return {gid + self.delta for gid in covered}
        return {self.substitutes[idx] for idx in itervalues(covered)}
class MultipleSubstitution(UnknownLookupSubTable):

    '''GSUB lookup type 2: replace one glyph by a sequence of glyphs.'''

    formats = {1}

    def initialize(self, data):
        self.coverage_to_subs_map = self.read_sets(data, set_is_index=True)

    def all_substitutions(self, glyph_ids):
        covered = self.coverage.coverage_indices(glyph_ids)
        # Union of all substitution sequences for the covered glyphs
        return {glyph
                for index in itervalues(covered)
                for glyph in self.coverage_to_subs_map[index]}
class AlternateSubstitution(MultipleSubstitution):
    # Alternate substitution (GSUB lookup type 3) shares its binary layout
    # with multiple substitution (type 2), so everything is inherited.
    pass
class LigatureSubstitution(UnknownLookupSubTable):

    '''GSUB lookup type 4: replace a glyph sequence with a single ligature
    glyph (e.g. f + i -> fi).'''

    formats = {1}

    def initialize(self, data):
        self.coverage_to_lig_map = self.read_sets(data, self.read_ligature)

    def read_ligature(self, data):
        # A ligature record: the resulting glyph followed by the component
        # glyphs *after* the first (the first comes from the coverage table)
        lig_glyph, count = data.unpack('HH')
        rest = data.unpack('%dH'%(count-1), single_special=False)
        return (lig_glyph, rest)

    def all_substitutions(self, glyph_ids):
        covered = self.coverage.coverage_indices(glyph_ids)
        ans = set()
        for first_glyph, index in iteritems(covered):
            for lig_glyph, rest in self.coverage_to_lig_map[index]:
                # Only count the ligature if every component is present
                if set((first_glyph,) + rest).issubset(glyph_ids):
                    ans.add(lig_glyph)
        return ans
class ContexttualSubstitution(UnknownLookupSubTable):
    '''Contextual substitution (GSUB lookup type 5). NOTE: the class name
    carries a historical typo ("Contexttual"); it is kept because
    subtable_map references it by this name.'''
    formats = {1, 2, 3}
    @property
    def has_initial_coverage(self):
        # Format 3 embeds its coverage tables inside the rule data rather
        # than as a single leading coverage offset
        return self.format != 3
    def initialize(self, data):
        pass # TODO
    def all_substitutions(self, glyph_ids):
        # This table only defined substitution in terms of other tables
        return set()
class ChainingContextualSubstitution(UnknownLookupSubTable):
    '''Chaining contextual substitution (GSUB lookup type 6).'''
    formats = {1, 2, 3}
    @property
    def has_initial_coverage(self):
        # Format 3 embeds its coverage tables inside the rule data rather
        # than as a single leading coverage offset
        return self.format != 3
    def initialize(self, data):
        pass # TODO
    def all_substitutions(self, glyph_ids):
        # This table only defined substitution in terms of other tables
        return set()
class ReverseChainSingleSubstitution(UnknownLookupSubTable):

    '''GSUB lookup type 8: reverse chaining contextual single substitution.'''

    formats = {1}

    def initialize(self, data):
        # The backtrack/lookahead coverage offsets are read to advance the
        # parse position, but are not yet used for context matching
        backtrack_count = data.unpack('H')
        backtrack_offsets = data.unpack('%dH'%backtrack_count,
                single_special=False)
        lookahead_count = data.unpack('H')
        lookahead_offsets = data.unpack('%dH'%lookahead_count,
                single_special=False)
        backtrack_offsets = [data.start_pos + x for x in backtrack_offsets]
        lookahead_offsets = [data.start_pos + x for x in lookahead_offsets]
        backtrack_offsets, lookahead_offsets # TODO: Use these
        count = data.unpack('H')
        # Bug fix: pass single_special=False so substitutes is always a
        # sequence, matching the other multi-value unpack call sites. Before
        # this, a count of 1 appears to have yielded a bare int, which would
        # break the indexing in all_substitutions().
        self.substitutes = data.unpack('%dH'%count, single_special=False)

    def all_substitutions(self, glyph_ids):
        gid_index_map = self.coverage.coverage_indices(glyph_ids)
        return {self.substitutes[i] for i in itervalues(gid_index_map)}
# GSUB lookup type -> subtable parser class. Type 7 (Extension) is absent
# here; GSUBLookupTable handles it by resolving the wrapped subtable via
# ExtensionSubstitution using this same map.
subtable_map = {
    1: SingleSubstitution,
    2: MultipleSubstitution,
    3: AlternateSubstitution,
    4: LigatureSubstitution,
    5: ContexttualSubstitution,
    6: ChainingContextualSubstitution,
    8: ReverseChainSingleSubstitution,
}
class GSUBLookupTable(LookupTable):

    '''A GSUB Lookup table: selects the subtable parser by lookup type.'''

    def set_child_class(self):
        # Type 7 is an extension wrapper around one of the other types
        if self.lookup_type == 7:
            child = partial(ExtensionSubstitution,
                    subtable_map=subtable_map)
        else:
            child = subtable_map[self.lookup_type]
        self.child_class = child
class LookupListTable(SimpleListTable):
    # The GSUB LookupList: one GSUBLookupTable per lookup record
    child_class = GSUBLookupTable
class GSUBTable(UnknownTable):

    '''The OpenType GSUB (glyph substitution) table.'''

    version = FixedProperty('_version')

    def decompile(self):
        '''Parse the script, feature and lookup lists. Raises
        UnsupportedFont for any version other than 1.0 (0x10000).'''
        (self._version, self.scriptlist_offset, self.featurelist_offset,
            self.lookuplist_offset) = unpack_from(b'>L3H', self.raw)
        if self._version != 0x10000:
            raise UnsupportedFont('The GSUB table has unknown version: 0x%x'%
                    self._version)
        self.script_list_table = ScriptListTable(self.raw,
                self.scriptlist_offset)
        # self.script_list_table.dump()
        self.feature_list_table = FeatureListTable(self.raw,
                self.featurelist_offset)
        # self.feature_list_table.dump()
        self.lookup_list_table = LookupListTable(self.raw,
                self.lookuplist_offset)

    def all_substitutions(self, glyph_ids):
        '''Return the set of glyph ids any lookup could substitute for (any
        subset of) glyph_ids, excluding the input glyph ids themselves.'''
        glyph_ids = frozenset(glyph_ids)
        ans = set(glyph_ids)
        for lookup_table in self.lookup_list_table:
            for subtable in lookup_table:
                glyphs = subtable.all_substitutions(ans)
                if glyphs:
                    ans |= glyphs
        # Bug fix: this used to be ``ans - {glyph_ids}``, which subtracted a
        # set containing the frozenset object itself -- never an element of
        # ans (which holds ints) -- and therefore removed nothing. Subtract
        # the glyph ids so only newly reachable glyphs are returned.
        return ans - glyph_ids
| 5,849 | Python | .py | 133 | 35.466165 | 153 | 0.66543 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,242 | constants.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/cff/constants.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
# cff_standard_strings {{{
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
# A string's index in this list is its SID (string id); custom strings in a
# font's String INDEX are numbered starting at len(cff_standard_strings).
cff_standard_strings = [
'.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent',
'ampersand', 'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus',
'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four',
'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal',
'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore',
'quoteleft', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft',
'bar', 'braceright', 'asciitilde', 'exclamdown', 'cent', 'sterling',
'fraction', 'yen', 'florin', 'section', 'currency', 'quotesingle',
'quotedblleft', 'guillemotleft', 'guilsinglleft', 'guilsinglright', 'fi', 'fl',
'endash', 'dagger', 'daggerdbl', 'periodcentered', 'paragraph', 'bullet',
'quotesinglbase', 'quotedblbase', 'quotedblright', 'guillemotright',
'ellipsis', 'perthousand', 'questiondown', 'grave', 'acute', 'circumflex',
'tilde', 'macron', 'breve', 'dotaccent', 'dieresis', 'ring', 'cedilla',
'hungarumlaut', 'ogonek', 'caron', 'emdash', 'AE', 'ordfeminine', 'Lslash',
'Oslash', 'OE', 'ordmasculine', 'ae', 'dotlessi', 'lslash', 'oslash', 'oe',
'germandbls', 'onesuperior', 'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf',
'plusminus', 'Thorn', 'onequarter', 'divide', 'brokenbar', 'degree', 'thorn',
'threequarters', 'twosuperior', 'registered', 'minus', 'eth', 'multiply',
'threesuperior', 'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave',
'Aring', 'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave',
'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute',
'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute',
'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron',
'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla',
'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex',
'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis',
'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave',
'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall',
'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall',
'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader',
'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle',
'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle',
'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior',
'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior',
'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior',
'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior',
'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall',
'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall',
'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall',
'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall',
'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall',
'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall',
'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall',
'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall',
'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths',
'seveneighths', 'onethird', 'twothirds', 'zerosuperior', 'foursuperior',
'fivesuperior', 'sixsuperior', 'sevensuperior', 'eightsuperior',
'ninesuperior', 'zeroinferior', 'oneinferior', 'twoinferior', 'threeinferior',
'fourinferior', 'fiveinferior', 'sixinferior', 'seveninferior',
'eightinferior', 'nineinferior', 'centinferior', 'dollarinferior',
'periodinferior', 'commainferior', 'Agravesmall', 'Aacutesmall',
'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', 'AEsmall',
'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall',
'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall',
'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall',
'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall',
'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall',
'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002',
'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman',
'Semibold'
]
# }}}
# The three predefined CFF charsets (ids 0, 1, 2): glyph id -> glyph name.
# Glyph 0 of every charset is ".notdef"; the Expert charset previously
# (incorrectly) spelled it "notdef", which has been fixed.
STANDARD_CHARSETS = [ # {{{
    # ISOAdobe
    (".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar",
    "percent", "ampersand", "quoteright", "parenleft", "parenright",
    "asterisk", "plus", "comma", "hyphen", "period", "slash", "zero",
    "one", "two", "three", "four", "five", "six", "seven", "eight", "nine",
    "colon", "semicolon", "less", "equal", "greater", "question", "at",
    "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N",
    "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
    "bracketleft", "backslash", "bracketright", "asciicircum",
    "underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i",
    "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
    "x", "y", "z", "braceleft", "bar", "braceright", "asciitilde",
    "exclamdown", "cent", "sterling", "fraction", "yen", "florin",
    "section", "currency", "quotesingle", "quotedblleft", "guillemotleft",
    "guilsinglleft", "guilsinglright", "fi", "fl", "endash", "dagger",
    "daggerdbl", "periodcentered", "paragraph", "bullet", "quotesinglbase",
    "quotedblbase", "quotedblright", "guillemotright", "ellipsis",
    "perthousand", "questiondown", "grave", "acute", "circumflex", "tilde",
    "macron", "breve", "dotaccent", "dieresis", "ring", "cedilla",
    "hungarumlaut", "ogonek", "caron", "emdash", "AE", "ordfeminine",
    "Lslash", "Oslash", "OE", "ordmasculine", "ae", "dotlessi", "lslash",
    "oslash", "oe", "germandbls", "onesuperior", "logicalnot", "mu",
    "trademark", "Eth", "onehalf", "plusminus", "Thorn", "onequarter",
    "divide", "brokenbar", "degree", "thorn", "threequarters",
    "twosuperior", "registered", "minus", "eth", "multiply",
    "threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis",
    "Agrave", "Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex",
    "Edieresis", "Egrave", "Iacute", "Icircumflex", "Idieresis", "Igrave",
    "Ntilde", "Oacute", "Ocircumflex", "Odieresis", "Ograve", "Otilde",
    "Scaron", "Uacute", "Ucircumflex", "Udieresis", "Ugrave", "Yacute",
    "Ydieresis", "Zcaron", "aacute", "acircumflex", "adieresis", "agrave",
    "aring", "atilde", "ccedilla", "eacute", "ecircumflex", "edieresis",
    "egrave", "iacute", "icircumflex", "idieresis", "igrave", "ntilde",
    "oacute", "ocircumflex", "odieresis", "ograve", "otilde", "scaron",
    "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis",
    "zcaron"),
    # Expert (glyph 0 fixed: must be ".notdef", previously read "notdef")
    (".notdef", "space", "exclamsmall", "Hungarumlautsmall", "dollaroldstyle",
    "dollarsuperior", "ampersandsmall", "Acutesmall", "parenleftsuperior",
    "parenrightsuperior", "twodotenleader", "onedotenleader", "comma",
    "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle",
    "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle",
    "sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle",
    "colon", "semicolon", "commasuperior", "threequartersemdash",
    "periodsuperior", "questionsmall", "asuperior", "bsuperior",
    "centsuperior", "dsuperior", "esuperior", "isuperior", "lsuperior",
    "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
    "tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
    "parenrightinferior", "Circumflexsmall", "hyphensuperior",
    "Gravesmall", "Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall",
    "Fsmall", "Gsmall", "Hsmall", "Ismall", "Jsmall", "Ksmall", "Lsmall",
    "Msmall", "Nsmall", "Osmall", "Psmall", "Qsmall", "Rsmall", "Ssmall",
    "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall", "Ysmall", "Zsmall",
    "colonmonetary", "onefitted", "rupiah", "Tildesmall",
    "exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall",
    "Zcaronsmall", "Dieresissmall", "Brevesmall", "Caronsmall",
    "Dotaccentsmall", "Macronsmall", "figuredash", "hypheninferior",
    "Ogoneksmall", "Ringsmall", "Cedillasmall", "onequarter", "onehalf",
    "threequarters", "questiondownsmall", "oneeighth", "threeeighths",
    "fiveeighths", "seveneighths", "onethird", "twothirds", "zerosuperior",
    "onesuperior", "twosuperior", "threesuperior", "foursuperior",
    "fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior",
    "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
    "threeinferior", "fourinferior", "fiveinferior", "sixinferior",
    "seveninferior", "eightinferior", "nineinferior", "centinferior",
    "dollarinferior", "periodinferior", "commainferior", "Agravesmall",
    "Aacutesmall", "Acircumflexsmall", "Atildesmall", "Adieresissmall",
    "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall", "Eacutesmall",
    "Ecircumflexsmall", "Edieresissmall", "Igravesmall", "Iacutesmall",
    "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall",
    "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
    "Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall",
    "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall",
    "Thornsmall", "Ydieresissmall"),
    # Expert Subset
    (".notdef", "space", "dollaroldstyle", "dollarsuperior",
    "parenleftsuperior", "parenrightsuperior", "twodotenleader",
    "onedotenleader", "comma", "hyphen", "period", "fraction",
    "zerooldstyle", "oneoldstyle", "twooldstyle", "threeoldstyle",
    "fouroldstyle", "fiveoldstyle", "sixoldstyle", "sevenoldstyle",
    "eightoldstyle", "nineoldstyle", "colon", "semicolon",
    "commasuperior", "threequartersemdash", "periodsuperior",
    "asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior",
    "isuperior", "lsuperior", "msuperior", "nsuperior", "osuperior",
    "rsuperior", "ssuperior", "tsuperior", "ff", "fi", "fl", "ffi",
    "ffl", "parenleftinferior", "parenrightinferior", "hyphensuperior",
    "colonmonetary", "onefitted", "rupiah", "centoldstyle",
    "figuredash", "hypheninferior", "onequarter", "onehalf",
    "threequarters", "oneeighth", "threeeighths", "fiveeighths",
    "seveneighths", "onethird", "twothirds", "zerosuperior",
    "onesuperior", "twosuperior", "threesuperior", "foursuperior",
    "fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior",
    "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
    "threeinferior", "fourinferior", "fiveinferior", "sixinferior",
    "seveninferior", "eightinferior", "nineinferior", "centinferior",
    "dollarinferior", "periodinferior", "commainferior"),
    ] # }}}
| 11,432 | Python | .py | 171 | 63.222222 | 79 | 0.652653 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,243 | writer.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/cff/writer.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from collections import OrderedDict
from struct import pack
from calibre.utils.fonts.sfnt.cff.constants import cff_standard_strings
class Index(list):

    '''A CFF INDEX: a list of byte strings that can be serialized with
    compile(). The serialized bytes are cached in self.raw.'''

    def __init__(self):
        super().__init__()
        self.raw = None

    def calcsize(self, largest_offset):
        # Smallest offSize (in bytes) able to represent largest_offset
        for size, limit in ((1, 0x100), (2, 0x10000), (3, 0x1000000)):
            if largest_offset < limit:
                return size
        return 4

    def compile(self):
        if not self:
            # An empty INDEX is just a zero count
            self.raw = pack(b'>H', 0)
            return self.raw
        # CFF offsets are 1-based and point into the object data
        offsets = [1]
        for entry in self:
            offsets.append(offsets[-1] + len(entry))
        offsize = self.calcsize(offsets[-1])
        header = pack(b'>HB', len(self), offsize)
        if offsize == 3:
            # struct has no 3-byte code: emit 4 bytes, drop the high byte
            offset_data = b''.join(pack(b'>L', off)[1:] for off in offsets)
        else:
            code = {1:'B', 2:'H', 4:'L'}[offsize]
            offset_data = pack(('>%d%s'%(len(offsets), code)).encode('ascii'),
                    *offsets)
        self.raw = header + offset_data + b''.join(self)
        return self.raw
class Strings(Index):

    '''String INDEX that assigns SIDs: standard strings keep their standard
    ids, new strings are appended and numbered after them.'''

    def __init__(self):
        Index.__init__(self)
        self.added = {s:sid for sid, s in enumerate(cff_standard_strings)}

    def __call__(self, x):
        try:
            return self.added[x]
        except KeyError:
            # New string: SIDs continue after the 391 standard strings
            sid = len(self) + len(cff_standard_strings)
            self.added[x] = sid
            self.append(x)
            return sid
class Dict(Index):

    '''An INDEX holding a single compiled DICT object (e.g. the Top DICT),
    recompiled from src each time compile() is called.'''

    def __init__(self, src, strings):
        Index.__init__(self)
        self.src, self.strings = src, strings

    def compile(self):
        self[:] = [self.src.compile(self.strings)]
        # Return the serialized bytes like Index.compile() and
        # PrivateDict.compile() do, instead of silently returning None
        # (callers previously had to read .raw afterwards).
        return Index.compile(self)
class PrivateDict:
    '''Compiles a CFF Private DICT together with its (optional) local
    subroutine INDEX.'''
    def __init__(self, src, subrs, strings):
        self.src, self.strings = src, strings
        self.subrs = None
        if subrs is not None:
            self.subrs = Index()
            self.subrs.extend(subrs)
            self.subrs.compile()
    def compile(self):
        raw = self.src.compile(self.strings)
        if self.subrs is not None:
            # The Subrs operand is an offset relative to the start of the
            # Private DICT data; the subrs INDEX is appended right after it,
            # so the offset equals the dict's own length. The first compile
            # above already contains the stale Subrs entry from the source
            # font, so len(raw) is the final length.
            # NOTE(review): this assumes the new offset re-encodes to the
            # same width as the old one - confirm.
            self.src['Subrs'] = len(raw)
            raw = self.src.compile(self.strings)
        self.raw = raw
        return raw
class Charsets(list):

    '''Collects glyph names and serializes them as a format 0 charset,
    converting each name to a SID via the supplied strings index.'''

    def __init__(self, strings):
        super().__init__()
        self.strings = strings

    def compile(self):
        sids = [self.strings(name) for name in self]
        fmt = ('>%dH'%len(self)).encode('ascii')
        # Format byte 0 followed by one SID per glyph (excluding .notdef)
        self.raw = pack(b'>B', 0) + pack(fmt, *sids)
        return self.raw
class Subset:
    '''Serialize a subset of a parsed CFF font containing only the glyphs
    named in keep_charnames (plus .notdef). The serialized font is left in
    self.raw and the kept glyph name -> glyph id map in self.charname_map.'''
    def __init__(self, cff, keep_charnames):
        self.cff = cff
        keep_charnames.add(b'.notdef')
        # CFF header: major=1, minor=0, hdrSize=4, offSize from the source
        header = pack(b'>4B', 1, 0, 4, cff.offset_size)
        # Font names Index
        font_names = Index()
        font_names.extend(self.cff.font_names)
        # Strings Index
        strings = Strings()
        # CharStrings Index and charsets
        char_strings = Index()
        self.charname_map = OrderedDict()
        charsets = Charsets(strings)
        charsets.extend(cff.charset[1:]) # .notdef is not included
        # Dropped glyphs are replaced by a bare endchar (operator 14), so
        # the glyph ids of kept glyphs stay unchanged
        endchar_operator = bytes(bytearray([14]))
        for i in range(self.cff.num_glyphs):
            cname = self.cff.charset.safe_lookup(i)
            ok = cname in keep_charnames
            cs = self.cff.char_strings[i] if ok else endchar_operator
            char_strings.append(cs)
            if ok:
                self.charname_map[cname] = i
        # Add the strings
        char_strings.compile()
        charsets.compile()
        # Global subroutines
        global_subrs = Index()
        global_subrs.extend(cff.global_subrs)
        global_subrs.compile()
        # TOP DICT
        top_dict = Dict(cff.top_dict, strings)
        top_dict.compile() # Add strings
        private_dict = None
        if cff.private_dict is not None:
            private_dict = PrivateDict(cff.private_dict, cff.private_subrs,
                    strings)
            private_dict.compile() # Add strings
        fixed_prefix = header + font_names.compile()
        t = top_dict.src
        # Put in dummy offsets
        # (compiling once with placeholders fixes the dict length; offset
        # operands are assumed to re-encode at the same width afterwards)
        t['charset'] = 1
        t['CharStrings'] = 1
        if private_dict is not None:
            t['Private'] = (len(private_dict.raw), 1)
        top_dict.compile()
        strings.compile()
        # Calculate real offsets
        pos = len(fixed_prefix)
        pos += len(top_dict.raw)
        pos += len(strings.raw)
        pos += len(global_subrs.raw)
        t['charset'] = pos
        pos += len(charsets.raw)
        t['CharStrings'] = pos
        pos += len(char_strings.raw)
        if private_dict is not None:
            t['Private'] = (len(private_dict.raw), pos)
        top_dict.compile()
        # Assemble the final font in CFF layout order; the local subrs
        # INDEX (if any) goes immediately after the Private DICT
        self.raw = (fixed_prefix + top_dict.raw + strings.raw +
                global_subrs.raw + charsets.raw + char_strings.raw)
        if private_dict is not None:
            self.raw += private_dict.raw
            if private_dict.subrs is not None:
                self.raw += private_dict.subrs.raw
27,244 | __init__.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/cff/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 152 | Python | .py | 4 | 35.75 | 61 | 0.678322 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,245 | table.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/cff/table.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from functools import partial
from struct import calcsize, unpack, unpack_from
from calibre.utils.fonts.sfnt import UnknownTable
from calibre.utils.fonts.sfnt.cff.constants import STANDARD_CHARSETS, cff_standard_strings
from calibre.utils.fonts.sfnt.cff.dict_data import PrivateDict, TopDict
from calibre.utils.fonts.sfnt.errors import NoGlyphs, UnsupportedFont
from polyglot.builtins import iteritems, itervalues
# Useful links
# http://www.adobe.com/content/dam/Adobe/en/devnet/font/pdfs/5176.CFF.pdf
# http://www.adobe.com/content/dam/Adobe/en/devnet/font/pdfs/5177.Type2.pdf
class CFF:
    '''Parse the top-level structure of a (single-font, non-CID) CFF font
    program: header, name/top-dict/string/subr indexes, charstrings,
    private dict and charset. Raises UnsupportedFont for anything it does
    not handle.'''
    def __init__(self, raw):
        (self.major_version, self.minor_version, self.header_size,
                self.offset_size) = unpack_from(b'>4B', raw)
        if (self.major_version, self.minor_version) != (1, 0):
            raise UnsupportedFont('The CFF table has unknown version: '
                    '(%d, %d)'%(self.major_version, self.minor_version))
        offset = self.header_size
        # Read Names Index
        self.font_names = Index(raw, offset)
        offset = self.font_names.pos
        if len(self.font_names) > 1:
            raise UnsupportedFont('CFF table has more than one font.')
        # Read Top Dict
        self.top_index = Index(raw, offset)
        self.top_dict = TopDict()
        offset = self.top_index.pos
        # Read strings
        self.strings = Strings(raw, offset)
        offset = self.strings.pos
        # Read global subroutines
        self.global_subrs = Subrs(raw, offset)
        offset = self.global_subrs.pos
        # Decompile Top Dict
        self.top_dict.decompile(self.strings, self.global_subrs, self.top_index[0])
        # The presence of the ROS operator marks a CID-keyed font
        self.is_CID = 'ROS' in self.top_dict
        if self.is_CID:
            raise UnsupportedFont('Subsetting of CID keyed fonts is not supported')
        # Read CharStrings (Glyph definitions)
        try:
            offset = self.top_dict['CharStrings']
        except KeyError:
            raise ValueError('This font has no CharStrings')
        cs_type = self.top_dict.safe_get('CharstringType')
        if cs_type != 2:
            raise UnsupportedFont('This font has unsupported CharstringType: '
                    '%s'%cs_type)
        self.char_strings = CharStringsIndex(raw, offset)
        self.num_glyphs = len(self.char_strings)
        # Read Private Dict
        self.private_dict = self.private_subrs = None
        pd = self.top_dict.safe_get('Private')
        if pd:
            # Private is a (size, offset) pair; local Subrs, if present, are
            # at an offset relative to the Private DICT start
            size, offset = pd
            self.private_dict = PrivateDict()
            self.private_dict.decompile(self.strings, self.global_subrs,
                    raw[offset:offset+size])
            if 'Subrs' in self.private_dict:
                self.private_subrs = Subrs(raw, offset +
                        self.private_dict['Subrs'])
        # Read charset (Glyph names)
        self.charset = Charset(raw, self.top_dict.safe_get('charset'),
                self.strings, self.num_glyphs, self.is_CID)
        # import pprint
        # pprint.pprint(self.top_dict)
        # pprint.pprint(self.private_dict)
class Index(list):
    '''Parse a CFF INDEX from raw at offset. After construction, self.pos
    is the offset of the first byte following the INDEX.'''
    def __init__(self, raw, offset, prepend=()):
        list.__init__(self)
        self.extend(prepend)
        count = unpack_from(b'>H', raw, offset)[0]
        offset += 2
        self.pos = offset
        if count > 0:
            self.offset_size = unpack_from(b'>B', raw, offset)[0]
            offset += 1
            if self.offset_size == 3:
                # struct has no 3-byte format, so pad each offset to 4 bytes
                offsets = [unpack(b'>L', b'\0' + raw[i:i+3])[0]
                    for i in range(offset, offset+3*(count+1), 3)]
            else:
                fmt = {1:'B', 2:'H', 4:'L'}[self.offset_size]
                fmt = ('>%d%s'%(count+1, fmt)).encode('ascii')
                offsets = unpack_from(fmt, raw, offset)
            # CFF offsets are 1-based relative to the byte just before the
            # object data, so position `offset` one byte before the data
            # start; then offset+off is the absolute position of object data
            offset += self.offset_size * (count+1) - 1
            for i in range(len(offsets)-1):
                off, noff = offsets[i:i+2]
                obj = raw[offset+off:offset+noff]
                self.append(obj)
            try:
                self.pos = offset + offsets[-1]
            except IndexError:
                self.pos = offset
class Strings(Index):
    '''The CFF String INDEX, with the 391 standard strings prepended so a
    SID can be used to index directly into this list.'''
    def __init__(self, raw, offset):
        super().__init__(raw, offset, prepend=[x.encode('ascii')
            for x in cff_standard_strings])
class Charset(list):
    '''Maps glyph id -> glyph name (as bytes) for a CFF font. Predefined
    charsets (ids 0-2) are not expanded into the list; lookup() indexes
    into STANDARD_CHARSETS for them instead.'''
    def __init__(self, raw, offset, strings, num_glyphs, is_CID):
        super().__init__()
        # Offsets 0, 1, 2 denote predefined charsets, not byte offsets
        self.standard_charset = offset if offset in {0, 1, 2} else None
        if is_CID and self.standard_charset is not None:
            raise ValueError("CID font must not use a standard charset")
        if self.standard_charset is None:
            # Glyph 0 (.notdef) is implicit and never stored in the data
            self.append(b'.notdef')
            fmt = unpack_from(b'>B', raw, offset)[0]
            offset += 1
            f = {0:self.parse_fmt0, 1:self.parse_fmt1,
                2:partial(self.parse_fmt1, is_two_byte=True)}.get(fmt, None)
            if f is None:
                raise UnsupportedFont('This font uses unsupported charset '
                    'table format: %d'%fmt)
            f(raw, offset, strings, num_glyphs, is_CID)
    def parse_fmt0(self, raw, offset, strings, num_glyphs, is_CID):
        # Format 0: one SID (or CID) per glyph
        fmt = ('>%dH'%(num_glyphs-1)).encode('ascii')
        ids = unpack_from(fmt, raw, offset)
        if is_CID:
            ids = ('cid%05d'%x for x in ids)
        else:
            ids = (strings[x] for x in ids)
        self.extend(ids)
    def parse_fmt1(self, raw, offset, strings, num_glyphs, is_CID,
            is_two_byte=False):
        # Formats 1/2: runs of consecutive SIDs as (first, nLeft) records;
        # format 2 uses a 16-bit nLeft, format 1 an 8-bit one
        fmt = b'>2H' if is_two_byte else b'>HB'
        sz = calcsize(fmt)
        # count starts at 1 because glyph 0 (.notdef) is not in the data
        count = 1
        while count < num_glyphs:
            first, nleft = unpack_from(fmt, raw, offset)
            offset += sz
            count += nleft + 1
            self.extend('cid%05d'%x if is_CID else strings[x] for x in
                    range(first, first + nleft+1))
    def lookup(self, glyph_id):
        if self.standard_charset is None:
            return self[glyph_id]
        # Predefined charset: names come from the spec's tables (str), so
        # encode to bytes for consistency with the parsed case
        return STANDARD_CHARSETS[self.standard_charset][glyph_id].encode('ascii')
    def safe_lookup(self, glyph_id):
        # Like lookup(), but returns None instead of raising on bad ids
        try:
            return self.lookup(glyph_id)
        except (KeyError, IndexError, ValueError):
            return None
class Subrs(Index):
    # A subroutine INDEX (local or global); plain INDEX parsing suffices
    pass
class CharStringsIndex(Index):
    # The CharStrings INDEX: one Type 2 charstring per glyph
    pass
class CFFTable(UnknownTable):
    '''The sfnt 'CFF ' table: wraps parsing and subsetting of CFF data.'''
    def decompile(self):
        self.cff = CFF(self.raw)
    def subset(self, character_map, extra_glyphs):
        '''Subset the CFF program to the glyphs reachable from
        character_map (code -> glyph id) plus extra_glyphs (glyph ids).
        character_map is updated in place with the new glyph ids and
        self.raw is replaced with the subset font. Raises NoGlyphs if none
        of the requested characters have glyphs.'''
        from calibre.utils.fonts.sfnt.cff.writer import Subset
        # Map codes from the cmap table to glyph names, this will be used to
        # reconstruct character_map for the subset font
        charset_map = {code:self.cff.charset.safe_lookup(glyph_id) for code,
                glyph_id in iteritems(character_map)}
        charset = set(itervalues(charset_map))
        charset.discard(None)
        if not charset and character_map:
            raise NoGlyphs('This font has no glyphs for the specified characters')
        charset |= {
            self.cff.charset.safe_lookup(glyph_id) for glyph_id in extra_glyphs}
        charset.discard(None)
        s = Subset(self.cff, charset)
        # Rebuild character_map with the glyph ids from the subset font
        character_map.clear()
        for code, charname in iteritems(charset_map):
            glyph_id = s.charname_map.get(charname, None)
            # NOTE(review): glyph id 0 (.notdef) is also dropped by this
            # truthiness test; presumably intentional - confirm
            if glyph_id:
                character_map[code] = glyph_id
        # Check that raw is parseable
        CFF(s.raw)
        self.raw = s.raw
| 7,736 | Python | .py | 174 | 34.212644 | 90 | 0.596115 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,246 | dict_data.py | kovidgoyal_calibre/src/calibre/utils/fonts/sfnt/cff/dict_data.py | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
from struct import pack, unpack_from
# Operand decoder dispatch tables: each entry maps a lead byte value to the
# name of the ByteCode method that decodes operands starting with that byte.
# Type 1 encoding: bytes 0-31 are operators, the rest encode numbers.
t1_operand_encoding = [None] * 256
t1_operand_encoding[0:32] = (32) * ["do_operator"]
t1_operand_encoding[32:247] = (247 - 32) * ["read_byte"]
t1_operand_encoding[247:251] = (251 - 247) * ["read_small_int1"]
t1_operand_encoding[251:255] = (255 - 251) * ["read_small_int2"]
t1_operand_encoding[255] = "read_long_int"
# Type 2 charstrings add a 16-bit int (28) and use 255 for 16.16 fixed point
t2_operand_encoding = t1_operand_encoding[:]
t2_operand_encoding[28] = "read_short_int"
t2_operand_encoding[255] = "read_fixed_1616"
# CFF DICTs add 29 (32-bit int) and 30 (packed BCD real); 255 is reserved
cff_dict_operand_encoding = t2_operand_encoding[:]
cff_dict_operand_encoding[29] = "read_long_int"
cff_dict_operand_encoding[30] = "read_real_number"
cff_dict_operand_encoding[255] = "reserved"
# Nibble values used by the packed BCD real number encoding (0xf terminates)
real_nibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
        '.', 'E', 'E-', None, '-']
real_nibbles_map = {x:i for i, x in enumerate(real_nibbles)}
class ByteCode(dict):
    '''Decoders/encoders for the operand byte encodings shared by Type 1,
    Type 2 and CFF DICT data. Decoder method names match the entries in the
    *_operand_encoding dispatch tables; each takes (b0, data, index) and
    returns (value, new_index).'''
    def read_byte(self, b0, data, index):
        # Single byte integer in [-107, 107]
        return b0 - 139, index
    def read_small_int1(self, b0, data, index):
        # Two byte positive integer in [108, 1131]
        b1 = ord(data[index:index+1])
        return (b0-247)*256 + b1 + 108, index+1
    def read_small_int2(self, b0, data, index):
        # Two byte negative integer in [-1131, -108]
        b1 = ord(data[index:index+1])
        return -(b0-251)*256 - b1 - 108, index+1
    def read_short_int(self, b0, data, index):
        # 16-bit big-endian signed integer
        value, = unpack_from(b">h", data, index)
        return value, index+2
    def read_long_int(self, b0, data, index):
        # 32-bit big-endian signed integer
        value, = unpack_from(b">l", data, index)
        return value, index+4
    def read_fixed_1616(self, b0, data, index):
        # 16.16 fixed point number
        value, = unpack_from(b">l", data, index)
        return value / 65536.0, index+4
    def read_real_number(self, b0, data, index):
        # Packed BCD real: two nibbles per byte, a 0xf nibble terminates
        number = ''
        while True:
            b = ord(data[index:index+1])
            index = index + 1
            nibble0 = (b & 0xf0) >> 4
            nibble1 = b & 0x0f
            if nibble0 == 0xf:
                break
            number = number + real_nibbles[nibble0]
            if nibble1 == 0xf:
                break
            number = number + real_nibbles[nibble1]
        return float(number), index
    def write_float(self, f, encoding='ignored'):
        # Encode a float as a packed BCD real (lead byte 30).
        # NOTE(review): str() of a float with a positive exponent yields
        # 'E+nn'; '+' is not in real_nibbles_map so this would raise
        # KeyError - presumably such values never occur in font dicts.
        s = str(f).upper()
        if s[:2] == "0.":
            s = s[1:]
        elif s[:3] == "-0.":
            s = "-" + s[2:]
        nibbles = []
        while s:
            c = s[0]
            s = s[1:]
            if c == "E" and s[:1] == "-":
                s = s[1:]
                c = "E-"
            nibbles.append(real_nibbles_map[c])
        nibbles.append(0xf)
        if len(nibbles) % 2:
            nibbles.append(0xf)
        d = bytearray([30])
        for i in range(0, len(nibbles), 2):
            d.append(nibbles[i] << 4 | nibbles[i+1])
        return bytes(d)
    def write_int(self, value, encoding="cff"):
        # Choose the shortest integer encoding; 'cff' and 't1' have a
        # 4-byte form, anything else (t2) falls back to the 3-byte form
        four_byte_op = {'cff':29, 't1':255}.get(encoding, None)
        if -107 <= value <= 107:
            code = bytes(bytearray([value + 139]))
        elif 108 <= value <= 1131:
            value = value - 108
            code = bytes(bytearray([(value >> 8) + 247, (value & 0xFF)]))
        elif -1131 <= value <= -108:
            value = -value - 108
            code = bytes(bytearray([(value >> 8) + 251, (value & 0xFF)]))
        elif four_byte_op is None:
            # T2 only supports 2 byte ints
            code = bytes(bytearray([28])) + pack(b">h", value)
        else:
            code = bytes(bytearray([four_byte_op])) + pack(b">l", value)
        return code
    def write_offset(self, value):
        # Offsets are always written in the fixed-width 4-byte form so their
        # encoded length is stable across recompiles
        return bytes(bytearray([29])) + pack(b">l", value)
    def write_number(self, value, encoding="cff"):
        # Dispatch on the value's type to the float or int encoder
        f = self.write_float if isinstance(value, float) else self.write_int
        return f(value, encoding)
class Dict(ByteCode):

    """Base class for CFF DICT structures (e.g. the Top DICT and Private
    DICT).  Decompiled operator values are stored in ``self`` (this class
    subclasses dict) keyed by operator name.  Subclasses supply:

      * TABLE    -- rows of (opcode, name, argument-type, default)
      * FILTERED -- operator names that compile() never writes out
      * OFFSETS  -- operator names whose values are file offsets, which must
                    be encoded with the fixed 4-byte form so they can be
                    patched later without changing the DICT's size
    """

    operand_encoding = cff_dict_operand_encoding

    TABLE = ()
    FILTERED = frozenset()
    OFFSETS = frozenset()

    def __init__(self):
        ByteCode.__init__(self)
        self.operators = {op:(name, arg) for op, name, arg, default in
                self.TABLE}
        self.defaults = {name:default for op, name, arg, default in self.TABLE}
        # Initialize here so encode_number() is safe even if it is called
        # before compile() has set the flag for a specific operator.
        self.encoding_offset = False

    def safe_get(self, name):
        """Return the decompiled value for name, or its default from TABLE."""
        return self.get(name, self.defaults[name])

    def decompile(self, strings, global_subrs, data):
        """Parse the binary DICT in ``data``, populating self.

        :param strings: sequence used to resolve SID values
        :param global_subrs: kept for use by operand handlers
        :param data: the raw DICT bytes
        """
        self.strings = strings
        self.global_subrs = global_subrs
        self.stack = []
        index = 0
        while index < len(data):
            b0 = ord(data[index:index+1])
            index += 1
            # Each first byte selects a handler via the encoding table
            handler = getattr(self, self.operand_encoding[b0])
            value, index = handler(b0, data, index)
            if value is not None:
                self.stack.append(value)

    def do_operator(self, b0, data, index):
        # Operators are one byte, or two bytes when b0 is 12 (the escape)
        if b0 == 12:
            op = (b0, ord(data[index:index+1]))
            index += 1
        else:
            op = b0
        operator, arg_type = self.operators[op]
        self.handle_operator(operator, arg_type)
        return None, index

    def handle_operator(self, operator, arg_type):
        # Pop this operator's arguments off the operand stack, converting
        # them according to arg_type (a type name or tuple of type names)
        if isinstance(arg_type, tuple):
            value = ()
            # Pop in reverse so the tuple ends up in declaration order
            for i in range(len(arg_type)-1, -1, -1):
                arg = arg_type[i]
                arghandler = getattr(self, 'arg_' + arg)
                value = (arghandler(operator),) + value
        else:
            arghandler = getattr(self, 'arg_' + arg_type)
            value = arghandler(operator)
        self[operator] = value

    def arg_number(self, name):
        return self.stack.pop()

    def arg_SID(self, name):
        # An SID is an index into the strings table
        return self.strings[self.stack.pop()]

    def arg_array(self, name):
        # Arrays consume the whole operand stack
        ans = self.stack[:]
        del self.stack[:]
        return ans

    def arg_delta(self, name):
        # Delta arrays are stored as differences from the previous element;
        # reconstruct the absolute values
        out = []
        current = 0
        for v in self.stack:
            current = current + v
            out.append(current)
        del self.stack[:]
        return out

    def compile(self, strings):
        """Serialize self back to binary and return the bytes (also stored
        in self.raw).

        :param strings: callable mapping a string to its SID
        Only operators whose value differs from the default, and that are
        not in FILTERED, are written.
        """
        data = []
        for op, name, arg, default in self.TABLE:
            if name in self.FILTERED:
                continue
            val = self.safe_get(name)
            opcode = bytes(bytearray(op if isinstance(op, tuple) else [op]))
            if val != self.defaults[name]:
                self.encoding_offset = name in self.OFFSETS
                if isinstance(arg, tuple):
                    if len(val) != len(arg):
                        raise ValueError('Invalid argument %s for operator: %s'
                                %(val, op))
                    for typ, v in zip(arg, val):
                        if typ == 'SID':
                            # Convert this element to its SID. Fixed: the old
                            # code assigned strings(val) to val, leaving v
                            # unconverted and corrupting val for later
                            # iterations.
                            v = strings(v)
                        data.append(getattr(self, 'encode_'+typ)(v))
                else:
                    if arg == 'SID':
                        val = strings(val)
                    data.append(getattr(self, 'encode_'+arg)(val))
                # Operands precede their operator in a DICT
                data.append(opcode)
        self.raw = b''.join(data)
        return self.raw

    def encode_number(self, val):
        # Offsets use the fixed-width form so later patching cannot change
        # the size of the DICT
        if self.encoding_offset:
            return self.write_offset(val)
        return self.write_number(val)

    def encode_SID(self, val):
        return self.write_int(val)

    def encode_array(self, val):
        return b''.join(map(self.encode_number, val))

    def encode_delta(self, value):
        # Inverse of arg_delta(): store differences between neighbours
        out = []
        last = 0
        for v in value:
            out.append(v - last)
            last = v
        return self.encode_array(out)
class TopDict(Dict):

    """The CFF Top DICT: font-wide metadata plus offsets to the other
    tables (charset, Encoding, CharStrings, Private).  Each TABLE row is
    (opcode, name, argument type, default) as defined by the CFF
    specification."""

    TABLE = (
    # opcode     name                  argument type        default
    ((12, 30), 'ROS',        ('SID','SID','number'), None,),
    ((12, 20), 'SyntheticBase', 'number',      None,),
    (0,        'version',        'SID',        None,),
    (1,        'Notice',         'SID',        None,),
    ((12, 0),  'Copyright',      'SID',        None,),
    (2,        'FullName',       'SID',        None,),
    ((12, 38), 'FontName',       'SID',        None,),
    (3,        'FamilyName',     'SID',        None,),
    (4,        'Weight',         'SID',        None,),
    ((12, 1),  'isFixedPitch',   'number',     0,),
    ((12, 2),  'ItalicAngle',    'number',     0,),
    ((12, 3),  'UnderlinePosition', 'number',  None,),
    ((12, 4),  'UnderlineThickness', 'number', 50,),
    ((12, 5),  'PaintType',      'number',     0,),
    ((12, 6),  'CharstringType', 'number',     2,),
    ((12, 7),  'FontMatrix',     'array',  [0.001,0,0,0.001,0,0],),
    (13,       'UniqueID',       'number',     None,),
    (5,        'FontBBox',       'array',  [0,0,0,0],),
    ((12, 8),  'StrokeWidth',    'number',     0,),
    (14,       'XUID',           'array',      None,),
    ((12, 21), 'PostScript',     'SID',        None,),
    ((12, 22), 'BaseFontName',   'SID',        None,),
    ((12, 23), 'BaseFontBlend',  'delta',      None,),
    ((12, 31), 'CIDFontVersion', 'number',     0,),
    ((12, 32), 'CIDFontRevision', 'number',    0,),
    ((12, 33), 'CIDFontType',    'number',     0,),
    ((12, 34), 'CIDCount',       'number',     8720,),
    (15,       'charset',        'number',     0,),
    ((12, 35), 'UIDBase',        'number',     None,),
    (16,       'Encoding',       'number',     0,),
    (18,       'Private',       ('number','number'), None,),
    ((12, 37), 'FDSelect',       'number',     None,),
    ((12, 36), 'FDArray',        'number',     None,),
    (17,       'CharStrings',    'number',     None,),
    )

    # We will not write these operators out (mostly CID-keyed font
    # operators that are dropped when the font is re-compiled)
    FILTERED = {'ROS', 'SyntheticBase', 'UniqueID', 'XUID',
            'CIDFontVersion', 'CIDFontRevision', 'CIDFontType', 'CIDCount',
            'UIDBase', 'Encoding', 'FDSelect', 'FDArray'}

    # Values that are offsets into the file; written with the fixed 4-byte
    # encoding so they can be patched without resizing the DICT
    OFFSETS = {'charset', 'Encoding', 'CharStrings', 'Private'}
class PrivateDict(Dict):

    """The CFF Private DICT: hinting parameters and the offset to the local
    subroutines.  Each TABLE row is (opcode, name, argument type, default)
    as defined by the CFF specification."""

    TABLE = (
    # opcode     name                  argument type        default
    (6,        'BlueValues',     'delta',      None,),
    (7,        'OtherBlues',     'delta',      None,),
    (8,        'FamilyBlues',    'delta',      None,),
    (9,        'FamilyOtherBlues', 'delta',    None,),
    ((12, 9),  'BlueScale',      'number',     0.039625,),
    ((12, 10), 'BlueShift',      'number',     7,),
    ((12, 11), 'BlueFuzz',       'number',     1,),
    (10,       'StdHW',          'number',     None,),
    (11,       'StdVW',          'number',     None,),
    ((12, 12), 'StemSnapH',      'delta',      None,),
    ((12, 13), 'StemSnapV',      'delta',      None,),
    ((12, 14), 'ForceBold',      'number',     0,),
    ((12, 15), 'ForceBoldThreshold', 'number', None,),  # deprecated
    ((12, 16), 'lenIV',          'number',     None,),  # deprecated
    ((12, 17), 'LanguageGroup',  'number',     0,),
    ((12, 18), 'ExpansionFactor', 'number',    0.06,),
    ((12, 19), 'initialRandomSeed', 'number',  0,),
    (20,       'defaultWidthX',  'number',     0,),
    (21,       'nominalWidthX',  'number',     0,),
    (19,       'Subrs',          'number',     None,),
    )

    # Subrs is an offset (relative to the Private DICT); written with the
    # fixed 4-byte encoding so it can be patched without resizing
    OFFSETS = {'Subrs'}
| 11,436 | Python | .py | 263 | 34.745247 | 79 | 0.490023 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,247 | wintest.py | kovidgoyal_calibre/src/calibre/utils/windows/wintest.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import os
import unittest
class TestWinutil(unittest.TestCase):

    """Smoke tests for the winutil C extension (Windows only)."""

    def setUp(self):
        from calibre_extensions import winutil
        self.winutil = winutil

    def tearDown(self):
        del self.winutil

    def test_add_to_recent_docs(self):
        doc = str(os.path.abspath(__file__))
        for app_uid in (None, 'some-app-uid'):
            self.winutil.add_to_recent_docs(doc, app_uid)

    def test_file_association(self):
        exe = self.winutil.file_association('.txt')
        self.assertIn('notepad.exe', exe.lower())
        self.assertNotIn('\0', exe)
        name = self.winutil.friendly_name(None, 'notepad.exe')
        self.assertEqual('Notepad', name)

    def test_special_folder_path(self):
        profile = self.winutil.special_folder_path(self.winutil.CSIDL_PROFILE)
        self.assertEqual(os.path.expanduser('~'), profile)

    def test_associations_changed(self):
        self.assertIsNone(self.winutil.notify_associations_changed())
def find_tests():
    # Entry point used by calibre's test runner: collect every test method
    # of TestWinutil into a suite.
    loader = unittest.defaultTestLoader
    return loader.loadTestsFromTestCase(TestWinutil)
| 1,140 | Python | .py | 26 | 37.307692 | 111 | 0.69873 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,248 | __init__.py | kovidgoyal_calibre/src/calibre/utils/spell/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
| 112 | Python | .py | 3 | 34.666667 | 61 | 0.673077 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,249 | hyphenate.py | kovidgoyal_calibre/src/calibre/ebooks/hyphenate.py | """ Hyphenation, using Frank Liang's algorithm.
This module provides a single function to hyphenate words. hyphenate_word takes
a string (the word), and returns a list of parts that can be separated by hyphens.
>>> hyphenate_word("hyphenation")
['hy', 'phen', 'ation']
>>> hyphenate_word("supercalifragilisticexpialidocious")
['su', 'per', 'cal', 'ifrag', 'ilis', 'tic', 'ex', 'pi', 'ali', 'do', 'cious']
>>> hyphenate_word("project")
['project']
Ned Batchelder, July 2007.
This Python code is in the public domain.
"""
import re
__version__ = '1.0.20070709'
class Hyphenator:

    """Hyphenate words using Frank Liang's pattern algorithm (as in TeX).

    ``patterns`` is a whitespace separated list of Liang patterns such as
    ``a1bc3d4``; ``exceptions`` is a whitespace separated list of words
    with their hyphenation spelled out explicitly, such as ``pro-ject``.
    """

    def __init__(self, patterns, exceptions=''):
        self.tree = {}
        for pattern in patterns.split():
            self._insert_pattern(pattern)
        self.exceptions = {}
        for ex in exceptions.split():
            # Store the exception as a point array: 1 wherever a hyphen
            # appeared in the spelled-out form, for use later.
            key = ex.replace('-', '')
            self.exceptions[key] = [0] + [int(h == '-') for h in re.split(r"[a-z]", ex)]

    def _insert_pattern(self, pattern):
        # Split a pattern like 'a1bc3d4' into its letters 'abcd' and the
        # interleaved priority points [0, 1, 0, 3, 4].
        chars = re.sub('[0-9]', '', pattern)
        points = [int(d or 0) for d in re.split("[.a-z]", pattern)]
        # Walk (creating as needed) one tree level per character; the leaf,
        # keyed by None, holds the points list.
        node = self.tree
        for ch in chars:
            node = node.setdefault(ch, {})
        node[None] = points

    def hyphenate_word(self, word):
        """Return the pieces of *word*, split at every legal hyphenation
        point."""
        # Words of four letters or fewer are never hyphenated.
        if len(word) <= 4:
            return [word]
        lower = word.lower()
        if lower in self.exceptions:
            # Explicit exception: use its stored point array.
            points = self.exceptions[lower]
        else:
            # Match every pattern at every position of '.word.' and keep
            # the highest priority seen at each inter-letter gap.
            work = '.' + lower + '.'
            points = [0] * (len(work) + 1)
            for start in range(len(work)):
                node = self.tree
                for ch in work[start:]:
                    node = node.get(ch)
                    if node is None:
                        break
                    if None in node:
                        for offset, p in enumerate(node[None]):
                            if p > points[start + offset]:
                                points[start + offset] = p
        # No hyphens in the first two chars or the last two.
        points[1] = points[2] = points[-2] = points[-3] = 0
        # Odd points mark break positions; build the pieces accordingly.
        pieces = []
        current = ''
        for ch, p in zip(word, points[2:]):
            current += ch
            if p % 2:
                pieces.append(current)
                current = ''
        pieces.append(current)
        return pieces
patterns = (
# Knuth and Liang's original hyphenation patterns from classic TeX.
# In the public domain.
"""
.ach4 .ad4der .af1t .al3t .am5at .an5c .ang4 .ani5m .ant4 .an3te .anti5s .ar5s
.ar4tie .ar4ty .as3c .as1p .as1s .aster5 .atom5 .au1d .av4i .awn4 .ba4g .ba5na
.bas4e .ber4 .be5ra .be3sm .be5sto .bri2 .but4ti .cam4pe .can5c .capa5b .car5ol
.ca4t .ce4la .ch4 .chill5i .ci2 .cit5r .co3e .co4r .cor5ner .de4moi .de3o .de3ra
.de3ri .des4c .dictio5 .do4t .du4c .dumb5 .earth5 .eas3i .eb4 .eer4 .eg2 .el5d
.el3em .enam3 .en3g .en3s .eq5ui5t .er4ri .es3 .eu3 .eye5 .fes3 .for5mer .ga2
.ge2 .gen3t4 .ge5og .gi5a .gi4b .go4r .hand5i .han5k .he2 .hero5i .hes3 .het3
.hi3b .hi3er .hon5ey .hon3o .hov5 .id4l .idol3 .im3m .im5pin .in1 .in3ci .ine2
.in2k .in3s .ir5r .is4i .ju3r .la4cy .la4m .lat5er .lath5 .le2 .leg5e .len4
.lep5 .lev1 .li4g .lig5a .li2n .li3o .li4t .mag5a5 .mal5o .man5a .mar5ti .me2
.mer3c .me5ter .mis1 .mist5i .mon3e .mo3ro .mu5ta .muta5b .ni4c .od2 .odd5
.of5te .or5ato .or3c .or1d .or3t .os3 .os4tl .oth3 .out3 .ped5al .pe5te .pe5tit
.pi4e .pio5n .pi2t .pre3m .ra4c .ran4t .ratio5na .ree2 .re5mit .res2 .re5stat
.ri4g .rit5u .ro4q .ros5t .row5d .ru4d .sci3e .self5 .sell5 .se2n .se5rie .sh2
.si2 .sing4 .st4 .sta5bl .sy2 .ta4 .te4 .ten5an .th2 .ti2 .til4 .tim5o5 .ting4
.tin5k .ton4a .to4p .top5i .tou5s .trib5ut .un1a .un3ce .under5 .un1e .un5k
.un5o .un3u .up3 .ure3 .us5a .ven4de .ve5ra .wil5i .ye4 4ab. a5bal a5ban abe2
ab5erd abi5a ab5it5ab ab5lat ab5o5liz 4abr ab5rog ab3ul a4car ac5ard ac5aro
a5ceou ac1er a5chet 4a2ci a3cie ac1in a3cio ac5rob act5if ac3ul ac4um a2d ad4din
ad5er. 2adi a3dia ad3ica adi4er a3dio a3dit a5diu ad4le ad3ow ad5ran ad4su 4adu
a3duc ad5um ae4r aeri4e a2f aff4 a4gab aga4n ag5ell age4o 4ageu ag1i 4ag4l ag1n
a2go 3agog ag3oni a5guer ag5ul a4gy a3ha a3he ah4l a3ho ai2 a5ia a3ic. ai5ly
a4i4n ain5in ain5o ait5en a1j ak1en al5ab al3ad a4lar 4aldi 2ale al3end a4lenti
a5le5o al1i al4ia. ali4e al5lev 4allic 4alm a5log. a4ly. 4alys 5a5lyst 5alyt
3alyz 4ama am5ab am3ag ama5ra am5asc a4matis a4m5ato am5era am3ic am5if am5ily
am1in ami4no a2mo a5mon amor5i amp5en a2n an3age 3analy a3nar an3arc anar4i
a3nati 4and ande4s an3dis an1dl an4dow a5nee a3nen an5est. a3neu 2ang ang5ie
an1gl a4n1ic a3nies an3i3f an4ime a5nimi a5nine an3io a3nip an3ish an3it a3niu
an4kli 5anniz ano4 an5ot anoth5 an2sa an4sco an4sn an2sp ans3po an4st an4sur
antal4 an4tie 4anto an2tr an4tw an3ua an3ul a5nur 4ao apar4 ap5at ap5ero a3pher
4aphi a4pilla ap5illar ap3in ap3ita a3pitu a2pl apoc5 ap5ola apor5i apos3t
aps5es a3pu aque5 2a2r ar3act a5rade ar5adis ar3al a5ramete aran4g ara3p ar4at
a5ratio ar5ativ a5rau ar5av4 araw4 arbal4 ar4chan ar5dine ar4dr ar5eas a3ree
ar3ent a5ress ar4fi ar4fl ar1i ar5ial ar3ian a3riet ar4im ar5inat ar3io ar2iz
ar2mi ar5o5d a5roni a3roo ar2p ar3q arre4 ar4sa ar2sh 4as. as4ab as3ant ashi4
a5sia. a3sib a3sic 5a5si4t ask3i as4l a4soc as5ph as4sh as3ten as1tr asur5a a2ta
at3abl at5ac at3alo at5ap ate5c at5ech at3ego at3en. at3era ater5n a5terna
at3est at5ev 4ath ath5em a5then at4ho ath5om 4ati. a5tia at5i5b at1ic at3if
ation5ar at3itu a4tog a2tom at5omiz a4top a4tos a1tr at5rop at4sk at4tag at5te
at4th a2tu at5ua at5ue at3ul at3ura a2ty au4b augh3 au3gu au4l2 aun5d au3r
au5sib aut5en au1th a2va av3ag a5van ave4no av3era av5ern av5ery av1i avi4er
av3ig av5oc a1vor 3away aw3i aw4ly aws4 ax4ic ax4id ay5al aye4 ays4 azi4er azz5i
5ba. bad5ger ba4ge bal1a ban5dag ban4e ban3i barbi5 bari4a bas4si 1bat ba4z 2b1b
b2be b3ber bbi4na 4b1d 4be. beak4 beat3 4be2d be3da be3de be3di be3gi be5gu 1bel
be1li be3lo 4be5m be5nig be5nu 4bes4 be3sp be5str 3bet bet5iz be5tr be3tw be3w
be5yo 2bf 4b3h bi2b bi4d 3bie bi5en bi4er 2b3if 1bil bi3liz bina5r4 bin4d bi5net
bi3ogr bi5ou bi2t 3bi3tio bi3tr 3bit5ua b5itz b1j bk4 b2l2 blath5 b4le. blen4
5blesp b3lis b4lo blun4t 4b1m 4b3n bne5g 3bod bod3i bo4e bol3ic bom4bi bon4a
bon5at 3boo 5bor. 4b1ora bor5d 5bore 5bori 5bos4 b5ota both5 bo4to bound3 4bp
4brit broth3 2b5s2 bsor4 2bt bt4l b4to b3tr buf4fer bu4ga bu3li bumi4 bu4n
bunt4i bu3re bus5ie buss4e 5bust 4buta 3butio b5uto b1v 4b5w 5by. bys4 1ca
cab3in ca1bl cach4 ca5den 4cag4 2c5ah ca3lat cal4la call5in 4calo can5d can4e
can4ic can5is can3iz can4ty cany4 ca5per car5om cast5er cas5tig 4casy ca4th
4cativ cav5al c3c ccha5 cci4a ccompa5 ccon4 ccou3t 2ce. 4ced. 4ceden 3cei 5cel.
3cell 1cen 3cenc 2cen4e 4ceni 3cent 3cep ce5ram 4cesa 3cessi ces5si5b ces5t cet4
c5e4ta cew4 2ch 4ch. 4ch3ab 5chanic ch5a5nis che2 cheap3 4ched che5lo 3chemi
ch5ene ch3er. ch3ers 4ch1in 5chine. ch5iness 5chini 5chio 3chit chi2z 3cho2
ch4ti 1ci 3cia ci2a5b cia5r ci5c 4cier 5cific. 4cii ci4la 3cili 2cim 2cin c4ina
3cinat cin3em c1ing c5ing. 5cino cion4 4cipe ci3ph 4cipic 4cista 4cisti 2c1it
cit3iz 5ciz ck1 ck3i 1c4l4 4clar c5laratio 5clare cle4m 4clic clim4 cly4 c5n 1co
co5ag coe2 2cog co4gr coi4 co3inc col5i 5colo col3or com5er con4a c4one con3g
con5t co3pa cop3ic co4pl 4corb coro3n cos4e cov1 cove4 cow5a coz5e co5zi c1q
cras5t 5crat. 5cratic cre3at 5cred 4c3reta cre4v cri2 cri5f c4rin cris4 5criti
cro4pl crop5o cros4e cru4d 4c3s2 2c1t cta4b ct5ang c5tant c2te c3ter c4ticu
ctim3i ctu4r c4tw cud5 c4uf c4ui cu5ity 5culi cul4tis 3cultu cu2ma c3ume cu4mi
3cun cu3pi cu5py cur5a4b cu5ria 1cus cuss4i 3c4ut cu4tie 4c5utiv 4cutr 1cy cze4
1d2a 5da. 2d3a4b dach4 4daf 2dag da2m2 dan3g dard5 dark5 4dary 3dat 4dativ 4dato
5dav4 dav5e 5day d1b d5c d1d4 2de. deaf5 deb5it de4bon decan4 de4cil de5com
2d1ed 4dee. de5if deli4e del5i5q de5lo d4em 5dem. 3demic dem5ic. de5mil de4mons
demor5 1den de4nar de3no denti5f de3nu de1p de3pa depi4 de2pu d3eq d4erh 5derm
dern5iz der5s des2 d2es. de1sc de2s5o des3ti de3str de4su de1t de2to de1v dev3il
4dey 4d1f d4ga d3ge4t dg1i d2gy d1h2 5di. 1d4i3a dia5b di4cam d4ice 3dict 3did
5di3en d1if di3ge di4lato d1in 1dina 3dine. 5dini di5niz 1dio dio5g di4pl dir2
di1re dirt5i dis1 5disi d4is3t d2iti 1di1v d1j d5k2 4d5la 3dle. 3dled 3dles.
4dless 2d3lo 4d5lu 2dly d1m 4d1n4 1do 3do. do5de 5doe 2d5of d4og do4la doli4
do5lor dom5iz do3nat doni4 doo3d dop4p d4or 3dos 4d5out do4v 3dox d1p 1dr
drag5on 4drai dre4 drea5r 5dren dri4b dril4 dro4p 4drow 5drupli 4dry 2d1s2 ds4p
d4sw d4sy d2th 1du d1u1a du2c d1uca duc5er 4duct. 4ducts du5el du4g d3ule dum4be
du4n 4dup du4pe d1v d1w d2y 5dyn dy4se dys5p e1a4b e3act ead1 ead5ie ea4ge
ea5ger ea4l eal5er eal3ou eam3er e5and ear3a ear4c ear5es ear4ic ear4il ear5k
ear2t eart3e ea5sp e3ass east3 ea2t eat5en eath3i e5atif e4a3tu ea2v eav3en
eav5i eav5o 2e1b e4bel. e4bels e4ben e4bit e3br e4cad ecan5c ecca5 e1ce ec5essa
ec2i e4cib ec5ificat ec5ifie ec5ify ec3im eci4t e5cite e4clam e4clus e2col
e4comm e4compe e4conc e2cor ec3ora eco5ro e1cr e4crem ec4tan ec4te e1cu e4cul
ec3ula 2e2da 4ed3d e4d1er ede4s 4edi e3dia ed3ib ed3ica ed3im ed1it edi5z 4edo
e4dol edon2 e4dri e4dul ed5ulo ee2c eed3i ee2f eel3i ee4ly ee2m ee4na ee4p1
ee2s4 eest4 ee4ty e5ex e1f e4f3ere 1eff e4fic 5efici efil4 e3fine ef5i5nite
3efit efor5es e4fuse. 4egal eger4 eg5ib eg4ic eg5ing e5git5 eg5n e4go. e4gos
eg1ul e5gur 5egy e1h4 eher4 ei2 e5ic ei5d eig2 ei5gl e3imb e3inf e1ing e5inst
eir4d eit3e ei3th e5ity e1j e4jud ej5udi eki4n ek4la e1la e4la. e4lac elan4d
el5ativ e4law elaxa4 e3lea el5ebra 5elec e4led el3ega e5len e4l1er e1les el2f
el2i e3libe e4l5ic. el3ica e3lier el5igib e5lim e4l3ing e3lio e2lis el5ish
e3liv3 4ella el4lab ello4 e5loc el5og el3op. el2sh el4ta e5lud el5ug e4mac e4mag
e5man em5ana em5b e1me e2mel e4met em3ica emi4e em5igra em1in2 em5ine em3i3ni
e4mis em5ish e5miss em3iz 5emniz emo4g emoni5o em3pi e4mul em5ula emu3n e3my
en5amo e4nant ench4er en3dic e5nea e5nee en3em en5ero en5esi en5est en3etr e3new
en5ics e5nie e5nil e3nio en3ish en3it e5niu 5eniz 4enn 4eno eno4g e4nos en3ov
en4sw ent5age 4enthes en3ua en5uf e3ny. 4en3z e5of eo2g e4oi4 e3ol eop3ar e1or
eo3re eo5rol eos4 e4ot eo4to e5out e5ow e2pa e3pai ep5anc e5pel e3pent ep5etitio
ephe4 e4pli e1po e4prec ep5reca e4pred ep3reh e3pro e4prob ep4sh ep5ti5b e4put
ep5uta e1q equi3l e4q3ui3s er1a era4b 4erand er3ar 4erati. 2erb er4bl er3ch
er4che 2ere. e3real ere5co ere3in er5el. er3emo er5ena er5ence 4erene er3ent
ere4q er5ess er3est eret4 er1h er1i e1ria4 5erick e3rien eri4er er3ine e1rio
4erit er4iu eri4v e4riva er3m4 er4nis 4ernit 5erniz er3no 2ero er5ob e5roc ero4r
er1ou er1s er3set ert3er 4ertl er3tw 4eru eru4t 5erwau e1s4a e4sage. e4sages
es2c e2sca es5can e3scr es5cu e1s2e e2sec es5ecr es5enc e4sert. e4serts e4serva
4esh e3sha esh5en e1si e2sic e2sid es5iden es5igna e2s5im es4i4n esis4te esi4u
e5skin es4mi e2sol es3olu e2son es5ona e1sp es3per es5pira es4pre 2ess es4si4b
estan4 es3tig es5tim 4es2to e3ston 2estr e5stro estruc5 e2sur es5urr es4w eta4b
eten4d e3teo ethod3 et1ic e5tide etin4 eti4no e5tir e5titio et5itiv 4etn et5ona
e3tra e3tre et3ric et5rif et3rog et5ros et3ua et5ym et5z 4eu e5un e3up eu3ro
eus4 eute4 euti5l eu5tr eva2p5 e2vas ev5ast e5vea ev3ell evel3o e5veng even4i
ev1er e5verb e1vi ev3id evi4l e4vin evi4v e5voc e5vu e1wa e4wag e5wee e3wh ewil5
ew3ing e3wit 1exp 5eyc 5eye. eys4 1fa fa3bl fab3r fa4ce 4fag fain4 fall5e 4fa4ma
fam5is 5far far5th fa3ta fa3the 4fato fault5 4f5b 4fd 4fe. feas4 feath3 fe4b
4feca 5fect 2fed fe3li fe4mo fen2d fend5e fer1 5ferr fev4 4f1f f4fes f4fie
f5fin. f2f5is f4fly f2fy 4fh 1fi fi3a 2f3ic. 4f3ical f3ican 4ficate f3icen
fi3cer fic4i 5ficia 5ficie 4fics fi3cu fi5del fight5 fil5i fill5in 4fily 2fin
5fina fin2d5 fi2ne f1in3g fin4n fis4ti f4l2 f5less flin4 flo3re f2ly5 4fm 4fn
1fo 5fon fon4de fon4t fo2r fo5rat for5ay fore5t for4i fort5a fos5 4f5p fra4t
f5rea fres5c fri2 fril4 frol5 2f3s 2ft f4to f2ty 3fu fu5el 4fug fu4min fu5ne
fu3ri fusi4 fus4s 4futa 1fy 1ga gaf4 5gal. 3gali ga3lo 2gam ga5met g5amo gan5is
ga3niz gani5za 4gano gar5n4 gass4 gath3 4gativ 4gaz g3b gd4 2ge. 2ged geez4
gel4in ge5lis ge5liz 4gely 1gen ge4nat ge5niz 4geno 4geny 1geo ge3om g4ery 5gesi
geth5 4geto ge4ty ge4v 4g1g2 g2ge g3ger gglu5 ggo4 gh3in gh5out gh4to 5gi. 1gi4a
gia5r g1ic 5gicia g4ico gien5 5gies. gil4 g3imen 3g4in. gin5ge 5g4ins 5gio 3gir
gir4l g3isl gi4u 5giv 3giz gl2 gla4 glad5i 5glas 1gle gli4b g3lig 3glo glo3r g1m
g4my gn4a g4na. gnet4t g1ni g2nin g4nio g1no g4non 1go 3go. gob5 5goe 3g4o4g
go3is gon2 4g3o3na gondo5 go3ni 5goo go5riz gor5ou 5gos. gov1 g3p 1gr 4grada
g4rai gran2 5graph. g5rapher 5graphic 4graphy 4gray gre4n 4gress. 4grit g4ro
gruf4 gs2 g5ste gth3 gu4a 3guard 2gue 5gui5t 3gun 3gus 4gu4t g3w 1gy 2g5y3n
gy5ra h3ab4l hach4 hae4m hae4t h5agu ha3la hala3m ha4m han4ci han4cy 5hand.
han4g hang5er hang5o h5a5niz han4k han4te hap3l hap5t ha3ran ha5ras har2d hard3e
har4le harp5en har5ter has5s haun4 5haz haz3a h1b 1head 3hear he4can h5ecat h4ed
he5do5 he3l4i hel4lis hel4ly h5elo hem4p he2n hena4 hen5at heo5r hep5 h4era
hera3p her4ba here5a h3ern h5erou h3ery h1es he2s5p he4t het4ed heu4 h1f h1h
hi5an hi4co high5 h4il2 himer4 h4ina hion4e hi4p hir4l hi3ro hir4p hir4r his3el
his4s hith5er hi2v 4hk 4h1l4 hlan4 h2lo hlo3ri 4h1m hmet4 2h1n h5odiz h5ods ho4g
hoge4 hol5ar 3hol4e ho4ma home3 hon4a ho5ny 3hood hoon4 hor5at ho5ris hort3e
ho5ru hos4e ho5sen hos1p 1hous house3 hov5el 4h5p 4hr4 hree5 hro5niz hro3po
4h1s2 h4sh h4tar ht1en ht5es h4ty hu4g hu4min hun5ke hun4t hus3t4 hu4t h1w
h4wart hy3pe hy3ph hy2s 2i1a i2al iam4 iam5ete i2an 4ianc ian3i 4ian4t ia5pe
iass4 i4ativ ia4tric i4atu ibe4 ib3era ib5ert ib5ia ib3in ib5it. ib5ite i1bl
ib3li i5bo i1br i2b5ri i5bun 4icam 5icap 4icar i4car. i4cara icas5 i4cay iccu4
4iceo 4ich 2ici i5cid ic5ina i2cip ic3ipa i4cly i2c5oc 4i1cr 5icra i4cry ic4te
ictu2 ic4t3ua ic3ula ic4um ic5uo i3cur 2id i4dai id5anc id5d ide3al ide4s i2di
id5ian idi4ar i5die id3io idi5ou id1it id5iu i3dle i4dom id3ow i4dr i2du id5uo
2ie4 ied4e 5ie5ga ield3 ien5a4 ien4e i5enn i3enti i1er. i3esc i1est i3et 4if.
if5ero iff5en if4fr 4ific. i3fie i3fl 4ift 2ig iga5b ig3era ight3i 4igi i3gib
ig3il ig3in ig3it i4g4l i2go ig3or ig5ot i5gre igu5i ig1ur i3h 4i5i4 i3j 4ik
i1la il3a4b i4lade i2l5am ila5ra i3leg il1er ilev4 il5f il1i il3ia il2ib il3io
il4ist 2ilit il2iz ill5ab 4iln il3oq il4ty il5ur il3v i4mag im3age ima5ry
imenta5r 4imet im1i im5ida imi5le i5mini 4imit im4ni i3mon i2mu im3ula 2in.
i4n3au 4inav incel4 in3cer 4ind in5dling 2ine i3nee iner4ar i5ness 4inga 4inge
in5gen 4ingi in5gling 4ingo 4ingu 2ini i5ni. i4nia in3io in1is i5nite. 5initio
in3ity 4ink 4inl 2inn 2i1no i4no4c ino4s i4not 2ins in3se insur5a 2int. 2in4th
in1u i5nus 4iny 2io 4io. ioge4 io2gr i1ol io4m ion3at ion4ery ion3i io5ph ior3i
i4os io5th i5oti io4to i4our 2ip ipe4 iphras4 ip3i ip4ic ip4re4 ip3ul i3qua
iq5uef iq3uid iq3ui3t 4ir i1ra ira4b i4rac ird5e ire4de i4ref i4rel4 i4res ir5gi
ir1i iri5de ir4is iri3tu 5i5r2iz ir4min iro4g 5iron. ir5ul 2is. is5ag is3ar
isas5 2is1c is3ch 4ise is3er 3isf is5han is3hon ish5op is3ib isi4d i5sis is5itiv
4is4k islan4 4isms i2so iso5mer is1p is2pi is4py 4is1s is4sal issen4 is4ses
is4ta. is1te is1ti ist4ly 4istral i2su is5us 4ita. ita4bi i4tag 4ita5m i3tan
i3tat 2ite it3era i5teri it4es 2ith i1ti 4itia 4i2tic it3ica 5i5tick it3ig
it5ill i2tim 2itio 4itis i4tism i2t5o5m 4iton i4tram it5ry 4itt it3uat i5tud
it3ul 4itz. i1u 2iv iv3ell iv3en. i4v3er. i4vers. iv5il. iv5io iv1it i5vore
iv3o3ro i4v3ot 4i5w ix4o 4iy 4izar izi4 5izont 5ja jac4q ja4p 1je jer5s 4jestie
4jesty jew3 jo4p 5judg 3ka. k3ab k5ag kais4 kal4 k1b k2ed 1kee ke4g ke5li k3en4d
k1er kes4 k3est. ke4ty k3f kh4 k1i 5ki. 5k2ic k4ill kilo5 k4im k4in. kin4de
k5iness kin4g ki4p kis4 k5ish kk4 k1l 4kley 4kly k1m k5nes 1k2no ko5r kosh4 k3ou
kro5n 4k1s2 k4sc ks4l k4sy k5t k1w lab3ic l4abo laci4 l4ade la3dy lag4n lam3o
3land lan4dl lan5et lan4te lar4g lar3i las4e la5tan 4lateli 4lativ 4lav la4v4a
2l1b lbin4 4l1c2 lce4 l3ci 2ld l2de ld4ere ld4eri ldi4 ld5is l3dr l4dri le2a
le4bi left5 5leg. 5legg le4mat lem5atic 4len. 3lenc 5lene. 1lent le3ph le4pr
lera5b ler4e 3lerg 3l4eri l4ero les2 le5sco 5lesq 3less 5less. l3eva lev4er.
lev4era lev4ers 3ley 4leye 2lf l5fr 4l1g4 l5ga lgar3 l4ges lgo3 2l3h li4ag li2am
liar5iz li4as li4ato li5bi 5licio li4cor 4lics 4lict. l4icu l3icy l3ida lid5er
3lidi lif3er l4iff li4fl 5ligate 3ligh li4gra 3lik 4l4i4l lim4bl lim3i li4mo
l4im4p l4ina 1l4ine lin3ea lin3i link5er li5og 4l4iq lis4p l1it l2it. 5litica
l5i5tics liv3er l1iz 4lj lka3 l3kal lka4t l1l l4law l2le l5lea l3lec l3leg l3lel
l3le4n l3le4t ll2i l2lin4 l5lina ll4o lloqui5 ll5out l5low 2lm l5met lm3ing
l4mod lmon4 2l1n2 3lo. lob5al lo4ci 4lof 3logic l5ogo 3logu lom3er 5long lon4i
l3o3niz lood5 5lope. lop3i l3opm lora4 lo4rato lo5rie lor5ou 5los. los5et
5losophiz 5losophy los4t lo4ta loun5d 2lout 4lov 2lp lpa5b l3pha l5phi lp5ing
l3pit l4pl l5pr 4l1r 2l1s2 l4sc l2se l4sie 4lt lt5ag ltane5 l1te lten4 ltera4
lth3i l5ties. ltis4 l1tr ltu2 ltur3a lu5a lu3br luch4 lu3ci lu3en luf4 lu5id
lu4ma 5lumi l5umn. 5lumnia lu3o luo3r 4lup luss4 lus3te 1lut l5ven l5vet4 2l1w
1ly 4lya 4lyb ly5me ly3no 2lys4 l5yse 1ma 2mab ma2ca ma5chine ma4cl mag5in 5magn
2mah maid5 4mald ma3lig ma5lin mal4li mal4ty 5mania man5is man3iz 4map ma5rine.
ma5riz mar4ly mar3v ma5sce mas4e mas1t 5mate math3 ma3tis 4matiza 4m1b mba4t5
m5bil m4b3ing mbi4v 4m5c 4me. 2med 4med. 5media me3die m5e5dy me2g mel5on mel4t
me2m mem1o3 1men men4a men5ac men4de 4mene men4i mens4 mensu5 3ment men4te me5on
m5ersa 2mes 3mesti me4ta met3al me1te me5thi m4etr 5metric me5trie me3try me4v
4m1f 2mh 5mi. mi3a mid4a mid4g mig4 3milia m5i5lie m4ill min4a 3mind m5inee
m4ingl min5gli m5ingly min4t m4inu miot4 m2is mis4er. mis5l mis4ti m5istry 4mith
m2iz 4mk 4m1l m1m mma5ry 4m1n mn4a m4nin mn4o 1mo 4mocr 5mocratiz mo2d1 mo4go
mois2 moi5se 4mok mo5lest mo3me mon5et mon5ge moni3a mon4ism mon4ist mo3niz
monol4 mo3ny. mo2r 4mora. mos2 mo5sey mo3sp moth3 m5ouf 3mous mo2v 4m1p mpara5
mpa5rab mpar5i m3pet mphas4 m2pi mpi4a mp5ies m4p1in m5pir mp5is mpo3ri mpos5ite
m4pous mpov5 mp4tr m2py 4m3r 4m1s2 m4sh m5si 4mt 1mu mula5r4 5mult multi3 3mum
mun2 4mup mu4u 4mw 1na 2n1a2b n4abu 4nac. na4ca n5act nag5er. nak4 na4li na5lia
4nalt na5mit n2an nanci4 nan4it nank4 nar3c 4nare nar3i nar4l n5arm n4as nas4c
nas5ti n2at na3tal nato5miz n2au nau3se 3naut nav4e 4n1b4 ncar5 n4ces. n3cha
n5cheo n5chil n3chis nc1in nc4it ncour5a n1cr n1cu n4dai n5dan n1de nd5est.
ndi4b n5d2if n1dit n3diz n5duc ndu4r nd2we 2ne. n3ear ne2b neb3u ne2c 5neck 2ned
ne4gat neg5ativ 5nege ne4la nel5iz ne5mi ne4mo 1nen 4nene 3neo ne4po ne2q n1er
nera5b n4erar n2ere n4er5i ner4r 1nes 2nes. 4nesp 2nest 4nesw 3netic ne4v n5eve
ne4w n3f n4gab n3gel nge4n4e n5gere n3geri ng5ha n3gib ng1in n5git n4gla ngov4
ng5sh n1gu n4gum n2gy 4n1h4 nha4 nhab3 nhe4 3n4ia ni3an ni4ap ni3ba ni4bl ni4d
ni5di ni4er ni2fi ni5ficat n5igr nik4 n1im ni3miz n1in 5nine. nin4g ni4o 5nis.
nis4ta n2it n4ith 3nitio n3itor ni3tr n1j 4nk2 n5kero n3ket nk3in n1kl 4n1l n5m
nme4 nmet4 4n1n2 nne4 nni3al nni4v nob4l no3ble n5ocl 4n3o2d 3noe 4nog noge4
nois5i no5l4i 5nologis 3nomic n5o5miz no4mo no3my no4n non4ag non5i n5oniz 4nop
5nop5o5li nor5ab no4rary 4nosc nos4e nos5t no5ta 1nou 3noun nov3el3 nowl3 n1p4
npi4 npre4c n1q n1r nru4 2n1s2 ns5ab nsati4 ns4c n2se n4s3es nsid1 nsig4 n2sl
ns3m n4soc ns4pe n5spi nsta5bl n1t nta4b nter3s nt2i n5tib nti4er nti2f n3tine
n4t3ing nti4p ntrol5li nt4s ntu3me nu1a nu4d nu5en nuf4fe n3uin 3nu3it n4um
nu1me n5umi 3nu4n n3uo nu3tr n1v2 n1w4 nym4 nyp4 4nz n3za 4oa oad3 o5a5les oard3
oas4e oast5e oat5i ob3a3b o5bar obe4l o1bi o2bin ob5ing o3br ob3ul o1ce och4
o3chet ocif3 o4cil o4clam o4cod oc3rac oc5ratiz ocre3 5ocrit octor5a oc3ula
o5cure od5ded od3ic odi3o o2do4 odor3 od5uct. od5ucts o4el o5eng o3er oe4ta o3ev
o2fi of5ite ofit4t o2g5a5r og5ativ o4gato o1ge o5gene o5geo o4ger o3gie 1o1gis
og3it o4gl o5g2ly 3ogniz o4gro ogu5i 1ogy 2ogyn o1h2 ohab5 oi2 oic3es oi3der
oiff4 oig4 oi5let o3ing oint5er o5ism oi5son oist5en oi3ter o5j 2ok o3ken ok5ie
o1la o4lan olass4 ol2d old1e ol3er o3lesc o3let ol4fi ol2i o3lia o3lice ol5id.
o3li4f o5lil ol3ing o5lio o5lis. ol3ish o5lite o5litio o5liv olli4e ol5ogiz
olo4r ol5pl ol2t ol3ub ol3ume ol3un o5lus ol2v o2ly om5ah oma5l om5atiz om2be
om4bl o2me om3ena om5erse o4met om5etry o3mia om3ic. om3ica o5mid om1in o5mini
5ommend omo4ge o4mon om3pi ompro5 o2n on1a on4ac o3nan on1c 3oncil 2ond on5do
o3nen on5est on4gu on1ic o3nio on1is o5niu on3key on4odi on3omy on3s onspi4
onspir5a onsu4 onten4 on3t4i ontif5 on5um onva5 oo2 ood5e ood5i oo4k oop3i o3ord
oost5 o2pa ope5d op1er 3opera 4operag 2oph o5phan o5pher op3ing o3pit o5pon
o4posi o1pr op1u opy5 o1q o1ra o5ra. o4r3ag or5aliz or5ange ore5a o5real or3ei
ore5sh or5est. orew4 or4gu 4o5ria or3ica o5ril or1in o1rio or3ity o3riu or2mi
orn2e o5rof or3oug or5pe 3orrh or4se ors5en orst4 or3thi or3thy or4ty o5rum o1ry
os3al os2c os4ce o3scop 4oscopi o5scr os4i4e os5itiv os3ito os3ity osi4u os4l
o2so os4pa os4po os2ta o5stati os5til os5tit o4tan otele4g ot3er. ot5ers o4tes
4oth oth5esi oth3i4 ot3ic. ot5ica o3tice o3tif o3tis oto5s ou2 ou3bl ouch5i
ou5et ou4l ounc5er oun2d ou5v ov4en over4ne over3s ov4ert o3vis oviti4 o5v4ol
ow3der ow3el ow5est ow1i own5i o4wo oy1a 1pa pa4ca pa4ce pac4t p4ad 5pagan
p3agat p4ai pain4 p4al pan4a pan3el pan4ty pa3ny pa1p pa4pu para5bl par5age
par5di 3pare par5el p4a4ri par4is pa2te pa5ter 5pathic pa5thy pa4tric pav4 3pay
4p1b pd4 4pe. 3pe4a pear4l pe2c 2p2ed 3pede 3pedi pedia4 ped4ic p4ee pee4d pek4
pe4la peli4e pe4nan p4enc pen4th pe5on p4era. pera5bl p4erag p4eri peri5st
per4mal perme5 p4ern per3o per3ti pe5ru per1v pe2t pe5ten pe5tiz 4pf 4pg 4ph.
phar5i phe3no ph4er ph4es. ph1ic 5phie ph5ing 5phisti 3phiz ph2l 3phob 3phone
5phoni pho4r 4phs ph3t 5phu 1phy pi3a pian4 pi4cie pi4cy p4id p5ida pi3de 5pidi
3piec pi3en pi4grap pi3lo pi2n p4in. pind4 p4ino 3pi1o pion4 p3ith pi5tha pi2tu
2p3k2 1p2l2 3plan plas5t pli3a pli5er 4plig pli4n ploi4 plu4m plum4b 4p1m 2p3n
po4c 5pod. po5em po3et5 5po4g poin2 5point poly5t po4ni po4p 1p4or po4ry 1pos
pos1s p4ot po4ta 5poun 4p1p ppa5ra p2pe p4ped p5pel p3pen p3per p3pet ppo5site
pr2 pray4e 5preci pre5co pre3em pref5ac pre4la pre3r p3rese 3press pre5ten pre3v
5pri4e prin4t3 pri4s pris3o p3roca prof5it pro3l pros3e pro1t 2p1s2 p2se ps4h
p4sib 2p1t pt5a4b p2te p2th pti3m ptu4r p4tw pub3 pue4 puf4 pul3c pu4m pu2n
pur4r 5pus pu2t 5pute put3er pu3tr put4ted put4tin p3w qu2 qua5v 2que. 3quer
3quet 2rab ra3bi rach4e r5acl raf5fi raf4t r2ai ra4lo ram3et r2ami rane5o ran4ge
r4ani ra5no rap3er 3raphy rar5c rare4 rar5ef 4raril r2as ration4 rau4t ra5vai
rav3el ra5zie r1b r4bab r4bag rbi2 rbi4f r2bin r5bine rb5ing. rb4o r1c r2ce
rcen4 r3cha rch4er r4ci4b rc4it rcum3 r4dal rd2i rdi4a rdi4er rdin4 rd3ing 2re.
re1al re3an re5arr 5reav re4aw r5ebrat rec5oll rec5ompe re4cre 2r2ed re1de
re3dis red5it re4fac re2fe re5fer. re3fi re4fy reg3is re5it re1li re5lu r4en4ta
ren4te re1o re5pin re4posi re1pu r1er4 r4eri rero4 re5ru r4es. re4spi ress5ib
res2t re5stal re3str re4ter re4ti4z re3tri reu2 re5uti rev2 re4val rev3el
r5ev5er. re5vers re5vert re5vil rev5olu re4wh r1f rfu4 r4fy rg2 rg3er r3get
r3gic rgi4n rg3ing r5gis r5git r1gl rgo4n r3gu rh4 4rh. 4rhal ri3a ria4b ri4ag
r4ib rib3a ric5as r4ice 4rici 5ricid ri4cie r4ico rid5er ri3enc ri3ent ri1er
ri5et rig5an 5rigi ril3iz 5riman rim5i 3rimo rim4pe r2ina 5rina. rin4d rin4e
rin4g ri1o 5riph riph5e ri2pl rip5lic r4iq r2is r4is. ris4c r3ish ris4p ri3ta3b
r5ited. rit5er. rit5ers rit3ic ri2tu rit5ur riv5el riv3et riv3i r3j r3ket rk4le
rk4lin r1l rle4 r2led r4lig r4lis rl5ish r3lo4 r1m rma5c r2me r3men rm5ers
rm3ing r4ming. r4mio r3mit r4my r4nar r3nel r4ner r5net r3ney r5nic r1nis4 r3nit
r3niv rno4 r4nou r3nu rob3l r2oc ro3cr ro4e ro1fe ro5fil rok2 ro5ker 5role.
rom5ete rom4i rom4p ron4al ron4e ro5n4is ron4ta 1room 5root ro3pel rop3ic ror3i
ro5ro ros5per ros4s ro4the ro4ty ro4va rov5el rox5 r1p r4pea r5pent rp5er. r3pet
rp4h4 rp3ing r3po r1r4 rre4c rre4f r4reo rre4st rri4o rri4v rron4 rros4 rrys4
4rs2 r1sa rsa5ti rs4c r2se r3sec rse4cr rs5er. rs3es rse5v2 r1sh r5sha r1si
r4si4b rson3 r1sp r5sw rtach4 r4tag r3teb rten4d rte5o r1ti rt5ib rti4d r4tier
r3tig rtil3i rtil4l r4tily r4tist r4tiv r3tri rtroph4 rt4sh ru3a ru3e4l ru3en
ru4gl ru3in rum3pl ru2n runk5 run4ty r5usc ruti5n rv4e rvel4i r3ven rv5er.
r5vest r3vey r3vic rvi4v r3vo r1w ry4c 5rynge ry3t sa2 2s1ab 5sack sac3ri s3act
5sai salar4 sal4m sa5lo sal4t 3sanc san4de s1ap sa5ta 5sa3tio sat3u sau4 sa5vor
5saw 4s5b scan4t5 sca4p scav5 s4ced 4scei s4ces sch2 s4cho 3s4cie 5scin4d scle5
s4cli scof4 4scopy scour5a s1cu 4s5d 4se. se4a seas4 sea5w se2c3o 3sect 4s4ed
se4d4e s5edl se2g seg3r 5sei se1le 5self 5selv 4seme se4mol sen5at 4senc sen4d
s5ened sen5g s5enin 4sentd 4sentl sep3a3 4s1er. s4erl ser4o 4servo s1e4s se5sh
ses5t 5se5um 5sev sev3en sew4i 5sex 4s3f 2s3g s2h 2sh. sh1er 5shev sh1in sh3io
3ship shiv5 sho4 sh5old shon3 shor4 short5 4shw si1b s5icc 3side. 5sides 5sidi
si5diz 4signa sil4e 4sily 2s1in s2ina 5sine. s3ing 1sio 5sion sion5a si2r sir5a
1sis 3sitio 5siu 1siv 5siz sk2 4ske s3ket sk5ine sk5ing s1l2 s3lat s2le slith5
2s1m s3ma small3 sman3 smel4 s5men 5smith smol5d4 s1n4 1so so4ce soft3 so4lab
sol3d2 so3lic 5solv 3som 3s4on. sona4 son4g s4op 5sophic s5ophiz s5ophy sor5c
sor5d 4sov so5vi 2spa 5spai spa4n spen4d 2s5peo 2sper s2phe 3spher spho5 spil4
sp5ing 4spio s4ply s4pon spor4 4spot squal4l s1r 2ss s1sa ssas3 s2s5c s3sel
s5seng s4ses. s5set s1si s4sie ssi4er ss5ily s4sl ss4li s4sn sspend4 ss2t ssur5a
ss5w 2nd. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
stew5a s3the st2i s4ti. s5tia s1tic 5stick s4tie s3tif st3ing 5stir s1tle 5stock
stom3a 5stone s4top 3store st4r s4trad 5stratu s4tray s4trid 4stry 4st3w s2ty
1su su1al su4b3 su2g3 su5is suit3 s4ul su2m sum3i su2n su2r 4sv sw2 4swo s4y
4syc 3syl syn5o sy5rin 1ta 3ta. 2tab ta5bles 5taboliz 4taci ta5do 4taf4 tai5lo
ta2l ta5la tal5en tal3i 4talk tal4lis ta5log ta5mo tan4de tanta3 ta5per ta5pl
tar4a 4tarc 4tare ta3riz tas4e ta5sy 4tatic ta4tur taun4 tav4 2taw tax4is 2t1b
4tc t4ch tch5et 4t1d 4te. tead4i 4teat tece4 5tect 2t1ed te5di 1tee teg4 te5ger
te5gi 3tel. teli4 5tels te2ma2 tem3at 3tenan 3tenc 3tend 4tenes 1tent ten4tag
1teo te4p te5pe ter3c 5ter3d 1teri ter5ies ter3is teri5za 5ternit ter5v 4tes.
4tess t3ess. teth5e 3teu 3tex 4tey 2t1f 4t1g 2th. than4 th2e 4thea th3eas the5at
the3is 3thet th5ic. th5ica 4thil 5think 4thl th5ode 5thodic 4thoo thor5it
tho5riz 2ths 1tia ti4ab ti4ato 2ti2b 4tick t4ico t4ic1u 5tidi 3tien tif2 ti5fy
2tig 5tigu till5in 1tim 4timp tim5ul 2t1in t2ina 3tine. 3tini 1tio ti5oc tion5ee
5tiq ti3sa 3tise tis4m ti5so tis4p 5tistica ti3tl ti4u 1tiv tiv4a 1tiz ti3za
ti3zen 2tl t5la tlan4 3tle. 3tled 3tles. t5let. t5lo 4t1m tme4 2t1n2 1to to3b
to5crat 4todo 2tof to2gr to5ic to2ma tom4b to3my ton4ali to3nat 4tono 4tony
to2ra to3rie tor5iz tos2 5tour 4tout to3war 4t1p 1tra tra3b tra5ch traci4
trac4it trac4te tras4 tra5ven trav5es5 tre5f tre4m trem5i 5tria tri5ces 5tricia
4trics 2trim tri4v tro5mi tron5i 4trony tro5phe tro3sp tro3v tru5i trus4 4t1s2
t4sc tsh4 t4sw 4t3t2 t4tes t5to ttu4 1tu tu1a tu3ar tu4bi tud2 4tue 4tuf4 5tu3i
3tum tu4nis 2t3up. 3ture 5turi tur3is tur5o tu5ry 3tus 4tv tw4 4t1wa twis4 4two
1ty 4tya 2tyl type3 ty5ph 4tz tz4e 4uab uac4 ua5na uan4i uar5ant uar2d uar3i
uar3t u1at uav4 ub4e u4bel u3ber u4bero u1b4i u4b5ing u3ble. u3ca uci4b uc4it
ucle3 u3cr u3cu u4cy ud5d ud3er ud5est udev4 u1dic ud3ied ud3ies ud5is u5dit
u4don ud4si u4du u4ene uens4 uen4te uer4il 3ufa u3fl ugh3en ug5in 2ui2 uil5iz
ui4n u1ing uir4m uita4 uiv3 uiv4er. u5j 4uk u1la ula5b u5lati ulch4 5ulche
ul3der ul4e u1len ul4gi ul2i u5lia ul3ing ul5ish ul4lar ul4li4b ul4lis 4ul3m
u1l4o 4uls uls5es ul1ti ultra3 4ultu u3lu ul5ul ul5v um5ab um4bi um4bly u1mi
u4m3ing umor5o um2p unat4 u2ne un4er u1ni un4im u2nin un5ish uni3v un3s4 un4sw
unt3ab un4ter. un4tes unu4 un5y un5z u4ors u5os u1ou u1pe uper5s u5pia up3ing
u3pl up3p upport5 upt5ib uptu4 u1ra 4ura. u4rag u4ras ur4be urc4 ur1d ure5at
ur4fer ur4fr u3rif uri4fic ur1in u3rio u1rit ur3iz ur2l url5ing. ur4no uros4
ur4pe ur4pi urs5er ur5tes ur3the urti4 ur4tie u3ru 2us u5sad u5san us4ap usc2
us3ci use5a u5sia u3sic us4lin us1p us5sl us5tere us1tr u2su usur4 uta4b u3tat
4ute. 4utel 4uten uten4i 4u1t2i uti5liz u3tine ut3ing ution5a u4tis 5u5tiz u4t1l
ut5of uto5g uto5matic u5ton u4tou uts4 u3u uu4m u1v2 uxu3 uz4e 1va 5va. 2v1a4b
vac5il vac3u vag4 va4ge va5lie val5o val1u va5mo va5niz va5pi var5ied 3vat 4ve.
4ved veg3 v3el. vel3li ve4lo v4ely ven3om v5enue v4erd 5vere. v4erel v3eren
ver5enc v4eres ver3ie vermi4n 3verse ver3th v4e2s 4ves. ves4te ve4te vet3er
ve4ty vi5ali 5vian 5vide. 5vided 4v3iden 5vides 5vidi v3if vi5gn vik4 2vil
5vilit v3i3liz v1in 4vi4na v2inc vin5d 4ving vio3l v3io4r vi1ou vi4p vi5ro
vis3it vi3so vi3su 4viti vit3r 4vity 3viv 5vo. voi4 3vok vo4la v5ole 5volt 3volv
vom5i vor5ab vori4 vo4ry vo4ta 4votee 4vv4 v4y w5abl 2wac wa5ger wag5o wait5
w5al. wam4 war4t was4t wa1te wa5ver w1b wea5rie weath3 wed4n weet3 wee5v wel4l
w1er west3 w3ev whi4 wi2 wil2 will5in win4de win4g wir4 3wise with3 wiz5 w4k
wl4es wl3in w4no 1wo2 wom1 wo5ven w5p wra4 wri4 writa4 w3sh ws4l ws4pe w5s4t 4wt
wy4 x1a xac5e x4ago xam3 x4ap xas5 x3c2 x1e xe4cuto x2ed xer4i xe5ro x1h xhi2
xhil5 xhu4 x3i xi5a xi5c xi5di x4ime xi5miz x3o x4ob x3p xpan4d xpecto5 xpe3d
x1t2 x3ti x1u xu3a xx4 y5ac 3yar4 y5at y1b y1c y2ce yc5er y3ch ych4e ycom4 ycot4
y1d y5ee y1er y4erf yes4 ye4t y5gi 4y3h y1i y3la ylla5bl y3lo y5lu ymbol5 yme4
ympa3 yn3chr yn5d yn5g yn5ic 5ynx y1o4 yo5d y4o5g yom4 yo5net y4ons y4os y4ped
yper5 yp3i y3po y4poc yp2ta y5pu yra5m yr5ia y3ro yr4r ys4c y3s2e ys3ica ys3io
3ysis y4so yss4 ys1t ys3ta ysur4 y3thin yt3ic y1w za1 z5a2b zar2 4zb 2ze ze4n
ze4p z1er ze3ro zet4 2z1i z4il z4is 5zl 4zm 1zo zo4m zo5ol zte4 4z1z2 z4zy
"""
# Extra patterns, from ushyphmax.tex, dated 2005-05-30.
# Copyright (C) 1990, 2004, 2005 Gerard D.C. Kuiken.
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
#
# These patterns are based on the Hyphenation Exception Log
# published in TUGboat, Volume 10 (1989), No. 3, pp. 337-341,
# and a large number of incorrectly hyphenated words not yet published.
"""
.con5gr .de5riva .dri5v4 .eth1y6l1 .eu4ler .ev2 .ever5si5b .ga4s1om1 .ge4ome
.ge5ot1 .he3mo1 .he3p6a .he3roe .in5u2t .kil2n3i .ko6r1te1 .le6ices .me4ga1l
.met4ala .mim5i2c1 .mi1s4ers .ne6o3f .noe1th .non1e2m .poly1s .post1am .pre1am
.rav5en1o .semi5 .sem4ic .semid6 .semip4 .semir4 .sem6is4 .semiv4 .sph6in1
.spin1o .ta5pes1tr .te3legr .to6pog .to2q .un3at5t .un5err5 .vi2c3ar .we2b1l
.re1e4c a5bolic a2cabl af6fish am1en3ta5b anal6ys ano5a2c ans5gr ans3v anti1d
an3ti1n2 anti1re a4pe5able ar3che5t ar2range as5ymptot ath3er1o1s at6tes.
augh4tl au5li5f av3iou back2er. ba6r1onie ba1thy bbi4t be2vie bi5d2if bil2lab
bio5m bi1orb bio1rh b1i3tive blan2d1 blin2d1 blon2d2 bor1no5 bo2t1u1l brus4q
bus6i2er bus6i2es buss4ing but2ed. but4ted cad5e1m cat1a1s2 4chs. chs3hu chie5vo
cig3a3r cin2q cle4ar co6ph1o3n cous2ti cri3tie croc1o1d cro5e2co c2tro3me6c
1cu2r1ance 2d3alone data1b dd5a5b d2d5ib de4als. de5clar1 de2c5lina de3fin3iti
de2mos des3ic de2tic dic1aid dif5fra 3di1methy di2ren di2rer 2d1lead 2d1li2e
3do5word dren1a5l drif2t1a d1ri3pleg5 drom3e5d d3tab du2al. du1op1o1l ea4n3ies
e3chas edg1l ed1uling eli2t1is e1loa en1dix eo3grap 1e6p3i3neph1 e2r3i4an.
e3spac6i eth1y6l1ene 5eu2clid1 feb1rua fermi1o 3fich fit5ted. fla1g6el flow2er.
3fluor gen2cy. ge3o1d ght1we g1lead get2ic. 4g1lish 5glo5bin 1g2nac gnet1ism
gno5mo g2n1or. g2noresp 2g1o4n3i1za graph5er. griev1 g1utan hair1s ha2p3ar5r
hatch1 hex2a3 hite3sid h3i5pel1a4 hnau3z ho6r1ic. h2t1eou hypo1tha id4ios
ifac1et ign4it ignit1er i4jk im3ped3a infra1s2 i5nitely. irre6v3oc i1tesima
ith5i2l itin5er5ar janu3a japan1e2s je1re1m 1ke6ling 1ki5netic 1kovian k3sha
la4c3i5e lai6n3ess lar5ce1n l3chai l3chil6d1 lead6er. lea4s1a 1lec3ta6b
le3g6en2dre 1le1noid lith1o5g ll1fl l2l3ish l5mo3nell lo1bot1o1 lo2ges. load4ed.
load6er. l3tea lth5i2ly lue1p 1lunk3er 1lum5bia. 3lyg1a1mi ly5styr ma1la1p m2an.
man3u1sc mar1gin1 medi2c med3i3cin medio6c1 me3gran3 m2en. 3mi3da5b 3milita
mil2l1ag mil5li5li mi6n3is. mi1n2ut1er mi1n2ut1est m3ma1b 5maph1ro1 5moc1ra1t
mo5e2las mol1e5c mon4ey1l mono3ch mo4no1en moro6n5is mono1s6 moth4et2 m1ou3sin
m5shack2 mu2dro mul2ti5u n3ar4chs. n3ch2es1t ne3back 2ne1ski n1dieck nd3thr
nfi6n3ites 4n5i4an. nge5nes ng1ho ng1spr nk3rup n5less 5noc3er1os nom1a6l
nom5e1no n1o1mist non1eq non1i4so 5nop1oly. no1vemb ns5ceiv ns4moo ntre1p
obli2g1 o3chas odel3li odit1ic oerst2 oke1st o3les3ter oli3gop1o1 o1lo3n4om
o3mecha6 onom1ic o3norma o3no2t1o3n o3nou op1ism. or4tho3ni4t orth1ri or5tively
o4s3pher o5test1er o5tes3tor oth3e1o1s ou3ba3do o6v3i4an. oxi6d1ic pal6mat
parag6ra4 par4a1le param4 para3me pee2v1 phi2l3ant phi5lat1e3l pi2c1a3d pli2c1ab
pli5nar poin3ca 1pole. poly1e po3lyph1ono 1prema3c pre1neu pres2pli pro2cess
proc3i3ty. pro2g1e 3pseu2d pseu3d6o3d2 pseu3d6o3f2 pto3mat4 p5trol3 pu5bes5c
quain2t1e qu6a3si3 quasir6 quasis6 quin5tes5s qui3v4ar r1abolic 3rab1o1loi
ra3chu r3a3dig radi1o6g r2amen 3ra4m5e1triz ra3mou ra5n2has ra1or r3bin1ge
re2c3i1pr rec5t6ang re4t1ribu r3ial. riv1o1l 6rk. rk1ho r1krau 6rks. r5le5qu
ro1bot1 ro5e2las ro5epide1 ro3mesh ro1tron r3pau5li rse1rad1i r1thou r1treu
r1veil rz1sc sales3c sales5w 5sa3par5il sca6p1er sca2t1ol s4chitz schro1ding1
1sci2utt scrap4er. scy4th1 sem1a1ph se3mes1t se1mi6t5ic sep3temb shoe1st sid2ed.
side5st side5sw si5resid sky1sc 3slova1kia 3s2og1a1my so2lute 3s2pace 1s2pacin
spe3cio spher1o spi2c1il spokes5w sports3c sports3w s3qui3to s2s1a3chu1 ss3hat
s2s3i4an. s5sign5a3b 1s2tamp s2t1ant5shi star3tli sta1ti st5b 1stor1ab strat1a1g
strib5ut st5scr stu1pi4d1 styl1is su2per1e6 1sync 1syth3i2 swimm6 5tab1o1lism
ta3gon. talk1a5 t1a1min t6ap6ath 5tar2rh tch1c tch3i1er t1cr teach4er. tele2g
tele1r6o 3ter1gei ter2ic. t3ess2es tha4l1am tho3don th1o5gen1i tho1k2er thy4l1an
thy3sc 2t3i4an. ti2n3o1m t1li2er tolo2gy tot3ic trai3tor1 tra1vers travers3a3b
treach1e tr4ial. 3tro1le1um trof4ic. tro3fit tro1p2is 3trop1o5les 3trop1o5lis
t1ro1pol3it tsch3ie ttrib1ut1 turn3ar t1wh ty2p5al ua3drati uad1ratu u5do3ny
uea1m u2r1al. uri4al. us2er. v1ativ v1oir5du1 va6guer vaude3v 1verely. v1er1eig
ves1tite vi1vip3a3r voice1p waste3w6a2 wave1g4 w3c week1n wide5sp wo4k1en
wrap3aro writ6er. x1q xquis3 y5che3d ym5e5try y1stro yes5ter1y z3ian. z3o1phr
z2z3w
""")
exceptions = """
as-so-ciate as-so-ciates dec-li-na-tion oblig-a-tory phil-an-thropic present
presents project projects reci-procity re-cog-ni-zance ref-or-ma-tion
ret-ri-bu-tion ta-ble
"""
hyphenator = Hyphenator(patterns, exceptions)
hyphenate_word = hyphenator.hyphenate_word
del patterns
del exceptions
if __name__ == '__main__':
    # CLI usage: hyphenate each word supplied as an argument, printing it
    # with '-' inserted at every break point.  With no arguments, run the
    # module's doctests (verbosely) instead.
    import sys

    words = sys.argv[1:]
    if words:
        for w in words:
            print('-'.join(hyphenate_word(w)))
    else:
        import doctest
        doctest.testmod(verbose=True)
| 35,535 | Python | .py | 507 | 67.583826 | 104 | 0.809763 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,250 | html_entities.py | kovidgoyal_calibre/src/calibre/ebooks/html_entities.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2017, Kovid Goyal <kovid at kovidgoyal.net>
from calibre import my_unichr
html5_entities = {
# ENTITY_DATA {{{
'AElig': 'Æ',
'AMP': '&',
'Aacute': '�',
'Abreve': 'Ä‚',
'Acirc': 'Â',
'Acy': '�',
'Afr': '�',
'Agrave': 'À',
'Alpha': 'Α',
'Amacr': 'Ä€',
'And': 'â©“',
'Aogon': 'Ä„',
'Aopf': '�',
'ApplyFunction': '\u2061',
'Aring': 'Ã…',
'Ascr': '�',
'Assign': '≔',
'Atilde': 'Ã',
'Auml': 'Ä',
'Backslash': '∖',
'Barv': 'â«§',
'Barwed': '⌆',
'Bcy': 'Ğ‘',
'Because': '∵',
'Bernoullis': 'ℬ',
'Beta': 'Î’',
'Bfr': '�',
'Bopf': '�',
'Breve': '˘',
'Bscr': 'ℬ',
'Bumpeq': '�',
'CHcy': 'Ч',
'COPY': '©',
'Cacute': 'Ć',
'Cap': 'â‹’',
'CapitalDifferentialD': 'â……',
'Cayleys': 'â„',
'Ccaron': 'Č',
'Ccedil': 'Ç',
'Ccirc': 'Ĉ',
'Cconint': '∰',
'Cdot': 'ÄŠ',
'Cedilla': '¸',
'CenterDot': '·',
'Cfr': 'â„',
'Chi': 'Χ',
'CircleDot': '⊙',
'CircleMinus': '⊖',
'CirclePlus': '⊕',
'CircleTimes': '⊗',
'ClockwiseContourIntegral': '∲',
'CloseCurlyDoubleQuote': '�',
'CloseCurlyQuote': '’',
'Colon': '∷',
'Colone': 'â©´',
'Congruent': '≡',
'Conint': '∯',
'ContourIntegral': '∮',
'Copf': 'â„‚',
'Coproduct': '�',
'CounterClockwiseContourIntegral': '∳',
'Cross': '⨯',
'Cscr': '�',
'Cup': 'â‹“',
'CupCap': '�',
'DD': 'â……',
'DDotrahd': '⤑',
'DJcy': 'Ğ‚',
'DScy': 'Ğ…',
'DZcy': '�',
'Dagger': '‡',
'Darr': '↡',
'Dashv': '⫤',
'Dcaron': '�',
'Dcy': 'Ğ”',
'Del': '∇',
'Delta': 'Δ',
'Dfr': '�',
'DiacriticalAcute': '´',
'DiacriticalDot': 'Ë™',
'DiacriticalDoubleAcute': '�',
'DiacriticalGrave': '`',
'DiacriticalTilde': '˜',
'Diamond': 'â‹„',
'DifferentialD': 'â…†',
'Dopf': '�',
'Dot': '¨',
'DotDot': '⃜',
'DotEqual': '�',
'DoubleContourIntegral': '∯',
'DoubleDot': '¨',
'DoubleDownArrow': '⇓',
'DoubleLeftArrow': '�',
'DoubleLeftRightArrow': '⇔',
'DoubleLeftTee': '⫤',
'DoubleLongLeftArrow': '⟸',
'DoubleLongLeftRightArrow': '⟺',
'DoubleLongRightArrow': '⟹',
'DoubleRightArrow': '⇒',
'DoubleRightTee': '⊨',
'DoubleUpArrow': '⇑',
'DoubleUpDownArrow': '⇕',
'DoubleVerticalBar': '∥',
'DownArrow': '↓',
'DownArrowBar': '⤓',
'DownArrowUpArrow': '⇵',
'DownBreve': 'Ì‘',
'DownLeftRightVector': '�',
'DownLeftTeeVector': '�',
'DownLeftVector': '↽',
'DownLeftVectorBar': '⥖',
'DownRightTeeVector': '⥟',
'DownRightVector': '�',
'DownRightVectorBar': '⥗',
'DownTee': '⊤',
'DownTeeArrow': '↧',
'Downarrow': '⇓',
'Dscr': '�',
'Dstrok': '�',
'ENG': 'ÅŠ',
'ETH': '�',
'Eacute': 'É',
'Ecaron': 'Äš',
'Ecirc': 'Ê',
'Ecy': 'Ğ',
'Edot': 'Ä–',
'Efr': '�',
'Egrave': 'È',
'Element': '∈',
'Emacr': 'Ä’',
'EmptySmallSquare': 'â—»',
'EmptyVerySmallSquare': 'â–«',
'Eogon': 'Ę',
'Eopf': '�',
'Epsilon': 'Ε',
'Equal': '⩵',
'EqualTilde': '≂',
'Equilibrium': '⇌',
'Escr': 'â„°',
'Esim': '⩳',
'Eta': 'Η',
'Euml': 'Ë',
'Exists': '∃',
'ExponentialE': 'â…‡',
'Fcy': 'Ф',
'Ffr': '�',
'FilledSmallSquare': 'â—¼',
'FilledVerySmallSquare': 'â–ª',
'Fopf': '�',
'ForAll': '∀',
'Fouriertrf': 'ℱ',
'Fscr': 'ℱ',
'GJcy': 'Ѓ',
'GT': '>',
'Gamma': 'Γ',
'Gammad': 'Ϝ',
'Gbreve': '�',
'Gcedil': 'Ä¢',
'Gcirc': 'Ĝ',
'Gcy': 'Ğ“',
'Gdot': 'Ä ',
'Gfr': '�',
'Gg': 'â‹™',
'Gopf': '�',
'GreaterEqual': '≥',
'GreaterEqualLess': 'â‹›',
'GreaterFullEqual': '≧',
'GreaterGreater': '⪢',
'GreaterLess': '≷',
'GreaterSlantEqual': '⩾',
'GreaterTilde': '≳',
'Gscr': '�',
'Gt': '≫',
'HARDcy': 'Ъ',
'Hacek': 'ˇ',
'Hat': '^',
'Hcirc': 'Ĥ',
'Hfr': 'ℌ',
'HilbertSpace': 'â„‹',
'Hopf': '�',
'HorizontalLine': '─',
'Hscr': 'â„‹',
'Hstrok': 'Ħ',
'HumpDownHump': '�',
'HumpEqual': '�',
'IEcy': 'Ğ•',
'IJlig': 'IJ',
'IOcy': '�',
'Iacute': '�',
'Icirc': '�',
'Icy': 'И',
'Idot': 'İ',
'Ifr': 'â„‘',
'Igrave': 'Ì',
'Im': 'â„‘',
'Imacr': 'Ī',
'ImaginaryI': 'â…ˆ',
'Implies': '⇒',
'Int': '∬',
'Integral': '∫',
'Intersection': 'â‹‚',
'InvisibleComma': '\u2063',
'InvisibleTimes': '\u2062',
'Iogon': 'Ä®',
'Iopf': '�',
'Iota': 'Ι',
'Iscr': '�',
'Itilde': 'Ĩ',
'Iukcy': 'І',
'Iuml': '�',
'Jcirc': 'Ä´',
'Jcy': 'Ğ™',
'Jfr': '�',
'Jopf': '�',
'Jscr': '�',
'Jsercy': 'Ј',
'Jukcy': 'Ğ„',
'KHcy': 'Ğ¥',
'KJcy': 'Ќ',
'Kappa': 'Κ',
'Kcedil': 'Ķ',
'Kcy': 'Ğš',
'Kfr': '�',
'Kopf': '�',
'Kscr': '�',
'LJcy': 'Љ',
'LT': '<',
'Lacute': 'Ĺ',
'Lambda': 'Λ',
'Lang': '⟪',
'Laplacetrf': 'â„’',
'Larr': '�',
'Lcaron': 'Ľ',
'Lcedil': 'Ä»',
'Lcy': 'Ğ›',
'LeftAngleBracket': '⟨',
'LeftArrow': '�',
'LeftArrowBar': '⇤',
'LeftArrowRightArrow': '⇆',
'LeftCeiling': '⌈',
'LeftDoubleBracket': '⟦',
'LeftDownTeeVector': '⥡',
'LeftDownVector': '⇃',
'LeftDownVectorBar': '⥙',
'LeftFloor': '⌊',
'LeftRightArrow': '↔',
'LeftRightVector': '�',
'LeftTee': '⊣',
'LeftTeeArrow': '↤',
'LeftTeeVector': '⥚',
'LeftTriangle': '⊲',
'LeftTriangleBar': '�',
'LeftTriangleEqual': '⊴',
'LeftUpDownVector': '⥑',
'LeftUpTeeVector': '⥠',
'LeftUpVector': '↿',
'LeftUpVectorBar': '⥘',
'LeftVector': '↼',
'LeftVectorBar': '⥒',
'Leftarrow': '�',
'Leftrightarrow': '⇔',
'LessEqualGreater': '⋚',
'LessFullEqual': '≦',
'LessGreater': '≶',
'LessLess': '⪡',
'LessSlantEqual': '⩽',
'LessTilde': '≲',
'Lfr': '�',
'Ll': '⋘',
'Lleftarrow': '⇚',
'Lmidot': 'Ä¿',
'LongLeftArrow': '⟵',
'LongLeftRightArrow': '⟷',
'LongRightArrow': '⟶',
'Longleftarrow': '⟸',
'Longleftrightarrow': '⟺',
'Longrightarrow': '⟹',
'Lopf': '�',
'LowerLeftArrow': '↙',
'LowerRightArrow': '↘',
'Lscr': 'â„’',
'Lsh': '↰',
'Lstrok': '�',
'Lt': '≪',
'Map': '⤅',
'Mcy': 'М',
'MediumSpace': '\u205f',
'Mellintrf': 'ℳ',
'Mfr': '�',
'MinusPlus': '∓',
'Mopf': '�',
'Mscr': 'ℳ',
'Mu': 'Μ',
'NJcy': 'ĞŠ',
'Nacute': 'Ń',
'Ncaron': 'Ň',
'Ncedil': 'Å…',
'Ncy': '�',
'NegativeMediumSpace': '\u200b',
'NegativeThickSpace': '\u200b',
'NegativeThinSpace': '\u200b',
'NegativeVeryThinSpace': '\u200b',
'NestedGreaterGreater': '≫',
'NestedLessLess': '≪',
'NewLine': '\n',
'Nfr': '�',
'NoBreak': '\u2060',
'NonBreakingSpace': '\xa0',
'Nopf': 'â„•',
'Not': '⫬',
'NotCongruent': '≢',
'NotCupCap': 'â‰',
'NotDoubleVerticalBar': '∦',
'NotElement': '∉',
'NotEqual': '≠',
'NotEqualTilde': '≂̸',
'NotExists': '∄',
'NotGreater': '≯',
'NotGreaterEqual': '≱',
'NotGreaterFullEqual': '≧̸',
'NotGreaterGreater': '≫̸',
'NotGreaterLess': '≹',
'NotGreaterSlantEqual': '⩾̸',
'NotGreaterTilde': '≵',
'NotHumpDownHump': '�̸',
'NotHumpEqual': '�̸',
'NotLeftTriangle': '⋪',
'NotLeftTriangleBar': '�̸',
'NotLeftTriangleEqual': '⋬',
'NotLess': '≮',
'NotLessEqual': '≰',
'NotLessGreater': '≸',
'NotLessLess': '≪̸',
'NotLessSlantEqual': '⩽̸',
'NotLessTilde': '≴',
'NotNestedGreaterGreater': '⪢̸',
'NotNestedLessLess': '⪡̸',
'NotPrecedes': '⊀',
'NotPrecedesEqual': '⪯̸',
'NotPrecedesSlantEqual': 'â‹ ',
'NotReverseElement': '∌',
'NotRightTriangle': 'â‹«',
'NotRightTriangleBar': '�̸',
'NotRightTriangleEqual': 'â‹',
'NotSquareSubset': '�̸',
'NotSquareSubsetEqual': 'â‹¢',
'NotSquareSuperset': '�̸',
'NotSquareSupersetEqual': 'â‹£',
'NotSubset': '⊂⃒',
'NotSubsetEqual': '⊈',
'NotSucceeds': '�',
'NotSucceedsEqual': '⪰̸',
'NotSucceedsSlantEqual': 'â‹¡',
'NotSucceedsTilde': '≿̸',
'NotSuperset': '⊃⃒',
'NotSupersetEqual': '⊉',
'NotTilde': '�',
'NotTildeEqual': '≄',
'NotTildeFullEqual': '≇',
'NotTildeTilde': '≉',
'NotVerticalBar': '∤',
'Nscr': '�',
'Ntilde': 'Ñ',
'Nu': '�',
'OElig': 'Å’',
'Oacute': 'Ó',
'Ocirc': 'Ô',
'Ocy': '�',
'Odblac': '�',
'Ofr': '�',
'Ograve': 'Ã’',
'Omacr': 'Ō',
'Omega': 'Ω',
'Omicron': 'Ο',
'Oopf': '�',
'OpenCurlyDoubleQuote': '“',
'OpenCurlyQuote': '‘',
'Or': 'â©”',
'Oscr': '�',
'Oslash': 'Ø',
'Otilde': 'Õ',
'Otimes': '⨷',
'Ouml': 'Ö',
'OverBar': '‾',
'OverBrace': '�',
'OverBracket': '�',
'OverParenthesis': '�',
'PartialD': '∂',
'Pcy': 'ĞŸ',
'Pfr': '�',
'Phi': 'Φ',
'Pi': 'Î ',
'PlusMinus': '±',
'Poincareplane': 'ℌ',
'Popf': 'â„™',
'Pr': '⪻',
'Precedes': '≺',
'PrecedesEqual': '⪯',
'PrecedesSlantEqual': '≼',
'PrecedesTilde': '≾',
'Prime': '″',
'Product': '�',
'Proportion': '∷',
'Proportional': '�',
'Pscr': '�',
'Psi': 'Ψ',
'QUOT': '"',
'Qfr': '�',
'Qopf': 'ℚ',
'Qscr': '�',
'RBarr': '�',
'REG': '®',
'Racute': 'Å”',
'Rang': '⟫',
'Rarr': '↠',
'Rarrtl': '⤖',
'Rcaron': 'Ř',
'Rcedil': 'Å–',
'Rcy': 'Ğ ',
'Re': 'ℜ',
'ReverseElement': '∋',
'ReverseEquilibrium': '⇋',
'ReverseUpEquilibrium': '⥯',
'Rfr': 'ℜ',
'Rho': 'Ρ',
'RightAngleBracket': '⟩',
'RightArrow': '→',
'RightArrowBar': '⇥',
'RightArrowLeftArrow': '⇄',
'RightCeiling': '⌉',
'RightDoubleBracket': '⟧',
'RightDownTeeVector': '�',
'RightDownVector': '⇂',
'RightDownVectorBar': '⥕',
'RightFloor': '⌋',
'RightTee': '⊢',
'RightTeeArrow': '↦',
'RightTeeVector': '⥛',
'RightTriangle': '⊳',
'RightTriangleBar': '�',
'RightTriangleEqual': '⊵',
'RightUpDownVector': '�',
'RightUpTeeVector': '⥜',
'RightUpVector': '↾',
'RightUpVectorBar': '⥔',
'RightVector': '⇀',
'RightVectorBar': '⥓',
'Rightarrow': '⇒',
'Ropf': '�',
'RoundImplies': '⥰',
'Rrightarrow': '⇛',
'Rscr': 'â„›',
'Rsh': '↱',
'RuleDelayed': 'â§´',
'SHCHcy': 'Ğ©',
'SHcy': 'Ш',
'SOFTcy': 'Ь',
'Sacute': 'Åš',
'Sc': '⪼',
'Scaron': 'Å ',
'Scedil': '�',
'Scirc': 'Ŝ',
'Scy': 'Ğ¡',
'Sfr': '�',
'ShortDownArrow': '↓',
'ShortLeftArrow': '�',
'ShortRightArrow': '→',
'ShortUpArrow': '↑',
'Sigma': 'Σ',
'SmallCircle': '∘',
'Sopf': '�',
'Sqrt': '√',
'Square': 'â–¡',
'SquareIntersection': '⊓',
'SquareSubset': '�',
'SquareSubsetEqual': '⊑',
'SquareSuperset': '�',
'SquareSupersetEqual': '⊒',
'SquareUnion': '⊔',
'Sscr': '�',
'Star': '⋆',
'Sub': '�',
'Subset': '�',
'SubsetEqual': '⊆',
'Succeeds': '≻',
'SucceedsEqual': '⪰',
'SucceedsSlantEqual': '≽',
'SucceedsTilde': '≿',
'SuchThat': '∋',
'Sum': '∑',
'Sup': 'â‹‘',
'Superset': '⊃',
'SupersetEqual': '⊇',
'Supset': 'â‹‘',
'THORN': '�',
'TRADE': 'â„¢',
'TSHcy': 'Ğ‹',
'TScy': 'Ц',
'Tab': '\t',
'Tau': 'Τ',
'Tcaron': 'Ť',
'Tcedil': 'Å¢',
'Tcy': 'Ğ¢',
'Tfr': '�',
'Therefore': '∴',
'Theta': 'Θ',
'ThickSpace': '\u205f\u200a',
'ThinSpace': '\u2009',
'Tilde': '∼',
'TildeEqual': '≃',
'TildeFullEqual': '≅',
'TildeTilde': '≈',
'Topf': '�',
'TripleDot': '⃛',
'Tscr': '�',
'Tstrok': 'Ŧ',
'Uacute': 'Ú',
'Uarr': '↟',
'Uarrocir': '⥉',
'Ubrcy': '�',
'Ubreve': 'Ŭ',
'Ucirc': 'Û',
'Ucy': 'Ğ£',
'Udblac': 'Ű',
'Ufr': '�',
'Ugrave': 'Ù',
'Umacr': 'Ū',
'UnderBar': '_',
'UnderBrace': '�',
'UnderBracket': '�',
'UnderParenthesis': '�',
'Union': '⋃',
'UnionPlus': '�',
'Uogon': 'Ų',
'Uopf': '�',
'UpArrow': '↑',
'UpArrowBar': '⤒',
'UpArrowDownArrow': '⇅',
'UpDownArrow': '↕',
'UpEquilibrium': '⥮',
'UpTee': '⊥',
'UpTeeArrow': '↥',
'Uparrow': '⇑',
'Updownarrow': '⇕',
'UpperLeftArrow': '↖',
'UpperRightArrow': '↗',
'Upsi': 'Ï’',
'Upsilon': 'Î¥',
'Uring': 'Å®',
'Uscr': '�',
'Utilde': 'Ũ',
'Uuml': 'Ü',
'VDash': '⊫',
'Vbar': 'â««',
'Vcy': 'Ğ’',
'Vdash': '⊩',
'Vdashl': '⫦',
'Vee': '�',
'Verbar': '‖',
'Vert': '‖',
'VerticalBar': '∣',
'VerticalLine': '|',
'VerticalSeparator': '�',
'VerticalTilde': '≀',
'VeryThinSpace': '\u200a',
'Vfr': '�',
'Vopf': '�',
'Vscr': '�',
'Vvdash': '⊪',
'Wcirc': 'Å´',
'Wedge': 'â‹€',
'Wfr': '�',
'Wopf': '�',
'Wscr': '�',
'Xfr': '�',
'Xi': '�',
'Xopf': '�',
'Xscr': '�',
'YAcy': 'Я',
'YIcy': 'Ї',
'YUcy': 'Ğ®',
'Yacute': '�',
'Ycirc': 'Ŷ',
'Ycy': 'Ğ«',
'Yfr': '�',
'Yopf': '�',
'Yscr': '�',
'Yuml': 'Ÿ',
'ZHcy': 'Ğ–',
'Zacute': 'Ź',
'Zcaron': 'Ž',
'Zcy': 'Ğ—',
'Zdot': 'Å»',
'ZeroWidthSpace': '\u200b',
'Zeta': 'Ζ',
'Zfr': 'ℨ',
'Zopf': 'ℤ',
'Zscr': '�',
'aacute': 'á',
'abreve': 'ă',
'ac': '∾',
'acE': '∾̳',
'acd': '∿',
'acirc': 'â',
'acute': '´',
'acy': 'а',
'aelig': 'æ',
'af': '\u2061',
'afr': '�',
'agrave': 'Ã ',
'alefsym': 'ℵ',
'aleph': 'ℵ',
'alpha': 'α',
'amacr': '�',
'amalg': '⨿',
'amp': '&',
'and': '∧',
'andand': 'â©•',
'andd': '⩜',
'andslope': '⩘',
'andv': '⩚',
'ang': '∠',
'ange': '⦤',
'angle': '∠',
'angmsd': '∡',
'angmsdaa': '⦨',
'angmsdab': '⦩',
'angmsdac': '⦪',
'angmsdad': '⦫',
'angmsdae': '⦬',
'angmsdaf': 'â¦',
'angmsdag': '⦮',
'angmsdah': '⦯',
'angrt': '∟',
'angrtvb': '⊾',
'angrtvbd': '�',
'angsph': '∢',
'angst': 'Ã…',
'angzarr': '�',
'aogon': 'Ä…',
'aopf': '�',
'ap': '≈',
'apE': 'â©°',
'apacir': '⩯',
'ape': '≊',
'apid': '≋',
'apos': "'",
'approx': '≈',
'approxeq': '≊',
'aring': 'Ã¥',
'ascr': '�',
'ast': '*',
'asymp': '≈',
'asympeq': '�',
'atilde': 'ã',
'auml': 'ä',
'awconint': '∳',
'awint': '⨑',
'bNot': 'â«',
'backcong': '≌',
'backepsilon': '϶',
'backprime': '‵',
'backsim': '∽',
'backsimeq': '�',
'barvee': '⊽',
'barwed': '⌅',
'barwedge': '⌅',
'bbrk': '�',
'bbrktbrk': '�',
'bcong': '≌',
'bcy': 'б',
'bdquo': '�',
'becaus': '∵',
'because': '∵',
'bemptyv': '⦰',
'bepsi': '϶',
'bernou': 'ℬ',
'beta': 'β',
'beth': 'â„¶',
'between': '≬',
'bfr': '�',
'bigcap': 'â‹‚',
'bigcirc': 'â—¯',
'bigcup': '⋃',
'bigodot': '⨀',
'bigoplus': '�',
'bigotimes': '⨂',
'bigsqcup': '⨆',
'bigstar': '★',
'bigtriangledown': 'â–½',
'bigtriangleup': 'â–³',
'biguplus': '⨄',
'bigvee': '�',
'bigwedge': 'â‹€',
'bkarow': '�',
'blacklozenge': 'â§«',
'blacksquare': 'â–ª',
'blacktriangle': 'â–´',
'blacktriangledown': 'â–¾',
'blacktriangleleft': 'â—‚',
'blacktriangleright': 'â–¸',
'blank': '�',
'blk12': 'â–’',
'blk14': 'â–‘',
'blk34': 'â–“',
'block': 'â–ˆ',
'bne': '=⃥',
'bnequiv': '≡⃥',
'bnot': '�',
'bopf': '�',
'bot': '⊥',
'bottom': '⊥',
'bowtie': '⋈',
'boxDL': 'â•—',
'boxDR': 'â•”',
'boxDl': 'â•–',
'boxDr': 'â•“',
'boxH': '�',
'boxHD': '╦',
'boxHU': 'â•©',
'boxHd': '╤',
'boxHu': 'â•§',
'boxUL': '�',
'boxUR': '╚',
'boxUl': '╜',
'boxUr': 'â•™',
'boxV': 'â•‘',
'boxVH': '╬',
'boxVL': 'â•£',
'boxVR': 'â• ',
'boxVh': 'â•«',
'boxVl': 'â•¢',
'boxVr': '╟',
'boxbox': '⧉',
'boxdL': 'â••',
'boxdR': 'â•’',
'boxdl': '�',
'boxdr': '┌',
'boxh': '─',
'boxhD': 'â•¥',
'boxhU': '╨',
'boxhd': '┬',
'boxhu': 'â”´',
'boxminus': '⊟',
'boxplus': '�',
'boxtimes': '⊠',
'boxuL': 'â•›',
'boxuR': '╘',
'boxul': '┘',
'boxur': 'â””',
'boxv': '│',
'boxvH': '╪',
'boxvL': 'â•¡',
'boxvR': '�',
'boxvh': '┼',
'boxvl': '┤',
'boxvr': '├',
'bprime': '‵',
'breve': '˘',
'brvbar': '¦',
'bscr': '�',
'bsemi': '�',
'bsim': '∽',
'bsime': '�',
'bsol': '\\',
'bsolb': 'â§…',
'bsolhsub': '⟈',
'bull': '•',
'bullet': '•',
'bump': '�',
'bumpE': '⪮',
'bumpe': '�',
'bumpeq': '�',
'cacute': 'ć',
'cap': '∩',
'capand': 'â©„',
'capbrcup': '⩉',
'capcap': 'â©‹',
'capcup': '⩇',
'capdot': 'â©€',
'caps': '∩︀',
'caret': '�',
'caron': 'ˇ',
'ccaps': '�',
'ccaron': '�',
'ccedil': 'ç',
'ccirc': 'ĉ',
'ccups': '⩌',
'ccupssm': '�',
'cdot': 'Ä‹',
'cedil': '¸',
'cemptyv': '⦲',
'cent': '¢',
'centerdot': '·',
'cfr': 'ğ�” ',
'chcy': 'ч',
'check': '✓',
'checkmark': '✓',
'chi': 'χ',
'cir': 'â—‹',
'cirE': '⧃',
'circ': 'ˆ',
'circeq': '≗',
'circlearrowleft': '↺',
'circlearrowright': '↻',
'circledR': '®',
'circledS': 'Ⓢ',
'circledast': '⊛',
'circledcirc': '⊚',
'circleddash': '�',
'cire': '≗',
'cirfnint': '�',
'cirmid': '⫯',
'cirscir': 'â§‚',
'clubs': '♣',
'clubsuit': '♣',
'colon': ':',
'colone': '≔',
'coloneq': '≔',
'comma': ',',
'commat': '@',
'comp': '�',
'compfn': '∘',
'complement': '�',
'complexes': 'â„‚',
'cong': '≅',
'congdot': 'â©',
'conint': '∮',
'copf': '�',
'coprod': '�',
'copy': '©',
'copysr': 'â„—',
'crarr': '↵',
'cross': '✗',
'cscr': '�',
'csub': '�',
'csube': 'â«‘',
'csup': '�',
'csupe': 'â«’',
'ctdot': '⋯',
'cudarrl': '⤸',
'cudarrr': '⤵',
'cuepr': '�',
'cuesc': '⋟',
'cularr': '↶',
'cularrp': '⤽',
'cup': '∪',
'cupbrcap': '⩈',
'cupcap': '⩆',
'cupcup': '⩊',
'cupdot': '�',
'cupor': 'â©…',
'cups': '∪︀',
'curarr': '↷',
'curarrm': '⤼',
'curlyeqprec': '�',
'curlyeqsucc': '⋟',
'curlyvee': '�',
'curlywedge': '�',
'curren': '¤',
'curvearrowleft': '↶',
'curvearrowright': '↷',
'cuvee': '�',
'cuwed': '�',
'cwconint': '∲',
'cwint': '∱',
'cylcty': 'âŒ',
'dArr': '⇓',
'dHar': '⥥',
'dagger': '†',
'daleth': 'ℸ',
'darr': '↓',
'dash': '�',
'dashv': '⊣',
'dbkarow': '�',
'dblac': '�',
'dcaron': '�',
'dcy': 'Ğ´',
'dd': 'â…†',
'ddagger': '‡',
'ddarr': '⇊',
'ddotseq': 'â©·',
'deg': '°',
'delta': 'δ',
'demptyv': '⦱',
'dfisht': '⥿',
'dfr': '�',
'dharl': '⇃',
'dharr': '⇂',
'diam': 'â‹„',
'diamond': 'â‹„',
'diamondsuit': '♦',
'diams': '♦',
'die': '¨',
'digamma': '�',
'disin': '⋲',
'div': '÷',
'divide': '÷',
'divideontimes': '⋇',
'divonx': '⋇',
'djcy': 'Ñ’',
'dlcorn': '�',
'dlcrop': '�',
'dollar': '$',
'dopf': '�',
'dot': 'Ë™',
'doteq': '�',
'doteqdot': '≑',
'dotminus': '∸',
'dotplus': '∔',
'dotsquare': '⊡',
'doublebarwedge': '⌆',
'downarrow': '↓',
'downdownarrows': '⇊',
'downharpoonleft': '⇃',
'downharpoonright': '⇂',
'drbkarow': '�',
'drcorn': '⌟',
'drcrop': '⌌',
'dscr': '�',
'dscy': 'Ñ•',
'dsol': 'â§¶',
'dstrok': 'Ä‘',
'dtdot': '⋱',
'dtri': 'â–¿',
'dtrif': 'â–¾',
'duarr': '⇵',
'duhar': '⥯',
'dwangle': '⦦',
'dzcy': 'ÑŸ',
'dzigrarr': '⟿',
'eDDot': 'â©·',
'eDot': '≑',
'eacute': 'é',
'easter': 'â©®',
'ecaron': 'Ä›',
'ecir': '≖',
'ecirc': 'ê',
'ecolon': '≕',
'ecy': '�',
'edot': 'Ä—',
'ee': 'â…‡',
'efDot': '≒',
'efr': '�',
'eg': '⪚',
'egrave': 'è',
'egs': '⪖',
'egsdot': '⪘',
'el': '⪙',
'elinters': '�',
'ell': 'â„“',
'els': '⪕',
'elsdot': '⪗',
'emacr': 'Ä“',
'empty': '∅',
'emptyset': '∅',
'emptyv': '∅',
'emsp': '\u2003',
'emsp13': '\u2004',
'emsp14': '\u2005',
'eng': 'Å‹',
'ensp': '\u2002',
'eogon': 'Ä™',
'eopf': '�',
'epar': 'â‹•',
'eparsl': 'â§£',
'eplus': '⩱',
'epsi': 'ε',
'epsilon': 'ε',
'epsiv': 'ϵ',
'eqcirc': '≖',
'eqcolon': '≕',
'eqsim': '≂',
'eqslantgtr': '⪖',
'eqslantless': '⪕',
'equals': '=',
'equest': '≟',
'equiv': '≡',
'equivDD': '⩸',
'eqvparsl': 'â§¥',
'erDot': '≓',
'erarr': '⥱',
'escr': 'ℯ',
'esdot': '�',
'esim': '≂',
'eta': 'η',
'eth': 'ð',
'euml': 'ë',
'euro': '€',
'excl': '!',
'exist': '∃',
'expectation': 'â„°',
'exponentiale': 'â…‡',
'fallingdotseq': '≒',
'fcy': 'Ñ„',
'female': '♀',
'ffilig': 'ffi',
'fflig': 'ff',
'ffllig': 'ffl',
'ffr': '�',
'filig': '�',
'fjlig': 'fj',
'flat': 'â™',
'fllig': 'fl',
'fltns': 'â–±',
'fnof': 'Æ’',
'fopf': '�',
'forall': '∀',
'fork': 'â‹”',
'forkv': 'â«™',
'fpartint': '�',
'frac12': '½',
'frac13': 'â…“',
'frac14': '¼',
'frac15': 'â…•',
'frac16': 'â…™',
'frac18': 'â…›',
'frac23': 'â…”',
'frac25': 'â…–',
'frac34': '¾',
'frac35': 'â…—',
'frac38': '⅜',
'frac45': 'â…˜',
'frac56': 'â…š',
'frac58': 'â…�',
'frac78': 'â…�',
'frasl': '�',
'frown': '⌢',
'fscr': '�',
'gE': '≧',
'gEl': '⪌',
'gacute': 'ǵ',
'gamma': 'γ',
'gammad': '�',
'gap': '⪆',
'gbreve': 'ÄŸ',
'gcirc': '�',
'gcy': 'г',
'gdot': 'Ä¡',
'ge': '≥',
'gel': 'â‹›',
'geq': '≥',
'geqq': '≧',
'geqslant': '⩾',
'ges': '⩾',
'gescc': '⪩',
'gesdot': '⪀',
'gesdoto': '⪂',
'gesdotol': '⪄',
'gesl': '⋛︀',
'gesles': '⪔',
'gfr': '�',
'gg': '≫',
'ggg': 'â‹™',
'gimel': 'â„·',
'gjcy': 'Ñ“',
'gl': '≷',
'glE': '⪒',
'gla': '⪥',
'glj': '⪤',
'gnE': '≩',
'gnap': '⪊',
'gnapprox': '⪊',
'gne': '⪈',
'gneq': '⪈',
'gneqq': '≩',
'gnsim': 'â‹§',
'gopf': '�',
'grave': '`',
'gscr': 'ℊ',
'gsim': '≳',
'gsime': '�',
'gsiml': '�',
'gt': '>',
'gtcc': '⪧',
'gtcir': '⩺',
'gtdot': 'â‹—',
'gtlPar': '⦕',
'gtquest': '⩼',
'gtrapprox': '⪆',
'gtrarr': '⥸',
'gtrdot': 'â‹—',
'gtreqless': 'â‹›',
'gtreqqless': '⪌',
'gtrless': '≷',
'gtrsim': '≳',
'gvertneqq': '≩︀',
'gvnE': '≩︀',
'hArr': '⇔',
'hairsp': '\u200a',
'half': '½',
'hamilt': 'â„‹',
'hardcy': 'ÑŠ',
'harr': '↔',
'harrcir': '⥈',
'harrw': 'â†',
'hbar': '�',
'hcirc': 'Ä¥',
'hearts': '♥',
'heartsuit': '♥',
'hellip': '…',
'hellips': '…',
'hercon': '⊹',
'hfr': '�',
'hksearow': '⤥',
'hkswarow': '⤦',
'hoarr': '⇿',
'homtht': '∻',
'hookleftarrow': '↩',
'hookrightarrow': '↪',
'hopf': '�',
'horbar': '―',
'hscr': '�',
'hslash': '�',
'hstrok': 'ħ',
'hybull': '�',
'hyphen': '�',
'iacute': 'Ã',
'ic': '\u2063',
'icirc': 'î',
'icy': 'и',
'iecy': 'е',
'iexcl': '¡',
'iff': '⇔',
'ifr': '�',
'igrave': 'ì',
'ii': 'â…ˆ',
'iiiint': '⨌',
'iiint': 'âˆ',
'iinfin': '⧜',
'iiota': 'â„©',
'ijlig': 'ij',
'imacr': 'Ä«',
'image': 'â„‘',
'imagline': '�',
'imagpart': 'â„‘',
'imath': 'ı',
'imof': '⊷',
'imped': 'Ƶ',
'in': '∈',
'incare': 'â„…',
'infin': '�',
'infintie': '�',
'inodot': 'ı',
'int': '∫',
'intcal': '⊺',
'integers': 'ℤ',
'intercal': '⊺',
'intlarhk': '⨗',
'intprod': '⨼',
'iocy': 'Ñ‘',
'iogon': 'į',
'iopf': '�',
'iota': 'ι',
'iprod': '⨼',
'iquest': '¿',
'iscr': '�',
'isin': '∈',
'isinE': '⋹',
'isindot': '⋵',
'isins': 'â‹´',
'isinsv': '⋳',
'isinv': '∈',
'it': '\u2062',
'itilde': 'Ä©',
'iukcy': 'Ñ–',
'iuml': 'ï',
'jcirc': 'ĵ',
'jcy': 'й',
'jfr': '�',
'jmath': 'È·',
'jopf': '�',
'jscr': '�',
'jsercy': 'ј',
'jukcy': 'Ñ”',
'kappa': 'κ',
'kappav': 'ϰ',
'kcedil': 'Ä·',
'kcy': 'к',
'kfr': '�',
'kgreen': 'ĸ',
'khcy': 'Ñ…',
'kjcy': 'ќ',
'kopf': '�',
'kscr': '�',
'lAarr': '⇚',
'lArr': '�',
'lAtail': '⤛',
'lBarr': '�',
'lE': '≦',
'lEg': '⪋',
'lHar': '⥢',
'lacute': 'ĺ',
'laemptyv': '⦴',
'lagran': 'â„’',
'lambda': 'λ',
'lang': '⟨',
'langd': '⦑',
'langle': '⟨',
'lap': '⪅',
'laquo': '«',
'larr': '�',
'larrb': '⇤',
'larrbfs': '⤟',
'larrfs': '�',
'larrhk': '↩',
'larrlp': '↫',
'larrpl': '⤹',
'larrsim': '⥳',
'larrtl': '↢',
'lat': '⪫',
'latail': '⤙',
'late': 'âª',
'lates': 'âªï¸€',
'lbarr': '⤌',
'lbbrk': '�',
'lbrace': '{',
'lbrack': '[',
'lbrke': '⦋',
'lbrksld': '�',
'lbrkslu': '�',
'lcaron': 'ľ',
'lcedil': 'ļ',
'lceil': '⌈',
'lcub': '{',
'lcy': 'Ğ»',
'ldca': '⤶',
'ldquo': '“',
'ldquor': '�',
'ldrdhar': '⥧',
'ldrushar': '⥋',
'ldsh': '↲',
'le': '≤',
'leftarrow': '�',
'leftarrowtail': '↢',
'leftharpoondown': '↽',
'leftharpoonup': '↼',
'leftleftarrows': '⇇',
'leftrightarrow': '↔',
'leftrightarrows': '⇆',
'leftrightharpoons': '⇋',
'leftrightsquigarrow': 'â†',
'leftthreetimes': 'â‹‹',
'leg': '⋚',
'leq': '≤',
'leqq': '≦',
'leqslant': '⩽',
'les': '⩽',
'lescc': '⪨',
'lesdot': 'â©¿',
'lesdoto': '�',
'lesdotor': '⪃',
'lesg': '⋚︀',
'lesges': '⪓',
'lessapprox': '⪅',
'lessdot': 'â‹–',
'lesseqgtr': '⋚',
'lesseqqgtr': '⪋',
'lessgtr': '≶',
'lesssim': '≲',
'lfisht': '⥼',
'lfloor': '⌊',
'lfr': '�',
'lg': '≶',
'lgE': '⪑',
'lhard': '↽',
'lharu': '↼',
'lharul': '⥪',
'lhblk': 'â–„',
'ljcy': 'Ñ™',
'll': '≪',
'llarr': '⇇',
'llcorner': '�',
'llhard': '⥫',
'lltri': 'â—º',
'lmidot': 'Å€',
'lmoust': '�',
'lmoustache': '�',
'lnE': '≨',
'lnap': '⪉',
'lnapprox': '⪉',
'lne': '⪇',
'lneq': '⪇',
'lneqq': '≨',
'lnsim': '⋦',
'loang': '⟬',
'loarr': '⇽',
'lobrk': '⟦',
'longleftarrow': '⟵',
'longleftrightarrow': '⟷',
'longmapsto': '⟼',
'longrightarrow': '⟶',
'looparrowleft': '↫',
'looparrowright': '↬',
'lopar': '⦅',
'lopf': '�',
'loplus': 'â¨',
'lotimes': '⨴',
'lowast': '∗',
'lowbar': '_',
'loz': 'â—Š',
'lozenge': 'â—Š',
'lozf': 'â§«',
'lpar': '(',
'lparlt': '⦓',
'lrarr': '⇆',
'lrcorner': '⌟',
'lrhar': '⇋',
'lrhard': 'â¥',
'lrm': '\u200e',
'lrtri': '⊿',
'lsaquo': '‹',
'lscr': '�',
'lsh': '↰',
'lsim': '≲',
'lsime': '�',
'lsimg': '�',
'lsqb': '[',
'lsquo': '‘',
'lsquor': '‚',
'lstrok': 'Å‚',
'lt': '<',
'ltcc': '⪦',
'ltcir': '⩹',
'ltdot': 'â‹–',
'lthree': 'â‹‹',
'ltimes': '⋉',
'ltlarr': '⥶',
'ltquest': 'â©»',
'ltrPar': '⦖',
'ltri': 'â—ƒ',
'ltrie': '⊴',
'ltrif': 'â—‚',
'lurdshar': '⥊',
'luruhar': '⥦',
'lvertneqq': '≨︀',
'lvnE': '≨︀',
'mDDot': '∺',
'macr': '¯',
'male': '♂',
'malt': '✠',
'maltese': '✠',
'map': '↦',
'mapsto': '↦',
'mapstodown': '↧',
'mapstoleft': '↤',
'mapstoup': '↥',
'marker': 'â–®',
'mcomma': '⨩',
'mcy': 'м',
'mdash': '—',
'measuredangle': '∡',
'mfr': '�',
'mho': 'â„§',
'micro': 'µ',
'mid': '∣',
'midast': '*',
'midcir': 'â«°',
'middot': '·',
'minus': '−',
'minusb': '⊟',
'minusd': '∸',
'minusdu': '⨪',
'mlcp': 'â«›',
'mldr': '…',
'mnplus': '∓',
'models': '⊧',
'mopf': '�',
'mp': '∓',
'mscr': '�',
'mstpos': '∾',
'mu': 'μ',
'multimap': '⊸',
'mumap': '⊸',
'nGg': '⋙̸',
'nGt': '≫⃒',
'nGtv': '≫̸',
'nLeftarrow': '�',
'nLeftrightarrow': '�',
'nLl': '⋘̸',
'nLt': '≪⃒',
'nLtv': '≪̸',
'nRightarrow': '�',
'nVDash': '⊯',
'nVdash': '⊮',
'nabla': '∇',
'nacute': 'Å„',
'nang': '∠⃒',
'nap': '≉',
'napE': '⩰̸',
'napid': '≋̸',
'napos': 'ʼn',
'napprox': '≉',
'natur': 'â™®',
'natural': 'â™®',
'naturals': 'â„•',
'nbsp': '\xa0',
'nbump': '�̸',
'nbumpe': '�̸',
'ncap': '⩃',
'ncaron': 'ň',
'ncedil': 'ņ',
'ncong': '≇',
'ncongdot': 'â©Ì¸',
'ncup': 'â©‚',
'ncy': 'н',
'ndash': '–',
'ne': '≠',
'neArr': '⇗',
'nearhk': '⤤',
'nearr': '↗',
'nearrow': '↗',
'nedot': '�̸',
'nequiv': '≢',
'nesear': '⤨',
'nesim': '≂̸',
'nexist': '∄',
'nexists': '∄',
'nfr': '�',
'ngE': '≧̸',
'nge': '≱',
'ngeq': '≱',
'ngeqq': '≧̸',
'ngeqslant': '⩾̸',
'nges': '⩾̸',
'ngsim': '≵',
'ngt': '≯',
'ngtr': '≯',
'nhArr': '�',
'nharr': '↮',
'nhpar': '⫲',
'ni': '∋',
'nis': '⋼',
'nisd': '⋺',
'niv': '∋',
'njcy': 'Ñš',
'nlArr': '�',
'nlE': '≦̸',
'nlarr': '↚',
'nldr': '‥',
'nle': '≰',
'nleftarrow': '↚',
'nleftrightarrow': '↮',
'nleq': '≰',
'nleqq': '≦̸',
'nleqslant': '⩽̸',
'nles': '⩽̸',
'nless': '≮',
'nlsim': '≴',
'nlt': '≮',
'nltri': '⋪',
'nltrie': '⋬',
'nmid': '∤',
'nopf': '�',
'not': '¬',
'notin': '∉',
'notinE': '⋹̸',
'notindot': '⋵̸',
'notinva': '∉',
'notinvb': 'â‹·',
'notinvc': 'â‹¶',
'notni': '∌',
'notniva': '∌',
'notnivb': '⋾',
'notnivc': '⋽',
'npar': '∦',
'nparallel': '∦',
'nparsl': '⫽⃥',
'npart': '∂̸',
'npolint': '⨔',
'npr': '⊀',
'nprcue': 'â‹ ',
'npre': '⪯̸',
'nprec': '⊀',
'npreceq': '⪯̸',
'nrArr': '�',
'nrarr': '↛',
'nrarrc': '⤳̸',
'nrarrw': '�̸',
'nrightarrow': '↛',
'nrtri': 'â‹«',
'nrtrie': 'â‹',
'nsc': '�',
'nsccue': 'â‹¡',
'nsce': '⪰̸',
'nscr': '�',
'nshortmid': '∤',
'nshortparallel': '∦',
'nsim': '�',
'nsime': '≄',
'nsimeq': '≄',
'nsmid': '∤',
'nspar': '∦',
'nsqsube': 'â‹¢',
'nsqsupe': 'â‹£',
'nsub': '⊄',
'nsubE': '⫅̸',
'nsube': '⊈',
'nsubset': '⊂⃒',
'nsubseteq': '⊈',
'nsubseteqq': '⫅̸',
'nsucc': '�',
'nsucceq': '⪰̸',
'nsup': '⊅',
'nsupE': '⫆̸',
'nsupe': '⊉',
'nsupset': '⊃⃒',
'nsupseteq': '⊉',
'nsupseteqq': '⫆̸',
'ntgl': '≹',
'ntilde': 'ñ',
'ntlg': '≸',
'ntriangleleft': '⋪',
'ntrianglelefteq': '⋬',
'ntriangleright': 'â‹«',
'ntrianglerighteq': 'â‹',
'nu': 'ν',
'num': '#',
'numero': 'â„–',
'numsp': '\u2007',
'nvDash': 'âŠ',
'nvHarr': '⤄',
'nvap': '�⃒',
'nvdash': '⊬',
'nvge': '≥⃒',
'nvgt': '>⃒',
'nvinfin': '�',
'nvlArr': '⤂',
'nvle': '≤⃒',
'nvlt': '<⃒',
'nvltrie': '⊴⃒',
'nvrArr': '⤃',
'nvrtrie': '⊵⃒',
'nvsim': '∼⃒',
'nwArr': '⇖',
'nwarhk': '⤣',
'nwarr': '↖',
'nwarrow': '↖',
'nwnear': '⤧',
'oS': 'Ⓢ',
'oacute': 'ó',
'oast': '⊛',
'ocir': '⊚',
'ocirc': 'ô',
'ocy': 'о',
'odash': '�',
'odblac': 'Å‘',
'odiv': '⨸',
'odot': '⊙',
'odsold': '⦼',
'oelig': 'Å“',
'ofcir': '⦿',
'ofr': '�',
'ogon': 'Ë›',
'ograve': 'ò',
'ogt': '�',
'ohbar': '⦵',
'ohm': 'Ω',
'oint': '∮',
'olarr': '↺',
'olcir': '⦾',
'olcross': '⦻',
'oline': '‾',
'olt': 'â§€',
'omacr': '�',
'omega': 'ω',
'omicron': 'ο',
'omid': '⦶',
'ominus': '⊖',
'oopf': 'ğ�• ',
'opar': '⦷',
'operp': '⦹',
'oplus': '⊕',
'or': '∨',
'orarr': '↻',
'ord': '�',
'order': 'â„´',
'orderof': 'â„´',
'ordf': 'ª',
'ordm': 'º',
'origof': '⊶',
'oror': 'â©–',
'orslope': 'â©—',
'orv': 'â©›',
'oscr': 'â„´',
'oslash': 'ø',
'osol': '⊘',
'otilde': 'õ',
'otimes': '⊗',
'otimesas': '⨶',
'ouml': 'ö',
'ovbar': '⌽',
'par': '∥',
'para': '¶',
'parallel': '∥',
'parsim': '⫳',
'parsl': '⫽',
'part': '∂',
'pcy': 'Ğ¿',
'percnt': '%',
'period': '.',
'permil': '‰',
'perp': '⊥',
'pertenk': '‱',
'pfr': 'ğ�”',
'phi': 'φ',
'phiv': 'Ï•',
'phmmat': 'ℳ',
'phone': '�',
'pi': 'Ï€',
'pitchfork': 'â‹”',
'piv': 'Ï–',
'planck': '�',
'planckh': '�',
'plankv': '�',
'plus': '+',
'plusacir': '⨣',
'plusb': '�',
'pluscir': '⨢',
'plusdo': '∔',
'plusdu': '⨥',
'pluse': '⩲',
'plusmn': '±',
'plussim': '⨦',
'plustwo': '⨧',
'pm': '±',
'pointint': '⨕',
'popf': '�',
'pound': '£',
'pr': '≺',
'prE': '⪳',
'prap': '⪷',
'prcue': '≼',
'pre': '⪯',
'prec': '≺',
'precapprox': '⪷',
'preccurlyeq': '≼',
'preceq': '⪯',
'precnapprox': '⪹',
'precneqq': '⪵',
'precnsim': '⋨',
'precsim': '≾',
'prime': '′',
'primes': 'â„™',
'prnE': '⪵',
'prnap': '⪹',
'prnsim': '⋨',
'prod': '�',
'profalar': '⌮',
'profline': '⌒',
'profsurf': '⌓',
'prop': '�',
'propto': '�',
'prsim': '≾',
'prurel': '⊰',
'pscr': '�',
'psi': 'ψ',
'puncsp': '\u2008',
'qfr': '�',
'qint': '⨌',
'qopf': '�',
'qprime': '�',
'qscr': '�',
'quaternions': '�',
'quatint': '⨖',
'quest': '?',
'questeq': '≟',
'quot': '"',
'rAarr': '⇛',
'rArr': '⇒',
'rAtail': '⤜',
'rBarr': '�',
'rHar': '⥤',
'race': '∽̱',
'racute': 'Å•',
'radic': '√',
'raemptyv': '⦳',
'rang': '⟩',
'rangd': '⦒',
'range': '⦥',
'rangle': '⟩',
'raquo': '»',
'rarr': '→',
'rarrap': '⥵',
'rarrb': '⇥',
'rarrbfs': '⤠',
'rarrc': '⤳',
'rarrfs': '�',
'rarrhk': '↪',
'rarrlp': '↬',
'rarrpl': '⥅',
'rarrsim': '⥴',
'rarrtl': '↣',
'rarrw': '�',
'ratail': '⤚',
'ratio': '∶',
'rationals': 'ℚ',
'rbarr': '�',
'rbbrk': '�',
'rbrace': '}',
'rbrack': ']',
'rbrke': '⦌',
'rbrksld': '�',
'rbrkslu': '�',
'rcaron': 'Å™',
'rcedil': 'Å—',
'rceil': '⌉',
'rcub': '}',
'rcy': 'Ñ€',
'rdca': '⤷',
'rdldhar': '⥩',
'rdquo': '�',
'rdquor': '�',
'rdsh': '↳',
'real': 'ℜ',
'realine': 'â„›',
'realpart': 'ℜ',
'reals': '�',
'rect': 'â–',
'reg': '®',
'rfisht': '⥽',
'rfloor': '⌋',
'rfr': '�',
'rhard': '�',
'rharu': '⇀',
'rharul': '⥬',
'rho': '�',
'rhov': 'ϱ',
'rightarrow': '→',
'rightarrowtail': '↣',
'rightharpoondown': '�',
'rightharpoonup': '⇀',
'rightleftarrows': '⇄',
'rightleftharpoons': '⇌',
'rightrightarrows': '⇉',
'rightsquigarrow': '�',
'rightthreetimes': '⋌',
'ring': 'Ëš',
'risingdotseq': '≓',
'rlarr': '⇄',
'rlhar': '⇌',
'rlm': '\u200f',
'rmoust': '�',
'rmoustache': '�',
'rnmid': 'â«®',
'roang': 'âŸ',
'roarr': '⇾',
'robrk': '⟧',
'ropar': '⦆',
'ropf': '�',
'roplus': '⨮',
'rotimes': '⨵',
'rpar': ')',
'rpargt': '⦔',
'rppolint': '⨒',
'rrarr': '⇉',
'rsaquo': '›',
'rscr': '�',
'rsh': '↱',
'rsqb': ']',
'rsquo': '’',
'rsquor': '’',
'rthree': '⋌',
'rtimes': '⋊',
'rtri': 'â–¹',
'rtrie': '⊵',
'rtrif': 'â–¸',
'rtriltri': '�',
'ruluhar': '⥨',
'rx': '�',
'sacute': 'Å›',
'sbquo': '‚',
'sc': '≻',
'scE': '⪴',
'scap': '⪸',
'scaron': 'Å¡',
'sccue': '≽',
'sce': '⪰',
'scedil': 'ÅŸ',
'scirc': '�',
'scnE': '⪶',
'scnap': '⪺',
'scnsim': 'â‹©',
'scpolint': '⨓',
'scsim': '≿',
'scy': '�',
'sdot': 'â‹…',
'sdotb': '⊡',
'sdote': '⩦',
'seArr': '⇘',
'searhk': '⤥',
'searr': '↘',
'searrow': '↘',
'sect': '§',
'semi': ';',
'seswar': '⤩',
'setminus': '∖',
'setmn': '∖',
'sext': '✶',
'sfr': '�',
'sfrown': '⌢',
'sharp': '♯',
'shchcy': 'щ',
'shcy': 'ш',
'shortmid': '∣',
'shortparallel': '∥',
'shy': '\xad',
'sigma': 'σ',
'sigmaf': 'Ï‚',
'sigmav': 'Ï‚',
'sim': '∼',
'simdot': '⩪',
'sime': '≃',
'simeq': '≃',
'simg': '�',
'simgE': '⪠',
'siml': '�',
'simlE': '⪟',
'simne': '≆',
'simplus': '⨤',
'simrarr': '⥲',
'slarr': '�',
'smallsetminus': '∖',
'smashp': '⨳',
'smeparsl': '⧤',
'smid': '∣',
'smile': '⌣',
'smt': '⪪',
'smte': '⪬',
'smtes': '⪬︀',
'softcy': 'ь',
'sol': '/',
'solb': 'â§„',
'solbar': '⌿',
'sopf': '�',
'spades': 'â™ ',
'spadesuit': 'â™ ',
'spar': '∥',
'sqcap': '⊓',
'sqcaps': '⊓︀',
'sqcup': '⊔',
'sqcups': '⊔︀',
'sqsub': '�',
'sqsube': '⊑',
'sqsubset': '�',
'sqsubseteq': '⊑',
'sqsup': '�',
'sqsupe': '⊒',
'sqsupset': '�',
'sqsupseteq': '⊒',
'squ': 'â–¡',
'square': 'â–¡',
'squarf': 'â–ª',
'squf': 'â–ª',
'squot': "'",
'srarr': '→',
'sscr': '�',
'ssetmn': '∖',
'ssmile': '⌣',
'sstarf': '⋆',
'star': '☆',
'starf': '★',
'straightepsilon': 'ϵ',
'straightphi': 'Ï•',
'strns': '¯',
'sub': '⊂',
'subE': 'â«…',
'subdot': '⪽',
'sube': '⊆',
'subedot': '⫃',
'submult': '�',
'subnE': 'â«‹',
'subne': '⊊',
'subplus': '⪿',
'subrarr': '⥹',
'subset': '⊂',
'subseteq': '⊆',
'subseteqq': 'â«…',
'subsetneq': '⊊',
'subsetneqq': 'â«‹',
'subsim': '⫇',
'subsub': 'â«•',
'subsup': 'â«“',
'succ': '≻',
'succapprox': '⪸',
'succcurlyeq': '≽',
'succeq': '⪰',
'succnapprox': '⪺',
'succneqq': '⪶',
'succnsim': 'â‹©',
'succsim': '≿',
'sum': '∑',
'sung': '♪',
'sup': '⊃',
'sup1': '¹',
'sup2': '²',
'sup3': '³',
'supE': '⫆',
'supdot': '⪾',
'supdsub': '⫘',
'supe': '⊇',
'supedot': 'â«„',
'suphsol': '⟉',
'suphsub': 'â«—',
'suplarr': '⥻',
'supmult': 'â«‚',
'supnE': '⫌',
'supne': '⊋',
'supplus': 'â«€',
'supset': '⊃',
'supseteq': '⊇',
'supseteqq': '⫆',
'supsetneq': '⊋',
'supsetneqq': '⫌',
'supsim': '⫈',
'supsub': 'â«”',
'supsup': 'â«–',
'swArr': '⇙',
'swarhk': '⤦',
'swarr': '↙',
'swarrow': '↙',
'swnwar': '⤪',
'szlig': 'ß',
'target': '⌖',
'tau': 'Ï„',
'tbrk': '�',
'tcaron': 'Å¥',
'tcedil': 'Å£',
'tcy': 'Ñ‚',
'tdot': '⃛',
'telrec': '⌕',
'tfr': '�',
'there4': '∴',
'therefore': '∴',
'theta': 'θ',
'thetasym': 'Ï‘',
'thetav': 'Ï‘',
'thickapprox': '≈',
'thicksim': '∼',
'thinsp': '\u2009',
'thkap': '≈',
'thksim': '∼',
'thorn': 'þ',
'tilde': '˜',
'times': '×',
'timesb': '⊠',
'timesbar': '⨱',
'timesd': '⨰',
'tint': 'âˆ',
'toea': '⤨',
'top': '⊤',
'topbot': '⌶',
'topcir': '⫱',
'topf': '�',
'topfork': '⫚',
'tosa': '⤩',
'tprime': '‴',
'trade': 'â„¢',
'triangle': 'â–µ',
'triangledown': 'â–¿',
'triangleleft': 'â—ƒ',
'trianglelefteq': '⊴',
'triangleq': '≜',
'triangleright': 'â–¹',
'trianglerighteq': '⊵',
'tridot': 'â—¬',
'trie': '≜',
'triminus': '⨺',
'triplus': '⨹',
'trisb': '�',
'tritime': '⨻',
'trpezium': '�',
'tscr': '�',
'tscy': 'ц',
'tshcy': 'Ñ›',
'tstrok': 'ŧ',
'twixt': '≬',
'twoheadleftarrow': '�',
'twoheadrightarrow': '↠',
'uArr': '⇑',
'uHar': '⥣',
'uacute': 'ú',
'uarr': '↑',
'ubrcy': '�',
'ubreve': 'Å',
'ucirc': 'û',
'ucy': 'у',
'udarr': '⇅',
'udblac': 'ű',
'udhar': '⥮',
'ufisht': '⥾',
'ufr': '�',
'ugrave': 'ù',
'uharl': '↿',
'uharr': '↾',
'uhblk': 'â–€',
'ulcorn': '⌜',
'ulcorner': '⌜',
'ulcrop': '�',
'ultri': 'â—¸',
'umacr': 'Å«',
'uml': '¨',
'uogon': 'ų',
'uopf': '�',
'uparrow': '↑',
'updownarrow': '↕',
'upharpoonleft': '↿',
'upharpoonright': '↾',
'uplus': '�',
'upsi': 'Ï…',
'upsih': 'Ï’',
'upsilon': 'Ï…',
'upuparrows': '⇈',
'urcorn': '�',
'urcorner': '�',
'urcrop': '�',
'uring': 'ů',
'urtri': 'â—¹',
'uscr': '�',
'utdot': 'â‹°',
'utilde': 'Å©',
'utri': 'â–µ',
'utrif': 'â–´',
'uuarr': '⇈',
'uuml': 'ü',
'uwangle': '⦧',
'vArr': '⇕',
'vBar': '⫨',
'vBarv': 'â«©',
'vDash': '⊨',
'vangrt': '⦜',
'varepsilon': 'ϵ',
'varkappa': 'ϰ',
'varnothing': '∅',
'varphi': 'Ï•',
'varpi': 'Ï–',
'varpropto': '�',
'varr': '↕',
'varrho': 'ϱ',
'varsigma': 'Ï‚',
'varsubsetneq': '⊊︀',
'varsubsetneqq': '⫋︀',
'varsupsetneq': '⊋︀',
'varsupsetneqq': '⫌︀',
'vartheta': 'Ï‘',
'vartriangleleft': '⊲',
'vartriangleright': '⊳',
'vcy': 'в',
'vdash': '⊢',
'vee': '∨',
'veebar': '⊻',
'veeeq': '≚',
'vellip': 'â‹®',
'verbar': '|',
'vert': '|',
'vfr': '�',
'vltri': '⊲',
'vnsub': '⊂⃒',
'vnsup': '⊃⃒',
'vopf': '�',
'vprop': '�',
'vrtri': '⊳',
'vscr': '�',
'vsubnE': '⫋︀',
'vsubne': '⊊︀',
'vsupnE': '⫌︀',
'vsupne': '⊋︀',
'vzigzag': '⦚',
'wcirc': 'ŵ',
'wedbar': '⩟',
'wedge': '∧',
'wedgeq': '≙',
'weierp': '℘',
'wfr': '�',
'wopf': '�',
'wp': '℘',
'wr': '≀',
'wreath': '≀',
'wscr': '�',
'xcap': 'â‹‚',
'xcirc': 'â—¯',
'xcup': '⋃',
'xdtri': 'â–½',
'xfr': '�',
'xhArr': '⟺',
'xharr': '⟷',
'xi': 'ξ',
'xlArr': '⟸',
'xlarr': '⟵',
'xmap': '⟼',
'xnis': 'â‹»',
'xodot': '⨀',
'xopf': '�',
'xoplus': '�',
'xotime': '⨂',
'xrArr': '⟹',
'xrarr': '⟶',
'xscr': '�',
'xsqcup': '⨆',
'xuplus': '⨄',
'xutri': 'â–³',
'xvee': '�',
'xwedge': 'â‹€',
'yacute': 'ý',
'yacy': '�',
'ycirc': 'Å·',
'ycy': 'Ñ‹',
'yen': 'Â¥',
'yfr': '�',
'yicy': 'Ñ—',
'yopf': '�',
'yscr': '�',
'yucy': '�',
'yuml': 'ÿ',
'zacute': 'ź',
'zcaron': 'ž',
'zcy': 'Ğ·',
'zdot': 'ż',
'zeetrf': 'ℨ',
'zeta': 'ζ',
'zfr': '�',
'zhcy': 'ж',
'zigrarr': '�',
'zopf': '�',
'zscr': '�',
'zwj': '\u200d',
'zwnj': '\u200c',
# }}}
}
def entity_to_unicode_in_python(match, exceptions=(), encoding='cp1252', result_exceptions={}):
    '''
    Convert a single matched HTML entity reference to its replacement text.

    :param match: a regex match object whose group(1) is the entity name
        without the surrounding ``&`` and ``;``
    :param exceptions: entity names to leave untouched; they are returned
        in their original ``&name;`` form
    :param encoding: encoding used to interpret numeric entities with
        values <= 255; pass None to always treat them as unicode codepoints
    :param result_exceptions: maps a resulting character to a replacement
        string (used e.g. to keep markup-significant characters escaped).
        It is only read (via .get), never mutated, so the shared default
        dict is safe here.
    :return: the replacement text, or the original ``&name;`` form when the
        entity is unknown, malformed or excepted
    '''
    def check(ch):
        return result_exceptions.get(ch, ch)
    ent = match.group(1)
    if ent in exceptions:
        return '&'+ent+';'
    if ent in {'apos', 'squot'}: # squot is generated by some broken CMS software
        return check("'")
    if ent == 'hellips':
        # common misspelling of hellip
        ent = 'hellip'
    if ent.startswith('#'):
        try:
            if ent[1] in ('x', 'X'):
                num = int(ent[2:], 16)
            else:
                num = int(ent[1:])
        except (ValueError, IndexError):
            # Malformed numeric entity such as &#; or &#xZZ; -- int() raises
            # ValueError, a bare '#' raises IndexError. Previously a bare
            # except: which also swallowed KeyboardInterrupt/SystemExit.
            return '&'+ent+';'
        if encoding is None or num > 255:
            return check(my_unichr(num))
        try:
            # Interpret small numeric entities in the legacy encoding, to
            # cope with documents that use cp1252 codes as entities
            return check(bytes(bytearray((num,))).decode(encoding))
        except UnicodeDecodeError:
            return check(my_unichr(num))
    from calibre.ebooks.html_entities import html5_entities
    try:
        return check(html5_entities[ent])
    except KeyError:
        pass
    from polyglot.html_entities import name2codepoint
    try:
        return check(my_unichr(name2codepoint[ent]))
    except KeyError:
        # Unknown entity, leave as-is
        return '&'+ent+';'
def find_tests():
    # Build a unittest suite exercising the native (C extension) entity
    # replacement implementation
    import unittest
    class TestHTMLEntityReplacement(unittest.TestCase):
        def test_html_entity_replacement(self):
            from calibre_extensions.fast_html_entities import replace_all_entities
            # t(): plain replacement; x(): same but passing True as the
            # second argument to replace_all_entities
            def t(inp, exp):
                self.assertEqual(exp, replace_all_entities(inp), f'Failed for input: {inp!r}')
            def x(inp, exp):
                self.assertEqual(exp, replace_all_entities(inp, True), f'Failed for input: {inp!r}')
            t('aӒb', 'aÓ’b')
            t('', '')
            t('a', 'a')
            t('&', '&')
            t('&', '&')
            t('&', '&')
            t('a&;b &#;c', 'a&;b &#;c')
            t('<', '<')
            t('&<', '&<')
            t('a&b<c', 'a&b<c')
            t('a∾̳b', 'a∾̳b')
            t('aӒb', 'aÓ’b')
            t('aሴb', 'a\u1234b')
            t('a􃓺b', 'a\U001034fAb')
            t('a�b�c', 'abc')
            x('&<>'"', '&<>'"')
    return unittest.defaultTestLoader.loadTestsFromTestCase(TestHTMLEntityReplacement)
def generate_entity_lists():
    # Developer utility (not used at runtime): regenerates both the Python
    # ENTITY_DATA dict embedded in this file and the gperf perfect-hash C
    # header used by the fast native implementation.
    import re
    from html import entities as e
    # Merge the legacy name2codepoint table with the HTML5 entity table,
    # normalizing names by stripping any trailing semicolon
    entities = {k.rstrip(';'): e.name2codepoint[k] for k in e.name2codepoint}
    entities.update({k.rstrip(';'): e.html5[k] for k in e.html5})
    # common misspelled entity names
    for k, v in {'squot': "'", 'hellips': entities['hellip']}.items():
        if k not in entities:
            entities[k] = v
    lines = []
    # gperf input preamble: struct declaration followed by the %% separator
    native_lines = '''\
struct html_entity { const char *name, *val; }
%%
'''.splitlines()
    def esc_for_c(x):
        # Escape a character for inclusion in a C string literal
        if x == '\n':
            return '\\n'
        if x in '''"\\''':
            return '\\' + x
        return x
    for k in sorted(entities):
        v = entities[k]
        lines.append(f" '{k}': {v!r},")
        native_lines.append(f'"{esc_for_c(k)}","{esc_for_c(v)}"')
    # Rewrite the ENTITY_DATA section of this very file in place
    with open(__file__, 'r+b') as f:
        raw = f.read().decode('utf-8')
        pat = re.compile(r'^# ENTITY_DATA {{{.+?^# }}}', flags=re.M | re.DOTALL)
        raw = pat.sub(lambda m: '# ENTITY_DATA {{{\n' + '\n'.join(lines) + '\n# }}}', raw)
        f.seek(0), f.truncate(), f.write(raw.encode('utf-8'))
    import subprocess
    # Generate the perfect-hash lookup header next to this file via gperf
    with open(__file__.replace('.py', '.h'), 'wb') as f:
        cp = subprocess.run(['gperf', '--struct-type', '--readonly', '--includes', '--compare-strncmp'], input='\n'.join(native_lines).encode(), stdout=f)
        if cp.returncode != 0:
            raise SystemExit(cp.returncode)
| 49,754 | Python | .py | 2,228 | 17.174147 | 154 | 0.406697 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,251 | BeautifulSoup.py | kovidgoyal_calibre/src/calibre/ebooks/BeautifulSoup.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import bs4
from bs4 import CData, Comment, Declaration, NavigableString, ProcessingInstruction, SoupStrainer, Tag, __version__ # noqa
def parse_html(markup):
    '''Parse HTML markup into a soup tree using html5-parser.

    str input has encoding declarations stripped and entities replaced;
    bytes input is decoded and entity-resolved via xml_to_unicode, then
    cleaned of characters invalid in XML.
    '''
    from calibre import xml_replace_entities
    from calibre.ebooks.chardet import strip_encoding_declarations, xml_to_unicode
    from calibre.utils.cleantext import clean_xml_chars
    if isinstance(markup, str):
        cleaned = xml_replace_entities(strip_encoding_declarations(markup))
    else:
        decoded = xml_to_unicode(markup, strip_encoding_pats=True, resolve_entities=True)[0]
        cleaned = clean_xml_chars(decoded)
    from html5_parser.soup import parse
    return parse(cleaned, return_root=False)
def prettify(soup):
    '''Return soup.prettify() as text, decoding UTF-8 bytes when necessary.'''
    pretty = soup.prettify()
    return pretty.decode('utf-8') if isinstance(pretty, bytes) else pretty
def BeautifulSoup(markup='', *a, **kw):
    # Drop-in replacement for bs4.BeautifulSoup: ignores the extra
    # arguments and always parses via parse_html() (html5-parser)
    return parse_html(markup)
def BeautifulStoneSoup(markup='', *a, **kw):
    # XML parsing via bs4; extra arguments are ignored
    return bs4.BeautifulSoup(markup, 'xml')
| 1,086 | Python | .py | 25 | 38.68 | 123 | 0.729781 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,252 | tweak.py | kovidgoyal_calibre/src/calibre/ebooks/tweak.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os
import sys
import unicodedata
from calibre import as_unicode, prints, walk
from calibre.constants import __appname__, iswindows
from calibre.libunzip import extract as zipextract
from calibre.ptempfile import TemporaryDirectory
from calibre.utils.ipc.simple_worker import WorkerError
from calibre.utils.zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
class Error(ValueError):
    '''Raised by the exploders in this module when a book cannot be unpacked.'''
    pass
def ask_cli_question(msg):
    '''Print *msg* and read a single keypress; return True only for "y".

    Does not require the user to press Enter: uses msvcrt on Windows and
    raw terminal mode (termios/tty) elsewhere.
    '''
    prints(msg, end=' [y/N]: ')
    sys.stdout.flush()
    if iswindows:
        import msvcrt
        # getch() returns bytes on python 3; decode so both platforms
        # produce str. Previously the POSIX branch produced str but the
        # final comparison was against b'y', which could never be True.
        ans = msvcrt.getch().decode('utf-8', 'replace')
    else:
        import termios
        import tty
        old_settings = termios.tcgetattr(sys.stdin.fileno())
        try:
            # Raw mode so a single keypress is delivered immediately
            tty.setraw(sys.stdin.fileno())
            try:
                ans = sys.stdin.read(1)
            except KeyboardInterrupt:
                ans = ''
        finally:
            # Always restore the terminal to its previous state
            termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
    print()
    return ans == 'y'
def mobi_exploder(path, tdir, question=lambda x:True):
    # Unpack a MOBI/AZW/AZW3 file into tdir, translating the format-specific
    # BadFormat exception into this module's Error; returns whatever the
    # underlying explode() returns (the OPF path, per zip_exploder's contract)
    from calibre.ebooks.mobi.tweak import BadFormat, explode
    try:
        return explode(path, tdir, question=question)
    except BadFormat as e:
        raise Error(as_unicode(e))
def zip_exploder(path, tdir, question=lambda x:True):
    '''Extract a zip-based book (EPUB/HTMLZ) into tdir and return the path
    of the first .opf file found, raising Error if there is none.'''
    zipextract(path, tdir)
    opf = next((name for name in walk(tdir) if name.lower().endswith('.opf')), None)
    if opf is None:
        raise Error('Invalid book: Could not find .opf')
    return opf
def zip_rebuilder(tdir, path):
    '''Re-create the zip archive at *path* from the contents of *tdir*.

    A mimetype file, if present, is written first and stored uncompressed;
    housekeeping files are skipped; archive names are NFC-normalized and
    use forward slashes.
    '''
    with ZipFile(path, 'w', compression=ZIP_DEFLATED) as zf:
        # The mimetype entry must come first and be stored uncompressed
        mimetype_path = os.path.join(tdir, 'mimetype')
        if os.path.exists(mimetype_path):
            zf.write(mimetype_path, 'mimetype', compress_type=ZIP_STORED)
        # Everything else, skipping OS/iTunes housekeeping files
        skip = {'.DS_Store', 'mimetype', 'iTunesMetadata.plist'}
        for dirpath, dirnames, filenames in os.walk(tdir):
            for name in filenames:
                if name in skip:
                    continue
                abspath = os.path.join(dirpath, name)
                arcname = unicodedata.normalize('NFC', os.path.relpath(abspath, tdir).replace(os.sep, '/'))
                zf.write(abspath, arcname)
def docx_exploder(path, tdir, question=lambda x:True):
    '''Extract a DOCX file into tdir, pretty-print all its XML and return
    the path to document.xml, raising Error if it is missing.'''
    zipextract(path, tdir)
    from calibre.ebooks.docx.dump import pretty_all_xml_in_dir
    pretty_all_xml_in_dir(tdir)
    doc = next((name for name in walk(tdir) if os.path.basename(name) == 'document.xml'), None)
    if doc is None:
        raise Error('Invalid book: Could not find document.xml')
    return doc
def get_tools(fmt):
    '''Return an (exploder, rebuilder) pair of callables for the file
    format *fmt*, or (None, None) if the format is unsupported.'''
    fmt = fmt.lower()
    if fmt in {'mobi', 'azw', 'azw3'}:
        from calibre.ebooks.mobi.tweak import rebuild
        return mobi_exploder, rebuild
    if fmt in {'epub', 'htmlz'}:
        return zip_exploder, zip_rebuilder
    if fmt == 'docx':
        return docx_exploder, zip_rebuilder
    return None, None
def explode(ebook_file, output_dir):
    # CLI entry point (--explode-book): unpack ebook_file into output_dir
    # for manual editing, recording the format so --implode-book can verify
    # it later. Exits the process with an error message on failure.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if not os.path.isdir(output_dir):
        raise SystemExit('%s is not a directory' % output_dir)
    output_dir = os.path.abspath(output_dir)
    fmt = ebook_file.rpartition('.')[-1].lower()
    exploder, rebuilder = get_tools(fmt)
    if exploder is None:
        raise SystemExit('Cannot tweak %s files. Supported formats are: EPUB, HTMLZ, AZW3, MOBI, DOCX' % fmt.upper())
    try:
        opf = exploder(ebook_file, output_dir, question=ask_cli_question)
    except WorkerError as e:
        prints('Failed to unpack', ebook_file)
        prints(e.orig_tb)
        raise SystemExit(1)
    except Error as e:
        prints(as_unicode(e), file=sys.stderr)
        raise SystemExit(1)
    if opf is None:
        # The question was answered with No
        return
    # Marker file recording the unpacked format, checked by implode();
    # hidden via leading dot on Unix, leading underscore on Windows
    h = '_' if iswindows else '.'
    with open(os.path.join(output_dir, h + '__explode_fmt__'), 'wb') as f:
        f.write(fmt.encode('utf-8'))
    prints('Book extracted to', output_dir)
    prints('Make your changes and once you are done, use --implode-book to rebuild')
def implode(output_dir, ebook_file):
    # CLI entry point (--implode-book): rebuild ebook_file from a folder
    # previously created by explode(). Exits the process on failure.
    output_dir = os.path.abspath(output_dir)
    fmt = ebook_file.rpartition('.')[-1].lower()
    exploder, rebuilder = get_tools(fmt)
    if rebuilder is None:
        raise SystemExit('Cannot tweak %s files. Supported formats are: EPUB, HTMLZ, AZW3, MOBI, DOCX' % fmt.upper())
    # Look for the marker file written by explode() and verify the target
    # format matches the format that was unpacked
    h = '_' if iswindows else '.'
    efmt_path = os.path.join(output_dir, h + '__explode_fmt__')
    try:
        with open(efmt_path, 'rb') as f:
            efmt = f.read().decode('utf-8')
    except Exception:
        raise SystemExit('The folder %s does not seem to have been created by --explode-book' % output_dir)
    if efmt != fmt:
        raise SystemExit('You must use the same format of file as was used when exploding the book')
    os.remove(efmt_path)
    try:
        rebuilder(output_dir, ebook_file)
    except WorkerError as e:
        prints('Failed to rebuild', ebook_file)
        prints(e.orig_tb)
        raise SystemExit(1)
    prints(ebook_file, 'successfully rebuilt')
def tweak(ebook_file):
    ''' Command line interface to the Tweak Book tool '''
    fmt = ebook_file.rpartition('.')[-1].lower()
    exploder, rebuilder = get_tools(fmt)
    if exploder is None:
        prints('Cannot tweak %s files. Supported formats are: EPUB, HTMLZ, AZW3, MOBI' % fmt.upper()
                , file=sys.stderr)
        raise SystemExit(1)
    # Unpack into a temporary folder, let the user edit, then (optionally)
    # rebuild the original file in place from the edited contents
    with TemporaryDirectory('_tweak_'+
            os.path.basename(ebook_file).rpartition('.')[0]) as tdir:
        try:
            opf = exploder(ebook_file, tdir, question=ask_cli_question)
        except WorkerError as e:
            prints('Failed to unpack', ebook_file)
            prints(e.orig_tb)
            raise SystemExit(1)
        except Error as e:
            prints(as_unicode(e), file=sys.stderr)
            raise SystemExit(1)
        if opf is None:
            # The question was answered with No
            return
        prints('Book extracted to', tdir)
        prints('Make your tweaks and once you are done,', __appname__,
                'will rebuild', ebook_file, 'from', tdir)
        print()
        proceed = ask_cli_question('Rebuild ' + ebook_file + '?')
        if proceed:
            prints('Rebuilding', ebook_file, 'please wait ...')
            try:
                rebuilder(tdir, ebook_file)
            except WorkerError as e:
                prints('Failed to rebuild', ebook_file)
                prints(e.orig_tb)
                raise SystemExit(1)
            prints(ebook_file, 'successfully tweaked')
| 6,683 | Python | .py | 168 | 31.875 | 117 | 0.625174 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,253 | css_transform_rules.py | kovidgoyal_calibre/src/calibre/ebooks/css_transform_rules.py | #!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import numbers
import operator
from collections import OrderedDict
from functools import partial
from css_parser.css import CSSRule, Property
from calibre import force_unicode
from calibre.ebooks import parse_css_length
from calibre.ebooks.oeb.normalize_css import normalizers, safe_parser
from polyglot.builtins import iteritems
def compile_pat(pat):
    '''Compile *pat* as a case-insensitive unicode regular expression
    using the third-party regex module (VERSION1 behavior).'''
    import regex
    flags = regex.VERSION1 | regex.UNICODE | regex.IGNORECASE
    return regex.compile(pat, flags=flags)
def all_properties(decl):
    ' This is needed because CSSStyleDeclaration.getProperties(None, all=True) does not work and is slower than it needs to be. '
    for item in decl.seq:
        value = item.value
        if isinstance(value, Property):
            yield value
class StyleDeclaration:
    '''Wrapper around a css_parser style declaration that can expand
    shorthand properties on demand (via the normalizers map) and records
    whether the declaration has been modified.'''
    def __init__(self, css_declaration):
        self.css_declaration = css_declaration
        # Maps a shorthand Property -> list of expanded longhand Property objects
        self.expanded_properties = {}
        self.changed = False
    def __iter__(self):
        # Yield (property, parent_property) pairs. parent_property is None
        # for plain properties; for shorthand properties each expanded
        # longhand is yielded with the shorthand as its parent.
        dec = self.css_declaration
        for p in all_properties(dec):
            n = normalizers.get(p.name)
            if n is None:
                yield p, None
            else:
                if p not in self.expanded_properties:
                    self.expanded_properties[p] = [Property(k, v, p.literalpriority) for k, v in iteritems(n(p.name, p.propertyValue))]
                for ep in self.expanded_properties[p]:
                    yield ep, p
    def expand_property(self, parent_prop):
        # Replace the shorthand parent_prop in the underlying declaration
        # with its expanded longhand properties, keeping all other items in
        # their original order
        props = self.expanded_properties.pop(parent_prop, None)
        if props is None:
            return
        dec = self.css_declaration
        seq = dec._tempSeq()
        for item in dec.seq:
            if item.value is parent_prop:
                for c in sorted(props, key=operator.attrgetter('name')):
                    c.parent = dec
                    seq.append(c, 'Property')
            else:
                seq.appendItem(item)
        dec._setSeq(seq)
    def remove_property(self, prop, parent_prop):
        # Remove prop from the declaration; if it came from a shorthand,
        # expand the shorthand first so its sibling longhands survive
        if parent_prop is not None:
            self.expand_property(parent_prop)
        dec = self.css_declaration
        seq = dec._tempSeq()
        for item in dec.seq:
            if item.value is not prop:
                seq.appendItem(item)
        dec._setSeq(seq)
        self.changed = True
    def change_property(self, prop, parent_prop, val, match_pat=None):
        # Set prop's value to val; if match_pat is given, substitute val
        # for the matched portion of the current value instead
        if parent_prop is not None:
            self.expand_property(parent_prop)
        if match_pat is None:
            prop.value = val
        else:
            prop.value = match_pat.sub(val, prop.value)
        self.changed = True
    def append_properties(self, props):
        # Append copies of the given Property objects to the declaration
        if props:
            self.changed = True
        for prop in props:
            self.css_declaration.setProperty(Property(prop.name, prop.value, prop.literalpriority, parent=self.css_declaration))
    def set_property(self, name, value, priority='', replace=True):
        # Note that this does not handle shorthand properties, so you must
        # call remove_property() yourself in that case
        self.changed = True
        if replace:
            self.css_declaration.removeProperty(name)
        self.css_declaration.setProperty(Property(name, value, priority, parent=self.css_declaration))
    def __str__(self):
        # Serialized CSS text of the (possibly modified) declaration
        return force_unicode(self.css_declaration.cssText, 'utf-8')
# Maps comparison/arithmetic symbols used in rules to the names of the
# corresponding functions in the operator module
operator_map = {'==':'eq', '!=': 'ne', '<=':'le', '<':'lt', '>=':'ge', '>':'gt', '-':'sub', '+': 'add', '*':'mul', '/':'truediv'}
def unit_convert(value, unit, dpi=96.0, body_font_size=12):
    '''Convert a CSS length *value* expressed in *unit* to points.

    Returns None for units this function does not handle (e.g. relative
    units like em/ex/%). px conversion uses *dpi*; rem uses
    *body_font_size* (in points).
    '''
    if unit == 'px':
        return value * 72.0 / dpi
    if unit == 'in':
        return value * 72.0
    if unit == 'pt':
        # Already in points
        return value
    if unit == 'pc':
        return value * 12.0
    if unit == 'mm':
        return value * 2.8346456693
    if unit == 'cm':
        return value * 28.346456693
    if unit == 'rem':
        return value * body_font_size
    if unit == 'q':
        return value * 0.708661417325
    return None
def parse_css_length_or_number(raw, default_unit=None):
    '''Parse *raw* into a (value, unit) pair.

    Plain numbers and numeric strings get *default_unit* as the unit;
    anything else is handed to parse_css_length().
    '''
    if isinstance(raw, numbers.Number):
        return raw, default_unit
    try:
        numeric = float(raw)
    except Exception:
        return parse_css_length(raw)
    return numeric, default_unit
def numeric_match(value, unit, pts, op, raw):
    '''Return True if the CSS length *raw* satisfies op() against the
    rule's *value*/*unit* (or against *pts*, the rule value converted to
    points, when the units differ). Unparseable input never matches.'''
    try:
        qty, qunit = parse_css_length_or_number(raw)
    except Exception:
        return False
    if qty is None:
        return False
    # Same (or missing) units: compare the raw magnitudes directly
    if unit is None or qunit is None or qunit == unit:
        return op(qty, value)
    # Different units: compare via points, if both sides convert
    if pts is None:
        return False
    converted = unit_convert(qty, qunit)
    if converted is None:
        return False
    return op(converted, pts)
def transform_number(val, op, raw):
    '''Apply op(parsed_value, val) to the numeric part of the CSS value
    *raw*, preserving its unit. Returns *raw* unchanged if it cannot be
    parsed as a number or length.'''
    try:
        qty, qunit = parse_css_length_or_number(raw, default_unit='')
    except Exception:
        return raw
    if qty is None:
        return raw
    result = op(qty, val)
    # Render whole numbers without a decimal point
    if int(result) == result:
        result = int(result)
    return str(result) + qunit
class Rule:
    '''A compiled CSS transform rule: a predicate on a single
    (property name, value) pair plus an action to apply when it matches.'''
    def __init__(self, property='color', match_type='*', query='', action='remove', action_data=''):
        self.property_name = property.lower()
        self.action, self.action_data = action, action_data
        self.match_pat = None
        if self.action == 'append':
            # Pre-parse the CSS fragment that will be appended on match
            decl = safe_parser().parseStyle(self.action_data)
            self.appended_properties = list(all_properties(decl))
        elif self.action in '+-/*':
            # Arithmetic actions transform the numeric part of the value
            self.action_operator = partial(transform_number, float(self.action_data), getattr(operator, operator_map[self.action]))
        if match_type == 'is':
            self.property_matches = lambda x: x.lower() == query.lower()
        elif match_type == 'is_not':
            self.property_matches = lambda x: x.lower() != query.lower()
        elif match_type == '*':
            self.property_matches = lambda x: True
        elif 'matches' in match_type:
            # Regex based matching, optionally negated ('not_matches')
            self.match_pat = compile_pat(query)
            if match_type.startswith('not_'):
                self.property_matches = lambda x: self.match_pat.match(x) is None
            else:
                self.property_matches = lambda x: self.match_pat.match(x) is not None
        else:
            # Numeric comparison (<, >, <=, >=, ==, !=) with unit handling
            value, unit = parse_css_length_or_number(query)
            op = getattr(operator, operator_map[match_type])
            pts = unit_convert(value, unit)
            self.property_matches = partial(numeric_match, value, unit, pts, op)
    def process_declaration(self, declaration):
        # Apply this rule to a StyleDeclaration. Returns True if this rule
        # changed it; declaration.changed keeps accumulating across rules.
        oval, declaration.changed = declaration.changed, False
        for prop, parent_prop in tuple(declaration):
            if prop.name == self.property_name and self.property_matches(prop.value):
                if self.action == 'remove':
                    declaration.remove_property(prop, parent_prop)
                elif self.action == 'change':
                    declaration.change_property(prop, parent_prop, self.action_data, self.match_pat)
                elif self.action == 'append':
                    declaration.append_properties(self.appended_properties)
                else:
                    # Arithmetic action: only record a change if the value
                    # actually differs after the transformation
                    val = prop.value
                    nval = self.action_operator(val)
                    if val != nval:
                        declaration.change_property(prop, parent_prop, nval)
        changed = declaration.changed
        declaration.changed = oval or changed
        return changed
# Human readable labels for the actions a rule can perform
ACTION_MAP = OrderedDict((
    ('remove', _('Remove the property')),
    ('append', _('Add extra properties')),
    ('change', _('Change the value to')),
    ('*', _('Multiply the value by')),
    ('/', _('Divide the value by')),
    ('+', _('Add to the value')),
    ('-', _('Subtract from the value')),
))
# Human readable labels for the supported match types
MATCH_TYPE_MAP = OrderedDict((
    ('is', _('is')),
    ('is_not', _('is not')),
    ('*', _('is any value')),
    ('matches', _('matches pattern')),
    ('not_matches', _('does not match pattern')),
    ('==', _('is the same length as')),
    ('!=', _('is not the same length as')),
    ('<', _('is less than')),
    ('>', _('is greater than')),
    ('<=', _('is less than or equal to')),
    ('>=', _('is greater than or equal to')),
))
# The only keys a serialized rule dict may contain
allowed_keys = frozenset('property match_type query action action_data'.split())
def validate_rule(rule):
    '''Check that *rule* is a well formed transform rule.

    Returns a (title, detailed message) pair describing the first problem
    found, or (None, None) when the rule is valid.
    '''
    keys = frozenset(rule)
    extra = keys - allowed_keys
    if extra:
        return _('Unknown keys'), _(
            'The rule has unknown keys: %s') % ', '.join(extra)
    missing = allowed_keys - keys
    if missing:
        return _('Missing keys'), _(
            'The rule has missing keys: %s') % ', '.join(missing)
    mt = rule['match_type']
    if not rule['property']:
        return _('Property required'), _('You must specify a CSS property to match')
    if rule['property'] in normalizers:
        return _('Shorthand property not allowed'), _(
            '{0} is a shorthand property. Use the full form of the property,'
            ' for example, instead of font, use font-family, instead of margin, use margin-top, etc.').format(rule['property'])
    if not rule['query'] and mt != '*':
        # Bug fix: this error tuple was previously constructed and
        # discarded (missing return), so empty queries passed validation
        return _('Query required'), _(
            'You must specify a value for the CSS property to match')
    if mt not in MATCH_TYPE_MAP:
        return _('Unknown match type'), _(
            'The match type %s is not known') % mt
    if 'matches' in mt:
        try:
            compile_pat(rule['query'])
        except Exception:
            return _('Query invalid'), _(
                '%s is not a valid regular expression') % rule['query']
    elif mt in '< > <= >= == !='.split():
        try:
            num = parse_css_length_or_number(rule['query'])[0]
            if num is None:
                raise Exception('not a number')
        except Exception:
            return _('Query invalid'), _(
                '%s is not a valid length or number') % rule['query']
    ac, ad = rule['action'], rule['action_data']
    if ac not in ACTION_MAP:
        # Bug fix: message previously interpolated mt (the match type)
        # instead of the offending action
        return _('Unknown action type'), _(
            'The action type %s is not known') % ac
    if not ad and ac != 'remove':
        msg = _('You must specify a number')
        if ac == 'append':
            msg = _('You must specify at least one CSS property to add')
        elif ac == 'change':
            msg = _('You must specify a value to change the property to')
        return _('No data'), msg
    if ac in '+-*/':
        try:
            float(ad)
        except Exception:
            return _('Invalid number'), _('%s is not a number') % ad
    return None, None
def compile_rules(serialized_rules):
    '''Instantiate a Rule object for every serialized rule dict.'''
    return [Rule(**spec) for spec in serialized_rules]
def transform_declaration(compiled_rules, decl):
    '''Apply every compiled rule to *decl*; return True if any rule changed it.'''
    wrapped = StyleDeclaration(decl)
    changed = False
    for rule in compiled_rules:
        # Every rule must be applied, so no short-circuiting here
        changed |= bool(rule.process_declaration(wrapped))
    return changed
def transform_sheet(compiled_rules, sheet):
    '''Apply the rules to every style rule in *sheet*; True if anything changed.'''
    # Materialize the list first so every declaration is processed, then
    # collapse the per-rule change flags into a single boolean.
    results = [
        transform_declaration(compiled_rules, style_rule.style)
        for style_rule in sheet.cssRules.rulesOfType(CSSRule.STYLE_RULE)
    ]
    return any(results)
def transform_container(container, serialized_rules, names=()):
    '''Apply the serialized rules to the stylesheets/styles in *container*.'''
    from calibre.ebooks.oeb.polish.css import transform_css
    compiled = compile_rules(serialized_rules)
    sheet_transform = partial(transform_sheet, compiled)
    style_transform = partial(transform_declaration, compiled)
    return transform_css(
        container, transform_sheet=sheet_transform,
        transform_style=style_transform, names=names)
def rule_to_text(rule):
    '''Render a serialized rule dict as a short human readable description.'''
    def field(name):
        return rule.get(name) or ''

    text = _('If the property {property} {match_type} {query}\n{action}').format(
        property=field('property'),
        match_type=MATCH_TYPE_MAP[rule['match_type']],
        query=field('query'),
        action=ACTION_MAP[rule['action']],
    )
    extra = field('action_data')
    if extra:
        text += extra
    return text
def export_rules(serialized_rules):
    '''Serialize rules to a UTF-8 encoded, commented "key: value" text format.'''
    lines = []
    for rule in serialized_rules:
        # Human readable description first, as comments
        for comment_line in rule_to_text(rule).splitlines():
            lines.append('# ' + comment_line)
        # Then one key: value line per rule field (newlines flattened)
        for key, value in iteritems(rule):
            if key in allowed_keys:
                lines.append('{}: {}'.format(key, value.replace('\n', ' ')))
        lines.append('')
    return '\n'.join(lines).encode('utf-8')
def import_rules(raw_data):
    '''Parse the text format produced by export_rules(), yielding rule dicts.'''
    import regex
    line_pat = regex.compile(r'\s*(\S+)\s*:\s*(.+)', flags=regex.VERSION1)
    pending = {}

    def sanitize(r):
        # Guarantee every allowed key is present, mapping missing/falsy to ''
        return {k: (r.get(k) or '') for k in allowed_keys}

    for line in raw_data.decode('utf-8').splitlines():
        if not line.strip():
            # A blank line terminates the current rule, if any
            if pending:
                yield sanitize(pending)
                pending = {}
            continue
        if line.lstrip().startswith('#'):
            continue  # comment line
        m = line_pat.match(line)
        if m is not None:
            key, value = m.group(1).lower(), m.group(2)
            if key in allowed_keys:
                pending[key] = value
    if pending:
        yield sanitize(pending)
def test(return_tests=False): # {{{
    # Unit tests for the rule matching/transform machinery. With
    # return_tests=True the suite is returned for an external runner,
    # otherwise it is run immediately with a text runner.
    import unittest
    def apply_rule(style, **rule):
        # Apply a single serialized rule to a CSS style string and return
        # the resulting declaration text (trailing semicolon stripped).
        r = Rule(**rule)
        decl = StyleDeclaration(safe_parser().parseStyle(style))
        r.process_declaration(decl)
        return str(decl).rstrip(';')
    class TestTransforms(unittest.TestCase):
        longMessage = True
        maxDiff = None
        ae = unittest.TestCase.assertEqual
        def test_matching(self):
            # css/ecss hold the input style and the expected output style
            def m(match_type='*', query='', action_data=''):
                action = 'change' if action_data else 'remove'
                self.ae(apply_rule(
                    css, property=prop, match_type=match_type, query=query, action=action, action_data=action_data
                ), ecss)
            prop = 'font-size'
            css, ecss = 'font-size: 1.2rem', 'font-size: 1.2em'
            m('matches', query='(.+)rem', action_data=r'\1em')
            prop = 'color'
            css, ecss = 'color: red; margin: 0', 'margin: 0'
            m('*')
            m('is', 'red')
            m('is_not', 'blue')
            m('matches', 'R.d')
            m('not_matches', 'blue')
            ecss = css.replace('; ', ';\n')
            m('is', 'blue')
            css, ecss = 'color: currentColor; line-height: 0', 'line-height: 0'
            m('is', 'currentColor')
            prop = 'margin-top'
            css, ecss = 'color: red; margin-top: 10', 'color: red'
            m('*')
            m('==', '10')
            m('!=', '11')
            m('<=', '10')
            m('>=', '10')
            m('<', '11')
            m('>', '9')
            css, ecss = 'color: red; margin-top: 1mm', 'color: red'
            m('==', '1')
            m('==', '1mm')
            m('==', '4q')
            ecss = css.replace('; ', ';\n')
            m('==', '1pt')
        def test_expansion(self):
            # Shorthand properties must be expanded before matching/removal
            def m(css, ecss, action='remove', action_data=''):
                self.ae(ecss, apply_rule(css, property=prop, action=action, action_data=action_data))
            prop = 'margin-top'
            m('margin: 0', 'margin-bottom: 0;\nmargin-left: 0;\nmargin-right: 0')
            m('margin: 0 !important', 'margin-bottom: 0 !important;\nmargin-left: 0 !important;\nmargin-right: 0 !important')
            m('margin: 0', 'margin-bottom: 0;\nmargin-left: 0;\nmargin-right: 0;\nmargin-top: 1pt', 'change', '1pt')
            prop = 'font-family'
            m('font: 10em "Kovid Goyal", monospace', 'font-size: 10em;\nfont-style: normal;\nfont-variant: normal;\nfont-weight: normal;\nline-height: normal')
        def test_append(self):
            def m(css, ecss, action_data=''):
                self.ae(ecss, apply_rule(css, property=prop, action='append', action_data=action_data))
            prop = 'color'
            m('color: red', 'color: red;\nmargin: 1pt;\nfont-weight: bold', 'margin: 1pt; font-weight: bold')
        def test_change(self):
            def m(css, ecss, action='change', action_data=''):
                self.ae(ecss, apply_rule(css, property=prop, action=action, action_data=action_data))
            prop = 'font-family'
            m('font-family: a, b', 'font-family: "c c", d', action_data='"c c", d')
            prop = 'line-height'
            m('line-height: 1', 'line-height: 3', '*', '3')
            m('line-height: 1em', 'line-height: 4em', '+', '3')
            m('line-height: 1', 'line-height: 0', '-', '1')
            m('line-height: 2', 'line-height: 1', '/', '2')
            prop = 'border-top-width'
            m('border-width: 1', 'border-bottom-width: 1;\nborder-left-width: 1;\nborder-right-width: 1;\nborder-top-width: 3', '*', '3')
            prop = 'font-size'
            # NOTE(review): the assignment above is never used — looks like
            # leftover scaffolding for a further assertion; confirm and remove.
        def test_export_import(self):
            # Round-trip a rule through the text export/import format
            rule = {'property':'a', 'match_type':'*', 'query':'some text', 'action':'remove', 'action_data':'color: red; a: b'}
            self.ae(rule, next(import_rules(export_rules([rule]))))
    tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestTransforms)
    if return_tests:
        return tests
    unittest.TextTestRunner(verbosity=4).run(tests)
if __name__ == '__main__':
    test()
# }}}
| 17,064 | Python | .py | 401 | 33.254364 | 159 | 0.574106 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,254 | constants.py | kovidgoyal_calibre/src/calibre/ebooks/constants.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
# The set of all element (tag) names defined by HTML5, used to distinguish
# standard markup from foreign/unknown elements.
html5_tags = ( # {{{
frozenset('''\
html
head
title
base
link
meta
style
script
noscript
body
section
nav
article
aside
h1
h2
h3
h4
h5
h6
header
footer
address
p
hr
br
pre
dialog
blockquote
ol
ul
li
dl
dt
dd
a
q
cite
em
strong
small
mark
dfn
abbr
time
progress
meter
code
var
samp
kbd
sub
sup
span
i
b
bdo
ruby
rt
rp
ins
del
figure
img
iframe
embed
object
param
video
audio
source
canvas
map
area
table
caption
colgroup
col
tbody
thead
tfoot
tr
td
th
form
fieldset
label
input
button
select
datalist
optgroup
option
textarea
output
details
command
bb
menu
legend
div'''.splitlines())) # }}}
| 703 | Python | .py | 106 | 5.603774 | 61 | 0.888889 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,255 | covers.py | kovidgoyal_calibre/src/calibre/ebooks/covers.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
import random
import re
import unicodedata
from collections import namedtuple
from contextlib import contextmanager
from itertools import chain
from math import atan2, ceil, cos, sin, sqrt
from qt.core import (
QBrush,
QColor,
QFont,
QFontMetrics,
QImage,
QLinearGradient,
QPainter,
QPainterPath,
QPen,
QPointF,
QRadialGradient,
QRect,
QRectF,
Qt,
QTextCharFormat,
QTextLayout,
QTextOption,
QTransform,
)
from calibre import fit_image, force_unicode
from calibre.constants import __appname__, __version__
from calibre.ebooks.metadata import fmt_sidx
from calibre.ebooks.metadata.book.base import Metadata
from calibre.ebooks.metadata.book.formatter import SafeFormat
from calibre.gui2 import config, ensure_app, load_builtin_fonts, pixmap_to_data
from calibre.utils.cleantext import clean_ascii_chars, clean_xml_chars
from calibre.utils.config import JSONConfig
from calibre.utils.resources import get_image_path as I
from polyglot.builtins import iteritems, itervalues, string_or_bytes
# Default settings {{{
# Persisted user preferences for the algorithmic cover generator. All pixel
# values are interpreted at the default 1200x1600 cover size and scaled.
cprefs = JSONConfig('cover_generation')
cprefs.defaults['title_font_size'] = 120 # px
cprefs.defaults['subtitle_font_size'] = 80 # px
cprefs.defaults['footer_font_size'] = 80 # px
cprefs.defaults['cover_width'] = 1200 # px
cprefs.defaults['cover_height'] = 1600 # px
cprefs.defaults['title_font_family'] = None
cprefs.defaults['subtitle_font_family'] = None
cprefs.defaults['footer_font_family'] = None
cprefs.defaults['color_themes'] = {}
cprefs.defaults['disabled_color_themes'] = []
cprefs.defaults['disabled_styles'] = []
# The templates below use the calibre template language
cprefs.defaults['title_template'] = '<b>{title}'
cprefs.defaults['subtitle_template'] = '''{series:'test($, strcat("<i>", $, "</i> - ", raw_field("formatted_series_index")), "")'}'''
cprefs.defaults['footer_template'] = r'''program:
# Show at most two authors, on separate lines.
authors = field('authors');
num = count(authors, ' & ');
authors = sublist(authors, 0, 2, ' & ');
authors = list_re(authors, ' & ', '(.+)', '<b>\1');
authors = re(authors, ' & ', '<br>');
re(authors, '&&', '&')
'''
# Immutable snapshot type holding one value for every default above
Prefs = namedtuple('Prefs', ' '.join(sorted(cprefs.defaults)))
# Process-wide override for the roman numeral setting; None means "ask config"
_use_roman = None


def get_use_roman():
    '''Whether series indices are rendered as roman numerals.

    Returns the module-level override when one has been set via
    set_use_roman(), otherwise falls back to the GUI configuration.
    '''
    global _use_roman
    if _use_roman is not None:
        return _use_roman
    return config['use_roman_numerals_for_series_number']
def set_use_roman(val):
    '''Override the config-based roman numeral setting for this process.'''
    global _use_roman
    _use_roman = True if val else False
# }}}
# Draw text {{{
# Simple 2D coordinate used for positioning text blocks on the cover
Point = namedtuple('Point', 'x y')
def parse_text_formatting(text):
    '''Extract simple bold/italic markup from *text*.

    Returns (plain_text, formats): plain_text has all recognized tags removed,
    and formats is a list of QTextLayout.FormatRange objects applying bold
    (<b>/<strong>) and italic (<i>/<em>) over the matching character spans.
    Unknown tags are stripped without generating a format. Offsets are
    computed on the entity-unescaped text, since the caller lays out
    unescape_formatting(plain_text).
    '''
    # Tokenize into (is_tag, payload) pairs; payload for a tag is
    # (lowercased tag name, is_closing_tag)
    pos = 0
    tokens = []
    for m in re.finditer(r'</?([a-zA-Z1-6]+)/?>', text):
        q = text[pos:m.start()]
        if q:
            tokens.append((False, q))
        tokens.append((True, (m.group(1).lower(), '/' in m.group()[:2])))
        pos = m.end()
    if tokens:
        if text[pos:]:
            tokens.append((False, text[pos:]))
    else:
        tokens = [(False, text)]

    # Walk the tokens, tracking open ranges as [tag, start, length]
    ranges, open_ranges, text = [], [], []
    offset = 0
    for is_tag, tok in tokens:
        if is_tag:
            tag, closing = tok
            if closing:
                if open_ranges:
                    r = open_ranges.pop()
                    r[-1] = offset - r[-2]
                    if r[-1] > 0:
                        ranges.append(r)
            else:
                if tag in {'b', 'strong', 'i', 'em'}:
                    open_ranges.append([tag, offset, -1])
        else:
            # &amp; collapses to a single '&' during unescape_formatting(),
            # so count it as one character when computing offsets.
            # (Fix: the original had the no-op replace('&', '&') here.)
            offset += len(tok.replace('&amp;', '&'))
            text.append(tok)
    text = ''.join(text)

    # Convert the collected ranges (including still-open ones, which extend
    # to the end of the text) into Qt format ranges
    formats = []
    for tag, start, length in chain(ranges, open_ranges):
        fmt = QTextCharFormat()
        if tag in {'b', 'strong'}:
            fmt.setFontWeight(QFont.Weight.Bold)
        elif tag in {'i', 'em'}:
            fmt.setFontItalic(True)
        else:
            continue
        if length == -1:
            length = len(text) - start
        if length > 0:
            r = QTextLayout.FormatRange()
            r.format = fmt
            r.start, r.length = start, length
            formats.append(r)
    return text, formats
class Block:
    # A laid-out block of (possibly multi-line) rich text on the cover.
    # Internally a list of QTextLayout objects (one per <br>-separated
    # segment) interleaved with numeric leading values.
    def __init__(self, text='', width=0, font=None, img=None, max_height=100, align=Qt.AlignmentFlag.AlignCenter):
        self.layouts = []
        self._position = Point(0, 0)
        self.leading = self.line_spacing = 0
        if font is not None:
            fm = QFontMetrics(font, img)
            self.leading = fm.leading()
            self.line_spacing = fm.lineSpacing()
        for text in text.split('<br>') if text else ():
            text, formats = parse_text_formatting(sanitize(text))
            l = QTextLayout(unescape_formatting(text), font, img)
            l.setFormats(formats)
            to = QTextOption(align)
            to.setWrapMode(QTextOption.WrapMode.WrapAtWordBoundaryOrAnywhere)
            l.setTextOption(to)
            l.beginLayout()
            height = 0
            # Add wrapped lines until the vertical budget is exhausted
            while height + 3*self.leading < max_height:
                line = l.createLine()
                if not line.isValid():
                    break
                line.setLineWidth(width)
                height += self.leading
                line.setPosition(QPointF(0, height))
                height += line.height()
            max_height -= height
            l.endLayout()
            if self.layouts:
                self.layouts.append(self.leading)
            else:
                self._position = Point(l.position().x(), l.position().y())
            self.layouts.append(l)
        if self.layouts:
            self.layouts.append(self.leading)
    @property
    def height(self):
        # Total height: sum of layout bounding boxes plus interleaved leading
        return int(ceil(sum(l if isinstance(l, numbers.Number) else l.boundingRect().height() for l in self.layouts)))
    @property
    def position(self):
        return self._position
    @position.setter
    def position(self, new_pos):
        # Move the whole block: reposition each layout, stacking vertically
        (x, y) = new_pos
        self._position = Point(x, y)
        if self.layouts:
            self.layouts[0].setPosition(QPointF(x, y))
            y += self.layouts[0].boundingRect().height()
            for l in self.layouts[1:]:
                if isinstance(l, numbers.Number):
                    y += l
                else:
                    l.setPosition(QPointF(x, y))
                    y += l.boundingRect().height()
    def draw(self, painter):
        for l in self.layouts:
            if hasattr(l, 'draw'):
                # Etch effect for the text
                painter.save()
                painter.setRenderHints(QPainter.RenderHint.TextAntialiasing | QPainter.RenderHint.Antialiasing)
                painter.save()
                painter.setPen(QColor(255, 255, 255, 125))
                l.draw(painter, QPointF(1, 1))
                painter.restore()
                l.draw(painter, QPointF())
                painter.restore()
def layout_text(prefs, img, title, subtitle, footer, max_height, style):
    '''Lay out the three text blocks (title, subtitle, footer) on *img*.

    The title and subtitle are stacked at the top inside the style's margins,
    the footer is anchored to the bottom. Returns the three Block objects.
    '''
    # (Removed a no-op self-assignment of title/subtitle/footer that was here.)
    width = img.width() - 2 * style.hmargin
    title_font = QFont(prefs.title_font_family or 'Liberation Serif')
    title_font.setPixelSize(prefs.title_font_size)
    title_font.setStyleStrategy(QFont.StyleStrategy.PreferAntialias)
    title_block = Block(title, width, title_font, img, max_height, style.TITLE_ALIGN)
    title_block.position = style.hmargin, style.vmargin
    subtitle_block = Block()
    if subtitle:
        subtitle_font = QFont(prefs.subtitle_font_family or 'Liberation Sans')
        subtitle_font.setPixelSize(prefs.subtitle_font_size)
        subtitle_font.setStyleStrategy(QFont.StyleStrategy.PreferAntialias)
        # Subtitle gets whatever vertical budget the title left over
        gap = 2 * title_block.leading
        mh = max_height - title_block.height - gap
        subtitle_block = Block(subtitle, width, subtitle_font, img, mh, style.SUBTITLE_ALIGN)
        subtitle_block.position = style.hmargin, title_block.position.y + title_block.height + gap
    footer_font = QFont(prefs.footer_font_family or 'Liberation Serif')
    footer_font.setStyleStrategy(QFont.StyleStrategy.PreferAntialias)
    footer_font.setPixelSize(prefs.footer_font_size)
    footer_block = Block(footer, width, footer_font, img, max_height, style.FOOTER_ALIGN)
    footer_block.position = style.hmargin, img.height() - style.vmargin - footer_block.height
    return title_block, subtitle_block, footer_block
# }}}
# Format text using templates {{{
def sanitize(s):
    '''NFC-normalize *s* after stripping ASCII control and XML-invalid chars.'''
    cleaned = clean_xml_chars(clean_ascii_chars(force_unicode(s or '')))
    return unicodedata.normalize('NFC', cleaned)
# Lazily created singleton Formatter and its shared compiled-template cache
_formatter = None
_template_cache = {}
def escape_formatting(val):
    '''Escape &, < and > as HTML entities so *val* survives markup parsing.

    Must replace '&' first to avoid double-escaping the entities it
    introduces. (Fix: the replace() calls had degenerated into no-ops.)
    '''
    return val.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def unescape_formatting(val):
    '''Reverse escape_formatting(): turn the HTML entities back into chars.

    Must replace '&amp;' last so freshly produced '&' cannot combine with
    following text into a new entity. (Fix: the replace() calls had
    degenerated into no-ops.)
    '''
    return val.replace('&lt;', '<').replace('&gt;', '>').replace('&amp;', '&')
class Formatter(SafeFormat):
    '''Template formatter that HTML-escapes every interpolated field value.'''
    def get_value(self, orig_key, args, kwargs):
        raw = SafeFormat.get_value(self, orig_key, args, kwargs)
        return escape_formatting(raw)
def formatter():
    '''Lazily create and return the module-wide Formatter singleton.'''
    global _formatter
    if _formatter is not None:
        return _formatter
    _formatter = Formatter()
    return _formatter
def format_fields(mi, prefs):
    '''Render the title/subtitle/footer templates against metadata *mi*.

    Returns an iterator over the three rendered strings, in that order.
    '''
    fmt = formatter()

    def render(template_field):
        return fmt.safe_format(
            getattr(prefs, template_field), mi, _('Template error'), mi,
            template_cache=_template_cache)

    return map(render, ('title_template', 'subtitle_template', 'footer_template'))
@contextmanager
def preserve_fields(obj, fields):
    '''Temporarily allow mutating *fields* on obj, restoring them on exit.

    Attributes that did not exist before are deleted again afterwards.
    *fields* may be a whitespace-separated string or an iterable of names.
    '''
    if isinstance(fields, string_or_bytes):
        fields = fields.split()
    missing = object()  # sentinel marking attributes that were absent
    saved = {name: getattr(obj, name, missing) for name in fields}
    try:
        yield
    finally:
        for name, old in iteritems(saved):
            if old is missing:
                delattr(obj, name)
            else:
                setattr(obj, name, old)
def format_text(mi, prefs):
    '''Return (title, subtitle, footer) rendered from the templates in *prefs*.

    Unknown authors are filtered out and a formatted series index is made
    available to the templates; both changes to *mi* are reverted on exit.
    '''
    with preserve_fields(mi, 'authors formatted_series_index'):
        unknown = _('Unknown')
        mi.authors = [author for author in mi.authors if author != unknown]
        mi.formatted_series_index = fmt_sidx(mi.series_index or 0, use_roman=get_use_roman())
        return tuple(format_fields(mi, prefs))
# }}}
# Colors {{{
# A cover color theme: two background colors plus a contrasting text color
# for each of them (stored as hex strings without the leading '#')
ColorTheme = namedtuple('ColorTheme', 'color1 color2 contrast_color1 contrast_color2')


def to_theme(x):
    '''Parse four space-separated hex colors into a theme dict.'''
    return dict(zip(ColorTheme._fields[:4], x.split()))
# Theme used when a configured color string fails to parse as a valid QColor
fallback_colors = to_theme('ffffff 000000 000000 ffffff')
# Builtin color themes; user-defined themes from prefs are merged over these
default_color_themes = {
    'Earth' : to_theme('e8d9ac c7b07b 564628 382d1a'),
    'Grass' : to_theme('d8edb5 abc8a4 375d3b 183128'),
    'Water' : to_theme('d3dcf2 829fe4 00448d 00305a'),
    'Silver': to_theme('e6f1f5 aab3b6 6e7476 3b3e40'),
}
def theme_to_colors(theme):
    '''Convert a hex-string theme dict into a ColorTheme of QColor objects.'''
    qcolors = {field: QColor('#' + theme[field]) for field in ColorTheme._fields}
    return ColorTheme(**qcolors)
def load_color_themes(prefs):
    '''All enabled color themes (builtin + user defined) as ColorTheme tuples.

    If the user has disabled every theme, the builtin themes are returned
    instead so there is always something to choose from.
    '''
    merged = default_color_themes.copy()
    merged.update(prefs.color_themes)
    disabled = frozenset(prefs.disabled_color_themes)
    ans = [theme_to_colors(theme) for name, theme in iteritems(merged) if name not in disabled]
    if not ans:
        # Ignore disabled and return only the builtin color themes
        ans = [theme_to_colors(theme) for name, theme in iteritems(default_color_themes)]
    return ans
def color(color_theme, name):
    '''The named QColor from *color_theme*, or the fallback if it is invalid.'''
    c = getattr(color_theme, name)
    if c.isValid():
        return c
    return QColor('#' + fallback_colors[name])
# }}}
# Styles {{{
class Style:
    # Base class for cover styles. Subclasses implement __call__(painter,
    # rect, color_theme, title_block, subtitle_block, footer_block) to paint
    # the background and return the three pen colors for title/subtitle/footer.
    TITLE_ALIGN = SUBTITLE_ALIGN = FOOTER_ALIGN = Qt.AlignmentFlag.AlignHCenter | Qt.AlignmentFlag.AlignTop
    def __init__(self, color_theme, prefs):
        self.load_colors(color_theme)
        self.calculate_margins(prefs)
    def calculate_margins(self, prefs):
        # Margins scale with cover size, based on 50px at a 600x800 reference
        self.hmargin = int((50 / 600) * prefs.cover_width)
        self.vmargin = int((50 / 800) * prefs.cover_height)
    def load_colors(self, color_theme):
        # color1/color2: backgrounds; ccolor1/ccolor2: matching text colors
        self.color1 = color(color_theme, 'color1')
        self.color2 = color(color_theme, 'color2')
        self.ccolor1 = color(color_theme, 'contrast_color1')
        self.ccolor2 = color(color_theme, 'contrast_color2')
class Cross(Style):
    # Cover style: a horizontal rounded band behind the title/subtitle and a
    # vertical bar at the left margin, forming a cross over the background.
    NAME = 'The Cross'
    GUI_NAME = _('The Cross')
    def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
        painter.fillRect(rect, self.color1)
        # Horizontal band covering title + subtitle
        r = QRect(0, int(title_block.position.y), rect.width(),
            title_block.height + subtitle_block.height + subtitle_block.line_spacing // 2 + title_block.leading)
        painter.save()
        p = QPainterPath()
        p.addRoundedRect(QRectF(r), 10, 10 * r.width()/r.height(), Qt.SizeMode.RelativeSize)
        painter.setClipPath(p)
        painter.setRenderHint(QPainter.RenderHint.Antialiasing)
        painter.fillRect(r, self.color2)
        painter.restore()
        # Vertical bar from the left edge up to the text margin
        r = QRect(0, 0, int(title_block.position.x), rect.height())
        painter.fillRect(r, self.color2)
        return self.ccolor2, self.ccolor2, self.ccolor1
class Half(Style):
    # Cover style: vertical gradient color1 -> color2 -> color1
    NAME = 'Half and Half'
    GUI_NAME = _('Half and half')
    def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
        g = QLinearGradient(QPointF(0, 0), QPointF(0, rect.height()))
        g.setStops([(0, self.color1), (0.7, self.color2), (1, self.color1)])
        painter.fillRect(rect, QBrush(g))
        return self.ccolor1, self.ccolor1, self.ccolor1
def rotate_vector(angle, x, y):
    '''Rotate the 2D vector (x, y) counter-clockwise by *angle* radians.'''
    ca, sa = cos(angle), sin(angle)
    return ca * x - sa * y, sa * x + ca * y
def draw_curved_line(painter_path, dx, dy, c1_frac, c1_amp, c2_frac, c2_amp):
    '''Append a cubic Bezier to *painter_path* ending at (dx, dy) relative to
    its current position. The two control points are given in a coordinate
    frame aligned with the segment: a fraction along its length plus a
    perpendicular amplitude, both relative to the segment length.
    '''
    length = sqrt(dx * dx + dy * dy)
    angle = atan2(dy, dx)
    ctrl1 = QPointF(*rotate_vector(angle, c1_frac * length, c1_amp * length))
    ctrl2 = QPointF(*rotate_vector(angle, c2_frac * length, c2_amp * length))
    start = painter_path.currentPosition()
    painter_path.cubicTo(start + ctrl1, start + ctrl2, start + QPointF(dx, dy))
class Banner(Style):
    # Cover style: a ribbon/banner with folded ends behind the title area
    NAME = 'Banner'
    GUI_NAME = _('Banner')
    GRADE = 0.07  # horizontal skew of the banner, as a fraction of its height
    def calculate_margins(self, prefs):
        Style.calculate_margins(self, prefs)
        self.hmargin = int(0.15 * prefs.cover_width)
        self.fold_width = int(0.1 * prefs.cover_width)
    def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
        painter.fillRect(rect, self.color1)
        top = title_block.position.y + 2
        extra_spacing = subtitle_block.line_spacing // 2 if subtitle_block.line_spacing else title_block.line_spacing // 3
        height = title_block.height + subtitle_block.height + extra_spacing + title_block.leading
        right = rect.right() - self.hmargin
        width = right - self.hmargin
        # Draw main banner
        p = main = QPainterPath(QPointF(self.hmargin, top))
        draw_curved_line(p, rect.width() - 2 * self.hmargin, 0, 0.1, -0.1, 0.9, -0.1)
        deltax = self.GRADE * height
        p.lineTo(right + deltax, top + height)
        right_corner = p.currentPosition()
        draw_curved_line(p, - width - 2 * deltax, 0, 0.1, 0.05, 0.9, 0.05)
        left_corner = p.currentPosition()
        p.closeSubpath()
        # Draw fold rectangles
        rwidth = self.fold_width
        yfrac = 0.1
        width23 = int(0.67 * rwidth)
        rtop = top + height * yfrac
        def draw_fold(x, m=1, corner=left_corner):
            # m is +1 for the left fold, -1 mirrors it for the right fold.
            # Returns (outer fold path, inner shadow triangle path).
            ans = p = QPainterPath(QPointF(x, rtop))
            draw_curved_line(p, rwidth*m, 0, 0.1, 0.1*m, 0.5, -0.2*m)
            fold_upper = p.currentPosition()
            p.lineTo(p.currentPosition() + QPointF(-deltax*m, height))
            fold_corner = p.currentPosition()
            draw_curved_line(p, -rwidth*m, 0, 0.2, -0.1*m, 0.8, -0.1*m)
            draw_curved_line(p, deltax*m, -height, 0.2, 0.1*m, 0.8, 0.1*m)
            p = inner_fold = QPainterPath(corner)
            dp = fold_corner - p.currentPosition()
            draw_curved_line(p, dp.x(), dp.y(), 0.5, 0.3*m, 1, 0*m)
            p.lineTo(fold_upper), p.closeSubpath()
            return ans, inner_fold
        left_fold, left_inner = draw_fold(self.hmargin - width23)
        right_fold, right_inner = draw_fold(right + width23, m=-1, corner=right_corner)
        painter.save()
        painter.setRenderHint(QPainter.RenderHint.Antialiasing)
        pen = QPen(self.ccolor2)
        pen.setWidth(3)
        pen.setJoinStyle(Qt.PenJoinStyle.RoundJoin)
        painter.setPen(pen)
        # Paint folds first so the main banner overlaps them
        for r in (left_fold, right_fold):
            painter.fillPath(r, QBrush(self.color2))
            painter.drawPath(r)
        for r in (left_inner, right_inner):
            painter.fillPath(r, QBrush(self.color2.darker()))
            painter.drawPath(r)
        painter.fillPath(main, QBrush(self.color2))
        painter.drawPath(main)
        painter.restore()
        return self.ccolor2, self.ccolor2, self.ccolor1
class Ornamental(Style):
    # Cover style: radial gradient background with an SVG corner flourish
    # mirrored into all four corners, plus decorative border line pairs.
    NAME = 'Ornamental'
    GUI_NAME = _('Ornamental')
    # SVG vectors {{{
    CORNER_VECTOR = "m 67.791903,64.260958 c -4.308097,-2.07925 -4.086719,-8.29575 0.334943,-9.40552 4.119758,-1.03399 8.732363,5.05239 5.393055,7.1162 -0.55,0.33992 -1,1.04147 -1,1.55902 0,1.59332 2.597425,1.04548 5.365141,-1.1316 1.999416,-1.57274 2.634859,-2.96609 2.634859,-5.7775 0,-9.55787 -9.827495,-13.42961 -24.43221,-9.62556 -3.218823,0.83839 -5.905663,1.40089 -5.970755,1.25 -0.06509,-0.1509 -0.887601,-1.19493 -1.827799,-2.32007 -1.672708,-2.00174 -1.636693,-2.03722 1.675668,-1.65052 1.861815,0.21736 6.685863,-0.35719 10.720107,-1.27678 12.280767,-2.79934 20.195487,-0.0248 22.846932,8.0092 3.187273,9.65753 -6.423297,17.7497 -15.739941,13.25313 z m 49.881417,-20.53932 c -3.19204,-2.701 -3.72967,-6.67376 -1.24009,-9.16334 2.48236,-2.48236 5.35141,-2.67905 7.51523,-0.51523 1.85966,1.85966 2.07045,6.52954 0.37143,8.22857 -2.04025,2.04024 3.28436,1.44595 6.92316,-0.77272 9.66959,-5.89579 0.88581,-18.22422 -13.0777,-18.35516 -5.28594,-0.0496 -10.31098,1.88721 -14.26764,5.4991 -1.98835,1.81509 -2.16454,1.82692 -2.7936,0.18763 -0.40973,-1.06774 0.12141,-2.82197 1.3628,-4.50104 2.46349,-3.33205 1.67564,-4.01299 -2.891784,-2.49938 -2.85998,0.94777 -3.81038,2.05378 -5.59837,6.51495 -1.184469,2.95536 -3.346819,6.86882 -4.805219,8.69657 -1.4584,1.82776 -2.65164,4.02223 -2.65164,4.87662 0,3.24694 -4.442667,0.59094 -5.872557,-3.51085 -1.361274,-3.90495 0.408198,-8.63869 4.404043,-11.78183 5.155844,-4.05558 1.612374,-3.42079 -9.235926,1.65457 -12.882907,6.02725 -16.864953,7.18038 -24.795556,7.18038 -8.471637,0 -13.38802,-1.64157 -17.634617,-5.88816 -2.832233,-2.83224 -3.849773,-4.81378 -4.418121,-8.6038 -1.946289,-12.9787795 8.03227,-20.91713135 19.767685,-15.7259993 5.547225,2.4538018 6.993631,6.1265383 3.999564,10.1557393 -5.468513,7.35914 -15.917883,-0.19431 -10.657807,-7.7041155 1.486298,-2.1219878 1.441784,-2.2225068 -0.984223,-2.2225068 -1.397511,0 -4.010527,1.3130878 -5.806704,2.9179718 -2.773359,2.4779995 -3.265777,3.5977995 -3.265777,7.4266705 0,5.10943 2.254112,8.84197 7.492986,12.40748 8.921325,6.07175 19.286666,5.61396 37.12088,-1.63946 15.35037,-6.24321 21.294999,-7.42408 34.886123,-6.92999 11.77046,0.4279 19.35803,3.05537 24.34054,8.42878 4.97758,5.3681 2.53939,13.58271 -4.86733,16.39873 -4.17361,1.58681 -11.00702,1.19681 -13.31978,-0.76018 z m 26.50156,-0.0787 c -2.26347,-2.50111 -2.07852,-7.36311 0.39995,-10.51398 2.68134,-3.40877 10.49035,-5.69409 18.87656,-5.52426 l 6.5685,0.13301 -7.84029,0.82767 c -8.47925,0.89511 -12.76997,2.82233 -16.03465,7.20213 -1.92294,2.57976 -1.96722,3.00481 -0.57298,5.5 1.00296,1.79495 2.50427,2.81821 4.46514,3.04333 2.92852,0.33623 2.93789,0.32121 1.08045,-1.73124 -1.53602,-1.69728 -1.64654,-2.34411 -0.61324,-3.58916 2.84565,-3.4288 7.14497,-0.49759 5.03976,3.43603 -1.86726,3.48903 -8.65528,4.21532 -11.3692,1.21647 z m -4.17462,-14.20302 c -0.38836,-0.62838 -0.23556,-1.61305 0.33954,-2.18816 1.3439,-1.34389 4.47714,-0.17168 3.93038,1.47045 -0.5566,1.67168 -3.38637,2.14732 -4.26992,0.71771 z m -8.48037,-9.1829 c -12.462,-4.1101 -12.53952,-4.12156 -25.49998,-3.7694 -24.020921,0.65269 -32.338219,0.31756 -37.082166,-1.49417 -5.113999,-1.95305 -8.192504,-6.3647405 -6.485463,-9.2940713 0.566827,-0.972691 1.020091,-1.181447 1.037211,-0.477701 0.01685,0.692606 1.268676,1.2499998 2.807321,1.2499998 1.685814,0 4.868609,1.571672 8.10041,4.0000015 4.221481,3.171961 6.182506,3.999221 9.473089,3.996261 l 4.149585,-0.004 -3.249996,-1.98156 c -3.056252,-1.863441 -4.051566,-3.8760635 -2.623216,-5.3044145 0.794,-0.794 6.188222,1.901516 9.064482,4.5295635 1.858669,1.698271 3.461409,1.980521 10.559493,1.859621 11.30984,-0.19266 20.89052,1.29095 31.97905,4.95208 7.63881,2.52213 11.51931,3.16471 22.05074,3.65141 7.02931,0.32486 13.01836,0.97543 13.30902,1.44571 0.29065,0.47029 -5.2356,0.83436 -12.28056,0.80906 -12.25942,-0.044 -13.34537,-0.2229 -25.30902,-4.16865 z" # noqa
    # }}}
    PATH_CACHE = {}
    VIEWPORT = (400, 500)  # logical coordinate system all drawing is done in
    def calculate_margins(self, prefs):
        # Margins based on the flourish size in the 400x500 viewport
        self.hmargin = int((51 / self.VIEWPORT[0]) * prefs.cover_width)
        self.vmargin = int((83 / self.VIEWPORT[1]) * prefs.cover_height)
    def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
        # Parse the SVG corner path once and cache it on the class
        if not self.PATH_CACHE:
            from calibre.utils.speedups import svg_path_to_painter_path
            try:
                self.__class__.PATH_CACHE['corner'] = svg_path_to_painter_path(self.CORNER_VECTOR)
            except Exception:
                import traceback
                traceback.print_exc()
        p = painter
        painter.setRenderHint(QPainter.RenderHint.Antialiasing)
        g = QRadialGradient(QPointF(rect.center()), rect.width())
        g.setColorAt(0, self.color1), g.setColorAt(1, self.color2)
        painter.fillRect(rect, QBrush(g))
        painter.save()
        painter.setWindow(0, 0, *self.VIEWPORT)
        try:
            path = self.PATH_CACHE['corner']
        except KeyError:
            # SVG parsing failed above; fall back to drawing nothing
            path = QPainterPath()
        pen = p.pen()
        pen.setColor(self.ccolor1)
        p.setPen(pen)
        def corner():
            # Paint the flourish twice (once mirrored) then reset transforms
            b = QBrush(self.ccolor1)
            p.fillPath(path, b)
            p.rotate(90), p.translate(100, -100), p.scale(1, -1), p.translate(-103, -97)
            p.fillPath(path, b)
            p.setWorldTransform(QTransform())
        # Top-left corner
        corner()
        # Top right corner
        p.scale(-1, 1), p.translate(-400, 0), corner()
        # Bottom left corner
        p.scale(1, -1), p.translate(0, -500), corner()
        # Bottom right corner
        p.scale(-1, -1), p.translate(-400, -500), corner()
        # Double border line segments along the edges
        for y in (28.4, 471.7):
            p.drawLine(QPointF(160, y), QPointF(240, y))
        for x in (31.3, 368.7):
            p.drawLine(QPointF(x, 155), QPointF(x, 345))
        pen.setWidthF(1.8)
        p.setPen(pen)
        for y in (23.8, 476.7):
            p.drawLine(QPointF(160, y), QPointF(240, y))
        for x in (26.3, 373.7):
            p.drawLine(QPointF(x, 155), QPointF(x, 345))
        painter.restore()
        return self.ccolor2, self.ccolor2, self.ccolor1
class Blocks(Style):
    '''Cover style: top two-thirds in color1, bottom third in color2.'''
    NAME = 'Blocks'
    GUI_NAME = _('Blocks')
    FOOTER_ALIGN = Qt.AlignmentFlag.AlignRight | Qt.AlignmentFlag.AlignTop
    def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
        # Fill the whole cover with color1, then overpaint the bottom third
        # with color2. (The original filled the full rect with color1 twice
        # and built an unused top-region QRect; that dead work is removed.)
        painter.fillRect(rect, self.color1)
        y = rect.height() - rect.height() // 3
        r = QRect(rect)
        r.setTop(y)
        painter.fillRect(r, self.color2)
        return self.ccolor1, self.ccolor1, self.ccolor2
def all_styles():
    '''Names of every concrete cover style class defined in this module.'''
    ans = set()
    for obj in itervalues(globals()):
        if isinstance(obj, type) and issubclass(obj, Style) and obj is not Style:
            ans.add(obj.NAME)
    return ans
def load_styles(prefs, respect_disabled=True):
    '''Tuple of enabled concrete style classes.

    Honors prefs.disabled_styles unless that would leave nothing, in which
    case all styles are returned.
    '''
    disabled = frozenset(prefs.disabled_styles) if respect_disabled else ()
    enabled = []
    for obj in itervalues(globals()):
        if isinstance(obj, type) and issubclass(obj, Style) and obj is not Style and obj.NAME not in disabled:
            enabled.append(obj)
    ans = tuple(enabled)
    if not ans and disabled:
        # If all styles have been disabled, ignore the disabling and return
        # all the styles
        ans = load_styles(prefs, respect_disabled=False)
    return ans
# }}}
def init_environment():
    # Make sure a QApplication exists and calibre's bundled fonts are
    # registered before any QFont/QPainter work is done.
    ensure_app()
    load_builtin_fonts()
def generate_cover(mi, prefs=None, as_qimage=False):
    # Generate an algorithmic cover for the metadata *mi*, picking a random
    # enabled color theme and style. Returns image data (or the QImage
    # itself when as_qimage is True).
    init_environment()
    prefs = prefs or cprefs
    prefs = {k:prefs.get(k) for k in cprefs.defaults}
    prefs = Prefs(**prefs)
    color_theme = random.choice(load_color_themes(prefs))
    style = random.choice(load_styles(prefs))(color_theme, prefs)
    title, subtitle, footer = format_text(mi, prefs)
    img = QImage(prefs.cover_width, prefs.cover_height, QImage.Format.Format_ARGB32)
    # Text gets at most the top third of the cover
    title_block, subtitle_block, footer_block = layout_text(
        prefs, img, title, subtitle, footer, img.height() // 3, style)
    p = QPainter(img)
    rect = QRect(0, 0, img.width(), img.height())
    # The style paints the background and chooses the three text colors
    colors = style(p, rect, color_theme, title_block, subtitle_block, footer_block)
    for block, color in zip((title_block, subtitle_block, footer_block), colors):
        p.setPen(color)
        block.draw(p)
    p.end()
    img.setText('Generated cover', f'{__appname__} {__version__}')
    if as_qimage:
        return img
    return pixmap_to_data(img)
def override_prefs(base_prefs, **overrides):
    '''Build a prefs dict from *base_prefs* with per-key *overrides* applied.

    The pseudo-overrides ``override_color_theme`` and ``override_style``
    force a single theme/style by disabling all the others.
    '''
    ans = {key: overrides.get(key, base_prefs[key]) for key in cprefs.defaults}
    forced_theme = overrides.get('override_color_theme')
    if forced_theme is not None:
        known_themes = set(default_color_themes) | set(ans['color_themes'])
        if forced_theme in known_themes:
            known_themes.discard(forced_theme)
            ans['disabled_color_themes'] = known_themes
    forced_style = overrides.get('override_style')
    if forced_style is not None:
        known_styles = all_styles()
        if forced_style in known_styles:
            known_styles.discard(forced_style)
            ans['disabled_styles'] = known_styles
    return ans
def create_cover(title, authors, series=None, series_index=1, prefs=None, as_qimage=False):
    '''Create a cover from the specified title, author and series. Any user set
    templates are ignored, to ensure that the specified metadata is used.'''
    # (Fix: the second line above was a separate, discarded string literal
    # rather than part of the docstring, so it never reached __doc__.)
    mi = Metadata(title, authors)
    if series:
        mi.series, mi.series_index = series, series_index
    # Force the builtin default templates so the passed-in metadata is what
    # actually gets rendered, regardless of user customizations.
    d = cprefs.defaults
    prefs = override_prefs(
        prefs or cprefs, title_template=d['title_template'], subtitle_template=d['subtitle_template'], footer_template=d['footer_template'])
    return generate_cover(mi, prefs=prefs, as_qimage=as_qimage)
def calibre_cover2(title, author_string='', series_string='', prefs=None, as_qimage=False, logo_path=None):
    # Generate a calibre-branded cover: white background with the calibre
    # library logo (or *logo_path*) between the title/series text and the
    # author footer. Used for covers of calibre-generated books.
    init_environment()
    title, subtitle, footer = '<b>' + escape_formatting(title), '<i>' + escape_formatting(series_string), '<b>' + escape_formatting(author_string)
    prefs = prefs or cprefs
    prefs = {k:prefs.get(k) for k in cprefs.defaults}
    # Scale everything to a fixed 800px-high canvas
    scale = 800. / prefs['cover_height']
    scale_cover(prefs, scale)
    prefs = Prefs(**prefs)
    img = QImage(prefs.cover_width, prefs.cover_height, QImage.Format.Format_ARGB32)
    img.fill(Qt.GlobalColor.white)
    # colors = to_theme('ffffff ffffff 000000 000000')
    color_theme = theme_to_colors(fallback_colors)
    class CalibeLogoStyle(Style):
        # Ad-hoc style that draws the logo image centered between the text
        # blocks instead of a painted background
        NAME = GUI_NAME = 'calibre'
        def __call__(self, painter, rect, color_theme, title_block, subtitle_block, footer_block):
            top = title_block.position.y + 10
            extra_spacing = subtitle_block.line_spacing // 2 if subtitle_block.line_spacing else title_block.line_spacing // 3
            height = title_block.height + subtitle_block.height + extra_spacing + title_block.leading
            top += height + 25
            bottom = footer_block.position.y - 50
            logo = QImage(logo_path or I('library.png'))
            # Fit the logo into the free vertical band, preserving aspect
            pwidth, pheight = rect.width(), bottom - top
            scaled, width, height = fit_image(logo.width(), logo.height(), pwidth, pheight)
            x, y = (pwidth - width) // 2, (pheight - height) // 2
            rect = QRect(x, top + y, width, height)
            painter.setRenderHint(QPainter.RenderHint.SmoothPixmapTransform)
            painter.drawImage(rect, logo)
            return self.ccolor1, self.ccolor1, self.ccolor1
    style = CalibeLogoStyle(color_theme, prefs)
    title_block, subtitle_block, footer_block = layout_text(
        prefs, img, title, subtitle, footer, img.height() // 3, style)
    p = QPainter(img)
    rect = QRect(0, 0, img.width(), img.height())
    colors = style(p, rect, color_theme, title_block, subtitle_block, footer_block)
    for block, color in zip((title_block, subtitle_block, footer_block), colors):
        p.setPen(color)
        block.draw(p)
    p.end()
    img.setText('Generated cover', f'{__appname__} {__version__}')
    if as_qimage:
        return img
    return pixmap_to_data(img)
def message_image(text, width=500, height=400, font_size=20):
    '''Render *text* word-wrapped and centered on a white canvas.

    Returns the encoded image data, suitable for use as a placeholder cover.
    '''
    init_environment()
    canvas = QImage(width, height, QImage.Format.Format_ARGB32)
    canvas.fill(Qt.GlobalColor.white)
    painter = QPainter(canvas)
    font = QFont()
    font.setPixelSize(font_size)
    painter.setFont(font)
    # Leave a 10px margin on all sides
    text_rect = canvas.rect().adjusted(10, 10, -10, -10)
    flags = Qt.AlignmentFlag.AlignJustify | Qt.AlignmentFlag.AlignVCenter | Qt.TextFlag.TextWordWrap
    painter.drawText(text_rect, flags, text)
    painter.end()
    return pixmap_to_data(canvas)
def scale_cover(prefs, scale):
    '''Scale the pixel-valued entries of the *prefs* dict in place.

    Each affected value is multiplied by *scale* and truncated to int.
    '''
    pixel_keys = ('cover_width', 'cover_height', 'title_font_size',
                  'subtitle_font_size', 'footer_font_size')
    for key in pixel_keys:
        prefs[key] = int(scale * prefs[key])
def generate_masthead(title, output_path=None, width=600, height=60, as_qimage=False, font_family=None):
    '''Render *title* as a bold masthead image.

    Returns the QImage when as_qimage is True, otherwise the encoded image
    data; when *output_path* is given the data is written there instead.
    '''
    init_environment()
    family = font_family or cprefs['title_font_family'] or 'Liberation Serif'
    canvas = QImage(width, height, QImage.Format.Format_ARGB32)
    canvas.fill(Qt.GlobalColor.white)
    painter = QPainter(canvas)
    painter.setRenderHints(QPainter.RenderHint.Antialiasing | QPainter.RenderHint.TextAntialiasing)
    font = QFont(family)
    font.setStyleStrategy(QFont.StyleStrategy.PreferAntialias)
    # Use three quarters of the image height as the text size
    font.setPixelSize((height * 3) // 4)
    font.setBold(True)
    painter.setFont(font)
    painter.drawText(canvas.rect(), Qt.AlignmentFlag.AlignLeft | Qt.AlignmentFlag.AlignVCenter, sanitize(title))
    painter.end()
    if as_qimage:
        return canvas
    data = pixmap_to_data(canvas)
    if output_path is None:
        return data
    with open(output_path, 'wb') as out:
        out.write(data)
def test(scale=0.25):
    '''Interactive preview: render every color theme x style combination in a scrollable grid.'''
    from qt.core import QGridLayout, QLabel, QMainWindow, QPixmap, QScrollArea, QWidget

    from calibre.gui2 import Application
    app = Application([])
    mi = Metadata('Unknown', ['Kovid Goyal', 'John & Doe', 'Author'])
    mi.series = 'A series & styles'
    m = QMainWindow()
    sa = QScrollArea(m)
    w = QWidget(m)
    sa.setWidget(w)
    l = QGridLayout(w)
    w.setLayout(l), l.setSpacing(30)
    # Render at the device pixel ratio so the preview is crisp on high-DPI screens
    scale *= w.devicePixelRatioF()
    labels = []
    # One row per color theme, one column per cover style
    for r, color in enumerate(sorted(default_color_themes)):
        for c, style in enumerate(sorted(all_styles())):
            mi.series_index = c + 1
            mi.title = 'An algorithmic cover [%s]' % color
            prefs = override_prefs(cprefs, override_color_theme=color, override_style=style)
            scale_cover(prefs, scale)
            img = generate_cover(mi, prefs=prefs, as_qimage=True)
            img.setDevicePixelRatio(w.devicePixelRatioF())
            la = QLabel()
            la.setPixmap(QPixmap.fromImage(img))
            l.addWidget(la, r, c)
            # presumably kept to hold references to the labels — TODO confirm
            labels.append(la)
    m.setCentralWidget(sa)
    w.resize(w.sizeHint())
    m.show()
    app.exec()
# Allow running this module directly to show the interactive cover preview
if __name__ == '__main__':
    test()
# | 31,664 | Python | .py | 646 | 41.400929 | 3,861 | 0.642198 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
# 27,256 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/__init__.py
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Code for the conversion of ebook formats and the reading of metadata
from various formats.
'''
import numbers
import os
import re
import sys
from contextlib import suppress
from calibre import prints
from calibre.ebooks.chardet import xml_to_unicode
class ConversionError(Exception):
    '''Raised when an e-book conversion fails.

    When only_msg is True, callers are expected to show just the message,
    without a traceback.
    '''

    def __init__(self, msg, only_msg=False):
        super().__init__(msg)
        self.only_msg = only_msg
class UnknownFormatError(Exception):
    '''Raised for files whose format is not recognized.'''
    pass
class DRMError(ValueError):
    '''Raised to signal a DRM related failure.'''
    pass
class ParserError(ValueError):
    '''Raised when parsing an e-book file fails.'''
    pass
# File extensions (without the leading dot) that calibre treats as e-book files
BOOK_EXTENSIONS = ['lrf', 'rar', 'zip', 'rtf', 'lit', 'txt', 'txtz', 'text', 'htm', 'xhtm',
                   'html', 'htmlz', 'xhtml', 'pdf', 'pdb', 'updb', 'pdr', 'prc', 'mobi', 'azw', 'doc',
                   'epub', 'fb2', 'fbz', 'djv', 'djvu', 'lrx', 'cbr', 'cb7', 'cbz', 'cbc', 'oebzip',
                   'rb', 'imp', 'odt', 'chm', 'tpz', 'azw1', 'pml', 'pmlz', 'mbp', 'tan', 'snb',
                   'xps', 'oxps', 'azw4', 'book', 'zbf', 'pobi', 'docx', 'docm', 'md',
                   'textile', 'markdown', 'ibook', 'ibooks', 'iba', 'azw3', 'ps', 'kepub', 'kfx', 'kpf']
def return_raster_image(path):
    '''Return the raw bytes of *path* when it is a readable raster image, else None.'''
    from calibre.utils.imghdr import what
    if not os.access(path, os.R_OK):
        return None
    with open(path, 'rb') as src:
        data = src.read()
    # Reject unrecognized data and vector (SVG) images
    if what(None, data) in (None, 'svg'):
        return None
    return data
def extract_cover_from_embedded_svg(html, base, log):
    '''If *html* contains a single <svg> wrapping a single <image>, return that image's data.

    *base* is the directory used to resolve the image's relative href.
    Returns None when the page does not have this simple shape.
    '''
    from calibre.ebooks.oeb.base import SVG, XLINK, XPath
    from calibre.utils.xml_parse import safe_xml_fromstring
    root = safe_xml_fromstring(html)
    svgs = XPath('//svg:svg')(root)
    # Only handle the simple case: exactly one <svg> with exactly one child
    if len(svgs) != 1 or len(svgs[0]) != 1:
        return None
    candidate = svgs[0][0]
    if candidate.tag != SVG('image'):
        return None
    href = candidate.get(XLINK('href'), None)
    if not href:
        return None
    return return_raster_image(os.path.join(base, *href.split('/')))
def extract_calibre_cover(raw, base, log):
    '''Return cover image data when *raw* is HTML for a simple cover page, else None.

    Two page shapes are recognized: a page with no text-bearing tags and a
    single <img alt="cover">, or a body containing no text at all and exactly
    one <img>. *base* is the directory used to resolve relative image paths.
    '''
    from calibre.ebooks.BeautifulSoup import BeautifulSoup
    soup = BeautifulSoup(raw)
    # Any of these tags indicates the page carries real content, not just a cover
    matches = soup.find(name=['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'span',
        'font', 'br'])
    images = soup.findAll('img', src=True)
    if matches is None and len(images) == 1 and \
            images[0].get('alt', '').lower()=='cover':
        img = images[0]
        img = os.path.join(base, *img['src'].split('/'))
        q = return_raster_image(img)
        if q is not None:
            return q

    # Look for a simple cover, i.e. a body with no text and only one <img> tag
    if matches is None:
        body = soup.find('body')
        if body is not None:
            text = ''.join(map(str, body.findAll(text=True)))
            if text.strip():
                # Body has text, abort
                return
            images = body.findAll('img', src=True)
            if len(images) == 1:
                img = os.path.join(base, *images[0]['src'].split('/'))
                return return_raster_image(img)
def render_html_svg_workaround(path_to_html, log, width=590, height=750, root=''):
    '''Extract cover data from an HTML file, trying cheap extraction before a full render.'''
    from calibre.ebooks.oeb.base import SVG_NS
    with open(path_to_html, 'rb') as src:
        markup = src.read()
    markup = xml_to_unicode(markup, strip_encoding_pats=True)[0]
    base = os.path.dirname(path_to_html)
    cover_data = None
    # Fast path 1: a single embedded SVG image
    if SVG_NS in markup:
        try:
            cover_data = extract_cover_from_embedded_svg(markup, base, log)
        except Exception:
            cover_data = None
    # Fast path 2: a calibre-style simple cover page
    if cover_data is None:
        try:
            cover_data = extract_calibre_cover(markup, base, log)
        except Exception:
            cover_data = None
    # Slow path: actually render the page and grab the result
    if cover_data is None:
        cover_data = render_html_data(path_to_html, width, height, root=root)
    return cover_data
def render_html_data(path_to_html, width, height, root=''):
    '''Render *path_to_html* in a separate worker process and return the JPEG data.

    Returns None on failure, after printing the worker's output to stderr.
    NOTE(review): width and height are not passed to the worker here — confirm
    whether they are intentionally unused.
    '''
    from calibre.ptempfile import TemporaryDirectory
    from calibre.utils.ipc.simple_worker import WorkerError, fork_job
    result = {}

    def report_error(text=''):
        # Dump whatever the worker wrote to stdout/stderr, to aid debugging
        prints('Failed to render', path_to_html, 'with errors:', file=sys.stderr)
        if text:
            prints(text, file=sys.stderr)
        if result and result['stdout_stderr']:
            with open(result['stdout_stderr'], 'rb') as f:
                prints(f.read(), file=sys.stderr)

    with TemporaryDirectory('-render-html') as tdir:
        try:
            result = fork_job('calibre.ebooks.render_html', 'main', args=(path_to_html, tdir, 'jpeg', root))
        except WorkerError as e:
            report_error(e.orig_tb)
        else:  # worker ran without raising
            if result['result']:
                with open(os.path.join(tdir, 'rendered.jpeg'), 'rb') as f:
                    return f.read()
            else:
                report_error()
def check_ebook_format(stream, current_guess):
    '''Refine *current_guess* for *stream*: MOBI-family files starting with b"TPZ" are really Topaz.

    The stream position is reset to the start. Non MOBI-family guesses are
    returned unchanged without touching the stream.
    '''
    mobi_like = ('prc', 'mobi', 'azw', 'azw1', 'azw3')
    if current_guess.lower() not in mobi_like:
        return current_guess
    stream.seek(0)
    magic = stream.read(3)
    stream.seek(0)
    return 'tpz' if magic == b'TPZ' else current_guess
def normalize(x):
    '''Return *x* NFC-normalized when it is a str; all other values pass through untouched.'''
    if not isinstance(x, str):
        return x
    import unicodedata
    return unicodedata.normalize('NFC', x)
def calibre_cover(title, author_string, series_string=None,
        output_format='jpg', title_size=46, author_size=36, logo_path=None):
    '''Generate a default cover image and return it encoded as *output_format*.

    title_size and author_size are accepted for backwards compatibility but
    are not used by the current implementation.
    '''
    from calibre.ebooks.covers import calibre_cover2
    from calibre.utils.img import image_to_data
    cover = calibre_cover2(
        normalize(title),
        normalize(author_string) or '',
        normalize(series_string) or '',
        logo_path=logo_path, as_qimage=True)
    return image_to_data(cover, fmt=output_format)
# Matches a CSS length: optional sign/number followed by a known unit
UNIT_RE = re.compile(r'^(-*[0-9]*[.]?[0-9]*)\s*(%|em|ex|en|px|mm|cm|in|pt|pc|rem|q)$')


def unit_convert(value, base, font, dpi, body_font_size=12):
    ' Return value in pts'
    # Numbers are assumed to already be in pts
    if isinstance(value, numbers.Number):
        return value
    with suppress(Exception):
        # A bare numeric string is treated as a pixel measure
        return float(value) * 72.0 / dpi
    m = UNIT_RE.match(value)
    if m is None or not m.group(1):
        # Unparseable: hand the value back unchanged
        return value
    try:
        num = float(m.group(1))
    except ValueError:
        num = 0
    unit = m.group(2)
    if unit == '%':
        return (num / 100.0) * base
    if unit == 'px':
        return num * 72.0 / dpi
    if unit == 'in':
        return num * 72.0
    if unit == 'pt':
        return num
    if unit == 'em':
        return num * font
    if unit in ('ex', 'en'):
        # This is a hack for ex since we have no way to know
        # the x-height of the font
        return num * font * 0.5
    if unit == 'pc':
        return num * 12.0
    if unit == 'mm':
        return num * 2.8346456693
    if unit == 'cm':
        return num * 28.346456693
    if unit == 'rem':
        return num * body_font_size
    if unit == 'q':
        return num * 0.708661417325
    return value
def parse_css_length(value):
    '''Parse a CSS length such as "10.5em" into (magnitude, unit).

    Returns (None, None) when *value* is not a string or not parseable.
    '''
    try:
        match = UNIT_RE.match(value)
    except TypeError:  # value is not a string
        return None, None
    if match is None or not match.group(1):
        return None, None
    return float(match.group(1)), match.group(2).lower()
def generate_masthead(title, output_path=None, width=600, height=60):
    '''Render a masthead image for *title*, honoring the user's MOBI output masthead font setting.'''
    from calibre.ebooks.conversion.config import load_defaults
    from calibre.ebooks.covers import generate_masthead as implementation
    font = load_defaults('mobi_output').get('masthead_font', None)
    return implementation(title, output_path=output_path, width=width, height=height, font_family=font)
def escape_xpath_attr(value):
    '''Quote *value* for use as an XPath string literal.

    Strings containing both quote characters are expressed via concat(),
    quoting runs of double quotes with single quotes and the rest with
    double quotes.
    '''
    if '"' not in value:
        return '"%s"' % value
    if "'" not in value:
        return "'%s'" % value
    pieces = []
    for chunk in re.split('("+)', value):
        if not chunk:
            continue
        quote = "'" if '"' in chunk else '"'
        pieces.append(quote + chunk + quote)
    return 'concat(%s)' % ', '.join(pieces)
# | 8,160 | Python | .py | 206 | 31.160194 | 122 | 0.576053 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
# 27,257 | html_transform_rules.py | kovidgoyal_calibre/src/calibre/ebooks/html_transform_rules.py
#!/usr/bin/env python
# License: GPLv3 Copyright: 2016, Kovid Goyal <kovid at kovidgoyal.net>
import re
from functools import partial
from html5_parser import parse
from lxml import etree
from calibre.ebooks.metadata.tag_mapper import uniq
from calibre.ebooks.oeb.base import OEB_DOCS, XPath
from calibre.ebooks.oeb.parse_utils import XHTML
from css_selectors.select import Select, get_parsed_selector
def non_empty_validator(label, val):
    '''Return an error message naming *label* when *val* is empty, else None.'''
    if val:
        return None
    return _('{} must not be empty').format(label)
def always_valid(*a):
    '''Validator that accepts any value (returns None, i.e. no error).'''
    pass
class Action:
    '''Metadata for one transform action: machine name, UI texts and a data validator.'''

    def __init__(self, name, short_text, long_text, placeholder='', validator=None):
        self.name = name
        self.short_text = short_text
        self.long_text = long_text
        self.placeholder = placeholder
        if validator is None and placeholder:
            # Actions that take data default to requiring non-empty data
            validator = partial(non_empty_validator, placeholder)
        self.validator = validator or always_valid
# All known actions keyed by name. short_text/long_text/placeholder feed the
# rule-editing GUI; validator checks an action's data string.
ACTION_MAP = {a.name: a for a in (
    Action('rename', _('Change tag name'), _('Rename tag to the specified name'), _('New tag name')),
    Action('remove', _('Remove tag and children'), _('Remove the tag and all its contents')),
    Action('unwrap', _('Remove tag only'), _('Remove the tag but keep its contents')),
    Action('add_classes', _('Add classes'), _('Add the specified classes, e.g.:') + ' bold green', _('Space separated class names')),
    Action('remove_classes', _('Remove classes'), _('Remove the specified classes, e.g.:') + ' bold green', _('Space separated class names')),
    Action('remove_attrs', _('Remove attributes'), _(
        'Remove the specified attributes from the tag. Multiple attribute names should be separated by spaces.'
        ' The special value * removes all attributes.'), _('Space separated attribute names')),
    Action('add_attrs', _('Add attributes'), _('Add the specified attributes, e.g.:') + ' class="red" name="test"', _('Space separated attribute names')),
    Action('empty', _('Empty the tag'), _('Remove all contents from the tag')),
    Action('wrap', _('Wrap the tag'), _(
        'Wrap the tag in the specified tag, e.g.: {0} will wrap the tag in a DIV tag with class {1}').format(
            '<div class="box">', 'box'), _('An HTML opening tag')),
    Action('insert', _('Insert HTML at start'), _(
        'The specified HTML snippet is inserted after the opening tag. Note that only valid HTML snippets can be used without unclosed tags'),
        _('HTML snippet')),
    Action('insert_end', _('Insert HTML at end'), _(
        'The specified HTML snippet is inserted before the closing tag. Note that only valid HTML snippets can be used without unclosed tags'),
        _('HTML snippet')),
    Action('prepend', _('Insert HTML before tag'), _(
        'The specified HTML snippet is inserted before the opening tag. Note that only valid HTML snippets can be used without unclosed tags'),
        _('HTML snippet')),
    Action('append', _('Insert HTML after tag'), _(
        'The specified HTML snippet is inserted after the closing tag. Note that only valid HTML snippets can be used without unclosed tags'),
        _('HTML snippet')),
)}
def validate_action(action):
    '''Return an error message when the serialized *action* dict is invalid, else None.'''
    if set(action) != {'type', 'data'}:
        return _('Action must have both:') + ' type and data'
    definition = ACTION_MAP[action['type']]
    return definition.validator(action['data'])
def validate_css_selector(val):
    '''Return an error message when *val* cannot be parsed as a CSS selector, else None.'''
    try:
        get_parsed_selector(val)
    except Exception:
        return _('{} is not a valid CSS selector').format(val)
    return None


def validate_xpath_selector(val):
    '''Return an error message when *val* cannot be parsed as an XPath expression, else None.'''
    try:
        XPath(val)
    except Exception:
        return _('{} is not a valid XPath selector').format(val)
    return None
class Match:
    '''Metadata for one match type: machine name, UI text and a query validator.'''

    def __init__(self, name, text, placeholder='', validator=None):
        self.name = name
        self.text = text
        self.placeholder = placeholder
        if validator is None and placeholder:
            # Match types that take a query default to requiring a non-empty query
            validator = partial(non_empty_validator, placeholder)
        self.validator = validator or always_valid
# All known tag match types keyed by name; '*' matches every tag, css/xpath
# carry real selector validators.
MATCH_TYPE_MAP = {m.name: m for m in (
    Match('is', _('is'), _('Tag name')),
    Match('has_class', _('has class'), _('Class name')),
    Match('not_has_class', _('does not have class'), _('Class name')),
    Match('css', _('matches CSS selector'), _('CSS selector'), validate_css_selector),
    Match('xpath', _('matches XPath selector'), _('XPath selector'), validate_xpath_selector),
    Match('*', _('is any tag')),
    Match('contains_text', _('contains text'), _('Text')),
)}
# The only keys a serialized rule dictionary may contain
allowed_keys = frozenset('match_type query actions'.split())


def validate_rule(rule):
    '''Validate a serialized rule dict.

    Returns a (title, detailed message) pair describing the first problem
    found, or (None, None) when the rule is valid.
    '''
    keys = frozenset(rule)
    extra = keys - allowed_keys
    if extra:
        return _('Unknown keys'), _(
            'The rule has unknown keys: %s') % ', '.join(extra)
    missing = allowed_keys - keys
    if missing:
        return _('Missing keys'), _(
            'The rule has missing keys: %s') % ', '.join(missing)
    mt = rule['match_type']
    if mt not in MATCH_TYPE_MAP:
        return _('Unknown match type'), _(
            'The match type %s is not known') % mt
    if mt != '*' and not rule['query']:
        # Bug fix: this error tuple was previously computed but never
        # returned (a statement with no effect), so empty queries fell
        # through to the generic placeholder validator instead.
        return _('Query required'), _(
            'You must specify a value for the tag to match')
    m = MATCH_TYPE_MAP[rule['match_type']]
    err = m.validator(rule.get('query') or '')
    if err:
        return _('Invalid {}').format(m.placeholder), err
    if not rule['actions']:
        return _('No actions'), _('The rule has no actions')
    for action in rule['actions']:
        err = validate_action(action)
        if err:
            return _('Invalid action'), err
    return None, None
def rename_tag(new_name, tag):
    '''Rename *tag* to *new_name*; return True when a change was actually made.'''
    if new_name == tag.tag:
        return False
    tag.tag = new_name
    return True
def qualify_tag_name(name):
    '''Expand a bare tag name into its XHTML-namespaced form.'''
    return XHTML(name)
def remove_tag(tag):
    '''Remove *tag* and its children from the tree, preserving tag's tail text.

    Always returns True (a change was made).
    '''
    p = tag.getparent()
    idx = p.index(tag)
    # The node preceding tag, if any; it will receive tag's tail text
    sibling = p[idx-1] if idx else None
    p.remove(tag)
    if tag.tail:
        if sibling is None:
            # tag was the first child, so its tail joins the parent's text
            p.text = (p.text or '') + tag.tail
        else:
            sibling.tail = (sibling.tail or '') + tag.tail
    return True
def unwrap_tag(tag):
    '''Replace *tag* with its children, preserving all text and tail content.

    Always returns True (a change was made).
    '''
    p = tag.getparent()
    idx = p.index(tag)
    sibling = p[idx-1] if idx else None
    if tag.text:
        # tag's leading text goes to whatever precedes its position
        if sibling is None:
            p.text = (p.text or '') + tag.text
        else:
            sibling.tail = (sibling.tail or '') + tag.text
    # Hoist children in reverse so repeated insertion at idx keeps their order
    for i, child in enumerate(reversed(tag)):
        p.insert(idx, child)
        if i == 0:
            # The last child ends up immediately before tag's old position,
            # so it should receive tag's tail text below
            sibling = child
    p.remove(tag)
    if tag.tail:
        if sibling is None:
            p.text = (p.text or '') + tag.tail
        else:
            sibling.tail = (sibling.tail or '') + tag.tail
    return True
def add_classes(classes, tag):
    '''Add *classes* to tag's class attribute, preserving order and dropping duplicates.

    Returns True when the attribute value changed.
    '''
    before = tag.get('class', '')
    existing = [c for c in before.split() if c]
    after = ' '.join(uniq(existing + classes))
    if after == before:
        return False
    tag.set('class', after)
    return True
def remove_classes(classes, tag):
    '''Remove every occurrence of each class in *classes* from tag's class attribute.

    Returns True when the attribute value changed (this includes pure
    whitespace normalization of the original value).
    '''
    before = tag.get('class', '')
    unwanted = set(classes)
    kept = [c for c in before.split() if c not in unwanted]
    after = ' '.join(kept)
    if after == before:
        return False
    tag.set('class', after)
    return True
def remove_attrs(attrs, tag):
    '''Remove the named attributes from *tag*; the special name "*" removes all.

    Returns True when anything was removed.
    '''
    if not tag.attrib:
        return False
    changed = False
    for name in attrs:
        if name == '*':
            tag.attrib.clear()
            changed = True
        elif tag.attrib.pop(name, None) is not None:
            changed = True
    return changed
def parse_attrs(text):
    '''Parse an HTML attribute string such as 'a="1" b="2"' into a list of (name, value) pairs.'''
    div = parse(f'<div {text} ></div>', fragment_context='div')[0]
    return div.items()
def add_attrs(attrib, tag):
    '''Set each (name, value) pair from *attrib* on *tag*.

    Returns True when the attributes changed. NOTE: assumes tag.items()
    returns a fresh list on every call, as lxml elements do.
    '''
    before = tag.items()
    for name, value in attrib:
        tag.set(name, value)
    return before != tag.items()
def empty(tag):
    '''Remove all children and text from *tag*; return True when there was anything to remove.'''
    had_content = bool(len(tag)) or bool(tag.text)
    del tag[:]
    tag.text = None
    return had_content
def parse_start_tag(text):
    '''Parse an opening tag like '<div class="box">' into {'tag': name, 'attrib': [(k, v), ...]}.

    :raises ValueError: when *text* contains no tag at all
    '''
    try:
        tag = parse(text, namespace_elements=True, fragment_context='div')[0]
    except IndexError as e:
        raise ValueError(_('No tag found in: {}. The tag specification must be of the form <tag> for example: <p>')) from e
    return {'tag': tag.tag, 'attrib': tag.items()}
def wrap(data, tag):
    '''Wrap *tag* in a new element described by *data* (output of parse_start_tag).

    The wrapper takes over tag's position and tail text. Always returns True.
    '''
    elem = tag.makeelement(data['tag'])
    for k, v in data['attrib']:
        elem.set(k, v)
    # The wrapper inherits the tail text of the wrapped tag
    elem.tail = tag.tail
    tag.tail = None
    p = tag.getparent()
    idx = p.index(tag)
    p.insert(idx, elem)
    # Appending re-parents tag from p into the wrapper
    elem.append(tag)
    return True
def parse_html_snippet(text):
    '''Parse an HTML fragment into a container <div> element holding its content.'''
    return parse(f'<div>{text}</div>', namespace_elements=True, fragment_context='div')[0]
def clone(src_element, target_tree):
    '''Make a copy of *src_element* (attributes, children, text and tail) owned by *target_tree*.'''
    if src_element.tag is etree.Comment:
        # Comments cannot be created via makeelement
        ans = etree.Comment('')
    else:
        ans = target_tree.makeelement(src_element.tag)
    for k, v in src_element.items():
        ans.set(k, v)
    # NOTE(review): lxml's extend() re-parents the children rather than
    # copying them, so src_element is consumed here — confirm intended
    ans.extend(src_element)
    ans.text = src_element.text
    ans.tail = src_element.tail
    return ans
def insert_snippet(container, before_children, tag):
    '''Insert the contents of *container* (from parse_html_snippet) into *tag*.

    With before_children the content goes right after tag's opening tag,
    otherwise right before its closing tag. Always returns True.
    '''
    if before_children:
        orig_text = tag.text
        tag.text = container.text
        if len(container):
            # Insert in reverse so repeated insert(0, ...) preserves order
            for i, child in enumerate(reversed(container)):
                c = clone(child, tag)
                tag.insert(0, c)
                if i == 0 and orig_text:
                    # tag's old text now follows the last inserted child
                    c.tail = (c.tail or '') + orig_text
        else:
            # NOTE(review): raises TypeError when orig_text is None (tag had
            # no text) and the snippet has no children — confirm reachable
            tag.text = (tag.text or '') + orig_text
    else:
        if container.text:
            if len(tag) > 0:
                tag[-1].tail = (tag[-1].tail or '') + container.text
            else:
                tag.text = (tag.text or '') + container.text
        for child in container:
            c = clone(child, tag)
            tag.append(c)
    return True
def append_snippet(container, before_tag, tag):
    '''Insert the contents of *container* into tag's parent, before or after *tag*.

    Always returns True.
    '''
    p = tag.getparent()
    idx = p.index(tag)
    if not before_tag:
        idx += 1
    if container.text:
        # The snippet's leading text attaches to whatever precedes the
        # insertion point
        if idx:
            c = p[idx-1]
            c.tail = (c.tail or '') + container.text
        else:
            p.text = (p.text or '') + container.text
    # Insert in reverse so repeated insert(idx, ...) preserves order
    for child in reversed(container):
        c = clone(child, tag)
        p.insert(idx, c)
    return True
# Factories keyed by action type: each takes the action's data string and
# returns a callable tag -> bool (True when the tree was modified). Parsing
# of the data happens once here, at rule compile time.
action_map = {
    'rename': lambda data: partial(rename_tag, qualify_tag_name(data)),
    'remove': lambda data: remove_tag,
    'unwrap': lambda data: unwrap_tag,
    'empty': lambda data: empty,
    'add_classes': lambda data: partial(add_classes, str.split(data)),
    'remove_classes': lambda data: partial(remove_classes, str.split(data)),
    'remove_attrs': lambda data: partial(remove_attrs, str.split(data)),
    'add_attrs': lambda data: partial(add_attrs, parse_attrs(data)),
    'wrap': lambda data: partial(wrap, parse_start_tag(data)),
    'insert': lambda data: partial(insert_snippet, parse_html_snippet(data), True),
    'insert_end': lambda data: partial(insert_snippet, parse_html_snippet(data), False),
    'prepend': lambda data: partial(append_snippet, parse_html_snippet(data), True),
    'append': lambda data: partial(append_snippet, parse_html_snippet(data), False),
}
def create_action(serialized_action):
    '''Instantiate the callable for a serialized action dict via action_map.'''
    return action_map[serialized_action['type']](serialized_action.get('data', ''))
def text_as_xpath_literal(text):
    '''Quote *text* as an XPath string literal.

    Text containing both quote characters is expressed via concat(), with
    each double quote quoted in single quotes and everything else in double
    quotes.
    '''
    if '"' not in text:
        return f'"{text}"'
    if "'" not in text:
        return f"'{text}'"
    pieces = []
    for fragment in re.split(r'(")', text):
        if fragment:
            pieces.append("'\"'" if fragment == '"' else f'"{fragment}"')
    return f'concat({",".join(pieces)})'
class Rule:
    '''A compiled transform rule: a tag selector plus the actions applied to each matched tag.'''

    def __init__(self, serialized_rule):
        self.sel_type = 'xpath'
        mt = serialized_rule['match_type']
        q = serialized_rule['query']
        if mt == 'xpath':
            self._use_xpath(q)
        elif mt in ('is', 'css'):
            self._use_css(q)
        elif mt == '*':
            self._use_xpath('//*')
        elif mt == 'has_class':
            self._use_css('.' + q)
        elif mt == 'not_has_class':
            self._use_css(f":not(.{q})")
        elif mt == 'contains_text':
            self._use_xpath(f'//*[contains(text(), {text_as_xpath_literal(q)})]')
        else:
            raise KeyError(f'Unknown match_type: {mt}')
        self.actions = tuple(map(create_action, serialized_rule['actions']))

    def _use_xpath(self, expr):
        # Compile *expr* and select tags via XPath evaluation
        self.xpath_selector = XPath(expr)
        self.selector = self.xpath

    def _use_css(self, sel):
        # Remember *sel* and select tags via the CSS engine
        self.css_selector = sel
        self.selector = self.css

    def xpath(self, root):
        return self.xpath_selector(root)

    def css(self, root):
        return tuple(Select(root)(self.css_selector))

    def __call__(self, root):
        '''Run every action on every matched tag; return True when anything changed.'''
        made_change = False
        for node in self.selector(root):
            for act in self.actions:
                if act(node):
                    made_change = True
        return made_change
def transform_doc(root, rules):
    '''Run every rule against *root*; return True when any rule changed the tree.

    All rules are always evaluated (no short-circuiting), since each may have
    side effects on the tree.
    '''
    results = [rule(root) for rule in rules]
    return any(results)
def transform_html(html, serialized_rules):
    '''Apply serialized rules to an HTML string; return (changed, transformed_html).'''
    root = parse(html, namespace_elements=True)
    compiled = tuple(Rule(r) for r in serialized_rules)
    changed = transform_doc(root, compiled)
    return changed, etree.tostring(root, encoding='unicode')
def transform_container(container, serialized_rules, names=()):
    '''Apply rules to the named HTML files in *container* (all HTML files when *names* is empty).

    Documents that change are marked dirty. Returns True when any document
    was modified.
    '''
    if not names:
        names = [name for name, mt in container.mime_map.items() if mt in OEB_DOCS]

    compiled = tuple(Rule(r) for r in serialized_rules)
    changed = False
    for name in names:
        # Explicitly passed names are still filtered to HTML documents
        if container.mime_map.get(name) not in OEB_DOCS:
            continue
        if transform_doc(container.parsed(name), compiled):
            container.dirty(name)
            changed = True

    return changed
def transform_conversion_book(oeb, opts, serialized_rules):
    '''Apply rules to every parsed spine item of *oeb* during conversion. *opts* is unused here.'''
    compiled = tuple(Rule(r) for r in serialized_rules)
    for item in oeb.spine:
        doc = item.data
        # Skip items whose data is not a parsed document (no xpath method)
        if hasattr(doc, 'xpath'):
            transform_doc(doc, compiled)
def rule_to_text(rule):
    '''Render a serialized rule as human-readable text, one line per action.'''
    lines = [_('If the tag {match_type} {query}').format(
        match_type=MATCH_TYPE_MAP[rule['match_type']].text, query=rule.get('query') or '')]
    for action in rule['actions']:
        lines.append(_('{action_type} {action_data}').format(
            action_type=ACTION_MAP[action['type']].short_text, action_data=action.get('data') or ''))
    return '\n'.join(lines)
def export_rules(serialized_rules):
    '''Serialize rules to the human-editable UTF-8 text format read back by import_rules.'''
    out = []
    for rule in serialized_rules:
        # A commented, human-readable description of the rule
        for desc_line in rule_to_text(rule).splitlines():
            out.append('# ' + desc_line)
        for key, value in rule.items():
            if key in allowed_keys and key != 'actions':
                out.append('{}: {}'.format(key, value.replace('\n', ' ')))
        for action in rule.get('actions', ()):
            out.append(f"action: {action['type']}: {action.get('data', '')}")
        out.append('')  # a blank line terminates each rule
    return '\n'.join(out).encode('utf-8')
def import_rules(raw_data):
    '''Parse the text format produced by export_rules, yielding serialized rule dicts.'''
    pending = {}

    def finalize(raw_rule):
        # Guarantee every allowed key is present, defaulting to ''
        return {key: (raw_rule.get(key) or '') for key in allowed_keys}

    for line in raw_data.decode('utf-8').splitlines():
        if not line.strip():
            # A blank line terminates the current rule
            if pending:
                yield finalize(pending)
            pending = {}
            continue
        if line.lstrip().startswith('#'):
            continue  # comment line
        key, sep, value = line.partition(':')
        if not sep:
            continue
        key, value = key.lower().strip(), value.strip()
        if key == 'action':
            atype, _sep, adata = value.partition(':')
            pending.setdefault('actions', []).append(
                {'type': atype.strip(), 'data': adata.strip()})
        elif key in allowed_keys:
            pending[key] = value
    if pending:
        yield finalize(pending)
def test(return_tests=False):  # {{{
    '''Unit tests for the transform rules. With return_tests, return the suite instead of running it.'''
    import unittest

    class TestTransforms(unittest.TestCase):
        longMessage = True
        maxDiff = None
        ae = unittest.TestCase.assertEqual

        def test_matching(self):
            root = parse(namespace_elements=True, html='''
<html id='root'>
<head id='head'></head>
<body id='body'>
<p class="one red" id='p1'>simple
<p class="two green" id='p2'>a'b"c
''')
            all_ids = root.xpath('//*/@id')

            def q(mt, query=''):
                # Return the ids of all tags matched by the given match type/query
                r = Rule({'match_type': mt, 'query': query, 'actions':[]})
                ans = []
                for tag in r.selector(root):
                    ans.append(tag.get('id'))
                return ans

            def t(mt, query='', expected=[]):
                self.ae(expected, q(mt, query))

            t('*', expected=all_ids)
            t('is', 'body', ['body'])
            t('is', 'p', ['p1', 'p2'])
            t('has_class', 'one', ['p1'])
            ei = list(all_ids)
            ei.remove('p1')
            t('not_has_class', 'one', ei)
            t('css', '#body > p.red', ['p1'])
            t('xpath', '//h:body', ['body'])
            t('contains_text', 'imple', ['p1'])
            t('contains_text', 'a\'b"c', ['p2'])

        def test_validate_rule(self):
            def av(match_type='*', query='', atype='remove', adata=''):
                # Assert the rule is valid
                rule = {'match_type': match_type, 'query': query, 'actions': [{'type': atype, 'data': adata}]}
                self.ae(validate_rule(rule), (None, None))

            def ai(match_type='*', query='', atype='remove', adata=''):
                # Assert the rule is invalid
                rule = {'match_type': match_type, 'query': query, 'actions': [{'type': atype, 'data': adata}]}
                self.assertNotEqual(validate_rule(rule), (None, None))

            av()
            av('css', 'p')
            ai('css', 'p..c')
            av('xpath', '//h:p')
            ai('xpath', '//h:p[')
            ai(atype='wrap')

        def test_export_import(self):
            rule = {'match_type': 'is', 'query': 'p', 'actions': [{'type': 'rename', 'data': 'div'}, {'type': 'remove', 'data': ''}]}
            self.ae(rule, next(iter(import_rules(export_rules([rule])))))
            # Missing keys are filled in with '' on import
            rule = {'match_type': '*', 'actions': [{'type': 'remove'}]}
            erule = {'match_type': '*', 'query': '', 'actions': [{'type': 'remove', 'data': ''}]}
            self.ae(erule, next(iter(import_rules(export_rules([rule])))))

        def test_html_transform_actions(self):
            # presumably primes the html5 parser; the result is discarded
            parse('a', fragment_context='div')

            def r(html='<p>hello'):
                # Parse and return the <body> element
                return parse(namespace_elements=True, html=html)[1]

            def tostring(x, with_tail=True):
                return etree.tostring(x, encoding='unicode', with_tail=with_tail)

            def ax(x, expected):
                # Compare serialized markup ignoring the xhtml namespace declaration
                v = tostring(x)
                self.ae(expected, v.replace(' xmlns="http://www.w3.org/1999/xhtml"', ''))

            def t(name, data=''):
                return action_map[name](data)

            p = r()[0]
            self.assertFalse(t('rename', 'p')(p))
            self.assertTrue(t('rename', 'div')(p))
            self.ae(p.tag, XHTML('div'))

            div = r('<div><div><span>remove</span></div>keep</div>')[0]
            self.assertTrue(t('remove')(div[0]))
            ax(div, '<div>keep</div>')
            div = r('<div><div></div><div><span>remove</span></div>keep</div>')[0]
            self.assertTrue(t('remove')(div[1]))
            ax(div, '<div><div/>keep</div>')

            div = r('<div><div>text<span>unwrap</span></div>tail</div>')[0]
            self.assertTrue(t('unwrap')(div[0]))
            ax(div, '<div>text<span>unwrap</span>tail</div>')
            div = r('<div><div></div><div>text<span>unwrap</span></div>tail</div>')[0]
            self.assertTrue(t('unwrap')(div[1]))
            ax(div, '<div><div/>text<span>unwrap</span>tail</div>')

            p = r()[0]
            self.assertTrue(t('add_classes', 'a b')(p))
            self.ae(p.get('class'), 'a b')
            p = r('<p class="c a d">')[0]
            self.assertTrue(t('add_classes', 'a b')(p))
            self.ae(p.get('class'), 'c a d b')
            p = r('<p class="c a d">')[0]
            self.assertFalse(t('add_classes', 'a')(p))
            self.ae(p.get('class'), 'c a d')

            p = r()[0]
            self.assertFalse(t('remove_classes', 'a b')(p))
            self.ae(p.get('class'), None)
            p = r('<p class="c a a d">')[0]
            self.assertTrue(t('remove_classes', 'a')(p))
            self.ae(p.get('class'), 'c d')

            p = r()[0]
            self.assertFalse(t('remove_attrs', 'a b')(p))
            self.assertFalse(p.attrib)
            p = r('<p class="c" x="y" id="p">')[0]
            self.assertTrue(t('remove_attrs', 'class id')(p))
            self.ae(list(p.attrib), ['x'])
            p = r('<p class="c" x="y" id="p">')[0]
            self.assertTrue(t('remove_attrs', '*')(p))
            self.ae(list(p.attrib), [])

            p = r()[0]
            self.assertTrue(t('add_attrs', "class='c' data-m=n")(p))
            self.ae(p.items(), [('class', 'c'), ('data-m', 'n')])
            p = r('<p a=1>')[0]
            self.assertTrue(t('add_attrs', "a=2")(p))
            self.ae(p.items(), [('a', '2')])

            p = r('<p>t<span>s')[0]
            self.assertTrue(t('empty')(p))
            ax(p, '<p/>')

            p = r('<p>t<span>s</p>tail')[0]
            self.assertTrue(t('wrap', '<div a=b c=d>')(p))
            ax(p.getparent(), '<div a="b" c="d"><p>t<span>s</span></p></div>tail')

            p = r('<p>hello<span>s')[0]
            self.assertTrue(t('insert', 'text<div a=b c=d><!-- comm -->tail')(p))
            ax(p, '<p>text<div a="b" c="d"><!-- comm -->tail</div>hello<span>s</span></p>')
            p = r('<p>hello<span>s')[0]
            self.assertTrue(t('insert', 'text')(p))
            ax(p, '<p>texthello<span>s</span></p>')

            p = r('<p>hello<span>s')[0]
            self.assertTrue(t('insert_end', 'text<div><!-- comm -->tail')(p))
            ax(p, '<p>hello<span>s</span>text<div><!-- comm -->tail</div></p>')
            p = r('<p>hello<span>s</span>tail')[0]
            self.assertTrue(t('insert_end', 'text')(p))
            ax(p, '<p>hello<span>s</span>tailtext</p>')

            p = r('<p>hello')[0]
            self.assertTrue(t('prepend', 'text<div>x</div>tail')(p))
            ax(p.getparent(), '<body>text<div>x</div>tail<p>hello</p></body>')
            p = r('<p>hello')[0]
            self.assertTrue(t('append', 'text')(p))
            ax(p.getparent(), '<body><p>hello</p>text</body>')

    tests = unittest.defaultTestLoader.loadTestsFromTestCase(TestTransforms)
    if return_tests:
        return tests
    unittest.TextTestRunner(verbosity=4).run(tests)
# Run the unit tests when executed directly
if __name__ == '__main__':
    test()
# }}}
# | 23,104 | Python | .py | 549 | 33.132969 | 154 | 0.558438 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
# 27,258 | chardet.py | kovidgoyal_calibre/src/calibre/ebooks/chardet.py
#!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import codecs
import re
import sys
from calibre import xml_replace_entities
# Regular expression sources matching encoding declarations in XML/HTML;
# group 1 captures the encoding name. Compiled lazily for both str and
# bytes input (see LazyEncodingPats).
_encoding_pats = (
    # XML declaration
    r'<\?[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>',
    # HTML 5 charset
    r'''<meta\s+charset=['"]([-_a-z0-9]+)['"][^<>]*>(?:\s*</meta>){0,1}''',
    # HTML 4 Pragma directive
    r'''<meta\s+?[^<>]*?content\s*=\s*['"][^'"]*?charset=([-_a-z0-9]+)[^'"]*?['"][^<>]*>(?:\s*</meta>){0,1}''',
)


# The misspelled substitute_entites alias is kept for backwards compatibility
substitute_entities = substitute_entites = xml_replace_entities  # for plugins that might use this
def compile_pats(binary):
    '''Yield the encoding-declaration patterns compiled for bytes (binary=True) or str matching.'''
    for pattern in _encoding_pats:
        source = pattern.encode('ascii') if binary else pattern
        yield re.compile(source, flags=re.IGNORECASE)
class LazyEncodingPats:
    '''Compile the encoding-declaration patterns on first use and cache them per input type.'''

    def __call__(self, binary=False):
        cache_attr = 'binary_pats' if binary else 'unicode_pats'
        cached = getattr(self, cache_attr, None)
        if cached is None:
            cached = tuple(compile_pats(binary))
            setattr(self, cache_attr, cached)
        yield from cached


# Shared, lazily initialized pattern cache used by the functions below
lazy_encoding_pats = LazyEncodingPats()
def strip_encoding_declarations(raw, limit=50*1024, preserve_newlines=False):
    '''Remove encoding declarations from the first *limit* characters of *raw* (str or bytes).

    With preserve_newlines, each removed declaration is replaced by as many
    newlines as it contained, keeping line numbers stable.
    '''
    is_binary = isinstance(raw, bytes)
    head, tail = raw[:limit], raw[limit:]
    newline = b'\n' if is_binary else '\n'
    if preserve_newlines:
        def replacement(m):
            return newline * m.group().count(newline)
    else:
        replacement = b'' if is_binary else ''
    for pat in lazy_encoding_pats(is_binary):
        head = pat.sub(replacement, head)
    return head + tail
def replace_encoding_declarations(raw, enc='utf-8', limit=50*1024):
    '''Rewrite encoding declarations in the first *limit* characters of *raw* to name *enc*.

    Returns (new_raw, changed) where changed is True when any declaration
    named a different encoding.
    '''
    is_binary = isinstance(raw, bytes)
    head, tail = raw[:limit], raw[limit:]
    # Coerce enc to the same type as raw
    if is_binary:
        if not isinstance(enc, bytes):
            enc = enc.encode('ascii')
    elif isinstance(enc, bytes):
        enc = enc.decode('ascii')
    changed = False

    def rewrite(m):
        nonlocal changed
        decl = m.group()
        if m.group(1).lower() != enc.lower():
            changed = True
            # Splice the new name over group 1, using offsets relative to the match
            start, end = m.start(1) - m.start(0), m.end(1) - m.end(0)
            decl = decl[:start] + enc + decl[end:]
        return decl

    for pat in lazy_encoding_pats(is_binary):
        head = pat.sub(rewrite, head)
    return head + tail, changed
def find_declared_encoding(raw, limit=50*1024):
    '''Return the encoding named by a declaration in the first *limit* characters of *raw*, or None.'''
    head = raw[:limit]
    is_binary = isinstance(raw, bytes)
    for pat in lazy_encoding_pats(is_binary):
        m = pat.search(head)
        if m is None:
            continue
        found = m.group(1)
        if is_binary:
            found = found.decode('ascii', 'replace')
        return found
    return None
# Map nonstandard charset names seen in the wild to names Python recognizes
_CHARSET_ALIASES = {"macintosh" : "mac-roman", "x-sjis" : "shift-jis"}
def detect(bytestring):
    '''Detect the encoding of *bytestring* via uchardet, returning a chardet-style result dict.'''
    if isinstance(bytestring, str):
        bytestring = bytestring.encode('utf-8', 'replace')
    from calibre_extensions.uchardet import detect as implementation
    enc = implementation(bytestring).lower()
    # Mimic the chardet API: confidence is 1 when a guess was made, else 0
    return {'encoding': enc, 'confidence': 1 if enc else 0}
def force_encoding(raw, verbose, assume_utf8=False):
    '''Pick a usable text encoding for *raw*, falling back to sensible defaults on failure.'''
    from calibre.constants import preferred_encoding
    try:
        guess = detect(raw[:1024*50])
    except Exception:
        guess = {'encoding': preferred_encoding, 'confidence': 0}
    encoding = guess['encoding']
    if guess['confidence'] < 1:
        if verbose:
            print(f'WARNING: Encoding detection confidence for {guess["encoding"]} is {guess["confidence"]}', file=sys.stderr)
        if assume_utf8:
            encoding = 'utf-8'
    encoding = (encoding or preferred_encoding).lower()
    encoding = _CHARSET_ALIASES.get(encoding, encoding)
    # ASCII is a subset of UTF-8, so prefer the more general encoding
    return 'utf-8' if encoding == 'ascii' else encoding
def detect_xml_encoding(raw, verbose=False, assume_utf8=False):
    '''Return (raw, encoding) for the bytestring *raw*, stripping any leading BOM.

    Detection order: BOM, declared encoding in the markup, then statistical
    detection via force_encoding. str (or empty) input is returned unchanged
    with encoding None.
    '''
    if not raw or isinstance(raw, str):
        return raw, None
    for x in ('utf8', 'utf-16-le', 'utf-16-be'):
        # Map the encoding name to its codecs.BOM_* constant, e.g. BOM_UTF16_LE
        bom = getattr(codecs, 'BOM_'+x.upper().replace('-16', '16').replace(
            '-', '_'))
        if raw.startswith(bom):
            return raw[len(bom):], x
    encoding = None
    for pat in lazy_encoding_pats(True):
        match = pat.search(raw)
        if match:
            encoding = match.group(1)
            encoding = encoding.decode('ascii', 'replace')
            break
    if encoding is None:
        if assume_utf8:
            try:
                return raw.decode('utf-8'), 'utf-8'
            except UnicodeDecodeError:
                pass
        encoding = force_encoding(raw, verbose, assume_utf8=assume_utf8)
    if encoding.lower().strip() == 'macintosh':
        encoding = 'mac-roman'
    if encoding.lower().replace('_', '-').strip() in (
            'gb2312', 'chinese', 'csiso58gb231280', 'euc-cn', 'euccn',
            'eucgb2312-cn', 'gb2312-1980', 'gb2312-80', 'iso-ir-58'):
        # Microsoft Word exports to HTML with encoding incorrectly set to
        # gb2312 instead of gbk. gbk is a superset of gb2312, anyway.
        encoding = 'gbk'
    try:
        codecs.lookup(encoding)
    except LookupError:
        # Unknown codec name, fall back to UTF-8
        encoding = 'utf-8'
    return raw, encoding
def xml_to_unicode(raw, verbose=False, strip_encoding_pats=False,
        resolve_entities=False, assume_utf8=False):
    '''
    Force conversion of a byte string to unicode. Looks for an XML/HTML
    encoding declaration first; if none is found, uses chardet-style
    detection and prints a warning when detection confidence is < 100%.
    @return: (unicode, encoding used)
    '''
    if not raw:
        return '', None
    decoded, encoding = detect_xml_encoding(raw, verbose=verbose,
            assume_utf8=assume_utf8)
    if not isinstance(decoded, str):
        decoded = decoded.decode(encoding, 'replace')
    if strip_encoding_pats:
        decoded = strip_encoding_declarations(decoded)
    if resolve_entities:
        decoded = xml_replace_entities(decoded)
    return decoded, encoding
| 6,107 | Python | .py | 160 | 30.71875 | 130 | 0.603483 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,259 | render_html.py | kovidgoyal_calibre/src/calibre/ebooks/render_html.py | #!/usr/bin/env python
# License: GPL v3 Copyright: 2019, Kovid Goyal <kovid at kovidgoyal.net>
import os
import sys
from qt.core import QApplication, QByteArray, QMarginsF, QPageLayout, QPageSize, Qt, QTimer, QUrl
from qt.webengine import (
QWebEnginePage,
QWebEngineProfile,
QWebEngineScript,
QWebEngineUrlRequestInterceptor,
QWebEngineUrlRequestJob,
QWebEngineUrlSchemeHandler,
)
from calibre.constants import FAKE_HOST, FAKE_PROTOCOL
from calibre.ebooks.metadata.pdf import page_images
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.gui2 import must_use_qt
from calibre.gui_launch import setup_qt_logging
from calibre.utils.filenames import atomic_rename
from calibre.utils.logging import default_log
from calibre.utils.monotonic import monotonic
from calibre.utils.webengine import secure_webengine, send_reply, setup_fake_protocol, setup_profile
# Seconds to wait for the page load to finish before exiting with code 3.
LOAD_TIMEOUT = 20
# Seconds to wait for PDF printing to finish before exiting with code 4.
PRINT_TIMEOUT = 10
class RequestInterceptor(QWebEngineUrlRequestInterceptor):
    '''Blocks any network request that is not a GET/HEAD over the fake rendering protocol.'''

    def interceptRequest(self, request_info):
        method = bytes(request_info.requestMethod())
        if method not in (b'GET', b'HEAD'):
            default_log.warn(f'Blocking URL request with method: {method}')
        elif (qurl := request_info.requestUrl()).scheme() not in (FAKE_PROTOCOL,):
            default_log.warn(f'Blocking URL request {qurl.toString()} as it is not for a resource related to the HTML file being rendered')
        else:
            return  # allowed through
        request_info.block(True)
class UrlSchemeHandler(QWebEngineUrlSchemeHandler):
    '''Serves files from a fixed root directory over the FAKE_PROTOCOL scheme.

    Only GET requests for paths that resolve to files inside ``root`` are
    answered; everything else fails with an appropriate error code.
    '''

    def __init__(self, root, parent=None):
        # root: the directory outside of which no file may ever be served
        self.root = root
        super().__init__(parent)
        self.allowed_hosts = (FAKE_HOST,)

    def requestStarted(self, rq):
        if bytes(rq.requestMethod()) != b'GET':
            return self.fail_request(rq, QWebEngineUrlRequestJob.Error.RequestDenied)
        url = rq.requestUrl()
        host = url.host()
        if host not in self.allowed_hosts or url.scheme() != FAKE_PROTOCOL:
            return self.fail_request(rq)
        path = url.path()
        rp = path[1:]
        if not rp:
            return self.fail_request(rq, QWebEngineUrlRequestJob.Error.UrlNotFound)
        resolved_path = os.path.abspath(os.path.join(self.root, rp.replace('/', os.sep)))
        # Path-traversal guard. Compare against the root WITH a trailing
        # separator: a plain string-prefix check would also accept a sibling
        # directory such as "<root>-evil" that merely starts with the root's
        # name. os.path.join(root, '') appends the separator portably.
        if not resolved_path.startswith(os.path.join(os.path.abspath(self.root), '')):
            return self.fail_request(rq, QWebEngineUrlRequestJob.Error.UrlNotFound)
        try:
            with open(resolved_path, 'rb') as f:
                data = f.read()
        except OSError as err:
            default_log(f'Failed to read file: {rp} with error: {err}')
            return self.fail_request(rq, QWebEngineUrlRequestJob.Error.RequestFailed)
        send_reply(rq, guess_type(os.path.basename(resolved_path)), data)

    def fail_request(self, rq, fail_code=None):
        # Fail the request job and log the blocked URL for debugging
        if fail_code is None:
            fail_code = QWebEngineUrlRequestJob.Error.UrlNotFound
        rq.fail(fail_code)
        print(f"Blocking FAKE_PROTOCOL request: {rq.requestUrl().toString()} with code: {fail_code}", file=sys.stderr)
class Render(QWebEnginePage):
    # Loads an HTML page, optionally reads page-layout hints from a
    # <meta name=calibre-html-render-data> tag, prints it to rendered.pdf and
    # exits the Qt event loop. Exit codes: 0 success, 1 load failed,
    # 2 print failed, 3 load hang, 4 print hang.
    def __init__(self, profile):
        QWebEnginePage.__init__(self, profile, QApplication.instance())
        secure_webengine(self)
        self.printing_started = False
        # Queued connection so load_finished runs after the load machinery unwinds
        self.loadFinished.connect(self.load_finished, type=Qt.ConnectionType.QueuedConnection)
        self.pdfPrintingFinished.connect(self.print_finished)
        # Watchdog timer, polled every 500ms to detect hangs
        self.hang_timer = t = QTimer(self)
        t.setInterval(500)
        t.timeout.connect(self.hang_check)
    def break_cycles(self):
        # Disconnect signals and detach from parent so the page can be garbage collected
        self.hang_timer.timeout.disconnect()
        self.pdfPrintingFinished.disconnect()
        self.setParent(None)
    def load_finished(self, ok):
        # On success, pull render hints (margins/size) out of the page's meta
        # tag and start printing; on failure exit with code 1.
        if ok:
            self.runJavaScript('''
            var ans = {};
            var meta = document.querySelector('meta[name=calibre-html-render-data]');
            if (meta) {
                try {
                    ans = JSON.parse(meta.content);
                    console.log(ans);
                } catch {}
            }
            ans;
            ''', QWebEngineScript.ScriptWorldId.ApplicationWorld, self.start_print)
        else:
            self.hang_timer.stop()
            QApplication.instance().exit(1)
    def javaScriptConsoleMessage(self, level, msg, linenumber, source_id):
        # Silence console noise from the rendered page
        pass
    def start_load(self, path_to_html, root):
        # Load the file via the fake protocol so UrlSchemeHandler serves it
        url = QUrl(f'{FAKE_PROTOCOL}://{FAKE_HOST}')
        url.setPath('/' + os.path.relpath(path_to_html, root).replace(os.sep, '/'))
        self.setUrl(url)
        self.start_time = monotonic()
        self.hang_timer.start()
    def hang_check(self):
        # Watchdog: abort with a distinct exit code if load/print takes too long
        if self.printing_started:
            if monotonic() - self.start_time > PRINT_TIMEOUT:
                self.hang_timer.stop()
                QApplication.instance().exit(4)
        else:
            if monotonic() - self.start_time > LOAD_TIMEOUT:
                self.hang_timer.stop()
                QApplication.instance().exit(3)
    def start_print(self, data):
        # data: dict parsed from the page's meta tag (may be anything else, in
        # which case defaults are used): optional 'margins' (4 numbers),
        # 'size' (a QPageSize name or a parseable size string) and 'unit'.
        margins = QMarginsF(0, 0, 0, 0)
        page_size = QPageSize(QPageSize.PageSizeId.A4)
        if isinstance(data, dict):
            try:
                if 'margins' in data:
                    margins = QMarginsF(*data['margins'])
                if 'size' in data:
                    sz = data['size']
                    if type(getattr(QPageSize, sz, None)) is type(QPageSize.PageSizeId.A4): # noqa
                        page_size = QPageSize(getattr(QPageSize, sz))
                    else:
                        from calibre.ebooks.pdf.image_writer import parse_pdf_page_size
                        ps = parse_pdf_page_size(sz, data.get('unit', 'inch'))
                        if ps is not None:
                            page_size = ps
            except Exception:
                # Malformed hints fall back to A4 with zero margins
                pass
        page_layout = QPageLayout(page_size, QPageLayout.Orientation.Portrait, margins)
        self.printToPdf('rendered.pdf', page_layout)
        self.printing_started = True
        self.start_time = monotonic()
    def print_finished(self, path, ok):
        QApplication.instance().exit(0 if ok else 2)
        self.hang_timer.stop()
def main(path_to_html, tdir, image_format='jpeg', root=''):
    '''Render path_to_html to rendered.<image_format> inside tdir.

    Resources are served from root (defaults to the HTML file's directory).
    Returns True if rendering and image extraction succeeded.
    '''
    if image_format not in ('jpeg', 'png'):
        raise ValueError('Image format must be either jpeg or png')
    must_use_qt()
    setup_qt_logging()
    setup_fake_protocol()
    app = QApplication.instance()
    profile = setup_profile(QWebEngineProfile(app))
    path_to_html = os.path.abspath(path_to_html)
    serve_root = root or os.path.dirname(path_to_html)
    url_handler = UrlSchemeHandler(serve_root, parent=profile)
    interceptor = RequestInterceptor(profile)
    profile.installUrlSchemeHandler(QByteArray(FAKE_PROTOCOL.encode('ascii')), url_handler)
    profile.setUrlRequestInterceptor(interceptor)
    os.chdir(tdir)
    page = Render(profile)
    page.start_load(path_to_html, url_handler.root)
    ret = app.exec()
    page.break_cycles()
    del page
    if ret == 0:
        # Convert the printed PDF's first page into the requested image
        page_images('rendered.pdf', image_format=image_format)
        ext = 'jpg' if image_format == 'jpeg' else image_format
        atomic_rename('page-images-1.' + ext, 'rendered.' + image_format)
    return ret == 0
if __name__ == '__main__':
    # Render the HTML file given as the last CLI argument into the current directory
    if not main(sys.argv[-1], '.'):
        raise SystemExit('Failed to render HTML')
| 7,437 | Python | .py | 165 | 35.818182 | 139 | 0.640293 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,260 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/iterator/__init__.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
| 146 | Python | .py | 4 | 35 | 58 | 0.678571 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,261 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/epub/__init__.py | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Conversion to EPUB.
'''
from calibre.utils.zipfile import ZIP_STORED, ZipFile
def rules(stylesheets):
    '''Yield every style rule from the given stylesheets.

    Objects without a cssText attribute are skipped (they are not CSS
    stylesheets); within a sheet, only rules of type STYLE_RULE are yielded.
    '''
    for sheet in stylesheets:
        if not hasattr(sheet, 'cssText'):
            continue
        for rule in sheet:
            if rule.type == rule.STYLE_RULE:
                yield rule
def simple_container_xml(opf_path, extra_entries=''):
    '''Return the text of META-INF/container.xml pointing at opf_path.

    extra_entries is raw XML inserted verbatim as additional rootfile lines.
    '''
    return f'''\
<?xml version="1.0"?>
<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
   <rootfiles>
      <rootfile full-path="{opf_path}" media-type="application/oebps-package+xml"/>
      {extra_entries}
   </rootfiles>
</container>
'''
def initialize_container(path_to_container, opf_name='metadata.opf',
        extra_entries=()):
    '''
    Create an empty EPUB document, with a default skeleton.

    :param path_to_container: path at which the EPUB (ZIP) file is created
    :param opf_name: name of the main OPF file referenced by container.xml
    :param extra_entries: iterable of (path, mimetype, data) triples; each is
        listed as an additional rootfile and written into the archive.
        (Default changed from a mutable ``[]`` to ``()`` — it is only
        iterated, so behavior is unchanged.)
    :return: the open ZipFile; the caller is responsible for closing it
    '''
    rootfiles = ''
    for path, mimetype, _ in extra_entries:
        rootfiles += '<rootfile full-path="{}" media-type="{}"/>'.format(
                path, mimetype)
    CONTAINER = simple_container_xml(opf_name, rootfiles).encode('utf-8')
    zf = ZipFile(path_to_container, 'w')
    # Per the EPUB OCF spec the mimetype entry must come first and be stored
    # uncompressed
    zf.writestr('mimetype', b'application/epub+zip', compression=ZIP_STORED)
    # NOTE(review): third positional argument 0o755 appears to be the
    # permissions parameter of calibre's ZipFile.writestr (this is
    # calibre.utils.zipfile, not the stdlib) — confirm against that module.
    zf.writestr('META-INF/', b'', 0o755)
    zf.writestr('META-INF/container.xml', CONTAINER)
    for path, _, data in extra_entries:
        zf.writestr(path, data)
    return zf
| 1,472 | Python | .py | 40 | 31.275 | 81 | 0.647719 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,262 | periodical.py | kovidgoyal_calibre/src/calibre/ebooks/epub/periodical.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import time
from uuid import uuid4
from calibre import prepare_string_for_xml as xml
from calibre import strftime
from calibre.constants import __appname__, __version__
from calibre.utils.date import parse_date
# RDF/XML metadata skeleton marking the book as a Sony newspaper/periodical.
SONY_METADATA = '''\
<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:prs="http://xmlns.sony.net/e-book/prs/">
<rdf:Description rdf:about="">
<dc:title>{title}</dc:title>
<dc:publisher>{publisher}</dc:publisher>
<dcterms:alternative>{short_title}</dcterms:alternative>
<dcterms:issued>{issue_date}</dcterms:issued>
<dc:language>{language}</dc:language>
<dcterms:conformsTo rdf:resource="http://xmlns.sony.net/e-book/prs/periodicals/1.0/newspaper/1.0"/>
<dcterms:type rdf:resource="http://xmlns.sony.net/e-book/prs/datatype/newspaper"/>
<dcterms:type rdf:resource="http://xmlns.sony.net/e-book/prs/datatype/periodical"/>
</rdf:Description>
</rdf:RDF>
'''
# Atom feed skeleton; {entries} is filled with section/article entries below.
SONY_ATOM = '''\
<?xml version="1.0" encoding="utf-8" ?>
<feed xmlns="http://www.w3.org/2005/Atom"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:dcterms="http://purl.org/dc/terms/"
xmlns:prs="http://xmlns.sony.net/e-book/prs/"
xmlns:media="http://video.search.yahoo.com/mrss"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<title>{short_title}</title>
<updated>{updated}</updated>
<id>{id}</id>
{entries}
</feed>
'''
# One Atom entry per TOC section (newspaper/section datatype).
SONY_ATOM_SECTION = '''\
<entry rdf:ID="{title}">
<title>{title}</title>
<link href="{href}"/>
<id>{id}</id>
<updated>{updated}</updated>
<summary>{desc}</summary>
<category term="{short_title}/{title}"
scheme="http://xmlns.sony.net/e-book/terms/" label="{title}"/>
<dc:type xsi:type="prs:datatype">newspaper/section</dc:type>
<dcterms:isReferencedBy rdf:resource=""/>
</entry>
'''
# One Atom entry per article, referencing its section by title.
SONY_ATOM_ENTRY = '''\
<entry>
<title>{title}</title>
<author><name>{author}</name></author>
<link href="{href}"/>
<id>{id}</id>
<updated>{updated}</updated>
<summary>{desc}</summary>
<category term="{short_title}/{section_title}"
scheme="http://xmlns.sony.net/e-book/terms/" label="{section_title}"/>
<dcterms:extent xsi:type="prs:word-count">{word_count}</dcterms:extent>
<dc:type xsi:type="prs:datatype">newspaper/article</dc:type>
<dcterms:isReferencedBy rdf:resource="#{section_title}"/>
</entry>
'''
def sony_metadata(oeb):
    '''Generate Sony periodical metadata for an OEB book.

    :param oeb: an OEBBook whose metadata and TOC are read
    :return: tuple (metadata, atom) — the RDF metadata as text and the Atom
        feed describing sections/articles as UTF-8 encoded bytes

    The bare ``except:`` clauses of the original have been narrowed to
    ``except Exception:`` so that KeyboardInterrupt/SystemExit propagate;
    the fallbacks themselves are unchanged.
    '''
    m = oeb.metadata
    title = short_title = str(m.title[0])
    publisher = __appname__ + ' ' + __version__
    try:
        # Publication type is of the form periodical:<slug>:<short title>
        pt = str(oeb.metadata.publication_type[0])
        short_title = ':'.join(pt.split(':')[2:])
    except Exception:
        pass
    try:
        date = parse_date(str(m.date[0]),
                as_utc=False).strftime('%Y-%m-%d')
    except Exception:
        date = strftime('%Y-%m-%d')
    try:
        language = str(m.language[0]).replace('_', '-')
    except Exception:
        language = 'en'
    short_title = xml(short_title, True)
    metadata = SONY_METADATA.format(title=xml(title),
            short_title=short_title,
            publisher=xml(publisher), issue_date=xml(date),
            language=xml(language))
    updated = strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())

    def cal_id(x):
        # True for the uuid identifier that calibre adds to the metadata
        for k, v in x.attrib.items():
            if k.endswith('scheme') and v == 'uuid':
                return True

    try:
        base_id = str(list(filter(cal_id, m.identifier))[0])
    except Exception:
        base_id = str(uuid4())
    toc = oeb.toc
    if False and toc.depth() < 3:
        # Single section periodical
        # Disabled since I prefer the current behavior
        from calibre.ebooks.oeb.base import TOC
        section = TOC(klass='section', title=_('All articles'),
                    href=oeb.spine[2].href)
        for x in toc:
            section.nodes.append(x)
        toc = TOC(klass='periodical', href=oeb.spine[2].href,
                    title=str(oeb.metadata.title[0]))
        toc.nodes.append(section)
    entries = []
    seen_titles = set()
    for i, section in enumerate(toc):
        if not section.href:
            continue
        secid = 'section%d'%i
        sectitle = section.title
        if not sectitle:
            sectitle = _('Unknown')
        # Disambiguate duplicate section titles by appending a counter
        d = 1
        bsectitle = sectitle
        while sectitle in seen_titles:
            sectitle = bsectitle + ' ' + str(d)
            d += 1
        seen_titles.add(sectitle)
        sectitle = xml(sectitle, True)
        secdesc = section.description
        if not secdesc:
            secdesc = ''
        secdesc = xml(secdesc)
        entries.append(SONY_ATOM_SECTION.format(title=sectitle,
            href=section.href, id=xml(base_id)+'/'+secid,
            short_title=short_title, desc=secdesc, updated=updated))
        for j, article in enumerate(section):
            if not article.href:
                continue
            atitle = article.title
            btitle = atitle
            d = 1
            # NOTE(review): article titles are never added to seen_titles, so
            # this only disambiguates against section titles — confirm intent.
            while atitle in seen_titles:
                atitle = btitle + ' ' + str(d)
                d += 1
            auth = article.author if article.author else ''
            # NOTE(review): uses the SECTION description for the article
            # summary, not article.description — confirm this is intended.
            desc = section.description
            if not desc:
                desc = ''
            aid = 'article%d'%j
            entries.append(SONY_ATOM_ENTRY.format(
                title=xml(atitle),
                author=xml(auth),
                updated=updated,
                desc=desc,
                short_title=short_title,
                section_title=sectitle,
                href=article.href,
                word_count=str(1),
                id=xml(base_id)+'/'+secid+'/'+aid
                ))
    atom = SONY_ATOM.format(short_title=short_title,
            entries='\n\n'.join(entries), updated=updated,
            id=xml(base_id)).encode('utf-8')
    return metadata, atom
| 6,117 | Python | .py | 167 | 29.065868 | 107 | 0.595849 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,263 | pages.py | kovidgoyal_calibre/src/calibre/ebooks/epub/pages.py | '''
Add page mapping information to an EPUB book.
'''
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
__docformat__ = 'restructuredtext en'
import re
from itertools import count
from lxml.etree import XPath
from calibre.ebooks.oeb.base import XHTML_NS, OEBBook
# XPath namespace prefixes; all three conventional prefixes map to XHTML.
NSMAP = {'h': XHTML_NS, 'html': XHTML_NS, 'xhtml': XHTML_NS}
# Matches any occurrence of the word 'page' (case-insensitive).
PAGE_RE = re.compile(r'page', re.IGNORECASE)
# Matches a token consisting solely of roman-numeral letters.
ROMAN_RE = re.compile(r'^[ivxlcdm]+$', re.IGNORECASE)
def filter_name(name):
    '''Reduce a raw page label to its essential name.

    Strips whitespace, deletes every occurrence of 'page', then returns the
    first token that looks like an arabic or roman numeral; if no such token
    exists the remaining text is returned unchanged.
    '''
    name = PAGE_RE.sub('', name.strip())
    for token in name.split():
        if token.isdigit() or ROMAN_RE.match(token):
            return token
    return name
def build_name_for(expr):
    '''Return a callable mapping a page element to its page name.

    With a falsy expr the callable ignores its argument and yields the
    values of a running counter as strings ('1', '2', ...). Otherwise expr
    is compiled as an XPath expression whose joined, filtered results form
    the name (empty string when nothing matches).
    '''
    if not expr:
        counter = count(1)
        return lambda elem: str(next(counter))
    selector = XPath(expr, namespaces=NSMAP)
    def name_for(elem):
        matches = selector(elem)
        if not matches:
            return ''
        return filter_name(' '.join(matches))
    return name_for
def add_page_map(opfpath, opts):
    # Add page-map entries to an OEB book: every element matched by the
    # opts.page XPath gets an id (generated if missing) and is registered in
    # oeb.pages as name -> href, with names produced per opts.page_names.
    oeb = OEBBook(opfpath)
    selector = XPath(opts.page, namespaces=NSMAP)
    name_for = build_name_for(opts.page_names)
    idgen = ("calibre-page-%d" % n for n in count(1))
    for item in oeb.spine:
        data = item.data
        for elem in selector(data):
            name = name_for(elem)
            id = elem.get('id', None)
            if id is None:
                id = elem.attrib['id'] = next(idgen)
            href = '#'.join((item.href, id))
            oeb.pages.add(name, href)
    writer = None # DirWriter(version='2.0', page_map=True)
    # NOTE(review): writer is always None (DirWriter is commented out above),
    # so the next line raises AttributeError whenever this function runs —
    # confirm whether this function is still meant to be callable.
    writer.dump(oeb, opfpath)
| 1,673 | Python | .py | 49 | 27.897959 | 66 | 0.61825 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,264 | tests.py | kovidgoyal_calibre/src/calibre/ebooks/epub/cfi/tests.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import numbers
import unittest
from calibre.ebooks.epub.cfi.parse import cfi_sort_key, decode_cfi, parser
from polyglot.builtins import iteritems
class Tests(unittest.TestCase):
    '''Unit tests for the EPUB CFI parser: sort keys, path parsing and
    resolving CFIs against a parsed document tree.'''
    def test_sorting(self):
        # cfi_sort_key -> (step numbers, (temporal, (y, x) spatial, text offset))
        null_offsets = (0, (0, 0), 0)
        for path, key in [
            ('/1/2/3', ((1, 2, 3), null_offsets)),
            ('/1[id]:34[yyyy]', ((1,), (0, (0, 0), 34))),
            ('/1@1:2', ((1,), (0, (2, 1), 0))),
            ('/1~1.2', ((1,), (1.2, (0, 0), 0))),
        ]:
            self.assertEqual(cfi_sort_key(path), key)
    def test_parsing(self):
        p = parser()
        # Helpers build the expected parse-tree dicts compactly:
        # step/s -> steps, r -> redirected path, o -> offsets, a -> text assertions
        def step(x):
            if isinstance(x, numbers.Integral):
                return {'num': x}
            return {'num':x[0], 'id':x[1]}
        def s(*args):
            return {'steps':list(map(step, args))}
        def r(*args):
            idx = args.index('!')
            ans = s(*args[:idx])
            ans['redirect'] = s(*args[idx+1:])
            return ans
        def o(*args):
            ans = s(1)
            step = ans['steps'][-1]
            typ, val = args[:2]
            step[{'@':'spatial_offset', '~':'temporal_offset', ':':'text_offset'}[typ]] = val
            if len(args) == 4:
                typ, val = args[2:]
                step[{'@':'spatial_offset', '~':'temporal_offset'}[typ]] = val
            return ans
        def a(before=None, after=None, **params):
            ans = o(':', 3)
            step = ans['steps'][-1]
            ta = {}
            if before is not None:
                ta['before'] = before
            if after is not None:
                ta['after'] = after
            if params:
                ta['params'] = {str(k):(v,) if isinstance(v, str) else v for k, v in iteritems(params)}
            if ta:
                step['text_assertion'] = ta
            return ans
        for raw, path, leftover in [
            # Test parsing of steps
            ('/2', s(2), ''),
            ('/2/3/4', s(2, 3, 4), ''),
            ('/1/2[some^,^^id]/3', s(1, (2, 'some,^id'), 3), ''),
            ('/1/2!/3/4', r(1, 2, '!', 3, 4), ''),
            ('/1/2[id]!/3/4', r(1, (2, 'id'), '!', 3, 4), ''),
            ('/1!/2[id]/3/4', r(1, '!', (2, 'id'), 3, 4), ''),
            # Test parsing of offsets
            ('/1~0', o('~', 0), ''),
            ('/1~7', o('~', 7), ''),
            ('/1~43.1', o('~', 43.1), ''),
            ('/1~0.01', o('~', 0.01), ''),
            ('/1~1.301', o('~', 1.301), ''),
            ('/1@23:34.1', o('@', (23, 34.1)), ''),
            ('/1@23:34.10', o('@', (23, 34.1)), ''),
            ('/1~3@3.1:2.3', o('~', 3.0, '@', (3.1, 2.3)), ''),
            ('/1:0', o(':', 0), ''),
            ('/1:3', o(':', 3), ''),
            # Test parsing of text assertions
            ('/1:3[aa^,b]', a('aa,b'), ''),
            ('/1:3[aa-b]', a('aa-b'), ''),
            ('/1:3[aa^-b]', a('aa-b'), ''),
            ('/1:3[aa-^--b]', a('aa---b'), ''),
            ('/1:3[aa^,b,c1]', a('aa,b', 'c1'), ''),
            ('/1:3[,aa^,b]', a(after='aa,b'), ''),
            ('/1:3[;s=a]', a(s='a'), ''),
            ('/1:3[a;s=a]', a('a', s='a'), ''),
            ('/1:3[a;s=a^,b,c^;d;x=y]', a('a', s=('a,b', 'c;d'), x='y'), ''),
        ]:
            self.assertEqual(p.parse_path(raw), (path, leftover))
    def test_cfi_decode(self):
        from calibre.ebooks.oeb.polish.parsing import parse
        root = parse('''
<html>
<head></head>
<body id="body01">
        <p>…</p>
        <p>…</p>
        <p>…</p>
        <p>…</p>
        <p id="para05">xxx<em>yyy</em>0123456789</p>
        <p>…</p>
        <p>…</p>
        <img id="svgimg" src="foo.svg" alt="…"/>
        <p>…</p>
        <p><span>hello</span><span>goodbye</span>text here<em>adieu</em>text there</p>
</body>
</html>
''', line_numbers=True, linenumber_attribute='data-lnum')
        body = root[-1]
        def test(cfi, expected):
            self.assertIs(decode_cfi(root, cfi), expected)
        # Id assertions win over (even wildly wrong) child numbers
        for cfi in '/4 /4[body01] /900[body01] /2[body01]'.split():
            test(cfi, body)
        # Children live at even indices per the CFI spec
        for i in range(len(body)):
            test(f'/4/{(i + 1)*2}', body[i])
        p = body[4]
        test('/4/999[para05]', p)
        test('/4/999[para05]/2', p[0])
def find_tests():
    '''Return a test suite containing every test defined in Tests.'''
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(Tests)
if __name__ == '__main__':
    # Allow running this test module directly from the command line
    unittest.TextTestRunner(verbosity=2).run(find_tests())
| 4,540 | Python | .py | 116 | 28.396552 | 103 | 0.413738 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,265 | parse.py | kovidgoyal_calibre/src/calibre/ebooks/epub/cfi/parse.py | #!/usr/bin/env python
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import regex
class Parser:
    ''' See epubcfi.ebnf for the specification that this parser tries to
    follow. I have implemented it manually, since I dont want to depend on
    grako, and the grammar is pretty simple. This parser is thread-safe, i.e.
    it can be used from multiple threads simultaneously. '''
    def __init__(self):
        # Pre-compile all regexes used by the grammar; instance state is
        # read-only after construction, which is what makes it thread-safe.
        # All allowed unicode characters + escaped special characters
        special_char = r'[\[\](),;=^]'
        unescaped_char = f'[[\t\n\r -\ud7ff\ue000-\ufffd\U00010000-\U0010ffff]--{special_char}]'
        # calibre used to escape hyphens as well, so recognize them even though
        # not strictly spec compliant
        escaped_char = r'\^' + special_char[:-1] + '-]'
        chars = fr'(?:{unescaped_char}|(?:{escaped_char}))+'
        # NOTE(review): 'chars' no longer contains the literal text '0020', so
        # this replace is a no-op and chars_no_space == chars — looks like a
        # leftover from a \u0020-based pattern; confirm whether parameter
        # names should really still admit spaces.
        chars_no_space = chars.replace('0020', '0021')
        # No leading zeros allowed for integers
        integer = r'(?:[1-9][0-9]*)|0'
        # No leading zeros, except for numbers in (0, 1) and no trailing zeros for the fractional part
        frac = r'\.[0-9]{1,}'
        number = r'(?:[1-9][0-9]*(?:{0})?)|(?:0{0})|(?:0)'.format(frac)
        def c(x):
            # VERSION1 enables set operations like [a--b] used above
            return regex.compile(x, flags=regex.VERSION1)
        # A step of the form /integer
        self.step_pat = c(r'/(%s)' % integer)
        # An id assertion of the form [characters]
        self.id_assertion_pat = c(r'\[(%s)\]' % chars)
        # A text offset of the form :integer
        self.text_offset_pat = c(r':(%s)' % integer)
        # A temporal offset of the form ~number
        self.temporal_offset_pat = c(r'~(%s)' % number)
        # A spatial offset of the form @number:number
        self.spatial_offset_pat = c(r'@({0}):({0})'.format(number))
        # A spatio-temporal offset of the form ~number@number:number
        self.st_offset_pat = c(r'~({0})@({0}):({0})'.format(number))
        # Text assertion patterns
        self.ta1_pat = c(r'({0})(?:,({0})){{0,1}}'.format(chars))
        self.ta2_pat = c(r',(%s)' % chars)
        self.parameters_pat = c(fr'(?:;({chars_no_space})=((?:{chars},?)+))+')
        self.csv_pat = c(r'(?:(%s),?)+' % chars)
        # Unescape characters
        unescape_pat = c(fr'{escaped_char[:2]}({escaped_char[2:]})')
        self.unescape = lambda x: unescape_pat.sub(r'\1', x)
    def parse_epubcfi(self, raw):
        ' Parse a full epubcfi of the form epubcfi(path [ , path , path ]) '
        # Returns (parent_cfi, start_cfi, end_cfi, leftover_text); all three
        # dicts are empty when parsing fails.
        null = {}, {}, {}, raw
        if not raw:
            return null
        if not raw.startswith('epubcfi('):
            return null
        raw = raw[len('epubcfi('):]
        parent_cfi, raw = self.parse_path(raw)
        if not parent_cfi:
            return null
        start_cfi, end_cfi = {}, {}
        if raw.startswith(','):
            start_cfi, raw = self.parse_path(raw[1:])
            if raw.startswith(','):
                end_cfi, raw = self.parse_path(raw[1:])
            if not start_cfi or not end_cfi:
                return null
        if raw.startswith(')'):
            raw = raw[1:]
        else:
            return null
        return parent_cfi, start_cfi, end_cfi, raw
    def parse_path(self, raw):
        ' Parse the path component of an epubcfi of the form /step... '
        # Returns (path_dict, leftover_text); path_dict is {} when no steps parsed.
        path = {'steps':[]}
        raw = self._parse_path(raw, path)
        if not path['steps']:
            path = {}
        return path, raw
    def do_match(self, pat, raw):
        # Try pat at the start of raw; on success consume the matched prefix.
        m = pat.match(raw)
        if m is not None:
            raw = raw[len(m.group()):]
        return m, raw
    def _parse_path(self, raw, ans):
        # Recursively consume /step[id] elements, following '!' redirects and
        # stopping once an offset (which is terminal) has been parsed.
        m, raw = self.do_match(self.step_pat, raw)
        if m is None:
            return raw
        ans['steps'].append({'num':int(m.group(1))})
        m, raw = self.do_match(self.id_assertion_pat, raw)
        if m is not None:
            ans['steps'][-1]['id'] = self.unescape(m.group(1))
        if raw.startswith('!'):
            ans['redirect'] = r = {'steps':[]}
            return self._parse_path(raw[1:], r)
        else:
            remaining_raw = self.parse_offset(raw, ans['steps'][-1])
            return self._parse_path(raw, ans) if remaining_raw is None else remaining_raw
    def parse_offset(self, raw, ans):
        # Parse one offset form into ans; returns the consumed remainder, or
        # None when raw does not start with any offset.
        m, raw = self.do_match(self.text_offset_pat, raw)
        if m is not None:
            ans['text_offset'] = int(m.group(1))
            return self.parse_text_assertion(raw, ans)
        m, raw = self.do_match(self.st_offset_pat, raw)
        if m is not None:
            t, x, y = m.groups()
            ans['temporal_offset'] = float(t)
            ans['spatial_offset'] = tuple(map(float, (x, y)))
            return raw
        m, raw = self.do_match(self.temporal_offset_pat, raw)
        if m is not None:
            ans['temporal_offset'] = float(m.group(1))
            return raw
        m, raw = self.do_match(self.spatial_offset_pat, raw)
        if m is not None:
            ans['spatial_offset'] = tuple(map(float, m.groups()))
            return raw
    def parse_text_assertion(self, raw, ans):
        # Parse an optional [before,after;name=value,...] text assertion;
        # leaves raw untouched when the assertion is absent or malformed.
        oraw = raw
        if not raw.startswith('['):
            return oraw
        raw = raw[1:]
        ta = {}
        m, raw = self.do_match(self.ta1_pat, raw)
        if m is not None:
            before, after = m.groups()
            ta['before'] = self.unescape(before)
            if after is not None:
                ta['after'] = self.unescape(after)
        else:
            m, raw = self.do_match(self.ta2_pat, raw)
            if m is not None:
                ta['after'] = self.unescape(m.group(1))
        # parse parameters
        m, raw = self.do_match(self.parameters_pat, raw)
        if m is not None:
            params = {}
            for name, value in zip(m.captures(1), m.captures(2)):
                params[name] = tuple(map(self.unescape, self.csv_pat.match(value).captures(1)))
            if params:
                ta['params'] = params
        if not raw.startswith(']'):
            return oraw # no closing ] or extra content in the assertion
        if ta:
            ans['text_assertion'] = ta
        return raw[1:]
_parser = None
def parser():
    '''Return the shared Parser instance, constructing it on first use.'''
    global _parser
    p = _parser
    if p is None:
        p = _parser = Parser()
    return p
def get_steps(pcfi):
    '''Flatten a parsed CFI dict into one tuple of steps, following redirects.'''
    flattened = tuple(pcfi['steps'])
    if 'redirect' in pcfi:
        flattened = flattened + get_steps(pcfi['redirect'])
    return flattened
def cfi_sort_key(cfi, only_path=True):
    '''Return a sort key for a CFI: (step numbers, offsets).

    The offsets component is (temporal, (y, x) spatial, text) from the last
    step. Unparseable input yields an all-zero key so it sorts first.
    '''
    default_key = (), (0, (0, 0), 0)
    p = parser()
    try:
        if only_path:
            parsed = p.parse_path(cfi)[0]
        else:
            parent, start = p.parse_epubcfi(cfi)[:2]
            parsed = start or parent
    except Exception:
        import traceback
        traceback.print_exc()
        return default_key
    if not parsed:
        import sys
        print('Failed to parse CFI: %r' % cfi, file=sys.stderr)
        return default_key
    steps = get_steps(parsed)
    step_nums = tuple(s.get('num', 0) for s in steps)
    last_step = steps[-1] if steps else {}
    offsets = (
        last_step.get('temporal_offset', 0),
        tuple(reversed(last_step.get('spatial_offset', (0, 0)))),
        last_step.get('text_offset', 0),
    )
    return step_nums, offsets
def decode_cfi(root, cfi):
    '''Resolve the CFI path ``cfi`` to an element inside the lxml tree ``root``.

    For each step, an id assertion is tried first (anywhere in the subtree);
    failing that, the step number selects a child element, with children at
    even indices per the CFI spec. Returns None when the CFI cannot be
    parsed or does not resolve.
    '''
    from lxml.etree import XPathEvalError
    p = parser()
    try:
        pcfi = p.parse_path(cfi)[0]
    except Exception:
        import traceback
        traceback.print_exc()
        return
    if not pcfi:
        import sys
        # Bug fix: report the original CFI string; previously this printed
        # pcfi, which at this point is always the empty parse result.
        print('Failed to parse CFI: %r' % cfi, file=sys.stderr)
        return
    steps = get_steps(pcfi)
    ans = root
    for step in steps:
        num = step.get('num', 0)
        node_id = step.get('id')
        try:
            match = ans.xpath('descendant::*[@id="%s"]' % node_id)
        except XPathEvalError:
            match = ()
        if match:
            ans = match[0]
            continue
        index = 0
        for child in ans.iterchildren('*'):
            index |= 1 # increment index by 1 if it is even
            index += 1
            if index == num:
                ans = child
                break
        else:
            # no child at this index: the CFI does not resolve
            return
    return ans
if __name__ == '__main__':
    import sys
    # Quick manual check: print the sort key of a CFI given on the command line
    print(cfi_sort_key(sys.argv[-1], only_path=False))
| 8,253 | Python | .py | 212 | 29.65566 | 129 | 0.544024 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,266 | headings_to_sections.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/headings_to_sections.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class HeadingsToSections:
    """
    Rewrite the intermediate rtf2xml token stream so that runs introduced by
    'heading N' styled paragraphs become nested <section> elements, tracking
    hierarchical section numbers. The result replaces the input file.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
        ):
        """
        Required:
            'file'
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        # NOTE(review): the docstring mentions a 'temp_dir' option but no such
        # parameter exists; run_level is accepted but never used here.
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__write_to = better_mktemp()
    def __initiate_values(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            The self.__end_list is a list of tokens that will force a list to end.
            Likewise, the self.__end_lines is a list of lines that forces a list to end.
        """
        self.__state = "default"
        self.__all_sections = []
        self.__chunk = ''
        # Dispatch table: current state name -> line handler
        self.__state_dict={
            'default' : self.__default_func,
            'in_table' : self.__in_table_func,
            'in_list' : self.__in_list_func,
            'after_body' : self.__after_body_func,
        }
        self.__list_depth = 0
        self.__end_list = [
            'mi<mk<body-close',
            # changed 2004-04-26
            # 'mi<mk<par-in-fld',
            'mi<mk<sect-close', # right before close of section
            'mi<mk<sect-start', # right before section start
            # this should be sect-close!
            # 'mi<mk<header-beg',
            # 'mi<mk<header-end',
            # 'mi<mk<head___clo',
            #
            # changed 2004-04-26
            # 'mi<mk<fldbk-end_',
            # 'mi<mk<sec-fd-beg',
        ]
        # Style names that open a section; list position determines the level
        self.__headings = [
            'heading 1', 'heading 2', 'heading 3', 'heading 4',
            'heading 5', 'heading 6', 'heading 7', 'heading 8',
            'heading 9'
        ]
        # Per-level counters used to build dotted section numbers like 2.1.3
        self.__section_num = [0]
        # NOTE(review): __id_regex is not used anywhere in this class
        self.__id_regex = re.compile(r'\<list-id\>(\d+)')
    # NOTE(review): __close_lists references self.__left_indent,
    # self.__all_lists, __write_end_item and __write_end_list, none of which
    # are defined in this class — calling it would raise AttributeError. It is
    # never invoked from within this class; confirm whether it is dead code.
    def __close_lists(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            Reverse the list of dictionaries. Iterate through the list and
            get the indent for each list. If the current indent is less than
            or equal to the indent in the dictionary, close that level.
            Keep track of how many levels you close. Reduce the list by that
            many levels.
            Reverse the list again.
        """
        current_indent = self.__left_indent
        self.__all_lists.reverse()
        num_levels_closed = 0
        for the_dict in self.__all_lists:
            list_indent = the_dict.get('left-indent')
            if current_indent <= list_indent:
                self.__write_end_item()
                self.__write_end_list()
                num_levels_closed += 1
        self.__all_lists = self.__all_lists[num_levels_closed:]
        self.__all_lists.reverse()
    def __close_sections(self, current_level):
        # Close every open section at current_level or deeper
        self.__all_sections.reverse()
        num_levels_closed = 0
        for level in self.__all_sections:
            if current_level <= level:
                self.__write_end_section()
                num_levels_closed += 1
        self.__all_sections = self.__all_sections[num_levels_closed:]
        self.__all_sections.reverse()
    def __write_start_section(self, current_level, name):
        # Emit the sect-start marker and the open tag carrying the dotted
        # section number, number-in-level, depth and heading style name
        section_num = ''
        for the_num in self.__section_num:
            section_num += '%s.' % the_num
        section_num = section_num[:-1]
        num_in_level = len(self.__all_sections)
        num_in_level = self.__section_num[num_in_level]
        level = len(self.__all_sections)
        self.__write_obj.write(
            'mi<mk<sect-start\n'
        )
        self.__write_obj.write(
            'mi<tg<open-att__<section<num>%s<num-in-level>%s<level>%s'
            '<type>%s\n'
            % (section_num, num_in_level, level, name)
        )
    def __write_end_section(self):
        self.__write_obj.write('mi<mk<sect-close\n')
        self.__write_obj.write('mi<tg<close_____<section\n')
    def __default_func(self, line):
        """
        Required:
            self, line
        Returns:
            Nothing
        Logic
            Look for the start of a paragraph definition. If one is found, check if
            it contains a list-id. If it does, start a list. Change the state to
            in_pard.
        """
        if self.__token_info == 'mi<mk<sect-start':
            # A new RTF section resets the top-level counter and truncates
            # any deeper counters
            self.__section_num[0] += 1
            self.__section_num = self.__section_num[0:1]
        if self.__token_info == 'mi<mk<tabl-start':
            self.__state = 'in_table'
        elif self.__token_info == 'mi<mk<list_start':
            self.__state = 'in_list'
            self.__list_depth += 1
        elif self.__token_info in self.__end_list:
            self.__close_sections(0)
        elif self.__token_info == 'mi<mk<style-name':
            # Style name payload follows the 17-char token prefix
            name = line[17:-1]
            if name in self.__headings:
                self.__handle_heading(name)
        if self.__token_info == 'mi<mk<body-close':
            self.__state = 'after_body'
        self.__write_obj.write(line)
    def __handle_heading(self, name):
        # Close sections at this heading's level or deeper, bump the counter
        # for this depth and open the new section
        num = self.__headings.index(name) + 1
        self.__close_sections(num)
        self.__all_sections.append(num)
        level_depth = len(self.__all_sections) + 1
        self.__section_num = self.__section_num[:level_depth]
        if len(self.__section_num) < level_depth:
            self.__section_num.append(1)
        else:
            self.__section_num[-1] += 1
        self.__write_start_section(num, name)
    def __in_table_func(self, line):
        # Pass tables through untouched until the table-end marker
        if self.__token_info == 'mi<mk<table-end_':
            self.__state = 'default'
        self.__write_obj.write(line)
    def __in_list_func(self, line):
        # Track nested list depth; return to default once all lists close
        if self.__token_info == 'mi<mk<list_close':
            self.__list_depth -= 1
        elif self.__token_info == 'mi<mk<list_start':
            self.__list_depth += 1
        if self.__list_depth == 0:
            self.__state = 'default'
        self.__write_obj.write(line)
    def __after_body_func(self, line):
        # Everything after the body is copied through verbatim
        self.__write_obj.write(line)
    def make_sections(self):
        """
        Required:
            nothing
        Returns:
            original file will be changed
        Logic:
        """
        self.__initiate_values()
        read_obj = open_for_read(self.__file)
        self.__write_obj = open_for_write(self.__write_to)
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            # First 16 characters identify the token type
            self.__token_info = line[:16]
            action = self.__state_dict.get(self.__state)
            action(line)
        read_obj.close()
        self.__write_obj.close()
        # 'copy' here is the rtf2xml copy MODULE (file-level import), not the
        # constructor parameter of the same name
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "sections_to_headings.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 8,271 | Python | .py | 213 | 29.117371 | 88 | 0.497264 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,267 | default_encoding.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/default_encoding.py | #########################################################################
# #
# copyright 2002 Paul Henry Tremblay #
# #
#########################################################################
'''
Codepages as to RTF 1.9.1:
437 United States IBM
708 Arabic (ASMO 708)
709 Arabic (ASMO 449+, BCON V4)
710 Arabic (transparent Arabic)
711 Arabic (Nafitha Enhanced)
720 Arabic (transparent ASMO)
819 Windows 3.1 (United States and Western Europe)
850 IBM multilingual
852 Eastern European
860 Portuguese
862 Hebrew
863 French Canadian
864 Arabic
865 Norwegian
866 Soviet Union
874 Thai
932 Japanese
936 Simplified Chinese
949 Korean
950 Traditional Chinese
1250 Eastern European
1251 Cyrillic
1252 Western European
1253 Greek
1254 Turkish
1255 Hebrew
1256 Arabic
1257 Baltic
1258 Vietnamese
1361 Johab
10000 MAC Roman
10001 MAC Japan
10004 MAC Arabic
10005 MAC Hebrew
10006 MAC Greek
10007 MAC Cyrillic
10029 MAC Latin2
10081 MAC Turkish
57002 Devanagari
57003 Bengali
57004 Tamil
57005 Telugu
57006 Assamese
57007 Oriya
57008 Kannada
57009 Malayalam
57010 Gujarati
57011 Punjabi
'''
import re
from . import open_for_read
class DefaultEncoding:
    """
    Find the default platform and code page of an RTF document.

    The input is scanned for the platform control words (\\mac, \\pc,
    \\pca) and the \\ansicpg code page; when no explicit code page is
    found, a platform-specific default is used instead. The scan is done
    lazily, on the first call to any of the getters.
    """
    # Note: not all these encodings are really supported by rtf2xml
    # See http://msdn.microsoft.com/en-us/library/windows/desktop/dd317756%28v=vs.85%29.aspx
    # and src\calibre\gui2\widgets.py for the input list in calibre
    ENCODINGS = {
        # Special cases: mapped to the Windows-1252 default
        'cp1252':'1252',
        'utf-8':'1252',
        'ascii':'1252',
        # Normal cases
        'big5':'950',
        'cp1250':'1250',
        'cp1251':'1251',
        'cp1253':'1253',
        'cp1254':'1254',
        'cp1255':'1255',
        'cp1256':'1256',
        'shift_jis':'932',
        'gb2312':'936',
        # Not in RTF 1.9.1 codepage specification
        'hz':'52936',
        'iso8859_5':'28595',
        'iso2022_jp':'50222',
        'iso2022_kr':'50225',
        'euc_jp':'51932',
        'euc_kr':'51949',
        'gb18030':'54936',
    }

    def __init__(self, in_file, bug_handler, default_encoding, run_level=1, check_raw=False):
        """
        Required:
            'in_file' -- the file to scan
            'bug_handler' -- exception class raised on internal errors
            'default_encoding' -- python codec name used as the fallback
        Optional:
            'run_level' -- verbosity/strictness level (kept for interface
                compatibility; unused here)
            'check_raw' -- scan raw RTF control words instead of the
                pre-tokenised stream
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__platform = 'Windows'
        self.__default_num = 'not-defined'
        # Unknown python encodings fall back to Windows-1252.
        self.__code_page = self.ENCODINGS.get(default_encoding, '1252')
        self.__datafetched = False
        self.__fetchraw = check_raw

    def find_default_encoding(self):
        """Return (platform, 'ansicpgNNNN', default-font-number)."""
        if not self.__datafetched:
            self._encoding()
            self.__datafetched = True
        code_page = 'ansicpg' + self.__code_page
        return self.__platform, code_page, self.__default_num

    def get_codepage(self):
        """Return the code page number as a string, scanning on first use."""
        if not self.__datafetched:
            self._encoding()
            self.__datafetched = True
        return self.__code_page

    def get_platform(self):
        """Return the platform name, scanning the file on first use."""
        if not self.__datafetched:
            self._encoding()
            self.__datafetched = True
        return self.__platform

    def _encoding(self):
        """Scan the input once for platform and code-page information."""
        with open_for_read(self.__file) as read_obj:
            cpfound = False
            if not self.__fetchraw:
                # Scan pre-tokenised lines up to the end of the RTF header.
                for line in read_obj:
                    self.__token_info = line[:16]
                    if self.__token_info == 'mi<mk<rtfhed-end':
                        break
                    if self.__token_info == 'cw<ri<macintosh_':
                        self.__platform = 'Macintosh'
                    elif self.__token_info == 'cw<ri<pc________':
                        self.__platform = 'IBMPC'
                    elif self.__token_info == 'cw<ri<pca_______':
                        self.__platform = 'OS/2'
                    # Only accept a non-zero \ansicpg value.
                    if self.__token_info == 'cw<ri<ansi-codpg' \
                        and int(line[20:-1]):
                        self.__code_page = line[20:-1]
                    if self.__token_info == 'cw<ri<deflt-font':
                        self.__default_num = line[20:-1]
                        cpfound = True
                    # cw<ri<deflt-font<nu<0
                if self.__platform != 'Windows' and \
                    not cpfound:
                    if self.__platform == 'Macintosh':
                        self.__code_page = '10000'
                    elif self.__platform == 'IBMPC':
                        self.__code_page = '437'
                    elif self.__platform == 'OS/2':
                        self.__code_page = '850'
            else:
                # Scan raw RTF for \mac/\pc/\ansi/\pca and \ansicpgNNNN.
                fenc = re.compile(r'\\(mac|pc|ansi|pca)[\\ \{\}\t\n]+')
                fenccp = re.compile(r'\\ansicpg(\d+)[\\ \{\}\t\n]+')
                for line in read_obj:
                    if fenc.search(line):
                        enc = fenc.search(line).group(1)
                        if fenccp.search(line):
                            cp = fenccp.search(line).group(1)
                            # BUG FIX: the original condition was inverted
                            # ('if not int(cp)'), which stored the code page
                            # only when it was zero. Match the non-raw
                            # branch above: only a non-zero value counts.
                            if int(cp):
                                self.__code_page = cp
                            cpfound = True
                            break
                        # NOTE(review): self.__platform is never changed in
                        # this raw branch, so this fallback is effectively
                        # unreachable; kept for parity with the original.
                        if self.__platform != 'Windows' and \
                            not cpfound:
                            if enc == 'mac':
                                self.__code_page = '10000'
                            elif enc == 'pc':
                                self.__code_page = '437'
                            elif enc == 'pca':
                                self.__code_page = '850'
if __name__ == '__main__':
    # Command-line helper: print the detected code page of the file given
    # as the first argument, using the encoding named by the second.
    import sys
    encoding_checker = DefaultEncoding(
        in_file=sys.argv[1],
        bug_handler=Exception,
        default_encoding=sys.argv[2],
        check_raw=True,
    )
    print(encoding_checker.get_codepage())
| 6,373 | Python | .py | 175 | 24.417143 | 93 | 0.465643 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,268 | colors.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/colors.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Colors:
    """
    Change lines with color info from color numbers to the actual color names.

    A small state machine driven by the 16-character token prefix of each
    line: before the color table lines pass through, inside the table a
    number -> '#rrggbb' dictionary is built, and after the table color
    numbers in font and border tokens are replaced with the stored hex
    values.
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1
        ):
        """
        Required:
            'file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        self.__file = in_file
        self.__copy = copy
        self.__bug_handler = bug_handler
        self.__line = 0  # current input line number, used in error messages
        self.__write_to = better_mktemp()
        self.__run_level = run_level

    def __initiate_values(self):
        """
        Initiate all values.
        """
        self.__color_dict = {}
        self.__state = 'before_color_table'
        self.__state_dict = {
            'before_color_table': self.__before_color_func,
            'in_color_table' : self.__in_color_func,
            'after_color_table' : self.__after_color_func,
            'cw<ci<red_______' : self.__default_color_func,
            'cw<ci<green_____' : self.__default_color_func,
            'cw<ci<blue______' : self.__blue_func,
            'tx<nu<__________' : self.__do_nothing_func,
        }
        self.__color_string = '#'
        self.__color_num = 1
        self.__line_color_exp = re.compile(r'bdr-color_:(\d+)')
        # cw<bd<bor-par-to<nu<bdr-hair__|bdr-li-wid:0.50|bdr-sp-wid:1.00|bdr-color_:2

    def __before_color_func(self, line):
        """
        Requires:
            line
        Returns:
            nothing
        Logic:
            Check to see if the line marks the beginning of the color table.
            If so, change states.
            Always print out the line.
        """
        # mi<mk<clrtbl-beg
        if self.__token_info == 'mi<mk<clrtbl-beg':
            self.__state = 'in_color_table'
        self.__write_obj.write(line)

    def __default_color_func(self, line):
        """
        Requires:
            line
        Returns:
            nothing
        Logic:
            Get the two-digit hex number from the end of the line and
            append it to the color string being accumulated.
        """
        hex_num = line[-3:-1]
        self.__color_string += hex_num

    def __blue_func(self, line):
        """
        Requires:
            line
        Returns:
            nothing
        Logic:
            Blue is the last component of an RGB triple, so the color is
            complete: add it to the color string, store the finished
            string in the color dictionary keyed by the current color
            number, write an empty tag carrying both, bump the color
            number, and reset the color string to '#'.
        """
        hex_num = line[-3:-1]
        self.__color_string += hex_num
        self.__color_dict[self.__color_num] = self.__color_string
        self.__write_obj.write(
            'mi<tg<empty-att_'
            '<color-in-table<num>%s<value>%s\n' % (self.__color_num, self.__color_string)
        )
        self.__color_num += 1
        self.__color_string = '#'

    def __in_color_func(self, line):
        """
        Requires:
            line
        Returns:
            nothing
        Logic:
            Check if the end of the color table has been reached. If so,
            change the state to after the color table.
            Otherwise, dispatch on self.__token_info; unknown tokens are
            reported to stderr and skipped.
        """
        # mi<mk<clrtbl-beg
        # cw<ci<red_______<nu<00
        if self.__token_info == 'mi<mk<clrtbl-end':
            self.__state = 'after_color_table'
        else:
            action = self.__state_dict.get(self.__token_info)
            if action is None:
                # BUG FIX: the original fell through and called None(line)
                # after printing this message, raising a TypeError.
                sys.stderr.write('in module colors.py\n'
                    'function is self.__in_color_func\n'
                    'no action for %s' % self.__token_info
                    )
            else:
                action(line)

    def __after_color_func(self, line):
        """
        Check the line to see if it contains color info. If it does, extract
        the number and look up the hex value in the color dictionary.
        Otherwise, print out the line unchanged.
        A number of 0 indicates "no color".
        """
        # cw<ci<font-color<nu<2
        if self.__token_info == 'cw<ci<font-color':
            hex_num = int(line[20:-1])
            hex_num = self.__figure_num(hex_num)
            if hex_num:
                self.__write_obj.write(
                    'cw<ci<font-color<nu<%s\n' % hex_num
                )
        elif line[0:5] == 'cw<bd':
            # Border tokens embed the color as 'bdr-color_:N'.
            the_index = line.find('bdr-color_')
            if the_index > -1:
                line = re.sub(self.__line_color_exp, self.__sub_from_line_color, line)
            self.__write_obj.write(line)
        else:
            self.__write_obj.write(line)
        # cw<bd<bor-par-to<nu<bdr-hair__|bdr-li-wid:0.50|bdr-sp-wid:1.00|bdr-color_:2

    def __sub_from_line_color(self, match_obj):
        """re.sub callback: replace a border color number with its hex value."""
        num = match_obj.group(1)
        try:
            num = int(num)
        except ValueError:
            if self.__run_level > 3:
                msg = 'can\'t make integer from string\n'
                raise self.__bug_handler(msg)
            else:
                return 'bdr-color_:no-value'
        hex_num = self.__figure_num(num)
        return 'bdr-color_:%s' % hex_num

    def __figure_num(self, num):
        """Map a color number to its hex string: 0 -> 'false' (no color),
        unknown numbers -> '0' (or raise at high run levels)."""
        if num == 0:
            hex_num = 'false'
        else:
            hex_num = self.__color_dict.get(num)
        if hex_num is None:
            hex_num = '0'
            if self.__run_level > 3:
                msg = 'no value in self.__color_dict' \
                    'for key %s at line %d\n' % (num, self.__line)
                raise self.__bug_handler(msg)
        return hex_num

    def __do_nothing_func(self, line):
        """
        Bad RTF will have text in the color table
        """
        pass

    def convert_colors(self):
        """
        Requires:
            nothing
        Returns:
            nothing (changes the original file)
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state. If the state is before the color table, look for the
            beginning of the color table.
            If the state is in the color table, create the color dictionary
            and print out the tags.
            If the state is after the color table, look for lines with color
            info, and substitute the number with the hex number.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__line+=1
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        # BUG FIX: message said 'fonts.py' (copy-paste) and
                        # the bare 'except:' swallowed everything; the line
                        # is now skipped instead of calling None(line).
                        try:
                            sys.stderr.write('no matching state in module colors.py\n')
                            sys.stderr.write(self.__state + '\n')
                        except Exception:
                            pass
                    else:
                        action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "color.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 9,537 | Python | .py | 245 | 27.726531 | 86 | 0.481729 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,269 | inline.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/inline.py | import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
"""
States.
1. default
1. an open bracket ends this state.
2. Text print out text. Print out any groups_in_waiting.
3. closed bracket. Close groups
2. after an open bracket
1. The lack of a control word ends this state.
2. paragraph end -- close out all tags
3. footnote beg -- close out all tags
"""
class Inline:
    """
    Make inline tags within lists.

    Tracks open RTF groups in the token stream and converts character
    formatting control words into <inline> open/close tags. Groups whose
    formatting has not yet been emitted are counted as "groups in
    waiting" and are flushed only when text is actually seen; separate
    group stacks are kept for list and non-list (body) regions.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,):
        """
        Required:
            'file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        self.__write_to = better_mktemp()
    def __initiate_values(self):
        """
        Initiate all values.
        """
        self.__state_dict = {
            'default': self.__default_func,
            'after_open_bracket': self.__after_open_bracket_func,
        }
        # Token dispatch while in the 'default' state.
        self.__default_dict = {
            'ob<nu<open-brack': self.__found_open_bracket_func,
            'tx<nu<__________' : self.__found_text_func,
            'tx<hx<__________' : self.__found_text_func,
            'tx<ut<__________' : self.__found_text_func,
            'mi<mk<inline-fld' : self.__found_text_func,
            'text' : self.__found_text_func,
            'cb<nu<clos-brack' : self.__close_bracket_func,
            'mi<mk<par-end___' : self.__end_para_func,
            'mi<mk<footnt-ope' : self.__end_para_func,
            'mi<mk<footnt-ind' : self.__end_para_func,
        }
        # Token dispatch while in the 'after_open_bracket' state.
        self.__after_open_bracket_dict = {
            'cb<nu<clos-brack' : self.__close_bracket_func,
            'tx<nu<__________' : self.__found_text_func,
            'tx<hx<__________' : self.__found_text_func,
            'tx<ut<__________' : self.__found_text_func,
            'text' : self.__found_text_func,
            'mi<mk<inline-fld' : self.__found_text_func,
            'ob<nu<open-brack': self.__found_open_bracket_func,
            'mi<mk<par-end___' : self.__end_para_func,
            'mi<mk<footnt-ope' : self.__end_para_func,
            'mi<mk<footnt-ind' : self.__end_para_func,
            'cw<fd<field_____' : self.__found_field_func,
        }
        self.__state = 'default'
        self.__brac_count = 0  # do I need this?
        # Single-element lists so the list/body counters can be swapped by
        # reference through self.__groups_in_waiting and mutated in place.
        self.__list_inline_list = []
        self.__body_inline_list = []
        self.__groups_in_waiting_list = [0]
        self.__groups_in_waiting_body = [0]
        self.__groups_in_waiting = self.__groups_in_waiting_body
        self.__place = 'non_list'
        self.__inline_list = self.__body_inline_list
        self.__in_para = 0  # not in paragraph
        # Maps the control-word field (line[6:16]) to the attribute name
        # used on the generated <inline> tag.
        self.__char_dict = {
            # character info => ci
            'annotation' : 'annotation',
            'blue______' : 'blue',
            'bold______' : 'bold',
            'caps______' : 'caps',
            'char-style' : 'character-style',
            'dbl-strike' : 'double-strike-through',
            'emboss____' : 'emboss',
            'engrave___' : 'engrave',
            'font-color' : 'font-color',
            'font-down_' : 'subscript',
            'font-size_' : 'font-size',
            'font-style' : 'font-style',
            'font-up___' : 'superscript',
            'footnot-mk' : 'footnote-marker',
            'green_____' : 'green',
            'hidden____' : 'hidden',
            'italics___' : 'italics',
            'outline___' : 'outline',
            'red_______' : 'red',
            'shadow____' : 'shadow',
            'small-caps' : 'small-caps',
            'strike-thr' : 'strike-through',
            'subscript_' : 'subscript',
            'superscrip' : 'superscript',
            'underlined' : 'underlined',
        }
        self.__caps_list = ['false']
    def __set_list_func(self, line):
        """
        Requires:
            line--line of text
        Returns:
            nothing
        Logic:
            Switch between the list and body group stacks when the
            list-text begin/end markers are seen.
        """
        if self.__place == 'in_list':
            if self.__token_info == 'mi<mk<lst-tx-end':
                self.__place = 'not_in_list'
                self.__inline_list = self.__body_inline_list
                self.__groups_in_waiting = self.__groups_in_waiting_body
        else:
            if self.__token_info == 'mi<mk<lst-tx-beg':
                self.__place = 'in_list'
                self.__inline_list = self.__list_inline_list
                self.__groups_in_waiting = self.__groups_in_waiting_list
    def __default_func(self, line):
        """
        Requires:
            line-- line of text
        Returns:
            nothing
        Logic:
            Write if not hardline break
        """
        action = self.__default_dict.get(self.__token_info)
        if action:
            action(line)
        self.__write_obj.write(line)
    def __found_open_bracket_func(self, line):
        """
        Requires:
            line -- current line of text
        Returns:
            nothing
        Logic:
            Change the state to 'after_open_bracket'
        """
        self.__state = 'after_open_bracket'
        self.__brac_count += 1
        self.__groups_in_waiting[0] += 1
        # Push a fresh attribute dict for the new group.
        self.__inline_list.append({})
        self.__inline_list[-1]['contains_inline'] = 0
    def __after_open_bracket_func(self, line):
        """
        Requires:
            line --line of text
        Returns:
            nothing
        Logic:
            If the token is a control word for character info (cw<ci), use another
            method to add to the dictionary.
            Use the dictionary to get the appropriate function.
            Always print out the line.
        """
        if line[0:5] == 'cw<ci':  # calibre: bug in original function no diff between cw<ci and cw<pf
            self.__handle_control_word(line)
        else:
            action = self.__after_open_bracket_dict.get(self.__token_info)
            if action:
                self.__state = 'default'  # a non control word?
                action(line)
        self.__write_obj.write(line)
    def __handle_control_word(self, line):
        """
        Required:
            line --line of text
        Returns:
            nothing
        Logic:
            Handle the control word for inline groups.
            Add each name - value to a dictionary.
            If the font style of Symbol, Wingdings, or Dingbats is found,
            always mark this. I need this later to convert the text to
            the right utf.
        """
        # cw<ci<shadow_____<nu<true
        # self.__char_dict = {
        char_info = line[6:16]
        char_value = line[20:-1]
        name = self.__char_dict.get(char_info)
        if name:
            self.__inline_list[-1]['contains_inline'] = 1
            self.__inline_list[-1][name] = char_value
        """
        if name == 'font-style':
            if char_value == 'Symbol':
                self.__write_obj.write('mi<mk<font-symbo\n')
            elif char_value == 'Wingdings':
                self.__write_obj.write('mi<mk<font-wingd\n')
            elif char_value == 'Zapf Dingbats':
                self.__write_obj.write('mi<mk<font-dingb\n')
        """
    def __close_bracket_func(self, line):
        """
        Requires:
            line --line of text
        Returns:
            Nothing
        Logic:
            If there are no inline groups, do nothing.
            Get the keys of the last dictionary in the inline_groups.
            If 'contains_inline' in the keys, write a close tag.
            If the_dict contains font information, write a mk tag.
        """
        if len(self.__inline_list) == 0:
            # nothing to add
            return
        the_dict = self.__inline_list[-1]
        the_keys = the_dict.keys()
        # always close out
        if self.__place == 'in_list':
            if 'contains_inline' in the_keys and the_dict['contains_inline'] == 1\
                and self.__groups_in_waiting[0] == 0:
                self.__write_obj.write('mi<tg<close_____<inline\n')
                if 'font-style' in the_keys:
                    self.__write_obj.write('mi<mk<font-end__\n')
                if 'caps' in the_keys:
                    self.__write_obj.write('mi<mk<caps-end__\n')
        else:
            # close out only if in a paragraph
            if 'contains_inline' in the_keys and the_dict['contains_inline'] == 1\
                and self.__in_para and self.__groups_in_waiting[0] == 0:
                self.__write_obj.write('mi<tg<close_____<inline\n')
                if 'font-style' in the_keys:
                    self.__write_obj.write('mi<mk<font-end__\n')
                if 'caps' in the_keys:
                    self.__write_obj.write('mi<mk<caps-end__\n')
        self.__inline_list.pop()
        if self.__groups_in_waiting[0] != 0:
            self.__groups_in_waiting[0] -= 1
    def __found_text_func(self, line):
        """
        Required:
            line--line of text
        Return:
            nothing
        Logic:
            Three cases:
            1. in a list. Simply write inline
            2. Not in a list
                Text can mark the start of a paragraph.
                If already in a paragraph, check to see if any groups are waiting
                to be added. If so, use another method to write these groups.
        """
        if self.__place == 'in_list':
            self.__write_inline()
        else:
            if not self.__in_para:
                self.__in_para = 1
                self.__start_para_func(line)
            elif self.__groups_in_waiting[0] != 0:
                self.__write_inline()
    def __write_inline(self):
        """
        Required:
            nothing
        Returns
            Nothing
        Logic:
            Method for writing inline when text is found.
            Only write those groups that are "waiting", or that have no
            tags yet.
            First, slice the list self.__inline list to get just the groups
            in waiting.
            Iterate through this slice, which contains only dictionaries.
            Get the keys in each dictionary. If 'font-style' is in the keys,
            write a marker tag. (I will use this marker tag later when converting
            hext text to utf8.)
            Write a tag for the inline values.
        """
        if self.__groups_in_waiting[0] != 0:
            last_index = -1 * self.__groups_in_waiting[0]
            inline_list = self.__inline_list[last_index:]
            if len(inline_list) <= 0:
                # NOTE(review): indicates the waiting counter got ahead of
                # the group stack -- internal inconsistency.
                if self.__run_level > 3:
                    msg = 'self.__inline_list is %s\n' % self.__inline_list
                    raise self.__bug_handler(msg)
                self.__write_obj.write('error\n')
                self.__groups_in_waiting[0] = 0
                return
            for the_dict in inline_list:
                if the_dict['contains_inline']:
                    the_keys = the_dict.keys()
                    if 'font-style' in the_keys:
                        face = the_dict['font-style']
                        self.__write_obj.write('mi<mk<font______<%s\n' % face)
                    if 'caps' in the_keys:
                        value = the_dict['caps']
                        self.__write_obj.write('mi<mk<caps______<%s\n' % value)
                    self.__write_obj.write('mi<tg<open-att__<inline')
                    for the_key in the_keys:
                        if the_key != 'contains_inline':
                            self.__write_obj.write(f'<{the_key}>{the_dict[the_key]}')
                    self.__write_obj.write('\n')
            self.__groups_in_waiting[0] = 0
    def __end_para_func(self, line):
        """
        Requires:
            line -- line of text
        Returns:
            nothing
        Logic:
            Slice from the end the groups in waiting.
            Iterate through the list. If the dictionary contaings info, write
            a closing tag.
        """
        if not self.__in_para:
            return
        # Only close groups whose open tags were actually written; groups
        # still "in waiting" never got an open tag.
        if self.__groups_in_waiting[0] == 0:
            inline_list = self.__inline_list
        else:
            last_index = -1 * self.__groups_in_waiting[0]
            inline_list = self.__inline_list[0:last_index]
        for the_dict in inline_list:
            contains_info = the_dict.get('contains_inline')
            if contains_info:
                the_keys = the_dict.keys()
                if 'font-style' in the_keys:
                    self.__write_obj.write('mi<mk<font-end__\n')
                if 'caps' in the_keys:
                    self.__write_obj.write('mi<mk<caps-end__\n')
                self.__write_obj.write('mi<tg<close_____<inline\n')
        self.__in_para = 0
    def __start_para_func(self, line):
        """
        Requires:
            line -- line of text
        Returns:
            nothing
        Logic:
            Iterate through the self.__inline_list to get each dict.
            If the dict containst inline info, get the keys.
            Iterate through the keys and print out the key and value.
        """
        for the_dict in self.__inline_list:
            contains_info = the_dict.get('contains_inline')
            if contains_info :
                the_keys = the_dict.keys()
                if 'font-style' in the_keys:
                    face = the_dict['font-style']
                    self.__write_obj.write('mi<mk<font______<%s\n' % face)
                if 'caps' in the_keys:
                    value = the_dict['caps']
                    self.__write_obj.write('mi<mk<caps______<%s\n' % value)
                self.__write_obj.write('mi<tg<open-att__<inline')
                for the_key in the_keys:
                    if the_key != 'contains_inline':
                        self.__write_obj.write(f'<{the_key}>{the_dict[the_key]}')
                self.__write_obj.write('\n')
        self.__groups_in_waiting[0] = 0
    def __found_field_func(self, line):
        """
        Just a default function to make sure I don't prematurely exit
        default state
        """
        pass
    def form_tags(self):
        """
        Requires:
            area--area to parse (list or non-list)
        Returns:
            nothing
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    token = line[0:-1]
                    self.__token_info = ''
                    # Special characters count as text for dispatch purposes.
                    if token == 'tx<mc<__________<rdblquote'\
                        or token == 'tx<mc<__________<ldblquote'\
                        or token == 'tx<mc<__________<lquote'\
                        or token == 'tx<mc<__________<rquote'\
                        or token == 'tx<mc<__________<emdash'\
                        or token == 'tx<mc<__________<endash'\
                        or token == 'tx<mc<__________<bullet':
                        self.__token_info = 'text'
                    else:
                        self.__token_info = line[:16]
                    self.__set_list_func(line)
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        sys.stderr.write('No matching state in module inline.py\n')
                        sys.stderr.write(self.__state + '\n')
                    # NOTE(review): action is invoked even when None (after
                    # the message above), which would raise a TypeError --
                    # in practice self.__state is always one of the two
                    # keys set up in __initiate_values.
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "inline.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 16,619 | Python | .py | 409 | 28.202934 | 101 | 0.48317 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,270 | group_borders.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/group_borders.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class GroupBorders:
    """
    Group consecutive paragraph definitions that carry the same border
    information into a single <border-group> element.

    A state machine over the token stream: when a paragraph definition
    with border attributes is seen, the border attributes are split off
    into a wrapping border-group tag; subsequent paragraphs with an
    identical border string are merged into the same group.
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            wrap=0,
            ):
        """
        Required:
            'file'
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        self.__write_to = better_mktemp()
        self.__wrap = wrap

    def __initiate_values(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            The self.__end_list is a list of tokens that will force a list to end.
            Likewise, the self.__end_lines is a list of lines that forces a list to end.
        """
        self.__state = "default"
        self.__left_indent = 0
        self.__border_num = 0
        self.__list_type = 'not-defined'
        self.__pard_def = ""
        self.__all_lists = []
        self.__list_chunk = ''
        self.__state_dict={
            'default' : self.__default_func,
            'in_pard' : self.__in_pard_func,
            'after_pard' : self.__after_pard_func,
        }
        # Tokens that force an open border group to close.
        self.__end_list = [
            # section end
            'mi<mk<sect-close',
            'mi<mk<sect-start',
            # table begin
            'mi<mk<tabl-start',
            # field block begin
            'mi<mk<fldbk-end_',
            'mi<mk<fldbkstart',
            # cell end
            'mi<mk<close_cell',
            # item end
            'mi<tg<item_end__',
            # footnote end
            'mi<mk<foot___clo',
            'mi<mk<footnt-ope',
            # heading end
            'mi<mk<header-beg',
            'mi<mk<header-end',
            'mi<mk<head___clo',
            # lists
            # BUG FIX: the original had no comma after 'mi<mk<list_start',
            # so it was implicitly concatenated with 'mi<mk<style-grp_'
            # and NEITHER token was actually in this list. Duplicate
            # 'mi<tg<item_end__' entries were also dropped.
            'mi<mk<list_start',
            # style-group
            'mi<mk<style-grp_',
            'mi<mk<style_grp_',
            'mi<mk<style_gend',
            'mi<mk<stylegend_',
            # don't use
            # 'mi<mk<body-close',
            # 'mi<mk<par-in-fld',
            # 'cw<tb<cell______',
            # 'cw<tb<row-def___',
            # 'cw<tb<row_______',
            # 'mi<mk<sec-fd-beg',
        ]
        # <name>Normal< -- strips the style name so a name containing
        # 'border-paragraph' cannot cause a false positive.
        self.__name_regex = re.compile(r'(<name>[^<]+)')
        self.__found_appt = 0
        self.__line_num = 0
        self.__border_regex = re.compile(r'(<border-paragraph[^<]+|<border-for-every-paragraph[^<]+)')
        self.__last_border_string = ''

    def __in_pard_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            You are in a list, but in the middle of a paragraph definition.
            Don't do anything until you find the end of the paragraph definition.
        """
        if self.__token_info == 'mi<tg<close_____' \
            and line[17:-1] == 'paragraph-definition':
            self.__state = 'after_pard'
        else:
            self.__write_obj.write(line)

    def __after_pard_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            Between paragraph definitions: buffer lines until either a new
            paragraph definition continues/replaces the group, or an
            end-of-list token forces the group closed.
        """
        if self.__token_info == 'mi<tg<open-att__' \
            and line[17:37] == 'paragraph-definition':
            # found paragraph definition
            self.__pard_after_par_def_func(line)
        elif self.__token_info == 'mi<tg<close_____' \
            and line[17:-1] == 'paragraph-definition':
            sys.stderr.write('Wrong flag in __after_pard_func\n')
            if self.__run_level > 2:
                msg = 'wrong flag'
                raise self.__bug_handler(msg)
        elif self.__token_info in self.__end_list:
            self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
            self.__write_end_border_tag()
            self.__write_obj.write(self.__list_chunk)
            self.__list_chunk = ''
            self.__state = 'default'
            self.__write_obj.write(line)
        else:
            self.__list_chunk += line

    def __close_pard_(self, line):
        # NOTE(review): dead code -- never called in this class, and
        # self.__write_end_wrap does not exist, so calling it would raise
        # AttributeError. Kept for interface compatibility.
        self.__write_obj.write(self.__list_chunk)
        self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
        self.__write_end_wrap()
        self.__list_chunk = ''
        self.__state = 'default'

    def __pard_after_par_def_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            A new paragraph definition was found while a border group is
            open: continue the group if the border string is unchanged,
            otherwise close it and start a new one.
        """
        is_border = self.__is_border_func(line)
        if not is_border:
            self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
            self.__write_end_border_tag()
            self.__write_obj.write(self.__list_chunk)
            self.__write_obj.write(line)
            self.__state = 'default'
            self.__list_chunk = ''
        else:
            border_string, pard_string = self.__parse_pard_with_border(line)
            if self.__last_border_string == border_string:
                # just keep going
                self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
                self.__write_obj.write(self.__list_chunk)
                self.__list_chunk = ''
                self.__state = 'in_pard'
                self.__write_obj.write(pard_string)
            else:
                # different border string: close the old group, open a new one
                self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
                self.__write_end_border_tag()
                self.__write_obj.write(self.__list_chunk)
                self.__write_start_border_tag(border_string)
                self.__write_obj.write(pard_string)
                self.__state = 'in_pard'
                self.__last_border_string = border_string
                self.__list_chunk = ''

    def __default_func(self, line):
        """
        Required:
            self, line
        Returns:
            Nothing
        Logic:
            Look for the start of a paragraph definition. If one is found
            and it carries border attributes, open a border group and
            switch to the in_pard state.
        """
        if self.__token_info == 'mi<tg<open-att__' \
            and line[17:37] == 'paragraph-definition':
            contains_border = self.__is_border_func(line)
            if contains_border:
                border_string, pard_string = self.__parse_pard_with_border(line)
                self.__write_start_border_tag(border_string)
                self.__write_obj.write(pard_string)
                self.__last_border_string = border_string
                self.__state = 'in_pard'
            else:
                self.__write_obj.write(line)
        else:
            self.__write_obj.write(line)

    def __write_start_border_tag(self, the_string):
        """Write the marker and open tag for a new border group."""
        self.__write_obj.write('mi<mk<start-brdg\n')
        self.__border_num += 1
        num = '%04d' % self.__border_num
        num_string = 's%s' % num
        the_string += '<num>%s' % num_string
        self.__write_obj.write('mi<tg<open-att__<border-group%s\n' % the_string)

    def __write_end_border_tag(self):
        """Write the marker and close tag that end a border group."""
        self.__write_obj.write('mi<mk<end-brdg__\n')
        self.__write_obj.write('mi<tg<close_____<border-group\n')

    def __is_border_func(self, line):
        """Return 1 if the line carries border attributes (ignoring the
        style name), else 0."""
        line = re.sub(self.__name_regex, '', line)
        index = line.find('border-paragraph')
        if index > -1:
            return 1
        return 0

    def __parse_pard_with_border(self, line):
        """Split a paragraph-definition line into (border_string,
        pard_string) using the capturing border regex."""
        border_string = ''
        pard_string = ''
        tokens = re.split(self.__border_regex, line)
        for token in tokens:
            if token[0:17] == '<border-paragraph':
                border_string += token
            else:
                pard_string += token
        return border_string, pard_string

    def __write_pard_with_border(self, line):
        # NOTE(review): unused in this class; same split as
        # __parse_pard_with_border but writes the result directly.
        border_string = ''
        pard_string = ''
        tokens = re.split(self.__border_regex, line)
        for token in tokens:
            if token[0:17] == '<border-paragraph':
                border_string += token
            else:
                pard_string += token
        self.__write_start_border_tag(border_string)
        self.__write_obj.write(pard_string)

    def __get_style_name(self, line):
        """Remember the most recent style name marker."""
        if self.__token_info == 'mi<mk<style-name':
            self.__style_name = line[17:-1]

    def group_borders(self):
        """
        Required:
            nothing
        Returns:
            original file will be changed
        Logic:
            Drive the state machine over the whole file, then replace the
            input file with the transformed output.
        """
        self.__initiate_values()
        read_obj = open_for_read(self.__file)
        self.__write_obj = open_for_write(self.__write_to)
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            self.__token_info = line[:16]
            self.__get_style_name(line)
            action = self.__state_dict.get(self.__state)
            action(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "group_borders.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 10,920 | Python | .py | 291 | 27.474227 | 103 | 0.497031 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,271 | add_brackets.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/add_brackets.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import check_brackets, copy
from calibre.ptempfile import better_mktemp
from polyglot.builtins import iteritems
from . import open_for_read, open_for_write
class AddBrackets:
    """
    Add brackets for old RTF.
    Logic:
    When control words without their own brackets are encountered
    and in the list of allowed words, this will add brackets
    to facilitate the treatment of the file
    """
    def __init__(self, in_file,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- verbosity/strictness level
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        # temporary output file; renamed over the input on success
        self.__write_to = better_mktemp()
        self.__run_level = run_level
        # parser state -> handler for the current line
        self.__state_dict = {
            'before_body' : self.__before_body_func,
            'in_body' : self.__in_body_func,
            'after_control_word' : self.__after_control_word_func,
            'in_ignore' : self.__ignore_func,
        }
        # Inline character-formatting control words that may be grouped
        # inside the brackets this module inserts.
        self.__accept = [
            'cw<ci<bold______' ,
            'cw<ci<annotation' ,
            'cw<ci<blue______' ,
            # 'cw<ci<bold______' ,
            'cw<ci<caps______' ,
            'cw<ci<char-style' ,
            'cw<ci<dbl-strike' ,
            'cw<ci<emboss____' ,
            'cw<ci<engrave___' ,
            'cw<ci<font-color' ,
            'cw<ci<font-down_' ,
            'cw<ci<font-size_' ,
            'cw<ci<font-style' ,
            'cw<ci<font-up___' ,
            'cw<ci<footnot-mk' ,
            'cw<ci<green_____' ,
            'cw<ci<hidden____' ,
            'cw<ci<italics___' ,
            'cw<ci<outline___' ,
            'cw<ci<red_______' ,
            'cw<ci<shadow____' ,
            'cw<ci<small-caps' ,
            'cw<ci<strike-thr' ,
            'cw<ci<subscript_' ,
            'cw<ci<superscrip' ,
            'cw<ci<underlined' ,
            # 'cw<ul<underlined' ,
        ]
    def __initiate_values(self):
        """
        Init temp values
        """
        self.__state = 'before_body'
        # currently active inline attributes (token prefix -> value)
        self.__inline = {}
        # buffered accepted control-word lines awaiting a group decision
        self.__temp_group = []
        # True while a bracket inserted by this module is still open
        self.__open_bracket = False
        self.__found_brackets = False
    def __before_body_func(self, line):
        """
        If we are before the body, not interested in changing anything
        """
        if self.__token_info == 'mi<mk<body-open_':
            self.__state = 'in_body'
        self.__write_obj.write(line)
    def __in_body_func(self, line):
        """
        Select what action to take in body:
        1-At the end of the file close the bracket if a bracket was opened
        This happens if there is a change
        2-If an open bracket is found the code inside is ignored
        (written without modifications)
        3-If an accepted control word is found put the line
        in a buffer then change state to after cw
        4-Else simply write the line
        """
        if line == 'cb<nu<clos-brack<0001\n' and self.__open_bracket:
            # NOTE(review): __open_bracket is not reset to False here --
            # confirm whether that is intentional.
            self.__write_obj.write(
                'cb<nu<clos-brack<0003\n'
            )
            self.__write_obj.write(line)
        elif self.__token_info == 'ob<nu<open-brack':
            self.__found_brackets = True
            self.__state = 'in_ignore'
            self.__ignore_count = self.__ob_count
            self.__write_obj.write(line)
        elif self.__token_info in self.__accept:
            self.__temp_group.append(line)
            self.__state = 'after_control_word'
        else:
            self.__write_obj.write(line)
    def __after_control_word_func(self, line):
        """
        After a cw either add next allowed cw to temporary list or
        change group and write it.
        If the token leading to an exit is an open bracket go to
        ignore otherwise go to in body
        """
        if self.__token_info in self.__accept:
            self.__temp_group.append(line)
        else:
            self.__change_permanent_group()
            self.__write_group()
            self.__write_obj.write(line)
            if self.__token_info == 'ob<nu<open-brack':
                self.__state = 'in_ignore'
                self.__ignore_count = self.__ob_count
            else:
                self.__state = 'in_body'
    def __write_group(self):
        """
        Write a temporary group after accepted control words end
        But this is mostly useless in my opinion as there is no list of rejected cw
        This may be a way to implement future old rtf processing for cw
        Utility: open a group to just put brackets but why be so complicated?
        Scheme: open brackets, write cw then go to body and back with cw after
        """
        if self.__open_bracket:
            self.__write_obj.write(
                'cb<nu<clos-brack<0003\n'
            )
            self.__open_bracket = False
        # only attributes whose value is not 'false' are re-emitted
        inline_string = ''.join([f'{k}<nu<{v}\n'
                            for k, v in iteritems(self.__inline)
                            if v != 'false'])
        if inline_string:
            self.__write_obj.write('ob<nu<open-brack<0003\n'
                                '%s' % inline_string)
            self.__open_bracket = True
        self.__temp_group = []
    def __change_permanent_group(self):
        """
        Use temp group to change permanent group
        If the control word is not accepted remove it
        What is the interest as it is built to accept only accepted cw
        in __after_control_word_func?
        """
        self.__inline = {line[:16] : line[20:-1]
            for line in self.__temp_group\
            # Is this really necessary?
            if line[:16] in self.__accept}
    def __ignore_func(self, line):
        """
        Just copy data inside of RTF brackets already here.
        """
        self.__write_obj.write(line)
        if self.__token_info == 'cb<nu<clos-brack'\
            and self.__cb_count == self.__ignore_count:
            self.__state = 'in_body'
    def __check_brackets(self, in_file):
        """
        Return True if brackets match
        """
        check_brack_obj = check_brackets.CheckBrackets(file=in_file)
        return check_brack_obj.check_brackets()[0]
    def add_brackets(self):
        """
        Process the tokenised file, adding brackets around runs of accepted
        inline control words, then replace the original file only when the
        resulting brackets still balance.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    # bracket tokens carry a 4-digit nesting count suffix
                    if self.__token_info == 'ob<nu<open-brack':
                        self.__ob_count = line[-5:-1]
                    if self.__token_info == 'cb<nu<clos-brack':
                        self.__cb_count = line[-5:-1]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        sys.stderr.write(
                            'No matching state in module add_brackets.py\n'
                            '%s\n' % self.__state)
                    action(line)
        # Check bad brackets
        if self.__check_brackets(self.__write_to):
            copy_obj = copy.Copy(bug_handler=self.__bug_handler)
            if self.__copy:
                copy_obj.copy_file(self.__write_to, "add_brackets.data")
            copy_obj.rename(self.__write_to, self.__file)
        else:
            if self.__run_level > 0:
                sys.stderr.write(
                    'Sorry, but this files has a mix of old and new RTF.\n'
                    'Some characteristics cannot be converted.\n')
        os.remove(self.__write_to)
| 8,941 | Python | .py | 218 | 30.183486 | 83 | 0.486794 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,272 | convert_to_tags.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/convert_to_tags.py | import os
import sys
from calibre.ebooks.rtf2xml import check_encoding, copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
# Name of the public rtf2xml DTD referenced in the default DOCTYPE.
public_dtd = 'rtf2xml1.0.dtd'
class ConvertToTags:
    """
    Convert the intermediate token file to XML.
    """
    def __init__(self,
            in_file,
            bug_handler,
            dtd_path,
            no_dtd,
            encoding,
            indent=None,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file' -- the token file to convert
            'bug_handler' -- exception class raised on internal errors
            'dtd_path' -- path for the DOCTYPE SYSTEM id ('' or None for none/public)
            'no_dtd' -- suppress the DOCTYPE entirely when true
            'encoding' -- codepage number (prefixed with 'cp')
        Optional:
            'indent' -- pretty-print with newlines when true
            'copy' -- whether to make a copy of result for debugging
            'run_level' -- verbosity/strictness level
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__dtd_path = dtd_path
        self.__no_dtd = no_dtd
        self.__encoding = 'cp' + encoding
        # if encoding == 'mac_roman':
        #     self.__encoding = 'mac_roman'
        self.__indent = indent
        self.__run_level = run_level
        # temporary output file; renamed over the input on success
        self.__write_to = better_mktemp()
        self.__convert_utf = False
        self.__bad_encoding = False
    def __initiate_values(self):
        """
        Set values, including those for the dispatch dictionary.
        """
        self.__state = 'default'
        self.__new_line = 0
        # Block-level tags: these are written on their own line when
        # indenting is enabled.
        self.__block = ('doc', 'preamble', 'rtf-definition', 'font-table',
            'font-in-table', 'color-table', 'color-in-table', 'style-sheet',
            'paragraph-styles', 'paragraph-style-in-table', 'character-styles',
            'character-style-in-table', 'list-table', 'doc-information', 'title',
            'author', 'operator', 'creation-time', 'revision-time',
            'editing-time', 'time', 'number-of-pages', 'number-of-words',
            'number-of-characters', 'page-definition', 'section-definition',
            'headers-and-footers', 'section', 'para', 'body',
            'paragraph-definition', 'cell', 'row', 'table', 'revision-table',
            'style-group', 'border-group','styles-in-body', 'paragraph-style-in-body',
            'list-in-table', 'level-in-table', 'override-table','override-list',
            )
        # Tags that get an extra blank line around them. A missing comma
        # previously fused 'row' and 'list-table' into the single bogus
        # entry 'rowlist-table', so neither tag ever matched.
        self.__two_new_line = ('section', 'body', 'table', 'row', 'list-table')
        # token prefix -> handler for the current line
        self.__state_dict = {
            'default' : self.__default_func,
            'mi<tg<open______' : self.__open_func,
            'mi<tg<close_____' : self.__close_func,
            'mi<tg<open-att__' : self.__open_att_func,
            'mi<tg<empty-att_' : self.__empty_att_func,
            'tx<nu<__________' : self.__text_func,
            'tx<ut<__________' : self.__text_func,
            'mi<tg<empty_____' : self.__empty_func,
        }
    def __open_func(self, line):
        """
        Print the opening tag and newlines when needed.
        """
        # mi<tg<open______<style-sheet
        info = line[17:-1]
        self.__new_line = 0
        if info in self.__block:
            self.__write_new_line()
        if info in self.__two_new_line:
            self.__write_extra_new_line()
        self.__write_obj.write('<%s>' % info)
    def __empty_func(self, line):
        """
        Print out empty tag and newlines when needed.
        """
        info = line[17:-1]
        self.__write_obj.write(
            '<%s/>' % info)
        self.__new_line = 0
        if info in self.__block:
            self.__write_new_line()
        if info in self.__two_new_line:
            self.__write_extra_new_line()
    def __open_att_func(self, line):
        """
        Process lines for open tags that have attributes.
        The important info is between [17:-1]. Take this info and split it
        with the delimiter '<'. The first token in this group is the element
        name. The rest are attributes, separated from their values by '>'. So
        read each token one at a time, and split them by '>'.
        """
        # mi<tg<open-att__<footnote<num>
        info = line[17:-1]
        tokens = info.split("<")
        element_name = tokens[0]
        tokens = tokens[1:]
        self.__write_obj.write('<%s' % element_name)
        for token in tokens:
            groups = token.split('>')
            try:
                att = groups[0]
                value = groups[1]
                # Escape quotes so the value is safe inside the
                # double-quoted attribute (single quotes are normalised to
                # &quot; as well, matching the historical intent).
                value = value.replace('"', '&quot;')
                value = value.replace("'", '&quot;')
                self.__write_obj.write(
                    f' {att}="{value}"'
                )
            except IndexError:
                # a token without '>' has no value; skip it silently unless
                # running at a high debug level
                if self.__run_level > 3:
                    msg = 'index out of range\n'
                    raise self.__bug_handler(msg)
        self.__write_obj.write('>')
        self.__new_line = 0
        if element_name in self.__block:
            self.__write_new_line()
        if element_name in self.__two_new_line:
            self.__write_extra_new_line()
    def __empty_att_func(self, line):
        """
        Same as the __open_att_func, except a '/' is placed at the end of the tag.
        """
        # mi<tg<empty-att_<footnote<num>
        info = line[17:-1]
        tokens = info.split("<")
        element_name = tokens[0]
        tokens = tokens[1:]
        self.__write_obj.write('<%s' % element_name)
        for token in tokens:
            groups = token.split('>')
            att = groups[0]
            value = groups[1]
            # same quote escaping as __open_att_func
            value = value.replace('"', '&quot;')
            value = value.replace("'", '&quot;')
            self.__write_obj.write(
                f' {att}="{value}"')
        self.__write_obj.write('/>')
        self.__new_line = 0
        if element_name in self.__block:
            self.__write_new_line()
        if element_name in self.__two_new_line:
            self.__write_extra_new_line()
    def __close_func(self, line):
        """
        Print out the closed tag and new lines, if appropriate.
        """
        # mi<tg<close_____<style-sheet\n
        info = line[17:-1]
        self.__write_obj.write(
            '</%s>' % info)
        self.__new_line = 0
        if info in self.__block:
            self.__write_new_line()
        if info in self.__two_new_line:
            self.__write_extra_new_line()
    def __text_func(self, line):
        """
        Simply print out the information between [17:-1]
        """
        # tx<nu<__________<Normal;
        self.__write_obj.write(line[17:-1])
    def __write_extra_new_line(self):
        """
        Print out extra new lines if the new lines have not exceeded two. If
        the new lines are greater than two, do nothing.
        """
        if not self.__indent:
            return
        if self.__new_line < 2:
            self.__write_obj.write('\n')
    def __default_func(self, line):
        # tokens with no matching handler are dropped from the output
        pass
    def __write_new_line(self):
        """
        Print out a new line if a new line has not already been printed out.
        """
        if not self.__indent:
            return
        if not self.__new_line:
            self.__write_obj.write('\n')
            self.__new_line += 1
    def __write_dec(self):
        """
        Write the XML declaration at the top of the document, choosing an
        encoding declaration the input can actually satisfy.
        """
        # keep maximum compatibility with previous version
        check_encoding_obj = check_encoding.CheckEncoding(
                bug_handler=self.__bug_handler)
        if not check_encoding_obj.check_encoding(self.__file, verbose=False):
            self.__write_obj.write('<?xml version="1.0" encoding="US-ASCII" ?>')
        elif not check_encoding_obj.check_encoding(self.__file, self.__encoding, verbose=False):
            self.__write_obj.write('<?xml version="1.0" encoding="UTF-8" ?>')
            self.__convert_utf = True
        else:
            self.__write_obj.write('<?xml version="1.0" encoding="US-ASCII" ?>')
            sys.stderr.write('Bad RTF encoding, revert to US-ASCII chars and'
                    ' hope for the best')
            self.__bad_encoding = True
        self.__new_line = 0
        self.__write_new_line()
        if self.__no_dtd:
            pass
        elif self.__dtd_path:
            self.__write_obj.write(
                '<!DOCTYPE doc SYSTEM "%s">' % self.__dtd_path
            )
        elif self.__dtd_path == '':
            # don't print dtd if further transformations are going to take
            # place
            pass
        else:
            self.__write_obj.write(
                '<!DOCTYPE doc PUBLIC "publicID" '
                '"http://rtf2xml.sourceforge.net/dtd/%s">' % public_dtd
            )
        self.__new_line = 0
        self.__write_new_line()
    def convert_to_tags(self):
        """
        Read in the file one line at a time. Get the important info, between
        [:16]. Check if this info matches a dictionary entry. If it does, call
        the appropriate function.
        The functions that are called:
            a text function for text
            an open function for open tags
            an open with attribute function for tags with attributes
            an empty with attribute function for tags that are empty but have
            attributes.
            a closed function for closed tags.
            an empty tag function.
        """
        self.__initiate_values()
        with open_for_write(self.__write_to) as self.__write_obj:
            self.__write_dec()
            with open_for_read(self.__file) as read_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__token_info)
                    if action is not None:
                        action(line)
        # convert all encodings to UTF8 or ASCII to avoid unsupported encodings in lxml
        if self.__convert_utf or self.__bad_encoding:
            copy_obj = copy.Copy(bug_handler=self.__bug_handler)
            copy_obj.rename(self.__write_to, self.__file)
            with open_for_read(self.__file) as read_obj:
                with open_for_write(self.__write_to) as write_obj:
                    for line in read_obj:
                        write_obj.write(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "convert_to_tags.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 10,505 | Python | .py | 266 | 28.631579 | 96 | 0.522994 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,273 | group_styles.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/group_styles.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class GroupStyles:
    """
    Form lists.
    Use RTF's own formatting to determine if a paragraph definition is part of a
    list.
    Use indents to determine items and how lists are nested.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            wrap=0,
            ):
        """
        Required:
            'in_file' -- the token file to process
            'bug_handler' -- exception class raised on internal errors
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- verbosity/strictness level
            'wrap' -- when true, wrap same-styled runs in style-group tags
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        # temporary output file; renamed over the input on success
        self.__write_to = better_mktemp()
        self.__wrap = wrap
    def __initiate_values(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            The self.__end_list is a list of tokens that will force a
            style group to end.
        """
        self.__state = "default"
        self.__left_indent = 0
        self.__list_type = 'not-defined'
        self.__pard_def = ""
        self.__all_lists = []
        # lines buffered after a paragraph-definition close, flushed once
        # it is known whether the next definition continues the group
        self.__list_chunk = ''
        # parser state -> handler for the current line
        self.__state_dict={
            'default' : self.__default_func,
            'in_pard' : self.__in_pard_func,
            'after_pard' : self.__after_pard_func,
        }
        # section end
        # NOTE(review): 'mi<tg<item_end__' appears three times below; the
        # duplicates are harmless for membership tests.
        self.__end_list = [
            # section end
            'mi<mk<sect-close',
            'mi<mk<sect-start',
            # table begin
            'mi<mk<tabl-start',
            # field block begin
            'mi<mk<fldbk-end_',
            'mi<mk<fldbkstart',
            # cell end
            'mi<mk<close_cell',
            # item end
            'mi<tg<item_end__',
            # footnote end
            'mi<mk<foot___clo',
            'mi<mk<footnt-ope',
            # heading end
            'mi<mk<header-beg',
            'mi<mk<header-end',
            'mi<mk<head___clo',
            # lists
            'mi<tg<item_end__',
            'mi<tg<item_end__',
            'mi<mk<list_start'
            # body close
            # don't use
            # 'mi<mk<body-close',
            # 'mi<mk<par-in-fld',
            # 'cw<tb<cell______',
            # 'cw<tb<row-def___',
            # 'cw<tb<row_______',
            # 'mi<mk<sec-fd-beg',
        ]
        self.__name_regex = re.compile(r'<name>')
        self.__found_appt = 0
        self.__line_num = 0
    def __in_pard_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            You are in a list, but in the middle of a paragraph definition.
            Don't do anything until you find the end of the paragraph definition.
        """
        if self.__token_info == 'mi<tg<close_____' \
            and line[17:-1] == 'paragraph-definition':
            self.__state = 'after_pard'
        else:
            self.__write_obj.write(line)
    def __after_pard_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            A new paragraph definition continues the group; an end token
            closes it and flushes the buffered chunk; any other line is
            buffered until that decision can be made.
        """
        if self.__token_info == 'mi<tg<open-att__' \
            and line[17:37] == 'paragraph-definition':
            # found paragraph definition
            self.__pard_after_par_def_func(line)
        elif self.__token_info == 'mi<tg<close_____' \
            and line[17:-1] == 'paragraph-definition':
            sys.stderr.write('Wrong flag in __after_pard_func\n')
            if self.__run_level > 2:
                msg = 'wrong flag'
                raise self.__bug_handler(msg)
        elif self.__token_info in self.__end_list:
            self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
            self.__write_end_wrap()
            self.__write_obj.write(self.__list_chunk)
            self.__list_chunk = ''
            self.__state = 'default'
            self.__write_obj.write(line)
        else:
            self.__list_chunk += line
    def __close_pard_(self, line):
        # NOTE(review): this helper appears to be unused in this module --
        # confirm before removing.
        self.__write_obj.write(self.__list_chunk)
        self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
        self.__write_end_wrap()
        self.__list_chunk = ''
        self.__state = 'default'
    def __write_start_wrap(self, name):
        # Emit style-group open markers (only when wrapping is enabled).
        if self.__wrap:
            self.__write_obj.write('mi<mk<style-grp_<%s\n' % name)
            self.__write_obj.write('mi<tg<open-att__<style-group<name>%s\n' % name)
            self.__write_obj.write('mi<mk<style_grp_<%s\n' % name)
    def __write_end_wrap(self):
        # Emit style-group close markers (only when wrapping is enabled).
        if self.__wrap:
            self.__write_obj.write('mi<mk<style_gend\n')
            self.__write_obj.write('mi<tg<close_____<style-group\n')
            self.__write_obj.write('mi<mk<stylegend_\n')
    def __pard_after_par_def_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            A new paragraph definition directly follows the previous one.
            If it carries the same style name the current group continues;
            otherwise the old group is closed and a new one is opened.
        """
        if self.__last_style_name == self.__style_name:
            # just keep going
            if self.__wrap:
                self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
            self.__write_obj.write(self.__list_chunk)
            self.__list_chunk = ''
            self.__state = 'in_pard'
            if self.__wrap:
                self.__write_obj.write(line)
        else:
            # different name for the paragraph definition
            self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
            self.__write_end_wrap()
            self.__write_obj.write(self.__list_chunk)
            self.__write_start_wrap(self.__style_name)
            self.__write_obj.write(line)
            self.__state = 'in_pard'
            self.__last_style_name = self.__style_name
            self.__list_chunk = ''
    def __default_func(self, line):
        """
        Required:
            self, line
        Returns:
            Nothing
        Logic
            Look for the start of a paragraph definition. If one is found,
            open a style group for its style name and change the state to
            in_pard.
        """
        if self.__token_info == 'mi<tg<open-att__' \
            and line[17:37] == 'paragraph-definition':
            self.__state = 'in_pard'
            self.__last_style_name = self.__style_name
            self.__write_start_wrap(self.__last_style_name)
            self.__write_obj.write(line)
        else:
            self.__write_obj.write(line)
    def __get_style_name(self, line):
        # remember the most recent style name seen in the token stream
        if self.__token_info == 'mi<mk<style-name':
            self.__style_name = line[17:-1]
    def group_styles(self):
        """
        Required:
            nothing
        Returns:
            original file will be changed
        Logic:
            Read the token file line by line, dispatching on the current
            state, then replace the original file with the grouped result.
        """
        self.__initiate_values()
        read_obj = open_for_read(self.__file)
        self.__write_obj = open_for_write(self.__write_to)
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            self.__token_info = line[:16]
            self.__get_style_name(line)
            action = self.__state_dict.get(self.__state)
            action(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "group_styles.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 8,813 | Python | .py | 240 | 27.041667 | 88 | 0.487848 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,274 | get_char_map.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/get_char_map.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
class GetCharMap:
    """
    Return the character map for the given value
    """
    def __init__(self, bug_handler, char_file):
        """
        Required:
            'bug_handler' -- exception class raised when no map is found
            'char_file' -- an open, seekable file object with the mappings
        Returns:
            nothing
        """
        self.__char_file = char_file
        self.__bug_handler = bug_handler
    def get_char_map(self, map):
        """Return a dict of character mappings for the section named *map*.

        The map file contains one ``<map>`` ... ``</map>`` section per
        encoding; each data line is colon-separated, with the key in
        field 1 and the replacement in field 3.

        Raises self.__bug_handler if the section is not present.
        """
        # if map == 'ansicpg10000':
        #     map = 'mac_roman'
        found_map = False
        map_dict = {}
        self.__char_file.seek(0)
        # section delimiters are invariant -- compute them once
        begin_element = '<%s>' % map
        end_element = '</%s>' % map
        for line in self.__char_file:
            if not line.strip():
                continue
            if not found_map:
                if begin_element in line:
                    found_map = True
            else:
                if end_element in line:
                    break
                fields = line.split(':')
                # '\colon' is the map file's escape for a literal colon.
                # str.replace returns a new string, so the result must be
                # assigned back (previously it was silently discarded).
                fields[1] = fields[1].replace('\\colon', ':')
                map_dict[fields[1]] = fields[3]
        if not found_map:
            msg = 'no map found\nmap is "%s"\n'%(map,)
            raise self.__bug_handler(msg)
        return map_dict
| 2,080 | Python | .py | 49 | 33.44898 | 73 | 0.373947 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,275 | border_parse.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/border_parse.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys
class BorderParse:
    """
    Parse a border line and return a dictionary of attributes and values
    """
    def __init__(self):
        # cw<bd<bor-t-r-hi<nu<true
        # short border token (line[6:16]) -> full attribute name
        self.__border_dict = {
            'bor-t-r-hi' : 'border-table-row-horizontal-inside',
            'bor-t-r-vi' : 'border-table-row-vertical-inside',
            'bor-t-r-to' : 'border-table-row-top',
            'bor-t-r-le' : 'border-table-row-left',
            'bor-t-r-bo' : 'border-table-row-bottom',
            'bor-t-r-ri' : 'border-table-row-right',
            'bor-cel-bo' : 'border-cell-bottom',
            'bor-cel-to' : 'border-cell-top',
            'bor-cel-le' : 'border-cell-left',
            'bor-cel-ri' : 'border-cell-right',
            'bor-par-bo' : 'border-paragraph-bottom',
            'bor-par-to' : 'border-paragraph-top',
            'bor-par-le' : 'border-paragraph-left',
            'bor-par-ri' : 'border-paragraph-right',
            'bor-par-bx' : 'border-paragraph-box',
            'bor-for-ev' : 'border-for-every-paragraph',
            'bor-outsid' : 'border-outside',
            'bor-none__' : 'border',
            # border type => bt
            'bdr-li-wid' : 'line-width',
            'bdr-sp-wid' : 'padding',
            'bdr-color_' : 'color',
        }
        # style token -> style keyword collected for __determine_styles
        self.__border_style_dict = {
            'bdr-single' : 'single',
            'bdr-doubtb' : 'double-thickness-border',
            'bdr-shadow' : 'shadowed-border',
            'bdr-double' : 'double-border',
            'bdr-dotted' : 'dotted-border',
            'bdr-dashed' : 'dashed',
            'bdr-hair__' : 'hairline',
            'bdr-inset_' : 'inset',
            'bdr-das-sm' : 'dash-small',
            'bdr-dot-sm' : 'dot-dash',
            'bdr-dot-do' : 'dot-dot-dash',
            'bdr-outset' : 'outset',
            'bdr-trippl' : 'tripple',
            'bdr-thsm__' : 'thick-thin-small',
            'bdr-htsm__' : 'thin-thick-small',
            'bdr-hthsm_' : 'thin-thick-thin-small',
            'bdr-thm___' : 'thick-thin-medium',
            'bdr-htm___' : 'thin-thick-medium',
            'bdr-hthm__' : 'thin-thick-thin-medium',
            'bdr-thl___' : 'thick-thin-large',
            'bdr-hthl__' : 'thin-thick-thin-large',
            'bdr-wavy__' : 'wavy',
            'bdr-d-wav_' : 'double-wavy',
            'bdr-strip_' : 'striped',
            'bdr-embos_' : 'emboss',
            'bdr-engra_' : 'engrave',
            'bdr-frame_' : 'frame',
        }
        # Priority order for resolving the final '<border>-style' value
        # when several style keywords are present; the first match wins.
        # (This replaces the old elif chain, which contained a duplicate
        # 'thick-thin-small' branch.)
        # NOTE(review): 'engraved' and 'tripple-border' can never match,
        # because __border_style_dict emits 'engrave' and 'tripple'; those
        # styles fall through to the first-element fallback -- confirm
        # intended spelling before changing output values.
        self.__style_priority = (
            ('shadowed-border', 'shadowed'),
            ('engraved', 'engraved'),
            ('emboss', 'emboss'),
            ('striped', 'striped'),
            ('thin-thick-thin-small', 'thin-thick-thin-small'),
            ('thick-thin-large', 'thick-thin-large'),
            ('thin-thick-thin-medium', 'thin-thick-thin-medium'),
            ('thin-thick-medium', 'thin-thick-medium'),
            ('thick-thin-medium', 'thick-thin-medium'),
            ('thick-thin-small', 'thick-thin-small'),
            ('double-wavy', 'double-wavy'),
            ('dot-dot-dash', 'dot-dot-dash'),
            ('dot-dash', 'dot-dash'),
            ('dotted-border', 'dotted'),
            ('wavy', 'wavy'),
            ('dash-small', 'dash-small'),
            ('dashed', 'dashed'),
            ('frame', 'frame'),
            ('inset', 'inset'),
            ('outset', 'outset'),
            ('tripple-border', 'tripple'),
            ('double-border', 'double'),
            ('double-thickness-border', 'double-thickness'),
            ('hairline', 'hairline'),
            ('single', 'single'),
        )
    def parse_border(self, line):
        """
        Requires:
            line -- a 'cw<bd<...' token line containing a border definition
        Returns:
            a dict mapping border attribute names to values (empty when
            the border token is unknown)
        Logic:
            line[6:16] names the border; the text from position 20 to the
            trailing newline is a '|'-separated list of 'att:value' pairs
            (a bare att means 'true'). Style keywords are collected and
            resolved to a single '<border>-style' attribute at the end.
        """
        border_dict = {}
        border_style_list = []
        border_type = self.__border_dict.get(line[6:16])
        if not border_type:
            sys.stderr.write(
                'module is border_parse.py\n'
                'function is parse_border\n'
                'token does not have a dictionary value\n'
                'token is "%s"' % line
            )
            return border_dict
        att_line = line[20:-1]
        atts = att_line.split('|')
        # cw<bd<bor-cel-ri<nu<
        # border has no value--should be no lines
        if len(atts) == 1 and atts[0] == '':
            border_dict[border_type] = 'none'
            return border_dict
        # border-paragraph-right
        for att in atts:
            values = att.split(':')
            if len(values) == 2:
                att = values[0]
                value = values[1]
            else:
                value = 'true'
            style_att = self.__border_style_dict.get(att)
            if style_att:
                # style keywords are resolved collectively below
                border_style_list.append(style_att)
            else:
                att = self.__border_dict.get(att)
                if not att:
                    sys.stderr.write(
                        'module is border_parse.py\n'
                        'function is parse_border\n'
                        'token does not have an att value\n'
                        'line is "%s"' % line
                    )
                att = f'{border_type}-{att}'
                border_dict[att] = value
        new_border_dict = self.__determine_styles(border_type, border_style_list)
        border_dict.update(new_border_dict)
        return border_dict
    def __determine_styles(self, border_type, border_style_list):
        """Resolve the single '<border>-style' attribute from the collected
        style keywords, honouring the historical priority order; fall back
        to the first collected keyword when none has explicit priority."""
        new_border_dict = {}
        att = '%s-style' % border_type
        for style_token, style_value in self.__style_priority:
            if style_token in border_style_list:
                new_border_dict[att] = style_value
                return new_border_dict
        if border_style_list:
            new_border_dict[att] = border_style_list[0]
        return new_border_dict
| 8,117 | Python | .py | 185 | 33.745946 | 81 | 0.493882 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,276 | get_options.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/get_options.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
"""
Gets options for main part of script
"""
import os
import sys
from calibre.ebooks.rtf2xml import configure_txt, options_trem
class GetOptions:
def __init__(self,
system_arguments,
rtf_dir,
bug_handler,
configuration_file=None,
):
self.__system_arguments = system_arguments
self.__rtf_dir = rtf_dir
self.__configuration_file = configuration_file
self.__bug_handler = bug_handler
def get_options(self):
"""
return valid, output, help, show_warnings, debug, file
"""
return_options = self.__get_config_options()
options_dict = {
'dir' : [1],
'help' : [0, 'h'],
'show-warnings' : [0],
'caps' : [0,],
'no-caps' : [0],
'symbol' : [0],
'no-symbol' : [0],
'windings' : [0],
'no-wingdings' : [0],
'zapf' : [0],
'no-zapf' : [0],
'font' : [0],
'no-font' : [0],
'dtd' : [1],
'no-dtd' : [0],
'version' : [0],
'output' : [1, 'o'],
'no-namespace' : [0],
'level' : [1],
'indent' : [1],
'no-lists' : [0],
'lists' : [0],
'group-styles' : [0],
'no-group-styles' : [0],
'group-borders' : [0],
'no-group-borders' : [0],
'headings-to-sections' : [0],
'no-headings-to-sections' : [0],
'empty-para' : [0],
'no-empty-para' : [0],
'format' : [1, 'f'],
'config' : [0],
}
options_obj = options_trem.ParseOptions(
system_string=self.__system_arguments,
options_dict=options_dict
)
options, arguments = options_obj.parse_options()
if options == 0:
return_options['valid'] = 0
return return_options
the_keys = options.keys()
return_options['help'] = 0
if 'help' in the_keys:
return_options['help'] = 1
return return_options
return_options['config'] = 0
if 'config' in the_keys:
return_options['config'] = 1
return return_options
return_options['version'] = 0
if 'version' in the_keys:
return_options['version'] = 1
return return_options
# unused
return_options['out-dir'] = 0
if 'dir' in the_keys:
out_dir = options['dir']
if not os.path.isdir(out_dir):
sys.stderr.write('Your output must be an existing directory.\n')
return_options['valid'] = 0
else:
return_options['dir'] = options['dir']
return_options['out-file'] = 0
if 'output' in the_keys:
# out_file = options['output']
return_options['out-file'] = options['output']
else:
pass
"""
sys.stderr.write(
'You must provide an output file with the \'o\' option\n')
return_options['valid'] = 0
"""
if 'level' in the_keys:
return_options['level'] = options['level']
the_level = return_options.get('level')
if the_level:
try:
return_options['level'] = int(the_level)
except ValueError:
sys.stderr.write('The options "--level" must be a number.\n')
return_options['valid'] = 0
return return_options
if 'dtd' in the_keys:
# dtd = options['dtd']
return_options['raw-dtd-path'] = options['dtd']
acceptable = ['sdoc', 'raw', 'tei']
if 'format' in the_keys:
format = options['format']
if format not in acceptable:
sys.stderr.write('--format must take either \'sdoc\' or '
'\'tei\'\n')
return_options['valid'] = 0
return return_options
else:
return_options['format'] = options['format']
# a hack! python chokes on external dtd
# Was able to fix this
# format = return_options.get('format')
# if format != 'raw' and format != None:
# return_options['raw-dtd-path'] = ''
return_options['show-warnings'] = 0
if 'show-warnings' in the_keys:
return_options['show-warnings'] = 1
if 'no-font' in the_keys:
return_options['convert-symbol'] = 0
return_options['convert-zapf'] = 0
return_options['convert-wingdings'] = 0
if 'font' in the_keys:
return_options['convert-symbol'] = 1
return_options['convert-zapf'] = 1
return_options['convert-wingdings'] = 1
if 'symbol' in the_keys:
return_options['convert-symbol'] = 1
if 'no-symbol' in the_keys:
return_options['convert-symbol'] = 0
if 'wingdings' in the_keys:
return_options['convert-wingdings'] = 1
if 'no-wingdings' in the_keys:
return_options['convert-wingdings'] = 0
if 'zapf' in the_keys:
return_options['convert-zapf'] = 1
if 'no-zapf' in the_keys:
return_options['convert-zapf'] = 0
if 'caps' in the_keys:
return_options['convert-caps'] = 1
if 'no-caps' in the_keys:
return_options['convert-caps'] = 0
if 'no-dtd' in the_keys:
return_options['no-dtd'] = 1
else:
return_options['no-dtd'] = 0
return_options['no-ask'] = 0
if 'no-ask' in the_keys:
return_options['no-ask'] = 1
sys.stderr.write('You can also permanetly set the no-ask option in the rtf2xml file.\n')
if 'no-namespace' in the_keys:
return_options['no-namespace'] = 1
if 'headings-to-sections' in the_keys:
return_options['headings-to-sections'] = 1
elif 'no-headings-to-sections' in the_keys:
return_options['headings-to-sections'] = 0
if 'no-lists' in the_keys:
return_options['form-lists'] = 0
elif 'lists' in the_keys:
return_options['form-lists'] = 1
if 'group-styles' in the_keys:
return_options['group-styles'] = 1
elif 'no-group-styles' in the_keys:
return_options['group-styles'] = 0
if 'group-borders' in the_keys:
return_options['group-borders'] = 1
elif 'no-group-borders' in the_keys:
return_options['group-borders'] = 0
if 'empty-para' in the_keys:
return_options['empty-paragraphs'] = 1
elif 'no-empty-para' in the_keys:
return_options['empty-paragraphs'] = 0
if len(arguments) == 0:
sys.stderr.write(
'You must provide a file to convert.\n')
return_options['valid'] = 0
return return_options
elif len(arguments) > 1:
sys.stderr.write(
'You can only convert one file at a time.\n')
return_options['valid'] = 0
else:
return_options['in-file'] = arguments[0]
# check for out file
smart_output = return_options.get('smart-output')
if smart_output == 'false':
smart_output = 0
if smart_output and not return_options['out-file']:
in_file = return_options['in-file']
the_file_name, ext = os.path.splitext(in_file)
if ext != '.rtf':
sys.stderr.write(
'Sorry, but this file does not have an "rtf" extension, so \n'
'the script will not attempt to convert it.\n'
'If it is in fact an rtf file, use the "-o" option.\n'
)
return_options['valid'] = 0
else:
return_options['out-file'] = '%s.xml' % the_file_name
if not smart_output and not return_options['out-file']:
"""
sys.stderr.write(
'Please provide and file to output with the -o option.\n'
'Or set \'<smart-output value = "true"/>\'.\n'
'in the configuration file.\n'
)
return_options['valid'] = 0
"""
pass
if 'indent' in the_keys:
try:
value = int(options['indent'])
return_options['indent'] = value
except ValueError:
sys.stderr.write('--indent must take an integer')
return_options['valid'] = 0
# check for format and pyxml
"""
the_format = return_options.get('format')
if the_format != 'raw':
no_pyxml = return_options.get('no-pyxml')
if no_pyxml:
sys.stderr.write('You want to convert your file to "%s".\n'
'Sorry, but you must have pyxml installed\n'
'in order to convert your document to anything but raw XML.\n'
'Please do not use the --format option.\n\n'
% the_format
)
return_options['valid'] = 0
xslt_proc = return_options.get('xslt-processor')
if xslt_proc == None and not no_pyxml:
sys.stderr.write('You want to convert your file to "%s".\n'
'Sorry, but you must have an xslt processor set up\n'
'in order to conevert your document to anything but raw XML.\n'
'Please use --format raw.\n\n'
% the_format
)
return_options['valid'] = 0
"""
return return_options
def __get_config_options(self):
configure_obj = configure_txt.Configure(
bug_handler=self.__bug_handler,
configuration_file=self.__configuration_file)
options_dict = configure_obj.get_configuration(type='normal')
if options_dict == 1:
sys.exit(1)
options_dict['valid'] = 1
convert_caps = options_dict.get('convert-caps')
if convert_caps == 'false':
options_dict['convert-caps'] = 0
convert_symbol = options_dict.get('convert-symbol')
if convert_symbol == 'false':
options_dict['convert-symbol'] = 0
convert_wingdings = options_dict.get('convert-wingdings')
if convert_wingdings == 'false':
options_dict['convert-wingdings'] = 0
convert_zapf = options_dict.get('convert-zapf-dingbats')
if convert_zapf == 'false':
options_dict['convert-zapf'] = 0
elif convert_zapf == 'true':
options_dict['convert-zapf'] = 1
headings_to_sections = options_dict.get('headings-to-sections')
if headings_to_sections == 'true':
options_dict['headings-to-sections'] = 1
elif headings_to_sections == '1':
options_dict['headings-to-sections'] = 1
elif headings_to_sections == 'false':
options_dict['headings-to-sections'] = 0
elif headings_to_sections == '0':
options_dict['headings-to-sections'] = 0
else:
options_dict['headings-to-sections'] = 0
write_empty_paragraphs = options_dict.get('write-empty-paragraphs')
if write_empty_paragraphs == 'true':
options_dict['empty-paragraphs'] = 1
elif write_empty_paragraphs == '1':
options_dict['empty-paragraphs'] = 1
elif write_empty_paragraphs == 'false':
options_dict['empty-paragraphs'] = 0
elif write_empty_paragraphs == '0':
options_dict['empty-paragraphs'] = 0
else:
options_dict['empty-paragraphs'] = 1
form_lists = options_dict.get('lists')
if form_lists == 'true' or form_lists == '1':
options_dict['form-lists'] = 1
elif form_lists == 'false' or form_lists == '0':
options_dict['form-lists'] = 0
else:
options_dict['form-lists'] = 0
group_styles = options_dict.get('group-styles')
if group_styles == 'true' or group_styles == '1':
options_dict['group-styles'] = 1
elif group_styles == 'false' or group_styles == '0':
options_dict['group-styles'] = 0
else:
options_dict['group-styles'] = 0
group_borders = options_dict.get('group-borders')
if group_borders == 'true' or group_borders == '1':
options_dict['group-borders'] = 1
elif group_borders == 'false' or group_borders == '0':
options_dict['group-borders'] = 0
else:
options_dict['group-borders'] = 0
return options_dict
| 14,734 | Python | .py | 328 | 31.939024 | 100 | 0.471458 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,277 | preamble_rest.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/preamble_rest.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from . import open_for_read, open_for_write
class Preamble:
    """
    Fix the remaining parts of the preamble. This module does very little. It
    makes sure that no text gets put in the revision or list table. In the
    future, when I understand how to interpret the revision table and list
    table, I will make these methods more functional.
    """

    def __init__(self, file,
            bug_handler,
            platform,
            default_font,
            code_page,
            copy=None,
            temp_dir=None,
            ):
        """
        Required:
            file--file to parse
            platform --Windows or Macintosh
            default_font -- the default font
            code_page --the code page (ansi1252, for example)
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        self.__file = file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__default_font = default_font
        self.__code_page = code_page
        self.__platform = platform
        if temp_dir:
            self.__write_to = os.path.join(temp_dir, "info_table_info.data")
        else:
            self.__write_to = "info_table_info.data"

    def __initiate_values(self):
        """
        Initiate all values.
        """
        self.__state = 'default'
        self.__text_string = ''
        # One handler per state of the line-by-line machine in fix_preamble.
        self.__state_dict = {
            'default' : self.__default_func,
            'revision' : self.__revision_table_func,
            'list_table' : self.__list_table_func,
            'body' : self.__body_func,
        }
        # Marker tokens that move the machine out of the 'default' state.
        self.__default_dict = {
            'mi<mk<rtfhed-beg' : self.__found_rtf_head_func,
            'mi<mk<listabbeg_' : self.__found_list_table_func,
            'mi<mk<revtbl-beg' : self.__found_revision_table_func,
            'mi<mk<body-open_' : self.__found_body_func,
        }

    def __default_func(self, line):
        # Dispatch on the token; unrecognised lines pass through unchanged.
        action = self.__default_dict.get(self.__token_info)
        if action:
            action(line)
        else:
            self.__write_obj.write(line)

    def __found_rtf_head_func(self, line):
        """
        Requires:
            line -- the line to parse
        Returns:
            nothing.
        Logic:
            Write to the output file the default font info, the code page
            info, and the platform info.
        """
        self.__write_obj.write(
            'mi<tg<empty-att_<rtf-definition'
            '<default-font>%s<code-page>%s'
            '<platform>%s\n' % (self.__default_font, self.__code_page,
                self.__platform)
            )

    def __found_list_table_func(self, line):
        self.__state = 'list_table'

    def __list_table_func(self, line):
        # Suppress text tokens inside the list table; pass other lines on.
        if self.__token_info == 'mi<mk<listabend_':
            self.__state = 'default'
        elif line[0:2] == 'tx':
            pass
        else:
            self.__write_obj.write(line)

    def __found_revision_table_func(self, line):
        self.__state = 'revision'

    def __revision_table_func(self, line):
        # Suppress text tokens inside the revision table as well.
        if self.__token_info == 'mi<mk<revtbl-end':
            self.__state = 'default'
        elif line[0:2] == 'tx':
            pass
        else:
            self.__write_obj.write(line)

    def __found_body_func(self, line):
        self.__state = 'body'
        self.__write_obj.write(line)

    def __body_func(self, line):
        # Once in the body, everything is copied through verbatim.
        self.__write_obj.write(line)

    def fix_preamble(self):
        """
        Requires:
            nothing
        Returns:
            nothing (changes the original file)
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state. The state can either be default, the revision table,
            the list table, or the body.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        # Bug fix: the original reported the unknown state
                        # and then still called None(line), crashing with a
                        # TypeError.  Report and skip the line instead.
                        sys.stderr.write(
                            'no matching state in module preamble_rest.py\n' + self.__state + '\n')
                        continue
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "preamble_div.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 5,699 | Python | .py | 143 | 30.216783 | 95 | 0.491788 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,278 | check_encoding.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/check_encoding.py | #!/usr/bin/env python
import sys
class CheckEncoding:
    """
    Report lines (and, optionally, columns) of a file whose bytes do not
    decode in a given character encoding.
    """

    def __init__(self, bug_handler):
        # bug_handler is kept for interface parity with the other rtf2xml
        # modules; it is not used directly here.
        self.__bug_handler = bug_handler

    def __get_position_error(self, line, encoding, line_num):
        """
        Write to stderr the 1-based column of every byte in *line* that
        fails to decode, together with the decoder's message.
        """
        for offset in range(len(line)):
            try:
                # Bug fix: slice instead of iterating the bytes line.
                # Iterating bytes yields ints, which have no .decode(),
                # so the original raised AttributeError instead of
                # reporting the failing column.
                line[offset:offset + 1].decode(encoding)
            except ValueError as msg:
                sys.stderr.write('line: %s char: %s\n%s\n' % (line_num, offset + 1, str(msg)))

    def check_encoding(self, path, encoding='us-ascii', verbose=True):
        """
        Return True if the file at *path* contains bytes that are invalid
        in *encoding* (reporting the first bad line when *verbose*);
        return False when the whole file decodes cleanly.
        """
        line_num = 0
        with open(path, 'rb') as read_obj:
            for line in read_obj:
                line_num += 1
                try:
                    line.decode(encoding)
                except ValueError:
                    # UnicodeDecodeError is a subclass of ValueError.
                    if verbose:
                        if len(line) < 1000:
                            self.__get_position_error(line, encoding, line_num)
                        else:
                            sys.stderr.write('line: %d has bad encoding\n' % line_num)
                    return True
        return False
if __name__ == '__main__':
    # Bug fix: CheckEncoding.__init__ requires a bug_handler argument; the
    # original call passed nothing and raised a TypeError before any
    # checking happened.  No handler is needed for command-line use.
    check_encoding_obj = CheckEncoding(bug_handler=None)
    check_encoding_obj.check_encoding(sys.argv[1])
| 1,220 | Python | .py | 31 | 26.451613 | 98 | 0.514407 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,279 | field_strings.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/field_strings.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import re
import sys
class FieldStrings:
"""
This module is given a string. It processes the field instruction string and
returns a list of three values.
"""
def __init__(self, bug_handler, run_level=1):
"""
Requires:
nothing
Returns:
nothing
"""
self.__run_level = run_level
self.__bug_handler = bug_handler
self.__initiate_values()
    def __initiate_values(self):
        """
        Requires:
            nothing.
        Returns:
            nothing.
        Logic:
            initiate values for rest of class.
            self.__field_instruction_dict:
                The dictionary for all field names.
        """
        # Each entry maps a field keyword to (handler, readable-name); the
        # handler receives (field_name, readable_name, instruction_string)
        # and returns the three-item list used by process_string.
        self.__field_instruction_dict = {
            # number type (arabic, etc.) and number format (\# " ")
            'EDITTIME' : (self.__num_type_and_format_func, 'editing-time'),
            'NUMCHARS' : (self.__num_type_and_format_func, 'number-of-characters-in-doc'),
            'NUMPAGES' : (self.__num_type_and_format_func, 'number-of-pages-in-doc'),
            'NUMWORDS' : (self.__num_type_and_format_func, 'number-of-words-in-doc'),
            'REVNUM' : (self.__num_type_and_format_func, 'revision-number'),
            'SECTIONPAGES' : (self.__num_type_and_format_func, 'num-of-pages-in-section'),
            'SECTION' : (self.__num_type_and_format_func, 'insert-section-number'),
            'QUOTE' : (self.__num_type_and_format_func, 'quote'),
            # number formatting (\# "")
            'PAGE' : (self.__default_inst_func, 'insert-page-number'),
            'page' : (self.__default_inst_func, 'insert-page-number'),
            # date format (\@ "")
            'CREATEDATE' : (self.__date_func, 'insert-date'),
            'PRINTDATE' : (self.__date_func, 'insert-date'),
            # PRINTDATE?
            'SAVEDATE' : (self.__date_func, 'last-saved'),
            'TIME' : (self.__date_func, 'insert-time'),
            # numbers?
            # these fields take four switches
            'AUTHOR' : (self.__simple_info_func, 'user-name'),
            'COMMENTS' : (self.__simple_info_func, 'comments'),
            'FILENAME' : (self.__simple_info_func, 'file-name'),
            'filename' : (self.__simple_info_func, 'file-name'),
            'KEYWORDS' : (self.__simple_info_func, 'keywords'),
            'LASTSAVEDBY' : (self.__simple_info_func, 'last-saved-by'),
            'SUBJECT' : (self.__simple_info_func, 'subject'),
            'TEMPLATE' : (self.__simple_info_func, 'based-on-template'),
            'TITLE' : (self.__simple_info_func, 'document-title'),
            'USERADDRESS' : (self.__simple_info_func, 'user-address'),
            'USERINITIALS' : (self.__simple_info_func, 'user-initials'),
            'USERNAME' : (self.__simple_info_func, 'user-name'),
            'EQ' : (self.__equation_func, 'equation'),
            'HYPERLINK' : (self.__hyperlink_func, 'hyperlink'),
            'INCLUDEPICTURE': (self.__include_pict_func, 'include-picture'),
            'INCLUDETEXT' : (self.__include_text_func, 'include-text-from-file'),
            'INDEX' : (self.__index_func, 'index'),
            'NOTEREF' : (self.__note_ref_func, 'reference-to-note'),
            'PAGEREF' : (self.__page_ref_func, 'reference-to-page'),
            'REF' : (self.__ref_func, 'reference'),
            'ref' : (self.__ref_func, 'reference'),
            'SEQ' : (self.__sequence_func, 'numbering-sequence'),
            'SYMBOL' : (self.__symbol_func, 'symbol'),
            'TA' : (self.__ta_func, 'anchor-for-table-of-authorities'),
            'TOA' : (self.__toc_table_func, 'table-of-authorities'),
            'TOC' : (self.__toc_table_func, 'table-of-contents'),
            # no switches
            'AUTONUMOUT' : (self.__no_switch_func, 'auto-num-out?'),
            'COMPARE' : (self.__no_switch_func, 'compare'),
            'DOCVARIABLE' : (self.__no_switch_func, 'document-variable'),
            'GOTOBUTTON' : (self.__no_switch_func, 'go-button'),
            'NEXT' : (self.__no_switch_func, 'next'),
            'NEXTIF' : (self.__no_switch_func, 'next-if'),
            'SKIPIF' : (self.__no_switch_func, 'skip-if'),
            'IF' : (self.__no_switch_func, 'if'),
            'MERGEFIELD' : (self.__no_switch_func, 'merge-field'),
            'MERGEREC' : (self.__no_switch_func, 'merge-record'),
            'MERGESEQ' : (self.__no_switch_func, 'merge-sequence'),
            'PLACEHOLDER' : (self.__no_switch_func, 'place-holder'),
            'PRIVATE' : (self.__no_switch_func, 'private'),
            'RD' : (self.__no_switch_func, 'referenced-document'),
            'SET' : (self.__no_switch_func, 'set'),
            # default instructions (haven't written a method for them
            'ADVANCE' : (self.__default_inst_func, 'advance'),
            'ASK' : (self.__default_inst_func, 'prompt-user'),
            'AUTONUMLGL' : (self.__default_inst_func, 'automatic-number'),
            'AUTONUM' : (self.__default_inst_func, 'automatic-number'),
            'AUTOTEXTLIST' : (self.__default_inst_func, 'auto-list-text'),
            'AUTOTEXT' : (self.__default_inst_func, 'auto-text'),
            'BARCODE' : (self.__default_inst_func, 'barcode'),
            'CONTACT' : (self.__default_inst_func, 'contact'),
            'DATABASE' : (self.__default_inst_func, 'database'),
            'DATE' : (self.__default_inst_func, 'date'),
            'date' : (self.__default_inst_func, 'date'),
            'DOCPROPERTY' : (self.__default_inst_func, 'document-property'),
            'FILESIZE' : (self.__default_inst_func, 'file-size'),
            'FILLIN' : (self.__default_inst_func, 'fill-in'),
            'INFO' : (self.__default_inst_func, 'document-info'),
            'LINK' : (self.__default_inst_func, 'link'),
            'PA' : (self.__default_inst_func, 'page'),
            'PRINT' : (self.__default_inst_func, 'print'),
            'STYLEREF' : (self.__default_inst_func, 'style-reference'),
            'USERPROPERTY' : (self.__default_inst_func, 'user-property'),
            'FORMCHECKBOX' : (self.__default_inst_func, 'form-checkbox'),
            'FORMTEXT' : (self.__default_inst_func, 'form-text'),
            # buttons
            'MACROBUTTON' : (self.__default_inst_func, 'macro-button'),
        }
        # Maps the raw '\* <Type>' switch name to a readable number type.
        self.__number_dict = {
            'Arabic' : 'arabic',
            'alphabetic' : 'alphabetic',
            'ALPHABETIC' : 'capital-alphabetic',
            'roman' : 'roman',
            'ROMAN' : 'capital-roman',
            'Ordinal' : 'ordinal',
            'CardText' : 'cardinal-text',
            'OrdText' : 'ordinal-text',
            'Hex' : 'hexadecimal',
            'DollarText' : 'dollar-text',
            'Upper' : 'upper-case',
            'Lower' : 'lower-case',
            'FirstCap' : 'first-cap',
            'Caps' : 'caps',
        }
        # Maps the text-case switch name to a readable format value.
        self.__text_format_dict = {
            'Upper' : 'upper',
            'Lower' : 'lower',
            'FirstCap' : 'first-cap',
            'Caps' : 'caps',
        }
        self.__symbol_num_exp = re.compile(r'SYMBOL (.*?) ')
        self.__symbol_font_exp = re.compile(r'\\f "(.*?)"')
        self.__symbol_size_exp = re.compile(r'\\s (\d+)')
        # self.__toc_figure_exp = re.compile(r'\\c "Figure"')
        # \\@ "dddd, MMMM d, yyyy"
        self.__date_exp = re.compile(r'\\@\s{1,}"(.*?)"')
        self.__num_type_exp = re.compile(
            r'\\\*\s{1,}(Arabic|alphabetic|ALPHABETIC|roman|ROMAN|Ordinal|CardText|OrdText|Hex|DollarText|Upper|Lower|FirstCap|Caps)')
        self.__format_text_exp = re.compile(r'\\\*\s{1,}(Upper|Lower|FirstCap|Caps)')
        self.__merge_format_exp = re.compile(r'\\\*\s{1,}MERGEFORMAT')
        self.__ta_short_field_exp = re.compile(r'\\s\s{1,}"(.*?)"')
        self.__ta_long_field_exp = re.compile(r'\\l\s{1,}"(.*?)"')
        self.__ta_category_exp = re.compile(r'\\c\s{1,}(\d+)')
        # indices
        self.__index_insert_blank_line_exp = re.compile(r'\\h\s{1,}""')
        self.__index_insert_letter_exp = re.compile(r'\\h\s{1,}"()"')
        self.__index_columns_exp = re.compile(r'\\c\s{1,}"(.*?)"')
        self.__bookmark_exp = re.compile(r'\\b\s{1,}(.*?)\s')
        self.__d_separator = re.compile(r'\\d\s{1,}(.*?)\s')
        self.__e_separator = re.compile(r'\\e\s{1,}(.*?)\s')
        self.__l_separator = re.compile(r'\\l\s{1,}(.*?)\s')
        self.__p_separator = re.compile(r'\\p\s{1,}(.*?)\s')
        self.__index_sequence = re.compile(r'\\s\s{1,}(.*?)\s')
        self.__index_entry_typ_exp = re.compile(r'\\f\s{1,}"(.*?)"')
        self.__quote_exp = re.compile(r'"(.*?)"')
        self.__filter_switch = re.compile(r'\\c\s{1,}(.*?)\s')
        self.__link_switch = re.compile(r'\\l\s{1,}(.*?)\s')
def process_string(self, my_string, type):
"""
Requires:
my_string --the string to parse.
type -- the type of string.
Returns:
Returns a string for a field instrution attribute.
Logic:
This handles all "large" fields, which means everything except
toc entries, index entries, and bookmarks
Split the string by spaces, and get the first item in the
resulting list. This item is the field's type. Check for the
action in the field instructions dictionary for further parsing.
If no action is found, print out an error message.
"""
changed_string = ''
lines = my_string.split('\n')
for line in lines:
if line[0:2] == 'tx':
changed_string += line[17:]
fields = changed_string.split()
field_name = fields[0]
action, name = self.__field_instruction_dict.get(field_name, (None, None))
match_obj = re.search(self.__merge_format_exp, changed_string)
if match_obj and name:
name += '<update>dynamic'
elif name:
name += '<update>static'
else:
pass
# no name--not in list above
if action:
the_list = action(field_name, name, changed_string)
else:
# change -1 to 0--for now, I want users to report bugs
msg = f'no key for "{field_name}" "{changed_string}"\n'
sys.stderr.write(msg)
if self.__run_level > 3:
msg = f'no key for "{field_name}" "{changed_string}"\n'
raise self.__bug_handler(msg)
the_list = self.__fall_back_func(field_name, line)
return the_list
return the_list
def __default_inst_func(self, field_name, name, line):
"""
Requires:
field_name -- the first word in the string
name -- the changed name according to the dictionary
line -- the string to be parsed
Returns:
The name of the field.
Logic:
I only need the changed name for the field.
"""
return [None, None, name]
def __fall_back_func(self, field_name, line):
"""
Requires:
field_name -- the first word in the string
name -- the changed name according to the dictionary
line -- the string to be parsed
Returns:
The name of the field.
Logic:
Used for fields not found in dict
"""
the_string = field_name
the_string += '<update>none'
return [None, None, the_string]
def __equation_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
The name of the field
Logic:
"""
return [None, None, name]
def __no_switch_func(self, field_name, name, line):
"""
Required:
field_name --the first
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
The name of the field
Logic:
"""
return [None, None, name]
def __num_type_and_format_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
list of None, None, and part of a tag
Logic:
parse num_type
parse num_format
"""
the_string = name
num_format = self.__parse_num_format(line)
if num_format:
the_string += '<number-format>%s' % num_format
num_type = self.__parse_num_type(line)
if num_type:
the_string += '<number-type>%s' % num_type
# Only QUOTE takes a (mandatory?) argument
if field_name == 'QUOTE':
match_group = re.search(r'QUOTE\s{1,}"(.*?)"', line)
if match_group:
arg = match_group.group(1)
the_string += '<argument>%s' % arg
return [None, None, the_string]
def __num_format_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
list of None, None, and part of a tag
Logic:
"""
the_string = name
num_format = self.__parse_num_format(line)
if num_format:
the_string += '<number-format>%s' % num_format
return [None, None, the_string]
def __parse_num_format(self, the_string):
"""
Required:
the_string -- the string to parse
Returns:
a string if the_string contains number formatting information
None, otherwise
Logic:
"""
match_group = re.search(self.__date_exp, the_string)
if match_group:
return match_group(1)
def __parse_num_type(self, the_string):
"""
Required:
the_string -- the string to parse
Returns:
a string if the_string contains number type information
None, otherwise
Logic:
the_string might look like:
USERNAME \\* Arabic \\* MERGEFORMAT
Get the \\* Upper part. Use a dictionary to convert the "Arabic" to
a more-readable word for the value of the key "number-type".
(<field number-type = "Arabic">
"""
match_group = re.search(self.__num_type_exp, the_string)
if match_group:
name = match_group.group(1)
changed_name = self.__number_dict.get(name)
if changed_name:
return changed_name
else:
sys.stderr.write('module is fields_string\n')
sys.stderr.write('method is __parse_num_type\n')
sys.stderr.write('no dictionary entry for %s\n' % name)
def __date_func(self, field_name, name, line):
"""
Required:
field_name --the fist
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
list of None, None, and part of a tag
Logic:
"""
the_string = name
match_group = re.search(self.__date_exp, line)
if match_group:
the_string += '<date-format>%s' % match_group.group(1)
return [None, None, the_string]
def __simple_info_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
The name of the field
Logic:
These fields can only have the following switches:
1. Upper
2. Lower
3. FirstCap
4. Caps
"""
the_string = name
match_group = re.search(self.__format_text_exp, line)
if match_group:
name = match_group.group(1)
changed_name = self.__text_format_dict.get(name)
if changed_name:
the_string += '<format>%s' % changed_name
else:
sys.stderr.write('module is fields_string\n')
sys.stderr.write('method is __parse_num_type\n')
sys.stderr.write('no dictionary entry for %s\n' % name)
return [None, None, the_string]
def __hyperlink_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
The name of the field
"""
self.__link_switch = re.compile(r'\\l\s{1,}"{0,1}(.*?)"{0,1}\s')
the_string = name
match_group = re.search(self.__link_switch, line)
if match_group:
link = match_group.group(1)
link = link.replace('"', """)
the_string += '<link>%s' % link
# \l "txt" "link"
# want "file name" so must get rid of \c "txt"
line = re.sub(self.__link_switch, '', line)
match_group = re.search(self.__quote_exp, line)
if match_group:
arg = match_group.group(1)
the_string += '<argument>%s' % arg
else:
pass
index = line.find('\\m')
if index > -1:
the_string += '<html2-image-map>true'
index = line.find('\\n')
if index > -1:
the_string += '<new-window>true'
index = line.find('\\h')
if index > -1:
the_string += '<no-history>true'
return [None, None, the_string]
def __include_text_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
The name of the field
Logic:
"""
the_string = name
match_group = re.search(self.__format_text_exp, line)
if match_group:
name = match_group.group(1)
changed_name = self.__text_format_dict.get(name)
if changed_name:
the_string += '<format>%s' % changed_name
else:
sys.stderr.write('module is fields_string\n')
sys.stderr.write('method is __parse_num_type\n')
sys.stderr.write('no dictionary entry for %s\n' % name)
match_group = re.search(self.__filter_switch, line)
if match_group:
arg = match_group.group(1)
the_string += '<filter>%s' % arg
# \c "txt" "file name"
# want "file name" so must get rid of \c "txt"
line = re.sub(self.__filter_switch, '', line)
match_group = re.search(self.__quote_exp, line)
if match_group:
arg = match_group.group(1)
arg = arg.replace('"', """)
the_string += '<argument>%s' % arg
else:
sys.stderr.write('Module is field_strings\n')
sys.stderr.write('method is include_text_func\n')
sys.stderr.write('no argument for include text\n')
index = line.find('\\!')
if index > -1:
the_string += '<no-field-update>true'
return [None, None, the_string]
def __include_pict_func(self, field_name, name, line):
"""
Required:
field_name -- the first word in the string
name --the changed name according to the dictionary
line -- the string to be parse
Returns:
The name of the field
Logic:
"""
the_string = name
match_group = re.search(self.__filter_switch, line)
if match_group:
arg = match_group.group(1)
arg = arg.replace('"', """)
the_string += '<filter>%s' % arg
# \c "txt" "file name"
# want "file name" so must get rid of \c "txt"
line = re.sub(self.__filter_switch, '', line)
match_group = re.search(self.__quote_exp, line)
if match_group:
arg = match_group.group(1)
the_string += '<argument>%s' % arg
else:
sys.stderr.write('Module is field_strings\n')
sys.stderr.write('method is include_pict_func\n')
sys.stderr.write('no argument for include pict\n')
index = line.find('\\d')
if index > -1:
the_string += '<external>true'
return [None, None, the_string]
def __ref_func(self, field_name, name, line):
"""
Requires:
field_name -- the first word in the string
name -- the changed name according to the dictionary
line -- the string to be parsed
Returns:
The name of the field.
Logic:
A page reference field looks like this:
PAGEREF _Toc440880424 \\h
I want to extract the second line of info, which is used as an
anchor in the resulting XML file.
"""
the_string = name
match_group = re.search(self.__format_text_exp, line)
if match_group:
name = match_group.group(1)
changed_name = self.__text_format_dict.get(name)
if changed_name:
the_string += '<format>%s' % changed_name
else:
sys.stderr.write('module is fields_string\n')
sys.stderr.write('method is __parse_num_type\n')
sys.stderr.write('no dictionary entry for %s\n' % name)
line = re.sub(self.__merge_format_exp, '', line)
words = line.split()
words = words[1:] # get rid of field name
for word in words:
if word[0:1] != '\\':
the_string += '<bookmark>%s' % word
index = line.find('\\f')
if index > -1:
the_string += '<include-note-number>true'
index = line.find('\\h')
if index > -1:
the_string += '<hyperlink>true'
index = line.find('\\n')
if index > -1:
the_string += '<insert-number>true'
index = line.find('\\r')
if index > -1:
the_string += '<insert-number-relative>true'
index = line.find('\\p')
if index > -1:
the_string += '<paragraph-relative-position>true'
index = line.find('\\t')
if index > -1:
the_string += '<suppress-non-delimeter>true'
index = line.find('\\w')
if index > -1:
the_string += '<insert-number-full>true'
return [None, None, the_string]
def __toc_table_func(self, field_name, name, line):
"""
Requires:
field_name -- the name of the first word in the string
name --the changed name, according to the dictionary.
line --the string to be parsed.
Returns:
A string for a TOC table field.
Logic:
If the string contains Figure, it is a table of figures.
Otherwise, it is a plain old table of contents.
"""
the_string = name
index = line.find('\\c "Figure"')
if index > -1:
the_string = the_string.replace('table-of-contents', 'table-of-figures')
# don't really need the first value in this list, I don't believe
return [name, None, the_string]
def __sequence_func(self, field_name, name, line):
"""
Requires:
field_name --the name of the first word in the string.
name --the changed name according to the dictionary.
line -- the string to parse.
Returns:
A string with a value for the type and label attributes
Logic:
The type of sequence--whether figure, graph, my-name, or
whatever--is represented by the second word in the string. Extract
and return.
SEQ Figure \\* ARABIC
"""
fields = line.split()
label = fields[1]
my_string = f'{name}<label>{label}'
return [None, None, my_string]
def __ta_func(self, field_name, name, line):
"""
Requires:
field_name --the name of the first word in the string.
name --the changed name according to the dictionary.
line -- the string to parse.
Returns:
A string with a value for the type and label attributes
Logic:
"""
the_string = name
match_group = re.search(self.__ta_short_field_exp, line)
if match_group:
short_name = match_group.group(1)
the_string += '<short-field>%s' % short_name
match_group = re.search(self.__ta_long_field_exp, line)
if match_group:
long_name = match_group.group(1)
the_string += '<long-field>%s' % long_name
match_group = re.search(self.__ta_category_exp, line)
if match_group:
category = match_group.group(1)
the_string += '<category>%s' % category
index = line.find('\\b')
if index > -1:
the_string += '<bold>true'
index = line.find('\\i')
if index > -1:
the_string += '<italics>true'
return [None, None, the_string]
def __index_func(self, field_name, name, line):
"""
Requires:
field_name --the name of the first word in the string.
name --the changed name according to the dictionary.
line -- the string to parse.
Returns:
A string with a value for the type and label attributes
Logic:
"""
# self.__index_insert_blank_line_exp = re.compile(r'\\h\s{1,}""')
# self.__index_insert_letter_exp = re.compile(r'\\h\s{1,}(".*?")')
the_string = name
match_group = re.search(self.__index_insert_blank_line_exp, line)
if match_group:
the_string += '<insert-blank-line>true'
else:
match_group = re.search(self.__index_insert_letter_exp, line)
if match_group:
insert_letter = match_group.group(1)
the_string += '<insert-letter>%s' % insert_letter
match_group = re.search(self.__index_columns_exp, line)
if match_group:
columns = match_group.group(1)
the_string += '<number-of-columns>%s' % columns
# self.__bookmark_exp = re.compile(r'\\b\s{1,}(.*?)\s')
match_group = re.search(self.__bookmark_exp, line)
if match_group:
bookmark = match_group.group(1)
the_string += '<use-bookmark>%s' % bookmark
match_group = re.search(self.__d_separator, line)
if match_group:
separator = match_group.group(1)
separator = separator.replace('"', '"')
the_string += '<sequence-separator>%s' % separator
# self.__e_separator = re.compile(r'\\e\s{1,}(.*?)\s')
match_group = re.search(self.__e_separator, line)
if match_group:
separator = match_group.group(1)
separator = separator.replace('"', '"')
the_string += '<page-separator>%s' % separator
# self.__index_sequence = re.compile(r'\\s\s{1,}(.*?)\s')
match_group = re.search(self.__index_sequence, line)
if match_group:
sequence = match_group.group(1)
separator = separator.replace('"', '"')
the_string += '<use-sequence>%s' % sequence
# self.__index_entry_typ_exp = re.compile(r'\\f\s{1,}"(.*?)"')
match_group = re.search(self.__index_entry_typ_exp, line)
if match_group:
entry_type = match_group.group(1)
the_string += '<entry-type>%s' % entry_type
# self.__p_separator = re.compile(r'\\p\s{1,}(.*?)\s')
match_group = re.search(self.__p_separator, line)
if match_group:
limit = match_group.group(1)
the_string += '<limit-to-letters>%s' % limit
match_group = re.search(self.__l_separator, line)
if match_group:
separator = match_group.group(1)
separator = separator.replace('"', '"')
the_string += '<multi-page-separator>%s' % separator
index = line.find('\\a')
if index > -1:
the_string += '<accented>true'
index = line.find('\\r')
if index > -1:
the_string += '<sub-entry-on-same-line>true'
index = line.find('\\t')
if index > -1:
the_string += '<enable-yomi-text>true'
return [None, None, the_string]
def __page_ref_func(self, field_name, name, line):
"""
Requires:
field_name --first name in the string.
name -- the changed name according to the dictionary.
line -- the string to parse.
Returns:
A string .
Logic:
"""
the_string = name
num_format = self.__parse_num_format(line)
if num_format:
the_string += '<number-format>%s' % num_format
num_type = self.__parse_num_type(line)
if num_type:
the_string += '<number-type>%s' % num_type
line = re.sub(self.__merge_format_exp, '', line)
words = line.split()
words = words[1:] # get rid of field name
for word in words:
if word[0:1] != '\\':
the_string += '<bookmark>%s' % word
index = line.find('\\h')
if index > -1:
the_string += '<hyperlink>true'
index = line.find('\\p')
if index > -1:
the_string += '<paragraph-relative-position>true'
return [None, None, the_string]
def __note_ref_func(self, field_name, name, line):
"""
Requires:
field_name --first name in the string.
name -- the changed name according to the dictionary.
line -- the string to parse.
Returns:
A string .
Logic:
"""
the_string = name
line = re.sub(self.__merge_format_exp, '', line)
words = line.split()
words = words[1:] # get rid of field name
for word in words:
if word[0:1] != '\\':
the_string += '<bookmark>%s' % word
index = line.find('\\h')
if index > -1:
the_string += '<hyperlink>true'
index = line.find('\\p')
if index > -1:
the_string += '<paragraph-relative-position>true'
index = line.find('\\f')
if index > -1:
the_string += '<include-note-number>true'
return [None, None, the_string]
def __symbol_func(self, field_name, name, line):
"""
Requires:
field_name --first name in the string.
name -- the changed name according to the dictionary.
line -- the string to parse.
Returns:
A string containing font size, font style, and a hexadecimal value.
Logic:
The SYMBOL field is one of Microsoft's many quirky ways of
entering text. The string that results from this method looks like
this:
SYMBOL 97 \\f "Symbol" \\s 12
The first word merely tells us that we have encountered a SYMBOL
field.
The next value is the Microsoft decimal value. Change this to
hexadecimal.
The pattern '\\f "some font' tells us the font.
The pattern '\\s some size' tells us the font size.
Extract all of this information. Store this information in a
string, and make this string the last item in a list. The first
item in the list is the simple word 'symbol', which tells me that
I don't really have field, but UTF-8 data.
"""
num = ''
font = ''
font_size = ''
changed_line = ''
search_obj = re.search(self.__symbol_num_exp, line)
if search_obj:
num = search_obj.group(1)
num = int(num)
num = '%X' % num
search_obj = re.search(self.__symbol_font_exp, line)
if search_obj:
font = search_obj.group(1)
changed_line += 'cw<ci<font-style<nu<%s\n' % font
search_obj = re.search(self.__symbol_size_exp, line)
if search_obj:
font_size = search_obj.group(1)
font_size = int(font_size)
font_size = '%.2f' % font_size
changed_line += 'cw<ci<font-size_<nu<%s\n' % font_size
changed_line += 'tx<hx<__________<\'%s\n' % num
return ['Symbol', None, changed_line]
| 34,870 | Python | .py | 790 | 33.683544 | 134 | 0.515182 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,280 | body_styles.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/body_styles.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
"""
Simply write the list of strings after style table
"""
class BodyStyles:
    """
    Copy the token file, re-emitting the collected style strings inside a
    styles-in-body group immediately after the style table is closed.
    """

    def __init__(self,
            in_file,
            list_of_styles,
            bug_handler,
            copy=None,
            run_level=1,):
        """
        Required:
            'in_file' -- file to parse
            'list_of_styles' -- the style strings to write after the
            style table
            'bug_handler' -- exception class raised on internal errors
        Optional:
            'copy' -- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__list_of_styles = list_of_styles
        self.__run_level = run_level
        # temporary output file; renamed over the input when done
        self.__write_to = better_mktemp()

    def insert_info(self):
        """
        Copy every line of the input; right after the close of the style
        table, inject the styles-in-body group built from the collected
        style strings. The original file is then replaced with the result.
        """
        read_obj = open_for_read(self.__file)
        self.__write_obj = open_for_write(self.__write_to)
        for line in read_obj:
            if line == 'mi<tg<close_____<style-table\n':
                if self.__list_of_styles:
                    self.__write_obj.write('mi<tg<open______<styles-in-body\n')
                    self.__write_obj.write(''.join(self.__list_of_styles))
                    self.__write_obj.write('mi<tg<close_____<styles-in-body\n')
                elif self.__run_level > 3:
                    # this shouldn't happen!
                    raise self.__bug_handler('Not enough data for each table\n')
            self.__write_obj.write(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "body_styles.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 3,343 | Python | .py | 78 | 33.410256 | 79 | 0.451504 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,281 | list_numbers.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/list_numbers.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class ListNumbers:
    """
    RTF puts list numbers outside of the paragraph. The public method
    in this class put the list numbers inside the paragraphs.
    """
    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file' -- the tokenised file to fix
            'bug_handler' -- exception class raised on internal errors
        Optional:
            'copy'-- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        # temporary output file; renamed over self.__file when finished
        self.__write_to = better_mktemp()
    def __initiate_values(self):
        """
        initiate values for fix_list_numbers.
        Required:
            Nothing
        Return:
            Nothing
        """
        # state machine: 'default' -> 'after_ob' -> 'list_text' -> 'after_list_text'
        self.__state = "default"
        # accumulates all tokens belonging to the current list-text group
        self.__list_chunk = ''
        self.__previous_line = ''
        self.__list_text_ob_count = ''
        # dispatch table: state name -> handler for the current line
        self.__state_dict={
            'default' : self.__default_func,
            'after_ob' : self.__after_ob_func,
            'list_text' : self.__list_text_func,
            'after_list_text' : self.__after_list_text_func
        }
    def __after_ob_func(self, line):
        """
        Handle the line immediately after an open bracket. If it starts a
        list-text group, buffer both lines; otherwise flush them.
        Required:
            self, line
        Returns:
            Nothing
        """
        if self.__token_info == 'cw<ls<list-text_':
            self.__state = 'list_text'
            self.__list_chunk = self.__list_chunk + \
                self.__previous_line + line
            # remember which bracket number opened the list-text group
            self.__list_text_ob = self.__ob_count
            self.__cb_count = 0
        else:
            self.__write_obj.write(self.__previous_line)
            self.__write_obj.write(line)
            self.__state = 'default'
    def __after_list_text_func(self, line):
        """
        Look for an open bracket or a line of text, and then print out the
        self.__list_chunk. Print out the line.
        """
        if line[0:2] == 'ob' or line[0:2] == 'tx':
            self.__state = 'default'
            self.__write_obj.write('mi<mk<lst-txbeg_\n')
            self.__write_obj.write('mi<mk<para-beg__\n')
            self.__write_obj.write('mi<mk<lst-tx-beg\n')
            self.__write_obj.write(
                # 'mi<tg<open-att__<list-text<type>%s\n' % self.__list_type)
                'mi<tg<open-att__<list-text\n')
            self.__write_obj.write(self.__list_chunk)
            self.__write_obj.write('mi<tg<close_____<list-text\n')
            self.__write_obj.write('mi<mk<lst-tx-end\n')
            self.__list_chunk = ''
        self.__write_obj.write(line)
    def __determine_list_type(self, chunk):
        """
        Determine if the list is ordered or itemized
        """
        lines = chunk.split('\n')
        text_string = ''
        for line in lines:
            if line[0:5] == 'tx<hx':
                # hex token 'B7 -- returning "unordered" means it is
                # treated as a bullet
                if line[17:] == '\'B7':
                    return "unordered"
            elif line[0:5] == 'tx<nu':
                text_string += line[17:]
        # strip common number decorations: "1.", "(1)", etc.
        text_string = text_string.replace('.', '')
        text_string = text_string.replace('(', '')
        text_string = text_string.replace(')', '')
        if text_string.isdigit():
            return 'ordered'
        """
        sys.stderr.write('module is list_numbers\n')
        sys.stderr.write('method is __determine type\n')
        sys.stderr.write('Couldn\'t get type of list\n')
        """
        # must be some type of ordered list -- just a guess!
        return 'unordered'
    def __list_text_func(self, line):
        """
        Handle lines that are part of the list text. If the end of the list
        text is found (the closing bracket matches the self.__list_text_ob),
        then change the state. Always add the line to the self.__list_chunk
        Required:
            self, line
        Returns:
            Nothing
        """
        if self.__list_text_ob == self.__cb_count:
            self.__state = 'after_list_text'
            # NOTE(review): set but never read anywhere in this class --
            # looks vestigial; verify before removing.
            self.__right_after_list_text = 1
            self.__list_type = self.__determine_list_type(self.__list_chunk)
            self.__write_obj.write('mi<mk<list-type_<%s\n' % self.__list_type)
        # paragraph-definition tokens are dropped from the buffered chunk
        if self.__token_info != 'cw<pf<par-def___':
            self.__list_chunk = self.__list_chunk + line
    def __default_func(self, line):
        """
        Handle the lines that are not part of any special state. Look for an
        opening bracket. If an open bracket is found, add this line to a
        temporary self.__previous line, which other methods need. Otherwise,
        print out the line.
        Required:
            self, line
        Returns:
            Nothing
        """
        if self.__token_info == 'ob<nu<open-brack':
            self.__state = 'after_ob'
            self.__previous_line = line
        else:
            self.__write_obj.write(line)
    def fix_list_numbers(self):
        """
        Required:
            nothing
        Returns:
            original file will be changed
        Logic:
            Read in one line a time from the file. Keep track of opening and
            closing brackets. Determine the method ('action') by passing the
            state to the self.__state_dict.
            Simply print out the line to a temp file until an open bracket
            is found. Check the next line. If it is list-text, then start
            adding to the self.__list_chunk until the closing bracket is
            found.
            Next, look for an open bracket or text. When either is found,
            print out self.__list_chunk and the line.
        """
        self.__initiate_values()
        read_obj = open_for_read(self.__file)
        self.__write_obj = open_for_write(self.__write_to)
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            self.__token_info = line[:16]
            # bracket tokens carry a 4-digit nesting number in their tail
            if self.__token_info == 'ob<nu<open-brack':
                self.__ob_count = line[-5:-1]
            if self.__token_info == 'cb<nu<clos-brack':
                self.__cb_count = line[-5:-1]
            action = self.__state_dict.get(self.__state)
            action(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "list_numbers.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 7,743 | Python | .py | 190 | 30.847368 | 78 | 0.501392 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,282 | replace_illegals.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/replace_illegals.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from calibre.utils.cleantext import clean_ascii_chars
from . import open_for_read, open_for_write
class ReplaceIllegals:
    """
    Replace illegal lower-ASCII characters in the token file.
    """

    def __init__(self,
            in_file,
            copy=None,
            run_level=1,
            ):
        self.__file = in_file
        self.__copy = copy
        self.__run_level = run_level
        # temporary output file; renamed over the input when done
        self.__write_to = better_mktemp()

    def replace_illegals(self):
        """
        Filter every line through clean_ascii_chars and replace the
        original file with the cleaned result.
        """
        with open_for_read(self.__file) as src, open_for_write(self.__write_to) as dst:
            for raw_line in src:
                dst.write(clean_ascii_chars(raw_line))
        copy_obj = copy.Copy()
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "replace_illegals.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 1,900 | Python | .py | 42 | 38.952381 | 73 | 0.430886 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,283 | delete_info.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/delete_info.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class DeleteInfo:
    """Delete unnecessary destination groups"""
    def __init__(self,
            in_file ,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file' -- the tokenised file to process
            'bug_handler' -- exception class raised on internal errors
        Optional:
            'copy' -- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        # temporary output file; renamed over self.__file when finished
        self.__write_to = better_mktemp()
        self.__run_level = run_level
        self.__initiate_allow()
        self.__bracket_count= 0
        self.__ob_count = 0
        self.__cb_count = 0
        # holds a pending open-bracket line not yet written (0 = none)
        self.__ob = 0
        self.__write_cb = False
        # becomes True once any {\* group has been seen
        self.__found_delete = False
    def __initiate_allow(self):
        """
        Initiate a list of destination groups which should be printed out.
        """
        # groups after {\* that are kept
        self.__allowable = ('cw<ss<char-style',
            'cw<it<listtable_',
            'cw<it<revi-table',
            'cw<ls<list-lev-d',
            # Field allowed
            'cw<fd<field-inst',
            'cw<an<book-mk-st',
            'cw<an<book-mk-en',
            'cw<an<annotation',
            'cw<cm<comment___',
            'cw<it<lovr-table',
            # info table
            'cw<di<company___',
            # 'cw<ls<list______',
        )
        # groups after {\* that are deleted
        self.__not_allowable = (
            'cw<un<unknown___',
            'cw<un<company___',
            'cw<ls<list-level',
            'cw<fd<datafield_',
        )
        self.__state = 'default'
        # each handler returns True when the current line should be written
        self.__state_dict = {
            'default' : self.__default_func,
            'after_asterisk' : self.__asterisk_func,
            'delete' : self.__delete_func,
            'list' : self.__list_func,
        }
    def __default_func(self,line):
        """Handle lines when in no special state. Look for an asterisk to
        begin a special state. Otherwise, print out line."""
        # cw<ml<asterisk__<nu<true
        if self.__token_info == 'cw<ml<asterisk__':
            self.__state = 'after_asterisk'
            self.__delete_count = self.__ob_count
        elif self.__token_info == 'ob<nu<open-brack':
            # write previous bracket, if exists
            if self.__ob:
                self.__write_obj.write(self.__ob)
            self.__ob = line
            return False
        else:
            # write previous bracket, since didn't find asterisk
            if self.__ob:
                self.__write_obj.write(self.__ob)
            self.__ob = 0
            return True
    def __delete_func(self,line):
        """Handle lines when in delete state. Don't print out lines
        unless the state has ended."""
        if self.__delete_count == self.__cb_count:
            self.__state = 'default'
            if self.__write_cb:
                # NOTE(review): assigns True when already True -- a no-op;
                # compare __list_func, which resets the flag to False here.
                # Verify intent.
                self.__write_cb = True
                return True
        return False
    def __asterisk_func(self,line):
        """
        Determine whether to delete info in group
        Note on self.__cb flag.
        If you find that you are in a delete group, and the previous
        token in not an open bracket (self.__ob = 0), that means
        that the delete group is nested inside another acceptable
        destination group. In this case, you have already written
        the open bracket, so you will need to write the closed one
        as well.
        """
        # Test for {\*}, in which case don't enter
        # delete state
        self.__found_delete = True
        if self.__token_info == 'cb<nu<clos-brack':
            if self.__delete_count == self.__cb_count:
                self.__state = 'default'
                self.__ob = 0
                # changed this because haven't printed out start
                return False
            else:
                # not sure what happens here!
                # believe I have a '{\*}
                if self.__run_level > 3:
                    msg = 'Flag problem\n'
                    raise self.__bug_handler(msg)
                return True
        elif self.__token_info in self.__allowable :
            if self.__ob:
                self.__write_obj.write(self.__ob)
                self.__ob = 0
                self.__state = 'default'
            else:
                pass
            return True
        elif self.__token_info == 'cw<ls<list______':
            # falls through with no return (None -> treated as False),
            # so the \ls token itself is not written
            self.__ob = 0
            self.__found_list_func(line)
        elif self.__token_info in self.__not_allowable:
            if not self.__ob:
                self.__write_cb = True
            self.__ob = 0
            self.__state = 'delete'
            self.__cb_count = 0
            return False
        else:
            if self.__run_level > 5:
                msg = ('After an asterisk, and found neither an allowable or non-allowable token\n\
token is "%s"\n') % self.__token_info
                raise self.__bug_handler(msg)
            if not self.__ob:
                self.__write_cb = True
            self.__ob = 0
            self.__state = 'delete'
            self.__cb_count = 0
            return False
    def __found_list_func(self, line):
        """
        print out control words in this group
        """
        self.__state = 'list'
    def __list_func(self, line):
        """
        Check to see if the group has ended.
        Return True for all control words.
        Return False otherwise.
        """
        if self.__delete_count == self.__cb_count and \
            self.__token_info == 'cb<nu<clos-brack':
            self.__state = 'default'
            if self.__write_cb:
                self.__write_cb = False
                return True
            return False
        elif line[0:2] == 'cw':
            return True
        else:
            return False
    def delete_info(self):
        """Main method for handling other methods. Read one line at
        a time, and determine whether to print the line based on the state."""
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    # ob<nu<open-brack<0001
                    self.__token_info = line[:16]
                    if self.__token_info == 'ob<nu<open-brack':
                        self.__ob_count = line[-5:-1]
                    if self.__token_info == 'cb<nu<clos-brack':
                        self.__cb_count = line[-5:-1]
                    # Get action to perform
                    action = self.__state_dict.get(self.__state)
                    if not action:
                        sys.stderr.write('No action in dictionary state is "%s" \n'
                            % self.__state)
                    # Print if allowed by action
                    if action(line):
                        self.__write_obj.write(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "delete_info.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        return self.__found_delete
| 8,347 | Python | .py | 201 | 28.686567 | 99 | 0.453774 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,284 | header.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/header.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Header:
    """
    Two public methods are available. The first separates all of the headers
    and footers from the body and puts them at the bottom of the text, where
    they are easier to process. The second joins those headers and footers to
    the proper places in the body.
    """
    def __init__(self,
            in_file ,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file' -- the tokenised file to process
            'bug_handler' -- exception class raised on internal errors
        Optional:
            'copy' -- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        # temporary output file; renamed over self.__file when finished
        self.__write_to = better_mktemp()
        # set as soon as any header/footer group is seen;
        # join_headers() is a no-op while this stays False
        self.__found_a_header = False
    def __in_header_func(self, line):
        """
        Handle all tokens that are part of a header or footer group.
        """
        # the group ends at the close bracket whose number matches the
        # bracket that opened the group
        if self.__cb_count == self.__header_bracket_count:
            self.__in_header = False
            self.__write_obj.write(line)
            self.__write_to_head_obj.write(
            'mi<mk<head___clo\n'
            'mi<tg<close_____<header-or-footer\n'
            'mi<mk<header-clo\n')
        else:
            self.__write_to_head_obj.write(line)
    def __found_header(self, line):
        """
        Found the start of a header or footer group.
        """
        # but this could be header or footer
        self.__found_a_header = True
        self.__in_header = True
        self.__header_count += 1
        # temporarily set this to zero so I can enter loop
        self.__cb_count = 0
        self.__header_bracket_count = self.__ob_count
        # leave a numbered marker in the body so the group can be joined
        # back in at the same spot later
        self.__write_obj.write(
        'mi<mk<header-ind<%04d\n' % self.__header_count)
        self.__write_to_head_obj.write(
        'mi<mk<header-ope<%04d\n' % self.__header_count)
        # line[6:16] holds the token's control-word field
        info = line[6:16]
        type = self.__head_dict.get(info)
        if type:
            self.__write_to_head_obj.write(
            'mi<tg<open-att__<header-or-footer<type>%s\n' % (type)
            )
        else:
            sys.stderr.write(
                'module is header\n'
                'method is __found_header\n'
                'no dict entry\n'
                'line is %s' % line)
            self.__write_to_head_obj.write(
            'mi<tg<open-att__<header-or-footer<type>none\n'
            )
    def __default_sep(self, line):
        """
        Handle all tokens that are not header tokens
        """
        # tokens whose field is 'hf' begin a header/footer group
        if self.__token_info[3:5] == 'hf':
            self.__found_header(line)
        self.__write_obj.write(line)
    def __initiate_sep_values(self):
        """
        initiate counters for separate_headers method.
        """
        self.__bracket_count=0
        self.__ob_count = 0
        self.__cb_count = 0
        self.__header_bracket_count = 0
        self.__in_header = False
        self.__header_count = 0
        # maps the token's control-word field to the XML 'type' attribute
        self.__head_dict = {
            'head-left_' : ('header-left'),
            'head-right' : ('header-right'),
            'foot-left_' : ('footer-left'),
            'foot-right' : ('footer-right'),
            'head-first' : ('header-first'),
            'foot-first' : ('footer-first'),
            'header____' : ('header'),
            'footer____' : ('footer'),
        }
    def separate_headers(self):
        """
        Separate all the headers and footers in an RTF file and put them at
        the bottom, where they are easier to process. Each time a header or
        footer is found, print all of its contents to a temporary file. Close
        both the main and temporary file. Print the headers and footers from
        the temporary file to the bottom of the main file.
        """
        self.__initiate_sep_values()
        self.__header_holder = better_mktemp()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                with open_for_write(self.__header_holder) as self.__write_to_head_obj:
                    for line in read_obj:
                        self.__token_info = line[:16]
                        # keep track of opening and closing brackets
                        if self.__token_info == 'ob<nu<open-brack':
                            self.__ob_count = line[-5:-1]
                        if self.__token_info == 'cb<nu<clos-brack':
                            self.__cb_count = line[-5:-1]
                        # In the middle of header text
                        if self.__in_header:
                            self.__in_header_func(line)
                        # not in the middle of header text
                        else:
                            self.__default_sep(line)
        with open_for_read(self.__header_holder) as read_obj:
            with open_for_write(self.__write_to, append=True) as write_obj:
                write_obj.write(
                    'mi<mk<header-beg\n')
                for line in read_obj:
                    write_obj.write(line)
                write_obj.write(
                    'mi<mk<header-end\n')
        os.remove(self.__header_holder)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "header_separate.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
    def update_info(self, file, copy):
        """
        Unused method
        """
        self.__file = file
        self.__copy = copy
    def __get_head_body_func(self, line):
        """
        Process lines in main body and look for beginning of headers.
        """
        # mi<mk<footnt-end
        if self.__token_info == 'mi<mk<header-beg':
            self.__state = 'head'
        else:
            self.__write_obj.write(line)
    def __get_head_head_func(self, line):
        """
        Copy headers and footers from bottom of file to a separate, temporary file.
        """
        if self.__token_info == 'mi<mk<header-end':
            self.__state = 'body'
        else:
            self.__write_to_head_obj.write(line)
    def __get_headers(self):
        """
        Private method to remove headers from the main file. Read one line
        from the main file at a time. If the state is 'head', call on the
        private __get_head_head_func. Otherwise, call on the
        __get_head_body_func. These two functions do the work of separating
        the headers from the body.
        """
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                with open_for_write(self.__header_holder) as self.__write_to_head_obj:
                    for line in read_obj:
                        self.__token_info = line[:16]
                        if self.__state == 'body':
                            self.__get_head_body_func(line)
                        elif self.__state == 'head':
                            self.__get_head_head_func(line)
    def __get_head_from_temp(self, num):
        """
        Private method for joining headers and footers to body. This method
        reads from the temporary file until the proper header marker is
        found. It collects all the tokens until the end of the header, and
        returns them as a string.
        """
        look_for = 'mi<mk<header-ope<' + num + '\n'
        found_head = False
        string_to_return = ''
        for line in self.__read_from_head_obj:
            if found_head:
                if line == 'mi<mk<header-clo\n':
                    return string_to_return
                string_to_return += line
            else:
                if line == look_for:
                    found_head = True
    def __join_from_temp(self):
        """
        Private method for rejoining headers to body. Read from the
        newly-created, temporary file that contains the body text but no
        headers. Each time a header marker is found, call the private
        method __get_head_from_temp(). This method will return a string to
        print out to the third file.
        If no header marker is found, simply print out the token (line).
        """
        self.__read_from_head_obj = open_for_read(self.__header_holder)
        self.__write_obj = open_for_write(self.__write_to2)
        with open_for_read(self.__write_to) as read_obj:
            for line in read_obj:
                if line[:16] == 'mi<mk<header-ind':
                    # line[17:-1] is the zero-padded header number
                    line = self.__get_head_from_temp(line[17:-1])
                self.__write_obj.write(line)
    def join_headers(self):
        """
        Join the headers from the bottom of the file and put them in their
        former places. First, remove the headers from the bottom of the
        input file, outputting them to a temporary file. This creates two new
        files, one without headers, and one of just headers. Open both
        these files to read. When a marker is found in the main file, find the
        corresponding marker in the header file. Output the mix of body and
        headers to a third file.
        """
        if not self.__found_a_header:
            return
        self.__write_to2 = better_mktemp()
        self.__state = 'body'
        self.__get_headers()
        self.__join_from_temp()
        self.__write_obj.close()
        self.__read_from_head_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "header_join.data")
        # NOTE(review): the joined output was written to self.__write_to2,
        # yet self.__write_to (the header-less intermediate) is what is
        # copied/renamed over the original here, and __write_to2 is never
        # removed -- verify which file is intended.
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        os.remove(self.__header_holder)
| 10,613 | Python | .py | 243 | 32.720165 | 86 | 0.515989 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,285 | check_brackets.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/check_brackets.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
from . import open_for_read
class CheckBrackets:
    """
    Check that every numbered open-bracket token in the tokenised file has
    a matching close-bracket token with the same number.
    """

    def __init__(self, bug_handler=None, file=None):
        """
        Optional:
            'bug_handler' -- kept for interface compatibility (unused here)
            'file' -- the tokenised file to check
        """
        self.__file = file
        self.__bug_handler = bug_handler
        self.__bracket_count = 0          # current nesting depth
        self.__ob_count = 0
        self.__cb_count = 0
        self.__open_bracket_num = []      # stack of open bracket numbers

    def open_brack(self, line):
        """Push the bracket number (last 4 chars before the newline) of an
        open-bracket token and deepen the nesting count."""
        num = line[-5:-1]
        self.__open_bracket_num.append(num)
        self.__bracket_count += 1

    def close_brack(self, line):
        """
        Pop the most recent open bracket and verify its number matches this
        close-bracket token. Return False on stack underflow (a close with
        no matching open) or on a number mismatch; True otherwise.
        """
        num = line[-5:-1]
        try:
            last_num = self.__open_bracket_num.pop()
        except IndexError:
            # was a bare 'except:'; pop() on an empty list raises IndexError
            return False
        if num != last_num:
            return False
        self.__bracket_count -= 1
        return True

    def check_brackets(self):
        """
        Scan the token file line by line.
        Returns:
            (True, message) when all brackets balance,
            (False, explanation) on the first mismatch or if any brackets
            remain open at end of file.
        """
        line_count = 0
        with open_for_read(self.__file) as read_obj:
            for line in read_obj:
                line_count += 1
                self.__token_info = line[:16]
                if self.__token_info == 'ob<nu<open-brack':
                    self.open_brack(line)
                if self.__token_info == 'cb<nu<clos-brack':
                    if not self.close_brack(line):
                        return (False, "closed bracket doesn't match, line %s" % line_count)
        if self.__bracket_count != 0:
            msg = ('At end of file open and closed brackets don\'t match\n'
                'total number of brackets is %s') % self.__bracket_count
            return (False, msg)
        return (True, "Brackets match!")
| 2,429 | Python | .py | 52 | 37.923077 | 92 | 0.424831 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,286 | output.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/output.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from . import open_for_read, open_for_write
# , codecs
class Output:
    """
    Write the finished XML file to an output directory, a named file, or
    standard output.
    """

    def __init__(self,
            file,
            orig_file,
            output_dir=None,
            out_file=None,
            no_ask=True
        ):
        """
        Required:
            'file' -- xml file ready to output
            'orig_file' -- original rtf file (its base name is used to
            derive the output name when writing to a directory)
        Optional:
            'output_dir' -- directory to write into
            'out_file' -- the file to output to
            'no_ask' -- never prompt before overwriting an existing file
        Returns:
            nothing
        """
        self.__file = file
        self.__orig_file = orig_file
        self.__output_dir = output_dir
        self.__no_ask = no_ask
        self.__out_file = out_file

    def output(self):
        """
        Required:
            nothing
        Returns:
            nothing
        Logic:
            output the line to the screen if no output file given. Otherwise,
            output to the file.
        """
        if self.__output_dir:
            self.__output_to_dir_func()
        elif self.__out_file:
            self.__output_to_file_func()
        else:
            self.__output_to_standard_func()

    def __output_to_dir_func(self):
        """
        Requires:
            nothing
        Returns:
            nothing
        Logic:
            Create a file within the output directory.
            Read one file at a time. Output line to the newly-created file.
            Prompts before overwriting an existing file unless 'no_ask'
            was given.
        """
        base_name = os.path.basename(self.__orig_file)
        base_name, ext = os.path.splitext(base_name)
        output_file = os.path.join(self.__output_dir, '%s.xml' % base_name)
        # change if user wants to output to a specific file
        if self.__out_file:
            output_file = os.path.join(self.__output_dir, self.__out_file)
        user_response = 'o'
        if os.path.isfile(output_file) and not self.__no_ask:
            msg = 'Do you want to overwrite %s?\n' % output_file
            msg += ('Type "o" to overwrite.\n'
                'Type any other key to print to standard output.\n')
            sys.stderr.write(msg)
            user_response = input()
        if user_response == 'o':
            with open_for_read(self.__file) as read_obj:
                # bug fix: was open_for_write(self.output_file), but no such
                # attribute exists (the local variable is 'output_file'),
                # which raised AttributeError on this path
                with open_for_write(output_file) as write_obj:
                    for line in read_obj:
                        write_obj.write(line)
        else:
            self.__output_to_standard_func()

    def __output_to_file_func(self):
        """
        Required:
            nothing
        Returns:
            nothing
        Logic:
            read one line at a time. Output to the named file.
        """
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__out_file) as write_obj:
                for line in read_obj:
                    write_obj.write(line)

    def __output_to_standard_func(self):
        """
        Required:
            nothing
        Returns:
            nothing
        Logic:
            read one line at a time. Output to standard output.
        """
        with open_for_read(self.__file) as read_obj:
            for line in read_obj:
                sys.stdout.write(line)
| 4,114 | Python | .py | 113 | 26.79646 | 87 | 0.455411 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,287 | process_tokens.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/process_tokens.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
from calibre.ebooks.rtf2xml import check_brackets, copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class ProcessTokens:
"""
Process each token on a line and add information that will be useful for
later processing. Information will be put on one line, delimited by "<"
for main fields, and ">" for sub fields
"""
    def __init__(self,
            in_file,
            exception_handler,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file' -- the tokenised file to process
            'exception_handler' -- stored for use during processing
            'bug_handler' -- exception class raised on internal errors
        Optional:
            'copy' -- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        # temporary output file for the processed tokens
        self.__write_to = better_mktemp()
        self.initiate_token_dict()
        # self.initiate_token_actions()
        self.compile_expressions()
        self.__bracket_count=0
        self.__exception_handler = exception_handler
        # NOTE(review): __bug_handler was already assigned above -- this
        # second assignment is redundant
        self.__bug_handler = bug_handler
    def compile_expressions(self):
        """Pre-compile the regular expressions used while processing tokens."""
        # splits a token into its alphabetic head and the remainder
        self.__num_exp = re.compile(r"([a-zA-Z]+)(.*)")
        # matches a single '&...;' entity, non-greedily
        self.__utf_exp = re.compile(r'(&.*?;)')
    def initiate_token_dict(self):
        """
        Build the lookup tables used by process_cw().

        self.dict_token maps an RTF control word to a tuple of
        (two-letter group prefix, fixed-width descriptive name, handler
        method).  Also resets self.__return_code and builds the numeric
        lookup tables for list-numbering types and language codes.
        """
        self.__return_code = 0
        self.dict_token={
            # unicode
            'mshex'             :   ('nu', '__________', self.__ms_hex_func),
            # brackets
            '{'                 :   ('nu', '{', self.ob_func),
            '}'                 :   ('nu', '}', self.cb_func),
            # microsoft characters
            'ldblquote'         :   ('mc', 'ldblquote', self.ms_sub_func),
            'rdblquote'         :   ('mc', 'rdblquote', self.ms_sub_func),
            'rquote'            :   ('mc', 'rquote', self.ms_sub_func),
            'lquote'            :   ('mc', 'lquote', self.ms_sub_func),
            'emdash'            :   ('mc', 'emdash', self.ms_sub_func),
            'endash'            :   ('mc', 'endash', self.ms_sub_func),
            'bullet'            :   ('mc', 'bullet', self.ms_sub_func),
            '~'                 :   ('mc', '~', self.ms_sub_func),
            'tab'               :   ('mc', 'tab', self.ms_sub_func),
            '_'                 :   ('mc', '_', self.ms_sub_func),
            ';'                 :   ('mc', ';', self.ms_sub_func),
            # this must be wrong
            '-'                 :   ('mc', '-', self.ms_sub_func),
            'line'              :   ('mi', 'hardline-break', self.direct_conv_func),  # calibre
            # misc    =>   ml
            '*'                 :   ('ml', 'asterisk__', self.default_func),
            ':'                 :   ('ml', 'colon_____', self.default_func),
            # text
            'backslash'         :   ('nu', '\\', self.text_func),
            'ob'                :   ('nu', '{', self.text_func),
            'cb'                :   ('nu', '}', self.text_func),
            # paragraph formatting => pf
            'page'              :   ('pf', 'page-break', self.default_func),
            'par'               :   ('pf', 'par-end___', self.default_func),
            'pard'              :   ('pf', 'par-def___', self.default_func),
            'keepn'             :   ('pf', 'keep-w-nex', self.bool_st_func),
            'widctlpar'         :   ('pf', 'widow-cntl', self.bool_st_func),
            'adjustright'       :   ('pf', 'adjust-rgt', self.bool_st_func),
            'lang'              :   ('pf', 'language__', self.__language_func),
            'ri'                :   ('pf', 'right-inde', self.divide_by_20),
            'fi'                :   ('pf', 'fir-ln-ind', self.divide_by_20),
            'li'                :   ('pf', 'left-inden', self.divide_by_20),
            'sb'                :   ('pf', 'space-befo', self.divide_by_20),
            'sa'                :   ('pf', 'space-afte', self.divide_by_20),
            'sl'                :   ('pf', 'line-space', self.divide_by_20),
            'deftab'            :   ('pf', 'default-ta', self.divide_by_20),
            'ql'                :   ('pf', 'align_____<left', self.two_part_func),
            'qc'                :   ('pf', 'align_____<cent', self.two_part_func),
            'qj'                :   ('pf', 'align_____<just', self.two_part_func),
            'qr'                :   ('pf', 'align_____<right', self.two_part_func),
            'nowidctlpar'       :   ('pf', 'widow-cntr<false', self.two_part_func),
            'tx'                :   ('pf', 'tab-stop__', self.divide_by_20),
            'tb'                :   ('pf', 'tab-bar-st', self.divide_by_20),
            'tqr'               :   ('pf', 'tab-right_', self.default_func),
            'tqdec'             :   ('pf', 'tab-dec___', self.default_func),
            'tqc'               :   ('pf', 'tab-center', self.default_func),
            'tlul'              :   ('pf', 'leader-und', self.default_func),
            'tlhyph'            :   ('pf', 'leader-hyp', self.default_func),
            'tldot'             :   ('pf', 'leader-dot', self.default_func),
            # stylesheet = > ss
            'stylesheet'        :   ('ss', 'style-shet', self.default_func),
            'sbasedon'          :   ('ss', 'based-on__', self.default_func),
            'snext'             :   ('ss', 'next-style', self.default_func),
            'cs'                :   ('ss', 'char-style', self.default_func),
            's'                 :   ('ss', 'para-style', self.default_func),
            # graphics => gr
            'pict'              :   ('gr', 'picture___', self.default_func),
            'objclass'          :   ('gr', 'obj-class_', self.default_func),
            'macpict'           :   ('gr', 'mac-pic___', self.default_func),
            # section => sc
            'sect'              :   ('sc', 'section___', self.default_func),
            'sectd'             :   ('sc', 'sect-defin', self.default_func),
            'endhere'           :   ('sc', 'sect-note_', self.default_func),
            # list=> ls
            'pntext'            :   ('ls', 'list-text_', self.default_func),
            # this line must be wrong because it duplicates an earlier one
            'listtext'          :   ('ls', 'list-text_', self.default_func),
            'pn'                :   ('ls', 'list______', self.default_func),
            'pnseclvl'          :   ('ls', 'list-level', self.default_func),
            'pncard'            :   ('ls', 'list-cardi', self.bool_st_func),
            'pndec'             :   ('ls', 'list-decim', self.bool_st_func),
            'pnucltr'           :   ('ls', 'list-up-al', self.bool_st_func),
            'pnucrm'            :   ('ls', 'list-up-ro', self.bool_st_func),
            'pnord'             :   ('ls', 'list-ord__', self.bool_st_func),
            'pnordt'            :   ('ls', 'list-ordte', self.bool_st_func),
            'pnlvlblt'          :   ('ls', 'list-bulli', self.bool_st_func),
            'pnlvlbody'         :   ('ls', 'list-simpi', self.bool_st_func),
            'pnlvlcont'         :   ('ls', 'list-conti', self.bool_st_func),
            'pnhang'            :   ('ls', 'list-hang_', self.bool_st_func),
            'pntxtb'            :   ('ls', 'list-tebef', self.bool_st_func),
            'ilvl'              :   ('ls', 'list-level', self.default_func),
            'ls'                :   ('ls', 'list-id___', self.default_func),
            'pnstart'           :   ('ls', 'list-start', self.default_func),
            'itap'              :   ('ls', 'nest-level', self.default_func),
            'leveltext'         :   ('ls', 'level-text', self.default_func),
            'levelnumbers'      :   ('ls', 'level-numb', self.default_func),
            'list'              :   ('ls', 'list-in-tb', self.default_func),
            'listlevel'         :   ('ls', 'list-tb-le', self.default_func),
            'listname'          :   ('ls', 'list-name_', self.default_func),
            'listtemplateid'    :   ('ls', 'ls-tem-id_', self.default_func),
            'leveltemplateid'   :   ('ls', 'lv-tem-id_', self.default_func),
            'listhybrid'        :   ('ls', 'list-hybri', self.default_func),
            'levelstartat'      :   ('ls', 'level-star', self.default_func),
            'levelspace'        :   ('ls', 'level-spac', self.divide_by_20),
            'levelindent'       :   ('ls', 'level-inde', self.default_func),
            'levelnfc'          :   ('ls', 'level-type', self.__list_type_func),
            'levelnfcn'         :   ('ls', 'level-type', self.__list_type_func),
            'listid'            :   ('ls', 'lis-tbl-id', self.default_func),
            'listoverride'      :   ('ls', 'lis-overid', self.default_func),
            # duplicate
            'pnlvl'             :   ('ls', 'list-level', self.default_func),
            # root info => ri
            'rtf'               :   ('ri', 'rtf_______', self.default_func),
            'deff'              :   ('ri', 'deflt-font', self.default_func),
            'mac'               :   ('ri', 'macintosh_', self.default_func),
            'pc'                :   ('ri', 'pc________', self.default_func),
            'pca'               :   ('ri', 'pca_______', self.default_func),
            'ansi'              :   ('ri', 'ansi______', self.default_func),
            'ansicpg'           :   ('ri', 'ansi-codpg', self.default_func),
            # notes => nt
            'footnote'          :   ('nt', 'footnote__', self.default_func),
            'ftnalt'            :   ('nt', 'type______<endnote', self.two_part_func),
            # anchor => an
            'tc'                :   ('an', 'toc_______', self.default_func),
            'bkmkstt'           :   ('an', 'book-mk-st', self.default_func),
            'bkmkstart'         :   ('an', 'book-mk-st', self.default_func),
            'bkmkend'           :   ('an', 'book-mk-en', self.default_func),
            'xe'                :   ('an', 'index-mark', self.default_func),
            'rxe'               :   ('an', 'place_____', self.default_func),
            # index => in
            'bxe'               :   ('in', 'index-bold', self.default_func),
            'ixe'               :   ('in', 'index-ital', self.default_func),
            'txe'               :   ('in', 'index-see_', self.default_func),
            # table of contents => tc
            'tcl'               :   ('tc', 'toc-level_', self.default_func),
            'tcn'               :   ('tc', 'toc-sup-nu', self.default_func),
            # field => fd
            'field'             :   ('fd', 'field_____', self.default_func),
            'fldinst'           :   ('fd', 'field-inst', self.default_func),
            'fldrslt'           :   ('fd', 'field-rslt', self.default_func),
            'datafield'         :   ('fd', 'datafield_', self.default_func),
            # info-tables => it
            'fonttbl'           :   ('it', 'font-table', self.default_func),
            'colortbl'          :   ('it', 'colr-table', self.default_func),
            'listoverridetable' :   ('it', 'lovr-table', self.default_func),
            'listtable'         :   ('it', 'listtable_', self.default_func),
            'revtbl'            :   ('it', 'revi-table', self.default_func),
            # character info => ci
            'b'                 :   ('ci', 'bold______', self.bool_st_func),
            'blue'              :   ('ci', 'blue______', self.color_func),
            'caps'              :   ('ci', 'caps______', self.bool_st_func),
            'cf'                :   ('ci', 'font-color', self.colorz_func),
            'chftn'             :   ('ci', 'footnot-mk', self.bool_st_func),
            'dn'                :   ('ci', 'font-down_', self.divide_by_2),
            'embo'              :   ('ci', 'emboss____', self.bool_st_func),
            'f'                 :   ('ci', 'font-style', self.default_func),
            'fs'                :   ('ci', 'font-size_', self.divide_by_2),
            'green'             :   ('ci', 'green_____', self.color_func),
            'i'                 :   ('ci', 'italics___', self.bool_st_func),
            'impr'              :   ('ci', 'engrave___', self.bool_st_func),
            'outl'              :   ('ci', 'outline___', self.bool_st_func),
            'plain'             :   ('ci', 'plain_____', self.bool_st_func),
            'red'               :   ('ci', 'red_______', self.color_func),
            'scaps'             :   ('ci', 'small-caps', self.bool_st_func),
            'shad'              :   ('ci', 'shadow____', self.bool_st_func),
            'strike'            :   ('ci', 'strike-thr', self.bool_st_func),
            'striked'           :   ('ci', 'dbl-strike', self.bool_st_func),
            'sub'               :   ('ci', 'subscript_', self.bool_st_func),
            'super'             :   ('ci', 'superscrip', self.bool_st_func),
            'nosupersub'        :   ('ci', 'no-su-supe', self.__no_sup_sub_func),
            'up'                :   ('ci', 'font-up___', self.divide_by_2),
            'v'                 :   ('ci', 'hidden____', self.default_func),
            # underline
            # can't see why it isn't a char info: 'ul'=>'ci'
            'ul'                :   ('ci', 'underlined<continous', self.two_part_func),
            'uld'               :   ('ci', 'underlined<dotted', self.two_part_func),
            'uldash'            :   ('ci', 'underlined<dash', self.two_part_func),
            'uldashd'           :   ('ci', 'underlined<dash-dot', self.two_part_func),
            'uldashdd'          :   ('ci', 'underlined<dash-dot-dot', self.two_part_func),
            'uldb'              :   ('ci', 'underlined<double', self.two_part_func),
            'ulhwave'           :   ('ci', 'underlined<heavy-wave', self.two_part_func),
            'ulldash'           :   ('ci', 'underlined<long-dash', self.two_part_func),
            'ulth'              :   ('ci', 'underlined<thich', self.two_part_func),
            'ulthd'             :   ('ci', 'underlined<thick-dotted', self.two_part_func),
            'ulthdash'          :   ('ci', 'underlined<thick-dash', self.two_part_func),
            'ulthdashd'         :   ('ci', 'underlined<thick-dash-dot', self.two_part_func),
            'ulthdashdd'        :   ('ci', 'underlined<thick-dash-dot-dot', self.two_part_func),
            'ulthldash'         :   ('ci', 'underlined<thick-long-dash', self.two_part_func),
            'ululdbwave'        :   ('ci', 'underlined<double-wave', self.two_part_func),
            'ulw'               :   ('ci', 'underlined<word', self.two_part_func),
            'ulwave'            :   ('ci', 'underlined<wave', self.two_part_func),
            'ulnone'            :   ('ci', 'underlined<false', self.two_part_func),
            # table => tb
            'trowd'             :   ('tb', 'row-def___', self.default_func),
            'cell'              :   ('tb', 'cell______', self.default_func),
            'row'               :   ('tb', 'row_______', self.default_func),
            'intbl'             :   ('tb', 'in-table__', self.default_func),
            'cols'              :   ('tb', 'columns___', self.default_func),
            'trleft'            :   ('tb', 'row-pos-le', self.divide_by_20),
            'cellx'             :   ('tb', 'cell-posit', self.divide_by_20),
            'trhdr'             :   ('tb', 'row-header', self.default_func),
            # preamble => pr
            # document information => di
            # TODO integrate \userprops
            'info'              :   ('di', 'doc-info__', self.default_func),
            'title'             :   ('di', 'title_____', self.default_func),
            'author'            :   ('di', 'author____', self.default_func),
            'operator'          :   ('di', 'operator__', self.default_func),
            'manager'           :   ('di', 'manager___', self.default_func),
            'company'           :   ('di', 'company___', self.default_func),
            'keywords'          :   ('di', 'keywords__', self.default_func),
            'category'          :   ('di', 'category__', self.default_func),
            'doccomm'           :   ('di', 'doc-notes_', self.default_func),
            'comment'           :   ('di', 'doc-notes_', self.default_func),
            'subject'           :   ('di', 'subject___', self.default_func),
            'creatim'           :   ('di', 'create-tim', self.default_func),
            'yr'                :   ('di', 'year______', self.default_func),
            'mo'                :   ('di', 'month_____', self.default_func),
            'dy'                :   ('di', 'day_______', self.default_func),
            'min'               :   ('di', 'minute____', self.default_func),
            'sec'               :   ('di', 'second____', self.default_func),
            'revtim'            :   ('di', 'revis-time', self.default_func),
            'edmins'            :   ('di', 'edit-time_', self.default_func),
            'printim'           :   ('di', 'print-time', self.default_func),
            'buptim'            :   ('di', 'backuptime', self.default_func),
            'nofwords'          :   ('di', 'num-of-wor', self.default_func),
            'nofchars'          :   ('di', 'num-of-chr', self.default_func),
            'nofcharsws'        :   ('di', 'numofchrws', self.default_func),
            'nofpages'          :   ('di', 'num-of-pag', self.default_func),
            'version'           :   ('di', 'version___', self.default_func),
            'vern'              :   ('di', 'intern-ver', self.default_func),
            'hlinkbase'         :   ('di', 'linkbase__', self.default_func),
            'id'                :   ('di', 'internalID', self.default_func),
            # headers and footers => hf
            'headerf'           :   ('hf', 'head-first', self.default_func),
            'headerl'           :   ('hf', 'head-left_', self.default_func),
            'headerr'           :   ('hf', 'head-right', self.default_func),
            'footerf'           :   ('hf', 'foot-first', self.default_func),
            'footerl'           :   ('hf', 'foot-left_', self.default_func),
            'footerr'           :   ('hf', 'foot-right', self.default_func),
            'header'            :   ('hf', 'header____', self.default_func),
            'footer'            :   ('hf', 'footer____', self.default_func),
            # page => pa
            'margl'             :   ('pa', 'margin-lef', self.divide_by_20),
            'margr'             :   ('pa', 'margin-rig', self.divide_by_20),
            'margb'             :   ('pa', 'margin-bot', self.divide_by_20),
            'margt'             :   ('pa', 'margin-top', self.divide_by_20),
            'gutter'            :   ('pa', 'gutter____', self.divide_by_20),
            'paperw'            :   ('pa', 'paper-widt', self.divide_by_20),
            'paperh'            :   ('pa', 'paper-hght', self.divide_by_20),
            # annotation => an
            'annotation'        :   ('an', 'annotation', self.default_func),
            # border => bd
            'trbrdrh'           :   ('bd', 'bor-t-r-hi', self.default_func),
            'trbrdrv'           :   ('bd', 'bor-t-r-vi', self.default_func),
            'trbrdrt'           :   ('bd', 'bor-t-r-to', self.default_func),
            'trbrdrl'           :   ('bd', 'bor-t-r-le', self.default_func),
            'trbrdrb'           :   ('bd', 'bor-t-r-bo', self.default_func),
            'trbrdrr'           :   ('bd', 'bor-t-r-ri', self.default_func),
            'clbrdrb'           :   ('bd', 'bor-cel-bo', self.default_func),
            'clbrdrt'           :   ('bd', 'bor-cel-to', self.default_func),
            'clbrdrl'           :   ('bd', 'bor-cel-le', self.default_func),
            'clbrdrr'           :   ('bd', 'bor-cel-ri', self.default_func),
            'brdrb'             :   ('bd', 'bor-par-bo', self.default_func),
            'brdrt'             :   ('bd', 'bor-par-to', self.default_func),
            'brdrl'             :   ('bd', 'bor-par-le', self.default_func),
            'brdrr'             :   ('bd', 'bor-par-ri', self.default_func),
            'box'               :   ('bd', 'bor-par-bx', self.default_func),
            'chbrdr'            :   ('bd', 'bor-par-bo', self.default_func),
            'brdrbtw'           :   ('bd', 'bor-for-ev', self.default_func),
            'brdrbar'           :   ('bd', 'bor-outsid', self.default_func),
            'brdrnone'          :   ('bd', 'bor-none__<false', self.two_part_func),
            # border type => bt
            'brdrs'             :   ('bt', 'bdr-single', self.default_func),
            'brdrth'            :   ('bt', 'bdr-doubtb', self.default_func),
            'brdrsh'            :   ('bt', 'bdr-shadow', self.default_func),
            'brdrdb'            :   ('bt', 'bdr-double', self.default_func),
            'brdrdot'           :   ('bt', 'bdr-dotted', self.default_func),
            'brdrdash'          :   ('bt', 'bdr-dashed', self.default_func),
            'brdrhair'          :   ('bt', 'bdr-hair__', self.default_func),
            'brdrinset'         :   ('bt', 'bdr-inset_', self.default_func),
            'brdrdashsm'        :   ('bt', 'bdr-das-sm', self.default_func),
            'brdrdashd'         :   ('bt', 'bdr-dot-sm', self.default_func),
            'brdrdashdd'        :   ('bt', 'bdr-dot-do', self.default_func),
            'brdroutset'        :   ('bt', 'bdr-outset', self.default_func),
            'brdrtriple'        :   ('bt', 'bdr-trippl', self.default_func),
            'brdrtnthsg'        :   ('bt', 'bdr-thsm__', self.default_func),
            'brdrthtnsg'        :   ('bt', 'bdr-htsm__', self.default_func),
            'brdrtnthtnsg'      :   ('bt', 'bdr-hthsm_', self.default_func),
            'brdrtnthmg'        :   ('bt', 'bdr-thm___', self.default_func),
            'brdrthtnmg'        :   ('bt', 'bdr-htm___', self.default_func),
            'brdrtnthtnmg'      :   ('bt', 'bdr-hthm__', self.default_func),
            'brdrtnthlg'        :   ('bt', 'bdr-thl___', self.default_func),
            'brdrtnthtnlg'      :   ('bt', 'bdr-hthl__', self.default_func),
            'brdrwavy'          :   ('bt', 'bdr-wavy__', self.default_func),
            'brdrwavydb'        :   ('bt', 'bdr-d-wav_', self.default_func),
            'brdrdashdotstr'    :   ('bt', 'bdr-strip_', self.default_func),
            'brdremboss'        :   ('bt', 'bdr-embos_', self.default_func),
            'brdrengrave'       :   ('bt', 'bdr-engra_', self.default_func),
            'brdrframe'         :   ('bt', 'bdr-frame_', self.default_func),
            'brdrw'             :   ('bt', 'bdr-li-wid', self.divide_by_20),
            'brsp'              :   ('bt', 'bdr-sp-wid', self.divide_by_20),
            'brdrcf'            :   ('bt', 'bdr-color_', self.default_func),
            # comments
            # 'comment'         :   ('cm', 'comment___', self.default_func),
        }
        self.__number_type_dict = {
            0: 'Arabic',
            1: 'uppercase Roman numeral',
            2: 'lowercase Roman numeral',
            3: 'uppercase letter',
            4: 'lowercase letter',
            5: 'ordinal number',
            6: 'cardianl text number',
            7: 'ordinal text number',
            10: 'Kanji numbering without the digit character',
            11: 'Kanji numbering with the digit character',
            1246: 'phonetic Katakana characters in aiueo order',
            1346: 'phonetic katakana characters in iroha order',
            14: 'double byte character',
            15: 'single byte character',
            16: 'Kanji numbering 3',
            17: 'Kanji numbering 4',
            18: 'Circle numbering' ,
            19: 'double-byte Arabic numbering',
            2046: 'phonetic double-byte Katakana characters',
            2146: 'phonetic double-byte katakana characters',
            22: 'Arabic with leading zero',
            23: 'bullet',
            24: 'Korean numbering 2',
            25: 'Korean numbering 1',
            26: 'Chinese numbering 1',
            27: 'Chinese numbering 2',
            28: 'Chinese numbering 3',
            29: 'Chinese numbering 4',
            30: 'Chinese Zodiac numbering 1',
            31: 'Chinese Zodiac numbering 2',
            32: 'Chinese Zodiac numbering 3',
            33: 'Taiwanese double-byte numbering 1',
            34: 'Taiwanese double-byte numbering 2',
            35: 'Taiwanese double-byte numbering 3',
            36: 'Taiwanese double-byte numbering 4',
            37: 'Chinese double-byte numbering 1',
            38: 'Chinese double-byte numbering 2',
            39: 'Chinese double-byte numbering 3',
            40: 'Chinese double-byte numbering 4',
            41: 'Korean double-byte numbering 1',
            42: 'Korean double-byte numbering 2',
            43: 'Korean double-byte numbering 3',
            44: 'Korean double-byte numbering 4',
            45: 'Hebrew non-standard decimal',
            46: 'Arabic Alif Ba Tah',
            47: 'Hebrew Biblical standard',
            48: 'Arabic Abjad style',
            255: 'No number',
            }
        self.__language_dict = {
            1078      :    'Afrikaans',
            1052      :    'Albanian',
            1025      :    'Arabic',
            5121      :    'Arabic Algeria',
            15361     :    'Arabic Bahrain',
            3073      :    'Arabic Egypt',
            1         :    'Arabic General',
            2049      :    'Arabic Iraq',
            11265     :    'Arabic Jordan',
            13313     :    'Arabic Kuwait',
            12289     :    'Arabic Lebanon',
            4097      :    'Arabic Libya',
            6145      :    'Arabic Morocco',
            8193      :    'Arabic Oman',
            16385     :    'Arabic Qatar',
            10241     :    'Arabic Syria',
            7169      :    'Arabic Tunisia',
            14337     :    'Arabic U.A.E.',
            9217      :    'Arabic Yemen',
            1067      :    'Armenian',
            1101      :    'Assamese',
            2092      :    'Azeri Cyrillic',
            1068      :    'Azeri Latin',
            1069      :    'Basque',
            1093      :    'Bengali',
            4122      :    'Bosnia Herzegovina',
            1026      :    'Bulgarian',
            1109      :    'Burmese',
            1059      :    'Byelorussian',
            1027      :    'Catalan',
            2052      :    'Chinese China',
            4         :    'Chinese General',
            3076      :    'Chinese Hong Kong',
            4100      :    'Chinese Singapore',
            1028      :    'Chinese Taiwan',
            1050      :    'Croatian',
            1029      :    'Czech',
            1030      :    'Danish',
            2067      :    'Dutch Belgium',
            1043      :    'Dutch Standard',
            3081      :    'English Australia',
            10249     :    'English Belize',
            2057      :    'English British',
            4105      :    'English Canada',
            9225      :    'English Caribbean',
            9         :    'English General',
            6153      :    'English Ireland',
            8201      :    'English Jamaica',
            5129      :    'English New Zealand',
            13321     :    'English Philippines',
            7177      :    'English South Africa',
            11273     :    'English Trinidad',
            1033      :    'English United States',
            1061      :    'Estonian',
            1080      :    'Faerose',
            1065      :    'Farsi',
            1035      :    'Finnish',
            1036      :    'French',
            2060      :    'French Belgium',
            11276     :    'French Cameroon',
            3084      :    'French Canada',
            12300     :    'French Cote d\'Ivoire',
            5132      :    'French Luxembourg',
            13324     :    'French Mali',
            6156      :    'French Monaco',
            8204      :    'French Reunion',
            10252     :    'French Senegal',
            4108      :    'French Swiss',
            7180      :    'French West Indies',
            9228      :    'French Democratic Republic of the Congo',
            1122      :    'Frisian',
            1084      :    'Gaelic',
            2108      :    'Gaelic Ireland',
            1110      :    'Galician',
            1079      :    'Georgian',
            1031      :    'German',
            3079      :    'German Austrian',
            5127      :    'German Liechtenstein',
            4103      :    'German Luxembourg',
            2055      :    'German Switzerland',
            1032      :    'Greek',
            1095      :    'Gujarati',
            1037      :    'Hebrew',
            1081      :    'Hindi',
            1038      :    'Hungarian',
            1039      :    'Icelandic',
            1057      :    'Indonesian',
            1040      :    'Italian',
            2064      :    'Italian Switzerland',
            1041      :    'Japanese',
            1099      :    'Kannada',
            1120      :    'Kashmiri',
            2144      :    'Kashmiri India',
            1087      :    'Kazakh',
            1107      :    'Khmer',
            1088      :    'Kirghiz',
            1111      :    'Konkani',
            1042      :    'Korean',
            2066      :    'Korean Johab',
            1108      :    'Lao',
            1062      :    'Latvian',
            1063      :    'Lithuanian',
            2087      :    'Lithuanian Classic',
            1086      :    'Malay',
            2110      :    'Malay Brunei Darussalam',
            1100      :    'Malayalam',
            1082      :    'Maltese',
            1112      :    'Manipuri',
            1102      :    'Marathi',
            1104      :    'Mongolian',
            1121      :    'Nepali',
            2145      :    'Nepali India',
            1044      :    'Norwegian Bokmal',
            2068      :    'Norwegian Nynorsk',
            1096      :    'Oriya',
            1045      :    'Polish',
            1046      :    'Portuguese (Brazil)',
            2070      :    'Portuguese (Portugal)',
            1094      :    'Punjabi',
            1047      :    'Rhaeto-Romanic',
            1048      :    'Romanian',
            2072      :    'Romanian Moldova',
            1049      :    'Russian',
            2073      :    'Russian Moldova',
            1083      :    'Sami Lappish',
            1103      :    'Sanskrit',
            3098      :    'Serbian Cyrillic',
            2074      :    'Serbian Latin',
            1113      :    'Sindhi',
            1051      :    'Slovak',
            1060      :    'Slovenian',
            1070      :    'Sorbian',
            11274     :    'Spanish Argentina',
            16394     :    'Spanish Bolivia',
            13322     :    'Spanish Chile',
            9226      :    'Spanish Colombia',
            5130      :    'Spanish Costa Rica',
            7178      :    'Spanish Dominican Republic',
            12298     :    'Spanish Ecuador',
            17418     :    'Spanish El Salvador',
            4106      :    'Spanish Guatemala',
            18442     :    'Spanish Honduras',
            2058      :    'Spanish Mexico',
            3082      :    'Spanish Modern',
            19466     :    'Spanish Nicaragua',
            6154      :    'Spanish Panama',
            15370     :    'Spanish Paraguay',
            10250     :    'Spanish Peru',
            20490     :    'Spanish Puerto Rico',
            1034      :    'Spanish Traditional',
            14346     :    'Spanish Uruguay',
            8202      :    'Spanish Venezuela',
            1072      :    'Sutu',
            1089      :    'Swahili',
            1053      :    'Swedish',
            2077      :    'Swedish Finland',
            1064      :    'Tajik',
            1097      :    'Tamil',
            1092      :    'Tatar',
            1098      :    'Telugu',
            1054      :    'Thai',
            1105      :    'Tibetan',
            1073      :    'Tsonga',
            1074      :    'Tswana',
            1055      :    'Turkish',
            1090      :    'Turkmen',
            1058      :    'Ukranian',
            1056      :    'Urdu',
            2080      :    'Urdu India',
            2115      :    'Uzbek Cyrillic',
            1091      :    'Uzbek Latin',
            1075      :    'Venda',
            1066      :    'Vietnamese',
            1106      :    'Welsh',
            1076      :    'Xhosa',
            1085      :    'Yiddish',
            1077      :    'Zulu',
            1024      :    'Unkown',
            255       :    'Unkown',
            }
"""
# unknown
# These must get passed on because they occurred after \\*
'do' : ('un', 'unknown___', self.default_func),
'company' : ('un', 'company___', self.default_func),
'shpinst' : ('un', 'unknown___', self.default_func),
'panose' : ('un', 'unknown___', self.default_func),
'falt' : ('un', 'unknown___', self.default_func),
'listoverridetable' : ('un', 'unknown___', self.default_func),
'category' : ('un', 'unknown___', self.default_func),
'template' : ('un', 'unknown___', self.default_func),
'ud' : ('un', 'unknown___', self.default_func),
'formfield' : ('un', 'unknown___', self.default_func),
'ts' : ('un', 'unknown___', self.default_func),
'rsidtbl' : ('un', 'unknown___', self.default_func),
'generator' : ('un', 'unknown___', self.default_func),
'ftnsep' : ('un', 'unknown___', self.default_func),
'aftnsep' : ('un', 'unknown___', self.default_func),
'aftnsepc' : ('un', 'unknown___', self.default_func),
'aftncn' : ('un', 'unknown___', self.default_func),
'objclass' : ('un', 'unknown___', self.default_func),
'objdata' : ('un', 'unknown___', self.default_func),
'picprop' : ('un', 'unknown___', self.default_func),
'blipuid' : ('un', 'unknown___', self.default_func),
"""
def __ms_hex_func(self, pre, token, num):
num = num[1:] # chop off leading 0, which I added
num = num.upper() # the mappings store hex in caps
return 'tx<hx<__________<\'%s\n' % num # add an ' for the mappings
def ms_sub_func(self, pre, token, num):
return 'tx<mc<__________<%s\n' % token
def direct_conv_func(self, pre, token, num):
return 'mi<tg<empty_____<%s\n' % token
def default_func(self, pre, token, num):
if num is None:
num = 'true'
return f'cw<{pre}<{token}<nu<{num}\n'
def colorz_func(self, pre, token, num):
if num is None:
num = '0'
return f'cw<{pre}<{token}<nu<{num}\n'
def __list_type_func(self, pre, token, num):
type = 'arabic'
if num is None:
type = 'Arabic'
else:
try:
num = int(num)
except ValueError:
if self.__run_level > 3:
msg = 'Number "%s" cannot be converted to integer\n' % num
raise self.__bug_handler(msg)
type = self.__number_type_dict.get(num)
if type is None:
if self.__run_level > 3:
msg = 'No type for "%s" in self.__number_type_dict\n'
raise self.__bug_handler
type = 'Arabic'
return f'cw<{pre}<{token}<nu<{type}\n'
def __language_func(self, pre, token, num):
lang_name = self.__language_dict.get(int(re.search('[0-9]+', num).group()))
if not lang_name:
lang_name = "not defined"
if self.__run_level > 3:
msg = 'No entry for number "%s"' % num
raise self.__bug_handler(msg)
return f'cw<{pre}<{token}<nu<{lang_name}\n'
def two_part_func(self, pre, token, num):
list = token.split("<")
token = list[0]
num = list[1]
return f'cw<{pre}<{token}<nu<{num}\n'
# return 'cw<nu<nu<nu<%s>num<%s\n' % (token, num)
def divide_by_2(self, pre, token, num):
num = self.divide_num(num, 2)
return f'cw<{pre}<{token}<nu<{num}\n'
# return 'cw<nu<nu<nu<%s>%s<%s\n' % (token, num, token)
def divide_by_20(self, pre, token, num):
num = self.divide_num(num, 20)
return f'cw<{pre}<{token}<nu<{num}\n'
# return 'cw<nu<nu<nu<%s>%s<%s\n' % (token, num, token)
def text_func(self, pre, token, num=None):
return 'tx<nu<__________<%s\n' % token
def ob_func(self, pre, token, num=None):
self.__bracket_count += 1
return 'ob<nu<open-brack<%04d\n' % self.__bracket_count
def cb_func(self, pre, token, num=None):
line = 'cb<nu<clos-brack<%04d\n' % self.__bracket_count
self.__bracket_count -= 1
return line
def color_func(self, pre, token, num):
third_field = 'nu'
if num[-1] == ';':
num = num[:-1]
third_field = 'en'
num = '%X' % int(num)
if len(num) != 2:
num = "0" + num
return f'cw<{pre}<{token}<{third_field}<{num}\n'
# return 'cw<cl<%s<nu<nu<%s>%s<%s\n' % (third_field, token, num, token)
def bool_st_func(self, pre, token, num):
if num is None or num == '' or num == '1':
return f'cw<{pre}<{token}<nu<true\n'
# return 'cw<nu<nu<nu<%s>true<%s\n' % (token, token)
elif num == '0':
return f'cw<{pre}<{token}<nu<false\n'
# return 'cw<nu<nu<nu<%s>false<%s\n' % (token, token)
else:
msg = f"boolean should have some value module process tokens\ntoken is {token}\n'{num}'\n"
raise self.__bug_handler(msg)
def __no_sup_sub_func(self, pre, token, num):
the_string = 'cw<ci<subscript_<nu<false\n'
the_string += 'cw<ci<superscrip<nu<false\n'
return the_string
def divide_num(self, numerator, denominator):
try:
# calibre why ignore negative number? Wrong in case of \fi
numerator = float(re.search('[0-9.\\-]+', numerator).group())
except TypeError:
if self.__run_level > 3:
msg = ('No number to process?\nthis indicates that the token \\(\\li\\) \
should have a number and does not\nnumerator is \
"%s"\ndenominator is "%s"\n') % (numerator, denominator)
raise self.__bug_handler(msg)
if 5 > self.__return_code:
self.__return_code = 5
return 0
num = '%0.2f' % round(numerator/denominator, 2)
return num
string_num = str(num)
if string_num[-2:] == ".0":
string_num = string_num[:-2]
return string_num
def split_let_num(self, token):
match_obj = re.search(self.__num_exp,token)
if match_obj is not None:
first = match_obj.group(1)
second = match_obj.group(2)
if not second:
if self.__run_level > 3:
msg = "token is '%s' \n" % token
raise self.__bug_handler(msg)
return first, 0
else:
if self.__run_level > 3:
msg = "token is '%s' \n" % token
raise self.__bug_handler
return token, 0
return first, second
def convert_to_hex(self,number):
"""Convert a string to uppercase hexadecimal"""
num = int(number)
try:
hex_num = "%X" % num
return hex_num
except:
raise self.__bug_handler
def process_cw(self, token):
"""Change the value of the control word by determining what dictionary
it belongs to"""
special = ['*', ':', '}', '{', '~', '_', '-', ';']
# if token != "{" or token != "}":
token = token[1:] # strip off leading \
token = token.replace(" ", "")
# if not token: return
only_alpha = token.isalpha()
num = None
if not only_alpha and token not in special:
token, num = self.split_let_num(token)
pre, token, action = self.dict_token.get(token, (None, None, None))
if action:
return action(pre, token, num)
def __check_brackets(self, in_file):
self.__check_brack_obj = check_brackets.CheckBrackets(file=in_file)
good_br = self.__check_brack_obj.check_brackets()[0]
if not good_br:
return 1
    def process_tokens(self):
        """Main method for handling other methods.

        Reads the token file (one token per line), validates the RTF
        preamble, translates each token to the intermediate format, then
        replaces the input file with the result and verifies that the
        brace counts balance.
        """
        line_count = 0
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as write_obj:
                for line in read_obj:
                    token = line.replace("\n", "")
                    line_count += 1
                    # A valid RTF file must begin with "{" followed by \rtfN.
                    if line_count == 1 and token != '\\{':
                        msg = '\nInvalid RTF: document doesn\'t start with {\n'
                        raise self.__exception_handler(msg)
                    elif line_count == 2 and token[0:4] != '\\rtf':
                        msg = '\nInvalid RTF: document doesn\'t start with \\rtf \n'
                        raise self.__exception_handler(msg)
                    the_index = token.find('\\ ')
                    if token is not None and the_index > -1:
                        msg = '\nInvalid RTF: token "\\ " not valid.\nError at line %d'\
                            % line_count
                        raise self.__exception_handler(msg)
                    elif token[:1] == "\\":
                        # Control word: translate via the lookup table;
                        # unknown control words are silently dropped.
                        line = self.process_cw(token)
                        if line is not None:
                            write_obj.write(line)
                    else:
                        # Plain text: mark &...; entities as tx<ut tokens.
                        fields = re.split(self.__utf_exp, token)
                        for field in fields:
                            if not field:
                                continue
                            if field[0:1] == '&':
                                write_obj.write('tx<ut<__________<%s\n' % field)
                            else:
                                write_obj.write('tx<nu<__________<%s\n' % field)
        if not line_count:
            msg = '\nInvalid RTF: file appears to be empty.\n'
            raise self.__exception_handler(msg)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "processed_tokens.data")
        # Replace the input file in place with the processed version.
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        bad_brackets = self.__check_brackets(self.__file)
        if bad_brackets:
            msg = '\nInvalid RTF: document does not have matching brackets.\n'
            raise self.__exception_handler(msg)
        else:
            return self.__return_code
| 41,971 | Python | .py | 803 | 41.41345 | 102 | 0.448475 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,288 | configure_txt.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/configure_txt.py | import os
import sys
from . import open_for_read
class Configure:
    def __init__(self,
            configuration_file,
            bug_handler,
            debug_dir=None,
            show_config_file=None,
            ):
        """
        Requires:
            configuration_file -- path of the default configuration file
            bug_handler -- exception class raised on malformed configuration
        Optional:
            debug_dir -- directory for debug output
            show_config_file -- when true, report on stderr which
                configuration file (if any) is being used
        Returns:
            Nothing.
        """
        self.__configuration_file = configuration_file
        self.__debug_dir = debug_dir
        self.__bug_handler = bug_handler
        self.__show_config_file = show_config_file
def get_configuration(self, type):
self.__configuration_file = self.__get_file_name()
return_dict = {}
return_dict['config-location'] = self.__configuration_file
if self.__show_config_file and self.__configuration_file:
sys.stderr.write('configuration file is "%s"\n' % self.__configuration_file)
if self.__show_config_file and not self.__configuration_file:
sys.stderr.write('No configuration file found; using default values\n')
if self.__configuration_file:
read_obj = open_for_read(self.__configuration_file)
line_to_read = 1
line_num = 0
while line_to_read:
line_num += 1
line_to_read = read_obj.readline()
line = line_to_read
line = line.strip()
if line[0:1] == '#':
continue
if not line:
continue
fields = line.split('=')
if len(fields) != 2:
msg = line
msg += ('Error in configuration.txt, line %s\n' % line_num)
msg += ('Options take the form of option = value.\n')
msg += ('Please correct the configuration file "%s" before continuing\n'
% self.__configuration_file)
raise self.__bug_handler(msg)
att = fields[0]
value = fields[1]
att = att.strip()
value = value.strip()
return_dict[att] = value
return_dict = self.__parse_dict(return_dict)
if return_dict == 1:
msg = ('Please correct the configuration file "%s" before continuing\n'
% self.__configuration_file)
raise self.__bug_handler(msg)
return return_dict
def __get_file_name(self):
home_var = os.environ.get('HOME')
if home_var:
home_config = os.path.join(home_var, '.rtf2xml')
if os.path.isfile(home_config):
return home_config
home_var = os.environ.get('USERPROFILE')
if home_var:
home_config = os.path.join(home_var, '.rtf2xml')
if os.path.isfile(home_config):
return home_config
script_file = os.path.join(sys.path[0], '.rtf2xml')
if os.path.isfile(script_file):
return script_file
return self.__configuration_file
    def __parse_dict(self, return_dict):
        """
        Validate the raw option dict and fill in defaults.

        Returns the normalised dict on success, or the integer 1 when an
        option or value is invalid (the caller raises in that case).
        """
        allowable = [
            'configuration-directory',
            'smart-output',  # = false
            'level',  # = 1
            'convert-symbol',  # = true
            'convert-wingdings',  # = true
            'convert-zapf-dingbats',  # = true
            'convert-caps',  # true
            'indent',  # = 1
            'group-styles',
            'group-borders',
            'headings-to-sections',
            'lists',
            'raw-dtd-path',
            'write-empty-paragraphs',
            'config-location',
            'script-name',
        ]
        the_keys = return_dict.keys()
        for the_key in the_keys:
            if the_key not in allowable:
                sys.stderr.write('options "%s" not a legal option.\n'
                    % the_key)
                return 1
        # NOTE(review): the option is read as 'configuration-directory' but
        # stored as 'configure-directory' -- downstream code appears to rely
        # on the latter spelling.
        configuration_dir = return_dict.get('configuration-directory')
        if configuration_dir is None:
            return_dict['configure-directory'] = None
        else:
            if not os.path.isdir(configuration_dir):
                sys.stderr.write('The directory "%s" does not appear to be a directory.\n'
                    % configuration_dir)
                return 1
            else:
                return_dict['configure-directory'] = configuration_dir
        # 'true' is kept as the string 'true'; 'false' (or missing) becomes 0.
        smart_output = return_dict.get('smart-output')
        if not smart_output:
            return_dict['smart-output'] = 0
        elif smart_output != 'true' and smart_output != 'false':
            sys.stderr.write('"smart-output" must be true or false.\n')
            return 1
        elif smart_output == 'false':
            return_dict['smart-output'] = 0
        int_options = ['level', 'indent']
        for int_option in int_options:
            value = return_dict.get(int_option)
            if not value:
                if int_option == 'level':
                    return_dict['level'] = 1
                else:
                    return_dict['indent'] = 0
            else:
                try:
                    int_num = int(return_dict[int_option])
                    return_dict[int_option] = int_num
                except:
                    sys.stderr.write('"%s" must be a number\n' % int_option)
                    sys.stderr.write('You choose "%s" ' % return_dict[int_option])
                    return 1
        fonts = ['convert-symbol', 'convert-wingdings', 'convert-zapf-dingbats',
            'convert-caps'
        ]
        # NOTE(review): unlike smart-output, an invalid font value only warns
        # on stderr and does not return 1 -- presumably intentional leniency.
        for font in fonts:
            value = return_dict.get(font)
            if not value:
                return_dict[font] = 0
            elif value != 'true' and value != 'false':
                sys.stderr.write(
                    '"%s" must be true or false.\n' % font)
            elif value == 'false':
                return_dict[font] = 0
        return_dict['xslt-processor'] = None
        return_dict['no-namespace'] = None
        return_dict['format'] = 'raw'
        return_dict['no-pyxml'] = 'true'
        return return_dict
| 6,190 | Python | .py | 153 | 27.071895 | 92 | 0.505473 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,289 | copy.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/copy.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import shutil
class Copy:
    """Copy each changed file to a directory for debugging purposes"""

    # Directory the debug copies are written to; deliberately a class-level
    # attribute so one set_dir() call configures every instance.
    __dir = ""

    def __init__(self, bug_handler, file=None, deb_dir=None, ):
        """
        Required:
            'bug_handler' -- exception class raised on fatal errors
        Optional:
            'file' -- kept for backwards compatibility; not used by the
                      class methods themselves
            'deb_dir' -- unused here; callers configure the directory via
                      set_dir()
        """
        self.__file = file
        self.__bug_handler = bug_handler

    def set_dir(self, deb_dir):
        """Set the temporary directory to write files to"""
        if deb_dir is None:
            message = "No directory has been provided to write to in the copy.py"
            raise self.__bug_handler(message)
        if not os.path.isdir(deb_dir):
            message = "%(deb_dir)s is not a directory" % vars()
            raise self.__bug_handler(message)
        Copy.__dir = deb_dir

    def remove_files(self):
        """Remove all files (but not directories) below the debug directory."""
        self.__remove_the_files(Copy.__dir)

    def __remove_the_files(self, the_dir):
        """Recursively delete every file inside *the_dir*."""
        for entry in os.listdir(the_dir):
            # BUG FIX: join with the directory currently being walked, not
            # with the top-level Copy.__dir; the old code built wrong paths
            # for files in sub-directories, so they were never removed (the
            # failing os.remove was silently swallowed below).
            rem_file = os.path.join(the_dir, entry)
            if os.path.isdir(rem_file):
                self.__remove_the_files(rem_file)
            else:
                try:
                    os.remove(rem_file)
                except OSError:
                    # best effort -- a locked or already-gone file is not fatal
                    pass

    def copy_file(self, file, new_file):
        """Copy *file* into the debug directory under the name *new_file*."""
        write_file = os.path.join(Copy.__dir, new_file)
        shutil.copyfile(file, write_file)

    def rename(self, source, dest):
        # Despite the name this copies rather than moves: callers remove
        # *source* themselves afterwards.
        shutil.copyfile(source, dest)
| 2,542 | Python | .py | 55 | 38.454545 | 81 | 0.459863 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,290 | __init__.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/__init__.py | def open_for_read(path):
return open(path, encoding='utf-8', errors='replace')
def open_for_write(path, append=False):
    """Open *path* for UTF-8 text writing; append instead of truncating
    when *append* is true.  Newline translation is disabled."""
    if append:
        mode = 'a'
    else:
        mode = 'w'
    return open(path, mode, encoding='utf-8', errors='replace', newline='')
| 235 | Python | .py | 5 | 43.2 | 75 | 0.679825 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,291 | fonts.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/fonts.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Fonts:
    """
    Rewrite font tokens so they carry the actual font name rather than the
    number assigned in the RTF font table.

    A small state machine is used: lines before the font table pass through
    unchanged; inside the font table each individual font's number and name
    are recorded (and an empty ``font-in-table`` tag is emitted); after the
    table every ``font-style`` token has its number replaced by the
    recorded name.  convert_fonts() is the entry point.
    """

    def __init__(self,
            in_file,
            bug_handler,
            default_font_num,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file'--file to parse
            'default_font_num'--the default font number
        Optional:
            'copy'-- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__default_font_num = default_font_num
        self.__write_to = better_mktemp()
        self.__run_level = run_level

    def __initiate_values(self):
        """
        Initiate all values.
        """
        # fonts whose characters need special conversion later on; the
        # values are flipped to 1 when the font occurs in the document
        self.__special_font_dict = {
            'Symbol'        : 0,
            'Wingdings'     : 0,
            'Zapf Dingbats' : 0,
        }
        self.__special_font_list = [
            'Symbol', 'Wingdings', 'Zapf Dingbats'
        ]
        self.__state = 'default'
        self.__state_dict = {
            'default'          : self.__default_func,
            'font_table'       : self.__font_table_func,
            'after_font_table' : self.__after_font_table_func,
            'font_in_table'    : self.__font_in_table_func,
        }
        # maps font number (str) -> font name (str)
        self.__font_table = {}
        # set to 1 once at least one individual font has been written
        self.__wrote_ind_font = 0

    def __default_func(self, line):
        """
        Handle all lines before the font table: watch for the start of the
        table (which switches state) and write every line through.
        """
        if self.__token_info == 'mi<mk<fonttb-beg':
            self.__state = 'font_table'
        self.__write_obj.write(line)

    def __font_table_func(self, line):
        """
        Inside the font table but between individual fonts.  The table-end
        marker switches to 'after_font_table'; the start of an individual
        font switches to 'font_in_table' and resets the font number to the
        default (RTF assumes the default when no number is given) and the
        name accumulator to ''.  Lines are swallowed: the table itself is
        not re-emitted.
        """
        if self.__token_info == 'mi<mk<fonttb-end':
            self.__state = 'after_font_table'
        elif self.__token_info == 'mi<mk<fontit-beg':
            self.__state = 'font_in_table'
            self.__font_num = self.__default_font_num
            self.__text_line = ''

    def __font_in_table_func(self, line):
        """
        Inside one entry of the font table.  Collect the font number and
        the name text; on the end-of-entry marker record the pair in
        self.__font_table and emit an empty ``font-in-table`` tag.  A
        premature end of the whole font table is handled as well.
        """
        # cw<ci<font-style<nu<4
        # tx<nu<__________<Times;
        if self.__token_info == 'mi<mk<fontit-end':
            self.__wrote_ind_font = 1
            self.__state = 'font_table'
            self.__text_line = self.__text_line[:-1]  # get rid of last ';'
            self.__font_table[self.__font_num] = self.__text_line
            self.__write_obj.write(
                'mi<tg<empty-att_'
                '<font-in-table<name>%s<num>%s\n' % (self.__text_line, self.__font_num)
            )
        elif self.__token_info == 'cw<ci<font-style':
            self.__font_num = line[20:-1]
        elif self.__token_info == 'tx<nu<__________' or \
                self.__token_info == 'tx<ut<__________':
            self.__text_line += line[17:-1]
        elif self.__token_info == 'mi<mk<fonttb-end':
            self.__found_end_font_table_func()
            self.__state = 'after_font_table'

    def __found_end_font_table_func(self):
        """
        If no individual font was written before the table closed, emit a
        fallback entry for Times so the output always contains one.
        """
        if not self.__wrote_ind_font:
            self.__write_obj.write(
                'mi<tg<empty-att_'
                '<font-in-table<name>Times<num>0\n')

    def __after_font_table_func(self, line):
        """
        After the font table: replace the number in every ``font-style``
        token with the name recorded earlier.  Tokens with an unknown
        number are dropped (or raised at run levels above 3); all other
        lines pass through unchanged.
        """
        if self.__token_info == 'cw<ci<font-style':
            font_num = line[20:-1]
            font_name = self.__font_table.get(font_num)
            if font_name is None:
                if self.__run_level > 3:
                    msg = 'no value for %s in self.__font_table\n' % font_num
                    raise self.__bug_handler(msg)
            else:
                if font_name in self.__special_font_list:
                    # remember that this special font occurs in the document
                    self.__special_font_dict[font_name] = 1
                self.__write_obj.write(
                    'cw<ci<font-style<nu<%s\n' % font_name
                )
        else:
            self.__write_obj.write(line)

    def convert_fonts(self):
        """
        Entry point.  Processes the file line by line, dispatching on the
        current state, then returns a dictionary with flags for the special
        fonts plus the name of the default font.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        # BUG FIX: the old code logged the unknown state and
                        # then still called action(line) with action being
                        # None, raising TypeError; report and skip instead.
                        sys.stderr.write('no matching state in module fonts.py\n' + self.__state + '\n')
                        continue
                    action(line)
        default_font_name = self.__font_table.get(self.__default_font_num)
        if not default_font_name:
            default_font_name = 'Not Defined'
        self.__special_font_dict['default-font'] = default_font_name
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "fonts.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        return self.__special_font_dict
| 8,962 | Python | .py | 215 | 30.972093 | 104 | 0.506812 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,292 | combine_borders.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/combine_borders.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class CombineBorders:
    """Combine borders in RTF tokens to make later processing easier"""

    def __init__(self,
            in_file ,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__write_to = better_mktemp()
        # parser state: 'default' outside a border run, 'border' inside one
        self.__state = 'default'
        self.__bord_pos = 'default'
        # attribute strings collected for the border currently being built
        self.__bord_att = []

    def found_bd(self, line):
        """A border token (e.g. cw<bd<bor-t-r-vi) starts border mode."""
        self.__state = 'border'
        self.__bord_pos = line[6:16]

    def __default_func(self, line):
        """Return the line unchanged, or '' after switching to border mode."""
        if self.__first_five == 'cw<bd':
            self.found_bd(line)
            return ''
        return line

    def end_border(self, line, write_obj):
        """Flush the collected attributes as one combined border token."""
        joined = "|".join(self.__bord_att)
        self.__bord_att = []
        write_obj.write(f'cw<bd<{self.__bord_pos}<nu<{joined}\n')
        self.__state = 'default'
        self.__bord_string = ''
        # the line that ended this border may itself start the next one
        if self.__first_five == 'cw<bd':
            self.found_bd(line)
        else:
            write_obj.write(line)

    def add_to_border_desc(self, line):
        """Accumulate one border-attribute token.

        e.g. cw<bt<bdr-hair__<nu<true  ->  'bdr-hair__'
             cw<bt<bdr-linew<nu<0.50   ->  'bdr-linew:0.50'
        """
        border_desc = line[6:16]
        value = line[20:-1]
        # a bare 'true' carries no value worth keeping
        suffix = '' if value == 'true' else ':' + value
        self.__bord_att.append(border_desc + suffix)

    def __border_func(self, line, write_obj):
        """While in border mode: keep collecting, or close out the border."""
        if self.__first_five == 'cw<bt':
            self.add_to_border_desc(line)
        else:
            self.end_border(line, write_obj)

    def combine_borders(self):
        """Rewrite the token file with borders combined, then swap it in."""
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as write_obj:
                for line in read_obj:
                    self.__first_five = line[0:5]
                    if self.__state == 'border':
                        self.__border_func(line, write_obj)
                    else:
                        write_obj.write(self.__default_func(line))
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "combine_borders.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 3,458 | Python | .py | 82 | 32.939024 | 73 | 0.44844 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,293 | table_info.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/table_info.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
# note to self. This is the first module in which I use tempfile. A good idea?
"""
"""
class TableInfo:
    """
    Insert the attributes collected by table.py into the table tags.

    Each ``mi<mk<tabl-start`` marker gets an opening <table> tag carrying
    the attributes of the next dictionary in table_data; each
    ``mi<mk<table-end_`` marker gets the closing tag.  The markers
    themselves are preserved in the output.
    """

    def __init__(self,
            in_file,
            bug_handler,
            table_data,
            copy=None,
            run_level=1,):
        """
        Required:
            'in_file'--file to parse
            'table_data' -- a list with one attribute dictionary per table
        Optional:
            'copy'-- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__table_data = table_data
        self.__run_level = run_level
        self.__write_to = better_mktemp()

    def insert_info(self):
        """
        Rewrite the token file with <table> open/close tags inserted at the
        table markers, then swap the result in for the original file.
        """
        # with-statements guarantee the files are closed even on error (the
        # old code opened and closed them by hand, leaking on exceptions)
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as write_obj:
                for line in read_obj:
                    if line == 'mi<mk<tabl-start\n':
                        if len(self.__table_data) > 0:
                            # consume the next table's attribute dictionary
                            table_dict = self.__table_data[0]
                            write_obj.write('mi<tg<open-att__<table')
                            for key in table_dict:
                                write_obj.write(f'<{key}>{table_dict[key]}')
                            write_obj.write('\n')
                            self.__table_data = self.__table_data[1:]
                        else:
                            # more tables in the text than collected data --
                            # this shouldn't happen!
                            if self.__run_level > 3:
                                msg = 'Not enough data for each table\n'
                                raise self.__bug_handler(msg)
                            write_obj.write('mi<tg<open______<table\n')
                    elif line == 'mi<mk<table-end_\n':
                        write_obj.write('mi<tg<close_____<table\n')
                    write_obj.write(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "table_info.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 3,515 | Python | .py | 82 | 32.95122 | 78 | 0.446293 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,294 | table.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/table.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import border_parse, copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
"""
States.
1. 'not_in_table'
1. 'cw<tb<row-def___' start a row definition
2. 'mi<mk<in-table__' start table
2. 'in_table'
1. 'mi<mk<pard-start', start of a row, cell
2. 'mi<mk<not-in-tbl', end the table.
3. 'cw<tb<row-def___' start a row definition
3. in_row_definition
1. 'mi<mk<not-in-tbl' : end the row definition. If in table, end the table.
2. 'mi<mk<pard-start' : end the row definition
if already in the table, start a row and cell.
3. 'cw<tb<row_______' : end the row definition, end the row
4. 'cw...' use another method to handle the control word
control word might be added to dictionary.
5. 'mi<mk<in-table__' If already in table, do nothing. Otherwise
start the table.
4. 'in_row'
1. 'mi<mk<pard-start', start cell
2. 'mi<mk<not-in-tbl' end table,
3. 'cw<tb<row_______' close row,
5. 'in_cell'
1. 'mi<mk<not-in-tbl', end table
2. 'cw<tb<cell______', end cell
"""
class Table:
    """
    Write <table>, <row> and <cell> tags around tokenized RTF table text.

    RTF only marks the *ends* of rows and cells, so a state machine with
    the states 'not_in_table', 'in_row_def', 'in_table', 'in_row' and
    'in_cell' (kept on the stack self.__state) infers where the opening
    tags belong.  make_table() is the entry point; it returns a list with
    one attribute dictionary per table, consumed later by table_info.py.
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,):
        """
        Required:
            'in_file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        self.__write_to = better_mktemp()

    def __initiate_values(self):
        """
        Set up the dispatch tables and the (empty) per-table bookkeeping.
        """
        self.__state_dict = {
            'in_table'     : self.__in_table_func,
            'in_row_def'   : self.__in_row_def_func,
            'not_in_table' : self.__not_in_table_func,
            'in_cell'      : self.__in_cell_func,
            'in_row'       : self.__in_row_func,
        }
        self.__not_in_table_dict = {
            'cw<tb<row-def___': self.__found_row_def_func,
            'cw<tb<in-table__': self.__start_table_func,
            'mi<mk<in-table__': self.__start_table_func,
        }
        # tokens inside a row definition need multi-step handling, so only
        # the reachable single-step exits are table driven
        self.__in_row_definition_dict = {
            'mi<mk<not-in-tbl' : self.__end_row_table_func,
            'mi<mk<pard-start' : self.__end_row_def_func,
        }
        self.__in_row_dict = {
            'mi<mk<not-in-tbl' : self.__close_table,
            'mi<mk<pard-start' : self.__start_cell_func,
            'cw<tb<row_______' : self.__end_row_func,
            'cw<tb<cell______' : self.__empty_cell,
        }
        # the state is a stack; 'not_in_table' is the bottom/default state
        self.__state = ['not_in_table']
        # one dictionary of attributes per table found
        self.__table_data = []
        # just in case there is no table data
        self.__row_dict = {}
        self.__cell_list = []
        self.__cell_widths = []

    def __in_table_func(self, line):
        """
        Inside a table but not yet in a row.  Any of the end markers closes
        the table; a paragraph start opens a row and a cell; a row
        definition or a stray cell token is also handled.  The line is
        always written through.
        """
        if self.__token_info in ('mi<mk<not-in-tbl', 'mi<mk<sect-start',
                                 'mi<mk<sect-close', 'mi<mk<body-close'):
            self.__close_table(line)
        elif self.__token_info == 'mi<mk<pard-start':
            self.__start_row_func(line)
            self.__start_cell_func(line)
        elif self.__token_info == 'cw<tb<row-def___':
            self.__found_row_def_func(line)
        elif self.__token_info == 'cw<tb<cell______':
            self.__start_row_func(line)
            self.__empty_cell(line)
        self.__write_obj.write(line)

    def __not_in_table_func(self, line):
        """
        Outside any table: watch for the tokens that start a table or a row
        definition; every line is written through.
        """
        action = self.__not_in_table_dict.get(self.__token_info)
        if action:
            action(line)
        self.__write_obj.write(line)

    def __close_table(self, line):
        """
        Write the end-of-table marker, reset the state stack and record the
        summary attributes (column/row counts and averages) on the current
        table's dictionary.
        """
        self.__write_obj.write('mi<mk<table-end_\n')
        self.__state = ['not_in_table']
        self.__table_data[-1]['number-of-columns'] = self.__max_number_cells_in_row
        self.__table_data[-1]['number-of-rows'] = self.__rows_in_table
        average_cells_in_row = self.__mode(self.__list_of_cells_in_row)
        self.__table_data[-1]['average-cells-per-row'] = average_cells_in_row
        average_cell_width = self.__mode(self.__cell_widths)
        self.__table_data[-1]['average-cell-width'] = average_cell_width

    def __found_row_def_func(self, line):
        """
        A row definition has been found; reset the per-row bookkeeping so
        the border/width tokens that follow can be collected.
        """
        self.__state.append('in_row_def')
        self.__last_cell_position = 0
        self.__row_dict = {}
        self.__cell_list = [{}]
        self.__cell_widths = []

    def __start_table_func(self, line):
        """
        Start a new table: reset the counters, append a fresh attribute
        dictionary and write the start-of-table marker.
        """
        self.__rows_in_table = 0
        self.__cells_in_table = 0
        self.__cells_in_row = 0
        self.__max_number_cells_in_row = 0
        self.__table_data.append({})
        self.__list_of_cells_in_row = []
        self.__write_obj.write('mi<mk<tabl-start\n')
        self.__state.append('in_table')

    def __end_row_table_func(self, line):
        """
        End of both the row definition and the table.
        """
        # BUG FIX: was self.__close_table(self, line), which passed self
        # twice and raised TypeError whenever this handler fired.
        self.__close_table(line)

    def __end_row_def_func(self, line):
        """
        Leave the row-definition state; drop the trailing empty dictionary
        from the cell list and derive the number of cells from the
        collected widths string (e.g. '122, 122').
        """
        if len(self.__state) > 0:
            if self.__state[-1] == 'in_row_def':
                self.__state.pop()
        # a {} was appended at the *end* of each cell; discard the extra one
        if self.__cell_list:
            self.__cell_list.pop()
        widths = self.__row_dict.get('widths')
        if widths:
            self.__row_dict['number-of-cells'] = len(widths.split(','))

    def __in_row_def_func(self, line):
        """
        Inside the text that defines a row.  Control words are collected by
        __handle_row_token; certain markers end the definition and may also
        end the table or start a row/cell/table.
        """
        if self.__token_info == 'cw<tb<row_______':
            # write tags, then change the state
            self.__end_row_func(line)
            self.__end_row_def_func(line)
            self.__write_obj.write(line)
        elif line[0:2] == 'cw':
            self.__handle_row_token(line)
            self.__write_obj.write(line)
        elif self.__token_info == 'mi<mk<not-in-tbl' and 'in_table' in self.__state:
            self.__end_row_def_func(line)
            self.__close_table(line)
            self.__write_obj.write(line)
        elif self.__token_info == 'mi<mk<pard-start':
            self.__end_row_def_func(line)
            # BUG FIX: was "if (self.__state) > 0", comparing a list with an
            # int, which raises TypeError on Python 3.
            if len(self.__state) > 0 and self.__state[-1] == 'in_table':
                # already in the table: start a row, then a cell
                self.__start_row_func(line)
                self.__start_cell_func(line)
            self.__write_obj.write(line)
        elif self.__token_info == 'mi<mk<in-table__':
            self.__end_row_def_func(line)
            # if not already in a table, start a new one
            if len(self.__state) > 0 and self.__state[-1] != 'in_table':
                self.__start_table_func(line)
            self.__write_obj.write(line)
        else:
            self.__write_obj.write(line)

    def __handle_row_token(self, line):
        """
        Classify one control word inside the row definition: border tokens
        go either to the current cell's dictionary or to the row
        dictionary; cell positions, the left row position and the header
        flag are also recorded.
        """
        # cw<bd<bor-t-r-to<nu<bdr-hair__|bdr-li-wid:0.50
        if line[3:5] == 'bd':
            border_obj = border_parse.BorderParse()
            the_dict = border_obj.parse_border(line)
            keys = the_dict.keys()
            # keys starting with 'border-cell' belong to the cell, not the row
            in_cell = 0
            for key in keys:
                if key[0:11] == 'border-cell':
                    in_cell = 1
            for key in keys:
                if in_cell:
                    self.__cell_list[-1][key] = the_dict[key]
                else:
                    self.__row_dict[key] = the_dict[key]
        # cw<tb<cell-posit<nu<216.00
        elif self.__token_info == 'cw<tb<cell-posit':
            self.__found_cell_position(line)
        # cw<tb<row-pos-le<nu<-5.40
        elif self.__token_info == 'cw<tb<row-pos-le':
            self.__row_dict['left-row-position'] = line[20:-1]
        elif self.__token_info == 'cw<tb<row-header':
            self.__row_dict['header'] = 'true'

    def __start_cell_func(self, line):
        """
        Open a cell: write an open tag with the attributes collected for
        the next cell in the row definition (consuming that entry), or a
        bare open tag when nothing was collected.
        """
        self.__state.append('in_cell')
        if len(self.__cell_list) > 0:
            self.__write_obj.write('mi<tg<open-att__<cell')
            cell_dict = self.__cell_list[0]
            for key in cell_dict:
                self.__write_obj.write(f'<{key}>{cell_dict[key]}')
            self.__write_obj.write('\n')
            self.__cell_list.pop(0)
        else:
            self.__write_obj.write('mi<tg<open______<cell\n')
        self.__cells_in_table += 1
        self.__cells_in_row += 1

    def __start_row_func(self, line):
        """
        Open a row: write an open tag carrying the row attributes and reset
        the per-row cell counter.
        """
        self.__state.append('in_row')
        self.__write_obj.write('mi<tg<open-att__<row')
        for key in self.__row_dict:
            self.__write_obj.write(f'<{key}>{self.__row_dict[key]}')
        self.__write_obj.write('\n')
        self.__cells_in_row = 0
        self.__rows_in_table += 1

    def __found_cell_position(self, line):
        """
        Compute this cell's width from the running cell position.  The
        first cell also has the (often negative) left row position folded
        in.  The width is recorded in the row's widths string, on the
        pending cell entry and in the per-table width list.
        """
        # cw<tb<cell-posit<nu<216.00
        new_cell_position = round(float(line[20:-1]), 2)
        left_position = 0
        if self.__last_cell_position == 0:
            left_position = float(self.__row_dict.get('left-row-position', 0))
        width = new_cell_position - self.__last_cell_position - left_position
        width = '%.2f' % width
        self.__last_cell_position = new_cell_position
        if self.__row_dict.get('widths'):
            self.__row_dict['widths'] += ', %s' % str(width)
        else:
            self.__row_dict['widths'] = str(width)
        self.__cell_list[-1]['width'] = width
        self.__cell_list.append({})
        self.__cell_widths.append(width)

    def __in_cell_func(self, line):
        """
        Inside a cell: any table-end marker closes cell, row and table; a
        cell token closes just the cell; anything else passes through.
        """
        # cw<tb<cell______<nu<true
        if self.__token_info in ('mi<mk<not-in-tbl', 'mi<mk<sect-start',
                                 'mi<mk<sect-close', 'mi<mk<body-close'):
            self.__end_cell_func(line)
            self.__end_row_func(line)
            self.__close_table(line)
            self.__write_obj.write(line)
        elif self.__token_info == 'cw<tb<cell______':
            self.__end_cell_func(line)
        else:
            self.__write_obj.write(line)

    def __end_cell_func(self, line):
        """
        Close the cell: pop the state (when open) and write the closing
        markers and tag.
        """
        if len(self.__state) > 1:
            if self.__state[-1] == 'in_cell':
                self.__state.pop()
        self.__write_obj.write('mi<mk<close_cell\n')
        self.__write_obj.write('mi<tg<close_____<cell\n')
        self.__write_obj.write('mi<mk<closecell_\n')

    def __in_row_func(self, line):
        """
        Inside a row but outside a cell: any table-end marker closes row
        and table; otherwise dispatch through self.__in_row_dict.  Every
        line is written through.
        """
        if self.__token_info in ('mi<mk<not-in-tbl', 'mi<mk<sect-start',
                                 'mi<mk<sect-close', 'mi<mk<body-close'):
            self.__end_row_func(line)
            self.__close_table(line)
        else:
            action = self.__in_row_dict.get(self.__token_info)
            if action:
                action(line)
        self.__write_obj.write(line)

    def __end_row_func(self, line):
        """
        Close the row (or write an empty-row tag when no row is open) and
        update the row and per-row cell statistics.
        """
        if len(self.__state) > 1 and self.__state[-1] == 'in_row':
            self.__state.pop()
            self.__write_obj.write('mi<tg<close_____<row\n')
        else:
            self.__write_obj.write('mi<tg<empty_____<row\n')
        self.__rows_in_table += 1
        if self.__cells_in_row > self.__max_number_cells_in_row:
            self.__max_number_cells_in_row = self.__cells_in_row
        self.__list_of_cells_in_row.append(self.__cells_in_row)

    def __empty_cell(self, line):
        """
        Write an empty cell tag, with the pending cell attributes when any
        were collected.
        """
        if len(self.__cell_list) > 0:
            self.__write_obj.write('mi<tg<empty-att_<cell')
            cell_dict = self.__cell_list[-1]
            for key in cell_dict:
                self.__write_obj.write(f'<{key}>{cell_dict[key]}')
            self.__write_obj.write('\n')
        else:
            self.__write_obj.write('mi<tg<empty_____<cell\n')
        self.__cells_in_table += 1
        self.__cells_in_row += 1

    def __mode(self, the_list):
        """
        Return the most frequent item in *the_list*, or 'not-defined' for
        an empty list.
        """
        best_count = 0
        mode = 'not-defined'
        for item in the_list:
            count = the_list.count(item)
            if count > best_count:
                mode = item
                best_count = count
        return mode

    def make_table(self):
        """
        Entry point: process the file line by line, dispatching on the top
        of the state stack, then swap the rewritten file in.  Returns the
        list of per-table attribute dictionaries.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state[-1])
                    if action is None:
                        # BUG FIX: report the unknown state and skip the
                        # line instead of calling None (TypeError).
                        sys.stderr.write('No matching state in module table.py\n')
                        sys.stderr.write(self.__state[-1] + '\n')
                        continue
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "table.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        return self.__table_data
| 20,940 | Python | .py | 541 | 28.530499 | 94 | 0.512148 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,295 | paragraphs.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/paragraphs.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Paragraphs:
    """
    =================
    Purpose
    =================
    Write paragraph tags for a tokenized file. (This module won't be of any use
    to you unless you use it as part of the other modules.)
    -------------
    Method
    -------------
    RTF does not tell you when a paragraph begins. It only tells you when the
    paragraph ends.
    In order to make paragraphs out of this limited info, the parser starts in the
    body of the documents and assumes it is not in a paragraph. It looks for clues
    to begin a paragraph. Text starts a paragraph; so does an inline field or
    list-text. If an end of paragraph marker (\\par) is found, then this indicates
    a blank paragraph.
    Once a paragraph is found, the state changes to 'paragraph.' In this state,
    clues are looked to for the end of a paragraph. The end of a paragraph marker
    (\\par) marks the end of a paragraph. So does the end of a footnote or heading;
    a paragraph definition; the end of a field-block; and the beginning of a
    section. (How about the end of a section or the end of a field-block?)
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            write_empty_para=1,
            run_level=1,
            ):
        """
        Required:
            'in_file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'write_empty_para' -- whether to emit tags for empty paragraphs
            'run_level' -- verbosity/strictness level
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__write_empty_para = write_empty_para
        self.__run_level = run_level
        self.__write_to = better_mktemp()

    def __initiate_values(self):
        """
        Initiate all values.
        """
        self.__state = 'before_body'
        # Markers are consumed by later passes in the pipeline.
        self.__start_marker = 'mi<mk<para-start\n'  # outside para tags
        self.__start2_marker = 'mi<mk<par-start_\n'  # inside para tags
        self.__end2_marker = 'mi<mk<par-end___\n'  # inside para tags
        self.__end_marker = 'mi<mk<para-end__\n'  # outside para tags
        # Dispatch on the current parser state.
        self.__state_dict = {
            'before_body'   : self.__before_body_func,
            'not_paragraph' : self.__not_paragraph_func,
            'paragraph'     : self.__paragraph_func,
        }
        # Tokens that close a paragraph while inside one.
        self.__paragraph_dict = {
            'cw<pf<par-end___'  : self.__close_para_func,  # end of paragraph
            'mi<mk<headi_-end'  : self.__close_para_func,  # end of header or footer
            # 'cw<pf<par-def___'  : self.__close_para_func,  # paragraph definition
            # 'mi<mk<fld-bk-end'  : self.__close_para_func,  # end of field-block
            'mi<mk<fldbk-end_'  : self.__close_para_func,  # end of field-block
            'mi<mk<body-close'  : self.__close_para_func,  # end of body
            'mi<mk<sect-close'  : self.__close_para_func,  # end of body
            'mi<mk<sect-start'  : self.__close_para_func,  # start of section
            'mi<mk<foot___clo'  : self.__close_para_func,  # end of footnote
            'cw<tb<cell______'  : self.__close_para_func,  # end of cell
            'mi<mk<par-in-fld'  : self.__close_para_func,  # start of block field
            'cw<pf<par-def___'  : self.__bogus_para__def_func,  # paragraph definition
        }
        # Tokens that open a paragraph while outside one.
        self.__not_paragraph_dict = {
            'tx<nu<__________'  : self.__start_para_func,
            'tx<hx<__________'  : self.__start_para_func,
            'tx<ut<__________'  : self.__start_para_func,
            'tx<mc<__________'  : self.__start_para_func,
            'mi<mk<inline-fld'  : self.__start_para_func,
            'mi<mk<para-beg__'  : self.__start_para_func,
            'cw<pf<par-end___'  : self.__empty_para_func,
            'mi<mk<pict-start'  : self.__start_para_func,
            'cw<pf<page-break'  : self.__empty_pgbk_func,  # page break
        }

    def __before_body_func(self, line):
        """
        Required:
            line -- line to parse
        Returns:
            nothing
        Logic:
            This function handles all the lines before the start of the body.
            Once the body starts, the state is switched to 'not_paragraph'
        """
        if self.__token_info == 'mi<mk<body-open_':
            self.__state = 'not_paragraph'
        self.__write_obj.write(line)

    def __not_paragraph_func(self, line):
        """
        Required:
            line --line to parse
        Returns:
            nothing
        Logic:
            This function handles all lines that are outside of the paragraph.
            It looks for clues that start a paragraph, and when found,
            switches states and writes the start tags.
        """
        action = self.__not_paragraph_dict.get(self.__token_info)
        if action:
            action(line)
        self.__write_obj.write(line)

    def __paragraph_func(self, line):
        """
        Required:
            line --line to parse
        Returns:
            nothing
        Logic:
            This function handles all the lines that are in the paragraph. It
            looks for clues to the end of the paragraph. When a clue is found,
            it calls on another method to write the end of the tag and change
            the state.
        """
        action = self.__paragraph_dict.get(self.__token_info)
        if action:
            action(line)
        else:
            self.__write_obj.write(line)

    def __start_para_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            This function writes the beginning tags for a paragraph and
            changes the state to paragraph.
        """
        self.__write_obj.write(self.__start_marker)  # marker for later parsing
        self.__write_obj.write(
            'mi<tg<open______<para\n'
        )
        self.__write_obj.write(self.__start2_marker)
        self.__state = 'paragraph'

    def __empty_para_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            This function writes the empty tags for a paragraph.
            It does not do anything if self.__write_empty_para is 0.
        """
        if self.__write_empty_para:
            self.__write_obj.write(self.__start_marker)  # marker for later parsing
            self.__write_obj.write(
                'mi<tg<empty_____<para\n'
            )
            self.__write_obj.write(self.__end_marker)  # marker for later parsing

    def __empty_pgbk_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            This function writes the empty tags for a page break.
        """
        self.__write_obj.write(
            'mi<tg<empty_____<page-break\n'
        )

    def __close_para_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            This function writes the end tags for a paragraph and
            changes the state to not_paragraph.
        """
        self.__write_obj.write(self.__end2_marker)  # marker for later parser
        self.__write_obj.write(
            'mi<tg<close_____<para\n'
        )
        self.__write_obj.write(self.__end_marker)  # marker for later parser
        self.__write_obj.write(line)
        self.__state = 'not_paragraph'

    def __bogus_para__def_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            if a \\pard occurs in a paragraph, I want to ignore it. (I believe)
        """
        self.__write_obj.write('mi<mk<bogus-pard\n')

    def make_paragraphs(self):
        """
        Requires:
            nothing
        Returns:
            nothing (changes the original file)
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state. If the state is before the body, look for the
            beginning of the body.
            When the body is found, change the state to 'not_paragraph'. The
            only other state is 'paragraph'.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        # Unknown state: report and skip the line.  The old
                        # code fell through and called None, which raised a
                        # TypeError instead of a useful diagnostic.
                        try:
                            sys.stderr.write('no matching state in module paragraphs.py\n')
                            sys.stderr.write(self.__state + '\n')
                        except Exception:
                            pass
                        continue
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "paragraphs.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 10,342 | Python | .py | 249 | 31.975904 | 91 | 0.520441 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,296 | fields_small.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/fields_small.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
import sys
from calibre.ebooks.rtf2xml import copy, field_strings
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class FieldsSmall:
    """
    =================
    Purpose
    =================
    Write tags for bookmarks, index and toc entry fields in a tokenized file.
    This module does not handle toc or index tables. (This module won't be any
    use to you unless you use it as part of the other modules.)
    -----------
    Method
    -----------
    Look for the beginning of a bookmark, index, or toc entry. When such a token
    is found, store the opening bracket count in a variable. Collect all the text
    until the closing bracket entry is found. Send the string to the module
    field_strings to process it. Write the processed string to the output
    file.
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1,
            ):
        """
        Required:
            'in_file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- verbosity/strictness level
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__write_to = better_mktemp()
        self.__run_level = run_level

    def __initiate_values(self):
        """
        Initiate all values.
        """
        self.__string_obj = field_strings.FieldStrings(bug_handler=self.__bug_handler)
        self.__state = 'before_body'
        self.__text_string = ''
        self.__marker = 'mi<mk<inline-fld\n'
        self.__state_dict = {
            'before_body'   : self.__before_body_func,
            'body'          : self.__body_func,
            'bookmark'      : self.__bookmark_func,
            'toc_index'     : self.__toc_index_func,
        }
        # Maps a token to (handler, tag) for tokens that open a field.
        self.__body_dict = {
            'cw<an<book-mk-st'  : (self.__found_bookmark_func, 'start'),
            'cw<an<book-mk-en'  : (self.__found_bookmark_func, 'end'),
            'cw<an<toc_______'  : (self.__found_toc_index_func, 'toc'),
            'cw<an<index-mark'  : (self.__found_toc_index_func, 'index'),
        }
        # NOTE: this regex is compiled but currently unused; kept for
        # reference to the token pattern of a bookmark start.
        ob = 'ob<nu<open-brack.....'
        cb = 'cb<nu<clos-brack'
        bk_st = 'cw<an<book-mk-st<nu<true'
        tx = 'tx<nu<__________<(.*?)'
        reg_st = ob + bk_st + tx + cb
        self.__book_start = re.compile(r'%s' % reg_st)

    def __before_body_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            Look for the beginning of the body. When found, change the state
            to body. Always print out the line.
        """
        if self.__token_info == 'mi<mk<body-open_':
            self.__state = 'body'
        self.__write_obj.write(line)

    def __body_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            This function handles all the lines in the body of the documents.
            Look for a bookmark, index or toc entry and take the appropriate action.
        """
        action, tag = \
            self.__body_dict.get(self.__token_info, (None, None))
        if action:
            action(line, tag)
        else:
            self.__write_obj.write(line)

    def __found_bookmark_func(self, line, tag):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            This function is called when a bookmark is found. The opening
            bracket count is stored in the beginning bracket count. The state
            is changed to 'bookmark.'
        """
        self.__beg_bracket_count = self.__ob_count
        self.__cb_count = 0
        self.__state = 'bookmark'
        self.__type_of_bookmark = tag

    def __bookmark_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            This function handles all lines within a bookmark. It adds each
            line to a string until the end of the bookmark is found. It
            processes the string with the fields_string module, and
            prints out the result.
        """
        if self.__beg_bracket_count == self.__cb_count:
            self.__state = 'body'
            entry_type = 'bookmark-%s' % self.__type_of_bookmark
            my_string = self.__parse_bookmark_func(
                self.__text_string, entry_type)
            self.__write_obj.write(self.__marker)
            self.__write_obj.write(my_string)
            self.__text_string = ''
            self.__write_obj.write(line)
        elif line[0:2] == 'tx':
            self.__text_string += line[17:-1]

    def __parse_index_func(self, my_string):
        """
        Requires:
            my_string --string to parse
        Returns:
            A string for an index instruction field.
        Logic:
            This method is meant for *both* index and toc entries.
            I want to eliminate paragraph endings, and I want to divide the
            entry into a main entry and (if it exists) a sub entry.
            Split the string by newlines. Read one token at a time. If the
            token is a special colon, end the main entry element and start the
            sub entry element.
            If the token is a paragraph ending, ignore it, since I don't want
            paragraphs within toc or index entries.
        """
        my_string, see_string = self.__index_see_func(my_string)
        my_string, bookmark_string = self.__index_bookmark_func(my_string)
        italics, bold = self.__index__format_func(my_string)
        found_sub = 0
        my_changed_string = 'mi<tg<empty-att_<field<type>index-entry'
        my_changed_string += '<update>static'
        if see_string:
            my_changed_string += '<additional-text>%s' % see_string
        if bookmark_string:
            my_changed_string += '<bookmark>%s' % bookmark_string
        if italics:
            my_changed_string += '<italics>true'
        if bold:
            my_changed_string += '<bold>true'
        main_entry = ''
        sub_entry = ''
        lines = my_string.split('\n')
        for line in lines:
            token_info = line[:16]
            if token_info == 'cw<ml<colon_____':
                found_sub = 1
            elif token_info[0:2] == 'tx':
                if found_sub:
                    sub_entry += line[17:]
                else:
                    main_entry += line[17:]
        my_changed_string += '<main-entry>%s' % main_entry
        if found_sub:
            my_changed_string += '<sub-entry>%s' % sub_entry
        my_changed_string += '\n'
        return my_changed_string

    def __index_see_func(self, my_string):
        """
        Extract the text of an index "see" cross reference.

        Returns a tuple (string minus the see group, see text).
        """
        in_see = 0
        bracket_count = 0
        see_string = ''
        changed_string = ''
        lines = my_string.split('\n')
        end_bracket_count = sys.maxsize
        for line in lines:
            token_info = line[:16]
            if token_info == 'ob<nu<open-brack':
                bracket_count += 1
            if token_info == 'cb<nu<clos-brack':
                bracket_count -= 1
            if in_see:
                if bracket_count == end_bracket_count and token_info == 'cb<nu<clos-brack':
                    in_see = 0
                else:
                    if token_info == 'tx<nu<__________':
                        see_string += line[17:]
            else:
                if token_info == 'cw<in<index-see_':
                    end_bracket_count = bracket_count - 1
                    in_see = 1
                changed_string += '%s\n' % line
        return changed_string, see_string

    def __index_bookmark_func(self, my_string):
        """
        Requires:
            my_string -- string in all the index
        Returns:
            bookmark_string -- the text string of the book mark
            index_string -- string minus the bookmark_string
        """
        # cw<an<place_____<nu<true
        in_bookmark = 0
        bracket_count = 0
        bookmark_string = ''
        index_string = ''
        lines = my_string.split('\n')
        end_bracket_count = sys.maxsize
        for line in lines:
            token_info = line[:16]
            if token_info == 'ob<nu<open-brack':
                bracket_count += 1
            if token_info == 'cb<nu<clos-brack':
                bracket_count -= 1
            if in_bookmark:
                if bracket_count == end_bracket_count and token_info == 'cb<nu<clos-brack':
                    in_bookmark = 0
                    index_string += '%s\n' % line
                else:
                    if token_info == 'tx<nu<__________':
                        bookmark_string += line[17:]
                    else:
                        index_string += '%s\n' % line
            else:
                if token_info == 'cw<an<place_____':
                    end_bracket_count = bracket_count - 1
                    in_bookmark = 1
                index_string += '%s\n' % line
        return index_string, bookmark_string

    def __index__format_func(self, my_string):
        """
        Return (italics, bold) flags found among the index tokens.
        """
        italics = 0
        bold = 0
        lines = my_string.split('\n')
        for line in lines:
            token_info = line[:16]
            if token_info == 'cw<in<index-bold':
                bold = 1
            if token_info == 'cw<in<index-ital':
                italics = 1
        return italics, bold

    def __parse_toc_func(self, my_string):
        """
        Requires:
            my_string -- all the string in the toc
        Returns:
            modified string
        Logic:
        """
        toc_level = 0
        toc_suppress = 0
        my_string, book_start_string, book_end_string =\
            self.__parse_bookmark_for_toc(my_string)
        main_entry = ''
        my_changed_string = 'mi<tg<empty-att_<field<type>toc-entry'
        my_changed_string += '<update>static'
        if book_start_string:
            my_changed_string += '<bookmark-start>%s' % book_start_string
        if book_end_string:
            my_changed_string += '<bookmark-end>%s' % book_end_string
        lines = my_string.split('\n')
        for line in lines:
            token_info = line[:16]
            if token_info[0:2] == 'tx':
                main_entry += line[17:]
            if token_info == 'cw<tc<toc-level_':
                toc_level = line[20:]
            if token_info == 'cw<tc<toc-sup-nu':
                toc_suppress = 1
        if toc_level:
            my_changed_string += '<toc-level>%s' % toc_level
        if toc_suppress:
            my_changed_string += '<toc-suppress-number>true'
        my_changed_string += '<main-entry>%s' % main_entry
        my_changed_string += '\n'
        return my_changed_string

    def __parse_bookmark_for_toc(self, my_string):
        """
        Requires:
            my_string --string of toc, with new lines
        Returns:
            toc_string -- string minus bookmarks
            book_start_string, book_end_string -- bookmark text
        Logic:
        """
        in_bookmark = 0
        bracket_count = 0
        book_start_string = ''
        book_end_string = ''
        book_type = 0
        toc_string = ''
        lines = my_string.split('\n')
        end_bracket_count = sys.maxsize
        for line in lines:
            token_info = line[:16]
            if token_info == 'ob<nu<open-brack':
                bracket_count += 1
            if token_info == 'cb<nu<clos-brack':
                bracket_count -= 1
            if in_bookmark:
                if bracket_count == end_bracket_count and token_info == 'cb<nu<clos-brack':
                    in_bookmark = 0
                    toc_string += '%s\n' % line
                else:
                    if token_info == 'tx<nu<__________':
                        if book_type == 'start':
                            book_start_string += line[17:]
                        elif book_type == 'end':
                            book_end_string += line[17:]
                    else:
                        toc_string += '%s\n' % line
            else:
                if token_info == 'cw<an<book-mk-st' or token_info == 'cw<an<book-mk-en':
                    if token_info == 'cw<an<book-mk-st':
                        book_type = 'start'
                    if token_info == 'cw<an<book-mk-en':
                        book_type = 'end'
                    end_bracket_count = bracket_count - 1
                    in_bookmark = 1
                toc_string += '%s\n' % line
        return toc_string, book_start_string, book_end_string

    def __parse_bookmark_func(self, my_string, entry_type):
        """
        Requires:
            my_string --string to parse
            entry_type --type of string
        Returns:
            A string formatted for a field instruction.
        Logic:
            The type is the name (either bookmark-end or bookmark-start). The
            id is the complete text string.
        """
        my_changed_string = ('mi<tg<empty-att_<field<type>%s'
            '<number>%s<update>none\n' % (entry_type, my_string))
        return my_changed_string

    def __found_toc_index_func(self, line, tag):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            This function is called when a toc or index entry is found. The opening
            bracket count is stored in the beginning bracket count. The state
            is changed to 'toc_index.'
        """
        self.__beg_bracket_count = self.__ob_count
        self.__cb_count = 0
        self.__state = 'toc_index'
        self.__tag = tag

    def __toc_index_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            This function handles all lines within a toc or index entry. It
            adds each line to a string until the end of the entry is found. It
            processes the string with the fields_string module, and
            prints out the result.
        """
        if self.__beg_bracket_count == self.__cb_count:
            self.__state = 'body'
            entry_type = self.__tag
            if entry_type == 'index':
                my_string = self.__parse_index_func(
                    self.__text_string)
            elif entry_type == 'toc':
                my_string = self.__parse_toc_func(
                    self.__text_string)
            self.__write_obj.write(self.__marker)
            self.__write_obj.write(my_string)
            self.__text_string = ''
            self.__write_obj.write(line)
        else:
            self.__text_string += line

    def fix_fields(self):
        """
        Requires:
            nothing
        Returns:
            nothing (changes the original file)
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state. If the state is before the body, look for the
            beginning of the body.
            The other two states are toc_index (for toc and index entries) and
            bookmark.
        """
        self.__initiate_values()
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    if self.__token_info == 'ob<nu<open-brack':
                        self.__ob_count = line[-5:-1]
                    if self.__token_info == 'cb<nu<clos-brack':
                        self.__cb_count = line[-5:-1]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        # Unknown state: report and skip the line.  The old
                        # code fell through and called None, raising a
                        # TypeError.
                        sys.stderr.write('No matching state in module fields_small.py\n')
                        sys.stderr.write(self.__state + '\n')
                        continue
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "fields_small.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 17,405 | Python | .py | 442 | 28.067873 | 91 | 0.500266 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,297 | sections.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/sections.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class Sections:
    """
    =================
    Purpose
    =================
    Write section tags for a tokenized file. (This module won't be of any use
    to you unless you use it as part of the other modules.)
    ---------------
    logic
    ---------------
    The tags for the first section breaks have already been written.
    RTF stores section breaks with the \\sect tag. Each time this tag is
    encountered, add one to the counter.
    When I encounter the \\sectd tag, I want to collect all the appropriate tokens
    that describe the section. When I reach a \\pard, I know I can stop collecting
    tokens and write the section tags.
    The exception to this method occurs when sections occur in field blocks, such
    as the index. Normally, two section breaks occur within the index and other
    field-blocks. (If less or more section breaks occur, this code may not work.)
    I want the sections to occur outside of the index. That is, the index
    should be nested inside one section tag. After the index is complete, a new
    section should begin.
    In order to write the sections outside of the field blocks, I have to store
    all of the field block as a string. When I encounter the \\sect tag, add one to
    the section counter, but store this number in a list. Likewise, store the
    information describing the section in another list.
    When I reach the end of the field block, choose the first item from the
    numbered list as the section number. Choose the first item in the description
    list as the values and attributes of the section. Enclose the field string
    between the section tags.
    Start a new section outside the field-block strings. Use the second number in
    the list; use the second item in the description list.
    CHANGE (2004-04-26) No longer write sections that occur in field-blocks.
    Instead, ignore all section information in a field-block.
    """

    def __init__(self,
            in_file,
            bug_handler,
            copy=None,
            run_level=1):
        """
        Required:
            'in_file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- verbosity/strictness level
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        self.__run_level = run_level
        self.__write_to = better_mktemp()

    def __initiate_values(self):
        """
        Initiate all values.
        """
        self.__mark_start = 'mi<mk<sect-start\n'
        self.__mark_end = 'mi<mk<sect-end__\n'
        self.__in_field = 0
        self.__section_values = {}
        self.__list_of_sec_values = []
        self.__field_num = []
        self.__section_num = 0
        self.__state = 'before_body'
        self.__found_first_sec = 0
        self.__text_string = ''
        self.__field_instruction_string = ''
        self.__state_dict = {
            'before_body'       : self.__before_body_func,
            'body'              : self.__body_func,
            'before_first_sec'  : self.__before_first_sec_func,
            'section'           : self.__section_func,
            'section_def'       : self.__section_def_func,
            'sec_in_field'      : self.__sec_in_field_func,
        }
        # cw<sc<sect-defin<nu<true
        self.__body_dict = {
            'cw<sc<section___'  : self.__found_section_func,
            'mi<mk<sec-fd-beg'  : self.__found_sec_in_field_func,
            'cw<sc<sect-defin'  : self.__found_section_def_bef_sec_func,
        }
        self.__section_def_dict = {
            'cw<pf<par-def___'  : (self.__end_sec_def_func, None),
            'mi<mk<body-open_'  : (self.__end_sec_def_func, None),
            'cw<tb<columns___'  : (self.__attribute_func, 'columns'),
            'cw<pa<margin-lef'  : (self.__attribute_func, 'margin-left'),
            'cw<pa<margin-rig'  : (self.__attribute_func, 'margin-right'),
            'mi<mk<header-ind'  : (self.__end_sec_def_func, None),
            # premature endings
            # __end_sec_premature_func
            'tx<nu<__________'  : (self.__end_sec_premature_func, None),
            'cw<ci<font-style'  : (self.__end_sec_premature_func, None),
            'cw<ci<font-size_'  : (self.__end_sec_premature_func, None),
        }
        self.__sec_in_field_dict = {
            'mi<mk<sec-fd-end'  : self.__end_sec_in_field_func,
            # changed this 2004-04-26
            # two lines
            # 'cw<sc<section___'  : self.__found_section_in_field_func,
            # 'cw<sc<sect-defin'  : self.__found_section_def_in_field_func,
        }

    def __found_section_def_func(self, line):
        """
        Required:
            line -- the line to parse
        Returns:
            nothing
        Logic:
            I have found a section definition. Change the state to
            section_def (so subsequent lines will be processed as part of
            the section definition), and clear the section_values dictionary.
        """
        self.__state = 'section_def'
        self.__section_values.clear()

    def __attribute_func(self, line, name):
        """
        Required:
            line -- the line to be parsed
            name -- the changed, readable name (as opposed to the
            abbreviated one)
        Returns:
            nothing
        Logic:
            I need to add the right data to the section values dictionary so I
            can retrieve it later. The attribute (or key) is the name; the
            value is the last part of the text string.
            ex: cw<tb<columns___<nu<2
        """
        attribute = name
        value = line[20:-1]
        self.__section_values[attribute] = value

    def __found_section_func(self, line):
        """
        Requires:
            line -- the line to parse
        Returns:
            nothing
        Logic:
            I have found the beginning of a section, so change the state
            accordingly. Also add one to the section counter.
        """
        self.__state = 'section'
        self.__write_obj.write(line)
        self.__section_num += 1

    def __found_section_def_bef_sec_func(self, line):
        """
        Requires:
            line -- the line to parse
        Returns:
            nothing
        Logic:
            I have found a section definition before any section, so change
            the state accordingly. Also add one to the section counter.
        """
        self.__section_num += 1
        self.__found_section_def_func(line)
        self.__write_obj.write(line)

    def __section_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            Inside a section: look only for a section definition.
        """
        if self.__token_info == 'cw<sc<sect-defin':
            self.__found_section_def_func(line)
        self.__write_obj.write(line)

    def __section_def_func(self, line):
        """
        Required:
            line --line to parse
        Returns:
            nothing
        Logic:
            I have found a section definition. Check if the line is the end of
            the definition (a paragraph definition), or if it contains info that
            should be added to the values dictionary. If neither of these
            cases are true, output the line to a file.
        """
        action, name = self.__section_def_dict.get(self.__token_info, (None, None))
        if action:
            action(line, name)
            if self.__in_field:
                self.__sec_in_field_string += line
            else:
                self.__write_obj.write(line)
        else:
            self.__write_obj.write(line)

    def __end_sec_def_func(self, line, name):
        """
        Requires:
            line --the line to parse
            name --changed, readable name
        Returns:
            nothing
        Logic:
            The end of the section definition has been found. Reset the state.
            Call on the write_section method.
        """
        if not self.__in_field:
            self.__state = 'body'
        else:
            self.__state = 'sec_in_field'
        self.__write_section(line)

    def __end_sec_premature_func(self, line, name):
        """
        Requires:
            line --the line to parse
            name --changed, readable name
        Returns:
            nothing
        Logic:
            Text or control words indicating text have been found
            before \\pard. This should indicate older RTF. Reset the state.
            Write the section definition. Insert a paragraph definition.
            Insert {} to mark the end of a paragraph definition
        """
        if not self.__in_field:
            self.__state = 'body'
        else:
            self.__state = 'sec_in_field'
        self.__write_section(line)
        self.__write_obj.write('cw<pf<par-def___<nu<true\n')
        self.__write_obj.write('ob<nu<open-brack<0000\n')
        self.__write_obj.write('cb<nu<clos-brack<0000\n')

    def __write_section(self, line):
        """
        Requires:
            nothing
        Returns:
            nothing
        Logic:
            Form a string of attributes and values. If you are not in a field
            block, write this string to the output file. Otherwise, call on
            the handle_sec_def method to handle this string.
        """
        my_string = self.__mark_start
        if self.__found_first_sec:
            my_string += 'mi<tg<close_____<section\n'
        else:
            self.__found_first_sec = 1
        my_string += 'mi<tg<open-att__<section<num>%s' % str(self.__section_num)
        my_string += '<num-in-level>%s' % str(self.__section_num)
        my_string += '<type>rtf-native'
        my_string += '<level>0'
        keys = self.__section_values.keys()
        if len(keys) > 0:
            for key in keys:
                my_string += f'<{key}>{self.__section_values[key]}'
        my_string += '\n'
        my_string += self.__mark_end
        # # my_string += line
        if self.__state == 'body':
            self.__write_obj.write(my_string)
        elif self.__state == 'sec_in_field':
            self.__handle_sec_def(my_string)
        elif self.__run_level > 3:
            msg = 'missed a flag\n'
            raise self.__bug_handler(msg)

    def __handle_sec_def(self, my_string):
        """
        Requires:
            my_string -- the string of attributes and values. (Do I need this?)
        Returns:
            nothing
        Logic:
            I need to append the dictionary of attributes and values to a list
            so I can use it later when I reach the end of the field-block.
        """
        values_dict = self.__section_values
        self.__list_of_sec_values.append(values_dict)

    def __body_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            Look for the beginning of a section. Otherwise, print the line to
            the output file.
        """
        action = self.__body_dict.get(self.__token_info)
        if action:
            action(line)
        else:
            self.__write_obj.write(line)

    def __before_body_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            Look for the beginning of the body. Always print out the line.
        """
        if self.__token_info == 'mi<mk<body-open_':
            self.__state = 'before_first_sec'
        self.__write_obj.write(line)

    def __before_first_sec_func(self, line):
        """
        Requires:
            line -- line to parse
        Returns:
            nothing
        Logic:
            Look for the beginning of the first section. This can be \\sectd,
            but in older RTF it could mean any paragraph or row definition
        """
        if self.__token_info == 'cw<sc<sect-defin':
            self.__state = 'section_def'
            self.__section_num += 1
            self.__section_values.clear()
        elif self.__token_info == 'cw<pf<par-def___':
            self.__state = 'body'
            self.__section_num += 1
            self.__write_obj.write(
                'mi<tg<open-att__<section<num>%s'
                '<num-in-level>%s'
                '<type>rtf-native'
                '<level>0\n'
                % (str(self.__section_num), str(self.__section_num))
            )
            self.__found_first_sec = 1
        elif self.__token_info == 'tx<nu<__________':
            self.__state = 'body'
            self.__section_num += 1
            self.__write_obj.write(
                'mi<tg<open-att__<section<num>%s'
                '<num-in-level>%s'
                '<type>rtf-native'
                '<level>0\n'
                % (str(self.__section_num), str(self.__section_num))
            )
            self.__write_obj.write(
                'cw<pf<par-def___<true\n'
            )
            self.__found_first_sec = 1
        self.__write_obj.write(line)

    def __found_sec_in_field_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            I have found the beginning of a field that has a section (or
            really, two) inside of it. Change the state, and start adding to
            one long string.
        """
        self.__state = 'sec_in_field'
        self.__sec_in_field_string = line
        self.__in_field = 1

    def __sec_in_field_func(self, line):
        """
        Requires:
            line --the line to parse
        Returns:
            nothing
        Logic:
            Check for the end of the field, or the beginning of a section
            definition.
            CHANGED! Just print out each line. Ignore any sections or
            section definition info.
        """
        action = self.__sec_in_field_dict.get(self.__token_info)
        if action:
            action(line)
        else:
            # change this 2004-04-26
            # self.__sec_in_field_string += line
            self.__write_obj.write(line)

    def __end_sec_in_field_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            Reset the state and leave the field. (2004-04-26: the old
            behavior of buffering the field string and re-emitting the
            section tags around it was deliberately removed; we now simply
            pass the line through.)
        """
        self.__state = 'body'
        self.__in_field = 0
        # this is changed too
        self.__write_obj.write(line)

    def __print_field_sec_attributes(self):
        """
        Requires:
            nothing
        Returns:
            nothing
        Logic:
            Get the number and dictionary of values from the lists. The number
            and dictionary will be the first item of each list. Write the
            close tag. Write the start tag. Write the attribute and values in
            the dictionary. Get rid of the first item in each list.
        NOTE: this method is currently unused (see the 2004-04-26 change in
        __end_sec_in_field_func); it is kept for reference.
        """
        num = self.__field_num[0]
        self.__field_num = self.__field_num[1:]
        self.__write_obj.write(
            'mi<tg<close_____<section\n'
            'mi<tg<open-att__<section<num>%s' % str(num)
        )
        if self.__list_of_sec_values:
            keys = self.__list_of_sec_values[0].keys()
            for key in keys:
                self.__write_obj.write(
                    f'<{key}>{self.__list_of_sec_values[0][key]}\n')
            self.__list_of_sec_values = self.__list_of_sec_values[1:]
        self.__write_obj.write('<level>0')
        self.__write_obj.write('<type>rtf-native')
        self.__write_obj.write('<num-in-level>%s' % str(self.__section_num))
        self.__write_obj.write('\n')

    # Look here
    def __found_section_in_field_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            I have found a section in a field block. Add one to section
            counter, and append this number to a list.
        """
        self.__section_num += 1
        self.__field_num.append(self.__section_num)
        self.__sec_in_field_string += line

    def __found_section_def_in_field_func(self, line):
        """
        Requires:
            line --line to parse
        Returns:
            nothing
        Logic:
            I have found a section definition in a field block. Change the
            state and clear the values dictionary.
        """
        self.__state = 'section_def'
        self.__section_values.clear()

    def make_sections(self):
        """
        Requires:
            nothing
        Returns:
            nothing (changes the original file)
        Logic:
            Read one line in at a time. Determine what action to take based on
            the state. If the state is before the body, look for the
            beginning of the body.
            If the state is body, send the line to the body method.
        """
        self.__initiate_values()
        # Use context managers and direct iteration, matching the sibling
        # modules (paragraphs.py, fields_small.py) and guaranteeing the
        # files are closed even on error.
        with open_for_read(self.__file) as read_obj:
            with open_for_write(self.__write_to) as self.__write_obj:
                for line in read_obj:
                    self.__token_info = line[:16]
                    action = self.__state_dict.get(self.__state)
                    if action is None:
                        # Unknown state: report and skip the line.  The old
                        # code fell through and called None, raising a
                        # TypeError.
                        sys.stderr.write('no matching state in module sections.py\n')
                        sys.stderr.write(self.__state + '\n')
                        continue
                    action(line)
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "sections.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 19,942 | Python | .py | 511 | 29.011742 | 83 | 0.532749 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,298 | make_lists.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/make_lists.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
import re
import sys
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from . import open_for_read, open_for_write
class MakeLists:
    """
    Form lists.
    Use RTF's own formatting to determine if a paragraph definition is part of a
    list.
    Use indents to determine items and how lists are nested.
    """

    def __init__(self,
            in_file,
            bug_handler,
            headings_to_sections,
            list_of_lists,
            copy=None,
            run_level=1,
            no_headings_as_list=1,
            write_list_info=0,
            ):
        """
        Required:
            'in_file' -- the file to parse
            'bug_handler' -- the object used to report fatal errors
            'headings_to_sections' -- whether headings are converted to sections
            'list_of_lists' -- the list table collected earlier in the pipeline
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'run_level' -- verbosity of error reporting
            'no_headings_as_list' -- do not treat heading paragraphs as list items
            'write_list_info' -- dump all list-table info into the output tags
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__run_level = run_level
        self.__no_headings_as_list = no_headings_as_list
        self.__headings_to_sections = headings_to_sections
        self.__copy = copy
        self.__write_to = better_mktemp()
        self.__list_of_lists = list_of_lists
        self.__write_list_info = write_list_info

    def __initiate_values(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            The self.__end_list is a list of tokens that will force a list to end.
            Likewise, the self.__end_lines is a list of lines that forces a list to end.
        """
        self.__state = "default"
        self.__left_indent = 0
        self.__list_type = 'not-defined'
        self.__pard_def = ""
        self.__all_lists = []
        self.__level = 0
        self.__list_chunk = ''
        self.__state_dict = {
            'default': self.__default_func,
            'in_pard': self.__in_pard_func,
            'after_pard': self.__after_pard_func,
        }
        self.__headings = [
            'heading 1', 'heading 2', 'heading 3', 'heading 4',
            'heading 5', 'heading 6', 'heading 7', 'heading 8',
            'heading 9'
        ]
        self.__allow_levels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        self.__style_name = ''
        # tokens that force every open list to close
        self.__end_list = [
            'mi<mk<body-close',
            'mi<mk<par-in-fld',
            'cw<tb<cell______',
            'cw<tb<row-def___',
            'cw<tb<row_______',
            'mi<mk<sect-close',
            'mi<mk<sect-start',
            'mi<mk<header-beg',
            'mi<mk<header-end',
            'mi<mk<head___clo',
            'mi<mk<fldbk-end_',
            'mi<mk<close_cell',
            'mi<mk<footnt-ope',
            'mi<mk<foot___clo',
            'mi<mk<tabl-start',
            # 'mi<mk<sec-fd-beg',
        ]
        self.__end_lines = [
            'mi<tg<close_____<cell\n',
        ]
        self.__id_regex = re.compile(r'\<list-id\>(\d+)')
        self.__lv_regex = re.compile(r'\<list-level\>(\d+)')
        self.__found_appt = 0
        self.__line_num = 0

    def __in_pard_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            You are in a list, but in the middle of a paragraph definition.
            Don't do anything until you find the end of the paragraph definition.
        """
        if self.__token_info == 'mi<mk<pard-end__':
            self.__state = 'after_pard'
        self.__write_obj.write(line)

    def __after_pard_func(self, line):
        """
        Required:
            line -- the line of current text.
        Return:
            Nothing
        Logic:
            You are in a list, but after a paragraph definition. You have to
            determine if the last paragraph definition ends a list, continues
            the old one, or starts a new one.
            Otherwise, look for a paragraph definition. If one is found, determine if
            the paragraph definition contains a list-id. If it does, use the method
            self.__list_after_par_def to determine the action.
            If the paragraph definition does not contain a list-id, use the method
            close_lists to close out items and lists for a paragraph that is not
            indented.
            If a bigger block is found (such as a section or a cell), end all lists.
            If no special line is found, add each line to a buffer.
        """
        if self.__token_info == 'mi<tg<open-att__' and line[17:37] == 'paragraph-definition':
            is_heading = self.__is_a_heading()
            # found paragraph definition and not heading 1
            search_obj = re.search(self.__id_regex, line)
            if search_obj and not is_heading:  # found list-id
                search_obj_lv = re.search(self.__lv_regex, line)
                if search_obj_lv:
                    self.__level = search_obj_lv.group(1)
                num = search_obj.group(1)
                self.__list_after_par_def_func(line, num)
                self.__write_obj.write(line)
                self.__state = 'in_pard'
            # heading 1
            elif is_heading:
                self.__left_indent = -1000
                self.__close_lists()
                self.__write_obj.write(self.__list_chunk)
                self.__list_chunk = ''
                self.__state = 'default'
                self.__write_obj.write(line)
            # Normal with no list id
            else:
                self.__close_lists()
                self.__write_obj.write(self.__list_chunk)
                self.__list_chunk = ''
                self.__write_obj.write(line)
                if not self.__all_lists:
                    self.__state = 'default'
                else:
                    self.__state = 'in_pard'
        # section to end lists
        elif self.__token_info in self.__end_list:
            self.__left_indent = -1000
            self.__close_lists()
            self.__write_obj.write(self.__list_chunk)
            self.__list_chunk = ''
            self.__state = 'default'
            self.__write_obj.write(line)
        else:
            self.__list_chunk += line

    def __list_after_par_def_func(self, line, list_id):
        """
        Required:
            line -- the line of current text.
            list_id -- the id of the current list
        Return:
            Nothing
        Logic:
            You have found the end of a paragraph definition, and have found
            another paragraph definition with a list id.
            If the list-id is different from the last paragraph definition,
            write the string in the buffer. Close out the lists with another
            method and start a new list.
            If the list id is the same as the last one, check the indent on the
            current paragraph definition. If it is greater than the previous one,
            do not end the current list or item. Start a new list.
        """
        last_list_id = self.__all_lists[-1]['id']
        if list_id != last_list_id:
            # a different list: close everything out and begin anew
            self.__close_lists()
            self.__write_obj.write(self.__list_chunk)
            self.__write_start_list(list_id)
            self.__list_chunk = ''
        else:
            last_list_indent = self.__all_lists[-1]['left-indent']
            if self.__left_indent > last_list_indent:
                # deeper indent: nest a new list inside the current item
                self.__write_obj.write(self.__list_chunk)
                self.__write_start_list(list_id)
            else:
                # same level: close the item, flush, open the next item
                self.__write_end_item()
                self.__write_obj.write(self.__list_chunk)
                self.__write_start_item()
            self.__list_chunk = ''

    def __close_lists(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            Reverse the list of dictionaries. Iterate through the list and
            get the indent for each list. If the current indent is less than
            or equal to the indent in the dictionary, close that level.
            Keep track of how many levels you close. Reduce the list by that
            many levels.
            Reverse the list again.
        """
        if self.__line_num < 25 and self.__found_appt:
            sys.stderr.write('in closing out lists\n')
            sys.stderr.write('current_indent is "%s"\n' % self.__left_indent)
        current_indent = self.__left_indent
        self.__all_lists.reverse()
        num_levels_closed = 0
        for the_dict in self.__all_lists:
            list_indent = the_dict.get('left-indent')
            if self.__line_num < 25 and self.__found_appt:
                sys.stderr.write('last indent is "%s"' % list_indent)
            if current_indent <= list_indent:
                self.__write_end_item()
                self.__write_end_list()
                num_levels_closed += 1
        self.__all_lists = self.__all_lists[num_levels_closed:]
        self.__all_lists.reverse()

    def __write_end_list(self):
        """
        Required:
            Nothing
        Return:
            Nothing
        Logic:
            Write the end of a list.
        """
        self.__write_obj.write('mi<tg<close_____<list\n')
        self.__write_obj.write('mi<mk<list_close\n')

    def __write_start_list(self, list_id):
        """
        Required:
            list_id -- the id of the current list.
        Return:
            Nothing
        Logic:
            Write the start of a list and add the id and left-indent to the
            self.__all_lists list.
            Write cues of when a list starts for later processing.
            In order to determine the type of list, you have to iterate through
            the self.__list_of_lists. This list looks like:
            [[{list-id: [1, 2], [{}], [{}]] [{list-id: [3, 4], [{}]]]
            I need to get the inside lists of the main lists. Then I need to get
            the first item of what I just got. This is a dictionary. Get the list-id.
            This is a list. Check to see if the current id is in this list. If
            so, then get the list-type from the dictionary.
        """
        the_dict = {}
        the_dict['left-indent'] = self.__left_indent
        the_dict['id'] = list_id
        self.__all_lists.append(the_dict)
        self.__write_obj.write(
            'mi<mk<list_start\n'
        )
        # bogus levels are sometimes written for empty paragraphs
        if str(self.__level) not in self.__allow_levels:
            lev_num = '0'
        else:
            lev_num = self.__level
        self.__write_obj.write(
            'mi<tg<open-att__<list<list-id>%s<level>%s'
            % (list_id, lev_num)
        )
        list_dict = {}
        if self.__list_of_lists:  # older RTF won't generate a list_of_lists
            index_of_list = self.__get_index_of_list(list_id)
            if index_of_list is not None:  # found a matching id
                curlist = self.__list_of_lists[index_of_list]
                list_dict = curlist[0]
                level = int(self.__level) + 1
                if level >= len(curlist):
                    level = len(curlist) - 1
                level_dict = curlist[level][0]
                list_type = level_dict.get('numbering-type')
                if list_type == 'bullet':
                    list_type = 'unordered'
                else:
                    list_type = 'ordered'
                self.__write_obj.write(
                    '<list-type>%s' % (list_type))
            else:  # no matching id
                self.__write_obj.write(
                    '<list-type>%s' % (self.__list_type))
        else:  # older RTF
            self.__write_obj.write(
                '<list-type>%s' % (self.__list_type))
        # if you want to dump all the info to the list, rather than
        # keeping it in the table above, change self.__write_list_info
        # to true.
        if self.__list_of_lists and self.__write_list_info and list_dict:
            not_allow = ['list-id',]
            the_keys_list = list_dict.keys()
            for the_key in the_keys_list:
                if the_key in not_allow:
                    continue
                self.__write_obj.write(f'<{the_key}>{list_dict[the_key]}')
            the_keys_level = level_dict.keys()
            for the_key in the_keys_level:
                self.__write_obj.write(f'<{the_key}>{level_dict[the_key]}')
        self.__write_obj.write('\n')
        self.__write_obj.write(
            'mi<mk<liststart_\n'
        )
        self.__write_start_item()

    def __get_index_of_list(self, list_id):
        """
        Requires:
            list_id -- id of current paragraph-definition
        Returns:
            an index of where the id occurs in list_of_lists, the
            dictionary passed to this module.
        Logic:
            Iterate through the big lists, the one passed to this module and
            get the first item, the dictionary. Use a counter to keep
            track of how many times you iterate with the counter.
            Once you find a match, return the counter.
            If no match is found, print out an error message.
        """
        # some RTF use 0 indexed list. Don't know what to do?
        if list_id == '0':
            return
        # NOTE: renamed loop variable; it previously shadowed builtin `list`
        for the_index, entry in enumerate(self.__list_of_lists):
            the_dict = entry[0]
            id_in_list = the_dict.get('list-id')
            if list_id in id_in_list:
                return the_index
        if self.__run_level > 0:
            sys.stderr.write('Module is make_lists.py\n'
            'Method is __get_index_of_list\n'
            'The main list does not appear to have a matching id for %s \n'
            % (list_id)
            )
        # sys.stderr.write(repr(self.__list_of_lists))
        # if self.__run_level > 3:
        # msg = 'level is "%s"\n' % self.__run_level
        # self.__bug_handler

    def __write_start_item(self):
        self.__write_obj.write('mi<mk<item_start\n')
        self.__write_obj.write('mi<tg<open______<item\n')
        self.__write_obj.write('mi<mk<itemstart_\n')

    def __write_end_item(self):
        self.__write_obj.write('mi<tg<item_end__\n')
        self.__write_obj.write('mi<tg<close_____<item\n')
        self.__write_obj.write('mi<tg<item__end_\n')

    def __default_func(self, line):
        """
        Required:
            self, line
        Returns:
            Nothing
        Logic
            Look for the start of a paragraph definition. If one is found, check if
            it contains a list-id. If it does, start a list. Change the state to
            in_pard.
        """
        if self.__token_info == 'mi<tg<open-att__' and line[17:37] == 'paragraph-definition':
            is_a_heading = self.__is_a_heading()
            if not is_a_heading:
                search_obj = re.search(self.__id_regex, line)
                if search_obj:
                    num = search_obj.group(1)
                    self.__state = 'in_pard'
                    search_obj_lv = re.search(self.__lv_regex, line)
                    if search_obj_lv:
                        self.__level = search_obj_lv.group(1)
                    self.__write_start_list(num)
        self.__write_obj.write(line)

    def __is_a_heading(self):
        # A heading counts as "not a list" when headings become sections,
        # or when the caller asked not to treat headings as lists.
        if self.__style_name not in self.__headings:
            return 0
        if self.__headings_to_sections or self.__no_headings_as_list:
            return 1
        return 0

    def __get_indent(self, line):
        if self.__token_info == 'mi<mk<left_inden':
            self.__left_indent = float(line[17:-1])

    def __get_list_type(self, line):
        if self.__token_info == 'mi<mk<list-type_':  # <ordered
            self.__list_type = line[17:-1]
            if self.__list_type == 'item':
                self.__list_type = "unordered"

    def __get_style_name(self, line):
        if self.__token_info == 'mi<mk<style-name':
            self.__style_name = line[17:-1]

    def make_lists(self):
        """
        Required:
            nothing
        Returns:
            original file will be changed
        Logic:
            Read one line at a time. Track the indent, list type and style
            name, then dispatch on the current state.
        """
        self.__initiate_values()
        read_obj = open_for_read(self.__file)
        self.__write_obj = open_for_write(self.__write_to)
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            self.__token_info = line[:16]
            self.__get_indent(line)
            self.__get_list_type(line)
            self.__get_style_name(line)
            action = self.__state_dict.get(self.__state)
            if action is None:
                # unknown state: report and skip rather than crash on None()
                sys.stderr.write('no matching state in module make_lists.py\n')
                sys.stderr.write(self.__state + '\n')
                continue
            action(line)
        read_obj.close()
        self.__write_obj.close()
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "make_lists.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 18,062 | Python | .py | 444 | 29.362613 | 93 | 0.505995 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |
27,299 | line_endings.py | kovidgoyal_calibre/src/calibre/ebooks/rtf2xml/line_endings.py | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os
from calibre.ebooks.rtf2xml import copy
from calibre.ptempfile import better_mktemp
from calibre.utils.cleantext import clean_ascii_chars
class FixLineEndings:
    """Normalize line endings to Unix style and strip illegal ASCII chars."""

    def __init__(self,
            bug_handler,
            in_file=None,
            copy=None,
            run_level=1,
            replace_illegals=1,
            ):
        self.__bug_handler = bug_handler
        self.__file = in_file
        self.__copy = copy
        self.__run_level = run_level
        self.__replace_illegals = replace_illegals
        self.__write_to = better_mktemp()

    def fix_endings(self):
        # slurp the whole file as bytes
        with open(self.__file, 'rb') as source:
            raw = source.read()
        # normalize Windows (\r\n) and old-Mac (\r) endings to Unix (\n)
        raw = raw.replace(b'\r\n', b'\n').replace(b'\r', b'\n')
        # optionally drop control characters that are invalid in XML
        if self.__replace_illegals:
            raw = clean_ascii_chars(raw)
        with open(self.__write_to, 'wb') as dest:
            dest.write(raw)
        # keep a debug copy if requested, then replace the original file
        copy_obj = copy.Copy(bug_handler=self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "line_endings.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
| 2,304 | Python | .py | 50 | 39.14 | 73 | 0.455758 | kovidgoyal/calibre | 19,243 | 2,250 | 4 | GPL-3.0 | 9/5/2024, 5:13:50 PM (Europe/Amsterdam) |