| signature (string, 8-3.44k chars) | body (string, 0-1.41M chars) | docstring (string, 1-122k chars) | id (string, 5-17 chars) |
|---|---|---|---|
def latex_output_graph(self, node):
|
graph = node['<STR_LIT>']<EOL>parts = node['<STR_LIT>']<EOL>graph_hash = get_graph_hash(node)<EOL>name = "<STR_LIT>" % graph_hash<EOL>dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '<STR_LIT>'))<EOL>if not os.path.exists(dest_path):<EOL><INDENT>os.makedirs(dest_path)<EOL><DEDENT>pdf_path = os.path.abspath(os.path.join(dest_path, name + "<STR_LIT>"))<EOL>graph.run_dot(['<STR_LIT>', '<STR_LIT>' % pdf_path],<EOL>name, parts, graph_options={'<STR_LIT:size>': '<STR_LIT>'})<EOL>return '<STR_LIT>' % pdf_path<EOL>
|
Output the graph for LaTeX. This will insert a PDF.
|
f10436:m4
|
def visit_inheritance_diagram(inner_func):
|
def visitor(self, node):<EOL><INDENT>try:<EOL><INDENT>content = inner_func(self, node)<EOL><DEDENT>except DotException as e:<EOL><INDENT>warning = self.document.reporter.warning(str(e), line=node.line)<EOL>warning.parent = node<EOL>node.children = [warning]<EOL><DEDENT>else:<EOL><INDENT>source = self.document.attributes['<STR_LIT:source>']<EOL>self.body.append(content)<EOL>node.children = []<EOL><DEDENT><DEDENT>return visitor<EOL>
|
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
|
f10436:m5
|
def __init__(self, class_names, show_builtins=False):
|
self.class_names = class_names<EOL>self.classes = self._import_classes(class_names)<EOL>self.all_classes = self._all_classes(self.classes)<EOL>if len(self.all_classes) == <NUM_LIT:0>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>self.show_builtins = show_builtins<EOL>
|
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
|
f10436:c1:m0
|
def _import_class_or_module(self, name):
|
try:<EOL><INDENT>path, base = self.py_sig_re.match(name).groups()<EOL><DEDENT>except:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>" % name)<EOL><DEDENT>fullname = (path or '<STR_LIT>') + base<EOL>path = (path and path.rstrip('<STR_LIT:.>'))<EOL>if not path:<EOL><INDENT>path = base<EOL><DEDENT>try:<EOL><INDENT>module = __import__(path, None, None, [])<EOL>my_import(fullname)<EOL><DEDENT>except ImportError:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>" % name)<EOL><DEDENT>try:<EOL><INDENT>todoc = module<EOL>for comp in fullname.split('<STR_LIT:.>')[<NUM_LIT:1>:]:<EOL><INDENT>todoc = getattr(todoc, comp)<EOL><DEDENT><DEDENT>except AttributeError:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>" % name)<EOL><DEDENT>if inspect.isclass(todoc):<EOL><INDENT>return [todoc]<EOL><DEDENT>elif inspect.ismodule(todoc):<EOL><INDENT>classes = []<EOL>for cls in todoc.__dict__.values():<EOL><INDENT>if inspect.isclass(cls) and cls.__module__ == todoc.__name__:<EOL><INDENT>classes.append(cls)<EOL><DEDENT><DEDENT>return classes<EOL><DEDENT>raise ValueError(<EOL>"<STR_LIT>" % name)<EOL>
|
Import a class using its fully-qualified *name*.
|
f10436:c1:m1
|
def _import_classes(self, class_names):
|
classes = []<EOL>for name in class_names:<EOL><INDENT>classes.extend(self._import_class_or_module(name))<EOL><DEDENT>return classes<EOL>
|
Import a list of classes.
|
f10436:c1:m2
|
def _all_classes(self, classes):
|
all_classes = {}<EOL>def recurse(cls):<EOL><INDENT>all_classes[cls] = None<EOL>for c in cls.__bases__:<EOL><INDENT>if c not in all_classes:<EOL><INDENT>recurse(c)<EOL><DEDENT><DEDENT><DEDENT>for cls in classes:<EOL><INDENT>recurse(cls)<EOL><DEDENT>return all_classes.keys()<EOL>
|
Return a list of all classes that are ancestors of *classes*.
|
f10436:c1:m3
|
def class_name(self, cls, parts=<NUM_LIT:0>):
|
module = cls.__module__<EOL>if module == '<STR_LIT>':<EOL><INDENT>fullname = cls.__name__<EOL><DEDENT>else:<EOL><INDENT>fullname = "<STR_LIT>" % (module, cls.__name__)<EOL><DEDENT>if parts == <NUM_LIT:0>:<EOL><INDENT>return fullname<EOL><DEDENT>name_parts = fullname.split('<STR_LIT:.>')<EOL>return '<STR_LIT:.>'.join(name_parts[-parts:])<EOL>
|
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
|
f10436:c1:m4
|
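The masked format string in the body above hides how *parts* truncates the dotted name; here is a minimal, hedged sketch of that tail-joining step, assuming the masked literal is a plain "%s.%s" join of module and class name (the input value is illustrative only).

```python
# Hedged sketch of class_name()'s `parts` handling.
fullname = 'matplotlib.axes.Axes'        # illustrative value, not from the source
parts = 2
name_parts = fullname.split('.')
print('.'.join(name_parts[-parts:]))     # -> axes.Axes
```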
def get_all_class_names(self):
|
return [self.class_name(x) for x in self.all_classes]<EOL>
|
Get all of the class names involved in the graph.
|
f10436:c1:m5
|
def generate_dot(self, fd, name, parts=<NUM_LIT:0>, urls={},<EOL>graph_options={}, node_options={},<EOL>edge_options={}):
|
g_options = self.default_graph_options.copy()<EOL>g_options.update(graph_options)<EOL>n_options = self.default_node_options.copy()<EOL>n_options.update(node_options)<EOL>e_options = self.default_edge_options.copy()<EOL>e_options.update(edge_options)<EOL>fd.write('<STR_LIT>' % name)<EOL>fd.write(self._format_graph_options(g_options))<EOL>for cls in self.all_classes:<EOL><INDENT>if not self.show_builtins and cls in __builtins__.values():<EOL><INDENT>continue<EOL><DEDENT>name = self.class_name(cls, parts)<EOL>this_node_options = n_options.copy()<EOL>url = urls.get(self.class_name(cls))<EOL>if url is not None:<EOL><INDENT>this_node_options['<STR_LIT>'] = '<STR_LIT>' % url<EOL><DEDENT>fd.write('<STR_LIT>' %<EOL>(name, self._format_node_options(this_node_options)))<EOL>for base in cls.__bases__:<EOL><INDENT>if not self.show_builtins and base in __builtins__.values():<EOL><INDENT>continue<EOL><DEDENT>base_name = self.class_name(base, parts)<EOL>fd.write('<STR_LIT>' %<EOL>(base_name, name,<EOL>self._format_node_options(e_options)))<EOL><DEDENT><DEDENT>fd.write('<STR_LIT>')<EOL>
|
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
|
f10436:c1:m8
|
def run_dot(self, args, name, parts=<NUM_LIT:0>, urls={},<EOL>graph_options={}, node_options={}, edge_options={}):
|
try:<EOL><INDENT>dot = subprocess.Popen(['<STR_LIT>'] + list(args),<EOL>stdin=subprocess.PIPE, stdout=subprocess.PIPE,<EOL>close_fds=True)<EOL><DEDENT>except OSError:<EOL><INDENT>raise DotException("<STR_LIT>")<EOL><DEDENT>except ValueError:<EOL><INDENT>raise DotException("<STR_LIT>")<EOL><DEDENT>except:<EOL><INDENT>raise DotException("<STR_LIT>")<EOL><DEDENT>self.generate_dot(dot.stdin, name, parts, urls, graph_options,<EOL>node_options, edge_options)<EOL>dot.stdin.close()<EOL>result = dot.stdout.read()<EOL>returncode = dot.wait()<EOL>if returncode != <NUM_LIT:0>:<EOL><INDENT>raise DotException("<STR_LIT>" % returncode)<EOL><DEDENT>return result<EOL>
|
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as command-line arguments.
*name* is the name of the graph.
*urls* is a dictionary mapping class names to HTTP URLs.
Raises DotException for any of the many OS and
installation-related errors that may occur.
|
f10436:c1:m9
|
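Because the string literals in the body above are masked, here is a hedged sketch of the same pattern: pipe DOT source to the Graphviz `dot` binary over stdin and capture its stdout. It assumes Graphviz is installed; the graph text, helper name, and output format are illustrative, not taken from the source.

```python
import subprocess

def render_dot(dot_source, fmt='svg'):
    # Feed DOT text to `dot` and return whatever it writes to stdout.
    proc = subprocess.Popen(['dot', '-T%s' % fmt],
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _ = proc.communicate(dot_source.encode('utf-8'))
    if proc.returncode != 0:
        raise RuntimeError('dot exited with %d' % proc.returncode)
    return out

svg = render_dot('digraph G { A -> B; }')
```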
def out_of_date(original, derived):
|
return (not os.path.exists(derived) or<EOL>os.stat(derived).st_mtime < os.stat(original).st_mtime)<EOL>
|
Returns True if *derived* is out-of-date with respect to *original*,
both of which are full file paths.
TODO: this check isn't adequate in some cases. e.g., if we discover
a bug when building the examples, the original and derived will be
unchanged but we still want to force a rebuild.
|
f10437:m0
|
def setup(app):
|
<EOL>pass<EOL>
|
Setup as a sphinx extension.
|
f10439:m0
|
def ignore_patterns(*patterns):
|
import fnmatch<EOL>def _ignore_patterns(path, names):<EOL><INDENT>ignored_names = []<EOL>for pattern in patterns:<EOL><INDENT>ignored_names.extend(fnmatch.filter(names, pattern))<EOL><DEDENT>return set(ignored_names)<EOL><DEDENT>return _ignore_patterns<EOL>
|
Function that can be used as the copytree() ignore parameter.
*patterns* is a sequence of glob-style patterns
that are used to exclude files.
|
f10440:m0
|
def copytree(src, dst, symlinks=False, ignore=None):
|
from shutil import copy2, Error, copystat<EOL>names = os.listdir(src)<EOL>if ignore is not None:<EOL><INDENT>ignored_names = ignore(src, names)<EOL><DEDENT>else:<EOL><INDENT>ignored_names = set()<EOL><DEDENT>os.makedirs(dst)<EOL>errors = []<EOL>for name in names:<EOL><INDENT>if name in ignored_names:<EOL><INDENT>continue<EOL><DEDENT>srcname = os.path.join(src, name)<EOL>dstname = os.path.join(dst, name)<EOL>try:<EOL><INDENT>if symlinks and os.path.islink(srcname):<EOL><INDENT>linkto = os.readlink(srcname)<EOL>os.symlink(linkto, dstname)<EOL><DEDENT>elif os.path.isdir(srcname):<EOL><INDENT>copytree(srcname, dstname, symlinks, ignore)<EOL><DEDENT>else:<EOL><INDENT>copy2(srcname, dstname)<EOL><DEDENT><DEDENT>except Error as err:<EOL><INDENT>errors.extend(err.args[<NUM_LIT:0>])<EOL><DEDENT>except EnvironmentError as why:<EOL><INDENT>errors.append((srcname, dstname, str(why)))<EOL><DEDENT><DEDENT>try:<EOL><INDENT>copystat(src, dst)<EOL><DEDENT>except OSError as why:<EOL><INDENT>if WindowsError is not None and isinstance(why, WindowsError):<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>errors.extend((src, dst, str(why)))<EOL><DEDENT><DEDENT>if errors:<EOL><INDENT>raise Error(errors)<EOL><DEDENT>
|
Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
|
f10440:m1
|
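A hedged usage sketch tying the two helpers above together. The paths and patterns are illustrative; the standard library's ``shutil`` ships equivalent ``copytree``/``ignore_patterns`` functions, which are used here so the snippet runs on its own.

```python
from shutil import copytree, ignore_patterns

# Copy a tree while skipping compiled files, temp files, and VCS metadata.
# Note: the destination directory must not already exist.
copytree('project/src', 'project/build',
         ignore=ignore_patterns('*.pyc', 'tmp*', '.git'))
```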
def _isbn_cleanse(isbn, checksum=True):
|
if not isinstance(isbn, string_types):<EOL><INDENT>raise TypeError('<STR_LIT>' % isbn)<EOL><DEDENT>if PY2 and isinstance(isbn, str): <EOL><INDENT>isbn = unicode(isbn)<EOL>uni_input = False<EOL><DEDENT>else: <EOL><INDENT>uni_input = True<EOL><DEDENT>for dash in DASHES:<EOL><INDENT>isbn = isbn.replace(dash, unicode())<EOL><DEDENT>if checksum:<EOL><INDENT>if not isbn[:-<NUM_LIT:1>].isdigit():<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT>if len(isbn) == <NUM_LIT:9>:<EOL><INDENT>isbn = '<STR_LIT:0>' + isbn<EOL><DEDENT>if len(isbn) == <NUM_LIT:10>:<EOL><INDENT>if not (isbn[-<NUM_LIT:1>].isdigit() or isbn[-<NUM_LIT:1>] in '<STR_LIT>'):<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT><DEDENT>elif len(isbn) == <NUM_LIT>:<EOL><INDENT>if not isbn[-<NUM_LIT:1>].isdigit():<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT>if not isbn.startswith(('<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if len(isbn) == <NUM_LIT:8>:<EOL><INDENT>isbn = '<STR_LIT:0>' + isbn<EOL><DEDENT>elif len(isbn) == <NUM_LIT:12> and not isbn[:<NUM_LIT:3>].startswith(('<STR_LIT>', '<STR_LIT>')):<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT>if not isbn.isdigit():<EOL><INDENT>raise IsbnError('<STR_LIT>')<EOL><DEDENT>if not len(isbn) in (<NUM_LIT:9>, <NUM_LIT:12>):<EOL><INDENT>raise IsbnError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>if PY2 and not uni_input: <EOL><INDENT>return str(isbn)<EOL><DEDENT>else: <EOL><INDENT>return isbn<EOL><DEDENT>
|
Check that the ISBN is a string and passes basic sanity checks.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
checksum (bool): ``True`` if ``isbn`` includes checksum character
Returns:
``str``: ISBN with hyphenation removed, including when called with a
SBN
Raises:
TypeError: ``isbn`` is not a ``str`` type
IsbnError: Incorrect length for ``isbn``
IsbnError: Incorrect SBN or ISBN formatting
|
f10454:m0
|
def calculate_checksum(isbn):
|
isbn = [int(i) for i in _isbn_cleanse(isbn, checksum=False)]<EOL>if len(isbn) == <NUM_LIT:9>:<EOL><INDENT>products = [x * y for x, y in zip(isbn, range(<NUM_LIT:1>, <NUM_LIT:10>))]<EOL>check = sum(products) % <NUM_LIT:11><EOL>if check == <NUM_LIT:10>:<EOL><INDENT>check = '<STR_LIT:X>'<EOL><DEDENT><DEDENT>else:<EOL><INDENT>products = []<EOL>for i in range(<NUM_LIT:12>):<EOL><INDENT>if i % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>products.append(isbn[i])<EOL><DEDENT>else:<EOL><INDENT>products.append(isbn[i] * <NUM_LIT:3>)<EOL><DEDENT><DEDENT>check = <NUM_LIT:10> - sum(products) % <NUM_LIT:10><EOL>if check == <NUM_LIT:10>:<EOL><INDENT>check = <NUM_LIT:0><EOL><DEDENT><DEDENT>return str(check)<EOL>
|
Calculate ISBN checksum.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
Returns:
``str``: Checksum for given ISBN or SBN
|
f10454:m1
|
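The numeric literals in the body above are masked by the dataset, so here is a hedged, de-anonymized sketch assuming the standard ISBN constants: positional weights 1-9 modulo 11 for SBN/ISBN-10, and alternating 1/3 weights modulo 10 for ISBN-13. The well-known ISBN 0-306-40615-2 / 978-0-306-40615-7 serves as a worked example.

```python
def isbn10_checksum(digits9):
    # digits9: the nine data digits of an SBN/ISBN-10
    total = sum(d * w for d, w in zip(digits9, range(1, 10)))
    check = total % 11
    return 'X' if check == 10 else str(check)

def isbn13_checksum(digits12):
    # digits12: the twelve data digits of an ISBN-13 (weights 1,3,1,3,...)
    total = sum(d * (3 if i % 2 else 1) for i, d in enumerate(digits12))
    return str((10 - total % 10) % 10)

assert isbn10_checksum([0, 3, 0, 6, 4, 0, 6, 1, 5]) == '2'           # 0-306-40615-2
assert isbn13_checksum([9, 7, 8, 0, 3, 0, 6, 4, 0, 6, 1, 5]) == '7'  # 978-0-306-40615-7
```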
def convert(isbn, code='<STR_LIT>'):
|
isbn = _isbn_cleanse(isbn)<EOL>if len(isbn) == <NUM_LIT:10>:<EOL><INDENT>isbn = code + isbn[:-<NUM_LIT:1>]<EOL>return isbn + calculate_checksum(isbn)<EOL><DEDENT>else:<EOL><INDENT>if isbn.startswith('<STR_LIT>'):<EOL><INDENT>return isbn[<NUM_LIT:3>:-<NUM_LIT:1>] + calculate_checksum(isbn[<NUM_LIT:3>:-<NUM_LIT:1>])<EOL><DEDENT>else:<EOL><INDENT>raise IsbnError('<STR_LIT>'<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>
|
Convert ISBNs between ISBN-10 and ISBN-13.
Note:
No attempt to hyphenate converted ISBNs is made, because the
specification requires that *any* hyphenation must be correct but
allows ISBNs without hyphenation.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
code (str): EAN Bookland code
Returns:
``str``: Converted ISBN-10 or ISBN-13
Raises:
IsbnError: When ISBN-13 isn't convertible to an ISBN-10
|
f10454:m2
|
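A hedged sketch of the conversion logic above, assuming the masked prefix literal is the Bookland code ``'978'``; it reuses the checksum helpers sketched after ``calculate_checksum``.

```python
def isbn10_to_13(isbn10, prefix='978'):
    body = prefix + isbn10[:-1]                    # drop the old check digit
    return body + isbn13_checksum([int(c) for c in body])

def isbn13_to_10(isbn13):
    if not isbn13.startswith('978'):
        raise ValueError('only Bookland "978" ISBN-13s convert to ISBN-10')
    body = isbn13[3:-1]
    return body + isbn10_checksum([int(c) for c in body])

assert isbn10_to_13('0306406152') == '9780306406157'
assert isbn13_to_10('9780306406157') == '0306406152'
```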
def validate(isbn):
|
isbn = _isbn_cleanse(isbn)<EOL>return isbn[-<NUM_LIT:1>].upper() == calculate_checksum(isbn[:-<NUM_LIT:1>])<EOL>
|
Validate ISBNs.
Warning:
Publishers have been known to go to press with broken ISBNs, and
therefore validation failures do not completely guarantee an ISBN is
incorrectly entered. It should however be noted that it is massively
more likely *you* have entered an invalid ISBN than the published ISBN
is incorrectly produced. An example of this probability in the real
world is that `Amazon <https://www.amazon.com/>`__ consider it so
unlikely that they refuse to search for invalid published ISBNs.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
Returns:
``bool``: ``True`` if ISBN is valid
|
f10454:m3
|
def __init__(self, isbn):
|
super(Isbn, self).__init__()<EOL>self._isbn = isbn<EOL>if len(isbn) in (<NUM_LIT:9>, <NUM_LIT:12>):<EOL><INDENT>self.isbn = _isbn_cleanse(isbn, False)<EOL><DEDENT>else:<EOL><INDENT>self.isbn = _isbn_cleanse(isbn)<EOL><DEDENT>
|
Initialise a new ``Isbn`` object.
Args:
isbn (str): ISBN string
|
f10454:c4:m0
|
def __repr__(self):
|
return '<STR_LIT>' % (self.__class__.__name__, self.isbn)<EOL>
|
Self-documenting string representation.
Returns:
``str``: String to recreate ``Isbn`` object
|
f10454:c4:m1
|
def __str__(self):
|
return '<STR_LIT>' % self._isbn<EOL>
|
Pretty printed ISBN string.
Returns:
``str``: Human readable string representation of ``Isbn`` object
|
f10454:c4:m2
|
def __format__(self, format_spec=None):
|
if not format_spec: <EOL><INDENT>return str(self)<EOL><DEDENT>elif format_spec == '<STR_LIT:url>':<EOL><INDENT>return self.to_url()<EOL><DEDENT>elif format_spec.startswith('<STR_LIT>'):<EOL><INDENT>parts = format_spec.split('<STR_LIT::>')[<NUM_LIT:1>:]<EOL>site = parts[<NUM_LIT:0>]<EOL>if len(parts) > <NUM_LIT:1>:<EOL><INDENT>country = parts[<NUM_LIT:1>]<EOL><DEDENT>else:<EOL><INDENT>country = '<STR_LIT>'<EOL><DEDENT>return self.to_url(site, country)<EOL><DEDENT>elif format_spec == '<STR_LIT>':<EOL><INDENT>return self.to_urn()<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' % format_spec)<EOL><DEDENT>
|
Extended pretty printing for ISBN strings.
Args:
format_spec (str): Extended format to use
Returns:
``str``: Human readable string representation of ``Isbn`` object
Raises:
ValueError: Unknown value for ``format_spec``
|
f10454:c4:m3
|
def calculate_checksum(self):
|
if len(self.isbn) in (<NUM_LIT:9>, <NUM_LIT:12>):<EOL><INDENT>return calculate_checksum(self.isbn)<EOL><DEDENT>else:<EOL><INDENT>return calculate_checksum(self.isbn[:-<NUM_LIT:1>])<EOL><DEDENT>
|
Calculate ISBN checksum.
Returns:
``str``: ISBN checksum value
|
f10454:c4:m4
|
def convert(self, code='<STR_LIT>'):
|
return convert(self.isbn, code)<EOL>
|
Convert ISBNs between ISBN-10 and ISBN-13.
Args:
code (str): ISBN-13 prefix code
Returns:
``str``: Converted ISBN
|
f10454:c4:m5
|
def validate(self):
|
return validate(self.isbn)<EOL>
|
Validate an ISBN value.
Returns:
``bool``: ``True`` if ISBN is valid
|
f10454:c4:m6
|
def to_url(self, site='<STR_LIT>', country='<STR_LIT>'):
|
try:<EOL><INDENT>try:<EOL><INDENT>url, tlds = URL_MAP[site]<EOL><DEDENT>except ValueError:<EOL><INDENT>tlds = None<EOL>url = URL_MAP[site]<EOL><DEDENT><DEDENT>except KeyError:<EOL><INDENT>raise SiteError(site)<EOL><DEDENT>inject = {'<STR_LIT>': self._isbn}<EOL>if tlds:<EOL><INDENT>if country not in tlds:<EOL><INDENT>raise CountryError(country)<EOL><DEDENT>tld = tlds[country]<EOL>if not tld:<EOL><INDENT>tld = country<EOL><DEDENT>inject['<STR_LIT>'] = tld<EOL><DEDENT>return url % inject<EOL>
|
Generate a link to an online book site.
Args:
site (str): Site to create link to
country (str): Country specific version of ``site``
Returns:
``str``: URL on ``site`` for book
Raises:
SiteError: Unknown site value
CountryError: Unknown country value
|
f10454:c4:m7
|
def to_urn(self):
|
return '<STR_LIT>' % self._isbn<EOL>
|
Generate a RFC 3187 URN.
:rfc:`3187` is the commonly accepted way to use ISBNs as uniform
resource names.
Returns:
``str``: :rfc:`3187` compliant URN
|
f10454:c4:m8
|
def __init__(self, isbn):
|
super(Isbn10, self).__init__(isbn)<EOL>
|
Initialise a new ``Isbn10`` object.
Args:
isbn (str): ISBN-10 string
|
f10454:c5:m0
|
def calculate_checksum(self):
|
return calculate_checksum(self.isbn[:<NUM_LIT:9>])<EOL>
|
Calculate ISBN-10 checksum.
Returns:
``str``: ISBN-10 checksum value
|
f10454:c5:m1
|
def convert(self, code='<STR_LIT>'):
|
return convert(self.isbn, code)<EOL>
|
Convert ISBN-10 to ISBN-13.
Args:
code (str): ISBN-13 prefix code
Returns:
``str``: ISBN-13 string
|
f10454:c5:m2
|
def __init__(self, sbn):
|
isbn = '<STR_LIT:0>' + sbn<EOL>super(Sbn, self).__init__(isbn)<EOL>
|
Initialise a new ``Sbn`` object.
Args:
sbn (str): SBN string
|
f10454:c6:m0
|
def __repr__(self):
|
return '<STR_LIT>' % (self.__class__.__name__, self.isbn[<NUM_LIT:1>:])<EOL>
|
Self-documenting string representation.
Returns:
``str``: String to recreate ``Sbn`` object
|
f10454:c6:m1
|
def calculate_checksum(self):
|
return calculate_checksum(self.isbn[:<NUM_LIT:9>])<EOL>
|
Calculate SBN checksum.
Returns:
``str``: SBN checksum value
|
f10454:c6:m2
|
def convert(self, code='<STR_LIT>'):
|
return super(Sbn, self).convert(code)<EOL>
|
Convert SBN to ISBN-13.
Args:
code (str): ISBN-13 prefix code
Returns:
``str``: ISBN-13 string
|
f10454:c6:m3
|
def __init__(self, isbn):
|
super(Isbn13, self).__init__(isbn)<EOL>
|
Initialise a new ``Isbn13`` object.
Args:
isbn (str): ISBN-13 string
|
f10454:c7:m0
|
def calculate_checksum(self):
|
return calculate_checksum(self.isbn[:<NUM_LIT:12>])<EOL>
|
Calculate ISBN-13 checksum.
Returns:
``str``: ISBN-13 checksum value
|
f10454:c7:m1
|
def convert(self, code=None):
|
return convert(self.isbn)<EOL>
|
Convert ISBN-13 to ISBN-10.
Args:
code: Ignored, only for compatibility with ``Isbn``
Returns:
``str``: ISBN-10 string
Raises:
ValueError: When ISBN-13 isn't a Bookland "978" ISBN
|
f10454:c7:m2
|
def atomize(f):
|
def wrapper(*args, **kwargs):<EOL><INDENT>db = args[<NUM_LIT:0>]<EOL>def if_atomic(*args, **kwargs):<EOL><INDENT>try:<EOL><INDENT>db.connect()<EOL>result = f(*args, **kwargs)<EOL>db.connection.commit()<EOL>db.disconnect()<EOL>return result<EOL><DEDENT>except Exception as e:<EOL><INDENT>db.rollback()<EOL>raise<EOL><DEDENT><DEDENT>if db.atomic:<EOL><INDENT>return if_atomic(*args, **kwargs)<EOL><DEDENT>else:<EOL><INDENT>return f(*args, **kwargs)<EOL><DEDENT><DEDENT>return wrapper<EOL>
|
``@atomize`` decorator.
Creates two versions of a function,
for atomic and non-atomic queries.
|
f10465:c0:m3
|
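The flattened body above is hard to read, so here is the same decorator restructured as a readable sketch. The ``connect``/``disconnect``/``rollback``/``connection``/``atomic`` attribute names come from the body; the surrounding database class they belong to is assumed, not shown in the source.

```python
def atomize(f):
    """Run `f` inside connect/commit/rollback when the database is atomic."""
    def wrapper(*args, **kwargs):
        db = args[0]                      # the bound method's `self`
        def if_atomic(*a, **kw):
            try:
                db.connect()
                result = f(*a, **kw)
                db.connection.commit()
                db.disconnect()
                return result
            except Exception:
                db.rollback()
                raise
        if db.atomic:
            return if_atomic(*args, **kwargs)
        return f(*args, **kwargs)
    return wrapper
```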
@atomize<EOL><INDENT>def get_one(self, qry, tpl):<DEDENT>
|
self.cur.execute(qry + '<STR_LIT>', tpl)<EOL>result = self.cur.fetchone()<EOL>if type(result) is tuple and len(result) == <NUM_LIT:1>:<EOL><INDENT>result = result[<NUM_LIT:0>]<EOL><DEDENT>return result<EOL>
|
get a single row from a query
limit 1 is automatically added
|
f10465:c0:m4
|
@atomize<EOL><INDENT>def get_all(self, qry, tpl):<DEDENT>
|
self.cur.execute(qry, tpl)<EOL>result = self.cur.fetchall()<EOL>return result<EOL>
|
get all rows for a query
|
f10465:c0:m5
|
@atomize<EOL><INDENT>def put(self, qry, tpl):<DEDENT>
|
self.cur.execute(qry, tpl)<EOL>
|
insert, update or delete query
|
f10465:c0:m6
|
def nextversion(current_version):
|
norm_ver = verlib.suggest_normalized_version(current_version)<EOL>if norm_ver is None:<EOL><INDENT>return None<EOL><DEDENT>norm_ver = verlib.NormalizedVersion(norm_ver)<EOL>parts = norm_ver.parts <EOL>assert(len(parts) == <NUM_LIT:3>)<EOL>if len(parts[<NUM_LIT:2>]) > <NUM_LIT:1>: <EOL><INDENT>if parts[<NUM_LIT:2>][-<NUM_LIT:1>] == '<STR_LIT:f>': <EOL><INDENT>parts = _mk_incremented_parts(parts, part_idx=<NUM_LIT:2>, in_part_idx=-<NUM_LIT:2>, incval=<NUM_LIT:1>)<EOL><DEDENT>else: <EOL><INDENT>parts = _mk_incremented_parts(parts, part_idx=<NUM_LIT:2>, in_part_idx=-<NUM_LIT:1>, incval=<NUM_LIT:1>)<EOL><DEDENT><DEDENT>elif len(parts[<NUM_LIT:1>]) > <NUM_LIT:1>: <EOL><INDENT>parts = _mk_incremented_parts(parts, part_idx=<NUM_LIT:1>, in_part_idx=-<NUM_LIT:1>, incval=<NUM_LIT:1>)<EOL><DEDENT>else: <EOL><INDENT>parts = _mk_incremented_parts(parts, part_idx=<NUM_LIT:0>, in_part_idx=-<NUM_LIT:1>, incval=<NUM_LIT:1>)<EOL><DEDENT>norm_ver.parts = parts<EOL>return str(norm_ver)<EOL>
|
Returns incremented module version number.
:param current_version: version string to increment
:returns: Next version string (PEP 386 compatible) if possible.
If impossible (since `current_version` is too far from PEP 386),
`None` is returned.
|
f10468:m0
|
def _detect_encoding(data=None):
|
import locale<EOL>enc_list = ['<STR_LIT:utf-8>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>']<EOL>code = locale.getpreferredencoding(False)<EOL>if data is None:<EOL><INDENT>return code<EOL><DEDENT>if code.lower() not in enc_list:<EOL><INDENT>enc_list.insert(<NUM_LIT:0>, code.lower())<EOL><DEDENT>for c in enc_list:<EOL><INDENT>try:<EOL><INDENT>for line in data:<EOL><INDENT>line.decode(c)<EOL><DEDENT><DEDENT>except (UnicodeDecodeError, UnicodeError, AttributeError):<EOL><INDENT>continue<EOL><DEDENT>return c<EOL><DEDENT>print("<STR_LIT>")<EOL>
|
Return the default system encoding. If data is passed, try
to decode the data with the default system encoding or from a short
list of encoding types to test.
Args:
data - list of lists
Returns:
enc - system encoding
|
f10476:m0
|
def view(data, enc=None, start_pos=None, delimiter=None, hdr_rows=None,<EOL>idx_cols=None, sheet_index=<NUM_LIT:0>, transpose=False, wait=None,<EOL>recycle=None, detach=None, metavar=None, title=None):
|
global WAIT, RECYCLE, DETACH, VIEW<EOL>model = read_model(data, enc=enc, delimiter=delimiter, hdr_rows=hdr_rows,<EOL>idx_cols=idx_cols, sheet_index=sheet_index,<EOL>transpose=transpose)<EOL>if model is None:<EOL><INDENT>warnings.warn("<STR_LIT>".format(type(data)),<EOL>category=RuntimeWarning)<EOL>return None<EOL><DEDENT>if wait is None: wait = WAIT<EOL>if recycle is None: recycle = RECYCLE<EOL>if detach is None: detach = DETACH<EOL>if wait is None:<EOL><INDENT>if '<STR_LIT>' not in sys.modules:<EOL><INDENT>wait = not bool(detach)<EOL><DEDENT>else:<EOL><INDENT>import matplotlib.pyplot as plt<EOL>wait = not plt.isinteractive()<EOL><DEDENT><DEDENT>if metavar is None:<EOL><INDENT>if isinstance(data, basestring):<EOL><INDENT>metavar = data<EOL><DEDENT>else:<EOL><INDENT>metavar = _varname_in_stack(data, <NUM_LIT:1>)<EOL><DEDENT><DEDENT>if VIEW is None:<EOL><INDENT>if not detach:<EOL><INDENT>VIEW = ViewController()<EOL><DEDENT>else:<EOL><INDENT>VIEW = DetachedViewController()<EOL>VIEW.setDaemon(True)<EOL>VIEW.start()<EOL>if VIEW.is_detached():<EOL><INDENT>atexit.register(VIEW.exit)<EOL><DEDENT>else:<EOL><INDENT>VIEW = None<EOL>return None<EOL><DEDENT><DEDENT><DEDENT>view_kwargs = {'<STR_LIT>': hdr_rows, '<STR_LIT>': idx_cols,<EOL>'<STR_LIT>': start_pos, '<STR_LIT>': metavar, '<STR_LIT:title>': title}<EOL>VIEW.view(model, view_kwargs, wait=wait, recycle=recycle)<EOL>return VIEW<EOL>
|
View the supplied data in an interactive, graphical table widget.
data: When a valid path or IO object, read it as a tabular text file. When
a valid URI, a Blaze object is constructed and visualized. Any other
supported datatype is visualized directly and incrementally *without
copying*.
enc: File encoding (such as "utf-8", normally autodetected).
delimiter: Text file delimiter (normally autodetected).
hdr_rows: For files or lists of lists, specify the number of header rows.
For files only, a default of one header line is assumed.
idx_cols: For files or lists of lists, specify the number of index columns.
By default, no index is assumed.
sheet_index: For multi-table files (such as xls[x]), specify the sheet
index to read, starting from 0. Defaults to the first.
start_pos: A tuple of the form (y, x) specifying the initial cursor
position. Negative offsets count from the end of the dataset.
transpose: Transpose the resulting view.
metavar: name of the variable being shown for display purposes (inferred
automatically when possible).
title: title of the data window.
wait: Wait for the user to close the view before returning. By default, try
to match the behavior of ``matplotlib.is_interactive()``. If
matplotlib is not loaded, wait only if ``detach`` is also False. The
default value can also be set through ``gtabview.WAIT``.
recycle: Recycle the previous window instead of creating a new one. The
default is True, and can also be set through ``gtabview.RECYCLE``.
detach: Create a fully detached GUI thread for interactive use (note: this
is *not* necessary if matplotlib is loaded). The default is False,
and can also be set through ``gtabview.DETACH``.
|
f10480:m1
|
def comparable(klass):
|
<EOL>if not is_py3:<EOL><INDENT>return klass<EOL><DEDENT>def __eq__(self, other):<EOL><INDENT>c = self.__cmp__(other)<EOL>if c is NotImplemented:<EOL><INDENT>return c<EOL><DEDENT>return c == <NUM_LIT:0><EOL><DEDENT>def __ne__(self, other):<EOL><INDENT>c = self.__cmp__(other)<EOL>if c is NotImplemented:<EOL><INDENT>return c<EOL><DEDENT>return c != <NUM_LIT:0><EOL><DEDENT>def __lt__(self, other):<EOL><INDENT>c = self.__cmp__(other)<EOL>if c is NotImplemented:<EOL><INDENT>return c<EOL><DEDENT>return c < <NUM_LIT:0><EOL><DEDENT>def __le__(self, other):<EOL><INDENT>c = self.__cmp__(other)<EOL>if c is NotImplemented:<EOL><INDENT>return c<EOL><DEDENT>return c <= <NUM_LIT:0><EOL><DEDENT>def __gt__(self, other):<EOL><INDENT>c = self.__cmp__(other)<EOL>if c is NotImplemented:<EOL><INDENT>return c<EOL><DEDENT>return c > <NUM_LIT:0><EOL><DEDENT>def __ge__(self, other):<EOL><INDENT>c = self.__cmp__(other)<EOL>if c is NotImplemented:<EOL><INDENT>return c<EOL><DEDENT>return c >= <NUM_LIT:0><EOL><DEDENT>klass.__lt__ = __lt__<EOL>klass.__gt__ = __gt__<EOL>klass.__le__ = __le__<EOL>klass.__ge__ = __ge__<EOL>klass.__eq__ = __eq__<EOL>klass.__ne__ = __ne__<EOL>return klass<EOL>
|
Class decorator that ensures support for the special C{__cmp__} method.
On Python 2 this does nothing.
On Python 3, C{__eq__}, C{__lt__}, etc. methods are added to the class,
relying on C{__cmp__} to implement their comparisons.
|
f10486:m0
|
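A hedged usage sketch of the decorator above: on Python 3, a class that defines only ``__cmp__`` gains the rich comparison operators. The ``Version`` class is illustrative and assumes ``comparable`` is in scope.

```python
@comparable
class Version(object):
    def __init__(self, n):
        self.n = n

    def __cmp__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        # Standard cmp idiom: -1, 0, or 1
        return (self.n > other.n) - (self.n < other.n)

assert Version(1) < Version(2)
assert Version(3) >= Version(3)
assert Version(2) != Version(5)
```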
def validate_class_type_arguments(operator):
|
def inner(function):<EOL><INDENT>def wrapper(self, *args, **kwargs):<EOL><INDENT>for arg in args + tuple(kwargs.values()):<EOL><INDENT>if not isinstance(arg, self.__class__):<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>'.format(<EOL>type(self).__name__, operator, type(arg).__name__<EOL>)<EOL>)<EOL><DEDENT><DEDENT>return function(self, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return inner<EOL>
|
Decorator to validate that all arguments to the function
are instances of the calling class, for the passed operator.
|
f10489:m0
|
def validate_arguments_type_of_function(param_type=None):
|
def inner(function):<EOL><INDENT>def wrapper(self, *args, **kwargs):<EOL><INDENT>type_ = param_type or type(self)<EOL>for arg in args + tuple(kwargs.values()):<EOL><INDENT>if not isinstance(arg, type_):<EOL><INDENT>raise TypeError(<EOL>(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>).format(<EOL>type(self).__name__,<EOL>function.__name__,<EOL>type_.__name__,<EOL>)<EOL>)<EOL><DEDENT><DEDENT>return function(self, *args, **kwargs)<EOL><DEDENT>return wrapper<EOL><DEDENT>return inner<EOL>
|
Decorator to validate that the arguments of the decorated function
are instances of the `param_type` class.
If `param_type` is None, the class in which the decorator is used is assumed.
Note: Use this decorator on the methods of the class.
|
f10489:m1
|
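A hedged usage sketch of the validator above; the ``Money`` class is illustrative and not part of the original module.

```python
class Money(object):
    def __init__(self, amount):
        self.amount = amount

    @validate_arguments_type_of_function()   # param_type defaults to type(self)
    def add(self, other):
        return Money(self.amount + other.amount)

Money(3).add(Money(4))      # fine
# Money(3).add(4)           # would raise TypeError: argument must be a Money
```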
def utc_offset(time_struct=None):
|
if time_struct:<EOL><INDENT>ts = time_struct<EOL><DEDENT>else:<EOL><INDENT>ts = time.localtime()<EOL><DEDENT>if ts[-<NUM_LIT:1>]:<EOL><INDENT>offset = time.altzone<EOL><DEDENT>else:<EOL><INDENT>offset = time.timezone<EOL><DEDENT>return offset<EOL>
|
Returns the time offset from UTC accounting for DST
Keyword Arguments:
time_struct {time.struct_time} -- the struct time for which to
return the UTC offset.
If None, use current local time.
|
f10489:m2
|
def now():
|
epoch = time.time()<EOL>return MayaDT(epoch=epoch)<EOL>
|
Returns a MayaDT instance for this exact moment.
|
f10489:m7
|
def when(string, timezone='<STR_LIT>', prefer_dates_from='<STR_LIT>'):
|
settings = {<EOL>'<STR_LIT>': timezone,<EOL>'<STR_LIT>': True,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': prefer_dates_from,<EOL>}<EOL>dt = dateparser.parse(string, settings=settings)<EOL>if dt is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>return MayaDT.from_datetime(dt)<EOL>
|
Returns a MayaDT instance for the human moment specified.
Powered by dateparser. Useful for scraping websites.
Examples:
'next week', 'now', 'tomorrow', '300 years ago', 'August 14, 2015'
Keyword Arguments:
string -- string to be parsed
timezone -- timezone referenced from (default: 'UTC')
prefer_dates_from -- what dates are preferred when `string` is ambiguous.
options are 'past', 'future', and 'current_period'
(default: 'current_period'). see: [1]
Reference:
[1] dateparser.readthedocs.io/en/latest/usage.html#handling-incomplete-dates
|
f10489:m8
|
def parse(string, timezone='<STR_LIT>', day_first=False, year_first=True, strict=False):
|
options = {}<EOL>options['<STR_LIT>'] = timezone<EOL>options['<STR_LIT>'] = day_first<EOL>options['<STR_LIT>'] = year_first<EOL>options['<STR_LIT:strict>'] = strict<EOL>dt = pendulum.parse(str(string), **options)<EOL>return MayaDT.from_datetime(dt)<EOL>
|
Returns a MayaDT instance for the machine-produced moment specified.
Powered by pendulum.
Accepts most known formats. Useful for working with data.
Keyword Arguments:
string -- string to be parsed
timezone -- timezone referenced from (default: 'UTC')
day_first -- if true, the first value (e.g. 01/05/2016)
is parsed as day.
if year_first is set to True, this distinguishes
between YDM and YMD. (default: False)
year_first -- if true, the first value (e.g. 2016/05/01)
is parsed as year (default: True)
strict -- if False, allow pendulum to fall back on datetime parsing
if pendulum's own parsing fails
|
f10489:m9
|
def _seconds_or_timedelta(duration):
|
if isinstance(duration, int):<EOL><INDENT>dt_timedelta = timedelta(seconds=duration)<EOL><DEDENT>elif isinstance(duration, timedelta):<EOL><INDENT>dt_timedelta = duration<EOL><DEDENT>else:<EOL><INDENT>raise TypeError(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'<EOL>)<EOL><DEDENT>return dt_timedelta<EOL>
|
Returns `datetime.timedelta` object for the passed duration.
Keyword Arguments:
duration -- `datetime.timedelta` object or seconds in `int` format.
|
f10489:m10
|
def intervals(start, end, interval):
|
interval = _seconds_or_timedelta(interval)<EOL>current_timestamp = start<EOL>while current_timestamp.epoch < end.epoch:<EOL><INDENT>yield current_timestamp<EOL>current_timestamp = current_timestamp.add(<EOL>seconds=interval.total_seconds()<EOL>)<EOL><DEDENT>
|
Yields MayaDT objects between the start and end MayaDTs given,
at a given interval (seconds or timedelta).
|
f10489:m12
|
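These functions appear to come from the ``maya`` datetime library; here is a hedged usage sketch stepping through one day in hourly intervals, assuming ``maya`` is installed and exposes ``now``, ``add``, ``intervals``, and ``iso8601`` as shown in the rows above.

```python
import maya

start = maya.now()
end = start.add(days=1)
# Yields a MayaDT for every hour between start and end.
for tick in maya.intervals(start=start, end=end, interval=60 * 60):
    print(tick.iso8601())
```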
def __format__(self, *args, **kwargs):
|
return format(self.datetime(), *args, **kwargs)<EOL>
|
Returns the datetime's format
|
f10489:c0:m3
|
def add(self, **kwargs):
|
return self.from_datetime(<EOL>pendulum.instance(self.datetime()).add(**kwargs)<EOL>)<EOL>
|
Returns a new MayaDT object with the given offsets.
|
f10489:c0:m14
|
def subtract(self, **kwargs):
|
return self.from_datetime(<EOL>pendulum.instance(self.datetime()).subtract(**kwargs)<EOL>)<EOL>
|
Returns a new MayaDT object with the given offsets.
|
f10489:c0:m15
|
def subtract_date(self, **kwargs):
|
return timedelta(seconds=self.epoch - kwargs['<STR_LIT>'].epoch)<EOL>
|
Returns a timedelta object with the duration between the dates
|
f10489:c0:m16
|
def snap(self, instruction):
|
return self.from_datetime(snaptime.snap(self.datetime(), instruction))<EOL>
|
Returns a new MayaDT object modified by the given instruction.
Powered by snaptime. See https://github.com/zartstrom/snaptime
for a complete documentation about the snaptime instructions.
|
f10489:c0:m17
|
@property<EOL><INDENT>def timezone(self):<DEDENT>
|
return '<STR_LIT>'<EOL>
|
Returns the UTC tzinfo name. It's always UTC. Always.
|
f10489:c0:m18
|
@property<EOL><INDENT>def _tz(self):<DEDENT>
|
return pytz.timezone(self.timezone)<EOL>
|
Returns the UTC tzinfo object.
|
f10489:c0:m19
|
@property<EOL><INDENT>def local_timezone(self):<DEDENT>
|
if self._local_tz.zone in pytz.all_timezones:<EOL><INDENT>return self._local_tz.zone<EOL><DEDENT>return self.timezone<EOL>
|
Returns the name of the local timezone.
|
f10489:c0:m20
|
@property<EOL><INDENT>def _local_tz(self):<DEDENT>
|
return get_localzone()<EOL>
|
Returns the local timezone.
|
f10489:c0:m21
|
@staticmethod<EOL><INDENT>@validate_arguments_type_of_function(Datetime)<EOL>def __dt_to_epoch(dt):<DEDENT>
|
<EOL>if dt.tzinfo is None:<EOL><INDENT>dt = dt.replace(tzinfo=pytz.utc)<EOL><DEDENT>epoch_start = Datetime(*MayaDT.__EPOCH_START, tzinfo=pytz.timezone('<STR_LIT>'))<EOL>return (dt - epoch_start).total_seconds()<EOL>
|
Converts a datetime into an epoch.
|
f10489:c0:m22
|
@classmethod<EOL><INDENT>@validate_arguments_type_of_function(Datetime)<EOL>def from_datetime(klass, dt):<DEDENT>
|
return klass(klass.__dt_to_epoch(dt))<EOL>
|
Returns MayaDT instance from datetime.
|
f10489:c0:m23
|
@classmethod<EOL><INDENT>@validate_arguments_type_of_function(time.struct_time)<EOL>def from_struct(klass, struct, timezone=pytz.UTC):<DEDENT>
|
struct_time = time.mktime(struct) - utc_offset(struct)<EOL>dt = Datetime.fromtimestamp(struct_time, timezone)<EOL>return klass(klass.__dt_to_epoch(dt))<EOL>
|
Returns MayaDT instance from a 9-tuple struct
It's assumed to be from gmtime().
|
f10489:c0:m24
|
@classmethod<EOL><INDENT>def from_iso8601(klass, iso8601_string):<DEDENT>
|
return parse(iso8601_string)<EOL>
|
Returns MayaDT instance from iso8601 string.
|
f10489:c0:m25
|
@staticmethod<EOL><INDENT>def from_rfc2822(rfc2822_string):<DEDENT>
|
return parse(rfc2822_string)<EOL>
|
Returns MayaDT instance from rfc2822 string.
|
f10489:c0:m26
|
@staticmethod<EOL><INDENT>def from_rfc3339(rfc3339_string):<DEDENT>
|
return parse(rfc3339_string)<EOL>
|
Returns MayaDT instance from rfc3339 string.
|
f10489:c0:m27
|
def datetime(self, to_timezone=None, naive=False):
|
if to_timezone:<EOL><INDENT>dt = self.datetime().astimezone(pytz.timezone(to_timezone))<EOL><DEDENT>else:<EOL><INDENT>dt = Datetime.utcfromtimestamp(self._epoch)<EOL>dt.replace(tzinfo=self._tz)<EOL><DEDENT>if naive:<EOL><INDENT>return dt.replace(tzinfo=None)<EOL><DEDENT>else:<EOL><INDENT>if dt.tzinfo is None:<EOL><INDENT>dt = dt.replace(tzinfo=self._tz)<EOL><DEDENT><DEDENT>return dt<EOL>
|
Returns a timezone-aware datetime...
Defaulting to UTC (as it should).
Keyword Arguments:
to_timezone {str} -- timezone to convert to (default: None/UTC)
naive {bool} -- if True,
the tzinfo is simply dropped (default: False)
|
f10489:c0:m28
|
def local_datetime(self):
|
return self.datetime(to_timezone=self.local_timezone, naive=False)<EOL>
|
Returns a local timezone-aware datetime object
It's the same as:
mayaDt.datetime(to_timezone=mayaDt.local_timezone)
|
f10489:c0:m29
|
def iso8601(self):
|
<EOL>dt = self.datetime(naive=True)<EOL>return '<STR_LIT>'.format(dt.isoformat())<EOL>
|
Returns an ISO 8601 representation of the MayaDT.
|
f10489:c0:m30
|
def rfc2822(self):
|
return email.utils.formatdate(self.epoch, usegmt=True)<EOL>
|
Returns an RFC 2822 representation of the MayaDT.
|
f10489:c0:m31
|
def rfc3339(self):
|
return self.datetime().strftime("<STR_LIT>")[:-<NUM_LIT:5>] + "<STR_LIT>"<EOL>
|
Returns an RFC 3339 representation of the MayaDT.
|
f10489:c0:m32
|
@property<EOL><INDENT>def weekday(self):<DEDENT>
|
return self.datetime().isoweekday()<EOL>
|
Return the day of the week as an integer.
Monday is 1 and Sunday is 7.
|
f10489:c0:m38
|
def slang_date(self, locale="<STR_LIT>"):
|
dt = pendulum.instance(self.datetime())<EOL>try:<EOL><INDENT>return _translate(dt, locale)<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>delta = humanize.time.abs_timedelta(<EOL>timedelta(seconds=(self.epoch - now().epoch)))<EOL>format_string = "<STR_LIT>"<EOL>if delta.days >= <NUM_LIT>:<EOL><INDENT>format_string += "<STR_LIT>"<EOL><DEDENT>return dt.format(format_string, locale=locale).title()<EOL>
|
Returns human slang representation of date.
Keyword Arguments:
locale -- locale to translate to, e.g. 'fr' for french.
(default: 'en' - English)
|
f10489:c0:m44
|
def slang_time(self, locale="<STR_LIT>"):
|
dt = self.datetime()<EOL>return pendulum.instance(dt).diff_for_humans(locale=locale)<EOL>
|
Returns human slang representation of time.
Keyword Arguments:
locale -- locale to translate to, e.g. 'fr' for french.
(default: 'en' - English)
|
f10489:c0:m45
|
def iso8601(self):
|
return '<STR_LIT>'.format(self.start.iso8601(), self.end.iso8601())<EOL>
|
Returns an ISO 8601 representation of the MayaInterval.
|
f10489:c1:m2
|
@validate_arguments_type_of_function()<EOL><INDENT>def combine(self, maya_interval):<DEDENT>
|
interval_list = sorted([self, maya_interval])<EOL>if self & maya_interval or self.is_adjacent(maya_interval):<EOL><INDENT>return [<EOL>MayaInterval(<EOL>interval_list[<NUM_LIT:0>].start,<EOL>max(interval_list[<NUM_LIT:0>].end, interval_list[<NUM_LIT:1>].end),<EOL>)<EOL>]<EOL><DEDENT>return interval_list<EOL>
|
Returns a combined list of timespans, merged together.
|
f10489:c1:m16
|
@validate_arguments_type_of_function()<EOL><INDENT>def subtract(self, maya_interval):<DEDENT>
|
if not self & maya_interval:<EOL><INDENT>return [self]<EOL><DEDENT>elif maya_interval.contains(self):<EOL><INDENT>return []<EOL><DEDENT>interval_list = []<EOL>if self.start < maya_interval.start:<EOL><INDENT>interval_list.append(MayaInterval(self.start, maya_interval.start))<EOL><DEDENT>if self.end > maya_interval.end:<EOL><INDENT>interval_list.append(MayaInterval(maya_interval.end, self.end))<EOL><DEDENT>return interval_list<EOL>
|
Removes the given interval.
|
f10489:c1:m17
|
def quantize(self, duration, snap_out=False, timezone='<STR_LIT>'):
|
<EOL>duration = _seconds_or_timedelta(duration)<EOL>timezone = pytz.timezone(timezone)<EOL>if duration <= timedelta(seconds=<NUM_LIT:0>):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>epoch = timezone.localize(Datetime(<NUM_LIT>, <NUM_LIT:1>, <NUM_LIT:1>))<EOL>seconds = int(duration.total_seconds())<EOL>start_seconds = int(<EOL>(self.start.datetime(naive=False) - epoch).total_seconds()<EOL>)<EOL>end_seconds = int(<EOL>(self.end.datetime(naive=False) - epoch).total_seconds()<EOL>)<EOL>if start_seconds % seconds and not snap_out:<EOL><INDENT>start_seconds += seconds<EOL><DEDENT>if end_seconds % seconds and snap_out:<EOL><INDENT>end_seconds += seconds<EOL><DEDENT>start_seconds -= start_seconds % seconds<EOL>end_seconds -= end_seconds % seconds<EOL>if start_seconds > end_seconds:<EOL><INDENT>start_seconds = end_seconds<EOL><DEDENT>return MayaInterval(<EOL>start=MayaDT.from_datetime(epoch).add(seconds=start_seconds),<EOL>end=MayaDT.from_datetime(epoch).add(seconds=end_seconds),<EOL>)<EOL>
|
Returns a quantized interval.
|
f10489:c1:m19
|
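The masked literals make the snapping arithmetic above hard to follow, so here is a worked sketch with plain integers, assuming a 900-second (15-minute) grid and illustrative offsets from the epoch. Without ``snap_out`` both endpoints snap inward onto the grid; with it they snap outward.

```python
dur = 900                        # grid size in seconds
start, end = 1000, 10000         # illustrative offsets from the epoch
snap_out = False

s, e = start, end
if s % dur and not snap_out:
    s += dur                     # start snaps up (inward) ...
if e % dur and snap_out:
    e += dur                     # ... end snaps up only when snapping out
s -= s % dur
e -= e % dur
assert (s, e) == (1800, 9900)    # both endpoints now lie on the 900 s grid
```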
@validate_arguments_type_of_function()<EOL><INDENT>def intersection(self, maya_interval):<DEDENT>
|
start = max(self.start, maya_interval.start)<EOL>end = min(self.end, maya_interval.end)<EOL>either_instant = self.is_instant or maya_interval.is_instant<EOL>instant_overlap = (self.start == maya_interval.start or start <= end)<EOL>if (either_instant and instant_overlap) or (start < end):<EOL><INDENT>return MayaInterval(start, end)<EOL><DEDENT>
|
Returns the intersection between two intervals.
|
f10489:c1:m20
|
@staticmethod<EOL><INDENT>def status(s):<DEDENT>
|
print('<STR_LIT>'.format(s))<EOL>
|
Prints things in bold.
|
f10490:c0:m0
|
def expected_error_page(error_page):
|
def wrapper(func):<EOL><INDENT>setattr(func, EXPECTED_ERROR_PAGE, error_page)<EOL>return func<EOL><DEDENT>return wrapper<EOL>
|
Decorator declaring the error page expected at the end of the test method. As
the parameter, use what :py:meth:`~.WebdriverWrapperErrorMixin.get_error_page`
returns.
.. versionadded:: 2.0
Before this decorator was called ``ShouldBeErrorPage``.
|
f10502:m0
|
def allowed_error_pages(*error_pages):
|
def wrapper(func):<EOL><INDENT>setattr(func, ALLOWED_ERROR_PAGES, error_pages)<EOL>return func<EOL><DEDENT>return wrapper<EOL>
|
Decorator listing error pages to ignore at the end of the test method. As
the parameter, use what :py:meth:`~.WebdriverWrapperErrorMixin.get_error_page`
returns.
.. versionadded:: 2.0
|
f10502:m1
|
def expected_error_messages(*error_messages):
|
def wrapper(func):<EOL><INDENT>setattr(func, EXPECTED_ERROR_MESSAGES, error_messages)<EOL>return func<EOL><DEDENT>return wrapper<EOL>
|
Decorator declaring the error messages expected at the end of the test
method. As the parameter, use what
:py:meth:`~.WebdriverWrapperErrorMixin.get_error_messages`
returns.
.. versionadded:: 2.0
Before this decorator was called ``ShouldBeError``.
|
f10502:m2
|
def allowed_error_messages(*error_messages):
|
def wrapper(func):<EOL><INDENT>setattr(func, ALLOWED_ERROR_MESSAGES, error_messages)<EOL>return func<EOL><DEDENT>return wrapper<EOL>
|
Decorator listing error messages to ignore at the end of the test method.
As the parameter, use what
:py:meth:`~.WebdriverWrapperErrorMixin.get_error_messages`
returns.
.. versionadded:: 2.0
Before this decorator was called ``CanBeError``.
|
f10502:m3
|
def allowed_any_error_message(func):
|
setattr(func, ALLOWED_ERROR_MESSAGES, ANY)<EOL>return func<EOL>
|
Decorator ignoring any error message at the end of the test method. If you
want to allow only specific error messages, use :py:func:`.allowed_error_messages`
instead.
.. versionadded:: 2.0
|
f10502:m4
|
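A minimal sketch of what the error-expectation decorators above actually do: they only attach the expectation to the test function, and ``check_errors`` reads it back later. The test function and messages are illustrative; ``EXPECTED_ERROR_MESSAGES`` is the module-level constant used by the decorator.

```python
@expected_error_messages('Invalid e-mail', 'Invalid password')
def test_bad_login():
    pass

assert getattr(test_bad_login, EXPECTED_ERROR_MESSAGES) == (
    'Invalid e-mail', 'Invalid password')
```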
def check_errors(self, expected_error_page=None, allowed_error_pages=[], expected_error_messages=[], allowed_error_messages=[]):
|
<EOL>self.close_alert(ignore_exception=True)<EOL>expected_error_pages = set([expected_error_page]) if expected_error_page else set()<EOL>allowed_error_pages = set(allowed_error_pages)<EOL>error_page = self.get_error_page()<EOL>error_pages = set([error_page]) if error_page else set()<EOL>if (<EOL>error_pages & expected_error_pages != expected_error_pages<EOL>or<EOL>error_pages - (expected_error_pages | allowed_error_pages)<EOL>):<EOL><INDENT>traceback = self.get_error_traceback()<EOL>raise ErrorPageException(self.current_url, error_page, expected_error_page, allowed_error_pages, traceback)<EOL><DEDENT>error_messages = set(self.get_error_messages())<EOL>expected_error_messages = set(expected_error_messages)<EOL>allowed_error_messages = error_messages if allowed_error_messages is ANY else set(allowed_error_messages)<EOL>if (<EOL>error_messages & expected_error_messages != expected_error_messages<EOL>or<EOL>error_messages - (expected_error_messages | allowed_error_messages)<EOL>):<EOL><INDENT>raise ErrorMessagesException(self.current_url, error_messages, expected_error_messages, allowed_error_messages)<EOL><DEDENT>js_errors = self.get_js_errors()<EOL>if js_errors:<EOL><INDENT>raise JSErrorsException(self.current_url, js_errors)<EOL><DEDENT>
|
This method should be called whenever you need to check whether there is
some error. Normally you only need ``check_expected_errors``, called after
each test (which you specify only once), but that checks for errors only at
the end of the test. When you have a big use case and need to be sure that
no error appears at any step, use this method.
Pass the same values to the parameters as to the decorators
:py:func:`.expected_error_page`, :py:func:`.allowed_error_pages`,
:py:func:`.expected_error_messages` and :py:func:`.allowed_error_messages`.
|
f10502:c1:m1
|
def get_error_page(self):
|
try:<EOL><INDENT>error_page = self.get_elm(class_name='<STR_LIT>')<EOL><DEDENT>except NoSuchElementException:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>header = error_page.get_elm(tag_name='<STR_LIT>')<EOL>return header.text<EOL><DEDENT>
|
Method returning the error page. Should return a string.
By default it finds the element with class ``error-page`` and returns the
text of its ``h1`` header. You can change this method accordingly to your app.
The error page returned from this method is used in the decorators
:py:func:`.expected_error_page` and :py:func:`.allowed_error_pages`.
|
f10502:c1:m2
|
def get_error_traceback(self):
|
try:<EOL><INDENT>error_page = self.get_elm(class_name='<STR_LIT>')<EOL>traceback = error_page.get_elm(class_name='<STR_LIT>')<EOL><DEDENT>except NoSuchElementException:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>return traceback.text<EOL><DEDENT>
|
Method returning the traceback of the error page.
By default it finds the element with class ``error-page`` and returns the
text of the element with class ``traceback``. You can change this method
accordingly to your app.
|
f10502:c1:m3
|
def get_error_messages(self):
|
try:<EOL><INDENT>error_elms = self.get_elms(class_name='<STR_LIT:error>')<EOL><DEDENT>except NoSuchElementException:<EOL><INDENT>return []<EOL><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>error_values = [error_elm.get_attribute('<STR_LIT:error>') for error_elm in error_elms]<EOL><DEDENT>except Exception:<EOL><INDENT>error_values = [error_elm.text for error_elm in error_elms]<EOL><DEDENT>finally:<EOL><INDENT>return error_values<EOL><DEDENT><DEDENT>
|
Method returning error messages. Should return a list of messages.
By default it finds elements with class ``error`` and returns the value of
their ``error`` attribute, or their text if that attribute is missing. You
can change this method accordingly to your app.
Error messages returned from this method are used in the decorators
:py:func:`.expected_error_messages` and :py:func:`.allowed_error_messages`.
|
f10502:c1:m4
|
def get_js_errors(self):
|
return self.execute_script('<STR_LIT>')<EOL>
|
Method returning JS errors. Should return a list of errors.
You have to include the following JS snippet in your web app; it records
all JS errors and this method reads them automatically.
.. code-block:: html
<script type="text/javascript">
window.jsErrors = [];
window.onerror = function(errorMessage) {
window.jsErrors[window.jsErrors.length] = errorMessage;
}
</script>
|
f10502:c1:m5
|
@property<EOL><INDENT>def method(self):<DEDENT>
|
return self._response.request.method.lower()<EOL>
|
Used method of request. ``GET`` or ``POST``.
|
f10503:c0:m0
|
@property<EOL><INDENT>def status_code(self):<DEDENT>
|
return self._response.status_code<EOL>
|
Status code of response.
|
f10503:c0:m1
|
@property<EOL><INDENT>def encoding(self):<DEDENT>
|
return self._response.encoding<EOL>
|
Encoding of response.
|
f10503:c0:m2
|
@property<EOL><INDENT>def headers(self):<DEDENT>
|
return self._response.headers<EOL>
|
Headers of response.
See :py:attr:`requests.Response.headers` for more information.
|
f10503:c0:m3
|
@property<EOL><INDENT>def data(self):<DEDENT>
|
return self._response.text<EOL>
|
RAW data of response.
|
f10503:c0:m4
|
def fill_out_and_submit(self, data, prefix='<STR_LIT>', skip_reset=False):
|
self.fill_out(data, prefix, skip_reset)<EOL>self.submit()<EOL>
|
Calls :py:meth:`~.Form.fill_out` and then :py:meth:`.submit`.
|
f10504:c0:m0
|
def fill_out(self, data, prefix='<STR_LIT>', skip_reset=False):
|
for elm_name, value in data.items():<EOL><INDENT>FormElement(self, prefix + elm_name).fill_out(value, skip_reset)<EOL><DEDENT>
|
Fill out the form from the ``data`` dictionary (keys are the name attributes
of the inputs). You can pass normal Pythonic data and don't have to care
about how to use the WebDriver API.
By ``prefix`` you can specify prefix of all name attributes. For example
you can have inputs called ``client.name`` and ``client.surname`` -
then you will pass to ``prefix`` string ``"client."`` and in dictionary
just ``"name"``.
Option ``skip_reset`` skips the reset step so filling can go faster. For
example, for multiple selects it calls ``deselect_all`` first, but that has
to check every option to see whether it is selected, which is very slow for
really big multiple selects. If you know the form is not pre-filled, you can
skip the reset and in some cases save up to a minute! The same applies to
text inputs and textareas, where ``clear`` is called first.
Example:
.. code-block:: python
driver.get_elm('formid').fill_out({
'name': 'Michael',
'surname': 'Horejsek',
'age': 24,
'enabled': True,
'multibox': ['value1', 'value2']
}, prefix='user_')
.. versionchanged:: 2.2
``turbo`` renamed to ``skip_reset`` and used also for common elements
like text inputs or textareas.
|
f10504:c0:m1
|
def submit(self):
|
elm_name = '<STR_LIT>' % self.get_attribute('<STR_LIT:id>')<EOL>try:<EOL><INDENT>self.click(elm_name)<EOL><DEDENT>except NoSuchElementException:<EOL><INDENT>super(Form, self).submit()<EOL><DEDENT>
|
Try to find an element with ID "[FORM_ID]_submit" and click on it. If no
such element exists, it falls back to the default behaviour: submitting the
form by pressing Enter.
|
f10504:c0:m2
|