| repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀) |
|---|---|---|---|---|
lmcro/letsencrypt | refs/heads/master | letsencrypt-nginx/letsencrypt_nginx/parser.py | 8 |
"""NginxParser is a member object of the NginxConfigurator class."""
import glob
import logging
import os
import pyparsing
import re
from letsencrypt import errors
from letsencrypt_nginx import obj
from letsencrypt_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxParser(object):
"""Class handles the fine details of parsing the Nginx Configuration.
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar dict parsed: Mapping of file paths to parsed trees
"""
def __init__(self, root, ssl_options):
self.parsed = {}
self.root = os.path.abspath(root)
self.loc = self._set_locations(ssl_options)
# Parse nginx.conf and included files.
# TODO: Check sites-available/ as well. For now, the configurator does
# not enable sites from there.
self.load()
def load(self):
"""Loads Nginx files into a parsed tree.
"""
self.parsed = {}
self._parse_recursively(self.loc["root"])
def _parse_recursively(self, filepath):
"""Parses nginx config files recursively by looking at 'include'
directives inside 'http' and 'server' blocks. Note that this only
reads Nginx files that potentially declare a virtual host.
:param str filepath: The path to the files to parse, as a glob
"""
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
# Parse the top-level included file
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
# Look for includes in the top-level 'http'/'server' context
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
# Look for includes in a 'server' context within
# an 'http' context
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
"""Converts a relative path to an absolute path relative to the root.
Does nothing for paths that are already absolute.
:param str path: The path
:returns: The absolute path
:rtype: str
"""
if not os.path.isabs(path):
return os.path.join(self.root, path)
else:
return path
def get_vhosts(self):
# pylint: disable=cell-var-from-loop
"""Gets list of all 'virtual hosts' found in Nginx configuration.
Technically this is a misnomer because Nginx does not have virtual
hosts, it has 'server blocks'.
:returns: List of :class:`~letsencrypt_nginx.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
enabled = True # We only look at enabled vhosts for now
vhosts = []
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename] # workaround undefined loop var in lambdas
# Find all the server blocks
_do_for_subarray(tree, lambda x: x[0] == ['server'],
lambda x: srv.append(x[1]))
# Find 'include' statements in server blocks and append their trees
for i, server in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = new_server
for filename in servers:
for server in servers[filename]:
# Parse the server block into a VirtualHost object
parsed_server = parse_server(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server)
vhosts.append(vhost)
return vhosts
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = list(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath)
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warn("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
def _set_locations(self, ssl_options):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
root = self._find_config_root()
default = root
nginx_temp = os.path.join(self.root, "nginx_ports.conf")
if os.path.isfile(nginx_temp):
listen = nginx_temp
name = nginx_temp
else:
listen = default
name = default
return {"root": root, "default": default, "listen": listen,
"name": name, "ssl_options": ssl_options}
def _find_config_root(self):
"""Find the Nginx Configuration Root file."""
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find configuration root")
def filedump(self, ext='tmp'):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
"""
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
logger.debug('Dumping to %s:\n%s', filename, nginxparser.dumps(tree))
with open(filename, 'w') as _file:
nginxparser.dump(tree, _file)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def _has_server_names(self, entry, names):
"""Checks if a server block has the given set of server_names. This
is the primary way of identifying server blocks in the configurator.
Returns false if 'entry' doesn't look like a server block at all.
.. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param list entry: The block to search
:param set names: The names to match
:rtype: bool
"""
if len(names) == 0:
# Nothing to identify blocks with
return False
if not isinstance(entry, list):
# Can't be a server block
return False
new_entry = self._get_included_directives(entry)
server_names = set()
for item in new_entry:
if not isinstance(item, list):
# Can't be a server block
return False
if len(item) > 0 and item[0] == 'server_name':
server_names.update(_get_servernames(item[1]))
return server_names == names
def add_server_directives(self, filename, names, directives,
replace):
"""Add or replace directives in the first server block with names.
.. note:: If replace is True, this raises a misconfiguration error
if the directive does not already exist.
.. note:: If replace is False nothing gets added if an identical
block exists already.
.. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param str filename: The absolute filename of the config file
:param set names: The server_name to match
:param list directives: The directives to add
:param bool replace: Whether to only replace existing directives
"""
try:
_do_for_subarray(self.parsed[filename],
lambda x: self._has_server_names(x, names),
lambda x: _add_directives(x, directives, replace))
except errors.MisconfigurationError as err:
raise errors.MisconfigurationError("Problem in %s: %s" % (filename, err.message))
def add_http_directives(self, filename, directives):
"""Adds directives to the first encountered HTTP block in filename.
We insert new directives at the top of the block to work around
https://trac.nginx.org/nginx/ticket/810: If the first server block
doesn't enable OCSP stapling, stapling is broken for all blocks.
:param str filename: The absolute filename of the config file
:param list directives: The directives to add
"""
_do_for_subarray(self.parsed[filename],
lambda x: x[0] == ['http'],
lambda x: x[1].insert(0, directives))
def get_all_certs_keys(self):
"""Gets all certs and keys in the nginx config.
:returns: set of tuples with form (cert, key, path)
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
c_k = set()
vhosts = self.get_vhosts()
for vhost in vhosts:
tup = [None, None, vhost.filep]
if vhost.ssl:
for directive in vhost.raw:
if directive[0] == 'ssl_certificate':
tup[0] = directive[1]
elif directive[0] == 'ssl_certificate_key':
tup[1] = directive[1]
if tup[0] is not None and tup[1] is not None:
c_k.add(tuple(tup))
return c_k
def _do_for_subarray(entry, condition, func):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if isinstance(entry, list):
if condition(entry):
func(entry)
else:
for item in entry:
_do_for_subarray(item, condition, func)
def get_best_match(target_name, names):
"""Finds the best match for target_name out of names using the Nginx
name-matching rules (exact > longest wildcard starting with * >
longest wildcard ending with * > regex).
:param str target_name: The name to match
:param set names: The candidate server names
:returns: Tuple of (type of match, the name that matched)
:rtype: tuple
"""
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if len(exact) > 0:
# There can be more than one exact match; e.g. eff.org, .eff.org
match = min(exact, key=len)
return ('exact', match)
if len(wildcard_start) > 0:
# Return the longest wildcard
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if len(wildcard_end) > 0:
# Return the longest wildcard
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if len(regex) > 0:
# Just return the first one for now
match = regex[0]
return ('regex', match)
return (None, None)
def _exact_match(target_name, name):
return target_name == name or '.' + target_name == name
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first != '*' and first != '':
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
if re.match(regex, target_name):
return True
else:
return False
except re.error: # pragma: no cover
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
"""Checks if an nginx parsed entry is an 'include' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'include' directive
:rtype: bool
"""
return (isinstance(entry, list) and
len(entry) == 2 and entry[0] == 'include' and
isinstance(entry[1], str))
def _get_servernames(names):
"""Turns a server_name string into a list of server names
:param str names: server names
:rtype: list
"""
whitespace_re = re.compile(r'\s+')
names = re.sub(whitespace_re, ' ', names)
return names.split(' ')
def parse_server(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
parsed_server = {'addrs': set(),
'ssl': False,
'names': set()}
for directive in server:
if directive[0] == 'listen':
addr = obj.Addr.fromstring(directive[1])
parsed_server['addrs'].add(addr)
if not parsed_server['ssl'] and addr.ssl:
parsed_server['ssl'] = True
elif directive[0] == 'server_name':
parsed_server['names'].update(
_get_servernames(directive[1]))
elif directive[0] == 'ssl' and directive[1] == 'on':
parsed_server['ssl'] = True
return parsed_server
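# The block below is not part of the original module; it is a small, hedged
# demonstration of the two pure helpers documented above. get_best_match()
# applies the stated precedence (exact > '*.'-style wildcard > trailing-'*'
# wildcard > regex), and parse_server() collapses a server block's directives
# into the addrs/ssl/names dictionary. All names used here are hypothetical.
if __name__ == "__main__":
    candidates = set(['www.eff.org', '*.eff.org', r'~^www[0-9]+\.eff\.org$'])
    print(get_best_match('www.eff.org', candidates))   # ('exact', 'www.eff.org')
    print(get_best_match('maps.eff.org', candidates))  # ('wildcard_start', '*.eff.org')
    block = [['server_name', 'example.com   www.example.com'], ['ssl', 'on']]
    # -> names contains 'example.com' and 'www.example.com', ssl is True, addrs is empty
    print(parse_server(block))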
def _add_directives(block, directives, replace):
"""Adds or replaces directives in a config block.
When replace=False, it's an error to try to add a directive that already
exists in the config block with a conflicting value.
When replace=True, a directive with the same name MUST already exist in the
config block, and the first instance will be replaced.
.. todo:: Find directives that are in included files.
:param list block: The block to replace in
:param list directives: The new directives.
"""
for directive in directives:
_add_directive(block, directive, replace)
repeatable_directives = set(['server_name', 'listen', 'include'])
def _add_directive(block, directive, replace):
"""Adds or replaces a single directive in a config block.
See _add_directives for more documentation.
"""
location = -1
# Find the index of a config line where the name of the directive matches
# the name of the directive we want to add.
for index, line in enumerate(block):
if len(line) > 0 and line[0] == directive[0]:
location = index
break
if replace:
if location == -1:
raise errors.MisconfigurationError(
'expected directive for %s in the Nginx '
'config but did not find it.' % directive[0])
block[location] = directive
else:
# Append directive. Fail if the name is not a repeatable directive name,
# and there is already a copy of that directive with a different value
# in the config file.
directive_name = directive[0]
directive_value = directive[1]
if location != -1 and directive_name.__str__() not in repeatable_directives:
if block[location][1] == directive_value:
# There's a conflict, but the existing value matches the one we
# want to insert, so it's fine.
pass
else:
raise errors.MisconfigurationError(
'tried to insert directive "%s" but found conflicting "%s".' % (
directive, block[location]))
else:
block.append(directive)
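# Also not part of the original module: a short sketch of _add_directive()'s
# add-vs-replace semantics on an in-memory block (hypothetical directive values).
if __name__ == "__main__":
    block = [['listen', '80'], ['server_name', 'example.com']]
    _add_directive(block, ['ssl_certificate', 'cert.pem'], replace=False)  # appended
    _add_directive(block, ['listen', '443 ssl'], replace=True)             # replaced in place
    print(block)
    # [['listen', '443 ssl'], ['server_name', 'example.com'], ['ssl_certificate', 'cert.pem']]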
|
biddisco/VTK | refs/heads/master | ThirdParty/Twisted/twisted/conch/avatar.py | 91 |
# -*- test-case-name: twisted.conch.test.test_conch -*-
from interfaces import IConchUser
from error import ConchError
from ssh.connection import OPEN_UNKNOWN_CHANNEL_TYPE
from twisted.python import log
from zope import interface
class ConchUser:
interface.implements(IConchUser)
def __init__(self):
self.channelLookup = {}
self.subsystemLookup = {}
def lookupChannel(self, channelType, windowSize, maxPacket, data):
klass = self.channelLookup.get(channelType, None)
if not klass:
raise ConchError(OPEN_UNKNOWN_CHANNEL_TYPE, "unknown channel")
else:
return klass(remoteWindow = windowSize,
remoteMaxPacket = maxPacket,
data=data, avatar=self)
def lookupSubsystem(self, subsystem, data):
log.msg(repr(self.subsystemLookup))
klass = self.subsystemLookup.get(subsystem, None)
if not klass:
return False
return klass(data, avatar=self)
def gotGlobalRequest(self, requestType, data):
# XXX should this use method dispatch?
requestType = requestType.replace('-','_')
f = getattr(self, "global_%s" % requestType, None)
if not f:
return 0
return f(data)
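# The class below is not part of the original module; it is a minimal sketch of
# how ConchUser is meant to be used: a concrete avatar registers channel classes
# in channelLookup so lookupChannel() can instantiate them by channel type. It
# assumes twisted.conch.ssh.session is importable next to this module; the name
# ExampleConchUser is hypothetical.
from ssh import session
class ExampleConchUser(ConchUser):
    def __init__(self):
        ConchUser.__init__(self)
        # Allow only ordinary 'session' channels for this avatar.
        self.channelLookup['session'] = session.SSHSession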
|
renweizhukov/LearningPythonTheHardWay | refs/heads/master | ex28.py | 1 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
True and True
False and True
1 == 1 and 2 == 1
"test" == "test"
1 == 1 or 2 != 1
True and 1 == 1
False and 0 != 0
True or 1 == 1
"test" == "testing"
1 != 0 and 2 == 1
"test" != "testing"
"test" == 1
not (True and False)
not (1 == 1 and 0 != 1)
not (10 == 1 or 1000 == 1000)
not (1 != 10 or 3 == 4)
not ("testing" == "testing" and "Zed" == "Cool Guy")
1 == 1 and (not ("testing" == 1 or 1 == 0))
"chunky" == "bacon" and (not (3 == 4 or 3 == 3))
3 == 3 and (not ("testing" == "testing" or "Python" == "Fun"))
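# Not part of the original exercise: printing a few of the expressions above
# makes the guessed answers easy to check when the file is run.
print(True and True)                          # True
print(1 == 1 and 2 == 1)                      # False
print(not (10 == 1 or 1000 == 1000))          # False
print("chunky" == "bacon" and (not (3 == 4 or 3 == 3)))  # False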
|
gangadharkadam/office_frappe | refs/heads/develop | frappe/core/doctype/tag/tag.py | 39 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Tag(Document):
pass
|
hubsaysnuaa/odoo | refs/heads/8.0 | addons/website_sale_options/models/product.py | 395 |
# -*- coding: utf-8 -*-
from openerp import tools
from openerp.osv import osv, fields
class product_template(osv.Model):
_inherit = "product.template"
_columns = {
'optional_product_ids': fields.many2many('product.template','product_optional_rel','src_id','dest_id',string='Optional Products', help="Products to propose when add to cart."),
}
|
obeattie/sqlalchemy | refs/heads/master | lib/sqlalchemy/sql/util.py | 1 |
from sqlalchemy import exc, schema, topological, util, sql, types as sqltypes
from sqlalchemy.sql import expression, operators, visitors
from itertools import chain
"""Utility functions that build upon SQL and Schema constructs."""
def sort_tables(tables):
"""sort a collection of Table objects in order of their foreign-key dependency."""
tables = list(tables)
tuples = []
def visit_foreign_key(fkey):
if fkey.use_alter:
return
parent_table = fkey.column.table
if parent_table in tables:
child_table = fkey.parent.table
tuples.append( ( parent_table, child_table ) )
for table in tables:
visitors.traverse(table, {'schema_visitor':True}, {'foreign_key':visit_foreign_key})
return topological.sort(tuples, tables)
def find_join_source(clauses, join_to):
"""Given a list of FROM clauses and a selectable,
return the first index and element from the list of
clauses which can be joined against the selectable. Returns
(None, None) if no match is found.
e.g.::
clause1 = table1.join(table2)
clause2 = table4.join(table5)
join_to = table2.join(table3)
find_join_source([clause1, clause2], join_to) == clause1
"""
selectables = list(expression._from_objects(join_to))
for i, f in enumerate(clauses):
for s in selectables:
if f.is_derived_from(s):
return i, f
else:
return None, None
def find_tables(clause, check_columns=False,
include_aliases=False, include_joins=False,
include_selects=False, include_crud=False):
"""locate Table objects within the given expression."""
tables = []
_visitors = {}
if include_selects:
_visitors['select'] = _visitors['compound_select'] = tables.append
if include_joins:
_visitors['join'] = tables.append
if include_aliases:
_visitors['alias'] = tables.append
if include_crud:
_visitors['insert'] = _visitors['update'] = \
_visitors['delete'] = lambda ent: tables.append(ent.table)
if check_columns:
def visit_column(column):
tables.append(column.table)
_visitors['column'] = visit_column
_visitors['table'] = tables.append
visitors.traverse(clause, {'column_collections':False}, _visitors)
return tables
def find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
visitors.traverse(clause, {}, {'column':cols.add})
return cols
def _quote_ddl_expr(element):
if isinstance(element, basestring):
element = element.replace("'", "''")
return "'%s'" % element
else:
return repr(element)
def expression_as_ddl(clause):
"""Given a SQL expression, convert for usage in DDL, such as
CREATE INDEX and CHECK CONSTRAINT.
Converts bind params into quoted literals, column identifiers
into detached column constructs so that the parent table
identifier is not included.
"""
def repl(element):
if isinstance(element, expression._BindParamClause):
return expression.literal_column(_quote_ddl_expr(element.value))
elif isinstance(element, expression.ColumnClause) and \
element.table is not None:
return expression.column(element.name)
else:
return None
return visitors.replacement_traverse(clause, {}, repl)
def adapt_criterion_to_null(crit, nulls):
"""given criterion containing bind params, convert selected elements to IS NULL."""
def visit_binary(binary):
if isinstance(binary.left, expression._BindParamClause) and binary.left.key in nulls:
# reverse order if the NULL is on the left side
binary.left = binary.right
binary.right = expression.null()
binary.operator = operators.is_
binary.negate = operators.isnot
elif isinstance(binary.right, expression._BindParamClause) and binary.right.key in nulls:
binary.right = expression.null()
binary.operator = operators.is_
binary.negate = operators.isnot
return visitors.cloned_traverse(crit, {}, {'binary':visit_binary})
def join_condition(a, b, ignore_nonexistent_tables=False):
"""create a join condition between two tables.
ignore_nonexistent_tables=True allows a join condition to be
determined between two tables which may contain references to
other not-yet-defined tables. In general the NoSuchTableError
raised is only required if the user is trying to join selectables
across multiple MetaData objects (which is an extremely rare use
case).
"""
crit = []
constraints = set()
for fk in b.foreign_keys:
try:
col = fk.get_referent(a)
except exc.NoReferencedTableError:
if ignore_nonexistent_tables:
continue
else:
raise
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if a is not b:
for fk in a.foreign_keys:
try:
col = fk.get_referent(b)
except exc.NoReferencedTableError:
if ignore_nonexistent_tables:
continue
else:
raise
if col is not None:
crit.append(col == fk.parent)
constraints.add(fk.constraint)
if len(crit) == 0:
raise exc.ArgumentError(
"Can't find any foreign key relationships "
"between '%s' and '%s'" % (a.description, b.description))
elif len(constraints) > 1:
raise exc.ArgumentError(
"Can't determine join between '%s' and '%s'; "
"tables have more than one foreign key "
"constraint relationship between them. "
"Please specify the 'onclause' of this "
"join explicitly." % (a.description, b.description))
elif len(crit) == 1:
return (crit[0])
else:
return sql.and_(*crit)
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = annotated_classes[element.__class__] = type.__new__(type,
"Annotated%s" % element.__class__.__name__,
(Annotated, element.__class__), {})
return object.__new__(cls)
def __init__(self, element, values):
# force FromClause to generate their internal
# collections into __dict__
if isinstance(element, expression.FromClause):
element.c
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = _values
return clone
def _deannotate(self):
return self.__element
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return Annotated(clone, self._annotations)
def __hash__(self):
return hash(self.__element)
def __cmp__(self, other):
return cmp(hash(self.__element), hash(other))
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
from sqlalchemy.sql import expression
for cls in expression.__dict__.values() + [schema.Column, schema.Table]:
if isinstance(cls, type) and issubclass(cls, expression.ClauseElement):
exec "class Annotated%s(Annotated, cls):\n" \
" __visit_name__ = cls.__visit_name__\n"\
" pass" % (cls.__name__, ) in locals()
exec "annotated_classes[cls] = Annotated%s" % (cls.__name__)
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
# check if element is present in the exclude list.
# take into account proxying relationships.
if exclude and elem.proxy_set.intersection(exclude):
elem = elem._clone()
elif annotations != elem._annotations:
elem = elem._annotate(annotations.copy())
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
def _deep_deannotate(element):
"""Deep copy the given element, removing all annotations."""
def clone(elem):
elem = elem._deannotate()
elem._copy_internals(clone=clone)
return elem
if element is not None:
element = clone(element)
return element
def splice_joins(left, right, stop_on=None):
if left is None:
return right
stack = [(right, None)]
adapter = ClauseAdapter(left)
ret = None
while stack:
(right, prevright) = stack.pop()
if isinstance(right, expression.Join) and right is not stop_on:
right = right._clone()
right._reset_exported()
right.onclause = adapter.traverse(right.onclause)
stack.append((right.left, right))
else:
right = adapter.traverse(right)
if prevright is not None:
prevright.left = right
if ret is None:
ret = right
return ret
def reduce_columns(columns, *clauses, **kw):
"""given a list of columns, return a 'reduced' set based on natural equivalents.
the set is reduced to the smallest list of columns which have no natural
equivalent present in the list. A "natural equivalent" means that two columns
will ultimately represent the same value because they are related by a foreign key.
\*clauses is an optional list of join clauses which will be traversed
to further identify columns that are "equivalent".
\**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
whose tables are not yet configured.
This function is primarily used to determine the most minimal "primary key"
from a selectable, by reducing the set of primary key columns present
in the selectable to just those that are not repeated.
"""
ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
columns = util.ordered_column_set(columns)
omit = util.column_set()
for col in columns:
for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
for c in columns:
if c is col:
continue
try:
fk_col = fk.column
except exc.NoReferencedTableError:
if ignore_nonexistent_tables:
continue
else:
raise
if fk_col.shares_lineage(c):
omit.add(col)
break
if clauses:
def visit_binary(binary):
if binary.operator == operators.eq:
cols = util.column_set(chain(*[c.proxy_set for c in columns.difference(omit)]))
if binary.left in cols and binary.right in cols:
for c in columns:
if c.shares_lineage(binary.right):
omit.add(c)
break
for clause in clauses:
visitors.traverse(clause, {}, {'binary':visit_binary})
return expression.ColumnSet(columns.difference(omit))
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
consider_as_referenced_keys=None, any_operator=False):
"""traverse an expression and locate binary criterion pairs."""
if consider_as_foreign_keys and consider_as_referenced_keys:
raise exc.ArgumentError("Can only specify one of "
"'consider_as_foreign_keys' or "
"'consider_as_referenced_keys'")
def visit_binary(binary):
if not any_operator and binary.operator is not operators.eq:
return
if not isinstance(binary.left, sql.ColumnElement) or \
not isinstance(binary.right, sql.ColumnElement):
return
if consider_as_foreign_keys:
if binary.left in consider_as_foreign_keys and \
(binary.right is binary.left or
binary.right not in consider_as_foreign_keys):
pairs.append((binary.right, binary.left))
elif binary.right in consider_as_foreign_keys and \
(binary.left is binary.right or
binary.left not in consider_as_foreign_keys):
pairs.append((binary.left, binary.right))
elif consider_as_referenced_keys:
if binary.left in consider_as_referenced_keys and \
(binary.right is binary.left or
binary.right not in consider_as_referenced_keys):
pairs.append((binary.left, binary.right))
elif binary.right in consider_as_referenced_keys and \
(binary.left is binary.right or
binary.left not in consider_as_referenced_keys):
pairs.append((binary.right, binary.left))
else:
if isinstance(binary.left, schema.Column) and \
isinstance(binary.right, schema.Column):
if binary.left.references(binary.right):
pairs.append((binary.right, binary.left))
elif binary.right.references(binary.left):
pairs.append((binary.left, binary.right))
pairs = []
visitors.traverse(expression, {}, {'binary':visit_binary})
return pairs
def folded_equivalents(join, equivs=None):
"""Return a list of uniquely named columns.
The column list of the given Join will be narrowed
down to a list of all equivalently-named,
equated columns folded into one column, where 'equated' means they are
equated to each other in the ON clause of this join.
This function is used by Join.select(fold_equivalents=True).
Deprecated. This function is used for a certain kind of
"polymorphic_union" which is designed to achieve joined
table inheritance where the base table has no "discriminator"
column; [ticket:1131] will provide a better way to
achieve this.
"""
if equivs is None:
equivs = set()
def visit_binary(binary):
if binary.operator == operators.eq and binary.left.name == binary.right.name:
equivs.add(binary.right)
equivs.add(binary.left)
visitors.traverse(join.onclause, {}, {'binary':visit_binary})
collist = []
if isinstance(join.left, expression.Join):
left = folded_equivalents(join.left, equivs)
else:
left = list(join.left.columns)
if isinstance(join.right, expression.Join):
right = folded_equivalents(join.right, equivs)
else:
right = list(join.right.columns)
used = set()
for c in left + right:
if c in equivs:
if c.name not in used:
collist.append(c)
used.add(c.name)
else:
collist.append(c)
return collist
class AliasedRow(object):
"""Wrap a RowProxy with a translation map.
This object allows a set of keys to be translated
to those present in a RowProxy.
"""
def __init__(self, row, map):
# AliasedRow objects don't nest, so un-nest
# if another AliasedRow was passed
if isinstance(row, AliasedRow):
self.row = row.row
else:
self.row = row
self.map = map
def __contains__(self, key):
return self.map[key] in self.row
def has_key(self, key):
return key in self
def __getitem__(self, key):
return self.row[self.map[key]]
def keys(self):
return self.row.keys()
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
s.c.col1 == table2.c.col1
"""
def __init__(self, selectable, equivalents=None, include=None, exclude=None):
self.__traverse_options__ = {'column_collections':False, 'stop_on':[selectable]}
self.selectable = selectable
self.include = include
self.exclude = exclude
self.equivalents = util.column_dict(equivalents or {})
def _corresponding_column(self, col, require_embedded, _seen=util.EMPTY_SET):
newcol = self.selectable.corresponding_column(col, require_embedded=require_embedded)
if newcol is None and col in self.equivalents and col not in _seen:
for equiv in self.equivalents[col]:
newcol = self._corresponding_column(equiv, require_embedded=require_embedded, _seen=_seen.union([col]))
if newcol is not None:
return newcol
return newcol
def replace(self, col):
if isinstance(col, expression.FromClause):
if self.selectable.is_derived_from(col):
return self.selectable
if not isinstance(col, expression.ColumnElement):
return None
if self.include and col not in self.include:
return None
elif self.exclude and col in self.exclude:
return None
return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
"""Extends ClauseAdapter with extra utility functions.
Provides the ability to "wrap" this ClauseAdapter
around another, a columns dictionary which returns
cached, adapted elements given an original, and an
adapted_row() factory.
"""
def __init__(self, selectable, equivalents=None, chain_to=None, include=None, exclude=None, adapt_required=False):
ClauseAdapter.__init__(self, selectable, equivalents, include, exclude)
if chain_to:
self.chain(chain_to)
self.columns = util.populate_column_dict(self._locate_col)
self.adapt_required = adapt_required
def wrap(self, adapter):
ac = self.__class__.__new__(self.__class__)
ac.__dict__ = self.__dict__.copy()
ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col)
ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause)
ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list)
ac.columns = util.populate_column_dict(ac._locate_col)
return ac
adapt_clause = ClauseAdapter.traverse
adapt_list = ClauseAdapter.copy_and_process
def _wrap(self, local, wrapped):
def locate(col):
col = local(col)
return wrapped(col)
return locate
def _locate_col(self, col):
c = self._corresponding_column(col, False)
if c is None:
c = self.adapt_clause(col)
# anonymize labels in case they have a hardcoded name
if isinstance(c, expression._Label):
c = c.label(None)
# adapt_required indicates that if we got the same column
# back which we put in (i.e. it passed through),
# it's not correct. this is used by eagerloading which
# knows that all columns and expressions need to be adapted
# to a result row, and a "passthrough" is definitely targeting
# the wrong column.
if self.adapt_required and c is col:
return None
return c
def adapted_row(self, row):
return AliasedRow(row, self.columns)
def __getstate__(self):
d = self.__dict__.copy()
del d['columns']
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.columns = util.PopulateDict(self._locate_col)
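# The block below is not part of the original module; it is a hedged, minimal
# demonstration of sort_tables(), join_condition() and AliasedRow on a tiny
# two-table schema. It assumes the SQLAlchemy checkout this file belongs to
# (circa 0.5) is importable; the table and column names are hypothetical.
if __name__ == '__main__':
    from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
    meta = MetaData()
    parent = Table('parent', meta, Column('id', Integer, primary_key=True))
    child = Table('child', meta,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer, ForeignKey('parent.id')))
    # Referenced tables sort before the tables that reference them.
    print([t.name for t in sort_tables([child, parent])])   # ['parent', 'child']
    # The single foreign key yields the expected ON clause.
    print(join_condition(parent, child))                     # parent.id = child.parent_id
    # AliasedRow translates caller-side keys to keys present in the wrapped row;
    # a plain dict stands in for a RowProxy here.
    row = AliasedRow({'users_id': 7}, {'id': 'users_id'})
    print(row['id'])                                         # 7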
|
ShawnPengxy/Flask-madeBlog | refs/heads/master | site-packages/pip/_vendor/requests/packages/chardet/jisfreq.py | 3130 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# Characters are sorted in order of frequency.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
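# A small self-check (not part of the original table) of the ratios quoted above:
if __name__ == '__main__':
    coverage_512 = 0.92635
    print('ideal distribution ratio:  %.2f' % (coverage_512 / (1 - coverage_512)))    # ~12.58
    print('random distribution ratio: %.3f' % (512.0 / (2965 + 62 + 83 + 86 - 512)))  # ~0.191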
# Char to FreqOrder table ,
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purposes
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
|
corruptnova/namebench
|
refs/heads/master
|
tools/add_linkcount_and_version_to_csv.py
|
174
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add link count and version to csv"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import csv
import check_nameserver_popularity
import sys
reader = csv.reader(open(sys.argv[1]))
writer = csv.writer(open('output.csv', 'w'))
sys.path.append('..')
#sys.path.append('/Users/tstromberg/namebench')
import third_party
from libnamebench import addr_util
from libnamebench import nameserver
for row in reader:
ip = row[0]
ns = nameserver.NameServer(ip)
ns.timeout = 0.5
ns.health_timeout = 0.5
try:
link_count = len(check_nameserver_popularity.GetUrls(ip))
  except Exception:
link_count = ''
row.insert(-1, link_count)
row.append(ns.version or '')
print "%s: %s" % (ip, ns.version)
writer.writerow(row)
|
agry/NGECore2
|
refs/heads/master
|
scripts/object/tangible/deed/player_house_deed/generic_house_medium_windowed_s02_deed.py
|
96
|
import sys
def setup(core, object):
object.setAttachment('radial_filename', 'deeds/structureDeed')
return
def use(core, actor, object):
return
|
ToBeReplaced/ansible-modules-extras
|
refs/heads/devel
|
database/postgresql/postgresql_ext.py
|
81
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: postgresql_ext
short_description: Add or remove PostgreSQL extensions from a database.
description:
- Add or remove PostgreSQL extensions from a database.
version_added: "0.1"
options:
name:
description:
- name of the extension to add or remove
required: true
default: null
db:
description:
- name of the database to add or remove the extension to/from
required: true
default: null
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
port:
description:
- Database port to connect to.
required: false
default: 5432
state:
description:
- The database extension state
required: false
default: present
choices: [ "present", "absent" ]
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''
# Adds postgis to the database "acme"
- postgresql_ext: name=postgis db=acme
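# A further illustrative sketch (not in the upstream examples): dropping the
# same extension again, using only the name/db/state options documented above.
- postgresql_ext: name=postgis db=acme state=absent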
'''
try:
import psycopg2
import psycopg2.extras
except ImportError:
postgresqldb_found = False
else:
postgresqldb_found = True
class NotSupportedError(Exception):
pass
# ===========================================
# PostgreSQL module specific support methods.
#
def ext_exists(cursor, ext):
query = "SELECT * FROM pg_extension WHERE extname=%(ext)s"
cursor.execute(query, {'ext': ext})
return cursor.rowcount == 1
def ext_delete(cursor, ext):
if ext_exists(cursor, ext):
query = "DROP EXTENSION \"%s\"" % ext
cursor.execute(query)
return True
else:
return False
def ext_create(cursor, ext):
if not ext_exists(cursor, ext):
query = 'CREATE EXTENSION "%s"' % ext
cursor.execute(query)
return True
else:
return False
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
login_user=dict(default="postgres"),
login_password=dict(default=""),
login_host=dict(default=""),
port=dict(default="5432"),
db=dict(required=True),
ext=dict(required=True, aliases=['name']),
state=dict(default="present", choices=["absent", "present"]),
),
supports_check_mode = True
)
if not postgresqldb_found:
module.fail_json(msg="the python psycopg2 module is required")
db = module.params["db"]
ext = module.params["ext"]
port = module.params["port"]
state = module.params["state"]
changed = False
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host":"host",
"login_user":"user",
"login_password":"password",
"port":"port"
}
kw = dict( (params_map[k], v) for (k, v) in module.params.iteritems()
if k in params_map and v != '' )
try:
db_connection = psycopg2.connect(database=db, **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True
else:
db_connection.set_isolation_level(psycopg2
.extensions
.ISOLATION_LEVEL_AUTOCOMMIT)
cursor = db_connection.cursor(
cursor_factory=psycopg2.extras.DictCursor)
except Exception, e:
module.fail_json(msg="unable to connect to database: %s" % e)
try:
if module.check_mode:
if state == "absent":
                changed = ext_exists(cursor, ext)
            elif state == "present":
                changed = not ext_exists(cursor, ext)
module.exit_json(changed=changed,ext=ext)
if state == "absent":
changed = ext_delete(cursor, ext)
elif state == "present":
changed = ext_create(cursor, ext)
except NotSupportedError, e:
module.fail_json(msg=str(e))
except Exception, e:
module.fail_json(msg="Database query failed: %s" % e)
module.exit_json(changed=changed, db=db)
# import module snippets
from ansible.module_utils.basic import *
main()
|
coala-analyzer/coala-quickstart
|
refs/heads/master
|
tests/test_bears/LinterBearWithCreateArguments.py
|
1
|
from coalib.bearlib.abstractions.Linter import linter
@linter(executable='some_lint',
output_format='regex',
output_regex=r'.+:(?P<line>\d+):(?P<message>.*)')
class LinterBearWithCreateArguments:
CAN_DETECT = {'Syntax', 'Security'}
CAN_FIX = {'Formatting'}
LANGUAGES = {}
@staticmethod
def create_arguments(filename, file, config_file, nonopsetting,
someoptionalsetting=True):
return ()
@staticmethod
def generate_config(filename, file, yes: bool, rick,
makman2='awesome'):
return str(makman2)
|
traveloka/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/lxd/lxd_profile.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: lxd_profile
short_description: Manage LXD profiles
version_added: "2.2"
description:
- Management of LXD profiles
author: "Hiroaki Nakamura (@hnakamur)"
options:
name:
description:
- Name of a profile.
required: true
config:
description:
- 'The config for the container (e.g. {"limits.memory": "4GB"}).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
- If the profile already exists and its "config" value in metadata
obtained from
GET /1.0/profiles/<name>
U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
              are different, then this module tries to apply the configurations.
            - Not all config values can be applied to an existing profile;
              you may need to delete and recreate the profile instead.
required: false
devices:
description:
- 'The devices for the profile
(e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
required: false
new_name:
description:
- A new name of a profile.
            - If this parameter is specified, the profile will be renamed to this name.
See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
required: false
state:
choices:
- present
- absent
description:
- Define the state of a profile.
required: false
default: present
url:
description:
- The unix domain socket path or the https URL for the LXD server.
required: false
default: unix:/var/lib/lxd/unix.socket
key_file:
description:
- The client certificate key file path.
required: false
default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
cert_file:
description:
- The client certificate file path.
required: false
default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
trust_password:
description:
- The client trusted password.
- You need to set this password on the LXD server before
running this module using the following command.
lxc config set core.trust_password <some random password>
See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
            - If trust_password is set, this module sends a request for
authentication before sending any requests.
required: false
notes:
- Profiles must have a unique name. If you attempt to create a profile
    with a name that already exists in the user's namespace, the module will
simply return as "unchanged".
'''
EXAMPLES = '''
# An example for creating a profile
- hosts: localhost
connection: local
tasks:
- name: Create a profile
lxd_profile:
name: macvlan
state: present
config: {}
description: my macvlan profile
devices:
eth0:
nictype: macvlan
parent: br0
type: nic
# An example for creating a profile via http connection
- hosts: localhost
connection: local
tasks:
- name: create macvlan profile
lxd_profile:
url: https://127.0.0.1:8443
# These cert_file and key_file values are equal to the default values.
#cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
#key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
trust_password: mypassword
name: macvlan
state: present
config: {}
description: my macvlan profile
devices:
eth0:
nictype: macvlan
parent: br0
type: nic
# An example for deleting a profile
- hosts: localhost
connection: local
tasks:
- name: Delete a profile
lxd_profile:
name: macvlan
state: absent
# An example for renaming a profile
- hosts: localhost
connection: local
tasks:
- name: Rename a profile
lxd_profile:
name: macvlan
new_name: macvlan2
state: present
'''
RETURN='''
old_state:
description: The old state of the profile
returned: success
type: string
sample: "absent"
logs:
description: The logs of requests and responses.
returned: when ansible-playbook is invoked with -vvvv.
type: list
sample: "(too long to be placed here)"
actions:
description: List of actions performed for the profile.
returned: success
type: list
sample: '["create"]'
'''
import os
from ansible.module_utils.lxd import LXDClient, LXDClientException
# PROFILES_STATES is a list of the supported profile states.
PROFILES_STATES = [
'present', 'absent'
]
# CONFIG_PARAMS is a list of config attribute names.
CONFIG_PARAMS = [
'config', 'description', 'devices'
]
class LXDProfileManagement(object):
def __init__(self, module):
"""Management of LXC containers via Ansible.
:param module: Processed Ansible Module.
:type module: ``object``
"""
self.module = module
self.name = self.module.params['name']
self._build_config()
self.state = self.module.params['state']
self.new_name = self.module.params.get('new_name', None)
self.url = self.module.params['url']
self.key_file = self.module.params.get('key_file', None)
self.cert_file = self.module.params.get('cert_file', None)
self.debug = self.module._verbosity >= 4
try:
self.client = LXDClient(
self.url, key_file=self.key_file, cert_file=self.cert_file,
debug=self.debug
)
except LXDClientException as e:
self.module.fail_json(msg=e.msg)
self.trust_password = self.module.params.get('trust_password', None)
self.actions = []
def _build_config(self):
self.config = {}
for attr in CONFIG_PARAMS:
param_val = self.module.params.get(attr, None)
if param_val is not None:
self.config[attr] = param_val
def _get_profile_json(self):
return self.client.do(
'GET', '/1.0/profiles/{0}'.format(self.name),
ok_error_codes=[404]
)
@staticmethod
def _profile_json_to_module_state(resp_json):
if resp_json['type'] == 'error':
return 'absent'
return 'present'
def _update_profile(self):
if self.state == 'present':
if self.old_state == 'absent':
if self.new_name is None:
self._create_profile()
else:
self.module.fail_json(
msg='new_name must not be set when the profile does not exist and the specified state is present',
changed=False)
else:
if self.new_name is not None and self.new_name != self.name:
self._rename_profile()
if self._needs_to_apply_profile_configs():
self._apply_profile_configs()
elif self.state == 'absent':
if self.old_state == 'present':
if self.new_name is None:
self._delete_profile()
else:
self.module.fail_json(
msg='new_name must not be set when the profile exists and the specified state is absent',
changed=False)
def _create_profile(self):
config = self.config.copy()
config['name'] = self.name
self.client.do('POST', '/1.0/profiles', config)
self.actions.append('create')
def _rename_profile(self):
config = {'name': self.new_name}
self.client.do('POST', '/1.0/profiles/{}'.format(self.name), config)
self.actions.append('rename')
self.name = self.new_name
def _needs_to_change_profile_config(self, key):
if key not in self.config:
return False
old_configs = self.old_profile_json['metadata'].get(key, None)
return self.config[key] != old_configs
def _needs_to_apply_profile_configs(self):
return (
self._needs_to_change_profile_config('config') or
self._needs_to_change_profile_config('description') or
self._needs_to_change_profile_config('devices')
)
def _apply_profile_configs(self):
config = self.old_profile_json.copy()
for k, v in self.config.items():
config[k] = v
self.client.do('PUT', '/1.0/profiles/{}'.format(self.name), config)
self.actions.append('apply_profile_configs')
def _delete_profile(self):
self.client.do('DELETE', '/1.0/profiles/{}'.format(self.name))
self.actions.append('delete')
def run(self):
"""Run the main method."""
try:
if self.trust_password is not None:
self.client.authenticate(self.trust_password)
self.old_profile_json = self._get_profile_json()
self.old_state = self._profile_json_to_module_state(self.old_profile_json)
self._update_profile()
state_changed = len(self.actions) > 0
result_json = {
'changed': state_changed,
'old_state': self.old_state,
'actions': self.actions
}
if self.client.debug:
result_json['logs'] = self.client.logs
self.module.exit_json(**result_json)
except LXDClientException as e:
state_changed = len(self.actions) > 0
fail_params = {
'msg': e.msg,
'changed': state_changed,
'actions': self.actions
}
if self.client.debug:
fail_params['logs'] = e.kwargs['logs']
self.module.fail_json(**fail_params)
def main():
"""Ansible Main module."""
module = AnsibleModule(
argument_spec=dict(
name=dict(
type='str',
required=True
),
new_name=dict(
type='str',
),
config=dict(
type='dict',
),
description=dict(
type='str',
),
devices=dict(
type='dict',
),
state=dict(
choices=PROFILES_STATES,
default='present'
),
url=dict(
type='str',
default='unix:/var/lib/lxd/unix.socket'
),
key_file=dict(
type='str',
default='{}/.config/lxc/client.key'.format(os.environ['HOME'])
),
cert_file=dict(
type='str',
default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
),
trust_password=dict(
type='str',
)
),
supports_check_mode=False,
)
lxd_manage = LXDProfileManagement(module=module)
lxd_manage.run()
# import module bits
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
Verizon/libcloud
|
refs/heads/trunk
|
libcloud/utils/decorators.py
|
48
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from functools import wraps
from libcloud.common.types import LibcloudError
__all__ = [
'wrap_non_libcloud_exceptions'
]
def wrap_non_libcloud_exceptions(func):
"""
    Decorator function which catches non-LibcloudError exceptions, wraps them
in LibcloudError class and re-throws the wrapped exception.
Note: This function should only be used to wrap methods on the driver
classes.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
e = sys.exc_info()[1]
if isinstance(e, LibcloudError):
raise e
if len(args) >= 1:
driver = args[0]
else:
driver = None
fault = getattr(e, 'fault', None)
if fault and getattr(fault, 'string', None):
message = fault.string
else:
message = e.message
raise LibcloudError(value=message, driver=driver)
return decorated_function
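# Illustrative usage sketch (not part of the upstream file; ExampleDriver is a
# made-up name). The decorator is meant for methods on driver classes, so an
# unexpected error raised inside a wrapped method is re-raised as a
# LibcloudError. Like the rest of this module (e.message), the demo assumes a
# Python 2 interpreter; it only runs when this file is executed directly.
if __name__ == '__main__':
    class ExampleDriver(object):
        @wrap_non_libcloud_exceptions
        def list_nodes(self):
            # Simulate an unexpected, non-Libcloud failure.
            raise ValueError('unexpected failure')

    try:
        ExampleDriver().list_nodes()
    except LibcloudError as exc:
        print('wrapped as LibcloudError: %s' % exc)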
|
ClaudioNahmad/Servicio-Social
|
refs/heads/master
|
Parametros/CosmoMC/CosmoMC-master/python/GetDistPlots.py
|
1
|
from getdist.plots import *
|
rocky4570/moto
|
refs/heads/master
|
moto/kinesis/models.py
|
4
|
from __future__ import unicode_literals
import datetime
import time
import boto.kinesis
import re
import six
import itertools
from operator import attrgetter
from hashlib import md5
from moto.compat import OrderedDict
from moto.core import BaseBackend, BaseModel
from moto.core.utils import unix_time
from .exceptions import StreamNotFoundError, ShardNotFoundError, ResourceInUseError, \
ResourceNotFoundError, InvalidArgumentError
from .utils import compose_shard_iterator, compose_new_shard_iterator, decompose_shard_iterator
class Record(BaseModel):
def __init__(self, partition_key, data, sequence_number, explicit_hash_key):
self.partition_key = partition_key
self.data = data
self.sequence_number = sequence_number
self.explicit_hash_key = explicit_hash_key
self.created_at_datetime = datetime.datetime.utcnow()
self.created_at = unix_time(self.created_at_datetime)
def to_json(self):
return {
"Data": self.data,
"PartitionKey": self.partition_key,
"SequenceNumber": str(self.sequence_number),
"ApproximateArrivalTimestamp": self.created_at_datetime.isoformat()
}
class Shard(BaseModel):
def __init__(self, shard_id, starting_hash, ending_hash):
self._shard_id = shard_id
self.starting_hash = starting_hash
self.ending_hash = ending_hash
self.records = OrderedDict()
@property
def shard_id(self):
return "shardId-{0}".format(str(self._shard_id).zfill(12))
def get_records(self, last_sequence_id, limit):
last_sequence_id = int(last_sequence_id)
results = []
secs_behind_latest = 0
for sequence_number, record in self.records.items():
if sequence_number > last_sequence_id:
results.append(record)
last_sequence_id = sequence_number
very_last_record = self.records[next(reversed(self.records))]
secs_behind_latest = very_last_record.created_at - record.created_at
if len(results) == limit:
break
millis_behind_latest = int(secs_behind_latest * 1000)
return results, last_sequence_id, millis_behind_latest
def put_record(self, partition_key, data, explicit_hash_key):
# Note: this function is not safe for concurrency
if self.records:
last_sequence_number = self.get_max_sequence_number()
else:
last_sequence_number = 0
sequence_number = last_sequence_number + 1
self.records[sequence_number] = Record(
partition_key, data, sequence_number, explicit_hash_key)
return sequence_number
def get_min_sequence_number(self):
if self.records:
return list(self.records.keys())[0]
return 0
def get_max_sequence_number(self):
if self.records:
return list(self.records.keys())[-1]
return 0
def get_sequence_number_at(self, at_timestamp):
if not self.records or at_timestamp < list(self.records.values())[0].created_at:
return 0
else:
# find the last item in the list that was created before
# at_timestamp
r = next((r for r in reversed(self.records.values()) if r.created_at < at_timestamp), None)
return r.sequence_number
def to_json(self):
return {
"HashKeyRange": {
"EndingHashKey": str(self.ending_hash),
"StartingHashKey": str(self.starting_hash)
},
"SequenceNumberRange": {
"EndingSequenceNumber": self.get_max_sequence_number(),
"StartingSequenceNumber": self.get_min_sequence_number(),
},
"ShardId": self.shard_id
}
class Stream(BaseModel):
def __init__(self, stream_name, shard_count, region):
self.stream_name = stream_name
self.shard_count = shard_count
self.region = region
self.account_number = "123456789012"
self.shards = {}
self.tags = {}
if six.PY3:
izip_longest = itertools.zip_longest
else:
izip_longest = itertools.izip_longest
for index, start, end in izip_longest(range(shard_count),
range(0, 2**128, 2 **
128 // shard_count),
range(2**128 // shard_count, 2 **
128, 2**128 // shard_count),
fillvalue=2**128):
shard = Shard(index, start, end)
self.shards[shard.shard_id] = shard
@property
def arn(self):
return "arn:aws:kinesis:{region}:{account_number}:{stream_name}".format(
region=self.region,
account_number=self.account_number,
stream_name=self.stream_name
)
def get_shard(self, shard_id):
if shard_id in self.shards:
return self.shards[shard_id]
else:
raise ShardNotFoundError(shard_id)
def get_shard_for_key(self, partition_key, explicit_hash_key):
if not isinstance(partition_key, six.string_types):
raise InvalidArgumentError("partition_key")
if len(partition_key) > 256:
raise InvalidArgumentError("partition_key")
if explicit_hash_key:
if not isinstance(explicit_hash_key, six.string_types):
raise InvalidArgumentError("explicit_hash_key")
key = int(explicit_hash_key)
if key >= 2**128:
raise InvalidArgumentError("explicit_hash_key")
else:
key = int(md5(partition_key.encode('utf-8')).hexdigest(), 16)
for shard in self.shards.values():
if shard.starting_hash <= key < shard.ending_hash:
return shard
def put_record(self, partition_key, explicit_hash_key, sequence_number_for_ordering, data):
shard = self.get_shard_for_key(partition_key, explicit_hash_key)
sequence_number = shard.put_record(
partition_key, data, explicit_hash_key)
return sequence_number, shard.shard_id
def to_json(self):
return {
"StreamDescription": {
"StreamARN": self.arn,
"StreamName": self.stream_name,
"StreamStatus": "ACTIVE",
"HasMoreShards": False,
"Shards": [shard.to_json() for shard in self.shards.values()],
}
}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
region = properties.get('Region', 'us-east-1')
shard_count = properties.get('ShardCount', 1)
return Stream(properties['Name'], shard_count, region)
class FirehoseRecord(BaseModel):
def __init__(self, record_data):
self.record_id = 12345678
self.record_data = record_data
class DeliveryStream(BaseModel):
def __init__(self, stream_name, **stream_kwargs):
self.name = stream_name
self.redshift_username = stream_kwargs.get('redshift_username')
self.redshift_password = stream_kwargs.get('redshift_password')
self.redshift_jdbc_url = stream_kwargs.get('redshift_jdbc_url')
self.redshift_role_arn = stream_kwargs.get('redshift_role_arn')
self.redshift_copy_command = stream_kwargs.get('redshift_copy_command')
self.s3_role_arn = stream_kwargs.get('s3_role_arn')
self.s3_bucket_arn = stream_kwargs.get('s3_bucket_arn')
self.s3_prefix = stream_kwargs.get('s3_prefix')
self.s3_compression_format = stream_kwargs.get(
's3_compression_format', 'UNCOMPRESSED')
self.s3_buffering_hings = stream_kwargs.get('s3_buffering_hings')
self.redshift_s3_role_arn = stream_kwargs.get('redshift_s3_role_arn')
self.redshift_s3_bucket_arn = stream_kwargs.get(
'redshift_s3_bucket_arn')
self.redshift_s3_prefix = stream_kwargs.get('redshift_s3_prefix')
self.redshift_s3_compression_format = stream_kwargs.get(
'redshift_s3_compression_format', 'UNCOMPRESSED')
self.redshift_s3_buffering_hings = stream_kwargs.get(
'redshift_s3_buffering_hings')
self.records = []
self.status = 'ACTIVE'
self.created_at = datetime.datetime.utcnow()
self.last_updated = datetime.datetime.utcnow()
@property
def arn(self):
return 'arn:aws:firehose:us-east-1:123456789012:deliverystream/{0}'.format(self.name)
def destinations_to_dict(self):
if self.s3_role_arn:
return [{
'DestinationId': 'string',
'S3DestinationDescription': {
'RoleARN': self.s3_role_arn,
'BucketARN': self.s3_bucket_arn,
'Prefix': self.s3_prefix,
'BufferingHints': self.s3_buffering_hings,
'CompressionFormat': self.s3_compression_format,
}
}]
else:
return [{
"DestinationId": "string",
"RedshiftDestinationDescription": {
"ClusterJDBCURL": self.redshift_jdbc_url,
"CopyCommand": self.redshift_copy_command,
"RoleARN": self.redshift_role_arn,
"S3DestinationDescription": {
"BucketARN": self.redshift_s3_bucket_arn,
"BufferingHints": self.redshift_s3_buffering_hings,
"CompressionFormat": self.redshift_s3_compression_format,
"Prefix": self.redshift_s3_prefix,
"RoleARN": self.redshift_s3_role_arn
},
"Username": self.redshift_username,
},
}
]
def to_dict(self):
return {
"DeliveryStreamDescription": {
"CreateTimestamp": time.mktime(self.created_at.timetuple()),
"DeliveryStreamARN": self.arn,
"DeliveryStreamName": self.name,
"DeliveryStreamStatus": self.status,
"Destinations": self.destinations_to_dict(),
"HasMoreDestinations": False,
"LastUpdateTimestamp": time.mktime(self.last_updated.timetuple()),
"VersionId": "string",
}
}
def put_record(self, record_data):
record = FirehoseRecord(record_data)
self.records.append(record)
return record
class KinesisBackend(BaseBackend):
def __init__(self):
self.streams = OrderedDict()
self.delivery_streams = {}
def create_stream(self, stream_name, shard_count, region):
if stream_name in self.streams:
raise ResourceInUseError(stream_name)
stream = Stream(stream_name, shard_count, region)
self.streams[stream_name] = stream
return stream
def describe_stream(self, stream_name):
if stream_name in self.streams:
return self.streams[stream_name]
else:
raise StreamNotFoundError(stream_name)
def list_streams(self):
return self.streams.values()
def delete_stream(self, stream_name):
if stream_name in self.streams:
return self.streams.pop(stream_name)
raise StreamNotFoundError(stream_name)
def get_shard_iterator(self, stream_name, shard_id, shard_iterator_type, starting_sequence_number,
at_timestamp):
# Validate params
stream = self.describe_stream(stream_name)
shard = stream.get_shard(shard_id)
shard_iterator = compose_new_shard_iterator(
stream_name, shard, shard_iterator_type, starting_sequence_number, at_timestamp
)
return shard_iterator
def get_records(self, shard_iterator, limit):
decomposed = decompose_shard_iterator(shard_iterator)
stream_name, shard_id, last_sequence_id = decomposed
stream = self.describe_stream(stream_name)
shard = stream.get_shard(shard_id)
records, last_sequence_id, millis_behind_latest = shard.get_records(last_sequence_id, limit)
next_shard_iterator = compose_shard_iterator(
stream_name, shard, last_sequence_id)
return next_shard_iterator, records, millis_behind_latest
def put_record(self, stream_name, partition_key, explicit_hash_key, sequence_number_for_ordering, data):
stream = self.describe_stream(stream_name)
sequence_number, shard_id = stream.put_record(
partition_key, explicit_hash_key, sequence_number_for_ordering, data
)
return sequence_number, shard_id
def put_records(self, stream_name, records):
stream = self.describe_stream(stream_name)
response = {
"FailedRecordCount": 0,
"Records": []
}
for record in records:
partition_key = record.get("PartitionKey")
explicit_hash_key = record.get("ExplicitHashKey")
data = record.get("Data")
sequence_number, shard_id = stream.put_record(
partition_key, explicit_hash_key, None, data
)
response['Records'].append({
"SequenceNumber": sequence_number,
"ShardId": shard_id
})
return response
def split_shard(self, stream_name, shard_to_split, new_starting_hash_key):
stream = self.describe_stream(stream_name)
if shard_to_split not in stream.shards:
raise ResourceNotFoundError(shard_to_split)
if not re.match(r'0|([1-9]\d{0,38})', new_starting_hash_key):
raise InvalidArgumentError(new_starting_hash_key)
new_starting_hash_key = int(new_starting_hash_key)
shard = stream.shards[shard_to_split]
last_id = sorted(stream.shards.values(),
key=attrgetter('_shard_id'))[-1]._shard_id
if shard.starting_hash < new_starting_hash_key < shard.ending_hash:
new_shard = Shard(
last_id + 1, new_starting_hash_key, shard.ending_hash)
shard.ending_hash = new_starting_hash_key
stream.shards[new_shard.shard_id] = new_shard
else:
raise InvalidArgumentError(new_starting_hash_key)
records = shard.records
shard.records = OrderedDict()
for index in records:
record = records[index]
stream.put_record(
record.partition_key, record.explicit_hash_key, None, record.data
)
def merge_shards(self, stream_name, shard_to_merge, adjacent_shard_to_merge):
stream = self.describe_stream(stream_name)
if shard_to_merge not in stream.shards:
raise ResourceNotFoundError(shard_to_merge)
if adjacent_shard_to_merge not in stream.shards:
raise ResourceNotFoundError(adjacent_shard_to_merge)
shard1 = stream.shards[shard_to_merge]
shard2 = stream.shards[adjacent_shard_to_merge]
if shard1.ending_hash == shard2.starting_hash:
shard1.ending_hash = shard2.ending_hash
elif shard2.ending_hash == shard1.starting_hash:
shard1.starting_hash = shard2.starting_hash
else:
raise InvalidArgumentError(adjacent_shard_to_merge)
del stream.shards[shard2.shard_id]
for index in shard2.records:
record = shard2.records[index]
shard1.put_record(record.partition_key,
record.data, record.explicit_hash_key)
''' Firehose '''
def create_delivery_stream(self, stream_name, **stream_kwargs):
stream = DeliveryStream(stream_name, **stream_kwargs)
self.delivery_streams[stream_name] = stream
return stream
def get_delivery_stream(self, stream_name):
if stream_name in self.delivery_streams:
return self.delivery_streams[stream_name]
else:
raise StreamNotFoundError(stream_name)
def list_delivery_streams(self):
return self.delivery_streams.values()
def delete_delivery_stream(self, stream_name):
self.delivery_streams.pop(stream_name)
def put_firehose_record(self, stream_name, record_data):
stream = self.get_delivery_stream(stream_name)
record = stream.put_record(record_data)
return record
def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None, limit=None):
stream = self.describe_stream(stream_name)
tags = []
result = {
'HasMoreTags': False,
'Tags': tags
}
for key, val in sorted(stream.tags.items(), key=lambda x: x[0]):
if limit and len(tags) >= limit:
result['HasMoreTags'] = True
break
if exclusive_start_tag_key and key < exclusive_start_tag_key:
continue
tags.append({
'Key': key,
'Value': val
})
return result
def add_tags_to_stream(self, stream_name, tags):
stream = self.describe_stream(stream_name)
stream.tags.update(tags)
def remove_tags_from_stream(self, stream_name, tag_keys):
stream = self.describe_stream(stream_name)
for key in tag_keys:
if key in stream.tags:
del stream.tags[key]
kinesis_backends = {}
for region in boto.kinesis.regions():
kinesis_backends[region.name] = KinesisBackend()
|
Sabayon/anaconda
|
refs/heads/master
|
pyanaconda/ui/tui/tools/run-text-spoke.py
|
9
|
#!/usr/bin/python
import sys, os
import os.path
# Check command line arguments
if len(sys.argv)<2:
print "Usage: $0 <spoke module name> [<spoke widget class>]"
sys.exit(1)
# Logging always needs to be set up first thing, or there'll be tracebacks.
from pyanaconda import anaconda_log
anaconda_log.init()
from pyanaconda.installclass import DefaultInstall
from blivet import Blivet
from pyanaconda.threads import initThreading
from pyanaconda.packaging.yumpayload import YumPayload
from pykickstart.version import makeVersion
from pyanaconda.ui.tui.simpleline import App
from pyanaconda.ui.tui import YesNoDialog
# Don't worry about fcoe, iscsi, dasd, or any of that crud.
from pyanaconda.flags import flags
flags.imageInstall = True
flags.testing = True
initThreading()
# Figure out the part we are about to show: hub/spoke?
# And get the name of the module which represents it
if os.path.basename(sys.argv[0]) == "run-text-spoke.py":
spokeModuleName = "pyanaconda.ui.tui.spokes.%s" % sys.argv[1]
from pyanaconda.ui.common import Spoke
spokeBaseClass = Spoke
spokeText = "spoke"
SpokeText = "Spoke"
elif os.path.basename(sys.argv[0]) == "run-text-hub.py":
spokeModuleName = "pyanaconda.ui.tui.hubs.%s" % sys.argv[1]
from pyanaconda.ui.common import Hub
spokeBaseClass = Hub
spokeText = "hub"
SpokeText = "Hub"
else:
print "You have to run this command as run-spoke.py or run-hub.py."
sys.exit(1)
# Set default spoke class
spokeClass = None
spokeClassName = None
# Load spoke specified on the command line
# If the spoke module was specified, but the spoke class was not,
# try to find it using class hierarchy
try:
spokeClassName = sys.argv[2]
__import__(spokeModuleName, fromlist = [spokeClassName])
spokeModule = sys.modules[spokeModuleName]
except IndexError:
__import__(spokeModuleName)
spokeModule = sys.modules[spokeModuleName]
for k,v in vars(spokeModule).iteritems():
try:
print k,v
if issubclass(v, spokeBaseClass) and v != spokeBaseClass:
spokeClassName = k
spokeClass = v
except TypeError:
pass
if not spokeClass:
try:
spokeClass = getattr(spokeModule, spokeClassName)
except (AttributeError, TypeError):
print "%s %s could not be found in %s" % (SpokeText, spokeClassName, spokeModuleName)
sys.exit(1)
print "Running %s %s from %s" % (spokeText, spokeClass, spokeModule)
ksdata = makeVersion()
storage = Blivet(ksdata=ksdata)
storage.reset()
instclass = DefaultInstall()
app = App("TEST HARNESS", yes_or_no_question = YesNoDialog)
payload = YumPayload(ksdata)
payload.setup(storage)
payload.install_log = sys.stdout
spoke = spokeClass(app, ksdata, storage, payload, instclass)
if not spoke.showable:
print "This %s is not showable, but I'll continue anyway." % spokeText
app.schedule_screen(spoke)
app.run()
if hasattr(spoke, "status"):
print "%s status:\n%s\n" % (SpokeText, spoke.status)
if hasattr(spoke, "completed"):
print "%s completed:\n%s\n" % (SpokeText, spoke.completed)
print "%s kickstart fragment:\n%s" % (SpokeText, ksdata)
|
richteer/halibot-buildapcsales
|
refs/heads/master
|
buildapcsales.py
|
1
|
# TODO: Remove this when modules get proper localdir support
import sys
sys.path.append("modules/buildapcsales")
import time
import threading
import requests
import re
import bapc_filter as bapcfilter
from halibot import HalModule
from halibot import Message, Context
class BuildAPcSales(HalModule):
run = False
thread = None
url = "http://reddit.com/r/buildapcsales/new.json?before={before}"
form = "{title} ({domain}) - {short_url}"
delay = 120
last = ""
resp = None
target = ""
def init(self):
self.target = self.config["target"] # Mandatory, let error trickle up
self.delay = self.config.get("delay",120)
self.filters = {}
for line in self.config.get("filters", ["all: .*"]):
self.add_filter(line)
self.form = self.config.get("format", self.form)
self.start_watcher()
def add_filter(self, line):
name, fil = bapcfilter.parse_command(line)
self.filters[name] = fil
def start_watcher(self):
self.thread = threading.Thread(target=self._refreshloop)
self.run = True
self.thread.start()
# Join?
def stop_watcher(self):
self.run = False
self.last = ""
self.oldthread = self.thread
self.oldthread.join(self.delay)
if self.oldthread.is_alive():
self.log.warning("Old thread did not stop!")
def _refreshloop(self):
self.first = True
while self.run:
r = self.resp = self.make_request(before=self.last)
if r.ok and r.status_code == 200:
try:
new = self.parse(r.json()["data"]["children"], first=self.first)
self.send_updates(new)
except Exception as e:
print("error parsing: " + str(e))
# apply filters here
self.first = False
time.sleep(self.delay)
# TODO move error checking into here, return only data?
def make_request(self, **kwargs):
return requests.get(self.url.format(**kwargs), headers={"User-Agent":"Mozilla/5.0"})
def parse(self, data, first=False):
new = []
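# Keep non-stickied posts whose title matches at least one configured filter.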
for d in data:
d = d["data"]
if d["stickied"]:
continue
for name, fil in self.filters.items():
if fil.match(d['title']):
new.append(d)
break # match only one filter for now
if len(new):
self.last = new[0]["name"]
return new if not first else []
def send_updates(self, new):
msg = Message(context=Context(agent="irc",whom=self.target))
for d in new:
msg.body = self.outform(d)
self.send_to(msg, ["irc"]) # TODO: Not need this
def outform(self, entry):
entry["short_url"] = "http://redd.it/" + entry["id"]
return self.form.format(**entry)
def receive(self, msg):
ls = msg.body.split(' ')
cmd = ls[0]
arg = ' '.join(ls[1:]).strip()
# TODO: Clean this up
if cmd == "!bapc":
if arg == "start" and not self.run:
self.start_watcher()
self.reply(msg, body="Started watcher")
elif arg == "stop" and self.run:
self.stop_watcher()
self.reply(msg, body="Stopping watcher")
elif arg == "restart":
if self.run:
self.reply(msg, body="Stopping watcher")
self.stop_watcher()
#time.sleep(self.delay)
self.start_watcher()
self.reply(msg, body="Started watcher")
elif arg == "reset":
self.last = ""
self.first = True
elif arg == "test":
self.send_to(Message(context=Context(agent="irc",whom=self.target), body="Hello World!"), ["irc"])
elif arg.startswith("filter"):
args = arg.split(" ",2)[1:]
try:
if args[0] == "add":
self.add_filter(args[1])
elif args[0] == "show":
for it in self.filters.values():
self.reply(msg, body=it.line)
elif args[0] in ("drop","del"):
self.filters.pop(args[1])
except:
pass
|
ilovelinux/BottonPi
|
refs/heads/master
|
modules/twitch.py
|
1
|
from irc3.plugins.command import command
import urllib.request
import json
@command
def twitch(self=None, mask=None, target=None, args=None):
"""Indica il meteo della città
%%twitch <channel>...
"""
channel = ' '.join(args['<channel>'])
homeurl = 'http://www.twitch.tv/'
streaming = '{}{}'.format(homeurl, channel.replace(' ', '%20'))
url = 'https://api.twitch.tv/kraken/streams/{}'.format(channel.replace(' ', '%20'))
try:
opened = urllib.request.urlopen(url).read().decode('utf8')
except urllib.error.HTTPError:
return 'The channel "{}" was not found'.format(channel)
except:
pass
result = json.loads(opened)
if not result['stream']:
return '{} is offline'.format(channel)
play = result['stream']['game']
play = '{} is online and is playing {}'.format(channel, play)
viewers = result['stream']['viewers']
views = result['stream']['channel']['views']
statistiche = 'Viewers: {} (Total: {})'.format(viewers, views)
followers = 'Followers: {}'.format(result['stream']['channel']['followers'])
visitator = ' · '.join([statistiche, followers])
return ['{} ({})'.format(play, visitator), 'Url: {}'.format(streaming)]
|
hustcc/iOS-private-api-checker
|
refs/heads/master
|
app/utils/RequestUtil.py
|
1
|
#coding=utf-8
'''
Created on 2015-08-21
@author: atool
'''
# Fetch a request parameter from either POST or GET
def get_parameter(request, key, default = None):
'''
info: fetch the request parameter from GET or POST; other request methods are ignored
'''
# POST parameter
if request.method == 'POST':
param = request.form.get(key, default)
# GET parameter
elif request.method == 'GET':
param = request.args.get(key, default)
else:
return default
return param
# Client IP address
def get_request_ip(request):
return request.remote_addr
# HTTP method used by the client
def get_request_method(request):
return request.method
def get_request_ua(request):
return request.headers.get('User-Agent', '')
def get_request_accept_lang(request):
return request.environ.get('HTTP_ACCEPT_LANGUAGE', '')
|
daodaoliang/python-phonenumbers
|
refs/heads/dev
|
python/phonenumbers/data/region_ME.py
|
9
|
"""Auto-generated file, do not edit by hand. ME metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_ME = PhoneMetadata(id='ME', country_code=382, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[2-9]\\d{7,8}', possible_number_pattern='\\d{6,9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:20[2-8]|3(?:0[2-7]|[12][35-7]|3[4-7])|4(?:0[2367]|1[267])|5(?:0[467]|1[267]|2[367]))\\d{5}', possible_number_pattern='\\d{6,8}', example_number='30234567'),
mobile=PhoneNumberDesc(national_number_pattern='6(?:00\\d|32\\d|[89]\\d{2}|61\\d|7(?:[0-8]\\d|9(?:[3-9]|[0-2]\\d)))\\d{4}', possible_number_pattern='\\d{8,9}', example_number='67622901'),
toll_free=PhoneNumberDesc(national_number_pattern='80\\d{6}', possible_number_pattern='\\d{8}', example_number='80080002'),
premium_rate=PhoneNumberDesc(national_number_pattern='(?:9(?:4[1568]|5[178]))\\d{5}', possible_number_pattern='\\d{8}', example_number='94515151'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='78[1-9]\\d{5}', possible_number_pattern='\\d{8}', example_number='78108780'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='77\\d{6}', possible_number_pattern='\\d{8}', example_number='77273012'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['[2-57-9]|6[036-9]', '[2-57-9]|6(?:[03689]|7(?:[0-8]|9[3-9]))'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(67)(9)(\\d{3})(\\d{3})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['679', '679[0-2]'], national_prefix_formatting_rule='0\\1')])
|
mdblv2/joatu-django
|
refs/heads/master
|
application/site-packages/django/contrib/__init__.py
|
12133432
| |
vdmann/cse-360-image-hosting-website
|
refs/heads/master
|
src/varnishapp/management/commands/__init__.py
|
12133432
| |
magnunor/hyperspy
|
refs/heads/RELEASE_next_minor
|
hyperspy/samfire_utils/weights/__init__.py
|
12133432
| |
DazWorrall/ansible
|
refs/heads/devel
|
test/units/parsing/vault/__init__.py
|
12133432
| |
gangadharkadam/saloon_erp
|
refs/heads/master
|
erpnext/projects/doctype/task_depends_on/__init__.py
|
12133432
| |
GuessWhoSamFoo/pandas
|
refs/heads/master
|
asv_bench/benchmarks/io/stata.py
|
5
|
import numpy as np
from pandas import DataFrame, date_range, read_stata
import pandas.util.testing as tm
from ..pandas_vb_common import BaseIO
class Stata(BaseIO):
params = ['tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty']
param_names = ['convert_dates']
def setup(self, convert_dates):
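# Build a 100,000-row frame mixing float, string and integer columns, then write it once so the read benchmark has a file to load.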
self.fname = '__test__.dta'
N = 100000
C = 5
self.df = DataFrame(np.random.randn(N, C),
columns=['float{}'.format(i) for i in range(C)],
index=date_range('20000101', periods=N, freq='H'))
self.df['object'] = tm.makeStringIndex(N)
self.df['int8_'] = np.random.randint(np.iinfo(np.int8).min,
np.iinfo(np.int8).max - 27, N)
self.df['int16_'] = np.random.randint(np.iinfo(np.int16).min,
np.iinfo(np.int16).max - 27, N)
self.df['int32_'] = np.random.randint(np.iinfo(np.int32).min,
np.iinfo(np.int32).max - 27, N)
self.df['float32_'] = np.array(np.random.randn(N),
dtype=np.float32)
self.convert_dates = {'index': convert_dates}
self.df.to_stata(self.fname, self.convert_dates)
def time_read_stata(self, convert_dates):
read_stata(self.fname)
def time_write_stata(self, convert_dates):
self.df.to_stata(self.fname, self.convert_dates)
from ..pandas_vb_common import setup # noqa: F401
|
Cclleemm/FriendlyTorrent
|
refs/heads/master
|
src/tornado/BitTornado/HTTPHandler.py
|
15
|
# Written by Bram Cohen
# see LICENSE.txt for license information
from cStringIO import StringIO
from sys import stdout
import time
from clock import clock
from gzip import GzipFile
try:
True
except:
True = 1
False = 0
DEBUG = False
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
class HTTPConnection:
def __init__(self, handler, connection):
self.handler = handler
self.connection = connection
self.buf = ''
self.closed = False
self.done = False
self.donereading = False
self.next_func = self.read_type
def get_ip(self):
return self.connection.get_ip()
def data_came_in(self, data):
if self.donereading or self.next_func is None:
return True
self.buf += data
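# Consume the buffer one line at a time; next_func tracks the current stage of request parsing.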
while True:
try:
i = self.buf.index('\n')
except ValueError:
return True
val = self.buf[:i]
self.buf = self.buf[i+1:]
self.next_func = self.next_func(val)
if self.donereading:
return True
if self.next_func is None or self.closed:
return False
def read_type(self, data):
self.header = data.strip()
words = data.split()
if len(words) == 3:
self.command, self.path, garbage = words
self.pre1 = False
elif len(words) == 2:
self.command, self.path = words
self.pre1 = True
if self.command != 'GET':
return None
else:
return None
if self.command not in ('HEAD', 'GET'):
return None
self.headers = {}
return self.read_header
def read_header(self, data):
data = data.strip()
if data == '':
self.donereading = True
if self.headers.get('accept-encoding','').find('gzip') > -1:
self.encoding = 'gzip'
else:
self.encoding = 'identity'
r = self.handler.getfunc(self, self.path, self.headers)
if r is not None:
self.answer(r)
return None
try:
i = data.index(':')
except ValueError:
return None
self.headers[data[:i].strip().lower()] = data[i+1:].strip()
if DEBUG:
print data[:i].strip() + ": " + data[i+1:].strip()
return self.read_header
def answer(self, (responsecode, responsestring, headers, data)):
if self.closed:
return
if self.encoding == 'gzip':
compressed = StringIO()
gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
gz.write(data)
gz.close()
cdata = compressed.getvalue()
if len(cdata) >= len(data):
self.encoding = 'identity'
else:
if DEBUG:
print "Compressed: %i Uncompressed: %i\n" % (len(cdata),len(data))
data = cdata
headers['Content-Encoding'] = 'gzip'
# I'm abusing the identd field here, but this should be OK
if self.encoding == 'identity':
ident = '-'
else:
ident = self.encoding
self.handler.log( self.connection.get_ip(), ident, '-',
self.header, responsecode, len(data),
self.headers.get('referer','-'),
self.headers.get('user-agent','-') )
self.done = True
r = StringIO()
r.write('HTTP/1.0 ' + str(responsecode) + ' ' +
responsestring + '\r\n')
if not self.pre1:
headers['Content-Length'] = len(data)
for key, value in headers.items():
r.write(key + ': ' + str(value) + '\r\n')
r.write('\r\n')
if self.command != 'HEAD':
r.write(data)
self.connection.write(r.getvalue())
if self.connection.is_flushed():
self.connection.shutdown(1)
class HTTPHandler:
def __init__(self, getfunc, minflush):
self.connections = {}
self.getfunc = getfunc
self.minflush = minflush
self.lastflush = clock()
def external_connection_made(self, connection):
self.connections[connection] = HTTPConnection(self, connection)
def connection_flushed(self, connection):
if self.connections[connection].done:
connection.shutdown(1)
def connection_lost(self, connection):
ec = self.connections[connection]
ec.closed = True
del ec.connection
del ec.next_func
del self.connections[connection]
def data_came_in(self, connection, data):
c = self.connections[connection]
if not c.data_came_in(data) and not c.closed:
c.connection.shutdown(1)
def log(self, ip, ident, username, header,
responsecode, length, referrer, useragent):
year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
print '%s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % (
ip, ident, username, day, months[month], year, hour,
minute, second, header, responsecode, length, referrer, useragent)
t = clock()
if t - self.lastflush > self.minflush:
self.lastflush = t
stdout.flush()
|
Tokyo-Buffalo/tokyosouth
|
refs/heads/master
|
env/lib/python3.6/site-packages/scrapy/extensions/closespider.py
|
35
|
"""CloseSpider is an extension that forces spiders to be closed after certain
conditions are met.
See documentation in docs/topics/extensions.rst
"""
from collections import defaultdict
from twisted.internet import reactor
from scrapy import signals
from scrapy.exceptions import NotConfigured
class CloseSpider(object):
def __init__(self, crawler):
self.crawler = crawler
self.close_on = {
'timeout': crawler.settings.getfloat('CLOSESPIDER_TIMEOUT'),
'itemcount': crawler.settings.getint('CLOSESPIDER_ITEMCOUNT'),
'pagecount': crawler.settings.getint('CLOSESPIDER_PAGECOUNT'),
'errorcount': crawler.settings.getint('CLOSESPIDER_ERRORCOUNT'),
}
if not any(self.close_on.values()):
raise NotConfigured
self.counter = defaultdict(int)
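# Only connect the signals whose corresponding limit is configured.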
if self.close_on.get('errorcount'):
crawler.signals.connect(self.error_count, signal=signals.spider_error)
if self.close_on.get('pagecount'):
crawler.signals.connect(self.page_count, signal=signals.response_received)
if self.close_on.get('timeout'):
crawler.signals.connect(self.spider_opened, signal=signals.spider_opened)
if self.close_on.get('itemcount'):
crawler.signals.connect(self.item_scraped, signal=signals.item_scraped)
crawler.signals.connect(self.spider_closed, signal=signals.spider_closed)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def error_count(self, failure, response, spider):
self.counter['errorcount'] += 1
if self.counter['errorcount'] == self.close_on['errorcount']:
self.crawler.engine.close_spider(spider, 'closespider_errorcount')
def page_count(self, response, request, spider):
self.counter['pagecount'] += 1
if self.counter['pagecount'] == self.close_on['pagecount']:
self.crawler.engine.close_spider(spider, 'closespider_pagecount')
def spider_opened(self, spider):
self.task = reactor.callLater(self.close_on['timeout'], \
self.crawler.engine.close_spider, spider, \
reason='closespider_timeout')
def item_scraped(self, item, spider):
self.counter['itemcount'] += 1
if self.counter['itemcount'] == self.close_on['itemcount']:
self.crawler.engine.close_spider(spider, 'closespider_itemcount')
def spider_closed(self, spider):
task = getattr(self, 'task', False)
if task and task.active():
task.cancel()
|
odoocn/odoomrp-wip
|
refs/heads/8.0
|
product_purchase_warrant/models/__init__.py
|
31
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import product
from . import stock
|
rossella/neutron
|
refs/heads/master
|
quantum/plugins/cisco/models/virt_phy_sw_v2.py
|
1
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
#
from copy import deepcopy
import inspect
import logging
from keystoneclient.v2_0 import client as keystone_client
from novaclient.v1_1 import client as nova_client
from quantum.db import l3_db
from quantum.manager import QuantumManager
from quantum.openstack.common import importutils
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_credentials_v2 as cred
from quantum.plugins.cisco.db import network_db_v2 as cdb
from quantum.plugins.cisco import l2network_plugin_configuration as conf
from quantum.plugins.openvswitch import ovs_db_v2 as odb
from quantum import quantum_plugin_base_v2
LOG = logging.getLogger(__name__)
class VirtualPhysicalSwitchModelV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
"""
This implementation works with OVS and Nexus plugin for the
following topology:
One or more servers to a nexus switch.
"""
MANAGE_STATE = True
__native_bulk_support = True
supported_extension_aliases = []
_plugins = {}
_inventory = {}
_methods_to_delegate = ['get_network', 'get_networks',
'create_port_bulk', 'update_port',
'get_port', 'get_ports',
'create_subnet', 'create_subnet_bulk',
'delete_subnet', 'update_subnet',
'get_subnet', 'get_subnets', ]
def __init__(self):
"""
Initialize the segmentation manager, check which device plugins are
configured, and load the inventories of those device plugins for which
an inventory is configured.
"""
cdb.initialize()
cred.Store.initialize()
for key in conf.PLUGINS[const.PLUGINS].keys():
plugin_obj = conf.PLUGINS[const.PLUGINS][key]
self._plugins[key] = importutils.import_object(plugin_obj)
LOG.debug(_("Loaded device plugin %s\n"),
conf.PLUGINS[const.PLUGINS][key])
if key in conf.PLUGINS[const.INVENTORY].keys():
inventory_obj = conf.PLUGINS[const.INVENTORY][key]
self._inventory[key] = importutils.import_object(inventory_obj)
LOG.debug(_("Loaded device inventory %s\n"),
conf.PLUGINS[const.INVENTORY][key])
if hasattr(self._plugins[const.VSWITCH_PLUGIN],
"supported_extension_aliases"):
self.supported_extension_aliases.extend(
self._plugins[const.VSWITCH_PLUGIN].
supported_extension_aliases)
LOG.debug(_("%(module)s.%(name)s init done"),
{'module': __name__,
'name': self.__class__.__name__})
def __getattribute__(self, name):
"""
This delegates the calls to the methods implemented only by the OVS
sub-plugin.
"""
super_getattr = super(VirtualPhysicalSwitchModelV2,
self).__getattribute__
methods = super_getattr('_methods_to_delegate')
if name in methods:
plugin = super_getattr('_plugins')[const.VSWITCH_PLUGIN]
return getattr(plugin, name)
try:
return super_getattr(name)
except AttributeError:
plugin = super_getattr('_plugins')[const.VSWITCH_PLUGIN]
return getattr(plugin, name)
def _func_name(self, offset=0):
"""Get the name of the calling function"""
frame_record = inspect.stack()[1 + offset]
func_name = frame_record[3]
return func_name
def _invoke_plugin_per_device(self, plugin_key, function_name, args):
"""
Invokes a device plugin's relevant functions (on its
inventory and plugin implementation) for completing this operation.
"""
if plugin_key not in self._plugins:
LOG.info(_("No %s Plugin loaded"), plugin_key)
LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s "
"ignored"), locals())
return
device_params = self._invoke_inventory(plugin_key, function_name,
args)
device_ips = device_params[const.DEVICE_IP]
if not device_ips:
return [self._invoke_plugin(plugin_key, function_name, args,
device_params)]
else:
output = []
for device_ip in device_ips:
new_device_params = deepcopy(device_params)
new_device_params[const.DEVICE_IP] = device_ip
output.append(self._invoke_plugin(plugin_key, function_name,
args, new_device_params))
return output
def _invoke_inventory(self, plugin_key, function_name, args):
"""
Invokes the relevant function on a device plugin's
inventory for completing this operation.
"""
if plugin_key not in self._inventory:
LOG.info(_("No %s inventory loaded"), plugin_key)
LOG.info(_("%(plugin_key)s: %(function_name)s with args %(args)s "
"ignored"), locals())
return {const.DEVICE_IP: []}
else:
return getattr(self._inventory[plugin_key], function_name)(args)
def _invoke_plugin(self, plugin_key, function_name, args, kwargs):
"""
Invokes the relevant function on a device plugin's
implementation for completing this operation.
"""
func = getattr(self._plugins[plugin_key], function_name)
func_args_len = int(inspect.getargspec(func).args.__len__()) - 1
fargs, varargs, varkw, defaults = inspect.getargspec(func)
if args.__len__() > func_args_len:
func_args = args[:func_args_len]
extra_args = args[func_args_len:]
for dict_arg in extra_args:
for k, v in dict_arg.iteritems():
kwargs[k] = v
return func(*func_args, **kwargs)
else:
if (varkw == 'kwargs'):
return func(*args, **kwargs)
else:
return func(*args)
def _get_segmentation_id(self, network_id):
binding_seg_id = odb.get_network_binding(None, network_id)
return binding_seg_id.segmentation_id
def _get_all_segmentation_ids(self):
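# Collect every positive OVS VLAN id currently in use as a comma-separated string.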
vlan_ids = cdb.get_ovs_vlans()
vlanids = ''
for v_id in vlan_ids:
if int(v_id) > 0:
vlanids = str(v_id) + ',' + vlanids
return vlanids.strip(',')
def _validate_vlan_id(self, vlan_id):
if vlan_id and int(vlan_id) > 1:
return True
else:
return False
def _get_instance_host(self, tenant_id, instance_id):
keystone = cred._creds_dictionary['keystone']
kc = keystone_client.Client(username=keystone['username'],
password=keystone['password'],
tenant_id=tenant_id,
auth_url=keystone['auth_url'])
tenant = kc.tenants.get(tenant_id)
tenant_name = tenant.name
nc = nova_client.Client(keystone['username'],
keystone['password'],
tenant_name,
keystone['auth_url'],
no_cache=True)
serv = nc.servers.get(instance_id)
host = serv.__getattr__('OS-EXT-SRV-ATTR:host')
return host
def create_network(self, context, network):
"""
Perform this operation in the context of the configured device
plugins.
"""
LOG.debug(_("create_network() called"))
try:
args = [context, network]
ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
args)
vlan_id = self._get_segmentation_id(ovs_output[0]['id'])
if not self._validate_vlan_id(vlan_id):
return ovs_output[0]
vlan_name = conf.VLAN_NAME_PREFIX + str(vlan_id)
vlanids = self._get_all_segmentation_ids()
args = [ovs_output[0]['tenant_id'], ovs_output[0]['name'],
ovs_output[0]['id'], vlan_name, vlan_id,
{'vlan_ids': vlanids}]
return ovs_output[0]
except:
# TODO (Sumit): Check if we need to perform any rollback here
raise
def create_network_bulk(self, context, networks):
"""
Perform this operation in the context of the configured device
plugins.
"""
LOG.debug(_("create_network_bulk() called"))
try:
args = [context, networks]
ovs_output = self._plugins[
const.VSWITCH_PLUGIN].create_network_bulk(context, networks)
LOG.debug(_("ovs_output: %s"), ovs_output)
vlanids = self._get_all_segmentation_ids()
ovs_networks = ovs_output
return ovs_output
except:
# TODO (Sumit): Check if we need to perform any rollback here
raise
def update_network(self, context, id, network):
"""
Perform this operation in the context of the configured device
plugins.
"""
LOG.debug(_("update_network() called"))
args = [context, id, network]
ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
args)
vlan_id = self._get_segmentation_id(ovs_output[0]['id'])
if not self._validate_vlan_id(vlan_id):
return ovs_output[0]
vlanids = self._get_all_segmentation_ids()
args = [ovs_output[0]['tenant_id'], id, {'vlan_id': vlan_id},
{'net_admin_state': ovs_output[0]['admin_state_up']},
{'vlan_ids': vlanids}]
nexus_output = self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
self._func_name(),
args)
return ovs_output[0]
def delete_network(self, context, id):
"""
Perform this operation in the context of the configured device
plugins.
"""
try:
base_plugin_ref = QuantumManager.get_plugin()
n = base_plugin_ref.get_network(context, id)
tenant_id = n['tenant_id']
vlan_id = self._get_segmentation_id(id)
args = [context, id]
ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
args)
args = [tenant_id, id, {const.VLANID: vlan_id},
{const.CONTEXT: context},
{const.BASE_PLUGIN_REF: base_plugin_ref}]
if self._validate_vlan_id(vlan_id):
self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
self._func_name(), args)
return ovs_output[0]
except:
raise
def get_network(self, context, id, fields=None):
"""For this model this method will be delegated to vswitch plugin"""
pass
def get_networks(self, context, filters=None, fields=None):
"""For this model this method will be delegated to vswitch plugin"""
pass
def create_port(self, context, port):
"""
Perform this operation in the context of the configured device
plugins.
"""
LOG.debug(_("create_port() called"))
try:
args = [context, port]
ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
args)
net_id = port['port']['network_id']
instance_id = port['port']['device_id']
tenant_id = port['port']['tenant_id']
net_dict = self.get_network(context, net_id)
net_name = net_dict['name']
vlan_id = self._get_segmentation_id(net_id)
host = ''
if hasattr(conf, 'TEST'):
host = conf.TEST['host']
else:
host = self._get_instance_host(tenant_id, instance_id)
# Trunk segmentation id for only this host
vlan_name = conf.VLAN_NAME_PREFIX + str(vlan_id)
n_args = [tenant_id, net_name, net_id,
vlan_name, vlan_id, host, instance_id]
nexus_output = self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
'create_network',
n_args)
return ovs_output[0]
except:
# TODO (asomya): Check if we need to perform any rollback here
raise
def get_port(self, context, id, fields=None):
"""For this model this method will be delegated to vswitch plugin"""
pass
def get_ports(self, context, filters=None, fields=None):
"""For this model this method will be delegated to vswitch plugin"""
pass
def update_port(self, context, id, port):
"""For this model this method will be delegated to vswitch plugin"""
pass
def delete_port(self, context, id):
"""
Perform this operation in the context of the configured device
plugins.
"""
LOG.debug(_("delete_port() called"))
try:
args = [context, id]
port = self.get_port(context, id)
vlan_id = self._get_segmentation_id(port['network_id'])
n_args = [port['device_id'], vlan_id]
ovs_output = self._invoke_plugin_per_device(const.VSWITCH_PLUGIN,
self._func_name(),
args)
nexus_output = self._invoke_plugin_per_device(const.NEXUS_PLUGIN,
self._func_name(),
n_args)
return ovs_output[0]
except:
# TODO (asomya): Check if we need to perform any rollback here
raise
def create_subnet(self, context, subnet):
"""For this model this method will be delegated to vswitch plugin"""
pass
def update_subnet(self, context, id, subnet):
"""For this model this method will be delegated to vswitch plugin"""
pass
def get_subnet(self, context, id, fields=None):
"""For this model this method will be delegated to vswitch plugin"""
pass
def delete_subnet(self, context, id, kwargs):
"""For this model this method will be delegated to vswitch plugin"""
pass
def get_subnets(self, context, filters=None, fields=None):
"""For this model this method will be delegated to vswitch plugin"""
pass
|
jyhuh/cnuhawk
|
refs/heads/master
|
Tools/autotest/pysim/testwind.py
|
246
|
#!/usr/bin/env python
# simple test of wind generation code
import util, time, random
from rotmat import Vector3
wind = util.Wind('7,90,0.1')
t0 = time.time()
velocity = Vector3(0,0,0)
t = 0
deltat = 0.01
while t < 60:
print("%.4f %f" % (t, wind.drag(velocity, deltat=deltat).length()))
t += deltat
|
jroxendal/PySCXML
|
refs/heads/master
|
w3c_tests/w3cTests.py
|
1
|
from scxml.pyscxml import StateMachine
from scxml.pyscxml_server import PySCXMLServer
import os, shutil
from scxml.compiler import ScriptFetchError
from test import pyscxmlTest
from eventlet import wsgi
ASSERTION_DIR = "./"
TIMEOUT = 12
class W3CTester(StateMachine):
'''
For running a fresh batch of tests from the internal w3c
assertions manager. Useful for little else.
'''
def __init__(self, xml, log_function=lambda fn, y:None, sessionid=None):
self.didPass = False
StateMachine.__init__(self, xml, log_function, None)
def on_exit(self, sender, final):
self.didPass = final == "pass"
StateMachine.on_exit(self, sender, final)
class TestServer(PySCXMLServer):
def __init__(self, host, port, default_scxml_source=None, init_sessions={},
session_path="/", default_datamodel="python", onSuccess=None, onFail=None):
PySCXMLServer.__init__(self, host, port, default_scxml_source, init_sessions, session_path, default_datamodel)
self.n_sessions = len(init_sessions)
self.failed = []
self.passed = []
self.onSuccess = onSuccess
self.onFail = onFail
def on_sm_exit(self, sender, final):
PySCXMLServer.on_sm_exit(self, sender, final)
# if sender not in self: return
filename = os.path.join(sender.filedir, sender.filename)
if final == "pass":
self.passed.append(sender.sessionid)
self.onSuccess(filename)
else:
self.failed.append(sender.sessionid)
self.onFail(filename)
if len(self.passed + self.failed) == self.n_sessions:
print "all done!", os.path.join(sender.filedir, sender.filename)
raise KeyboardInterrupt()
def move(src, dest):
srcs = [src.replace(".", "%s." % fn) for fn in ["", "sub1", "sub2"]]
for url in srcs:
try:
shutil.move(url, dest + url)
except:
pass
if __name__ == '__main__':
import futures, os, glob, sys, eventlet
os.chdir("ecma_schema/")
for fn in glob.glob("*.xml"):
shutil.move(fn, fn.split(".")[0] + ".scxml")
try:
os.mkdir("passed")
os.mkdir("failed")
os.mkdir("rejected")
except:
pass
stoplist = [
#"test201.scxml", #basichttp eventprocessor for sending within machine.
"test267.scxml", #exmode strict
"test268.scxml", #exmode strict
"test269.scxml", #exmode strict
"test320.scxml", #send content parsing
#"test325.scxml", #_ioprocessors bound at startup
#"test326.scxml", #_ioprocessors bound till end
#"test336.scxml", #_event.origin
#"test349.scxml", #_event.origin
#"test350.scxml", #target yourself using #_scxml_sessionid
"test360.scxml", #exmode strict
#"test500.scxml", #location field of ioprocessor in event
#"test501.scxml", #location field of ioprocessor in event
]
supposed_to_fail = [
"test178.scxml", #manual test
"test230.scxml",
"test250.scxml",
"test307.scxml",
]
run_on_server = [
"test508.scxml",
"test509.scxml",
"test510.scxml",
"test511.scxml",
"test513.scxml",
"test518.scxml",
"test519.scxml",
"test520.scxml",
"test522.scxml",
"test531.scxml",
"test532.scxml",
"test534.scxml",
"test567.scxml",
]
filelist = [fn for fn in os.listdir(ASSERTION_DIR) if
"sub" not in fn and
not os.path.isdir(fn) and
fn.endswith("xml") and
fn not in stoplist + supposed_to_fail + run_on_server]
def onSuccess(url):
print "passed:", url
move(url, "passed/")
def onFail(url):
print "failed:", url
move(url, "failed/")
pyscxmlTest.parallelize(filelist, onSuccess, onFail)
# server = TestServer("localhost", 8081, init_sessions=dict(zip(run_on_server, run_on_server)), onFail=onFail, onSuccess=onSuccess)
# wsgi.server(eventlet.listen(("localhost", 8081)), server.request_handler)
# print "Done"
|
CyanogenMod/android_external_chromium_org
|
refs/heads/cm-12.0
|
tools/cr/cr/targets/__init__.py
|
112
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A package for all the built in commands.
This package has all the standard commands built in to the cr tool.
Most commands use actions to perform the real work.
"""
import cr
cr.Import(__name__, 'target')
|
TmpName/venom-xbmc-addons-beta
|
refs/heads/master
|
plugin.video.vstream/resources/sites/trash/bundesliga_de.py
|
4
|
from resources.lib.handler.outputParameterHandler import cOutputParameterHandler
from resources.lib.handler.inputParameterHandler import cInputParameterHandler
from resources.lib.parser import cParser
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.gui.guiElement import cGuiElement
from resources.lib.gui.gui import cGui
from resources.lib.player import cPlayer
SITE_IDENTIFIER = 'bundesliga_de'
SITE_NAME = 'Bundesliga.de'
URL_MAIN = 'http://www.bundesliga.de'
URL_TV = 'http://www.bundesliga.de/de/bundesliga-tv/navigation.php?area='
URL_GET_STREAM = 'http://btd-flv-lbwww-01.odmedia.net/bundesliga/'
def load():
cGui().showInfo('HS', SITE_NAME, 5)
return False
oGui = cGui()
__createMainMenuItem(oGui, 'Aktuell', 'aktuell')
__createMainMenuItem(oGui, 'Spieltag', 'spieltag')
__createMainMenuItem(oGui, 'Stars', 'stars')
__createMainMenuItem(oGui, 'Insider', 'insider')
__createMainMenuItem(oGui, 'Historie', 'historie')
__createMainMenuItem(oGui, 'Vereine', 'vereine')
oGui.setEndOfDirectory()
def __createMainMenuItem(oGui, sTitle, sPlaylistId):
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('listVideos')
oGuiElement.setTitle(sTitle)
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('playlistId', sPlaylistId)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
def listVideos():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
if (oInputParameterHandler.exist('playlistId')):
sPlaylistId = oInputParameterHandler.getValue('playlistId')
sUrl = URL_TV + str(sPlaylistId)
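# The regex captures each row's preview image, showVideoSnippet() argument and description text.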
sPattern = '<div class="zeile">.*?<img src="([^"]+)" id="bild" class="previewImg".*?<a href="javascript:showVideoSnippet\(\'(.*?)\'\).*?<div class="describe">(.*?)</div>'
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setFunction('play')
oGuiElement.setTitle(aEntry[2])
sThumbnail = URL_MAIN + str(aEntry[0])
oGuiElement.setThumbnail(sThumbnail)
sUrl = URL_MAIN + str(aEntry[1])
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('sUrl', sUrl)
oGui.addFolder(oGuiElement, oOutputParameterHandler)
oGui.setEndOfDirectory()
def play():
oGui = cGui()
oInputParameterHandler = cInputParameterHandler()
if (oInputParameterHandler.exist('sUrl')):
sUrl = oInputParameterHandler.getValue('sUrl')
oRequest = cRequestHandler(sUrl)
sHtmlContent = oRequest.request()
sPattern = 'ake_playlist.php%3Fflv%3D(.*?)%26'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
sUrl = URL_GET_STREAM + str(aResult[1][0])
oGuiElement = cGuiElement()
oGuiElement.setSiteName(SITE_IDENTIFIER)
oGuiElement.setMediaUrl(sUrl)
oPlayer = cPlayer()
oPlayer.addItemToPlaylist(oGuiElement)
oPlayer.startPlayer()
return
oGui.setEndOfDirectory()
|
richard-willowit/odoo
|
refs/heads/master
|
addons/sale_service_rating/models/__init__.py
|
70
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import project
|
sergiosgc/Syncthing.py
|
refs/heads/master
|
syncthing/bep/connection.py
|
1
|
from asyncio import Protocol,coroutine,Task
from zlib import decompressobj, compressobj, MAX_WBITS, Z_DEFAULT_COMPRESSION, DEFLATED,Z_SYNC_FLUSH
from syncthing.bep.message import BEPMessage
from syncthing.bep.clusterconfigmessage import BEPClusterConfigMessage
from syncthing.bep.indexmessage import BEPIndexMessage
from syncthing.bep.requestmessage import BEPRequestMessage
from syncthing.bep.responsemessage import BEPResponseMessage
from syncthing.bep.pingmessage import BEPPingMessage
from syncthing.bep.pongmessage import BEPPongMessage
from syncthing.bep.indexupdatemessage import BEPIndexUpdateMessage
from syncthing.xdr.XDRIntegerUnserializer import XDRIntegerUnserializer
class BEPConnection(Protocol):
def __init__(self, app, loop):
self._ssl_context = None
self.app = app
self.loop = loop
def get_ssl_context(self):
if self._ssl_context is None:
import ssl
certFiles = self.app.getCertificateFiles()
self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
self._ssl_context.verify_mode = ssl.CERT_NONE
self._ssl_context.set_ciphers('DHE-RSA-AES256-GCM-SHA384,DHE-RSA-AES256-SHA256,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-SHA384,DHE-RSA-AES128-GCM-SHA256,DHE-RSA-AES128-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-SHA256')
self._ssl_context.load_cert_chain(certFiles[0], certFiles[1])
self._ssl_context.load_verify_locations(certFiles[0])
return self._ssl_context
ssl_context = property(get_ssl_context, None, None, None)
def message_factory(self, messageVersion, messageType, messageId):
if messageType == BEPClusterConfigMessage.BEP_TYPE:
return BEPClusterConfigMessage(messageVersion, messageId)
if messageType == BEPIndexMessage.BEP_TYPE:
return BEPIndexMessage(messageVersion, messageId)
if messageType == BEPRequestMessage.BEP_TYPE:
return BEPRequestMessage(messageVersion, messageId)
if messageType == BEPResponseMessage.BEP_TYPE:
return BEPResponseMessage(messageVersion, messageId)
if messageType == BEPPingMessage.BEP_TYPE:
return BEPPingMessage(messageVersion, messageId)
if messageType == BEPPongMessage.BEP_TYPE:
return BEPPongMessage(messageVersion, messageId)
if messageType == BEPIndexUpdateMessage.BEP_TYPE:
return BEPIndexUpdateMessage(messageVersion, messageId)
raise Exception("Unknown message type received: " + str(messageType))
def protocol_factory(self):
return self
def connection_made(self, transport):
self.transport = transport
self.decompressor = decompressobj(-MAX_WBITS)
self.compressor = compressobj(level=Z_DEFAULT_COMPRESSION, method=DEFLATED, wbits=-MAX_WBITS)
self.unused_data = bytearray()
self.incoming_message = None
try:
self.app.on_connect(connection=self)
except AttributeError:
# I really do not like this approach. The exception is silenced because
# the app method is not mandatory and its non-existence should not crash
# the application. However, should a real AttributeError -- with another
# cause land us here, this error silencing will be painful
#
# TODO Find a pythonic way to achieve the optional call without this secondary effect
pass
def data_received(self, data):
self.unused_data += self.decompressor.decompress(data)
self.consume_unused_data()
def consume_unused_data(self):
if self.incoming_message is None and len(self.unused_data) >= 4:
toUnpack = XDRIntegerUnserializer().continueUnserialization(self.unused_data)
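# The 4-byte header packs the message version (4 bits), message id (12 bits) and message type (8 bits).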
messageVersion = (toUnpack>>28) & 0xf
messageId = (toUnpack>>16) & 0xfff
messageType = (toUnpack>>8) & 0xff
self.incoming_message = self.message_factory(messageVersion, messageType, messageId)
if self.incoming_message is not None:
candidate = self.incoming_message.continueUnserialization(self.unused_data)
if candidate is not None: # Unserialization is complete
self.incoming_message = None
result = []
result.append(candidate)
candidate = self.consume_unused_data()
if candidate is not None:
result.append(candidate)
tasks = [Task(self.message_received(msg), loop=self.loop) for msg in result]
def connection_lost(self, exc):
try:
self.app.on_disconnect(connection=self, reason=exc)
except AttributeError:
# I really do not like this approach. The exception is silenced because
# the app method is not mandatory and its non-existence should not crash
# the application. However, should a real AttributeError -- with another
# cause land us here, this error silencing will be painful
#
# TODO Find a pythonic way to achieve the optional call without this secondary effect
pass
pass
@coroutine
def message_received(self, msg):
if type(msg) is BEPClusterConfigMessage:
yield from self.app.handle_cluster_config_message(self, msg)
if type(msg) is BEPIndexMessage:
yield from self.app.handle_index_message(self, msg)
if type(msg) is BEPRequestMessage:
yield from self.app.handle_request_message(self, msg)
if type(msg) is BEPResponseMessage:
yield from self.app.handle_response_message(self, msg)
if type(msg) is BEPPingMessage:
yield from self.app.handle_ping_message(self, msg)
if type(msg) is BEPPongMessage:
yield from self.app.handle_pong_message(self, msg)
yield from []
def write(self, data, flush=True):
"""Send the data through the connection
This method acts simultaneously like a transport write() and a protocol write(). A
protocol write() receives a BEPMessage object and sends it. A transport write receives
a byte-ish argument and sends it. The transport behaviour is alien here, it should be
embedded in the asyncio transport. It isn't because those are not easily extensible,
and BEP requires zlib compression of the SSL transport stream.
Long story short: expected usage is as a protocol write(). Pass in a BEPMessage and it
will be sent to the remote endpoint.
Arguments:
data -- An instance of a BEPMessage, to be sent
"""
if not isinstance(data, (bytes, bytearray, memoryview, BEPMessage)):
raise TypeError('data argument must be byte-ish or a BEPMessage (%r)' % type(data))
if isinstance(data, BEPMessage):
return data.serialize(self)
print("Writing:" + str(data))
compressed = self.compressor.compress(data)
if flush:
compressed += self.compressor.flush(Z_SYNC_FLUSH)
if len(compressed) > 0:
print("Writing (compressed):" + str(compressed))
self.transport.write(compressed)
|
sgraham/nope
|
refs/heads/master
|
third_party/closure_compiler/tools/compile_coverage.py
|
79
|
#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from ast import literal_eval
import os
_HERE = os.path.dirname(__file__)
_SRC_ROOT = os.path.join(_HERE, '..', '..', '..')
_FROM_SRC = lambda p: os.path.abspath(os.path.join(_SRC_ROOT, p))
from sys import path as sys_path
sys_path.insert(0, os.path.join(_HERE, '..'))
import processor
# High priority code to compile.
_NEED_TO_COMPILE = map(_FROM_SRC, [
'chrome/browser/resources/bookmark_manager',
'chrome/browser/resources/downloads',
'chrome/browser/resources/extensions',
'chrome/browser/resources/help',
'chrome/browser/resources/history',
'chrome/browser/resources/ntp4',
'chrome/browser/resources/options',
'chrome/browser/resources/print_preview',
'chrome/browser/resources/uber',
'ui/webui/resources/js',
])
# Code that we'd eventually like to compile.
_WANT_TO_COMPILE = map(_FROM_SRC, [
'chrome/browser/resources',
'chrome/browser/ui/webui',
'chrome/renderer/resources',
'chrome/test/data',
'content/renderer/resources',
'content/test/data',
'extensions/renderer',
'extensions/test/data',
'remoting',
'ui/file_manager',
'ui/keyboard',
])
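# Resolve .gitignore entries to absolute directories and ignore any .js file that lives under one of them.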
_GIT_IGNORE = open(_FROM_SRC('.gitignore')).read().splitlines()
_IGNORE_DIRS = tuple(map(_FROM_SRC, map(lambda p: p[1:], _GIT_IGNORE)))
_IGNORE_DIRS = filter(os.path.isdir, _IGNORE_DIRS)
_RELEVANT_JS = lambda f: f.endswith('.js') and not f.startswith(_IGNORE_DIRS)
def main():
line_cache = {}
def js_files_and_deps_in_dir(js_dir):
found_files = set()
for root, dirs, files in os.walk(js_dir):
abs_files = [os.path.abspath(os.path.join(root, f)) for f in files]
relevant_files = filter(_RELEVANT_JS, abs_files)
found_files.update(relevant_files)
for f in relevant_files:
found_files.update(processor.Processor(f).included_files)
return found_files
def num_lines(f):
f = os.path.abspath(f)
if f not in line_cache:
line_cache[f] = len(open(f, 'r').read().splitlines())
return line_cache[f]
# All the files that are already compiled.
compiled = set()
closure_dir = os.path.join(_HERE, '..')
root_gyp = os.path.join(closure_dir, 'compiled_resources.gyp')
root_contents = open(root_gyp, 'r').read()
gyp_files = literal_eval(root_contents)['targets'][0]['dependencies']
for g in gyp_files:
gyp_file = os.path.join(closure_dir, g.replace(':*', ''))
targets = literal_eval(open(gyp_file, 'r').read())['targets']
for target in targets:
gyp_dir = os.path.dirname(gyp_file)
target_file = os.path.join(gyp_dir, target['target_name'] + '.js')
compiled.add(os.path.abspath(target_file))
compiled.update(processor.Processor(target_file).included_files)
if 'variables' in target and 'depends' in target['variables']:
depends = target['variables']['depends']
rel_depends = [os.path.join(gyp_dir, d) for d in depends]
compiled.update([os.path.abspath(d) for d in rel_depends])
compiled_lines = sum(map(num_lines, compiled))
print 'compiled: %d files, %d lines' % (len(compiled), compiled_lines)
# Find and calculate the line count of all .js files in the wanted or needed
# resource directories.
files = set()
for n in _NEED_TO_COMPILE:
files.update(js_files_and_deps_in_dir(n))
need_lines = sum(map(num_lines, files))
print 'need: %d files, %d lines' % (len(files), need_lines)
need_done = float(compiled_lines) / need_lines * 100
print '%.2f%% done with the code we need to compile' % need_done
for w in _WANT_TO_COMPILE:
files.update(js_files_and_deps_in_dir(w))
want_lines = sum(map(num_lines, files))
print 'want: %d files, %d lines' % (len(files), want_lines)
want_done = float(compiled_lines) / want_lines * 100
print '%.2f%% done with the code we want to compile' % want_done
if __name__ == '__main__':
main()
|
cliffe/SecGen
|
refs/heads/master
|
modules/utilities/unix/audit_tools/ghidra/files/release/Ghidra/Features/Python/data/jython-2.7.1/Lib/lib2to3/fixes/fix_map.py
|
327
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.
As a special case, map(None, X) is changed into list(X). (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)
We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms
class FixMap(fixer_base.ConditionalFix):
BM_compatible = True
PATTERN = """
map_none=power<
'map'
trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
>
|
map_lambda=power<
'map'
trailer<
'('
arglist<
lambdef< 'lambda'
(fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
>
','
it=any
>
')'
>
>
|
power<
'map' trailer< '(' [arglist=any] ')' >
>
"""
skip_on = 'future_builtins.map'
def transform(self, node, results):
if self.should_skip(node):
return
if node.parent.type == syms.simple_stmt:
self.warning(node, "You should use a for loop here")
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
elif "map_lambda" in results:
new = ListComp(results["xp"].clone(),
results["fp"].clone(),
results["it"].clone())
else:
if "map_none" in results:
new = results["arg"].clone()
else:
if "arglist" in results:
args = results["arglist"]
if args.type == syms.arglist and \
args.children[0].type == token.NAME and \
args.children[0].value == "None":
self.warning(node, "cannot convert map(None, ...) "
"with multiple arguments because map() "
"now truncates to the shortest sequence")
return
if in_special_context(node):
return None
new = node.clone()
new.prefix = u""
new = Call(Name(u"list"), [new])
new.prefix = node.prefix
return new
|
jasonwzhy/django
|
refs/heads/master
|
django/contrib/gis/db/backends/postgis/features.py
|
345
|
from django.contrib.gis.db.backends.base.features import BaseSpatialFeatures
from django.db.backends.postgresql.features import \
DatabaseFeatures as Psycopg2DatabaseFeatures
class DatabaseFeatures(BaseSpatialFeatures, Psycopg2DatabaseFeatures):
supports_3d_storage = True
supports_3d_functions = True
supports_left_right_lookups = True
supports_raster = True
|
bhargav2408/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/urllib/request.py
|
45
|
"""An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as a file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
urlopen(url, data=None) -- Basic usage is the same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the argument is a subclass of the default
handler, the argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib.request
# set up authentication info
authinfo = urllib.request.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib.request.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib.request.build_opener(proxy_support, authinfo,
urllib.request.CacheFTPHandler)
# install it
urllib.request.install_opener(opener)
f = urllib.request.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows what the problem was, e.g., that it didn't know
# the hash algorithm requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import base64
import bisect
import email
import hashlib
import http.client
import io
import os
import posixpath
import random
import re
import socket
import sys
import time
import collections
from urllib.error import URLError, HTTPError, ContentTooShortError
from urllib.parse import (
urlparse, urlsplit, urljoin, unwrap, quote, unquote,
splittype, splithost, splitport, splituser, splitpasswd,
splitattr, splitquery, splitvalue, splittag, to_bytes, urlunparse)
from urllib.response import addinfourl, addclosehook
# check for SSL
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
# string used in the User-Agent header sent to servers
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
*, cafile=None, capath=None):
global _opener
if cafile or capath:
if not _have_ssl:
raise ValueError('SSL support not available')
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if cafile or capath:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile, capath)
check_hostname = True
else:
check_hostname = False
https_handler = HTTPSHandler(context=context, check_hostname=check_hostname)
opener = build_opener(https_handler)
elif _opener is None:
_opener = opener = build_opener()
else:
opener = _opener
return opener.open(url, data, timeout)
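# Illustrative sketch (not part of the module): with this version of
# urlopen(), passing cafile/capath builds a one-off opener whose
# HTTPSHandler verifies server certificates.  The CA bundle path below
# is an assumption.
#
#     import urllib.request
#     resp = urllib.request.urlopen(
#         'https://www.python.org/',
#         cafile='/etc/ssl/certs/ca-certificates.crt')
#     data = resp.read()
#     resp.close()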
def install_opener(opener):
global _opener
_opener = opener
# TODO(jhylton): Make this work with the same global opener.
_urlopener = None
def urlretrieve(url, filename=None, reporthook=None, data=None):
global _urlopener
if not _urlopener:
_urlopener = FancyURLopener()
return _urlopener.retrieve(url, filename, reporthook, data)
def urlcleanup():
if _urlopener:
_urlopener.cleanup()
global _opener
if _opener:
_opener = None
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$", re.ASCII)
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.full_url
host = urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = _cut_port_re.sub("", host, 1)
return host.lower()
class Request:
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
self.full_url = unwrap(url)
self.full_url, self.fragment = splittag(self.full_url)
self.data = data
self.headers = {}
self._tunnel_host = None
for key, value in headers.items():
self.add_header(key, value)
self.unredirected_hdrs = {}
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
self._parse()
def _parse(self):
self.type, rest = splittype(self.full_url)
if self.type is None:
raise ValueError("unknown url type: %s" % self.full_url)
self.host, self.selector = splithost(rest)
if self.host:
self.host = unquote(self.host)
def get_method(self):
if self.data is not None:
return "POST"
else:
return "GET"
# Begin deprecated methods
def add_data(self, data):
self.data = data
def has_data(self):
return self.data is not None
def get_data(self):
return self.data
def get_full_url(self):
if self.fragment:
return '%s#%s' % (self.full_url, self.fragment)
else:
return self.full_url
def get_type(self):
return self.type
def get_host(self):
return self.host
def get_selector(self):
return self.selector
def is_unverifiable(self):
return self.unverifiable
def get_origin_req_host(self):
return self.origin_req_host
# End deprecated methods
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
self._tunnel_host = self.host
else:
self.type = type
self.selector = self.full_url
self.host = host
def has_proxy(self):
return self.selector == self.full_url
def add_header(self, key, val):
# useful for something like authentication
self.headers[key.capitalize()] = val
def add_unredirected_header(self, key, val):
# will not be added to a redirected request
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return list(hdrs.items())
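# Illustrative sketch (not part of the module): constructing a Request
# directly gives control over headers, and GET vs. POST is inferred from
# whether data is supplied.  The URL and header values are assumptions.
#
#     req = Request('http://www.example.com/form',
#                   data=b'field=value',
#                   headers={'X-Custom': 'demo'})
#     assert req.get_method() == 'POST'
#     assert req.get_header('X-custom') == 'demo'   # keys are .capitalize()d
#     assert request_host(req) == 'www.example.com'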
class OpenerDirector:
def __init__(self):
client_version = "Python-urllib/%s" % __version__
self.addheaders = [('User-agent', client_version)]
# self.handlers is retained only for backward compatibility
self.handlers = []
# manage the individual handlers
self.handle_open = {}
self.handle_error = {}
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
i = meth.find("_")
protocol = meth[:i]
condition = meth[i+1:]
if condition.startswith("error"):
j = condition.find("_") + i + 1
kind = meth[j+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif condition == "open":
kind = protocol
lookup = self.handle_open
elif condition == "response":
kind = protocol
lookup = self.process_response
elif condition == "request":
kind = protocol
lookup = self.process_request
else:
continue
handlers = lookup.setdefault(kind, [])
if handlers:
bisect.insort(handlers, handler)
else:
handlers.append(handler)
added = True
if added:
bisect.insort(self.handlers, handler)
handler.add_parent(self)
def close(self):
# Only exists for backwards compatibility.
pass
def _call_chain(self, chain, kind, meth_name, *args):
# Handlers raise an exception if no one else should try to handle
# the request, or return None if they can't but another handler
# could. Otherwise, they return the response.
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
result = func(*args)
if result is not None:
return result
def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
# accept a URL or a Request object
if isinstance(fullurl, str):
req = Request(fullurl, data)
else:
req = fullurl
if data is not None:
req.data = data
req.timeout = timeout
protocol = req.type
# pre-process request
meth_name = protocol+"_request"
for processor in self.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = self._open(req, data)
# post-process response
meth_name = protocol+"_response"
for processor in self.process_response.get(protocol, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def _open(self, req, data=None):
result = self._call_chain(self.handle_open, 'default',
'default_open', req)
if result:
return result
protocol = req.type
result = self._call_chain(self.handle_open, protocol, protocol +
'_open', req)
if result:
return result
return self._call_chain(self.handle_open, 'unknown',
'unknown_open', req)
def error(self, proto, *args):
if proto in ('http', 'https'):
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is not different than http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP, FTP and when applicable HTTPS.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
def isclass(obj):
return isinstance(obj, type) or hasattr(obj, "__bases__")
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
if hasattr(http.client, "HTTPSConnection"):
default_classes.append(HTTPSHandler)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
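# Illustrative sketch (not part of the module): because build_opener()
# skips a default handler when a subclass of it is passed in, a custom
# HTTPHandler (here one with debugging enabled) replaces the stock one.
#
#     class DebugHTTPHandler(HTTPHandler):
#         def __init__(self):
#             HTTPHandler.__init__(self, debuglevel=1)
#
#     opener = build_opener(DebugHTTPHandler)   # classes are instantiated
#     install_opener(opener)                    # make it the urlopen() default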
class BaseHandler:
handler_order = 500
def add_parent(self, parent):
self.parent = parent
def close(self):
# Only exists for backwards compatibility
pass
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses."""
handler_order = 1000 # after all other processing
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise HTTPError(req.full_url, code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (not (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST")):
raise HTTPError(req.full_url, code, msg, headers, fp)
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib.request, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
CONTENT_HEADERS = ("content-length", "content-type")
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in CONTENT_HEADERS)
return Request(newurl,
headers=newheaders,
origin_req_host=req.origin_req_host,
unverifiable=True)
# Implementation note: To avoid the server sending us into an
# infinite loop, the request object needs to track what URLs we
# have already seen. Do this by adding a handler-specific
# attribute to the Request object.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if "location" in headers:
newurl = headers["location"]
elif "uri" in headers:
newurl = headers["uri"]
else:
return
# fix a possible malformed URL
urlparts = urlparse(newurl)
# For security reasons we don't allow redirection to anything other
# than http, https or ftp.
if not urlparts.scheme in ('http', 'https', 'ftp'):
raise HTTPError(newurl, code,
msg +
" - Redirection to url '%s' is not allowed" %
newurl,
headers, fp)
if not urlparts.path:
urlparts = list(urlparts)
urlparts[2] = "/"
newurl = urlunparse(urlparts)
newurl = urljoin(req.full_url, newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(req, fp, code, msg, headers, newurl)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.full_url, code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new, timeout=req.timeout)
http_error_301 = http_error_303 = http_error_307 = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
def _parse_proxy(proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
# We have an authority, so for RFC 3986-compliant URLs (by sections
# 3.2 and 3.3), the path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
def __init__(self, proxies=None):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'keys'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
def proxy_open(self, req, proxy, type):
orig_type = req.type
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if req.host and proxy_bypass(req.host):
return None
if user and password:
user_pass = '%s:%s' % (unquote(user),
unquote(password))
creds = base64.b64encode(user_pass.encode()).decode("ascii")
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type or orig_type == 'https':
# let other handlers take care of it
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
# e.g. if we have a constructor arg proxies like so:
# {'http': 'ftp://proxy.example.com'}, we may end up turning
# a request for http://acme.example.com/a into one for
# ftp://proxy.example.com/a
return self.parent.open(req, timeout=req.timeout)
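# Illustrative sketch (not part of the module): ProxyHandler takes a
# mapping of URL scheme to proxy URL; an empty mapping disables the
# proxies picked up from the environment.  The proxy host is an assumption.
#
#     proxy_handler = ProxyHandler({'http': 'http://proxy.example.com:3128'})
#     opener = build_opener(proxy_handler)
#     no_proxy_opener = build_opener(ProxyHandler({}))   # ignore *_proxy env vars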
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, str):
uri = [uri]
if not realm in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
[self.reduce_uri(u, default_port) for u in uri])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.items():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return None, None
def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
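# Illustrative sketch (not part of the module): credentials registered for
# a URI also match any URI "below" it (see is_suburi), and the
# ...WithDefaultRealm variant falls back to realm None when the server's
# realm was not registered.  The URI and credentials below are assumptions.
#
#     mgr = HTTPPasswordMgrWithDefaultRealm()
#     mgr.add_password(None, 'http://www.example.com/private/',
#                      'klem', 'geheim')
#     user, pw = mgr.find_user_password('Some Realm',
#                                       'http://www.example.com/private/data')
#     assert (user, pw) == ('klem', 'geheim')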
class AbstractBasicAuthHandler:
# XXX this allows for multiple auth-schemes, but will stupidly pick
# the last one with a realm specified.
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
'realm=(["\'])(.*?)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
# production).
def __init__(self, password_mgr=None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
self.retried = 0
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
# XXX could be multiple headers
authreq = headers.get(authreq, None)
if self.retried > 5:
# retry sending the username:password 5 times before failing.
raise HTTPError(req.get_full_url(), 401, "basic auth failed",
headers, None)
else:
self.retried += 1
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
if scheme.lower() == 'basic':
response = self.retry_http_basic_auth(host, req, realm)
if response and response.code != 401:
self.retried = 0
return response
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = "%s:%s" % (user, pw)
auth = "Basic " + base64.b64encode(raw.encode()).decode("ascii")
if req.headers.get(self.auth_header, None) == auth:
return None
req.add_unredirected_header(self.auth_header, auth)
return self.parent.open(req, timeout=req.timeout)
else:
return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.full_url
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
self.reset_retry_count()
return response
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
# http_error_auth_reqed requires that there is no userinfo component in
# authority. Assume there isn't one, since urllib.request does not (and
# should not, RFC 3986 s. 3.2.1) support requests for URLs containing
# userinfo.
authority = req.host
response = self.http_error_auth_reqed('proxy-authenticate',
authority, req, headers)
self.reset_retry_count()
return response
def randombytes(n):
"""Return n random bytes."""
return os.urandom(n)
class AbstractDigestAuthHandler:
# Digest authentication is specified in RFC 2617.
# XXX The client does not inspect the Authentication-Info header
# in a successful response.
# XXX It should be possible to test this implementation against
# a mock server that just generates a static set of challenges.
# XXX qop="auth-int" support is shaky
def __init__(self, passwd=None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
self.last_nonce = None
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
# Don't fail endlessly - if we failed once, we'll probably
# fail a second time. Hm. Unless the Password Manager is
# prompting for the information. Crap. This isn't great
# but it's better than the current 'repeat until recursion
# depth exceeded' approach <wink>
raise HTTPError(req.full_url, 401, "digest auth failed",
headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(filter(None, parse_http_list(challenge)))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return None
req.add_unredirected_header(self.auth_header, auth_val)
resp = self.parent.open(req, timeout=req.timeout)
return resp
def get_cnonce(self, nonce):
# The cnonce-value is an opaque
# quoted string value provided by the client and used by both client
# and server to avoid chosen plaintext attacks, to provide mutual
# authentication, and to provide some message integrity protection.
# This isn't a fabulous effort, but it's probably Good Enough.
s = "%s:%s:%s:" % (self.nonce_count, nonce, time.ctime())
b = s.encode("ascii") + randombytes(8)
dig = hashlib.sha1(b).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
# mod_digest doesn't send an opaque, even though it isn't
# supposed to be optional
opaque = chal.get('opaque', None)
except KeyError:
return None
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return None
user, pw = self.passwd.find_user_password(realm, req.full_url)
if user is None:
return None
# XXX not implemented yet
if req.data is not None:
entdig = self.get_entity_digest(req.data, chal)
else:
entdig = None
A1 = "%s:%s:%s" % (user, realm, pw)
A2 = "%s:%s" % (req.get_method(),
# XXX selector: what about proxies and full urls
req.selector)
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
self.last_nonce = nonce
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
raise URLError("qop '%s' is not supported." % qop)
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (user, realm, nonce, req.selector,
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
def get_algorithm_impls(self, algorithm):
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x.encode("ascii")).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x.encode("ascii")).hexdigest()
# XXX MD5-sess
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
def get_entity_digest(self, data, chal):
# XXX not implemented yet
return None
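# Illustrative sketch (not part of the module): the core of the RFC 2617
# digest computation performed by get_authorization() above, written out
# for the qop="auth" / MD5 case.  All challenge values here are made up.
#
#     import hashlib
#     H = lambda x: hashlib.md5(x.encode('ascii')).hexdigest()
#     KD = lambda s, d: H('%s:%s' % (s, d))
#     A1 = 'klem:PDQ Application:geheim'          # user:realm:password
#     A2 = 'GET:/site-updates.py'                 # method:selector
#     nonce, ncvalue, cnonce = 'abc123', '00000001', 'deadbeef01234567'
#     response = KD(H(A1), '%s:%s:%s:auth:%s' % (nonce, ncvalue, cnonce, H(A2)))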
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
"""An authentication protocol defined by RFC 2069
Digest authentication improves on basic authentication because it
does not transmit passwords in the clear.
"""
auth_header = 'Authorization'
handler_order = 490 # before Basic auth
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse(req.full_url)[1]
retry = self.http_error_auth_reqed('www-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490 # before Basic auth
def http_error_407(self, req, fp, code, msg, headers):
host = req.host
retry = self.http_error_auth_reqed('proxy-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.host
if not host:
raise URLError('no host given')
if request.data is not None: # POST
data = request.data
if isinstance(data, str):
raise TypeError("POST data should be bytes"
" or an iterable of bytes. It cannot be str.")
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
if not request.has_header('Content-length'):
try:
mv = memoryview(data)
except TypeError:
if isinstance(data, collections.Iterable):
raise ValueError("Content-Length should be specified "
"for iterable data of type %r %r" % (type(data),
data))
else:
request.add_unredirected_header(
'Content-length', '%d' % (len(mv) * mv.itemsize))
sel_host = host
if request.has_proxy():
scheme, sel = splittype(request.selector)
sel_host, sel_path = splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req, **http_conn_args):
"""Return an HTTPResponse object for the request, using http_class.
http_class must implement the HTTPConnection API from http.client.
"""
host = req.host
if not host:
raise URLError('no host given')
# will parse host:port
h = http_class(host, timeout=req.timeout, **http_conn_args)
headers = dict(req.unredirected_hdrs)
headers.update(dict((k, v) for k, v in req.headers.items()
if k not in headers))
# TODO(jhylton): Should this be redesigned to handle
# persistent connections?
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict((name.title(), val) for name, val in headers.items())
if req._tunnel_host:
tunnel_headers = {}
proxy_auth_hdr = "Proxy-Authorization"
if proxy_auth_hdr in headers:
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
# Proxy-Authorization should not be sent to origin
# server.
del headers[proxy_auth_hdr]
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
try:
h.request(req.get_method(), req.selector, req.data, headers)
except socket.error as err: # timeout error
h.close()
raise URLError(err)
else:
r = h.getresponse()
r.url = req.get_full_url()
# This line replaces the .msg attribute of the HTTPResponse
# with .headers, because urllib clients expect the response to
# have the reason in .msg.  It would be good to mark this
# attribute as deprecated and get clients to use info() or
# .headers.
r.msg = r.reason
return r
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(http.client.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(http.client, 'HTTPSConnection'):
import ssl
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, debuglevel=0, context=None, check_hostname=None):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
self._check_hostname = check_hostname
def https_open(self, req):
return self.do_open(http.client.HTTPSConnection, req,
context=self._context, check_hostname=self._check_hostname)
https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
def __init__(self, cookiejar=None):
import http.cookiejar
if cookiejar is None:
cookiejar = http.cookiejar.CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
class UnknownHandler(BaseHandler):
def unknown_open(self, req):
type = req.type
raise URLError('unknown url type: %s' % type)
def parse_keqv_list(l):
"""Parse list of key=value strings where keys are not duplicated."""
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed
def parse_http_list(s):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Neither commas nor quotes count if they are escaped.
Only double-quotes count, not single-quotes.
"""
res = []
part = ''
escape = quote = False
for cur in s:
if escape:
part += cur
escape = False
continue
if quote:
if cur == '\\':
escape = True
continue
elif cur == '"':
quote = False
part += cur
continue
if cur == ',':
res.append(part)
part = ''
continue
if cur == '"':
quote = True
part += cur
# append last part
if part:
res.append(part)
return [part.strip() for part in res]
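# Illustrative sketch (not part of the module): parse_http_list() and
# parse_keqv_list() are typically combined to turn a Digest challenge
# into a dict, much as retry_http_digest_auth() does above.  The
# challenge string is made up.
#
#     challenge = 'realm="test", nonce="abc123", qop="auth", algorithm=MD5'
#     chal = parse_keqv_list(parse_http_list(challenge))
#     assert chal == {'realm': 'test', 'nonce': 'abc123',
#                     'qop': 'auth', 'algorithm': 'MD5'}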
class FileHandler(BaseHandler):
# Use local file or FTP depending on form of URL
def file_open(self, req):
url = req.selector
if url[:2] == '//' and url[2:3] != '/' and (req.host and
req.host != 'localhost'):
if req.host not in self.get_names():
raise URLError("file:// scheme is supported only on localhost")
else:
return self.open_local_file(req)
# names for the localhost
names = None
def get_names(self):
if FileHandler.names is None:
try:
FileHandler.names = tuple(
socket.gethostbyname_ex('localhost')[2] +
socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
# not entirely sure what the rules are here
def open_local_file(self, req):
import email.utils
import mimetypes
host = req.host
filename = req.selector
localfile = url2pathname(filename)
try:
stats = os.stat(localfile)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(filename)[0]
headers = email.message_from_string(
'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified))
if host:
host, port = splitport(host)
if not host or \
(not port and _safe_gethostbyname(host) in self.get_names()):
if host:
origurl = 'file://' + host + filename
else:
origurl = 'file://' + filename
return addinfourl(open(localfile, 'rb'), headers, origurl)
except OSError as msg:
# users shouldn't expect OSErrors coming from urlopen()
raise URLError(msg)
raise URLError('file not on local host')
def _safe_gethostbyname(host):
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
class FTPHandler(BaseHandler):
def ftp_open(self, req):
import ftplib
import mimetypes
host = req.host
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
try:
host = socket.gethostbyname(host)
except socket.error as msg:
raise URLError(msg)
path, attrs = splitattr(req.selector)
dirs = path.split('/')
dirs = list(map(unquote, dirs))
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
fp, retrlen = fw.retrfile(file, type)
headers = ""
mtype = mimetypes.guess_type(req.full_url)[0]
if mtype:
headers += "Content-type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-length: %d\n" % retrlen
headers = email.message_from_string(headers)
return addinfourl(fp, headers, req.full_url)
except ftplib.all_errors as msg:
exc = URLError('ftp error: %s' % msg)
raise exc.with_traceback(sys.exc_info()[2])
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
return ftpwrapper(user, passwd, host, port, dirs, timeout,
persistent=False)
class CacheFTPHandler(FTPHandler):
# XXX would be nice to have pluggable cache strategies
# XXX this stuff is definitely not thread safe
def __init__(self):
self.cache = {}
self.timeout = {}
self.soonest = 0
self.delay = 60
self.max_conns = 16
def setTimeout(self, t):
self.delay = t
def setMaxConns(self, m):
self.max_conns = m
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
key = user, host, port, '/'.join(dirs), timeout
if key in self.cache:
self.timeout[key] = time.time() + self.delay
else:
self.cache[key] = ftpwrapper(user, passwd, host, port,
dirs, timeout)
self.timeout[key] = time.time() + self.delay
self.check_cache()
return self.cache[key]
def check_cache(self):
# first check for old ones
t = time.time()
if self.soonest <= t:
for k, v in list(self.timeout.items()):
if v < t:
self.cache[k].close()
del self.cache[k]
del self.timeout[k]
self.soonest = min(list(self.timeout.values()))
# then check the size
if len(self.cache) == self.max_conns:
for k, v in list(self.timeout.items()):
if v == self.soonest:
del self.cache[k]
del self.timeout[k]
break
self.soonest = min(list(self.timeout.values()))
def clear_cache(self):
for conn in self.cache.values():
conn.close()
self.cache.clear()
self.timeout.clear()
# Code moved from the old urllib module
MAXFTPCACHE = 10 # Trim the ftp cache beyond this size
# Helper for non-unix systems
if os.name == 'nt':
from nturl2path import url2pathname, pathname2url
else:
def url2pathname(pathname):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
return unquote(pathname)
def pathname2url(pathname):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
return quote(pathname)
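# Illustrative sketch (not part of the module): on POSIX systems the two
# helpers above are simple quote/unquote wrappers.
#
#     assert pathname2url('/tmp/hello world.txt') == '/tmp/hello%20world.txt'
#     assert url2pathname('/tmp/hello%20world.txt') == '/tmp/hello world.txt'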
# This really consists of two pieces:
# (1) a class which handles opening of all sorts of URLs
# (plus assorted utilities etc.)
# (2) a set of functions for parsing URLs
# XXX Should these be separated out into different modules?
ftpcache = {}
class URLopener:
"""Class to open URLs.
This is a class rather than just a subroutine because we may need
more than one set of global protocol-specific options.
Note -- this is a base class for those who don't want the
automatic handling of errors of type 302 (relocated) and 401
(authorization needed)."""
__tempfiles = None
version = "Python-urllib/%s" % __version__
# Constructor
def __init__(self, proxies=None, **x509):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'keys'), "proxies must be a mapping"
self.proxies = proxies
self.key_file = x509.get('key_file')
self.cert_file = x509.get('cert_file')
self.addheaders = [('User-Agent', self.version)]
self.__tempfiles = []
self.__unlink = os.unlink # See cleanup()
self.tempcache = None
# Undocumented feature: if you assign {} to tempcache,
# it is used to cache files retrieved with
# self.retrieve(). This is not enabled by default
# since it does not work for changing documents (and I
# haven't got the logic to check expiration headers
# yet).
self.ftpcache = ftpcache
# Undocumented feature: you can use a different
# ftp cache by assigning to the .ftpcache member;
# in case you want logically independent URL openers
# XXX This is not threadsafe. Bah.
def __del__(self):
self.close()
def close(self):
self.cleanup()
def cleanup(self):
# This code sometimes runs when the rest of this module
# has already been deleted, so it can't use any globals
# or import anything.
if self.__tempfiles:
for file in self.__tempfiles:
try:
self.__unlink(file)
except OSError:
pass
del self.__tempfiles[:]
if self.tempcache:
self.tempcache.clear()
def addheader(self, *args):
"""Add a header to be used by the HTTP interface only
e.g. u.addheader('Accept', 'sound/basic')"""
self.addheaders.append(args)
# External interface
def open(self, fullurl, data=None):
"""Use URLopener().open(file) instead of open(file, 'r')."""
fullurl = unwrap(to_bytes(fullurl))
fullurl = quote(fullurl, safe="%/:=&?~#+!$,;'@()*[]|")
if self.tempcache and fullurl in self.tempcache:
filename, headers = self.tempcache[fullurl]
fp = open(filename, 'rb')
return addinfourl(fp, headers, fullurl)
urltype, url = splittype(fullurl)
if not urltype:
urltype = 'file'
if urltype in self.proxies:
proxy = self.proxies[urltype]
urltype, proxyhost = splittype(proxy)
host, selector = splithost(proxyhost)
url = (host, fullurl) # Signal special case to open_*()
else:
proxy = None
name = 'open_' + urltype
self.type = urltype
name = name.replace('-', '_')
if not hasattr(self, name):
if proxy:
return self.open_unknown_proxy(proxy, fullurl, data)
else:
return self.open_unknown(fullurl, data)
try:
if data is None:
return getattr(self, name)(url)
else:
return getattr(self, name)(url, data)
except socket.error as msg:
raise IOError('socket error', msg).with_traceback(sys.exc_info()[2])
def open_unknown(self, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError('url error', 'unknown url type', type)
def open_unknown_proxy(self, proxy, fullurl, data=None):
"""Overridable interface to open unknown URL type."""
type, url = splittype(fullurl)
raise IOError('url error', 'invalid proxy for %s' % type, proxy)
# External interface
def retrieve(self, url, filename=None, reporthook=None, data=None):
"""retrieve(url) returns (filename, headers) for a local object
or (tempfilename, headers) for a remote object."""
url = unwrap(to_bytes(url))
if self.tempcache and url in self.tempcache:
return self.tempcache[url]
type, url1 = splittype(url)
if filename is None and (not type or type == 'file'):
try:
fp = self.open_local_file(url1)
hdrs = fp.info()
fp.close()
return url2pathname(splithost(url1)[1]), hdrs
except IOError as msg:
pass
fp = self.open(url, data)
try:
headers = fp.info()
if filename:
tfp = open(filename, 'wb')
else:
import tempfile
garbage, path = splittype(url)
garbage, path = splithost(path or "")
path, garbage = splitquery(path or "")
path, garbage = splitattr(path or "")
suffix = os.path.splitext(path)[1]
(fd, filename) = tempfile.mkstemp(suffix)
self.__tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
try:
result = filename, headers
if self.tempcache is not None:
self.tempcache[url] = result
bs = 1024*8
size = -1
read = 0
blocknum = 0
if reporthook:
if "content-length" in headers:
size = int(headers["Content-Length"])
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if not block:
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
finally:
tfp.close()
finally:
fp.close()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: got only %i out of %i bytes"
% (read, size), result)
return result
# Each method named open_<type> knows how to open that type of URL
def _open_generic_http(self, connection_factory, url, data):
"""Make an HTTP connection using connection_class.
This is an internal method that should be called from
open_http() or open_https().
Arguments:
- connection_factory should take a host name and return an
HTTPConnection instance.
- url is the URL to retrieve or a (host, relative-path) pair.
- data is payload for a POST request or None.
"""
user_passwd = None
proxy_passwd = None
if isinstance(url, str):
host, selector = splithost(url)
if host:
user_passwd, host = splituser(host)
host = unquote(host)
realhost = host
else:
host, selector = url
# check whether the proxy contains authorization information
proxy_passwd, host = splituser(host)
# now we proceed with the url we want to obtain
urltype, rest = splittype(selector)
url = rest
user_passwd = None
if urltype.lower() != 'http':
realhost = None
else:
realhost, rest = splithost(rest)
if realhost:
user_passwd, realhost = splituser(realhost)
if user_passwd:
selector = "%s://%s%s" % (urltype, realhost, rest)
if proxy_bypass(realhost):
host = realhost
#print "proxy via http:", host, selector
if not host: raise IOError('http error', 'no host given')
if proxy_passwd:
import base64
proxy_auth = base64.b64encode(proxy_passwd.encode()).decode('ascii')
else:
proxy_auth = None
if user_passwd:
import base64
auth = base64.b64encode(user_passwd.encode()).decode('ascii')
else:
auth = None
http_conn = connection_factory(host)
headers = {}
if proxy_auth:
headers["Proxy-Authorization"] = "Basic %s" % proxy_auth
if auth:
headers["Authorization"] = "Basic %s" % auth
if realhost:
headers["Host"] = realhost
# Add Connection:close as we don't support persistent connections yet.
# This helps in closing the socket and avoiding ResourceWarning
headers["Connection"] = "close"
for header, value in self.addheaders:
headers[header] = value
if data is not None:
headers["Content-Type"] = "application/x-www-form-urlencoded"
http_conn.request("POST", selector, data, headers)
else:
http_conn.request("GET", selector, headers=headers)
try:
response = http_conn.getresponse()
except http.client.BadStatusLine:
# something went wrong with the HTTP status line
raise URLError("http protocol error: bad status line")
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if 200 <= response.status < 300:
return addinfourl(response, response.msg, "http:" + url,
response.status)
else:
return self.http_error(
url, response.fp,
response.status, response.reason, response.msg, data)
def open_http(self, url, data=None):
"""Use HTTP protocol."""
return self._open_generic_http(http.client.HTTPConnection, url, data)
def http_error(self, url, fp, errcode, errmsg, headers, data=None):
"""Handle http errors.
Derived class can override this, or provide specific handlers
named http_error_DDD where DDD is the 3-digit error code."""
# First check if there's a specific handler for this error
name = 'http_error_%d' % errcode
if hasattr(self, name):
method = getattr(self, name)
if data is None:
result = method(url, fp, errcode, errmsg, headers)
else:
result = method(url, fp, errcode, errmsg, headers, data)
if result: return result
return self.http_error_default(url, fp, errcode, errmsg, headers)
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Default error handler: close the connection and raise IOError."""
void = fp.read()
fp.close()
raise HTTPError(url, errcode, errmsg, headers, None)
if _have_ssl:
def _https_connection(self, host):
return http.client.HTTPSConnection(host,
key_file=self.key_file,
cert_file=self.cert_file)
def open_https(self, url, data=None):
"""Use HTTPS protocol."""
return self._open_generic_http(self._https_connection, url, data)
def open_file(self, url):
"""Use local file or FTP depending on form of URL."""
if not isinstance(url, str):
raise URLError('file error', 'proxy support for file protocol currently not implemented')
if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
raise ValueError("file:// scheme is supported only on localhost")
else:
return self.open_local_file(url)
def open_local_file(self, url):
"""Use local file."""
import mimetypes, email.utils
from io import StringIO
host, file = splithost(url)
localname = url2pathname(file)
try:
stats = os.stat(localname)
except OSError as e:
raise URLError(e.errno, e.strerror, e.filename)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(url)[0]
headers = email.message_from_string(
'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified))
if not host:
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
return addinfourl(open(localname, 'rb'), headers, urlfile)
host, port = splitport(host)
if (not port
and socket.gethostbyname(host) in (localhost() + thishost())):
urlfile = file
if file[:1] == '/':
urlfile = 'file://' + file
return addinfourl(open(localname, 'rb'), headers, urlfile)
raise URLError('local file error', 'not on local host')
def open_ftp(self, url):
"""Use FTP protocol."""
if not isinstance(url, str):
raise URLError('ftp error', 'proxy support for ftp protocol currently not implemented')
import mimetypes
from io import StringIO
host, path = splithost(url)
if not host: raise URLError('ftp error', 'no host given')
host, port = splitport(host)
user, host = splituser(host)
if user: user, passwd = splitpasswd(user)
else: passwd = None
host = unquote(host)
user = unquote(user or '')
passwd = unquote(passwd or '')
host = socket.gethostbyname(host)
if not port:
import ftplib
port = ftplib.FTP_PORT
else:
port = int(port)
path, attrs = splitattr(path)
path = unquote(path)
dirs = path.split('/')
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]: dirs = dirs[1:]
if dirs and not dirs[0]: dirs[0] = '/'
key = user, host, port, '/'.join(dirs)
# XXX thread unsafe!
if len(self.ftpcache) > MAXFTPCACHE:
# Prune the cache, rather arbitrarily
for k in self.ftpcache.keys():
if k != key:
v = self.ftpcache[k]
del self.ftpcache[k]
v.close()
try:
if not key in self.ftpcache:
self.ftpcache[key] = \
ftpwrapper(user, passwd, host, port, dirs)
if not file: type = 'D'
else: type = 'I'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
(fp, retrlen) = self.ftpcache[key].retrfile(file, type)
mtype = mimetypes.guess_type("ftp:" + url)[0]
headers = ""
if mtype:
headers += "Content-Type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-Length: %d\n" % retrlen
headers = email.message_from_string(headers)
return addinfourl(fp, headers, "ftp:" + url)
except ftperrors() as msg:
raise URLError('ftp error', msg).with_traceback(sys.exc_info()[2])
def open_data(self, url, data=None):
"""Use "data" URL."""
if not isinstance(url, str):
raise URLError('data error', 'proxy support for data protocol currently not implemented')
# ignore POSTed data
#
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
try:
[type, data] = url.split(',', 1)
except ValueError:
raise IOError('data error', 'bad data URL')
if not type:
type = 'text/plain;charset=US-ASCII'
semi = type.rfind(';')
if semi >= 0 and '=' not in type[semi:]:
encoding = type[semi+1:]
type = type[:semi]
else:
encoding = ''
msg = []
msg.append('Date: %s'%time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime(time.time())))
msg.append('Content-type: %s' % type)
if encoding == 'base64':
import base64
# XXX is this encoding/decoding ok?
data = base64.decodebytes(data.encode('ascii')).decode('latin1')
else:
data = unquote(data)
msg.append('Content-Length: %d' % len(data))
msg.append('')
msg.append(data)
msg = '\n'.join(msg)
headers = email.message_from_string(msg)
f = io.StringIO(msg)
#f.fileno = None # needed for addinfourl
return addinfourl(f, headers, url)
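# Illustrative sketch (not part of the module): the data URL syntax
# handled by open_data() above; the payload below base64-decodes to
# 'Hello, World!'.
#
#     url = 'data:text/plain;base64,SGVsbG8sIFdvcmxkIQ=='
#     mediatype, payload = url[len('data:'):].split(',', 1)
#     import base64
#     assert base64.b64decode(payload) == b'Hello, World!'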
class FancyURLopener(URLopener):
"""Derived class with handlers for errors we can handle (perhaps)."""
def __init__(self, *args, **kwargs):
URLopener.__init__(self, *args, **kwargs)
self.auth_cache = {}
self.tries = 0
self.maxtries = 10
def http_error_default(self, url, fp, errcode, errmsg, headers):
"""Default error handling -- don't raise an exception."""
return addinfourl(fp, headers, "http:" + url, errcode)
def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 302 -- relocated (temporarily)."""
self.tries += 1
if self.maxtries and self.tries >= self.maxtries:
if hasattr(self, "http_error_500"):
meth = self.http_error_500
else:
meth = self.http_error_default
self.tries = 0
return meth(url, fp, 500,
"Internal Server Error: Redirect Recursion", headers)
result = self.redirect_internal(url, fp, errcode, errmsg, headers,
data)
self.tries = 0
return result
def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
if 'location' in headers:
newurl = headers['location']
elif 'uri' in headers:
newurl = headers['uri']
else:
return
void = fp.read()
fp.close()
# In case the server sent a relative URL, join with original:
newurl = urljoin(self.type + ":" + url, newurl)
urlparts = urlparse(newurl)
# For security reasons, we don't allow redirection to anything other
# than http, https and ftp.
# We are using newer HTTPError with older redirect_internal method
# This older method will get deprecated in 3.3
if not urlparts.scheme in ('http', 'https', 'ftp'):
raise HTTPError(newurl, errcode,
errmsg +
" Redirection to url '%s' is not allowed." % newurl,
headers, fp)
return self.open(newurl)
def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 301 -- also relocated (permanently)."""
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 303 -- also relocated (essentially identical to 302)."""
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
"""Error 307 -- relocated, but turn POST into error."""
if data is None:
return self.http_error_302(url, fp, errcode, errmsg, headers, data)
else:
return self.http_error_default(url, fp, errcode, errmsg, headers)
def http_error_401(self, url, fp, errcode, errmsg, headers, data=None,
retry=False):
"""Error 401 -- authentication required.
This function supports Basic authentication only."""
if not 'www-authenticate' in headers:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
stuff = headers['www-authenticate']
import re
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
if not retry:
URLopener.http_error_default(self, url, fp, errcode, errmsg,
headers)
name = 'retry_' + self.type + '_basic_auth'
if data is None:
return getattr(self,name)(url, realm)
else:
return getattr(self,name)(url, realm, data)
def http_error_407(self, url, fp, errcode, errmsg, headers, data=None,
retry=False):
"""Error 407 -- proxy authentication required.
This function supports Basic authentication only."""
if not 'proxy-authenticate' in headers:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
stuff = headers['proxy-authenticate']
import re
match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
if not match:
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
scheme, realm = match.groups()
if scheme.lower() != 'basic':
URLopener.http_error_default(self, url, fp,
errcode, errmsg, headers)
if not retry:
URLopener.http_error_default(self, url, fp, errcode, errmsg,
headers)
name = 'retry_proxy_' + self.type + '_basic_auth'
if data is None:
return getattr(self,name)(url, realm)
else:
return getattr(self,name)(url, realm, data)
def retry_proxy_http_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
newurl = 'http://' + host + selector
proxy = self.proxies['http']
urltype, proxyhost = splittype(proxy)
proxyhost, proxyselector = splithost(proxyhost)
i = proxyhost.find('@') + 1
proxyhost = proxyhost[i:]
user, passwd = self.get_user_passwd(proxyhost, realm, i)
if not (user or passwd): return None
proxyhost = "%s:%s@%s" % (quote(user, safe=''),
quote(passwd, safe=''), proxyhost)
self.proxies['http'] = 'http://' + proxyhost + proxyselector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def retry_proxy_https_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
newurl = 'https://' + host + selector
proxy = self.proxies['https']
urltype, proxyhost = splittype(proxy)
proxyhost, proxyselector = splithost(proxyhost)
i = proxyhost.find('@') + 1
proxyhost = proxyhost[i:]
user, passwd = self.get_user_passwd(proxyhost, realm, i)
if not (user or passwd): return None
proxyhost = "%s:%s@%s" % (quote(user, safe=''),
quote(passwd, safe=''), proxyhost)
self.proxies['https'] = 'https://' + proxyhost + proxyselector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def retry_http_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
i = host.find('@') + 1
host = host[i:]
user, passwd = self.get_user_passwd(host, realm, i)
if not (user or passwd): return None
host = "%s:%s@%s" % (quote(user, safe=''),
quote(passwd, safe=''), host)
newurl = 'http://' + host + selector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def retry_https_basic_auth(self, url, realm, data=None):
host, selector = splithost(url)
i = host.find('@') + 1
host = host[i:]
user, passwd = self.get_user_passwd(host, realm, i)
if not (user or passwd): return None
host = "%s:%s@%s" % (quote(user, safe=''),
quote(passwd, safe=''), host)
newurl = 'https://' + host + selector
if data is None:
return self.open(newurl)
else:
return self.open(newurl, data)
def get_user_passwd(self, host, realm, clear_cache=0):
key = realm + '@' + host.lower()
if key in self.auth_cache:
if clear_cache:
del self.auth_cache[key]
else:
return self.auth_cache[key]
user, passwd = self.prompt_user_passwd(host, realm)
if user or passwd: self.auth_cache[key] = (user, passwd)
return user, passwd
def prompt_user_passwd(self, host, realm):
"""Override this in a GUI environment!"""
import getpass
try:
user = input("Enter username for %s at %s: " % (realm, host))
passwd = getpass.getpass("Enter password for %s in %s at %s: " %
(user, realm, host))
return user, passwd
except KeyboardInterrupt:
print()
return None, None
# Utility functions
_localhost = None
def localhost():
"""Return the IP address of the magic hostname 'localhost'."""
global _localhost
if _localhost is None:
_localhost = socket.gethostbyname('localhost')
return _localhost
_thishost = None
def thishost():
"""Return the IP addresses of the current host."""
global _thishost
if _thishost is None:
_thishost = tuple(socket.gethostbyname_ex(socket.gethostname())[2])
return _thishost
_ftperrors = None
def ftperrors():
"""Return the set of errors raised by the FTP class."""
global _ftperrors
if _ftperrors is None:
import ftplib
_ftperrors = ftplib.all_errors
return _ftperrors
_noheaders = None
def noheaders():
"""Return an empty email Message object."""
global _noheaders
if _noheaders is None:
_noheaders = email.message_from_string("")
return _noheaders
# Utility classes
class ftpwrapper:
"""Class used by open_ftp() for cache of open FTP connections."""
def __init__(self, user, passwd, host, port, dirs, timeout=None,
persistent=True):
self.user = user
self.passwd = passwd
self.host = host
self.port = port
self.dirs = dirs
self.timeout = timeout
self.refcount = 0
self.keepalive = persistent
self.init()
def init(self):
import ftplib
self.busy = 0
self.ftp = ftplib.FTP()
self.ftp.connect(self.host, self.port, self.timeout)
self.ftp.login(self.user, self.passwd)
for dir in self.dirs:
self.ftp.cwd(dir)
def retrfile(self, file, type):
import ftplib
self.endtransfer()
if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
else: cmd = 'TYPE ' + type; isdir = 0
try:
self.ftp.voidcmd(cmd)
except ftplib.all_errors:
self.init()
self.ftp.voidcmd(cmd)
conn = None
if file and not isdir:
# Try to retrieve as a file
try:
cmd = 'RETR ' + file
conn, retrlen = self.ftp.ntransfercmd(cmd)
except ftplib.error_perm as reason:
if str(reason)[:3] != '550':
raise URLError('ftp error', reason).with_traceback(
sys.exc_info()[2])
if not conn:
# Set transfer mode to ASCII!
self.ftp.voidcmd('TYPE A')
# Try a directory listing. Verify that directory exists.
if file:
pwd = self.ftp.pwd()
try:
try:
self.ftp.cwd(file)
except ftplib.error_perm as reason:
raise URLError('ftp error', reason) from reason
finally:
self.ftp.cwd(pwd)
cmd = 'LIST ' + file
else:
cmd = 'LIST'
conn, retrlen = self.ftp.ntransfercmd(cmd)
self.busy = 1
ftpobj = addclosehook(conn.makefile('rb'), self.file_close)
self.refcount += 1
conn.close()
# Pass back both a suitably decorated object and a retrieval length
return (ftpobj, retrlen)
def endtransfer(self):
if not self.busy:
return
self.busy = 0
try:
self.ftp.voidresp()
except ftperrors():
pass
def close(self):
self.keepalive = False
if self.refcount <= 0:
self.real_close()
def file_close(self):
self.endtransfer()
self.refcount -= 1
if self.refcount <= 0 and not self.keepalive:
self.real_close()
def real_close(self):
self.endtransfer()
try:
self.ftp.close()
except ftperrors():
pass
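    # Hedged summary of the lifecycle implemented above: retrfile() wraps the data
    # connection in addclosehook(..., self.file_close) and bumps refcount, so each
    # reader that closes its file calls file_close() and decrements it; close()
    # merely clears keepalive, and real_close() only runs once refcount <= 0 and
    # the wrapper is no longer persistent.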
# Proxy handling
def getproxies_environment():
"""Return a dictionary of scheme -> proxy server URL mappings.
Scan the environment for variables named <scheme>_proxy;
this seems to be the standard convention. If you need a
different way, you can pass a proxies dictionary to the
[Fancy]URLopener constructor.
"""
proxies = {}
for name, value in os.environ.items():
name = name.lower()
if value and name[-6:] == '_proxy':
proxies[name[:-6]] = value
return proxies
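# Hedged example of the <scheme>_proxy convention described above (the host name
# is an illustrative assumption, not a real proxy):
#
#     os.environ['http_proxy'] = 'http://proxy.example.com:3128'
#     os.environ['ftp_proxy'] = 'http://proxy.example.com:3128'
#     getproxies_environment()
#     # -> {'http': 'http://proxy.example.com:3128',
#     #     'ftp': 'http://proxy.example.com:3128'}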
def proxy_bypass_environment(host):
"""Test if proxies should not be used for a particular host.
Checks the environment for a variable named no_proxy, which should
be a list of DNS suffixes separated by commas, or '*' for all hosts.
"""
no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
# '*' is special case for always bypass
if no_proxy == '*':
return 1
# strip port off host
hostonly, port = splitport(host)
# check if the host ends with any of the DNS suffixes
no_proxy_list = [proxy.strip() for proxy in no_proxy.split(',')]
for name in no_proxy_list:
if name and (hostonly.endswith(name) or host.endswith(name)):
return 1
# otherwise, don't bypass
return 0
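# Hedged example of the no_proxy handling above (host names are illustrative):
#
#     os.environ['no_proxy'] = 'localhost,.example.com'
#     proxy_bypass_environment('www.example.com')    # -> 1 (suffix match)
#     proxy_bypass_environment('localhost:8000')     # -> 1 (port is stripped first)
#     proxy_bypass_environment('python.org')         # -> 0 (no matching suffix)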
# This code tests an OSX specific data structure but is testable on all
# platforms
def _proxy_bypass_macosx_sysconf(host, proxy_settings):
"""
Return True iff this host shouldn't be accessed using a proxy
This function uses the MacOSX framework SystemConfiguration
to fetch the proxy information.
    proxy_settings come from _scproxy._get_proxy_settings or get mocked, e.g.:
{ 'exclude_simple': bool,
'exceptions': ['foo.bar', '*.bar.com', '127.0.0.1', '10.1', '10.0/16']
}
"""
import re
import socket
from fnmatch import fnmatch
hostonly, port = splitport(host)
def ip2num(ipAddr):
parts = ipAddr.split('.')
parts = list(map(int, parts))
if len(parts) != 4:
parts = (parts + [0, 0, 0, 0])[:4]
return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]
# Check for simple host names:
if '.' not in host:
if proxy_settings['exclude_simple']:
return True
hostIP = None
for value in proxy_settings.get('exceptions', ()):
# Items in the list are strings like these: *.local, 169.254/16
if not value: continue
m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value)
if m is not None:
if hostIP is None:
try:
hostIP = socket.gethostbyname(hostonly)
hostIP = ip2num(hostIP)
except socket.error:
continue
base = ip2num(m.group(1))
mask = m.group(2)
if mask is None:
mask = 8 * (m.group(1).count('.') + 1)
else:
mask = int(mask[1:])
mask = 32 - mask
if (hostIP >> mask) == (base >> mask):
return True
elif fnmatch(host, value):
return True
return False
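# Hedged example of the exception matching above (addresses are illustrative; the
# second call assumes the host resolves to itself because it is an IP literal):
#
#     settings = {'exclude_simple': True,
#                 'exceptions': ['*.local', '10.0/16']}
#     _proxy_bypass_macosx_sysconf('printer.local', settings)   # True, fnmatch on *.local
#     _proxy_bypass_macosx_sysconf('10.0.1.5', settings)        # True, 10.0/16 prefix match
#     _proxy_bypass_macosx_sysconf('intranet', settings)        # True, no dot + exclude_simple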
if sys.platform == 'darwin':
from _scproxy import _get_proxy_settings, _get_proxies
def proxy_bypass_macosx_sysconf(host):
proxy_settings = _get_proxy_settings()
return _proxy_bypass_macosx_sysconf(host, proxy_settings)
def getproxies_macosx_sysconf():
"""Return a dictionary of scheme -> proxy server URL mappings.
This function uses the MacOSX framework SystemConfiguration
to fetch the proxy information.
"""
return _get_proxies()
def proxy_bypass(host):
if getproxies_environment():
return proxy_bypass_environment(host)
else:
return proxy_bypass_macosx_sysconf(host)
def getproxies():
return getproxies_environment() or getproxies_macosx_sysconf()
elif os.name == 'nt':
def getproxies_registry():
"""Return a dictionary of scheme -> proxy server URL mappings.
Win32 uses the registry to store proxies.
"""
proxies = {}
try:
import winreg
except ImportError:
# Std module, so should be around - but you never know!
return proxies
try:
internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
proxyEnable = winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0]
if proxyEnable:
# Returned as Unicode but problems if not converted to ASCII
proxyServer = str(winreg.QueryValueEx(internetSettings,
'ProxyServer')[0])
if '=' in proxyServer:
# Per-protocol settings
for p in proxyServer.split(';'):
protocol, address = p.split('=', 1)
# See if address has a type:// prefix
import re
if not re.match('^([^/:]+)://', address):
address = '%s://%s' % (protocol, address)
proxies[protocol] = address
else:
# Use one setting for all protocols
if proxyServer[:5] == 'http:':
proxies['http'] = proxyServer
else:
proxies['http'] = 'http://%s' % proxyServer
proxies['https'] = 'https://%s' % proxyServer
proxies['ftp'] = 'ftp://%s' % proxyServer
internetSettings.Close()
except (WindowsError, ValueError, TypeError):
# Either registry key not found etc, or the value in an
# unexpected format.
# proxies already set up to be empty so nothing to do
pass
return proxies
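    # Hedged example of the two ProxyServer formats handled above (registry values
    # are illustrative assumptions):
    #
    #   'http=proxy:80;https=sproxy:443' -> {'http': 'http://proxy:80',
    #                                        'https': 'https://sproxy:443'}
    #   'proxy.example.com:8080'         -> {'http': 'http://proxy.example.com:8080',
    #                                        'https': 'https://proxy.example.com:8080',
    #                                        'ftp': 'ftp://proxy.example.com:8080'}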
def getproxies():
"""Return a dictionary of scheme -> proxy server URL mappings.
Returns settings gathered from the environment, if specified,
or the registry.
"""
return getproxies_environment() or getproxies_registry()
def proxy_bypass_registry(host):
try:
import winreg
import re
except ImportError:
# Std modules, so should be around - but you never know!
return 0
try:
internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
proxyEnable = winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0]
proxyOverride = str(winreg.QueryValueEx(internetSettings,
'ProxyOverride')[0])
# ^^^^ Returned as Unicode but problems if not converted to ASCII
except WindowsError:
return 0
if not proxyEnable or not proxyOverride:
return 0
# try to make a host list from name and IP address.
rawHost, port = splitport(host)
host = [rawHost]
try:
addr = socket.gethostbyname(rawHost)
if addr != rawHost:
host.append(addr)
except socket.error:
pass
try:
fqdn = socket.getfqdn(rawHost)
if fqdn != rawHost:
host.append(fqdn)
except socket.error:
pass
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
proxyOverride = proxyOverride.split(';')
# now check if we match one of the registry values.
for test in proxyOverride:
if test == '<local>':
if '.' not in rawHost:
return 1
test = test.replace(".", r"\.") # mask dots
test = test.replace("*", r".*") # change glob sequence
test = test.replace("?", r".") # change glob char
for val in host:
# print "%s <--> %s" %( test, val )
if re.match(test, val, re.I):
return 1
return 0
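    # Hedged illustration of the glob-to-regex translation above (host names are
    # illustrative): a ProxyOverride entry '*.internal.example' becomes the
    # pattern r'.*\.internal\.example', so
    #
    #     re.match(r'.*\.internal\.example', 'web.internal.example', re.I)  # matches
    #     re.match(r'.*\.internal\.example', 'www.example.com', re.I)       # no match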
def proxy_bypass(host):
"""Return a dictionary of scheme -> proxy server URL mappings.
Returns settings gathered from the environment, if specified,
or the registry.
"""
if getproxies_environment():
return proxy_bypass_environment(host)
else:
return proxy_bypass_registry(host)
else:
# By default use environment variables
getproxies = getproxies_environment
proxy_bypass = proxy_bypass_environment
|
ngenovictor/django-crispy-forms
|
refs/heads/dev
|
docs/_themes/flask_theme_support.py
|
2228
|
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
|
mozilla/mozilla-ignite
|
refs/heads/master
|
vendor-local/lib/python/fudge/__init__.py
|
5
|
"""Fudge is a module for replacing real objects with fakes (mocks, stubs, etc) while testing.
See :ref:`using-fudge` for common scenarios.
"""
__version__ = '1.0.3'
import os
import re
import sys
import thread
import warnings
from fudge.exc import FakeDeclarationError
from fudge.patcher import *
from fudge.util import wraps, fmt_val, fmt_dict_vals
__all__ = ['Fake', 'patch', 'test', 'clear_calls', 'verify',
'clear_expectations']
class Registry(object):
"""An internal, thread-safe registry of expected calls.
You do not need to use this directly, use Fake.expects(...), etc
"""
def __init__(self):
self.expected_calls = {}
self.expected_call_order = {}
self.call_stacks = []
def __contains__(self, obj):
return obj in self.get_expected_calls()
def clear_actual_calls(self):
for exp in self.get_expected_calls():
exp.was_called = False
def clear_all(self):
self.clear_actual_calls()
self.clear_expectations()
def clear_calls(self):
"""Clears out any calls that were made on previously
registered fake objects and resets all call stacks.
You do not need to use this directly. Use fudge.clear_calls()
"""
self.clear_actual_calls()
for stack in self.call_stacks:
stack.reset()
for fake, call_order in self.get_expected_call_order().items():
call_order.reset_calls()
def clear_expectations(self):
c = self.get_expected_calls()
c[:] = []
d = self.get_expected_call_order()
d.clear()
def expect_call(self, expected_call):
c = self.get_expected_calls()
c.append(expected_call)
call_order = self.get_expected_call_order()
if expected_call.fake in call_order:
this_call_order = call_order[expected_call.fake]
this_call_order.add_expected_call(expected_call)
def get_expected_calls(self):
self.expected_calls.setdefault(thread.get_ident(), [])
return self.expected_calls[thread.get_ident()]
def get_expected_call_order(self):
self.expected_call_order.setdefault(thread.get_ident(), {})
return self.expected_call_order[thread.get_ident()]
def remember_expected_call_order(self, expected_call_order):
ordered_fakes = self.get_expected_call_order()
fake = expected_call_order.fake
## does nothing if called twice like:
# Fake().remember_order().remember_order()
ordered_fakes.setdefault(fake, expected_call_order)
def register_call_stack(self, call_stack):
self.call_stacks.append(call_stack)
def verify(self):
"""Ensure all expected calls were called,
raise AssertionError otherwise.
You do not need to use this directly. Use fudge.verify()
"""
try:
for exp in self.get_expected_calls():
exp.assert_called()
exp.assert_times_called()
for fake, call_order in self.get_expected_call_order().items():
call_order.assert_order_met(finalize=True)
finally:
self.clear_calls()
registry = Registry()
def clear_calls():
"""Begin a new set of calls on fake objects.
Specifically, clear out any calls that
were made on previously registered fake
objects and reset all call stacks.
You should call this any time you begin
making calls on fake objects.
This is also available in :func:`fudge.patch`, :func:`fudge.test` and :func:`fudge.with_fakes`
"""
registry.clear_calls()
def verify():
"""Verify that all methods have been called as expected.
Specifically, analyze all registered fake
objects and raise an AssertionError if an
expected call was never made to one or more
objects.
This is also available in :func:`fudge.patch`, :func:`fudge.test` and :func:`fudge.with_fakes`
"""
registry.verify()
## Deprecated:
def start():
"""Start testing with fake objects.
Deprecated. Use :func:`fudge.clear_calls` instead.
"""
warnings.warn(
"fudge.start() has been deprecated. Use fudge.clear_calls() instead",
DeprecationWarning, 3)
clear_calls()
def stop():
"""Stop testing with fake objects.
Deprecated. Use :func:`fudge.verify` instead.
"""
warnings.warn(
"fudge.stop() has been deprecated. Use fudge.verify() instead",
DeprecationWarning, 3)
verify()
##
def clear_expectations():
registry.clear_expectations()
def with_fakes(method):
"""Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards.
"""
@wraps(method)
def apply_clear_and_verify(*args, **kw):
clear_calls()
method(*args, **kw)
verify() # if no exceptions
return apply_clear_and_verify
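# Hedged usage sketch for with_fakes (the fake and test names are illustrative;
# note that unlike fudge.test below, with_fakes does not clear expectations):
#
#     @with_fakes
#     def test_connect():
#         db = Fake('db').expects('connect')
#         db.connect()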
def test(method):
"""Decorator for a test that uses fakes directly (not patched).
Most of the time you probably want to use :func:`fudge.patch` instead.
.. doctest::
:hide:
>>> import fudge
.. doctest::
>>> @fudge.test
... def test():
... db = fudge.Fake('db').expects('connect')
... # do stuff...
...
>>> test()
Traceback (most recent call last):
...
AssertionError: fake:db.connect() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
@wraps(method)
def clear_and_verify(*args, **kw):
clear_expectations()
clear_calls()
try:
v = method(*args, **kw)
verify() # if no exceptions
finally:
clear_expectations()
return v
return clear_and_verify
test.__test__ = False # Nose: do not collect
class Call(object):
"""A call that can be made on a Fake object.
You do not need to use this directly, use Fake.provides(...), etc
index=None
When numerical, this indicates the position of the call
(as in, a CallStack)
callable=False
Means this object acts like a function, not a method of an
object.
call_order=ExpectedCallOrder()
A call order to append each call to. Default is None
"""
def __init__(self, fake, call_name=None, index=None,
callable=False, call_order=None):
self.fake = fake
self.call_name = call_name
self.call_replacement = None
self.expected_arg_count = None
self.expected_kwarg_count = None
self.expected_args = None
self.expected_kwargs = None
self.expected_matching_args = None
self.expected_matching_kwargs = None
self.index = index
self.exception_to_raise = None
self.return_val = None
self.was_called = False
self.expected_times_called = None
self.actual_times_called = 0
self.callable = callable
self.call_order = call_order
def __call__(self, *args, **kwargs):
self.was_called = True
self.actual_times_called += 1
if self.call_order:
self.call_order.add_actual_call(self)
self.call_order.assert_order_met(finalize=False)
# make sure call count doesn't go over :
if self.expected_times_called is not None and \
self.actual_times_called > self.expected_times_called:
raise AssertionError(
'%s was called %s time(s). Expected %s.' % (
self, self.actual_times_called,
self.expected_times_called))
        return_value = None
replacement_return = None
if self.call_replacement:
replacement_return = self.call_replacement(*args, **kwargs)
if self.return_val is not None:
# this wins:
return_value = self.return_val
else:
# but it is intuitive to otherwise
# return the replacement's return:
return_value = replacement_return
# determine whether we should inspect arguments or not:
with_args = (self.expected_args or self.expected_kwargs)
if with_args:
# check keyword args first because of python arg coercion...
if self.expected_kwargs is None:
self.expected_kwargs = {} # empty **kw
if self.expected_kwargs != kwargs:
raise AssertionError(
"%s was called unexpectedly with args %s" % (
self,
self._repr_call(args, kwargs,
shorten_long_vals=False)))
if self.expected_args is None:
self.expected_args = tuple([]) # empty *args
if self.expected_args != args:
raise AssertionError(
"%s was called unexpectedly with args %s" % (
self,
self._repr_call(args, kwargs,
shorten_long_vals=False)))
# now check for matching keyword args.
# i.e. keyword args that are only checked if the call provided them
if self.expected_matching_kwargs:
for expected_arg, expected_value in \
self.expected_matching_kwargs.items():
if expected_arg in kwargs:
if expected_value != kwargs[expected_arg]:
raise AssertionError(
"%s was called unexpectedly with args %s" % (
self,
self._repr_call(args,
{expected_arg: kwargs[expected_arg]},
shorten_long_vals=False))
)
# now check for matching args.
# i.e. args that are only checked if the call provided them
if self.expected_matching_args:
if self.expected_matching_args != args:
raise AssertionError(
"%s was called unexpectedly with args %s" % (
self,
self._repr_call(args, kwargs,
shorten_long_vals=False)))
# determine whether we should inspect argument counts or not:
with_arg_counts = (self.expected_arg_count is not None or
self.expected_kwarg_count is not None)
if with_arg_counts:
if self.expected_arg_count is None:
self.expected_arg_count = 0
if len(args) != self.expected_arg_count:
raise AssertionError(
"%s was called with %s arg(s) but expected %s" % (
self, len(args), self.expected_arg_count))
if self.expected_kwarg_count is None:
self.expected_kwarg_count = 0
if len(kwargs.keys()) != self.expected_kwarg_count:
raise AssertionError(
"%s was called with %s keyword arg(s) but expected %s" % (
self, len(kwargs.keys()), self.expected_kwarg_count))
if self.exception_to_raise is not None:
raise self.exception_to_raise
return return_value
## hmmm, arg diffing (for Call().__call__()) needs more thought
# def _arg_diff(self, actual_args, expected_args):
    #     """return difference between keywords"""
# if len(actual_args) > len(expected_args):
# pass
#
#
# def _keyword_diff(self, actual_kwargs, expected_kwargs):
# """returns difference between keywords.
# """
# expected_keys = set(expected_kwargs.keys())
# if (len(expected_keys)<=1 and len(actual_kwargs.keys())<=1):
# # no need for detailed messages
# if actual_kwargs == expected_kwargs:
# return (True, "")
# else:
# return (False, "")
#
# for k,v in actual_kwargs.items():
# if k not in expected_keys:
# return (False, "keyword %r was not expected" % k)
# if v != expected_kwargs[k]:
# return (False, "%s=%r != %s=%r" % (k, v, k, expected_kwargs[k]))
# expected_keys.remove(k)
#
# exp_key_len = len(expected_keys)
# if exp_key_len:
# these = exp_key_len==1 and "this keyword" or "these keywords"
# return (False, "%s never showed up: %r" % (these, tuple(expected_keys)))
#
# return (True, "")
def _repr_call(self, expected_args, expected_kwargs, shorten_long_vals=True):
args = []
if expected_args:
args.extend([fmt_val(a, shorten=shorten_long_vals) for a in expected_args])
if expected_kwargs:
args.extend(fmt_dict_vals(expected_kwargs, shorten=shorten_long_vals))
if args:
call = "(%s)" % ", ".join(args)
else:
call = "()"
return call
def __repr__(self):
cls_name = repr(self.fake)
if self.call_name and not self.callable:
call = "%s.%s" % (cls_name, self.call_name)
else:
call = "%s" % cls_name
call = "%s%s" % (call, self._repr_call(self.expected_args, self.expected_kwargs))
if self.index is not None:
call = "%s[%s]" % (call, self.index)
return call
def get_call_object(self):
"""return self.
this exists for compatibility with :class:`CallStack`
"""
return self
def assert_times_called(self):
if self.expected_times_called is not None and \
self.actual_times_called != self.expected_times_called:
raise AssertionError(
'%s was called %s time(s). Expected %s.' % (
self, self.actual_times_called, self.expected_times_called))
class ExpectedCall(Call):
"""An expectation that a call will be made on a Fake object.
You do not need to use this directly, use Fake.expects(...), etc
"""
def __init__(self, *args, **kw):
super(ExpectedCall, self).__init__(*args, **kw)
registry.expect_call(self)
def assert_called(self):
if not self.was_called:
raise AssertionError("%s was not called" % (self))
class ExpectedCallOrder(object):
"""An expectation that calls should be called in a specific order."""
def __init__(self, fake):
self.fake = fake
self._call_order = []
self._actual_calls = []
def __repr__(self):
return "%r(%r)" % (self.fake, self._call_order)
__str__ = __repr__
def _repr_call_list(self, call_list):
if not len(call_list):
return "no calls"
else:
stack = ["#%s %r" % (i+1,c) for i,c in enumerate(call_list)]
stack.append("end")
return ", ".join(stack)
def add_expected_call(self, call):
self._call_order.append(call)
def add_actual_call(self, call):
self._actual_calls.append(call)
def assert_order_met(self, finalize=False):
"""assert that calls have been made in the right order."""
error = None
actual_call_len = len(self._actual_calls)
expected_call_len = len(self._call_order)
if actual_call_len == 0:
error = "Not enough calls were made"
else:
for i,call in enumerate(self._call_order):
if actual_call_len < i+1:
if not finalize:
# we're not done asserting calls so
# forget about not having enough calls
continue
calls_made = len(self._actual_calls)
if calls_made == 1:
error = "Only 1 call was made"
else:
error = "Only %s calls were made" % calls_made
break
ac_call = self._actual_calls[i]
if ac_call is not call:
error = "Call #%s was %r" % (i+1, ac_call)
break
if not error:
if actual_call_len > expected_call_len:
# only show the first extra call since this
# will be triggered before all calls are finished:
error = "#%s %s was unexpected" % (
expected_call_len+1,
self._actual_calls[expected_call_len]
)
if error:
msg = "%s; Expected: %s" % (
error, self._repr_call_list(self._call_order))
raise AssertionError(msg)
def reset_calls(self):
self._actual_calls[:] = []
class CallStack(object):
"""A stack of :class:`Call` objects
Calling this object behaves just like Call except
the Call instance you operate on gets changed each time __call__() is made
expected=False
When True, this indicates that the call stack was derived
from an expected call. This is used by Fake to register
each call on the stack.
call_name
Name of the call
"""
def __init__(self, fake, initial_calls=None, expected=False, call_name=None):
self.fake = fake
self._pointer = 0 # position of next call to be made (can be reset)
self._calls = []
if initial_calls is not None:
for c in initial_calls:
self.add_call(c)
self.expected = expected
self.call_name = call_name
registry.register_call_stack(self)
def __iter__(self):
for c in self._calls:
yield c
def __repr__(self):
return "<%s for %r>" % (self.__class__.__name__, self._calls)
__str__ = __repr__
def add_call(self, call):
self._calls.append(call)
call.index = len(self._calls)-1
def get_call_object(self):
"""returns the last *added* call object.
this is so Fake knows which one to alter
"""
return self._calls[len(self._calls)-1]
def reset(self):
self._pointer = 0
def __call__(self, *args, **kw):
try:
current_call = self._calls[self._pointer]
except IndexError:
raise AssertionError(
"This attribute of %s can only be called %s time(s). "
"Call reset() if necessary or fudge.clear_calls()." % (
self.fake, len(self._calls)))
self._pointer += 1
return current_call(*args, **kw)
class Fake(object):
"""A fake object that replaces a real one while testing.
Most calls with a few exceptions return ``self`` so that you can chain
them together to create readable code.
Instance methods will raise either AssertionError or :class:`fudge.FakeDeclarationError`
Keyword arguments:
**name=None**
Name of the class, module, or function you mean to replace. If not
specified, Fake() will try to guess the name by inspecting the calling
frame (if possible).
**allows_any_call=False**
        This is **deprecated**. Use :meth:`Fake.is_a_stub()` instead.
**callable=False**
This is **deprecated**. Use :meth:`Fake.is_callable` instead.
**expect_call=True**
This is **deprecated**. Use :meth:`Fake.expects_call` instead.
"""
def __init__(self, name=None, allows_any_call=False,
callable=False, expect_call=False):
self._attributes = {}
self._declared_calls = {}
self._name = (name or self._guess_name())
self._last_declared_call_name = None
self._is_a_stub = False
if allows_any_call:
warnings.warn('Fake(allows_any_call=True) is deprecated;'
' use Fake.is_a_stub()')
self.is_a_stub()
self._call_stack = None
if expect_call:
self.expects_call()
elif callable or allows_any_call:
self.is_callable()
else:
self._callable = None
self._expected_call_order = None
def __getattribute__(self, name):
"""Favors stubbed out attributes, falls back to real attributes
"""
# this getter circumvents infinite loops:
def g(n):
return object.__getattribute__(self, n)
if name in g('_declared_calls'):
# if it's a call that has been declared
# as that of the real object then hand it over:
return g('_declared_calls')[name]
elif name in g('_attributes'):
# return attribute declared on real object
return g('_attributes')[name]
else:
# otherwise, first check if it's a call
# of Fake itself (i.e. returns(), with_args(), etc)
try:
self_call = g(name)
except AttributeError:
pass
else:
return self_call
if g('_is_a_stub'):
                # Lazily create an attribute (which might later get called):
stub = Fake(name=self._endpoint_name(name)).is_a_stub()
self.has_attr(**{name: stub})
return getattr(self, name)
raise AttributeError(
"%s object does not allow call or attribute '%s' "
"(maybe you want %s.is_a_stub() ?)" % (
self, name, self.__class__.__name__))
def __call__(self, *args, **kwargs):
if '__init__' in self._declared_calls:
# special case, simulation of __init__():
call = self._declared_calls['__init__']
result = call(*args, **kwargs)
if result is None:
# assume more calls were expected / provided by the same fake
return self
else:
# a new custom object has been declared
return result
elif self._callable:
return self._callable(*args, **kwargs)
elif self._is_a_stub:
self.is_callable().returns_fake().is_a_stub()
return self.__call__(*args, **kwargs)
else:
raise RuntimeError(
"%s object cannot be called (maybe you want "
"%s.is_callable() ?)" % (self, self.__class__.__name__))
def __setattr__(self, name, val):
if hasattr(self, '_attributes') and name in self._attributes:
self._attributes[name] = val
else:
object.__setattr__(self, name, val)
def __repr__(self):
return "fake:%s" % (self._name or "unnamed")
def _declare_call(self, call_name, call):
self._declared_calls[call_name] = call
_assignment = re.compile(r"\s*(?P<name>[a-zA-Z0-9_]+)\s*=\s*(fudge\.)?Fake\(.*")
def _guess_asn_from_file(self, frame):
if frame.f_code.co_filename:
if os.path.exists(frame.f_code.co_filename):
cofile = open(frame.f_code.co_filename,'r')
try:
for ln, line in enumerate(cofile):
# I'm not sure why -1 is needed
if ln==frame.f_lineno-1:
possible_asn = line
m = self._assignment.match(possible_asn)
if m:
return m.group('name')
finally:
cofile.close()
def _guess_name(self):
if not hasattr(sys, '_getframe'):
# Stackless?
return None
if sys.platform.startswith('java'):
# frame objects are completely different,
# not worth the hassle.
return None
# get frame where class was instantiated,
# my_obj = Fake()
# ^
# we want to set self._name = 'my_obj'
frame = sys._getframe(2)
if len(frame.f_code.co_varnames):
# at the top-most frame:
co_names = frame.f_code.co_varnames
else:
# any other frame:
co_names = frame.f_code.co_names
# find names that are not locals.
# this probably indicates my_obj = ...
candidates = [n for n in co_names if n not in frame.f_locals]
if len(candidates)==0:
# the value was possibly queued for deref
# foo = 44
# foo = Fake()
return self._guess_asn_from_file(frame)
elif len(candidates)==1:
return candidates[0]
else:
# we are possibly halfway through a module
# where not all names have been compiled
return self._guess_asn_from_file(frame)
def _get_current_call(self):
if not self._last_declared_call_name:
if not self._callable:
raise FakeDeclarationError(
"Call to a method that expects a predefined call but no such call exists. "
"Maybe you forgot expects('method') or provides('method') ?")
return self._callable.get_call_object()
exp = self._declared_calls[self._last_declared_call_name].get_call_object()
return exp
def _endpoint_name(self, endpoint):
p = [self._name or 'unnamed']
if endpoint != self._name:
p.append(str(endpoint))
return '.'.join(p)
def expects_call(self):
"""The fake must be called.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = fudge.Fake('os.remove').expects_call()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:os.remove() was not called
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
self._callable = ExpectedCall(self, call_name=self._name,
callable=True)
return self
def is_callable(self):
"""The fake can be called.
This is useful for when you stub out a function
as opposed to a class. For example::
>>> import fudge
>>> remove = Fake('os.remove').is_callable()
>>> remove('some/path')
"""
self._callable = Call(self, call_name=self._name, callable=True)
return self
def is_a_stub(self):
"""Turns this fake into a stub.
When a stub, any method is allowed to be called on the Fake() instance
and any attribute can be accessed. When an unknown attribute or
call is made, a new Fake() is returned. You can of course override
any of this with :meth:`Fake.expects` and the other methods.
"""
self._is_a_stub = True
return self
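    # Hedged doctest-style sketch of stub behaviour (the session/query names are
    # illustrative assumptions):
    #
    #     >>> sess = Fake('session').is_a_stub()
    #     >>> sess.query('Frank')              # unknown calls are allowed
    #     fake:session.query()
    #     >>> sess.transaction.begin()         # attributes are created lazily too
    #     fake:session.transaction.begin()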
def calls(self, call):
"""Redefine a call.
The fake method will execute your function. I.E.::
>>> f = Fake().provides('hello').calls(lambda: 'Why, hello there')
>>> f.hello()
'Why, hello there'
"""
exp = self._get_current_call()
exp.call_replacement = call
return self
def expects(self, call_name):
"""Expect a call.
.. doctest::
:hide:
>>> import fudge
>>> fudge.clear_expectations()
>>> fudge.clear_calls()
If the method *call_name* is never called, then raise an error. I.E.::
>>> session = Fake('session').expects('open').expects('close')
>>> session.open()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:session.close() was not called
.. note::
If you want to also verify the order these calls are made in,
use :func:`fudge.Fake.remember_order`. When using :func:`fudge.Fake.next_call`
after ``expects(...)``, each new call will be part of the expected order
Declaring ``expects()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name)
self._last_declared_call_name = call_name
c = ExpectedCall(self, call_name, call_order=self._expected_call_order)
self._declare_call(call_name, c)
return self
def has_attr(self, **attributes):
"""Sets available attributes.
I.E.::
>>> User = Fake('User').provides('__init__').has_attr(name='Harry')
>>> user = User()
>>> user.name
'Harry'
"""
self._attributes.update(attributes)
return self
def next_call(self, for_method=None):
"""Start expecting or providing multiple calls.
.. note:: next_call() cannot be used in combination with :func:`fudge.Fake.times_called`
Up until calling this method, calls are infinite.
For example, before next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f.status()
'Awake!'
>>> f.status()
'Awake!'
After next_call() ... ::
>>> from fudge import Fake
>>> f = Fake().provides('status').returns('Awake!')
>>> f = f.next_call().returns('Asleep')
>>> f = f.next_call().returns('Dreaming')
>>> f.status()
'Awake!'
>>> f.status()
'Asleep'
>>> f.status()
'Dreaming'
>>> f.status()
Traceback (most recent call last):
...
AssertionError: This attribute of fake:unnamed can only be called 3 time(s). Call reset() if necessary or fudge.clear_calls().
If you need to affect the next call of something other than the last declared call,
use ``next_call(for_method="other_call")``. Here is an example using getters and setters
on a session object ::
>>> from fudge import Fake
>>> sess = Fake('session').provides('get_count').returns(1)
>>> sess = sess.provides('set_count').with_args(5)
Now go back and adjust return values for get_count() ::
>>> sess = sess.next_call(for_method='get_count').returns(5)
This allows these calls to be made ::
>>> sess.get_count()
1
>>> sess.set_count(5)
>>> sess.get_count()
5
When using :func:`fudge.Fake.remember_order` in combination with :func:`fudge.Fake.expects` and :func:`fudge.Fake.next_call` each new call will be part of the expected order.
"""
last_call_name = self._last_declared_call_name
if for_method:
if for_method not in self._declared_calls:
raise FakeDeclarationError(
"next_call(for_method=%r) is not possible; "
"declare expects(%r) or provides(%r) first" % (
for_method, for_method, for_method))
else:
# set this for the local function:
last_call_name = for_method
# reset this for subsequent methods:
self._last_declared_call_name = last_call_name
if last_call_name:
exp = self._declared_calls[last_call_name]
elif self._callable:
exp = self._callable
else:
raise FakeDeclarationError('next_call() must follow provides(), '
'expects() or is_callable()')
if getattr(exp, 'expected_times_called', None) is not None:
raise FakeDeclarationError("Cannot use next_call() in combination with times_called()")
if not isinstance(exp, CallStack):
# lazily create a stack with the last defined
# expected call as the first on the stack:
stack = CallStack(self, initial_calls=[exp],
expected=isinstance(exp, ExpectedCall),
call_name=exp.call_name)
# replace the old call obj using the same name:
if last_call_name:
self._declare_call(last_call_name, stack)
elif self._callable:
self._callable = stack
else:
stack = exp
# hmm, we need a copy here so that the last call
# falls off the stack.
if stack.expected:
next_call = ExpectedCall(self, call_name=exp.call_name, call_order=self._expected_call_order)
else:
next_call = Call(self, call_name=exp.call_name)
stack.add_call(next_call)
return self
def provides(self, call_name):
"""Provide a call.
The call acts as a stub -- no error is raised if it is not called.::
>>> session = Fake('session').provides('open').provides('close')
>>> import fudge
>>> fudge.clear_expectations() # from any previously declared fakes
>>> fudge.clear_calls()
>>> session.open()
>>> fudge.verify() # close() not called but no error
Declaring ``provides()`` multiple times is the same as
declaring :func:`fudge.Fake.next_call`
"""
if call_name in self._declared_calls:
return self.next_call(for_method=call_name)
self._last_declared_call_name = call_name
c = Call(self, call_name)
self._declare_call(call_name, c)
return self
def raises(self, exc):
"""Set last call to raise an exception class or instance.
For example::
>>> import fudge
>>> db = fudge.Fake('db').provides('insert').raises(ValueError("not enough parameters for insert"))
>>> db.insert()
Traceback (most recent call last):
...
ValueError: not enough parameters for insert
"""
exp = self._get_current_call()
exp.exception_to_raise = exc
return self
def remember_order(self):
"""Verify that subsequent :func:`fudge.Fake.expects` are called in the right order.
For example::
>>> import fudge
>>> db = fudge.Fake('db').remember_order().expects('insert').expects('update')
>>> db.update()
Traceback (most recent call last):
...
AssertionError: Call #1 was fake:db.update(); Expected: #1 fake:db.insert(), #2 fake:db.update(), end
>>> fudge.clear_expectations()
When declaring multiple calls using :func:`fudge.Fake.next_call`, each subsequent call will be added
to the expected order of calls ::
>>> import fudge
>>> sess = fudge.Fake("session").remember_order().expects("get_id").returns(1)
>>> sess = sess.expects("set_id").with_args(5)
>>> sess = sess.next_call(for_method="get_id").returns(5)
Multiple calls to ``get_id()`` are now expected ::
>>> sess.get_id()
1
>>> sess.set_id(5)
>>> sess.get_id()
5
>>> fudge.verify()
>>> fudge.clear_expectations()
"""
if self._callable:
raise FakeDeclarationError(
"remember_order() cannot be used for Fake(callable=True) or Fake(expect_call=True)")
self._expected_call_order = ExpectedCallOrder(self)
registry.remember_expected_call_order(self._expected_call_order)
return self
def returns(self, val):
"""Set the last call to return a value.
Set a static value to return when a method is called. I.E.::
>>> f = Fake().provides('get_number').returns(64)
>>> f.get_number()
64
"""
exp = self._get_current_call()
exp.return_val = val
return self
def returns_fake(self, *args, **kwargs):
"""Set the last call to return a new :class:`fudge.Fake`.
Any given arguments are passed to the :class:`fudge.Fake` constructor
Take note that this is different from the cascading nature of
other methods. This will return an instance of the *new* Fake,
not self, so you should be careful to store its return value in a new
variable.
I.E.::
>>> session = Fake('session')
>>> query = session.provides('query').returns_fake(name="Query")
>>> assert query is not session
>>> query = query.provides('one').returns(['object'])
>>> session.query().one()
['object']
"""
exp = self._get_current_call()
endpoint = kwargs.get('name', exp.call_name)
name = self._endpoint_name(endpoint)
kwargs['name'] = '%s()' % name
fake = self.__class__(*args, **kwargs)
exp.return_val = fake
return fake
def times_called(self, n):
"""Set the number of times an object can be called.
When working with provided calls, you'll only see an
error if the expected call count is exceeded ::
>>> auth = Fake('auth').provides('login').times_called(1)
>>> auth.login()
>>> auth.login()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 2 time(s). Expected 1.
When working with expected calls, you'll see an error if
the call count is never met ::
>>> import fudge
>>> auth = fudge.Fake('auth').expects('login').times_called(2)
>>> auth.login()
>>> fudge.verify()
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called 1 time(s). Expected 2.
.. note:: This cannot be used in combination with :func:`fudge.Fake.next_call`
"""
if self._last_declared_call_name:
actual_last_call = self._declared_calls[self._last_declared_call_name]
if isinstance(actual_last_call, CallStack):
raise FakeDeclarationError("Cannot use times_called() in combination with next_call()")
# else: # self._callable is in effect
exp = self._get_current_call()
exp.expected_times_called = n
return self
def with_args(self, *args, **kwargs):
"""Set the last call to expect specific argument values.
The app under test must send all declared arguments and keyword arguments
otherwise your test will raise an AssertionError. For example:
.. doctest::
>>> import fudge
>>> counter = fudge.Fake('counter').expects('increment').with_args(25, table='hits')
>>> counter.increment(24, table='clicks')
Traceback (most recent call last):
...
AssertionError: fake:counter.increment(25, table='hits') was called unexpectedly with args (24, table='clicks')
If you need to work with dynamic argument values
consider using :func:`fudge.Fake.with_matching_args` to make looser declarations.
You can also use :mod:`fudge.inspector` functions. Here is an example of providing
a more flexible ``with_args()`` declaration using inspectors:
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. doctest::
>>> import fudge
>>> from fudge.inspector import arg
>>> counter = fudge.Fake('counter')
>>> counter = counter.expects('increment').with_args(
... arg.any(),
... table=arg.endswith("hits"))
...
The above declaration would allow you to call counter like this:
.. doctest::
>>> counter.increment(999, table="image_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_calls()
Or like this:
.. doctest::
>>> counter.increment(22, table="user_profile_hits")
>>> fudge.verify()
.. doctest::
:hide:
>>> fudge.clear_expectations()
"""
exp = self._get_current_call()
if args:
exp.expected_args = args
if kwargs:
exp.expected_kwargs = kwargs
return self
def with_matching_args(self, *args, **kwargs):
"""Set the last call to expect specific argument values if those arguments exist.
Unlike :func:`fudge.Fake.with_args` use this if you want to only declare
expectations about matching arguments. Any unknown keyword arguments
used by the app under test will be allowed.
For example, you can declare positional arguments but ignore keyword arguments:
.. doctest::
>>> import fudge
>>> db = fudge.Fake('db').expects('transaction').with_matching_args('insert')
With this declaration, any keyword argument is allowed:
.. doctest::
>>> db.transaction('insert', isolation_level='lock')
>>> db.transaction('insert', isolation_level='shared')
>>> db.transaction('insert', retry_on_error=True)
.. doctest::
:hide:
>>> fudge.clear_expectations()
.. note::
you may get more mileage out of :mod:`fudge.inspector` functions as
described in :func:`fudge.Fake.with_args`
"""
exp = self._get_current_call()
if args:
exp.expected_matching_args = args
if kwargs:
exp.expected_matching_kwargs = kwargs
return self
def with_arg_count(self, count):
"""Set the last call to expect an exact argument count.
I.E.::
>>> auth = Fake('auth').provides('login').with_arg_count(2)
>>> auth.login('joe_user') # forgot password
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_arg_count = count
return self
def with_kwarg_count(self, count):
"""Set the last call to expect an exact count of keyword arguments.
I.E.::
>>> auth = Fake('auth').provides('login').with_kwarg_count(2)
>>> auth.login(username='joe') # forgot password=
Traceback (most recent call last):
...
AssertionError: fake:auth.login() was called with 1 keyword arg(s) but expected 2
"""
exp = self._get_current_call()
exp.expected_kwarg_count = count
return self
|
wgcv/SWW-Crashphone
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/flatpages/tests/test_models.py
|
94
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import set_script_prefix, clear_script_prefix
from django.contrib.flatpages.models import FlatPage
from django.test import TestCase
class FlatpageModelTests(TestCase):
def test_get_absolute_url_urlencodes(self):
pf = FlatPage(title="Café!", url='/café/')
self.assertEqual(pf.get_absolute_url(), '/caf%C3%A9/')
def test_get_absolute_url_honors_script_prefix(self):
pf = FlatPage(title="Tea!", url='/tea/')
set_script_prefix('/beverages/')
try:
self.assertEqual(pf.get_absolute_url(), '/beverages/tea/')
finally:
clear_script_prefix()
|
jolevq/odoopub
|
refs/heads/master
|
addons/document/document.py
|
93
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import errno
import logging
import os
import random
import shutil
import string
import time
from StringIO import StringIO
import psycopg2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
import openerp.report.interface
from openerp.tools.misc import ustr
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
from content_index import cntIndex
_logger = logging.getLogger(__name__)
class document_file(osv.osv):
_inherit = 'ir.attachment'
_columns = {
# Columns from ir.attachment:
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
# Fields of document:
'user_id': fields.many2one('res.users', 'Owner', select=1),
'parent_id': fields.many2one('document.directory', 'Directory', select=1, change_default=True),
'index_content': fields.text('Indexed Content'),
'partner_id':fields.many2one('res.partner', 'Partner', select=1),
'file_type': fields.char('Content Type'),
}
_order = "id desc"
_defaults = {
'user_id': lambda self, cr, uid, ctx:uid,
}
_sql_constraints = [
('filename_unique', 'unique (name,parent_id)', 'The filename must be unique in a directory !'),
]
def check(self, cr, uid, ids, mode, context=None, values=None):
"""Overwrite check to verify access on directory to validate specifications of doc/access_permissions.rst"""
if not isinstance(ids, list):
ids = [ids]
super(document_file, self).check(cr, uid, ids, mode, context=context, values=values)
if ids:
self.pool.get('ir.model.access').check(cr, uid, 'document.directory', mode)
# use SQL to avoid recursive loop on read
cr.execute('SELECT DISTINCT parent_id from ir_attachment WHERE id in %s AND parent_id is not NULL', (tuple(ids),))
self.pool.get('document.directory').check_access_rule(cr, uid, [parent_id for (parent_id,) in cr.fetchall()], mode, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# Grab ids, bypassing 'count'
ids = super(document_file, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=False)
if not ids:
return 0 if count else []
# Filter out documents that are in directories that the user is not allowed to read.
# Must use pure SQL to avoid access rules exceptions (we want to remove the records,
# not fail), and the records have been filtered in parent's search() anyway.
cr.execute('SELECT id, parent_id from ir_attachment WHERE id in %s', (tuple(ids),))
        # construct a dict of parent -> attach
parents = {}
for attach_id, attach_parent in cr.fetchall():
parents.setdefault(attach_parent, []).append(attach_id)
parent_ids = parents.keys()
# filter parents
visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
# null parents means allowed
ids = parents.get(None,[])
for parent_id in visible_parent_ids:
ids.extend(parents[parent_id])
return len(ids) if count else ids
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
if 'name' not in default:
name = self.read(cr, uid, [id], ['name'])[0]['name']
default.update(name=_("%s (copy)") % (name))
return super(document_file, self).copy(cr, uid, id, default, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
# take partner from uid
if vals.get('res_id', False) and vals.get('res_model', False) and not vals.get('partner_id', False):
vals['partner_id'] = self.__get_partner_id(cr, uid, vals['res_model'], vals['res_id'], context)
if vals.get('datas', False):
vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), None)
return super(document_file, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if vals.get('datas', False):
vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), None)
return super(document_file, self).write(cr, uid, ids, vals, context)
def _index(self, cr, uid, data, datas_fname, file_type):
mime, icont = cntIndex.doIndex(data, datas_fname, file_type or None, None)
icont_u = ustr(icont)
return mime, icont_u
def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
""" A helper to retrieve the associated partner from any res_model+id
It is a hack that will try to discover if the mentioned record is
clearly associated with a partner record.
"""
obj_model = self.pool[res_model]
if obj_model._name == 'res.partner':
return res_id
elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
bro = obj_model.browse(cr, uid, res_id, context=context)
return bro.partner_id.id
return False
class document_directory(osv.osv):
_name = 'document.directory'
_description = 'Directory'
_order = 'name'
_columns = {
'name': fields.char('Name', required=True, select=1),
'write_date': fields.datetime('Date Modified', readonly=True),
'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
'user_id': fields.many2one('res.users', 'Owner'),
'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
'parent_id': fields.many2one('document.directory', 'Parent Directory', select=1, change_default=True),
'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
'type': fields.selection([ ('directory','Static Directory'), ('ressource','Folders per resource'), ],
'Type', required=True, select=1, change_default=True,
help="Each directory can either have the type Static or be linked to another resource. A static directory, as with Operating Systems, is the classic directory that can contain a set of files. The directories linked to systems resources automatically possess sub-directories for each of resource types defined in the parent directory."),
'domain': fields.char('Domain', help="Use a domain if you want to apply an automatic filter on visible resources."),
'ressource_type_id': fields.many2one('ir.model', 'Resource model', change_default=True,
help="Select an object here and there will be one folder per record of that resource."),
'resource_field': fields.many2one('ir.model.fields', 'Name field', help='Field to be used as name on resource directories. If empty, the "name" will be used.'),
'resource_find_all': fields.boolean('Find all resources',
help="If true, all attachments that match this resource will " \
" be located. If false, only ones that have this as parent." ),
'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model', change_default=True,
help="If you put an object here, this directory template will appear bellow all of these objects. " \
"Such directories are \"attached\" to the specific model or record, just like attachments. " \
"Don't put a parent directory if you select a parent model."),
'ressource_id': fields.integer('Resource ID',
help="Along with Parent Model, this ID attaches this folder to a specific record of Parent Model."),
'ressource_tree': fields.boolean('Tree Structure',
help="Check this if you want to use the same tree structure as the object selected in the system."),
'dctx_ids': fields.one2many('document.directory.dctx', 'dir_id', 'Context fields'),
'company_id': fields.many2one('res.company', 'Company', change_default=True),
}
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'document.directory', context=c),
'user_id': lambda self,cr,uid,ctx: uid,
'domain': '[]',
'type': 'directory',
'ressource_id': 0,
'resource_find_all': True,
}
_sql_constraints = [
('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !'),
('no_selfparent', 'check(parent_id <> id)', 'Directory cannot be parent of itself!'),
]
def name_get(self, cr, uid, ids, context=None):
res = []
if not self.search(cr,uid,[('id','in',ids)]):
ids = []
for d in self.browse(cr, uid, ids, context=context):
s = ''
d2 = d
while d2 and d2.parent_id:
s = d2.name + (s and ('/' + s) or '')
d2 = d2.parent_id
res.append((d.id, s or d.name))
return res
def get_full_path(self, cr, uid, dir_id, context=None):
""" Return the full path to this directory, in a list, root first
"""
if isinstance(dir_id, (tuple, list)):
assert len(dir_id) == 1
dir_id = dir_id[0]
def _parent(dir_id, path):
parent=self.browse(cr, uid, dir_id)
if parent.parent_id and not parent.ressource_parent_type_id:
_parent(parent.parent_id.id,path)
path.append(parent.name)
else:
path.append(parent.name)
return path
path = []
_parent(dir_id, path)
return path
_constraints = [
(osv.osv._check_recursion, 'Error! You cannot create recursive directories.', ['parent_id'])
]
def onchange_content_id(self, cr, uid, ids, ressource_type_id):
return {}
def get_object(self, cr, uid, uri, context=None):
""" Return a node object for the given uri.
This fn merely passes the call to node_context
"""
return get_node_context(cr, uid, context).get_uri(cr, uri)
def get_node_class(self, cr, uid, ids, dbro=None, dynamic=False, context=None):
"""Retrieve the class of nodes for this directory
        This function can be overridden by inherited classes ;)
@param dbro The browse object, if caller already has it
"""
if dbro is None:
dbro = self.browse(cr, uid, ids, context=context)
if dynamic:
return node_res_obj
elif dbro.type == 'directory':
return node_dir
elif dbro.type == 'ressource':
return node_res_dir
else:
raise ValueError("dir node for %s type.", dbro.type)
def _prepare_context(self, cr, uid, nctx, context=None):
""" Fill nctx with properties for this database
@param nctx instance of nodes.node_context, to be filled
@param context ORM context (dict) for us
Note that this function is called *without* a list of ids,
it should behave the same for the whole database (based on the
ORM instance of document.directory).
Some databases may override this and attach properties to the
node_context. See WebDAV, CalDAV.
"""
return
def get_dir_permissions(self, cr, uid, ids, context=None):
"""Check what permission user 'uid' has on directory 'id'
"""
assert len(ids) == 1
res = 0
for pperms in [('read', 5), ('write', 2), ('unlink', 8)]:
try:
self.check_access_rule(cr, uid, ids, pperms[0], context=context)
res |= pperms[1]
except except_orm:
pass
return res
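    # Note (added): the returned value is a bitmask in the convention used by
    # node_class.check_perms() further below: read access contributes 5
    # (read + descend), write contributes 2 and unlink contributes 8, so a
    # directory the user fully controls yields 15.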
def _locate_child(self, cr, uid, root_id, uri, nparent, ncontext):
""" try to locate the node in uri,
Return a tuple (node_dir, remaining_path)
"""
return (node_database(context=ncontext), uri)
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default ={}
name = self.read(cr, uid, [id])[0]['name']
default.update(name=_("%s (copy)") % (name))
return super(document_directory,self).copy(cr, uid, id, default, context=context)
def _check_duplication(self, cr, uid, vals, ids=None, op='create'):
name=vals.get('name',False)
parent_id=vals.get('parent_id',False)
ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
ressource_id=vals.get('ressource_id',0)
if op=='write':
for directory in self.browse(cr, SUPERUSER_ID, ids):
if not name:
name=directory.name
if not parent_id:
parent_id=directory.parent_id and directory.parent_id.id or False
# TODO fix algo
if not ressource_parent_type_id:
ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
if not ressource_id:
ressource_id=directory.ressource_id and directory.ressource_id or 0
res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
if len(res):
return False
if op=='create':
res = self.search(cr, SUPERUSER_ID, [('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
if len(res):
return False
return True
def write(self, cr, uid, ids, vals, context=None):
if not self._check_duplication(cr, uid, vals, ids, op='write'):
raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
return super(document_directory,self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
if not self._check_duplication(cr, uid, vals):
raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
newname = vals.get('name',False)
if newname:
for illeg in ('/', '@', '$', '#'):
if illeg in newname:
raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
return super(document_directory,self).create(cr, uid, vals, context)
class document_directory_dctx(osv.osv):
""" In order to evaluate dynamic folders, child items could have a limiting
domain expression. For that, their parents will export a context where useful
information will be passed on.
If you define sth like "s_id" = "this.id" at a folder iterating over sales, its
children could have a domain like [('sale_id', = ,s_id )]
This system should be used recursively, that is, parent dynamic context will be
appended to all children down the tree.
"""
_name = 'document.directory.dctx'
_description = 'Directory Dynamic Context'
_columns = {
'dir_id': fields.many2one('document.directory', 'Directory', required=True, ondelete="cascade"),
'field': fields.char('Field', required=True, select=1, help="The name of the field."),
'expr': fields.char('Expression', required=True, help="A python expression used to evaluate the field.\n" + \
"You can use 'dir_id' for current dir, 'res_id', 'res_model' as a reference to the current record, in dynamic folders"),
}
class document_directory_content_type(osv.osv):
_name = 'document.directory.content.type'
_description = 'Directory Content Type'
_columns = {
'name': fields.char('Content Type', required=True),
'code': fields.char('Extension', size=4),
'active': fields.boolean('Active'),
'mimetype': fields.char('Mime Type')
}
_defaults = {
'active': lambda *args: 1
}
class document_directory_content(osv.osv):
_name = 'document.directory.content'
_description = 'Directory Content'
_order = "sequence"
def _extension_get(self, cr, uid, context=None):
cr.execute('select code,name from document_directory_content_type where active')
res = cr.fetchall()
return res
_columns = {
'name': fields.char('Content Name', required=True),
'sequence': fields.integer('Sequence', size=16),
'prefix': fields.char('Prefix', size=16),
'suffix': fields.char('Suffix', size=16),
'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
'include_name': fields.boolean('Include Record Name',
help="Check this field if you want that the name of the file to contain the record name." \
"\nIf set, the directory will have to be a resource one."),
'directory_id': fields.many2one('document.directory', 'Directory'),
}
_defaults = {
'extension': lambda *args: '.pdf',
'sequence': lambda *args: 1,
'include_name': lambda *args: 1,
}
def _file_get(self, cr, node, nodename, content, context=None):
""" return the nodes of a <node> parent having a <content> content
The return value MUST be false or a list of node_class objects.
"""
# TODO: respect the context!
model = node.res_model
if content.include_name and not model:
return False
res2 = []
tname = ''
if content.include_name:
record_name = node.displayname or ''
if record_name:
tname = (content.prefix or '') + record_name + (content.suffix or '') + (content.extension or '')
else:
tname = (content.prefix or '') + (content.name or '') + (content.suffix or '') + (content.extension or '')
        if '/' in tname:
tname=tname.replace('/', '_')
act_id = False
if 'dctx_res_id' in node.dctx:
act_id = node.dctx['res_id']
elif hasattr(node, 'res_id'):
act_id = node.res_id
else:
act_id = node.context.context.get('res_id',False)
if not nodename:
n = node_content(tname, node, node.context,content, act_id=act_id)
res2.append( n)
else:
if nodename == tname:
n = node_content(tname, node, node.context,content, act_id=act_id)
n.fill_fields(cr)
res2.append(n)
return res2
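    # Illustrative example (added; the field values are assumptions): a content
    # row with prefix='SO_', suffix='_print' and extension='.pdf' attached to a
    # record displayed as 'SO005' produces the virtual file name
    # 'SO_SO005_print.pdf'.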
def process_write(self, cr, uid, node, data, context=None):
if node.extension != '.pdf':
raise Exception("Invalid content: %s" % node.extension)
return True
def process_read(self, cr, uid, node, context=None):
if node.extension != '.pdf':
raise Exception("Invalid content: %s" % node.extension)
report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.report_id, context=context)
srv = openerp.report.interface.report_int._reports['report.'+report.report_name]
ctx = node.context.context.copy()
ctx.update(node.dctx)
pdf,pdftype = srv.create(cr, uid, [node.act_id,], {}, context=ctx)
return pdf
class ir_action_report_xml(osv.osv):
_name="ir.actions.report.xml"
_inherit ="ir.actions.report.xml"
def _model_get(self, cr, uid, ids, name, arg, context=None):
res = {}
model_pool = self.pool.get('ir.model')
for data in self.read(cr, uid, ids, ['model']):
model = data.get('model',False)
if model:
model_id =model_pool.search(cr, uid, [('model','=',model)])
if model_id:
res[data.get('id')] = model_id[0]
else:
res[data.get('id')] = False
return res
def _model_search(self, cr, uid, obj, name, args, context=None):
if not len(args):
return []
assert len(args) == 1 and args[0][1] == '=', 'expression is not what we expect: %r' % args
model_id= args[0][2]
if not model_id:
# a deviation from standard behavior: when searching model_id = False
# we return *all* reports, not just ones with empty model.
# One reason is that 'model' is a required field so far
return []
model = self.pool.get('ir.model').read(cr, uid, [model_id])[0]['model']
report_id = self.search(cr, uid, [('model','=',model)])
if not report_id:
return [('id','=','0')]
return [('id','in',report_id)]
_columns={
'model_id' : fields.function(_model_get, fnct_search=_model_search, string='Model Id'),
}
class document_storage(osv.osv):
""" The primary object for data storage. Deprecated. """
_name = 'document.storage'
_description = 'Storage Media'
def get_data(self, cr, uid, id, file_node, context=None, fil_obj=None):
""" retrieve the contents of some file_node having storage_id = id
optionally, fil_obj could point to the browse object of the file
(ir.attachment)
"""
boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
data = ira.datas
if data:
out = data.decode('base64')
else:
out = ''
return out
def get_file(self, cr, uid, id, file_node, mode, context=None):
""" Return a file-like object for the contents of some node
"""
if context is None:
context = {}
boo = self.browse(cr, uid, id, context=context)
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
return nodefd_db(file_node, ira_browse=ira, mode=mode)
def set_data(self, cr, uid, id, file_node, data, context=None, fil_obj=None):
""" store the data.
This function MUST be used from an ir.attachment. It wouldn't make sense
to store things persistently for other types (dynamic).
"""
boo = self.browse(cr, uid, id, context=context)
if fil_obj:
ira = fil_obj
else:
ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
_logger.debug( "Store data for ir.attachment #%d." % ira.id)
store_fname = None
fname = None
filesize = len(data)
self.pool.get('ir.attachment').write(cr, uid, [file_node.file_id], {'datas': data.encode('base64')}, context=context)
# 2nd phase: store the metadata
try:
icont = ''
mime = ira.file_type
if not mime:
mime = ""
try:
mime, icont = cntIndex.doIndex(data, ira.datas_fname, ira.file_type or None, fname)
except Exception:
_logger.debug('Cannot index file.', exc_info=True)
pass
try:
icont_u = ustr(icont)
except UnicodeError:
icont_u = ''
# a hack: /assume/ that the calling write operation will not try
# to write the fname and size, and update them in the db concurrently.
# We cannot use a write() here, because we are already in one.
cr.execute('UPDATE ir_attachment SET file_size = %s, index_content = %s, file_type = %s WHERE id = %s', (filesize, icont_u, mime, file_node.file_id))
self.pool.get('ir.attachment').invalidate_cache(cr, uid, ['file_size', 'index_content', 'file_type'], [file_node.file_id], context=context)
file_node.content_length = filesize
file_node.content_type = mime
return True
except Exception, e :
_logger.warning("Cannot save data.", exc_info=True)
# should we really rollback once we have written the actual data?
# at the db case (only), that rollback would be safe
raise except_orm(_('Error at doc write!'), str(e))
def _str2time(cre):
""" Convert a string with time representation (from db) into time (float)
Note: a place to fix if datetime is used in db.
"""
if not cre:
return time.time()
frac = 0.0
if isinstance(cre, basestring) and '.' in cre:
fdot = cre.find('.')
frac = float(cre[fdot:])
cre = cre[:fdot]
return time.mktime(time.strptime(cre,'%Y-%m-%d %H:%M:%S')) + frac
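# Illustrative example (added): _str2time('2014-05-02 10:20:30.25') yields the
# epoch seconds of that timestamp plus the fractional 0.25; a falsy value falls
# back to the current time.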
def get_node_context(cr, uid, context):
return node_context(cr, uid, context)
#
# An object that represents a uri
#    path: the uri of the object
#    content: the Content it belongs to (_print.pdf)
#    type: content or collection
#       content: object = res.partner
#       collection: object = directory, object2 = res.partner
#       file: object = ir.attachment
#    root: if we are at the first directory of a resource
#
class node_context(object):
""" This is the root node, representing access to some particular context
A context is a set of persistent data, which may influence the structure
of the nodes. All other transient information during a data query should
be passed down with function arguments.
"""
cached_roots = {}
node_file_class = None
def __init__(self, cr, uid, context=None):
self.dbname = cr.dbname
self.uid = uid
        if context is None:
            context = {}
        context['uid'] = uid
        self.context = context
self._dirobj = openerp.registry(cr.dbname).get('document.directory')
self.node_file_class = node_file
self.extra_ctx = {} # Extra keys for context, that do _not_ trigger inequality
assert self._dirobj
self._dirobj._prepare_context(cr, uid, self, context=context)
self.rootdir = False #self._dirobj._get_root_directory(cr,uid,context)
def __eq__(self, other):
if not type(other) == node_context:
return False
if self.dbname != other.dbname:
return False
if self.uid != other.uid:
return False
if self.context != other.context:
return False
if self.rootdir != other.rootdir:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def get(self, name, default=None):
return self.context.get(name, default)
def get_uri(self, cr, uri):
""" Although this fn passes back to doc.dir, it is needed since
it is a potential caching point.
"""
(ndir, duri) = self._dirobj._locate_child(cr, self.uid, self.rootdir, uri, None, self)
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def get_dir_node(self, cr, dbro):
"""Create (or locate) a node for a directory
@param dbro a browse object of document.directory
"""
fullpath = dbro.get_full_path(context=self.context)
klass = dbro.get_node_class(dbro, context=self.context)
return klass(fullpath, None ,self, dbro)
def get_file_node(self, cr, fbro):
""" Create or locate a node for a static file
@param fbro a browse object of an ir.attachment
"""
parent = None
if fbro.parent_id:
parent = self.get_dir_node(cr, fbro.parent_id)
return self.node_file_class(fbro.name, parent, self, fbro)
class node_class(object):
""" this is a superclass for our inodes
It is an API for all code that wants to access the document files.
Nodes have attributes which contain usual file properties
"""
our_type = 'baseclass'
DAV_PROPS = None
DAV_M_NS = None
def __init__(self, path, parent, context):
assert isinstance(context,node_context)
assert (not parent ) or isinstance(parent,node_class)
self.path = path
self.context = context
self.type=self.our_type
self.parent = parent
self.uidperms = 5 # computed permissions for our uid, in unix bits
self.mimetype = 'application/octet-stream'
self.create_date = None
self.write_date = None
self.unixperms = 0660
self.uuser = 'user'
self.ugroup = 'group'
self.content_length = 0
# dynamic context:
self.dctx = {}
if parent:
self.dctx = parent.dctx.copy()
self.displayname = 'Object'
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def full_path(self):
""" Return the components of the full path for some
node.
The returned list only contains the names of nodes.
"""
if self.parent:
s = self.parent.full_path()
else:
s = []
if isinstance(self.path,list):
s+=self.path
elif self.path is None:
s.append('')
else:
s.append(self.path)
return s #map(lambda x: '/' +x, s)
def __repr__(self):
return "%s@/%s" % (self.our_type, '/'.join(self.full_path()))
def children(self, cr, domain=None):
print "node_class.children()"
return [] #stub
def child(self, cr, name, domain=None):
print "node_class.child()"
return None
def get_uri(self, cr, uri):
duri = uri
ndir = self
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def path_get(self):
print "node_class.path_get()"
return False
def get_data(self, cr):
raise TypeError('No data for %s.'% self.type)
def open_data(self, cr, mode):
""" Open a node_descriptor object for this node.
        @param mode  the mode of open, eg 'r', 'w', 'a', like file.open()
        This operation may lock the data for this node (and across
other node hierarchies), until the descriptor is close()d. If
the node is locked, subsequent opens (depending on mode) may
immediately fail with an exception (which?).
For this class, there is no data, so no implementation. Each
child class that has data should override this.
"""
raise TypeError('No data for %s.' % self.type)
def get_etag(self, cr):
""" Get a tag, unique per object + modification.
see. http://tools.ietf.org/html/rfc2616#section-13.3.3 """
return '"%s-%s"' % (self._get_ttag(cr), self._get_wtag(cr))
def _get_wtag(self, cr):
""" Return the modification time as a unique, compact string """
return str(_str2time(self.write_date)).replace('.','')
def _get_ttag(self, cr):
""" Get a unique tag for this type/id of object.
        Must be overridden, so that each node is uniquely identified.
"""
print "node_class.get_ttag()",self
raise NotImplementedError("get_ttag stub()")
def get_dav_props(self, cr):
""" If this class has special behaviour for GroupDAV etc, export
its capabilities """
# This fn is placed here rather than WebDAV, because we want the
# baseclass methods to apply to all node subclasses
return self.DAV_PROPS or {}
def match_dav_eprop(self, cr, match, ns, prop):
res = self.get_dav_eprop(cr, ns, prop)
if res == match:
return True
return False
def get_dav_eprop(self, cr, ns, prop):
if not self.DAV_M_NS:
return None
        if ns in self.DAV_M_NS:
prefix = self.DAV_M_NS[ns]
else:
_logger.debug('No namespace: %s ("%s").',ns, prop)
return None
mname = prefix + "_" + prop.replace('-','_')
if not hasattr(self, mname):
return None
try:
m = getattr(self, mname)
r = m(cr)
return r
except AttributeError:
_logger.debug('The property %s is not supported.' % prop, exc_info=True)
return None
def get_dav_resourcetype(self, cr):
""" Get the DAV resource type.
Is here because some nodes may exhibit special behaviour, like
CalDAV/GroupDAV collections
"""
raise NotImplementedError
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
""" Move this node to a new parent directory.
@param ndir_node the collection that this node should be moved under
@param new_name a name to rename this node to. If omitted, the old
name is preserved
@param fil_obj, can be None, is the browse object for the file,
if already available.
@param ndir_obj must be the browse object to the new doc.directory
location, where this node should be moved to.
in_write: When called by write(), we shouldn't attempt to write the
object, but instead return the dict of vals (avoid re-entrance).
If false, we should write all data to the object, here, as if the
caller won't do anything after calling move_to()
Return value:
True: the node is moved, the caller can update other values, too.
False: the node is either removed or fully updated, the caller
must discard the fil_obj, not attempt to write any more to it.
dict: values to write back to the object. *May* contain a new id!
Depending on src and target storage, implementations of this function
could do various things.
Should also consider node<->content, dir<->dir moves etc.
Move operations, as instructed from APIs (e.g. request from DAV) could
use this function.
"""
raise NotImplementedError(repr(self))
def create_child(self, cr, path, data=None):
""" Create a regular file under this node
"""
_logger.warning("Attempted to create a file under %r, not possible.", self)
raise IOError(errno.EPERM, "Not allowed to create file(s) here.")
def create_child_collection(self, cr, objname):
""" Create a child collection (directory) under self
"""
_logger.warning("Attempted to create a collection under %r, not possible.", self)
raise IOError(errno.EPERM, "Not allowed to create folder(s) here.")
def rm(self, cr):
raise NotImplementedError(repr(self))
def rmcol(self, cr):
raise NotImplementedError(repr(self))
def get_domain(self, cr, filters):
# TODO Document
return []
def check_perms(self, perms):
""" Check the permissions of the current node.
        @param perms  either an integer with the bits to check, or
a string with the permission letters
Permissions of nodes are (in a unix way):
1, x : allow descend into dir
2, w : allow write into file, or modification to dir
4, r : allow read of file, or listing of dir contents
8, u : allow remove (unlink)
"""
if isinstance(perms, str):
pe2 = 0
chars = { 'x': 1, 'w': 2, 'r': 4, 'u': 8 }
for c in perms:
pe2 = pe2 | chars[c]
perms = pe2
elif isinstance(perms, int):
if perms < 0 or perms > 15:
raise ValueError("Invalid permission bits.")
else:
raise ValueError("Invalid permission attribute.")
return ((self.uidperms & perms) == perms)
class node_database(node_class):
""" A node representing the database directory
"""
our_type = 'database'
def __init__(self, path=None, parent=False, context=None):
if path is None:
path = []
super(node_database,self).__init__(path, parent, context)
self.unixperms = 040750
self.uidperms = 5
def children(self, cr, domain=None):
res = self._child_get(cr, domain=domain) + self._file_get(cr)
return res
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=None)
if res:
return res[0]
res = self._file_get(cr,name)
if res:
return res[0]
return None
def _child_get(self, cr, name=False, domain=None):
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('parent_id','=', False), ('ressource_parent_type_id','=',False)]
if name:
where.append(('name','=',name))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM, "Permission into directory denied.")
if domain:
where = where + domain
ids = dirobj.search(cr, uid, where, context=ctx)
res = []
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name, self, self.context,dirr))
return res
def _file_get(self, cr, nodename=False):
res = []
return res
def _get_ttag(self, cr):
return 'db-%s' % cr.dbname
def mkdosname(company_name, default='noname'):
""" convert a string to a dos-like name"""
if not company_name:
return default
badchars = ' !@#$%^`~*()+={}[];:\'"/?.<>'
n = ''
for c in company_name[:8]:
n += (c in badchars and '_') or c
return n
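# Illustrative example (added): mkdosname("My Company") -> "My_Compa" (only the
# first eight characters are kept, and spaces/punctuation become underscores).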
def _uid2unixperms(perms, has_owner):
""" Convert the uidperms and the owner flag to full unix bits
"""
res = 0
if has_owner:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
elif perms & 0x02:
res |= (perms & 0x07) << 6
res |= (perms & 0x07) << 3
else:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
res |= 0x05
return res
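# Illustrative example (added): _uid2unixperms(7, True) -> 0750, i.e. full bits
# for the owning user and read/execute for the group, no world access.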
class node_dir(node_database):
our_type = 'collection'
def __init__(self, path, parent, context, dirr, dctx=None):
super(node_dir,self).__init__(path, parent,context)
self.dir_id = dirr and dirr.id or False
#todo: more info from dirr
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = dirr and dirr.create_date or False
self.domain = dirr and dirr.domain or []
self.res_model = dirr and dirr.ressource_type_id and dirr.ressource_type_id.model or False
# TODO: the write date should be MAX(file.write)..
self.write_date = dirr and (dirr.write_date or dirr.create_date) or False
self.content_length = 0
try:
self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
self.uidperms = dirr.get_dir_permissions()
self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
if dctx:
self.dctx.update(dctx)
dc2 = self.context.context
dc2.update(self.dctx)
dc2['dir_id'] = self.dir_id
self.displayname = dirr and dirr.name or False
if dirr and dirr.dctx_ids:
for dfld in dirr.dctx_ids:
try:
self.dctx[dfld.field] = safe_eval(dfld.expr,dc2)
                except Exception:
                    _logger.warning("Cannot eval %s.", dfld.expr, exc_info=True)
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
# Two directory nodes, for the same document.directory, may have a
# different context! (dynamic folders)
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def get_data(self, cr):
#res = ''
#for child in self.children(cr):
# res += child.get_data(cr)
return None
def _file_get(self, cr, nodename=False):
res = super(node_dir,self)._file_get(cr, nodename)
is_allowed = self.check_perms(nodename and 1 or 5)
if not is_allowed:
raise IOError(errno.EPERM, "Permission into directory denied.")
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
res3 = cntobj._file_get(cr, self, nodename, content)
if res3:
res.extend(res3)
return res
def _child_get(self, cr, name=None, domain=None):
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('parent_id','=',self.dir_id)]
if name:
where.append(('name','=',name))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM, "Permission into directory denied.")
if not domain:
domain = []
where2 = where + domain + [('ressource_parent_type_id','=',False)]
ids = dirobj.search(cr, uid, where2, context=ctx)
res = []
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name, self, self.context,dirr))
# Static directories should never return files with res_model/res_id
# because static dirs are /never/ related to a record.
# In fact, files related to some model and parented by the root dir
# (the default), will NOT be accessible in the node system unless
# a resource folder for that model exists (with resource_find_all=True).
# Having resource attachments in a common folder is bad practice,
# because they would be visible to all users, and their names may be
# the same, conflicting.
where += [('res_model', '=', False)]
fil_obj = dirobj.pool.get('ir.attachment')
ids = fil_obj.search(cr, uid, where, context=ctx)
if ids:
for fil in fil_obj.browse(cr, uid, ids, context=ctx):
klass = self.context.node_file_class
res.append(klass(fil.name, self, self.context, fil))
return res
def rmcol(self, cr):
uid = self.context.uid
directory = self.context._dirobj.browse(cr, uid, self.dir_id)
res = False
if not directory:
            raise OSError(2, 'No such file or directory.')
if not self.check_perms('u'):
raise IOError(errno.EPERM,"Permission denied.")
if directory._name == 'document.directory':
if self.children(cr):
raise OSError(39, 'Directory not empty.')
res = self.context._dirobj.unlink(cr, uid, [directory.id])
else:
raise OSError(1, 'Operation is not permitted.')
return res
def create_child_collection(self, cr, objname):
object2 = False
if not self.check_perms(2):
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
obj = dirobj.browse(cr, uid, self.dir_id)
if obj and (obj.type == 'ressource') and not object2:
raise OSError(1, 'Operation is not permitted.')
#objname = uri2[-1]
val = {
'name': objname,
'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
'ressource_id': object2 and object2.id or False,
'parent_id' : obj and obj.id or False
}
return dirobj.create(cr, uid, val)
def create_child(self, cr, path, data=None):
""" API function to create a child file object and node
Return the node_* created
"""
if not self.check_perms(2):
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
fil_obj=dirobj.pool.get('ir.attachment')
val = {
'name': path,
'datas_fname': path,
'parent_id': self.dir_id,
# Datas are not set here
}
fil_id = fil_obj.create(cr, uid, val, context=ctx)
fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
fnode = node_file(path, self, self.context, fil)
if data is not None:
fnode.set_data(cr, data, fil)
return fnode
def _get_ttag(self, cr):
return 'dir-%d' % self.dir_id
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
""" Move directory. This operation is simple, since the present node is
only used for static, simple directories.
Note /may/ be called with ndir_node = None, to rename the document root.
"""
if ndir_node and (ndir_node.context != self.context):
raise NotImplementedError("Cannot move directories between contexts.")
if (not self.check_perms('u')) or (not ndir_node.check_perms('w')):
raise IOError(errno.EPERM,"Permission denied.")
dir_obj = self.context._dirobj
if not fil_obj:
dbro = dir_obj.browse(cr, self.context.uid, self.dir_id, context=self.context.context)
else:
dbro = dir_obj
assert dbro.id == self.dir_id
if not dbro:
raise IndexError("Cannot locate dir %d", self.dir_id)
if (not self.parent) and ndir_node:
if not dbro.parent_id:
raise IOError(errno.EPERM, "Cannot move the root directory!")
self.parent = self.context.get_dir_node(cr, dbro.parent_id)
assert self.parent
if self.parent != ndir_node:
_logger.debug('Cannot move dir %r from %r to %r.', self, self.parent, ndir_node)
raise NotImplementedError('Cannot move dir to another dir.')
ret = {}
if new_name and (new_name != dbro.name):
if ndir_node.child(cr, new_name):
raise IOError(errno.EEXIST, "Destination path already exists.")
ret['name'] = new_name
del dbro
if not in_write:
# We have to update the data ourselves
if ret:
ctx = self.context.context.copy()
ctx['__from_node'] = True
dir_obj.write(cr, self.context.uid, [self.dir_id,], ret, ctx)
ret = True
return ret
class node_res_dir(node_class):
""" A folder containing dynamic folders
        A special sibling to node_dir, which only contains dynamically
        created folders, one for each resource in the foreign model.
All folders should be of type node_res_obj and merely behave like
node_dirs (with limited domain).
"""
our_type = 'collection'
res_obj_class = None
def __init__(self, path, parent, context, dirr, dctx=None ):
super(node_res_dir,self).__init__(path, parent, context)
self.dir_id = dirr.id
#todo: more info from dirr
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = dirr.create_date
# TODO: the write date should be MAX(file.write)..
self.write_date = dirr.write_date or dirr.create_date
self.content_length = 0
try:
self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
self.uidperms = dirr.get_dir_permissions()
self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
self.res_model = dirr.ressource_type_id and dirr.ressource_type_id.model or False
self.resm_id = dirr.ressource_id
self.res_find_all = dirr.resource_find_all
self.namefield = dirr.resource_field.name or 'name'
self.displayname = dirr.name
# Important: the domain is evaluated using the *parent* dctx!
self.domain = dirr.domain
self.ressource_tree = dirr.ressource_tree
# and then, we add our own vars in the dctx:
if dctx:
self.dctx.update(dctx)
# and then, we prepare a dctx dict, for deferred evaluation:
self.dctx_dict = {}
for dfld in dirr.dctx_ids:
self.dctx_dict[dfld.field] = dfld.expr
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
# Two nodes, for the same document.directory, may have a
# different context! (dynamic folders)
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def children(self, cr, domain=None):
return self._child_get(cr, domain=domain)
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
return None
def _child_get(self, cr, name=None, domain=None):
""" return virtual children of resource, based on the
foreign object.
Note that many objects use NULL for a name, so we should
better call the name_search(),name_get() set of methods
"""
if self.res_model not in self.context._dirobj.pool:
return []
obj = self.context._dirobj.pool[self.res_model]
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
ctx.update(self.context.extra_ctx)
where = []
if self.domain:
app = safe_eval(self.domain, ctx)
if not app:
pass
elif isinstance(app, list):
where.extend(app)
elif isinstance(app, tuple):
where.append(app)
else:
raise RuntimeError("Incorrect domain expr: %s." % self.domain)
if self.resm_id:
where.append(('id','=',self.resm_id))
if name:
# The =like character will match underscores against any characters
# including the special ones that couldn't exist in a FTP/DAV request
where.append((self.namefield,'=like',name.replace('\\','\\\\')))
is_allowed = self.check_perms(1)
else:
is_allowed = self.check_perms(5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
# print "Where clause for %s" % self.res_model, where
if self.ressource_tree:
object2 = False
if self.resm_id:
object2 = dirobj.pool[self.res_model].browse(cr, uid, self.resm_id) or False
if obj._parent_name in obj.fields_get(cr, uid):
where.append((obj._parent_name,'=',object2 and object2.id or False))
resids = obj.search(cr, uid, where, context=ctx)
res = []
for bo in obj.browse(cr, uid, resids, context=ctx):
if not bo:
continue
res_name = getattr(bo, self.namefield)
if not res_name:
continue
# Yes! we can't do better but skip nameless records.
# Escape the name for characters not supported in filenames
res_name = res_name.replace('/','_') # any other weird char?
if name and (res_name != ustr(name)):
# we have matched _ to any character, but we only meant to match
# the special ones.
# Eg. 'a_c' will find 'abc', 'a/c', 'a_c', may only
# return 'a/c' and 'a_c'
continue
res.append(self.res_obj_class(res_name, self.dir_id, self, self.context, self.res_model, bo))
return res
def _get_ttag(self, cr):
return 'rdir-%d' % self.dir_id
class node_res_obj(node_class):
""" A dynamically created folder.
        A special sibling to node_dir, which only contains dynamically
        created folders, one for each resource in the foreign model.
All folders should be of type node_res_obj and merely behave like
node_dirs (with limited domain).
"""
our_type = 'collection'
def __init__(self, path, dir_id, parent, context, res_model, res_bo, res_id=None):
super(node_res_obj,self).__init__(path, parent,context)
assert parent
#todo: more info from dirr
self.dir_id = dir_id
self.mimetype = 'application/x-directory'
# 'httpd/unix-directory'
self.create_date = parent.create_date
# TODO: the write date should be MAX(file.write)..
self.write_date = parent.write_date
self.content_length = 0
self.uidperms = parent.uidperms & 15
self.unixperms = 040000 | _uid2unixperms(self.uidperms, True)
self.uuser = parent.uuser
self.ugroup = parent.ugroup
self.res_model = res_model
self.domain = parent.domain
self.displayname = path
self.dctx_dict = parent.dctx_dict
if isinstance(parent, node_res_dir):
self.res_find_all = parent.res_find_all
else:
self.res_find_all = False
if res_bo:
self.res_id = res_bo.id
dc2 = self.context.context.copy()
dc2.update(self.dctx)
dc2['res_model'] = res_model
dc2['res_id'] = res_bo.id
dc2['this'] = res_bo
for fld,expr in self.dctx_dict.items():
try:
self.dctx[fld] = safe_eval(expr, dc2)
                except Exception:
                    _logger.warning("Cannot eval %s for %s.", expr, fld, exc_info=True)
else:
self.res_id = res_id
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
if not self.res_model == other.res_model:
return False
if not self.res_id == other.res_id:
return False
if self.domain != other.domain:
return False
if self.res_find_all != other.res_find_all:
return False
if self.dctx != other.dctx:
return False
return self.dir_id == other.dir_id
def children(self, cr, domain=None):
return self._child_get(cr, domain=domain) + self._file_get(cr)
def child(self, cr, name, domain=None):
res = self._child_get(cr, name, domain=domain)
if res:
return res[0]
res = self._file_get(cr, name)
if res:
return res[0]
return None
def _file_get(self, cr, nodename=False):
res = []
is_allowed = self.check_perms((nodename and 1) or 5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
#if self.domain:
# where.extend(self.domain)
# print "res_obj file_get clause", where
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
res3 = cntobj._file_get(cr, self, nodename, content, context=ctx)
if res3:
res.extend(res3)
return res
def get_dav_props_DEPR(self, cr):
# Deprecated! (but document_ics must be cleaned, first)
res = {}
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr, uid, where, context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
if content.extension == '.ics': # FIXME: call the content class!
res['http://groupdav.org/'] = ('resourcetype',)
return res
def get_dav_eprop_DEPR(self, cr, ns, prop):
# Deprecated!
if ns != 'http://groupdav.org/' or prop != 'resourcetype':
_logger.warning("Who asks for %s:%s?" % (ns, prop))
return None
cntobj = self.context._dirobj.pool.get('document.directory.content')
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
where = [('directory_id','=',self.dir_id) ]
ids = cntobj.search(cr,uid,where,context=ctx)
for content in cntobj.browse(cr, uid, ids, context=ctx):
# TODO: remove relic of GroupDAV
if content.extension == '.ics': # FIXME: call the content class!
return ('vevent-collection','http://groupdav.org/')
return None
def _child_get(self, cr, name=None, domain=None):
dirobj = self.context._dirobj
is_allowed = self.check_perms((name and 1) or 5)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
directory = dirobj.browse(cr, uid, self.dir_id)
obj = dirobj.pool[self.res_model]
where = []
res = []
if name:
where.append(('name','=',name))
# Directory Structure display in tree structure
if self.res_id and directory.ressource_tree:
where1 = []
if name:
where1.append(('name','=like',name.replace('\\','\\\\')))
if obj._parent_name in obj.fields_get(cr, uid):
where1.append((obj._parent_name, '=', self.res_id))
namefield = directory.resource_field.name or 'name'
resids = obj.search(cr, uid, where1, context=ctx)
for bo in obj.browse(cr, uid, resids, context=ctx):
if not bo:
continue
res_name = getattr(bo, namefield)
if not res_name:
continue
res_name = res_name.replace('/', '_')
if name and (res_name != ustr(name)):
continue
# TODO Revise
klass = directory.get_node_class(directory, dynamic=True, context=ctx)
rnode = klass(res_name, dir_id=self.dir_id, parent=self, context=self.context,
res_model=self.res_model, res_bo=bo)
rnode.res_find_all = self.res_find_all
res.append(rnode)
where2 = where + [('parent_id','=',self.dir_id) ]
ids = dirobj.search(cr, uid, where2, context=ctx)
bo = obj.browse(cr, uid, self.res_id, context=ctx)
for dirr in dirobj.browse(cr, uid, ids, context=ctx):
if name and (name != dirr.name):
continue
if dirr.type == 'directory':
klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
res.append(klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id))
elif dirr.type == 'ressource':
# child resources can be controlled by properly set dctx
klass = dirr.get_node_class(dirr, context=ctx)
res.append(klass(dirr.name,self,self.context, dirr, {'active_id': self.res_id})) # bo?
fil_obj = dirobj.pool.get('ir.attachment')
if self.res_find_all:
where2 = where
where3 = where2 + [('res_model', '=', self.res_model), ('res_id','=',self.res_id)]
# print "where clause for dir_obj", where3
ids = fil_obj.search(cr, uid, where3, context=ctx)
if ids:
for fil in fil_obj.browse(cr, uid, ids, context=ctx):
klass = self.context.node_file_class
res.append(klass(fil.name, self, self.context, fil))
        # Get child resource directories
if directory.ressource_type_id and directory.ressource_type_id.id:
where4 = where + [('ressource_parent_type_id','=',directory.ressource_type_id.id)]
where5 = where4 + ['|', ('ressource_id','=',0), ('ressource_id','=',self.res_id)]
dirids = dirobj.search(cr,uid, where5)
for dirr in dirobj.browse(cr, uid, dirids, context=ctx):
if dirr.type == 'directory' and not dirr.parent_id:
klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
rnode = klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id)
rnode.res_find_all = dirr.resource_find_all
res.append(rnode)
if dirr.type == 'ressource':
klass = dirr.get_node_class(dirr, context=ctx)
rnode = klass(dirr.name, self, self.context, dirr, {'active_id': self.res_id})
rnode.res_find_all = dirr.resource_find_all
res.append(rnode)
return res
def create_child_collection(self, cr, objname):
dirobj = self.context._dirobj
is_allowed = self.check_perms(2)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
res_obj = dirobj.pool[self.res_model]
object2 = res_obj.browse(cr, uid, self.res_id) or False
obj = dirobj.browse(cr, uid, self.dir_id)
if obj and (obj.type == 'ressource') and not object2:
raise OSError(1, 'Operation is not permitted.')
val = {
'name': objname,
'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
'ressource_id': object2 and object2.id or False,
'parent_id' : False,
'resource_find_all': False,
}
if (obj and (obj.type in ('directory'))) or not object2:
val['parent_id'] = obj and obj.id or False
return dirobj.create(cr, uid, val)
def create_child(self, cr, path, data=None):
""" API function to create a child file object and node
Return the node_* created
"""
is_allowed = self.check_perms(2)
if not is_allowed:
raise IOError(errno.EPERM,"Permission denied.")
dirobj = self.context._dirobj
uid = self.context.uid
ctx = self.context.context.copy()
ctx.update(self.dctx)
fil_obj=dirobj.pool.get('ir.attachment')
val = {
'name': path,
'datas_fname': path,
'res_model': self.res_model,
'res_id': self.res_id,
# Datas are not set here
}
if not self.res_find_all:
val['parent_id'] = self.dir_id
fil_id = fil_obj.create(cr, uid, val, context=ctx)
fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
klass = self.context.node_file_class
fnode = klass(path, self, self.context, fil)
if data is not None:
fnode.set_data(cr, data, fil)
return fnode
def _get_ttag(self, cr):
return 'rodir-%d-%d' % (self.dir_id, self.res_id)
node_res_dir.res_obj_class = node_res_obj
class node_file(node_class):
our_type = 'file'
def __init__(self, path, parent, context, fil):
super(node_file,self).__init__(path, parent,context)
self.file_id = fil.id
#todo: more info from ir_attachment
if fil.file_type and '/' in fil.file_type:
self.mimetype = str(fil.file_type)
self.create_date = fil.create_date
self.write_date = fil.write_date or fil.create_date
self.content_length = fil.file_size
self.displayname = fil.name
self.uidperms = 14
if parent:
if not parent.check_perms('x'):
self.uidperms = 0
elif not parent.check_perms('w'):
self.uidperms = 4
try:
self.uuser = (fil.user_id and fil.user_id.login) or 'nobody'
except Exception:
self.uuser = 'nobody'
self.ugroup = mkdosname(fil.company_id and fil.company_id.name, default='nogroup')
def __eq__(self, other):
if type(self) != type(other):
return False
if not self.context == other.context:
return False
if self.dctx != other.dctx:
return False
return self.file_id == other.file_id
def open_data(self, cr, mode):
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.get_file(cr, self.context.uid, None, self, mode=mode, context=self.context.context)
def rm(self, cr):
uid = self.context.uid
if not self.check_perms(8):
raise IOError(errno.EPERM, "Permission denied.")
document_obj = self.context._dirobj.pool.get('ir.attachment')
if self.type in ('collection','database'):
return False
document = document_obj.browse(cr, uid, self.file_id, context=self.context.context)
res = False
if document and document._name == 'ir.attachment':
res = document_obj.unlink(cr, uid, [document.id])
return res
def fix_ppath(self, cr, fbro):
"""Sometimes we may init this w/o path, parent.
This function fills the missing path from the file browse object
Note: this may be an expensive operation, do on demand. However,
once caching is in, we might want to do that at init time and keep
this object anyway
"""
if self.path or self.parent:
return
assert fbro
uid = self.context.uid
dirpath = []
if fbro.parent_id:
dirobj = self.context._dirobj.pool.get('document.directory')
dirpath = dirobj.get_full_path(cr, uid, fbro.parent_id.id, context=self.context.context)
if fbro.datas_fname:
dirpath.append(fbro.datas_fname)
else:
dirpath.append(fbro.name)
if len(dirpath)>1:
self.path = dirpath
else:
self.path = dirpath[0]
def get_data(self, cr, fil_obj=None):
""" Retrieve the data for some file.
fil_obj may optionally be specified, and should be a browse object
for the file. This is useful when the caller has already initiated
the browse object. """
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.get_data(cr, self.context.uid, None, self,self.context.context, fil_obj)
def get_data_len(self, cr, fil_obj=None):
bin_size = self.context.context.get('bin_size', False)
if bin_size and not self.content_length:
self.content_length = fil_obj.db_datas
return self.content_length
def set_data(self, cr, data, fil_obj=None):
""" Store data at some file.
fil_obj may optionally be specified, and should be a browse object
for the file. This is useful when the caller has already initiated
the browse object. """
if not self.check_perms(2):
raise IOError(errno.EPERM, "Permission denied.")
stobj = self.context._dirobj.pool.get('document.storage')
return stobj.set_data(cr, self.context.uid, None, self, data, self.context.context, fil_obj)
def _get_ttag(self, cr):
return 'file-%d' % self.file_id
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
if ndir_node and ndir_node.context != self.context:
raise NotImplementedError("Cannot move files between contexts.")
        if (not self.check_perms(8)) or (ndir_node and not ndir_node.check_perms(2)):
raise IOError(errno.EPERM, "Permission denied.")
doc_obj = self.context._dirobj.pool.get('ir.attachment')
if not fil_obj:
dbro = doc_obj.browse(cr, self.context.uid, self.file_id, context=self.context.context)
else:
dbro = fil_obj
assert dbro.id == self.file_id, "%s != %s for %r." % (dbro.id, self.file_id, self)
if not dbro:
raise IndexError("Cannot locate doc %d.", self.file_id)
if (not self.parent):
# there *must* be a parent node for this one
self.parent = self.context.get_dir_node(cr, dbro.parent_id)
assert self.parent
ret = {}
if ndir_node and self.parent != ndir_node:
if not (isinstance(self.parent, node_dir) and isinstance(ndir_node, node_dir)):
_logger.debug('Cannot move file %r from %r to %r.', self, self.parent, ndir_node)
raise NotImplementedError('Cannot move files between dynamic folders.')
if not ndir_obj:
ndir_obj = self.context._dirobj.browse(cr, self.context.uid, \
ndir_node.dir_id, context=self.context.context)
assert ndir_obj.id == ndir_node.dir_id
r2 = { 'parent_id': ndir_obj.id }
ret.update(r2)
if new_name and (new_name != dbro.name):
if len(ret):
raise NotImplementedError("Cannot rename and move.") # TODO
r2 = { 'name': new_name, 'datas_fname': new_name }
ret.update(r2)
del dbro
if not in_write:
# We have to update the data ourselves
if ret:
ctx = self.context.context.copy()
ctx['__from_node'] = True
doc_obj.write(cr, self.context.uid, [self.file_id,], ret, ctx )
ret = True
return ret
class node_content(node_class):
our_type = 'content'
def __init__(self, path, parent, context, cnt, dctx=None, act_id=None):
super(node_content,self).__init__(path, parent,context)
self.cnt_id = cnt.id
self.create_date = False
self.write_date = False
self.content_length = False
self.unixperms = 0640
if parent:
self.uidperms = parent.uidperms & 14
self.uuser = parent.uuser
self.ugroup = parent.ugroup
self.extension = cnt.extension
self.report_id = cnt.report_id and cnt.report_id.id
#self.mimetype = cnt.extension.
self.displayname = path
if dctx:
self.dctx.update(dctx)
self.act_id = act_id
def fill_fields(self, cr, dctx=None):
""" Try to read the object and fill missing fields, like mimetype,
dates etc.
This function must be different from the constructor, because
it uses the db cursor.
"""
cr.execute('SELECT DISTINCT mimetype FROM document_directory_content_type WHERE active AND code = %s;',
(self.extension,))
res = cr.fetchall()
if res and res[0][0]:
self.mimetype = str(res[0][0])
def get_data(self, cr, fil_obj=None):
cntobj = self.context._dirobj.pool.get('document.directory.content')
if not self.check_perms(4):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
data = cntobj.process_read(cr, self.context.uid, self, ctx)
if data:
self.content_length = len(data)
return data
def open_data(self, cr, mode):
if mode.endswith('b'):
mode = mode[:-1]
if mode in ('r', 'w'):
cperms = mode[:1]
elif mode in ('r+', 'w+'):
cperms = 'rw'
else:
raise IOError(errno.EINVAL, "Cannot open at mode %s." % mode)
if not self.check_perms(cperms):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
return nodefd_content(self, cr, mode, ctx)
def get_data_len(self, cr, fil_obj=None):
# FIXME : here, we actually generate the content twice!!
# we should have cached the generated content, but it is
# not advisable to do keep it in memory, until we have a cache
# expiration logic.
if not self.content_length:
self.get_data(cr,fil_obj)
return self.content_length
def set_data(self, cr, data, fil_obj=None):
cntobj = self.context._dirobj.pool.get('document.directory.content')
if not self.check_perms(2):
raise IOError(errno.EPERM, "Permission denied.")
ctx = self.context.context.copy()
ctx.update(self.dctx)
return cntobj.process_write(cr, self.context.uid, self, data, ctx)
def _get_ttag(self, cr):
return 'cnt-%d%s' % (self.cnt_id,(self.act_id and ('-' + str(self.act_id))) or '')
def get_dav_resourcetype(self, cr):
return ''
class node_descriptor(object):
"""A file-like interface to the data contents of a node.
This class is NOT a node, but an /open descriptor/ for some
node. It can hold references to a cursor or a file object,
because the life of a node_descriptor will be the open period
of the data.
It should also take care of locking, with any native mechanism
or using the db.
For the implementation, it would be OK just to wrap around file,
StringIO or similar class. The node_descriptor is only needed to
provide the link to the parent /node/ object.
"""
def __init__(self, parent):
assert isinstance(parent, node_class)
self.name = parent.displayname
self.__parent = parent
def _get_parent(self):
return self.__parent
def open(self, **kwargs):
raise NotImplementedError
def close(self):
raise NotImplementedError
def read(self, size=None):
raise NotImplementedError
def seek(self, offset, whence=None):
raise NotImplementedError
def tell(self):
raise NotImplementedError
def write(self, str):
raise NotImplementedError
def size(self):
raise NotImplementedError
def __len__(self):
return self.size()
def __nonzero__(self):
""" Ensure that a node_descriptor will never equal False
Since we do define __len__ and __iter__ for us, we must avoid
being regarded as non-true objects.
"""
return True
def next(self, str):
raise NotImplementedError
class nodefd_content(StringIO, node_descriptor):
""" A descriptor to content nodes
"""
def __init__(self, parent, cr, mode, ctx):
node_descriptor.__init__(self, parent)
self._context=ctx
self._size = 0L
if mode in ('r', 'r+'):
cntobj = parent.context._dirobj.pool.get('document.directory.content')
data = cntobj.process_read(cr, parent.context.uid, parent, ctx)
if data:
self._size = len(data)
parent.content_length = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
_logger.error("Incorrect mode %s is specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
if self.mode == 'r':
StringIO.close(self)
return
par = self._get_parent()
uid = par.context.uid
cr = openerp.registry(par.context.dbname).cursor()
try:
if self.mode in ('w', 'w+', 'r+'):
data = self.getvalue()
cntobj = par.context._dirobj.pool.get('document.directory.content')
cntobj.process_write(cr, uid, par, data, par.context.context)
elif self.mode == 'a':
raise NotImplementedError
cr.commit()
except Exception:
_logger.exception('Cannot update db content #%d for close.', par.cnt_id)
raise
finally:
cr.close()
StringIO.close(self)
class nodefd_static(StringIO, node_descriptor):
""" A descriptor to nodes with static data.
"""
def __init__(self, parent, cr, mode, ctx=None):
node_descriptor.__init__(self, parent)
self._context=ctx
self._size = 0L
if mode in ('r', 'r+'):
data = parent.get_data(cr)
if data:
self._size = len(data)
parent.content_length = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
_logger.error("Incorrect mode %s is specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
if self.mode == 'r':
StringIO.close(self)
return
par = self._get_parent()
# uid = par.context.uid
cr = openerp.registry(par.context.dbname).cursor()
try:
if self.mode in ('w', 'w+', 'r+'):
data = self.getvalue()
par.set_data(cr, data)
elif self.mode == 'a':
raise NotImplementedError
cr.commit()
except Exception:
            _logger.exception('Cannot update node %r for close.', par)
raise
finally:
cr.close()
StringIO.close(self)
class nodefd_db(StringIO, node_descriptor):
""" A descriptor to db data
"""
def __init__(self, parent, ira_browse, mode):
node_descriptor.__init__(self, parent)
self._size = 0L
if mode.endswith('b'):
mode = mode[:-1]
if mode in ('r', 'r+'):
data = ira_browse.datas
if data:
data = data.decode('base64')
self._size = len(data)
StringIO.__init__(self, data)
elif mode in ('w', 'w+'):
StringIO.__init__(self, None)
# at write, we start at 0 (= overwrite), but have the original
# data available, in case of a seek()
elif mode == 'a':
StringIO.__init__(self, None)
else:
_logger.error("Incorrect mode %s is specified.", mode)
raise IOError(errno.EINVAL, "Invalid file mode.")
self.mode = mode
def size(self):
return self._size
def close(self):
# we now open a *separate* cursor, to update the data.
# FIXME: this may be improved, for concurrency handling
par = self._get_parent()
# uid = par.context.uid
registry = openerp.modules.registry.RegistryManager.get(par.context.dbname)
with registry.cursor() as cr:
data = self.getvalue().encode('base64')
if self.mode in ('w', 'w+', 'r+'):
registry.get('ir.attachment').write(cr, 1, par.file_id, {'datas': data})
cr.commit()
StringIO.close(self)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sgelb/impositioner
|
refs/heads/master
|
tools/pdfSampler.py
|
1
|
#!/usr/bin/env python
# Copyright (C) sgelb 2019
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import argparse
from reportlab.pdfgen.canvas import Canvas
paperformats = {
"a0": [2384, 3371],
"a1": [1685, 2384],
"a2": [1190, 1684],
"a3": [842, 1190],
"a4": [595, 842],
"a5": [420, 595],
"a6": [298, 420],
"a7": [210, 298],
"a8": [148, 210],
"b4": [729, 1032],
"b5": [516, 729],
"letter": [612, 792],
"legal": [612, 1008],
"ledger": [1224, 792],
"tabloid": [792, 1224],
"executive": [540, 720],
}
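# Example invocation (illustrative): "python pdfSampler.py 8 a4 --landscape --bbox"
# writes "a4_landscape_8.pdf" with eight numbered landscape A4 pages.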
def main():
parser = argparse.ArgumentParser(
description="""
Create sample PDF file with specified number of pages and size
"""
)
parser.add_argument("pages", action="store", type=int, help="number of pages")
parser.add_argument(
"size",
action="store",
type=str.lower,
help="standard paper format like A4, letter, ",
)
parser.add_argument(
"--landscape",
"-l",
action="store_true",
help="output in landscape (default: portrait)",
)
parser.add_argument("--bbox", "-b", action="store_true", help="draw bbox")
args = parser.parse_args()
if args.size not in paperformats:
print(
"Unknown paper format: {}. Must be one of the following "
"standard formats: {}".format(
                args.size, ", ".join(sorted(paperformats.keys()))
            )
        )
        return
    pagesize = paperformats[args.size]
orientation = "portrait"
if args.landscape:
pagesize = list(reversed(pagesize))
orientation = "landscape"
outfname = "{}_{}_{}.pdf".format(args.size, orientation, str(args.pages))
canvas = pdfgen.canvas.Canvas(outfname, pagesize)
w, h = pagesize
font = canvas.getAvailableFonts()[0]
for i in range(1, args.pages + 1):
canvas.setFont(font, 50)
canvas.drawCentredString(w / 2, h / 2 + 100, orientation)
canvas.drawCentredString(w / 2, h / 2 + 50, args.size)
canvas.setFont(font, 100)
canvas.drawCentredString(w / 2, h / 2 - 50, str(i))
if args.bbox:
canvas.setLineWidth(2)
canvas.setStrokeColorRGB(255, 0, 255)
canvas.rect(5, 5, w - 10, h - 10)
canvas.showPage()
canvas.save()
print("Created", outfname)
if __name__ == "__main__":
main()
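# Example invocation (illustrative only, not part of the script): create a
# 12-page landscape A4 sample with a bounding box drawn on every page:
#
#   python pdfSampler.py 12 a4 --landscape --bbox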
|
swarna-k/MyDiary
|
refs/heads/master
|
flask/lib/python2.7/site-packages/whoosh/qparser/dateparse.py
|
95
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import re
import sys
from datetime import datetime, timedelta
from whoosh.compat import string_type, iteritems
from whoosh.qparser import plugins, syntax
from whoosh.qparser.taggers import Tagger
from whoosh.support.relativedelta import relativedelta
from whoosh.util.text import rcompile
from whoosh.util.times import adatetime, timespan
from whoosh.util.times import fill_in, is_void, relative_days
from whoosh.util.times import TimeError
class DateParseError(Exception):
"Represents an error in parsing date text."
# Utility functions
def print_debug(level, msg, *args):
if level > 0:
print((" " * (level - 1)) + (msg % args))
# Parser element objects
class Props(object):
"""A dumb little object that just puts copies a dictionary into attibutes
so I can use dot syntax instead of square bracket string item lookup and
save a little bit of typing. Used by :class:`Regex`.
"""
def __init__(self, **args):
self.__dict__ = args
def __repr__(self):
return repr(self.__dict__)
def get(self, key, default=None):
return self.__dict__.get(key, default)
class ParserBase(object):
"""Base class for date parser elements.
"""
def to_parser(self, e):
if isinstance(e, string_type):
return Regex(e)
else:
return e
def parse(self, text, dt, pos=0, debug=-9999):
raise NotImplementedError
def date_from(self, text, dt=None, pos=0, debug=-9999):
if dt is None:
dt = datetime.now()
d, pos = self.parse(text, dt, pos, debug + 1)
return d
class MultiBase(ParserBase):
"""Base class for date parser elements such as Sequence and Bag that
have sub-elements.
"""
def __init__(self, elements, name=None):
"""
:param elements: the sub-elements to match.
:param name: a name for this element (for debugging purposes only).
"""
self.elements = [self.to_parser(e) for e in elements]
self.name = name
def __repr__(self):
return "%s<%s>%r" % (self.__class__.__name__, self.name or '',
self.elements)
class Sequence(MultiBase):
"""Merges the dates parsed by a sequence of sub-elements.
"""
def __init__(self, elements, sep="(\\s+|\\s*,\\s*)", name=None,
progressive=False):
"""
:param elements: the sequence of sub-elements to parse.
:param sep: a separator regular expression to match between elements,
or None to not have separators.
:param name: a name for this element (for debugging purposes only).
:param progressive: if True, elements after the first do not need to
match. That is, for elements (a, b, c) and progressive=True, the
sequence matches like ``a[b[c]]``.
"""
super(Sequence, self).__init__(elements, name)
self.sep_pattern = sep
if sep:
self.sep_expr = rcompile(sep, re.IGNORECASE)
else:
self.sep_expr = None
self.progressive = progressive
def parse(self, text, dt, pos=0, debug=-9999):
d = adatetime()
first = True
foundall = False
failed = False
print_debug(debug, "Seq %s sep=%r text=%r", self.name,
self.sep_pattern, text[pos:])
for e in self.elements:
print_debug(debug, "Seq %s text=%r", self.name, text[pos:])
if self.sep_expr and not first:
print_debug(debug, "Seq %s looking for sep", self.name)
m = self.sep_expr.match(text, pos)
if m:
pos = m.end()
else:
print_debug(debug, "Seq %s didn't find sep", self.name)
break
print_debug(debug, "Seq %s trying=%r at=%s", self.name, e, pos)
try:
at, newpos = e.parse(text, dt, pos=pos, debug=debug + 1)
except TimeError:
failed = True
break
print_debug(debug, "Seq %s result=%r", self.name, at)
if not at:
break
pos = newpos
print_debug(debug, "Seq %s adding=%r to=%r", self.name, at, d)
try:
d = fill_in(d, at)
except TimeError:
print_debug(debug, "Seq %s Error in fill_in", self.name)
failed = True
break
print_debug(debug, "Seq %s filled date=%r", self.name, d)
first = False
else:
foundall = True
if not failed and (foundall or (not first and self.progressive)):
print_debug(debug, "Seq %s final=%r", self.name, d)
return (d, pos)
else:
print_debug(debug, "Seq %s failed", self.name)
return (None, None)
class Combo(Sequence):
"""Parses a sequence of elements in order and combines the dates parsed
by the sub-elements somehow. The default behavior is to accept two dates
from the sub-elements and turn them into a range.
"""
def __init__(self, elements, fn=None, sep="(\\s+|\\s*,\\s*)", min=2, max=2,
name=None):
"""
:param elements: the sequence of sub-elements to parse.
:param fn: a function to run on all dates found. It should return a
datetime, adatetime, or timespan object. If this argument is None,
the default behavior accepts two dates and returns a timespan.
:param sep: a separator regular expression to match between elements,
or None to not have separators.
:param min: the minimum number of dates required from the sub-elements.
:param max: the maximum number of dates allowed from the sub-elements.
:param name: a name for this element (for debugging purposes only).
"""
super(Combo, self).__init__(elements, sep=sep, name=name)
self.fn = fn
self.min = min
self.max = max
def parse(self, text, dt, pos=0, debug=-9999):
dates = []
first = True
print_debug(debug, "Combo %s sep=%r text=%r", self.name,
self.sep_pattern, text[pos:])
for e in self.elements:
if self.sep_expr and not first:
print_debug(debug, "Combo %s looking for sep at %r",
self.name, text[pos:])
m = self.sep_expr.match(text, pos)
if m:
pos = m.end()
else:
print_debug(debug, "Combo %s didn't find sep", self.name)
return (None, None)
print_debug(debug, "Combo %s trying=%r", self.name, e)
try:
at, pos = e.parse(text, dt, pos, debug + 1)
except TimeError:
at, pos = None, None
print_debug(debug, "Combo %s result=%r", self.name, at)
if at is None:
return (None, None)
first = False
if is_void(at):
continue
if len(dates) == self.max:
print_debug(debug, "Combo %s length > %s", self.name, self.max)
return (None, None)
dates.append(at)
print_debug(debug, "Combo %s dates=%r", self.name, dates)
if len(dates) < self.min:
print_debug(debug, "Combo %s length < %s", self.name, self.min)
return (None, None)
return (self.dates_to_timespan(dates), pos)
def dates_to_timespan(self, dates):
if self.fn:
return self.fn(dates)
elif len(dates) == 2:
return timespan(dates[0], dates[1])
else:
raise DateParseError("Don't know what to do with %r" % (dates,))
class Choice(MultiBase):
"""Returns the date from the first of its sub-elements that matches.
"""
def parse(self, text, dt, pos=0, debug=-9999):
print_debug(debug, "Choice %s text=%r", self.name, text[pos:])
for e in self.elements:
print_debug(debug, "Choice %s trying=%r", self.name, e)
try:
d, newpos = e.parse(text, dt, pos, debug + 1)
except TimeError:
d, newpos = None, None
if d:
print_debug(debug, "Choice %s matched", self.name)
return (d, newpos)
print_debug(debug, "Choice %s no match", self.name)
return (None, None)
class Bag(MultiBase):
"""Parses its sub-elements in any order and merges the dates.
"""
def __init__(self, elements, sep="(\\s+|\\s*,\\s*)", onceper=True,
requireall=False, allof=None, anyof=None, name=None):
"""
:param elements: the sub-elements to parse.
:param sep: a separator regular expression to match between elements,
or None to not have separators.
:param onceper: only allow each element to match once.
:param requireall: if True, the sub-elements can match in any order,
but they must all match.
:param allof: a list of indexes into the list of elements. When this
argument is not None, this element matches only if all the
indicated sub-elements match.
        :param anyof: a list of indexes into the list of elements. When this
argument is not None, this element matches only if any of the
indicated sub-elements match.
:param name: a name for this element (for debugging purposes only).
"""
super(Bag, self).__init__(elements, name)
self.sep_expr = rcompile(sep, re.IGNORECASE)
self.onceper = onceper
self.requireall = requireall
self.allof = allof
self.anyof = anyof
def parse(self, text, dt, pos=0, debug=-9999):
first = True
d = adatetime()
seen = [False] * len(self.elements)
while True:
newpos = pos
print_debug(debug, "Bag %s text=%r", self.name, text[pos:])
if not first:
print_debug(debug, "Bag %s looking for sep", self.name)
m = self.sep_expr.match(text, pos)
if m:
newpos = m.end()
else:
print_debug(debug, "Bag %s didn't find sep", self.name)
break
for i, e in enumerate(self.elements):
print_debug(debug, "Bag %s trying=%r", self.name, e)
try:
at, xpos = e.parse(text, dt, newpos, debug + 1)
except TimeError:
at, xpos = None, None
print_debug(debug, "Bag %s result=%r", self.name, at)
if at:
if self.onceper and seen[i]:
return (None, None)
d = fill_in(d, at)
newpos = xpos
seen[i] = True
break
else:
break
pos = newpos
if self.onceper and all(seen):
break
first = False
if (not any(seen)
or (self.allof and not all(seen[pos] for pos in self.allof))
or (self.anyof and not any(seen[pos] for pos in self.anyof))
or (self.requireall and not all(seen))):
return (None, None)
print_debug(debug, "Bag %s final=%r", self.name, d)
return (d, pos)
class Optional(ParserBase):
"""Wraps a sub-element to indicate that the sub-element is optional.
"""
def __init__(self, element):
self.element = self.to_parser(element)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.element)
def parse(self, text, dt, pos=0, debug=-9999):
try:
d, pos = self.element.parse(text, dt, pos, debug + 1)
except TimeError:
d, pos = None, None
if d:
return (d, pos)
else:
return (adatetime(), pos)
class ToEnd(ParserBase):
"""Wraps a sub-element and requires that the end of the sub-element's match
be the end of the text.
"""
def __init__(self, element):
self.element = element
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.element)
def parse(self, text, dt, pos=0, debug=-9999):
try:
d, pos = self.element.parse(text, dt, pos, debug + 1)
except TimeError:
d, pos = None, None
if d and pos == len(text):
return (d, pos)
else:
return (None, None)
class Regex(ParserBase):
"""Matches a regular expression and maps named groups in the pattern to
datetime attributes using a function or overridden method.
There are two points at which you can customize the behavior of this class,
either by supplying functions to the initializer or overriding methods.
* The ``modify`` function or ``modify_props`` method takes a ``Props``
object containing the named groups and modifies its values (in place).
* The ``fn`` function or ``props_to_date`` method takes a ``Props`` object
and the base datetime and returns an adatetime/datetime.
"""
fn = None
modify = None
def __init__(self, pattern, fn=None, modify=None):
self.pattern = pattern
self.expr = rcompile(pattern, re.IGNORECASE)
self.fn = fn
self.modify = modify
def __repr__(self):
return "<%r>" % (self.pattern,)
def parse(self, text, dt, pos=0, debug=-9999):
m = self.expr.match(text, pos)
if not m:
return (None, None)
props = self.extract(m)
self.modify_props(props)
try:
d = self.props_to_date(props, dt)
except TimeError:
d = None
if d:
return (d, m.end())
else:
return (None, None)
def extract(self, match):
d = match.groupdict()
for key, value in iteritems(d):
try:
value = int(value)
d[key] = value
except (ValueError, TypeError):
pass
return Props(**d)
def modify_props(self, props):
if self.modify:
self.modify(props)
def props_to_date(self, props, dt):
if self.fn:
return self.fn(props, dt)
else:
args = {}
for key in adatetime.units:
args[key] = props.get(key)
return adatetime(**args)
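# Illustrative sketch (not part of the original module): both customization
# points can be used without subclassing. For example, a bare four-digit year
# element built from the ``fn`` callback -- DateParser.year below is defined
# the same way:
#
#   year = Regex("(?P<year>[0-9]{4})(?=(\\W|$))",
#                lambda p, dt: adatetime(year=p.year))
#   year.date_from("2010")  # -> adatetime(year=2010)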
class Month(Regex):
def __init__(self, *patterns):
self.patterns = patterns
self.exprs = [rcompile(pat, re.IGNORECASE) for pat in self.patterns]
self.pattern = ("(?P<month>"
+ "|".join("(%s)" % pat for pat in self.patterns)
+ ")")
self.expr = rcompile(self.pattern, re.IGNORECASE)
def modify_props(self, p):
text = p.month
for i, expr in enumerate(self.exprs):
m = expr.match(text)
if m:
p.month = i + 1
break
class PlusMinus(Regex):
def __init__(self, years, months, weeks, days, hours, minutes, seconds):
rel_years = "((?P<years>[0-9]+) *(%s))?" % years
rel_months = "((?P<months>[0-9]+) *(%s))?" % months
rel_weeks = "((?P<weeks>[0-9]+) *(%s))?" % weeks
rel_days = "((?P<days>[0-9]+) *(%s))?" % days
rel_hours = "((?P<hours>[0-9]+) *(%s))?" % hours
rel_mins = "((?P<mins>[0-9]+) *(%s))?" % minutes
rel_secs = "((?P<secs>[0-9]+) *(%s))?" % seconds
self.pattern = ("(?P<dir>[+-]) *%s *%s *%s *%s *%s *%s *%s(?=(\\W|$))"
% (rel_years, rel_months, rel_weeks, rel_days,
rel_hours, rel_mins, rel_secs))
self.expr = rcompile(self.pattern, re.IGNORECASE)
def props_to_date(self, p, dt):
if p.dir == "-":
dir = -1
else:
dir = 1
delta = relativedelta(years=(p.get("years") or 0) * dir,
months=(p.get("months") or 0) * dir,
weeks=(p.get("weeks") or 0) * dir,
days=(p.get("days") or 0) * dir,
hours=(p.get("hours") or 0) * dir,
minutes=(p.get("mins") or 0) * dir,
seconds=(p.get("secs") or 0) * dir)
return dt + delta
class Daynames(Regex):
def __init__(self, next, last, daynames):
self.next_pattern = next
self.last_pattern = last
self._dayname_exprs = tuple(rcompile(pat, re.IGNORECASE)
for pat in daynames)
dn_pattern = "|".join(daynames)
self.pattern = ("(?P<dir>%s|%s) +(?P<day>%s)(?=(\\W|$))"
% (next, last, dn_pattern))
self.expr = rcompile(self.pattern, re.IGNORECASE)
def props_to_date(self, p, dt):
if re.match(p.dir, self.last_pattern):
dir = -1
else:
dir = 1
for daynum, expr in enumerate(self._dayname_exprs):
m = expr.match(p.day)
if m:
break
current_daynum = dt.weekday()
days_delta = relative_days(current_daynum, daynum, dir)
d = dt.date() + timedelta(days=days_delta)
return adatetime(year=d.year, month=d.month, day=d.day)
class Time12(Regex):
def __init__(self):
self.pattern = ("(?P<hour>[1-9]|10|11|12)(:(?P<mins>[0-5][0-9])"
"(:(?P<secs>[0-5][0-9])(\\.(?P<usecs>[0-9]{1,5}))?)?)?"
"\\s*(?P<ampm>am|pm)(?=(\\W|$))")
self.expr = rcompile(self.pattern, re.IGNORECASE)
def props_to_date(self, p, dt):
isam = p.ampm.lower().startswith("a")
if p.hour == 12:
if isam:
hr = 0
else:
hr = 12
else:
hr = p.hour
if not isam:
hr += 12
return adatetime(hour=hr, minute=p.mins, second=p.secs, microsecond=p.usecs)
# Top-level parser classes
class DateParser(object):
"""Base class for locale-specific parser classes.
"""
day = Regex("(?P<day>([123][0-9])|[1-9])(?=(\\W|$))(?!=:)",
lambda p, dt: adatetime(day=p.day))
year = Regex("(?P<year>[0-9]{4})(?=(\\W|$))",
lambda p, dt: adatetime(year=p.year))
time24 = Regex("(?P<hour>([0-1][0-9])|(2[0-3])):(?P<mins>[0-5][0-9])"
"(:(?P<secs>[0-5][0-9])(\\.(?P<usecs>[0-9]{1,5}))?)?"
"(?=(\\W|$))",
lambda p, dt: adatetime(hour=p.hour, minute=p.mins,
second=p.secs, microsecond=p.usecs))
time12 = Time12()
def __init__(self):
simple_year = "(?P<year>[0-9]{4})"
simple_month = "(?P<month>[0-1][0-9])"
simple_day = "(?P<day>[0-3][0-9])"
simple_hour = "(?P<hour>([0-1][0-9])|(2[0-3]))"
simple_minute = "(?P<minute>[0-5][0-9])"
simple_second = "(?P<second>[0-5][0-9])"
simple_usec = "(?P<microsecond>[0-9]{6})"
tup = (simple_year, simple_month, simple_day, simple_hour,
simple_minute, simple_second, simple_usec)
simple_seq = Sequence(tup, sep="[- .:/]*", name="simple",
progressive=True)
self.simple = Sequence((simple_seq, "(?=(\\s|$))"), sep='')
self.setup()
def setup(self):
raise NotImplementedError
#
def get_parser(self):
return self.all
def parse(self, text, dt, pos=0, debug=-9999):
parser = self.get_parser()
d, newpos = parser.parse(text, dt, pos=pos, debug=debug)
if isinstance(d, (adatetime, timespan)):
d = d.disambiguated(dt)
return (d, newpos)
def date_from(self, text, basedate=None, pos=0, debug=-9999, toend=True):
if basedate is None:
basedate = datetime.utcnow()
parser = self.get_parser()
if toend:
parser = ToEnd(parser)
d = parser.date_from(text, basedate, pos=pos, debug=debug)
if isinstance(d, (adatetime, timespan)):
d = d.disambiguated(basedate)
return d
class English(DateParser):
day = Regex("(?P<day>([123][0-9])|[1-9])(st|nd|rd|th)?(?=(\\W|$))",
lambda p, dt: adatetime(day=p.day))
def setup(self):
self.plusdate = PlusMinus("years|year|yrs|yr|ys|y",
"months|month|mons|mon|mos|mo",
"weeks|week|wks|wk|ws|w",
"days|day|dys|dy|ds|d",
"hours|hour|hrs|hr|hs|h",
"minutes|minute|mins|min|ms|m",
"seconds|second|secs|sec|s")
self.dayname = Daynames("next", "last",
("monday|mon|mo", "tuesday|tues|tue|tu",
"wednesday|wed|we", "thursday|thur|thu|th",
"friday|fri|fr", "saturday|sat|sa",
"sunday|sun|su"))
midnight_l = lambda p, dt: adatetime(hour=0, minute=0, second=0,
microsecond=0)
midnight = Regex("midnight", midnight_l)
noon_l = lambda p, dt: adatetime(hour=12, minute=0, second=0,
microsecond=0)
noon = Regex("noon", noon_l)
now = Regex("now", lambda p, dt: dt)
self.time = Choice((self.time12, self.time24, midnight, noon, now),
name="time")
def tomorrow_to_date(p, dt):
d = dt.date() + timedelta(days=+1)
return adatetime(year=d.year, month=d.month, day=d.day)
tomorrow = Regex("tomorrow", tomorrow_to_date)
def yesterday_to_date(p, dt):
d = dt.date() + timedelta(days=-1)
return adatetime(year=d.year, month=d.month, day=d.day)
yesterday = Regex("yesterday", yesterday_to_date)
thisyear = Regex("this year", lambda p, dt: adatetime(year=dt.year))
thismonth = Regex("this month",
lambda p, dt: adatetime(year=dt.year,
month=dt.month))
today = Regex("today",
lambda p, dt: adatetime(year=dt.year, month=dt.month,
day=dt.day))
self.month = Month("january|jan", "february|febuary|feb", "march|mar",
"april|apr", "may", "june|jun", "july|jul",
"august|aug", "september|sept|sep", "october|oct",
"november|nov", "december|dec")
# If you specify a day number you must also specify a month... this
# Choice captures that constraint
self.dmy = Choice((Sequence((self.day, self.month, self.year),
name="dmy"),
Sequence((self.month, self.day, self.year),
name="mdy"),
Sequence((self.year, self.month, self.day),
name="ymd"),
Sequence((self.year, self.day, self.month),
name="ydm"),
Sequence((self.day, self.month), name="dm"),
Sequence((self.month, self.day), name="md"),
Sequence((self.month, self.year), name="my"),
self.month, self.year, self.dayname, tomorrow,
yesterday, thisyear, thismonth, today, now,
), name="date")
self.datetime = Bag((self.time, self.dmy), name="datetime")
self.bundle = Choice((self.plusdate, self.datetime, self.simple),
name="bundle")
self.torange = Combo((self.bundle, "to", self.bundle), name="torange")
self.all = Choice((self.torange, self.bundle), name="all")
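# Illustrative usage (not part of the library source): English().date_from()
# resolves free-form text against a base date using the elements wired up in
# setup() above. The sample strings and base date are only examples.
#
#   from datetime import datetime
#   e = English()
#   e.date_from("next friday", basedate=datetime(2012, 1, 10))
#   e.date_from("25 december 2011", basedate=datetime(2012, 1, 10))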
# QueryParser plugin
class DateParserPlugin(plugins.Plugin):
"""Adds more powerful parsing of DATETIME fields.
>>> parser.add_plugin(DateParserPlugin())
>>> parser.parse(u"date:'last tuesday'")
"""
def __init__(self, basedate=None, dateparser=None, callback=None,
free=False, free_expr="([A-Za-z][A-Za-z_0-9]*):([^^]+)"):
"""
:param basedate: a datetime object representing the current time
against which to measure relative dates. If you do not supply this
argument, the plugin uses ``datetime.utcnow()``.
:param dateparser: an instance of
:class:`whoosh.qparser.dateparse.DateParser`. If you do not supply
this argument, the plugin automatically uses
:class:`whoosh.qparser.dateparse.English`.
:param callback: a callback function for parsing errors. This allows
you to provide feedback to the user about problems parsing dates.
:param free: if True, this plugin will install a filter early in the
parsing process and try to find undelimited dates such as
``date:last tuesday``. Note that allowing this could result in
normal query words accidentally being parsed as dates sometimes.
"""
self.basedate = basedate
if dateparser is None:
dateparser = English()
self.dateparser = dateparser
self.callback = callback
self.free = free
self.freeexpr = free_expr
def taggers(self, parser):
if self.free:
# If we're tokenizing, we have to go before the FieldsPlugin
return [(DateTagger(self, self.freeexpr), -1)]
else:
return ()
def filters(self, parser):
# Run the filter after the FieldsPlugin assigns field names
return [(self.do_dates, 110)]
def errorize(self, message, node):
if self.callback:
self.callback(message)
return syntax.ErrorNode(message, node)
def text_to_dt(self, node):
text = node.text
try:
dt = self.dateparser.date_from(text, self.basedate)
if dt is None:
return self.errorize(text, node)
else:
n = DateTimeNode(node.fieldname, dt, node.boost)
except DateParseError:
e = sys.exc_info()[1]
n = self.errorize(e, node)
n.startchar = node.startchar
n.endchar = node.endchar
return n
def range_to_dt(self, node):
start = end = None
dp = self.dateparser.get_parser()
if node.start:
start = dp.date_from(node.start, self.basedate)
if start is None:
return self.errorize(node.start, node)
if node.end:
end = dp.date_from(node.end, self.basedate)
if end is None:
return self.errorize(node.end, node)
if start and end:
ts = timespan(start, end).disambiguated(self.basedate)
start, end = ts.start, ts.end
elif start:
start = start.disambiguated(self.basedate)
if isinstance(start, timespan):
start = start.start
elif end:
end = end.disambiguated(self.basedate)
if isinstance(end, timespan):
end = end.end
drn = DateRangeNode(node.fieldname, start, end, boost=node.boost)
drn.startchar = node.startchar
drn.endchar = node.endchar
return drn
def do_dates(self, parser, group):
schema = parser.schema
if not schema:
return group
from whoosh.fields import DATETIME
datefields = frozenset(fieldname for fieldname, field
in parser.schema.items()
if isinstance(field, DATETIME))
for i, node in enumerate(group):
if node.has_fieldname:
fname = node.fieldname or parser.fieldname
else:
fname = None
if isinstance(node, syntax.GroupNode):
group[i] = self.do_dates(parser, node)
elif fname in datefields:
if node.has_text:
group[i] = self.text_to_dt(node)
elif isinstance(node, syntax.RangeNode):
group[i] = self.range_to_dt(node)
return group
class DateTimeNode(syntax.SyntaxNode):
has_fieldname = True
has_boost = True
def __init__(self, fieldname, dt, boost=1.0):
self.fieldname = fieldname
self.dt = dt
        self.boost = boost
def r(self):
return repr(self.dt)
def query(self, parser):
from whoosh import query
fieldname = self.fieldname or parser.fieldname
field = parser.schema[fieldname]
dt = self.dt
if isinstance(self.dt, datetime):
btext = field.to_bytes(dt)
return query.Term(fieldname, btext, boost=self.boost)
elif isinstance(self.dt, timespan):
return query.DateRange(fieldname, dt.start, dt.end,
boost=self.boost)
else:
raise Exception("Unknown time object: %r" % dt)
class DateRangeNode(syntax.SyntaxNode):
has_fieldname = True
has_boost = True
def __init__(self, fieldname, start, end, boost=1.0):
self.fieldname = fieldname
self.start = start
self.end = end
        self.boost = boost
def r(self):
return "%r-%r" % (self.start, self.end)
def query(self, parser):
from whoosh import query
fieldname = self.fieldname or parser.fieldname
return query.DateRange(fieldname, self.start, self.end,
boost=self.boost)
class DateTagger(Tagger):
def __init__(self, plugin, expr):
self.plugin = plugin
self.expr = rcompile(expr, re.IGNORECASE)
def match(self, parser, text, pos):
from whoosh.fields import DATETIME
match = self.expr.match(text, pos)
if match:
fieldname = match.group(1)
dtext = match.group(2)
if parser.schema and fieldname in parser.schema:
field = parser.schema[fieldname]
if isinstance(field, DATETIME):
plugin = self.plugin
dateparser = plugin.dateparser
basedate = plugin.basedate
d, newpos = dateparser.parse(dtext, basedate)
if d:
node = DateTimeNode(fieldname, d)
node.startchar = match.start()
node.endchar = newpos + match.start(2)
return node
|
Cloudino/Cloudino-Arduino-IDE
|
refs/heads/esp8266
|
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/poolmanager.py
|
168
|
# urllib3/poolmanager.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import connection_from_url, port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
class PoolManager(RequestMethods):
"""
Allows for arbitrary requests while transparently keeping track of
necessary connection pools for you.
:param num_pools:
Number of connection pools to cache before discarding the least
recently used pool.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param \**connection_pool_kw:
Additional parameters are used to create fresh
:class:`urllib3.connectionpool.ConnectionPool` instances.
Example: ::
>>> manager = PoolManager(num_pools=2)
>>> r = manager.request('GET', 'http://google.com/')
>>> r = manager.request('GET', 'http://google.com/mail')
>>> r = manager.request('GET', 'http://yahoo.com/')
>>> len(manager.pools)
2
"""
def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
RequestMethods.__init__(self, headers)
self.connection_pool_kw = connection_pool_kw
self.pools = RecentlyUsedContainer(num_pools,
dispose_func=lambda p: p.close())
def _new_pool(self, scheme, host, port):
"""
Create a new :class:`ConnectionPool` based on host, port and scheme.
This method is used to actually create the connection pools handed out
by :meth:`connection_from_url` and companion methods. It is intended
to be overridden for customization.
"""
pool_cls = pool_classes_by_scheme[scheme]
return pool_cls(host, port, **self.connection_pool_kw)
def clear(self):
"""
Empty our store of pools and direct them all to close.
This will not affect in-flight connections, but they will not be
re-used after completion.
"""
self.pools.clear()
def connection_from_host(self, host, port=None, scheme='http'):
"""
Get a :class:`ConnectionPool` based on the host, port, and scheme.
If ``port`` isn't given, it will be derived from the ``scheme`` using
``urllib3.connectionpool.port_by_scheme``.
"""
scheme = scheme or 'http'
port = port or port_by_scheme.get(scheme, 80)
pool_key = (scheme, host, port)
# If the scheme, host, or port doesn't match existing open connections,
# open a new ConnectionPool.
pool = self.pools.get(pool_key)
if pool:
return pool
# Make a fresh ConnectionPool of the desired type
pool = self._new_pool(scheme, host, port)
self.pools[pool_key] = pool
return pool
def connection_from_url(self, url):
"""
Similar to :func:`urllib3.connectionpool.connection_from_url` but
doesn't pass any additional parameters to the
:class:`urllib3.connectionpool.ConnectionPool` constructor.
Additional parameters are taken from the :class:`.PoolManager`
constructor.
"""
u = parse_url(url)
return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
def urlopen(self, method, url, redirect=True, **kw):
"""
Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
with custom cross-host redirect logic and only sends the request-uri
portion of the ``url``.
The given ``url`` parameter must be absolute, such that an appropriate
:class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
"""
u = parse_url(url)
conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
kw['assert_same_host'] = False
kw['redirect'] = False
if 'headers' not in kw:
kw['headers'] = self.headers
response = conn.urlopen(method, u.request_uri, **kw)
redirect_location = redirect and response.get_redirect_location()
if not redirect_location:
return response
if response.status == 303:
method = 'GET'
log.info("Redirecting %s -> %s" % (url, redirect_location))
kw['retries'] = kw.get('retries', 3) - 1 # Persist retries countdown
return self.urlopen(method, redirect_location, **kw)
class ProxyManager(RequestMethods):
"""
Given a ConnectionPool to a proxy, the ProxyManager's ``urlopen`` method
will make requests to any url through the defined proxy. The ProxyManager
class will automatically set the 'Host' header if it is not provided.
"""
def __init__(self, proxy_pool):
self.proxy_pool = proxy_pool
def _set_proxy_headers(self, url, headers=None):
"""
Sets headers needed by proxies: specifically, the Accept and Host
headers. Only sets headers not provided by the user.
"""
headers_ = {'Accept': '*/*'}
host = parse_url(url).host
if host:
headers_['Host'] = host
if headers:
headers_.update(headers)
return headers_
def urlopen(self, method, url, **kw):
"Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
kw['assert_same_host'] = False
kw['headers'] = self._set_proxy_headers(url, headers=kw.get('headers'))
return self.proxy_pool.urlopen(method, url, **kw)
def proxy_from_url(url, **pool_kw):
proxy_pool = connection_from_url(url, **pool_kw)
return ProxyManager(proxy_pool)
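# Illustrative usage (not part of urllib3): route plain HTTP requests through a
# forward proxy; the proxy URL below is a placeholder.
#
#   proxy = proxy_from_url('http://localhost:3128/')
#   r = proxy.urlopen('GET', 'http://example.com/')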
|
Aptitudetech/ERPNext
|
refs/heads/master
|
erpnext/patches/v7_0/update_mode_of_payment_type.py
|
53
|
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
def execute():
frappe.reload_doc('accounts', 'doctype', 'mode_of_payment')
frappe.db.sql(""" update `tabMode of Payment` set type = 'Cash' where (type is null or type = '') and name = 'Cash'""")
for data in frappe.db.sql("""select name from `tabSales Invoice` where is_pos=1 and docstatus<2 and
(ifnull(paid_amount, 0) - ifnull(change_amount, 0)) > ifnull(grand_total, 0) and modified > '2016-05-01'""", as_dict=1):
if data.name:
si_doc = frappe.get_doc("Sales Invoice", data.name)
remove_payment = []
mode_of_payment = [d.mode_of_payment for d in si_doc.payments if flt(d.amount) > 0]
			if len(mode_of_payment) != len(set(mode_of_payment)):
for payment_data in si_doc.payments:
if payment_data.idx != 1 and payment_data.amount == si_doc.grand_total:
remove_payment.append(payment_data)
frappe.db.sql(""" delete from `tabSales Invoice Payment`
where name = %(name)s""", {'name': payment_data.name})
if len(remove_payment) > 0:
for d in remove_payment:
si_doc.remove(d)
si_doc.set_paid_amount()
si_doc.db_set("paid_amount", si_doc.paid_amount, update_modified = False)
si_doc.db_set("base_paid_amount", si_doc.base_paid_amount, update_modified = False)
|
kekeadou/ycmd
|
refs/heads/master
|
ycmd/hmac_utils.py
|
27
|
#!/usr/bin/env python
#
# Copyright (C) 2015 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
import hmac
import hashlib
def CreateHmac( content, hmac_secret ):
# Must ensure that hmac_secret is str and not unicode
return hmac.new( str( hmac_secret ),
msg = content,
digestmod = hashlib.sha256 ).digest()
def CreateRequestHmac( method, path, body, hmac_secret ):
method_hmac = CreateHmac( method, hmac_secret )
path_hmac = CreateHmac( path, hmac_secret )
body_hmac = CreateHmac( body, hmac_secret )
joined_hmac_input = ''.join( ( method_hmac, path_hmac, body_hmac ) )
return CreateHmac( joined_hmac_input, hmac_secret )
# This is the compare_digest function from python 3.4, adapted for 2.7:
# http://hg.python.org/cpython/file/460407f35aa9/Lib/hmac.py#l16
def SecureStringsEqual( a, b ):
"""Returns the equivalent of 'a == b', but avoids content based short
circuiting to reduce the vulnerability to timing attacks."""
# Consistent timing matters more here than data type flexibility
if not ( isinstance( a, str ) and isinstance( b, str ) ):
raise TypeError( "inputs must be str instances" )
# We assume the length of the expected digest is public knowledge,
# thus this early return isn't leaking anything an attacker wouldn't
# already know
if len( a ) != len( b ):
return False
# We assume that integers in the bytes range are all cached,
# thus timing shouldn't vary much due to integer object creation
result = 0
for x, y in zip( a, b ):
result |= ord( x ) ^ ord( y )
return result == 0
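# Illustrative sketch (not part of ycmd): verifying an incoming request against
# the shared secret. 'method', 'path', 'body' and 'received_hmac' are
# hypothetical values supplied by the server layer.
#
#   expected_hmac = CreateRequestHmac( method, path, body, hmac_secret )
#   if not SecureStringsEqual( received_hmac, expected_hmac ):
#       raise RuntimeError( 'HMAC validation failed' )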
|
sahilshekhawat/sympy
|
refs/heads/master
|
sympy/combinatorics/tests/test_testutil.py
|
129
|
from sympy.combinatorics.named_groups import SymmetricGroup, AlternatingGroup,\
CyclicGroup
from sympy.combinatorics.testutil import _verify_bsgs, _cmp_perm_lists,\
_naive_list_centralizer, _verify_centralizer,\
_verify_normal_closure
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.perm_groups import PermutationGroup
from random import shuffle
def test_cmp_perm_lists():
S = SymmetricGroup(4)
els = list(S.generate_dimino())
other = els[:]
shuffle(other)
assert _cmp_perm_lists(els, other) is True
def test_naive_list_centralizer():
# verified by GAP
S = SymmetricGroup(3)
A = AlternatingGroup(3)
assert _naive_list_centralizer(S, S) == [Permutation([0, 1, 2])]
assert PermutationGroup(_naive_list_centralizer(S, A)).is_subgroup(A)
def test_verify_bsgs():
S = SymmetricGroup(5)
S.schreier_sims()
base = S.base
strong_gens = S.strong_gens
assert _verify_bsgs(S, base, strong_gens) is True
assert _verify_bsgs(S, base[:-1], strong_gens) is False
assert _verify_bsgs(S, base, S.generators) is False
def test_verify_centralizer():
# verified by GAP
S = SymmetricGroup(3)
A = AlternatingGroup(3)
triv = PermutationGroup([Permutation([0, 1, 2])])
assert _verify_centralizer(S, S, centr=triv)
assert _verify_centralizer(S, A, centr=A)
def test_verify_normal_closure():
# verified by GAP
S = SymmetricGroup(3)
A = AlternatingGroup(3)
assert _verify_normal_closure(S, A, closure=A)
S = SymmetricGroup(5)
A = AlternatingGroup(5)
C = CyclicGroup(5)
assert _verify_normal_closure(S, A, closure=A)
assert _verify_normal_closure(S, C, closure=A)
|
raphaelmerx/django
|
refs/heads/master
|
django/contrib/contenttypes/forms.py
|
376
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.forms import ModelForm, modelformset_factory
from django.forms.models import BaseModelFormSet
class BaseGenericInlineFormSet(BaseModelFormSet):
"""
A formset for generic inline objects to a parent.
"""
def __init__(self, data=None, files=None, instance=None, save_as_new=None,
prefix=None, queryset=None, **kwargs):
opts = self.model._meta
self.instance = instance
self.rel_name = '-'.join((
opts.app_label, opts.model_name,
self.ct_field.name, self.ct_fk_field.name,
))
if self.instance is None or self.instance.pk is None:
qs = self.model._default_manager.none()
else:
if queryset is None:
queryset = self.model._default_manager
qs = queryset.filter(**{
self.ct_field.name: ContentType.objects.get_for_model(
self.instance, for_concrete_model=self.for_concrete_model),
self.ct_fk_field.name: self.instance.pk,
})
super(BaseGenericInlineFormSet, self).__init__(
queryset=qs, data=data, files=files,
prefix=prefix,
**kwargs
)
@classmethod
def get_default_prefix(cls):
opts = cls.model._meta
return '-'.join(
(opts.app_label, opts.model_name,
cls.ct_field.name, cls.ct_fk_field.name)
)
def save_new(self, form, commit=True):
setattr(form.instance, self.ct_field.get_attname(),
ContentType.objects.get_for_model(self.instance).pk)
setattr(form.instance, self.ct_fk_field.get_attname(),
self.instance.pk)
return form.save(commit=commit)
def generic_inlineformset_factory(model, form=ModelForm,
formset=BaseGenericInlineFormSet,
ct_field="content_type", fk_field="object_id",
fields=None, exclude=None,
extra=3, can_order=False, can_delete=True,
max_num=None, formfield_callback=None,
validate_max=False, for_concrete_model=True,
min_num=None, validate_min=False):
"""
Returns a ``GenericInlineFormSet`` for the given kwargs.
You must provide ``ct_field`` and ``fk_field`` if they are different from
the defaults ``content_type`` and ``object_id`` respectively.
"""
opts = model._meta
# if there is no field called `ct_field` let the exception propagate
ct_field = opts.get_field(ct_field)
if not isinstance(ct_field, models.ForeignKey) or ct_field.remote_field.model != ContentType:
raise Exception("fk_name '%s' is not a ForeignKey to ContentType" % ct_field)
fk_field = opts.get_field(fk_field) # let the exception propagate
if exclude is not None:
exclude = list(exclude)
exclude.extend([ct_field.name, fk_field.name])
else:
exclude = [ct_field.name, fk_field.name]
FormSet = modelformset_factory(model, form=form,
formfield_callback=formfield_callback,
formset=formset,
extra=extra, can_delete=can_delete, can_order=can_order,
fields=fields, exclude=exclude, max_num=max_num,
validate_max=validate_max, min_num=min_num,
validate_min=validate_min)
FormSet.ct_field = ct_field
FormSet.ct_fk_field = fk_field
FormSet.for_concrete_model = for_concrete_model
return FormSet
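# Illustrative usage (not part of Django): build an inline formset for a model
# with a generic foreign key. ``Comment``, ``request`` and ``some_object`` are
# hypothetical names.
#
#   CommentFormSet = generic_inlineformset_factory(Comment, extra=1)
#   formset = CommentFormSet(request.POST or None, instance=some_object)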
|
huanpc/IoT-1
|
refs/heads/master
|
gui/controller/.venv/lib/python3.5/site-packages/django/db/backends/mysql/schema.py
|
173
|
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY "
"(%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
)
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
def quote_value(self, value):
# Inner import to allow module to fail to load gracefully
import MySQLdb.converters
return MySQLdb.escape(value, MySQLdb.converters.conversions)
def skip_default(self, field):
"""
MySQL doesn't accept default values for TEXT and BLOB types, and
implicitly treats these columns as nullable.
"""
db_type = field.db_type(self.connection)
return (
db_type is not None and
db_type.lower() in {
'tinyblob', 'blob', 'mediumblob', 'longblob',
'tinytext', 'text', 'mediumtext', 'longtext',
}
)
def add_field(self, model, field):
super(DatabaseSchemaEditor, self).add_field(model, field)
# Simulate the effect of a one-off default.
# field.default may be unhashable, so a set isn't used for "in" check.
if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _model_indexes_sql(self, model):
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
if storage == "InnoDB":
for field in model._meta.local_fields:
if field.db_index and not field.unique and field.get_internal_type() == "ForeignKey":
# Temporary setting db_index to False (in memory) to disable
# index creation for FKs (index automatically created by MySQL)
field.db_index = False
return super(DatabaseSchemaEditor, self)._model_indexes_sql(model)
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super(DatabaseSchemaEditor, self)._delete_composed_index(model, fields, *args)
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._rename_field_sql(table, old_field, new_field, new_type)
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/inspections/ChainedComparison2_after.py
|
83
|
if e < a <= b < c:
print "q"
|
hieukypc/ERP
|
refs/heads/master
|
openerp/addons/sale_mrp/tests/test_move_explode.py
|
23
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.tests import common
class TestMoveExplode(common.TransactionCase):
def setUp(self):
super(TestMoveExplode, self).setUp()
        # Useful models
self.SaleOrderLine = self.env['sale.order.line']
self.SaleOrder = self.env['sale.order']
self.MrpBom = self.env['mrp.bom']
self.Product = self.env['product.product']
#product that has a phantom bom
self.product_bom = self.env.ref('product.product_product_3')
#bom with that product
self.bom = self.env.ref('mrp.mrp_bom_9')
#partner agrolait
self.partner = self.env.ref('base.res_partner_1')
#bom: PC Assemble (with property: DDR 512MB)
self.bom_prop = self.env.ref('mrp.mrp_bom_property_0')
self.template = self.env.ref('product.product_product_3_product_template')
#property: DDR 512MB
self.mrp_property = self.env.ref('mrp.mrp_property_0')
#product: RAM SR2
self.product_bom_prop = self.env.ref('product.product_product_14')
#phantom bom for RAM SR2 with three lines containing properties
self.bom_prop_line = self.env.ref('mrp.mrp_bom_property_line')
#product: iPod included in the phantom bom
self.product_A = self.env.ref('product.product_product_11')
#product: Mouse, Wireless included in the phantom bom
self.product_B = self.env.ref('product.product_product_12')
#pricelist
self.pricelist = self.env.ref('product.list0')
def test_00_sale_move_explode(self):
"""check that when creating a sale order with a product that has a phantom BoM, move explode into content of the
BoM"""
#create sale order with one sale order line containing product with a phantom bom
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'pricelist_id': self.pricelist.id,
}
self.so = self.SaleOrder.create(so_vals)
sol_vals = {
'order_id': self.so.id,
'name': self.product_bom.name,
'product_id': self.product_bom.id,
'product_uom': self.product_bom.uom_id.id,
'product_uom_qty': 1.0,
}
self.SaleOrderLine.create(sol_vals)
#confirm sale order
self.so.action_confirm()
#get all move associated to that sale_order
move_ids = self.so.picking_ids.mapped('move_lines').ids
        #we should have the same number of moves as components in the phantom bom
bom_component_length = self.MrpBom._bom_explode(self.bom, self.product_bom, 1.0, [])
self.assertEqual(len(move_ids), len(bom_component_length[0]))
def test_00_bom_find(self):
"""Check that _bom_find searches the bom corresponding to the properties passed or takes the bom with the smallest
sequence."""
res_id = self.MrpBom._bom_find(product_tmpl_id=self.template.id, product_id=None, properties=[self.mrp_property.id])
self.assertEqual(res_id, self.bom_prop.id)
def test_00_bom_explode(self):
"""Check that _bom_explode only takes the lines with the right properties."""
res = self.MrpBom._bom_explode(self.bom_prop_line, self.product_bom_prop, 1, properties=[self.mrp_property.id])
res = set([p['product_id'] for p in res[0]])
self.assertEqual(res, set([self.product_A.id, self.product_B.id]))
|
frdb194/ubuntu-tweak
|
refs/heads/master
|
ubuntutweak/settings/ccm/__init__.py
|
5
|
from Conflicts import *
from Constants import *
from Utils import *
|
Garrett-R/elasticsearch-py
|
refs/heads/master
|
test_elasticsearch/test_server/__init__.py
|
13
|
from elasticsearch.helpers.test import get_test_client, ElasticsearchTestCase as BaseTestCase
client = None
def get_client():
global client
if client is not None:
return client
# try and locate manual override in the local environment
try:
from test_elasticsearch.local import get_client as local_get_client
client = local_get_client()
except ImportError:
# fallback to using vanilla client
client = get_test_client()
return client
def setup():
get_client()
class ElasticsearchTestCase(BaseTestCase):
@staticmethod
def _get_client():
return get_client()
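# Illustrative sketch (not part of the test suite): test cases built on this
# module reuse the single lazily-created client; the assertion below assumes
# the base class exposes it as ``self.client``.
#
#   class MyTestCase(ElasticsearchTestCase):
#       def test_cluster_is_up(self):
#           self.assertTrue(self.client.ping())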
|
scollis/iris
|
refs/heads/master
|
lib/iris/tests/test_pp_module.py
|
1
|
# (C) British Crown Copyright 2013 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
from copy import deepcopy
import os
from types import GeneratorType
import unittest
import biggus
import netcdftime
import iris.fileformats
import iris.fileformats.pp as pp
import iris.util
@tests.skip_data
class TestPPCopy(tests.IrisTest):
def setUp(self):
self.filename = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
def test_copy_field_deferred(self):
field = pp.load(self.filename).next()
clone = field.copy()
self.assertIsInstance(clone._data, biggus.Array)
self.assertEqual(field, clone)
clone.lbyr = 666
self.assertNotEqual(field, clone)
def test_deepcopy_field_deferred(self):
field = pp.load(self.filename).next()
clone = deepcopy(field)
self.assertIsInstance(clone._data, biggus.Array)
self.assertEqual(field, clone)
clone.lbyr = 666
self.assertNotEqual(field, clone)
def test_copy_field_non_deferred(self):
field = pp.load(self.filename, True).next()
clone = field.copy()
self.assertEqual(field, clone)
clone.data[0][0] = 666
self.assertNotEqual(field, clone)
def test_deepcopy_field_non_deferred(self):
field = pp.load(self.filename, True).next()
clone = deepcopy(field)
self.assertEqual(field, clone)
clone.data[0][0] = 666
self.assertNotEqual(field, clone)
class IrisPPTest(tests.IrisTest):
def check_pp(self, pp_fields, reference_filename):
"""
Checks the given iterable of PPField objects matches the reference file, or creates the
reference file if it doesn't exist.
"""
# turn the generator into a list
pp_fields = list(pp_fields)
# Load deferred data for all of the fields (but don't do anything with it)
for pp_field in pp_fields:
pp_field.data
test_string = str(pp_fields)
reference_path = tests.get_result_path(reference_filename)
if os.path.isfile(reference_path):
reference = ''.join(open(reference_path, 'r').readlines())
self._assert_str_same(reference+'\n', test_string+'\n', reference_filename, type_comparison_name='PP files')
else:
tests.logger.warning('Creating result file: %s', reference_path)
open(reference_path, 'w').writelines(test_string)
class TestPPHeaderDerived(unittest.TestCase):
def setUp(self):
self.pp = pp.PPField2()
self.pp.lbuser = (0, 1, 2, 3, 4, 5, 6)
self.pp.lbtim = 11
self.pp.lbproc = 65539
def test_standard_access(self):
self.assertEqual(self.pp.lbtim, 11)
def test_lbtim_access(self):
self.assertEqual(self.pp.lbtim[0], 1)
self.assertEqual(self.pp.lbtim.ic, 1)
def test_lbtim_setter(self):
self.pp.lbtim[4] = 4
self.pp.lbtim[0] = 4
self.assertEqual(self.pp.lbtim[0], 4)
self.assertEqual(self.pp.lbtim.ic, 4)
self.pp.lbtim.ib = 9
self.assertEqual(self.pp.lbtim.ib, 9)
self.assertEqual(self.pp.lbtim[1], 9)
def test_lbproc_access(self):
# lbproc == 65539
self.assertEqual(self.pp.lbproc[0], 9)
self.assertEqual(self.pp.lbproc[19], 0)
self.assertEqual(self.pp.lbproc.flag1, 1)
self.assertEqual(self.pp.lbproc.flag65536, 1)
self.assertEqual(self.pp.lbproc.flag131072, 0)
def test_set_lbuser(self):
self.pp.stash = 'm02s12i003'
self.assertEqual(self.pp.stash, pp.STASH(2, 12, 3))
self.pp.lbuser[6] = 5
self.assertEqual(self.pp.stash, pp.STASH(5, 12, 3))
self.pp.lbuser[3] = 4321
self.assertEqual(self.pp.stash, pp.STASH(5, 4, 321))
def test_set_stash(self):
self.pp.stash = 'm02s12i003'
self.assertEqual(self.pp.stash, pp.STASH(2, 12, 3))
self.pp.stash = pp.STASH(3, 13, 4)
self.assertEqual(self.pp.stash, pp.STASH(3, 13, 4))
self.assertEqual(self.pp.lbuser[3], self.pp.stash.lbuser3())
self.assertEqual(self.pp.lbuser[6], self.pp.stash.lbuser6())
with self.assertRaises(ValueError):
self.pp.stash = (4, 15, 5)
def test_lbproc_bad_access(self):
try:
print self.pp.lbproc.flag65537
except AttributeError:
pass
except Exception, err:
self.fail("Should return a better error: " + str(err))
@tests.skip_data
class TestPPField_GlobalTemperature(IrisPPTest):
def setUp(self):
self.original_pp_filepath = tests.get_data_path(('PP', 'aPPglob1', 'global.pp'))
self.r = list(pp.load(self.original_pp_filepath))
def test_full_file(self):
self.check_pp(self.r[0:10], ('PP', 'global_test.pp.txt'))
def test_lbtim_access(self):
self.assertEqual(self.r[0].lbtim[0], 2)
self.assertEqual(self.r[0].lbtim.ic, 2)
def test_lbproc_access(self):
self.assertEqual(self.r[0].lbproc[0], 8)
self.assertEqual(self.r[0].lbproc[19], 0)
self.assertEqual(self.r[0].lbproc.flag1, 0)
self.assertEqual(self.r[0].lbproc.flag65536, 0)
self.assertEqual(self.r[0].lbproc.flag131072, 0)
def test_t1_t2_access(self):
self.assertEqual(self.r[0].t1.timetuple(), netcdftime.datetime(1994, 12, 1, 0, 0).timetuple())
def test_save_single(self):
temp_filename = iris.util.create_temp_filename(".pp")
self.r[0].save(open(temp_filename, 'wb'))
self.assertEqual(self.file_checksum(temp_filename), self.file_checksum(self.original_pp_filepath))
os.remove(temp_filename)
def test_save_api(self):
filepath = self.original_pp_filepath
f = pp.load(filepath).next()
temp_filename = iris.util.create_temp_filename(".pp")
f.save(open(temp_filename, 'wb'))
self.assertEqual(self.file_checksum(temp_filename), self.file_checksum(filepath))
os.remove(temp_filename)
@tests.skip_data
class TestPackedPP(IrisPPTest):
def test_wgdos(self):
r = pp.load(tests.get_data_path(('PP', 'wgdos_packed', 'nae.20100104-06_0001.pp')))
# Check that the result is a generator and convert to a list so that we can index and get the first one
self.assertEqual( type(r), GeneratorType)
r = list(r)
self.check_pp(r, ('PP', 'nae_unpacked.pp.txt'))
# check that trying to save this field again raises an error (we cannot currently write WGDOS packed fields)
temp_filename = iris.util.create_temp_filename(".pp")
self.assertRaises(NotImplementedError, r[0].save, open(temp_filename, 'wb'))
os.remove(temp_filename)
def test_rle(self):
r = pp.load(tests.get_data_path(('PP', 'ocean_rle', 'ocean_rle.pp')))
# Check that the result is a generator and convert to a list so that we can index and get the first one
self.assertEqual( type(r), GeneratorType)
r = list(r)
self.check_pp(r, ('PP', 'rle_unpacked.pp.txt'))
# check that trying to save this field again raises an error
# (we cannot currently write RLE packed fields)
with self.temp_filename('.pp') as temp_filename:
with self.assertRaises(NotImplementedError):
r[0].save(open(temp_filename, 'wb'))
@tests.skip_data
class TestPPFile(IrisPPTest):
def test_lots_of_extra_data(self):
r = pp.load(tests.get_data_path(('PP', 'cf_processing', 'HadCM2_ts_SAT_ann_18602100.b.pp')))
r = list(r)
self.assertEqual(r[0].lbcode.ix, 13)
self.assertEqual(r[0].lbcode.iy, 23)
self.assertEqual(len(r[0].lbcode), 5)
self.check_pp(r, ('PP', 'extra_data_time_series.pp.txt'))
@tests.skip_data
class TestPPFileExtraXData(IrisPPTest):
def setUp(self):
self.original_pp_filepath = tests.get_data_path(('PP', 'ukV1', 'ukVpmslont.pp'))
self.r = list(pp.load(self.original_pp_filepath))[0:5]
def test_full_file(self):
self.check_pp(self.r, ('PP', 'extra_x_data.pp.txt'))
def test_save_single(self):
filepath = tests.get_data_path(('PP', 'ukV1', 'ukVpmslont_first_field.pp'))
f = pp.load(filepath).next()
temp_filename = iris.util.create_temp_filename(".pp")
f.save(open(temp_filename, 'wb'))
s = pp.load(temp_filename).next()
# force the data to be loaded (this was done for f when save was run)
s.data
self._assert_str_same(str(s)+'\n', str(f)+'\n', '', type_comparison_name='PP files')
self.assertEqual(self.file_checksum(temp_filename), self.file_checksum(filepath))
os.remove(temp_filename)
@tests.skip_data
class TestPPFileWithExtraCharacterData(IrisPPTest):
def setUp(self):
self.original_pp_filepath = tests.get_data_path(('PP', 'model_comp', 'dec_subset.pp'))
self.r = pp.load(self.original_pp_filepath)
self.r_loaded_data = pp.load(self.original_pp_filepath, read_data=True)
# Check that the result is a generator and convert to a list so that we can index and get the first one
self.assertEqual( type(self.r), GeneratorType)
self.r = list(self.r)
self.assertEqual( type(self.r_loaded_data), GeneratorType)
self.r_loaded_data = list(self.r_loaded_data)
def test_extra_field_title(self):
self.assertEqual(self.r[0].field_title, 'AJHQA Time mean !C Atmos u compnt of wind after timestep at 9.998 metres !C 01/12/2007 00:00 -> 01/01/2008 00:00')
def test_full_file(self):
self.check_pp(self.r[0:10], ('PP', 'extra_char_data.pp.txt'))
self.check_pp(self.r_loaded_data[0:10], ('PP', 'extra_char_data.w_data_loaded.pp.txt'))
def test_save_single(self):
filepath = tests.get_data_path(('PP', 'model_comp', 'dec_first_field.pp'))
f = pp.load(filepath).next()
temp_filename = iris.util.create_temp_filename(".pp")
f.save(open(temp_filename, 'wb'))
s = pp.load(temp_filename).next()
# force the data to be loaded (this was done for f when save was run)
s.data
self._assert_str_same(str(s)+'\n', str(f)+'\n', '', type_comparison_name='PP files')
self.assertEqual(self.file_checksum(temp_filename), self.file_checksum(filepath))
os.remove(temp_filename)
class TestBitwiseInt(unittest.TestCase):
def test_3(self):
t = pp.BitwiseInt(3)
self.assertEqual(t[0], 3)
self.assertTrue(t.flag1)
self.assertTrue(t.flag2)
self.assertRaises(AttributeError, getattr, t, "flag1024")
def test_setting_flags(self):
t = pp.BitwiseInt(3)
self.assertEqual(t._value, 3)
t.flag1 = False
self.assertEqual(t._value, 2)
t.flag2 = False
self.assertEqual(t._value, 0)
t.flag1 = True
self.assertEqual(t._value, 1)
t.flag2 = True
self.assertEqual(t._value, 3)
self.assertRaises(AttributeError, setattr, t, "flag1024", True)
self.assertRaises(TypeError, setattr, t, "flag2", 1)
t = pp.BitwiseInt(3, num_bits=11)
t.flag1024 = True
self.assertEqual(t._value, 1027)
def test_standard_operators(self):
t = pp.BitwiseInt(323)
self.assertTrue(t == 323)
self.assertFalse(t == 324)
self.assertFalse(t != 323)
self.assertTrue(t != 324)
self.assertTrue(t >= 323)
self.assertFalse(t >= 324)
self.assertFalse(t > 323)
self.assertTrue(t > 322)
self.assertTrue(t <= 323)
self.assertFalse(t <= 322)
self.assertFalse(t < 323)
self.assertTrue(t < 324)
self.assertTrue(t in [323])
self.assertFalse(t in [324])
def test_323(self):
t = pp.BitwiseInt(323)
self.assertRaises(AttributeError, getattr, t, 'flag0')
self.assertEqual(t.flag1, 1)
self.assertEqual(t.flag2, 1)
self.assertEqual(t.flag4, 0)
self.assertEqual(t.flag8, 0)
self.assertEqual(t.flag16, 0)
self.assertEqual(t.flag32, 0)
self.assertEqual(t.flag64, 1)
self.assertEqual(t.flag128, 0)
self.assertEqual(t.flag256, 1)
def test_33214(self):
t = pp.BitwiseInt(33214)
self.assertEqual(t[0], 4)
self.assertEqual(t.flag1, 0)
self.assertEqual(t.flag2, 1)
def test_negative_number(self):
try:
_ = pp.BitwiseInt(-5)
except ValueError, err:
self.assertEqual(str(err), 'Negative numbers not supported with splittable integers object')
def test_128(self):
t = pp.BitwiseInt(128)
self.assertEqual(t.flag1, 0)
self.assertEqual(t.flag2, 0)
self.assertEqual(t.flag4, 0)
self.assertEqual(t.flag8, 0)
self.assertEqual(t.flag16, 0)
self.assertEqual(t.flag32, 0)
self.assertEqual(t.flag64, 0)
self.assertEqual(t.flag128, 1)
class TestSplittableInt(unittest.TestCase):
def test_3(self):
t = pp.SplittableInt(3)
self.assertEqual(t[0], 3)
def test_grow_str_list(self):
t = pp.SplittableInt(3)
t[1] = 3
self.assertEqual(t[1], 3)
t[5] = 4
self.assertEqual(t[5], 4)
self.assertEqual( int(t), 400033)
self.assertEqual(t, 400033)
self.assertNotEqual(t, 33)
self.assertTrue(t >= 400033)
self.assertFalse(t >= 400034)
self.assertTrue(t <= 400033)
self.assertFalse(t <= 400032)
self.assertTrue(t > 400032)
self.assertFalse(t > 400034)
self.assertTrue(t < 400034)
self.assertFalse(t < 400032)
def test_name_mapping(self):
t = pp.SplittableInt(33214, {'ones':0, 'tens':1, 'hundreds':2})
self.assertEqual(t.ones, 4)
self.assertEqual(t.tens, 1)
self.assertEqual(t.hundreds, 2)
t.ones = 9
t.tens = 4
t.hundreds = 0
self.assertEqual(t.ones, 9)
self.assertEqual(t.tens, 4)
self.assertEqual(t.hundreds, 0)
def test_name_mapping_multi_index(self):
t = pp.SplittableInt(33214, {'weird_number':slice(None, None, 2),
'last_few':slice(-2, -5, -2),
'backwards':slice(None, None, -1)})
self.assertEqual(t.weird_number, 324)
self.assertEqual(t.last_few, 13)
self.assertRaises(ValueError, setattr, t, 'backwards', 1)
self.assertRaises(ValueError, setattr, t, 'last_few', 1)
self.assertEqual(t.backwards, 41233)
self.assertEqual(t, 33214)
t.weird_number = 99
# notice that this will zero the 5th number
self.assertEqual(t, 3919)
t.weird_number = 7899
self.assertEqual(t, 7083919)
t.foo = 1
t = pp.SplittableInt(33214, {'ix':slice(None, 2), 'iy':slice(2, 4)})
self.assertEqual(t.ix, 14)
self.assertEqual(t.iy, 32)
t.ix = 21
self.assertEqual(t, 33221)
t = pp.SplittableInt(33214, {'ix':slice(-1, 2)})
self.assertEqual(t.ix, 0)
t = pp.SplittableInt(4, {'ix':slice(None, 2), 'iy':slice(2, 4)})
self.assertEqual(t.ix, 4)
self.assertEqual(t.iy, 0)
def test_33214(self):
t = pp.SplittableInt(33214)
self.assertEqual(t[4], 3)
self.assertEqual(t[3], 3)
self.assertEqual(t[2], 2)
self.assertEqual(t[1], 1)
self.assertEqual(t[0], 4)
# The rest should be zero
for i in range(5, 100):
self.assertEqual(t[i], 0)
def test_negative_number(self):
self.assertRaises(ValueError, pp.SplittableInt, -5)
try:
_ = pp.SplittableInt(-5)
except ValueError, err:
self.assertEqual(str(err), 'Negative numbers not supported with splittable integers object')
class TestSplittableIntEquality(unittest.TestCase):
def test_not_implemented(self):
class Terry(object): pass
sin = pp.SplittableInt(0)
self.assertIs(sin.__eq__(Terry()), NotImplemented)
self.assertIs(sin.__ne__(Terry()), NotImplemented)
class TestPPDataProxyEquality(unittest.TestCase):
def test_not_implemented(self):
class Terry(object): pass
pox = pp.PPDataProxy("john", "michael", "eric", "graham", "brian",
"spam", "beans", "eggs")
self.assertIs(pox.__eq__(Terry()), NotImplemented)
self.assertIs(pox.__ne__(Terry()), NotImplemented)
class TestPPFieldEquality(unittest.TestCase):
def test_not_implemented(self):
class Terry(object): pass
pox = pp.PPField3()
self.assertIs(pox.__eq__(Terry()), NotImplemented)
self.assertIs(pox.__ne__(Terry()), NotImplemented)
if __name__ == "__main__":
tests.main()
|
saadatqadri/django-oscar
|
refs/heads/master
|
tests/unit/dashboard/voucher_form_tests.py
|
53
|
from django import test
from oscar.apps.dashboard.vouchers import forms
class TestVoucherForm(test.TestCase):
def test_doesnt_crash_on_empty_date_fields(self):
"""
There was a bug fixed in 02b3644 where the voucher form would raise an
exception (instead of just failing validation) when being called with
        empty fields. This test exists to prevent a regression.
"""
data = {
'code': '',
'name': '',
'start_date': '',
'end_date': '',
'benefit_range': '',
'benefit_type': 'Percentage',
'usage': 'Single use',
}
form = forms.VoucherForm(data=data)
try:
form.is_valid()
except Exception as e:
import traceback
self.fail(
"Exception raised while validating voucher form: %s\n\n%s" % (
e.message, traceback.format_exc()))
|
alfayez/gnuradio
|
refs/heads/master
|
gr-howto-write-a-block/docs/doxygen/doxyxml/generated/index.py
|
344
|
#!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
import os
import sys
import compound
import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compound=None):
supermod.DoxygenType.__init__(self, version, compound)
def find_compounds_and_members(self, details):
"""
Returns a list of all compounds and their members which match details
"""
results = []
for compound in self.compound:
members = compound.find_members(details)
if members:
results.append([compound, members])
else:
if details.match(compound):
results.append([compound, []])
return results
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class CompoundTypeSub(supermod.CompoundType):
def __init__(self, kind=None, refid=None, name='', member=None):
supermod.CompoundType.__init__(self, kind, refid, name, member)
def find_members(self, details):
"""
Returns a list of all members which match details
"""
results = []
for member in self.member:
if details.match(member):
results.append(member)
return results
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
class MemberTypeSub(supermod.MemberType):
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
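# Usage sketch (hedged; the path and the `details` object are hypothetical and
# supplied by the caller). `parse` builds the tree via the generateDS factory,
# which is expected to return the *Sub classes registered above, so the result
# supports find_compounds_and_members:
#     root = parse("xml/index.xml")
#     matches = root.find_compounds_and_members(details)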
|
jparyani/Mailpile
|
refs/heads/sandstorm
|
mailpile/plugins/keylookup/nicknym.py
|
2
|
#coding:utf-8
from mailpile.commands import Command
from mailpile.conn_brokers import Master as ConnBroker
from mailpile.plugins import PluginManager
from mailpile.plugins.search import Search
from mailpile.mailutils import Email
# from mailpile.crypto.state import *
from mailpile.crypto.gpgi import GnuPG
import httplib
import re
import socket
import sys
import urllib
import urllib2
import ssl
import json
# TODO:
# * SSL certificate validation
# * Check nicknym server for a given host
# * Store provider keys on first discovery
# * Verify provider key signature
class Nicknym:
def __init__(self, config):
self.config = config
def get_key(self, address, keytype="openpgp", server=None):
"""
Request a key for address.
"""
result, signature = self._nickserver_get_key(address, keytype, server)
if self._verify_result(result, signature):
return self._import_key(result, keytype)
return False
def refresh_keys(self):
"""
Refresh all known keys.
"""
for addr, keytype in self._get_managed_keys():
result, signature = self._nickserver_get_key(addr, keytype)
# TODO: Check whether it needs refreshing and is valid
if self._verify_result(result, signature):
self._import_key(result, keytype)
def send_key(self, address, public_key, type):
"""
Send a new key to the nickserver
"""
        # TODO: Unimplemented. There is currently no authentication mechanism
        # defined in the Nicknym standard
raise NotImplementedError()
def _parse_result(self, result):
"""Parse the result into a JSON blob and a signature"""
# TODO: No signature implemented on server side yet.
# See https://leap.se/code/issues/5340
return json.loads(result), ""
def _nickserver_get_key(self, address, keytype="openpgp", server=None):
if server == None: server = self._discover_server(address)
data = urllib.urlencode({"address": address})
with ConnBroker.context(need=[ConnBroker.OUTGOING_HTTP]):
r = urllib2.urlopen(server, data)
result = r.read()
result, signature = self._parse_result(result)
return result, signature
def _import_key(self, result, keytype):
if keytype == "openpgp":
g = GnuPG(self.config)
res = g.import_keys(result[keytype])
if len(res["updated"]):
self._managed_keys_add(result["address"], keytype)
return res
else:
# We currently only support OpenPGP keys
return False
def _get_providerkey(self, domain):
"""
Request a provider key for the appropriate domain.
This is equivalent to get_key() with address=domain,
except it should store the provider key in an
appropriate key store
"""
pass
def _verify_providerkey(self, domain):
"""
...
"""
pass
def _verify_result(self, result, signature):
"""
Verify that the JSON result blob is correctly signed,
and that the signature is from the correct provider key.
"""
# No signature. See https://leap.se/code/issues/5340
return True
def _discover_server(self, address):
"""
Automatically detect which nicknym server to query
based on the address.
"""
# TODO: Actually perform some form of lookup
addr = address.split("@")
addr.reverse()
domain = addr[0]
return "https://nicknym.%s:6425/" % domain
def _audit_key(self, address, keytype, server):
"""
Ask an alternative server for a key to verify that
the same result is being provided.
"""
result, signature = self._nickserver_get_key(address, keytype, server)
if self._verify_result(result, signature):
# TODO: verify that the result is acceptable
pass
return True
def _managed_keys_add(self, address, keytype):
try:
data = self.config.load_pickle("nicknym.cache")
except IOError:
data = []
data.append((address, keytype))
data = list(set(data))
self.config.save_pickle(data, "nicknym.cache")
def _managed_keys_remove(self, address, keytype):
try:
data = self.config.load_pickle("nicknym.cache")
except IOError:
data = []
data.remove((address, keytype))
self.config.save_pickle(data, "nicknym.cache")
def _get_managed_keys(self):
try:
return self.config.load_pickle("nicknym.cache")
except IOError:
return []
class NicknymGetKey(Command):
"""Get a key from a nickserver"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/nicknym/getkey', 'crypto/nicknym/getkey',
'<address> [<keytype>] [<server>]')
HTTP_CALLABLE = ('POST',)
HTTP_QUERY_VARS = {
'address': 'The nick/address to fetch a key for',
'keytype': 'What type of key to import (defaults to OpenPGP)',
'server': 'The Nicknym server to use (defaults to autodetect)'}
def command(self):
address = self.data.get('address', self.args[0])
keytype = self.data.get('keytype', None)
server = self.data.get('server', None)
if len(self.args) > 1:
keytype = self.args[1]
else:
keytype = 'openpgp'
if len(self.args) > 2:
server = self.args[2]
n = Nicknym(self.session.config)
return n.get_key(address, keytype, server)
class NicknymRefreshKeys(Command):
"""Get a key from a nickserver"""
ORDER = ('', 0)
SYNOPSIS = (None, 'crypto/nicknym/refreshkeys',
'crypto/nicknym/refreshkeys', '')
HTTP_CALLABLE = ('POST',)
def command(self):
n = Nicknym(self.session.config)
n.refresh_keys()
return True
_plugins = PluginManager(builtin=__file__)
_plugins.register_commands(NicknymGetKey)
_plugins.register_commands(NicknymRefreshKeys)
if __name__ == "__main__":
n = Nicknym()
print n.get_key("varac@bitmask.net")
|
dyeden/earthengine-api
|
refs/heads/master
|
python/examples/Image/hillshade.py
|
3
|
#!/usr/bin/env python
"""Compute hillshade from elevation."""
import math
import ee
import ee.mapclient
ee.Initialize()
ee.mapclient.centerMap(-121.767, 46.852, 11)
def Radians(img):
return img.toFloat().multiply(math.pi).divide(180)
def Hillshade(az, ze, slope, aspect):
"""Compute hillshade for the given illumination az, el."""
azimuth = Radians(ee.Image(az))
zenith = Radians(ee.Image(ze))
# Hillshade = cos(Azimuth - Aspect) * sin(Slope) * sin(Zenith) +
# cos(Zenith) * cos(Slope)
return (azimuth.subtract(aspect).cos()
.multiply(slope.sin())
.multiply(zenith.sin())
.add(
zenith.cos().multiply(slope.cos())))
terrain = ee.Algorithms.Terrain(ee.Image('srtm90_v4'))
slope_img = Radians(terrain.select('slope'))
aspect_img = Radians(terrain.select('aspect'))
# Add 1 hillshade at az=0, el=60.
ee.mapclient.addToMap(Hillshade(0, 60, slope_img, aspect_img))
|
matthappens/taskqueue
|
refs/heads/master
|
taskqueue/venv_tq/lib/python2.7/site-packages/PIL/PsdImagePlugin.py
|
14
|
#
# The Python Imaging Library
# $Id$
#
# Adobe PSD 2.5/3.0 file handling
#
# History:
# 1995-09-01 fl Created
# 1997-01-03 fl Read most PSD images
# 1997-01-18 fl Fixed P and CMYK support
# 2001-10-21 fl Added seek/tell support (for layers)
#
# Copyright (c) 1997-2001 by Secret Labs AB.
# Copyright (c) 1995-2001 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.4"
from PIL import Image, ImageFile, ImagePalette, _binary
MODES = {
# (photoshop mode, bits) -> (pil mode, required channels)
(0, 1): ("1", 1),
(0, 8): ("L", 1),
(1, 8): ("L", 1),
(2, 8): ("P", 1),
(3, 8): ("RGB", 3),
(4, 8): ("CMYK", 4),
(7, 8): ("L", 1), # FIXME: multilayer
(8, 8): ("L", 1), # duotone
(9, 8): ("LAB", 3)
}
#
# helpers
i8 = _binary.i8
i16 = _binary.i16be
i32 = _binary.i32be
# --------------------------------------------------------------------.
# read PSD images
def _accept(prefix):
return prefix[:4] == b"8BPS"
##
# Image plugin for Photoshop images.
class PsdImageFile(ImageFile.ImageFile):
format = "PSD"
format_description = "Adobe Photoshop"
def _open(self):
read = self.fp.read
#
# header
s = read(26)
if s[:4] != b"8BPS" or i16(s[4:]) != 1:
raise SyntaxError("not a PSD file")
psd_bits = i16(s[22:])
psd_channels = i16(s[12:])
psd_mode = i16(s[24:])
mode, channels = MODES[(psd_mode, psd_bits)]
if channels > psd_channels:
raise IOError("not enough channels")
self.mode = mode
self.size = i32(s[18:]), i32(s[14:])
#
# color mode data
size = i32(read(4))
if size:
data = read(size)
if mode == "P" and size == 768:
self.palette = ImagePalette.raw("RGB;L", data)
#
# image resources
self.resources = []
size = i32(read(4))
if size:
# load resources
end = self.fp.tell() + size
while self.fp.tell() < end:
signature = read(4)
id = i16(read(2))
name = read(i8(read(1)))
if not (len(name) & 1):
read(1) # padding
data = read(i32(read(4)))
if (len(data) & 1):
read(1) # padding
self.resources.append((id, name, data))
if id == 1039: # ICC profile
self.info["icc_profile"] = data
#
# layer and mask information
self.layers = []
size = i32(read(4))
if size:
end = self.fp.tell() + size
size = i32(read(4))
if size:
self.layers = _layerinfo(self.fp)
self.fp.seek(end)
#
# image descriptor
self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
# keep the file open
self._fp = self.fp
self.frame = 0
def seek(self, layer):
# seek to given layer (1..max)
if layer == self.frame:
return
try:
if layer <= 0:
raise IndexError
name, mode, bbox, tile = self.layers[layer-1]
self.mode = mode
self.tile = tile
self.frame = layer
self.fp = self._fp
return name, bbox
except IndexError:
raise EOFError("no such layer")
def tell(self):
# return layer number (0=image, 1..max=layers)
return self.frame
def load_prepare(self):
# create image memory if necessary
if not self.im or\
self.im.mode != self.mode or self.im.size != self.size:
self.im = Image.core.fill(self.mode, self.size, 0)
# create palette (optional)
if self.mode == "P":
Image.Image.load(self)
def _layerinfo(file):
# read layerinfo block
layers = []
read = file.read
for i in range(abs(i16(read(2)))):
# bounding box
y0 = i32(read(4)); x0 = i32(read(4))
y1 = i32(read(4)); x1 = i32(read(4))
# image info
info = []
mode = []
types = list(range(i16(read(2))))
if len(types) > 4:
continue
for i in types:
type = i16(read(2))
if type == 65535:
m = "A"
else:
m = "RGBA"[type]
mode.append(m)
size = i32(read(4))
info.append((m, size))
# figure out the image mode
mode.sort()
if mode == ["R"]:
mode = "L"
elif mode == ["B", "G", "R"]:
mode = "RGB"
elif mode == ["A", "B", "G", "R"]:
mode = "RGBA"
else:
mode = None # unknown
# skip over blend flags and extra information
filler = read(12)
name = ""
size = i32(read(4))
combined = 0
if size:
length = i32(read(4))
if length:
mask_y = i32(read(4)); mask_x = i32(read(4))
mask_h = i32(read(4)) - mask_y; mask_w = i32(read(4)) - mask_x
file.seek(length - 16, 1)
combined += length + 4
length = i32(read(4))
if length:
file.seek(length, 1)
combined += length + 4
length = i8(read(1))
if length:
            # Don't know the proper encoding; Latin-1 should be a good guess
name = read(length).decode('latin-1', 'replace')
combined += length + 1
file.seek(size - combined, 1)
layers.append((name, mode, (x0, y0, x1, y1)))
# get tiles
i = 0
for name, mode, bbox in layers:
tile = []
for m in mode:
t = _maketile(file, m, bbox, 1)
if t:
tile.extend(t)
layers[i] = name, mode, bbox, tile
i = i + 1
return layers
def _maketile(file, mode, bbox, channels):
tile = None
read = file.read
compression = i16(read(2))
xsize = bbox[2] - bbox[0]
ysize = bbox[3] - bbox[1]
offset = file.tell()
if compression == 0:
#
# raw compression
tile = []
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer = layer + ";I"
tile.append(("raw", bbox, offset, layer))
offset = offset + xsize*ysize
elif compression == 1:
#
# packbits compression
i = 0
tile = []
bytecount = read(channels * ysize * 2)
offset = file.tell()
for channel in range(channels):
layer = mode[channel]
if mode == "CMYK":
layer = layer + ";I"
tile.append(
("packbits", bbox, offset, layer)
)
for y in range(ysize):
offset = offset + i16(bytecount[i:i+2])
i = i + 2
file.seek(offset)
if offset & 1:
read(1) # padding
return tile
# --------------------------------------------------------------------
# registry
Image.register_open("PSD", PsdImageFile, _accept)
Image.register_extension("PSD", ".psd")
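# Usage sketch (hedged; the file name is hypothetical). Once registered as
# above, PSD files open through the normal PIL entry point:
#     from PIL import Image
#     im = Image.open("layers.psd")   # _accept() matches the "8BPS" signature
#     im.seek(1)                      # step into the first layer, if any
#     im.tell()                       # -> 1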
|
joshuajan/odoo
|
refs/heads/master
|
addons/membership/__init__.py
|
441
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mapleoin/factory_boy
|
refs/heads/master
|
tests/alchemyapp/__init__.py
|
12133432
| |
mission-liao/pyswagger
|
refs/heads/develop
|
pyswagger/tests/__init__.py
|
12133432
| |
zmughal/pygments-mirror
|
refs/heads/master
|
pygments/cmdline.py
|
22
|
# -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
get_lexer_for_filename, find_lexer_class_for_filename, TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-s] [-v] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
If -s is passed, lexing will be done in "streaming" mode, reading and
highlighting one line at a time. This will only work properly with
lexers that have no constructs spanning multiple lines!
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -s option processes lines one at a time until EOF, rather than
waiting to process the entire file. This only works for stdin, and
is intended for streaming input such as you get from 'tail -f'.
Example usage: "tail -f sql.log | pygmentize -s -l sql"
The -v option prints a detailed traceback on unhandled exceptions,
which is useful for debugging and bug reports.
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str.strip():
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
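# Example of the parsing above (a sketch; the option string mirrors the
# ``-O bg=light,python=cool`` sample from the USAGE text):
#     _parse_options(['bg=light,python=cool', 'linenos'])
#     # -> {'bg': 'light', 'python': 'cool', 'linenos': True}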
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
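# Example of the filter parsing above (a sketch; options follow the
# ``-F <filter>[:<options>]`` form from the USAGE text):
#     _parse_filters(['whitespace:spaces=True', 'raiseonerror'])
#     # -> [('whitespace', {'spaces': 'True'}), ('raiseonerror', {})]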
def _print_help(what, name):
try:
if what == 'lexer':
cls = get_lexer_by_name(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
return 0
except (AttributeError, ValueError):
print("%s not found!" % what, file=sys.stderr)
return 1
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def main_inner(popts, args, usage):
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2015 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
return _print_help(what, name)
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# encodings
inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
lexer = find_lexer_class_for_filename(infn)
if lexer is None:
lexer = TextLexer
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
print(fmter.get_style_defs(a_opt or ''))
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
# select lexer
lexer = None
# given by name?
lexername = opts.pop('-l', None)
if lexername:
try:
lexer = get_lexer_by_name(lexername, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
# read input code
code = None
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
if '-s' in opts:
print('Error: -s option not usable when input file specified',
file=sys.stderr)
return 2
infn = args[0]
try:
with open(infn, 'rb') as infp:
code = infp.read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not inencoding:
code, inencoding = guess_decode(code)
# do we have to guess the lexer?
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
elif '-s' not in opts: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
if sys.version_info > (3,):
# Python 3: we have to use .buffer to get a binary stream
code = sys.stdin.buffer.read()
else:
code = sys.stdin.read()
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
if not lexer:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else: # -s option needs a lexer with -l
if not lexer:
print('Error: when using -s a lexer has to be selected with -l',
file=sys.stderr)
return 2
# process filters
for fname, fopts in F_opts:
try:
lexer.add_filter(fname, **fopts)
except ClassNotFound as err:
print('Error:', err, file=sys.stderr)
return 1
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
fmter = TerminalFormatter(**parsed_opts)
if sys.version_info > (3,):
# Python 3: we have to use .buffer to get a binary stream
outfile = sys.stdout.buffer
else:
outfile = sys.stdout
# determine output encoding if not explicitly selected
if not outencoding:
if outfn:
# output file? use lexer encoding for now (can still be None)
fmter.encoding = inencoding
else:
# else use terminal encoding
fmter.encoding = terminal_encoding(sys.stdout)
# provide coloring under Windows, if possible
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
# unfortunately colorama doesn't support binary streams on Py3
if sys.version_info > (3,):
from pygments.util import UnclosingTextIOWrapper
outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
fmter.encoding = None
try:
import colorama.initialise
except ImportError:
pass
else:
outfile = colorama.initialise.wrap_stream(
outfile, convert=None, strip=None, autoreset=False, wrap=True)
# When using the LaTeX formatter and the option `escapeinside` is
# specified, we need a special lexer which collects escaped text
# before running the chosen language lexer.
escapeinside = parsed_opts.get('escapeinside', '')
if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
left = escapeinside[0]
right = escapeinside[1]
lexer = LatexEmbeddedLexer(left, right, lexer)
# ... and do it!
if '-s' not in opts:
# process whole input as per normal...
highlight(code, lexer, fmter, outfile)
return 0
else:
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
if sys.version_info > (3,):
# Python 3: we have to use .buffer to get a binary stream
line = sys.stdin.buffer.readline()
else:
line = sys.stdin.readline()
if not line:
break
if not inencoding:
line = guess_decode_from_terminal(line, sys.stdin)[0]
highlight(line, lexer, fmter, outfile)
if hasattr(outfile, 'flush'):
outfile.flush()
return 0
except KeyboardInterrupt: # pragma: no cover
return 0
def main(args=sys.argv):
"""
Main command line entry point.
"""
usage = USAGE % ((args[0],) * 6)
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:vhVHgs")
except getopt.GetoptError:
print(usage, file=sys.stderr)
return 2
try:
return main_inner(popts, args, usage)
except Exception:
if '-v' in dict(popts):
print(file=sys.stderr)
print('*' * 65, file=sys.stderr)
print('An unhandled exception occurred while highlighting.',
file=sys.stderr)
print('Please report the whole traceback to the issue tracker at',
file=sys.stderr)
print('<https://bitbucket.org/birkenfeld/pygments-main/issues>.',
file=sys.stderr)
print('*' * 65, file=sys.stderr)
print(file=sys.stderr)
raise
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
print('*** If this is a bug you want to report, please rerun with -v.',
file=sys.stderr)
return 1
|
pravisankar/origin
|
refs/heads/master
|
vendor/github.com/google/certificate-transparency/python/demo/vdb/verifiable_base.py
|
30
|
import cPickle as pickle
from verifiable_log import VerifiableLog
from verifiable_map import VerifiableMap
# Extend this class, override _apply_operation and add your own API to:
# 1. append to log
# 2. read from map
class VerifiableBase:
def __init__(self, log):
# The log, such as a VerifiableLog
self._log = log
# Internal map that we use. The mapper is responsible for mutating this
# when triggered by log changes.
self._map = VerifiableMap()
# How many log changes have been processed
self._ops_processed = 0
# After we process a log operation, we capture the corresponding map
# mutation index which may be higher or lower.
self._log_sth_to_map_sth = {0: 0}
# Called internally to poll the log and process all updates
def _update_from_log(self):
log_size = self._log.get_tree_head()['tree_size']
ctr = 0
while log_size > self._ops_processed:
for entry in self._log.get_entries(self._ops_processed, log_size - 1):
# Call mapper
self._apply_operation(self._ops_processed, entry, self._map)
self._ops_processed += 1
self._log_sth_to_map_sth[self._ops_processed] = self._map.get_tree_head()['tree_size']
  # Called for each new entry sequenced by the log, via _update_from_log above
  # Override me!
def _apply_operation(self, idx, entry, map):
pass
  # Get the value and proof for a key. tree_size is the number of entries in the log
def get(self, key, tree_size):
if tree_size > self._ops_processed:
raise ValueError
return self._map.get(key, self._log_sth_to_map_sth[tree_size])
  # Return the current tree head. This triggers fetching the latest entries
  # from the log (if needed); the returned tree_size should be passed to the
  # corresponding get() calls.
def get_tree_head(self, tree_size=None):
if tree_size is None or tree_size > self._ops_processed:
self._update_from_log()
if tree_size is None:
tree_size = self._ops_processed
if tree_size > self._ops_processed:
raise ValueError
rv = self._map.get_tree_head(self._log_sth_to_map_sth[tree_size])
rv['tree_size'] = tree_size # override what the map says
rv['log_tree_head'] = self._log.get_tree_head(tree_size)
return rv
def get_log_entries(self, start, end):
return self._log.get_entries(start, end)
def get_log_consistency(self, first, second):
return self._log.consistency_proof(first, second)
  # Dump the internal map state for debugging. tree_size is the number of entries in the log
def debug_dump(self, tree_size):
return self._map._root.debug_dump(self._log_sth_to_map_sth[self.get_tree_head(tree_size)['tree_size']])
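# A minimal extension sketch (not part of the original demo) following the
# comment at the top of this module: subclass, override _apply_operation, and
# add your own append/read API. The map mutation call is left as a placeholder
# because VerifiableMap's write API is not shown in this file; `log` is assumed
# to be a VerifiableLog instance and `some_key` a key previously written.
#
#     class KeyValueBase(VerifiableBase):
#         def _apply_operation(self, idx, entry, map):
#             key, value = pickle.loads(entry)  # assumes pickled (key, value) entries
#             ...                               # write (key, value) into `map` here
#
#     base = KeyValueBase(log)
#     head = base.get_tree_head()               # pulls any new log entries
#     result = base.get(some_key, head['tree_size'])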
|
portnov/sverchok
|
refs/heads/master
|
ui/nodeview_bgl_viewer_draw_mk2.py
|
3
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import pprint
import re
from collections import defaultdict
import bpy
import blf
import bgl
from bpy.types import SpaceNodeEditor
from sverchok import node_tree
callback_dict = {}
point_dict = {}
def adjust_list(in_list, x, y):
return [[old_x + x, old_y + y] for (old_x, old_y) in in_list]
def parse_socket(socket, rounding, element_index, view_by_element, props):
data = socket.sv_get(deepcopy=False)
num_data_items = len(data)
if num_data_items > 0 and view_by_element:
if element_index < num_data_items:
data = data[element_index]
str_width = props.line_width
# okay, here we should be more clever and extract part of the list
    # to avoid the amount of time it takes to format it.
content_str = pprint.pformat(data, width=str_width, depth=props.depth, compact=props.compact)
content_array = content_str.split('\n')
if len(content_array) > 20:
''' first 10, ellipses, last 10 '''
ellipses = ['... ... ...']
head = content_array[0:10]
tail = content_array[-10:]
display_text = head + ellipses + tail
elif len(content_array) == 1:
''' split on subunit - case of no newline to split on. '''
content_array = content_array[0].replace("), (", "),\n (")
display_text = content_array.split("\n")
else:
display_text = content_array
# http://stackoverflow.com/a/7584567/1243487
rounded_vals = re.compile(r"\d*\.\d+")
def mround(match):
format_string = "{{:.{0}g}}".format(rounding)
return format_string.format(float(match.group()))
out = []
for line in display_text:
out.append(re.sub(rounded_vals, mround, line) if not "bpy." in line else line)
return out
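# Example of the rounding pass above (a sketch): with rounding=3 the regex
# substitution turns
#     "[(0.123456, 1.0)]"   into   "[(0.123, 1)]"
# while any line containing "bpy." is passed through untouched.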
## end of util functions
def tag_redraw_all_nodeviews():
for window in bpy.context.window_manager.windows:
for area in window.screen.areas:
if area.type == 'NODE_EDITOR':
for region in area.regions:
if region.type == 'WINDOW':
region.tag_redraw()
def callback_enable(*args):
n_id = args[0]
global callback_dict
if n_id in callback_dict:
return
handle_pixel = SpaceNodeEditor.draw_handler_add(draw_callback_px, args, 'WINDOW', 'POST_VIEW')
callback_dict[n_id] = handle_pixel
tag_redraw_all_nodeviews()
def callback_disable(n_id):
global callback_dict
handle_pixel = callback_dict.get(n_id, None)
if not handle_pixel:
return
SpaceNodeEditor.draw_handler_remove(handle_pixel, 'WINDOW')
del callback_dict[n_id]
tag_redraw_all_nodeviews()
def callback_disable_all():
global callback_dict
temp_list = list(callback_dict.keys())
for n_id in temp_list:
if n_id:
callback_disable(n_id)
def draw_text_data(data):
lines = data.get('content', 'no data')
x, y = data.get('location', (120, 120))
x, y = int(x), int(y)
color = data.get('color', (0.1, 0.1, 0.1))
font_id = data.get('font_id', 0)
scale = data.get('scale', 1.0)
text_height = 15 * scale
line_height = 14 * scale
# why does the text look so jagged? <-- still valid question
# dpi = bpy.context.user_preferences.system.dpi
blf.size(font_id, int(text_height), 72)
bgl.glColor3f(*color)
ypos = y
for line in lines:
blf.position(0, x, ypos, 0)
blf.draw(font_id, line)
ypos -= int(line_height * 1.3)
def draw_rect(x=0, y=0, w=30, h=10, color=(0.0, 0.0, 0.0, 1.0)):
bgl.glColor4f(*color)
bgl.glBegin(bgl.GL_POLYGON)
for coord in [(x, y), (x+w, y), (w+x, y-h), (x, y-h)]:
bgl.glVertex2f(*coord)
bgl.glEnd()
def draw_triangle(x=0, y=0, w=30, h=10, color=(1.0, 0.3, 0.3, 1.0)):
bgl.glColor4f(*color)
bgl.glBegin(bgl.GL_TRIANGLES)
for coord in [(x, y), (x+w, y), (x + (w/2), y-h)]:
bgl.glVertex2f(*coord)
bgl.glEnd()
def draw_graphical_data(data):
lines = data.get('content')
x, y = data.get('location', (120, 120))
color = data.get('color', (0.1, 0.1, 0.1))
font_id = data.get('font_id', 0)
scale = data.get('scale', 1.0)
text_height = 15 * scale
if not lines:
return
blf.size(font_id, int(text_height), 72)
def draw_text(color, xpos, ypos, line):
bgl.glColor3f(*color)
blf.position(0, xpos, ypos, 0)
blf.draw(font_id, line)
return blf.dimensions(font_id, line)
lineheight = 20 * scale
num_containers = len(lines)
for idx, line in enumerate(lines):
y_pos = y - (idx*lineheight)
gfx_x = x
num_items = str(len(line))
kind_of_item = type(line).__name__
tx, _ = draw_text(color, gfx_x, y_pos, "{0} of {1} items".format(kind_of_item, num_items))
gfx_x += (tx + 5)
content_dict = defaultdict(int)
for item in line:
content_dict[type(item).__name__] += 1
tx, _ = draw_text(color, gfx_x, y_pos, str(dict(content_dict)))
gfx_x += (tx + 5)
if idx == 19 and num_containers > 20:
y_pos = y - ((idx+1)*lineheight)
text_body = "Showing the first 20 of {0} items"
draw_text(color, x, y_pos, text_body.format(num_containers))
break
def restore_opengl_defaults():
bgl.glLineWidth(1)
bgl.glDisable(bgl.GL_BLEND)
bgl.glColor4f(0.0, 0.0, 0.0, 1.0)
def draw_callback_px(n_id, data):
space = bpy.context.space_data
ng_view = space.edit_tree
# ng_view can be None
if not ng_view:
return
ng_name = space.edit_tree.name
if not (data['tree_name'] == ng_name):
return
if not isinstance(ng_view, node_tree.SverchCustomTree):
return
if data.get('mode', 'text-based') == 'text-based':
draw_text_data(data)
elif data.get('mode') == "graphical":
draw_graphical_data(data)
restore_opengl_defaults()
elif data.get('mode') == 'custom_function':
drawing_func = data.get('custom_function')
x, y = data.get('loc', (20, 20))
args = data.get('args', (None,))
drawing_func(x, y, args)
restore_opengl_defaults()
def unregister():
callback_disable_all()
|
Aigrefin/py3learn
|
refs/heads/master
|
learn/migrations/0001_initial.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-17 08:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='LanguageDictionnary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='TranslationCouple',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('known_word', models.CharField(max_length=200)),
('word_to_learn', models.CharField(max_length=200)),
('language_dictionnary', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='learn.LanguageDictionnary')),
],
),
]
|
c3cashdesk/c6sh
|
refs/heads/master
|
src/postix/core/migrations/0051_auto_20181103_1516.py
|
1
|
# Generated by Django 2.1.3 on 2018-11-03 14:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0050_auto_20181103_1452'),
]
operations = [
migrations.AddField(
model_name='cashdesk',
name='record_detail',
field=models.CharField(blank=True, help_text='For example the name of the bar. Leave empty for presale cashdesks.', max_length=200, null=True),
),
migrations.AddField(
model_name='cashdesk',
name='record_name',
field=models.CharField(blank=True, help_text='For example "Bar", or "Vereinstisch", or "Kassensession"', max_length=200, null=True),
),
]
|
DirtyPiece/dancestudio
|
refs/heads/master
|
Build/Tools/Python27/Lib/test/test_compare.py
|
195
|
import unittest
from test import test_support
class Empty:
def __repr__(self):
return '<Empty>'
class Coerce:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return '<Coerce %s>' % self.arg
def __coerce__(self, other):
if isinstance(other, Coerce):
return self.arg, other.arg
else:
return self.arg, other
class Cmp:
def __init__(self,arg):
self.arg = arg
def __repr__(self):
return '<Cmp %s>' % self.arg
def __cmp__(self, other):
return cmp(self.arg, other)
class ComparisonTest(unittest.TestCase):
set1 = [2, 2.0, 2L, 2+0j, Coerce(2), Cmp(2.0)]
set2 = [[1], (3,), None, Empty()]
candidates = set1 + set2
def test_comparisons(self):
for a in self.candidates:
for b in self.candidates:
if ((a in self.set1) and (b in self.set1)) or a is b:
self.assertEqual(a, b)
else:
self.assertNotEqual(a, b)
def test_id_comparisons(self):
# Ensure default comparison compares id() of args
L = []
for i in range(10):
L.insert(len(L)//2, Empty())
for a in L:
for b in L:
self.assertEqual(cmp(a, b), cmp(id(a), id(b)),
'a=%r, b=%r' % (a, b))
def test_main():
test_support.run_unittest(ComparisonTest)
if __name__ == '__main__':
test_main()
|
raphaelmerx/django
|
refs/heads/master
|
tests/admin_scripts/custom_templates/project_template/project_name/settings.py
|
738
|
# Django settings for {{ project_name }} test project.
|
ropable/statdev
|
refs/heads/master
|
ledger/accounts/migrations/0005_remove_duplicates.py
|
3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-08 06:25
from __future__ import unicode_literals
from django.db import migrations
from ledger.accounts.models import Profile, Address
def remove_duplicate_profile_addreses(apps, schema_editor):
try:
# Get Distinct hashes of addresses
_dis = Address.objects.values_list('hash', flat=True).distinct()
# Get all the multiple addresses
_multiple_addresses = []
for d in _dis:
filter_addr = Address.objects.filter(hash=d)
if len(filter_addr) > 1:
_multiple_addresses.append(list(filter_addr))
        # Get the first of each set of duplicate addresses and add it to the allowed list
allowed_m, allowed_hash = [], []
for m in _multiple_addresses:
allowed_hash.append(m[0].hash)
allowed_m.append(m.pop(0))
# Multiple addresses list is left with duplicate addresses
# Get profiles with hashes in the allowed list in order to update them
profiles = Profile.objects.filter(postal_address__hash__in=allowed_hash)
if profiles:
# Update profiles using the duplicate address to use the correct address
for a in allowed_m:
for profile in profiles:
if profile.postal_address.hash == a.hash and profile.postal_address.id != a.id:
profile.postal_address = a
profile.save()
# Remove the duplicate address
for m in _multiple_addresses:
for i in m:
i.delete()
except: pass
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20160907_1700'),
]
operations = [
migrations.RunPython(remove_duplicate_profile_addreses),
]
|
cloudbase/neutron-virtualbox
|
refs/heads/virtualbox_agent
|
neutron/tests/unit/vmware/apiclient/fake.py
|
1
|
# Copyright 2012 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
import six.moves.urllib.parse as urlparse
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
LOG = logging.getLogger(__name__)
MAX_NAME_LEN = 40
def _validate_name(name):
if name and len(name) > MAX_NAME_LEN:
raise Exception("Logical switch name exceeds %d characters",
MAX_NAME_LEN)
def _validate_resource(body):
_validate_name(body.get('display_name'))
class FakeClient(object):
LSWITCH_RESOURCE = 'lswitch'
LPORT_RESOURCE = 'lport'
LROUTER_RESOURCE = 'lrouter'
NAT_RESOURCE = 'nat'
LQUEUE_RESOURCE = 'lqueue'
SECPROF_RESOURCE = 'securityprofile'
LSWITCH_STATUS = 'lswitchstatus'
LROUTER_STATUS = 'lrouterstatus'
LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
LROUTER_LPORT_RESOURCE = 'lrouter_lport'
LROUTER_NAT_RESOURCE = 'lrouter_nat'
LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
LROUTER_LPORT_ATT = 'lrouter_lportattachment'
GWSERVICE_RESOURCE = 'gatewayservice'
RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE,
LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE,
GWSERVICE_RESOURCE]
FAKE_GET_RESPONSES = {
LSWITCH_RESOURCE: "fake_get_lswitch.json",
LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
LROUTER_RESOURCE: "fake_get_lrouter.json",
LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
LROUTER_STATUS: "fake_get_lrouter_status.json",
LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json",
SECPROF_RESOURCE: "fake_get_security_profile.json",
LQUEUE_RESOURCE: "fake_get_lqueue.json",
GWSERVICE_RESOURCE: "fake_get_gwservice.json"
}
FAKE_POST_RESPONSES = {
LSWITCH_RESOURCE: "fake_post_lswitch.json",
LROUTER_RESOURCE: "fake_post_lrouter.json",
LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
SECPROF_RESOURCE: "fake_post_security_profile.json",
LQUEUE_RESOURCE: "fake_post_lqueue.json",
GWSERVICE_RESOURCE: "fake_post_gwservice.json"
}
FAKE_PUT_RESPONSES = {
LSWITCH_RESOURCE: "fake_post_lswitch.json",
LROUTER_RESOURCE: "fake_post_lrouter.json",
LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
SECPROF_RESOURCE: "fake_post_security_profile.json",
LQUEUE_RESOURCE: "fake_post_lqueue.json",
GWSERVICE_RESOURCE: "fake_post_gwservice.json"
}
MANAGED_RELATIONS = {
LSWITCH_RESOURCE: [],
LROUTER_RESOURCE: [],
LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
}
_validators = {
LSWITCH_RESOURCE: _validate_resource,
LSWITCH_LPORT_RESOURCE: _validate_resource,
LROUTER_LPORT_RESOURCE: _validate_resource,
SECPROF_RESOURCE: _validate_resource,
LQUEUE_RESOURCE: _validate_resource,
GWSERVICE_RESOURCE: _validate_resource
}
def __init__(self, fake_files_path):
self.fake_files_path = fake_files_path
self._fake_lswitch_dict = {}
self._fake_lrouter_dict = {}
self._fake_lswitch_lport_dict = {}
self._fake_lrouter_lport_dict = {}
self._fake_lrouter_nat_dict = {}
self._fake_lswitch_lportstatus_dict = {}
self._fake_lrouter_lportstatus_dict = {}
self._fake_securityprofile_dict = {}
self._fake_lqueue_dict = {}
self._fake_gatewayservice_dict = {}
def _get_tag(self, resource, scope):
tags = [tag['tag'] for tag in resource['tags']
if tag['scope'] == scope]
return len(tags) > 0 and tags[0]
def _get_filters(self, querystring):
if not querystring:
return (None, None, None, None)
params = urlparse.parse_qs(querystring)
tag_filter = None
attr_filter = None
if 'tag' in params and 'tag_scope' in params:
tag_filter = {'scope': params['tag_scope'][0],
'tag': params['tag'][0]}
elif 'uuid' in params:
attr_filter = {'uuid': params['uuid'][0]}
# Handle page length and page cursor parameter
page_len = params.get('_page_length')
page_cursor = params.get('_page_cursor')
if page_len:
page_len = int(page_len[0])
else:
# Explicitly set it to None (avoid 0 or empty list)
page_len = None
return (tag_filter, attr_filter, page_len, page_cursor)
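    # Illustrative parse (a sketch; the query string is made up):
    #     _get_filters("tag_scope=os_tid&tag=foo&_page_length=2")
    #     # -> ({'scope': 'os_tid', 'tag': 'foo'}, None, 2, None)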
def _add_lswitch(self, body):
fake_lswitch = jsonutils.loads(body)
fake_lswitch['uuid'] = uuidutils.generate_uuid()
self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
# put the tenant_id and the zone_uuid in the main dict
        # for simplifying templating
zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
fake_lswitch['zone_uuid'] = zone_uuid
fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
fake_lswitch['lport_count'] = 0
# set status value
fake_lswitch['status'] = 'true'
return fake_lswitch
def _build_lrouter(self, body, uuid=None):
fake_lrouter = jsonutils.loads(body)
if uuid:
fake_lrouter['uuid'] = uuid
fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
default_nexthop = fake_lrouter['routing_config'].get(
'default_route_next_hop')
if default_nexthop:
fake_lrouter['default_next_hop'] = default_nexthop.get(
'gateway_ip_address', '0.0.0.0')
else:
fake_lrouter['default_next_hop'] = '0.0.0.0'
# NOTE(salv-orlando): We won't make the Fake NSX API client
# aware of NSX version. The long term plan is to replace it
# with behavioral mocking of NSX API requests
if 'distributed' not in fake_lrouter:
fake_lrouter['distributed'] = False
distributed_json = ('"distributed": %s,' %
str(fake_lrouter['distributed']).lower())
fake_lrouter['distributed_json'] = distributed_json
return fake_lrouter
def _add_lrouter(self, body):
fake_lrouter = self._build_lrouter(body,
uuidutils.generate_uuid())
self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
fake_lrouter['lport_count'] = 0
# set status value
fake_lrouter['status'] = 'true'
return fake_lrouter
def _add_lqueue(self, body):
fake_lqueue = jsonutils.loads(body)
fake_lqueue['uuid'] = uuidutils.generate_uuid()
self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue
return fake_lqueue
def _add_lswitch_lport(self, body, ls_uuid):
fake_lport = jsonutils.loads(body)
new_uuid = uuidutils.generate_uuid()
fake_lport['uuid'] = new_uuid
# put the tenant_id and the ls_uuid in the main dict
        # for simplifying templating
fake_lport['ls_uuid'] = ls_uuid
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
'q_port_id')
fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id')
fake_lport['att_type'] = "NoAttachment"
fake_lport['att_info_json'] = ''
self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
fake_lswitch['lport_count'] += 1
fake_lport_status = fake_lport.copy()
fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
fake_lport_status['ls_name'] = fake_lswitch['display_name']
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
# set status value
fake_lport['status'] = 'true'
self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
return fake_lport
def _build_lrouter_lport(self, body, new_uuid=None, lr_uuid=None):
fake_lport = jsonutils.loads(body)
if new_uuid:
fake_lport['uuid'] = new_uuid
        # put the tenant_id and the lr_uuid in the main dict
        # for simplifying templating
if lr_uuid:
fake_lport['lr_uuid'] = lr_uuid
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
'q_port_id')
# replace ip_address with its json dump
if 'ip_addresses' in fake_lport:
ip_addresses_json = jsonutils.dumps(fake_lport['ip_addresses'])
fake_lport['ip_addresses_json'] = ip_addresses_json
return fake_lport
def _add_lrouter_lport(self, body, lr_uuid):
new_uuid = uuidutils.generate_uuid()
fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid)
self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
try:
fake_lrouter = self._fake_lrouter_dict[lr_uuid]
except KeyError:
raise api_exc.ResourceNotFound()
fake_lrouter['lport_count'] += 1
fake_lport_status = fake_lport.copy()
fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
fake_lport_status['lr_name'] = fake_lrouter['display_name']
self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
return fake_lport
def _add_securityprofile(self, body):
fake_securityprofile = jsonutils.loads(body)
fake_securityprofile['uuid'] = uuidutils.generate_uuid()
fake_securityprofile['tenant_id'] = self._get_tag(
fake_securityprofile, 'os_tid')
fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile,
'nova_spid')
self._fake_securityprofile_dict[fake_securityprofile['uuid']] = (
fake_securityprofile)
return fake_securityprofile
def _add_lrouter_nat(self, body, lr_uuid):
fake_nat = jsonutils.loads(body)
new_uuid = uuidutils.generate_uuid()
fake_nat['uuid'] = new_uuid
fake_nat['lr_uuid'] = lr_uuid
self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
if 'match' in fake_nat:
match_json = jsonutils.dumps(fake_nat['match'])
fake_nat['match_json'] = match_json
return fake_nat
def _add_gatewayservice(self, body):
fake_gwservice = jsonutils.loads(body)
fake_gwservice['uuid'] = str(uuidutils.generate_uuid())
fake_gwservice['tenant_id'] = self._get_tag(
fake_gwservice, 'os_tid')
# FIXME(salvatore-orlando): For simplicity we're managing only a
        # single device. Extend the fake client to support multiple devices
first_gw = fake_gwservice['gateways'][0]
fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid']
fake_gwservice['device_id'] = first_gw['device_id']
self._fake_gatewayservice_dict[fake_gwservice['uuid']] = (
fake_gwservice)
return fake_gwservice
def _build_relation(self, src, dst, resource_type, relation):
if relation not in self.MANAGED_RELATIONS[resource_type]:
return # Relation is not desired in output
if '_relations' not in src or not src['_relations'].get(relation):
return # Item does not have relation
relation_data = src['_relations'].get(relation)
dst_relations = dst.get('_relations', {})
dst_relations[relation] = relation_data
dst['_relations'] = dst_relations
def _fill_attachment(self, att_data, ls_uuid=None,
lr_uuid=None, lp_uuid=None):
new_data = att_data.copy()
for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
if locals().get(k):
new_data[k] = locals()[k]
def populate_field(field_name):
if field_name in att_data:
new_data['%s_field' % field_name] = ('"%s" : "%s",'
% (field_name,
att_data[field_name]))
del new_data[field_name]
else:
new_data['%s_field' % field_name] = ""
for field in ['vif_uuid', 'peer_port_href', 'vlan_id',
'peer_port_uuid', 'l3_gateway_service_uuid']:
populate_field(field)
return new_data
def _get_resource_type(self, path):
"""Get resource type.
Identifies resource type and relevant uuids in the uri
/ws.v1/lswitch/xxx
/ws.v1/lswitch/xxx/status
/ws.v1/lswitch/xxx/lport/yyy
/ws.v1/lswitch/xxx/lport/yyy/status
/ws.v1/lrouter/zzz
/ws.v1/lrouter/zzz/status
/ws.v1/lrouter/zzz/lport/www
/ws.v1/lrouter/zzz/lport/www/status
/ws.v1/lqueue/xxx
"""
        # The path starts with '/', so the first element of the split is an
        # empty string - discard it ('ws.v1' then remains at index 0)
uri_split = path.split('/')[1:]
# parse uri_split backwards
suffix = ""
idx = len(uri_split) - 1
if 'status' in uri_split[idx]:
suffix = "status"
idx = idx - 1
elif 'attachment' in uri_split[idx]:
suffix = "attachment"
idx = idx - 1
# then check if we have an uuid
uuids = []
if uri_split[idx].replace('-', '') not in self.RESOURCES:
uuids.append(uri_split[idx])
idx = idx - 1
resource_type = "%s%s" % (uri_split[idx], suffix)
if idx > 1:
uuids.insert(0, uri_split[idx - 1])
resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
return (resource_type.replace('-', ''), uuids)
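    # Illustrative trace, not part of the original mock: for the path
    #   /ws.v1/lswitch/xxx/lport/yyy/status
    # the backwards walk above returns
    #   ('lswitch_lportstatus', ['xxx', 'yyy'])
    # while /ws.v1/lrouter/zzz returns ('lrouter', ['zzz']).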
def _list(self, resource_type, response_file,
parent_uuid=None, query=None, relations=None):
(tag_filter, attr_filter,
page_len, page_cursor) = self._get_filters(query)
# result_count attribute in response should appear only when
# page_cursor is not specified
do_result_count = not page_cursor
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
if parent_uuid == '*':
parent_uuid = None
# NSX raises ResourceNotFound if lswitch doesn't exist and is not *
elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE:
raise api_exc.ResourceNotFound()
def _attr_match(res_uuid):
if not attr_filter:
return True
item = res_dict[res_uuid]
for (attr, value) in attr_filter.iteritems():
if item.get(attr) != value:
return False
return True
def _tag_match(res_uuid):
if not tag_filter:
return True
return any([x['scope'] == tag_filter['scope'] and
x['tag'] == tag_filter['tag']
for x in res_dict[res_uuid]['tags']])
def _lswitch_match(res_uuid):
            # verify that the switch exists
if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
raise Exception(_("lswitch:%s not found") % parent_uuid)
if (not parent_uuid
or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
return True
return False
def _lrouter_match(res_uuid):
            # verify that the router exists
if parent_uuid and parent_uuid not in self._fake_lrouter_dict:
raise api_exc.ResourceNotFound()
if (not parent_uuid or
res_dict[res_uuid].get('lr_uuid') == parent_uuid):
return True
return False
def _cursor_match(res_uuid, page_cursor):
if not page_cursor:
return True
if page_cursor == res_uuid:
# always return True once page_cursor has been found
page_cursor = None
return True
return False
def _build_item(resource):
item = jsonutils.loads(response_template % resource)
if relations:
for relation in relations:
self._build_relation(resource, item,
resource_type, relation)
return item
for item in res_dict.itervalues():
if 'tags' in item:
item['tags_json'] = jsonutils.dumps(item['tags'])
if resource_type in (self.LSWITCH_LPORT_RESOURCE,
self.LSWITCH_LPORT_ATT,
self.LSWITCH_LPORT_STATUS):
parent_func = _lswitch_match
elif resource_type in (self.LROUTER_LPORT_RESOURCE,
self.LROUTER_LPORT_ATT,
self.LROUTER_NAT_RESOURCE,
self.LROUTER_LPORT_STATUS):
parent_func = _lrouter_match
else:
parent_func = lambda x: True
items = [_build_item(res_dict[res_uuid])
for res_uuid in res_dict
if (parent_func(res_uuid) and
_tag_match(res_uuid) and
_attr_match(res_uuid) and
_cursor_match(res_uuid, page_cursor))]
# Rather inefficient, but hey this is just a mock!
next_cursor = None
total_items = len(items)
if page_len:
try:
next_cursor = items[page_len]['uuid']
except IndexError:
next_cursor = None
items = items[:page_len]
response_dict = {'results': items}
if next_cursor:
response_dict['page_cursor'] = next_cursor
if do_result_count:
response_dict['result_count'] = total_items
return jsonutils.dumps(response_dict)
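    # Illustrative sketch, not part of the original mock: the JSON built above
    # always has one of these shapes
    #   {"results": [...], "result_count": N}             # no cursor given
    #   {"results": [...], "page_cursor": "<next-uuid>"}  # when paging continues
    # with result_count omitted whenever a page cursor was supplied.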
def _show(self, resource_type, response_file,
uuid1, uuid2=None, relations=None):
target_uuid = uuid2 or uuid1
if resource_type.endswith('attachment'):
resource_type = resource_type[:resource_type.index('attachment')]
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
for item in res_dict.itervalues():
if 'tags' in item:
item['tags_json'] = jsonutils.dumps(item['tags'])
# replace sec prof rules with their json dump
def jsonify_rules(rule_key):
if rule_key in item:
rules_json = jsonutils.dumps(item[rule_key])
item['%s_json' % rule_key] = rules_json
jsonify_rules('logical_port_egress_rules')
jsonify_rules('logical_port_ingress_rules')
items = [jsonutils.loads(response_template % res_dict[res_uuid])
for res_uuid in res_dict if res_uuid == target_uuid]
if items:
return jsonutils.dumps(items[0])
raise api_exc.ResourceNotFound()
def handle_get(self, url):
#TODO(salvatore-orlando): handle field selection
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
relations = urlparse.parse_qs(parsedurl.query).get('relations')
response_file = self.FAKE_GET_RESPONSES.get(res_type)
if not response_file:
raise api_exc.NsxApiException()
if 'lport' in res_type or 'nat' in res_type:
if len(uuids) > 1:
return self._show(res_type, response_file, uuids[0],
uuids[1], relations=relations)
else:
return self._list(res_type, response_file, uuids[0],
query=parsedurl.query, relations=relations)
elif ('lswitch' in res_type or
'lrouter' in res_type or
self.SECPROF_RESOURCE in res_type or
self.LQUEUE_RESOURCE in res_type or
'gatewayservice' in res_type):
LOG.debug("UUIDS:%s", uuids)
if uuids:
return self._show(res_type, response_file, uuids[0],
relations=relations)
else:
return self._list(res_type, response_file,
query=parsedurl.query,
relations=relations)
else:
raise Exception("unknown resource:%s" % res_type)
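    # Illustrative dispatch examples, not part of the original mock (uuids are
    # hypothetical and resource names assume they appear in self.RESOURCES):
    #   GET /ws.v1/lswitch                     -> _list('lswitch', ...)
    #   GET /ws.v1/lswitch/<uuid>              -> _show('lswitch', ...)
    #   GET /ws.v1/lswitch/<uuid>/lport        -> _list('lswitch_lport', ..., parent_uuid=<uuid>)
    #   GET /ws.v1/lswitch/<uuid>/lport/<lp>   -> _show('lswitch_lport', ...)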
def handle_post(self, url, body):
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_POST_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
add_resource = getattr(self, '_add_%s' % res_type)
body_json = jsonutils.loads(body)
val_func = self._validators.get(res_type)
if val_func:
val_func(body_json)
args = [body]
if uuids:
args.append(uuids[0])
response = response_template % add_resource(*args)
return response
def handle_put(self, url, body):
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
response_template = f.read()
# Manage attachment operations
is_attachment = False
if res_type.endswith('attachment'):
is_attachment = True
res_type = res_type[:res_type.index('attachment')]
res_dict = getattr(self, '_fake_%s_dict' % res_type)
body_json = jsonutils.loads(body)
val_func = self._validators.get(res_type)
if val_func:
val_func(body_json)
try:
resource = res_dict[uuids[-1]]
except KeyError:
raise api_exc.ResourceNotFound()
if not is_attachment:
edit_resource = getattr(self, '_build_%s' % res_type, None)
if edit_resource:
body_json = edit_resource(body)
resource.update(body_json)
else:
relations = resource.get("_relations", {})
body_2 = jsonutils.loads(body)
resource['att_type'] = body_2['type']
relations['LogicalPortAttachment'] = body_2
resource['_relations'] = relations
if body_2['type'] == "PatchAttachment":
# We need to do a trick here
if self.LROUTER_RESOURCE in res_type:
res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
self.LSWITCH_RESOURCE)
elif self.LSWITCH_RESOURCE in res_type:
res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
self.LROUTER_RESOURCE)
res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
body_2['peer_port_uuid'] = uuids[-1]
resource_2 = \
res_dict_2[jsonutils.loads(body)['peer_port_uuid']]
relations_2 = resource_2.get("_relations")
if not relations_2:
relations_2 = {}
relations_2['LogicalPortAttachment'] = body_2
resource_2['_relations'] = relations_2
resource['peer_port_uuid'] = body_2['peer_port_uuid']
resource['att_info_json'] = (
"\"peer_port_uuid\": \"%s\"," %
resource_2['uuid'])
resource_2['att_info_json'] = (
"\"peer_port_uuid\": \"%s\"," %
body_2['peer_port_uuid'])
elif body_2['type'] == "L3GatewayAttachment":
resource['attachment_gwsvc_uuid'] = (
body_2['l3_gateway_service_uuid'])
resource['vlan_id'] = body_2.get('vlan_id')
elif body_2['type'] == "L2GatewayAttachment":
resource['attachment_gwsvc_uuid'] = (
body_2['l2_gateway_service_uuid'])
elif body_2['type'] == "VifAttachment":
resource['vif_uuid'] = body_2['vif_uuid']
resource['att_info_json'] = (
"\"vif_uuid\": \"%s\"," % body_2['vif_uuid'])
if not is_attachment:
response = response_template % resource
else:
if res_type == self.LROUTER_LPORT_RESOURCE:
lr_uuid = uuids[0]
ls_uuid = None
elif res_type == self.LSWITCH_LPORT_RESOURCE:
ls_uuid = uuids[0]
lr_uuid = None
lp_uuid = uuids[1]
response = response_template % self._fill_attachment(
jsonutils.loads(body), ls_uuid, lr_uuid, lp_uuid)
return response
def handle_delete(self, url):
parsedurl = urlparse.urlparse(url)
(res_type, uuids) = self._get_resource_type(parsedurl.path)
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
if not response_file:
raise Exception("resource not found")
res_dict = getattr(self, '_fake_%s_dict' % res_type)
try:
del res_dict[uuids[-1]]
except KeyError:
raise api_exc.ResourceNotFound()
return ""
def fake_request(self, *args, **kwargs):
method = args[0]
handler = getattr(self, "handle_%s" % method.lower())
return handler(*args[1:])
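    # Illustrative usage sketch, not part of the original mock (wiring is an
    # assumption about how tests use this fake): test code that patches the
    # real API client's request method to point here gets, e.g.,
    #   fake_request('GET', '/ws.v1/lswitch')        -> handle_get(...)
    #   fake_request('POST', '/ws.v1/lswitch', body) -> handle_post(...)
    # because the handler name is derived from the lower-cased HTTP verb.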
def reset_all(self):
self._fake_lswitch_dict.clear()
self._fake_lrouter_dict.clear()
self._fake_lswitch_lport_dict.clear()
self._fake_lrouter_lport_dict.clear()
self._fake_lswitch_lportstatus_dict.clear()
self._fake_lrouter_lportstatus_dict.clear()
self._fake_lqueue_dict.clear()
self._fake_securityprofile_dict.clear()
self._fake_gatewayservice_dict.clear()
|
cryptickp/troposphere
|
refs/heads/master
|
examples/EC2Conditions.py
|
25
|
from __future__ import print_function
from troposphere import (
Template, Parameter, Ref, Condition, Equals, And, Or, Not, If
)
from troposphere import ec2
parameters = {
"One": Parameter(
"One",
Type="String",
),
"Two": Parameter(
"Two",
Type="String",
),
"Three": Parameter(
"Three",
Type="String",
),
"Four": Parameter(
"Four",
Type="String",
),
"SshKeyName": Parameter(
"SshKeyName",
Type="String",
)
}
conditions = {
"OneEqualsFoo": Equals(
Ref("One"),
"Foo"
),
"NotOneEqualsFoo": Not(
Condition("OneEqualsFoo")
),
"BarEqualsTwo": Equals(
"Bar",
Ref("Two")
),
"ThreeEqualsFour": Equals(
Ref("Three"),
Ref("Four")
),
"OneEqualsFooOrBarEqualsTwo": Or(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo")
),
"OneEqualsFooAndNotBarEqualsTwo": And(
Condition("OneEqualsFoo"),
Not(Condition("BarEqualsTwo"))
),
"OneEqualsFooAndBarEqualsTwoAndThreeEqualsPft": And(
Condition("OneEqualsFoo"),
Condition("BarEqualsTwo"),
Equals(Ref("Three"), "Pft")
),
"OneIsQuzAndThreeEqualsFour": And(
Equals(Ref("One"), "Quz"),
Condition("ThreeEqualsFour")
),
"LaunchInstance": And(
Condition("OneEqualsFoo"),
Condition("NotOneEqualsFoo"),
Condition("BarEqualsTwo"),
Condition("OneEqualsFooAndNotBarEqualsTwo"),
Condition("OneIsQuzAndThreeEqualsFour")
),
"LaunchWithGusto": And(
Condition("LaunchInstance"),
Equals(Ref("One"), "Gusto")
)
}
resources = {
"Ec2Instance": ec2.Instance(
"Ec2Instance",
Condition="LaunchInstance",
        ImageId=If("OneEqualsFoo", "ami-12345678", "ami-87654321"),
InstanceType="t1.micro",
KeyName=Ref("SshKeyName"),
SecurityGroups=["default"],
)
}
def template():
t = Template()
for p in parameters.values():
t.add_parameter(p)
for k in conditions:
t.add_condition(k, conditions[k])
for r in resources.values():
t.add_resource(r)
return t
print(template().to_json())
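# Illustrative usage sketch, not part of the original example: template()
# returns a troposphere Template object, so besides printing JSON the result
# could also be written to a file (the file name below is hypothetical):
#
#     with open("ec2_conditions.template.json", "w") as fh:
#         fh.write(template().to_json())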
|
innstereo/innstereo
|
refs/heads/master
|
innstereo/layer_properties.py
|
2
|
#!/usr/bin/python3
"""
This module contains the layer properties dialog.
Each dialog window has its own class that controls its behaviour. This module
stores the AboutDialog-, PrintDialog-, StereonetProperties-, LayerProperties-,
and FileChooserParse-class.
"""
from gi.repository import Gtk
import matplotlib.colors as colors
import os, sys
import numpy as np
from .i18n import i18n, translate_gui
class LayerProperties(object):
"""
    This class initializes the layer properties dialog and handles its signals.
The init method requires a layer object, so the changes can be applied and
a function from the main loop that redraws the plot after changes are
applied.
"""
def __init__(self, layer, redraw_plot, main_window):
"""
        Initializes the Gtk.Builder and loads the layer properties dialog from
        the glade file. The builder creates an instance of the dialog and
        connects the signals.
"""
self.builder = Gtk.Builder()
self.builder.set_translation_domain(i18n().get_ts_domain())
script_dir = os.path.dirname(__file__)
rel_path = "gui_layout.glade"
abs_path = os.path.join(script_dir, rel_path)
self.builder.add_objects_from_file(abs_path,
("dialog_layer_properties", "liststore_line_style",
"adjustment_line_width", "liststore_capstyle",
"liststore_marker_style", "adjustment_marker_size",
"adjustment_edge_width", "adjustment_pole_size",
"adjustment_pole_edge_width", "adjustment_rose_spacing",
"adjustment_rose_bottom", "adjustment_contour_resolution",
"liststore_colormaps", "liststore_contour_method",
"adjustment_contour_sigma", "adjustment_contour_label_size",
"adjustment_lower_limit", "adjustment_upper_limit",
"adjustment_steps", "adjustment_fisher_conf",
"adjustment_dip_rose_spacing"))
self.layer = layer
self.redraw = redraw_plot
self.changes = []
self.dialog = self.builder.get_object("dialog_layer_properties")
self.dialog.set_transient_for(main_window)
self.marker_style_dict = {".": 0, ",": 1, "o": 2, "v": 3, "^": 4, "<": 5,
">": 6, "s": 7, "8": 8, "p": 9, "*": 10, "h": 11,
"H": 12, "+": 13, "x": 14, "D": 15, "d": 16,
"|": 17, "_": 18}
self.capstyle_dict = {"butt": 0, "round": 1, "projecting": 2}
self.line_style_dict = {"-": 0, "--": 1, "-.": 2, ":": 3}
self.contour_method_dict = {"exponential_kamb": 0, "linear_kamb": 1,
"kamb": 2, "schmidt": 3}
self.colormaps_dict = {"Blues": 0, "BuGn": 1, "BuPu": 2, "GnBu": 3,
"Greens": 4, "Greys": 5, "Oranges": 6, "OrRd": 7,
"PuBu": 8, "PuBuGn": 9, "PuRd": 10, "Purples": 11,
"RdPu": 12, "Reds": 13, "YlGn": 14, "YlGnBu": 15,
"YlOrBr": 16, "YlOrRd": 17, "afmhot": 18,
"autumn": 19, "bone": 20, "cool": 21, "copper": 22,
"gist_heat": 23, "gray": 24, "hot": 25, "pink": 26,
"spring": 27, "summer": 28, "winter": 29,
"viridis": 30}
self.load_circle_properties()
self.load_pole_properties()
self.load_linear_properties()
self.load_fault_properties()
self.load_contour_properties()
self.load_rose_properties()
self.hide_gui_elements()
self.set_contour_range_label()
self.builder.connect_signals(self)
if sys.platform == "win32":
translate_gui(self.builder)
self.set_contour_range_label()
def load_circle_properties(self):
"""
Load default settings for great- and small circles
"""
self.switch_render_gcircles = \
self.builder.get_object("switch_render_gcircles")
self.colorbutton_line = \
self.builder.get_object("colorbutton_line")
self.combobox_line_style = \
self.builder.get_object("combobox_line_style")
self.spinbutton_line_width = \
self.builder.get_object("spinbutton_line_width")
self.adjustment_line_width = \
self.builder.get_object("adjustment_line_width")
self.combobox_capstyle = \
self.builder.get_object("combobox_capstyle")
self.colorbutton_line.set_color(self.layer.get_rgba())
self.adjustment_line_width.set_value(self.layer.get_line_width())
self.combobox_line_style.set_active(
self.line_style_dict[self.layer.get_line_style()])
self.combobox_capstyle.set_active(
self.capstyle_dict[self.layer.get_capstyle()])
switch_state = self.layer.get_draw_gcircles()
self.switch_render_gcircles.set_active(switch_state)
self.set_gcircle_sensitivity(switch_state)
def load_pole_properties(self):
"""
Load default settings for pole points
"""
self.switch_render_poles = \
self.builder.get_object("switch_render_poles")
self.colorbutton_pole_fill = \
self.builder.get_object("colorbutton_pole_fill")
self.colorbutton_pole_edge_color = \
self.builder.get_object("colorbutton_pole_edge_color")
self.spinbutton_pole_size = \
self.builder.get_object("spinbutton_pole_size")
self.adjustment_pole_size = \
self.builder.get_object("adjustment_pole_size")
self.spinbutton_pole_edge_width = \
self.builder.get_object("spinbutton_pole_edge_width")
self.adjustment_pole_edge_width = \
self.builder.get_object("adjustment_pole_edge_width")
self.combobox_pole_style = \
self.builder.get_object("combobox_pole_style")
self.colorbutton_pole_fill.set_color(self.layer.get_pole_rgba())
self.colorbutton_pole_edge_color.set_color(
self.layer.get_pole_edge_rgba())
self.adjustment_pole_size.set_value(
self.layer.get_pole_size())
self.adjustment_pole_edge_width.set_value(
self.layer.get_pole_edge_width())
self.combobox_pole_style.set_active(
self.marker_style_dict[self.layer.get_pole_style()])
switch_state = self.layer.get_draw_poles()
self.switch_render_poles.set_active(switch_state)
self.set_pole_sensitivity(switch_state)
def load_linear_properties(self):
"""
Load the current settings for linear markers
"""
self.switch_render_linears = \
self.builder.get_object("switch_render_linears")
self.combobox_marker_style = \
self.builder.get_object("combobox_marker_style")
self.spinbutton_marker_size = \
self.builder.get_object("spinbutton_marker_size")
self.adjustment_marker_size = \
self.builder.get_object("adjustment_marker_size")
self.colorbutton_marker = \
self.builder.get_object("colorbutton_marker")
self.colorbutton_marker_edge = \
self.builder.get_object("colorbutton_marker_edge")
self.spinbutton_edge_width = \
self.builder.get_object("spinbutton_edge_width")
self.adjustment_marker_edge_width = \
self.builder.get_object("adjustment_edge_width")
self.switch_mean_vector = \
self.builder.get_object("switch_mean_vector")
self.switch_fisher_sc = \
self.builder.get_object("switch_fisher_sc")
self.spinbutton_fisher_conf = \
self.builder.get_object("spinbutton_fisher_conf")
self.adjustment_fisher_conf = \
self.builder.get_object("adjustment_fisher_conf")
self.combobox_marker_style.set_active(
self.marker_style_dict[self.layer.get_marker_style()])
self.adjustment_marker_size.set_value(self.layer.get_marker_size())
self.colorbutton_marker.set_color(self.layer.get_marker_rgba())
self.colorbutton_marker_edge.set_color(
self.layer.get_marker_edge_rgba())
self.adjustment_marker_edge_width.set_value(
self.layer.get_marker_edge_width())
switch_state = self.layer.get_draw_linears()
self.switch_render_linears.set_active(switch_state)
self.set_linear_sensitivity(switch_state)
self.switch_mean_vector.set_state(self.layer.get_draw_mean_vector())
self.switch_fisher_sc.set_state(self.layer.get_draw_fisher_sc())
self.adjustment_fisher_conf.set_value(self.layer.get_fisher_conf())
self.set_fisher_conf_sensitivity(self.layer.get_draw_fisher_sc())
def load_fault_properties(self):
"""
Initializes the interface for fault plots.
        Loads the objects from the glade file using the GtkBuilder. Gets the
current settings from the active layer and applies these settings to
the interface.
"""
self.switch_draw_hoeppene = \
self.builder.get_object("switch_draw_hoeppene")
self.switch_draw_angelier = \
self.builder.get_object("switch_draw_angelier")
self.switch_draw_lp_plane = \
self.builder.get_object("switch_draw_lp_plane")
self.switch_draw_hoeppene.set_active(self.layer.get_draw_hoeppener())
self.switch_draw_angelier.set_active(self.layer.get_draw_angelier())
self.switch_draw_lp_plane.set_active(self.layer.get_draw_lp_plane())
def load_contour_properties(self):
"""
Load the current settings for contours
"""
self.switch_draw_contour_fills = \
self.builder.get_object("switch_draw_contour_fills")
self.switch_contour_lines = \
self.builder.get_object("switch_contour_lines")
self.combobox_contour_method = \
self.builder.get_object("combobox_contour_method")
self.combobox_colormaps = \
self.builder.get_object("combobox_colormaps")
self.spinbutton_contour_resolution = \
self.builder.get_object("spinbutton_contour_resolution")
self.adjustment_contour_resolution = \
self.builder.get_object("adjustment_contour_resolution")
self.combobox_contour_line_style = \
self.builder.get_object("combobox_contour_line_style")
self.spinbutton_contour_sigma = \
self.builder.get_object("spinbutton_contour_sigma")
self.adjustment_contour_sigma = \
self.builder.get_object("adjustment_contour_sigma")
self.switch_contour_labels = \
self.builder.get_object("switch_contour_labels")
self.spinbutton_contour_label_size = \
self.builder.get_object("spinbutton_contour_label_size")
self.adjustment_contour_label_size = \
self.builder.get_object("adjustment_contour_label_size")
self.switch_use_line_color = \
self.builder.get_object("switch_use_line_color")
self.colorbutton_contour_line_color = \
self.builder.get_object("colorbutton_contour_line_color")
self.switch_manual_range = \
self.builder.get_object("switch_manual_range")
self.spinbutton_lower_limit = \
self.builder.get_object("spinbutton_lower_limit")
self.spinbutton_upper_limit = \
self.builder.get_object("spinbutton_upper_limit")
self.spinbutton_steps = \
self.builder.get_object("spinbutton_steps")
self.adjustment_lower_limit = \
self.builder.get_object("adjustment_lower_limit")
self.adjustment_upper_limit = \
self.builder.get_object("adjustment_upper_limit")
self.adjustment_steps = \
self.builder.get_object("adjustment_steps")
self.label_contour_steps = \
self.builder.get_object("label_contour_steps")
self.switch_draw_contour_fills.set_active(
self.layer.get_draw_contour_fills())
self.switch_contour_lines.set_active(
self.layer.get_draw_contour_lines())
self.switch_contour_labels.set_active(
self.layer.get_draw_contour_labels())
self.switch_use_line_color.set_active(self.layer.get_use_line_color())
self.adjustment_contour_resolution.set_value(
self.layer.get_contour_resolution())
self.adjustment_contour_sigma.set_value(
self.layer.get_contour_sigma())
self.adjustment_contour_label_size.set_value(
self.layer.get_contour_label_size())
self.combobox_contour_method.set_active(
self.contour_method_dict[self.layer.get_contour_method()])
self.combobox_colormaps.set_active(
self.colormaps_dict[self.layer.get_colormap()])
self.combobox_contour_line_style.set_active(
self.line_style_dict[self.layer.get_contour_line_style()])
self.colorbutton_contour_line_color.set_color(
self.layer.get_contour_line_rgba())
self.adjustment_lower_limit.set_value(self.layer.get_lower_limit())
self.adjustment_upper_limit.set_value(self.layer.get_upper_limit())
self.adjustment_steps.set_value(self.layer.get_steps())
self.switch_draw_contour_fills.set_active(self.layer.get_draw_contour_fills())
self.set_contour_sensitivity()
manual_range_state = self.layer.get_manual_range()
self.switch_manual_range.set_active(manual_range_state)
self.set_manual_range_sensitivity(manual_range_state)
def load_rose_properties(self):
"""
Load the current settings for the rose diagram
"""
self.spinbutton_rose_spacing = \
self.builder.get_object("spinbutton_rose_spacing")
self.adjustment_rose_spacing = \
self.builder.get_object("adjustment_rose_spacing")
self.spinbutton_rose_bottom = \
self.builder.get_object("spinbutton_rose_bottom")
self.adjustment_rose_bottom = \
self.builder.get_object("adjustment_rose_bottom")
self.spinbutton_dip_rose_spacing = \
self.builder.get_object("spinbutton_dip_rose_spacing")
self.adjustment_dip_rose_spacing = \
self.builder.get_object("adjustment_dip_rose_spacing")
self.label_rose_spacing = \
self.builder.get_object("label_rose_spacing")
self.label_dip_rose_spacing = \
self.builder.get_object("label_dip_rose_spacing")
self.adjustment_rose_spacing.set_value(self.layer.get_rose_spacing())
self.adjustment_rose_bottom.set_value(self.layer.get_rose_bottom())
self.adjustment_dip_rose_spacing.set_value(self.layer.get_dip_rose_spacing())
self.set_rose_spacing_label()
self.set_dip_rose_spacing_label()
def hide_gui_elements(self):
"""
Hides some elements of the GUI depending on the layer type
Pages:
0: Great and Small Circles
1: Poles
2: Lines
3: Fault Plots
4: Contours
5: Rose Diagram
"""
self.notebook = \
self.builder.get_object("notebook1")
layertype = self.layer.get_layer_type()
if layertype == "line":
self.notebook.get_nth_page(0).hide()
self.notebook.get_nth_page(1).hide()
self.notebook.get_nth_page(3).hide()
elif layertype == "plane":
self.notebook.get_nth_page(2).hide()
self.notebook.get_nth_page(3).hide()
elif layertype == "smallcircle":
self.notebook.get_nth_page(1).hide()
self.notebook.get_nth_page(2).hide()
self.notebook.get_nth_page(3).hide()
self.notebook.get_nth_page(4).hide()
self.notebook.get_nth_page(5).hide()
elif layertype == "faultplane":
self.notebook.get_nth_page(4).hide()
elif layertype == "eigenvector":
self.notebook.get_nth_page(0).hide()
self.notebook.get_nth_page(1).hide()
self.notebook.get_nth_page(3).hide()
self.notebook.get_nth_page(4).hide()
self.notebook.set_current_page(self.layer.get_page())
def on_switch_render_linears_state_set(self, switch, state):
"""
Queues up the new state for rendering linears.
When the state of the render linears switch is changed this method is
        called and queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_draw_linears(state))
self.set_linear_sensitivity(state)
def on_entry_layer_name_changed(self, entry):
"""
        Triggered when the layer name is changed. Reads out the text buffer
        and queues the new label in the list of changes.
"""
buffer_obj = entry.get_buffer()
new_label = buffer_obj.get_text()
self.changes.append(lambda: self.layer.set_label(new_label))
def on_switch_render_gcircles_state_set(self, checkbutton, state):
"""
Queues up the new state for great and small circles.
        When the state of the great and small circle switch is changed this
        method is called and queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_draw_gcircles(state))
self.set_gcircle_sensitivity(state)
def set_gcircle_sensitivity(self, state):
"""
Sets widgets sensitivity according to the great circle switch.
When the switch is turned off, the widgets for line properties are
        not needed and their sensitivity is turned off.
"""
self.colorbutton_line.set_sensitive(state)
self.combobox_line_style.set_sensitive(state)
self.combobox_capstyle.set_sensitive(state)
self.spinbutton_line_width.set_sensitive(state)
def set_linear_sensitivity(self, state):
"""
Sets widgets sensitivity according to the linear feature switch.
When the switch is turned off, the widgets for linears customization are
        not needed and their sensitivity is turned off.
"""
self.combobox_marker_style.set_sensitive(state)
self.spinbutton_marker_size.set_sensitive(state)
self.colorbutton_marker.set_sensitive(state)
self.colorbutton_marker_edge.set_sensitive(state)
self.spinbutton_edge_width.set_sensitive(state)
def set_pole_sensitivity(self, state):
"""
Sets widgets sensitivity according to the pole-marker switch.
When the switch is turned off, the widgets for pole customization are
        not needed and their sensitivity is turned off.
"""
self.colorbutton_pole_fill.set_sensitive(state)
self.colorbutton_pole_edge_color.set_sensitive(state)
self.spinbutton_pole_size.set_sensitive(state)
self.spinbutton_pole_edge_width.set_sensitive(state)
self.combobox_pole_style.set_sensitive(state)
def set_manual_range_sensitivity(self, state):
"""
Sets widgets sensitivity according to the manual range switch.
When the switch is turned off, the widgets for the range customization
        are not needed and their sensitivity is turned off.
"""
self.spinbutton_lower_limit.set_sensitive(state)
self.spinbutton_upper_limit.set_sensitive(state)
self.spinbutton_steps.set_sensitive(state)
def set_contour_sensitivity(self):
"""
        Turns the sensitivity of widgets on the contour page on or off.
        Depending on which switches are active, widgets that do not affect
        the plot are made insensitive.
"""
fill = self.switch_draw_contour_fills.get_active()
line = self.switch_contour_lines.get_active()
label = self.switch_contour_labels.get_active()
line_color = self.switch_use_line_color.get_active()
if line == True:
self.switch_contour_labels.set_sensitive(True)
if label == True:
self.spinbutton_contour_label_size.set_sensitive(True)
else:
self.spinbutton_contour_label_size.set_sensitive(False)
self.combobox_contour_line_style.set_sensitive(True)
self.switch_use_line_color.set_sensitive(True)
if line_color == True:
self.colorbutton_contour_line_color.set_sensitive(True)
else:
self.colorbutton_contour_line_color.set_sensitive(False)
else:
self.switch_contour_labels.set_sensitive(False)
self.spinbutton_contour_label_size.set_sensitive(False)
self.combobox_contour_line_style.set_sensitive(False)
self.switch_use_line_color.set_sensitive(False)
self.colorbutton_contour_line_color.set_sensitive(False)
if line == True or fill == True:
self.combobox_colormaps.set_sensitive(True)
else:
self.combobox_colormaps.set_sensitive(False)
def on_switch_render_poles_state_set(self, switch, state):
"""
Queues up the new state for rendering poles.
When the state of the render poles switch is changed this method is
        called and queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_draw_poles(state))
self.set_pole_sensitivity(state)
def on_colorbutton_choose_line_color_color_set(self, color_button):
"""
Triggered when the line color is changed. The function receives
a Gtk.ColorButton instance. Queues up the new line color in the
list of changes.
"""
rgba = color_button.get_rgba()
rgb_str = rgba.to_string()
red, green, blue = rgb_str[4:-1].split(",")
color_list = [int(red)/255, int(green)/255, int(blue)/255]
new_line_color_hex = colors.rgb2hex(color_list)
self.changes.append(lambda: self.layer.set_line_color(
new_line_color_hex))
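        # Illustrative trace, not part of the original handler: a color button
        # reporting the string "rgb(255,128,0)" is sliced to "255,128,0",
        # scaled to [1.0, 0.50196..., 0.0] and converted by matplotlib's
        # colors.rgb2hex to "#ff8000"; the same pattern is reused by the
        # other color handlers below.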
def on_combobox_line_style_changed(self, combo):
"""
Queues up the new line style in the list of changes.
"""
combo_iter = combo.get_active_iter()
if combo_iter != None:
model = combo.get_model()
new_line_style = model[combo_iter][1]
self.changes.append(lambda: self.layer.set_line_style(
new_line_style))
def on_spinbutton_line_width_value_changed(self, spinbutton):
"""
Queues up the new line width in the list of changes.
"""
new_line_width = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_line_width(new_line_width))
def on_combobox_capstyle_changed(self, combo):
"""
Queues up the new capstyle in the list of changes.
"""
combo_iter = combo.get_active_iter()
if combo_iter != None:
model = combo.get_model()
new_capstyle = model[combo_iter][1]
self.changes.append(lambda: self.layer.set_capstyle(new_capstyle))
def on_colorbutton_pole_fill_color_set(self, colorbutton):
"""
Queues up the new pole fill color in the list of changes.
"""
rgba = colorbutton.get_rgba()
rgb_str = rgba.to_string()
red, green, blue = rgb_str[4:-1].split(",")
color_list = [int(red)/255, int(green)/255, int(blue)/255]
new_pole_color_hex = colors.rgb2hex(color_list)
self.changes.append(lambda: self.layer.set_pole_fill(
new_pole_color_hex))
def on_colorbutton_pole_edge_color_color_set(self, colorbutton):
"""
Queues up the new pole edge color in the list of changes.
"""
rgba = colorbutton.get_rgba()
rgb_str = rgba.to_string()
red, green, blue = rgb_str[4:-1].split(",")
color_list = [int(red)/255, int(green)/255, int(blue)/255]
new_pole_edge_color_hex = colors.rgb2hex(color_list)
self.changes.append(lambda: self.layer.set_pole_edge_color(
new_pole_edge_color_hex))
def on_combobox_pole_style_changed(self, combobox):
"""
Queues up the new pole style in the list of changes.
"""
combo_iter = combobox.get_active_iter()
if combo_iter != None:
model = combobox.get_model()
new_pole_style = model[combo_iter][1]
self.changes.append(lambda: self.layer.set_pole_style(
new_pole_style))
def on_spinbutton_pole_size_value_changed(self, spinbutton):
"""
Queues up the new pole size in the list of changes.
"""
new_pole_size = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_pole_size(new_pole_size))
def on_spinbutton_pole_edge_width_value_changed(self, spinbutton):
"""
Queues up the new pole edge width in the list of changes.
"""
new_pole_edge_width = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_pole_edge_width(
new_pole_edge_width))
def on_colorbutton_marker_color_set(self, color_button):
"""
Queues up the new marker fill color in the list of changes.
"""
rgba = color_button.get_rgba()
rgb_str = rgba.to_string()
red, green, blue = rgb_str[4:-1].split(",")
color_list = [int(red)/255, int(green)/255, int(blue)/255]
new_marker_color_hex = colors.rgb2hex(color_list)
self.changes.append(lambda: self.layer.set_marker_fill(
new_marker_color_hex))
def on_colorbutton_marker_edge_color_set(self, color_button):
"""
Queues up the new marker edge color in the list of changes.
"""
rgba = color_button.get_rgba()
rgb_str = rgba.to_string()
red, green, blue = rgb_str[4:-1].split(",")
color_list = [int(red)/255, int(green)/255, int(blue)/255]
new_marker_edge_color_hex = colors.rgb2hex(color_list)
self.changes.append(lambda: self.layer.set_marker_edge_color(
new_marker_edge_color_hex))
def on_combobox_marker_style_changed(self, combo):
"""
        Queues up the new marker style in the list of changes.
"""
combo_iter = combo.get_active_iter()
if combo_iter != None:
model = combo.get_model()
new_marker_style = model[combo_iter][1]
self.changes.append(lambda: self.layer.set_marker_style(
new_marker_style))
def on_spinbutton_marker_size_value_changed(self, spinbutton):
"""
Queues up the new marker size in the list of changes.
"""
new_marker_size = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_marker_size(new_marker_size))
def on_spinbutton_edge_width_value_changed(self, spinbutton):
"""
Queues up the new marker edge width in the list of changes.
"""
new_marker_edge_width = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_marker_edge_width(
new_marker_edge_width))
def on_dialog_layer_properties_close(self, widget):
"""
Hides the dialog when the dialog is closed.
"""
self.dialog.hide()
def on_dialog_layer_properties_response(self, widget, signal):
"""
Hides the dialog if a response is triggered.
"""
self.dialog.hide()
def on_button_layerproperties_cancel_clicked(self, widget):
"""
If the dialog is canceled the changes are discarded (automatically),
and the window is hidden.
"""
self.layer.set_page(self.notebook.get_current_page())
self.dialog.hide()
def on_button_layerproperties_apply_clicked(self, widget):
"""
When apply is pressed this function applies all changes and closes
the dialog window.
"""
for change in self.changes:
change()
self.layer.set_page(self.notebook.get_current_page())
self.redraw()
self.dialog.hide()
def run(self):
"""
        This function is run when the layer properties dialog is called from
        the main window. It shows the dialog.
"""
self.dialog.run()
def on_dialog_layer_properties_destroy(self, widget):
"""
        This function is run when the layer properties dialog is closed with
        the x-button in the title bar. It hides the dialog.
"""
self.dialog.hide()
def on_spinbutton_rose_spacing_value_changed(self, spinbutton):
"""
Triggered when the value in the spinbutton for the spacing of the
rose diagram is changed. Queues up the new value in the list of changes.
"""
new_rose_spacing = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_rose_spacing(
new_rose_spacing))
self.set_rose_spacing_label()
def on_spinbutton_rose_bottom_value_changed(self, spinbutton):
"""
Triggered when the value in the spinbutton for the bottom cutoff of the
rose diagram is changed. Queues up the new value in the list of changes.
"""
new_rose_bottom = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_rose_bottom(
new_rose_bottom))
def on_switch_draw_contour_fills_state_set(self, switch, state):
"""
Triggered when the state of the checkbutton for rendering contour fills
is toggled. Queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_draw_contour_fills(state))
self.set_contour_sensitivity()
def on_switch_contour_lines_state_set(self, checkbutton, state):
"""
Triggered when the state of the checkbutton for rendering contour lines
is toggled. Queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_draw_contour_lines(state))
self.set_contour_sensitivity()
def on_combobox_contour_method_changed(self, combobox):
"""
Triggered when a new contouring method is chosen. Queues up the
new colormap in the list of changes.
"""
combo_iter = combobox.get_active_iter()
if combo_iter != None:
model = combobox.get_model()
new_method = model[combo_iter][1]
self.changes.append(
lambda: self.layer.set_contour_method(new_method))
def on_spinbutton_contour_resolution_value_changed(self, spinbutton):
"""
        Triggered when the grid resolution for the contours is changed. Converts
value to int just to be safe. Queues up the int value in the list of
changes. Values below 3 don't work and above 300 are too slow for
rendering. These limits are set in Glade in the
"adjustment_contour_resolution".
"""
new_contour_resolution = int(spinbutton.get_value())
self.changes.append(
lambda: self.layer.set_contour_resolution(new_contour_resolution))
def on_combobox_colormaps_changed(self, combobox):
"""
Triggered when the colormap is changed. The new colormap is queued up
in the list of changes. Colormap is a string (e.g. "hot")
"""
combo_iter = combobox.get_active_iter()
if combo_iter != None:
model = combobox.get_model()
new_colormap = model[combo_iter][1]
self.changes.append(
lambda: self.layer.set_colormap(new_colormap))
def on_combobox_contour_line_style_changed(self, combobox):
"""
Triggered when the contour-lines line-style is changed. Queues up the
new style in the list of changes.
"""
combo_iter = combobox.get_active_iter()
if combo_iter != None:
model = combobox.get_model()
new_line_style = model[combo_iter][1]
self.changes.append(
lambda: self.layer.set_contour_line_style(new_line_style))
def on_spinbutton_contour_sigma_value_changed(self, spinbutton):
"""
Triggered when the standard deviation for contouring is changed.
Queues up the new value in the list of changes.
"""
new_contour_sigma = int(spinbutton.get_value())
self.changes.append(
lambda: self.layer.set_contour_sigma(new_contour_sigma))
def on_switch_contour_labels_state_set(self, switch, state):
"""
        Triggered when the checkbutton to draw contour labels is toggled.
Queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_draw_contour_labels(state))
self.set_contour_sensitivity()
def on_spinbutton_contour_label_size_value_changed(self, spinbutton):
"""
Triggered when the font size for contour labels is changed. The new
value is queued up in the list of changes.
"""
label_size = int(spinbutton.get_value())
self.changes.append(
lambda: self.layer.set_contour_label_size(label_size))
def on_switch_use_line_color_state_set(self, switch, state):
"""
Queues up the new state for using line colors for contour lines.
When the state of the line color switch is changed this method is
        called and queues up the new state in the list of changes.
"""
self.changes.append(lambda: self.layer.set_use_line_color(state))
self.set_contour_sensitivity()
def on_colorbutton_contour_line_color_color_set(self, colorbutton):
"""
Queues up the new contour line color in the list of changes.
"""
rgba = colorbutton.get_rgba()
rgb_str = rgba.to_string()
red, green, blue = rgb_str[4:-1].split(",")
color_list = [int(red)/255, int(green)/255, int(blue)/255]
new_color = colors.rgb2hex(color_list)
self.changes.append(
lambda: self.layer.set_contour_line_color(new_color))
def on_switch_draw_lp_plane_state_set(self, switch, state):
"""
Queues up a new state for the linear-pole-plane switch.
Triggered when a new state for the linear-pole-plane switch
is set. Queues it up in the list of changes.
"""
self.changes.append(
lambda: self.layer.set_draw_lp_plane(state))
def on_switch_draw_hoeppene_state_set(self, switch, state):
"""
Queues up a new state for the draw Hoeppener switch.
Triggered when a new state for the Hoeppener switch
is set. Queues it up in the list of changes.
"""
self.changes.append(
lambda: self.layer.set_draw_hoeppener(state))
def on_switch_draw_angelier_state_set(self, switch, state):
"""
Queues up a new state for the draw Angelier switch.
        Triggered when a new state for the Angelier switch
is set. Queues it up in the list of changes.
"""
self.changes.append(
lambda: self.layer.set_draw_angelier(state))
def on_switch_manual_range_state_set(self, switch, state):
"""
Queues up the new state of the manual range for contours switch.
The new state, a boolean, is queued up in the list of changes, and is
only applied if the "Apply" button is pressed.
"""
self.changes.append(lambda: self.layer.set_manual_range(state))
self.set_contour_range_label()
self.set_manual_range_sensitivity(state)
def set_contour_range_label(self):
"""
Turns the display of the resulting contour interval on or off.
When the manual range switch is on, then the resulting contour steps
        are displayed in the dialog. When the switch is off, the label is empty.
"""
if self.switch_manual_range.get_active() == True:
lower = self.spinbutton_lower_limit.get_value()
upper = self.spinbutton_upper_limit.get_value()
steps = self.spinbutton_steps.get_value()
            spacing = np.linspace(lower, upper, num=int(steps)).round(2)
self.label_contour_steps.set_text(str(spacing))
else:
self.label_contour_steps.set_text("")
def on_spinbutton_lower_limit_value_changed(self, spinbutton):
"""
Queues up the new lower limit for contours in the list of changes.
When the lower limit is changed, this method is called and queues up
the new value in the list of changes.
"""
lower_limit = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_lower_limit(lower_limit))
self.set_contour_range_label()
def on_spinbutton_upper_limit_value_changed(self, spinbutton):
"""
Queues up the new upper limit for contours in the list of changes.
When the upper limit is changed, this method is called and queues up
the new value in the list of changes.
"""
upper_limit = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_upper_limit(upper_limit))
self.set_contour_range_label()
def on_spinbutton_steps_value_changed(self, spinbutton):
"""
Queues up the new number of steps for contours in the list of changes.
When the number of steps is changed, this method is called and queues up
the new value in the list of changes.
"""
steps = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_steps(steps))
self.set_contour_range_label()
def set_fisher_conf_sensitivity(self, state):
"""
Sets the sensitivity of the Fisher confidence spinbutton.
The spinbutton for the level of confidence in the direction is only
sensitive when the Switch for the draw-state is on (= True). The
method is called when the dialog starts and when the draw-state
of the Fisher smallcircle is changed.
"""
self.spinbutton_fisher_conf.set_sensitive(state)
def on_switch_mean_vector_state_set(self, switch, state):
"""
Queues up a new state for the drawing of the mean vector linear.
When the state of the Switch is changed the new state is queued in the
list of changes. True means that a linear is drawn for the mean
direction of the dataset. False means that it is not drawn.
"""
self.changes.append(lambda: self.layer.set_draw_mean_vector(state))
def on_switch_fisher_sc_state_set(self, switch, state):
"""
Queues up a new state for the drawing of the Fisher smallcircle.
When the state of the Switch is changed the new state is queued in the
list of changes. True means that a smallcircle is drawn that represents
the confidence in the mean direction. False means that it is not drawn.
"""
self.changes.append(lambda: self.layer.set_draw_fisher_sc(state))
self.set_fisher_conf_sensitivity(state)
def on_spinbutton_fisher_conf_value_changed(self, spinbutton):
"""
Queues up a new value for the confidence of the Fisher direction.
When the value is changed, the new value is queued in the list of
changes. A larger number means that a larger circle will be drawn.
"""
conf = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_fisher_conf(conf))
def on_spinbutton_dip_rose_spacing_value_changed(self, spinbutton):
"""
"""
conf = spinbutton.get_value()
self.changes.append(lambda: self.layer.set_dip_rose_spacing(conf))
self.set_dip_rose_spacing_label()
def set_rose_spacing_label(self):
"""
Gets the new spacing and calculates the new steps. Sets them as label.
Triggered when the dialog starts or the azimuth spacing of the rose
diagram is changed. Calculates the new steps between 0 and 360
degrees and sets the list as the new label.
"""
degrees = self.spinbutton_rose_spacing.get_value()
brackets = np.arange(0, 360, step=degrees)
self.label_rose_spacing.set_text(str(brackets))
def set_dip_rose_spacing_label(self):
"""
Gets the new dip spacing and calculates the new steps. Sets the label.
Triggered when the dialog starts or the dip spacing of the rose
diagram is changed. Calculates the new steps between 0 and 90
degrees and sets the list as the new label.
"""
degrees = self.spinbutton_dip_rose_spacing.get_value()
brackets = np.arange(0, 90, step=degrees)
self.label_dip_rose_spacing.set_text(str(brackets))
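        # Illustrative example, not part of the original method: a dip spacing
        # of 10 degrees produces np.arange(0, 90, step=10), i.e. the bins
        # 0, 10, 20, ..., 80, whose string form becomes the label text.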
|
eduNEXT/edx-platform
|
refs/heads/master
|
common/djangoapps/util/tests/test_password_policy_validators.py
|
4
|
"""Tests for util.password_policy_validators module."""
import unittest
import pytest
from ddt import data, ddt, unpack
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.core.exceptions import ValidationError
from django.test.utils import override_settings
from common.djangoapps.util.password_policy_validators import (
create_validator_config,
password_validators_instruction_texts,
validate_password
)
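# NOTE: create_validator_config is assumed here to build entries in the shape
# Django expects for AUTH_PASSWORD_VALIDATORS, i.e. roughly
#   {'NAME': '<dotted.path.to.Validator>', 'OPTIONS': {...}}
# which is why the lists built below can be passed straight to
# override_settings(AUTH_PASSWORD_VALIDATORS=...).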
@ddt
class PasswordPolicyValidatorsTestCase(unittest.TestCase):
"""
Tests for password validator utility functions
The general framework I went with for testing the validators was to test:
1) requiring a single check (also checks proper singular message)
2) requiring multiple instances of the check (also checks proper plural message)
3) successful check
"""
def validation_errors_checker(self, password, msg, user=None):
"""
This helper function is used to check the proper error messages are
being displayed based on the password and validator.
Parameters:
            password (unicode): the password to validate on
            msg (str): The expected ValidationError message
            user (django.contrib.auth.models.User): user object to use in validation.
                This is an optional parameter unless the validator requires a
                user object.
"""
if msg is None:
validate_password(password, user)
else:
with pytest.raises(ValidationError) as cm:
validate_password(password, user)
assert msg in ' '.join(cm.value.messages)
def test_unicode_password(self):
""" Tests that validate_password enforces unicode """
unicode_str = '𤭮'
byte_str = unicode_str.encode('utf-8')
# Sanity checks and demonstration of why this test is useful
assert len(byte_str) == 4
assert len(unicode_str) == 1
# Test length check
self.validation_errors_checker(byte_str, 'This password is too short. It must contain at least 2 characters.')
self.validation_errors_checker(byte_str + byte_str, None)
# Test badly encoded password
self.validation_errors_checker(b'\xff\xff', 'Invalid password.')
def test_password_unicode_normalization(self):
""" Tests that validate_password normalizes passwords """
# s ̣ ̇ (s with combining dot below and combining dot above)
not_normalized_password = '\u0073\u0323\u0307'
assert len(not_normalized_password) == 3
# When we normalize we expect the not_normalized password to fail
# because it should be normalized to u'\u1E69' -> ṩ
self.validation_errors_checker(not_normalized_password,
'This password is too short. It must contain at least 2 characters.')
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 2})], # lint-amnesty, pylint: disable=line-too-long
'at least 2 characters.'),
([
create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 2}), # lint-amnesty, pylint: disable=line-too-long
create_validator_config('common.djangoapps.util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 2}), # lint-amnesty, pylint: disable=line-too-long
], 'characters, including 2 letters.'),
([
create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 2}), # lint-amnesty, pylint: disable=line-too-long
create_validator_config('common.djangoapps.util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 2}), # lint-amnesty, pylint: disable=line-too-long
create_validator_config('common.djangoapps.util.password_policy_validators.NumericValidator', {'min_numeric': 1}), # lint-amnesty, pylint: disable=line-too-long
], 'characters, including 2 letters & 1 number.'),
([
create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 2}), # lint-amnesty, pylint: disable=line-too-long
create_validator_config('common.djangoapps.util.password_policy_validators.UppercaseValidator', {'min_upper': 3}), # lint-amnesty, pylint: disable=line-too-long
create_validator_config('common.djangoapps.util.password_policy_validators.NumericValidator', {'min_numeric': 1}), # lint-amnesty, pylint: disable=line-too-long
create_validator_config('common.djangoapps.util.password_policy_validators.SymbolValidator', {'min_symbol': 2}), # lint-amnesty, pylint: disable=line-too-long
], 'including 3 uppercase letters & 1 number & 2 symbols.'),
)
@unpack
def test_password_instructions(self, config, msg):
""" Tests password instructions """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
assert msg in password_validators_instruction_texts()
@data(
('userna', 'username', 'test@example.com', 'The password is too similar to the username.'),
('password', 'username', 'password@example.com', 'The password is too similar to the email address.'),
('password', 'username', 'test@password.com', 'The password is too similar to the email address.'),
('password', 'username', 'test@example.com', None),
)
@unpack
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.UserAttributeSimilarityValidator')
])
def test_user_attribute_similarity_validation_errors(self, password, username, email, msg):
""" Tests validate_password error messages for the UserAttributeSimilarityValidator """
user = User(username=username, email=email)
self.validation_errors_checker(password, msg, user)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 1})], # lint-amnesty, pylint: disable=line-too-long
'', 'This password is too short. It must contain at least 1 character.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 8})], # lint-amnesty, pylint: disable=line-too-long
'd', 'This password is too short. It must contain at least 8 characters.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.MinimumLengthValidator', {'min_length': 8})], # lint-amnesty, pylint: disable=line-too-long
'longpassword', None),
)
@unpack
def test_minimum_length_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the MinimumLengthValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.MaximumLengthValidator', {'max_length': 1})], # lint-amnesty, pylint: disable=line-too-long
'longpassword', 'This password is too long. It must contain no more than 1 character.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.MaximumLengthValidator', {'max_length': 10})], # lint-amnesty, pylint: disable=line-too-long
'longpassword', 'This password is too long. It must contain no more than 10 characters.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.MaximumLengthValidator', {'max_length': 20})], # lint-amnesty, pylint: disable=line-too-long
'shortpassword', None),
)
@unpack
def test_maximum_length_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the MaximumLengthValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
('password', 'This password is too common.'),
('good_password', None),
)
@unpack
@override_settings(AUTH_PASSWORD_VALIDATORS=[
create_validator_config('django.contrib.auth.password_validation.CommonPasswordValidator')
])
def test_common_password_validation_errors(self, password, msg):
""" Tests validate_password error messages for the CommonPasswordValidator """
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 1})], # lint-amnesty, pylint: disable=line-too-long
'12345', 'This password must contain at least 1 letter.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 5})], # lint-amnesty, pylint: disable=line-too-long
'test123', 'This password must contain at least 5 letters.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.AlphabeticValidator', {'min_alphabetic': 2})], # lint-amnesty, pylint: disable=line-too-long
'password', None),
)
@unpack
def test_alphabetic_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the AlphabeticValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.NumericValidator', {'min_numeric': 1})], # lint-amnesty, pylint: disable=line-too-long
'test', 'This password must contain at least 1 number.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.NumericValidator', {'min_numeric': 4})], # lint-amnesty, pylint: disable=line-too-long
'test123', 'This password must contain at least 4 numbers.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.NumericValidator', {'min_numeric': 2})], # lint-amnesty, pylint: disable=line-too-long
'password123', None),
)
@unpack
def test_numeric_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the NumericValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.UppercaseValidator', {'min_upper': 1})], # lint-amnesty, pylint: disable=line-too-long
'lowercase', 'This password must contain at least 1 uppercase letter.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.UppercaseValidator', {'min_upper': 6})], # lint-amnesty, pylint: disable=line-too-long
'NOTenough', 'This password must contain at least 6 uppercase letters.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.UppercaseValidator', {'min_upper': 1})], # lint-amnesty, pylint: disable=line-too-long
'camelCase', None),
)
@unpack
def test_upper_case_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the UppercaseValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.LowercaseValidator', {'min_lower': 1})], # lint-amnesty, pylint: disable=line-too-long
'UPPERCASE', 'This password must contain at least 1 lowercase letter.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.LowercaseValidator', {'min_lower': 4})], # lint-amnesty, pylint: disable=line-too-long
'notENOUGH', 'This password must contain at least 4 lowercase letters.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.LowercaseValidator', {'min_lower': 1})], # lint-amnesty, pylint: disable=line-too-long
'goodPassword', None),
)
@unpack
def test_lower_case_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the LowercaseValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.PunctuationValidator', {'min_punctuation': 1})], # lint-amnesty, pylint: disable=line-too-long
'no punctuation', 'This password must contain at least 1 punctuation mark.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.PunctuationValidator', {'min_punctuation': 7})], # lint-amnesty, pylint: disable=line-too-long
'p@$$w0rd$!', 'This password must contain at least 7 punctuation marks.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.PunctuationValidator', {'min_punctuation': 3})], # lint-amnesty, pylint: disable=line-too-long
'excl@m@t!on', None),
)
@unpack
def test_punctuation_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the PunctuationValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
@data(
([create_validator_config('common.djangoapps.util.password_policy_validators.SymbolValidator', {'min_symbol': 1})], # lint-amnesty, pylint: disable=line-too-long
'no symbol', 'This password must contain at least 1 symbol.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.SymbolValidator', {'min_symbol': 3})], # lint-amnesty, pylint: disable=line-too-long
'☹️boo☹️', 'This password must contain at least 3 symbols.'),
([create_validator_config('common.djangoapps.util.password_policy_validators.SymbolValidator', {'min_symbol': 2})], # lint-amnesty, pylint: disable=line-too-long
'☪symbols!☹️', None),
)
@unpack
def test_symbol_validation_errors(self, config, password, msg):
""" Tests validate_password error messages for the SymbolValidator """
with override_settings(AUTH_PASSWORD_VALIDATORS=config):
self.validation_errors_checker(password, msg)
|
mitdbg/modeldb
|
refs/heads/master
|
client/verta/verta/_swagger/_public/modeldb/model/ModeldbObservation.py
|
1
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class ModeldbObservation(BaseType):
def __init__(self, attribute=None, artifact=None, timestamp=None):
required = {
"attribute": False,
"artifact": False,
"timestamp": False,
}
self.attribute = attribute
self.artifact = artifact
self.timestamp = timestamp
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .CommonKeyValue import CommonKeyValue
from .ModeldbArtifact import ModeldbArtifact
tmp = d.get('attribute', None)
if tmp is not None:
d['attribute'] = CommonKeyValue.from_json(tmp)
tmp = d.get('artifact', None)
if tmp is not None:
d['artifact'] = ModeldbArtifact.from_json(tmp)
tmp = d.get('timestamp', None)
if tmp is not None:
d['timestamp'] = tmp
return ModeldbObservation(**d)
|
TomAugspurger/pandas
|
refs/heads/master
|
pandas/tests/dtypes/test_generic.py
|
2
|
from warnings import catch_warnings
import numpy as np
from pandas.core.dtypes import generic as gt
import pandas as pd
import pandas._testing as tm
class TestABCClasses:
tuples = [[1, 2, 2], ["red", "blue", "red"]]
multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color"))
datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"])
timedelta_index = pd.to_timedelta(np.arange(5), unit="s")
    period_index = pd.period_range("2000/1/1", "2010/1/1", freq="M")
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)
sparse_array = pd.arrays.SparseArray(np.random.randn(10))
datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
def test_abc_types(self):
assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
assert isinstance(self.sparse_array, gt.ABCExtensionArray)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(self.datetime_array, gt.ABCDatetimeArray)
assert not isinstance(self.datetime_index, gt.ABCDatetimeArray)
assert isinstance(self.timedelta_array, gt.ABCTimedeltaArray)
assert not isinstance(self.timedelta_index, gt.ABCTimedeltaArray)
def test_setattr_warnings():
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df["three"] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
|
rogerhu/django
|
refs/heads/master
|
django/core/checks/compatibility/__init__.py
|
12133432
| |
Manolaru/Python_train
|
refs/heads/master
|
Les_4/Task_12/model/__init__.py
|
12133432
| |
shubhamdhama/zulip
|
refs/heads/master
|
tools/setup/__init__.py
|
12133432
| |
cecep-edu/edx-platform
|
refs/heads/eucalyptus.2
|
cms/djangoapps/course_creators/__init__.py
|
12133432
| |
amenasse/grindstone
|
refs/heads/master
|
grindstone/tasks/migrations/__init__.py
|
12133432
| |
kevclarx/ansible
|
refs/heads/devel
|
test/units/module_utils/gcp/test_utils.py
|
127
|
# -*- coding: utf-8 -*-
# (c) 2016, Tom Melendez <tom@supertom.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from ansible.compat.tests import mock, unittest
from ansible.module_utils.gcp import check_min_pkg_version, GCPUtils, GCPInvalidURLError
def build_distribution(version):
obj = mock.MagicMock()
obj.version = '0.5.0'
return obj
class GCPUtilsTestCase(unittest.TestCase):
params_dict = {
'url_map_name': 'foo_url_map_name',
'description': 'foo_url_map description',
'host_rules': [
{
'description': 'host rules description',
'hosts': [
'www.example.com',
'www2.example.com'
],
'path_matcher': 'host_rules_path_matcher'
}
],
'path_matchers': [
{
'name': 'path_matcher_one',
'description': 'path matcher one',
'defaultService': 'bes-pathmatcher-one-default',
'pathRules': [
{
'service': 'my-one-bes',
'paths': [
'/',
'/aboutus'
]
}
]
},
{
'name': 'path_matcher_two',
'description': 'path matcher two',
'defaultService': 'bes-pathmatcher-two-default',
'pathRules': [
{
'service': 'my-two-bes',
'paths': [
'/webapp',
'/graphs'
]
}
]
}
]
}
@mock.patch("pkg_resources.get_distribution", side_effect=build_distribution)
def test_check_minimum_pkg_version(self, mockobj):
self.assertTrue(check_min_pkg_version('foobar', '0.4.0'))
self.assertTrue(check_min_pkg_version('foobar', '0.5.0'))
self.assertFalse(check_min_pkg_version('foobar', '0.6.0'))
def test_parse_gcp_url(self):
# region, resource, entity, method
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/regions/us-east1/instanceGroupManagers/my-mig/recreateInstances'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertEquals('us-east1', actual['region'])
self.assertEquals('instanceGroupManagers', actual['resource_name'])
self.assertEquals('my-mig', actual['entity_name'])
self.assertEquals('recreateInstances', actual['method_name'])
# zone, resource, entity, method
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/zones/us-east1-c/instanceGroupManagers/my-mig/recreateInstances'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertEquals('us-east1-c', actual['zone'])
self.assertEquals('instanceGroupManagers', actual['resource_name'])
self.assertEquals('my-mig', actual['entity_name'])
self.assertEquals('recreateInstances', actual['method_name'])
# global, resource
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertTrue('global' in actual)
self.assertTrue(actual['global'])
self.assertEquals('urlMaps', actual['resource_name'])
# global, resource, entity
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/my-url-map'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('myproject', actual['project'])
self.assertTrue('global' in actual)
self.assertTrue(actual['global'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('compute', actual['service'])
# global URL, resource, entity, method_name
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/mybackendservice/getHealth'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertTrue('global' in actual)
self.assertTrue(actual['global'])
self.assertEquals('backendServices', actual['resource_name'])
self.assertEquals('mybackendservice', actual['entity_name'])
self.assertEquals('getHealth', actual['method_name'])
# no location in URL
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy/setUrlMap'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertFalse('global' in actual)
self.assertEquals('targetHttpProxies', actual['resource_name'])
self.assertEquals('mytargetproxy', actual['entity_name'])
self.assertEquals('setUrlMap', actual['method_name'])
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies/mytargetproxy'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertFalse('global' in actual)
self.assertEquals('targetHttpProxies', actual['resource_name'])
self.assertEquals('mytargetproxy', actual['entity_name'])
input_url = 'https://www.googleapis.com/compute/v1/projects/myproject/targetHttpProxies'
actual = GCPUtils.parse_gcp_url(input_url)
self.assertEquals('compute', actual['service'])
self.assertEquals('v1', actual['api_version'])
self.assertEquals('myproject', actual['project'])
self.assertFalse('global' in actual)
self.assertEquals('targetHttpProxies', actual['resource_name'])
# test exceptions
no_projects_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global/backendServices/mybackendservice/getHealth'
no_resource_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject/global'
no_resource_no_loc_input_url = 'https://www.googleapis.com/compute/v1/not-projects/myproject'
with self.assertRaises(GCPInvalidURLError) as cm:
GCPUtils.parse_gcp_url(no_projects_input_url)
self.assertTrue(cm.exception, GCPInvalidURLError)
with self.assertRaises(GCPInvalidURLError) as cm:
GCPUtils.parse_gcp_url(no_resource_input_url)
self.assertTrue(cm.exception, GCPInvalidURLError)
with self.assertRaises(GCPInvalidURLError) as cm:
GCPUtils.parse_gcp_url(no_resource_no_loc_input_url)
self.assertTrue(cm.exception, GCPInvalidURLError)
def test_params_to_gcp_dict(self):
expected = {
'description': 'foo_url_map description',
'hostRules': [
{
'description': 'host rules description',
'hosts': [
'www.example.com',
'www2.example.com'
],
'pathMatcher': 'host_rules_path_matcher'
}
],
'name': 'foo_url_map_name',
'pathMatchers': [
{
'defaultService': 'bes-pathmatcher-one-default',
'description': 'path matcher one',
'name': 'path_matcher_one',
'pathRules': [
{
'paths': [
'/',
'/aboutus'
],
'service': 'my-one-bes'
}
]
},
{
'defaultService': 'bes-pathmatcher-two-default',
'description': 'path matcher two',
'name': 'path_matcher_two',
'pathRules': [
{
'paths': [
'/webapp',
'/graphs'
],
'service': 'my-two-bes'
}
]
}
]
}
actual = GCPUtils.params_to_gcp_dict(self.params_dict, 'url_map_name')
self.assertEqual(expected, actual)
def test_get_gcp_resource_from_methodId(self):
input_data = 'compute.urlMaps.list'
actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
self.assertEqual('urlMaps', actual)
input_data = None
actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
self.assertFalse(actual)
input_data = 666
actual = GCPUtils.get_gcp_resource_from_methodId(input_data)
self.assertFalse(actual)
def test_get_entity_name_from_resource_name(self):
input_data = 'urlMaps'
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual('urlMap', actual)
input_data = 'targetHttpProxies'
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual('targetHttpProxy', actual)
input_data = 'globalForwardingRules'
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual('forwardingRule', actual)
input_data = ''
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual(None, actual)
input_data = 666
actual = GCPUtils.get_entity_name_from_resource_name(input_data)
self.assertEqual(None, actual)
def test_are_params_equal(self):
params1 = {'one': 1}
params2 = {'one': 1}
actual = GCPUtils.are_params_equal(params1, params2)
self.assertTrue(actual)
params1 = {'one': 1}
params2 = {'two': 2}
actual = GCPUtils.are_params_equal(params1, params2)
self.assertFalse(actual)
params1 = {'three': 3, 'two': 2, 'one': 1}
params2 = {'one': 1, 'two': 2, 'three': 3}
actual = GCPUtils.are_params_equal(params1, params2)
self.assertTrue(actual)
params1 = {
"creationTimestamp": "2017-04-21T11:19:20.718-07:00",
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
"description": "",
"fingerprint": "ickr_pwlZPU=",
"hostRules": [
{
"description": "",
"hosts": [
"*."
],
"pathMatcher": "path-matcher-one"
}
],
"id": "8566395781175047111",
"kind": "compute#urlMap",
"name": "newtesturlmap-foo",
"pathMatchers": [
{
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
"description": "path matcher one",
"name": "path-matcher-one",
"pathRules": [
{
"paths": [
"/data",
"/aboutus"
],
"service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
}
]
}
],
"selfLink": "https://www.googleapis.com/compute/v1/projects/myproject/global/urlMaps/newtesturlmap-foo"
}
params2 = {
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/default-backend-service",
"hostRules": [
{
"description": "",
"hosts": [
"*."
],
"pathMatcher": "path-matcher-one"
}
],
"name": "newtesturlmap-foo",
"pathMatchers": [
{
"defaultService": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/bes-pathmatcher-one-default",
"description": "path matcher one",
"name": "path-matcher-one",
"pathRules": [
{
"paths": [
"/data",
"/aboutus"
],
"service": "https://www.googleapis.com/compute/v1/projects/myproject/global/backendServices/my-one-bes"
}
]
}
],
}
# params1 has exclude fields, params2 doesn't. Should be equal
actual = GCPUtils.are_params_equal(params1, params2)
self.assertTrue(actual)
def test_filter_gcp_fields(self):
input_data = {
u'kind': u'compute#httpsHealthCheck',
u'description': u'',
u'timeoutSec': 5,
u'checkIntervalSec': 5,
u'port': 443,
u'healthyThreshold': 2,
u'host': u'',
u'requestPath': u'/',
u'unhealthyThreshold': 2,
u'creationTimestamp': u'2017-05-16T15:09:36.546-07:00',
u'id': u'8727093129334146639',
u'selfLink': u'https://www.googleapis.com/compute/v1/projects/myproject/global/httpsHealthChecks/myhealthcheck',
u'name': u'myhealthcheck'}
expected = {
'name': 'myhealthcheck',
'checkIntervalSec': 5,
'port': 443,
'unhealthyThreshold': 2,
'healthyThreshold': 2,
'host': '',
'timeoutSec': 5,
'requestPath': '/'}
actual = GCPUtils.filter_gcp_fields(input_data)
self.assertEquals(expected, actual)
|
lonnen/socorro
|
refs/heads/master
|
webapp-django/crashstats/manage/forms.py
|
2
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from crashstats.crashstats.forms import BaseForm
class GraphicsDeviceUploadForm(BaseForm):
file = forms.FileField()
|
hoxmark/TDT4501-Specialization-Project
|
refs/heads/master
|
reinforcement/testing/svm_with_entropy.py
|
1
|
# Standard scientific Python imports
import matplotlib.pyplot as plt
import math
import random
import numpy as np
from utils import test_local_logger
entropy = True
lg = test_local_logger(entropy)
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics
from sklearn.utils import shuffle
# The digits dataset
digits = datasets.load_digits()
batch_size = 2
def selectRandomData(test_data, test_activeTargets, step):
    # Baseline strategy: return the remaining pool in a random order.
    return shuffle(test_data, test_activeTargets)
def selectBestData(test_data, test_activeTargets, step):
    # Entropy-based strategy: rank the remaining pool by prediction
    # uncertainty so the most informative samples are labelled first.
    if step == 0:
        # No classifier has been trained yet, so fall back to a random order.
        return shuffle(test_data, test_activeTargets)
    probs = classifier.predict_proba(test_data)
    # Shannon entropy per sample: -sum(p * log2(p))
    output = np.multiply(probs, np.log2(probs))
    output = np.sum(output, axis=1)
    output = output * -1
    # Sort the pool from highest to lowest entropy.
    order = np.argsort(-output)
    return np.array(test_data)[order], np.array(test_activeTargets)[order]
total = {}
num_of_iterations = 10
for iteration in range(0,num_of_iterations):
# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
test_data = data[:n_samples // 2]
test_activeTagets = digits.target[:n_samples // 2]
# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001, probability=True)
# We learn the digits on the first half of the digits
activeData =[]
activeTargets =[]
for step in range(0,50):
if entropy:
test_data, test_activeTagets = selectBestData(test_data, test_activeTagets, step)
else:
test_data, test_activeTagets = selectRandomData(test_data, test_activeTagets, step)
if step == 0:
to_extract = batch_size*50
else:
to_extract = batch_size
activeData.extend(test_data[:to_extract])
activeTargets.extend(test_activeTagets[:to_extract])
test_data = test_data[to_extract:]
test_activeTagets = test_activeTagets[to_extract:]
classifier.fit(activeData, activeTargets)
print(len(test_data))
print(len(activeData))
# # Now predict the value of the digit on the second half:
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
# print(metrics.classification_report(expected, predicted))
acc = metrics.accuracy_score(expected, predicted)
print(acc)
if not step in total:
total[step] = acc
else:
total[step] += acc
lg.scalar_summary('acc/{}'.format(iteration), acc, step)
for k, v in enumerate(total):
print(v)
print(k)
print(total[v])
lg.scalar_summary('avg', total[v]/num_of_iterations, k)
# def get_entropy_sorted_query(self, best):
# probs = data["all_predictions"]
# out = list(map(entropy, probs))
# order = np.argsort(out)
# if best:
# # Good:
# similar_indices = order[len(order)-opt.selection_radius:]
# else:
# # bad
# similar_indices = order[:opt.selection_radius]
# for idx in similar_indices:
# self.add_index(idx)
# return similar_indices
|
LudwigOrtmann/RIOT
|
refs/heads/master
|
tests/bitarithm_timings/tests/01-run.py
|
6
|
#!/usr/bin/env python3
# Copyright (C) 2017 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
def testfunc(child):
child.expect_exact("Start.")
    child.expect(r'\+ bitarithm_msb: \d+ iterations per second')
    child.expect(r'\+ bitarithm_lsb: \d+ iterations per second')
    child.expect(r'\+ bitarithm_bits_set: \d+ iterations per second')
child.expect_exact("Done.")
if __name__ == "__main__":
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
from testrunner import run
sys.exit(run(testfunc, timeout=30))
|
dezelin/qemu-kvm
|
refs/heads/master
|
tests/qemu-iotests/iotests.py
|
19
|
# Common utilities and Python wrappers for qemu-iotests
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
import subprocess
import unittest
import sys; sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'QMP'))
import qmp
__all__ = ['imgfmt', 'imgproto', 'test_dir', 'qemu_img', 'qemu_io',
'VM', 'QMPTestCase', 'notrun', 'main']
# This will not work if arguments or path contain spaces but is necessary if we
# want to support the override options that ./check supports.
qemu_img_args = os.environ.get('QEMU_IMG', 'qemu-img').strip().split(' ')
qemu_io_args = os.environ.get('QEMU_IO', 'qemu-io').strip().split(' ')
qemu_args = os.environ.get('QEMU', 'qemu').strip().split(' ')
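# Illustrative example (values are hypothetical, not from this file): setting
# QEMU_IMG="/usr/local/bin/qemu-img -f qcow2" in the environment makes the
# split above produce qemu_img_args == ['/usr/local/bin/qemu-img', '-f', 'qcow2'].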
imgfmt = os.environ.get('IMGFMT', 'raw')
imgproto = os.environ.get('IMGPROTO', 'file')
test_dir = os.environ.get('TEST_DIR', '/var/tmp')
def qemu_img(*args):
'''Run qemu-img and return the exit code'''
devnull = open('/dev/null', 'r+')
return subprocess.call(qemu_img_args + list(args), stdin=devnull, stdout=devnull)
def qemu_io(*args):
'''Run qemu-io and return the stdout data'''
args = qemu_io_args + list(args)
return subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
class VM(object):
'''A QEMU VM'''
def __init__(self):
self._monitor_path = os.path.join(test_dir, 'qemu-mon.%d' % os.getpid())
self._qemu_log_path = os.path.join(test_dir, 'qemu-log.%d' % os.getpid())
self._args = qemu_args + ['-chardev',
'socket,id=mon,path=' + self._monitor_path,
'-mon', 'chardev=mon,mode=control',
'-qtest', 'stdio', '-machine', 'accel=qtest',
'-display', 'none', '-vga', 'none']
self._num_drives = 0
def add_drive(self, path, opts=''):
'''Add a virtio-blk drive to the VM'''
options = ['if=virtio',
'format=%s' % imgfmt,
'cache=none',
'file=%s' % path,
'id=drive%d' % self._num_drives]
if opts:
options.append(opts)
self._args.append('-drive')
self._args.append(','.join(options))
self._num_drives += 1
return self
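    # Illustrative sketch (hypothetical path, assuming the default imgfmt of
    # 'raw'): vm.add_drive('/tmp/test.img') appends the two arguments
    # '-drive' and 'if=virtio,format=raw,cache=none,file=/tmp/test.img,id=drive0'.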
def launch(self):
'''Launch the VM and establish a QMP connection'''
devnull = open('/dev/null', 'rb')
qemulog = open(self._qemu_log_path, 'wb')
try:
self._qmp = qmp.QEMUMonitorProtocol(self._monitor_path, server=True)
self._popen = subprocess.Popen(self._args, stdin=devnull, stdout=qemulog,
stderr=subprocess.STDOUT)
self._qmp.accept()
except:
os.remove(self._monitor_path)
raise
def shutdown(self):
'''Terminate the VM and clean up'''
if not self._popen is None:
self._qmp.cmd('quit')
self._popen.wait()
os.remove(self._monitor_path)
os.remove(self._qemu_log_path)
self._popen = None
def qmp(self, cmd, **args):
'''Invoke a QMP command and return the result dict'''
return self._qmp.cmd(cmd, args=args)
def get_qmp_events(self, wait=False):
'''Poll for queued QMP events and return a list of dicts'''
events = self._qmp.get_events(wait=wait)
self._qmp.clear_events()
return events
index_re = re.compile(r'([^\[]+)\[([^\]]+)\]')
class QMPTestCase(unittest.TestCase):
'''Abstract base class for QMP test cases'''
def dictpath(self, d, path):
'''Traverse a path in a nested dict'''
for component in path.split('/'):
m = index_re.match(component)
if m:
component, idx = m.groups()
idx = int(idx)
if not isinstance(d, dict) or component not in d:
self.fail('failed path traversal for "%s" in "%s"' % (path, str(d)))
d = d[component]
if m:
if not isinstance(d, list):
self.fail('path component "%s" in "%s" is not a list in "%s"' % (component, path, str(d)))
try:
d = d[idx]
except IndexError:
self.fail('invalid index "%s" in path "%s" in "%s"' % (idx, path, str(d)))
return d
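    # Illustrative example (hypothetical QMP result, not from a real test):
    #   self.dictpath({'return': [{'device': 'drive0'}]}, 'return[0]/device')
    # indexes into the 'return' list and yields 'drive0'.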
def assert_qmp(self, d, path, value):
'''Assert that the value for a specific path in a QMP dict matches'''
result = self.dictpath(d, path)
self.assertEqual(result, value, 'values not equal "%s" and "%s"' % (str(result), str(value)))
def notrun(reason):
'''Skip this test suite'''
# Each test in qemu-iotests has a number ("seq")
seq = os.path.basename(sys.argv[0])
open('%s.notrun' % seq, 'wb').write(reason + '\n')
print '%s not run: %s' % (seq, reason)
sys.exit(0)
def main(supported_fmts=[]):
'''Run tests'''
if supported_fmts and (imgfmt not in supported_fmts):
notrun('not suitable for this image format: %s' % imgfmt)
# We need to filter out the time taken from the output so that qemu-iotest
# can reliably diff the results against master output.
import StringIO
output = StringIO.StringIO()
class MyTestRunner(unittest.TextTestRunner):
def __init__(self, stream=output, descriptions=True, verbosity=1):
unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
# unittest.main() will use sys.exit() so expect a SystemExit exception
try:
unittest.main(testRunner=MyTestRunner)
finally:
sys.stderr.write(re.sub(r'Ran (\d+) test[s] in [\d.]+s', r'Ran \1 tests', output.getvalue()))
|
onponomarev/ganeti
|
refs/heads/master
|
lib/cli_opts.py
|
2
|
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module containing Ganeti's command line parsing options"""
import re
from optparse import (Option, OptionValueError)
import simplejson
from ganeti import utils
from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti import pathutils
from ganeti import serializer
__all__ = [
"ABSOLUTE_OPT",
"ADD_RESERVED_IPS_OPT",
"ADD_UIDS_OPT",
"ALL_OPT",
"ALLOC_POLICY_OPT",
"ALLOCATABLE_OPT",
"ALLOW_FAILOVER_OPT",
"AUTO_PROMOTE_OPT",
"AUTO_REPLACE_OPT",
"BACKEND_OPT",
"BLK_OS_OPT",
"CAPAB_MASTER_OPT",
"CAPAB_VM_OPT",
"CLEANUP_OPT",
"CLEAR_OSPARAMS_OPT",
"CLEAR_OSPARAMS_PRIVATE_OPT",
"cli_option",
"CLUSTER_DOMAIN_SECRET_OPT",
"COMMIT_OPT",
"COMMON_CREATE_OPTS",
"COMMON_OPTS",
"COMPRESS_OPT",
"COMPRESSION_TOOLS_OPT",
"CONFIRM_OPT",
"CP_SIZE_OPT",
"DEBUG_OPT",
"DEBUG_SIMERR_OPT",
"DEFAULT_IALLOCATOR_OPT",
"DEFAULT_IALLOCATOR_PARAMS_OPT",
"DISK_OPT",
"DISK_PARAMS_OPT",
"DISK_STATE_OPT",
"DISK_TEMPLATE_OPT",
"DISKIDX_OPT",
"DRAINED_OPT",
"DRBD_HELPER_OPT",
"DRY_RUN_OPT",
"DST_NODE_OPT",
"EARLY_RELEASE_OPT",
"ENABLED_DATA_COLLECTORS_OPT",
"DIAGNOSE_DATA_COLLECTOR_FILENAME_OPT",
"ENABLED_DISK_TEMPLATES_OPT",
"ENABLED_HV_OPT",
"ENABLED_PREDICTIVE_QUEUE_OPT",
"ENABLED_USER_SHUTDOWN_OPT",
"ERROR_CODES_OPT",
"EXT_PARAMS_OPT",
"FAILURE_ONLY_OPT",
"FIELDS_OPT",
"FILESTORE_DIR_OPT",
"FILESTORE_DRIVER_OPT",
"FORCE_FAILOVER_OPT",
"FORCE_FILTER_OPT",
"FORCE_OPT",
"FORCE_VARIANT_OPT",
"FORTHCOMING_OPT",
"GATEWAY6_OPT",
"GATEWAY_OPT",
"GLOBAL_FILEDIR_OPT",
"GLOBAL_GLUSTER_FILEDIR_OPT",
"GLOBAL_SHARED_FILEDIR_OPT",
"HELPER_SHUTDOWN_TIMEOUT_OPT",
"HELPER_STARTUP_TIMEOUT_OPT",
"HID_OS_OPT",
"HOTPLUG_IF_POSSIBLE_OPT",
"HOTPLUG_OPT",
"HV_STATE_OPT",
"HVLIST_OPT",
"HVOPTS_OPT",
"HYPERVISOR_OPT",
"IALLOCATOR_OPT",
"IDENTIFY_DEFAULTS_OPT",
"IGNORE_CONSIST_OPT",
"IGNORE_ERRORS_OPT",
"IGNORE_FAILURES_OPT",
"IGNORE_HVVERSIONS_OPT",
"IGNORE_IPOLICY_OPT",
"IGNORE_OFFLINE_OPT",
"IGNORE_REMOVE_FAILURES_OPT",
"IGNORE_SECONDARIES_OPT",
"IGNORE_SOFT_ERRORS_OPT",
"IGNORE_SIZE_OPT",
"INCLUDEDEFAULTS_OPT",
"INPUT_OPT",
"INSTALL_IMAGE_OPT",
"INSTANCE_COMMUNICATION_NETWORK_OPT",
"INSTANCE_COMMUNICATION_OPT",
"INSTANCE_POLICY_OPTS",
"INTERVAL_OPT",
"IPOLICY_BOUNDS_SPECS_STR",
"IPOLICY_DISK_TEMPLATES",
"IPOLICY_SPINDLE_RATIO",
"IPOLICY_STD_SPECS_OPT",
"IPOLICY_STD_SPECS_STR",
"IPOLICY_VCPU_RATIO",
"IPOLICY_MEMORY_RATIO",
"LONG_SLEEP_OPT",
"MAC_PREFIX_OPT",
"MAINT_BALANCE_OPT",
"MAINT_BALANCE_THRESHOLD_OPT",
"MAINT_INTERVAL_OPT",
"MAINTAIN_NODE_HEALTH_OPT",
"MASTER_NETDEV_OPT",
"MASTER_NETMASK_OPT",
"MAX_TRACK_OPT",
"MC_OPT",
"MIGRATION_MODE_OPT",
"MODIFY_ETCHOSTS_OPT",
"MODIFY_SSH_SETUP_OPT",
"NET_OPT",
"NETWORK6_OPT",
"NETWORK_OPT",
"NEW_CLUSTER_CERT_OPT",
"NEW_CLUSTER_DOMAIN_SECRET_OPT",
"NEW_CONFD_HMAC_KEY_OPT",
"NEW_NODE_CERT_OPT",
"NEW_PRIMARY_OPT",
"NEW_RAPI_CERT_OPT",
"NEW_SECONDARY_OPT",
"NEW_SPICE_CERT_OPT",
"NEW_SSH_KEY_OPT",
"NIC_PARAMS_OPT",
"NO_INSTALL_OPT",
"NO_REMEMBER_OPT",
"NOCONFLICTSCHECK_OPT",
"NODE_FORCE_JOIN_OPT",
"NODE_LIST_OPT",
"NODE_PARAMS_OPT",
"NODE_PLACEMENT_OPT",
"NODE_POWERED_OPT",
"NODEGROUP_OPT",
"NODEGROUP_OPT_NAME",
"NOHDR_OPT",
"NOIPCHECK_OPT",
"NOMODIFY_ETCHOSTS_OPT",
"NOMODIFY_SSH_SETUP_OPT",
"NONAMECHECK_OPT",
"NONICS_OPT",
"NONLIVE_OPT",
"NONPLUS1_OPT",
"NORUNTIME_CHGS_OPT",
"NOSHUTDOWN_OPT",
"NOSSH_KEYCHECK_OPT",
"NOSTART_OPT",
"NOVOTING_OPT",
"NWSYNC_OPT",
"OFFLINE_INST_OPT",
"OFFLINE_OPT",
"ON_PRIMARY_OPT",
"ON_SECONDARY_OPT",
"ONLINE_INST_OPT",
"OOB_TIMEOUT_OPT",
"OPT_COMPL_ALL",
"OPT_COMPL_INST_ADD_NODES",
"OPT_COMPL_MANY_NODES",
"OPT_COMPL_ONE_EXTSTORAGE",
"OPT_COMPL_ONE_FILTER",
"OPT_COMPL_ONE_IALLOCATOR",
"OPT_COMPL_ONE_INSTANCE",
"OPT_COMPL_ONE_NETWORK",
"OPT_COMPL_ONE_NODE",
"OPT_COMPL_ONE_NODEGROUP",
"OPT_COMPL_ONE_OS",
"OS_OPT",
"OS_SIZE_OPT",
"OSPARAMS_OPT",
"OSPARAMS_PRIVATE_OPT",
"OSPARAMS_SECRET_OPT",
"POWER_DELAY_OPT",
"PREALLOC_WIPE_DISKS_OPT",
"PRIMARY_IP_VERSION_OPT",
"PRIMARY_ONLY_OPT",
"PRINT_JOBID_OPT",
"PRIORITY_OPT",
"RAPI_CERT_OPT",
"READD_OPT",
"REASON_OPT",
"REBOOT_TYPE_OPT",
"REMOVE_INSTANCE_OPT",
"REMOVE_OSPARAMS_OPT",
"REMOVE_OSPARAMS_PRIVATE_OPT",
"REMOVE_RESERVED_IPS_OPT",
"REMOVE_UIDS_OPT",
"RESERVED_LVS_OPT",
"ROMAN_OPT",
"RQL_OPT",
"RUNTIME_MEM_OPT",
"SECONDARY_IP_OPT",
"SECONDARY_ONLY_OPT",
"SELECT_OS_OPT",
"SEP_OPT",
"SEQUENTIAL_OPT",
"SHOW_MACHINE_OPT",
"SHOWCMD_OPT",
"SHUTDOWN_TIMEOUT_OPT",
"SINGLE_NODE_OPT",
"SPECS_CPU_COUNT_OPT",
"SPECS_DISK_COUNT_OPT",
"SPECS_DISK_SIZE_OPT",
"SPECS_MEM_SIZE_OPT",
"SPECS_NIC_COUNT_OPT",
"SPICE_CACERT_OPT",
"SPICE_CERT_OPT",
"SPLIT_ISPECS_OPTS",
"SRC_DIR_OPT",
"SRC_NODE_OPT",
"SSH_KEY_BITS_OPT",
"SSH_KEY_TYPE_OPT",
"STARTUP_PAUSED_OPT",
"STATIC_OPT",
"SUBMIT_OPT",
"SUBMIT_OPTS",
"SYNC_OPT",
"TAG_ADD_OPT",
"TAG_SRC_OPT",
"TIMEOUT_OPT",
"TO_GROUP_OPT",
"TRANSPORT_COMPRESSION_OPT",
"UIDPOOL_OPT",
"USE_EXTERNAL_MIP_SCRIPT",
"USE_REPL_NET_OPT",
"USEUNITS_OPT",
"VERBOSE_OPT",
"VERIFY_CLUTTER_OPT",
"VG_NAME_OPT",
"WFSYNC_OPT",
"YES_DOIT_OPT",
"ZERO_FREE_SPACE_OPT",
"ZEROING_IMAGE_OPT",
"ZEROING_TIMEOUT_FIXED_OPT",
"ZEROING_TIMEOUT_PER_MIB_OPT",
]
NO_PREFIX = "no_"
UN_PREFIX = "-"
#: Priorities (sorted)
_PRIORITY_NAMES = [
("low", constants.OP_PRIO_LOW),
("normal", constants.OP_PRIO_NORMAL),
("high", constants.OP_PRIO_HIGH),
]
#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
def check_unit(option, opt, value): # pylint: disable=W0613
"""OptParsers custom converter for units.
"""
try:
return utils.ParseUnit(value)
except errors.UnitParseError, err:
raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
"""Convert a KeyVal string into a dict.
This function will convert a key=val[,...] string into a dict. Empty
values will be converted specially: keys which have the prefix 'no_'
will have the value=False and the prefix stripped, keys with the prefix
"-" will have value=None and the prefix stripped, and the others will
have value=True.
@type opt: string
@param opt: a string holding the option name for which we process the
data, used in building error messages
@type data: string
@param data: a string of the format key=val,key=val,...
@type parse_prefixes: bool
@param parse_prefixes: whether to handle prefixes specially
@rtype: dict
@return: {key=val, key=val}
@raises errors.ParameterError: if there are duplicate keys
"""
kv_dict = {}
if data:
for elem in utils.UnescapeAndSplit(data, sep=","):
if "=" in elem:
key, val = elem.split("=", 1)
elif parse_prefixes:
if elem.startswith(NO_PREFIX):
key, val = elem[len(NO_PREFIX):], False
elif elem.startswith(UN_PREFIX):
key, val = elem[len(UN_PREFIX):], None
else:
key, val = elem, True
else:
raise errors.ParameterError("Missing value for key '%s' in option %s" %
(elem, opt))
if key in kv_dict:
raise errors.ParameterError("Duplicate key '%s' in option %s" %
(key, opt))
kv_dict[key] = val
return kv_dict
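# Illustrative example (hypothetical option name and data), with parse_prefixes=True:
#   _SplitKeyVal("--example", "mem=512,no_auto,-size", True)
# returns {"mem": "512", "auto": False, "size": None}.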
def _SplitIdentKeyVal(opt, value, parse_prefixes):
"""Helper function to parse "ident:key=val,key=val" options.
@type opt: string
@param opt: option name, used in error messages
@type value: string
@param value: expected to be in the format "ident:key=val,key=val,..."
@type parse_prefixes: bool
@param parse_prefixes: whether to handle prefixes specially (see
L{_SplitKeyVal})
@rtype: tuple
@return: (ident, {key=val, key=val})
@raises errors.ParameterError: in case of duplicates or other parsing errors
"""
if ":" not in value:
ident, rest = value, ""
else:
ident, rest = value.split(":", 1)
if parse_prefixes and ident.startswith(NO_PREFIX):
if rest:
msg = "Cannot pass options when removing parameter groups: %s" % value
raise errors.ParameterError(msg)
retval = (ident[len(NO_PREFIX):], False)
elif (parse_prefixes and ident.startswith(UN_PREFIX) and
(len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())):
if rest:
msg = "Cannot pass options when removing parameter groups: %s" % value
raise errors.ParameterError(msg)
retval = (ident[len(UN_PREFIX):], None)
else:
kv_dict = _SplitKeyVal(opt, rest, parse_prefixes)
retval = (ident, kv_dict)
return retval
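# Illustrative example (hypothetical values):
#   _SplitIdentKeyVal("--disk", "0:size=10G,mode=rw", True)
# returns ("0", {"size": "10G", "mode": "rw"}).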
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
"""Custom parser for ident:key=val,key=val options.
This will store the parsed values as a tuple (ident, {key: val}). As such,
multiple uses of this option via action=append is possible.
"""
return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
"""Custom parser class for key=val,key=val options.
This will store the parsed values as a dict {key: val}.
"""
return _SplitKeyVal(opt, value, True)
def check_key_private_val(option, opt, value): # pylint: disable=W0613
"""Custom parser class for private and secret key=val,key=val options.
This will store the parsed values as a dict {key: val}.
"""
return serializer.PrivateDict(_SplitKeyVal(opt, value, True))
def _SplitListKeyVal(opt, value):
retval = {}
for elem in value.split("/"):
if not elem:
raise errors.ParameterError("Empty section in option '%s'" % opt)
(ident, valdict) = _SplitIdentKeyVal(opt, elem, False)
if ident in retval:
msg = ("Duplicated parameter '%s' in parsing %s: %s" %
(ident, opt, elem))
raise errors.ParameterError(msg)
retval[ident] = valdict
return retval
def check_multilist_ident_key_val(_, opt, value):
"""Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.
@rtype: list of dictionary
@return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]
"""
retval = []
for line in value.split("//"):
retval.append(_SplitListKeyVal(opt, line))
return retval
def check_bool(option, opt, value): # pylint: disable=W0613
"""Custom parser for yes/no options.
This will store the parsed value as either True or False.
"""
value = value.lower()
if value == constants.VALUE_FALSE or value == "no":
return False
elif value == constants.VALUE_TRUE or value == "yes":
return True
else:
raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
"""Custom parser for comma-separated lists.
"""
# we have to make this explicit check since "".split(",") is [""],
# not an empty list :(
if not value:
return []
else:
return utils.UnescapeAndSplit(value)
def check_maybefloat(option, opt, value): # pylint: disable=W0613
"""Custom parser for float numbers which might be also defaults.
"""
value = value.lower()
if value == constants.VALUE_DEFAULT:
return value
else:
return float(value)
def check_json(option, opt, value): # pylint: disable=W0613
"""Custom parser for JSON arguments.
Takes a string containing JSON, returns a Python object.
"""
return simplejson.loads(value)
def check_filteraction(option, opt, value): # pylint: disable=W0613
"""Custom parser for filter rule actions.
Takes a string, returns an action as a Python object (list or string).
The string "RATE_LIMIT n" becomes `["RATE_LIMIT", n]`.
All other strings stay as they are.
"""
match = re.match(r"RATE_LIMIT\s+(\d+)", value)
if match:
n = int(match.group(1))
return ["RATE_LIMIT", n]
else:
return value
# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_NODE,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_OS,
OPT_COMPL_ONE_EXTSTORAGE,
OPT_COMPL_ONE_FILTER,
OPT_COMPL_ONE_IALLOCATOR,
OPT_COMPL_ONE_NETWORK,
OPT_COMPL_INST_ADD_NODES,
OPT_COMPL_ONE_NODEGROUP) = range(100, 110)
OPT_COMPL_ALL = compat.UniqueFrozenset([
OPT_COMPL_MANY_NODES,
OPT_COMPL_ONE_NODE,
OPT_COMPL_ONE_INSTANCE,
OPT_COMPL_ONE_OS,
OPT_COMPL_ONE_EXTSTORAGE,
OPT_COMPL_ONE_FILTER,
OPT_COMPL_ONE_IALLOCATOR,
OPT_COMPL_ONE_NETWORK,
OPT_COMPL_INST_ADD_NODES,
OPT_COMPL_ONE_NODEGROUP,
])
class CliOption(Option):
"""Custom option class for optparse.
"""
ATTRS = Option.ATTRS + [
"completion_suggest",
]
TYPES = Option.TYPES + (
"multilistidentkeyval",
"identkeyval",
"keyval",
"keyprivateval",
"unit",
"bool",
"list",
"maybefloat",
"json",
"filteraction",
)
TYPE_CHECKER = Option.TYPE_CHECKER.copy()
TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
TYPE_CHECKER["identkeyval"] = check_ident_key_val
TYPE_CHECKER["keyval"] = check_key_val
TYPE_CHECKER["keyprivateval"] = check_key_private_val
TYPE_CHECKER["unit"] = check_unit
TYPE_CHECKER["bool"] = check_bool
TYPE_CHECKER["list"] = check_list
TYPE_CHECKER["maybefloat"] = check_maybefloat
TYPE_CHECKER["json"] = check_json
TYPE_CHECKER["filteraction"] = check_filteraction
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption # pylint: disable=C0103
_YORNO = "yes|no"
DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
help="Increase debugging level")
NOHDR_OPT = cli_option("--no-headers", default=False,
action="store_true", dest="no_headers",
help="Don't display column headers")
SEP_OPT = cli_option("--separator", default=None,
action="store", dest="separator",
help=("Separator between output fields"
" (defaults to one space)"))
USEUNITS_OPT = cli_option("--units", default=None,
dest="units", choices=("h", "m", "g", "t"),
help="Specify units for output (one of h/m/g/t)")
FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
type="string", metavar="FIELDS",
help="Comma separated list of output fields")
FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
default=False, help="Force the operation")
CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
default=False, help="Do not require confirmation")
IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
action="store_true", default=False,
help=("Ignore offline nodes and do as much"
" as possible"))
IGNORE_SOFT_ERRORS_OPT = cli_option("--ignore-soft-errors",
dest="ignore_soft_errors",
action="store_true", default=False,
help=("Tell htools to ignore any soft"
" errors like N+1 violations"))
TAG_ADD_OPT = cli_option("--tags", dest="tags",
default=None, help="Comma-separated list of instance"
" tags")
TAG_SRC_OPT = cli_option("--from", dest="tags_source",
default=None, help="File with tag names")
SUBMIT_OPT = cli_option("--submit", dest="submit_only",
default=False, action="store_true",
help=("Submit the job and return the job ID, but"
" don't wait for the job to finish"))
PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
default=False, action="store_true",
help=("Additionally print the job as first line"
" on stdout (for scripting)."))
SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential",
default=False, action="store_true",
help=("Execute all resulting jobs sequentially"))
SYNC_OPT = cli_option("--sync", dest="do_locking",
default=False, action="store_true",
help=("Grab locks while doing the queries"
" in order to ensure more consistent results"))
DRY_RUN_OPT = cli_option("--dry-run", default=False,
action="store_true",
help=("Do not execute the operation, just run the"
" check steps and verify if it could be"
" executed"))
VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
action="store_true",
help="Increase the verbosity of the operation")
DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
action="store_true", dest="simulate_errors",
help="Debugging option that makes the operation"
" treat most runtime checks as failed")
NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
default=True, action="store_false",
help="Don't wait for sync (DANGEROUS!)")
WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync",
default=False, action="store_true",
help="Wait for disks to sync")
ONLINE_INST_OPT = cli_option("--online", dest="online_inst",
action="store_true", default=False,
help="Enable offline instance")
OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst",
action="store_true", default=False,
help="Disable down instance")
DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
help=("Custom disk setup (%s)" %
utils.CommaJoin(constants.DISK_TEMPLATES)),
default=None, metavar="TEMPL",
choices=list(constants.DISK_TEMPLATES))
EXT_PARAMS_OPT = cli_option("-e", "--ext-params", dest="ext_params",
default={}, type="keyval",
help="Parameters for ExtStorage template"
" conversions in the format:"
" provider=prvdr[,param1=val1,param2=val2,...]")
NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
help="Do not create any network cards for"
" the instance")
FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Relative path under default cluster-wide"
" file storage dir to store file-based disks",
default=None, metavar="<DIR>")
FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
help="Driver to use for image files",
default=None, metavar="<DRIVER>",
choices=list(constants.FILE_DRIVER))
IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
help="Select nodes for the instance automatically"
" using the <NAME> iallocator plugin",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
metavar="<NAME>",
help="Set the default instance"
" allocator plugin",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params",
dest="default_iallocator_params",
help="iallocator template"
" parameters, in the format"
" template:option=value,"
" option=value,...",
type="keyval",
default=None)
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
metavar="<os>",
completion_suggest=OPT_COMPL_ONE_OS)
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
type="keyval", default={},
help="OS parameters")
OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private",
dest="osparams_private",
type="keyprivateval",
default=serializer.PrivateDict(),
help="Private OS parameters"
" (won't be logged)")
OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret",
dest="osparams_secret",
type="keyprivateval",
default=serializer.PrivateDict(),
help="Secret OS parameters (won't be logged or"
" saved; you must supply these for every"
" operation.)")
CLEAR_OSPARAMS_OPT = cli_option("--clear-os-parameters",
dest="clear_osparams",
action="store_true",
default=False,
help="Clear current OS parameters")
CLEAR_OSPARAMS_PRIVATE_OPT = cli_option("--clear-os-parameters-private",
dest="clear_osparams_private",
action="store_true",
default=False,
help="Clear current private OS"
" parameters")
REMOVE_OSPARAMS_OPT = cli_option("--remove-os-parameters",
dest="remove_osparams",
type="list",
default=None,
help="Comma-separated list of OS parameters"
" that should be removed")
REMOVE_OSPARAMS_PRIVATE_OPT = cli_option("--remove-os-parameters-private",
dest="remove_osparams_private",
type="list",
default=None,
help="Comma-separated list of private"
" OS parameters that should be"
" removed")
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
action="store_true", default=False,
help="Force an unknown variant")
NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
action="store_true", default=False,
help="Do not install the OS (will"
" enable no-start)")
NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes",
dest="allow_runtime_chgs",
default=True, action="store_false",
help="Don't allow runtime changes")
BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
type="keyval", default={},
help="Backend parameters")
HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
default={}, dest="hvparams",
help="Hypervisor parameters")
DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams",
help="Disk template parameters, in the format"
" template:option=value,option=value,...",
type="identkeyval", action="append", default=[])
SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size",
type="keyval", default={},
help="Memory size specs: list of key=value,"
" where key is one of min, max, std"
" (in MB or using a unit)")
SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count",
type="keyval", default={},
help="CPU count specs: list of key=value,"
" where key is one of min, max, std")
SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count",
dest="ispecs_disk_count",
type="keyval", default={},
help="Disk count specs: list of key=value,"
" where key is one of min, max, std")
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
type="keyval", default={},
help="Disk size specs: list of key=value,"
" where key is one of min, max, std"
" (in MB or using a unit)")
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
type="keyval", default={},
help="NIC count specs: list of key=value,"
" where key is one of min, max, std")
IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs"
IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR,
dest="ipolicy_bounds_specs",
type="multilistidentkeyval", default=None,
help="Complete instance specs limits")
IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs"
IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR,
dest="ipolicy_std_specs",
type="keyval", default=None,
help="Complete standard instance specs")
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
dest="ipolicy_disk_templates",
type="list", default=None,
help="Comma-separated list of"
" enabled disk templates")
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
dest="ipolicy_vcpu_ratio",
type="maybefloat", default=None,
help="The maximum allowed vcpu-to-cpu ratio")
IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio",
dest="ipolicy_spindle_ratio",
type="maybefloat", default=None,
help=("The maximum allowed instances to"
" spindle ratio"))
IPOLICY_MEMORY_RATIO = cli_option("--ipolicy-memory-ratio",
dest="ipolicy_memory_ratio",
type="maybefloat", default=None,
help=("The maximum allowed used memory to"
" physicall memory ratio (in terms of"
" memory overcommitment)"))
HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
help="Hypervisor and hypervisor options, in the"
" format hypervisor:option=value,option=value,...",
default=None, type="identkeyval")
HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
help="Hypervisor and hypervisor options, in the"
" format hypervisor:option=value,option=value,...",
default=[], action="append", type="identkeyval")
NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
action="store_false",
help="Don't check that the instance's IP"
" is alive")
NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
default=True, action="store_false",
help="Don't check that the instance's name"
" is resolvable")
NET_OPT = cli_option("--net",
help="NIC parameters", default=[],
dest="nics", action="append", type="identkeyval")
DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
dest="disks", action="append", type="identkeyval")
DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
help="Comma-separated list of disks"
" indices to act on (e.g. 0,2) (optional,"
" defaults to all disks)")
OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
help="Enforces a single-disk configuration using the"
" given disk size, in MiB unless a suffix is used",
default=None, type="unit", metavar="<size>")
IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
dest="ignore_consistency",
action="store_true", default=False,
help="Ignore the consistency of the disks on"
" the secondary. The source node must be "
"marked offline first for this to succeed.")
IGNORE_HVVERSIONS_OPT = cli_option("--ignore-hvversions",
dest="ignore_hvversions",
action="store_true", default=False,
help="Ignore imcompatible hypervisor"
" versions between source and target")
ALLOW_FAILOVER_OPT = cli_option("--allow-failover",
dest="allow_failover",
action="store_true", default=False,
help="If migration is not possible fallback to"
" failover")
FORCE_FAILOVER_OPT = cli_option("--force-failover",
dest="force_failover",
action="store_true", default=False,
help="Do not use migration, always use"
" failover")
NONLIVE_OPT = cli_option("--non-live", dest="live",
default=True, action="store_false",
help="Do a non-live migration (this usually means"
" freeze the instance, save the state, transfer and"
" only then resume running on the secondary node)")
MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
default=None,
choices=list(constants.HT_MIGRATION_MODES),
help="Override default migration mode (choose"
" either live or non-live")
NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
help="Target node and optional secondary node",
metavar="<pnode>[:<snode>]",
completion_suggest=OPT_COMPL_INST_ADD_NODES)
NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
action="append", metavar="<node>",
help="Use only this node (can be used multiple"
" times, if not given defaults to all nodes)",
completion_suggest=OPT_COMPL_ONE_NODE)
NODEGROUP_OPT_NAME = "--node-group"
NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME,
dest="nodegroup",
help="Node group (name or uuid)",
metavar="<nodegroup>",
default=None, type="string",
completion_suggest=OPT_COMPL_ONE_NODEGROUP)
SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
metavar="<node>",
completion_suggest=OPT_COMPL_ONE_NODE)
NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
action="store_false",
help="Don't start the instance after creation")
FORTHCOMING_OPT = cli_option("--forthcoming", dest="forthcoming",
action="store_true", default=False,
help="Only reserve resources, but do not"
" create the instance yet")
COMMIT_OPT = cli_option("--commit", dest="commit",
action="store_true", default=False,
help="The instance is already reserved and should"
" be committed now")
SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
action="store_true", default=False,
help="Show command instead of executing it")
CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
default=False, action="store_true",
help="Instead of performing the migration/failover,"
" try to recover from a failed cleanup. This is safe"
" to run even if the instance is healthy, but it"
" will create extra replication traffic and "
" disrupt briefly the replication (like during the"
" migration/failover")
STATIC_OPT = cli_option("-s", "--static", dest="static",
action="store_true", default=False,
help="Only show configuration data, not runtime data")
ALL_OPT = cli_option("--all", dest="show_all",
default=False, action="store_true",
help="Show info on all instances on the cluster."
" This can take a long time to run, use wisely")
SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
action="store_true", default=False,
help="Interactive OS reinstall, lists available"
" OS templates for selection")
IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
action="store_true", default=False,
help="Remove the instance from the cluster"
" configuration even if there are failures"
" during the removal process")
IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
dest="ignore_remove_failures",
action="store_true", default=False,
help="Remove the instance from the"
" cluster configuration even if there"
" are failures during the removal"
" process")
REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
action="store_true", default=False,
help="Remove the instance from the cluster")
DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node",
help="Specifies the new node for the instance",
metavar="NODE", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
help="Specifies the new secondary node",
metavar="NODE", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node",
help="Specifies the new primary node",
metavar="<node>", default=None,
completion_suggest=OPT_COMPL_ONE_NODE)
ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
default=False, action="store_true",
help="Replace the disk(s) on the primary"
" node (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
default=False, action="store_true",
help="Replace the disk(s) on the secondary"
" node (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
default=False, action="store_true",
help="Lock all nodes and auto-promote as needed"
" to MC status")
AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
default=False, action="store_true",
help="Automatically replace faulty disks"
" (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
default=False, action="store_true",
help="Ignore current recorded size"
" (useful for forcing activation when"
" the recorded size is wrong)")
SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
metavar="<node>",
completion_suggest=OPT_COMPL_ONE_NODE)
SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
metavar="<dir>")
SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
help="Specify the secondary ip for the node",
metavar="ADDRESS", default=None)
READD_OPT = cli_option("--readd", dest="readd",
default=False, action="store_true",
help="Readd old node after replacing it")
NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
default=True, action="store_false",
help="Disable SSH key fingerprint checking")
NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
default=False, action="store_true",
help="Force the joining of a node")
MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
type="bool", default=None, metavar=_YORNO,
help="Set the master_candidate flag on the node")
OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
type="bool", default=None,
help=("Set the offline flag on the node"
" (cluster does not communicate with offline"
" nodes)"))
DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
type="bool", default=None,
help=("Set the drained flag on the node"
" (excluded from allocation operations)"))
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
type="bool", default=None, metavar=_YORNO,
help="Set the master_capable flag on the node")
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
type="bool", default=None, metavar=_YORNO,
help="Set the vm_capable flag on the node")
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
type="bool", default=None, metavar=_YORNO,
help="Set the allocatable flag on a volume")
ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
dest="enabled_hypervisors",
help="Comma-separated list of hypervisors",
type="string", default=None)
ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates",
dest="enabled_disk_templates",
help="Comma-separated list of "
"disk templates",
type="string", default=None)
ENABLED_PREDICTIVE_QUEUE_OPT = cli_option("--predictive-queue",
default=None,
dest="enabled_predictive_queue",
help="Whether the predictive queue is"
"enabled",
type="bool")
ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown",
default=None,
dest="enabled_user_shutdown",
help="Whether user shutdown is enabled",
type="bool")
NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
type="keyval", default={},
help="NIC parameters")
CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
dest="candidate_pool_size", type="int",
help="Set the candidate pool size")
RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs",
type="int", help="Set the maximal number of jobs to "
"run simultaneously")
MAX_TRACK_OPT = cli_option("--max-tracked-jobs", dest="max_tracked_jobs",
type="int", help="Set the maximal number of jobs to "
"be tracked simultaneously for "
"scheduling")
COMPRESSION_TOOLS_OPT = \
cli_option("--compression-tools",
dest="compression_tools", type="string", default=None,
help="Comma-separated list of compression tools which are"
" allowed to be used by Ganeti in various operations")
MAINT_INTERVAL_OPT = \
cli_option("--maintenance-interval", dest="maint_round_delay", type="int",
default=None, help="Minimal time in seconds, the maintenance"
" daemon waits between rounds")
MAINT_BALANCE_OPT = \
cli_option("--auto-balance-cluster", dest="maint_balance", type="bool",
default=None, metavar=_YORNO, help="Whether the maintenance"
" daemon should balance the cluster")
MAINT_BALANCE_THRESHOLD_OPT = \
cli_option("--auto-balance-threshold", dest="maint_balance_threshold",
type="float", default=None, metavar="CLUSTERSCORE",
help="Minimal gain for an auto-balancing step to be taken")
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
help=("Enables LVM and specifies the volume group"
" name (cluster-wide) for disk allocation"
" [%s]" % constants.DEFAULT_VG),
metavar="VG", default=None)
YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it",
help="Destroy cluster", action="store_true")
NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
help="Skip node agreement check (dangerous)",
action="store_true", default=False)
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
help="Specify the mac prefix for the instance IP"
" addresses, in the format XX:XX:XX",
metavar="PREFIX",
default=None)
MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
help="Specify the node interface (cluster-wide)"
" on which the master IP address will be added"
" (cluster init default: %s)" %
constants.DEFAULT_BRIDGE,
metavar="NETDEV",
default=None)
MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask",
help="Specify the netmask of the master IP",
metavar="NETMASK",
default=None)
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
dest="use_external_mip_script",
help="Specify whether to run a"
" user-provided script for the master"
" IP address turnup and"
" turndown operations",
type="bool", metavar=_YORNO, default=None)
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
help="Specify the default directory (cluster-"
"wide) for storing the file-based disks [%s]" %
pathutils.DEFAULT_FILE_STORAGE_DIR,
metavar="DIR",
default=None)
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
"--shared-file-storage-dir",
dest="shared_file_storage_dir",
help="Specify the default directory (cluster-wide) for storing the"
" shared file-based disks [%s]" %
pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR,
metavar="SHAREDDIR", default=None)
GLOBAL_GLUSTER_FILEDIR_OPT = cli_option(
"--gluster-storage-dir",
dest="gluster_storage_dir",
help="Specify the default directory (cluster-wide) for mounting Gluster"
" file systems [%s]" %
pathutils.DEFAULT_GLUSTER_STORAGE_DIR,
metavar="GLUSTERDIR", default=None)
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
help="Don't modify %s" % pathutils.ETC_HOSTS,
action="store_false", default=True)
MODIFY_ETCHOSTS_OPT = \
cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO,
default=None, type="bool",
help="Defines whether the cluster should autonomously modify"
" and keep in sync the /etc/hosts file of the nodes")
NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
help="Don't initialize SSH keys",
action="store_false", default=True)
MODIFY_SSH_SETUP_OPT = \
cli_option("--modify-ssh-setup", dest="modify_ssh_setup", metavar=_YORNO,
default=None, type="bool",
help="Defines whether the cluster should update node SSH keys"
" on node add and on renew-crypto")
ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
help="Enable parseable error messages",
action="store_true", default=False)
NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
help="Skip N+1 memory redundancy tests",
action="store_true", default=False)
REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
help="Type of reboot: soft/hard/full",
default=constants.INSTANCE_REBOOT_HARD,
metavar="<REBOOT>",
choices=list(constants.REBOOT_TYPES))
IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
dest="ignore_secondaries",
default=False, action="store_true",
help="Ignore errors from secondaries")
NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
action="store_false", default=True,
help="Don't shutdown the instance (unsafe)")
TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
help="Maximum time to wait")
COMPRESS_OPT = cli_option("--compress", dest="compress",
type="string", default=constants.IEC_NONE,
help="The compression mode to use")
TRANSPORT_COMPRESSION_OPT = \
cli_option("--transport-compression", dest="transport_compression",
type="string", default=constants.IEC_NONE,
help="The compression mode to use during transport")
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
dest="shutdown_timeout", type="int",
default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
help="Maximum time to wait for instance"
" shutdown")
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
default=None,
help=("Number of seconds between repetions of the"
" command"))
EARLY_RELEASE_OPT = cli_option("--early-release",
dest="early_release", default=False,
action="store_true",
help="Release the locks on the secondary"
" node(s) early")
NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
dest="new_cluster_cert",
default=False, action="store_true",
help="Generate a new cluster certificate")
NEW_NODE_CERT_OPT = cli_option(
"--new-node-certificates", dest="new_node_cert", default=False,
action="store_true", help="Generate new node certificates (for all nodes)")
NEW_SSH_KEY_OPT = cli_option(
"--new-ssh-keys", dest="new_ssh_keys", default=False,
action="store_true", help="Generate new node SSH keys (for all nodes)")
RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
default=None,
help="File containing new RAPI certificate")
NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
default=None, action="store_true",
help=("Generate a new self-signed RAPI"
" certificate"))
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
default=None,
help="File containing new SPICE certificate")
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
default=None,
help="File containing the certificate of the CA"
" which signed the SPICE certificate")
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
dest="new_spice_cert", default=None,
action="store_true",
help=("Generate a new self-signed SPICE"
" certificate"))
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
dest="new_confd_hmac_key",
default=False, action="store_true",
help=("Create a new HMAC key for %s" %
constants.CONFD))
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
dest="cluster_domain_secret",
default=None,
help=("Load new new cluster domain"
" secret from file"))
NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
dest="new_cluster_domain_secret",
default=False, action="store_true",
help=("Create a new cluster domain"
" secret"))
USE_REPL_NET_OPT = cli_option("--use-replication-network",
dest="use_replication_network",
help="Whether to use the replication network"
" for talking to the nodes",
action="store_true", default=False)
MAINTAIN_NODE_HEALTH_OPT = \
cli_option("--maintain-node-health", dest="maintain_node_health",
metavar=_YORNO, default=None, type="bool",
help="Configure the cluster to automatically maintain node"
" health, by shutting down unknown instances, shutting down"
" unknown DRBD devices, etc.")
IDENTIFY_DEFAULTS_OPT = \
cli_option("--identify-defaults", dest="identify_defaults",
default=False, action="store_true",
help="Identify which saved instance parameters are equal to"
" the current cluster defaults and set them as such, instead"
" of marking them as overridden")
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
action="store", dest="uid_pool",
help=("A list of user-ids or user-id"
" ranges separated by commas"))
ADD_UIDS_OPT = cli_option("--add-uids", default=None,
action="store", dest="add_uids",
help=("A list of user-ids or user-id"
" ranges separated by commas, to be"
" added to the user-id pool"))
REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
action="store", dest="remove_uids",
help=("A list of user-ids or user-id"
" ranges separated by commas, to be"
" removed from the user-id pool"))
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
action="store", dest="reserved_lvs",
help=("A comma-separated list of reserved"
" logical volumes names, that will be"
" ignored by cluster verify"))
ROMAN_OPT = cli_option("--roman",
dest="roman_integers", default=False,
action="store_true",
help="Use roman numbers for positive integers")
DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
action="store", default=None,
help="Specifies usermode helper for DRBD")
PRIMARY_IP_VERSION_OPT = \
cli_option("--primary-ip-version", default=constants.IP4_VERSION,
action="store", dest="primary_ip_version",
metavar="%d|%d" % (constants.IP4_VERSION,
constants.IP6_VERSION),
help="Cluster-wide IP version for primary IP")
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
action="store_true",
help="Show machine name for every line in output")
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
action="store_true",
help=("Hide successful results and show failures"
" only (determined by the exit code)"))
REASON_OPT = cli_option("--reason", default=[],
help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
"""Callback for processing C{--priority} option.
"""
value = _PRIONAME_TO_VALUE[value]
setattr(parser.values, option.dest, value)
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
metavar="|".join(name for name, _ in _PRIORITY_NAMES),
choices=_PRIONAME_TO_VALUE.keys(),
action="callback", type="choice",
callback=_PriorityOptionCb,
help="Priority for opcode processing")
OPPORTUNISTIC_OPT = cli_option("--opportunistic-locking",
dest="opportunistic_locking",
action="store_true", default=False,
help="Opportunistically acquire locks")
HID_OS_OPT = cli_option("--hidden", dest="hidden",
type="bool", default=None, metavar=_YORNO,
help="Sets the hidden flag on the OS")
BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
type="bool", default=None, metavar=_YORNO,
help="Sets the blacklisted flag on the OS")
PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
type="bool", metavar=_YORNO,
dest="prealloc_wipe_disks",
help=("Wipe disks prior to instance"
" creation"))
NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
type="keyval", default=None,
help="Node parameters")
ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
action="store", metavar="POLICY", default=None,
help="Allocation policy for the node group")
NODE_POWERED_OPT = cli_option("--node-powered", default=None,
type="bool", metavar=_YORNO,
dest="node_powered",
help="Specify if the SoR for node is powered")
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
default=constants.OOB_TIMEOUT,
help="Maximum time to wait for out-of-band helper")
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
default=constants.OOB_POWER_DELAY,
help="Time in seconds to wait between power-ons")
FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
action="store_true", default=False,
help=("Whether command argument should be treated"
" as filter"))
NO_REMEMBER_OPT = cli_option("--no-remember",
dest="no_remember",
action="store_true", default=False,
help="Perform but do not record the change"
" in the configuration")
PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
default=False, action="store_true",
help="Evacuate primary instances only")
SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
default=False, action="store_true",
help="Evacuate secondary instances only"
" (applies only to internally mirrored"
" disk templates, e.g. %s)" %
utils.CommaJoin(constants.DTS_INT_MIRROR))
STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
action="store_true", default=False,
help="Pause instance at startup")
TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
help="Destination node group (name or uuid)",
default=None, action="append",
completion_suggest=OPT_COMPL_ONE_NODEGROUP)
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
action="append", dest="ignore_errors",
choices=list(constants.CV_ALL_ECODES_STRINGS),
help="Error code to be ignored")
DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
action="append",
help=("Specify disk state information in the"
" format"
" storage_type/identifier:option=value,...;"
" note this is unused for now"),
type="identkeyval")
HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
action="append",
help=("Specify hypervisor state information in the"
" format hypervisor:option=value,...;"
" note this is unused for now"),
type="identkeyval")
IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
action="store_true", default=False,
help="Ignore instance policy violations")
RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
help="Sets the instance's runtime memory,"
" ballooning it up or down to the new value",
default=None, type="unit", metavar="<size>")
ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
action="store_true", default=False,
help="Marks the grow as absolute instead of the"
" (default) relative mode")
NETWORK_OPT = cli_option("--network",
action="store", default=None, dest="network",
help="IP network in CIDR notation")
GATEWAY_OPT = cli_option("--gateway",
action="store", default=None, dest="gateway",
help="IP address of the router (gateway)")
ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
action="store", default=None,
dest="add_reserved_ips",
help="Comma-separated list of"
" reserved IPs to add")
REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
action="store", default=None,
dest="remove_reserved_ips",
help="Comma-delimited list of"
" reserved IPs to remove")
NETWORK6_OPT = cli_option("--network6",
action="store", default=None, dest="network6",
help="IP network in CIDR notation")
GATEWAY6_OPT = cli_option("--gateway6",
action="store", default=None, dest="gateway6",
help="IP6 address of the router (gateway)")
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
dest="conflicts_check",
default=True,
action="store_false",
help="Don't check for conflicting IPs")
INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
default=False, action="store_true",
help="Include default values")
HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
action="store_true", default=False,
help="Hotplug supported devices (NICs and Disks)")
HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
dest="hotplug_if_possible",
action="store_true", default=False,
help="Hotplug devices in case"
" hotplug is supported")
INSTALL_IMAGE_OPT = \
cli_option("--install-image",
dest="install_image",
action="store",
type="string",
default=None,
help="The OS image to use for running the OS scripts safely")
INSTANCE_COMMUNICATION_OPT = \
cli_option("-c", "--communication",
dest="instance_communication",
help=constants.INSTANCE_COMMUNICATION_DOC,
type="bool")
INSTANCE_COMMUNICATION_NETWORK_OPT = \
cli_option("--instance-communication-network",
dest="instance_communication_network",
type="string",
help="Set the network name for instance communication")
ZEROING_IMAGE_OPT = \
cli_option("--zeroing-image",
dest="zeroing_image", action="store", default=None,
help="The OS image to use to zero instance disks")
ZERO_FREE_SPACE_OPT = \
cli_option("--zero-free-space",
dest="zero_free_space", action="store_true", default=False,
help="Whether to zero the free space on the disks of the "
"instance prior to the export")
HELPER_STARTUP_TIMEOUT_OPT = \
cli_option("--helper-startup-timeout",
dest="helper_startup_timeout", action="store", type="int",
help="Startup timeout for the helper VM")
HELPER_SHUTDOWN_TIMEOUT_OPT = \
cli_option("--helper-shutdown-timeout",
dest="helper_shutdown_timeout", action="store", type="int",
help="Shutdown timeout for the helper VM")
ZEROING_TIMEOUT_FIXED_OPT = \
cli_option("--zeroing-timeout-fixed",
dest="zeroing_timeout_fixed", action="store", type="int",
help="The fixed amount of time to wait before assuming that the "
"zeroing failed")
ZEROING_TIMEOUT_PER_MIB_OPT = \
cli_option("--zeroing-timeout-per-mib",
dest="zeroing_timeout_per_mib", action="store", type="float",
help="The amount of time to wait per MiB of data to zero, in "
"addition to the fixed timeout")
ENABLED_DATA_COLLECTORS_OPT = \
cli_option("--enabled-data-collectors",
dest="enabled_data_collectors", type="keyval",
default={},
help="Deactivate or reactivate a data collector for reporting, "
"in the format collector=bool, where collector is one of %s."
% ", ".join(constants.DATA_COLLECTOR_NAMES))
DIAGNOSE_DATA_COLLECTOR_FILENAME_OPT = \
cli_option("--diagnose-data-collector-filename",
dest="diagnose_data_collector_filename",
help=("Set's the file name of the script"
" diagnose data collector should run"
" If this value is empty string, the collector"
" will return a success value"
" without running anything"),
type="string")
VERIFY_CLUTTER_OPT = cli_option(
"--verify-ssh-clutter", default=False, dest="verify_clutter",
help="Verify that Ganeti did not clutter"
" up the 'authorized_keys' file", action="store_true")
LONG_SLEEP_OPT = cli_option(
"--long-sleep", default=False, dest="long_sleep",
help="Allow long shutdowns when backing up instances", action="store_true")
INPUT_OPT = cli_option("--input", dest="input", default=None,
help=("input to be passed as stdin"
" to the repair command"),
type="string")
SSH_KEY_TYPE_OPT = \
cli_option("--ssh-key-type", default=None,
choices=list(constants.SSHK_ALL), dest="ssh_key_type",
help="Type of SSH key deployed by Ganeti for cluster actions")
SSH_KEY_BITS_OPT = \
cli_option("--ssh-key-bits", default=None,
type="int", dest="ssh_key_bits",
help="Length of SSH keys generated by Ganeti, in bits")
#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]
# options related to asynchronous job handling
SUBMIT_OPTS = [
SUBMIT_OPT,
PRINT_JOBID_OPT,
]
# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
BACKEND_OPT,
DISK_OPT,
DISK_TEMPLATE_OPT,
FILESTORE_DIR_OPT,
FILESTORE_DRIVER_OPT,
HYPERVISOR_OPT,
IALLOCATOR_OPT,
NET_OPT,
NODE_PLACEMENT_OPT,
NODEGROUP_OPT,
NOIPCHECK_OPT,
NOCONFLICTSCHECK_OPT,
NONAMECHECK_OPT,
NONICS_OPT,
NWSYNC_OPT,
OSPARAMS_OPT,
OSPARAMS_PRIVATE_OPT,
OSPARAMS_SECRET_OPT,
OS_SIZE_OPT,
OPPORTUNISTIC_OPT,
SUBMIT_OPT,
PRINT_JOBID_OPT,
TAG_ADD_OPT,
DRY_RUN_OPT,
PRIORITY_OPT,
]
# common instance policy options
INSTANCE_POLICY_OPTS = [
IPOLICY_BOUNDS_SPECS_OPT,
IPOLICY_DISK_TEMPLATES,
IPOLICY_VCPU_RATIO,
IPOLICY_SPINDLE_RATIO,
IPOLICY_MEMORY_RATIO,
]
# instance policy split specs options
SPLIT_ISPECS_OPTS = [
SPECS_CPU_COUNT_OPT,
SPECS_DISK_COUNT_OPT,
SPECS_DISK_SIZE_OPT,
SPECS_MEM_SIZE_OPT,
SPECS_NIC_COUNT_OPT,
]
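# Illustrative sketch (assumption, not part of this module): individual
# commands typically build their optparse option list by concatenating the
# shared groups above with their own command-specific flags, e.g.:
#   add_instance_opts = (COMMON_CREATE_OPTS + INSTANCE_POLICY_OPTS +
#                        [FORTHCOMING_OPT, COMMIT_OPT])
# The variable name is hypothetical; all option names used in the example are
# defined earlier in this module.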
|
hujiajie/chromium-crosswalk
|
refs/heads/master
|
tools/telemetry/third_party/pyserial/serial/rfc2217.py
|
141
|
#! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements an RFC 2217 compatible client. RFC 2217 describes a
# protocol to access serial ports over TCP/IP and allows setting the baud rate,
# modem control lines etc.
#
# (C) 2001-2013 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
# TODO:
# - setting control line -> answer is not checked (had problems with one of the
#   servers). consider implementing a compatibility mode flag to make the check
# conditional
# - write timeout not implemented at all
##############################################################################
# observations and issues with servers
#=============================================================================
# sredird V2.2.1
# - http://www.ibiblio.org/pub/Linux/system/serial/ sredird-2.2.2.tar.gz
# - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding
# [105 1] instead of the actual value.
# - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger
# numbers than 2**32?
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done
#=============================================================================
# telnetcpcd (untested)
# - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz
# - To get the signature [COM_PORT_OPTION] w/o data has to be sent.
#=============================================================================
# ser2net
# - does not negotiate BINARY or COM_PORT_OPTION for its side but at least
# acknowledges that the client activates these options
# - Depending on the configuration, the server may print a banner. As this client
# implementation does a flushInput on connect, this banner is hidden from
# the user application.
# - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one
# second.
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: run ser2net daemon, in /etc/ser2net.conf:
# 2000:telnet:0:/dev/ttyS0:9600 remctl banner
##############################################################################
# How to identify ports? pySerial might want to support other protocols in the
# future, so let's use a URL scheme.
# for RFC2217 compliant servers we will use this:
# rfc2217://<host>:<port>[/option[/option...]]
#
# options:
# - "debug" print diagnostic messages
# - "ign_set_control": do not look at the answers to SET_CONTROL
# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
# Without this option it expects that the server sends notifications
# automatically on change (which most servers do and is according to the
# RFC).
# the order of the options is not relevant
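# Illustrative example URLs (assumption, for clarity only; the option names
# actually accepted are the ones handled in fromURL() further below, i.e.
# "logging=<level>", "ign_set_control", "poll_modem" and "timeout=<seconds>"):
#   rfc2217://localhost:7000
#   rfc2217://192.168.0.100:2217/logging=debug/timeout=5
#   rfc2217://ts.example.com:4001/ign_set_control/poll_modem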
from serial.serialutil import *
import time
import struct
import socket
import threading
import Queue
import logging
# port string is expected to be something like this:
# rfc2217://host:port
# host may be an IP address or a hostname.
# port is 0...65535
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
# telnet protocol characters
IAC = to_bytes([255]) # Interpret As Command
DONT = to_bytes([254])
DO = to_bytes([253])
WONT = to_bytes([252])
WILL = to_bytes([251])
IAC_DOUBLED = to_bytes([IAC, IAC])
SE = to_bytes([240]) # Subnegotiation End
NOP = to_bytes([241]) # No Operation
DM = to_bytes([242]) # Data Mark
BRK = to_bytes([243]) # Break
IP = to_bytes([244]) # Interrupt process
AO = to_bytes([245]) # Abort output
AYT = to_bytes([246]) # Are You There
EC = to_bytes([247]) # Erase Character
EL = to_bytes([248]) # Erase Line
GA = to_bytes([249]) # Go Ahead
SB = to_bytes([250]) # Subnegotiation Begin
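# Illustrative sketch (assumption, not part of pySerial): on the wire the IAC
# byte (0xFF) introduces commands and negotiation, so literal 0xFF bytes in the
# payload have to be escaped by doubling them, which is what write() below does
# before sending:
#   payload = to_bytes([0x01, 0xff, 0x02])
#   escaped = payload.replace(IAC, IAC_DOUBLED)   # 01 ff ff 02 goes out
# The reader thread reverses this: an IAC IAC pair is collapsed back to one 0xff.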
# selected telnet options
BINARY = to_bytes([0]) # 8-bit data path
ECHO = to_bytes([1]) # echo
SGA = to_bytes([3]) # suppress go ahead
# RFC2217
COM_PORT_OPTION = to_bytes([44])
# Client to Access Server
SET_BAUDRATE = to_bytes([1])
SET_DATASIZE = to_bytes([2])
SET_PARITY = to_bytes([3])
SET_STOPSIZE = to_bytes([4])
SET_CONTROL = to_bytes([5])
NOTIFY_LINESTATE = to_bytes([6])
NOTIFY_MODEMSTATE = to_bytes([7])
FLOWCONTROL_SUSPEND = to_bytes([8])
FLOWCONTROL_RESUME = to_bytes([9])
SET_LINESTATE_MASK = to_bytes([10])
SET_MODEMSTATE_MASK = to_bytes([11])
PURGE_DATA = to_bytes([12])
SERVER_SET_BAUDRATE = to_bytes([101])
SERVER_SET_DATASIZE = to_bytes([102])
SERVER_SET_PARITY = to_bytes([103])
SERVER_SET_STOPSIZE = to_bytes([104])
SERVER_SET_CONTROL = to_bytes([105])
SERVER_NOTIFY_LINESTATE = to_bytes([106])
SERVER_NOTIFY_MODEMSTATE = to_bytes([107])
SERVER_FLOWCONTROL_SUSPEND = to_bytes([108])
SERVER_FLOWCONTROL_RESUME = to_bytes([109])
SERVER_SET_LINESTATE_MASK = to_bytes([110])
SERVER_SET_MODEMSTATE_MASK = to_bytes([111])
SERVER_PURGE_DATA = to_bytes([112])
RFC2217_ANSWER_MAP = {
SET_BAUDRATE: SERVER_SET_BAUDRATE,
SET_DATASIZE: SERVER_SET_DATASIZE,
SET_PARITY: SERVER_SET_PARITY,
SET_STOPSIZE: SERVER_SET_STOPSIZE,
SET_CONTROL: SERVER_SET_CONTROL,
NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
PURGE_DATA: SERVER_PURGE_DATA,
}
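# Illustrative note (assumption spelled out from the constants above): every
# client-to-server request code is acknowledged with the corresponding code
# plus 100, e.g. SET_BAUDRATE (1) is answered with SERVER_SET_BAUDRATE (101)
# carrying the value the server actually applied. The client code below passes
# the matching SERVER_* code as ack_option to TelnetSubnegotiation (see open()).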
SET_CONTROL_REQ_FLOW_SETTING = to_bytes([0]) # Request Com Port Flow Control Setting (outbound/both)
SET_CONTROL_USE_NO_FLOW_CONTROL = to_bytes([1]) # Use No Flow Control (outbound/both)
SET_CONTROL_USE_SW_FLOW_CONTROL = to_bytes([2]) # Use XON/XOFF Flow Control (outbound/both)
SET_CONTROL_USE_HW_FLOW_CONTROL = to_bytes([3]) # Use HARDWARE Flow Control (outbound/both)
SET_CONTROL_REQ_BREAK_STATE = to_bytes([4]) # Request BREAK State
SET_CONTROL_BREAK_ON = to_bytes([5]) # Set BREAK State ON
SET_CONTROL_BREAK_OFF = to_bytes([6]) # Set BREAK State OFF
SET_CONTROL_REQ_DTR = to_bytes([7]) # Request DTR Signal State
SET_CONTROL_DTR_ON = to_bytes([8]) # Set DTR Signal State ON
SET_CONTROL_DTR_OFF = to_bytes([9]) # Set DTR Signal State OFF
SET_CONTROL_REQ_RTS = to_bytes([10]) # Request RTS Signal State
SET_CONTROL_RTS_ON = to_bytes([11]) # Set RTS Signal State ON
SET_CONTROL_RTS_OFF = to_bytes([12]) # Set RTS Signal State OFF
SET_CONTROL_REQ_FLOW_SETTING_IN = to_bytes([13]) # Request Com Port Flow Control Setting (inbound)
SET_CONTROL_USE_NO_FLOW_CONTROL_IN = to_bytes([14]) # Use No Flow Control (inbound)
SET_CONTROL_USE_SW_FLOW_CONTOL_IN = to_bytes([15]) # Use XON/XOFF Flow Control (inbound)
SET_CONTROL_USE_HW_FLOW_CONTOL_IN = to_bytes([16]) # Use HARDWARE Flow Control (inbound)
SET_CONTROL_USE_DCD_FLOW_CONTROL = to_bytes([17]) # Use DCD Flow Control (outbound/both)
SET_CONTROL_USE_DTR_FLOW_CONTROL = to_bytes([18]) # Use DTR Flow Control (inbound)
SET_CONTROL_USE_DSR_FLOW_CONTROL = to_bytes([19]) # Use DSR Flow Control (outbound/both)
LINESTATE_MASK_TIMEOUT = 128 # Time-out Error
LINESTATE_MASK_SHIFTREG_EMPTY = 64 # Transfer Shift Register Empty
LINESTATE_MASK_TRANSREG_EMPTY = 32 # Transfer Holding Register Empty
LINESTATE_MASK_BREAK_DETECT = 16 # Break-detect Error
LINESTATE_MASK_FRAMING_ERROR = 8 # Framing Error
LINESTATE_MASK_PARTIY_ERROR = 4 # Parity Error
LINESTATE_MASK_OVERRUN_ERROR = 2 # Overrun Error
LINESTATE_MASK_DATA_READY = 1 # Data Ready
MODEMSTATE_MASK_CD = 128 # Receive Line Signal Detect (also known as Carrier Detect)
MODEMSTATE_MASK_RI = 64 # Ring Indicator
MODEMSTATE_MASK_DSR = 32 # Data-Set-Ready Signal State
MODEMSTATE_MASK_CTS = 16 # Clear-To-Send Signal State
MODEMSTATE_MASK_CD_CHANGE = 8 # Delta Receive Line Signal Detect
MODEMSTATE_MASK_RI_CHANGE = 4 # Trailing-edge Ring Detector
MODEMSTATE_MASK_DSR_CHANGE = 2 # Delta Data-Set-Ready
MODEMSTATE_MASK_CTS_CHANGE = 1 # Delta Clear-To-Send
PURGE_RECEIVE_BUFFER = to_bytes([1]) # Purge access server receive data buffer
PURGE_TRANSMIT_BUFFER = to_bytes([2]) # Purge access server transmit data buffer
PURGE_BOTH_BUFFERS = to_bytes([3]) # Purge both the access server receive data buffer and the access server transmit data buffer
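# Illustrative note (assumption): these purge values are what flushInput() and
# flushOutput() below send via rfc2217SendPurge() -- PURGE_RECEIVE_BUFFER to
# drop the access server's receive buffer, PURGE_TRANSMIT_BUFFER for its
# transmit buffer.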
RFC2217_PARITY_MAP = {
PARITY_NONE: 1,
PARITY_ODD: 2,
PARITY_EVEN: 3,
PARITY_MARK: 4,
PARITY_SPACE: 5,
}
RFC2217_REVERSE_PARITY_MAP = dict((v,k) for k,v in RFC2217_PARITY_MAP.items())
RFC2217_STOPBIT_MAP = {
STOPBITS_ONE: 1,
STOPBITS_ONE_POINT_FIVE: 3,
STOPBITS_TWO: 2,
}
RFC2217_REVERSE_STOPBIT_MAP = dict((v,k) for k,v in RFC2217_STOPBIT_MAP.items())
# Telnet filter states
M_NORMAL = 0
M_IAC_SEEN = 1
M_NEGOTIATE = 2
# TelnetOption and TelnetSubnegotiation states
REQUESTED = 'REQUESTED'
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
REALLY_INACTIVE = 'REALLY_INACTIVE'
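# Illustrative state-transition sketch (assumption, derived from
# TelnetOption.process_incoming below):
#   REQUESTED --ack_yes--> ACTIVE      (we asked for the option, remote agreed)
#   REQUESTED --ack_no---> INACTIVE    (we asked for the option, remote refused)
#   INACTIVE  --ack_yes--> ACTIVE      (remote asked, we agree and answer)
#   ACTIVE    --ack_no---> INACTIVE    (remote turned the option off)
#   REALLY_INACTIVE never becomes active; positive requests are answered with
#   the corresponding "no" command.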
class TelnetOption(object):
"""Manage a single telnet option, keeps track of DO/DONT WILL/WONT."""
def __init__(self, connection, name, option, send_yes, send_no, ack_yes, ack_no, initial_state, activation_callback=None):
"""\
Initialize option.
:param connection: connection used to transmit answers
:param name: a readable name for debug outputs
:param send_yes: what to send when option is to be enabled.
:param send_no: what to send when option is to be disabled.
:param ack_yes: what to expect when remote agrees on option.
:param ack_no: what to expect when remote disagrees on option.
:param initial_state: options initialized with REQUESTED are requested
(i.e. enabled) on startup; use INACTIVE for all others.
"""
self.connection = connection
self.name = name
self.option = option
self.send_yes = send_yes
self.send_no = send_no
self.ack_yes = ack_yes
self.ack_no = ack_no
self.state = initial_state
self.active = False
self.activation_callback = activation_callback
def __repr__(self):
"""String for debug outputs"""
return "%s:%s(%s)" % (self.name, self.active, self.state)
def process_incoming(self, command):
"""A DO/DONT/WILL/WONT was received for this option, update state and
answer when needed."""
if command == self.ack_yes:
if self.state is REQUESTED:
self.state = ACTIVE
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is ACTIVE:
pass
elif self.state is INACTIVE:
self.state = ACTIVE
self.connection.telnetSendOption(self.send_yes, self.option)
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is REALLY_INACTIVE:
self.connection.telnetSendOption(self.send_no, self.option)
else:
raise ValueError('option in illegal state %r' % self)
elif command == self.ack_no:
if self.state is REQUESTED:
self.state = INACTIVE
self.active = False
elif self.state is ACTIVE:
self.state = INACTIVE
self.connection.telnetSendOption(self.send_no, self.option)
self.active = False
elif self.state is INACTIVE:
pass
elif self.state is REALLY_INACTIVE:
pass
else:
raise ValueError('option in illegal state %r' % self)
class TelnetSubnegotiation(object):
"""\
An object to handle subnegotiation of options. In this case actually
sub-sub options for RFC 2217. It is used to track com port options.
"""
def __init__(self, connection, name, option, ack_option=None):
if ack_option is None: ack_option = option
self.connection = connection
self.name = name
self.option = option
self.value = None
self.ack_option = ack_option
self.state = INACTIVE
def __repr__(self):
"""String for debug outputs."""
return "%s:%s" % (self.name, self.state)
def set(self, value):
"""\
request a change of the value. a request is sent to the server. if
the caller needs to know whether the change was performed, it has to
check the state of this object.
"""
self.value = value
self.state = REQUESTED
self.connection.rfc2217SendSubnegotiation(self.option, self.value)
if self.connection.logger:
self.connection.logger.debug("SB Requesting %s -> %r" % (self.name, self.value))
def isReady(self):
"""\
check if answer from server has been received. when server rejects
the change, raise a ValueError.
"""
if self.state == REALLY_INACTIVE:
raise ValueError("remote rejected value for option %r" % (self.name))
return self.state == ACTIVE
# add property to have a similar interface as TelnetOption
active = property(isReady)
def wait(self, timeout=3):
"""\
wait until the subnegotiation has been acknowledged or the timeout
expires. It can also raise a ValueError when the answer from the server
does not match the value sent.
"""
timeout_time = time.time() + timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if self.isReady():
break
else:
raise SerialException("timeout while waiting for option %r" % (self.name))
def checkAnswer(self, suboption):
"""\
check an incoming subnegotiation block. the parameter has already had
the header (sub option number and com port option value) cut off.
"""
if self.value == suboption[:len(self.value)]:
self.state = ACTIVE
else:
# error propagation done in isReady
self.state = REALLY_INACTIVE
if self.connection.logger:
self.connection.logger.debug("SB Answer %s -> %r -> %s" % (self.name, suboption, self.state))
class RFC2217Serial(SerialBase):
"""Serial port implementation for RFC 2217 remote serial ports."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self._isOpen:
raise SerialException("Port is already open.")
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect(self.fromURL(self.portstr))
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception, msg:
self._socket = None
raise SerialException("Could not open port %s: %s" % (self.portstr, msg))
self._socket.settimeout(5) # XXX good value?
# use a thread safe queue as buffer. it also simplifies implementing
# the read timeout
self._read_buffer = Queue.Queue()
# to ensure that user writes do not interfere with internal
# telnet/rfc2217 options establish a lock
self._write_lock = threading.Lock()
# name the following separately so that, below, a check can be easily done
mandadory_options = [
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED),
]
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED),
] + mandadory_options
# RFC 2217 specific states
# COM port settings
self._rfc2217_port_settings = {
'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE),
'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE),
'parity': TelnetSubnegotiation(self, 'parity', SET_PARITY, SERVER_SET_PARITY),
'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE),
}
# There are more subnegotiation objects, combine all in one dictionary
# for easy access
self._rfc2217_options = {
'purge': TelnetSubnegotiation(self, 'purge', PURGE_DATA, SERVER_PURGE_DATA),
'control': TelnetSubnegotiation(self, 'control', SET_CONTROL, SERVER_SET_CONTROL),
}
self._rfc2217_options.update(self._rfc2217_port_settings)
# cache for line and modem states that the server sends to us
self._linestate = 0
self._modemstate = None
self._modemstate_expires = 0
# RFC 2217 flow control between server and client
self._remote_suspend_flow = False
self._thread = threading.Thread(target=self._telnetReadLoop)
self._thread.setDaemon(True)
self._thread.setName('pySerial RFC 2217 reader thread for %s' % (self._port,))
self._thread.start()
# negotiate Telnet/RFC 2217 -> send initial requests
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnetSendOption(option.send_yes, option.option)
# now wait until important options are negotiated
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in mandadory_options) == len(mandadory_options):
break
else:
raise SerialException("Remote does not seem to support RFC2217 or BINARY mode %r" % mandadory_options)
if self.logger:
self.logger.info("Negotiated options: %s" % self._telnet_options)
# fine, go on, set RFC 2217 specific things
self._reconfigurePort()
# everything is set up, now do a clean start
self._isOpen = True
if not self._rtscts:
self.setRTS(True)
self.setDTR(True)
self.flushInput()
self.flushOutput()
def _reconfigurePort(self):
"""Set communication parameters on opened port."""
if self._socket is None:
raise SerialException("Can only operate on open ports")
# if self._timeout != 0 and self._interCharTimeout is not None:
# XXX
if self._writeTimeout is not None:
raise NotImplementedError('writeTimeout is currently not supported')
# XXX
# Setup the connection
# to get good performance, all parameter changes are sent first...
if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
self._rfc2217_port_settings['baudrate'].set(struct.pack('!I', self._baudrate))
self._rfc2217_port_settings['datasize'].set(struct.pack('!B', self._bytesize))
self._rfc2217_port_settings['parity'].set(struct.pack('!B', RFC2217_PARITY_MAP[self._parity]))
self._rfc2217_port_settings['stopsize'].set(struct.pack('!B', RFC2217_STOPBIT_MAP[self._stopbits]))
# and now wait until parameters are active
items = self._rfc2217_port_settings.values()
if self.logger:
self.logger.debug("Negotiating settings: %s" % (items,))
timeout_time = time.time() + self._network_timeout
while time.time() < timeout_time:
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in items) == len(items):
break
else:
raise SerialException("Remote does not accept parameter change (RFC2217): %r" % items)
if self.logger:
self.logger.info("Negotiated settings: %s" % (items,))
if self._rtscts and self._xonxoff:
raise ValueError('xonxoff and rtscts together are not supported')
elif self._rtscts:
self.rfc2217SetControl(SET_CONTROL_USE_HW_FLOW_CONTROL)
elif self._xonxoff:
self.rfc2217SetControl(SET_CONTROL_USE_SW_FLOW_CONTROL)
else:
self.rfc2217SetControl(SET_CONTROL_USE_NO_FLOW_CONTROL)
def close(self):
"""Close port"""
if self._isOpen:
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
self._socket = None
if self._thread:
self._thread.join()
self._isOpen = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def makeDeviceName(self, port):
raise SerialException("there is no sensible way to turn numbers into URLs")
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("rfc2217://"): url = url[10:]
try:
# is there a "path" (our options)?
if '/' in url:
# cut away options
url, options = url.split('/', 1)
# process options now, directly altering self
for option in options.split('/'):
if '=' in option:
option, value = option.split('=', 1)
else:
value = None
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.rfc2217')
self.logger.setLevel(LOGGER_LEVELS[value])
self.logger.debug('enabled logging')
elif option == 'ign_set_control':
self._ignore_set_control_answer = True
elif option == 'poll_modem':
self._poll_modem_state = True
elif option == 'timeout':
self._network_timeout = float(value)
else:
raise ValueError('unknown option: %r' % (option,))
# get host and port
host, port = url.split(':', 1) # may raise ValueError because of unpacking
port = int(port) # and this if it's not a number
if not 0 <= port < 65536: raise ValueError("port not in range 0...65535")
except ValueError, e:
raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e)
return (host, port)
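# Illustrative example (assumption, for clarity only):
#   fromURL("rfc2217://10.0.0.5:4001/poll_modem/timeout=5")
# returns ("10.0.0.5", 4001) and, as a side effect, sets
# self._poll_modem_state to True and self._network_timeout to 5.0.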
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
return self._read_buffer.qsize()
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self._isOpen: raise portNotOpenError
data = bytearray()
try:
while len(data) < size:
if self._thread is None:
raise SerialException('connection failed (reader thread died)')
data.append(self._read_buffer.get(True, self._timeout))
except Queue.Empty: # -> timeout
pass
return bytes(data)
def write(self, data):
"""\
Output the given string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self._isOpen: raise portNotOpenError
self._write_lock.acquire()
try:
try:
self._socket.sendall(to_bytes(data).replace(IAC, IAC_DOUBLED))
except socket.error, e:
raise SerialException("connection failed (socket error): %s" % e) # XXX what exception if socket connection fails
finally:
self._write_lock.release()
return len(data)
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
self.rfc2217SendPurge(PURGE_RECEIVE_BUFFER)
# empty read buffer
while self._read_buffer.qsize():
self._read_buffer.get(False)
def flushOutput(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self._isOpen: raise portNotOpenError
self.rfc2217SendPurge(PURGE_TRANSMIT_BUFFER)
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given
duration."""
if not self._isOpen: raise portNotOpenError
self.setBreak(True)
time.sleep(duration)
self.setBreak(False)
def setBreak(self, level=True):
"""\
Set break: Controls TXD. When active, no transmitting is
possible.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set BREAK to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_BREAK_ON)
else:
self.rfc2217SetControl(SET_CONTROL_BREAK_OFF)
def setRTS(self, level=True):
"""Set terminal status line: Request To Send."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set RTS to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_RTS_ON)
else:
self.rfc2217SetControl(SET_CONTROL_RTS_OFF)
def setDTR(self, level=True):
"""Set terminal status line: Data Terminal Ready."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('set DTR to %s' % ('inactive', 'active')[bool(level)])
if level:
self.rfc2217SetControl(SET_CONTROL_DTR_ON)
else:
self.rfc2217SetControl(SET_CONTROL_DTR_OFF)
def getCTS(self):
"""Read terminal status line: Clear To Send."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_CTS)
def getDSR(self):
"""Read terminal status line: Data Set Ready."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_DSR)
def getRI(self):
"""Read terminal status line: Ring Indicator."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_RI)
def getCD(self):
"""Read terminal status line: Carrier Detect."""
if not self._isOpen: raise portNotOpenError
return bool(self.getModemState() & MODEMSTATE_MASK_CD)
# - - - platform specific - - -
# None so far
# - - - RFC2217 specific - - -
def _telnetReadLoop(self):
"""read loop for the socket."""
mode = M_NORMAL
suboption = None
try:
while self._socket is not None:
try:
data = self._socket.recv(1024)
except socket.timeout:
# just need to get out of recv from time to time to check if
# still alive
continue
except socket.error, e:
# connection fails -> terminate loop
if self.logger:
self.logger.debug("socket error in reader thread: %s" % (e,))
break
if not data: break # lost connection
for byte in data:
if mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
mode = M_IAC_SEEN
else:
# store data in read buffer or sub option buffer
# depending on state
if suboption is not None:
suboption.append(byte)
else:
self._read_buffer.put(byte)
elif mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if suboption is not None:
suboption.append(IAC)
else:
self._read_buffer.put(IAC)
mode = M_NORMAL
elif byte == SB:
# sub option start
suboption = bytearray()
mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnetProcessSubnegotiation(bytes(suboption))
suboption = None
mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
telnet_command = byte
mode = M_NEGOTIATE
else:
# other telnet commands
self._telnetProcessCommand(byte)
mode = M_NORMAL
elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnetNegotiateOption(telnet_command, byte)
mode = M_NORMAL
finally:
self._thread = None
if self.logger:
self.logger.debug("read thread terminated")
# - incoming telnet commands and options
def _telnetProcessCommand(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: %r" % (command,))
def _telnetNegotiateOption(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnetSendOption((command == WILL and DONT or WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: %r" % (option,))
def _telnetProcessSubnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3:
self._linestate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_LINESTATE: %s" % self._linestate)
elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3:
self._modemstate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: %s" % self._modemstate)
# update time when we think that a poll would make sense
self._modemstate_expires = time.time() + 0.3
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
self._remote_suspend_flow = False
else:
for item in self._rfc2217_options.values():
if item.ack_option == suboption[1:2]:
#~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:])
item.checkAnswer(bytes(suboption[2:]))
break
else:
if self.logger:
self.logger.warning("ignoring COM_PORT_OPTION: %r" % (suboption,))
else:
if self.logger:
self.logger.warning("ignoring subnegotiation: %r" % (suboption,))
# - outgoing telnet commands and options
def _internal_raw_write(self, data):
"""internal socket write with no data escaping. used to send telnet stuff."""
self._write_lock.acquire()
try:
self._socket.sendall(data)
finally:
self._write_lock.release()
def telnetSendOption(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self._internal_raw_write(to_bytes([IAC, action, option]))
def rfc2217SendSubnegotiation(self, option, value=''):
"""Subnegotiation of RFC2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self._internal_raw_write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
def rfc2217SendPurge(self, value):
item = self._rfc2217_options['purge']
item.set(value) # transmit desired purge type
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217SetControl(self, value):
item = self._rfc2217_options['control']
item.set(value) # transmit desired control type
if self._ignore_set_control_answer:
# answers are ignored when option is set. compatibility mode for
# servers that answer, but not the expected one... (or no answer
# at all) i.e. sredird
time.sleep(0.1) # this helps getting the unit tests passed
else:
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217FlowServerReady(self):
"""\
check if server is ready to receive data. block for some time when
not.
"""
#~ if self._remote_suspend_flow:
#~ wait---
def getModemState(self):
"""\
get last modem state (cached value). if the value is "old", request a
new one. this cache helps us avoid issuing too many requests when e.g.
all status lines are queried one after the other by the user (getCTS,
getDSR etc.)
"""
# active modem state polling enabled? is the value fresh enough?
if self._poll_modem_state and self._modemstate_expires < time.time():
if self.logger:
self.logger.debug('polling modem state')
# when it is older, request an update
self.rfc2217SendSubnegotiation(NOTIFY_MODEMSTATE)
timeout_time = time.time() + self._network_timeout
            while time.time() < timeout_time:
                time.sleep(0.05)    # prevent 100% CPU load
                # when the expiration time is updated, it means that a new
                # value has arrived
                if self._modemstate_expires > time.time():
                    break
            else:
                # the loop ran out of time without receiving an update
                if self.logger:
                    self.logger.warning('poll for modem state failed')
# even when there is a timeout, do not generate an error just
# return the last known value. this way we can support buggy
# servers that do not respond to polls, but send automatic
# updates.
if self._modemstate is not None:
if self.logger:
self.logger.debug('using cached modem state')
return self._modemstate
else:
# never received a notification from the server
raise SerialException("remote sends no NOTIFY_MODEMSTATE")
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(RFC2217Serial, FileLike):
pass
else:
# io library present
class Serial(RFC2217Serial, io.RawIOBase):
pass
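# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): one plausible way to open the composed Serial class above through
# an RFC 2217 server and attach a logger so that the 'self.logger' checks in
# the implementation produce output. The URL, baud rate and logger name below
# are assumptions, not values mandated by the module; see also the simple
# client test at the end of this file.
def _example_open_rfc2217_port(url='rfc2217://localhost:7000', baudrate=9600):
    """Illustrative helper only: open a port over RFC 2217 with debug logging."""
    import logging
    logging.basicConfig(level=logging.DEBUG)
    s = Serial(url, baudrate)
    s.logger = logging.getLogger('rfc2217.client')   # optional: enables the info/debug messages
    return s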
#############################################################################
# The following is code that helps implementing an RFC 2217 server.
class PortManager(object):
"""\
This class manages the state of Telnet and RFC 2217. It needs a serial
instance and a connection to work with. Connection is expected to implement
a (thread safe) write function, that writes the string to the network.
"""
def __init__(self, serial_port, connection, logger=None):
self.serial = serial_port
self.connection = connection
self.logger = logger
self._client_is_rfc2217 = False
# filter state machine
self.mode = M_NORMAL
self.suboption = None
self.telnet_command = None
# states for modem/line control events
self.modemstate_mask = 255
self.last_modemstate = None
self.linstate_mask = 0
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok),
]
# negotiate Telnet/RFC2217 -> send initial requests
if self.logger:
self.logger.debug("requesting initial Telnet/RFC 2217 options")
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnetSendOption(option.send_yes, option.option)
# issue 1st modem state notification
def _client_ok(self):
"""\
callback of telnet option. it gets called when option is activated.
this one here is used to detect when the client agrees on RFC 2217. a
flag is set so that other functions like check_modem_lines know if the
client is ok.
"""
# The callback is used for we and they so if one party agrees, we're
# already happy. it seems not all servers do the negotiation correctly
# and i guess there are incorrect clients too.. so be happy if client
# answers one or the other positively.
self._client_is_rfc2217 = True
if self.logger:
self.logger.info("client accepts RFC 2217")
# this is to ensure that the client gets a notification, even if there
# was no change
self.check_modem_lines(force_notification=True)
# - outgoing telnet commands and options
def telnetSendOption(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self.connection.write(to_bytes([IAC, action, option]))
def rfc2217SendSubnegotiation(self, option, value=''):
"""Subnegotiation of RFC 2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self.connection.write(to_bytes([IAC, SB, COM_PORT_OPTION, option] + list(value) + [IAC, SE]))
# - check modem lines, needs to be called periodically from user to
# establish polling
def check_modem_lines(self, force_notification=False):
modemstate = (
(self.serial.getCTS() and MODEMSTATE_MASK_CTS) |
(self.serial.getDSR() and MODEMSTATE_MASK_DSR) |
(self.serial.getRI() and MODEMSTATE_MASK_RI) |
(self.serial.getCD() and MODEMSTATE_MASK_CD)
)
# check what has changed
deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0
if deltas & MODEMSTATE_MASK_CTS:
modemstate |= MODEMSTATE_MASK_CTS_CHANGE
if deltas & MODEMSTATE_MASK_DSR:
modemstate |= MODEMSTATE_MASK_DSR_CHANGE
if deltas & MODEMSTATE_MASK_RI:
modemstate |= MODEMSTATE_MASK_RI_CHANGE
if deltas & MODEMSTATE_MASK_CD:
modemstate |= MODEMSTATE_MASK_CD_CHANGE
# if new state is different and the mask allows this change, send
# notification. suppress notifications when client is not rfc2217
if modemstate != self.last_modemstate or force_notification:
if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification:
self.rfc2217SendSubnegotiation(
SERVER_NOTIFY_MODEMSTATE,
to_bytes([modemstate & self.modemstate_mask])
)
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: %s" % (modemstate,))
# save last state, but forget about deltas.
# otherwise it would also notify about changing deltas which is
# probably not very useful
self.last_modemstate = modemstate & 0xf0
# - outgoing data escaping
def escape(self, data):
"""\
this generator function is for the user. all outgoing data has to be
properly escaped, so that no IAC character in the data stream messes up
the Telnet state machine in the server.
socket.sendall(escape(data))
"""
for byte in data:
if byte == IAC:
yield IAC
yield IAC
else:
yield byte
# - incoming data filter
def filter(self, data):
"""\
handle a bunch of incoming bytes. this is a generator. it will yield
all characters not of interest for Telnet/RFC 2217.
The idea is that the reader thread pushes data from the socket through
this filter:
for byte in filter(socket.recv(1024)):
# do things like CR/LF conversion/whatever
# and write data to the serial port
serial.write(byte)
(socket error handling code left as exercise for the reader)
"""
for byte in data:
if self.mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
self.mode = M_IAC_SEEN
else:
# store data in sub option buffer or pass it to our
# consumer depending on state
if self.suboption is not None:
self.suboption.append(byte)
else:
yield byte
elif self.mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if self.suboption is not None:
self.suboption.append(byte)
else:
yield byte
self.mode = M_NORMAL
elif byte == SB:
# sub option start
self.suboption = bytearray()
self.mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnetProcessSubnegotiation(bytes(self.suboption))
self.suboption = None
self.mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
self.telnet_command = byte
self.mode = M_NEGOTIATE
else:
# other telnet commands
self._telnetProcessCommand(byte)
self.mode = M_NORMAL
elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnetNegotiateOption(self.telnet_command, byte)
self.mode = M_NORMAL
# - incoming telnet commands and options
def _telnetProcessCommand(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: %r" % (command,))
def _telnetNegotiateOption(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnetSendOption((command == WILL and DONT or WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: %r" % (option,))
def _telnetProcessSubnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if self.logger:
self.logger.debug('received COM_PORT_OPTION: %r' % (suboption,))
if suboption[1:2] == SET_BAUDRATE:
backup = self.serial.baudrate
try:
(baudrate,) = struct.unpack("!I", suboption[2:6])
if baudrate != 0:
self.serial.baudrate = baudrate
                except ValueError as e:
if self.logger:
self.logger.error("failed to set baud rate: %s" % (e,))
self.serial.baudrate = backup
else:
if self.logger:
self.logger.info("%s baud rate: %s" % (baudrate and 'set' or 'get', self.serial.baudrate))
self.rfc2217SendSubnegotiation(SERVER_SET_BAUDRATE, struct.pack("!I", self.serial.baudrate))
elif suboption[1:2] == SET_DATASIZE:
backup = self.serial.bytesize
try:
(datasize,) = struct.unpack("!B", suboption[2:3])
if datasize != 0:
self.serial.bytesize = datasize
                except ValueError as e:
if self.logger:
self.logger.error("failed to set data size: %s" % (e,))
self.serial.bytesize = backup
else:
if self.logger:
self.logger.info("%s data size: %s" % (datasize and 'set' or 'get', self.serial.bytesize))
self.rfc2217SendSubnegotiation(SERVER_SET_DATASIZE, struct.pack("!B", self.serial.bytesize))
elif suboption[1:2] == SET_PARITY:
backup = self.serial.parity
try:
parity = struct.unpack("!B", suboption[2:3])[0]
if parity != 0:
self.serial.parity = RFC2217_REVERSE_PARITY_MAP[parity]
                except ValueError as e:
if self.logger:
self.logger.error("failed to set parity: %s" % (e,))
self.serial.parity = backup
else:
if self.logger:
self.logger.info("%s parity: %s" % (parity and 'set' or 'get', self.serial.parity))
self.rfc2217SendSubnegotiation(
SERVER_SET_PARITY,
struct.pack("!B", RFC2217_PARITY_MAP[self.serial.parity])
)
elif suboption[1:2] == SET_STOPSIZE:
backup = self.serial.stopbits
try:
stopbits = struct.unpack("!B", suboption[2:3])[0]
if stopbits != 0:
self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[stopbits]
                except ValueError as e:
if self.logger:
self.logger.error("failed to set stop bits: %s" % (e,))
self.serial.stopbits = backup
else:
if self.logger:
self.logger.info("%s stop bits: %s" % (stopbits and 'set' or 'get', self.serial.stopbits))
self.rfc2217SendSubnegotiation(
SERVER_SET_STOPSIZE,
struct.pack("!B", RFC2217_STOPBIT_MAP[self.serial.stopbits])
)
elif suboption[1:2] == SET_CONTROL:
if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING:
if self.serial.xonxoff:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif self.serial.rtscts:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
else:
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL:
self.serial.xonxoff = False
self.serial.rtscts = False
if self.logger:
self.logger.info("changed flow control to None")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL:
self.serial.xonxoff = True
if self.logger:
self.logger.info("changed flow control to XON/XOFF")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL:
self.serial.rtscts = True
if self.logger:
self.logger.info("changed flow control to RTS/CTS")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE:
if self.logger:
self.logger.warning("requested break state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_BREAK_ON:
self.serial.setBreak(True)
if self.logger:
self.logger.info("changed BREAK to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON)
elif suboption[2:3] == SET_CONTROL_BREAK_OFF:
self.serial.setBreak(False)
if self.logger:
self.logger.info("changed BREAK to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_DTR:
if self.logger:
self.logger.warning("requested DTR state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_DTR_ON:
self.serial.setDTR(True)
if self.logger:
self.logger.info("changed DTR to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON)
elif suboption[2:3] == SET_CONTROL_DTR_OFF:
self.serial.setDTR(False)
if self.logger:
self.logger.info("changed DTR to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_RTS:
if self.logger:
self.logger.warning("requested RTS state - not implemented")
pass # XXX needs cached value
#~ self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_ON:
self.serial.setRTS(True)
if self.logger:
self.logger.info("changed RTS to active")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_OFF:
self.serial.setRTS(False)
if self.logger:
self.logger.info("changed RTS to inactive")
self.rfc2217SendSubnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF)
#~ elif suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL:
elif suboption[1:2] == NOTIFY_LINESTATE:
# client polls for current state
self.rfc2217SendSubnegotiation(
SERVER_NOTIFY_LINESTATE,
to_bytes([0]) # sorry, nothing like that implemented
)
elif suboption[1:2] == NOTIFY_MODEMSTATE:
if self.logger:
self.logger.info("request for modem state")
# client polls for current state
self.check_modem_lines(force_notification=True)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
if self.logger:
self.logger.info("suspend")
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
if self.logger:
self.logger.info("resume")
self._remote_suspend_flow = False
elif suboption[1:2] == SET_LINESTATE_MASK:
self.linstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("line state mask: 0x%02x" % (self.linstate_mask,))
elif suboption[1:2] == SET_MODEMSTATE_MASK:
self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("modem state mask: 0x%02x" % (self.modemstate_mask,))
elif suboption[1:2] == PURGE_DATA:
if suboption[2:3] == PURGE_RECEIVE_BUFFER:
self.serial.flushInput()
if self.logger:
self.logger.info("purge in")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER)
elif suboption[2:3] == PURGE_TRANSMIT_BUFFER:
self.serial.flushOutput()
if self.logger:
self.logger.info("purge out")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER)
elif suboption[2:3] == PURGE_BOTH_BUFFERS:
self.serial.flushInput()
self.serial.flushOutput()
if self.logger:
self.logger.info("purge both")
self.rfc2217SendSubnegotiation(SERVER_PURGE_DATA, PURGE_BOTH_BUFFERS)
else:
if self.logger:
self.logger.error("undefined PURGE_DATA: %r" % list(suboption[2:]))
else:
if self.logger:
self.logger.error("undefined COM_PORT_OPTION: %r" % list(suboption[1:]))
else:
if self.logger:
self.logger.warning("unknown subnegotiation: %r" % (suboption,))
# simple client test
if __name__ == '__main__':
import sys
s = Serial('rfc2217://localhost:7000', 115200)
sys.stdout.write('%s\n' % s)
#~ s.baudrate = 1898
sys.stdout.write("write...\n")
s.write("hello\n")
s.flush()
sys.stdout.write("read: %s\n" % s.read(5))
#~ s.baudrate = 19200
#~ s.databits = 7
s.close()
|
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/Pygments-1.3.1/pygments/styles/murphy.py
|
75
|
# -*- coding: utf-8 -*-
"""
pygments.styles.murphy
~~~~~~~~~~~~~~~~~~~~~~
Murphy's style from CodeRay.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class MurphyStyle(Style):
"""
Murphy's style from CodeRay.
"""
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "#666 italic",
Comment.Preproc: "#579 noitalic",
Comment.Special: "#c00 bold",
Keyword: "bold #289",
Keyword.Pseudo: "#08f",
Keyword.Type: "#66f",
Operator: "#333",
Operator.Word: "bold #000",
Name.Builtin: "#072",
Name.Function: "bold #5ed",
Name.Class: "bold #e9e",
Name.Namespace: "bold #0e84b5",
Name.Exception: "bold #F00",
Name.Variable: "#036",
Name.Variable.Instance: "#aaf",
Name.Variable.Class: "#ccf",
Name.Variable.Global: "#f84",
Name.Constant: "bold #5ed",
Name.Label: "bold #970",
Name.Entity: "#800",
Name.Attribute: "#007",
Name.Tag: "#070",
Name.Decorator: "bold #555",
String: "bg:#e0e0ff",
String.Char: "#88F bg:",
String.Doc: "#D42 bg:",
String.Interpol: "bg:#eee",
String.Escape: "bold #666",
String.Regex: "bg:#e0e0ff #000",
String.Symbol: "#fc8 bg:",
String.Other: "#f88",
Number: "bold #60E",
Number.Integer: "bold #66f",
Number.Float: "bold #60E",
Number.Hex: "bold #058",
Number.Oct: "bold #40E",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #c65d09",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "#F00 bg:#FAA"
}
|
odoo-turkiye/odoo
|
refs/heads/8.0
|
addons/hr_attendance/wizard/__init__.py
|
375
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_attendance_error
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
anarchivist/pyflag
|
refs/heads/master
|
src/FileFormats/RegFile.py
|
7
|
#!/usr/bin/env python
# ******************************************************
# Copyright 2004: Commonwealth of Australia.
#
# Developed by the Computer Network Vulnerability Team,
# Information Security Group.
# Department of Defence.
#
# Michael Cohen <scudette@users.sourceforge.net>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# ******************************************************
""" A Library to read the windows NT/2k/XP registry format.
"""
from format import *
from plugins.FileFormats.BasicFormats import *
import sys
import struct
## This is where the first page starts
FIRST_PAGE_OFFSET=0x1000
class NK_TYPE(WORD_ENUM):
""" The different types of NK nodes """
types = {
0x20: 'key',
0x2c: 'ROOT_KEY'
}
class RegFName(STRING):
""" A string which does not print NULLs """
def __str__(self):
result=[c for c in STRING.__str__(self) if c!='\x00' ]
return ''.join(result)
class lh_key(SimpleStruct):
fields = [
[ 'id', STRING,{'length':2}],
[ 'no_keys', WORD ],
]
def read(self):
result=SimpleStruct.read(self)
# if result['id']!='lh':
# raise IOError("lh record expected, but not found at offset 0x%08X" % self.buffer.offset)
no_keys=result['no_keys'].get_value()
self.add_element(result,'hashes', LF_HashArray(self.buffer[4:],count=no_keys))
return result
class ri_key(SimpleStruct):
fields = [
[ 'id', STRING,{'length':2}],
[ 'no_pointers',WORD ],
]
def read(self):
result=SimpleStruct.read(self)
if result['id']!='ri':
raise IOError("ri record expected, but not found at offset 0x%08X" % data.offset)
class Pri_key(POINTER):
""" This is a pointer to the ri_key struct for a particular nk.
It is pointing relative to FIRST_PAGE_OFFSET.
"""
target_class=ri_key
def calc_offset(self):
offset = self.data
if offset>0:
offset=offset+FIRST_PAGE_OFFSET+4
data=self.buffer.set_offset(offset)
return data
else: return None
class Plh_key(Pri_key):
target_class=lh_key
class KEY_NAME(STRING):
""" The key names are a 32 bit length followed by data """
def __init__(self,buffer,*args,**kwargs):
offset = WORD(buffer)
kwargs['length']=offset.get_value()
STRING.__init__(self,buffer[4:], *args, **kwargs)
class NK_key(SimpleStruct):
""" The main key node """
fields=[
[ 'id', STRING,{'length':2}],
[ 'Type', NK_TYPE],
[ 'WriteTS', WIN12_FILETIME],
[ 'parent_offset', LONG],
[ 'number_of_subkeys', ULONG],
[ 'pad', LONG],
[ 'offs_lh', Plh_key],
[ 'pad', LONG],
[ 'no_values', LONG],
[ 'offs_vk_list', LONG],
[ 'offs_sk', LONG],
[ 'offs_class_name', LONG],
[ 'pad', LONG_ARRAY,{'count':5}],
[ 'key_name', KEY_NAME],
]
def read(self):
result=SimpleStruct.read(self)
if result['id']!='nk':
raise IOError("nk record expected, but not found at offset %s" % self.buffer.offset)
## Find the list of value keys (VKs)
offs_vk_list = result['offs_vk_list'].get_value()
data=self.buffer.set_offset(FIRST_PAGE_OFFSET+4+offs_vk_list)
no_values=result['no_values'].get_value()
## Add the list to ourselves
self.add_element(result,'vk_list', VK_Array(data,count=no_values))
return result
def keys(self):
""" A generator which yields the keys under this node """
try:
lh_key=self['offs_lh'].get_value()
except: return
if not lh_key: return
for lh in lh_key['hashes']:
try:
nk_key = lh['ofs_nk'].get_value()
yield nk_key
except (KeyError,IOError):
pass
def key(self,name):
""" Find the named child of this node """
for k in self.keys():
if k['key_name']==name:
return k
raise KeyError("Key %s not found under %s" % (name, self['key_name']))
def value(self,name):
""" Find the named child of this node """
for v in self.values():
if v['keyname']==name:
return v
raise KeyError("Value %s not found under %s" % (name, self['key_name']))
def values(self):
""" A Generator which returns all the value nodes of this key """
if self['no_values'].get_value()>0:
try:
for value in self['vk_list']:
vk=value.get_value()
if vk:
yield vk
except IOError:
return
class PNK_key(Pri_key):
target_class=NK_key
class RegF(SimpleStruct):
""" This is the registry file header """
def __init__(self, buffer, *args, **kwargs):
SimpleStruct.__init__(self, buffer, *args, **kwargs)
self.root_key = self['root_key_offset'].get_value()
def get_key(self, path):
""" Given a path, retrieve the key object stored there """
p = path.split("/")
root_key = self.root_key
while p:
key = p.pop(0)
if key:
root_key = root_key.key(key)
return root_key
fields = [
[ 'Magic', STRING , dict(length=4) ],
[ 'Unknown1', LONG_ARRAY, dict(count=2) ],
[ 'Last Modified', WIN_FILETIME],
[ 'Unknown2', LONG_ARRAY,{'count':4}],
##Offset is relative to FIRST_PAGE_OFFSET. This offset is
##to the root key's nk record.
[ 'root_key_offset',PNK_key],
[ 'filesize', LONG],
[ 'Name', RegFName,{'length':0x1fc-0x2c}],
[ 'checksum', ULONG ],
]
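# Hedged usage sketch (added for illustration; the __main__ block at the end
# of this file does essentially the same thing inline). 'Buffer' is assumed to
# be provided by the wildcard 'format' import above, as it is used below.
def _example_lookup_key(filename, path):
    """Open a registry hive file and return the NK node stored at 'path'."""
    fd = open(filename, 'rb')
    hive = RegF(Buffer(fd=fd))
    return hive.get_key(path)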
class DATA_TYPE(LONG_ENUM):
""" Different types of data stored in the registry """
types = {
0:'REG_NONE',
1:'REG_SZ', # Unicode nul terminated string
2:'REG_EXPAND_SZ', # Unicode nul terminated string + env
3:'REG_BINARY', # Free form binary
4:'REG_DWORD', # 32-bit number
5:'REG_DWORD_BIG_ENDIAN', # 32-bit number
6:'REG_LINK', # Symbolic Link (unicode)
7:'REG_MULTI_SZ', # Multiple Unicode strings
8:'REG_RESOURCE_LIST', # Resource list in the resource map
9:'REG_FULL_RESOURCE_DESCRIPTOR', # Resource list in the hardware description
10:'REG_RESOURCE_REQUIREMENTS_LIST',
11:'Unknown'
}
class DATA(SimpleStruct):
""" This represents the encoded data object.
    The data is encoded using the three vectors len_data, offs_data and
    val_type. There are many edge cases where these change meanings. This is
    another example of microsoft stupidity - increasing the complexity for no
    reason. Most of the code below handles the weird edge cases.
"""
fields=[
[ 'len_data', LONG ],
[ 'offs_data', LONG ],
[ 'val_type', DATA_TYPE ],
]
def read(self):
result=SimpleStruct.read(self)
len_data=result['len_data'].get_value()
size=len_data& 0x7fffffff
offs_data=result['offs_data'].get_value()
val_type=result['val_type']
## Work around all the weird edge cases:
## If the offset is zero, the value is represented inline inside the length:
if size and val_type=='REG_DWORD' and len_data & 0x80000000L:
self.raw_data=struct.pack('l',result['offs_data'].get_value())
## This is a catchall in case:
elif len_data<0:
## raise IOError("length is negative in data: %s %s %s" %(result['len_data'],result['offs_data'],result['val_type']))
## print("length is negative in data: %s %s %s" %(result['len_data'],result['offs_data'],result['val_type']))
self.raw_data=None
else:
## Data is referenced by offs_data:
data=self.buffer.set_offset(offs_data+FIRST_PAGE_OFFSET+4)
self.raw_data=data[:min(size,1024)]
return result
def __repr__(self):
return self.__str__()
def __str__(self):
""" We display ourselves nicely """
val_type=self['val_type']
if self.raw_data==None:
return 'None'
elif val_type=='REG_SZ' or val_type=='REG_EXPAND_SZ' or val_type=='REG_MULTI_SZ':
result="%s" % UCS16_STR(self.raw_data, length=len(self.raw_data))
elif val_type=='REG_DWORD':
result="0x08%X" % ULONG(self.raw_data).get_value()
else:
## FIXME: This needs to be a hexdump view:
result="%r" % ("%s" % self.raw_data)
return result
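# Hedged illustration (added, not part of the original parser): the inline
# REG_DWORD edge case handled in DATA.read() above stores the value itself in
# the offs_data field when the high bit of len_data is set. The helper below
# only demonstrates that decoding; the parser does not use it.
def _example_decode_inline_dword(len_data, offs_data):
    """Return the 32-bit value encoded inline, or None if it is not inline."""
    if len_data & 0x80000000:
        return offs_data & 0xffffffff
    return None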
class vk_key(SimpleStruct):
fields = [
[ 'id', STRING,{'length':2}],
[ 'len_name',WORD ],
[ 'data', DATA ],
[ 'flag', WORD ],
[ 'pad', WORD ],
]
def read(self):
result=SimpleStruct.read(self)
if result['id']!='vk':
raise IOError("vk record expected, but not found at offset 0x%08X" % data.offset)
strlen=result['len_name'].get_value()
if strlen>0:
keyname=STRING(self.buffer[self.offset:],length=strlen)
else:
keyname=STRING('@',length=1)
## New struct member is keyname:
self.add_element(result,'keyname',keyname)
return result
class PNK_key(Pri_key):
target_class=NK_key
class lf_hash(SimpleStruct):
fields = [
[ 'ofs_nk', PNK_key],
[ 'name', STRING,{'length':4}],
]
class LF_HashArray(ARRAY):
target_class=lf_hash
class Pvk_key(Pri_key):
target_class=vk_key
class VK_Array(ARRAY):
target_class=Pvk_key
def print_values(nk_key, path):
print "%s%s" % (path,nk_key['key_name'])
if nk_key['no_values'].get_value()>0:
try:
for value in nk_key['vk_list']:
vk=value.get_value()
if vk:
print " Values: %s\t->\t%s\t%s" % (vk['keyname'],vk['data']['val_type'],vk['data'])
except IOError:
print "Oops: Cant parse values in %s at offset 0x%08X!" % (nk_key['key_name'], nk_key.buffer.offset)
def ls_r(root_key,path='', cb=print_values):
""" Lists all paths under root_key recursively.
@arg root_key: An NK_key object
"""
lh_key=root_key['offs_lh'].get_value()
## Node has no lf list, therefore no children:
if not lh_key: return
## Iterate over all children:
for lh in lh_key['hashes']:
try:
nk_key=lh['ofs_nk'].get_value()
# print "%s%s" % (path,nk_key['key_name'])
cb(nk_key, path=path)
ls_r(nk_key,"%s%s/" % (path,nk_key['key_name']), cb=cb)
        except IOError as e:
            print "Oops: Can't parse nk node %s at offset 0x%08X!: The error was %s" % (path,root_key.buffer.offset,e)
def get_key(root_key, path):
p = path.split("/")
while p:
root_key = root_key.key(p.pop(0))
return root_key
if __name__ == "__main__":
fd=open(sys.argv[1],'r')
buffer = Buffer(fd=fd)
header = RegF(buffer)
print header
path = 'Software/Microsoft/Windows/CurrentVersion/Explorer/TrayNotify'
key = header.get_key(path)
print key
print "Values for %s" % path
for v in key.values():
print v['data']['val_type'],v['data']['len_data'],v['data']
print "Keys under %s" % path
for k in key.keys():
print k
ls_r(header.root_key)
|
pwil3058/epygibus
|
refs/heads/master
|
epygibus_pkg/cli/subcmd_del.py
|
1
|
### Copyright (C) 2015 Peter Williams <pwil3058@gmail.com>
###
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; version 2 of the License only.
###
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import os
from . import cmd
from .. import config
from .. import snapshot
from .. import excpns
PARSER = cmd.SUB_CMD_PARSER.add_parser(
"del",
description=_("Delete the nominated archive's oldest (or specified) snapshot."),
)
cmd.add_cmd_argument(PARSER, cmd.ARCHIVE_NAME_ARG(_("the name of the archive whose snapshot is to be deleted.")))
XPARSER = PARSER.add_mutually_exclusive_group(required=False)
cmd.add_cmd_argument(XPARSER, cmd.BACK_ISSUE_ARG(-1))
XPARSER.add_argument(
"--all_but_newest",
help=_("delete all but the N newest snapshots."),
dest="newest_count",
metavar=_("N"),
type=int,
)
PARSER.add_argument(
"--remove_last_ok",
help=_("aurhorise deletion of the last snapshot in the archive."),
action="store_true",
)
def run_cmd(args):
if args.newest_count is not None:
try:
snapshot.delete_all_snapshots_but_newest(args.archive_name, newest_count=args.newest_count, clear_fell= args.remove_last_ok)
except excpns.Error as edata:
sys.stderr.write(str(edata) + "\n")
sys.exit(-1)
else:
try:
snapshot.delete_snapshot(args.archive_name, seln_fn=lambda l: l[-1-args.back], clear_fell= args.remove_last_ok)
except excpns.Error as edata:
sys.stderr.write(str(edata) + "\n")
sys.exit(-1)
return 0
PARSER.set_defaults(run_cmd=run_cmd)
|
EvenStrangest/tensorflow
|
refs/heads/master
|
tensorflow/examples/skflow/hdf5_classification.py
|
9
|
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, cross_validation
from tensorflow.contrib import learn
import h5py
# Load dataset.
iris = learn.datasets.load_dataset('iris')
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
test_size=0.2, random_state=42)
# Note that we are saving and loading the iris data in HDF5 format here as a simple demonstration.
h5f = h5py.File('test_hdf5.h5', 'w')
h5f.create_dataset('X_train', data=X_train)
h5f.create_dataset('X_test', data=X_test)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
h5f = h5py.File('test_hdf5.h5', 'r')
X_train = h5f['X_train']
X_test = h5f['X_test']
y_train = h5f['y_train']
y_test = h5f['y_test']
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10],
n_classes=3, steps=200)
# Fit and predict.
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print('Accuracy: {0:f}'.format(score))
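# Hedged note (added for illustration, not part of the original example): the
# variables X_train, y_train etc. above are h5py Dataset objects read straight
# from the file. If an estimator expects in-memory arrays instead, one option
# is to materialise them before fitting, e.g.
#   X_train, y_train = h5f['X_train'][:], h5f['y_train'][:]
# and to call h5f.close() once the data is no longer needed.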
|
bfalacerda/strands_executive
|
refs/heads/kinetic-devel
|
task_executor/scripts/example_add_time_critical_client.py
|
2
|
#!/usr/bin/env python
import rospy
from strands_executive_msgs import task_utils
from strands_executive_msgs.msg import Task
from strands_executive_msgs.srv import AddTasks, SetExecutionStatus
import sys
def get_services():
    # get services necessary to do the job
add_tasks_srv_name = '/task_executor/add_tasks'
set_exe_stat_srv_name = '/task_executor/set_execution_status'
rospy.loginfo("Waiting for task_executor service...")
rospy.wait_for_service(add_tasks_srv_name)
rospy.wait_for_service(set_exe_stat_srv_name)
rospy.loginfo("Done")
add_tasks_srv = rospy.ServiceProxy(add_tasks_srv_name, AddTasks)
set_execution_status = \
rospy.ServiceProxy(
set_exe_stat_srv_name,
SetExecutionStatus)
return add_tasks_srv, set_execution_status
if __name__ == '__main__':
rospy.init_node("example_add_time_critical_client")
# get services to call into execution framework
add_task, set_execution_status = get_services()
wp = 'WayPoint2'
duration_secs = 30
wait_before = rospy.Duration(30)
max_duration = rospy.Duration(duration_secs)
wait_task = Task(action='wait_action',
start_node_id=wp, max_duration=max_duration)
wait_task.start_after = rospy.get_rostime() + wait_before
wait_task.end_before = wait_task.start_after
task_utils.add_time_argument(wait_task, rospy.Time())
task_utils.add_duration_argument(wait_task, max_duration)
task_id = add_task([wait_task])
# Set the task executor running (if it isn't already)
set_execution_status(True)
# now let's stop execution while it's going on
# rospy.sleep(int(sys.argv[2])/2)
# set_execution_status(False)
|
TheTypoMaster/linux
|
refs/heads/master
|
tools/perf/scripts/python/check-perf-trace.py
|
1997
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
eteq/bokeh
|
refs/heads/master
|
bokeh/sampledata/sprint.py
|
45
|
from __future__ import absolute_import, print_function
from os.path import dirname, join
try:
import pandas as pd
except ImportError as e:
raise RuntimeError("sprint data requires pandas (http://pandas.pydata.org) to be installed")
sprint = pd.read_csv(join(dirname(__file__), 'sprint.csv'), skipinitialspace=True, escapechar="\\")
|
ConeyLiu/spark
|
refs/heads/master
|
python/pyspark/mllib/tests/test_streaming_algorithms.py
|
3
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time, sleep
import unittest
from numpy import array, random, exp, dot, all, mean, abs
from numpy import sum as array_sum
from pyspark import SparkContext
from pyspark.mllib.clustering import StreamingKMeans, StreamingKMeansModel
from pyspark.mllib.classification import StreamingLogisticRegressionWithSGD
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.regression import LabeledPoint, StreamingLinearRegressionWithSGD
from pyspark.mllib.util import LinearDataGenerator
from pyspark.streaming import StreamingContext
from pyspark.testing.utils import eventually
class MLLibStreamingTestCase(unittest.TestCase):
def setUp(self):
self.sc = SparkContext('local[4]', "MLlib tests")
self.ssc = StreamingContext(self.sc, 1.0)
def tearDown(self):
self.ssc.stop(False)
self.sc.stop()
class StreamingKMeansTest(MLLibStreamingTestCase):
def test_model_params(self):
"""Test that the model params are set correctly"""
stkm = StreamingKMeans()
stkm.setK(5).setDecayFactor(0.0)
self.assertEqual(stkm._k, 5)
self.assertEqual(stkm._decayFactor, 0.0)
# Model not set yet.
self.assertIsNone(stkm.latestModel())
self.assertRaises(ValueError, stkm.trainOn, [0.0, 1.0])
stkm.setInitialCenters(
centers=[[0.0, 0.0], [1.0, 1.0]], weights=[1.0, 1.0])
self.assertEqual(
stkm.latestModel().centers, [[0.0, 0.0], [1.0, 1.0]])
self.assertEqual(stkm.latestModel().clusterWeights, [1.0, 1.0])
def test_accuracy_for_single_center(self):
"""Test that parameters obtained are correct for a single center."""
centers, batches = self.streamingKMeansDataGenerator(
batches=5, numPoints=5, k=1, d=5, r=0.1, seed=0)
stkm = StreamingKMeans(1)
stkm.setInitialCenters([[0., 0., 0., 0., 0.]], [0.])
input_stream = self.ssc.queueStream(
[self.sc.parallelize(batch, 1) for batch in batches])
stkm.trainOn(input_stream)
self.ssc.start()
def condition():
self.assertEqual(stkm.latestModel().clusterWeights, [25.0])
return True
eventually(condition, catch_assertions=True)
realCenters = array_sum(array(centers), axis=0)
for i in range(5):
modelCenters = stkm.latestModel().centers[0][i]
self.assertAlmostEqual(centers[0][i], modelCenters, 1)
self.assertAlmostEqual(realCenters[i], modelCenters, 1)
def streamingKMeansDataGenerator(self, batches, numPoints,
k, d, r, seed, centers=None):
rng = random.RandomState(seed)
# Generate centers.
centers = [rng.randn(d) for i in range(k)]
return centers, [[Vectors.dense(centers[j % k] + r * rng.randn(d))
for j in range(numPoints)]
for i in range(batches)]
def test_trainOn_model(self):
"""Test the model on toy data with four clusters."""
stkm = StreamingKMeans()
initCenters = [[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]]
stkm.setInitialCenters(
centers=initCenters, weights=[1.0, 1.0, 1.0, 1.0])
# Create a toy dataset by setting a tiny offset for each point.
offsets = [[0, 0.1], [0, -0.1], [0.1, 0], [-0.1, 0]]
batches = []
for offset in offsets:
batches.append([[offset[0] + center[0], offset[1] + center[1]]
for center in initCenters])
batches = [self.sc.parallelize(batch, 1) for batch in batches]
input_stream = self.ssc.queueStream(batches)
stkm.trainOn(input_stream)
self.ssc.start()
# Give enough time to train the model.
def condition():
finalModel = stkm.latestModel()
self.assertTrue(all(finalModel.centers == array(initCenters)))
self.assertEqual(finalModel.clusterWeights, [5.0, 5.0, 5.0, 5.0])
return True
eventually(condition, catch_assertions=True)
def test_predictOn_model(self):
"""Test that the model predicts correctly on toy data."""
stkm = StreamingKMeans()
stkm._model = StreamingKMeansModel(
clusterCenters=[[1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0], [1.0, -1.0]],
clusterWeights=[1.0, 1.0, 1.0, 1.0])
predict_data = [[[1.5, 1.5]], [[-1.5, 1.5]], [[-1.5, -1.5]], [[1.5, -1.5]]]
predict_data = [self.sc.parallelize(batch, 1) for batch in predict_data]
predict_stream = self.ssc.queueStream(predict_data)
predict_val = stkm.predictOn(predict_stream)
result = []
def update(rdd):
rdd_collect = rdd.collect()
if rdd_collect:
result.append(rdd_collect)
predict_val.foreachRDD(update)
self.ssc.start()
def condition():
self.assertEqual(result, [[0], [1], [2], [3]])
return True
eventually(condition, catch_assertions=True)
@unittest.skip("SPARK-10086: Flaky StreamingKMeans test in PySpark")
def test_trainOn_predictOn(self):
"""Test that prediction happens on the updated model."""
stkm = StreamingKMeans(decayFactor=0.0, k=2)
stkm.setInitialCenters([[0.0], [1.0]], [1.0, 1.0])
# Since decay factor is set to zero, once the first batch
# is passed the clusterCenters are updated to [-0.5, 0.7]
# which causes 0.2 & 0.3 to be classified as 1, even though the
# classification based in the initial model would have been 0
# proving that the model is updated.
batches = [[[-0.5], [0.6], [0.8]], [[0.2], [-0.1], [0.3]]]
batches = [self.sc.parallelize(batch) for batch in batches]
input_stream = self.ssc.queueStream(batches)
predict_results = []
def collect(rdd):
rdd_collect = rdd.collect()
if rdd_collect:
predict_results.append(rdd_collect)
stkm.trainOn(input_stream)
predict_stream = stkm.predictOn(input_stream)
predict_stream.foreachRDD(collect)
self.ssc.start()
def condition():
self.assertEqual(predict_results, [[0, 1, 1], [1, 0, 1]])
return True
eventually(condition, catch_assertions=True)
class StreamingLogisticRegressionWithSGDTests(MLLibStreamingTestCase):
@staticmethod
def generateLogisticInput(offset, scale, nPoints, seed):
"""
Generate 1 / (1 + exp(-x * scale + offset))
where,
x is randomnly distributed and the threshold
and labels for each sample in x is obtained from a random uniform
distribution.
"""
rng = random.RandomState(seed)
x = rng.randn(nPoints)
sigmoid = 1. / (1 + exp(-(dot(x, scale) + offset)))
y_p = rng.rand(nPoints)
cut_off = y_p <= sigmoid
y_p[cut_off] = 1.0
y_p[~cut_off] = 0.0
return [
LabeledPoint(y_p[i], Vectors.dense([x[i]]))
for i in range(nPoints)]
def test_parameter_accuracy(self):
"""
Test that the final value of weights is close to the desired value.
"""
input_batches = [
self.sc.parallelize(self.generateLogisticInput(0, 1.5, 100, 42 + i))
for i in range(20)]
input_stream = self.ssc.queueStream(input_batches)
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
slr.trainOn(input_stream)
self.ssc.start()
def condition():
rel = (1.5 - slr.latestModel().weights.array[0]) / 1.5
self.assertAlmostEqual(rel, 0.1, 1)
return True
eventually(condition, timeout=60.0, catch_assertions=True)
def test_convergence(self):
"""
Test that weights converge to the required value on toy data.
"""
input_batches = [
self.sc.parallelize(self.generateLogisticInput(0, 1.5, 100, 42 + i))
for i in range(20)]
input_stream = self.ssc.queueStream(input_batches)
models = []
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
slr.trainOn(input_stream)
input_stream.foreachRDD(
lambda x: models.append(slr.latestModel().weights[0]))
self.ssc.start()
def condition():
self.assertEqual(len(models), len(input_batches))
return True
# We want all batches to finish for this test.
eventually(condition, 60.0, catch_assertions=True)
t_models = array(models)
diff = t_models[1:] - t_models[:-1]
# Test that weights improve with a small tolerance
self.assertTrue(all(diff >= -0.1))
self.assertTrue(array_sum(diff > 0) > 1)
@staticmethod
def calculate_accuracy_error(true, predicted):
return sum(abs(array(true) - array(predicted))) / len(true)
def test_predictions(self):
"""Test predicted values on a toy model."""
input_batches = []
for i in range(20):
batch = self.sc.parallelize(
self.generateLogisticInput(0, 1.5, 100, 42 + i))
input_batches.append(batch.map(lambda x: (x.label, x.features)))
input_stream = self.ssc.queueStream(input_batches)
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.2, numIterations=25)
slr.setInitialWeights([1.5])
predict_stream = slr.predictOnValues(input_stream)
true_predicted = []
predict_stream.foreachRDD(lambda x: true_predicted.append(x.collect()))
self.ssc.start()
def condition():
self.assertEqual(len(true_predicted), len(input_batches))
return True
eventually(condition, catch_assertions=True)
# Test that the accuracy error is no more than 0.4 on each batch.
for batch in true_predicted:
true, predicted = zip(*batch)
self.assertTrue(
self.calculate_accuracy_error(true, predicted) < 0.4)
def test_training_and_prediction(self):
"""Test that the model improves on toy data with no. of batches"""
input_batches = [
self.sc.parallelize(self.generateLogisticInput(0, 1.5, 100, 42 + i))
for i in range(40)]
predict_batches = [
b.map(lambda lp: (lp.label, lp.features)) for b in input_batches]
slr = StreamingLogisticRegressionWithSGD(
stepSize=0.01, numIterations=25)
slr.setInitialWeights([-0.1])
errors = []
def collect_errors(rdd):
true, predicted = zip(*rdd.collect())
errors.append(self.calculate_accuracy_error(true, predicted))
true_predicted = []
input_stream = self.ssc.queueStream(input_batches)
predict_stream = self.ssc.queueStream(predict_batches)
slr.trainOn(input_stream)
ps = slr.predictOnValues(predict_stream)
ps.foreachRDD(lambda x: collect_errors(x))
self.ssc.start()
def condition():
# Test that the improvement in error is > 0.3
if len(errors) == len(predict_batches):
self.assertGreater(errors[1] - errors[-1], 0.3)
if len(errors) >= 3 and errors[1] - errors[-1] > 0.3:
return True
return "Latest errors: " + ", ".join(map(lambda x: str(x), errors))
eventually(condition, timeout=180.0)
class StreamingLinearRegressionWithTests(MLLibStreamingTestCase):
def assertArrayAlmostEqual(self, array1, array2, dec):
        for i, j in zip(array1, array2):
self.assertAlmostEqual(i, j, dec)
def test_parameter_accuracy(self):
"""Test that coefs are predicted accurately by fitting on toy data."""
# Test that fitting (10*X1 + 10*X2), (X1, X2) gives coefficients
# (10, 10)
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0, 0.0])
xMean = [0.0, 0.0]
xVariance = [1.0 / 3.0, 1.0 / 3.0]
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0, 10.0], xMean, xVariance, 100, 42 + i, 0.1)
batches.append(self.sc.parallelize(batch))
input_stream = self.ssc.queueStream(batches)
slr.trainOn(input_stream)
self.ssc.start()
def condition():
self.assertArrayAlmostEqual(
slr.latestModel().weights.array, [10., 10.], 1)
self.assertAlmostEqual(slr.latestModel().intercept, 0.0, 1)
return True
eventually(condition, catch_assertions=True)
def test_parameter_convergence(self):
"""Test that the model parameters improve with streaming data."""
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0], [0.0], [1.0 / 3.0], 100, 42 + i, 0.1)
batches.append(self.sc.parallelize(batch))
model_weights = []
input_stream = self.ssc.queueStream(batches)
input_stream.foreachRDD(
lambda x: model_weights.append(slr.latestModel().weights[0]))
slr.trainOn(input_stream)
self.ssc.start()
def condition():
self.assertEqual(len(model_weights), len(batches))
return True
# We want all batches to finish for this test.
eventually(condition, catch_assertions=True)
w = array(model_weights)
diff = w[1:] - w[:-1]
self.assertTrue(all(diff >= -0.1))
def test_prediction(self):
"""Test prediction on a model with weights already set."""
# Create a model with initial Weights equal to coefs
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([10.0, 10.0])
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0, 10.0], [0.0, 0.0], [1.0 / 3.0, 1.0 / 3.0],
100, 42 + i, 0.1)
batches.append(
self.sc.parallelize(batch).map(lambda lp: (lp.label, lp.features)))
input_stream = self.ssc.queueStream(batches)
output_stream = slr.predictOnValues(input_stream)
samples = []
output_stream.foreachRDD(lambda x: samples.append(x.collect()))
self.ssc.start()
def condition():
self.assertEqual(len(samples), len(batches))
return True
# We want all batches to finish for this test.
eventually(condition, catch_assertions=True)
# Test that mean absolute error on each batch is less than 0.1
for batch in samples:
true, predicted = zip(*batch)
self.assertTrue(mean(abs(array(true) - array(predicted))) < 0.1)
def test_train_prediction(self):
"""Test that error on test data improves as model is trained."""
slr = StreamingLinearRegressionWithSGD(stepSize=0.2, numIterations=25)
slr.setInitialWeights([0.0])
# Create ten batches with 100 sample points in each.
batches = []
for i in range(10):
batch = LinearDataGenerator.generateLinearInput(
0.0, [10.0], [0.0], [1.0 / 3.0], 100, 42 + i, 0.1)
batches.append(self.sc.parallelize(batch))
predict_batches = [
b.map(lambda lp: (lp.label, lp.features)) for b in batches]
errors = []
def func(rdd):
true, predicted = zip(*rdd.collect())
errors.append(mean(abs(true) - abs(predicted)))
input_stream = self.ssc.queueStream(batches)
output_stream = self.ssc.queueStream(predict_batches)
slr.trainOn(input_stream)
output_stream = slr.predictOnValues(output_stream)
output_stream.foreachRDD(func)
self.ssc.start()
def condition():
if len(errors) == len(predict_batches):
self.assertGreater(errors[1] - errors[-1], 2)
if len(errors) >= 3 and errors[1] - errors[-1] > 2:
return True
return "Latest errors: " + ", ".join(map(lambda x: str(x), errors))
eventually(condition, timeout=180.0)
if __name__ == "__main__":
from pyspark.mllib.tests.test_streaming_algorithms import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|