| code (stringlengths 2 to 1.05M) | repo_name (stringlengths 5 to 104) | path (stringlengths 4 to 251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2 to 1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from lxml import html
from tinydb import TinyDB, Query
import requests
from . import get_logger, CONFIG
class TracklistManager(object):
db = TinyDB(CONFIG["technical"]["tracklist-db"], indent=2, separators=(',', ': '))
get_logger().info("Starting tracklist manager with database at %s", CONFIG["technical"]["tracklist-db"])
@classmethod
def get_tracklist(cls, pid):
result = cls.db.get(Query().pid == pid)
if not result:
get_logger().debug("Getting tracklist for: %s", pid)
tracklist = Tracklist(pid).listing
cls.db.insert({"pid": pid, "tracklist": tracklist})
else:
tracklist = result["tracklist"]
return tracklist
class Tracklist(object):
def __init__(self, pid):
"""
See also https://github.com/StevenMaude/bbc_radio_tracklisting_downloader.
:param pid: the unique pid of the episode
"""
self.pid = pid
self.listing = []
url = "http://www.bbc.co.uk/programmes/{}/segments.inc".format(self.pid)
page = requests.get(url)
tree = html.fromstring(page.text)
for track in tree.xpath('//div[@class="segment__track"]'):
try:
artist_names = track.xpath('.//span[@property="byArtist"]//span[@class="artist"]/text()')
except ValueError:
artist_names = []
artist = ', '.join(artist_names)
try:
title, = track.xpath('.//p/span[@property="name"]/text()')
except ValueError:
title = ''
self.listing.append([artist, title])
def __repr__(self):
return "Tracklist[pid={self.pid}, len={amount}]".format(amount=len(self.listing), **locals())
|
julien-hadleyjack/genrss-py
|
src/genrss/tracklist.py
|
Python
|
bsd-2-clause
| 1,857
|
import os
from glob import glob
from setuptools import setup, find_packages
# Setup flags and parameters
pkg_name = 'libbot' # top-level package name
# Cache readme contents for use as long_description
readme = open('README.md').read()
# Call setup()
setup(
name=pkg_name,
version='0.1',
description='Library of common bot functions',
long_description=readme,
url='https://github.com/IEEERobotics/libbot',
author='IEEE Robotics Team',
author_email='group-ieee-robotics@ncsu.edu',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'PyYAML'
],
test_suite=(pkg_name + '.tests'),
platforms='any',
keywords='lib library bot ieee logger config',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
])
|
IEEERobotics/libbot
|
setup.py
|
Python
|
bsd-2-clause
| 970
|
"""Misc support code.
Copyright (c) 2015 Civic Knowledge. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
from collections import OrderedDict, defaultdict, Mapping, deque, MutableMapping, Callable
from functools import partial, reduce, wraps
import json
import hashlib
import logging
import os
import pprint
import re
import subprocess
import sys
from time import time
import yaml
from yaml.representer import RepresenterError
import warnings
from bs4 import BeautifulSoup
from six.moves import filterfalse, xrange as six_xrange
from six import iteritems, iterkeys, itervalues, print_, StringIO
from six.moves.urllib.parse import urlparse, urlsplit, urlunsplit
from six.moves.urllib.request import urlopen
from ambry.dbexceptions import ConfigurationError
logger_init = set()
def get_logger(name, file_name=None, stream=None, template=None, propagate=False, level=None):
"""Get a logger by name.
"""
logger = logging.getLogger(name)
running_tests = (
'test' in sys.argv # running with setup.py
or sys.argv[0].endswith('py.test')) # running with py.test
if running_tests and not level:
# testing without level, this means tester does not want to see any log messages.
level = logging.CRITICAL
if not level:
level = logging.INFO
logger.setLevel(level)
logger.propagate = propagate
formatter = logging.Formatter(template)
if not stream:
stream = sys.stdout
logger.handlers = []
handler = logging.StreamHandler(stream=stream)
handler.setFormatter(formatter)
logger.addHandler(handler)
if file_name:
handler = logging.FileHandler(file_name)
handler.setFormatter(logging.Formatter('%(asctime)s '+template))
logger.addHandler(handler)
return logger
# From https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
def memoize(obj):
cache = obj.cache = {}
@wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
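# A small usage sketch (assumed, not in the original module): memoize keys the
# cache on str(args) + str(kwargs), so a second call with the same arguments is
# served from obj.cache. The function name below is hypothetical.
# >>> @memoize
# ... def slow_add(a, b):
# ...     return a + b
# >>> slow_add(1, 2)  # computed and cached
# 3
# >>> slow_add(1, 2)  # returned from the cache
# 3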
def expiring_memoize(obj):
"""Like memoize, but forgets after 10 seconds."""
cache = obj.cache = {}
last_access = obj.last_access = defaultdict(int)
@wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if last_access[key] and last_access[key] + 10 < time():
if key in cache:
del cache[key]
last_access[key] = time()
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
class Counter(dict):
"""Mapping where default values are zero."""
def __missing__(self, key):
return 0
# Stolen from:
# http://code.activestate.com/recipes/498245-lru-and-lfu-cache-decorators/
def lru_cache(maxsize=128, maxtime=60):
'''Least-recently-used cache decorator.
Arguments to the cached function must be hashable.
Cache performance statistics stored in f.hits and f.misses.
Clear the cache with f.clear().
http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
'''
maxqueue = maxsize * 10
# @ReservedAssignment
def decorating_function(
user_function,
len=len,
iter=iter,
tuple=tuple,
sorted=sorted,
KeyError=KeyError):
cache = {} # mapping of args to results
queue = deque() # order that keys have been used
refcount = Counter() # times each key is in the queue
sentinel = object() # marker for looping around the queue
kwd_mark = object() # separate positional and keyword args
# lookup optimizations (ugly but fast)
queue_append, queue_popleft = queue.append, queue.popleft
queue_appendleft, queue_pop = queue.appendleft, queue.pop
@wraps(user_function)
def wrapper(*args, **kwds):
# cache key records both positional and keyword args
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
# record recent use of this key
queue_append(key)
refcount[key] += 1
# get cache entry or compute if not found
try:
result, expire_time = cache[key]
if expire_time and time() > expire_time:
raise KeyError('Expired')
wrapper.hits += 1
except KeyError:
result = user_function(*args, **kwds)
if maxtime:
expire_time = time() + maxtime
else:
expire_time = None
cache[key] = result, expire_time
wrapper.misses += 1
# purge least recently used cache entry
if len(cache) > maxsize:
key = queue_popleft()
refcount[key] -= 1
while refcount[key]:
key = queue_popleft()
refcount[key] -= 1
del cache[key], refcount[key]
# periodically compact the queue by eliminating duplicate keys
# while preserving order of most recent access
if len(queue) > maxqueue:
refcount.clear()
queue_appendleft(sentinel)
for key in filterfalse(refcount.__contains__, iter(queue_pop, sentinel)):
queue_appendleft(key)
refcount[key] = 1
return result
def clear():
cache.clear()
queue.clear()
refcount.clear()
wrapper.hits = wrapper.misses = 0
wrapper.hits = wrapper.misses = 0
wrapper.clear = clear
return wrapper
return decorating_function
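# A hedged usage sketch (names are hypothetical): lru_cache combines LRU
# eviction (maxsize entries) with time-based expiry (maxtime seconds) and
# exposes hit/miss counters on the wrapped function.
# >>> @lru_cache(maxsize=2, maxtime=30)
# ... def fetch(x):
# ...     return x * 2
# >>> fetch(1); fetch(1)
# >>> fetch.hits, fetch.misses
# (1, 1)
# >>> fetch.clear()  # empties the cache and resets both counters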
class YamlIncludeLoader(yaml.Loader):
def __init__(self, stream):
self._root = os.path.split(stream.name)[0]
super(YamlIncludeLoader, self).__init__(stream)
# From http://pypi.python.org/pypi/layered-yaml-attrdict-config/12.07.1
class OrderedDictYAMLLoader(yaml.Loader):
'Based on: https://gist.github.com/844388'
def __init__(self, *args, **kwargs):
yaml.Loader.__init__(self, *args, **kwargs)
self.dir = None
for a in args:
try:
self.dir = os.path.dirname(a.name)
except:
pass
self.add_constructor(
'tag:yaml.org,2002:map',
type(self).construct_yaml_map)
self.add_constructor(
'tag:yaml.org,2002:omap',
type(self).construct_yaml_map)
self.add_constructor('!include', OrderedDictYAMLLoader.include)
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(
None,
None,
'expected a mapping node, but found {}'.format(
node.id),
node.start_mark)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping',
node.start_mark,
'found unacceptable key ({})'.format(exc),
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def include(self, node):
if not self.dir:
return "ConfigurationError: Can't include file: wasn't able to set base directory"
relpath = self.construct_scalar(node)
abspath = os.path.join(self.dir, relpath)
if not os.path.exists(abspath):
raise ConfigurationError(
"Can't include file '{}': Does not exist".format(abspath))
with open(abspath, 'r') as f:
parts = abspath.split('.')
ext = parts.pop()
if ext == 'yaml':
return yaml.load(f, OrderedDictYAMLLoader)
else:
return IncludeFile(abspath, relpath, f.read())
# IncludeFile and include_representer ensure that when config files are re-written, they are
# represented as an include, not the contents of the include
class IncludeFile(str):
def __new__(cls, abspath, relpath, data):
s = str.__new__(cls, data)
s.abspath = abspath
s.relpath = relpath
return s
def include_representer(dumper, data):
return dumper.represent_scalar('!include', data.relpath)
# http://pypi.python.org/pypi/layered-yaml-attrdict-config/12.07.1
class AttrDict(OrderedDict):
def __init__(self, *argz, **kwz):
super(AttrDict, self).__init__(*argz, **kwz)
def __setitem__(self, k, v):
super(AttrDict, self).__setitem__(k, AttrDict(v) if isinstance(v, Mapping) else v)
def __getattr__(self, k):
if not (k.startswith('__') or k.startswith('_OrderedDict__')):
return self[k]
else:
return super(AttrDict, self).__getattr__(k)
def __setattr__(self, k, v):
if k.startswith('_OrderedDict__'):
return super(AttrDict, self).__setattr__(k, v)
self[k] = v
def __iter__(self):
return iterkeys(super(OrderedDict, self))
##
# __enter__ and __exit__ allow for assigning a path to a variable
# with 'with', which doesn't add functionality but looks pretty.
##
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
return False
@classmethod
def from_yaml(cls, path, if_exists=False):
if if_exists and not os.path.exists(path):
return cls()
with open(path) as f:
return cls(yaml.load(f, OrderedDictYAMLLoader) or {})
@staticmethod
def flatten_dict(data, path=tuple()):
dst = list()
for k, v in iteritems(data):
k = path + (k,)
if isinstance(v, Mapping):
for v in v.flatten(k):
dst.append(v)
else:
dst.append((k, v))
return dst
def flatten(self, path=tuple()):
return self.flatten_dict(self, path=path)
def update_flat(self, val):
if isinstance(val, AttrDict):
val = val.flatten()
for k, v in val:
dst = self
for slug in k[:-1]:
if dst.get(slug) is None:
dst[slug] = AttrDict()
dst = dst[slug]
if v is not None or not isinstance(dst.get(k[-1]), Mapping):
dst[k[-1]] = v
def unflatten_row(self, k, v):
dst = self
for slug in k[:-1]:
if slug is None:
continue
if dst.get(slug) is None:
dst[slug] = AttrDict()
dst = dst[slug]
if v is not None or not isinstance(dst.get(k[-1]), Mapping):
dst[k[-1]] = v
def update_yaml(self, path):
self.update_flat(self.from_yaml(path))
return self
def to_dict(self):
root = {}
val = self.flatten()
for k, v in val:
dst = root
for slug in k[:-1]:
if dst.get(slug) is None:
dst[slug] = dict()
dst = dst[slug]
if v is not None or not isinstance(dst.get(k[-1]), Mapping):
dst[k[-1]] = v
return root
def update_dict(self, data):
self.update_flat(self.flatten_dict(data))
def clone(self):
clone = AttrDict()
clone.update_dict(self)
return clone
def rebase(self, base):
base = base.clone()
base.update_dict(self)
self.clear()
self.update_dict(base)
def dump(self, stream=None, map_view=None):
from ambry.metadata.proptree import _ScalarTermS, _ScalarTermU
from ambry.orm import MutationList, MutationDict # cross-module import
yaml.representer.SafeRepresenter.add_representer(
MapView, yaml.representer.SafeRepresenter.represent_dict)
yaml.representer.SafeRepresenter.add_representer(
AttrDict, yaml.representer.SafeRepresenter.represent_dict)
yaml.representer.SafeRepresenter.add_representer(
OrderedDict, yaml.representer.SafeRepresenter.represent_dict)
yaml.representer.SafeRepresenter.add_representer(
defaultdict, yaml.representer.SafeRepresenter.represent_dict)
yaml.representer.SafeRepresenter.add_representer(
MutationDict, yaml.representer.SafeRepresenter.represent_dict)
yaml.representer.SafeRepresenter.add_representer(
set, yaml.representer.SafeRepresenter.represent_list)
yaml.representer.SafeRepresenter.add_representer(
MutationList, yaml.representer.SafeRepresenter.represent_list)
yaml.representer.SafeRepresenter.add_representer(
IncludeFile, include_representer)
yaml.representer.SafeRepresenter.add_representer(
_ScalarTermS, yaml.representer.SafeRepresenter.represent_str)
yaml.representer.SafeRepresenter.add_representer(
_ScalarTermU, yaml.representer.SafeRepresenter.represent_str)
if stream is None:
stream = StringIO()
d = self
if map_view is not None:
map_view.inner = d
d = map_view
try:
yaml.safe_dump(d, stream, default_flow_style=False, indent=4, encoding='utf-8')
except RepresenterError:
pprint.pprint(self.to_dict())
raise
if isinstance(stream, StringIO):
return stream.getvalue()
def json(self):
o = yaml.load(self.dump())
return json.dumps(o)
class MapView(MutableMapping):
"""A map that provides a limited view on an underlying, inner map. Iterating over the
view returns only the keys specified in the keys argument. """
_inner = None
_keys = None
def __init__(self, d=None, keys=None):
self._inner = d
self._keys = keys
@property
def inner(self):
return self._inner
@inner.setter
def inner(self, value):
self._inner = value
def __getitem__(self, key):
return self._inner.__getitem__(key)
def __setitem__(self, key, value):
raise NotImplementedError()
return self._inner.__setitem__(key, value)
def __delitem__(self, key):
return self._inner.__delitem__(key)
def __len__(self):
return self._inner.__len__()
def __iter__(self):
for k in self._inner:
if not self._keys or k in self._keys:
yield k
def __getattr__(self, item):
return getattr(self._inner, item)
class CaseInsensitiveDict(Mapping): # http://stackoverflow.com/a/16202162
def __init__(self, d):
self._d = d
self._s = dict((k.lower(), k) for k in d)
def __contains__(self, k):
return k.lower() in self._s
def __len__(self):
return len(self._s)
def __iter__(self):
return iter(self._s)
def __getitem__(self, k):
return self._d[self._s[k.lower()]]
def __setitem__(self, k, v):
self._d[k] = v
self._s[k.lower()] = k
def pop(self, k):
k0 = self._s.pop(k.lower())
return self._d.pop(k0)
def actual_key_case(self, k):
return self._s.get(k.lower())
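# A small usage sketch (assumed, not in the original source): lookups go
# through a lowercase index, so the original key casing is preserved while
# access is case-insensitive.
# >>> d = CaseInsensitiveDict({'Content-Type': 'text/html'})
# >>> d['content-type']
# 'text/html'
# >>> d.actual_key_case('CONTENT-TYPE')
# 'Content-Type'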
def lowercase_dict(d):
return dict((k.lower(), v) for k, v in iteritems(d))
def configure_logging(cfg, custom_level=None):
"""Don't know what this is for ...."""
import itertools as it
import operator as op
if custom_level is None:
custom_level = logging.WARNING
for entity in it.chain.from_iterable(it.imap(op.methodcaller('viewvalues'),
[cfg] + [cfg.get(k, dict()) for k in ['handlers', 'loggers']])):
if isinstance(entity, Mapping) and entity.get('level') == 'custom':
entity['level'] = custom_level
logging.config.dictConfig(cfg)
logging.captureWarnings(cfg.warnings)
# {{{ http://code.activestate.com/recipes/578272/ (r1)
def toposort(data):
"""Dependencies are expressed as a dictionary whose keys are items and
whose values are a set of dependent items. Output is a list of sets in
topological order. The first set consists of items with no dependences,
each subsequent set consists of items that depend upon items in the
preceding sets.
>>> print '\\n'.join(repr(sorted(x)) for x in toposort({
... 2: set([11]),
... 9: set([11,8]),
... 10: set([11,3]),
... 11: set([7,5]),
... 8: set([7,3]),
... }) )
[3, 5, 7]
[8, 11]
[2, 9, 10]
"""
# Ignore self dependencies.
for k, v in iteritems(data):
v.discard(k)
# Find all items that don't depend on anything.
extra_items_in_deps = reduce(
set.union, itervalues(data)) - set(data.keys())
# Add empty dependences where needed
data.update({item: set() for item in extra_items_in_deps})
while True:
ordered = set(item for item, dep in iteritems(data) if not dep)
if not ordered:
break
yield ordered
data = {item: (dep - ordered)
for item, dep in iteritems(data)
if item not in ordered}
assert not data, 'Cyclic dependencies exist among these items:\n%s' % '\n'.join(
repr(x) for x in list(data.items()))
# end of http://code.activestate.com/recipes/578272/ }}}
def md5_for_stream(f, block_size=2 ** 20):
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()
def md5_for_file(f, block_size=2 ** 20):
"""Generate an MD5 has for a possibly large file by breaking it into
chunks."""
md5 = hashlib.md5()
try:
# Guess that f is a FLO.
f.seek(0)
return md5_for_stream(f, block_size=block_size)
except AttributeError:
# Nope, not a FLO. Maybe string?
file_name = f
with open(file_name, 'rb') as f:
return md5_for_file(f, block_size)
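# Usage sketch (the path below is hypothetical): md5_for_file accepts either an
# open file-like object or a path string and hashes it in block_size chunks.
# >>> md5_for_file('/tmp/example.bin')  # returns a 32-character hex digest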
def make_acro(past, prefix, s): # pragma: no cover
"""Create a three letter acronym from the input string s.
Args:
past: A set object, for storing acronyms that have already been created
prefix: A prefix added to the acronym before storing in the set
s: The string to create the acronym from.
"""
def _make_acro(s, t=0):
"""Make an acronym of s for trial t"""
# Really should cache these ...
v = ['a', 'e', 'i', 'o', 'u', 'y']
c = [chr(x) for x in six_xrange(ord('a'), ord('z') + 1) if chr(x) not in v]
s = re.sub(r'\W+', '', s.lower())
vx = [x for x in s if x in v] # Vowels in input string
cx = [x for x in s if x in c] # Consonants in input string
if s.startswith('Mc'):
if t < 1:
return 'Mc' + v[0]
if t < 2:
return 'Mc' + c[0]
if s[0] in v: # Starts with a vowel
if t < 1:
return vx[0] + cx[0] + cx[1]
if t < 2:
return vx[0] + vx[1] + cx[0]
if s[0] in c and s[1] in c: # Two first consonants
if t < 1:
return cx[0] + cx[1] + vx[0]
if t < 2:
return cx[0] + cx[1] + cx[2]
if t < 3:
return cx[0] + vx[0] + cx[1]
if t < 4:
return cx[0] + cx[1] + cx[2]
if t < 5:
return cx[0] + vx[0] + vx[1]
if t < 6:
return cx[0] + cx[1] + cx[-1]
# These are punts; just take a substring
if t < 7:
return s[0:3]
if t < 8:
return s[1:4]
if t < 9:
return s[2:5]
if t < 10:
return s[3:6]
return None
for t in six_xrange(11): # Try multiple forms until one isn't in the past acronyms
try:
a = _make_acro(s, t)
if a is not None:
if prefix:
aps = prefix + a
else:
aps = a
if aps not in past:
past.add(aps)
return a
except IndexError:
pass
raise Exception('Could not get acronym.')
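# A minimal usage sketch (assumed): `past` collects acronyms already handed
# out, so later calls fall through to other trial forms instead of colliding;
# `prefix` is prepended before the uniqueness check.
# >>> past = set()
# >>> make_acro(past, None, 'California')
# 'cal'  (first consonant, first vowel, second consonant)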
def ensure_dir_exists(path):
"""Given a file, ensure that the path to the file exists"""
import os
f_dir = os.path.dirname(path)
if not os.path.exists(f_dir):
os.makedirs(f_dir)
return f_dir
def walk_dict(d):
"""Walk a tree (nested dicts).
For each 'path', or dict, in the tree, returns a 3-tuple containing:
(path, sub-dicts, values)
where:
* path is the path to the dict
* sub-dicts is a tuple of (key,dict) pairs for each sub-dict in this dict
* values is a tuple of (key,value) pairs for each (non-dict) item in this dict
"""
# nested dict keys
nested_keys = tuple(k for k in list(d.keys()) if isinstance(d[k], dict))
# key/value pairs for non-dicts
items = tuple((k, d[k]) for k in list(d.keys()) if k not in nested_keys)
# return path, key/sub-dict pairs, and key/value pairs
yield ('/', [(k, d[k]) for k in nested_keys], items)
# recurse each subdict
for k in nested_keys:
for res in walk_dict(d[k]):
# for each result, stick key in path and pass on
res = ('/%s' % k + res[0], res[1], res[2])
yield res
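# A short usage sketch (assumed example data): each yielded tuple carries the
# path to a dict, its sub-dict items, and its scalar items; the recursion
# prefixes the sub-dict key onto the path.
# >>> list(walk_dict({'a': {'b': 1}}))
# [('/', [('a', {'b': 1})], ()), ('/a/', [], (('b', 1),))]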
def init_log_rate(output_f, N=None, message='', print_rate=None):
"""Initialze the log_rate function. Returnas a partial function to call for
each event.
If N is not specified but print_rate is specified, the initial N is
set to 100, and after the first message, the N value is adjusted to
emit print_rate messages per second
"""
if print_rate and not N:
N = 100
if not N:
N = 5000
d = [0, # number of items processed
time(), # start time. This one gets replaced after first message
N, # ticker to next message
N, # frequency to log a message
message,
print_rate,
deque([], maxlen=4) # Deque for averaging last N rates
]
assert isinstance(output_f, Callable)
f = partial(_log_rate, output_f, d)
f.always = output_f
f.count = lambda: d[0]
return f
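# A hedged usage sketch: the returned partial logs roughly every N calls via
# the supplied output function; print_ (imported above from six) is used here
# just as a simple output function.
# >>> log = init_log_rate(print_, N=1000, message='rows')
# >>> for row in range(5000):
# ...     log()           # emits a rate line every ~1000 calls
# >>> log.count()         # number of times log() was called
# 5000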
def _log_rate(output_f, d, message=None):
"""Log a message for the Nth time the method is called.
d is the object returned from init_log_rate
"""
if d[2] <= 0:
if message is None:
message = d[4]
# Average the rate over the length of the deque.
d[6].append(int(d[3] / (time() - d[1])))
rate = sum(d[6]) / len(d[6])
# Prints the processing rate in 1,000 records per sec.
output_f(message + ': ' + str(rate) + '/s ' + str(d[0] / 1000) + 'K ')
d[1] = time()
# If the print_rate was specified, adjust the number of records to
# approximate that rate.
if d[5]:
target_rate = rate * d[5]
d[3] = int((target_rate + d[3]) / 2)
d[2] = d[3]
d[0] += 1
d[2] -= 1
class Progressor(object):
"""Progress reporter suitable for calling in Library.get()
Example: r = l.get(args.term, cb=Progressor().progress)
"""
start = None
last = None
freq = 5
def __init__(self, message='Download', printf=print_):
self.start = time()
self.message = message
self.rates = deque(maxlen=10)
self.printf = printf
def progress(self, i, n):
now = time()
if not self.last:
self.last = now
if now - self.last > self.freq:
diff = now - self.start
self.last = now
i_rate = float(i) / diff
self.rates.append(i_rate)
if len(self.rates) > self.rates.maxlen / 2:
rate = sum(self.rates) / len(self.rates)
rate_type = 'a'
else:
rate = i_rate
rate_type = 'i'
msg = '{}: Compressed: {} Mb. Downloaded, Uncompressed: {:6.2f} Mb, {:5.2f} Mb / s ({})'\
.format(
self.message, int(int(n) / (1024 * 1024)),
round(float(i) / (1024. * 1024.), 2),
round(float(rate) / (1024 * 1024), 2), rate_type)
self.printf(msg)
# http://stackoverflow.com/a/1695250
# >>> Numbers = enum('ZERO', 'ONE', TWO = 20, THREE = 30)
# >>> print Numbers.ONE
# >>> print Numbers.THREE
def enum(*sequential, **named):
enums = dict(list(zip(sequential, list(six_xrange(len(sequential))))), **named)
return type('Enum', (), enums)
class Constant:
"""Organizes constants in a class."""
class ConstError(TypeError):
pass
def __setattr__(self, name, value):
if name in self.__dict__:
raise self.ConstError("Can't rebind const(%s)" % name)
self.__dict__[name] = value
def count_open_fds():
"""return the number of open file descriptors for current process.
.. warning: will only work on UNIX-like os-es.
http://stackoverflow.com/a/7142094
"""
pid = os.getpid()
procs = subprocess.check_output(
['lsof', '-w', '-Ff', '-p', str(pid)])
nprocs = len(
[s for s in procs.split('\n') if s and s[0] == 'f' and s[1:].isdigit()]
)
return nprocs
def parse_url_to_dict(url):
"""Parse a url and return a dict with keys for all of the parts.
The urlparse() function returns a wacky combination of a namedtuple
with properties.
"""
p = urlparse(url)
return {
'scheme': p.scheme,
'netloc': p.netloc,
'path': p.path,
'params': p.params,
'query': p.query,
'fragment': p.fragment,
'username': p.username,
'password': p.password,
'hostname': p.hostname,
'port': p.port
}
def unparse_url_dict(d):
if 'hostname' in d and d['hostname']:
host_port = d['hostname']
else:
host_port = ''
if 'port' in d and d['port']:
host_port += ':' + str(d['port'])
user_pass = ''
if 'username' in d and d['username']:
user_pass += d['username']
if 'password' in d and d['password']:
user_pass += ':' + d['password']
if user_pass:
host_port = '{}@{}'.format(user_pass, host_port)
url = '{}://{}/{}'.format(d.get('scheme', 'http'),
host_port, d.get('path', '').lstrip('/'))
if 'query' in d and d['query']:
url += '?' + d['query']
return url
def set_url_part(url, **kwargs):
"""Change one or more parts of a URL"""
d = parse_url_to_dict(url)
d.update(kwargs)
return unparse_url_dict(d)
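# Usage sketch (hypothetical URL): parse_url_to_dict / unparse_url_dict round
# trip a URL through a plain dict, so set_url_part can swap any single piece.
# >>> set_url_part('http://example.com/data.csv', scheme='https')
# 'https://example.com/data.csv'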
def filter_url(url, **kwargs):
"""filter a URL by returning a URL with only the parts specified in the keywords"""
d = parse_url_to_dict(url)
d.update(kwargs)
return unparse_url_dict({k: v for k, v in list(d.items()) if v})
def select_from_url(url, key):
d = parse_url_to_dict(url)
return d.get(key)
def normalize_newlines(string):
"""Convert \r\n or \r to \n."""
return re.sub(r'(\r\n|\r|\n)', '\n', string)
def print_yaml(o):
"""Pretty print an object as YAML."""
print(yaml.dump(o, default_flow_style=False, indent=4, encoding='utf-8'))
def qualified_class_name(o):
"""Full name of an object, including the module"""
module = o.__class__.__module__
if module is None or module == str.__class__.__module__:
return o.__class__.__name__
return module + '.' + o.__class__.__name__
def qualified_name(cls):
"""Full name of a class, including the module. Like qualified_class_name, but when you already have a class """
module = cls.__module__
if module is None or module == str.__class__.__module__:
return cls.__name__
return module + '.' + cls.__name__
def qualified_name_import(cls):
"""Full name of a class, including the module. Like qualified_class_name, but when you already have a class """
parts = qualified_name(cls).split('.')
return "from {} import {}".format('.'.join(parts[:-1]), parts[-1])
class _Getch:
"""Gets a single character from standard input. Does not echo to the
screen."""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
#from http://code.activestate.com/recipes/134892/
class _GetchUnix:
def __init__(self):
import tty, sys
def __call__(self):
import sys, tty, termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
# Originally was raw mode, not cbreak, but raw screws up printing.
tty.setcbreak(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
getch = _Getch()
def scrape(library, url, as_html=False):
if url.startswith('s3:'):
s3 = library.filesystem.s3(url)
return scrape_s3(url, s3, as_html=as_html)
else:
return scrape_urls_from_web_page(url)
def scrape_s3(root_url, s3, as_html=False):
from os.path import join
d = dict(external_documentation={}, sources={}, links={})
for f in s3.walkfiles('/'):
if as_html:
try:
url, _ = s3.getpathurl(f).split('?', 1)
except ValueError:
url = s3.getpathurl(f)
else:
url = join(root_url, f.strip('/'))
fn = f.strip('/')
d['sources'][fn] = dict(url=url, description='', title=fn)
return d
def scrape_urls_from_web_page(page_url):
parts = list(urlsplit(page_url))
parts[2] = ''
root_url = urlunsplit(parts)
html_page = urlopen(page_url)
soup = BeautifulSoup(html_page)
d = dict(external_documentation={}, sources={}, links={})
for link in soup.findAll('a'):
if not link:
continue
if link.string:
text = str(link.string.encode('ascii', 'ignore'))
else:
text = 'None'
url = link.get('href')
if not url:
continue
if 'javascript' in url:
continue
if url.startswith('http'):
pass
elif url.startswith('/'):
url = os.path.join(root_url, url)
else:
url = os.path.join(page_url, url)
base = os.path.basename(url)
if '#' in base:
continue
try:
fn, ext = base.split('.', 1)
except ValueError:
fn = base
ext = ''
try: # Yaml adds a lot of junk to encode unicode. # FIXME. Should use safe_dump instead
fn = str(fn)
url = str(url)
text = str(text)
except UnicodeDecodeError:
pass
# xlsm is a bug that adds 'm' to the end of the url. No idea.
if ext.lower() in ('zip', 'csv', 'xls', 'xlsx', 'xlsm', 'txt'):
d['sources'][fn] = dict(url=url, description=text)
elif ext.lower() in ('pdf', 'html'):
d['external_documentation'][fn] = dict(url=url, description=text, title=text)
else:
d['links'][text] = dict(url=url, description=text, title=text)
return d
def drop_empty(rows):
"""Transpose the columns into rows, remove all of the rows that are empty after the first cell, then
transpose back. The result is that columns that have a header but no data in the body are removed, assuming
the header is the first row. """
return zip(*[col for col in zip(*rows) if bool(filter(bool, col[1:]))])
# http://stackoverflow.com/a/20577580
def dequote(s):
"""
If a string has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the string unchanged.
"""
if (s[0] == s[-1]) and s.startswith(("'", '"')):
return s[1:-1]
return s
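# A small usage sketch (assumed examples): only a matching pair of outer
# quotes is stripped.
# >>> dequote("'hello'")
# 'hello'
# >>> dequote("'hello")   # unmatched quote, returned unchanged
# "'hello"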
def pretty_time(s, granularity=3):
"""Pretty print time in seconds. COnverts the input time in seconds into a string with
interval names, such as days, hours and minutes
From:
http://stackoverflow.com/a/24542445/1144479
"""
intervals = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
def display_time(seconds, granularity=granularity):
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append('{} {}'.format(int(value), name))
return ', '.join(result[:granularity])
return display_time(s, granularity)
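# Usage sketch: granularity limits how many interval names are emitted.
# >>> pretty_time(90061)
# '1 day, 1 hour, 1 minute'
# >>> pretty_time(90061, granularity=2)
# '1 day, 1 hour'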
# From: http://code.activestate.com/recipes/391367-deprecated/
def deprecated(func):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def newFunc(*args, **kwargs):
warnings.warn("Call to deprecated function %s." % func.__name__,
category=DeprecationWarning)
return func(*args, **kwargs)
newFunc.__name__ = func.__name__
newFunc.__doc__ = func.__doc__
newFunc.__dict__.update(func.__dict__)
return newFunc
def int_maybe(v):
"""Try to convert to an int and return None on failure"""
try:
return int(v)
except (TypeError, ValueError):
return None
def random_string(length):
import random
import string
return ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.ascii_uppercase + string.digits)
for _ in range(length))
# From: http://code.activestate.com/recipes/496741-object-proxying/
class Proxy(object):
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
#
# proxying (special cases)
#
def __getattribute__(self, name):
return getattr(object.__getattribute__(self, "_obj"), name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_obj"), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_obj"), name, value)
def __nonzero__(self):
return bool(object.__getattribute__(self, "_obj"))
def __str__(self):
return str(object.__getattribute__(self, "_obj"))
def __repr__(self):
return repr(object.__getattribute__(self, "_obj"))
#
# factories
#
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
'__repr__', '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next',
]
@classmethod
def _create_class_proxy(cls, theclass):
"""creates a proxy for the given class"""
def make_method(name):
def method(self, *args, **kw):
return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
creates a proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
def delete_module(modname, paranoid=None):
""" Delete a module.http://stackoverflow.com/a/1668289
:param modname:
:param paranoid:
:return:
"""
from sys import modules
try:
thismod = modules[modname]
except KeyError:
raise ValueError(modname)
these_symbols = dir(thismod)
if paranoid:
try:
paranoid[:] # sequence support
except:
raise ValueError('must supply a finite list for paranoid')
else:
these_symbols = paranoid[:]
del modules[modname]
for mod in modules.values():
try:
delattr(mod, modname)
except AttributeError:
pass
if paranoid:
for symbol in these_symbols:
if symbol[:2] == '__': # ignore special symbols
continue
try:
delattr(mod, symbol)
except AttributeError:
pass
def flatten(d, sep=None):
"""Flatten a datastructure composed of dicts, sequences and scalars. If sep is None,
the key is a tuple of key path comonents. """
def _flatten(e, parent_key):
import collections
if isinstance(e, collections.MutableMapping):
return tuple((parent_key + k2, v2) for k, v in e.items() for k2, v2 in _flatten(v, (k,)))
elif isinstance(e, collections.MutableSequence):
return tuple((parent_key + k2, v2) for i, v in enumerate(e) for k2, v2 in _flatten(v, (i,)))
else:
return (parent_key, (e,)),
return tuple((k if sep is None else sep.join(str(e) for e in k), v[0])
for k, v in _flatten(d, tuple()))
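# Usage sketch (assumed input): nested mappings and sequences are flattened to
# (key-path, value) pairs; with sep given, each path is joined into a string.
# Output order follows dict iteration order.
# >>> flatten({'a': {'b': 1}, 'c': [2, 3]}, sep='.')
# (('a.b', 1), ('c.0', 2), ('c.1', 3))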
|
CivicKnowledge/ambry
|
ambry/util/__init__.py
|
Python
|
bsd-2-clause
| 39,947
|
import os
from setuptools import find_packages, setup
root = os.path.dirname(os.path.realpath(__file__))
long_description = open(os.path.join(root, 'README.rst')).read()
setup(
name='range-regex',
version='1.0.3',
description='Python numeric range regular expression generator',
long_description=long_description,
url='http://github.com/dimka665/range-regex',
author='Dmitry Voronin',
author_email='dimka665@gmail.com',
license='BSD',
# packages=['range_regex'],
packages=find_packages(),
include_package_data=True,
keywords='numeric range regex regular expression generator',
)
|
dimka665/range-regex
|
setup.py
|
Python
|
bsd-2-clause
| 628
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-03-11 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('board', '0006_merge_20180311_1702'),
]
operations = [
migrations.CreateModel(
name='MainPoster',
fields=[
('basepost_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='board.BasePost')),
('title', models.CharField(max_length=128, verbose_name='제목')),
('title_ko', models.CharField(max_length=128, null=True, verbose_name='제목')),
('title_en', models.CharField(max_length=128, null=True, verbose_name='제목')),
('image', models.ImageField(upload_to='banner', verbose_name='이미지')),
],
options={
'verbose_name': '메인포스터',
'verbose_name_plural': '메인포스터(들)',
},
bases=('board.basepost',),
),
migrations.AlterModelOptions(
name='boardbanner',
options={'verbose_name': '게시판 배너', 'verbose_name_plural': '게시판 배너(들)'},
),
migrations.AlterField(
model_name='board',
name='role',
field=models.CharField(choices=[('DEFAULT', '기본'), ('PROJECT', '사업'), ('PLANBOOK', '정책자료집'), ('DEBATE', '논의'), ('ARCHIVING', '아카이빙'), ('WORKHOUR', '상근관리'), ('SPONSOR', '제휴리스트'), ('SWIPER', '격주보고'), ('STORE', '상점'), ('CONTACT', '산하기구')], default='DEFAULT', max_length=32, verbose_name='보드 역할'),
),
]
|
hangpark/kaistusc
|
apps/board/migrations/0007_auto_20180311_1704.py
|
Python
|
bsd-2-clause
| 1,864
|
# Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from .arch import *
|
programa-stic/barf-project
|
barf/arch/__init__.py
|
Python
|
bsd-2-clause
| 1,406
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 David Shah <dave@ds0.me>
# Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import Pins, Subsignal, IOStandard, Misc
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk300", 0,
Subsignal("n", Pins("AY38"), IOStandard("DIFF_SSTL12")),
Subsignal("p", Pins("AY37"), IOStandard("DIFF_SSTL12")),
),
("clk300", 1,
Subsignal("n", Pins("AW19"), IOStandard("DIFF_SSTL12")),
Subsignal("p", Pins("AW20"), IOStandard("DIFF_SSTL12")),
),
("clk300", 2,
Subsignal("n", Pins("E32"), IOStandard("DIFF_SSTL12")),
Subsignal("p", Pins("F32"), IOStandard("DIFF_SSTL12")),
),
("clk300", 3,
Subsignal("n", Pins("H16"), IOStandard("DIFF_SSTL12")),
Subsignal("p", Pins("J16"), IOStandard("DIFF_SSTL12")),
),
# Leds
("user_led", 0, Pins("BC21"), IOStandard("LVCMOS12")),
("user_led", 1, Pins("BB21"), IOStandard("LVCMOS12")),
("user_led", 2, Pins("BA20"), IOStandard("LVCMOS12")),
# Serial
("serial", 0,
Subsignal("rx", Pins("BF18"), IOStandard("LVCMOS12")),
Subsignal("tx", Pins("BB20"), IOStandard("LVCMOS12")),
),
# PCIe
("pcie_x2", 0,
Subsignal("rst_n", Pins("BD21"), IOStandard("LVCMOS12")),
Subsignal("clk_n", Pins("AM10")),
Subsignal("clk_p", Pins("AM11")),
Subsignal("rx_n", Pins("AF1 AG3")),
Subsignal("rx_p", Pins("AF2 AG4")),
Subsignal("tx_n", Pins("AF6 AG8")),
Subsignal("tx_p", Pins("AF7 AG9")),
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("BD21"), IOStandard("LVCMOS12")),
Subsignal("clk_n", Pins("AM10")),
Subsignal("clk_p", Pins("AM11")),
Subsignal("rx_n", Pins("AF1 AG3 AH1 AJ3")),
Subsignal("rx_p", Pins("AF2 AG4 AH2 AJ4")),
Subsignal("tx_n", Pins("AF6 AG8 AH6 AJ8")),
Subsignal("tx_p", Pins("AF7 AG9 AH7 AJ9")),
),
("pcie_x8", 0,
Subsignal("rst_n", Pins("BD21"), IOStandard("LVCMOS12")),
Subsignal("clk_n", Pins("AM10")),
Subsignal("clk_p", Pins("AM11")),
Subsignal("rx_n", Pins("AF1 AG3 AH1 AJ3 AK1 AL3 AM1 AN3")),
Subsignal("rx_p", Pins("AF2 AG4 AH2 AJ4 AK2 AL4 AM2 AN4")),
Subsignal("tx_n", Pins("AF6 AG8 AH6 AJ8 AK6 AL8 AM6 AN8")),
Subsignal("tx_p", Pins("AF7 AG9 AH7 AJ9 AK7 AL9 AM7 AN9")),
),
("pcie_x16", 0,
Subsignal("rst_n", Pins("BD21"), IOStandard("LVCMOS12")),
Subsignal("clk_n", Pins("AM10")),
Subsignal("clk_p", Pins("AM11")),
Subsignal("rx_n", Pins("AF1 AG3 AH1 AJ3 AK1 AL3 AM1 AN3 AP1 AR3 AT1 AU3 AV1 AW3 BA1 BC1")),
Subsignal("rx_p", Pins("AF2 AG4 AH2 AJ4 AK2 AL4 AM2 AN4 AP2 AR4 AT2 AU4 AV2 AW4 BA2 BC2")),
Subsignal("tx_n", Pins("AF6 AG8 AH6 AJ8 AK6 AL8 AM6 AN8 AP6 AR8 AT6 AU8 AV6 BB4 BD4 BF4")),
Subsignal("tx_p", Pins("AF7 AG9 AH7 AJ9 AK7 AL9 AM7 AN9 AP7 AR9 AT7 AU9 AV7 BB5 BD5 BF5")),
),
# DDR4 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"AT36 AV36 AV37 AW35 AW36 AY36 AY35 BA40",
"BA37 BB37 AR35 BA39 BB40 AN36"),
IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("BB39"), IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("AT35 AT34"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("BC37 BC39"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("AR36"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("AP36"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("AP35"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cke", Pins("BC38"), IOStandard("SSTL12_DCI")),
Subsignal("clk_n", Pins("AW38"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_p", Pins("AV38"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cs_n", Pins("AR33"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("BC31 AY27 BB26 BD26 AP30 BF39 AR30 BA32"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"BB31 BB32 AY33 AY32 BC33 BC32 BB34 BC34",
"AT28 AT27 AU27 AV27 AV28 AV29 AW30 AY30",
"BA28 BA27 AW28 AW29 BC27 BB27 BA29 BB29",
"BE28 BF28 BE30 BD30 BF27 BE27 BF29 BF30",
"AT32 AU32 AM30 AL30 AR31 AN31 AR32 AN32",
"BD40 BD39 BF42 BF43 BF41 BE40 BE37 BF37",
"AM27 AN27 AP28 AP29 AM29 AN29 AR28 AR27",
"AW34 AV32 AV31 AV34 BA35 BA34 AW31 AY31"),
IOStandard("POD12_DCI"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("BB36 AU30 BB30 BD29 AM32 BF38 AL29 AW33"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("BB35 AU29 BA30 BD28 AM31 BE38 AL28 AV33"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("odt", Pins("AP34"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("AU31"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST")
),
("ddram", 1,
Subsignal("a", Pins(
"AN24 AT24 AW24 AN26 AY22 AY23 AV24 BA22",
"AY25 BA23 AM26 BA25 BB22 AL24"),
IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("AW25"), IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("AU24 AP26"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("BC22 AW26"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("AN23"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("AM25"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("AL25"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cke", Pins("BB25"), IOStandard("SSTL12_DCI")),
Subsignal("clk_n", Pins("AU25"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_p", Pins("AT25"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cs_n", Pins("AV23"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("BE8 BE15 BE22 BA10 AY13 BB14 AN14 AW16"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
" BC7 BD7 BD8 BD9 BF7 BE7 BD10 BE10",
"BF12 BE13 BD14 BD13 BF14 BF13 BD16 BD15",
"BF25 BE25 BF24 BD25 BC23 BD23 BF23 BE23",
"BA14 BA13 BA12 BB12 BC9 BB9 BA7 BA8",
"AU13 AW14 AW13 AV13 AU14 BA11 AY11 AV14",
"BA18 BA17 AY18 AY17 BD11 BC11 BA15 BB15",
"AR13 AP13 AN13 AM13 AT15 AR15 AM14 AL14",
"AV16 AV17 AU17 AU16 BB17 BB16 AT17 AT18"),
IOStandard("POD12_DCI"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("BF9 BE11 BD24 BB10 AY15 BC12 AT13 AW18"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("BF10 BE12 BC24 BB11 AW15 BC13 AT14 AV18"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("odt", Pins("AW23"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("AR17"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST")
),
("ddram", 2,
Subsignal("a", Pins(
"L29 A33 C33 J29 H31 G31 C32 B32",
"A32 D31 A34 E31 M30 F33"),
IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("B31"), IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("D33 B36"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("C31 J30"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("K30"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("G32"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("A35"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cke", Pins("G30"), IOStandard("SSTL12_DCI")),
Subsignal("clk_n", Pins("B34"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_p", Pins("C34"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cs_n", Pins("B35"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("T30 M27 R28 H26 C37 H33 G37 M34"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"P29 P30 R30 N29 N32 M32 P31 L32",
"H29 G29 J28 H28 K27 L27 K26 K25",
"P25 R25 L25 M25 P26 R26 N27 N28",
"F27 D28 E27 E28 G26 F29 G27 F28",
"A38 A37 B37 C36 B40 C39 A40 D39",
"G36 H36 H37 J36 G34 G35 K37 K38",
"E38 D38 E35 F35 E36 E37 F38 G38",
"K35 J35 K33 L33 J33 J34 N34 P34"),
IOStandard("POD12_DCI"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("M31 J26 M26 D30 A39 H38 E40 L36"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("N31 J25 N26 D29 B39 J38 E39 L35"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("odt", Pins("E33"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("D36"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST")
),
("ddram", 3,
Subsignal("a", Pins(
"K15 B15 F14 A15 C14 A14 B14 E13",
"F13 A13 D14 C13 B13 K16"),
IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("H13"), IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("J15 H14"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("D13 J13"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("F15"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("E15"), IOStandard("SSTL12_DCI")), # A15
Subsignal("cke", Pins("K13"), IOStandard("SSTL12_DCI")), # A14
Subsignal("we_n", Pins("D15"), IOStandard("SSTL12_DCI")),
Subsignal("clk_n", Pins("L13"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_p", Pins("L14"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cs_n", Pins("B16"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("T13 N17 D24 B19 H19 H23 M22 N22"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"M16 N16 N14 N13 R15 T15 P13 P14",
"R17 R18 M20 M19 N18 N19 R20 T20",
"B24 A23 A22 B25 C24 C23 C22 B22",
"C18 C19 C21 B21 A17 A18 B20 A20",
"E18 E17 E20 F20 D19 H18 D20 J18",
"G21 E22 G22 F22 G25 F24 E25 F25",
"J24 G24 J23 H24 L23 K21 L24 K22",
"P24 N24 R23 T24 N23 P21 P23 R21"),
IOStandard("POD12_DCI"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("P15 P18 A24 B17 F17 E23 H21 R22"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("R16 P19 A25 C17 F18 F23 J21 T22"),
IOStandard("DIFF_POD12"),
Misc("OUTPUT_IMPEDANCE=RDRV_40_40"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("odt", Pins("C16"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("D21"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk300"
default_clk_period = 1e9/300e6
def __init__(self, toolchain="vivado"):
XilinxPlatform.__init__(self, "xcvu9p-fsgd2104-2l-e", _io, _connectors, toolchain=toolchain)
def create_programmer(self):
return VivadoProgrammer()
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
# For passively cooled boards, overheating is a significant risk if airflow isn't sufficient
self.add_platform_command("set_property BITSTREAM.CONFIG.OVERTEMPSHUTDOWN ENABLE [current_design]")
# Reduce programming time
self.add_platform_command("set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]")
# DDR4 memory channel C0 Clock constraint / Internal Vref
self.add_period_constraint(self.lookup_request("clk300", 0, loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 40]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 41]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 42]")
# DDR4 memory channel C1 Clock constraint / Internal Vref
self.add_period_constraint(self.lookup_request("clk300", 1, loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 65]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 66]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 67]")
# DDR4 memory channel C2 Clock constraint / Internal Vref
self.add_period_constraint(self.lookup_request("clk300", 2, loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 46]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 47]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 48]")
# DDR4 memory channel C3 Clock constraint / Internal Vref
self.add_period_constraint(self.lookup_request("clk300", 3, loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 70]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 71]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 72]")
|
litex-hub/litex-boards
|
litex_boards/platforms/sqrl_xcu1525.py
|
Python
|
bsd-2-clause
| 14,980
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests_cache.backends.base
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains BaseCache class which can be used as an in-memory cache backend or
extended to support persistence.
"""
from datetime import datetime
import hashlib
from copy import copy
from io import BytesIO
import requests
from ..compat import is_py2, urlencode, urlparse, urlunparse, parse_qsl
_DEFAULT_HEADERS = requests.utils.default_headers()
class BaseCache(object):
""" Base class for cache implementations, can be used as in-memory cache.
To extend it you can provide dictionary-like objects for
:attr:`keys_map` and :attr:`responses` or override public methods.
"""
def __init__(self, *args, **kwargs):
#: `key` -> `key_in_responses` mapping
self.keys_map = {}
#: `key_in_cache` -> `response` mapping
self.responses = {}
self._include_get_headers = kwargs.get("include_get_headers", False)
self._ignored_parameters = set(kwargs.get("ignored_parameters") or [])
def save_response(self, key, response):
""" Save response to cache
:param key: key for this response
:param response: response to save
.. note:: Response is reduced before saving (with :meth:`reduce_response`)
to make it picklable
"""
self.responses[key] = self.reduce_response(response), datetime.utcnow()
def add_key_mapping(self, new_key, key_to_response):
"""
Adds mapping of `new_key` to `key_to_response` to make it possible to
associate many keys with a single response
:param new_key: new key (e.g. url from redirect)
:param key_to_response: key which can be found in :attr:`responses`
:return:
"""
self.keys_map[new_key] = key_to_response
def get_response_and_time(self, key, default=(None, None)):
""" Retrieves response and timestamp for `key` if it's stored in cache,
otherwise returns `default`
:param key: key of resource
:param default: return this if `key` not found in cache
:returns: tuple (response, datetime)
.. note:: Response is restored after unpickling with :meth:`restore_response`
"""
try:
if key not in self.responses:
key = self.keys_map[key]
response, timestamp = self.responses[key]
except KeyError:
return default
return self.restore_response(response), timestamp
def delete(self, key):
""" Delete `key` from cache. Also deletes all responses from response history
"""
try:
if key in self.responses:
response, _ = self.responses[key]
del self.responses[key]
else:
response, _ = self.responses[self.keys_map[key]]
del self.keys_map[key]
for r in response.history:
del self.keys_map[self.create_key(r.request)]
except KeyError:
pass
def delete_url(self, url):
""" Delete response associated with `url` from cache.
Also deletes all responses from response history. Works only for GET requests
"""
self.delete(self._url_to_key(url))
def clear(self):
""" Clear cache
"""
self.responses.clear()
self.keys_map.clear()
def has_key(self, key):
""" Returns `True` if cache has `key`, `False` otherwise
"""
return key in self.responses or key in self.keys_map
def has_url(self, url):
""" Returns `True` if cache has `url`, `False` otherwise.
Works only for GET request urls
"""
return self.has_key(self._url_to_key(url))
def _url_to_key(self, url):
session = requests.Session()
return self.create_key(session.prepare_request(requests.Request('GET', url)))
_response_attrs = ['_content', 'url', 'status_code', 'cookies',
'headers', 'encoding', 'request', 'reason', 'raw']
_raw_response_attrs = ['_original_response', 'decode_content', 'headers',
'reason', 'status', 'strict', 'version']
def reduce_response(self, response, seen=None):
""" Reduce response object to make it compatible with ``pickle``
"""
if seen is None:
seen = {}
try:
return seen[id(response)]
except KeyError:
pass
result = _Store()
# prefetch
response.content
for field in self._response_attrs:
setattr(result, field, self._picklable_field(response, field))
seen[id(response)] = result
result.history = tuple(self.reduce_response(r, seen) for r in response.history)
return result
def _picklable_field(self, response, name):
value = getattr(response, name)
if name == 'request':
value = copy(value)
value.hooks = []
elif name == 'raw':
result = _RawStore()
for field in self._raw_response_attrs:
setattr(result, field, getattr(value, field, None))
if result._original_response is not None:
setattr(result._original_response, "fp", None) # _io.BufferedReader is not picklable
value = result
return value
def restore_response(self, response, seen=None):
""" Restore response object after unpickling
"""
if seen is None:
seen = {}
try:
return seen[id(response)]
except KeyError:
pass
result = requests.Response()
for field in self._response_attrs:
setattr(result, field, getattr(response, field, None))
result.raw._cached_content_ = result.content
seen[id(response)] = result
result.history = tuple(self.restore_response(r, seen) for r in response.history)
return result
def _remove_ignored_parameters(self, request):
def filter_ignored_parameters(data):
return [(k, v) for k, v in data if k not in self._ignored_parameters]
url = urlparse(request.url)
query = parse_qsl(url.query)
query = filter_ignored_parameters(query)
query = urlencode(query)
url = urlunparse((url.scheme, url.netloc, url.path, url.params, query, url.fragment))
body = request.body
content_type = request.headers.get('content-type')
if body and content_type:
if content_type == 'application/x-www-form-urlencoded':
body = parse_qsl(body)
body = filter_ignored_parameters(body)
body = urlencode(body)
elif content_type == 'application/json':
import json
body = json.loads(body)
body = filter_ignored_parameters(sorted(body.items()))
body = json.dumps(body)
return url, body
def create_key(self, request):
if self._ignored_parameters:
url, body = self._remove_ignored_parameters(request)
else:
url, body = request.url, request.body
key = hashlib.sha256()
key.update(_to_bytes(request.method.upper()))
key.update(_to_bytes(url))
if request.body:
key.update(_to_bytes(body))
else:
if self._include_get_headers and request.headers != _DEFAULT_HEADERS:
for name, value in sorted(request.headers.items()):
key.update(_to_bytes(name))
key.update(_to_bytes(value))
return key.hexdigest()
def __str__(self):
return 'keys: %s\nresponses: %s' % (self.keys_map, self.responses)
# used for saving response attributes
class _Store(object):
pass
class _RawStore(object):
# noop for cached response
def release_conn(self):
pass
# for streaming requests support
def read(self, chunk_size=1):
if not hasattr(self, "_io_with_content_"):
self._io_with_content_ = BytesIO(self._cached_content_)
return self._io_with_content_.read(chunk_size)
def _to_bytes(s, encoding='utf-8'):
if is_py2 or isinstance(s, bytes):
return s
return bytes(s, encoding)
|
YetAnotherNerd/requests-cache
|
requests_cache/backends/base.py
|
Python
|
bsd-2-clause
| 8,344
|
from educollections import ArrayList
def print_list_state(lst):
print('Size is', lst.size())
print('Contents are', lst)
print()
arr = ArrayList(10)
print('Capacity is', arr.capacity())
print_list_state(arr)
for i in range(10):
print('Prepend', i)
arr.prepend(i)
print_list_state(arr)
for i in range(10):
print('Item at index', i, 'is', arr.get(i))
print_list_state(arr)
for i in range(10):
print('Assign index', i, 'with', 10 - i)
arr.set(i, 10 - i)
print_list_state(arr)
arr.clear()
print_list_state(arr)
for i in range(10):
print('Append', i)
arr.append(i)
print_list_state(arr)
for i in [9, 4, 1, 6, 3, 0, 0, 0, 1, 0]:
    item = arr.remove(i)
print('Removed', item, 'from index', i)
print_list_state(arr)
arr.clear()
print_list_state(arr)
for i in range(5):
print('Append', i)
arr.append(i)
print_list_state(arr)
for i in [2, 3, 0, 7, 8]:
print('Insert', i + 10, 'at index', i)
arr.insert(i, i + 10)
print_list_state(arr)
|
nkraft/educollections
|
test_educollections.py
|
Python
|
bsd-2-clause
| 1,009
|
"""Sections variable used for grouping Gjoneska 2015 GO IDs."""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
SECTIONS = [ # 18 sections
("immune", [ # 15 GO-headers
"GO:0002376", # BP 564 L01 D01 M immune system process
"GO:0002682", # BP 1,183 L02 D02 AB regulation of immune system process
"GO:0030155", # BP 246 L02 D02 AB regulation of cell adhesion
"GO:0006955", # BP 100 L02 D02 GM immune response
"GO:0001817", # BP 476 L03 D03 AB regulation of cytokine production
"GO:0001775", # BP 162 L03 D03 CD cell activation
"GO:0001816", # BP 110 L03 D03 DK cytokine production
"GO:1903037", # BP 155 L04 D04 AB regulation of leukocyte cell-cell adhesion
"GO:0034097", # BP 59 L04 D04 G response to cytokine
"GO:0006954", # BP 25 L04 D04 G inflammatory response
"GO:0045087", # BP 25 L03 D04 GM innate immune response
"GO:0002521", # BP 72 L05 D05 CDF leukocyte differentiation
"GO:0007229", # BP 0 L05 D05 AB integrin-mediated signaling pathway
"GO:0050900", # BP 57 L02 D06 CDMN leukocyte migration
"GO:0042130", # BP 9 L07 D08 AB negative regulation of T cell proliferation
#"GO:0002252", # BP 138 L02 D02 L immune effector process
]),
("viral/bacteria", [ # 4 GO-headers
"GO:0016032", # BP 301 L03 D04 CJ viral process
"GO:0050792", # BP 119 L03 D04 AB regulation of viral process
"GO:0098542", # BP 37 L03 D05 GJ defense response to other organism
"GO:0009617", # BP 12 L03 D05 GJ response to bacterium
]),
("neuro", [ # 25 GO-headers
"GO:0099531", # BP 32 L01 D01 U presynaptic process in chemical synaptic Xmission
"GO:0042391", # BP 117 L03 D03 A regulation of membrane potential
"GO:0050877", # BP 96 L03 D03 K neurological system process
"GO:0050808", # BP 20 L03 D03 CDI synapse organization
"GO:0007272", # BP 13 L03 D03 CD ensheathment of neurons
"GO:0051960", # BP 236 L04 D04 AB regulation of nervous system development
"GO:0050804", # BP 120 L03 D04 AB modulation of synaptic transmission
"GO:0097485", # BP 34 L04 D04 CD neuron projection guidance
"GO:0031644", # BP 30 L04 D04 AB regulation of neurological system process
"GO:0031175", # BP 14 L04 D04 CDI neuron projection development
"GO:0035418", # BP 14 L04 D04 H protein localization to synapse
"GO:0007399", # BP 0 L04 D04 F nervous system development
"GO:0050767", # BP 192 L05 D05 AB regulation of neurogenesis
"GO:0030182", # BP 71 L05 D05 CDF neuron differentiation
"GO:0099536", # BP 40 L04 D05 CDR synaptic signaling
"GO:0048666", # BP 29 L04 D05 CDF neuron development
"GO:0010001", # BP 17 L05 D05 CDF glial cell differentiation
"GO:0051969", # BP 5 L03 D05 AB regulation of transmission of nerve impulse
"GO:0022008", # BP 3 L05 D05 CDF neurogenesis
"GO:0007158", # BP 0 L04 D05 DP neuron cell-cell adhesion
"GO:0014002", # BP 1 L05 D06 CDF astrocyte development
"GO:0048812", # BP 27 L05 D07 CDFI neuron projection morphogenesis
"GO:0048667", # BP 6 L06 D07 CDFI cell morphogenesis involved in neuron differen.
"GO:0072578", # BP 5 L05 D07 CDHI neurotransmitter-gated ion channel clustering
"GO:0007409", # BP 23 L06 D08 CDFI axonogenesis
]),
("cell death", [ # 6 GO-headers
"GO:0010941", # BP 316 L03 D03 AB regulation of cell death
"GO:0008219", # BP 104 L03 D03 CD cell death
"GO:0060548", # BP 103 L04 D04 AB negative regulation of cell death
"GO:0097190", # BP 22 L04 D04 AB apoptotic signaling pathway
"GO:0097527", # BP 0 L04 D04 AB necroptotic signaling pathway
"GO:0008637", # BP 7 L05 D05 CI apoptotic mitochondrial changes
]),
("lipid", [ # 7 GO-headers
"GO:0006629", # BP 623 L03 D03 DE lipid metabolic process
"GO:0019216", # BP 243 L04 D04 AB regulation of lipid metabolic process
"GO:0032368", # BP 130 L04 D04 AB regulation of lipid transport
"GO:0033993", # BP 112 L04 D04 G response to lipid
"GO:0006869", # BP 93 L04 D05 DH lipid transport
"GO:0055088", # BP 10 L05 D05 A lipid homeostasis
"GO:0042158", # BP 3 L05 D06 CE lipoprotein biosynthetic process
]),
("adhesion", [ # 3 GO-headers
"GO:0022610", # BP 194 L01 D01 P biological adhesion
"GO:0030155", # BP 246 L02 D02 AB regulation of cell adhesion
"GO:0007155", # BP 165 L02 D02 P cell adhesion
]),
("cell cycle", [ # 9 GO-headers
"GO:0022402", # BP 463 L02 D02 C cell cycle process
"GO:0022403", # BP 46 L02 D02 S cell cycle phase
"GO:0051726", # BP 411 L03 D03 AB regulation of cell cycle
"GO:0051301", # BP 54 L03 D03 CD cell division
"GO:0007049", # BP 12 L03 D03 CD cell cycle
"GO:0070192", # BP 17 L03 D05 CIL chromosome organization in meiotic cell cycle
"GO:0007051", # BP 19 L03 D06 CDI spindle organization
"GO:0007067", # BP 1 L04 D06 CI mitotic nuclear division
"GO:0030071", # BP 11 L06 D09 AB regulation of mitotic metaphase/anaphase transition
]),
("chromosome", [ # 9 GO-headers
"GO:0032259", # BP 119 L02 D02 E methylation
"GO:0051983", # BP 108 L03 D03 AB regulation of chromosome segregation
"GO:0007059", # BP 11 L03 D03 CD chromosome segregation
"GO:0006325", # BP 184 L04 D04 CI chromatin organization
"GO:0051276", # BP 107 L04 D04 CI chromosome organization
"GO:0032204", # BP 29 L03 D06 AB regulation of telomere maintenance
"GO:0034502", # BP 21 L06 D06 H protein localization to chromosome
"GO:0031497", # BP 11 L05 D06 CI chromatin assembly
"GO:0006334", # BP 3 L06 D07 CI nucleosome assembly
]),
("development", [ # 10 GO-headers
"GO:0032502", # BP 3,173 L01 D01 F developmental process
"GO:0022414", # BP 847 L01 D01 L reproductive process
"GO:0050793", # BP 1,881 L02 D02 AB regulation of developmental process
"GO:0048856", # BP 1,016 L02 D02 F anatomical structure development
"GO:0048646", # BP 331 L02 D02 F anatomical structure formation in morphogenesis
"GO:0007568", # BP 18 L03 D03 DF aging
"GO:0022604", # BP 129 L04 D04 AB regulation of cell morphogenesis
"GO:0000902", # BP 65 L04 D05 CDFI cell morphogenesis
"GO:0045765", # BP 14 L04 D05 AB regulation of angiogenesis
]),
("extracellular matrix", [ # 1 GO-headers
"GO:0030198", # BP 27 L04 D04 CDI extracellular matrix organization
]),
("ion", [ # 3 GO-headers
"GO:0006811", # BP 422 L04 D04 H ion transport
"GO:0055085", # BP 330 L04 D04 H transmembrane transport
"GO:0006874", # BP 33 L08 D09 ACD cellular calcium ion homeostasis
]),
("localization", [ # 3 GO-headers
"GO:0051179", # BP 2,142 L01 D01 H localization
"GO:0040011", # BP 394 L01 D01 N locomotion
"GO:0032879", # BP 1,682 L02 D02 AB regulation of localization
]),
("membrane", [ # 1 GO-headers
"GO:0061024", # BP 273 L03 D03 CI membrane organization
]),
("metabolic", [ # 7 GO-headers
"GO:0008152", # BP 6,418 L01 D01 E metabolic process
"GO:0019222", # BP 3,243 L02 D02 AB regulation of metabolic process
"GO:0009056", # BP 1,369 L02 D02 E catabolic process
"GO:0044281", # BP 2,139 L03 D03 DE small molecule metabolic process
"GO:0050790", # BP 620 L03 D03 A regulation of catalytic activity
"GO:0051186", # BP 373 L03 D03 CE cofactor metabolic process
"GO:0006259", # BP 300 L04 D06 CE DNA metabolic process
]),
("phosphorylation", [ # 3 GO-headers
"GO:0006793", # BP 798 L03 D03 CE phosphorus metabolic process
"GO:0016310", # BP 138 L05 D05 CE phosphorylation
"GO:0006468", # BP 97 L06 D07 CE protein phosphorylation
]),
("signaling", [ # 4 GO-headers
"GO:0023052", # BP 116 L01 D01 R signaling
"GO:0023051", # BP 1,364 L02 D02 AB regulation of signaling
"GO:0007165", # BP 717 L03 D03 AB signal transduction
"GO:0007267", # BP 99 L03 D04 CDR cell-cell signaling
]),
("stimulus", [ # 4 GO-headers
"GO:0050896", # BP 2,218 L01 D01 G response to stimulus
"GO:0048583", # BP 2,377 L02 D02 AB regulation of response to stimulus
"GO:0006950", # BP 492 L02 D02 G response to stress
"GO:0080134", # BP 940 L03 D03 AB regulation of response to stress
]),
("prolif_differ", [ # 3 GO-headers
"GO:0008283", # BP 158 L02 D02 D cell proliferation
"GO:0030154", # BP 494 L04 D04 CDF cell differentiation
"GO:0045595", # BP 828 L03 D03 AB regulation of cell differentiation
"GO:0042127", # BP 268 L03 D03 AB regulation of cell proliferation
]),
]
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved.
|
tanghaibao/goatools
|
goatools/test_data/sections/gjoneska_pfenning.py
|
Python
|
bsd-2-clause
| 9,848
|
# *****************************************************************
# Copyright (c) 2013 Massachusetts Institute of Technology
#
# Developed exclusively at US Government expense under US Air Force contract
# FA8721-05-C-002. The rights of the United States Government to use, modify,
# reproduce, release, perform, display or disclose this computer software and
# computer software documentation in whole or in part, in any manner and for
# any purpose whatsoever, and to have or authorize others to do so, are
# Unrestricted and Unlimited.
#
# Licensed for use under the BSD License as described in the BSD-LICENSE.txt
# file in the root directory of this release.
#
# Project: SPAR
# Authors: SY
# Description: TA2 file handle object class
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 28 Nov 2012 SY Original Version
# *****************************************************************
import os
class file_handle_object(object):
"""A class that handles writing to the file system. We want to separate
    all file system modification methods out into a separate class so that we
can overwrite them for unit testing.
"""
def __init__(self):
"""no initialization necessary."""
pass
def create_dir(self, dir_name):
"""creates a directory corresponding to dir_name"""
# this method exists to aid in unit testing. we do not want to actually
# be creating directories during unit tests, so we will use a subclass
# of this class wherein this method is overwritten.
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def get_file_object(self, file_name, command):
"""returns a file object. command shoule be either 'r' or 'w'."""
# this method exists to aid in unit testing. we do not want to actually
# be accessing files during unit tests, so we will use a subclass
# of this class wherein this method is overwritten.
return open(file_name, command)
def close_file_object(self, file_object):
"""closes the file object."""
# this method exists to aid in unit testing. we do not want to
# be closing files during unit tests, so we will use a subclass
# of this class wherein this method is overwritten.
return file_object.close()
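# A minimal sketch of how this class could be subclassed for unit tests, as the
# docstrings above suggest; the in-memory ``fake_fs`` mapping and the StringIO
# usage are illustrative assumptions, not part of this project:
#
#   from StringIO import StringIO
#
#   class fake_file_handle_object(file_handle_object):
#       def __init__(self):
#           self.fake_fs = {}
#       def create_dir(self, dir_name):
#           pass  # no real directories during tests
#       def get_file_object(self, file_name, command):
#           self.fake_fs.setdefault(file_name, StringIO())
#           return self.fake_fs[file_name]
#       def close_file_object(self, file_object):
#           pass  # keep the buffer around so tests can inspect it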
|
y4n9squared/HEtest
|
hetest/python/circuit_generation/circuit_common/file_handle_object.py
|
Python
|
bsd-2-clause
| 2,432
|
# Copyright (c) 2016, Matt Layman
'''Storage services which use the file system to store data'''
|
mblayman/markwiki
|
markwiki/storage/fs/__init__.py
|
Python
|
bsd-2-clause
| 97
|
import re
from binascii import unhexlify
from collections import namedtuple
from itertools import starmap
from streamlink.compat import urljoin, urlparse
__all__ = ["load", "M3U8Parser"]
# EXT-X-BYTERANGE
ByteRange = namedtuple("ByteRange", "range offset")
# EXT-X-KEY
Key = namedtuple("Key", "method uri iv key_format key_format_versions")
# EXT-X-MAP
Map = namedtuple("Map", "uri byterange")
# EXT-X-MEDIA
Media = namedtuple("Media", "uri type group_id language name default "
"autoselect forced characteristics")
# EXT-X-START
Start = namedtuple("Start", "time_offset precise")
# EXT-X-STREAM-INF
StreamInfo = namedtuple("StreamInfo", "bandwidth program_id codecs resolution "
"audio video subtitles")
# EXT-X-I-FRAME-STREAM-INF
IFrameStreamInfo = namedtuple("IFrameStreamInfo", "bandwidth program_id "
"codecs resolution video")
Playlist = namedtuple("Playlist", "uri stream_info media is_iframe")
Resolution = namedtuple("Resolution", "width height")
Segment = namedtuple("Segment", "uri duration title key discontinuity "
"byterange date map")
ATTRIBUTE_REGEX = (r"([A-Z\-]+)=(\d+\.\d+|0x[0-9A-z]+|\d+x\d+|\d+|"
r"\"(.+?)\"|[0-9A-z\-]+)")
class M3U8(object):
def __init__(self):
self.is_endlist = False
self.is_master = False
self.allow_cache = None
self.discontinuity_sequence = None
self.iframes_only = None
self.media_sequence = None
self.playlist_type = None
self.target_duration = None
self.start = None
self.version = None
self.media = []
self.playlists = []
self.segments = []
class M3U8Parser(object):
def __init__(self, base_uri=None):
self.base_uri = base_uri
def create_stream_info(self, streaminf, cls=None):
program_id = streaminf.get("PROGRAM-ID")
bandwidth = streaminf.get("BANDWIDTH")
if bandwidth:
bandwidth = float(bandwidth)
resolution = streaminf.get("RESOLUTION")
if resolution:
resolution = self.parse_resolution(resolution)
codecs = streaminf.get("CODECS")
if codecs:
codecs = codecs.split(",")
else:
codecs = []
if cls == IFrameStreamInfo:
return IFrameStreamInfo(bandwidth, program_id, codecs, resolution,
streaminf.get("VIDEO"))
else:
return StreamInfo(bandwidth, program_id, codecs, resolution,
streaminf.get("AUDIO"), streaminf.get("VIDEO"),
streaminf.get("SUBTITLES"))
def split_tag(self, line):
match = re.match("#(?P<tag>[\w-]+)(:(?P<value>.+))?", line)
if match:
return match.group("tag"), (match.group("value") or "").strip()
return None, None
def parse_attributes(self, value):
def map_attribute(key, value, quoted):
return (key, quoted or value)
attr = re.findall(ATTRIBUTE_REGEX, value)
return dict(starmap(map_attribute, attr))
def parse_bool(self, value):
return value == "YES"
def parse_byterange(self, value):
match = re.match("(?P<range>\d+)(@(?P<offset>.+))?", value)
if match:
return ByteRange(int(match.group("range")),
int(match.group("offset") or 0))
def parse_extinf(self, value):
match = re.match("(?P<duration>\d+(\.\d+)?)(,(?P<title>.+))?", value)
if match:
return float(match.group("duration")), match.group("title")
return (0, None)
def parse_hex(self, value):
value = value[2:]
if len(value) % 2:
value = "0" + value
return unhexlify(value)
def parse_resolution(self, value):
match = re.match("(\d+)x(\d+)", value)
if match:
width, height = int(match.group(1)), int(match.group(2))
else:
width, height = 0, 0
return Resolution(width, height)
def parse_tag(self, line, transform=None):
tag, value = self.split_tag(line)
if transform:
value = transform(value)
return value
def parse_line(self, lineno, line):
if lineno == 0 and not line.startswith("#EXTM3U"):
raise ValueError("Missing #EXTM3U header")
if not line.startswith("#"):
if self.state.pop("expect_segment", None):
byterange = self.state.pop("byterange", None)
extinf = self.state.pop("extinf", (0, None))
date = self.state.pop("date", None)
map_ = self.state.get("map")
key = self.state.get("key")
segment = Segment(self.uri(line), extinf[0],
extinf[1], key,
self.state.pop("discontinuity", False),
byterange, date, map_)
self.m3u8.segments.append(segment)
elif self.state.pop("expect_playlist", None):
streaminf = self.state.pop("streaminf", {})
stream_info = self.create_stream_info(streaminf)
playlist = Playlist(self.uri(line), stream_info, [], False)
self.m3u8.playlists.append(playlist)
elif line.startswith("#EXTINF"):
self.state["expect_segment"] = True
self.state["extinf"] = self.parse_tag(line, self.parse_extinf)
elif line.startswith("#EXT-X-BYTERANGE"):
self.state["expect_segment"] = True
self.state["byterange"] = self.parse_tag(line, self.parse_byterange)
elif line.startswith("#EXT-X-TARGETDURATION"):
self.m3u8.target_duration = self.parse_tag(line, int)
elif line.startswith("#EXT-X-MEDIA-SEQUENCE"):
self.m3u8.media_sequence = self.parse_tag(line, int)
elif line.startswith("#EXT-X-KEY"):
attr = self.parse_tag(line, self.parse_attributes)
iv = attr.get("IV")
if iv:
iv = self.parse_hex(iv)
self.state["key"] = Key(attr.get("METHOD"),
self.uri(attr.get("URI")),
iv, attr.get("KEYFORMAT"),
attr.get("KEYFORMATVERSIONS"))
elif line.startswith("#EXT-X-PROGRAM-DATE-TIME"):
self.state["date"] = self.parse_tag(line)
elif line.startswith("#EXT-X-ALLOW-CACHE"):
self.m3u8.allow_cache = self.parse_tag(line, self.parse_bool)
elif line.startswith("#EXT-X-STREAM-INF"):
self.state["streaminf"] = self.parse_tag(line, self.parse_attributes)
self.state["expect_playlist"] = True
elif line.startswith("#EXT-X-PLAYLIST-TYPE"):
self.m3u8.playlist_type = self.parse_tag(line)
elif line.startswith("#EXT-X-ENDLIST"):
self.m3u8.is_endlist = True
elif line.startswith("#EXT-X-MEDIA"):
attr = self.parse_tag(line, self.parse_attributes)
media = Media(self.uri(attr.get("URI")), attr.get("TYPE"),
attr.get("GROUP-ID"), attr.get("LANGUAGE"),
attr.get("NAME"),
self.parse_bool(attr.get("DEFAULT")),
self.parse_bool(attr.get("AUTOSELECT")),
self.parse_bool(attr.get("FORCED")),
attr.get("CHARACTERISTICS"))
self.m3u8.media.append(media)
elif line.startswith("#EXT-X-DISCONTINUITY"):
self.state["discontinuity"] = True
self.state["map"] = None
elif line.startswith("#EXT-X-DISCONTINUITY-SEQUENCE"):
self.m3u8.discontinuity_sequence = self.parse_tag(line, int)
elif line.startswith("#EXT-X-I-FRAMES-ONLY"):
self.m3u8.iframes_only = True
elif line.startswith("#EXT-X-MAP"):
attr = self.parse_tag(line, self.parse_attributes)
byterange = self.parse_byterange(attr.get("BYTERANGE", ""))
self.state["map"] = Map(attr.get("URI"), byterange)
elif line.startswith("#EXT-X-I-FRAME-STREAM-INF"):
attr = self.parse_tag(line, self.parse_attributes)
streaminf = self.state.pop("streaminf", attr)
stream_info = self.create_stream_info(streaminf, IFrameStreamInfo)
playlist = Playlist(self.uri(attr.get("URI")), stream_info, [], True)
self.m3u8.playlists.append(playlist)
elif line.startswith("#EXT-X-VERSION"):
self.m3u8.version = self.parse_tag(line, int)
elif line.startswith("#EXT-X-START"):
attr = self.parse_tag(line, self.parse_attributes)
start = Start(attr.get("TIME-OFFSET"),
self.parse_bool(attr.get("PRECISE", "NO")))
self.m3u8.start = start
def parse(self, data):
self.state = {}
self.m3u8 = M3U8()
for lineno, line in enumerate(filter(bool, data.splitlines())):
self.parse_line(lineno, line)
# Associate Media entries with each Playlist
for playlist in self.m3u8.playlists:
for media_type in ("audio", "video", "subtitles"):
group_id = getattr(playlist.stream_info, media_type, None)
if group_id:
for media in filter(lambda m: m.group_id == group_id,
self.m3u8.media):
playlist.media.append(media)
self.m3u8.is_master = not not self.m3u8.playlists
return self.m3u8
def uri(self, uri):
if uri and urlparse(uri).scheme:
return uri
elif self.base_uri and uri:
return urljoin(self.base_uri, uri)
else:
return uri
def load(data, base_uri=None, parser=M3U8Parser):
"""Attempts to parse a M3U8 playlist from a string of data.
If specified, *base_uri* is the base URI that relative URIs will
be joined together with, otherwise relative URIs will be as is.
If specified, *parser* can be a M3U8Parser subclass to be used
to parse the data.
"""
return parser(base_uri).parse(data)
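# A minimal usage sketch; the playlist ``text`` and ``base`` values below are
# hypothetical, chosen only to illustrate how load() resolves relative URIs:
#
#   text = (
#       "#EXTM3U\n"
#       "#EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720\n"
#       "chunklist.m3u8\n"
#   )
#   base = "http://example.com/hls/"
#   playlist = load(text, base_uri=base)
#   playlist.is_master                             # True - only variant playlists were found
#   playlist.playlists[0].uri                      # "http://example.com/hls/chunklist.m3u8"
#   playlist.playlists[0].stream_info.resolution   # Resolution(width=1280, height=720)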
|
javiercantero/streamlink
|
src/streamlink/stream/hls_playlist.py
|
Python
|
bsd-2-clause
| 10,407
|
import functools
import itertools
from ..backend_object import BackendObject
def normalize_types(f):
@functools.wraps(f)
def normalizer(self, region, o):
'''
Convert any object to an object that we can process.
'''
if isinstance(o, IfProxy):
return NotImplemented
if isinstance(o, Base):
o = o.model
if not isinstance(o, StridedInterval):
raise ClaripyVSAOperationError('Unsupported operand type %s' % type(o))
return f(self, region, o)
return normalizer
def normalize_types_one_arg(f):
@functools.wraps(f)
def normalizer(self, o):
'''
Convert any object to an object that we can process.
'''
if isinstance(o, IfProxy):
return NotImplemented
if isinstance(o, Base):
o = o.model
return f(self, o)
return normalizer
vs_id_ctr = itertools.count()
class ValueSet(BackendObject):
def __init__(self, name=None, region=None, bits=None, val=None):
self._name = name
if self._name is None:
self._name = 'VS_%d' % vs_id_ctr.next()
self._regions = {}
self._reversed = False
if region is not None and bits is not None and val is not None:
self.set_si(region, StridedInterval(bits=bits, stride=0, lower_bound=val, upper_bound=val))
@property
def name(self):
return self._name
@property
def regions(self):
return self._regions
@property
def reversed(self):
return self._reversed
@property
def unique(self):
return len(self.regions) == 1 and self.regions.values()[0].unique
@property
def bits(self):
return self.size()
@normalize_types
def set_si(self, region, si):
if not isinstance(si, StridedInterval):
raise ClaripyVSAOperationError('Unsupported type %s for si' % type(si))
self._regions[region] = si
def get_si(self, region):
if region in self._regions:
return self._regions[region]
# TODO: Should we return a None, or an empty SI instead?
return None
def items(self):
return self._regions.items()
def size(self):
return len(self)
@normalize_types
def merge_si(self, region, si):
if region not in self._regions:
self.set_si(region, si)
else:
self._regions[region] = self._regions[region].union(si)
@normalize_types
def remove_si(self, region, si):
raise NotImplementedError()
    def __repr__(self):
        s = ", ".join("%s: %s" % (region, si) for region, si in self._regions.items())
        return "(" + s + ")"
def __len__(self):
if self.is_empty:
return 0
return len(self._regions.items()[0][1])
@normalize_types_one_arg
def __add__(self, other):
if type(other) is ValueSet:
# Normally, addition between two addresses doesn't make any sense.
# So we only handle those corner cases
raise NotImplementedError()
else:
new_vs = ValueSet()
for region, si in self._regions.items():
new_vs._regions[region] = si + other
return new_vs
@normalize_types_one_arg
def __radd__(self, other):
return self.__add__(other)
@normalize_types_one_arg
def __sub__(self, other):
if type(other) is ValueSet:
            # It might happen due to imprecision of our analysis (mostly due to the absence of contexts)
if self.regions.keys() == other.regions.keys():
# Handle it here
new_vs = ValueSet()
for region, si in self._regions.iteritems():
new_vs._regions[region] = si - other._regions[region]
return new_vs
else:
__import__('ipdb').set_trace()
raise NotImplementedError()
else:
new_vs = ValueSet()
for region, si in self._regions.items():
new_vs._regions[region] = si - other
return new_vs
@normalize_types_one_arg
def __and__(self, other):
if type(other) is ValueSet:
# An address bitwise-and another address? WTF?
assert False
if BoolResult.is_true(other == 0):
# Corner case: a & 0 = 0
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0)
new_vs = ValueSet()
if BoolResult.is_true(other < 0x100):
# Special case - sometimes (addr & mask) is used for testing whether the address is aligned or not
# We return an SI instead
ret = None
for region, si in self._regions.items():
r = si.__and__(other)
ret = r if ret is None else ret.union(r)
return ret
else:
for region, si in self._regions.items():
r = si.__and__(other)
new_vs._regions[region] = r
return new_vs
def __eq__(self, other):
if isinstance(other, ValueSet):
same = False
different = False
for region, si in other.regions.items():
if region in self.regions:
comp_ret = self.regions[region] == si
if BoolResult.has_true(comp_ret):
same = True
if BoolResult.has_false(comp_ret):
different = True
else:
different = True
if same and not different:
return TrueResult()
if same and different:
return MaybeResult()
return FalseResult()
elif isinstance(other, StridedInterval):
if 'global' in self.regions:
return self.regions['global'] == other
else:
return FalseResult()
else:
return FalseResult()
def __ne__(self, other):
return ~ (self == other)
def eval(self, n):
results = []
for _, si in self._regions.items():
if len(results) < n:
results.extend(si.eval(n))
return results
def copy(self):
vs = ValueSet()
vs._regions = self._regions.copy()
vs._reversed = self._reversed
return vs
def reverse(self):
print "valueset.reverse is not properly implemented"
vs = self.copy()
vs._reversed = not vs._reversed
return vs
@property
def is_empty(self):
return len(self._regions) == 0
def extract(self, high_bit, low_bit):
new_vs = ValueSet()
for region, si in self._regions.items():
new_vs.set_si(region, si.extract(high_bit, low_bit))
return new_vs
def concat(self, b):
new_vs = ValueSet()
# TODO: This logic is obviously flawed. Correct it later :-(
if isinstance(b, StridedInterval):
for region, si in self._regions.items():
new_vs.set_si(region, si.concat(b))
elif isinstance(b, ValueSet):
for region, si in self._regions.items():
new_vs.set_si(region, si.concat(b.get_si(region)))
else:
raise ClaripyVSAOperationError('ValueSet.concat() got an unsupported operand %s (type %s)' % (b, type(b)))
return new_vs
@normalize_types_one_arg
def union(self, b):
merged_vs = self.copy()
if type(b) is ValueSet:
for region, si in b.regions.items():
if region not in merged_vs._regions:
merged_vs._regions[region] = si
else:
merged_vs._regions[region] = merged_vs._regions[region].union(si)
else:
for region, si in self._regions.items():
merged_vs._regions[region] = merged_vs._regions[region].union(b)
return merged_vs
@normalize_types_one_arg
def widen(self, b):
merged_vs = self.copy()
if isinstance(b, ValueSet):
for region, si in b.regions.items():
if region not in merged_vs.regions:
merged_vs.regions[region] = si
else:
merged_vs.regions[region] = merged_vs.regions[region].widen(si)
else:
for region, si in self._regions.iteritems():
merged_vs._regions[region] = merged_vs._regions[region].widen(b)
return merged_vs
def identical(self, o):
"""
Used to make exact comparisons between two ValueSets.
:param o: The other ValueSet to compare with
        :return: True if they are exactly the same, False otherwise
"""
if self._name != o._name or self._reversed != o._reversed:
return False
for region, si in self.regions.items():
if region in o.regions:
o_si = o.regions[region]
if not si.identical(o_si):
return False
else:
return False
return True
from ..ast_base import Base
from .strided_interval import StridedInterval
from .ifproxy import IfProxy
from .bool_result import BoolResult, TrueResult, FalseResult, MaybeResult
from .errors import ClaripyVSAOperationError
|
zhuyue1314/claripy
|
claripy/vsa/valueset.py
|
Python
|
bsd-2-clause
| 9,471
|
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import yaml
from sklearn.learning_curve import learning_curve
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential, model_from_yaml
from keras.wrappers.scikit_learn import KerasRegressor
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from crimeprediction.vectorize import vectorize
if not hasattr(settings, 'OUTPUTS_DIR'):
raise ImproperlyConfigured(
'The directory to save output files is missing from your settings')
elif not os.path.exists(settings.OUTPUTS_DIR):
os.makedirs(settings.OUTPUTS_DIR)
if not hasattr(settings, 'MODEL_DIR'):
raise ImproperlyConfigured(
'The directory to save the model is missing from your settings')
elif not os.path.exists(settings.MODEL_DIR):
os.makedirs(settings.MODEL_DIR)
def run_network(grid_size, period, crime_type=None, seasonal=False):
'''Build, train and run LSTM network
:param grid_size: size of the cell dimension for the grid
:param period: timestep of crime data
    :param crime_type: type of crime to train on; a value of None
     trains on all crime types
    :param seasonal: whether to incorporate seasonality
'''
vectors = vectorize(
grid_size, period, crime_type=crime_type, seasonal=seasonal)
global_start_time = time.time()
print 'Loading Data...'
dim = len(vectors[0])
result = np.array(vectors)
print "Data : ", result.shape
row = int(round(0.7 * result.shape[0]))
train = result[:row]
X_train = train[:-1]
y_train = train[1:]
test = result[row:]
X_test = test[:-1]
y_test = test[1:]
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
print '\nData Loaded. Compiling...\n'
print X_train
print y_train
model = Sequential()
model.add(LSTM(dim, input_shape=X_train.shape[1:]))
model.compile(loss='mse', optimizer='rmsprop',)
print("Train...")
try:
model.fit(X_train, y_train, nb_epoch=1000, shuffle=False)
except KeyboardInterrupt:
pass
print 'Training duration (s) : ', time.time() - global_start_time
predicted = model.predict(X_test)
norm_predicted = predicted
accuracy = []
f1scr = []
for x, data in enumerate(y_test):
print len(data)
print len(predicted[x])
correct = 0
total = 0
truepos = 0
falsepos = 0
trueneg = 0
falseneg = 0
for y, node in enumerate(data):
total += 1
if predicted[x][y] > 0:
norm_predicted[x][y] = 1
if node == 1:
correct += 1
truepos += 1
else:
falsepos += 1
else:
norm_predicted[x][y] = -1
if node == -1:
correct += 1
trueneg += 1
else:
falseneg += 1
print "correct", correct
print "total", total
act = float(correct) / total
print act
accuracy.append(act)
precision = truepos / float(truepos+falsepos)
recall = truepos / float(truepos+falseneg)
f1 = (precision * recall * 2) / float(precision + recall)
f1scr.append(f1)
print accuracy
print f1
crime_verbose = crime_type if crime_type is not None else "ALL"
output_folder = settings.OUTPUTS_DIR + \
'Results_{0}_{1}_{2}_{3}/'.format(
grid_size, crime_verbose, period, seasonal)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
results_file = output_folder + 'results.txt'
predicted_file = output_folder + 'predicted.txt'
X_train_file = output_folder + 'X_train.txt'
y_train_file = output_folder + 'y_train.txt'
X_test_file = output_folder + 'X_test.txt'
y_test_file = output_folder + 'y_test.txt'
np.savetxt(X_train_file, X_train, fmt='%d')
np.savetxt(y_train_file, y_train, fmt='%d')
np.savetxt(X_test_file, X_test, fmt='%d')
np.savetxt(y_test_file, y_test, fmt='%d')
np.savetxt(predicted_file, norm_predicted, fmt='%d')
results = "Average Accuracy:" + str(np.average(accuracy)) + '\n'
results += "Average F1 Score:" + str(np.average(f1scr))
with open(results_file, "w") as output_file:
output_file.write(results)
params = {
'grid_size': grid_size,
'period': period,
'crime_type': crime_type if crime_type is not None else 'all',
'seasonal': seasonal,
}
save_trained_model(model, yaml.dump(params))
def save_trained_model(model, params_string):
'''
    saves the trained model to the directory and files specified by the MODEL_* settings variables
:param model: model to be saved
:param params_string: a yaml string of parameters used for the model: crime_type, period, grid_size and seasonality
'''
folder = settings.MODEL_DIR
archi = folder + settings.MODEL_ARCHITECTURE
weights = folder + settings.MODEL_WEIGHTS
params = folder + settings.MODEL_PARAMS
yaml_string = model.to_yaml()
open(archi, 'w').write(yaml_string)
open(params, 'w').write(params_string)
model.save_weights(weights, overwrite=True)
def get_trained_model():
'''
reconstruct trained model from saved files
    :rtype: a tuple of the reconstructed model and a dict of the
    parameters used
'''
folder = settings.MODEL_DIR
archi = folder + settings.MODEL_ARCHITECTURE
weights = folder + settings.MODEL_WEIGHTS
params = folder + settings.MODEL_PARAMS
params = yaml.safe_load(open(params).read())
model = model_from_yaml(open(archi).read())
model.load_weights(weights)
model.compile(loss='mse', optimizer='rmsprop',)
return model, params
def predict_next(model, **params):
'''
predicts next crime hotspots
:param model: the model to be used for prediction
    :param **params: the parameters used by the model (grid_size, period,
     crime_type, seasonal), passed as keyword arguments
'''
vectors = vectorize(**params)
print 'Loading Data...'
dim = len(vectors[0])
result = np.array(vectors)
result = np.reshape(result, (result.shape[0], result.shape[1], 1))
predicted = model.predict(result)
return predicted[-1]
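# A minimal sketch of reloading a saved model and predicting the next timestep,
# assuming run_network() has already been called so the files named by the
# MODEL_* settings exist:
#
#   model, params = get_trained_model()
#   next_hotspots = predict_next(model, **params)
#   print next_hotspots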
|
jayArnel/crimemapping
|
crimeprediction/network.py
|
Python
|
bsd-2-clause
| 6,415
|
from JumpScale import j
import unittest
import JumpScale.portal
descr = """
test jpackages over rest to portal (appserver)
"""
organization = "jumpscale"
author = "kristof@incubaid.com"
license = "bsd"
version = "1.0"
category = "appserver.jpackages.rest,portal"
enable=True
priority=5
class TEST(unittest.TestCase):
def setUp(self):
self.client= j.core.portal.getClient("127.0.0.1", 81, "1234") #@need to read from config file for the secret
self.actor = self.client.getActor("system", "packagemanager")
def test_getJpackages(self):
l1=self.actor.getJPackages(j.application.whoAmI.nid)
print l1
l2=self.actor.getJPackages(j.application.whoAmI.nid,"jumpscale")
print l2
def test_getJpackageInfo(self):
jp=self.actor.getJPackageInfo(j.application.whoAmI.nid,"jumpscale","osis")
print jp
def test_getJpackageFilesInfo(self):
info=self.actor.getJPackageFilesInfo(j.application.whoAmI.nid,"jumpscale","osis")
# print info
def test_action(self):
info=self.actor.action(j.application.whoAmI.nid,domain="jumpscale",pname="osis",action="start")
print info
#@todo finish tests and make better
|
Jumpscale/jumpscale6_core
|
apps/gridportal/tests/jpackagesRest__test.py
|
Python
|
bsd-2-clause
| 1,238
|
try:
import simplejson as json
except ImportError:
import json
import functools
from cgi import parse_header
def wrap_json(func=None, *, encoder=json.JSONEncoder, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
encodes the json responses.
NOTE: this middleware exists just for backward compatibility,
but it has some limitations in terms of response body encoding
    because it only accepts list or dictionary outputs, while the json
    specification also allows other values to be stored.
    It is recommended to use `wrap_json_body` and `wrap_json_response`
instead of this.
"""
if func is None:
return functools.partial(
wrap_json,
encoder=encoder,
preserve_raw_body=preserve_raw_body
)
wrapped_func = wrap_json_body(func, preserve_raw_body=preserve_raw_body)
wrapped_func = wrap_json_response(wrapped_func, encoder=encoder)
return wrapped_func
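# A minimal usage sketch; the handler and the ``make_response`` helper below are
# illustrative assumptions, not part of this module. The only requirement is a
# request with ``body``/``headers`` and a response with ``body``/``headers``:
#
#   @wrap_json
#   def create_user(request):
#       name = request.body["name"]   # body already parsed from json
#       response = make_response({"created": name})
#       response.headers["Content-Type"] = "application/json"
#       return response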
def wrap_json_body(func=None, *, preserve_raw_body=False):
"""
A middleware that parses the body of json requests and
    adds it to the request under the `body` attribute (replacing
the previous value). Can preserve the original value in
a new attribute `raw_body` if you give preserve_raw_body=True.
"""
if func is None:
return functools.partial(
wrap_json_body,
preserve_raw_body=preserve_raw_body
)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if preserve_raw_body:
request.raw_body = request.body
if ctype == "application/json":
request.body = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_params(func):
"""
    A middleware that parses the body of json requests and
    adds it to the request under the `params` attribute.
"""
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
ctype, pdict = parse_header(request.headers.get('Content-Type', ''))
if ctype == "application/json":
request.params = json.loads(request.body.decode("utf-8")) if request.body else None
return func(request, *args, **kwargs)
return wrapper
def wrap_json_response(func=None, *, encoder=json.JSONEncoder):
"""
    A middleware that encodes the response body in json when
    the "Content-Type" header is "application/json".
    This middleware accepts an optional `encoder` parameter that
    allows the user to specify their own json encoder class.
"""
if func is None:
return functools.partial(wrap_json_response, encoder=encoder)
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if "Content-Type" in response.headers and response.headers['Content-Type'] is not None:
ctype, pdict = parse_header(response.headers.get('Content-Type', ''))
if ctype == "application/json" and (isinstance(response.body, dict) or isinstance(response.body, list)):
response.body = json.dumps(response.body, cls=encoder)
return response
return wrapper
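# As recommended above, the body and response middlewares can also be applied
# separately; a minimal sketch, assuming a custom encoder is wanted. The
# ``DecimalEncoder`` and ``handler`` names are illustrative assumptions:
#
#   class DecimalEncoder(json.JSONEncoder):
#       def default(self, o):
#           import decimal
#           if isinstance(o, decimal.Decimal):
#               return float(o)
#           return super(DecimalEncoder, self).default(o)
#
#   @wrap_json_response(encoder=DecimalEncoder)
#   @wrap_json_body(preserve_raw_body=True)
#   def handler(request):
#       ...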
|
jespino/anillo
|
anillo/middlewares/json.py
|
Python
|
bsd-2-clause
| 3,333
|
from __future__ import absolute_import
import re, os, sys
from clay import app
import clay.config
from flask import make_response, request, redirect, render_template, url_for
from epubber.fimfic_epubgen import FimFictionEPubGenerator
site_epub_classes = [
FimFictionEPubGenerator
]
accesslog = clay.config.get_logger('epubber_access')
#####################################################################
# Main App Views Section
#####################################################################
@app.route('/', methods=['GET', 'POST'])
def main_view():
story = request.args.get("story") or None
if story:
data = None
for epgenclass in site_epub_classes:
epgen = epgenclass()
if epgen.handle_url(story):
epub_file, data = epgen.gen_epub()
accesslog.info('%(title)s - %(url)s' % epgen.metas)
del epgen
response = make_response(data)
response.headers["Content-Type"] = "application/epub+zip"
response.headers["Content-Disposition"] = "attachment; filename=%s" % epub_file
return response
del epgen
return ("Cannot generate epub for this URL.", 400)
return render_template("main.html")
#####################################################################
# Secondary Views Section
#####################################################################
@app.route('/health', methods=['GET'])
def health_view():
'''
Heartbeat view, because why not?
'''
return ('OK', 200)
#####################################################################
# URL Shortener Views Section
#####################################################################
@app.route('/img/<path>', methods=['GET', 'POST'])
def static_img_proxy_view(path):
'''
Make shorter URLs for image files.
'''
path = re.sub(r'[^A-Za-z0-9_.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('img', path)))
@app.route('/js/<path>', methods=['GET', 'POST'])
def static_js_proxy_view(path):
'''
Make shorter URLs for javascript files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('js', path)))
@app.route('/css/<path>', methods=['GET', 'POST'])
def static_css_proxy_view(path):
'''
Make shorter URLs for CSS files.
'''
path = re.sub(r'[^A-Za-z0-9_+.-]', r'_', path)
return redirect(url_for('static', filename=os.path.join('css', path)))
#####################################################################
# Main
#####################################################################
def main():
# Make templates copacetic with UTF8
reload(sys)
sys.setdefaultencoding('utf-8')
# App Config
app.secret_key = clay.config.get('flask.secret_key')
main()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 nowrap
|
revarbat/epubber
|
epubber/views/main.py
|
Python
|
bsd-2-clause
| 2,968
|
# -*- coding: utf-8 -*-
"""
PyLTI is a module that implements IMS LTI in python.
The API uses decorators to wrap functions with LTI functionality.
"""
VERSION = "0.3.2" # pragma: no cover
|
layus/pylti
|
pylti/__init__.py
|
Python
|
bsd-2-clause
| 185
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from copy import copy, deepcopy

import matplotlib as mpl
import matplotlib.pyplot as plt
from .theme import theme_base
class theme_xkcd(theme_base):
"""
xkcd theme
    The theme internally uses the settings from pyplot.xkcd().
"""
def __init__(self, scale=1, length=100, randomness=2):
super(theme_xkcd, self).__init__()
with plt.xkcd(scale=scale, length=length, randomness=randomness):
_xkcd = mpl.rcParams.copy()
            # no need to get a deprecation warning for nothing...
for key in mpl._deprecated_map:
if key in _xkcd:
del _xkcd[key]
if 'tk.pythoninspect' in _xkcd:
del _xkcd['tk.pythoninspect']
self._rcParams.update(_xkcd)
def __deepcopy__(self, memo):
class _empty(object):
pass
result = _empty()
result.__class__ = self.__class__
result.__dict__["_rcParams"] = {}
for k, v in self._rcParams.items():
try:
result.__dict__["_rcParams"][k] = deepcopy(v, memo)
except NotImplementedError:
                # deepcopy raises an error for objects that are derived from or
                # composed of matplotlib.transform.TransformNode.
                # Not desirable, but probably requires an upstream fix.
                # In particular, XKCD uses matplotlib.patheffects.withStroke
# -gdowding
result.__dict__["_rcParams"][k] = copy(v)
return result
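# A minimal usage sketch, assuming the usual ggplot entry points (ggplot, aes,
# geom_line) and a pandas DataFrame named ``df`` with ``x`` and ``y`` columns:
#
#   from ggplot import ggplot, aes, geom_line
#   p = ggplot(aes(x='x', y='y'), data=df) + geom_line() + theme_xkcd(scale=1.5)
#   print(p)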
|
yhat/ggplot
|
ggplot/themes/theme_xkcd.py
|
Python
|
bsd-2-clause
| 1,582
|
# THREE GOLD STARS
# Sudoku [http://en.wikipedia.org/wiki/Sudoku]
# is a logic puzzle where a game
# is defined by a partially filled
# 9 x 9 square of digits where each square
# contains one of the digits 1,2,3,4,5,6,7,8,9.
# For this question we will generalize
# and simplify the game.
# Define a procedure, check_sudoku,
# that takes as input a square list
# of lists representing an n x n
# sudoku puzzle solution and returns the boolean
# True if the input is a valid
# sudoku square and returns the boolean False
# otherwise.
# A valid sudoku square satisfies these
# two properties:
# 1. Each column of the square contains
# each of the whole numbers from 1 to n exactly once.
# 2. Each row of the square contains each
# of the whole numbers from 1 to n exactly once.
# You may assume that the input is square and contains at
# least one row and column.
correct = [[1,2,3],
[2,3,1],
[3,1,2]]
incorrect = [[1,2,3,4],
[2,3,1,3],
[3,1,2,3],
[4,4,4,4]]
incorrect2 = [[1,2,3,4],
[2,3,1,4],
[4,1,2,3],
[3,4,1,2]]
incorrect3 = [[1,2,3,4,5],
[2,3,1,5,6],
[4,5,2,1,3],
[3,4,5,2,1],
[5,6,4,3,2]]
incorrect4 = [['a','b','c'],
['b','c','a'],
['c','a','b']]
incorrect5 = [ [1, 1.5],
[1.5, 1]]
def check_sudoku(grid):
for ii, row in enumerate(grid):
column = []
for jj in range(len(row)):
column.append(grid[jj][ii])
for jj in range(len(grid)):
if jj+1 not in row or jj+1 not in column:
return False
return True
print check_sudoku(incorrect)
#>>> False
print check_sudoku(correct)
#>>> True
print check_sudoku(incorrect2)
#>>> False
print check_sudoku(incorrect3)
#>>> False
print check_sudoku(incorrect4)
#>>> False
print check_sudoku(incorrect5)
#>>> False
|
ezralalonde/cloaked-octo-sansa
|
03/hw/06.py
|
Python
|
bsd-2-clause
| 1,984
|
###############################################################################
## Imports
###############################################################################
# Django
from django import template
from django.forms.forms import BoundField
###############################################################################
## Filters
###############################################################################
register = template.Library()
class NamelessField(BoundField):
def __init__(self, field):
self.__dict__ = field.__dict__
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
name = ""
return widget.render(name, self.value(), attrs=attrs)
@register.filter(name='namelessfield')
def namelessfield(field):
return NamelessField(field)
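# A minimal usage sketch in a template, assuming this tag library is loaded by
# its module name and ``form.password`` is a bound field on some form:
#
#   {% load custom_utils %}
#   {{ form.password|namelessfield }}
#
# The field is rendered with its usual id and widget attrs but with an empty
# name, so the value is never submitted under that field's name.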
|
scdoshi/django-bits
|
bits/templatetags/custom_utils.py
|
Python
|
bsd-2-clause
| 1,358
|
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have received a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
import time
def Rank(self, parts, fromloc, overriderank, server=None):
username = parts[2].lower()
year = time.strftime("%Y")
month = time.strftime("%m")
if username == "099":
if not (int(year) > 2012 and int(month) > 3):
return "099 may not be ranked until April 1st 2013."
if server:
factory = server
else:
factory = self.client.factory
if parts[1] == "builder":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOp(self.client.username) or world.isOwner(self.client.username) or self.client.isModPlus()):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOp(parts[-1]) or world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
world.builders.add(username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendBuilderUpdate()
return ("%s is now a Builder" % username)
elif parts[1] == "op":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOwner(self.client.username) or self.client.isModPlus()):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
world.ops.add(username)
return ("Opped %s" % username)
elif parts[1] == "worldowner":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not self.client.isWorldOwnerPlus() or overriderank:
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
self.client.world.owner = (username)
return ("%s is now a World Owner." % username)
elif parts[1] == "member":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.members.add(username)
if username in factory.usernames:
factory.usernames[username].sendMemberUpdate()
return ("%s is now a Member." % username)
elif parts[1] == "globalbuilder":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.globalbuilders.add(username)
if username in factory.usernames:
factory.usernames[username].sendGlobalBuilderUpdate()
return ("%s is now a Global Builder." % username)
elif parts[1] == "mod":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.mods.add(username)
if username in factory.usernames:
factory.usernames[username].sendModUpdate()
return ("%s is now a Mod." % username)
elif parts[1] == "admin":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.admins.add(username)
if username in factory.usernames:
factory.usernames[username].sendAdminUpdate()
return ("%s is now an admin." % username)
elif parts[1] == "coder":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.coders.add(username)
if username in factory.usernames:
factory.usernames[username].sendCoderUpdate()
return ("%s is now a coder." % username)
elif parts[1] == "director":
if not server:
if not self.client.isHiddenPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isHiddenPlus(parts[-1]):
return ("You are not a high enough rank!")
factory.directors.add(username)
if username in factory.usernames:
factory.usernames[username].sendDirectorUpdate()
return ("%s is now an director." % username)
elif parts[1] == "hidden":
if not server:
if not self.client.isServerOwner():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isServerOwner(parts[-1]):
return ("You are not a high enough rank!")
factory.hidden.add(username)
if username in factory.usernames:
factory.usernames[username].sendHiddenUpdate()
return ("%s is now hidden." % username)
else:
return ("Unknown rank \"%s\""%parts[1])
def DeRank(self, parts, fromloc, overriderank, server=None):
username = parts[2].lower()
if server:
factory = server
else:
factory = self.client.factory
if parts[1] == "builder":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOp(self.client.username) or world.isOwner(self.client.username) or self.client.isModPlus()):
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not (world.isOp(parts[-1]) or world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
try:
world.builders.remove(username)
except KeyError:
return ("%s is not a Builder." % username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendBuilderUpdate()
return ("Removed %s as Builder" % username)
elif parts[1] == "op":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not overriderank:
if not (world.isOwner(self.client.username) or self.client.isModPlus()) and world != self.client.world:
return ("You are not a World Owner!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
try:
world.ops.remove(username)
except KeyError:
return ("%s is not an op." % username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendOpUpdate()
return ("Deopped %s" % username)
elif parts[1] == "worldowner":
if len(parts) > 3:
try:
world = factory.worlds[parts[3]]
except KeyError:
return ("Unknown world \"%s\"" %parts[3])
else:
if not server:
world = self.client.world
else:
return "You must provide a world"
if not server:
if not (world.isOwner(self.client.username) or self.client.isModPlus()) and world != self.client.world:
return ("You are not a World Owner!")
else:
if fromloc != "console":
if not (world.isOwner(parts[-1]) or factory.isModPlus(parts[-1])):
return ("You are not a high enough rank!")
try:
self.client.world.owner = ("")
except KeyError:
return ("%s is not a world owner." % username)
if username in factory.usernames:
user = factory.usernames[username]
if user.world == world:
user.sendOpUpdate()
return ("%s is no longer the World Owner." % username)
elif parts[1] == "member":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.members:
factory.members.remove(username)
else:
return ("No such member \"%s\"" % username.lower())
if username in factory.usernames:
factory.usernames[username].sendMemberUpdate()
return ("%s is no longer a Member." % username.lower())
elif parts[1] == "globalbuilder":
if not server:
if not self.client.isModPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isModPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.globalbuilders:
factory.globalbuilders.remove(username)
else:
return ("No such global builder \"%s\"" % username.lower())
if username in factory.usernames:
factory.usernames[username].sendGlobalBuilderUpdate()
return ("%s is no longer a Member." % username.lower())
elif parts[1] == "mod":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.mods:
factory.mods.remove(username)
else:
return ("No such mod \"%s\"" % username.lower())
if username in factory.usernames:
factory.usernames[username].sendModUpdate()
return ("%s is no longer a Mod." % username.lower())
elif parts[1] == "admin":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.admins:
factory.admins.remove(username)
if username in factory.usernames:
factory.usernames[username].sendAdminUpdate()
return ("%s is no longer an admin." % username.lower())
else:
return ("No such admin \"%s\""% username.lower())
elif parts[1] == "coder":
if not server:
if not self.client.isDirectorPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isDirectorPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.coders:
factory.coders.remove(username)
if username in factory.usernames:
factory.usernames[username].sendCoderUpdate()
return ("%s is no longer a coder." % username.lower())
else:
return ("No such admin \"%s\""% username.lower())
elif parts[1] == "director":
if not server:
if not self.client.isHiddenPlus():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isHiddenPlus(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.directors:
factory.directors.remove(username)
if username in factory.usernames:
factory.usernames[username].sendDirectorUpdate()
return ("%s is no longer an director." % username.lower())
else:
return ("No such director \"%s\""% username.lower())
elif parts[1] == "hidden":
if not server:
if not self.client.isServerOwner():
return ("You are not a high enough rank!")
else:
if fromloc != "console":
if not factory.isServerOwner(parts[-1]):
return ("You are not a high enough rank!")
if username in factory.hidden:
factory.hidden.remove(username)
if username in factory.usernames:
factory.usernames[username].sendHiddenUpdate()
return ("%s is no longer hidden." % username.lower())
else:
return ("No such hidden \"%s\""% username.lower())
else:
return ("Unknown rank \"%s\""%parts[1])
def Spec(self, username, fromloc, overriderank, server=None):
if server:
factory = server
else:
factory = self.client.factory
if username in factory.directors:
return ("You cannot make staff a spec!")
if username in factory.coders:
return ("You cannot make staff a spec!")
if username in factory.admins:
return ("You cannot make staff a spec!")
if username in factory.mods:
return ("You cannot make staff a spec!")
factory.spectators.add(username)
if username in factory.usernames:
factory.usernames[username].sendSpectatorUpdate()
return ("%s is now a spec." % username)
def Staff(self, server=None):
Temp = []
if server:
factory = server
else:
factory = self.client.factory
if len(factory.directors):
Temp.append (["Directors:"] + list(factory.directors))
if len(factory.coders):
Temp.append (["Coders:"] + list(factory.coders))
if len(factory.admins):
Temp.append (["Admins:"] + list(factory.admins))
if len(factory.mods):
Temp.append (["Mods:"] + list(factory.mods))
return Temp
def Credits(self=None):
Temp = []
Temp.append ("Thanks to the following people for making Arc possible...")
Temp.append ("Mojang Specifications (Minecraft): Notch, dock, ez, ...")
Temp.append ("Creator: aera (Myne and The Archives)")
Temp.append ("Devs (Arc/The Archives): Adam01, gdude2002 (arbot), NotMeh, revenant,")
Temp.append ("Devs (iCraft): AndrewPH, destroyerx1, Dwarfy, erronjason, eugo (Knossus), goober, gothfox, ntfwc, Saanix, sk8rjwd, tehcid, Varriount, willempiee")
Temp.append ("Devs (blockBox): fizyplankton, tyteen4a03, UberFoX")
Temp.append ("Others: 099, 2k10, Akai, Antoligy, Aquaskys, aythrea, Bidoof_King, Bioniclegenius (Red_Link), blahblahbal, BlueProtoman, CDRom, fragmer, GLaDOS (Cortana), iMak, Kelraider, MAup, MystX, PyroPyro, Rils, Roadcrosser, Roujo, setveen, TheUndeadFish, TkTech, Uninspired")
return Temp
def makefile(filename):
import os
dir = os.path.dirname(filename)
try:
os.stat(dir)
except:
try:
os.mkdir(dir)
except OSError:
pass
if not os.path.exists(filename):
with open(filename, "w") as f:
f.write("")
del os
def makedatfile(filename):
import os
dir = os.path.dirname(filename)
try:
os.stat(dir)
except:
try:
os.mkdir(dir)
except OSError:
pass
if not os.path.exists(filename):
with open(filename, "w") as f:
f.write("(dp1\n.")
del os
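# Note on the literal written by makedatfile(): "(dp1\n." appears to be the
# protocol-0 pickle of an empty dict, so the created .dat file can be read
# back with pickle.load() as {} the first time it is used.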
def checkos(self):
import os
try:
if os.uname()[0] == "Darwin":
osname = "Mac"
else:
osname = "Linux"
except AttributeError:
# os.uname() is not available on Windows
osname = "Windows"
return osname
|
TheArchives/Nexus
|
core/globals.py
|
Python
|
bsd-2-clause
| 18,259
|
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
from numba import njit, prange
import numpy as np
import argparse
import time
@njit(parallel=True)
def kde(X):
b = 0.5
points = np.array([-1.0, 2.0, 5.0])
N = points.shape[0]
n = X.shape[0]
exps = 0
# "prange" in a normal function is identical to "range"
for i in prange(n):
p = X[i]
d = (-(p-points)**2)/(2*b**2)
m = np.min(d)
exps += m-np.log(b*N)+np.log(np.sum(np.exp(d-m)))
return exps
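# Summary of the loop above: for each sample it accumulates
#   log( sum_k exp(d_k) / (b * N) )
# using a log-sum-exp style shift (subtract m, exponentiate, add m back) to
# keep the exponentials well scaled; with @njit(parallel=True) the scalar
# "exps +=" inside the prange loop is handled by Numba as a parallel reduction.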
def main():
parser = argparse.ArgumentParser(description='Kernel-Density')
parser.add_argument('--size', dest='size', type=int, default=10000000)
parser.add_argument('--iterations', dest='iterations', type=int, default=20)
args = parser.parse_args()
size = args.size
iterations = args.iterations
np.random.seed(0)
kde(np.random.ranf(10))
print("size:", size)
X = np.random.ranf(size)
t1 = time.time()
for _ in range(iterations):
res = kde(X)
t = time.time()-t1
print("checksum:", res)
print("SELFTIMED:", t)
if __name__ == '__main__':
main()
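# Usage sketch (flag names come from the argparse setup above; numba and numpy
# must be installed):
#   python kernel_density_estimation.py --size 10000000 --iterations 20
# The small kde(np.random.ranf(10)) call in main() acts as a warm-up so that
# JIT compilation time is kept out of the timed loop.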
|
jriehl/numba
|
examples/kernel-density-estimation/kernel_density_estimation.py
|
Python
|
bsd-2-clause
| 1,135
|
# -*- coding: utf-8 -*-
from django.db.models import Q
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils.timezone import now
from django.conf import settings
from django import template # import Template,Context
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.utils.deconstruct import deconstructible
from email import message_from_string
from celery.result import AsyncResult
from bs4 import BeautifulSoup as Soup
import traceback
import re
from utils import (
create_auto_secret,
create_auto_short_secret,
expire,
get_template_source,
)
import json
import logging
logger = logging.getLogger('paloma')
DEFAULT_RETURN_PATH_RE = r"bcmsg-(?P<message_id>\d+)@(?P<domain>.+)"
DEFAULT_RETURN_PATH_FORMAT = "bcmsg-%(message_id)s@%(domain)s"
RETURN_PATH_RE = r"^(?P<commnad>.+)-(?P<message_id>\d+)@(?P<domain>.+)"
RETURN_PATH_FORMAT = "%(command)s-%(message_id)s@%(domain)s"
def return_path_from_address(address):
return re.search(
DEFAULT_RETURN_PATH_RE,
address).groupdict()
def default_return_path(param):
return DEFAULT_RETURN_PATH_FORMAT % param
def read_return_path(address):
return re.search(
RETURN_PATH_RE,
address).groupdict()
def make_return_path(param):
return RETURN_PATH_FORMAT % param
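# Illustrative round trips for the helpers above (values made up):
#   default_return_path({'message_id': 42, 'domain': 'example.com'})
#       -> 'bcmsg-42@example.com'
#   return_path_from_address('bcmsg-42@example.com')
#       -> {'message_id': '42', 'domain': 'example.com'}
#   make_return_path({'command': 'msg', 'message_id': 42, 'domain': 'example.com'})
#       -> 'msg-42@example.com'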
def MDT(t=None):
return (t or now()).strftime('%m%d%H%M%S')
@deconstructible
class Domain(models.Model):
''' Domain
- virtual_transport_maps.cf
'''
domain = models.CharField(
_(u'Domain'),
unique=True, max_length=100, db_index=True, )
''' Domain
- key for virtual_transport_maps.cf
- key and return value for virtual_domains_maps.cf
'''
description = models.CharField(
_(u'Description'),
max_length=200, default='')
maxquota = models.BigIntegerField(null=True, blank=True, default=None)
quota = models.BigIntegerField(null=True, blank=True, default=None)
transport = models.CharField(max_length=765)
'''
- virtual_transport_maps.cf looks this for specified **domain**.
'''
backupmx = models.IntegerField(null=True, blank=True, default=None)
active = models.BooleanField(default=True)
class Meta:
verbose_name = _(u'Domain')
verbose_name_plural = _(u'Domains')
@deconstructible
class Alias(models.Model):
''' Alias
- local user - maildir
- remote user - alias
- for virtual_alias_maps.cf
'''
address = models.CharField(
_('Alias Address'), max_length=100)
'''
- key for virtual_alias_maps.cf
'''
alias = models.CharField(
_('Alias Forward'), max_length=100)
'''
- value for virtual_alias_maps.cf
'''
mailbox = models.CharField(
_(u'Mailbox'),
max_length=100, null=True, default=None, blank=True,
help_text=u'specify Maildir path if address is local user ')
'''
- for local usr
- value for virtual_alias_maps.cf
'''
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _('Alias')
verbose_name_plural = _('Alias')
unique_together = (('address', 'alias', ), )
##
class AbstractProfile(models.Model):
''' Profile meta class'''
def target_context(self, member):
""" override this to return context dict for template rendering """
raise NotImplementedError
@classmethod
def target(cls, obj, *args, **kwargs):
context = {}
subclasses = cls.__subclasses__()
for ref in obj._meta.get_all_related_objects():
if ref.model in subclasses:
try:
context.update(
getattr(obj, ref.var_name
).target_context(*args, **kwargs)
)
except Exception:
pass
return context
class Meta:
abstract = True
@deconstructible
class Site(models.Model):
''' Site
'''
name = models.CharField(
_(u'Owner Site Name'), help_text=_(u'Owner Site Name'),
max_length=100, db_index=True, unique=True)
''' Site Name '''
domain = models.CharField(
_(u'@Domain'), help_text=_(u'@Domain'),
max_length=100, default='localhost',
db_index=True, unique=True, null=False, blank=False, )
''' @Domain'''
url = models.CharField(
_(u'URL'), help_text=_(u'URL'),
max_length=150, db_index=True, unique=True, default="/",)
''' URL path '''
operators = models.ManyToManyField(
User, help_text=_('User'), verbose_name=_(u'Site Operators'))
''' Site Operators '''
class Meta:
verbose_name = _('Site')
verbose_name_plural = _('Site')
unique_together = (('name', 'domain'), )
@property
def authority_address(self):
return "{0}@{1}".format(self.name, self.domain)
@property
def default_circle(self):
try:
return self.circle_set.get(is_default=True,)
except:
#: if no, get default:
name = getattr(settings, 'PALOMA_NAME', 'all')
return self.circle_set.get_or_create(
site=self, name=name, symbol=name,)[0]
def __unicode__(self):
return self.domain
@classmethod
def app_site(cls):
name = getattr(settings, 'PALOMA_NAME', 'paloma')
domain = getattr(settings, 'PALOMA_DEFAULT_DOMAIN', 'example.com')
return Site.objects.get_or_create(name=name, domain=domain)[0]
# Message Template
class TemplateManager(models.Manager):
def get_template(self, name, site=None):
site = site or Site.app_site()
ret, created = self.get_or_create(site=site, name=name)
if created or not ret.subject or not ret.text:
try:
path = 'paloma/mails/default_%s.html' % name.lower()
source = Soup(get_template_source(path))
ret.subject = source.select('subject')[0].text
ret.subject = ret.subject.replace('\n', '').replace('\r', '')
ret.text = source.select('text')[0].text
ret.save()
except Exception:
logger.debug(traceback.format_exc())
return ret
@deconstructible
class Template(models.Model):
''' Site Notice Text '''
site = models.ForeignKey(Site, verbose_name=_(u'Owner Site'))
''' Owner Site'''
name = models.CharField(
_(u'Template Name'),
max_length=200, db_index=True,)
''' Notice Name'''
subject = models.CharField(
_(u'Template Subject'),
max_length=100, default='',)
''' Subject '''
text = models.TextField(
_(u'Template Text'), default='',)
''' Text '''
objects = TemplateManager()
@classmethod
def get_default_template(cls, name='DEFAULT_TEMPLATE', site=None):
site = site or Site.app_site()
return Template.objects.get_or_create(site=site, name=name,)[0]
def render(self, *args, **kwargs):
'''
:param kwargs: Context dictionary
'''
return tuple([template.Template(t).render(template.Context(kwargs))
for t in [self.subject, self.text]])
def __unicode__(self):
return self.name
class Meta:
unique_together = (('site', 'name'),)
verbose_name = _(u'Template')
verbose_name_plural = _(u'Templates')
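# Illustrative use of Template (object and template names are hypothetical):
#   tpl = Template.objects.get_template('provision_accepted', site=some_site)
#   subject, text = tpl.render(provision=some_provision, member=some_member)
# render() pushes its keyword arguments into a django template Context and
# returns the rendered (subject, text) pair.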
@deconstructible
class Targetting(models.Model):
''' '''
site = models.ForeignKey(Site, verbose_name=_(u'Owner Site'))
''' Owner Site'''
targetter_content_type = models.ForeignKey(
ContentType,
related_name="targetter")
''' targetter model class'''
targetter_object_id = models.PositiveIntegerField()
''' tragetter object id '''
targetter = generic.GenericForeignKey(
'targetter_content_type',
'targetter_object_id')
''' targetter instance '''
mediator_content_type = models.ForeignKey(
ContentType,
related_name="mediator")
''' mediator model class'''
mediator_object_id = models.PositiveIntegerField()
''' mediator object id '''
mediator = generic.GenericForeignKey('mediator_content_type',
'mediator_object_id')
''' mediator instance '''
def __unicode__(self):
return self.targetter.__unicode__()
class CircleManager(models.Manager):
def find_for_domain(self, domain, symbol=None):
q = {'site__domain': domain}
if symbol is None or symbol == '':
q['is_default'] = True
else:
q['symbol'] = symbol
return self.get(**q)
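# Illustrative lookups (domain and symbol values are made up):
#   Circle.objects.find_for_domain('example.com', 'news')   # circle with symbol 'news'
#   Circle.objects.find_for_domain('example.com')           # the site's default circle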
def accessible_list(self, user):
return self.filter(
Q(membership__member__user=user) | Q(is_secret=False)
).distinct()
def of_user(self, user):
return self.filter(membership__member__user=user)
def of_user_exclusive(self, user):
return self.exclude(membership__member__user=user)
def of_admin(self, user):
return self.filter(membership__member__user=user,
membership__is_admin=True)
@deconstructible
class Circle(models.Model):
''' Circle
'''
site = models.ForeignKey(
Site, verbose_name=_(u'Owner Site'))
''' Owner Site'''
name = models.CharField(
_(u'Circle Name'), max_length=100, db_index=True)
''' Circle Name '''
description = models.TextField(
_(u'Circle Description'), null=True, default=None, blank=True)
symbol = models.CharField(
_(u'Circle Symbol'), max_length=100, db_index=True,
help_text=_(u'Circle Symbol Help Text'), )
''' Symbol '''
is_default = models.BooleanField(
_(u'Is Default Circle'), default=False,
help_text=_('Is Default Circle Help'),)
''' Site's Default Circle or not '''
is_moderated = models.BooleanField(
_(u'Is Moderated Circle'),
default=True, help_text=_('Is Moderated Circle Help'), )
''' True: Only operators(Membership.is_admin True)
can circulate their message.'''
is_secret = models.BooleanField(
_(u'Is Secret Circle'),
default=False, help_text=_('Is Secret Circle Help'), )
''' True: only membership users know its existence '''
objects = CircleManager()
def __unicode__(self):
return "%s of %s" % (self.name, self.site.__unicode__())
@property
def main_address(self):
return "%s@%s" % (self.symbol, self.site.domain)
@property
def domain(self):
return self.site.domain
def save(self, **kwargs):
if self.is_default:
self.site.circle_set.update(is_default=False)
else:
query = () if self.id is None else (~Q(id=self.id), )
if self.site.circle_set.filter(
is_default=True, *query).count() < 1:
self.is_default = True
super(Circle, self).save(**kwargs)
def is_admin_user(self, user):
return user.is_superuser or self.membership_set.filter(
member__user=user, is_admin=True).exists()
def is_admin(self, user):
return user.is_superuser or self.membership_set.filter(
member__user=user, is_admin=True).exists()
def is_operator(self, user):
return user.is_superuser or self.membership_set.filter(
member__user=user, is_admin=True).exists()
def is_member(self, user):
return self.membership_set.filter(
member__user=user, is_admitted=True).exists()
@property
def memberships(self):
return self.membership_set.all()
def membership_for_user(self, user):
try:
return self.membership_set.get(member__user=user)
except:
return None
@property
def memberships_unadmitted(self):
return self.membership_set.filter(is_admitted=False)
def are_member(self, users):
''' return True if all given users are members of this Circle '''
return all(
map(lambda u: self.membership_set.filter(member__user=u).exists(),
users))
def any_admin(self):
try:
admin_list = self.membership_set.filter(is_admin=True)
if admin_list.count() > 0:
return admin_list[0]
return User.objects.filter(is_superuser=True)[0]
except Exception:
logger.debug(traceback.format_exc())
return None
def add_member(self, member, is_admin=False, is_admitted=False):
membership, created = Membership.objects.get_or_create(circle=self,
member=member)
membership.is_admin = is_admin
membership.is_admitted = is_admitted
membership.save()
return membership
class Meta:
unique_together = (
('site', 'name'),
('site', 'symbol'),)
verbose_name = _(u'Circle')
verbose_name_plural = _(u'Circles')
@deconstructible
class Member(models.Model):
''' Member
- a system user can have multiple personality
'''
user = models.ForeignKey(User, verbose_name=_(u'System User'))
''' System User '''
address = models.CharField(_(u'Forward address'),
max_length=100, unique=True)
''' Email Address
'''
is_active = models.BooleanField(_(u'Active status'), default=False)
''' Active Status '''
bounces = models.IntegerField(_(u'Bounce counts'), default=0)
''' Bounce count'''
circles = models.ManyToManyField(Circle,
through='Membership',
verbose_name=_(u'Opt-in Circle'))
''' Opt-In Circles'''
def __unicode__(self):
return "%s(%s)" % (
self.user.__unicode__() if self.user else "unbound user",
self.address if self.address else "not registered",
)
def reset_password(self, active=False):
''' reset password '''
newpass = User.objects.make_random_password()
self.user.set_password(newpass)
self.user.is_active = active
self.user.save()
return newpass
def get_absolute_url(self):
''' Django API '''
return None
# return self.user.get_absolute_url() if self.user else None
class Meta:
verbose_name = _(u'Member')
verbose_name_plural = _(u'Members')
@deconstructible
class Membership(models.Model):
member = models.ForeignKey(Member, verbose_name=_(u'Member'))
''' Member ( :ref:`paloma.models.Member` ) '''
circle = models.ForeignKey(Circle, verbose_name=_(u'Circle'))
''' Circle ( :ref:`paloma.models.Circle` )'''
is_admin = models.BooleanField(_(u'Is Circle Admin'), default=False)
is_admitted = models.BooleanField(
_(u'Is Membership Admitted'),
default=False,
help_text=_(u'Is Membership Admitted Help'))
''' Member must be admitted by a Circle Admin to have a Membership '''
def is_member_active(self):
return self.member.is_active
is_member_active.short_description = _(u"Is Member Active")
def is_user_active(self):
return self.member.user.is_active
is_user_active.short_description = _(u"Is User Active")
def user(self):
return self.member.user
def __unicode__(self):
return "%s -> %s(%s)" % (
self.member.__unicode__() if self.member else "N/A",
self.circle.__unicode__() if self.circle else "N/A",
_(u"Circle Admin") if self.is_admin else _(u"General Member"),)
def get_absolute_url(self):
''' Django API '''
# if self.member and self.member.user:
# return self.member.user.get_absolute_url()
return None
class Meta:
unique_together = (('member', 'circle', ), )
verbose_name = _(u'Membership')
verbose_name_plural = _(u'Memberships')
PUBLISH_STATUS = (
('pending', _('Pending')),
('scheduled', _('Scheduled')),
('active', _('Active')),
('finished', _('Finished')),
('canceled', _('Canceled')),)
@deconstructible
class Publish(models.Model):
''' Message Delivery Publish'''
site = models.ForeignKey(Site, verbose_name=_(u'Site'),)
''' Site '''
publisher = models.ForeignKey(User, verbose_name=_(u'Publisher'))
''' publisher '''
messages = models.ManyToManyField('Message',
through="Publication",
verbose_name=_("Messages"),
related_name="message_set",)
subject = models.CharField(_(u'Subject'), max_length=101,)
''' Subject '''
text = models.TextField(_(u'Text'), )
''' Text '''
circles = models.ManyToManyField(Circle, verbose_name=_(u'Target Circles'))
''' Circle'''
task_id = models.CharField(_(u'Task ID'),
max_length=40, default=None,
null=True, blank=True,)
''' Task ID '''
status = models.CharField(_(u"Publish Status"), max_length=24,
db_index=True,
help_text=_('Publish Status Help'),
default="pending", choices=PUBLISH_STATUS)
dt_start = models.DateTimeField(
_(u'Time to Send'),
help_text=_(u'Time to Send Help'),
null=True, blank=True, default=now)
''' Stat datetime to send'''
activated_at = models.DateTimeField(
_(u'Task Activated Time'),
help_text=_(u'Task Activated Time Help'),
null=True, blank=True, default=None)
''' Task Activated Time '''
forward_to = models.CharField(
_(u'Forward address'),
max_length=100, default=None, null=True, blank=True)
''' Forward address for incoming email '''
targettings = generic.GenericRelation(
Targetting,
verbose_name=_('Optional Targetting'),
object_id_field="mediator_object_id",
content_type_field="mediator_content_type")
def __unicode__(self):
if self.dt_start:
return self.subject + self.dt_start.strftime(
"(%Y-%m-%d %H:%M:%S) by " + self.publisher.__unicode__())
else:
return self.subject + "(now)"
def get_context(self, circle, user):
context = {}
for ref in self._meta.get_all_related_objects():
if ref.model in AbstractProfile.__subclasses__():
try:
context.update(
getattr(self,
ref.var_name).target_context(circle, user)
)
except Exception:
pass
return context
def target_members_for_user(self, user):
return Member.objects.filter(
membership__circle__in=self.circles.all(),
user=user)
@property
def is_timeup(self):
return self.dt_start is None or self.dt_start <= now()
@property
def task(self):
try:
return AsyncResult(self.task_id)
except:
return None
class Meta:
verbose_name = _(u'Publish')
verbose_name_plural = _(u'Publish')
####
class JournalManager(models.Manager):
''' Message Manager'''
def handle_incomming_mail(self, sender, is_jailed, recipient, message):
'''
:param message: the incoming :py:class:`email.Message`
'''
pass
@deconstructible
class Journal(models.Model):
''' Raw Message
'''
dt_created = models.DateTimeField(
_(u'Journaled Datetime'),
help_text=_(u'Journaled datetime'), auto_now_add=True)
''' Journaled Datetime '''
sender = models.CharField(u'Sender', max_length=100)
''' sender '''
recipient = models.CharField(u'Recipient', max_length=100)
''' recipient '''
text = models.TextField(
_(u'Message Text'), default=None, blank=True, null=True)
''' Message text '''
is_jailed = models.BooleanField(_(u'Jailed Message'), default=False)
''' Jailed (emails whose recipient is missing have been journaled) if true '''
def mailobject(self):
''' return mail object
:rtype: email.message.Message
'''
# print ">>>>> type", type(self.text)
return message_from_string(self.text.encode('utf8'))
class Meta:
verbose_name = _(u'Journal')
verbose_name_plural = _(u'Journals')
def forwards(self):
return Alias.objects.filter(address=self.recipient)
def forward_from(self):
return "jfwd-{0}@{1}".format(
self.id,
self.recipient.split('@')[1],
)
try:
from rsyslog import Systemevents, Systemeventsproperties
Systemevents()
Systemeventsproperties()
except:
pass
@deconstructible
class MessageManager(models.Manager):
def create_from_template(self,
member_or_recepient,
template_name,
params={},
message_id=None,
circle=None):
''' Create Message from specified Template '''
template_name = template_name.lower()
msg = {
'circle': circle,
}
member = None
recipient = None
if type(member_or_recepient) == Member:
member = member_or_recepient
elif type(member_or_recepient) == Membership:
member = member_or_recepient.member
circle = member_or_recepient.circle
else: # str
recipient = member_or_recepient
site = circle.site if circle else Site.app_site()
# load template from storage
template = Template.objects.get_template(site=site,
name=template_name)
message_id = message_id or \
"msg-%s-%s@%s" % (template_name, MDT(), site.domain)
# create
try:
mail, created = self.get_or_create(mail_message_id=message_id)
mail.circle = circle
mail.template = template
mail.member = member
mail.recipient = recipient
mail.render(**params)
mail.save()
return mail
except Exception as e:
for err in traceback.format_exc().split('\n'):
logger.debug('send_template_mail:error:%s:%s' % (str(e), err))
return None
@deconstructible
class Message(models.Model):
''' Message '''
mail_message_id = models.CharField(u'Message ID', max_length=100,
db_index=True, unique=True)
''' Message-ID header - 'Message-ID: <local-part "@" domain>' '''
template = models.ForeignKey(Template, verbose_name=u'Template',
null=True, on_delete=models.SET_NULL)
''' Message Template '''
member = models.ForeignKey(Member, verbose_name=u'Member',
null=True, default=None, blank=True,
on_delete=models.SET_NULL)
''' Recipient Member (member.circle is Sender)'''
circle = models.ForeignKey(Circle, verbose_name=u'Circle',
null=True, default=None, blank=True,
on_delete=models.SET_NULL)
''' Target Circle ( if None, Site's default circle is used.)'''
recipient = models.EmailField(u'recipient', max_length=50,
default=None, blank=True, null=True)
''' Recipient (for non-Member )'''
subject = models.TextField(u'Message Subject', default=None,
blank=True, null=True)
''' Message Subject '''
text = models.TextField(_(u'Message Text'), default=None,
blank=True, null=True)
''' Message text '''
status = models.CharField(u'Status', max_length=50,
default=None, blank=True, null=True)
''' SMTP Status '''
task_id = models.CharField(u'Task ID', max_length=40,
default=None, null=True, blank=True, )
''' Task ID '''
checked = models.BooleanField(_(u'Mail Checked'), default=False, )
created = models.DateTimeField(_(u'Created'), auto_now_add=True)
updated = models.DateTimeField(_(u'Updated'), auto_now=True)
smtped = models.DateTimeField(_(u'SMTP Time'),
default=None, blank=True, null=True)
parameters = models.TextField(blank=True, null=True, )
''' extra parameters '''
_context_cache = None
''' Base Text'''
objects = MessageManager()
def __init__(self, *args, **kwargs):
super(Message, self).__init__(*args, **kwargs)
if self.template is None:
self.template = Template.get_default_template()
def __unicode__(self):
try:
return self.template.__unicode__() + str(self.recipients)
except:
return unicode(self.id)
@property
def task(self):
try:
return AsyncResult(self.task_id)
except:
return None
@property
def recipients(self): # plural!!!!
return [self.recipient] if self.recipient else [self.member.address]
def context(self, **kwargs):
''' "text" and "subject" are rendered with this context
- member : paloma.models.Member
- template : paloma.models.Template
- kwargs : extra parameters
- [any] : JSON serialized dict save in "parameters"
'''
ret = {"member": self.member, "template": self.template, }
ret.update(kwargs)
try:
ret.update(json.loads(self.parameters))
except:
pass
return ret
def render(self, do_save=True, **kwargs):
''' render for member in circle'''
if self.template:
self.text = template.Template(
self.template.text
).render(template.Context(self.context(**kwargs)))
self.subject = template.Template(
self.template.subject
).render(template.Context(self.context(**kwargs)))
if do_save:
self.save()
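# Illustrative flow (template name and params are hypothetical): a call like
#   Message.objects.create_from_template(member, 'NOTICE', params={'code': '1234'})
# ends up here via render(); the stored template is rendered against
# context(member=..., template=..., code='1234') plus anything JSON-decoded
# from Message.parameters, and the result is written to subject / text.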
@property
def from_address(self):
circle = self.circle or self.template.site.default_circle
return circle.main_address
@property
def return_path(self):
''' default return path '''
return make_return_path({"command": "msg", "message_id": self.id,
"domain": self.template.site.domain})
def set_status(self, status=None, smtped=None, do_save=True):
self.smtped = smtped
self.status = status
if do_save:
self.save()
@classmethod
def update_status(cls, msg, **kwargs):
for m in cls.objects.filter(
mail_message_id=kwargs.get('message_id', '')):
m.set_status(msg, now())
class Meta:
verbose_name = _(u'Message')
verbose_name_plural = _(u'Messages')
@deconstructible
class Provision(models.Model):
''' Account Provision management
'''
member = models.OneToOneField(Member, verbose_name=_(u'Member'),
on_delete=models.SET_NULL,
null=True, default=None, blank=True)
''' Member'''
status = models.CharField(_(u"Provision Status"),
max_length=24, db_index=True,)
''' Provisioning Status'''
circle = models.ForeignKey(Circle, verbose_name=_(u'Circle'),
null=True, default=None, blank=True,
on_delete=models.SET_NULL)
''' Circle'''
inviter = models.ForeignKey(User, verbose_name=_(u'Inviter'),
null=True, default=None, blank=True,
on_delete=models.SET_NULL)
''' Inviter'''
prospect = models.CharField(_(u'Provision Prospect'),
max_length=100, default=None,
null=True, blank=True)
''' Prospect Email Address'''
secret = models.CharField(
_(u'Provision Secret'),
max_length=100,
default='',
unique=True)
''' Secret
'''
short_secret = models.CharField(
_(u'Provision Short Secret'),
max_length=10, unique=True,
default='')
''' Short Secret
'''
url = models.CharField(_(u'URL for Notice'),
max_length=200, default=None, null=True, blank=True)
''' URL for notice '''
dt_expire = models.DateTimeField(
_(u'Provision Secret Expired'),
null=True, blank=True,
default=None,
help_text=u'Secret Expired', )
''' Secret Expired'''
dt_try = models.DateTimeField(_(u'Provision Try Datetime'),
null=True, blank=True, default=None,
help_text=u'Try Datetime', )
''' Try Datetime'''
dt_commit = models.DateTimeField(_(u'Commit Datetime'),
null=True, blank=True, default=None,
help_text=u'Commit Datetime', )
''' Commit Datetime'''
def __init__(self, *args, **kwargs):
super(Provision, self).__init__(*args, **kwargs)
self.dt_expire = self.dt_expire or expire()
self.secret = self.secret or create_auto_secret()
self.short_secret = self.short_secret or create_auto_short_secret()
def is_open(self, dt_now=None):
''' check if this provision is still open (not committed, not expired)
'''
dt_now = dt_now if dt_now else now()
# member / circle are assumed here; the model has no 'mailbox' or 'group' field
return (self.dt_commit is None) and \
(self.dt_expire > dt_now) and \
(self.member is not None) and \
(self.circle is not None)
def close(self):
''' close this enroll management
'''
self.dt_commit = now()
self.save()
def provided(self, user, address, is_active=True):
self.member = Member.objects.get_or_create(
user=user, address=address)[0]
self.member.is_active = is_active
self.member.save()
if self.circle:
membership, created = Membership.objects.get_or_create(
circle=self.circle, member=self.member)
membership.is_admitted = is_active
membership.save()
self.dt_commit = now()
self.save()
return membership
def reset(self, save=False):
self.secret = create_auto_secret()
self.short_secret = create_auto_short_secret()
self.dt_commit = None
self.dt_expire = expire()
if save:
self.save()
def send_response(self):
''' send response mail
'''
from paloma.tasks import send_templated_message
mail_message_id = u"%s-up-%d@%s" % (self.circle.symbol,
self.id,
self.circle.site.domain)
name = "provision_%s" % self.status
recipient = self.member and self.member.address or self.prospect
send_templated_message(
recipient,
name,
params={'provision': self},
message_id=mail_message_id,
)
logger.debug(_('Provision %(provision)s is sent for %(to)s') % {
"provision": name,
"to": str(recipient)})
class Meta:
verbose_name = _('Provision')
verbose_name_plural = _('Provisions')
class PublicationManager(models.Manager):
def publish(self, publish, circle, member, signature='pub'):
assert all([publish, circle, member])
msgid = "<%s-%d-%d-%d@%s>" % (
signature, publish.id,
circle.id, member.id, circle.domain)
ret, created = self.get_or_create(
publish=publish,
message=Message.objects.get_or_create(
mail_message_id=msgid,
template=Template.get_default_template('PUBLICATION'),
circle=circle,
member=member,)[0] # (object,created )
)
ret.render()
ret.save()
return ret
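# The Message-ID built in publish() above has the shape
#   "<pub-<publish.id>-<circle.id>-<member.id>@<circle.domain>>"
# e.g. "<pub-3-7-21@example.com>" (ids and domain made up), which keeps one
# Message row per (publish, circle, member) combination.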
@deconstructible
class Publication(models.Model):
''' Each Published Item
'''
publish = models.ForeignKey(Publish, verbose_name=_(u'Publish'))
''' Mail Schedule'''
message = models.ForeignKey(Message, verbose_name=_(u'Mail Message'))
''' Message '''
objects = PublicationManager()
def context(self, **kwargs):
ret = self.message.context(**kwargs)
ret['publish'] = self.publish
#: Circle & Member Targetting
ret.update(
AbstractProfile.target(self.message.circle, self.message.member)
)
#:AdHoc Targetting
for t in self.publish.targettings.all():
try:
ret.update(t.target(self))
except:
pass
return ret
def render(self, **kwargs):
''' render for member in circle'''
self.message.text = template.Template(
self.publish.text
).render(template.Context(self.context(**kwargs)))
self.message.subject = template.Template(
self.publish.subject
).render(template.Context(self.context(**kwargs)))
self.message.save()
|
hdknr/paloma
|
src/paloma/models.py
|
Python
|
bsd-2-clause
| 33,915
|
# -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from builtins import str
import six
import datetime
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.core.validators import MaxValueValidator
from django.core.validators import MinValueValidator
from django.core.exceptions import ValidationError
from django.utils import timezone
from lino import mixins
from lino.api import dd, rt, _, pgettext
from .choicelists import (
DurationUnits, Recurrencies, Weekdays, AccessClasses, PlannerColumns)
from .utils import setkw, dt2kw, when_text
from lino.modlib.checkdata.choicelists import Checker
from lino.modlib.printing.mixins import TypedPrintable
from lino.modlib.printing.mixins import Printable
from lino.modlib.users.mixins import UserAuthored, Assignable
from lino_xl.lib.postings.mixins import Postable
from lino_xl.lib.outbox.mixins import MailableType, Mailable
from lino_xl.lib.contacts.mixins import ContactRelated
from lino.modlib.office.roles import OfficeStaff
from .workflows import (TaskStates, EntryStates, GuestStates)
from .actions import UpdateGuests
from .mixins import Component
from .mixins import EventGenerator, RecurrenceSet, Reservation
from .mixins import Ended
from .mixins import MoveEntryNext, UpdateEntries, UpdateEntriesByEvent
from .actions import ShowEntriesByDay
from .ui import ConflictingEvents
DEMO_START_YEAR = 2013
class CalendarType(object):
def validate_calendar(self, cal):
pass
class LocalCalendar(CalendarType):
label = "Local Calendar"
class GoogleCalendar(CalendarType):
label = "Google Calendar"
def validate_calendar(self, cal):
if not cal.url_template:
cal.url_template = \
"https://%(username)s:%(password)s@www.google.com/calendar/dav/%(username)s/"
CALENDAR_CHOICES = []
CALENDAR_DICT = {}
def register_calendartype(name, instance):
CALENDAR_DICT[name] = instance
CALENDAR_CHOICES.append((name, instance.label))
register_calendartype('local', LocalCalendar())
register_calendartype('google', GoogleCalendar())
class DailyPlannerRow(mixins.BabelDesignated, mixins.Sequenced):
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'PlannerRow')
verbose_name = _("Planner row")
verbose_name_plural = _("Planner rows")
ordering = ['seqno']
start_time = models.TimeField(
blank=True, null=True,
verbose_name=_("Start time"))
end_time = models.TimeField(
blank=True, null=True,
verbose_name=_("End time"))
from lino.mixins.periods import ObservedDateRange
from etgen.html import E
from lino.utils import join_elems
class DailyPlannerRows(dd.Table):
model = 'cal.DailyPlannerRow'
column_names = "seqno designation start_time end_time"
required_roles = dd.login_required(OfficeStaff)
class DailyPlanner(DailyPlannerRows):
label = _("Daily planner")
editable = False
parameters = dict(
date=models.DateField(
_("Date"), help_text=_("Date to show")),
user=dd.ForeignKey('users.User', null=True, blank=True))
@classmethod
def param_defaults(cls, ar, **kw):
kw = super(DailyPlanner, cls).param_defaults(ar, **kw)
kw.update(date=dd.today())
# kw.update(end_date=dd.today())
# kw.update(user=ar.get_user())
return kw
@classmethod
def setup_columns(self):
names = ''
for i, vf in enumerate(self.get_ventilated_columns()):
self.add_virtual_field('vc' + str(i), vf)
names += ' ' + vf.name + ':20'
self.column_names = "overview {}".format(names)
#~ logger.info("20131114 setup_columns() --> %s",self.column_names)
@classmethod
def get_ventilated_columns(cls):
Event = rt.models.cal.Event
def fmt(e):
t = str(e.start_time)[:5]
u = e.user
if u is None:
return "{} {}".format(
t, e.room)
return t
u = u.initials or u.username or str(u)
return "{} {}".format(t, u)
def w(pc, verbose_name):
def func(fld, obj, ar):
# obj is the DailyPlannerRow instance
pv = ar.param_values
qs = Event.objects.filter(event_type__planner_column=pc)
if pv.user:
qs = qs.filter(user=pv.user)
if pv.date:
qs = qs.filter(start_date=pv.date)
if obj.start_time:
qs = qs.filter(start_time__gte=obj.start_time,
start_time__isnull=False)
if obj.end_time:
qs = qs.filter(start_time__lt=obj.end_time,
start_time__isnull=False)
if not obj.start_time and not obj.end_time:
qs = qs.filter(start_time__isnull=True)
qs = qs.order_by('start_time')
chunks = [e.obj2href(ar, fmt(e)) for e in qs]
return E.p(*join_elems(chunks))
return dd.VirtualField(dd.HtmlBox(verbose_name), func)
for pc in PlannerColumns.objects():
yield w(pc, pc.text)
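# Sketch of what get_ventilated_columns() produces: one virtual HtmlBox column
# per PlannerColumns choice; each column's function lists the calendar entries
# whose event type maps to that planner column, filtered by the requested
# date/user parameters and by the planner row's start/end time window.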
class RemoteCalendar(mixins.Sequenced):
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'RemoteCalendar')
verbose_name = _("Remote Calendar")
verbose_name_plural = _("Remote Calendars")
ordering = ['seqno']
type = models.CharField(_("Type"), max_length=20,
default='local',
choices=CALENDAR_CHOICES)
url_template = models.CharField(_("URL template"),
max_length=200, blank=True) # ,null=True)
username = models.CharField(_("Username"),
max_length=200, blank=True) # ,null=True)
password = dd.PasswordField(_("Password"),
max_length=200, blank=True) # ,null=True)
readonly = models.BooleanField(_("read-only"), default=False)
def get_url(self):
if self.url_template:
return self.url_template % dict(
username=self.username,
password=self.password)
return ''
def save(self, *args, **kw):
ct = CALENDAR_DICT.get(self.type)
ct.validate_calendar(self)
super(RemoteCalendar, self).save(*args, **kw)
class Room(mixins.BabelNamed, ContactRelated):
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'Room')
verbose_name = _("Room")
verbose_name_plural = _("Rooms")
description = dd.RichTextField(_("Description"), blank=True)
dd.update_field(
Room, 'company', verbose_name=_("Responsible"))
dd.update_field(
Room, 'contact_person', verbose_name=_("Contact person"))
class Priority(mixins.BabelNamed):
class Meta:
app_label = 'cal'
verbose_name = _("Priority")
verbose_name_plural = _('Priorities')
ref = models.CharField(max_length=1)
@dd.python_2_unicode_compatible
class EventType(mixins.BabelNamed, mixins.Sequenced, MailableType):
templates_group = 'cal/Event'
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'EventType')
verbose_name = _("Calendar entry type")
verbose_name_plural = _("Calendar entry types")
ordering = ['seqno']
description = dd.RichTextField(
_("Description"), blank=True, format='html')
is_appointment = models.BooleanField(_("Appointment"), default=True)
all_rooms = models.BooleanField(_("Locks all rooms"), default=False)
locks_user = models.BooleanField(_("Locks the user"), default=False)
start_date = models.DateField(
verbose_name=_("Start date"),
blank=True, null=True)
event_label = dd.BabelCharField(
_("Entry label"), max_length=200, blank=True)
# , default=_("Calendar entry"))
# default values for a Babelfield don't work as expected
max_conflicting = models.PositiveIntegerField(
_("Simultaneous entries"), default=1)
max_days = models.PositiveIntegerField(
_("Maximum days"), default=1)
transparent = models.BooleanField(_("Transparent"), default=False)
planner_column = PlannerColumns.field(blank=True)
def __str__(self):
# when selecting an Event.event_type it is more natural to
# have the event_label. It seems that the current `name` field
# is actually never used.
return settings.SITE.babelattr(self, 'event_label') \
or settings.SITE.babelattr(self, 'name')
class GuestRole(mixins.BabelNamed):
templates_group = 'cal/Guest'
class Meta:
app_label = 'cal'
verbose_name = _("Guest Role")
verbose_name_plural = _("Guest Roles")
def default_color():
d = Calendar.objects.all().aggregate(models.Max('color'))
n = d['color__max'] or 0
return n + 1
class Calendar(mixins.BabelNamed):
COLOR_CHOICES = [i + 1 for i in range(32)]
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'Calendar')
verbose_name = _("Calendar")
verbose_name_plural = _("Calendars")
description = dd.RichTextField(_("Description"), blank=True, format='html')
color = models.IntegerField(
_("color"), default=default_color,
validators=[MinValueValidator(1), MaxValueValidator(32)]
)
# choices=COLOR_CHOICES)
class Subscription(UserAuthored):
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'Subscription')
verbose_name = _("Subscription")
verbose_name_plural = _("Subscriptions")
unique_together = ['user', 'calendar']
manager_roles_required = dd.login_required(OfficeStaff)
calendar = dd.ForeignKey(
'cal.Calendar', help_text=_("The calendar you want to subscribe to."))
is_hidden = models.BooleanField(
_("hidden"), default=False,
help_text=_("""Whether this subscription should "
"initially be displayed as a hidden calendar."""))
class Task(Component):
class Meta:
app_label = 'cal'
verbose_name = _("Task")
verbose_name_plural = _("Tasks")
abstract = dd.is_abstract_model(__name__, 'Task')
due_date = models.DateField(
blank=True, null=True,
verbose_name=_("Due date"))
due_time = models.TimeField(
blank=True, null=True,
verbose_name=_("Due time"))
# ~ done = models.BooleanField(_("Done"),default=False) # iCal:COMPLETED
# iCal:PERCENT
percent = models.IntegerField(_("Duration value"), null=True, blank=True)
state = TaskStates.field(
default=TaskStates.as_callable('todo')) # iCal:STATUS
# def before_ui_save(self, ar, **kw):
# if self.state == TaskStates.todo:
# self.state = TaskStates.started
# return super(Task, self).before_ui_save(ar, **kw)
# def on_user_change(self,request):
# if not self.state:
# self.state = TaskState.todo
# self.user_modified = True
def is_user_modified(self):
return self.state != TaskStates.todo
@classmethod
def on_analyze(cls, lino):
# lino.TASK_AUTO_FIELDS = dd.fields_list(cls,
cls.DISABLED_AUTO_FIELDS = dd.fields_list(
cls, """start_date start_time summary""")
super(Task, cls).on_analyze(lino)
# def __unicode__(self):
# ~ return "#" + str(self.pk)
class EventPolicy(mixins.BabelNamed, RecurrenceSet):
class Meta:
app_label = 'cal'
verbose_name = _("Recurrency policy")
verbose_name_plural = _('Recurrency policies')
abstract = dd.is_abstract_model(__name__, 'EventPolicy')
event_type = dd.ForeignKey(
'cal.EventType', null=True, blank=True)
class RecurrentEvent(mixins.BabelNamed, RecurrenceSet, EventGenerator,
UserAuthored):
class Meta:
app_label = 'cal'
verbose_name = _("Recurring event")
verbose_name_plural = _("Recurring events")
abstract = dd.is_abstract_model(__name__, 'RecurrentEvent')
event_type = dd.ForeignKey('cal.EventType', blank=True, null=True)
description = dd.RichTextField(
_("Description"), blank=True, format='html')
# def on_create(self,ar):
# super(RecurrentEvent,self).on_create(ar)
# self.event_type = settings.SITE.site_config.holiday_event_type
# def __unicode__(self):
# return self.summary
def update_cal_rset(self):
return self
def update_cal_from(self, ar):
return self.start_date
def update_cal_event_type(self):
return self.event_type
def update_cal_summary(self, et, i):
return six.text_type(self)
def care_about_conflicts(self, we):
return False
dd.update_field(
RecurrentEvent, 'every_unit',
default=Recurrencies.as_callable('yearly'), blank=False, null=False)
class ExtAllDayField(dd.VirtualField):
"""
An editable virtual field needed for
communication with the Ext.ensible CalendarPanel
because we consider the "all day" checkbox
equivalent to "empty start and end time fields".
"""
editable = True
def __init__(self, *args, **kw):
dd.VirtualField.__init__(self, models.BooleanField(*args, **kw), None)
def set_value_in_object(self, request, obj, value):
if value:
obj.end_time = None
obj.start_time = None
else:
if not obj.start_time:
obj.start_time = datetime.time(9, 0, 0)
if not obj.end_time:
pass
# obj.end_time = datetime.time(10, 0, 0)
# obj.save()
def value_from_object(self, obj, ar):
# logger.info("20120118 value_from_object() %s",dd.obj2str(obj))
return (obj.start_time is None)
@dd.python_2_unicode_compatible
class Event(Component, Ended, Assignable, TypedPrintable, Mailable, Postable):
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'Event')
# abstract = True
verbose_name = _("Calendar entry")
verbose_name_plural = _("Calendar entries")
# verbose_name = pgettext("cal", "Event")
# verbose_name_plural = pgettext("cal", "Events")
update_guests = UpdateGuests()
update_events = UpdateEntriesByEvent()
show_today = ShowEntriesByDay('start_date')
event_type = dd.ForeignKey('cal.EventType', blank=True, null=True)
transparent = models.BooleanField(_("Transparent"), default=False)
room = dd.ForeignKey('cal.Room', null=True, blank=True)
priority = dd.ForeignKey(Priority, null=True, blank=True)
state = EntryStates.field(
default=EntryStates.as_callable('suggested'))
all_day = ExtAllDayField(_("all day"))
move_next = MoveEntryNext()
show_conflicting = dd.ShowSlaveTable(ConflictingEvents)
def strftime(self):
if not self.start_date:
return ''
d = self.start_date.strftime(settings.SITE.date_format_strftime)
if self.start_time:
t = self.start_time.strftime(
settings.SITE.time_format_strftime)
return "%s %s" % (d, t)
else:
return d
def __str__(self):
if self.summary:
s = self.summary
elif self.event_type:
s = str(self.event_type)
elif self.pk:
s = self._meta.verbose_name + " #" + str(self.pk)
else:
s = _("Unsaved %s") % self._meta.verbose_name
when = self.strftime()
if when:
s = "{} ({})".format(s, when)
return s
def duration_veto(obj):
if obj.end_date is None:
return
et = obj.event_type
if et is None:
return
duration = obj.end_date - obj.start_date
# print (20161222, duration.days, et.max_days)
if duration.days > et.max_days:
return _(
"Event lasts {0} days but only {1} are allowed.").format(
duration.days, et.max_days)
def full_clean(self, *args, **kw):
et = self.event_type
if et and et.max_days == 1:
# avoid "Abandoning with 297 unsaved instances"
self.end_date = None
msg = self.duration_veto()
if msg is not None:
raise ValidationError(str(msg))
super(Event, self).full_clean(*args, **kw)
def get_change_observers(self):
# implements ChangeNotifier
if not self.is_user_modified():
return
for x in super(Event, self).get_change_observers():
yield x
for u in (self.user, self.assigned_to):
if u is not None:
yield (u, u.mail_mode)
def has_conflicting_events(self):
qs = self.get_conflicting_events()
if qs is None:
return False
if self.event_type is not None:
if self.event_type.transparent:
return False
# holidays (all room events) conflict also with events
# whose type otherwise would allow conflicting events
if qs.filter(event_type__all_rooms=True).count() > 0:
return True
n = self.event_type.max_conflicting - 1
else:
n = 0
# date = self.start_date
# if date.day == 9 and date.month == 3:
# dd.logger.info("20171130 has_conflicting_events() %s", qs.query)
return qs.count() > n
def get_conflicting_events(self):
if self.transparent:
return
# if self.event_type is not None and self.event_type.transparent:
# return
# return False
# Event = dd.resolve_model('cal.Event')
# ot = ContentType.objects.get_for_model(RecurrentEvent)
qs = self.__class__.objects.filter(transparent=False)
qs = qs.exclude(event_type__transparent=True)
# if self.state.transparent:
# # cancelled entries are basically transparent to all
# # others. Except if they have an owner, in which case we
# # wouldn't want Lino to put another automatic entry at
# # that date.
# if self.owner_id is None:
# return
# qs = qs.filter(
# owner_id=self.owner_id, owner_type=self.owner_type)
end_date = self.end_date or self.start_date
flt = Q(start_date=self.start_date, end_date__isnull=True)
flt |= Q(end_date__isnull=False,
start_date__lte=self.start_date, end_date__gte=end_date)
if end_date == self.start_date:
if self.start_time and self.end_time:
# the other starts before me and ends after i started
c1 = Q(start_time__lte=self.start_time,
end_time__gt=self.start_time)
# the other ends after me and started before i ended
c2 = Q(end_time__gte=self.end_time,
start_time__lt=self.end_time)
# the other is full day
c3 = Q(end_time__isnull=True, start_time__isnull=True)
flt &= (c1 | c2 | c3)
qs = qs.filter(flt)
# saved events don't conflict with themselves:
if self.id is not None:
qs = qs.exclude(id=self.id)
# automatic events never conflict with other generated events
# of same owner. Rule needed for update_events.
if self.auto_type:
qs = qs.exclude(
# auto_type=self.auto_type,
auto_type__isnull=False,
owner_id=self.owner_id, owner_type=self.owner_type)
# transparent events (cancelled or omitted) usually don't
# cause a conflict with other events (e.g. a holiday). But a
# cancelled course lesson should not tolerate another lesson
# of the same course on the same date.
ntstates = EntryStates.filter(transparent=False)
if self.owner_id is None:
if self.state.transparent:
return
qs = qs.filter(state__in=ntstates)
else:
if self.state.transparent:
qs = qs.filter(
owner_id=self.owner_id, owner_type=self.owner_type)
else:
qs = qs.filter(
Q(state__in=ntstates) | Q(
owner_id=self.owner_id, owner_type=self.owner_type))
if self.room is None:
# an entry that needs a room but doesn't yet have one,
# conflicts with any all-room entry (e.g. a holiday). For
# generated entries this list extends to roomed entries of
# the same generator.
if self.event_type is None or not self.event_type.all_rooms:
if self.owner_id is None:
qs = qs.filter(event_type__all_rooms=True)
else:
qs = qs.filter(
Q(event_type__all_rooms=True) | Q(
owner_id=self.owner_id, owner_type=self.owner_type))
else:
# other event in the same room
c1 = Q(room=self.room)
# other event locks all rooms (e.g. holidays)
# c2 = Q(room__isnull=False, event_type__all_rooms=True)
c2 = Q(event_type__all_rooms=True)
qs = qs.filter(c1 | c2)
if self.user is not None:
if self.event_type is not None:
if self.event_type.locks_user:
# c1 = Q(event_type__locks_user=False)
# c2 = Q(user=self.user)
# qs = qs.filter(c1|c2)
qs = qs.filter(user=self.user, event_type__locks_user=True)
# qs = Event.objects.filter(flt,owner_type=ot)
# if we.start_date.month == 7:
# print 20131011, self, we.start_date, qs.count()
# print 20131025, qs.query
return qs
def is_fixed_state(self):
return self.state.fixed
# return self.state in EntryStates.editable_states
def is_user_modified(self):
return self.state != EntryStates.suggested
def after_ui_save(self, ar, cw):
super(Event, self).after_ui_save(ar, cw)
self.update_guests.run_from_code(ar)
def before_state_change(self, ar, old, new):
super(Event, self).before_state_change(ar, old, new)
if new.noauto:
self.auto_type = None
def suggest_guests(self):
if self.owner:
for obj in self.owner.suggest_cal_guests(self):
yield obj
def get_event_summary(event, ar):
# from django.utils.translation import ugettext as _
s = event.summary
# if event.owner_id:
# s += " ({0})".format(event.owner)
if event.user is not None and event.user != ar.get_user():
if event.access_class == AccessClasses.show_busy:
s = _("Busy")
s = event.user.username + ': ' + str(s)
elif settings.SITE.project_model is not None \
and event.project is not None:
s += " " + str(_("with")) + " " + str(event.project)
if event.state:
s = ("(%s) " % str(event.state)) + s
n = event.guest_set.all().count()
if n:
s = ("[%d] " % n) + s
return s
def before_ui_save(self, ar, **kw):
# logger.info("20130528 before_ui_save")
if self.state is EntryStates.suggested:
self.state = EntryStates.draft
return super(Event, self).before_ui_save(ar, **kw)
def on_create(self, ar):
if self.event_type is None:
self.event_type = ar.user.event_type or \
settings.SITE.site_config.default_event_type
self.start_date = settings.SITE.today()
self.start_time = timezone.now().time()
# see also Assignable.on_create()
super(Event, self).on_create(ar)
# def on_create(self,ar):
# self.start_date = settings.SITE.today()
# self.start_time = datetime.datetime.now().time()
# ~ # default user is almost the same as for UserAuthored
# ~ # but we take the *real* user, not the "working as"
# if self.user_id is None:
# u = ar.user
# if u is not None:
# self.user = u
# super(Event,self).on_create(ar)
def get_postable_recipients(self):
"""return or yield a list of Partners"""
if self.project:
if isinstance(self.project, dd.plugins.cal.partner_model):
yield self.project
for g in self.guest_set.all():
yield g.partner
# if self.user.partner:
# yield self.user.partner
def get_mailable_type(self):
return self.event_type
def get_mailable_recipients(self):
if self.project:
if isinstance(self.project, dd.plugins.cal.partner_model):
yield ('to', self.project)
for g in self.guest_set.all():
yield ('to', g.partner)
if self.user.partner:
yield ('cc', self.user.partner)
# def get_mailable_body(self,ar):
# return self.description
@dd.displayfield(_("When"), sortable_by=['start_date', 'start_time'])
def when_text(self, ar):
txt = when_text(self.start_date, self.start_time)
if self.end_date and self.end_date != self.start_date:
txt += "-" + when_text(self.end_date, self.end_time)
return txt
@dd.displayfield(_("When"), sortable_by=['start_date', 'start_time'])
# def linked_date(self, ar):
def when_html(self, ar):
if ar is None:
return ''
EntriesByDay = settings.SITE.models.cal.EntriesByDay
txt = when_text(self.start_date, self.start_time)
return EntriesByDay.as_link(ar, self.start_date, txt)
# return self.obj2href(ar, txt)
@dd.displayfield(_("Link URL"))
def url(self, ar):
return 'foo'
@dd.virtualfield(dd.DisplayField(_("Reminder")))
def reminder(self, request):
return False
# reminder.return_type = dd.DisplayField(_("Reminder"))
def get_calendar(self):
# for sub in Subscription.objects.filter(user=ar.get_user()):
# if sub.contains_event(self):
# return sub
return None
@dd.virtualfield(dd.ForeignKey('cal.Calendar'))
def calendar(self, ar):
return self.get_calendar()
def get_print_language(self):
# if settings.SITE.project_model is not None and self.project:
if self.project:
return self.project.get_print_language()
if self.user:
return self.user.language
return settings.SITE.get_default_language()
@classmethod
def get_default_table(cls):
return OneEvent
@classmethod
def on_analyze(cls, lino):
cls.DISABLED_AUTO_FIELDS = dd.fields_list(cls, "summary")
super(Event, cls).on_analyze(lino)
def auto_type_changed(self, ar):
if self.auto_type:
self.summary = self.owner.update_cal_summary(
self.event_type, self.auto_type)
# def save(self, *args, **kwargs):
# if "Weekends" in str(self.owner):
# if not self.end_date:
# raise Exception("20180321")
# super(Event, self).save(*args, **kwargs)
dd.update_field(Event, 'user', verbose_name=_("Responsible user"))
class EntryChecker(Checker):
model = Event
def get_responsible_user(self, obj):
return obj.user or super(
EntryChecker, self).get_responsible_user(obj)
class EventGuestChecker(EntryChecker):
verbose_name = _("Entries without participants")
def get_checkdata_problems(self, obj, fix=False):
if not obj.state.edit_guests:
return
existing = set([g.partner.pk for g in obj.guest_set.all()])
if len(existing) == 0:
suggested = list(obj.suggest_guests())
if len(suggested) > 0:
msg = _("No participants although {0} suggestions exist.")
yield (True, msg.format(len(suggested)))
if fix:
for g in suggested:
g.save()
EventGuestChecker.activate()
class ConflictingEventsChecker(EntryChecker):
verbose_name = _("Check for conflicting calendar entries")
def get_checkdata_problems(self, obj, fix=False):
if not obj.has_conflicting_events():
return
qs = obj.get_conflicting_events()
num = qs.count()
if num == 1:
msg = _("Event conflicts with {0}.").format(qs[0])
else:
msg = _("Event conflicts with {0} other events.").format(num)
yield (False, msg)
ConflictingEventsChecker.activate()
class ObsoleteEventTypeChecker(EntryChecker):
verbose_name = _("Obsolete generated calendar entries")
def get_checkdata_problems(self, obj, fix=False):
if not obj.auto_type:
return
if obj.owner is None:
msg = _("Has auto_type but no owner.")
yield (False, msg)
return
et = obj.owner.update_cal_event_type()
if obj.event_type != et:
msg = _("Event type but {0} (should be {1}).").format(
obj.event_type, et)
autofix = False # TODO: make this configurable?
yield (autofix, msg)
if fix:
obj.event_type = et
obj.full_clean()
obj.save()
ObsoleteEventTypeChecker.activate()
DONT_FIX_LONG_ENTRIES = False
class LongEntryChecker(EntryChecker):
verbose_name = _("Too long-lasting calendar entries")
model = Event
def get_checkdata_problems(self, obj, fix=False):
msg = obj.duration_veto()
if msg is not None:
if DONT_FIX_LONG_ENTRIES:
yield (False, msg)
else:
yield (True, msg)
if fix:
obj.end_date = None
obj.full_clean()
obj.save()
LongEntryChecker.activate()
@dd.python_2_unicode_compatible
class Guest(Printable):
workflow_state_field = 'state'
allow_cascaded_delete = ['event']
class Meta:
app_label = 'cal'
abstract = dd.is_abstract_model(__name__, 'Guest')
# verbose_name = _("Participant")
# verbose_name_plural = _("Participants")
verbose_name = _("Presence")
verbose_name_plural = _("Presences")
unique_together = ['event', 'partner']
event = dd.ForeignKey('cal.Event')
partner = dd.ForeignKey(dd.plugins.cal.partner_model)
role = dd.ForeignKey(
'cal.GuestRole', verbose_name=_("Role"), blank=True, null=True)
state = GuestStates.field(default=GuestStates.as_callable('invited'))
remark = models.CharField(_("Remark"), max_length=200, blank=True)
# Define a `user` property because we want to use
# `lino.modlib.users.mixins.My`
def get_user(self):
# used to apply `owner` requirement in GuestState
return self.event.user
user = property(get_user)
# author_field_name = 'user'
def __str__(self):
return u'%s #%s (%s)' % (
self._meta.verbose_name, self.pk, self.event.strftime())
# def get_printable_type(self):
# return self.role
def get_mailable_type(self):
return self.role
def get_mailable_recipients(self):
yield ('to', self.partner)
@dd.displayfield(_("Event"))
def event_summary(self, ar):
if ar is None:
return ''
return ar.obj2html(self.event, self.event.get_event_summary(ar))
def migrate_reminder(obj, reminder_date, reminder_text,
delay_value, delay_type, reminder_done):
"""
This was used only for migrating to 1.2.0,
see :mod:`lino.projects.pcsw.migrate`.
"""
raise NotImplementedError(
"No longer needed (and no longer supported after 20111026).")
def delay2alarm(delay_type):
if delay_type == 'D':
return DurationUnits.days
if delay_type == 'W':
return DurationUnits.weeks
if delay_type == 'M':
return DurationUnits.months
if delay_type == 'Y':
return DurationUnits.years
# ~ # These constants must be unique for the whole Lino Site.
# ~ # Keep in sync with auto types defined in lino.projects.pcsw.models.Person
# REMINDER = 5
if reminder_text:
summary = reminder_text
else:
summary = _('due date reached')
update_auto_task(
None, # REMINDER,
obj.user,
reminder_date,
summary, obj,
done=reminder_done,
alarm_value=delay_value,
alarm_unit=delay2alarm(delay_type))
# Inject application-specific fields to users.User.
dd.inject_field(settings.SITE.user_model,
'access_class',
AccessClasses.field(
default=AccessClasses.as_callable('public'),
verbose_name=_("Default access class"),
help_text=_(
"""The default access class for your calendar events and tasks.""")
))
dd.inject_field(settings.SITE.user_model,
'event_type',
dd.ForeignKey('cal.EventType',
blank=True, null=True,
verbose_name=_("Default Event Type"),
help_text=_("""The default event type for your calendar events.""")
))
dd.inject_field(
'system.SiteConfig',
'default_event_type',
dd.ForeignKey(
'cal.EventType',
blank=True, null=True,
verbose_name=_("Default Event Type"),
help_text=_("""The default type of events on this site.""")
))
dd.inject_field(
'system.SiteConfig',
'site_calendar',
dd.ForeignKey(
'cal.Calendar',
blank=True, null=True,
related_name="%(app_label)s_%(class)s_set_by_site_calender",
verbose_name=_("Site Calendar"),
help_text=_("""The default calendar of this site.""")))
dd.inject_field(
'system.SiteConfig',
'max_auto_events',
models.IntegerField(
_("Max automatic events"), default=72,
blank=True, null=True,
help_text=_(
"""Maximum number of automatic events to be generated.""")
))
dd.inject_field(
'system.SiteConfig',
'hide_events_before',
models.DateField(
_("Hide events before"),
blank=True, null=True,
help_text=_("""If this is specified, certain tables show only
events after the given date.""")
))
Reservation.show_today = ShowEntriesByDay('start_date')
if False: # removed 20160610 because it is probably not used
def update_reminders_for_user(user, ar):
n = 0
for model in rt.models_by_base(EventGenerator):
for obj in model.objects.filter(user=user):
obj.update_reminders(ar)
# logger.info("--> %s",unicode(obj))
n += 1
return n
class UpdateUserReminders(UpdateEntries):
"""
Users can invoke this to re-generate their automatic tasks.
"""
def run_from_ui(self, ar, **kw):
user = ar.selected_rows[0]
dd.logger.info("Updating reminders for %s", unicode(user))
n = update_reminders_for_user(user, ar)
msg = _("%(num)d reminders for %(user)s have been updated."
) % dict(user=user, num=n)
dd.logger.info(msg)
ar.success(msg, **kw)
@dd.receiver(dd.pre_analyze, dispatch_uid="add_update_reminders")
def pre_analyze(sender, **kw):
sender.user_model.define_action(update_reminders=UpdateUserReminders())
from .ui import *
|
khchine5/xl
|
lino_xl/lib/cal/models.py
|
Python
|
bsd-2-clause
| 36,436
|
#!/usr/bin/env python
"""
Benchmark how long it takes to set 10,000 keys in the database.
"""
from __future__ import print_function
import trollius as asyncio
from trollius import From
import logging
import trollius_redis
import time
from six.moves import range
if __name__ == '__main__':
loop = asyncio.get_event_loop()
# Enable logging
logging.getLogger().addHandler(logging.StreamHandler())
logging.getLogger().setLevel(logging.WARNING)
def run():
connection = yield From(trollius_redis.Pool.create(
host=u'localhost', port=6379, poolsize=50))
try:
# === Benchmark 1 ==
print(
u'1. How much time does it take to set 10,000 values '
u'in Redis? (without pipelining)')
print(u'Starting...')
start = time.time()
# Do 10,000 set requests
for i in range(10 * 1000):
# By using yield from here, we wait for the answer.
yield From(connection.set(u'key', u'value'))
print(u'Done. Duration=', time.time() - start)
print()
# === Benchmark 2 (should be at least 3x as fast) ==
print(u'2. How much time does it take if we use asyncio.gather, '
u'and pipeline requests?')
print(u'Starting...')
start = time.time()
# Do 10,000 set requests
futures = [asyncio.Task(connection.set(u'key', u'value')) for x
in range(10*1000)]
yield From(asyncio.gather(*futures))
print(u'Done. Duration=', time.time() - start)
finally:
connection.close()
loop.run_until_complete(run())
|
benjolitz/trollius-redis
|
examples/benchmarks/speed_test.py
|
Python
|
bsd-2-clause
| 1,736
|
import os
import unittest
from math import pi
import numpy
from kiva import agg
def save_path(filename):
return filename
def draw_arcs(gc, x2, y2, radiusstep=25.0):
gc.set_stroke_color((0.2,0.2,0.2)) # lightgray
gc.move_to(0, 0)
gc.line_to(100, 0)
gc.line_to(x2, y2)
gc.stroke_path()
gc.set_stroke_color((0,0,0))
for i in range(7):
gc.move_to(0, 0)
gc.arc_to(100, 0, x2, y2, i*radiusstep+20.0)
gc.stroke_path()
class TestAffineMatrix(unittest.TestCase):
def test_arc_to(self):
gc = agg.GraphicsContextArray((640,480), "rgba32")
axes = agg.CompiledPath()
axes.move_to(0.5, 50.5)
axes.line_to(100.5, 50.5)
axes.move_to(50.5, 0.5)
axes.line_to(50.5, 100.5)
box = agg.CompiledPath()
box.move_to(0.5, 0.5)
box.line_to(100.5, 0.5)
box.line_to(100.5, 100.5)
box.line_to(0.5, 100.5)
box.close_path()
arc = agg.CompiledPath()
arc.move_to(10, 10)
arc.line_to(20, 10)
arc.arc_to(40, 10, 40, 30, 20.0)
arc.line_to(40, 40)
whole_shebang = agg.CompiledPath()
whole_shebang.save_ctm()
whole_shebang.add_path(axes)
whole_shebang.add_path(box)
whole_shebang.translate_ctm(0.0, 50.5)
whole_shebang.add_path(arc)
whole_shebang.translate_ctm(50.5, 50.5)
whole_shebang.rotate_ctm(-agg.pi/2)
whole_shebang.add_path(arc)
whole_shebang.rotate_ctm(agg.pi/2)
whole_shebang.translate_ctm(50.5, -50.5)
whole_shebang.rotate_ctm(-agg.pi)
whole_shebang.add_path(arc)
whole_shebang.rotate_ctm(agg.pi)
whole_shebang.translate_ctm(-50.5, -50.5)
whole_shebang.rotate_ctm(-3*agg.pi/2)
whole_shebang.add_path(arc)
whole_shebang.restore_ctm()
gc.set_stroke_color((1.0,0.0,0.0))
gc.set_line_width(1.0)
ctm1 = gc.get_ctm()
gc.translate_ctm(50.5, 300.5)
gc.add_path(whole_shebang)
gc.stroke_path()
gc.translate_ctm(130.5, 50.0)
ctm2 = gc.get_ctm()
gc.rotate_ctm(-agg.pi/6)
gc.add_path(whole_shebang)
gc.set_stroke_color((0.0,0.0,1.0))
gc.stroke_path()
gc.set_ctm(ctm2)
gc.translate_ctm(130.5, 0.0)
ctm2 = gc.get_ctm()
gc.rotate_ctm(-agg.pi/3)
gc.scale_ctm(1.0, 2.0)
gc.add_path(whole_shebang)
gc.stroke_path()
gc.set_ctm(ctm1)
ctm1 = gc.get_ctm()
gc.translate_ctm(150.5, 20.5)
draw_arcs(gc, 70.5, 96.5)
gc.translate_ctm(300.5, 0)
draw_arcs(gc, 160.5, 76.5, 50.0)
gc.set_ctm(ctm1)
gc.translate_ctm(120.5, 100.5)
gc.scale_ctm(-1.0, 1.0)
draw_arcs(gc, 70.5, 96.5)
gc.translate_ctm(-300.5, 100.5)
gc.scale_ctm(0.75, -1.0)
draw_arcs(gc, 160.5, 76.5, 50.0)
gc.save(save_path("arc_to.png"))
def test_arc(self):
gc = agg.GraphicsContextArray((640,648))
gc.save(save_path("arc.png"))
def test_skewing_matrix(self):
val = agg.skewing_matrix(pi/4.,pi/4.)
desired = numpy.array([ 1.0,1.0,1.0,1.0,0.0,0.0])
actual = val.asarray()
assert(numpy.allclose(desired,actual))
if __name__ == "__main__":
unittest.main()
|
tommy-u/enable
|
integrationtests/kiva/agg/test_arc.py
|
Python
|
bsd-3-clause
| 3,344
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import os
import time
import shutil
import traceback
import signal
from collections import OrderedDict
import re
import flask
import gevent
import gevent.event
import gevent.queue
from config import config_value
from . import utils
from status import Status
from job import Job
from dataset import DatasetJob
from model import ModelJob
from digits.utils import errors
from log import logger
class Resource(object):
"""
Stores information about which tasks are using a resource
"""
class ResourceAllocation(object):
"""
Marks that a task is using [part of] a resource
"""
def __init__(self, task, value):
"""
Arguments:
task -- which task is using the resource
value -- how much of the resource is being used
"""
self.task = task
self.value = value
def __init__(self, identifier=None, max_value=1):
"""
Keyword arguments:
identifier -- some way to identify this resource
max_value -- a numeric representation of the capacity of this resource
"""
if identifier is None:
self.identifier = id(self)
else:
self.identifier = identifier
self.max_value = max_value
self.allocations = []
def remaining(self):
"""
Returns the amount of this resource that is not being used
"""
return self.max_value - sum(a.value for a in self.allocations)
def allocate(self, task, value):
"""
A task is requesting to use this resource
"""
if self.remaining() - value < 0:
raise RuntimeError('Resource is already maxed out at %s/%s' % (
self.remaining(),
self.max_value)
)
self.allocations.append(self.ResourceAllocation(task, value))
def deallocate(self, task):
"""
The task has finished using this resource
"""
for i, a in enumerate(self.allocations):
if id(task) == id(a.task):
self.allocations.pop(i)
return True
return False
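# A minimal usage sketch for Resource (illustrative only; task_a and task_b
# stand in for real task objects and are not part of this module):
#
#   pool = Resource(max_value=2)
#   pool.allocate(task_a, 1)   # one unit in use, pool.remaining() == 1
#   pool.allocate(task_b, 1)   # pool.remaining() == 0
#   pool.deallocate(task_a)    # task_a's unit is released again
#
# Requesting more than remaining() raises a RuntimeError.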
class Scheduler:
"""
Coordinates execution of Jobs
"""
def __init__(self, gpu_list=None, verbose=False):
"""
Keyword arguments:
gpu_list -- a comma-separated string which is a list of GPU id's
verbose -- if True, print more errors
"""
self.jobs = OrderedDict()
self.verbose = verbose
# Keeps track of resource usage
self.resources = {
# TODO: break this into CPU cores, memory usage, IO usage, etc.
'parse_folder_task_pool': [Resource()],
'create_db_task_pool': [Resource(max_value=2)],
'analyze_db_task_pool': [Resource(max_value=4)],
'gpus': [Resource(identifier=index)
for index in gpu_list.split(',')] if gpu_list else [],
}
self.running = False
self.shutdown = gevent.event.Event()
def load_past_jobs(self):
"""
Look in the jobs directory and load all valid jobs
"""
loaded_jobs = []
failed_jobs = []
for dir_name in sorted(os.listdir(config_value('jobs_dir'))):
if os.path.isdir(os.path.join(config_value('jobs_dir'), dir_name)):
# Make sure it hasn't already been loaded
if dir_name in self.jobs:
continue
try:
job = Job.load(dir_name)
# The server might have crashed
if job.status.is_running():
job.status = Status.ABORT
for task in job.tasks:
if task.status.is_running():
task.status = Status.ABORT
# We might have changed some attributes here or in __setstate__
job.save()
loaded_jobs.append(job)
except Exception as e:
failed_jobs.append((dir_name, e))
# add DatasetJobs
for job in loaded_jobs:
if isinstance(job, DatasetJob):
self.jobs[job.id()] = job
# add ModelJobs
for job in loaded_jobs:
if isinstance(job, ModelJob):
try:
# load the DatasetJob
job.load_dataset()
self.jobs[job.id()] = job
except Exception as e:
failed_jobs.append((job.id(), e))
logger.info('Loaded %d jobs.' % len(self.jobs))
if len(failed_jobs):
logger.warning('Failed to load %d jobs.' % len(failed_jobs))
if self.verbose:
for job_id, e in failed_jobs:
logger.debug('%s - %s: %s' % (job_id, type(e).__name__, str(e)))
def add_job(self, job):
"""
Add a job to self.jobs
"""
if not self.running:
logger.error('Scheduler not running. Cannot add job.')
return False
else:
self.jobs[job.id()] = job
# Need to fix this properly
# if True or flask._app_ctx_stack.top is not None:
from digits.webapp import app, socketio
with app.app_context():
# send message to job_management room that the job is added
html = flask.render_template('job_row.html', job = job)
# Convert the html into a list for the jQuery
# DataTable.row.add() method. This regex removes the <tr>
# and <td> tags, and splits the string into one element
# for each cell.
html = re.sub('<tr[^<]*>[\s\n\r]*<td[^<]*>[\s\n\r]*', '', html)
html = re.sub('[\s\n\r]*</td>[\s\n\r]*</tr>', '', html)
html = re.split('</td>[\s\n\r]*<td[^<]*>', html)
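# For illustration (hypothetical row): '<tr id="x"><td>my-job</td><td>Running</td></tr>'
# would end up as ['my-job', 'Running'] after the substitutions and split above.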
socketio.emit('job update',
{
'update': 'added',
'job_id': job.id(),
'html': html
},
namespace='/jobs',
room='job_management',
)
if 'DIGITS_MODE_TEST' not in os.environ:
# Let the scheduler do a little work before returning
time.sleep(utils.wait_time())
return True
def get_job(self, job_id):
"""
Look through self.jobs to try to find the Job
Returns None if not found
"""
if job_id is None:
return None
return self.jobs.get(job_id, None)
def abort_job(self, job_id):
"""
Aborts a running Job
Returns True if the job was found and aborted
"""
job = self.get_job(job_id)
if job is None or not job.status.is_running():
return False
job.abort()
return True
def delete_job(self, job):
"""
Deletes an entire job folder from disk
Returns True if the Job was found and deleted
"""
if isinstance(job, str) or isinstance(job, unicode):
job_id = str(job)
elif isinstance(job, Job):
job_id = job.id()
else:
raise ValueError('called delete_job with a %s' % type(job))
dependent_jobs = []
# try to find the job
job = self.jobs.get(job_id, None)
if job:
if isinstance(job, DatasetJob):
# check for dependencies
for j in self.jobs.values():
if isinstance(j, ModelJob) and j.dataset_id == job.id():
logger.error('Cannot delete "%s" (%s) because "%s" (%s) depends on it.' % (job.name(), job.id(), j.name(), j.id()))
dependent_jobs.append(j.name())
if len(dependent_jobs)>0:
error_message = 'Cannot delete "%s" because %d model%s depend%s on it: %s' % (
job.name(),
len(dependent_jobs),
('s' if len(dependent_jobs) != 1 else ''),
('s' if len(dependent_jobs) == 1 else ''),
', '.join(['"%s"' % j for j in dependent_jobs]))
raise errors.DeleteError(error_message)
self.jobs.pop(job_id, None)
job.abort()
if os.path.exists(job.dir()):
shutil.rmtree(job.dir())
logger.info('Job deleted.', job_id=job_id)
from digits.webapp import socketio
socketio.emit('job update',
{
'update': 'deleted',
'job_id': job.id()
},
namespace='/jobs',
room='job_management',
)
return True
# see if the folder exists on disk
path = os.path.join(config_value('jobs_dir'), job_id)
path = os.path.normpath(path)
if os.path.dirname(path) == config_value('jobs_dir') and os.path.exists(path):
shutil.rmtree(path)
return True
return False
def running_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def completed_dataset_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, DatasetJob) and not j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def running_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def completed_model_jobs(self):
"""a query utility"""
return sorted(
[j for j in self.jobs.values() if isinstance(j, ModelJob) and not j.status.is_running()],
cmp=lambda x,y: cmp(y.id(), x.id())
)
def start(self):
"""
Start the Scheduler
Returns True on success
"""
if self.running:
return True
gevent.spawn(self.main_thread)
self.running = True
return True
def stop(self):
"""
Stop the Scheduler
Returns True if the shutdown was graceful
"""
self.shutdown.set()
wait_limit = 5
start = time.time()
while self.running:
if time.time() - start > wait_limit:
return False
time.sleep(0.1)
return True
def main_thread(self):
"""
Monitors the jobs in self.jobs, updates their statuses,
and puts their tasks in queues to be processed by other threads
"""
signal.signal(signal.SIGTERM, self.sigterm_handler)
try:
last_saved = None
while not self.shutdown.is_set():
# Iterate over a copy of the values so jobs can be removed while we loop
for job in self.jobs.values():
if job.status == Status.INIT:
def start_this_job(job):
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.WAIT
else:
job.status = Status.RUN
if 'DIGITS_MODE_TEST' in os.environ:
start_this_job(job)
else:
# Delay start by one second for initial page load
gevent.spawn_later(1, start_this_job, job)
if job.status == Status.WAIT:
if isinstance(job, ModelJob):
if job.dataset.status == Status.DONE:
job.status = Status.RUN
elif job.dataset.status in [Status.ABORT, Status.ERROR]:
job.abort()
else:
job.status = Status.RUN
if job.status == Status.RUN:
alldone = True
for task in job.tasks:
if task.status in [Status.INIT, Status.WAIT]:
alldone = False
# try to start the task
if task.ready_to_queue():
requested_resources = task.offer_resources(self.resources)
if requested_resources is None:
task.status = Status.WAIT
else:
if self.reserve_resources(task, requested_resources):
gevent.spawn(self.run_task,
task, requested_resources)
elif task.status == Status.RUN:
# job is not done
alldone = False
elif task.status in [Status.DONE, Status.ABORT]:
# job is done
pass
elif task.status == Status.ERROR:
# propagate error status up to job
job.status = Status.ERROR
alldone = False
break
else:
logger.warning('Unrecognized task status: "%s"', task.status, job_id=job.id())
if alldone:
job.status = Status.DONE
logger.info('Job complete.', job_id=job.id())
job.save()
# save running jobs every 15 seconds
if not last_saved or time.time()-last_saved > 15:
for job in self.jobs.values():
if job.status.is_running():
job.save()
last_saved = time.time()
time.sleep(utils.wait_time())
except KeyboardInterrupt:
pass
# Shutdown
for job in self.jobs.values():
job.abort()
job.save()
self.running = False
def sigterm_handler(self, signal, frame):
"""
Gunicorn shuts down workers with SIGTERM, not SIGKILL
"""
self.shutdown.set()
def task_error(self, task, error):
"""
Handle an error while executing a task
"""
logger.error('%s: %s' % (type(error).__name__, error), job_id=task.job_id)
task.exception = error
task.traceback = traceback.format_exc()
task.status = Status.ERROR
def reserve_resources(self, task, resources):
"""
Reserve resources for a task
"""
try:
# reserve resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
found = False
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.allocate(task, value)
self.emit_gpus_available()
found = True
break
if not found:
raise RuntimeError('Resource "%s" with identifier="%s" not found' % (
resource_type, identifier))
task.current_resources = resources
return True
except Exception as e:
self.task_error(task, e)
self.release_resources(task, resources)
return False
def release_resources(self, task, resources):
"""
Release resources previously reserved for a task
"""
# release resources
for resource_type, requests in resources.iteritems():
for identifier, value in requests:
for resource in self.resources[resource_type]:
if resource.identifier == identifier:
resource.deallocate(task)
self.emit_gpus_available()
task.current_resources = None
def run_task(self, task, resources):
"""
Executes a task
Arguments:
task -- the task to run
resources -- the resources allocated for this task
a dict mapping resource_type to lists of (identifier, value) tuples
"""
try:
task.run(resources)
except Exception as e:
self.task_error(task, e)
finally:
self.release_resources(task, resources)
def emit_gpus_available(self):
"""
Emit gpu availability via socketio.emit
"""
from digits.webapp import scheduler, socketio
socketio.emit('server update',
{
'update': 'gpus_available',
'total_gpu_count': len(self.resources['gpus']),
'remaining_gpu_count': sum(r.remaining() for r in scheduler.resources['gpus']),
},
namespace='/jobs',
room='job_management'
)
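# A rough lifecycle sketch (illustrative; in practice digits.webapp drives this):
#
#   scheduler = Scheduler(gpu_list='0,1', verbose=True)
#   scheduler.load_past_jobs()
#   scheduler.start()        # spawns main_thread via gevent
#   scheduler.add_job(job)   # job is a DatasetJob or ModelJob instance
#   ...
#   scheduler.stop()         # graceful shutdown; returns False on timeout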
|
batra-mlp-lab/DIGITS
|
digits/scheduler.py
|
Python
|
bsd-3-clause
| 18,363
|
"Run the keyword-names example pipeline, which has keyword-style inputs."
import kiveapi
import example_tools
# Use HTTPS on a real server, so your password is encrypted.
# Don't put your real password in source code, store it in a text file
# that is only readable by your user account or some more secure storage.
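# For example (a sketch only; "kive_credentials.txt" is a hypothetical file
# readable only by your user account):
#
# with open("kive_credentials.txt") as cred_file:
#     kive_user, kive_password = cred_file.read().split()
# session = kiveapi.KiveAPI("https://your-kive-server")
# session.login(kive_user, kive_password)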
session = kiveapi.KiveAPI("http://localhost:8000")
session.login('kive', 'kive')
# Get datasets to collate
names_dataset = session.find_datasets(name="example_names.csv")[0]
salutations_dataset = session.find_datasets(name="salutations.csv")[0]
# Get the collation app from the samplecode container
kwsalutationsapp = session.endpoints.containerapps.filter("name", "kw_salutations")[0]
appargs = session.get(kwsalutationsapp["argument_list"]).json()
# Start a run of the app providing the datasets as arguments
inputargs = {a["name"]: a["url"] for a in appargs if a["type"] == "I"}
runspec = {
"name": "API Example 5",
"app": kwsalutationsapp["url"],
"datasets": [
{
"argument": inputargs["names"],
"dataset": names_dataset.raw["url"],
},
{
"argument": inputargs["salutations"],
"dataset": salutations_dataset.raw["url"],
},
]
}
print("Starting example run...")
containerrun = session.endpoints.containerruns.post(json=runspec)
# Monitor the run for completion
containerrun = example_tools.await_containerrun(session, containerrun)
# Retrieve the output and save it to a file
run_datasets = session.get(containerrun["dataset_list"]).json()
for run_dataset in run_datasets:
if run_dataset.get("argument_type") == "O":
dataset = session.get(run_dataset["dataset"]).json()
filename = dataset["name"]
print(f" downloading {filename}")
with open(filename, "wb") as outf:
session.download_file(outf, dataset["download_url"])
print("Example run finished")
|
cfe-lab/Kive
|
api/example_5.py
|
Python
|
bsd-3-clause
| 1,920
|
__all__ = ['Kane']
from sympy import Symbol, zeros, Matrix, diff, solve_linear_system_LU, eye
from sympy.utilities import default_sort_key
from sympy.physics.mechanics.essential import ReferenceFrame, dynamicsymbols
from sympy.physics.mechanics.particle import Particle
from sympy.physics.mechanics.point import Point
from sympy.physics.mechanics.rigidbody import RigidBody
class Kane(object):
"""Kane's method object.
This object is used to do the "book-keeping" as you go through and form
equations of motion in the way Kane presents in:
Kane, T., Levinson, D. Dynamics Theory and Applications. 1985 McGraw-Hill
The attributes are for equations in the form [M] udot = forcing.
Very Important Warning: simp is set to True by default, which benefits
smaller, simpler systems. If your system is large, it will lead to
slowdowns; however, turning it off might have negative implications for
numerical evaluation. Care needs to be taken to appropriately reduce
expressions generated with simp==False, as they might be too large
themselves. Computing the relationship between independent and dependent
speeds (when dealing with non-holonomic systems) benefits from simp being
set to True (during the .speeds() method); the same is true for
linearization of non-holonomic systems. If numerical evaluations are
unsuccessful with simp==False, try setting simp to True only for these
methods; this provides some compromise between the two options.
Attributes
==========
auxiliary : Matrix
If applicable, the set of auxiliary Kane's
equations used to solve for non-contributing
forces.
mass_matrix : Matrix
The system's mass matrix
forcing : Matrix
The system's forcing vector
simp : Boolean
Flag determining whether simplification of symbolic matrix
inversion can occur or not
mass_matrix_full : Matrix
The "mass matrix" for the u's and q's
forcing_full : Matrix
The "forcing vector" for the u's and q's
Examples
========
This is a simple example for a one degree of freedom translational
spring-mass-damper.
In this example, we first need to do the kinematics.
This involves creating generalized speeds and coordinates and their
derivatives.
Then we create a point and set its velocity in a frame::
>>> from sympy import symbols
>>> from sympy.physics.mechanics import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.mechanics import Point, Particle, Kane
>>> q, u = dynamicsymbols('q u')
>>> qd, ud = dynamicsymbols('q u', 1)
>>> m, c, k = symbols('m c k')
>>> N = ReferenceFrame('N')
>>> P = Point('P')
>>> P.set_vel(N, u * N.x)
Next we need to arrange/store information in the way Kane requires.
The kinematic differential equations need to be stored in a dict.
A list of forces/torques must be constructed, where each entry in the list
is a (Point, Vector) or (ReferenceFrame, Vector) tuple, where the Vectors
represent the Force or Torque.
Next a particle needs to be created, and it needs to have a point and mass
assigned to it.
Finally, a list of all bodies and particles needs to be created::
>>> kd = [qd - u]
>>> FL = [(P, (-k * q - c * u) * N.x)]
>>> pa = Particle('pa', P, m)
>>> BL = [pa]
Finally we can generate the equations of motion.
First we create the Kane object and supply an inertial frame.
Next we pass it the generalized speeds.
Then we pass it the kinematic differential equation dict.
Next we form FR* and FR to complete: Fr + Fr* = 0.
We have the equations of motion at this point.
It makes sense to rearrange them though, so we calculate the mass matrix and
the forcing terms, for E.o.M. in the form: [MM] udot = forcing, where MM is
the mass matrix, udot is a vector of the time derivatives of the
generalized speeds, and forcing is a vector representing "forcing" terms::
>>> KM = Kane(N)
>>> KM.coords([q])
>>> KM.speeds([u])
>>> KM.kindiffeq(kd)
>>> (fr, frstar) = KM.kanes_equations(FL, BL)
>>> MM = KM.mass_matrix
>>> forcing = KM.forcing
>>> rhs = MM.inv() * forcing
>>> rhs
[-(c*u(t) + k*q(t))/m]
>>> KM.linearize()[0]
[0, 1]
[k, c]
Please look at the documentation pages for more information on how to
perform linearization and how to deal with dependent coordinates & speeds,
and how to deal with bringing non-contributing forces into evidence.
"""
simp = True
def __init__(self, frame):
"""Supply the inertial frame for Kane initialization. """
# Big storage things
self._inertial = frame
self._forcelist = None
self._bodylist = None
self._fr = None
self._frstar = None
self._rhs = None
self._aux_eq = None
# States
self._q = None
self._qdep = []
self._qdot = None
self._u = None
self._udep = []
self._udot = None
self._uaux = None
# Differential Equations Matrices
self._k_d = None
self._f_d = None
self._k_kqdot = None
self._k_ku = None
self._f_k = None
# Constraint Matrices
self._f_h = Matrix([])
self._k_nh = Matrix([])
self._f_nh = Matrix([])
self._k_dnh = Matrix([])
self._f_dnh = Matrix([])
def _find_dynamicsymbols(self, inlist, insyms=[]):
"""Finds all non-supplied dynamicsymbols in the expressions."""
from sympy.core.function import AppliedUndef, Derivative
t = dynamicsymbols._t
return reduce(set.union, [set([i]) for j in inlist
for i in j.atoms(AppliedUndef, Derivative)
if i.atoms() == set([t])], set()) - insyms
def _find_othersymbols(self, inlist, insyms=[]):
"""Finds all non-dynamic symbols in the expressions."""
return list(reduce(set.union, [i.atoms(Symbol) for i in inlist]) -
set(insyms))
def _mat_inv_mul(self, A, B):
"""Internal Function
Computes A^-1 * B symbolically w/ substitution, where B is not
necessarily a vector, but can be a matrix.
"""
# Note: investigate difficulty in only creating symbols for non-zero
# entries; this could speed things up, perhaps?
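# Sketch of the approach used below: replace the entries of A and B with
# placeholder symbols, LU-solve the placeholder system column by column,
# optionally simplify, and substitute the original entries back at the end.
# This keeps intermediate expressions from blowing up during the solve.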
r1, c1 = A.shape
r2, c2 = B.shape
temp1 = Matrix(r1, c1, lambda i, j: Symbol('x' + str(j + r1 * i)))
temp2 = Matrix(r2, c2, lambda i, j: Symbol('y' + str(j + r2 * i)))
for i in range(len(temp1)):
if A[i] == 0:
temp1[i] = 0
for i in range(len(temp2)):
if B[i] == 0:
temp2[i] = 0
temp3 = []
for i in range(c2):
temp3.append(temp1.LUsolve(temp2.extract(range(r2), [i])))
temp3 = Matrix([i.T for i in temp3]).T
if Kane.simp == True:
temp3.simplify()
return temp3.subs(dict(zip(temp1, A))).subs(dict(zip(temp2, B)))
def coords(self, qind, qdep=[], coneqs=[]):
"""Supply all the generalized coordiantes in a list.
If some coordinates are dependent, supply them as part of qdep. Their
dependent nature will only show up in the linearization process though.
Parameters
==========
qind : list
A list of independent generalized coords
qdep : list
List of dependent coordinates
coneqs : list
List of expressions which are equal to zero; these are the
configuration constraint equations
"""
if not isinstance(qind, (list, tuple)):
raise TypeError('Generalized coords. must be supplied in a list.')
self._q = qind + qdep
self._qdot = [diff(i, dynamicsymbols._t) for i in self._q]
if not isinstance(qdep, (list, tuple)):
raise TypeError('Dependent coordinates and constraints must each be '
'provided in their own list.')
if len(qdep) != len(coneqs):
raise ValueError('There must be an equal number of dependent '
'coordinates and constraints.')
coneqs = Matrix(coneqs)
self._qdep = qdep
self._f_h = coneqs
def speeds(self, uind, udep=[], coneqs=[], diffconeqs=None, u_auxiliary=[]):
"""Supply all the generalized speeds in a list.
If there are motion constraints or auxiliary speeds, they are provided
here as well.
Parameters
==========
uind : list
A list of independent generalized speeds
udep : list
Optional list of dependent speeds
coneqs : list
Optional List of constraint expressions; these are expressions
which are equal to zero which define a speed (motion) constraint.
diffconeqs : list
Optional, calculated automatically otherwise; list of constraint
equations; again equal to zero, but define an acceleration
constraint.
u_auxiliary : list
An optional list of auxiliary speeds used for bringing
non-contributing forces into evidence
"""
if not isinstance(uind, (list, tuple)):
raise TypeError('Generalized speeds must be supplied in a list.')
self._u = uind + udep
self._udot = [diff(i, dynamicsymbols._t) for i in self._u]
self._uaux = u_auxiliary
if not isinstance(udep, (list, tuple)):
raise TypeError('Dependent speeds and constraints must each be '
'provided in their own list.')
if len(udep) != len(coneqs):
raise ValueError('There must be an equal number of dependent '
'speeds and constraints.')
if diffconeqs != None:
if len(udep) != len(diffconeqs):
raise ValueError('There must be an equal number of dependent '
'speeds and constraints.')
if len(udep) != 0:
u = self._u
uzero = dict(zip(u, [0] * len(u)))
coneqs = Matrix(coneqs)
udot = self._udot
udotzero = dict(zip(udot, [0] * len(udot)))
self._udep = udep
self._f_nh = coneqs.subs(uzero)
self._k_nh = (coneqs - self._f_nh).jacobian(u)
# if no differentiated non holonomic constraints were given, calculate
if diffconeqs == None:
self._k_dnh = self._k_nh
self._f_dnh = (self._k_nh.diff(dynamicsymbols._t) * Matrix(u) +
self._f_nh.diff(dynamicsymbols._t))
else:
self._f_dnh = diffconeqs.subs(udotzero)
self._k_dnh = (diffconeqs - self._f_dnh).jacobian(udot)
o = len(u) # number of generalized speeds
m = len(udep) # number of motion constraints
p = o - m # number of independent speeds
# For a reminder, form of non-holonomic constraints is:
# B u + C = 0
B = self._k_nh.extract(range(m), range(o))
C = self._f_nh.extract(range(m), [0])
# We partition B into independent and dependent columns
# Ars is then -Bdep.inv() * Bind, and it relates dependent speeds to
# independent speeds as: udep = Ars uind, neglecting the C term here.
self._depB = B
self._depC = C
mr1 = B.extract(range(m), range(p))
ml1 = B.extract(range(m), range(p, o))
self._Ars = - self._mat_inv_mul(ml1, mr1)
def kindiffdict(self):
"""Returns the qdot's in a dictionary. """
if self._k_kqdot == None:
raise ValueError('Kin. diff. eqs need to be supplied first.')
sub_dict = solve_linear_system_LU(Matrix([self._k_kqdot.T,
-(self._k_ku * Matrix(self._u) + self._f_k).T]).T, self._qdot)
return sub_dict
def kindiffeq(self, kdeqs):
"""Supply all the kinematic differential equations in a list.
They should be in the form [Expr1, Expr2, ...] where Expri is equal to
zero
Parameters
==========
kdeqs : list (of Expr)
The list of kinematic differential equations
"""
if len(self._q) != len(kdeqs):
raise ValueError('There must be an equal number of kinematic '
'differential equations and coordinates.')
uaux = self._uaux
# dictionary of auxiliary speeds which are equal to zero
uaz = dict(zip(uaux, [0] * len(uaux)))
kdeqs = Matrix(kdeqs).subs(uaz)
qdot = self._qdot
qdotzero = dict(zip(qdot, [0] * len(qdot)))
u = self._u
uzero = dict(zip(u, [0] * len(u)))
f_k = kdeqs.subs(uzero).subs(qdotzero)
k_kqdot = (kdeqs.subs(uzero) - f_k).jacobian(Matrix(qdot))
k_ku = (kdeqs.subs(qdotzero) - f_k).jacobian(Matrix(u))
self._k_ku = self._mat_inv_mul(k_kqdot, k_ku)
self._f_k = self._mat_inv_mul(k_kqdot, f_k)
self._k_kqdot = eye(len(qdot))
def _form_fr(self, fl):
"""Form the generalized active force.
Computes the vector of the generalized active force vector.
Used to compute E.o.M. in the form Fr + Fr* = 0.
Parameters
==========
fl : list
Takes in a list of (Point, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
"""
if not isinstance(fl, (list, tuple)):
raise TypeError('Forces must be supplied in a list of: lists or '
'tuples.')
N = self._inertial
self._forcelist = fl[:]
u = self._u
o = len(u)
FR = zeros(o, 1)
# goes through each Fr (where this loop's i is r)
for i, v in enumerate(u):
# does this for each force pair in list (pair is w)
for j, w in enumerate(fl):
if isinstance(w[0], ReferenceFrame):
speed = w[0].ang_vel_in(N)
FR[i] += speed.diff(v, N) & w[1]
elif isinstance(w[0], Point):
speed = w[0].vel(N)
FR[i] += speed.diff(v, N) & w[1]
else:
raise TypeError('First entry in force pair must be a Point or'
' ReferenceFrame.')
# for dependent speeds
if len(self._udep) != 0:
m = len(self._udep)
p = o - m
FRtilde = FR.extract(range(p), [0])
FRold = FR.extract(range(p, o), [0])
FRtilde += self._Ars.T * FRold
FR = FRtilde
self._fr = FR
return FR
def _form_frstar(self, bl):
"""Form the generalized inertia force.
Computes the vector of the generalized inertia force vector.
Used to compute E.o.M. in the form Fr + Fr* = 0.
Parameters
==========
bl : list
A list of all RigidBody's and Particle's in the system.
"""
if not isinstance(bl, (list, tuple)):
raise TypeError('Bodies must be supplied in a list.')
if self._fr == None:
raise ValueError('Calculate Fr first, please.')
t = dynamicsymbols._t
N = self._inertial
self._bodylist = bl
u = self._u # all speeds
udep = self._udep # dependent speeds
o = len(u)
p = o - len(udep)
udot = self._udot
udotzero = dict(zip(udot, [0] * len(udot)))
uaux = self._uaux
uauxdot = [diff(i, t) for i in uaux]
# dictionary of auxiliary speeds which are equal to zero
uaz = dict(zip(uaux, [0] * len(uaux)))
# dictionary of derivatives of auxiliary speeds which are equal to zero
uadz = dict(zip(uauxdot, [0] * len(uauxdot)))
# Form R*, T* for each body or particle in the list
# This is stored as a list of tuples [(r*, t*),...]
# Each tuple is for a body or particle
# Within each rs is a tuple and ts is a tuple
# These have the same structure: ([list], value)
# The list is the coefficients of rs/ts wrt udots, value is everything
# else in the expression
# Partial velocities are stored as a list of tuple; a tuple for each
# body
# Each tuple has two elements, lists which represent the partial
# velocity for each ur; the first list is translational partial
# velocities, the second list is rotational partial velocities
MM = zeros(o, o)
nonMM = zeros(o, 1)
rsts = []
partials = []
for i, v in enumerate(bl): # go through list of bodies, particles
if isinstance(v, RigidBody):
om = v.frame.ang_vel_in(N).subs(uadz).subs(uaz) # ang velocity
omp = v.frame.ang_vel_in(N) # ang velocity, for partials
alp = v.frame.ang_acc_in(N).subs(uadz).subs(uaz) # ang acc
ve = v.mc.vel(N).subs(uadz).subs(uaz) # velocity
vep = v.mc.vel(N) # velocity, for partials
acc = v.mc.acc(N).subs(uadz).subs(uaz) # acceleration
m = (v.mass).subs(uadz).subs(uaz)
I, P = v.inertia
I = I.subs(uadz).subs(uaz)
if P != v.mc:
# redefine I about mass center
# have I S/O, want I S/S*
# I S/O = I S/S* + I S*/O; I S/S* = I S/O - I S*/O
# This block of code needs to have a test written for it
print('This functionality has not been tested yet, '
'use at your own risk.')
f = v.frame
d = v.mc.pos_from(P)
I -= m * (((f.x | f.x) + (f.y | f.y) + (f.z | f.z)) *
(d & d) - (d | d))
templist = []
# One could think of r star as a collection of coefficients of
# the udots plus another term. What we do here is get all of
# these coefficients and store them in a list, then we get the
# "other" term and put the list and other term in a tuple, for
# each body/particle. The same is done for t star. The reason
# for this is to not let the expressions get too large; so we
# keep them separate for as long as possible
for j, w in enumerate(udot):
templist.append(-m * acc.diff(w, N))
other = -m.diff(t) * ve - m * acc.subs(udotzero)
rs = (templist, other)
templist = []
# see above note
for j, w in enumerate(udot):
templist.append(-I & alp.diff(w, N))
other = -((I.dt(v.frame) & om) + (I & alp.subs(udotzero))
+ (om ^ (I & om)))
ts = (templist, other)
tl1 = []
tl2 = []
# calculates the partials only once and stores them for later
for j, w in enumerate(u):
tl1.append(vep.diff(w, N))
tl2.append(omp.diff(w, N))
partials.append((tl1, tl2))
elif isinstance(v, Particle):
ve = v.point.vel(N).subs(uadz).subs(uaz)
vep = v.point.vel(N)
acc = v.point.acc(N).subs(uadz).subs(uaz)
m = v.mass.subs(uadz).subs(uaz)
templist = []
# see above note
for j, w in enumerate(udot):
templist.append(-m * acc.diff(w, N))
other = -m.diff(t) * ve - m * acc.subs(udotzero)
rs = (templist, other)
# We make an empty t star here so that the later code
# doesn't care whether it's operating on a body or a particle
ts = ([0] * len(u), 0)
tl1 = []
tl2 = []
# calculates the partials only once, makes 0's for angular
# partials so the later code is body/particle independent
for j, w in enumerate(u):
tl1.append(vep.diff(w, N))
tl2.append(0)
partials.append((tl1, tl2))
else:
raise TypeError('The body list needs RigidBody or '
'Particle as list elements.')
rsts.append((rs, ts))
# Use R*, T* and partial velocities to form FR*
FRSTAR = zeros(o, 1)
# does this for each body in the list
for i, v in enumerate(rsts):
rs, ts = v # unpack r*, t*
vps, ops = partials[i] # unpack vel. partials, ang. vel. partials
# Computes the mass matrix entries from r*; these are from the list
# in the rstar tuple
ii = 0
for x in vps:
for w in rs[0]:
MM[ii] += w & x
ii += 1
# Computes the mass matrix entries from t*; these are from the list
# in the tstar tuple
ii = 0
for x in ops:
for w in ts[0]:
MM[ii] += w & x
ii += 1
# Non mass matrix entries from rstar, from the 'other' term in the rstar
# tuple
for j, w in enumerate(vps):
nonMM[j] += w & rs[1]
# Non mass matrix entries from tstar, from the 'other' term in the tstar
# tuple
for j, w in enumerate(ops):
nonMM[j] += w & ts[1]
FRSTAR = MM * Matrix(udot) + nonMM
# For motion constraints, m is the number of constraints
# Really, one should just look at Kane's book for descriptions of this
# process
if len(self._udep) != 0:
FRSTARtilde = FRSTAR.extract(range(p), [0])
FRSTARold = FRSTAR.extract(range(p, o), [0])
FRSTARtilde += self._Ars.T * FRSTARold
FRSTAR = FRSTARtilde
MMi = MM.extract(range(p), range(o))
MMd = MM.extract(range(p, o), range(o))
MM = MMi + self._Ars.T * MMd
self._frstar = FRSTAR
zeroeq = self._fr + self._frstar
zeroeq = zeroeq.subs(udotzero)
self._k_d = MM
self._f_d = zeroeq
return FRSTAR
def kanes_equations(self, FL, BL):
""" Method to form Kane's equations, Fr + Fr* = 0.
Returns (Fr, Fr*). In the case where auxiliary generalized speeds are
present (say, s auxiliary speeds, o generalized speeds, and m motion
constraints) the length of the returned vectors will be o - m + s in
length. The first o - m equations will be the constrained Kane's
equations, then the s auxiliary Kane's equations. These auxiliary
equations can be accessed via the auxiliary_eqs property.
Parameters
==========
FL : list
Takes in a list of (Point, Vector) or (ReferenceFrame, Vector)
tuples which represent the force at a point or torque on a frame.
BL : list
A list of all RigidBody's and Particle's in the system.
"""
if (self._q == None) or (self._u == None):
raise ValueError('Speeds and coordinates must be supplied first.')
if (self._k_kqdot == None):
raise ValueError('Supply kinematic differential equations, please.')
fr = self._form_fr(FL)
frstar = self._form_frstar(BL)
if self._uaux != []:
km = Kane(self._inertial)
km.coords(self._q)
km.speeds(self._uaux, u_auxiliary=self._uaux)
fraux = km._form_fr(FL)
frstaraux = km._form_frstar(BL)
self._aux_eq = fraux + frstaraux
self._fr = fr.col_join(fraux)
self._frstar = frstar.col_join(frstaraux)
return (self._fr, self._frstar)
else:
return (fr, frstar)
@property
def auxiliary_eqs(self):
if (self._fr == None) or (self._frstar == None):
raise ValueError('Need to compute Fr, Fr* first.')
if self._uaux == []:
raise ValueError('No auxiliary speeds have been declared.')
return self._aux_eq
def linearize(self):
""" Method used to generate linearized equations.
Note that for linearization, it is assumed that time is not perturbed,
but only the coordinates and speeds. The "forcing" vector's jacobian is
computed with respect to the state vector in the form [Qi, Qd, Ui, Ud].
This is the "f_lin_A" matrix.
It also finds any non-state dynamicsymbols and computes the jacobian of
the "forcing" vector with respect to them. This is the "f_lin_B"
matrix; if this is empty, an empty matrix is created.
Consider the following:
If our equations are: [M]qudot = f, where [M] is the full mass matrix,
qudot is a vector of the derivatives of the coordinates and speeds, and
f is the full forcing vector, the linearization process is as follows:
[M]qudot = [f_lin_A]qu + [f_lin_B]y, where qu is the state vector,
f_lin_A is the jacobian of the full forcing vector with respect to the
state vector, f_lin_B is the jacobian of the full forcing vector with
respect to any non-speed/coordinate dynamicsymbols which show up in the
full forcing vector, and y is a vector of those dynamic symbols (each
column in f_lin_B corresponds to a row of the y vector, each of which
is a non-speed/coordinate dynamicsymbol).
To get the traditional state-space A and B matrix, you need to multiply
the f_lin_A and f_lin_B matrices by the inverse of the mass matrix.
Caution needs to be taken when inverting large symbolic matrices;
substituting in numerical values before inverting will work better.
A tuple of (f_lin_A, f_lin_B, other_dynamicsymbols) is returned.
"""
if (self._fr == None) or (self._frstar == None):
raise ValueError('Need to compute Fr, Fr* first.')
# Note that this is now unnecessary, and it should never be
# encountered; I still think it should be in here in case the user
# manually sets these matrices incorrectly.
for i in self._q:
if self._k_kqdot.diff(i) != 0 * self._k_kqdot:
raise ValueError('Matrix K_kqdot must not depend on any q.')
t = dynamicsymbols._t
uaux = self._uaux
uauxdot = [diff(i, t) for i in uaux]
# dictionary of auxiliary speeds & derivatives which are equal to zero
subdict = dict(zip(uaux + uauxdot, [0] * (len(uaux) + len(uauxdot))))
# Checking for dynamic symbols outside the dynamic differential
# equations; throws error if there is.
insyms = set(self._q + self._qdot + self._u + self._udot + uaux + uauxdot)
if any(self._find_dynamicsymbols(i, insyms) for i in [self._k_kqdot,
self._k_ku,
self._f_k,
self._k_dnh,
self._f_dnh,
self._k_d]):
raise ValueError('Cannot have dynamic symbols outside dynamic '
'forcing vector.')
other_dyns = list(self._find_dynamicsymbols(self._f_d.subs(subdict),
insyms))
# make it canonically ordered so the jacobian is canonical
other_dyns.sort(key=default_sort_key)
for i in other_dyns:
if diff(i, dynamicsymbols._t) in other_dyns:
raise ValueError('Cannot have derivatives of specified '
'quantities when linearizing forcing terms.')
o = len(self._u) # number of speeds
n = len(self._q) # number of coordinates
l = len(self._qdep) # number of configuration constraints
m = len(self._udep) # number of motion constraints
qi = Matrix(self._q[: n - l]) # independent coords
qd = Matrix(self._q[n - l: n]) # dependent coords; could be empty
ui = Matrix(self._u[: o - m]) # independent speeds
ud = Matrix(self._u[o - m: o]) # dependent speeds; could be empty
qdot = Matrix(self._qdot) # time derivatives of coordinates
# with equations in the form MM udot = forcing, expand that to:
# MM_full [q,u].T = forcing_full. This combines coordinates and
# speeds together for the linearization, which is necessary for the
# linearization process, due to dependent coordinates. f1 is the rows
# from the kinematic differential equations, f2 is the rows from the
# dynamic differential equations (and differentiated non-holonomic
# constraints).
f1 = self._k_ku * Matrix(self._u) + self._f_k
f2 = self._f_d
# Only want to do this if these matrices have been filled in, which
# occurs when there are dependent speeds
if m != 0:
f2 = self._f_d.col_join(self._f_dnh)
fnh = self._f_nh + self._k_nh * Matrix(self._u)
f1 = f1.subs(subdict)
f2 = f2.subs(subdict)
fh = self._f_h.subs(subdict)
fku = (self._k_ku * Matrix(self._u)).subs(subdict)
fkf = self._f_k.subs(subdict)
# In the code below, we are applying the chain rule by hand on these
# things. All the matrices have been changed into vectors (by
# multiplying the dynamic symbols which it is paired with), so we can
# take the jacobian of them. The basic operation is take the jacobian
# of the f1, f2 vectors wrt all of the q's and u's. f1 is a function of
# q, u, and t; f2 is a function of q, qdot, u, and t. In the code
# below, we are not considering perturbations in t. So if f1 is a
# function of the q's, u's but some of the q's or u's could be
# dependent on other q's or u's (qd's might be dependent on qi's, ud's
# might be dependent on ui's or qi's), so what we do is take the
# jacobian of the f1 term wrt qi's and qd's, the jacobian wrt the qd's
# gets multiplied by the jacobian of qd wrt qi, this is extended for
# the ud's as well. dqd_dqi is computed by taking a taylor expansion of
# the holonomic constraint equations about q*, treating q* - q as dq,
# separating into dqd (dependent q's) and dqi (independent q's) and then
# rearranging for dqd/dqi. This is again extended for the speeds.
# First case: configuration and motion constraints
if (l != 0) and (m != 0):
fh_jac_qi = fh.jacobian(qi)
fh_jac_qd = fh.jacobian(qd)
fnh_jac_qi = fnh.jacobian(qi)
fnh_jac_qd = fnh.jacobian(qd)
fnh_jac_ui = fnh.jacobian(ui)
fnh_jac_ud = fnh.jacobian(ud)
fku_jac_qi = fku.jacobian(qi)
fku_jac_qd = fku.jacobian(qd)
fku_jac_ui = fku.jacobian(ui)
fku_jac_ud = fku.jacobian(ud)
fkf_jac_qi = fkf.jacobian(qi)
fkf_jac_qd = fkf.jacobian(qd)
f1_jac_qi = f1.jacobian(qi)
f1_jac_qd = f1.jacobian(qd)
f1_jac_ui = f1.jacobian(ui)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qi = f2.jacobian(qi)
f2_jac_qd = f2.jacobian(qd)
f2_jac_ui = f2.jacobian(ui)
f2_jac_ud = f2.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
dqd_dqi = - self._mat_inv_mul(fh_jac_qd, fh_jac_qi)
dud_dqi = self._mat_inv_mul(fnh_jac_ud, (fnh_jac_qd *
dqd_dqi - fnh_jac_qi))
dud_dui = - self._mat_inv_mul(fnh_jac_ud, fnh_jac_ui)
dqdot_dui = - self._k_kqdot.inv() * (fku_jac_ui +
fku_jac_ud * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku_jac_qi + fkf_jac_qi +
(fku_jac_qd + fkf_jac_qd) * dqd_dqi + fku_jac_ud * dud_dqi)
f1_q = f1_jac_qi + f1_jac_qd * dqd_dqi + f1_jac_ud * dud_dqi
f1_u = f1_jac_ui + f1_jac_ud * dud_dui
f2_q = (f2_jac_qi + f2_jac_qd * dqd_dqi + f2_jac_qdot * dqdot_dqi +
f2_jac_ud * dud_dqi)
f2_u = f2_jac_ui + f2_jac_ud * dud_dui + f2_jac_qdot * dqdot_dui
# Second case: configuration constraints only
elif l != 0:
dqd_dqi = - self._mat_inv_mul(fh.jacobian(qd), fh.jacobian(qi))
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + (fku.jacobian(qd) + fkf.jacobian(qd)) *
dqd_dqi)
f1_q = (f1.jacobian(qi) + f1.jacobian(qd) * dqd_dqi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = (f2.jacobian(qi) + f2.jacobian(qd) * dqd_dqi +
f2_jac_qdot * dqdot_dqi)
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
# Third case: motion constraints only
elif m != 0:
dud_dqi = self._mat_inv_mul(fnh.jacobian(ud), - fnh.jacobian(qi))
dud_dui = - self._mat_inv_mul(fnh.jacobian(ud), fnh.jacobian(ui))
dqdot_dui = - self._k_kqdot.inv() * (fku.jacobian(ui) +
fku.jacobian(ud) * dud_dui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi) + fku.jacobian(ud) * dud_dqi)
f1_jac_ud = f1.jacobian(ud)
f2_jac_qdot = f2.jacobian(qdot)
f2_jac_ud = f2.jacobian(ud)
f1_q = f1.jacobian(qi) + f1_jac_ud * dud_dqi
f1_u = f1.jacobian(ui) + f1_jac_ud * dud_dui
f2_q = (f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi + f2_jac_ud
* dud_dqi)
f2_u = (f2.jacobian(ui) + f2_jac_ud * dud_dui + f2_jac_qdot *
dqdot_dui)
# Fourth case: No constraints
else:
dqdot_dui = - self._k_kqdot.inv() * fku.jacobian(ui)
dqdot_dqi = - self._k_kqdot.inv() * (fku.jacobian(qi) +
fkf.jacobian(qi))
f1_q = f1.jacobian(qi)
f1_u = f1.jacobian(ui)
f2_jac_qdot = f2.jacobian(qdot)
f2_q = f2.jacobian(qi) + f2_jac_qdot * dqdot_dqi
f2_u = f2.jacobian(ui) + f2_jac_qdot * dqdot_dui
f_lin_A = -(f1_q.row_join(f1_u)).col_join(f2_q.row_join(f2_u))
if other_dyns:
f1_oths = f1.jacobian(other_dyns)
f2_oths = f2.jacobian(other_dyns)
f_lin_B = -f1_oths.col_join(f2_oths)
else:
f_lin_B = Matrix([])
return (f_lin_A, f_lin_B, Matrix(other_dyns))
@property
def mass_matrix(self):
# Returns the mass matrix, which is augmented by the differentiated non
# holonomic equations if necessary
if (self._frstar == None) & (self._fr == None):
raise ValueError('Need to compute Fr, Fr* first.')
return Matrix([self._k_d, self._k_dnh])
@property
def mass_matrix_full(self):
# Returns the mass matrix from above, augmented by kin diff's k_kqdot
if (self._frstar == None) & (self._fr == None):
raise ValueError('Need to compute Fr, Fr* first.')
o = len(self._u)
n = len(self._q)
return ((self._k_kqdot).row_join(zeros(n, o))).col_join((zeros(o,
n)).row_join(self.mass_matrix))
@property
def forcing(self):
# Returns the forcing vector, which is augmented by the differentiated
# non holonomic equations if necessary
if (self._frstar == None) & (self._fr == None):
raise ValueError('Need to compute Fr, Fr* first.')
return -Matrix([self._f_d, self._f_dnh])
@property
def forcing_full(self):
# Returns the forcing vector, augmented by the kinematic differential
# equations and the differentiated non holonomic equations if necessary
if (self._frstar == None) & (self._fr == None):
raise ValueError('Need to compute Fr, Fr* first.')
f1 = self._k_ku * Matrix(self._u) + self._f_k
return -Matrix([f1, self._f_d, self._f_dnh])
|
ichuang/sympy
|
sympy/physics/mechanics/kane.py
|
Python
|
bsd-3-clause
| 37,447
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import csv
from io import TextIOWrapper
import six
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.db import transaction
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django_tables2 import RequestConfig
from rapidsms.models import Contact, Connection, Backend
from rapidsms.contrib.registration.tables import ContactTable
from rapidsms.contrib.registration.forms import (
BulkRegistrationForm,
ContactForm, ConnectionFormSet)
from rapidsms.conf import settings
@login_required
def registration(request):
contacts_table = ContactTable(
Contact.objects.all().prefetch_related('connection_set'),
template_name="django_tables2/bootstrap-tables.html")
paginate = {"per_page": settings.PAGINATOR_OBJECTS_PER_PAGE}
RequestConfig(request, paginate=paginate).configure(contacts_table)
return render(request, "registration/dashboard.html", {
"contacts_table": contacts_table,
})
@login_required
def contact(request, pk=None):
if pk:
contact = get_object_or_404(Contact, pk=pk)
else:
contact = Contact()
contact_form = ContactForm(instance=contact)
connection_formset = ConnectionFormSet(instance=contact)
if request.method == 'POST':
data = {}
for key in request.POST:
val = request.POST[key]
if isinstance(val, six.string_types):
data[key] = val
else:
try:
data[key] = val[0]
except (IndexError, TypeError):
data[key] = val
del data
if pk:
if "delete_contact" in request.POST:
contact.delete()
messages.add_message(request, messages.INFO, "Deleted contact")
return HttpResponseRedirect(reverse(registration))
contact_form = ContactForm(request.POST, instance=contact)
else:
contact_form = ContactForm(request.POST)
if contact_form.is_valid():
contact = contact_form.save(commit=False)
connection_formset = ConnectionFormSet(request.POST,
instance=contact)
if connection_formset.is_valid():
contact.save()
connection_formset.save()
messages.add_message(request, messages.INFO, "Added contact")
return HttpResponseRedirect(reverse(registration))
return render(request, 'registration/contact_form.html', {
"contact": contact,
"contact_form": contact_form,
"connection_formset": connection_formset,
})
@login_required
@transaction.atomic
def contact_bulk_add(request):
bulk_form = BulkRegistrationForm(request.POST)
if request.method == "POST" and "bulk" in request.FILES:
# Python3's CSV module takes strings while Python2's takes bytes
if six.PY3:
encoding = request.encoding or settings.DEFAULT_CHARSET
f = TextIOWrapper(request.FILES['bulk'].file, encoding=encoding)
else:
f = request.FILES['bulk']
reader = csv.reader(
f,
quoting=csv.QUOTE_NONE,
skipinitialspace=True
)
count = 0
for i, row in enumerate(reader, start=1):
try:
name, backend_name, identity = row
except ValueError:
return render(request, 'registration/bulk_form.html', {
"bulk_form": bulk_form,
"csv_errors": "Could not unpack line " + str(i),
})
contact = Contact.objects.create(name=name)
try:
backend = Backend.objects.get(name=backend_name)
except Backend.DoesNotExist:
return render(request, 'registration/bulk_form.html', {
"bulk_form": bulk_form,
"csv_errors": "Could not find Backend. Line: " + str(i),
})
Connection.objects.create(
backend=backend,
identity=identity,
contact=contact)
count += 1
if not count:
return render(request, 'registration/bulk_form.html', {
"bulk_form": bulk_form,
"csv_errors": "No contacts found in file",
})
messages.add_message(request, messages.INFO, "Added %d contacts" %
count)
return HttpResponseRedirect(reverse(registration))
return render(request, 'registration/bulk_form.html', {
"bulk_form": bulk_form,
})
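# Illustrative CSV layout expected by contact_bulk_add (values are hypothetical;
# columns are: contact name, backend name, connection identity):
#
#   John Doe, example_backend, 1234567890
#   Jane Doe, example_backend, 0987654321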
|
lsgunth/rapidsms
|
rapidsms/contrib/registration/views.py
|
Python
|
bsd-3-clause
| 4,844
|
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
import numpy.random
import scipy.stats
import functools
from collections import defaultdict
from nose import SkipTest
from nose.tools import assert_greater
from nose.tools import assert_in
from nose.tools import assert_is_instance
from nose.tools import assert_not_equal
from nose.tools import assert_true
from goftests import density_goodness_of_fit
from goftests import discrete_goodness_of_fit
from goftests import vector_density_goodness_of_fit
from distributions.dbg.random import sample_discrete
from distributions.util import scores_to_probs
from distributions.tests.util import assert_all_close
from distributions.tests.util import assert_close
from distributions.tests.util import assert_hasattr
from distributions.tests.util import import_model
from distributions.tests.util import list_models
from distributions.tests.util import seed_all
try:
import distributions.io.schema_pb2
has_protobuf = True
except ImportError:
has_protobuf = False
DATA_COUNT = 20
SAMPLE_COUNT = 1000
MIN_GOODNESS_OF_FIT = 1e-3
MODULES = {
'{flavor}.models.{name}'.format(**spec): import_model(spec)
for spec in list_models()
}
IS_FAST = {'dbg': False, 'hp': True, 'lp': True}
def model_is_fast(model):
flavor = model.__name__.split('.')[1]
return IS_FAST[flavor]
def iter_examples(module):
assert_hasattr(module, 'EXAMPLES')
EXAMPLES = module.EXAMPLES
assert_is_instance(EXAMPLES, list)
assert_true(EXAMPLES, 'no examples provided')
for i, EXAMPLE in enumerate(EXAMPLES):
print 'example {}/{}'.format(1 + i, len(EXAMPLES))
assert_in('shared', EXAMPLE)
assert_in('values', EXAMPLE)
values = EXAMPLE['values']
assert_is_instance(values, list)
count = len(values)
assert_true(
count >= 7,
'Add more example values (expected >= 7, found {})'.format(count))
yield EXAMPLE
def for_each_model(*filters):
'''
Run one test per Model, filtering out inappropriate Models for test.
'''
def filtered(test_fun):
@functools.wraps(test_fun)
def test_one_model(name):
module = MODULES[name]
assert_hasattr(module, 'Shared')
for EXAMPLE in iter_examples(module):
test_fun(module, EXAMPLE)
@functools.wraps(test_fun)
def test_all_models():
for name in MODULES:
module = MODULES[name]
if all(f(module) for f in filters):
yield test_one_model, name
return test_all_models
return filtered
@for_each_model()
def test_value(module, EXAMPLE):
assert_hasattr(module, 'Value')
assert_is_instance(module.Value, type)
values = EXAMPLE['values']
for value in values:
assert_is_instance(value, module.Value)
@for_each_model()
def test_shared(module, EXAMPLE):
assert_hasattr(module, 'Shared')
assert_is_instance(module.Shared, type)
shared1 = module.Shared.from_dict(EXAMPLE['shared'])
shared2 = module.Shared.from_dict(EXAMPLE['shared'])
assert_close(shared1.dump(), EXAMPLE['shared'])
values = EXAMPLE['values']
seed_all(0)
for value in values:
shared1.add_value(value)
seed_all(0)
for value in values:
shared2.add_value(value)
assert_close(shared1.dump(), shared2.dump())
for value in values:
shared1.remove_value(value)
assert_close(shared1.dump(), EXAMPLE['shared'])
@for_each_model()
def test_group(module, EXAMPLE):
assert_hasattr(module, 'Group')
assert_is_instance(module.Group, type)
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
group1 = module.Group()
group1.init(shared)
for value in values:
group1.add_value(shared, value)
group2 = module.Group.from_values(shared, values)
assert_close(group1.dump(), group2.dump())
group = module.Group.from_values(shared, values)
dumped = group.dump()
group.init(shared)
group.load(dumped)
assert_close(group.dump(), dumped)
for value in values:
group2.remove_value(shared, value)
assert_not_equal(group1, group2)
group2.merge(shared, group1)
for value in values:
group1.score_value(shared, value)
for _ in xrange(10):
value = group1.sample_value(shared)
group1.score_value(shared, value)
module.sample_group(shared, 10)
group1.score_data(shared)
group2.score_data(shared)
@for_each_model(lambda module: hasattr(module.Shared, 'protobuf_load'))
def test_protobuf(module, EXAMPLE):
if not has_protobuf:
raise SkipTest('protobuf not available')
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
Message = getattr(distributions.io.schema_pb2, module.NAME)
message = Message.Shared()
shared.protobuf_dump(message)
shared2 = module.Shared()
shared2.protobuf_load(message)
assert_close(shared2.dump(), shared.dump())
message.Clear()
dumped = shared.dump()
module.Shared.to_protobuf(dumped, message)
assert_close(module.Shared.from_protobuf(message), dumped)
if hasattr(module.Group, 'protobuf_load'):
for value in values:
shared.add_value(value)
group = module.Group.from_values(shared, values)
message = Message.Group()
group.protobuf_dump(message)
group2 = module.Group()
group2.protobuf_load(message)
assert_close(group2.dump(), group.dump())
message.Clear()
dumped = group.dump()
module.Group.to_protobuf(dumped, message)
assert_close(module.Group.from_protobuf(message), dumped)
@for_each_model()
def test_add_remove(module, EXAMPLE):
# Test group_add_value, group_remove_value, score_data, score_value
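    # The running sum of predictive log scores should equal score_data by the
    # chain rule: log p(x1,...,xn) = sum_i log p(x_i | x_1,...,x_{i-1}).
    # Removing all values (in shuffled order) must restore the empty-group
    # state, and re-adding them must restore the full-group state.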
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group = module.Group.from_values(shared)
score = 0.0
assert_close(group.score_data(shared), score, err_msg='p(empty) != 1')
for _ in range(DATA_COUNT):
value = group.sample_value(shared)
values.append(value)
score += group.score_value(shared, value)
group.add_value(shared, value)
group_all = module.Group.from_dict(group.dump())
assert_close(
score,
group.score_data(shared),
err_msg='p(x1,...,xn) != p(x1) p(x2|x1) p(xn|...)')
numpy.random.shuffle(values)
for value in values:
group.remove_value(shared, value)
group_empty = module.Group.from_values(shared)
assert_close(
group.dump(),
group_empty.dump(),
err_msg='group + values - values != group')
numpy.random.shuffle(values)
for value in values:
group.add_value(shared, value)
assert_close(
group.dump(),
group_all.dump(),
err_msg='group - values + values != group')
@for_each_model()
def test_add_repeated(module, EXAMPLE):
# Test add_repeated value vs n * add
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for value in EXAMPLE['values']:
group = module.Group.from_values(shared)
for _ in range(DATA_COUNT):
group.add_value(shared, value)
group_repeated = module.Group.from_values(shared)
group_repeated.add_repeated_value(shared, value, count=DATA_COUNT)
assert_close(
group.dump(),
group_repeated.dump(),
err_msg='n * add_value != add_repeated_value n')
@for_each_model()
def test_add_merge(module, EXAMPLE):
# Test group_add_value, group_merge
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values'][:]
for value in values:
shared.add_value(value)
numpy.random.shuffle(values)
group = module.Group.from_values(shared, values)
for i in xrange(len(values) + 1):
numpy.random.shuffle(values)
group1 = module.Group.from_values(shared, values[:i])
group2 = module.Group.from_values(shared, values[i:])
group1.merge(shared, group2)
assert_close(group.dump(), group1.dump())
@for_each_model()
def test_group_merge(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
group1 = module.Group.from_values(shared)
group2 = module.Group.from_values(shared)
expected = module.Group.from_values(shared)
actual = module.Group.from_values(shared)
for _ in xrange(100):
value = expected.sample_value(shared)
expected.add_value(shared, value)
group1.add_value(shared, value)
value = expected.sample_value(shared)
expected.add_value(shared, value)
group2.add_value(shared, value)
actual.load(group1.dump())
actual.merge(shared, group2)
assert_close(actual.dump(), expected.dump())
@for_each_model(lambda module: module.Value in [bool, int])
def test_group_allows_debt(module, EXAMPLE):
# Test that group.add_value can safely go into data debt
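    # Each value is added 3 times and removed 2 times in shuffled order, so the
    # group may transiently hold negative counts ("data debt") but must end up
    # identical to a group that saw each value exactly once.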
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
values = []
group1 = module.Group.from_values(shared, values)
for _ in range(DATA_COUNT):
value = group1.sample_value(shared)
values.append(value)
group1.add_value(shared, value)
group2 = module.Group.from_values(shared)
pos_values = [(v, +1) for v in values]
neg_values = [(v, -1) for v in values]
signed_values = pos_values * 3 + neg_values * 2
numpy.random.shuffle(signed_values)
for value, sign in signed_values:
if sign > 0:
group2.add_value(shared, value)
else:
group2.remove_value(shared, value)
assert_close(group1.dump(), group2.dump())
@for_each_model()
def test_sample_seed(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
seed_all(0)
group1 = module.Group.from_values(shared)
values1 = [group1.sample_value(shared) for _ in xrange(DATA_COUNT)]
seed_all(0)
group2 = module.Group.from_values(shared)
values2 = [group2.sample_value(shared) for _ in xrange(DATA_COUNT)]
assert_close(values1, values2, err_msg='values')
@for_each_model()
def test_sample_value(module, EXAMPLE):
seed_all(0)
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for values in [[], EXAMPLE['values']]:
group = module.Group.from_values(shared, values)
sample_count = SAMPLE_COUNT
if module.Value == numpy.ndarray:
sample_count *= 10
samples = [group.sample_value(shared) for _ in xrange(sample_count)]
if module.Value in [bool, int]:
probs_dict = {
value: math.exp(group.score_value(shared, value))
for value in set(samples)
}
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
elif module.Value == float:
probs = numpy.exp([
group.score_value(shared, value)
for value in samples
])
gof = density_goodness_of_fit(samples, probs, plot=True)
elif module.Value == numpy.ndarray:
if module.__name__ == 'distributions.lp.models.niw':
raise SkipTest('FIXME known sampling bug')
probs = numpy.exp([
group.score_value(shared, value)
for value in samples
])
gof = vector_density_goodness_of_fit(samples, probs, plot=True)
else:
raise SkipTest('Not implemented for {}'.format(module.Value))
print '{} gof = {:0.3g}'.format(module.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
@for_each_model()
def test_sample_group(module, EXAMPLE):
seed_all(0)
SIZE = 2
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
for values in [[], EXAMPLE['values']]:
if module.Value in [bool, int]:
samples = []
probs_dict = {}
for _ in xrange(SAMPLE_COUNT):
values = module.sample_group(shared, SIZE)
sample = tuple(values)
samples.append(sample)
group = module.Group.from_values(shared, values)
probs_dict[sample] = math.exp(group.score_data(shared))
gof = discrete_goodness_of_fit(samples, probs_dict, plot=True)
else:
raise SkipTest('Not implemented for {}'.format(module.Value))
print '{} gof = {:0.3g}'.format(module.__name__, gof)
assert_greater(gof, MIN_GOODNESS_OF_FIT)
def _append_ss(group, aggregator):
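    # Flatten a group's dumped sufficient statistics into the aggregator, e.g.
    # {'counts': [1, 2]} -> aggregator['counts_0'].append(1) and
    # aggregator['counts_1'].append(2); scalar entries keep their own key.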
ss = group.dump()
for key, val in ss.iteritems():
if isinstance(val, list):
for i, v in enumerate(val):
aggregator['{}_{}'.format(key, i)].append(v)
elif isinstance(val, dict):
for k, v in val.iteritems():
aggregator['{}_{}'.format(key, k)].append(v)
else:
aggregator[key].append(val)
def sample_marginal_conditional(module, shared, value_count):
values = module.sample_group(shared, value_count)
group = module.Group.from_values(shared, values)
return group
def sample_successive_conditional(module, shared, group, value_count):
sampler = module.Sampler()
sampler.init(shared, group)
values = [sampler.eval(shared) for _ in xrange(value_count)]
new_group = module.Group.from_values(shared, values)
return new_group
@for_each_model(model_is_fast)
def test_joint(module, EXAMPLE):
# \cite{geweke04getting}
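    # Geweke's "getting it right" joint-distribution test: sufficient statistics
    # collected via marginal-conditional sampling and via successive-conditional
    # sampling should match in distribution; gof below is the p-value of a
    # two-sample t-test on each flattened statistic.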
seed_all(0)
SIZE = 10
SKIP = 100
shared = module.Shared.from_dict(EXAMPLE['shared'])
shared.realize()
marginal_conditional_samples = defaultdict(lambda: [])
successive_conditional_samples = defaultdict(lambda: [])
cond_group = sample_marginal_conditional(module, shared, SIZE)
for _ in xrange(SAMPLE_COUNT):
marg_group = sample_marginal_conditional(module, shared, SIZE)
_append_ss(marg_group, marginal_conditional_samples)
for __ in range(SKIP):
cond_group = sample_successive_conditional(
module,
shared,
cond_group,
SIZE)
_append_ss(cond_group, successive_conditional_samples)
for key in marginal_conditional_samples.keys():
gof = scipy.stats.ttest_ind(
marginal_conditional_samples[key],
successive_conditional_samples[key])[1]
if isinstance(gof, numpy.ndarray):
raise SkipTest('XXX: handle array case, gof = {}'.format(gof))
print '{}:{} gof = {:0.3g}'.format(module.__name__, key, gof)
if not numpy.isfinite(gof):
raise SkipTest('Test fails with gof = {}'.format(gof))
assert_greater(gof, MIN_GOODNESS_OF_FIT)
@for_each_model(lambda module: hasattr(module.Shared, 'scorer_create'))
def test_scorer(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
group = module.Group.from_values(shared)
scorer1 = shared.scorer_create()
scorer2 = shared.scorer_create(group)
for value in values:
score1 = shared.scorer_eval(scorer1, value)
score2 = shared.scorer_eval(scorer2, value)
score3 = group.score_value(shared, value)
assert_all_close([score1, score2, score3])
@for_each_model(lambda module: hasattr(module, 'Mixture'))
def test_mixture_runs(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
mixture = module.Mixture()
for value in values:
shared.add_value(value)
mixture.append(module.Group.from_values(shared, [value]))
mixture.init(shared)
groupids = []
for value in values:
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
mixture.score_value(shared, value, scores)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
mixture.add_value(shared, groupid, value)
groupids.append(groupid)
mixture.add_group(shared)
assert len(mixture) == len(values) + 1
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
for value, groupid in zip(values, groupids):
mixture.remove_value(shared, groupid, value)
mixture.remove_group(shared, 0)
if module.__name__ == 'distributions.lp.models.dpd':
raise SkipTest('FIXME known segfault here')
mixture.remove_group(shared, len(mixture) - 1)
assert len(mixture) == len(values) - 1
for value in values:
scores = numpy.zeros(len(mixture), dtype=numpy.float32)
mixture.score_value(shared, value, scores)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
mixture.add_value(shared, groupid, value)
@for_each_model(lambda module: hasattr(module, 'Mixture'))
def test_mixture_score(module, EXAMPLE):
shared = module.Shared.from_dict(EXAMPLE['shared'])
values = EXAMPLE['values']
for value in values:
shared.add_value(value)
groups = [module.Group.from_values(shared, [value]) for value in values]
mixture = module.Mixture()
for group in groups:
mixture.append(group)
mixture.init(shared)
def check_score_value(value):
expected = [group.score_value(shared, value) for group in groups]
actual = numpy.zeros(len(mixture), dtype=numpy.float32)
noise = numpy.random.randn(len(actual))
actual += noise
mixture.score_value(shared, value, actual)
actual -= noise
assert_close(actual, expected, err_msg='score_value {}'.format(value))
another = [
mixture.score_value_group(shared, i, value)
for i in xrange(len(groups))
]
assert_close(
another,
expected,
err_msg='score_value_group {}'.format(value))
return actual
def check_score_data():
expected = sum(group.score_data(shared) for group in groups)
actual = mixture.score_data(shared)
assert_close(actual, expected, err_msg='score_data')
print 'init'
for value in values:
check_score_value(value)
check_score_data()
print 'adding'
groupids = []
for value in values:
scores = check_score_value(value)
probs = scores_to_probs(scores)
groupid = sample_discrete(probs)
groups[groupid].add_value(shared, value)
mixture.add_value(shared, groupid, value)
groupids.append(groupid)
check_score_data()
print 'removing'
for value, groupid in zip(values, groupids):
groups[groupid].remove_value(shared, value)
mixture.remove_value(shared, groupid, value)
scores = check_score_value(value)
check_score_data()
|
fritzo/distributions
|
distributions/tests/test_models.py
|
Python
|
bsd-3-clause
| 20,478
|
import HTMLParser
import json
from xml.etree import ElementTree
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest, Http404
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_noop
from django.views.decorators.cache import cache_page
from django.views.generic import View
from couchdbkit import ResourceConflict
from casexml.apps.case.models import CASE_STATUS_OPEN
from casexml.apps.case.xml import V2
from casexml.apps.phone.fixtures import generator
from corehq.form_processor.utils import should_use_sql_backend
from corehq.form_processor.utils.general import use_sqlite_backend
from dimagi.utils.logging import notify_exception
from dimagi.utils.parsing import string_to_boolean
from dimagi.utils.web import json_response, get_url_base, json_handler
from touchforms.formplayer.api import DjangoAuth, get_raw_instance, sync_db
from touchforms.formplayer.models import EntrySession
from xml2json.lib import xml2json
from corehq import toggles, privileges
from corehq.apps.accounting.decorators import requires_privilege_for_commcare_user, requires_privilege_with_fallback
from corehq.apps.app_manager.dbaccessors import (
get_latest_build_doc,
get_brief_apps_in_domain,
get_latest_released_app_doc,
get_app_ids_in_domain,
get_current_app,
wrap_app,
)
from corehq.apps.app_manager.exceptions import FormNotFoundException, ModuleNotFoundException
from corehq.apps.app_manager.models import Application, ApplicationBase, RemoteApp
from corehq.apps.app_manager.suite_xml.sections.details import get_instances_for_module
from corehq.apps.app_manager.suite_xml.sections.entries import EntriesHelper
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.api import (
api_closed_to_status,
CaseAPIResult,
get_app_json,
get_filtered_cases,
get_filters_from_request_params,
get_open_form_sessions,
look_up_app_json,
)
from corehq.apps.cloudcare.dbaccessors import get_cloudcare_apps
from corehq.apps.cloudcare.decorators import require_cloudcare_access
from corehq.apps.cloudcare.exceptions import RemoteAppError
from corehq.apps.cloudcare.models import ApplicationAccess
from corehq.apps.cloudcare.touchforms_api import BaseSessionDataHelper, CaseSessionDataHelper
from corehq.apps.domain.decorators import login_and_domain_required, login_or_digest_ex, domain_admin_required
from corehq.apps.groups.models import Group
from corehq.apps.reports.formdetails import readable
from corehq.apps.style.decorators import (
use_datatables,
use_jquery_ui,
)
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.users.views import BaseUserSettingsView
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors, LedgerAccessors
from corehq.form_processor.exceptions import XFormNotFound, CaseNotFound
from corehq.util.quickcache import skippable_quickcache
from corehq.util.xml_utils import indent_xml
from corehq.apps.analytics.tasks import track_clicked_preview_on_hubspot
from corehq.apps.analytics.utils import get_meta
@require_cloudcare_access
def default(request, domain):
return HttpResponseRedirect(reverse('cloudcare_main', args=[domain, '']))
def insufficient_privilege(request, domain, *args, **kwargs):
context = {
'domain': domain,
}
return render(request, "cloudcare/insufficient_privilege.html", context)
class CloudcareMain(View):
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(CloudcareMain, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, urlPath):
try:
preview = string_to_boolean(request.GET.get("preview", "false"))
except ValueError:
# this is typically only set at all if it's intended to be true so this
# is a reasonable default for "something went wrong"
preview = True
app_access = ApplicationAccess.get_by_domain(domain)
accessor = CaseAccessors(domain)
if not preview:
apps = get_cloudcare_apps(domain)
if request.project.use_cloudcare_releases:
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(request.couch_user.username)):
get_cloudcare_app = get_latest_build_doc
else:
get_cloudcare_app = get_latest_released_app_doc
apps = map(
lambda app: get_cloudcare_app(domain, app['_id']),
apps,
)
apps = filter(None, apps)
apps = map(wrap_app, apps)
# convert to json
apps = [get_app_json(app) for app in apps]
else:
# legacy functionality - use the latest build regardless of stars
apps = [get_latest_build_doc(domain, app['_id']) for app in apps]
apps = [get_app_json(ApplicationBase.wrap(app)) for app in apps if app]
else:
# big TODO: write a new apps view for Formplayer, can likely cut most out now
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
apps = get_cloudcare_apps(domain)
else:
apps = get_brief_apps_in_domain(domain)
apps = [get_app_json(app) for app in apps if app and (
isinstance(app, RemoteApp) or app.application_version == V2)]
meta = get_meta(request)
track_clicked_preview_on_hubspot(request.couch_user, request.COOKIES, meta)
# trim out empty apps
apps = filter(lambda app: app, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
def _default_lang():
if apps:
# unfortunately we have to go back to the DB to find this
return Application.get(apps[0]["_id"]).default_language
else:
return "en"
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
def _url_context():
# given a url path, returns potentially the app, parent, and case, if
# they're selected. the front end optimizes with these to avoid excess
# server calls
# there's an annoying dependency between this logic and backbone's
# url routing that seems hard to solve well. this needs to be synced
# with apps.js if anything changes
# for apps anything with "view/app/" works
# for cases it will be:
# "view/:app/:module/:form/case/:case/"
# if there are parent cases, it will be:
# "view/:app/:module/:form/parent/:parent/case/:case/
# could use regex here but this is actually simpler with the potential
# absence of a trailing slash
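            # Illustrative splits (hypothetical ids):
            #   "view/a1/"                        -> app_id="a1"
            #   "view/a1/0/1/case/c9/"            -> app_id="a1", case_id="c9"
            #   "view/a1/0/1/parent/p7/case/c9/"  -> app_id="a1", parent_id="p7", case_id="c9"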
split = urlPath.split('/')
app_id = split[1] if len(split) >= 2 else None
if len(split) >= 5 and split[4] == "parent":
parent_id = split[5]
case_id = split[7] if len(split) >= 7 else None
else:
parent_id = None
case_id = split[5] if len(split) >= 6 else None
app = None
if app_id:
if app_id in [a['_id'] for a in apps]:
app = look_up_app_json(domain, app_id)
else:
messages.info(request, _("That app is no longer valid. Try using the "
"navigation links to select an app."))
if app is None and len(apps) == 1:
app = look_up_app_json(domain, apps[0]['_id'])
def _get_case(domain, case_id):
case = accessor.get_case(case_id)
assert case.domain == domain, "case %s not in %s" % (case_id, domain)
return case.to_api_json()
case = _get_case(domain, case_id) if case_id else None
if parent_id is None and case is not None:
parent_id = case.get('indices', {}).get('parent', {}).get('case_id', None)
parent = _get_case(domain, parent_id) if parent_id else None
return {
"app": app,
"case": case,
"parent": parent
}
context = {
"domain": domain,
"language": language,
"apps": apps,
"apps_raw": apps,
"preview": preview,
"maps_api_key": settings.GMAPS_API_KEY,
"sessions_enabled": request.couch_user.is_commcare_user(),
"use_cloudcare_releases": request.project.use_cloudcare_releases,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
'use_sqlite_backend': use_sqlite_backend(domain),
}
context.update(_url_context())
if toggles.USE_FORMPLAYER_FRONTEND.enabled(domain):
return render(request, "cloudcare/formplayer_home.html", context)
else:
return render(request, "cloudcare/cloudcare_home.html", context)
class FormplayerMain(View):
preview = False
urlname = 'formplayer_main'
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(FormplayerMain, self).dispatch(request, *args, **kwargs)
def fetch_app(self, domain, app_id):
username = self.request.couch_user.username
if (toggles.CLOUDCARE_LATEST_BUILD.enabled(domain) or
toggles.CLOUDCARE_LATEST_BUILD.enabled(username)):
return get_latest_build_doc(domain, app_id)
else:
return get_latest_released_app_doc(domain, app_id)
def get(self, request, domain):
app_access = ApplicationAccess.get_by_domain(domain)
app_ids = get_app_ids_in_domain(domain)
apps = map(
lambda app_id: self.fetch_app(domain, app_id),
app_ids,
)
apps = filter(None, apps)
apps = filter(lambda app: app['cloudcare_enabled'] or self.preview, apps)
apps = filter(lambda app: app_access.user_can_access_app(request.couch_user, app), apps)
apps = sorted(apps, key=lambda app: app['name'])
def _default_lang():
try:
return apps[0]['langs'][0]
except Exception:
return 'en'
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
context = {
"domain": domain,
"language": language,
"apps": apps,
"maps_api_key": settings.GMAPS_API_KEY,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
"single_app_mode": False,
"home_url": reverse(self.urlname, args=[domain]),
}
return render(request, "cloudcare/formplayer_home.html", context)
class FormplayerMainPreview(FormplayerMain):
preview = True
urlname = 'formplayer_main_preview'
def fetch_app(self, domain, app_id):
return get_current_app(domain, app_id)
class FormplayerPreviewSingleApp(View):
urlname = 'formplayer_single_app'
@use_datatables
@use_jquery_ui
@method_decorator(require_cloudcare_access)
@method_decorator(requires_privilege_for_commcare_user(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(FormplayerPreviewSingleApp, self).dispatch(request, *args, **kwargs)
def get(self, request, domain, app_id, **kwargs):
app_access = ApplicationAccess.get_by_domain(domain)
app = get_current_app(domain, app_id)
if not app_access.user_can_access_app(request.couch_user, app):
raise Http404()
def _default_lang():
try:
return app['langs'][0]
except Exception:
return 'en'
# default language to user's preference, followed by
# first app's default, followed by english
language = request.couch_user.language or _default_lang()
context = {
"domain": domain,
"language": language,
"apps": [app],
"maps_api_key": settings.GMAPS_API_KEY,
"username": request.user.username,
"formplayer_url": settings.FORMPLAYER_URL,
"single_app_mode": True,
"home_url": reverse(self.urlname, args=[domain, app_id]),
}
return render(request, "cloudcare/formplayer_home.html", context)
@login_and_domain_required
@requires_privilege_for_commcare_user(privileges.CLOUDCARE)
def form_context(request, domain, app_id, module_id, form_id):
app = Application.get(app_id)
form_url = '{}{}'.format(
settings.CLOUDCARE_BASE_URL or get_url_base(),
reverse('download_xform', args=[domain, app_id, module_id, form_id])
)
case_id = request.GET.get('case_id')
instance_id = request.GET.get('instance_id')
try:
form = app.get_module(module_id).get_form(form_id)
except (FormNotFoundException, ModuleNotFoundException):
raise Http404()
form_name = form.name.values()[0]
# make the name for the session we will use with the case and form
session_name = u'{app} > {form}'.format(
app=app.name,
form=form_name,
)
if case_id:
case = CaseAccessors(domain).get_case(case_id)
session_name = u'{0} - {1}'.format(session_name, case.name)
root_context = {
'form_url': form_url,
}
if instance_id:
try:
root_context['instance_xml'] = FormAccessors(domain).get_form(instance_id).get_xml()
except XFormNotFound:
raise Http404()
session_extras = {'session_name': session_name, 'app_id': app._id}
session_extras.update(get_cloudcare_session_data(domain, form, request.couch_user))
delegation = request.GET.get('task-list') == 'true'
session_helper = CaseSessionDataHelper(domain, request.couch_user, case_id, app, form, delegation=delegation)
return json_response(session_helper.get_full_context(
root_context,
session_extras
))
cloudcare_api = login_or_digest_ex(allow_cc_users=True)
def get_cases_vary_on(request, domain):
request_params = request.GET
return [
request.couch_user.get_id
if request.couch_user.is_commcare_user() else request_params.get('user_id', ''),
request_params.get('ids_only', 'false'),
request_params.get('case_id', ''),
request_params.get('footprint', 'false'),
request_params.get('closed', 'false'),
json.dumps(get_filters_from_request_params(request_params)),
domain,
]
def get_cases_skip_arg(request, domain):
"""
When this function returns True, skippable_quickcache will not go to the cache for the result. By default,
    if neither the use_cache nor the ids_only request parameter is passed, nothing will be cached. The cache is
    always skipped if ids_only is false.
The caching is mainly a hack for touchforms to respond more quickly. Touchforms makes repeated requests to
get the list of case_ids associated with a user.
"""
if not toggles.CLOUDCARE_CACHE.enabled(domain):
return True
request_params = request.GET
return (not string_to_boolean(request_params.get('use_cache', 'false')) or
not string_to_boolean(request_params.get('ids_only', 'false')))
@cloudcare_api
@skippable_quickcache(get_cases_vary_on, get_cases_skip_arg, timeout=240 * 60)
def get_cases(request, domain):
request_params = request.GET
if request.couch_user.is_commcare_user():
user_id = request.couch_user.get_id
else:
user_id = request_params.get("user_id", "")
if not user_id and not request.couch_user.is_web_user():
return HttpResponseBadRequest("Must specify user_id!")
ids_only = string_to_boolean(request_params.get("ids_only", "false"))
case_id = request_params.get("case_id", "")
footprint = string_to_boolean(request_params.get("footprint", "false"))
accessor = CaseAccessors(domain)
if toggles.HSPH_HACK.enabled(domain):
hsph_case_id = request_params.get('hsph_hack', None)
if hsph_case_id != 'None' and hsph_case_id and user_id:
case = accessor.get_case(hsph_case_id)
usercase_id = CommCareUser.get_by_user_id(user_id).get_usercase_id()
usercase = accessor.get_case(usercase_id) if usercase_id else None
return json_response(map(
lambda case: CaseAPIResult(domain=domain, id=case['_id'], couch_doc=case, id_only=ids_only),
filter(None, [case, case.parent, usercase])
))
if case_id and not footprint:
# short circuit everything else and just return the case
# NOTE: this allows any user in the domain to access any case given
# they know its ID, which is slightly different from the previous
# behavior (can only access things you own + footprint). If we want to
# change this contract we would need to update this to check the
# owned case list + footprint
case = accessor.get_case(case_id)
assert case.domain == domain
cases = [CaseAPIResult(domain=domain, id=case_id, couch_doc=case, id_only=ids_only)]
else:
filters = get_filters_from_request_params(request_params)
status = api_closed_to_status(request_params.get('closed', 'false'))
case_type = filters.get('properties/case_type', None)
cases = get_filtered_cases(domain, status=status, case_type=case_type,
user_id=user_id, filters=filters,
footprint=footprint, ids_only=ids_only,
strip_history=True)
return json_response(cases)
@cloudcare_api
def filter_cases(request, domain, app_id, module_id, parent_id=None):
app = Application.get(app_id)
module = app.get_module(module_id)
auth_cookie = request.COOKIES.get('sessionid')
requires_parent_cases = string_to_boolean(request.GET.get('requires_parent_cases', 'false'))
xpath = EntriesHelper.get_filter_xpath(module)
instances = get_instances_for_module(app, module, additional_xpaths=[xpath])
extra_instances = [{'id': inst.id, 'src': inst.src} for inst in instances]
use_formplayer = toggles.USE_FORMPLAYER.enabled(domain)
accessor = CaseAccessors(domain)
# touchforms doesn't like this to be escaped
xpath = HTMLParser.HTMLParser().unescape(xpath)
case_type = module.case_type
if xpath or should_use_sql_backend(domain):
# if we need to do a custom filter, send it to touchforms for processing
additional_filters = {
"properties/case_type": case_type,
"footprint": True
}
helper = BaseSessionDataHelper(domain, request.couch_user)
result = helper.filter_cases(xpath, additional_filters, DjangoAuth(auth_cookie),
extra_instances=extra_instances, use_formplayer=use_formplayer)
if result.get('status', None) == 'error':
code = result.get('code', 500)
message = result.get('message', _("Something went wrong filtering your cases."))
if code == 500:
notify_exception(None, message=message)
return json_response(message, status_code=code)
case_ids = result.get("cases", [])
else:
# otherwise just use our built in api with the defaults
case_ids = [res.id for res in get_filtered_cases(
domain,
status=CASE_STATUS_OPEN,
case_type=case_type,
user_id=request.couch_user._id,
footprint=True,
ids_only=True,
)]
cases = accessor.get_cases(case_ids)
if parent_id:
cases = filter(lambda c: c.parent and c.parent.case_id == parent_id, cases)
# refilter these because we might have accidentally included footprint cases
# in the results from touchforms. this is a little hacky but the easiest
    # (quick) workaround. should be revisited when we optimize the case list.
cases = filter(lambda c: c.type == case_type, cases)
cases = [c.to_api_json(lite=True) for c in cases if c]
response = {'cases': cases}
if requires_parent_cases:
# Subtract already fetched cases from parent list
parent_ids = set(map(lambda c: c['indices']['parent']['case_id'], cases)) - \
set(map(lambda c: c['case_id'], cases))
parents = accessor.get_cases(list(parent_ids))
parents = [c.to_api_json(lite=True) for c in parents]
response.update({'parents': parents})
return json_response(response)
@cloudcare_api
def get_apps_api(request, domain):
return json_response(get_cloudcare_apps(domain))
@cloudcare_api
def get_app_api(request, domain, app_id):
try:
return json_response(look_up_app_json(domain, app_id))
except RemoteAppError:
raise Http404()
@cloudcare_api
@cache_page(60 * 30)
def get_fixtures(request, domain, user_id, fixture_id=None):
try:
user = CommCareUser.get_by_user_id(user_id)
except CouchUser.AccountTypeError:
err = ("You can't use case sharing or fixtures as a %s. "
"Login as a mobile worker and try again.") % settings.WEB_USER_TERM,
return HttpResponse(err, status=412, content_type="text/plain")
if not user:
raise Http404
assert user.is_member_of(domain)
restore_user = user.to_ota_restore_user()
if not fixture_id:
ret = ElementTree.Element("fixtures")
for fixture in generator.get_fixtures(restore_user, version=V2):
ret.append(fixture)
return HttpResponse(ElementTree.tostring(ret), content_type="text/xml")
else:
fixture = generator.get_fixture_by_id(fixture_id, restore_user, version=V2)
if not fixture:
raise Http404
assert len(fixture.getchildren()) == 1, 'fixture {} expected 1 child but found {}'.format(
fixture_id, len(fixture.getchildren())
)
return HttpResponse(ElementTree.tostring(fixture.getchildren()[0]), content_type="text/xml")
@cloudcare_api
def get_sessions(request, domain):
# is it ok to pull user from the request? other api calls seem to have an explicit 'user' param
skip = request.GET.get('skip') or 0
limit = request.GET.get('limit') or 10
return json_response(get_open_form_sessions(request.user, skip=skip, limit=limit))
@cloudcare_api
def get_session_context(request, domain, session_id):
    # NOTE: although this view does not appear to be called from anywhere, it is, and cannot be deleted.
# The javascript routing in cloudcare depends on it, though constructs it manually in a hardcoded way.
# see getSessionContextUrl in cloudcare/util.js
# Adding 'cloudcare_get_session_context' to this comment so that the url name passes a grep test
try:
session = EntrySession.objects.get(session_id=session_id)
except EntrySession.DoesNotExist:
session = None
if request.method == 'DELETE':
if session:
session.delete()
return json_response({'status': 'success'})
else:
helper = BaseSessionDataHelper(domain, request.couch_user)
return json_response(helper.get_full_context({
'session_id': session_id,
'app_id': session.app_id if session else None
}))
@cloudcare_api
def get_ledgers(request, domain):
"""
Returns ledgers associated with a case in the format:
{
"section_id": {
"product_id": amount,
"product_id": amount,
...
},
...
}
Note: this only works for the Couch backend
"""
request_params = request.GET
case_id = request_params.get('case_id')
if not case_id:
return json_response(
{'message': 'You must specify a case id to make this query.'},
status_code=400
)
try:
case = CaseAccessors(domain).get_case(case_id)
except CaseNotFound:
raise Http404()
ledger_map = LedgerAccessors(domain).get_case_ledger_state(case.case_id)
def custom_json_handler(obj):
if hasattr(obj, 'stock_on_hand'):
return obj.stock_on_hand
return json_handler(obj)
return json_response(
{
'entity_id': case_id,
'ledger': ledger_map,
},
default=custom_json_handler,
)
@cloudcare_api
def sync_db_api(request, domain):
auth_cookie = request.COOKIES.get('sessionid')
username = request.GET.get('username')
try:
response = sync_db(username, domain, DjangoAuth(auth_cookie))
except Exception, e:
return json_response(
{'status': 'error', 'message': unicode(e)},
status_code=500
)
else:
return json_response(response)
class ReadableQuestions(View):
urlname = 'readable_questions'
@csrf_exempt
@method_decorator(cloudcare_api)
def dispatch(self, request, *args, **kwargs):
return super(ReadableQuestions, self).dispatch(request, *args, **kwargs)
def post(self, request, domain):
instance_xml = request.POST.get('instanceXml').encode('utf-8')
app_id = request.POST.get('appId')
xmlns = request.POST.get('xmlns')
_, form_data_json = xml2json(instance_xml)
pretty_questions = readable.get_questions(domain, app_id, xmlns)
readable_form = readable.get_readable_form_data(form_data_json, pretty_questions)
rendered_readable_form = render_to_string(
'reports/form/partials/readable_form.html',
{'questions': readable_form}
)
return json_response({
'form_data': rendered_readable_form,
'form_questions': pretty_questions
})
@cloudcare_api
def render_form(request, domain):
# get session
session_id = request.GET.get('session_id')
session = get_object_or_404(EntrySession, session_id=session_id)
try:
raw_instance = get_raw_instance(session_id, domain)
except Exception, e:
return HttpResponse(e, status=500, content_type="text/plain")
xmlns = raw_instance["xmlns"]
form_data_xml = raw_instance["output"]
_, form_data_json = xml2json(form_data_xml)
pretty_questions = readable.get_questions(domain, session.app_id, xmlns)
readable_form = readable.get_readable_form_data(form_data_json, pretty_questions)
rendered_readable_form = render_to_string(
'reports/form/partials/readable_form.html',
{'questions': readable_form}
)
return json_response({
'form_data': rendered_readable_form,
'instance_xml': indent_xml(form_data_xml)
})
class HttpResponseConflict(HttpResponse):
status_code = 409
class EditCloudcareUserPermissionsView(BaseUserSettingsView):
template_name = 'cloudcare/config.html'
urlname = 'cloudcare_app_settings'
@property
def page_title(self):
if toggles.USE_FORMPLAYER_FRONTEND.enabled(self.domain):
return _("Web Apps Permissions")
else:
return _("CloudCare Permissions")
@method_decorator(domain_admin_required)
@method_decorator(requires_privilege_with_fallback(privileges.CLOUDCARE))
def dispatch(self, request, *args, **kwargs):
return super(EditCloudcareUserPermissionsView, self).dispatch(request, *args, **kwargs)
@property
def page_context(self):
apps = get_cloudcare_apps(self.domain)
access = ApplicationAccess.get_template_json(self.domain, apps)
groups = Group.by_domain(self.domain)
return {
'apps': apps,
'groups': groups,
'access': access,
}
def put(self, request, *args, **kwargs):
j = json.loads(request.body)
old = ApplicationAccess.get_by_domain(self.domain)
new = ApplicationAccess.wrap(j)
old.restrict = new.restrict
old.app_groups = new.app_groups
try:
if old._rev != new._rev or old._id != new._id:
raise ResourceConflict()
old.save()
except ResourceConflict:
return HttpResponseConflict()
else:
return json_response({'_rev': old._rev})
|
qedsoftware/commcare-hq
|
corehq/apps/cloudcare/views.py
|
Python
|
bsd-3-clause
| 29,447
|
#
# AggHelp.py -- help classes for the Agg drawing
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
import aggdraw as agg
from ginga import colors
class AggContext(object):
def __init__(self, canvas):
self.canvas = canvas
def set_canvas(self, canvas):
self.canvas = canvas
def get_color(self, color):
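        # Accepts a color name (e.g. 'white') or an RGB 3-tuple of floats in
        # [0, 1]; returns 8-bit ints, e.g. (0.5, 0.5, 0.5) -> (127, 127, 127).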
if isinstance(color, str):
r, g, b = colors.lookup_color(color)
elif isinstance(color, tuple):
# color is assumed to be a 3-tuple of RGB values as floats
# between 0 and 1
r, g, b = color
else:
r, g, b = 1.0, 1.0, 1.0
return (int(r*255), int(g*255), int(b*255))
def get_pen(self, color, linewidth=1):
# if hasattr(self, 'linestyle'):
# if self.linestyle == 'dash':
# cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
p = agg.Pen(self.get_color(color), width=linewidth)
return p
def get_brush(self, color):
p = agg.Brush(self.get_color(color))
return p
def get_font(self, name, size, color):
color = self.get_color(color)
# TODO: what kind of lookup can we use for this?
filename = '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'
f = agg.Font(color, filename, size=size)
return f
def text_extents(self, text, font):
wd, ht = self.canvas.textsize(text, font)
return wd, ht
#END
|
bsipocz/ginga
|
ginga/aggw/AggHelp.py
|
Python
|
bsd-3-clause
| 1,635
|
"""
#;+
#; NAME:
#; spec_guis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Spectroscopy Guis with QT
#; These call pieces from spec_widgets
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys
import matplotlib.pyplot as plt
import glob
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib import mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
from astropy.units import Quantity
from astropy import units as u
from linetools.lists.linelist import LineList
from xastropy.xutils import xdebug as xdb
from xastropy.xguis import spec_widgets as xspw
#class XSpecGui(QtGui.QMainWindow):
#class XAbsIDGui(QtGui.QMainWindow):
#class XVelPltGui(QtGui.QDialog):
# x_specplot replacement
class XSpecGui(QtGui.QMainWindow):
''' GUI to replace XIDL x_specplot
12-Dec-2014 by JXP v1.0
27-Mar-2015 by JXP v2.0 :: EW, column, better zooming + panning
'''
def __init__(self, spec, parent=None, zsys=None, norm=None):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
'''
        mpl.rcParams['agg.path.chunksize'] = 20000 # Needed to avoid crash in large spectral files
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Grab the pieces and tie together
self.pltline_widg = xspw.PlotLinesWidget(status=self.statusBar, init_z=zsys)
self.pltline_widg.setMaximumWidth(300)
# Hook the spec widget to Plot Line
self.spec_widg = xspw.ExamineSpecWidget(spec,status=self.statusBar,
llist=self.pltline_widg.llist,
zsys=zsys, norm=norm)
self.pltline_widg.spec_widg = self.spec_widg
self.spec_widg.canvas.mpl_connect('button_press_event', self.on_click)
extras = QtGui.QWidget()
extras.setMaximumWidth(130)
vbox = QtGui.QVBoxLayout()
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(self.quit)
vbox.addWidget(self.pltline_widg)
vbox.addWidget(qbtn)
extras.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.spec_widg)
hbox.addWidget(extras)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def create_status_bar(self):
self.status_text = QtGui.QLabel("XSpec")
self.statusBar().addWidget(self.status_text, 1)
def on_click(self,event):
if event.button == 3: # Set redshift
if self.pltline_widg.llist['List'] is None:
return
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
z = event.xdata/wrest.value - 1.
self.pltline_widg.llist['z'] = z
self.statusBar().showMessage('z = {:f}'.format(z))
self.pltline_widg.zbox.setText('{:.5f}'.format(self.pltline_widg.llist['z']))
# Draw
self.spec_widg.on_draw()
# Quit
def quit(self):
self.close()
# GUI for Identifying many (all) Abs Systems in a Spectrum
class XAbsIDGui(QtGui.QMainWindow):
''' GUI to analyze absorption systems in a spectrum
16-Dec-2014 by JXP
'''
def __init__(self, spec, parent=None, abssys_dir=None, absid_list=None, norm=True,
srch_id=True, id_dir='ID_LINES/', second_file=None):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
second_file = Second spectrum file (e.g. COS + STIS)
'''
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Initialize
if absid_list is None:
# Automatically search for ID files
if srch_id:
absid_list = glob.glob(id_dir+'*id.fits')
else:
absid_list = []
# Grab the pieces and tie together
self.abssys_widg = xspw.AbsSysWidget(absid_list)
self.pltline_widg = xspw.PlotLinesWidget(status=self.statusBar)
self.spec_widg = xspw.ExamineSpecWidget(spec,status=self.statusBar,
llist=self.pltline_widg.llist, norm=norm,
second_file=second_file,
abs_sys=self.abssys_widg.abs_sys)
self.pltline_widg.spec_widg = self.spec_widg
# Connections
self.spec_widg.canvas.mpl_connect('button_press_event', self.on_click)
self.spec_widg.canvas.mpl_connect('key_press_event', self.on_key)
self.abssys_widg.refine_button.clicked.connect(self.refine_abssys)
# Layout
anly_widg = QtGui.QWidget()
anly_widg.setMaximumWidth(300)
anly_widg.setMinimumWidth(150)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pltline_widg)
vbox.addWidget(self.abssys_widg)
anly_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.spec_widg)
hbox.addWidget(anly_widg)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def create_status_bar(self):
self.status_text = QtGui.QLabel("XAbsID")
self.statusBar().addWidget(self.status_text, 1)
def on_key(self,event):
if event.key == 'v': # Stack plot
if self.spec_widg.vplt_flg == 1:
self.abssys_widg.add_fil(self.spec_widg.outfil)
self.abssys_widg.reload()
# Update line list
idx = self.pltline_widg.lists.index(self.spec_widg.llist['List'])
self.pltline_widg.llist_widget.setCurrentRow(idx)
elif event.key == '?': # Check for a match with known systems
wv_chosen = event.xdata
# Load grb
llist = xspw.set_llist('grb.lst')
# Loop through systems
for iabs_sys in self.abssys_widg.all_abssys:
z = iabs_sys.zabs
wvobs = np.array((1+z) * llist['grb.lst']['wrest'])
mtwv = np.where( np.abs( wvobs-wv_chosen ) < 0.2 )[0]
for imt in mtwv:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
print('z={:g}, {:s}, f={:g}'.format(z,
llist['grb.lst']['name'][imt],
llist['grb.lst']['fval'][imt]))
if len(mtwv) == 0:
print('No match. wrest={:g} for z={:g}'.format(wv_chosen/(1+z), z))
def on_click(self,event):
if event.button == 3: # Set redshift
# Line list?
try:
self.pltline_widg.llist['List']
except KeyError:
print('Set a line list first!!')
return
#
if self.pltline_widg.llist[self.pltline_widg.llist['List']] == 'None':
return
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
z = event.xdata/wrest.value - 1.
self.pltline_widg.llist['z'] = z
self.statusBar().showMessage('z = {:f}'.format(z))
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(
self.pltline_widg.llist['z']))
# Draw
self.spec_widg.on_draw()
def refine_abssys(self):
item = self.abssys_widg.abslist_widget.selectedItems()
if len(item) != 1:
self.statusBar().showMessage('AbsSys: Must select only 1 system!')
            print('AbsSys: Must select only 1 system!')
            return
txt = item[0].text()
ii = self.abssys_widg.all_items.index(txt)
iabs_sys = self.abssys_widg.all_abssys[ii]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Launch
gui = XVelPltGui(self.spec_widg.spec, outfil=iabs_sys.absid_file,
abs_sys=iabs_sys, norm=self.spec_widg.norm)
gui.exec_()
# ##################################
# GUI for velocity plot
class XVelPltGui(QtGui.QDialog):
''' GUI to analyze absorption systems in a spectrum
24-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.]*u.km/u.s, abs_sys=None, outfil='dum_ID.fits',
sel_wv=None):
'''
spec = Filename or Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
sel_wv: Selected wavelength. Used to inspect a single, unknown line
'''
super(XVelPltGui, self).__init__(parent)
# Initialize
self.abs_sys = abs_sys
        if self.abs_sys is not None:
self.z = self.abs_sys.zabs
else:
if z is None:
raise ValueError('XVelPlt: Need to set abs_sys or z!')
self.z = z
self.vmnx = vmnx
self.outfil = outfil
self.norm = norm
self.sel_wv = sel_wv
# Grab the pieces and tie together
self.vplt_widg = xspw.VelPlotWidget(ispec, abs_sys=self.abs_sys, llist=llist,
vmnx=self.vmnx, z=self.z, norm=self.norm)
self.pltline_widg = xspw.PlotLinesWidget(init_llist=self.vplt_widg.llist,
init_z=self.z)
#self.pltline_widg.spec_widg = self.vplt_widg
self.slines = xspw.SelectedLinesWidget(self.vplt_widg.llist[self.vplt_widg.llist['List']],
init_select=self.vplt_widg.llist['show_line'],
plot_widget=self.vplt_widg)
# Connections
self.pltline_widg.llist_widget.currentItemChanged.connect(self.on_llist_change)
self.connect(self.pltline_widg.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
self.vplt_widg.canvas.mpl_connect('key_press_event', self.on_key)
# Outfil
wbtn = QtGui.QPushButton('Write', self)
wbtn.setAutoDefault(False)
wbtn.clicked.connect(self.write_out)
self.out_box = QtGui.QLineEdit()
self.out_box.setText(self.outfil)
self.connect(self.out_box, QtCore.SIGNAL('editingFinished ()'), self.set_outfil)
# Quit
buttons = QtGui.QWidget()
wqbtn = QtGui.QPushButton('Write+Quit', self)
wqbtn.setAutoDefault(False)
wqbtn.clicked.connect(self.write_quit)
qbtn = QtGui.QPushButton('Quit', self)
qbtn.setAutoDefault(False)
qbtn.clicked.connect(self.quit)
# Sizes
lines_widg = QtGui.QWidget()
lines_widg.setMaximumWidth(300)
lines_widg.setMinimumWidth(200)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pltline_widg)
vbox.addWidget(self.slines)
vbox.addWidget(wbtn)
vbox.addWidget(self.out_box)
# Quit buttons
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(wqbtn)
hbox1.addWidget(qbtn)
buttons.setLayout(hbox1)
#
vbox.addWidget(buttons)
lines_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.vplt_widg)
hbox.addWidget(lines_widg)
self.setLayout(hbox)
# Initial draw
self.vplt_widg.on_draw()
# Change z
def on_key(self,event):
if event.key == 'z':
self.z = self.vplt_widg.z
self.pltline_widg.llist['z'] = self.z
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(self.z))
if event.key == 'T': # Try another rest wavelength for input line
# Get line from User
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
# Set redshift
self.z = self.sel_wv / wrest - 1.
print('Setting z = {:g}'.format(self.z))
self.pltline_widg.llist['z'] = self.z
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(self.z))
self.vplt_widg.z = self.pltline_widg.llist['z']
# Reset
self.vplt_widg.init_lines()
self.vplt_widg.on_draw()
# Set z from pltline_widg
def setz(self):
self.vplt_widg.abs_sys.zabs = self.pltline_widg.llist['z']
self.vplt_widg.z = self.pltline_widg.llist['z']
self.z = self.pltline_widg.llist['z']
self.vplt_widg.on_draw()
# Change list of lines to choose from
def on_llist_change(self):
llist = self.pltline_widg.llist
all_lines = list( llist[llist['List']]['wrest'] )
# Set selected
abs_sys = self.vplt_widg.abs_sys
wrest = abs_sys.lines.keys()
wrest.sort()
select = []
for iwrest in wrest:
try:
select.append(all_lines.index(iwrest))
except ValueError:
pass
select.sort()
# GUIs
self.vplt_widg.llist['List'] = llist['List']
self.vplt_widg.llist['show_line'] = select
self.vplt_widg.idx_line = 0
self.slines.selected = select
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.slines.on_list_change(llist[llist['List']])
# Write
def set_outfil(self):
self.outfil = str(self.out_box.text())
print('XVelPlot: Will write to {:s}'.format(self.outfil))
# Write
def write_out(self):
self.vplt_widg.abs_sys.absid_file = self.outfil
self.vplt_widg.abs_sys.write_absid_file()
# Write + Quit
def write_quit(self):
self.write_out()
self.flg_quit = 1
self.abs_sys = self.vplt_widg.abs_sys
self.done(1)
# Write + Quit
def quit(self):
#self.abs_sys = self.vplt_widg.abs_sys # Have to write to pass back
self.flg_quit = 0
self.done(1)
# x_specplot replacement
class XAODMGui(QtGui.QDialog):
''' GUI to show AODM plots
28-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, vmnx=[-300., 300.]*u.km/u.s, parent=None, norm=True):
super(XAODMGui, self).__init__(parent)
'''
spec = Spectrum1D
'''
# Grab the pieces and tie together
self.aodm_widg = xspw.AODMWidget(spec,z,wrest,vmnx=vmnx,norm=norm)
self.aodm_widg.canvas.mpl_connect('key_press_event', self.on_key)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.aodm_widg)
self.setLayout(vbox)
self.aodm_widg.on_draw()
def on_key(self,event):
if event.key == 'q': # Quit
self.done(1)
# Script to run XSpec from the command line
def run_xspec(*args, **kwargs):
'''
Runs the XSpecGui
Command line
or from Python
Examples:
1. python ~/xastropy/xastropy/xguis/spec_guis.py 1
2. spec_guis.run_xspec(filename)
3. spec_guis.run_xspec(spec1d)
'''
import argparse
from specutils import Spectrum1D
from xastropy.spec.utils import XSpectrum1D
parser = argparse.ArgumentParser(description='Parse for XSpec')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("-zsys", type=float, help="System Redshift")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
action="store_true")
if len(args) == 0:
pargs = parser.parse_args()
else: # better know what you are doing!
#xdb.set_trace()
if type(args[0]) in [XSpectrum1D, Spectrum1D]:
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(args[0], **kwargs)
gui.show()
app.exec_()
return
else: # String parsing
largs = ['1'] + [iargs for iargs in args]
pargs = parser.parse_args(largs)
# Normalized?
norm=True
if pargs.un_norm:
norm=False
# Second spectral file?
try:
zsys = pargs.zsys
except AttributeError:
zsys=None
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(pargs.file, zsys=zsys, norm=norm)
gui.show()
app.exec_()
# Script to run XAbsID from the command line
def run_xabsid():
import argparse
parser = argparse.ArgumentParser(description='Script for XSpec')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
action="store_true")
parser.add_argument("-id_dir", type=str,
help="Directory for ID files (ID_LINES is default)")
parser.add_argument("-secondfile", type=str, help="Second spectral file")
args = parser.parse_args()
# Normalized?
norm=True
if args.un_norm:
norm=False
# Second spectral file?
second_file=None
if args.secondfile:
second_file=args.secondfile
# Launch
app = QtGui.QApplication(sys.argv)
gui = XAbsIDGui(args.file, norm=norm, second_file=second_file)
gui.show()
app.exec_()
# ################
if __name__ == "__main__":
import sys
from linetools.spectra import io as lsi
from xastropy.igm import abs_sys as xiabs
if len(sys.argv) == 1: # TESTING
flg_fig = 0
#flg_fig += 2**0 # XSpec
#flg_fig += 2**1 # XAbsID
#flg_fig += 2**2 # XVelPlt Gui
flg_fig += 2**3 # XVelPlt Gui without ID list; Also tests select wave
#flg_fig += 2**4 # XAODM Gui
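        # Each power of two is a flag bit; the checks below of the form
        # (flg_fig % 2**(k+1)) >= 2**k test whether bit k is set, so several
        # GUIs can be exercised in a single run by summing their flags.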
# Read spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
# XSpec
if (flg_fig % 2) == 1:
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(spec)
gui.show()
app.exec_()
# XAbsID
if (flg_fig % 2**2) >= 2**1:
#spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#spec = xspec.readwrite.readspec(spec_fil)
#norm = True
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
norm = False
absid_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
absid_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2348-1041_z2.997_id.fits'
app = QtGui.QApplication(sys.argv)
gui = XAbsIDGui(spec_fil,norm=norm) #,absid_list=[absid_fil, absid_fil2])
gui.show()
app.exec_()
# XVelPlt with existing AbsID file
if (flg_fig % 2**3) >= 2**2:
spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#spec = xspec.readwrite.readspec(spec_fil)
absid_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_sys = xiabs.abssys_utils.Generic_System(None)
abs_sys.parse_absid_file(absid_fil)
#
app = QtGui.QApplication(sys.argv)
app.setApplicationName('XVelPlt')
gui = XVelPltGui(spec_fil,abs_sys=abs_sys,
outfil='/Users/xavier/Desktop/tmp.fits')
gui.show()
sys.exit(app.exec_())
# XVelPlt without existing AbsID file
if (flg_fig % 2**4) >= 2**3:
#spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#z=2.746
#outfil='/Users/xavier/Desktop/J1004+0018_z2.746_id.fits'
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
z=0.
outfil='/Users/xavier/Desktop/tmp.fits'
#
app = QtGui.QApplication(sys.argv)
app.setApplicationName('XVelPlt')
gui = XVelPltGui(spec_fil, z=z, outfil=outfil,norm=False, sel_wv=1526.80)
gui.show()
sys.exit(app.exec_())
# AODM GUI
if (flg_fig % 2**5) >= 2**4:
#spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
#z=2.96916
#lines = [1548.195, 1550.770]
norm = True
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
z=0.4391
lines = [1215.6701, 1025.7223] * u.AA
norm = False
# Launch
spec = lsi.readspec(spec_fil)
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = XAODMGui(spec, z, lines, norm=norm)
main.show()
sys.exit(app.exec_())
else: # RUN A GUI
id_gui = int(sys.argv[1]) # 1 = XSpec, 2=XAbsId
if id_gui == 1:
run_xspec()
elif id_gui == 2:
run_xabsid()
|
profxj/old_xastropy
|
xastropy/xguis/spec_guis.py
|
Python
|
bsd-3-clause
| 22,801
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import os.path
import subprocess
import sys
libpg_query = os.path.join('.', 'libpg_query')
class PSqlParseBuildExt(build_ext):
def run(self):
return_code = subprocess.call(['make', '-C', libpg_query, 'build'])
if return_code:
sys.stderr.write('''
An error occurred during extension building.
Make sure you have bison and flex installed on your system.
''')
sys.exit(return_code)
build_ext.run(self)
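# When USE_CYTHON is set in the environment, the .pyx sources are re-cythonized
# below; otherwise the pre-generated C files are compiled directly against
# libpg_query.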
USE_CYTHON = bool(os.environ.get('USE_CYTHON'))
ext = '.pyx' if USE_CYTHON else '.c'
libraries = ['pg_query']
extensions = [
Extension('psqlparse.parser',
['psqlparse/parser' + ext],
libraries=libraries,
include_dirs=[libpg_query],
library_dirs=[libpg_query])
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(name='psqlparse',
version='1.0-rc7',
url='https://github.com/alculquicondor/psqlparse',
author='Aldo Culquicondor',
author_email='aldo@amigocloud.com',
description='Parse SQL queries using the PostgreSQL query parser',
install_requires=['six'],
license='BSD',
cmdclass={'build_ext': PSqlParseBuildExt},
packages=['psqlparse', 'psqlparse.nodes'],
ext_modules=extensions)
|
alculquicondor/psqlparse
|
setup.py
|
Python
|
bsd-3-clause
| 1,405
|
import pulsar as psr
def load_ref_system():
""" Returns l-arginine as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
N 0.8592 -2.9103 -0.8578
C 1.3352 -1.5376 -1.1529
C 2.8596 -1.5658 -1.1624
O 3.6250 -1.8965 -0.2757
O 3.4285 -1.1488 -2.3160
C 0.8699 -0.4612 -0.1600
H 1.4712 0.4581 -0.3123
H 1.0768 -0.7804 0.8827
C -0.6054 -0.1308 -0.3266
H -1.2260 -1.0343 -0.1569
H -0.8065 0.1769 -1.3739
C -1.0120 0.9757 0.6424
H -0.4134 1.8919 0.4482
H -0.7821 0.6714 1.6839
N -2.4750 1.2329 0.5383
H -2.7251 1.4082 -0.4139
C -2.9738 2.2808 1.4124
N -3.4837 3.3356 0.8530
H -3.9046 4.0108 1.4410
N -2.8404 2.0695 2.8280
H -2.7215 1.1094 3.0676
H -3.5979 2.4725 3.3357
H -0.1386 -2.9250 -0.8895
H 1.1675 -3.1979 0.0476
H 0.9562 -1.2864 -2.1768
H 4.3768 -1.2078 -2.2540
""")
|
pulsar-chem/Pulsar-Core
|
lib/systems/l-arginine.py
|
Python
|
bsd-3-clause
| 1,416
|
from unittest import mock
from olympia.amo.cron import gc
from olympia.amo.tests import TestCase
from olympia.files.models import FileUpload
@mock.patch('olympia.amo.cron.storage')
class TestGC(TestCase):
def test_file_uploads_deletion(self, storage_mock):
fu_new = FileUpload.objects.create(path='/tmp/new', name='new')
fu_new.update(created=self.days_ago(6))
fu_old = FileUpload.objects.create(path='/tmp/old', name='old')
fu_old.update(created=self.days_ago(8))
gc()
assert FileUpload.objects.count() == 1
assert storage_mock.delete.call_count == 1
assert storage_mock.delete.call_args[0][0] == fu_old.path
def test_file_uploads_deletion_no_path_somehow(self, storage_mock):
fu_old = FileUpload.objects.create(path='', name='foo')
fu_old.update(created=self.days_ago(8))
gc()
assert FileUpload.objects.count() == 0 # FileUpload was deleted.
assert storage_mock.delete.call_count == 0 # No path to delete.
def test_file_uploads_deletion_oserror(self, storage_mock):
fu_older = FileUpload.objects.create(path='/tmp/older', name='older')
fu_older.update(created=self.days_ago(300))
fu_old = FileUpload.objects.create(path='/tmp/old', name='old')
fu_old.update(created=self.days_ago(8))
storage_mock.delete.side_effect = OSError
gc()
        # Even though delete() caused an OSError, we still deleted the
# FileUploads rows, and tried to delete each corresponding path on
# the filesystem.
assert FileUpload.objects.count() == 0
assert storage_mock.delete.call_count == 2
assert storage_mock.delete.call_args_list[0][0][0] == fu_older.path
assert storage_mock.delete.call_args_list[1][0][0] == fu_old.path
|
psiinon/addons-server
|
src/olympia/amo/tests/test_cron.py
|
Python
|
bsd-3-clause
| 1,830
|
from gurtel.util import Url
class TestUrl(object):
def equal(self, one, two):
"""
        For this test, we want to ensure that compare-equal implies hash-equal.
"""
return (one == two) and (hash(one) == hash(two))
def test_no_qs(self):
assert self.equal(
Url("http://fake.base/path/"),
Url("http://fake.base/path/"))
def test_same_qs(self):
assert self.equal(
Url("http://fake.base/path/?foo=bar"),
Url("http://fake.base/path/?foo=bar"))
def test_different_key_order(self):
assert self.equal(
Url("http://fake.base/path/?foo=bar&arg=yo"),
Url("http://fake.base/path/?arg=yo&foo=bar"))
def test_different_value_order(self):
assert not self.equal(
Url("http://fake.base/path/?foo=bar&foo=yo"),
Url("http://fake.base/path/?foo=yo&foo=bar"))
def test_repr(self):
assert self.equal(
repr(Url("http://fake.base/path/?foo=bar")),
"Url(http://fake.base/path/?foo=bar)")
|
oddbird/gurtel
|
tests/test_util.py
|
Python
|
bsd-3-clause
| 1,076
|
# encoding: latin2
"""Repository of clusterPy's main class "Layer"
"""
__author__ = "Juan C. Duque, Alejandro Betancourt"
__credits__ = "Copyright (c) 2009-10 Juan C. Duque"
__license__ = "New BSD License"
__version__ = "1.0.0"
__maintainer__ = "RiSE Group"
__email__ = "contacto@rise-group.org"
__all__ = ['Layer']
import copy
import cPickle
import numpy
import os
import re
import time
import itertools
from data import generateSAR
from data import generateSMA
from data import generateCAR
from data import generateSpots
from data import generatePositiveSpots
from data import generateUniform
from data import generateGBinomial
from data import generateLBinomial
from data import dissolveData
from data import fieldOperation
from data import spatialLag
from geometry import dissolveLayer
from geometry import transportLayer
from geometry import expandLayer
from geometry import getBbox
from geometry import getGeometricAreas
from geometry import getCentroids
# Clustering
from toolboxes import execAZP
from toolboxes import execArisel
from toolboxes import execAZPRTabu
from toolboxes import execAZPSA
from toolboxes import execAZPTabu
from toolboxes import execRandom
from toolboxes.cluster.pRegionsExact import execPregionsExact
from toolboxes.cluster.pRegionsExactCP import execPregionsExactCP
from toolboxes.cluster.minpOrder import execMinpOrder
from toolboxes.cluster.minpFlow import execMinpFlow
from toolboxes import execMaxpTabu
from toolboxes import execAMOEBA
from toolboxes import originalSOM
from toolboxes import geoSom
from toolboxes import geoAssociationCoef
from toolboxes import redistributionCoef
from toolboxes import similarityCoef
# Irregular Maps
try:
from toolboxes import topoStatistics
from toolboxes import noFrontiersW
except:
pass
# Spatial statistics
from toolboxes import globalInequalityChanges
from toolboxes import inequalityMultivar
from toolboxes import interregionalInequalityTest
from toolboxes import interregionalInequalityDifferences
from outputs import dbfWriter
from outputs import shpWriterDis
from outputs import csvWriter
# Contiguity function
from contiguity import dict2matrix
from contiguity import dict2gal
from contiguity import dict2csv
# Layer
# Layer.dissolveMap
# Layer.addVariable
# Layer.getVars
# Layer.generateData
# Layer.resetData
# Layer.cluster
# Layer.getVars
# Layer.resetData
# Layer.cluster
# Layer.esda
# Layer.exportArcData
# Layer.save
# Layer.exportDBFY
# Layer.exportCSVY
# Layer.exportGALW
# Layer.exportCSVW
# Layer.exportOutputs
# Layer.transport
# Layer.expand
class Layer():
"""Main class in clusterPy
It is an object that represents an original map and all the
other maps derived from it after running any algorithm.
The layer object can be also represented as an inverse tree
with an upper root representing the original map and the
different branches representing other layers related to the
root.
"""
def __init__(self):
"""
**Attributes**
* Y: dictionary (attribute values of each feature)
* fieldNames: list (fieldNames List of attribute names)
* areas: list (list containing the coordinates of each feature)
        * region2areas: list (list of length N (number of areas) with the ID of the region to which each area has been assigned during the last algorithm run)
* Wqueen: dictionary (spatial contiguity based on queen criterion)
* Wrook: dictionary (spatial contiguity based on rook criterion)
        * customW: dictionary (custom spatial contiguity based on any other criterion)
        * shpType: string (layer's geometry type ('polygons','lines','points'))
* results: list (repository of layer instances from running an algorithm)
* outputCluster: dictionary (information about different characteristics of a solution (time, parameters, OS, among others))
* name: string (layer's name; default is 'root')
* outputDissolve: dictionary (keep information from which the current layer has been created)
* father: Layer (layer from which the current layer has been generated)
* bbox: tuple (bounding box)
"""
# Object Attributes
self.Y = {}
self.fieldNames = []
self.areas = []
self.region2areas = []
self.Wqueen = {}
self.Wrook = {}
self.customW = {}
self.shpType = ''
self.results = []
self.name = ""
self.outputCluster = {}
self.outputCluster['r2a'] = []
self.outputCluster['r2aRoot'] = []
self.outputDissolve = {}
self.outputDissolve['r2a'] = []
self.outputDissolve['r2aRoot'] = []
self.father = []
self.bbox = []
self.tStats = []
def dissolveMap(self, var=None, dataOperations={}):
"""
**Description**
Once you run an aggregation algorithm you can use the dissolve function to create a new map where the new polygons result from dissolving borders between areas assigned to the same region.
The dissolve map is an instance of a layer that is located inside the original layer. The dissolved map is then a "child" layer to which you can apply the same methods available for any layer. It implies that you can easily perform nested aggregation by applying aggregation algorithms to already dissolved maps.
:param var: It is the variable that indicates which areas belong to the same regions. This variable is usually the variable that is saved to a layer once an aggregation algorithm is executed. This variable can also be already included in your map, or it can be added from an external file.
:type var: string
        :param dataOperations: Dictionary which maps a variable to a list of operations to run on it. The dissolved layer will contain in its data all the variables specified in this dictionary. Be sure to check the dissolved layer fieldNames before using its variables.
:type dataOperations: dictionary
        The dictionary structure must be as shown below::
>>> X = {}
>>> X[variableName1] = [function1, function2,....]
>>> X[variableName2] = [function1, function2,....]
Where functions are strings which represent the names of the functions to be used on the given variable (variableName). Functions could be,'sum','mean','min','max','meanDesv','stdDesv','med', 'mode','range','first','last','numberOfAreas'.
        If you do not use this structure, the new layer (i.e., the dissolved
        map) will have just the ID field.
**Examples**
Dissolve china using the result from an aggregation algorithm ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
            china.cluster('azpSa', ['Y1990', 'Y1991'], 5)
china.dissolveMap()
Dissolve a China layer using a stored result on BELS ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.dissolveMap(var="BELS")
Dissolve china using the result from an aggregation algorithm. It also generates two new variables in the dissolved map. These new variables are the regional mean and sum of attributes "Y1978" and "Y1979" ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
            china.cluster('azpSa', ['Y1990', 'Y1991'], 5)
dataOperations = {'Y1978':['sum', 'mean'],'Y1979':['sum', 'mean']}
china.dissolveMap(dataOperations=dataOperations)
"""
print "Dissolving lines"
sh = Layer()
if var is not None:
if var in self.fieldNames:
region2areas = map(lambda x: x[0],self.getVars(var).values())
dissolveLayer(self, sh, region2areas)
sh.outputDissolve = {"objectiveFunction": "Unknown",\
"runningTime": "Unknown", "aggregationVariables": "Unknown",\
"algorithm":"Unknown", "weightType":"Unknown", \
"regions": len(sh.areas), "distanceType": "Unknown", "distanceStat": "Unknown", \
"selectionType": "Unknown", "objectiveFunctionType": "Unknown", \
"OS":os.name, "proccesorArchitecture": os.getenv('PROCESSOR_ARCHITECTURE'), \
"proccesorIdentifier": os.getenv('PROCESSOR_IDENTIFIER'),
"numberProccesor": os.getenv('NUMBER_OF_PROCESSORS'),
"r2a": self.region2areas}
sh.Y, sh.fieldNames = dissolveData(self.fieldNames, self.Y,
region2areas, dataOperations)
else:
raise NameError("The variable (%s) is not valid" %var)
else:
if self.region2areas == []:
raise NameError("You have not executed any algorithm")
else:
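                # No variable given: dissolve using the region labels from the
                # most recent clustering run and copy that run's metadata onto
                # the child layer.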
dissolveLayer(self, sh, self.region2areas)
outputKey = self.fieldNames[-1]
dissolveInfo = self.outputCluster[outputKey]
dissolveInfo['fieldName'] = outputKey
sh.outputDissolve = dissolveInfo
sh.Y,sh.fieldNames = dissolveData(self.fieldNames, self.Y,
self.region2areas, dataOperations)
print "Done"
def getVars(self, *args):
"""Getting subsets of data
:param args: subset data fieldNames.
:type args: tuple
:rtype: Dictionary (Data subset)
**Description**
This function allows the user to extract a subset of variables from a
layer object.
**Examples**
Getting Y1998 and Y1997 from China ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
subset = china.getVars(["Y1998", "Y1997"])
"""
print "Getting variables"
fields = []
for argument in args:
if isinstance(argument, list):
for argumentIn in argument:
fields.append(argumentIn)
else:
fields.append(argument)
labels = self.fieldNames
count = 0
subY = {}
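        # For each requested field, locate its column index in fieldNames and
        # append that column's value to every area's row in subY.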
for i in self.Y.keys():
subY[i] = []
for j in fields:
for i in range(len(labels)):
if labels[i] == j:
for j in self.Y.keys():
subY[j] = subY[j] + [self.Y[j][i]]
print "Variables successfully extracted"
return subY
def addVariable(self, names, values):
"""Adding new variables
:param names: field name
:type names: list
:param values: data
:type values: dictionary
**Description**
        In the example below the population of China in 1990 is multiplied by 10 and stored on the layer as "10Y1990". Note that, using the power of Python and clusterPy together, the number of possible new variables is unlimited.
**Examples**
**Example 1**::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
Y1990 = china.getVars(["Y1990"])
MY1990 = {}
            for area_i, values in Y1990.items():
                MY1990[area_i] = values[0]*10
china.addVariable(['10Y1990'], MY1990)
**Example 2** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
chinaData = clusterpy.importCSV("clusterpy/data_examples/china.csv")
china.addVariable(chinaData[1],chinaData[0])
"""
print "Adding variables"
self.fieldNames += (names)
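        # Each entry of values may be a single value or a list of values; both
        # are appended to the corresponding area's row in Y.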
for area in range(len(values)):
if area in self.Y:
if type(values[area]) is not list:
self.Y[area] += [values[area]]
else:
self.Y[area] += values[area]
else:
self.Y[area] = [values[area]]
print "Done"
def spatialLag(self,variables,wtype="queen"):
"""Spatial lag of a set of variables
:param variables: data dictionary to be lagged
:type variables: dictionary
**Description**
        This function calculates the spatially lagged value of a set of variables
        using the contiguity matrix selected with wtype. The result is stored in the
        layer data under the original variable name prefixed with "sl_"
**Examples**
**Example 1**::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.spatialLag(["Y1990","Y1991"])
china.exportArcData("chinaLagged")
"""
if wtype == 'rook':
w = self.Wrook
elif wtype == 'queen':
w = self.Wqueen
        else:
            raise NameError("Contiguity type is not supported")
wmatrix = dict2matrix(w,std=1,diag=0)
data = self.getVars(*variables)
lags = spatialLag(data,wmatrix)
names = ["sl_" + x for x in variables]
self.addVariable(names,lags)
def generateData(self, process, wtype, n, *args, **kargs):
"""Simulate data according to a specific stochastic process
:param process: type of data to be generated.
:type process: string
:param wtype: contiguity matrix to be used in order to generate the data.
:type wtype: string
:param n: number of processes to be generated.
:type n: integer
:param args: extra parameters of the simulators
:type args: tuple
:keyword integer: 0 for float variables and 1 for integer variables , by default 0.
:type integer: boolean
**Description**
In order to make the random data generation on clusterPy easier, we
provide a wide range of processes that can be generated with a single command. At the
moment the available processes and their optional parameters are:
* **Spatial autoregressive process (SAR)**
* rho: Autoregressive coefficient
* **Spatial Moving Average (SMA)**
* rho: Autoregressive coefficient
* **CAR**
* rho: Autoregressive coefficient
* **Random Spatial Clusters (Spots)**
* nc: Number of clusters
* compact: Compactness level (0 chain clusters - 1 compact clusters)
* Zalpha: Z value for the significance level of each cluster.
        * **Positive Random Spatial Clusters (positive_spots)**
* nc: Number of clusters
* compact: Compactness level (0 chain clusters - 1 compact clusters)
            * Zalpha: Z value of the significance level of each cluster. It is necessary
            to take into account that the distribution of the data is the absolute value
            of a normal distribution.
* **Uniform process (Uniform)**
* min: Uniform minimum
* max: Uniform maximum
* **Global Binomial (GBinomial)** (Only one distribution for all the areas)
* pob: Global Population
* prob: Global Probabilities
* **Local Binomial LBinomial** (Different distributions for each area.)
* Var_pob: Population field name.
* Var_prob: Probability field name.
**Examples**
Generating a float SAR variable for China with an autoregressive
coefficient of 0.7 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("SAR", "rook", 1, 0.7)
        Generating an integer SAR variable for China with an autoregressive coefficient of 0.7 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("SAR", "rook", 1, 0.7, integer=1)
Generating a float SMA variable for China with an autoregressive coefficient of 0.3 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("SMA", "queen", 1, 0.3)
Generating an integer SMA variable for China with an autoregressive coefficient of 0.3 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("SMA", "queen", 1, 0.3, integer=1)
Generating a float CAR variable for China with an autoregressive coefficient of 0.7 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("CAR", "queen", 1, 0.7)
Generating an integer CAR variable for China with an autoregressive coefficient of 0.7 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("CAR", "queen", 1, 0.7, integer=1)
        Generating a float Spots process on China with 4 clusters, a compactness level of 0.7 and a Zalpha value of 1.28 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("Spots", "queen", 1, 4, 0.7, 1.28)
        Generating an integer Spots process on China with 4 clusters, a compactness level of 0.7 and a Zalpha value of 1.28::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("Spots", "queen", 1, 4, 0.7, 1.28, integer=1)
        Generating a float Spots process with only positive values on China with 4 clusters, a compactness level of 0.7 and a Zalpha value of 1.64 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("positive_spots", "queen", 1, 4, 0.7, 1.64)
        Generating a float Spots process with only positive values over a grid
        of 30 by 30 with 4 clusters, a compactness level of 0.7 and a Zalpha
        value of 1.64 ::
import clusterpy
grid = clusterpy.createGrid(30,30)
grid.generateData("positive_spots", "queen", 1, 4, 0.7, 1.64)
Generating a local Binomial process on china with Y1998 as population level and simulated uniform probability (Uniform31) as risk level. ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("Uniform", "queen", 1, 0, 1)
china.fieldNames
china.generateData("LBinomial", "rook", 1, "Y1998", "Uniform31")
Generating a Binomial process on China with the same parameters for all the features ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("GBinomial", 'queen',1 , 10000, 0.5)
Generating a float Uniform process between 1 and 10 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("Uniform", 'queen', 1, 1, 10)
Generating an integer Uniform process between 1 and 10 ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.generateData("Uniform", 'queen', 1, 1, 10, integer=1)
"""
fields = []
print "Generating " + process
if wtype == 'rook':
w = self.Wrook
else:
w = self.Wqueen
if kargs.has_key("integer"):
integer = kargs["integer"]
else:
integer = 0
if process == 'SAR':
y = generateSAR(w, n, *args)
fields.extend(['SAR'+ str(i + len(self.fieldNames)) for i in range(n)])
elif process == 'SMA':
y = generateSMA(w, n, *args)
fields.extend(['SMA'+ str(i + len(self.fieldNames)) for i in range(n)])
elif process == 'CAR':
y = generateCAR(w, n, *args)
fields.extend(['CAR'+ str(i + len(self.fieldNames)) for i in range(n)])
elif process == 'Spots':
ylist = [generateSpots(w, *args) for i in xrange(n)]
fields.extend(['Spots' + str(i + len(self.fieldNames)) for i in range(n)])
y = {}
for i in xrange(len(w)):
y[i] = [x[i][0] for x in ylist]
elif process == 'positive_spots':
ylist = [generatePositiveSpots(w, *args) for i in xrange(n)]
fields.extend(['pspots' + str(i + len(self.fieldNames)) for i in range(n)])
y = {}
for i in xrange(len(w)):
y[i] = [x[i][0] for x in ylist]
elif process == 'Uniform':
y = generateUniform(w, n, *args)
fields.extend(['Uniform' + str(i + len(self.fieldNames)) for i in range(n)])
elif process == 'GBinomial':
# global parameters for the data
y = generateGBinomial(w, n, *args)
fields.extend(['Bino'+ str(i + len(self.fieldNames)) for i in range(n)])
elif process == 'LBinomial':
# local parameters for each area
arg = [arg for arg in args]
y_pob = self.getVars(arg[0])
y_pro = self.getVars(arg[1])
y = generateLBinomial(n, y_pob, y_pro)
fields.extend(['Bino' + str(i + len(self.fieldNames)) for i in range(n)])
for i in self.Y.keys():
if integer == 1:
self.Y[i] = self.Y[i] + [int(z) for z in y[i]]
else:
self.Y[i] = self.Y[i] + y[i]
self.fieldNames.extend(fields)
print "Done [" + process + "]"
def dataOperation(self,function):
"""
This function allows the creation of new variables. The variables must
be created using python language operations between variables.
:param function: This string is a python language operation which must include the variable name followed by the character "=" and the operations that must be executed in order to create the new variable. The new variable will be added as a new data attribute and the variable name will be added to fieldNames.
:type function: string
**Examples**
        Creating a new variable which is the sum of another two ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.dataOperation("Y9599 = Y1995 + Y1998")
Standardizing Y1995 ::
import clusterpy
import numpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
values = [i[0] for i in china.getVars("Y1995").values()]
mean_value = numpy.mean(values)
std_value = numpy.std(values)
china.dataOperation("Y1995St = (Y1995 - " + str(mean_value) + ")/float(" + str(std_value) + ")")
        Scaling Y1998 between 0 and 1. ::
import clusterpy
import numpy
china = clusterpy.importArcData("clsuterpy/data_examples/china")
values = [i[0] for i in china.getVars("Y1998").values()]
max_value = max(values)
min_value = min(values)
china.dataOperation("Y1998Sc = (Y1998 - " + str(min_value) + ")/float(" + str(max_value - min_value) + ")")
"""
m = re.match(r"(.*)\s?\=\s?(.*)", function)
if "groups" in dir(m):
fieldName = m.group(1).replace(" ", "")
if fieldName in self.fieldNames:
raise NameError("Variable " + str(fieldName) + " already exists")
function = m.group(2)
newVariable = fieldOperation(function, self.Y, self.fieldNames)
print "Adding " + fieldName + " to fieldNames"
print "Adding values from " + function + " to Y"
self.addVariable([fieldName], newVariable)
else:
raise NameError("Function is not well structured, it must include variable\
Name followed by = signal followed by the fieldOperations")
def resetData(self):
"""
All data available on the layer is deleted, keeping only the 'ID'
variable
**Examples** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.resetData()
"""
for i in self.Y.keys():
self.Y[i] = [i]
self.fieldNames = ['ID']
print "Done"
def cluster(*args, **kargs):
"""
Layer.cluster contains a wide set of algorithms for clustering with spatial contiguity constraints. For literature reviews on constrained clustering, see [Murtagh1985]_, [Gordon1996]_, [Duque_Ramos_Surinach2007]_.
Below you will find links that take you to a detailed description of
each algorithm.
The available algorithms are:
* Arisel [Duque_Church2004]_, [Duque_Church_Middleton2011]_:
* :ref:`Arisel description <arisel_description>`.
* :ref:`Using Arisel with clusterPy <arisel_examples>`.
* AZP [Openshaw_Rao1995]_:
* :ref:`AZP description <azp_description>`.
* :ref:`Using AZP with clusterPy <azp_examples>`.
* AZP-Simulated Annealing [Openshaw_Rao1995]_.
* :ref:`AZPSA description <azpsa_description>`.
* :ref:`Using AZPSA with clusterPy <azpsa_examples>`.
* AZP-Tabu [Openshaw_Rao1995]_.
* :ref:`AZP Tabu description <azpt_description>`.
* :ref:`Using AZP Tabu with clusterPy <azpt_examples>`.
* AZP-R-Tabu [Openshaw_Rao1995]_.
* :ref:`AZP Reactive Tabu description <azprt_description>`.
* :ref:`Using AZP reactive Tabu with clusterPy <azprt_examples>`.
        TODO: organize this so that it works
* P-regions (Exact) [Duque_Church_Middleton2009]_.
* :ref:`P-regions description <pregions_description>`.
* :ref:`Using P-regions with clusterPy <pregions_examples>`.
* Max-p-regions (Tabu) [Duque_Anselin_Rey2010]_.
* :ref:`Max-p description <maxp_description>`.
* :ref:`Using Max-p with clusterPy <maxp_examples>`.
* AMOEBA [Alstadt_Getis2006]_, [Duque_Alstadt_Velasquez_Franco_Betancourt2010]_.
* :ref:`AMOEBA description <amoeba_description>`.
* :ref:`Using AMOEBA with clusterPy <amoeba_examples>`.
* SOM [Kohonen1990]_.
* :ref:`SOM description <som_description>`.
* :ref:`Using SOM with clusterPy <som_examples>`.
* geoSOM [Bacao_Lobo_Painho2004]_.
* :ref:`GeoSOM description <geosom_description>`.
* :ref:`Using geoSOM with clusterPy <geosom_examples>`.
* Random
:param args: Basic parameters.
:type args: tuple
:param kargs: Optional parameter keywords.
:type kargs: dictionary
        The dataOperations dictionary used by dissolveMap could be
        passed in order to specify which data should be calculated for the dissolved
        layer. The dataOperations dictionary must be:
>>> X = {}
>>> X[variableName1] = [function1, function2,....]
>>> X[variableName2] = [function1, function2,....]
        Where functions are strings which represent the names of the functions to
        be used on the given variableName. Functions could be 'sum', 'mean', 'min',
        'max', 'meanDesv', 'stdDesv', 'med', 'mode', 'range', 'first', 'last',
        'numberOfAreas'. By default just the ID variable is added to the dissolved
        map.
**Examples**
.. _arisel_examples:
**ARISEL**
:ref:`Arisel description <arisel_description>`:
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 1, 0.9)
instance.exportArcData("testOutput/arisel_1_input")
instance.cluster('arisel', ['SAR1'], 15, dissolve=1)
instance.results[0].exportArcData("testOutput/arisel_1_solution")
.. image:: ../_static/ARISEL1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/arisel_2_input")
instance.cluster('arisel', ['SAR1', 'SAR2'], 15, wType='queen', std=1, inits=3, convTabu=5, tabuLength=5, dissolve=1)
instance.results[0].exportArcData("testOutput/arisel_2_solution")
**Example 3** ::
import clusterpy
instance = clusterpy.createGrid(3, 3)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/arisel_3_input")
instance.cluster('arisel', ['SAR1', 'SAR2'], 3, wType='queen', std=1, inits=1, initialSolution=[0, 0, 1, 0, 1, 1, 2, 2, 2], convTabu=5, tabuLength=5, dissolve=1)
instance.results[0].exportArcData("testOutput/arisel_3_solution")
**Example 4** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
dataOperations = {'POP1970':['sum', 'mean'], 'POP2001':['sum', 'mean']}
calif.exportArcData("testOutput/arisel_4_input")
calif.cluster('arisel', ['POP1970', 'POP2001'], 15, inits= 3, dissolve=1, dataOperations=dataOperations)
calif.results[0].exportArcData("testOutput/arisel_4_solution")
**Example 5** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.exportArcData("testOutput/arisel_5_input")
calif.cluster('arisel', ['g70_01'], 15, inits= 4, dissolve=1)
calif.results[0].exportArcData("testOutput/arisel_5_solution")
.. image:: ../_static/ARISEL5.png
.. _azp_examples:
**AZP**
:ref:`AZP description <azp_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azp_1_input")
instance.cluster('azp', ['SAR1'], 15, dissolve=1)
instance.results[0].exportArcData("testOutput/azp_1_solution")
.. image:: ../_static/AZP1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azp_2_input")
instance.cluster('azp', ['SAR1', 'SAR2'], 15, wType='queen', std=1, dissolve=1)
instance.results[0].exportArcData("testOutput/azp_2_solution")
**Example 3** ::
import clusterpy
instance = clusterpy.createGrid(3, 3)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azp_3_input")
instance.cluster('azp', ['SAR1', 'SAR2'], 3, wType='queen', std=1, initialSolution=[0, 0, 1, 0, 1, 1, 2, 2, 2], dissolve=1)
instance.results[0].exportArcData("testOutput/azp_3_solution")
**Example 4** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
dataOperations = {'POP1970':['sum', 'mean'], 'POP2001':['sum', 'mean']}
calif.exportArcData("testOutput/azp_4_input")
calif.cluster('azp', ['POP1970', 'POP2001'], 15, dissolve=1, dataOperations=dataOperations)
calif.results[0].exportArcData("testOutput/azp_4_solution")
**Example 5** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.exportArcData("testOutput/azp_5_input")
calif.cluster('azp', ['g70_01'], 15, dissolve=1)
calif.results[0].exportArcData("testOutput/azp_5_solution")
.. image:: ../_static/AZP5.png
.. _azpsa_examples:
**AZP Simulated Annealing**
:ref:`AZP Simulated Annealing description <azpsa_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpSA_1_input")
instance.cluster('azpSa', ['SAR1'], 15, dissolve=1)
instance.results[0].exportArcData("testOutput/azpSA_1_solution")
.. image:: ../_static/AZPSA1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpSA_2_input")
instance.cluster('azpSa', ['SAR1', 'SAR2'], 15, wType='queen', std=1, maxit=2, dissolve=1)
instance.results[0].exportArcData("testOutput/azpSA_2_solution")
**Example 3** ::
import clusterpy
instance = clusterpy.createGrid(3, 3)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpSA_3_input")
instance.cluster('azpSa', ['SAR1', 'SAR2'], 3, wType='queen', std=1, initialSolution=[0, 0, 1, 0, 1, 1, 2, 2, 2], maxit=2, dissolve=1)
instance.results[0].exportArcData("testOutput/azpSA_3_solution")
**Example 4** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
dataOperations = {'POP1970':['sum', 'mean'], 'POP2001':['sum', 'mean']}
calif.exportArcData("testOutput/azpSA_4_input")
calif.cluster('azpSa', ['POP1970', 'POP2001'], 15, dissolve=1, dataOperations=dataOperations)
calif.results[0].exportArcData("testOutput/azpSA_4_solution")
**Example 5** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.exportArcData("testOutput/azpSA_5_input")
calif.cluster('azpSa', ['g70_01'], 15, dissolve=1)
calif.results[0].exportArcData("testOutput/azpSA_5_solution")
.. image:: ../_static/AZPSA5.png
.. _azpt_examples:
**AZP Tabu**
:ref:`AZP tabu description <azpt_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 1, 0.9)
instance.exportArcData("testOutput/azpTabu_1_input")
instance.cluster('azpTabu', ['SAR1'], 15, dissolve=1)
instance.results[0].exportArcData("testOutput/azpTabu_1_solution")
.. image:: ../_static/AZPT1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpTabu_2_input")
instance.cluster('azpTabu', ['SAR1', 'SAR2'], 15, wType='queen', std=1, convTabu=5, tabuLength=5, dissolve=1)
instance.results[0].exportArcData("testOutput/azpTabu_2_solution")
**Example 3** ::
import clusterpy
instance = clusterpy.createGrid(3, 3)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpTabu_3_input")
instance.cluster('azpTabu', ['SAR1', 'SAR2'], 3, wType='queen', std=1, initialSolution=[0, 0, 1, 0, 1, 1, 2, 2, 2], convTabu=5, tabuLength=5, dissolve=1)
instance.results[0].exportArcData("testOutput/azpTabu_3_solution")
**Example 4** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
dataOperations = {'POP1970':['sum', 'mean'], 'POP2001':['sum', 'mean']}
calif.exportArcData("testOutput/azpTabu_4_input")
calif.cluster('azpTabu', ['POP1970', 'POP2001'], 15, dissolve=1, dataOperations=dataOperations)
calif.results[0].exportArcData("testOutput/azpTabu_4_solution")
**Example 5** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.exportArcData("testOutput/azpTabu_5_input")
calif.cluster('azpTabu', ['g70_01'], 15, dissolve=1)
calif.results[0].exportArcData("testOutput/azpTabu_5_solution")
.. image:: ../_static/AZPT5.png
.. _azprt_examples:
**AZP Reactive Tabu**
:ref:`AZP reactive tabu description <azprt_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 1, 0.9)
instance.exportArcData("testOutput/azpRTabu_1_input")
instance.cluster('azpRTabu', ['SAR1'], 15, dissolve=1)
instance.results[0].exportArcData("testOutput/azpRTabu_1_solution")
.. image:: ../_static/AZPR1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpRTabu_2_input")
instance.cluster('azpRTabu', ['SAR1', 'SAR2'], 15, wType='queen', std=1, convTabu=5, dissolve=1)
instance.results[0].exportArcData("testOutput/azpRTabu_2_solution")
**Example 3** ::
import clusterpy
instance = clusterpy.createGrid(3, 3)
instance.generateData("SAR", 'rook', 2, 0.9)
instance.exportArcData("testOutput/azpRTabu_3_input")
instance.cluster('azpRTabu', ['SAR1', 'SAR2'], 3, wType='queen', std=1, initialSolution=[0, 0, 1, 0, 1, 1, 2, 2, 2], convTabu=5, dissolve=1)
instance.results[0].exportArcData("testOutput/azpRTabu_3_solution")
**Example 4** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
dataOperations = {'POP1970':['sum', 'mean'], 'POP2001':['sum', 'mean']}
calif.exportArcData("testOutput/azpRTabu_4_input")
calif.cluster('azpRTabu', ['POP1970', 'POP2001'], 15, dissolve=1, dataOperations=dataOperations)
calif.results[0].exportArcData("testOutput/azpRTabu_4_solution")
**Example 5** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.exportArcData("testOutput/azpRTabu_5_input")
calif.cluster('azpRTabu', ['g70_01'], 15, dissolve=1)
calif.results[0].exportArcData("testOutput/azpRTabu_5_solution")
.. image:: ../_static/AZPR5.png
        .. _maxp_examples:
**MAX-P**
:ref:`Max-p region description <maxp_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 1, 0.9)
instance.generateData('Uniform', 'rook', 1, 10, 15)
instance.exportArcData("testOutput/maxpTabu_1_input")
instance.cluster('maxpTabu', ['SAR1', 'Uniform2'], threshold=130, dissolve=1)
instance.results[0].exportArcData("testOutput/maxpTabu_1_solution")
.. image:: ../_static/Maxp1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(10, 10)
instance.generateData("SAR", 'rook', 1, 0.9)
instance.generateData('Uniform', 'rook', 1, 10, 15)
instance.exportArcData("testOutput/maxpTabu_2_input")
instance.cluster('maxpTabu', ['SAR1', 'Uniform2'], threshold=130, wType='queen', maxit=3, tabuLength=5, dissolve=1)
instance.results[0].exportArcData("testOutput/maxpTabu_2_solution")
**Example 3** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
dataOperations = {'POP1970':['sum', 'mean'], 'POP2001':['sum', 'mean']}
calif.exportArcData("testOutput/maxpTabu_3_input")
calif.cluster('maxpTabu', ['POP1970', 'POP2001'], threshold=100000, dissolve=1, dataOperations=dataOperations)
calif.results[0].exportArcData("testOutput/maxpTabu_3_solution")
**Example 4** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.fieldNames
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.exportArcData("testOutput/maxpTabu_4_input")
calif.cluster('maxpTabu', ['g70_01', 'POP2001'], threshold=100000, dissolve=1,std=1)
calif.results[0].exportArcData("testOutput/maxpTabu_4_solution")
.. image:: ../_static/Maxp4.png
.. _amoeba_examples:
**AMOEBA**
:ref:`AMOEBA description <amoeba_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(33, 33)
instance.generateData("Spots", 'rook', 1, 2, 0.7, 0.99)
instance.cluster('amoeba', ['Spots1'],significance=0.01)
instance.exportArcData("testOutput/amoeba_1_solution")
.. image:: ../_static/AMOEBA1.png
**Example 2**::
import clusterpy
instance = clusterpy.createGrid(25, 25)
instance.generateData("Spots", 'rook', 1, 2, 0.7, 0.99)
instance.cluster('amoeba', ['Spots1'],wType="queen",significance=0.01)
instance.exportArcData("testOutput/amoeba_2_solution")
**Example 3** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.cluster('amoeba', ['g70_01'],significance=0.01)
calif.exportArcData("testOutput/amoeba_3_solution")
.. image:: ../_static/AMOEBA3.png
.. _som_examples:
**Self Organizing Maps (SOM)**
:ref:`SOM description <som_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(33, 33)
instance.generateData("SAR", "rook", 1, 0.9)
instance.cluster("som", ["SAR1"], nRows=2,nCols=2)
instance.exportArcData("testOutput/som_1_dataLayer")
.. image:: ../_static/som1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(33,33)
instance.generateData("SAR",'rook',1,0.9)
instance.generateData("SAR",'rook',1,0.9)
instance.cluster('som',['SAR1','SAR2'],nRows=2,nCols=2,alphaType='quadratic', fileName="testOutput/NeuronsLayer")
instance.exportArcData("testOutput/som_2_dataLayer")
**Example 3** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.cluster('som',['g70_01'],nRows=2,nCols=2,alphaType='linear')
calif.exportArcData("testOutput/som_3_solution")
.. image:: ../_static/som3.png
.. _geosom_examples:
**Geo Self Organizing Maps (geoSOM)**
:ref:`GeoSOM description <geosom_description>`
**Example 1** ::
import clusterpy
instance = clusterpy.createGrid(33, 33)
instance.generateData("SAR", "rook", 1, 0.9)
instance.cluster("geoSom", ["SAR1"], nRows=3,nCols=3)
instance.exportArcData("testOutput/geoSom_1_dataLayer")
.. image:: ../_static/geosom1.png
**Example 2** ::
import clusterpy
instance = clusterpy.createGrid(33,33)
instance.generateData("SAR",'rook',1,0.9)
instance.generateData("SAR",'rook',1,0.9)
instance.cluster('geoSom',['SAR1','SAR2'],nRows=3,nCols=3,alphaType='quadratic', fileName="testOutput/NeuronsLayer")
instance.exportArcData("testOutput/geoSom_2_dataLayer")
**Example 3** ::
import clusterpy
calif = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
calif.dataOperation("g70_01 = float(POP2001 - POP1970) / POP1970")
calif.cluster('geoSom',['g70_01'],nRows=3,nCols=3,alphaType='linear')
calif.exportArcData("testOutput/geoSom_3_solution")
.. image:: ../_static/geosom3.png
"""
self = args[0]
algorithm = args[1]
# Extracting W type from arguments
if kargs.has_key('wType'):
wType = kargs['wType']
kargs.pop('wType')
else:
wType = 'rook'
# Extracting W according to requirement
if wType == 'rook':
algorithmW = self.Wrook
elif wType == 'queen':
algorithmW = self.Wqueen
else:
algorithmW = self.Wrook
# Extracting standardize variables
if kargs.has_key('std'):
std = kargs.pop('std')
else:
std = 0
# Setting dissolve according to requirement
if kargs.has_key("dissolve"):
dissolve = kargs.pop('dissolve')
else:
dissolve = 0
# Extracting dataOperations
if kargs.has_key("dataOperations"):
dataOperations = kargs.pop("dataOperations")
else:
dataOperations = {}
# Construction of parameters per algorithm
if algorithm in ["geoSom","amoeba","som"]:
dissolve = 0
dataOperations = {}
print "The parameters ""dissolve"" and ""dataOperations"" is not available for the this \
algorithm"
if algorithm == "geoSom":
fieldNames = tuple(args[2])
args = (self, fieldNames) + args[3:]
else:
fieldNames = tuple(args[2])
algorithmY = self.getVars(*fieldNames)
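            # When std=1, every aggregation variable in algorithmY is replaced
            # by its z-score before the algorithm is run.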
if std==1:
for nn,name in enumerate(fieldNames):
values = [i[0] for i in self.getVars(name).values()]
mean_value = numpy.mean(values)
std_value = numpy.std(values)
newVar = fieldOperation("( " + name + " - " + str(mean_value) + ")/float(" + str(std_value) + ")", algorithmY, fieldNames)
for nv,val in enumerate(newVar):
algorithmY[nv][nn] = val
            # Adding the original population to algorithmY
if algorithm == "maxpTabu":
population = fieldNames[-1]
populationY = self.getVars(population)
for key in populationY:
algorithmY[key][-1] = populationY[key][0]
args = (algorithmY,algorithmW) + args[3:]
name = algorithm + "_" + time.strftime("%Y%m%d%H%M%S")
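        # Dispatch table: each supported algorithm name maps to the toolbox
        # routine that actually solves the problem. Every routine receives the
        # (algorithmY, algorithmW, ...) arguments prepared above and returns
        # the outputCluster dictionary for this run, e.g.
        #   china.cluster('arisel', ['Y1990'], 5, wType='queen', std=1)
        # ends up calling execArisel with China's data and queen weights.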
self.outputCluster[name] = {
"random": lambda *args, **kargs: execRandom(*args, **kargs),
"azp": lambda *args, **kargs: execAZP(*args, **kargs),
"arisel": lambda *args, **kargs: execArisel(*args, **kargs),
"azpTabu": lambda *args, **kargs: execAZPTabu(*args, **kargs),
"azpRTabu": lambda *args, **kargs: execAZPRTabu(*args, **kargs),
"azpSa": lambda *args, **kargs: execAZPSA(*args, **kargs),
"amoeba": lambda *args, **kargs: execAMOEBA(*args, **kargs),
"som": lambda *args, **kargs: originalSOM(*args, **kargs),
"geoSom": lambda *args, **kargs: geoSom(*args, **kargs),
"pRegionsExact": lambda *args, **kargs: execPregionsExact(*args, **kargs),
"pRegionsExactCP": lambda *args, **kargs: execPregionsExactCP(*args, **kargs),
"minpOrder": lambda *args, **kargs: execMinpOrder(*args, **kargs),
"minpFlow": lambda *args, **kargs: execMinpFlow(*args, **kargs),
"maxpTabu": lambda *args, **kargs: execMaxpTabu(*args, **kargs)
}[algorithm](*args, **kargs)
self.outputCluster[name]["weightType"] = wType
self.outputCluster[name]["aggregationVariables"] = fieldNames
self.outputCluster[name]["OS"] = os.name
self.outputCluster[name]["proccesorArchitecture"] = os.getenv('PROCESSOR_ARCHITECTURE')
self.outputCluster[name]["proccesorIdentifier"] = os.getenv('PROCESSOR_IDENTIFIER')
self.outputCluster[name]["numberProccesor"] = os.getenv('NUMBER_OF_PROCESSORS')
sol = self.outputCluster[name]["r2a"]
self.region2areas = sol
self.addVariable([name], sol)
self.outputCluster[name]["fieldName"] = self.fieldNames[-1]
if dissolve == 1:
self.dissolveMap(dataOperations=dataOperations)
def spmorph(self,variables,minShocks,maxShocks,
inequalityIndex,outFile,clusterAlgorithm,
nClusters, **kargs):
"""
        This function runs the spMorph algorithm, devised by
        [Duque_Ye_Folch2012], over all combinations of shocks between minShocks
        and maxShocks. spMorph is an exploratory space-time analysis tool for
        describing processes of spatial redistribution of a given variable.
:keyword variables: List with variables to be analyzed. The variables must be chronologically sorted; e.g: ['Y1978', 'Y1979', 'Y1980', 'Y1981', 'Y1982', 'Y1983', 'Y1984', 'Y1985', 'Y1986', 'Y1987', 'Y1988']
:type variables: list
:keyword minShocks: minimum number of shocks to evaluate
:type minShocks: integer
:keyword maxShocks: maximum number of shocks to evaluate
:type maxShocks: integer
:keyword inequalityIndex: name of the inequality index to be utilized in the algorithm. By now, the only option is 'theil'
:type inequalityIndex: string
:keyword outFile: Name for the output file; e.g.: "spMorph" (no extension)
:type outFile: string
:keyword clusterAlgorithm: name of the spatial clustering algorithm to be utilized in the algorithm. The clustering algorithm must be any version of 'azp' or 'arisel'
:type clusterAlgorithm: string
:keyword nClusters: number of regions
:type nClusters: integer
        After the parameter nClusters you can include more parameters related to the clustering algorithm that you are using. We recommend not using the dissolve=1 option.
**Example 1**::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
variables = ['Y1978', 'Y1979', 'Y1980', 'Y1981']
            china.spmorph(variables,0,2,'theil',"spmorph",'azpTabu',4)
"""
def createTable(index,comb,shock):
auxline = str(index)
auxline = auxline.replace("[","")
auxline = auxline.replace("]","")
combtext = str(comb)
combtext = combtext.replace(","," ")
line = "".join([str(len(comb)),",",str(combtext),",",shock,",",auxline,",","\n"])
return line
bestSolutions = {}
table_t = ""
table_tw = ""
table_tb = ""
table_tw_t = ""
table_lowerTw_t = ""
table_a2r = ""
auxline = str(variables)
auxline = auxline.replace("[","")
auxline = auxline.replace("]","")
auxline = auxline.replace("'","")
header = "".join(["#shocks,shocks,shock,",auxline,"\n"])
auxline = str(range(len(self.areas)))
auxline = auxline.replace("[","")
auxline = auxline.replace("]","")
header_a2r = "".join(["#shocks,shocks,shock,",auxline,"\n"])
fout_t = open(outFile + "_t.csv","w")
fout_t.write(header)
fout_tb = open(outFile + "_tb.csv","w")
fout_tb.write(header)
fout_tw = open(outFile + "_tw.csv","w")
fout_tw.write(header)
fout_twt = open(outFile + "_twt.csv","w")
fout_twt.write(header)
fout_lb = open(outFile + "_lb.csv","w")
fout_lb.write(header)
fouta2r = open(outFile + "_a2r.csv","w")
fouta2r.write(header_a2r)
cachedSolutions = {}
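        # For every candidate number of shocks, enumerate all combinations of
        # shock variables, evaluate each one with inequalityShock, and keep the
        # combination that minimizes the summed lower bound of the within/total
        # inequality ratio (lowerTw_t); the best solution per size is appended
        # to the output CSV files.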
for nElements in range(minShocks,maxShocks+1):
bestSol = [] # (t,tb,tw,tw/t,loweTw/t,comb,objectiveFunction)
for comb in itertools.combinations(variables[1:],nElements):
comb = list(comb)
comb.sort()
comb = tuple(comb)
t, tb, tw, tw_t, lowerTw_t,a2r = self.inequalityShock(variables,
comb,inequalityIndex,clusterAlgorithm,nClusters,cachedSolutions,**kargs)
if bestSol == [] or sum(lowerTw_t) <= (bestSol["of"]):
bestSol = {"t" : t,
"tb" : tb,
"tw" : tw,
"tw_t" : tw_t,
"lowerTw_t" : lowerTw_t,
"comb" : comb,
"of" : sum(lowerTw_t),
"a2r" : a2r}
fout_t = open(outFile + "_t.csv","a")
fout_tb = open(outFile + "_tb.csv","a")
fout_tw = open(outFile + "_tw.csv","a")
fout_twt = open(outFile + "_twt.csv","a")
fout_lb = open(outFile + "_lb.csv","a")
fouta2r = open(outFile + "_a2r.csv","a")
for nc in range(nElements+1):
line = createTable(bestSol["t"][nc],bestSol["comb"],str(nc))
fout_t.write(line)
line = createTable(bestSol["tb"][nc],bestSol["comb"],str(nc))
fout_tb.write(line)
line = createTable(bestSol["tw"][nc],bestSol["comb"],str(nc))
fout_tw.write(line)
line = createTable(bestSol["tw_t"][nc],bestSol["comb"],str(nc))
fout_twt.write(line)
line = createTable(bestSol["a2r"][nc],bestSol["comb"],str(nc))
fouta2r.write(line)
line = createTable(bestSol["lowerTw_t"],comb,"")
fout_lb.write(line)
fout_t.close()
fout_tb.close()
fout_tw.close()
fout_twt.close()
fout_lb.close()
fouta2r.close()
def inequalityShock(self,variables,shokVariables,
inequalityIndex,clusterAlgorithm,
nClusters,cachedSolutions,**kargs):
"""
This function runs the algorithm spMorph, devised by
[Duque_Ye_Folch2012], for a predefined shock. spMorph is an
exploratory space-time analysis tool for describing processes of
spatial redistribution of a given variable.
:keyword variables: List with variables to be analyzed. The variables must be chronologically sorted; e.g: ['Y1978', 'Y1979', 'Y1980', 'Y1981', 'Y1982', 'Y1983', 'Y1984', 'Y1985', 'Y1986', 'Y1987', 'Y1988']
:type variables: list
:keyword shokVariables: list with the name of the variable (in
        vars) in which a shock occurred. NOTE: the shock variable is
included as the first variable of the next period; e.g: ['Y1981', 'Y1984'], this implies that the periods to analyze are: 1978-1980; 1981-1983 and 1984-1988.
:type shokVariables: list
:keyword inequalityIndex: name of the inequality index to be utilized in the algorithm. By now, the only option is 'theil'
:type inequalityIndex: string
:keyword clusterAlgorithm: name of the spatial clustering algorithm to be utilized in the algorithm. The clustering algorithm must be any version of 'azp' or 'arisel'
:type clusterAlgorithm: string
:keyword nClusters: number of regions
:type nClusters: integer
        After the parameter nClusters you can include more parameters related to the clustering algorithm that you are using. We recommend not using the dissolve=1 option.
The function returns:
t: total Theil
tb: between groups inequality
tw: within groups inequality
lb: lower bound
a2r: solution vector for the regionalization algorithm
**Example**::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
variables = ['Y1978', 'Y1979', 'Y1980', 'Y1981', 'Y1982',
'Y1983', 'Y1984', 'Y1985', 'Y1986', 'Y1987', 'Y1988',
'Y1989', 'Y1990', 'Y1991', 'Y1992' , 'Y1993', 'Y1994',
'Y1995', 'Y1996', 'Y1997', 'Y1998']
shokVariable = ['Y1984']
t,tb,tw,tw_t,lb,a2r=china.inequalityShock(variables,shokVariable,'theil',
'arisel',5)
"""
tempSet = []
area2regions = {}
area2regionsList = []
tempSetOrdered = []
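        # Split the variable list into periods at each shock variable, run the
        # clustering algorithm once per period (re-using cachedSolutions when a
        # period has already been solved), and remember the region labels for
        # every period in chronological order.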
for var in variables:
if var in shokVariables:
if tempSet == []:
raise NameError("First period could not \
be a shock period")
else:
tempSet.sort()
tempSet = tuple(tempSet)
if tempSet not in cachedSolutions:
clusterArgs = (clusterAlgorithm,tempSet,nClusters)
self.cluster(*clusterArgs,**kargs)
area2regions[tempSet] = self.region2areas
tempSetOrdered.append(tempSet)
else:
area2regions[tempSet] = cachedSolutions[tempSet][-1]
tempSetOrdered.append(tempSet)
tempSet = [var]
else:
tempSet.append(var)
tempSet.sort()
tempSet = tuple(tempSet)
if tempSet not in cachedSolutions:
clusterArgs = (clusterAlgorithm,tempSet,nClusters)
self.cluster(*clusterArgs,**kargs)
area2regions[tempSet] = self.region2areas
tempSetOrdered.append(tempSet)
else:
area2regions[tempSet] = cachedSolutions[tempSet][-1]
tempSetOrdered.append(tempSet)
Y = self.getVars(*variables)
t = []
tb = []
tw = []
tw_t = []
for a2r in tempSetOrdered:
if a2r in cachedSolutions:
t2,tb2,tw2,tw_t2,a2rc = cachedSolutions[a2r]
else:
t2,tb2,tw2,tw_t2 = inequalityMultivar(Y,area2regions[a2r],inequalityIndex)
cachedSolutions[a2r] = t2,tb2,tw2,tw_t2,area2regions[a2r]
a2rc = area2regions[a2r]
t.append(t2)
tb.append(tb2)
tw.append(tw2)
tw_t.append(tw_t2)
area2regionsList.append(a2rc)
lowerTw_t = [min(x) for x in zip(*tw_t)]
return t, tb, tw, tw_t, lowerTw_t,area2regionsList
def inequality(*args,**kargs):
"""
        Documentation: Juank
"""
self = args[0]
algorithm = args[1]
if algorithm == 'globalInequalityChanges':
variables = args[2]
outFile = args[3]
Y = self.getVars(*variables)
args = (Y,variables,outFile)
globalInequalityChanges(*args,**kargs)
result = None
elif algorithm == 'inequality':
variables = args[2]
area2region = args[3]
Y = self.getVars(*variables)
area2region = [x[0] for x in self.getVars(area2region).values()]
args = (Y,area2region)
result = inequalityMultivar(*args,**kargs)
elif algorithm == 'interregionalInequalityTest':
fieldNames = args[2]
area2region = args[3]
outFile = args[4]
Y = self.getVars(*fieldNames)
area2regions = self.getVars(area2region)
args = (Y,fieldNames,area2regions,area2region,outFile)
result = interregionalInequalityTest(*args,**kargs)
elif algorithm == 'regionsInequalityDifferenceTest':
fieldNames = args[2]
area2region = args[3]
outFile = args[4]
Y = self.getVars(*fieldNames)
area2regions = self.getVars(area2region)
area2regions = zip(*area2regions.values())
args = (Y,fieldNames,area2regions,area2region,outFile)
result = interregionalInequalityDifferences(*args,**kargs)
return result
def esda(*args, **kargs):
"""
        Documentation: Juank
        Exploratory spatial data analysis algorithms. For more information
        about the basic and the optional parameters, read the official
        algorithm documentation at www.rise-group.org
        :param args: basic parameters.
:type args: tuple
:param kargs: optional parameter keywords.
:type kargs: dictionary
**Examples**
Geographical association coefficient (GAC)
>>> import clusterpy
>>> new = clusterpy.createGrid(10, 10)
>>> new.generateData("SAR", 'rook', 1, 0.9)
>>> new.generateData("SAR", 'rook', 1, 0.9)
>>> gac = new.esda("GAC", "SAR1", "SAR2")
Redistribution coefficient
>>> import clusterpy
>>> new = clusterpy.createGrid(10, 10)
>>> new.generateData("SAR", 'rook', 1, 0.9)
>>> new.generateData("SAR", 'rook', 1, 0.9)
>>> rdc = new.esda("RDC", "SAR1", "SAR2")
Similarity coefficient
>>> import clusterpy
>>> new = clusterpy.createGrid(10, 10)
>>> new.generateData("SAR", 'rook', 1, 0.9)
>>> new.generateData("SAR", 'rook', 1, 0.9)
>>> SIMC = new.esda("SIMC", "SAR1", "SAR2")
"""
self = args[0]
algorithm = args[1]
args = [self] + list(args[2:])
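        # Note: kargs is reset to an empty dict below, so any optional keyword
        # arguments passed to esda are not forwarded to the ESDA routines.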
kargs = {}
result = {
"GAC": lambda *args, **kargs: geoAssociationCoef(*args, **kargs),
"RDC": lambda *args, **kargs: redistributionCoef(*args, **kargs),
"SIMC": lambda *args, **kargs: similarityCoef(*args, **kargs),
}[algorithm](*args, **kargs)
return result
def exportArcData(self, filename):
"""
        Creates an ESRI shapefile from a clusterPy layer.
:param filename: shape file name to create, without ".shp"
:type filename: string
**Examples** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportArcData("china")
"""
print "Writing ESRI files"
shpWriterDis(self.areas, filename, self.shpType)
self.exportDBFY(filename)
print "ESRI files created"
def exportDBFY(self, fileName, *args):
"""Exports the database file
:param fileName: dbf file name to create, without ".dbf"
:type fileName: string
:param args: variables subset to be exported
:type args: tuple
**Examples** ::
import clusterpy
            china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportDBFY("china")
"""
print "Writing DBF file"
if args != ():
            Y = self.getVars(*args)
fieldNames = args
else:
Y = self.Y
fieldNames = self.fieldNames
fieldspecs = []
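        # Infer the DBF field specs from the first area's row: string values
        # become 10-character text columns ('C'), everything else becomes a
        # numeric column ('N') with width 10 and 3 decimals.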
types = Y[0]
for i in types:
itype = str(type(i))
if 'str' in itype:
fieldspecs.append(('C', 10, 0))
else:
fieldspecs.append(('N', 10, 3))
        records = [list(values) for values in Y.values()]
dbfWriter(fieldNames, fieldspecs, records, fileName + '.dbf')
print "Done"
def exportCSVY(self, fileName, *args):
"""Exports layers data on .csv file
:param fileName: csv file name to create, without ".csv"
:type fileName: string
:param args: variables subset to be exported
:type args: tuple
**Examples** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportCSVY("ChinaCSV")
"""
print "Writing CSV files"
if args != ():
Y = self.getVars(*args)
fieldNames = args
else:
Y = self.Y
fieldNames = self.fieldNames
records = Y.values()
csvWriter(fileName, fieldNames, records)
print "Done"
def exportGALW(self, fileName, wtype='rook', idVariable='ID'):
"""
Exports the contiguity W matrix to a gal file
:param fileName: gal file name to create, without ".gal"
:type fileName: string
:keyword wtype: w type to export, default is 'rook'
:type wtype: string
:keyword idVariable: id variable fieldName, default is 'ID'
:type idVariable: string
**Example 1**
Exporting rook matrix ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportGALW("chinaW", wtype='rook')
**Example 2**
Exporting queen matrix ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportGALW("chinaW", wtype='queen')
**Example 3**
Exporting queen matrix based on a variable different from ID ::
import clusterpy
california = clusterpy.importArcData("clusterpy/data_examples/CA_Polygons")
california.exportGALW("californiaW", wtype='queen',idVariable="MYID")
**Example 4**
Exporting a customW matrix imported from a GWT file::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.customW = clusterpy.importGWT("clusterpy/data_examples/china_gwt_658.193052")
china.exportGALW("chinaW", wtype='custom')
"""
print "Writing GAL file"
if wtype == 'rook':
nw = self.Wrook
elif wtype == 'queen':
nw = self.Wqueen
elif wtype == 'custom':
nw = self.customW
else:
raise NameError("W type is not valid")
idvar = self.getVars(idVariable)
dict2gal(nw,idvar,fileName)
def exportCSVW(self, fileName, wtype='rook', idVariable='ID', standarize=False):
"""
Exports the contiguity W matrix to a csv file
:param fileName: csv file name to create, without ".csv"
:type fileName: string
:keyword wtype: w type to export, default is 'rook'
:type wtype: string
:keyword idVariable: id variable fieldName, default is 'ID'
:type idVariable: string
:keyword standarize: True to standardize the variables, default is False
:type standarize: boolean
**Example 1**
Writing the rook matrix to a csv ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportCSVW("chinaW", wtype='rook')
**Example 2**
Writing the queen matrix to a csv ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportCSVW("chinaW", wtype='queen')
"""
print "Writing CSV file"
if wtype == 'rook':
nw = copy.deepcopy(self.Wrook)
elif wtype == 'queen':
nw = copy.deepcopy(self.Wqueen)
elif wtype == 'custom':
nw = copy.deepcopy(self.customW)
else:
raise NameError("W type is not valid")
w = copy.deepcopy(nw)
idvar = self.getVars(idVariable)
dict2csv(nw,idvar,fileName,standarize)
def exportOutputs(self, filename):
"""Exports outputs of the last executed algorithm to a csv file. If no
algorithm has been run, you will get an error message.
:param filename: csv file name to create, without ".csv"
:type filename: string
**Examples** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.cluster('geoSom', ['Y1991'], 10, 10, alphaType='quadratic', fileName="oLayer", dissolve=1)
china.exportOutputs("outputs")
"""
f = open(filename, 'w')
#try:
print "Writing outputs to the CSV"
key0 = 'none'
cont = 0
while key0 == 'none' or key0 == "r2aRoot" or key0 == "r2a":
key0 = self.outputCluster.keys()[cont]
cont += 1
headers = self.outputCluster[key0].keys()
line = ''
for header in headers:
line += header + ';'
f.write(line[0: -1] + '\n')
for key in self.outputCluster.keys():
line = ''
for header in headers:
if (key != 'r2a' and key != 'r2aRoot'):
line += str(self.outputCluster[key][header]) + ';'
f.write(line[0: -1] + '\n')
print "Outputs successfully exported"
#except:
# raise NameError("No algorithm has been run")
f.close()
def exportRegions2area(self, filename):
"""export region2area results
:param filename: csv file name to create, without ".csv"
:type filename: string
**Examples** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportRegions2area('region2area')
"""
print "Writing region2areas"
f = open(filename, 'w')
data = self.getVars(self.outputCluster.keys())
for area in data.keys():
line = str(area) + ';'
regions = data[area]
for region in regions:
line += str(region) + ';'
f.write(line[0: -1] + '\n')
f.close()
print "region2areas successfully saved"
def transport(self, xoffset, yoffset):
"""
This function shifts all the coordinates of a layer object by the
given offsets.
:param xoffset: length of the translation to be made on the x coordinates
:type xoffset: float
:param yoffset: length of the translation to be made on the y coordinates
:type yoffset: float
**Examples** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.transport(100, 100)
"""
print "Changing coordinates"
transportLayer(self, xoffset, yoffset)
print "Done"
def expand(self, xproportion, yproportion):
"""
This function scales the layer width and height according to the given
proportions
:param xproportion: proportion to scale x
:type xproportion: float
:param yproportion: proportion to scale y
:type yproportion: float
**Example** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.expand(100, 100)
"""
print "Changing coordinates"
expandLayer(self, xproportion, yproportion)
print "Done"
def getGeometricAreas(self):
"""
This function calculates the geometric area of each polygon in
the map and returns them as a dictionary.
For computational efficiency it's recommended to store the results
on the layer database using the addVariable layer function.
**Example** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.getGeometricAreas()
"""
return getGeometricAreas(self)
def getCentroids(self):
"""Centroid calculation
This function calculates the centroids for the polygons of
a map and returns it as a dictionary with the
coordinates of each area.
For computational efficiency it's recommended to store the results
on the layer database using the addVariable layer function.
**Example** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.getCentroids()
"""
return getCentroids(self)
def getBbox(self):
"""
This function returns the bounding box of the layer object.
**Example** ::
import clusterpy
china = clusterpy.importArcData("clusterpy/data_examples/china")
china.getBbox()
"""
if self.bbox == []:
self.bbox = getBbox(self)
return self.bbox
def _defBbox(self):
if self.bbox == []:
self.bbox = getBbox(self)
def topoStats(self,regular=False):
if self.tStats == []:
self.nWrook = noFrontiersW(self.Wrook,self.Wqueen,self.areas)
M_n, m_n, mu1, mu2, a1, s, eig = topoStatistics(self.Wrook,self.nWrook,regular=regular)
self.tStats = [M_n,m_n,mu1,mu2,a1,s,eig]
return self.tStats
|
clusterpy/clusterpy
|
clusterpy/core/layer.py
|
Python
|
bsd-3-clause
| 75,262
|
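A minimal, hedged sketch tying together the export helpers documented above; it reuses the
"clusterpy/data_examples/china" sample and the "Y1991" field that appear in the docstring
examples, and the output file names are placeholders.

import clusterpy

china = clusterpy.importArcData("clusterpy/data_examples/china")
china.exportArcData("china_copy")            # writes .shp/.dbf via shpWriterDis and exportDBFY
china.exportGALW("chinaW", wtype='queen')    # queen contiguity matrix from layer.Wqueen
china.exportCSVY("chinaCSV", "Y1991")        # export only the Y1991 column
centroids = china.getCentroids()             # dictionary of centroid coordinates per area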
# coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
import unittest
import os
import json
import numpy as np
import pandas as pd
from pymatgen import Structure
from veidt.describer.general import FuncGenerator, MultiDescriber
from veidt.describer.structural_describer import DistinctSiteProperty
class GeneratorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = np.random.rand(100, 3) * 10 - 5
cls.df = pd.DataFrame(cls.data, columns=["x", "y", "z"])
func_dict = {"sin": "np.sin",
"sum": "lambda d: d.sum(axis=1)",
"nest": "lambda d: np.log(np.exp(d['x']))"}
cls.generator = FuncGenerator(func_dict=func_dict)
def test_describe(self):
results = self.generator.describe(self.df)
np.testing.assert_array_equal(np.sin(self.data),
results[["sin x", "sin y", "sin z"]])
np.testing.assert_array_equal(np.sum(self.data, axis=1),
results["sum"])
np.testing.assert_array_almost_equal(self.data[:, 0],
results["nest"])
def test_serialize(self):
json_str = json.dumps(self.generator.as_dict())
recover = FuncGenerator.from_dict(json.loads(json_str))
class MultiDescriberTest(unittest.TestCase):
def test_describe(self):
li2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Li2O.cif"))
na2o = Structure.from_file(os.path.join(os.path.dirname(__file__),
"../../tests/Na2O.cif"))
d1 = DistinctSiteProperty(['8c', '4a'], ["Z", "atomic_radius"])
d2 = FuncGenerator(func_dict={"exp": "np.exp"}, append=False)
d = MultiDescriber([d1, d2])
results = d.describe(li2o)
self.assertAlmostEqual(results.iloc[0]["exp 8c-Z"], np.exp(3))
self.assertAlmostEqual(results.iloc[0]["exp 8c-atomic_radius"],
np.exp(1.45))
df = d.describe_all([li2o, na2o])
self.assertAlmostEqual(df.iloc[0]["exp 8c-Z"], np.exp(3))
self.assertAlmostEqual(df.iloc[1]["exp 8c-Z"], np.exp(11))
if __name__ == "__main__":
unittest.main()
|
materialsvirtuallab/veidt
|
veidt/describer/tests/test_general_describer.py
|
Python
|
bsd-3-clause
| 2,379
|
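A short usage sketch for FuncGenerator, mirroring the unit test above (column names such as
"sin x" follow the naming the test asserts; the input frame here is made up):

import numpy as np
import pandas as pd
from veidt.describer.general import FuncGenerator

df = pd.DataFrame(np.random.rand(5, 2), columns=["x", "y"])
gen = FuncGenerator(func_dict={"sin": "np.sin", "sum": "lambda d: d.sum(axis=1)"})
features = gen.describe(df)   # columns: "sin x", "sin y", "sum"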
from webargs import fields
from ..api.validators import Email, password
user_args = {
'email': fields.Str(validate=Email, required=True),
'password': fields.Str(validate=password, required=True)
}
role_args = {
'name': fields.Str(required=True),
'description': fields.Str(required=True)
}
|
teracyhq/flask-boilerplate
|
app/api_1_0/args.py
|
Python
|
bsd-3-clause
| 308
|
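A hedged sketch of how user_args might be wired into a Flask view with webargs' Flask parser;
the route, the relative import and the use_args decorator are standard Flask/webargs idioms,
not taken from this repository:

from flask import Flask, jsonify
from webargs.flaskparser import use_args

from .args import user_args   # hypothetical relative import of the dict above

app = Flask(__name__)

@app.route('/login', methods=['POST'])
@use_args(user_args)
def login(args):
    # args arrives already validated by the Email and password validators above
    return jsonify(email=args['email'])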
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utilities for generating the version string for Astropy (or an affiliated
package) and the version.py module, which contains version info for the
package.
Within the generated astropy.version module, the `major`, `minor`, and `bugfix`
variables hold the respective parts of the version number (bugfix is '0' if
absent). The `release` variable is True if this is a release, and False if this
is a development version of astropy. For the actual version string, use::
from astropy.version import version
or::
from astropy import __version__
"""
from __future__ import division
import datetime
import imp
import os
import pkgutil
import sys
from distutils import log
import pkg_resources
from . import git_helpers
from .distutils_helpers import is_distutils_display_option
from .utils import invalidate_caches
PY3 = sys.version_info[0] == 3
def _version_split(version):
"""
Split a version string into major, minor, and bugfix numbers. If any of
those numbers are missing the default is zero. Any pre/post release
modifiers are ignored.
Examples
========
>>> _version_split('1.2.3')
(1, 2, 3)
>>> _version_split('1.2')
(1, 2, 0)
>>> _version_split('1.2rc1')
(1, 2, 0)
>>> _version_split('1')
(1, 0, 0)
>>> _version_split('')
(0, 0, 0)
"""
parsed_version = pkg_resources.parse_version(version)
if hasattr(parsed_version, 'base_version'):
# New version parsing for setuptools >= 8.0
if parsed_version.base_version:
parts = [int(part)
for part in parsed_version.base_version.split('.')]
else:
parts = []
else:
parts = []
for part in parsed_version:
if part.startswith('*'):
# Ignore any .dev, a, b, rc, etc.
break
parts.append(int(part))
if len(parts) < 3:
parts += [0] * (3 - len(parts))
# In principle a version could have more parts (like 1.2.3.4) but we only
# support <major>.<minor>.<micro>
return tuple(parts[:3])
# This is used by setup.py to create a new version.py - see that file for
# details. Note that the imports have to be absolute, since this is also used
# by affiliated packages.
_FROZEN_VERSION_PY_TEMPLATE = """
# Autogenerated by {packagetitle}'s setup.py on {timestamp!s}
from __future__ import unicode_literals
import datetime
{header}
major = {major}
minor = {minor}
bugfix = {bugfix}
release = {rel}
timestamp = {timestamp!r}
debug = {debug}
try:
from ._compiler import compiler
except ImportError:
compiler = "unknown"
try:
from .cython_version import cython_version
except ImportError:
cython_version = "unknown"
"""[1:]
_FROZEN_VERSION_PY_WITH_GIT_HEADER = """
{git_helpers}
_packagename = "{packagename}"
_last_generated_version = "{verstr}"
_last_githash = "{githash}"
# Determine where the source code for this module
# lives. If __file__ is not a filesystem path then
# it is assumed not to live in a git repo at all.
if _get_repo_path(__file__, levels=len(_packagename.split('.'))):
version = update_git_devstr(_last_generated_version, path=__file__)
githash = get_git_devstr(sha=True, show_warning=False,
path=__file__) or _last_githash
else:
# The file does not appear to live in a git repo so don't bother
# invoking git
version = _last_generated_version
githash = _last_githash
"""[1:]
_FROZEN_VERSION_PY_STATIC_HEADER = """
version = "{verstr}"
githash = "{githash}"
"""[1:]
def _get_version_py_str(packagename, version, githash, release, debug,
uses_git=True):
timestamp = datetime.datetime.now()
major, minor, bugfix = _version_split(version)
if packagename.lower() == 'astropy':
packagetitle = 'Astropy'
else:
packagetitle = 'Astropy-affiliated package ' + packagename
header = ''
if uses_git:
header = _generate_git_header(packagename, version, githash)
elif not githash:
# _generate_git_header will already generate a new git hash for us, but
# for creating a new version.py for a release (even if uses_git=False)
# we still need to get the githash to include in the version.py
# See https://github.com/astropy/astropy-helpers/issues/141
githash = git_helpers.get_git_devstr(sha=True, show_warning=True)
if not header: # If _generate_git_header fails it returns an empty string
header = _FROZEN_VERSION_PY_STATIC_HEADER.format(verstr=version,
githash=githash)
return _FROZEN_VERSION_PY_TEMPLATE.format(packagetitle=packagetitle,
timestamp=timestamp,
header=header,
major=major,
minor=minor,
bugfix=bugfix,
rel=release, debug=debug)
def _generate_git_header(packagename, version, githash):
"""
Generates a header to the version.py module that includes utilities for
probing the git repository for updates (to the current git hash, etc.)
These utilities should only be available in development versions, and not
in release builds.
If this fails for any reason an empty string is returned.
"""
loader = pkgutil.get_loader(git_helpers)
source = loader.get_source(git_helpers.__name__) or ''
source_lines = source.splitlines()
if not source_lines:
log.warn('Cannot get source code for astropy_helpers.git_helpers; '
'git support disabled.')
return ''
idx = 0
for idx, line in enumerate(source_lines):
if line.startswith('# BEGIN'):
break
git_helpers_py = '\n'.join(source_lines[idx + 1:])
if PY3:
verstr = version
else:
# In Python 2 don't pass in a unicode string; otherwise verstr will
# be represented with u'' syntax which breaks on Python 3.x with x
# < 3. This is only an issue when developing on multiple Python
# versions at once
verstr = version.encode('utf8')
new_githash = git_helpers.get_git_devstr(sha=True, show_warning=False)
if new_githash:
githash = new_githash
return _FROZEN_VERSION_PY_WITH_GIT_HEADER.format(
git_helpers=git_helpers_py, packagename=packagename,
verstr=verstr, githash=githash)
def generate_version_py(packagename, version, release=None, debug=None,
uses_git=True):
"""Regenerate the version.py module if necessary."""
try:
version_module = get_pkg_version_module(packagename)
try:
last_generated_version = version_module._last_generated_version
except AttributeError:
last_generated_version = version_module.version
try:
last_githash = version_module._last_githash
except AttributeError:
last_githash = version_module.githash
current_release = version_module.release
current_debug = version_module.debug
except ImportError:
version_module = None
last_generated_version = None
last_githash = None
current_release = None
current_debug = None
if release is None:
# Keep whatever the current value is, if it exists
release = bool(current_release)
if debug is None:
# Likewise, keep whatever the current value is, if it exists
debug = bool(current_debug)
version_py = os.path.join(packagename, 'version.py')
if (last_generated_version != version or current_release != release or
current_debug != debug):
if '-q' not in sys.argv and '--quiet' not in sys.argv:
log.set_threshold(log.INFO)
if is_distutils_display_option():
# Always silence unnecessary log messages when display options are
# being used
log.set_threshold(log.WARN)
log.info('Freezing version number to {0}'.format(version_py))
with open(version_py, 'w') as f:
# This overwrites the actual version.py
f.write(_get_version_py_str(packagename, version, last_githash,
release, debug, uses_git=uses_git))
invalidate_caches()
if version_module:
imp.reload(version_module)
def get_pkg_version_module(packagename, fromlist=None):
"""Returns the package's .version module generated by
`astropy_helpers.version_helpers.generate_version_py`. Raises an
ImportError if the version module is not found.
If ``fromlist`` is an iterable, return a tuple of the members of the
version module corresponding to the member names given in ``fromlist``.
Raises an `AttributeError` if any of these module members are not found.
"""
if not fromlist:
# Due to a historical quirk of Python's import implementation,
# __import__ will not return submodules of a package if 'fromlist' is
# empty.
# TODO: For Python 3.1 and up it may be preferable to use importlib
# instead of the __import__ builtin
return __import__(packagename + '.version', fromlist=[''])
else:
mod = __import__(packagename + '.version', fromlist=fromlist)
return tuple(getattr(mod, member) for member in fromlist)
|
larrybradley/astropy-helpers
|
astropy_helpers/version_helpers.py
|
Python
|
bsd-3-clause
| 9,639
|
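A minimal sketch of how an affiliated package's setup.py might call the helper above; the
package name and version string are placeholders:

from astropy_helpers.version_helpers import generate_version_py

# Regenerates mypackage/version.py only if the version, release flag or debug flag changed.
generate_version_py('mypackage', '1.2.dev', release=False, debug=False, uses_git=True)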
import json
import logging
import time
from datetime import datetime, timedelta
from itertools import chain
from django.conf import settings
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseRedirect)
from django.shortcuts import render
from django.utils.html import escape
from django.utils.http import urlquote
from django.views.decorators.cache import cache_page
import bleach
import jinja2
from elasticutils.utils import format_explanation
from elasticutils.contrib.django import ES_EXCEPTIONS
from mobility.decorators import mobile_template
from tower import ugettext as _, ugettext_lazy as _lazy
from kitsune import search as constants
from kitsune.forums.models import Forum, ThreadMappingType
from kitsune.products.models import Product
from kitsune.questions.models import QuestionMappingType
from kitsune.search.utils import locale_or_default, clean_excerpt
from kitsune.search import es_utils
from kitsune.search.forms import SimpleSearchForm, AdvancedSearchForm
from kitsune.search.es_utils import F, AnalyzerS, handle_es_errors
from kitsune.search.search_utils import apply_boosts, generate_simple_search
from kitsune.sumo.helpers import Paginator
from kitsune.sumo.json_utils import markup_json
from kitsune.sumo.urlresolvers import reverse
from kitsune.sumo.utils import paginate
from kitsune.wiki.facets import documents_for
from kitsune.wiki.models import DocumentMappingType
log = logging.getLogger('k.search')
EXCERPT_JOINER = _lazy(u'...', 'between search excerpts')
def cache_control(resp, cache_period):
"""Inserts cache/expires headers"""
resp['Cache-Control'] = 'max-age=%s' % (cache_period * 60)
resp['Expires'] = (
(datetime.utcnow() + timedelta(minutes=cache_period))
.strftime('%A, %d %B %Y %H:%M:%S GMT'))
return resp
def _es_down_template(request, *args, **kwargs):
"""Returns the appropriate "Elasticsearch is down!" template"""
return 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
class UnknownDocType(Exception):
"""Signifies a doctype for which there's no handling"""
pass
def build_results_list(pages, is_json):
"""Takes a paginated search and returns results List
Handles wiki documents, questions and contributor forum posts.
:arg pages: paginated S
:arg is_json: whether or not the results are being generated for json output
:returns: list of dicts
"""
results = []
for rank, doc in enumerate(pages, pages.start_index()):
if doc['model'] == 'wiki_document':
summary = _build_es_excerpt(doc)
if not summary:
summary = doc['document_summary']
result = {
'title': doc['document_title'],
'type': 'document'}
elif doc['model'] == 'questions_question':
summary = _build_es_excerpt(doc)
if not summary:
# We're excerpting only question_content, so if the query matched
# question_title or question_answer_content, then there won't be any
# question_content excerpts. In that case, just show the question--but
# only the first 500 characters.
summary = bleach.clean(doc['question_content'], strip=True)[:500]
result = {
'title': doc['question_title'],
'type': 'question',
'is_solved': doc['question_is_solved'],
'num_answers': doc['question_num_answers'],
'num_votes': doc['question_num_votes'],
'num_votes_past_week': doc['question_num_votes_past_week']}
elif doc['model'] == 'forums_thread':
summary = _build_es_excerpt(doc, first_only=True)
result = {
'title': doc['post_title'],
'type': 'thread'}
else:
raise UnknownDocType('%s is an unknown doctype' % doc['model'])
result['url'] = doc['url']
if not is_json:
result['object'] = doc
result['search_summary'] = summary
result['rank'] = rank
result['score'] = doc.es_meta.score
result['explanation'] = escape(format_explanation(doc.es_meta.explanation))
result['id'] = doc['id']
results.append(result)
return results
@markup_json
@handle_es_errors(_es_down_template)
@mobile_template('search/{mobile/}results.html')
def simple_search(request, template=None):
"""Elasticsearch-specific simple search view.
This view is for end user searching of the Knowledge Base and
Support Forum. Filtering options are limited to:
* product (`product=firefox`, for example, for only Firefox results)
* document type (`w=2`, for example, for Support Forum questions only)
"""
# 1. Prep request.
# Redirect to old Advanced Search URLs (?a={1,2}) to the new URL.
if request.GET.get('a') in ['1', '2']:
new_url = reverse('search.advanced') + '?' + request.GET.urlencode()
return HttpResponseRedirect(new_url)
# 2. Build form.
search_form = SimpleSearchForm(request.GET, auto_id=False)
# 3. Validate request.
if not search_form.is_valid():
if request.IS_JSON:
return HttpResponse(
json.dumps({'error': _('Invalid search data.')}),
content_type=request.CONTENT_TYPE,
status=400)
t = template if request.MOBILE else 'search/form.html'
return cache_control(
render(request, t, {
'advanced': False,
'request': request,
'search_form': search_form}),
settings.SEARCH_CACHE_PERIOD)
# 4. Generate search.
cleaned = search_form.cleaned_data
# On mobile, we default to just wiki results.
if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
cleaned['w'] = constants.WHERE_WIKI
language = locale_or_default(cleaned['language'] or request.LANGUAGE_CODE)
lang_name = settings.LANGUAGES_DICT.get(language.lower()) or ''
searcher = generate_simple_search(search_form, language, with_highlights=True)
searcher = searcher[:settings.SEARCH_MAX_RESULTS]
# 5. Generate output.
pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)
if pages.paginator.count == 0:
fallback_results = _fallback_results(language, cleaned['product'])
results = []
else:
fallback_results = None
results = build_results_list(pages, request.IS_JSON)
product = Product.objects.filter(slug__in=cleaned['product'])
if product:
product_titles = [_(p.title, 'DB: products.Product.title') for p in product]
else:
product_titles = [_('All Products')]
# FIXME: This is probably bad l10n.
product_titles = ', '.join(product_titles)
data = {
'num_results': pages.paginator.count,
'results': results,
'fallback_results': fallback_results,
'product_titles': product_titles,
'q': cleaned['q'],
'w': cleaned['w'],
'lang_name': lang_name,
'products': Product.objects.filter(visible=True)}
if request.IS_JSON:
data['total'] = len(data['results'])
data['products'] = [{'slug': p.slug, 'title': p.title}
for p in data['products']]
if product:
data['product'] = product[0].slug
pages = Paginator(pages)
data['pagination'] = dict(
number=pages.pager.number,
num_pages=pages.pager.paginator.num_pages,
has_next=pages.pager.has_next(),
has_previous=pages.pager.has_previous(),
max=pages.max,
span=pages.span,
dotted_upper=pages.pager.dotted_upper,
dotted_lower=pages.pager.dotted_lower,
page_range=pages.pager.page_range,
url=pages.pager.url,
)
if not results:
data['message'] = _('No pages matched the search criteria')
json_data = json.dumps(data)
if request.JSON_CALLBACK:
json_data = request.JSON_CALLBACK + '(' + json_data + ');'
return HttpResponse(json_data, content_type=request.CONTENT_TYPE)
data.update({
'product': product,
'pages': pages,
'search_form': search_form,
'advanced': False,
})
resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD)
resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
max_age=3600, secure=False, httponly=False)
return resp
@markup_json
@handle_es_errors(_es_down_template)
@mobile_template('search/{mobile/}results.html')
def advanced_search(request, template=None):
"""Elasticsearch-specific Advanced search view"""
# 1. Prep request.
r = request.GET.copy()
# TODO: Figure out how to get rid of 'a' and do it.
# It basically is used to switch between showing the form or results.
a = request.GET.get('a', '2')
# TODO: This is so the 'a=1' stays in the URL for pagination.
r['a'] = 1
language = locale_or_default(request.GET.get('language', request.LANGUAGE_CODE))
r['language'] = language
lang = language.lower()
lang_name = settings.LANGUAGES_DICT.get(lang) or ''
# 2. Build form.
search_form = AdvancedSearchForm(r, auto_id=False)
search_form.set_allowed_forums(request.user)
# get value for search input from last search term.
last_search = request.COOKIES.get(settings.LAST_SEARCH_COOKIE)
if last_search and 'q' not in r:
r['q'] = urlquote(last_search)
# 3. Validate request.
# Note: a == 2 means "show the form"--that's all we use it for now.
if a == '2' or not search_form.is_valid():
if request.IS_JSON:
return HttpResponse(
json.dumps({'error': _('Invalid search data.')}),
content_type=request.CONTENT_TYPE,
status=400)
t = template if request.MOBILE else 'search/form.html'
return cache_control(
render(request, t, {
'advanced': True,
'request': request,
'search_form': search_form}),
settings.SEARCH_CACHE_PERIOD)
# 4. Generate search.
cleaned = search_form.cleaned_data
# On mobile, we default to just wiki results.
if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
cleaned['w'] = constants.WHERE_WIKI
# We use a regular S here because we want to search across
# multiple doctypes.
searcher = (AnalyzerS().es(urls=settings.ES_URLS)
.indexes(es_utils.read_index('default')))
doctypes = []
final_filter = F()
unix_now = int(time.time())
interval_filters = (
('created', cleaned['created'], cleaned['created_date']),
('updated', cleaned['updated'], cleaned['updated_date'])
)
# Start - wiki search configuration
if cleaned['w'] & constants.WHERE_WIKI:
wiki_f = F(model='wiki_document')
# Category filter
if cleaned['category']:
wiki_f &= F(document_category__in=cleaned['category'])
# Locale filter
wiki_f &= F(document_locale=language)
# Product filter
products = cleaned['product']
for p in products:
wiki_f &= F(product=p)
# Topics filter
topics = cleaned['topics']
for t in topics:
wiki_f &= F(topic=t)
# Archived bit
if not cleaned['include_archived']:
wiki_f &= F(document_is_archived=False)
# Apply sortby
sortby = cleaned['sortby_documents']
try:
searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby])
except IndexError:
# Skip index errors because they imply the user is sending us sortby values
# that aren't valid.
pass
doctypes.append(DocumentMappingType.get_mapping_type_name())
final_filter |= wiki_f
# End - wiki search configuration
# Start - support questions configuration
if cleaned['w'] & constants.WHERE_SUPPORT:
question_f = F(model='questions_question')
# These filters are ternary, they can be either YES, NO, or OFF
ternary_filters = ('is_locked', 'is_solved', 'has_answers',
'has_helpful', 'is_archived')
d = dict(('question_%s' % filter_name,
_ternary_filter(cleaned[filter_name]))
for filter_name in ternary_filters if cleaned[filter_name])
if d:
question_f &= F(**d)
if cleaned['asked_by']:
question_f &= F(question_creator=cleaned['asked_by'])
if cleaned['answered_by']:
question_f &= F(question_answer_creator=cleaned['answered_by'])
q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
for t in q_tags:
if t:
question_f &= F(question_tag=t)
# Product filter
products = cleaned['product']
for p in products:
question_f &= F(product=p)
# Topics filter
topics = cleaned['topics']
for t in topics:
question_f &= F(topic=t)
# Note: num_voted (with a d) is a different field than num_votes
# (with an s). The former is a dropdown and the latter is an
# integer value.
if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
question_f &= F(question_num_votes__gte=cleaned['num_votes'])
# Apply sortby
sortby = cleaned['sortby']
try:
searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby])
except IndexError:
# Skip index errors because they imply the user is sending us sortby values
# that aren't valid.
pass
# Apply created and updated filters
for filter_name, filter_option, filter_date in interval_filters:
if filter_option == constants.INTERVAL_BEFORE:
before = {filter_name + '__gte': 0,
filter_name + '__lte': max(filter_date, 0)}
question_f &= F(**before)
elif filter_option == constants.INTERVAL_AFTER:
after = {filter_name + '__gte': min(filter_date, unix_now),
filter_name + '__lte': unix_now}
question_f &= F(**after)
doctypes.append(QuestionMappingType.get_mapping_type_name())
final_filter |= question_f
# End - support questions configuration
# Start - discussion forum configuration
if cleaned['w'] & constants.WHERE_DISCUSSION:
discussion_f = F(model='forums_thread')
if cleaned['author']:
discussion_f &= F(post_author_ord=cleaned['author'])
if cleaned['thread_type']:
if constants.DISCUSSION_STICKY in cleaned['thread_type']:
discussion_f &= F(post_is_sticky=1)
if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
discussion_f &= F(post_is_locked=1)
valid_forum_ids = [f.id for f in Forum.authorized_forums_for_user(request.user)]
forum_ids = None
if cleaned['forum']:
forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]
# If we removed all the forums they wanted to look at or if
# they didn't specify, then we filter on the list of all
# forums they're authorized to look at.
if not forum_ids:
forum_ids = valid_forum_ids
discussion_f &= F(post_forum_id__in=forum_ids)
# Apply created and updated filters
for filter_name, filter_option, filter_date in interval_filters:
if filter_option == constants.INTERVAL_BEFORE:
before = {filter_name + '__gte': 0,
filter_name + '__lte': max(filter_date, 0)}
discussion_f &= F(**before)
elif filter_option == constants.INTERVAL_AFTER:
after = {filter_name + '__gte': min(filter_date, unix_now),
filter_name + '__lte': unix_now}
discussion_f &= F(**after)
doctypes.append(ThreadMappingType.get_mapping_type_name())
final_filter |= discussion_f
# End - discussion forum configuration
# Done with all the filtery stuff--time to generate results
searcher = searcher.doctypes(*doctypes)
searcher = searcher.filter(final_filter)
if 'explain' in request.GET and request.GET['explain'] == '1':
searcher = searcher.explain()
cleaned_q = cleaned['q']
# Set up the highlights. Show the entire field highlighted.
searcher = searcher.highlight(
'question_content', # support forum
'document_summary', # kb
'post_content', # contributor forum
pre_tags=['<b>'],
post_tags=['</b>'],
number_of_fragments=0)
searcher = apply_boosts(searcher)
# Build the query
if cleaned_q:
query_fields = chain(*[
cls.get_query_fields() for cls in [
DocumentMappingType,
ThreadMappingType,
QuestionMappingType
]
])
query = {}
# Create a simple_query_search query for every field we want to search.
for field in query_fields:
query['%s__sqs' % field] = cleaned_q
# Transform the query to use locale aware analyzers.
query = es_utils.es_query_with_analyzer(query, language)
searcher = searcher.query(should=True, **query)
searcher = searcher[:settings.SEARCH_MAX_RESULTS]
# 5. Generate output
pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)
if pages.paginator.count == 0:
# If we know there aren't any results, show fallback_results.
fallback_results = _fallback_results(language, cleaned['product'])
results = []
else:
fallback_results = None
results = build_results_list(pages, request.IS_JSON)
items = [(k, v) for k in search_form.fields for
v in r.getlist(k) if v and k != 'a']
items.append(('a', '2'))
product = Product.objects.filter(slug__in=cleaned['product'])
if product:
product_titles = [_(p.title, 'DB: products.Product.title') for p in product]
else:
product_titles = [_('All Products')]
# FIXME: This is probably bad l10n.
product_titles = ', '.join(product_titles)
data = {
'num_results': pages.paginator.count,
'results': results,
'fallback_results': fallback_results,
'product_titles': product_titles,
'q': cleaned['q'],
'w': cleaned['w'],
'lang_name': lang_name,
'advanced': True,
'products': Product.objects.filter(visible=True)
}
if request.IS_JSON:
data['total'] = len(data['results'])
data['products'] = [{'slug': p.slug, 'title': p.title}
for p in data['products']]
if product:
data['product'] = product[0].slug
pages = Paginator(pages)
data['pagination'] = dict(
number=pages.pager.number,
num_pages=pages.pager.paginator.num_pages,
has_next=pages.pager.has_next(),
has_previous=pages.pager.has_previous(),
max=pages.max,
span=pages.span,
dotted_upper=pages.pager.dotted_upper,
dotted_lower=pages.pager.dotted_lower,
page_range=pages.pager.page_range,
url=pages.pager.url,
)
if not results:
data['message'] = _('No pages matched the search criteria')
json_data = json.dumps(data)
if request.JSON_CALLBACK:
json_data = request.JSON_CALLBACK + '(' + json_data + ');'
return HttpResponse(json_data, content_type=request.CONTENT_TYPE)
data.update({
'product': product,
'pages': pages,
'search_form': search_form
})
resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD)
resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
max_age=3600, secure=False, httponly=False)
return resp
@cache_page(60 * 15) # 15 minutes.
def opensearch_suggestions(request):
"""A simple search view that returns OpenSearch suggestions."""
content_type = 'application/x-suggestions+json'
search_form = SimpleSearchForm(request.GET, auto_id=False)
if not search_form.is_valid():
return HttpResponseBadRequest(content_type=content_type)
cleaned = search_form.cleaned_data
language = locale_or_default(cleaned['language'] or request.LANGUAGE_CODE)
searcher = generate_simple_search(search_form, language, with_highlights=False)
searcher = searcher.values_dict('document_title', 'question_title', 'url')
results = searcher[:10]
def urlize(r):
return u'%s://%s%s' % (
'https' if request.is_secure() else 'http',
request.get_host(),
r['url'][0]
)
def titleize(r):
# NB: Elasticsearch returns an array of strings as the value, so we mimic that and
# then pull out the first (and only) string.
return r.get('document_title', r.get('question_title', [_('No title')]))[0]
try:
data = [
cleaned['q'],
[titleize(r) for r in results],
[],
[urlize(r) for r in results]
]
except ES_EXCEPTIONS:
# If we have Elasticsearch problems, we just send back an empty set of results.
data = []
return HttpResponse(json.dumps(data), content_type=content_type)
@cache_page(60 * 60 * 168) # 1 week.
def opensearch_plugin(request):
"""Render an OpenSearch Plugin."""
host = u'%s://%s' % ('https' if request.is_secure() else 'http', request.get_host())
return render(
request, 'search/plugin.html', {
'host': host,
'locale': request.LANGUAGE_CODE
},
content_type='application/opensearchdescription+xml'
)
def _ternary_filter(ternary_value):
"""Return a search query given a TERNARY_YES or TERNARY_NO.
Behavior for TERNARY_OFF is undefined.
"""
return ternary_value == constants.TERNARY_YES
def _build_es_excerpt(result, first_only=False):
"""Return concatenated search excerpts.
:arg result: The result object from the queryset results
:arg first_only: True if we should show only the first bit, False
if we should show all bits
"""
bits = [m.strip() for m in chain(*result.es_meta.highlight.values())]
if first_only and bits:
excerpt = bits[0]
else:
excerpt = EXCERPT_JOINER.join(bits)
return jinja2.Markup(clean_excerpt(excerpt))
def _fallback_results(locale, product_slugs):
"""Return the top 20 articles by votes for the given product(s)."""
products = []
for slug in product_slugs:
try:
p = Product.objects.get(slug=slug)
products.append(p)
except Product.DoesNotExist:
pass
docs, fallback = documents_for(locale, products=products)
docs = docs + (fallback or [])
return docs[:20]
|
Osmose/kitsune
|
kitsune/search/views.py
|
Python
|
bsd-3-clause
| 23,529
|
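A tiny illustration of the cache_control helper defined in the kitsune views above; the period
argument is in minutes (15 here is just an example value):

from django.http import HttpResponse
from kitsune.search.views import cache_control

resp = cache_control(HttpResponse('ok'), 15)
# resp['Cache-Control'] is now 'max-age=900' and Expires is set 15 minutes ahead.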
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for reflection.py, which also indirectly tests the output of the
pure-Python protocol compiler.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import copy
import gc
import operator
import struct
from google.apputils import basetest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import text_format
from google.protobuf.internal import api_implementation
from google.protobuf.internal import more_extensions_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf.internal import wire_format
from google.protobuf.internal import test_util
from google.protobuf.internal import decoder
class _MiniDecoder(object):
"""Decodes a stream of values from a string.
Once upon a time we actually had a class called decoder.Decoder. Then we
got rid of it during a redesign that made decoding much, much faster overall.
But a couple tests in this file used it to check that the serialized form of
a message was correct. So, this class implements just the methods that were
used by said tests, so that we don't have to rewrite the tests.
"""
def __init__(self, bytes):
self._bytes = bytes
self._pos = 0
def ReadVarint(self):
result, self._pos = decoder._DecodeVarint(self._bytes, self._pos)
return result
ReadInt32 = ReadVarint
ReadInt64 = ReadVarint
ReadUInt32 = ReadVarint
ReadUInt64 = ReadVarint
def ReadSInt64(self):
return wire_format.ZigZagDecode(self.ReadVarint())
ReadSInt32 = ReadSInt64
def ReadFieldNumberAndWireType(self):
return wire_format.UnpackTag(self.ReadVarint())
def ReadFloat(self):
result = struct.unpack("<f", self._bytes[self._pos:self._pos+4])[0]
self._pos += 4
return result
def ReadDouble(self):
result = struct.unpack("<d", self._bytes[self._pos:self._pos+8])[0]
self._pos += 8
return result
def EndOfStream(self):
return self._pos == len(self._bytes)
class ReflectionTest(basetest.TestCase):
def assertListsEqual(self, values, others):
self.assertEqual(len(values), len(others))
for i in range(len(values)):
self.assertEqual(values[i], others[i])
def testScalarConstructor(self):
# Constructor with only scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_double=54.321,
optional_string='optional_string')
self.assertEqual(24, proto.optional_int32)
self.assertEqual(54.321, proto.optional_double)
self.assertEqual('optional_string', proto.optional_string)
def testRepeatedScalarConstructor(self):
# Constructor with only repeated scalar types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_int32=[1, 2, 3, 4],
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_string=["optional_string"])
self.assertEquals([1, 2, 3, 4], list(proto.repeated_int32))
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(["optional_string"], list(proto.repeated_string))
def testRepeatedCompositeConstructor(self):
# Constructor with only repeated composite types should succeed.
proto = unittest_pb2.TestAllTypes(
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
repeatedgroup=[
unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)])
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
self.assertEquals(
[unittest_pb2.TestAllTypes.RepeatedGroup(),
unittest_pb2.TestAllTypes.RepeatedGroup(a=1),
unittest_pb2.TestAllTypes.RepeatedGroup(a=2)],
list(proto.repeatedgroup))
def testMixedConstructor(self):
# Constructor with only mixed types should succeed.
proto = unittest_pb2.TestAllTypes(
optional_int32=24,
optional_string='optional_string',
repeated_double=[1.23, 54.321],
repeated_bool=[True, False, False],
repeated_nested_message=[
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
repeated_foreign_message=[
unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)])
self.assertEqual(24, proto.optional_int32)
self.assertEqual('optional_string', proto.optional_string)
self.assertEquals([1.23, 54.321], list(proto.repeated_double))
self.assertEquals([True, False, False], list(proto.repeated_bool))
self.assertEquals(
[unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.FOO),
unittest_pb2.TestAllTypes.NestedMessage(
bb=unittest_pb2.TestAllTypes.BAR)],
list(proto.repeated_nested_message))
self.assertEquals(
[unittest_pb2.ForeignMessage(c=-43),
unittest_pb2.ForeignMessage(c=45324),
unittest_pb2.ForeignMessage(c=12)],
list(proto.repeated_foreign_message))
def testConstructorTypeError(self):
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_int32="foo")
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, optional_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_int32=["foo"])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_string=[1234])
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=1234)
self.assertRaises(
TypeError, unittest_pb2.TestAllTypes, repeated_nested_message=[1234])
def testConstructorInvalidatesCachedByteSize(self):
message = unittest_pb2.TestAllTypes(optional_int32 = 12)
self.assertEquals(2, message.ByteSize())
message = unittest_pb2.TestAllTypes(
optional_nested_message = unittest_pb2.TestAllTypes.NestedMessage())
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(repeated_int32 = [12])
self.assertEquals(3, message.ByteSize())
message = unittest_pb2.TestAllTypes(
repeated_nested_message = [unittest_pb2.TestAllTypes.NestedMessage()])
self.assertEquals(3, message.ByteSize())
def testSimpleHasBits(self):
# Test a scalar.
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_int32'))
self.assertEqual(0, proto.optional_int32)
# HasField() shouldn't be true if all we've done is
# read the default value.
self.assertTrue(not proto.HasField('optional_int32'))
proto.optional_int32 = 1
# Setting a value however *should* set the "has" bit.
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
# And clearing that value should unset the "has" bit.
self.assertTrue(not proto.HasField('optional_int32'))
def testHasBitsWithSinglyNestedScalar(self):
# Helper used to test foreign messages and groups.
#
# composite_field_name should be the name of a non-repeated
# composite (i.e., foreign or group) field in TestAllTypes,
# and scalar_field_name should be the name of an integer-valued
# scalar field within that composite.
#
# I never thought I'd miss C++ macros and templates so much. :(
# This helper is semantically just:
#
# assert proto.composite_field.scalar_field == 0
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
#
# proto.composite_field.scalar_field = 10
# old_composite_field = proto.composite_field
#
# assert proto.composite_field.scalar_field == 10
# assert proto.composite_field.HasField('scalar_field')
# assert proto.HasField('composite_field')
#
# proto.ClearField('composite_field')
#
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
# assert proto.composite_field.scalar_field == 0
#
# # Now ensure that ClearField('composite_field') disconnected
# # the old field object from the object tree...
# assert old_composite_field is not proto.composite_field
# old_composite_field.scalar_field = 20
# assert not proto.composite_field.HasField('scalar_field')
# assert not proto.HasField('composite_field')
def TestCompositeHasBits(composite_field_name, scalar_field_name):
proto = unittest_pb2.TestAllTypes()
# First, check that we can get the scalar value, and see that it's the
# default (0), but that proto.HasField('composite') and
# proto.composite.HasField('scalar') will still return False.
composite_field = getattr(proto, composite_field_name)
original_scalar_value = getattr(composite_field, scalar_field_name)
self.assertEqual(0, original_scalar_value)
# Assert that the composite object does not "have" the scalar.
self.assertTrue(not composite_field.HasField(scalar_field_name))
# Assert that proto does not "have" the composite field.
self.assertTrue(not proto.HasField(composite_field_name))
# Now set the scalar within the composite field. Ensure that the setting
# is reflected, and that proto.HasField('composite') and
# proto.composite.HasField('scalar') now both return True.
new_val = 20
setattr(composite_field, scalar_field_name, new_val)
self.assertEqual(new_val, getattr(composite_field, scalar_field_name))
# Hold on to a reference to the current composite_field object.
old_composite_field = composite_field
# Assert that the has methods now return true.
self.assertTrue(composite_field.HasField(scalar_field_name))
self.assertTrue(proto.HasField(composite_field_name))
# Now call the clear method...
proto.ClearField(composite_field_name)
# ...and ensure that the "has" bits are all back to False...
composite_field = getattr(proto, composite_field_name)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
# ...and ensure that the scalar field has returned to its default.
self.assertEqual(0, getattr(composite_field, scalar_field_name))
self.assertTrue(old_composite_field is not composite_field)
setattr(old_composite_field, scalar_field_name, new_val)
self.assertTrue(not composite_field.HasField(scalar_field_name))
self.assertTrue(not proto.HasField(composite_field_name))
self.assertEqual(0, getattr(composite_field, scalar_field_name))
# Test simple, single-level nesting when we set a scalar.
TestCompositeHasBits('optionalgroup', 'a')
TestCompositeHasBits('optional_nested_message', 'bb')
TestCompositeHasBits('optional_foreign_message', 'c')
TestCompositeHasBits('optional_import_message', 'd')
def testReferencesToNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
del proto
# A previous version had a bug where this would raise an exception when
# hitting a now-dead weak reference.
nested.bb = 23
def testDisconnectingNestedMessageBeforeSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testGetDefaultMessageAfterDisconnectingDefaultMessage(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.ClearField('optional_nested_message')
del proto
del nested
# Force a garbage collect so that the underlying CMessages are freed along
# with the Messages they point to. This is to make sure we're not deleting
# default message instances.
gc.collect()
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
def testDisconnectingNestedMessageAfterSettingField(self):
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
self.assertTrue(proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message') # Should disconnect from parent
self.assertEqual(5, nested.bb)
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
def testDisconnectingNestedMessageBeforeGettingField(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
def testDisconnectingNestedMessageAfterMerge(self):
# This test exercises the code path that does not use ReleaseMessage().
# The underlying fear is that if we use ReleaseMessage() incorrectly,
# we will have memory leaks. It's hard to check that that doesn't happen,
# but at least we can exercise that code path to make sure it works.
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_nested_message.bb = 5
proto1.MergeFrom(proto2)
self.assertTrue(proto1.HasField('optional_nested_message'))
proto1.ClearField('optional_nested_message')
self.assertTrue(not proto1.HasField('optional_nested_message'))
def testDisconnectingLazyNestedMessage(self):
# This test exercises releasing a nested message that is lazy. This test
# only exercises real code in the C++ implementation as Python does not
# support lazy parsing, but the current C++ implementation results in
# memory corruption and a crash.
if api_implementation.Type() != 'python':
return
proto = unittest_pb2.TestAllTypes()
proto.optional_lazy_message.bb = 5
proto.ClearField('optional_lazy_message')
del proto
gc.collect()
def testHasBitsWhenModifyingRepeatedFields(self):
# Test nesting when we add an element to a repeated field in a submessage.
proto = unittest_pb2.TestNestedMessageHasBits()
proto.optional_nested_message.nestedmessage_repeated_int32.append(5)
self.assertEqual(
[5], proto.optional_nested_message.nestedmessage_repeated_int32)
self.assertTrue(proto.HasField('optional_nested_message'))
# Do the same test, but with a repeated composite field within the
# submessage.
proto.ClearField('optional_nested_message')
self.assertTrue(not proto.HasField('optional_nested_message'))
proto.optional_nested_message.nestedmessage_repeated_foreignmessage.add()
self.assertTrue(proto.HasField('optional_nested_message'))
def testHasBitsForManyLevelsOfNesting(self):
# Test nesting many levels deep.
recursive_proto = unittest_pb2.TestMutualRecursionA()
self.assertTrue(not recursive_proto.HasField('bb'))
self.assertEqual(0, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(not recursive_proto.HasField('bb'))
recursive_proto.bb.a.bb.a.bb.optional_int32 = 5
self.assertEqual(5, recursive_proto.bb.a.bb.a.bb.optional_int32)
self.assertTrue(recursive_proto.HasField('bb'))
self.assertTrue(recursive_proto.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.HasField('bb'))
self.assertTrue(recursive_proto.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.HasField('bb'))
self.assertTrue(not recursive_proto.bb.a.bb.a.bb.HasField('a'))
self.assertTrue(recursive_proto.bb.a.bb.a.bb.HasField('optional_int32'))
def testSingularListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_fixed32 = 1
proto.optional_int32 = 5
proto.optional_string = 'foo'
# Access sub-message but don't set it yet.
nested_message = proto.optional_nested_message
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo') ],
proto.ListFields())
proto.optional_nested_message.bb = 123
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 5),
(proto.DESCRIPTOR.fields_by_name['optional_fixed32'], 1),
(proto.DESCRIPTOR.fields_by_name['optional_string' ], 'foo'),
(proto.DESCRIPTOR.fields_by_name['optional_nested_message' ],
nested_message) ],
proto.ListFields())
def testRepeatedListFields(self):
proto = unittest_pb2.TestAllTypes()
proto.repeated_fixed32.append(1)
proto.repeated_int32.append(5)
proto.repeated_int32.append(11)
proto.repeated_string.extend(['foo', 'bar'])
proto.repeated_string.extend([])
proto.repeated_string.append('baz')
proto.repeated_string.extend(str(x) for x in xrange(2))
proto.optional_int32 = 21
proto.repeated_bool # Access but don't set anything; should not be listed.
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['optional_int32' ], 21),
(proto.DESCRIPTOR.fields_by_name['repeated_int32' ], [5, 11]),
(proto.DESCRIPTOR.fields_by_name['repeated_fixed32'], [1]),
(proto.DESCRIPTOR.fields_by_name['repeated_string' ],
['foo', 'bar', 'baz', '0', '1']) ],
proto.ListFields())
def testSingularListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.optional_fixed32_extension] = 1
proto.Extensions[unittest_pb2.optional_int32_extension ] = 5
proto.Extensions[unittest_pb2.optional_string_extension ] = 'foo'
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 5),
(unittest_pb2.optional_fixed32_extension, 1),
(unittest_pb2.optional_string_extension , 'foo') ],
proto.ListFields())
def testRepeatedListExtensions(self):
proto = unittest_pb2.TestAllExtensions()
proto.Extensions[unittest_pb2.repeated_fixed32_extension].append(1)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(5)
proto.Extensions[unittest_pb2.repeated_int32_extension ].append(11)
proto.Extensions[unittest_pb2.repeated_string_extension ].append('foo')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('bar')
proto.Extensions[unittest_pb2.repeated_string_extension ].append('baz')
proto.Extensions[unittest_pb2.optional_int32_extension ] = 21
self.assertEqual(
[ (unittest_pb2.optional_int32_extension , 21),
(unittest_pb2.repeated_int32_extension , [5, 11]),
(unittest_pb2.repeated_fixed32_extension, [1]),
(unittest_pb2.repeated_string_extension , ['foo', 'bar', 'baz']) ],
proto.ListFields())
def testListFieldsAndExtensions(self):
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
unittest_pb2.my_extension_int
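# Regular fields and extensions are interleaved in field-number order in the
# ListFields() result, as the expected list below shows.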
self.assertEqual(
[ (proto.DESCRIPTOR.fields_by_name['my_int' ], 1),
(unittest_pb2.my_extension_int , 23),
(proto.DESCRIPTOR.fields_by_name['my_string'], 'foo'),
(unittest_pb2.my_extension_string , 'bar'),
(proto.DESCRIPTOR.fields_by_name['my_float' ], 1.0) ],
proto.ListFields())
def testDefaultValues(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
self.assertEqual(0, proto.optional_int64)
self.assertEqual(0, proto.optional_uint32)
self.assertEqual(0, proto.optional_uint64)
self.assertEqual(0, proto.optional_sint32)
self.assertEqual(0, proto.optional_sint64)
self.assertEqual(0, proto.optional_fixed32)
self.assertEqual(0, proto.optional_fixed64)
self.assertEqual(0, proto.optional_sfixed32)
self.assertEqual(0, proto.optional_sfixed64)
self.assertEqual(0.0, proto.optional_float)
self.assertEqual(0.0, proto.optional_double)
self.assertEqual(False, proto.optional_bool)
self.assertEqual('', proto.optional_string)
self.assertEqual(b'', proto.optional_bytes)
self.assertEqual(41, proto.default_int32)
self.assertEqual(42, proto.default_int64)
self.assertEqual(43, proto.default_uint32)
self.assertEqual(44, proto.default_uint64)
self.assertEqual(-45, proto.default_sint32)
self.assertEqual(46, proto.default_sint64)
self.assertEqual(47, proto.default_fixed32)
self.assertEqual(48, proto.default_fixed64)
self.assertEqual(49, proto.default_sfixed32)
self.assertEqual(-50, proto.default_sfixed64)
self.assertEqual(51.5, proto.default_float)
self.assertEqual(52e3, proto.default_double)
self.assertEqual(True, proto.default_bool)
self.assertEqual('hello', proto.default_string)
self.assertEqual(b'world', proto.default_bytes)
self.assertEqual(unittest_pb2.TestAllTypes.BAR, proto.default_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_BAR, proto.default_foreign_enum)
self.assertEqual(unittest_import_pb2.IMPORT_BAR,
proto.default_import_enum)
proto = unittest_pb2.TestExtremeDefaultValues()
self.assertEqual(u'\u1234', proto.utf8_string)
def testHasFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.HasField, 'nonexistent_field')
def testClearFieldWithUnknownFieldName(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(ValueError, proto.ClearField, 'nonexistent_field')
def testClearRemovesChildren(self):
# Make sure there aren't any implementation bugs that are only partially
# clearing the message (which can happen in the more complex C++
# implementation which has parallel message lists).
proto = unittest_pb2.TestRequiredForeign()
for i in range(10):
proto.repeated_message.add()
proto2 = unittest_pb2.TestRequiredForeign()
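# Copying from an empty message must clear all of the existing repeated
# elements, so indexing into the old range should now fail.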
proto.CopyFrom(proto2)
self.assertRaises(IndexError, lambda: proto.repeated_message[5])
def testDisallowedAssignments(self):
# It's illegal to assign values directly to repeated fields
# or to nonrepeated composite fields. Ensure that this fails.
proto = unittest_pb2.TestAllTypes()
# Repeated fields.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', 10)
# Lists shouldn't work, either.
self.assertRaises(AttributeError, setattr, proto, 'repeated_int32', [10])
# Composite fields.
self.assertRaises(AttributeError, setattr, proto,
'optional_nested_message', 23)
# Assignment to a repeated nested message field without specifying
# the index in the array of nested messages.
self.assertRaises(AttributeError, setattr, proto.repeated_nested_message,
'bb', 34)
# Assignment to an attribute of a repeated field.
self.assertRaises(AttributeError, setattr, proto.repeated_float,
'some_attribute', 34)
# proto.nonexistent_field = 23 should fail as well.
self.assertRaises(AttributeError, setattr, proto, 'nonexistent_field', 23)
def testSingleScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 1.1)
self.assertRaises(TypeError, setattr, proto, 'optional_int32', 'foo')
self.assertRaises(TypeError, setattr, proto, 'optional_string', 10)
self.assertRaises(TypeError, setattr, proto, 'optional_bytes', 10)
def testIntegerTypes(self):
def TestGetAndDeserialize(field_name, value, expected_type):
proto = unittest_pb2.TestAllTypes()
setattr(proto, field_name, value)
self.assertTrue(isinstance(getattr(proto, field_name), expected_type))
proto2 = unittest_pb2.TestAllTypes()
proto2.ParseFromString(proto.SerializeToString())
self.assertTrue(isinstance(getattr(proto2, field_name), expected_type))
TestGetAndDeserialize('optional_int32', 1, int)
TestGetAndDeserialize('optional_int32', 1 << 30, int)
TestGetAndDeserialize('optional_uint32', 1 << 30, int)
if struct.calcsize('L') == 4:
# Python only has signed ints, so 32-bit python can't fit a uint32
# in an int.
TestGetAndDeserialize('optional_uint32', 1 << 31, long)
else:
# 64-bit python can fit uint32 inside an int
TestGetAndDeserialize('optional_uint32', 1 << 31, int)
TestGetAndDeserialize('optional_int64', 1 << 30, long)
TestGetAndDeserialize('optional_int64', 1 << 60, long)
TestGetAndDeserialize('optional_uint64', 1 << 30, long)
TestGetAndDeserialize('optional_uint64', 1 << 60, long)
def testSingleScalarBoundsChecking(self):
def TestMinAndMaxIntegers(field_name, expected_min, expected_max):
pb = unittest_pb2.TestAllTypes()
setattr(pb, field_name, expected_min)
self.assertEqual(expected_min, getattr(pb, field_name))
setattr(pb, field_name, expected_max)
self.assertEqual(expected_max, getattr(pb, field_name))
self.assertRaises(ValueError, setattr, pb, field_name, expected_min - 1)
self.assertRaises(ValueError, setattr, pb, field_name, expected_max + 1)
TestMinAndMaxIntegers('optional_int32', -(1 << 31), (1 << 31) - 1)
TestMinAndMaxIntegers('optional_uint32', 0, 0xffffffff)
TestMinAndMaxIntegers('optional_int64', -(1 << 63), (1 << 63) - 1)
TestMinAndMaxIntegers('optional_uint64', 0, 0xffffffffffffffff)
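# Assigning a plain int that matches a defined enum value is allowed.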
pb = unittest_pb2.TestAllTypes()
pb.optional_nested_enum = 1
self.assertEqual(1, pb.optional_nested_enum)
def testRepeatedScalarTypeSafety(self):
proto = unittest_pb2.TestAllTypes()
self.assertRaises(TypeError, proto.repeated_int32.append, 1.1)
self.assertRaises(TypeError, proto.repeated_int32.append, 'foo')
self.assertRaises(TypeError, proto.repeated_string, 10)
self.assertRaises(TypeError, proto.repeated_bytes, 10)
proto.repeated_int32.append(10)
proto.repeated_int32[0] = 23
self.assertRaises(IndexError, proto.repeated_int32.__setitem__, 500, 23)
self.assertRaises(TypeError, proto.repeated_int32.__setitem__, 0, 'abc')
# Repeated enums tests.
#proto.repeated_nested_enum.append(0)
def testSingleScalarGettersAndSetters(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.optional_int32)
proto.optional_int32 = 1
self.assertEqual(1, proto.optional_int32)
proto.optional_uint64 = 0xffffffffffff
self.assertEqual(0xffffffffffff, proto.optional_uint64)
proto.optional_uint64 = 0xffffffffffffffff
self.assertEqual(0xffffffffffffffff, proto.optional_uint64)
# TODO(robinson): Test all other scalar field types.
def testSingleScalarClearField(self):
proto = unittest_pb2.TestAllTypes()
# Should be allowed to clear something that's not there (a no-op).
proto.ClearField('optional_int32')
proto.optional_int32 = 1
self.assertTrue(proto.HasField('optional_int32'))
proto.ClearField('optional_int32')
self.assertEqual(0, proto.optional_int32)
self.assertTrue(not proto.HasField('optional_int32'))
# TODO(robinson): Test all other scalar field types.
def testEnums(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testEnum_Name(self):
self.assertEqual('FOREIGN_FOO',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_FOO))
self.assertEqual('FOREIGN_BAR',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_BAR))
self.assertEqual('FOREIGN_BAZ',
unittest_pb2.ForeignEnum.Name(unittest_pb2.FOREIGN_BAZ))
self.assertRaises(ValueError,
unittest_pb2.ForeignEnum.Name, 11312)
proto = unittest_pb2.TestAllTypes()
self.assertEqual('FOO',
proto.NestedEnum.Name(proto.FOO))
self.assertEqual('FOO',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.FOO))
self.assertEqual('BAR',
proto.NestedEnum.Name(proto.BAR))
self.assertEqual('BAR',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.BAR))
self.assertEqual('BAZ',
proto.NestedEnum.Name(proto.BAZ))
self.assertEqual('BAZ',
unittest_pb2.TestAllTypes.NestedEnum.Name(proto.BAZ))
self.assertRaises(ValueError,
proto.NestedEnum.Name, 11312)
self.assertRaises(ValueError,
unittest_pb2.TestAllTypes.NestedEnum.Name, 11312)
def testEnum_Value(self):
self.assertEqual(unittest_pb2.FOREIGN_FOO,
unittest_pb2.ForeignEnum.Value('FOREIGN_FOO'))
self.assertEqual(unittest_pb2.FOREIGN_BAR,
unittest_pb2.ForeignEnum.Value('FOREIGN_BAR'))
self.assertEqual(unittest_pb2.FOREIGN_BAZ,
unittest_pb2.ForeignEnum.Value('FOREIGN_BAZ'))
self.assertRaises(ValueError,
unittest_pb2.ForeignEnum.Value, 'FO')
proto = unittest_pb2.TestAllTypes()
self.assertEqual(proto.FOO,
proto.NestedEnum.Value('FOO'))
self.assertEqual(proto.FOO,
unittest_pb2.TestAllTypes.NestedEnum.Value('FOO'))
self.assertEqual(proto.BAR,
proto.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAR,
unittest_pb2.TestAllTypes.NestedEnum.Value('BAR'))
self.assertEqual(proto.BAZ,
proto.NestedEnum.Value('BAZ'))
self.assertEqual(proto.BAZ,
unittest_pb2.TestAllTypes.NestedEnum.Value('BAZ'))
self.assertRaises(ValueError,
proto.NestedEnum.Value, 'Foo')
self.assertRaises(ValueError,
unittest_pb2.TestAllTypes.NestedEnum.Value, 'Foo')
def testEnum_KeysAndValues(self):
self.assertEqual(['FOREIGN_FOO', 'FOREIGN_BAR', 'FOREIGN_BAZ'],
unittest_pb2.ForeignEnum.keys())
self.assertEqual([4, 5, 6],
unittest_pb2.ForeignEnum.values())
self.assertEqual([('FOREIGN_FOO', 4), ('FOREIGN_BAR', 5),
('FOREIGN_BAZ', 6)],
unittest_pb2.ForeignEnum.items())
proto = unittest_pb2.TestAllTypes()
self.assertEqual(['FOO', 'BAR', 'BAZ', 'NEG'], proto.NestedEnum.keys())
self.assertEqual([1, 2, 3, -1], proto.NestedEnum.values())
self.assertEqual([('FOO', 1), ('BAR', 2), ('BAZ', 3), ('NEG', -1)],
proto.NestedEnum.items())
def testRepeatedScalars(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(15)
self.assertTrue(proto.repeated_int32)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual([5, 10, 15], proto.repeated_int32)
# Test single retrieval.
self.assertEqual(5, proto.repeated_int32[0])
self.assertEqual(15, proto.repeated_int32[-1])
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, 1234)
self.assertRaises(IndexError, proto.repeated_int32.__getitem__, -1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, 'foo')
self.assertRaises(TypeError, proto.repeated_int32.__getitem__, None)
# Test single assignment.
proto.repeated_int32[1] = 20
self.assertEqual([5, 20, 15], proto.repeated_int32)
# Test insertion.
proto.repeated_int32.insert(1, 25)
self.assertEqual([5, 25, 20, 15], proto.repeated_int32)
# Test slice retrieval.
proto.repeated_int32.append(30)
self.assertEqual([25, 20, 15], proto.repeated_int32[1:4])
self.assertEqual([5, 25, 20, 15, 30], proto.repeated_int32[:])
# Test slice assignment with an iterator
proto.repeated_int32[1:4] = (i for i in xrange(3))
self.assertEqual([5, 0, 1, 2, 30], proto.repeated_int32)
# Test slice assignment.
proto.repeated_int32[1:4] = [35, 40, 45]
self.assertEqual([5, 35, 40, 45, 30], proto.repeated_int32)
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_int32:
result.append(i)
self.assertEqual([5, 35, 40, 45, 30], result)
# Test single deletion.
del proto.repeated_int32[2]
self.assertEqual([5, 35, 45, 30], proto.repeated_int32)
# Test slice deletion.
del proto.repeated_int32[2:]
self.assertEqual([5, 35], proto.repeated_int32)
# Test extending.
proto.repeated_int32.extend([3, 13])
self.assertEqual([5, 35, 3, 13], proto.repeated_int32)
# Test clearing.
proto.ClearField('repeated_int32')
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(1)
self.assertEqual(1, proto.repeated_int32[-1])
# Test assignment to a negative index.
proto.repeated_int32[-1] = 2
self.assertEqual(2, proto.repeated_int32[-1])
# Test deletion at negative indices.
proto.repeated_int32[:] = [0, 1, 2, 3]
del proto.repeated_int32[-1]
self.assertEqual([0, 1, 2], proto.repeated_int32)
del proto.repeated_int32[-2]
self.assertEqual([0, 2], proto.repeated_int32)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, -3)
self.assertRaises(IndexError, proto.repeated_int32.__delitem__, 300)
del proto.repeated_int32[-2:-1]
self.assertEqual([2], proto.repeated_int32)
del proto.repeated_int32[100:10000]
self.assertEqual([2], proto.repeated_int32)
def testRepeatedScalarsRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_int32)
self.assertEqual(0, len(proto.repeated_int32))
proto.repeated_int32.append(5)
proto.repeated_int32.append(10)
proto.repeated_int32.append(5)
proto.repeated_int32.append(5)
self.assertEqual(4, len(proto.repeated_int32))
proto.repeated_int32.remove(5)
self.assertEqual(3, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
self.assertEqual(5, proto.repeated_int32[2])
proto.repeated_int32.remove(5)
self.assertEqual(2, len(proto.repeated_int32))
self.assertEqual(10, proto.repeated_int32[0])
self.assertEqual(5, proto.repeated_int32[1])
proto.repeated_int32.remove(10)
self.assertEqual(1, len(proto.repeated_int32))
self.assertEqual(5, proto.repeated_int32[0])
# Remove a non-existent element.
self.assertRaises(ValueError, proto.repeated_int32.remove, 123)
def testRepeatedComposites(self):
proto = unittest_pb2.TestAllTypes()
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
m1 = proto.repeated_nested_message.add()
self.assertTrue(proto.repeated_nested_message)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertListsEqual([m0, m1], proto.repeated_nested_message)
self.assertTrue(isinstance(m0, unittest_pb2.TestAllTypes.NestedMessage))
# Test out-of-bounds indices.
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
1234)
self.assertRaises(IndexError, proto.repeated_nested_message.__getitem__,
-1234)
# Test incorrect types passed to __getitem__.
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
'foo')
self.assertRaises(TypeError, proto.repeated_nested_message.__getitem__,
None)
# Test slice retrieval.
m2 = proto.repeated_nested_message.add()
m3 = proto.repeated_nested_message.add()
m4 = proto.repeated_nested_message.add()
self.assertListsEqual(
[m1, m2, m3], proto.repeated_nested_message[1:4])
self.assertListsEqual(
[m0, m1, m2, m3, m4], proto.repeated_nested_message[:])
self.assertListsEqual(
[m0, m1], proto.repeated_nested_message[:2])
self.assertListsEqual(
[m2, m3, m4], proto.repeated_nested_message[2:])
self.assertEqual(
m0, proto.repeated_nested_message[0])
self.assertListsEqual(
[m0], proto.repeated_nested_message[:1])
# Test that we can use the field as an iterator.
result = []
for i in proto.repeated_nested_message:
result.append(i)
self.assertListsEqual([m0, m1, m2, m3, m4], result)
# Test single deletion.
del proto.repeated_nested_message[2]
self.assertListsEqual([m0, m1, m3, m4], proto.repeated_nested_message)
# Test slice deletion.
del proto.repeated_nested_message[2:]
self.assertListsEqual([m0, m1], proto.repeated_nested_message)
# Test extending.
n1 = unittest_pb2.TestAllTypes.NestedMessage(bb=1)
n2 = unittest_pb2.TestAllTypes.NestedMessage(bb=2)
proto.repeated_nested_message.extend([n1,n2])
self.assertEqual(4, len(proto.repeated_nested_message))
self.assertEqual(n1, proto.repeated_nested_message[2])
self.assertEqual(n2, proto.repeated_nested_message[3])
# Test clearing.
proto.ClearField('repeated_nested_message')
self.assertTrue(not proto.repeated_nested_message)
self.assertEqual(0, len(proto.repeated_nested_message))
# Test constructing an element while adding it.
proto.repeated_nested_message.add(bb=23)
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(23, proto.repeated_nested_message[0].bb)
def testRepeatedCompositeRemove(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, len(proto.repeated_nested_message))
m0 = proto.repeated_nested_message.add()
# Need to set some differentiating variable so m0 != m1 != m2:
m0.bb = len(proto.repeated_nested_message)
m1 = proto.repeated_nested_message.add()
m1.bb = len(proto.repeated_nested_message)
self.assertTrue(m0 != m1)
m2 = proto.repeated_nested_message.add()
m2.bb = len(proto.repeated_nested_message)
self.assertListsEqual([m0, m1, m2], proto.repeated_nested_message)
self.assertEqual(3, len(proto.repeated_nested_message))
proto.repeated_nested_message.remove(m0)
self.assertEqual(2, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
self.assertEqual(m2, proto.repeated_nested_message[1])
# Removing m0 again or removing None should raise an error.
self.assertRaises(ValueError, proto.repeated_nested_message.remove, m0)
self.assertRaises(ValueError, proto.repeated_nested_message.remove, None)
self.assertEqual(2, len(proto.repeated_nested_message))
proto.repeated_nested_message.remove(m2)
self.assertEqual(1, len(proto.repeated_nested_message))
self.assertEqual(m1, proto.repeated_nested_message[0])
def testHandWrittenReflection(self):
# Hand written extensions are only supported by the pure-Python
# implementation of the API.
if api_implementation.Type() != 'python':
return
FieldDescriptor = descriptor.FieldDescriptor
foo_field_descriptor = FieldDescriptor(
name='foo_field', full_name='MyProto.foo_field',
index=0, number=1, type=FieldDescriptor.TYPE_INT64,
cpp_type=FieldDescriptor.CPPTYPE_INT64,
label=FieldDescriptor.LABEL_OPTIONAL, default_value=0,
containing_type=None, message_type=None, enum_type=None,
is_extension=False, extension_scope=None,
options=descriptor_pb2.FieldOptions())
mydescriptor = descriptor.Descriptor(
name='MyProto', full_name='MyProto', filename='ignored',
containing_type=None, nested_types=[], enum_types=[],
fields=[foo_field_descriptor], extensions=[],
options=descriptor_pb2.MessageOptions())
class MyProtoClass(message.Message):
DESCRIPTOR = mydescriptor
__metaclass__ = reflection.GeneratedProtocolMessageType
myproto_instance = MyProtoClass()
self.assertEqual(0, myproto_instance.foo_field)
self.assertTrue(not myproto_instance.HasField('foo_field'))
myproto_instance.foo_field = 23
self.assertEqual(23, myproto_instance.foo_field)
self.assertTrue(myproto_instance.HasField('foo_field'))
def testDescriptorProtoSupport(self):
# Hand written descriptors/reflection are only supported by the pure-Python
# implementation of the API.
if api_implementation.Type() != 'python':
return
def AddDescriptorField(proto, field_name, field_type):
AddDescriptorField.field_index += 1
new_field = proto.field.add()
new_field.name = field_name
new_field.type = field_type
new_field.number = AddDescriptorField.field_index
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL
AddDescriptorField.field_index = 0
desc_proto = descriptor_pb2.DescriptorProto()
desc_proto.name = 'Car'
fdp = descriptor_pb2.FieldDescriptorProto
AddDescriptorField(desc_proto, 'name', fdp.TYPE_STRING)
AddDescriptorField(desc_proto, 'year', fdp.TYPE_INT64)
AddDescriptorField(desc_proto, 'automatic', fdp.TYPE_BOOL)
AddDescriptorField(desc_proto, 'price', fdp.TYPE_DOUBLE)
# Add a repeated field
AddDescriptorField.field_index += 1
new_field = desc_proto.field.add()
new_field.name = 'owners'
new_field.type = fdp.TYPE_STRING
new_field.number = AddDescriptorField.field_index
new_field.label = descriptor_pb2.FieldDescriptorProto.LABEL_REPEATED
desc = descriptor.MakeDescriptor(desc_proto)
self.assertIn('name', desc.fields_by_name)
self.assertIn('year', desc.fields_by_name)
self.assertIn('automatic', desc.fields_by_name)
self.assertIn('price', desc.fields_by_name)
self.assertIn('owners', desc.fields_by_name)
class CarMessage(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = desc
prius = CarMessage()
prius.name = 'prius'
prius.year = 2010
prius.automatic = True
prius.price = 25134.75
prius.owners.extend(['bob', 'susan'])
serialized_prius = prius.SerializeToString()
new_prius = reflection.ParseMessage(desc, serialized_prius)
self.assertTrue(new_prius is not prius)
self.assertEqual(prius, new_prius)
# These are unnecessary assuming message equality works as advertised, but
# explicitly check to be safe since we're mucking about in metaclass foo.
self.assertEqual(prius.name, new_prius.name)
self.assertEqual(prius.year, new_prius.year)
self.assertEqual(prius.automatic, new_prius.automatic)
self.assertEqual(prius.price, new_prius.price)
self.assertEqual(prius.owners, new_prius.owners)
def testTopLevelExtensionsForOptionalScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_int32_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension])
# As with normal scalar fields, just doing a read doesn't actually set the
# "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
# Actually set the thing.
extendee_proto.Extensions[extension] = 23
self.assertEqual(23, extendee_proto.Extensions[extension])
self.assertTrue(extendee_proto.HasExtension(extension))
# Ensure that clearing works as well.
extendee_proto.ClearExtension(extension)
self.assertEqual(0, extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testTopLevelExtensionsForRepeatedScalar(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeated_string_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
extendee_proto.Extensions[extension].append('foo')
self.assertEqual(['foo'], extendee_proto.Extensions[extension])
string_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(string_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForOptionalMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.optional_foreign_message_extension
self.assertTrue(not extendee_proto.HasExtension(extension))
self.assertEqual(0, extendee_proto.Extensions[extension].c)
# As with normal (non-extension) fields, merely reading from the
# thing shouldn't set the "has" bit.
self.assertTrue(not extendee_proto.HasExtension(extension))
extendee_proto.Extensions[extension].c = 23
self.assertEqual(23, extendee_proto.Extensions[extension].c)
self.assertTrue(extendee_proto.HasExtension(extension))
# Save a reference here.
foreign_message = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertTrue(foreign_message is not extendee_proto.Extensions[extension])
# Setting a field on foreign_message now shouldn't set
# any "has" bits on extendee_proto.
foreign_message.c = 42
self.assertEqual(42, foreign_message.c)
self.assertTrue(foreign_message.HasField('c'))
self.assertTrue(not extendee_proto.HasExtension(extension))
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testTopLevelExtensionsForRepeatedMessage(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.repeatedgroup_extension
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
group = extendee_proto.Extensions[extension].add()
group.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension][0].a)
group.a = 42
self.assertEqual(42, extendee_proto.Extensions[extension][0].a)
group_list = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
self.assertEqual(0, len(extendee_proto.Extensions[extension]))
self.assertTrue(group_list is not extendee_proto.Extensions[extension])
# Shouldn't be allowed to do Extensions[extension] = 'a'
self.assertRaises(TypeError, operator.setitem, extendee_proto.Extensions,
extension, 'a')
def testNestedExtensions(self):
extendee_proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
# We just test the non-repeated case.
self.assertTrue(not extendee_proto.HasExtension(extension))
required = extendee_proto.Extensions[extension]
self.assertEqual(0, required.a)
self.assertTrue(not extendee_proto.HasExtension(extension))
required.a = 23
self.assertEqual(23, extendee_proto.Extensions[extension].a)
self.assertTrue(extendee_proto.HasExtension(extension))
extendee_proto.ClearExtension(extension)
self.assertTrue(required is not extendee_proto.Extensions[extension])
self.assertTrue(not extendee_proto.HasExtension(extension))
def testRegisteredExtensions(self):
self.assertTrue('protobuf_unittest.optional_int32_extension' in
unittest_pb2.TestAllExtensions._extensions_by_name)
self.assertTrue(1 in unittest_pb2.TestAllExtensions._extensions_by_number)
# Make sure extensions haven't been registered into types that shouldn't
# have any.
self.assertEqual(0, len(unittest_pb2.TestAllTypes._extensions_by_name))
# If message A directly contains message B, and
# a.HasField('b') is currently False, then mutating any
# extension in B should change a.HasField('b') to True
# (and so on up the object tree).
def testHasBitsForAncestorsOfExtendedMessage(self):
# Optional scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension] = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Repeated scalar extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual([], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension].append(23)
self.assertEqual([23], toplevel.submessage.Extensions[
more_extensions_pb2.repeated_int_extension])
self.assertTrue(toplevel.HasField('submessage'))
# Optional message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(not toplevel.HasField('submessage'))
toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int = 23
self.assertEqual(23, toplevel.submessage.Extensions[
more_extensions_pb2.optional_message_extension].foreign_message_int)
self.assertTrue(toplevel.HasField('submessage'))
# Repeated message extension.
toplevel = more_extensions_pb2.TopLevelMessage()
self.assertTrue(not toplevel.HasField('submessage'))
self.assertEqual(0, len(toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension]))
self.assertTrue(not toplevel.HasField('submessage'))
foreign = toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension].add()
self.assertEqual(foreign, toplevel.submessage.Extensions[
more_extensions_pb2.repeated_message_extension][0])
self.assertTrue(toplevel.HasField('submessage'))
def testDisconnectionAfterClearingEmptyMessage(self):
toplevel = more_extensions_pb2.TopLevelMessage()
extendee_proto = toplevel.submessage
extension = more_extensions_pb2.optional_message_extension
extension_proto = extendee_proto.Extensions[extension]
extendee_proto.ClearExtension(extension)
extension_proto.foreign_message_int = 23
self.assertTrue(extension_proto is not extendee_proto.Extensions[extension])
def testExtensionFailureModes(self):
extendee_proto = unittest_pb2.TestAllExtensions()
# Try non-extension-handle arguments to HasExtension,
# ClearExtension(), and Extensions[]...
self.assertRaises(KeyError, extendee_proto.HasExtension, 1234)
self.assertRaises(KeyError, extendee_proto.ClearExtension, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__, 1234)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__, 1234, 5)
# Try something that *is* an extension handle, just not for
# this message...
unknown_handle = more_extensions_pb2.optional_int_extension
self.assertRaises(KeyError, extendee_proto.HasExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.ClearExtension,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__getitem__,
unknown_handle)
self.assertRaises(KeyError, extendee_proto.Extensions.__setitem__,
unknown_handle, 5)
# Try calling HasExtension() with a valid handle, but for a
# *repeated* field. (Just as with non-extension repeated
# fields, Has*() isn't supported for extension repeated fields).
self.assertRaises(KeyError, extendee_proto.HasExtension,
unittest_pb2.repeated_string_extension)
def testStaticParseFrom(self):
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
string1 = proto1.SerializeToString()
proto2 = unittest_pb2.TestAllTypes.FromString(string1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
def testMergeFromSingularField(self):
# Test merge with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = unittest_pb2.TestAllTypes()
# This shouldn't get overwritten.
proto2.optional_string = 'value'
proto2.MergeFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('value', proto2.optional_string)
def testMergeFromRepeatedField(self):
# Test merge with just a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.MergeFrom(proto1)
self.assertEqual(0, proto2.repeated_int32[0])
self.assertEqual(1, proto2.repeated_int32[1])
self.assertEqual(2, proto2.repeated_int32[2])
def testMergeFromOptionalGroup(self):
# Test merge with an optional group.
proto1 = unittest_pb2.TestAllTypes()
proto1.optionalgroup.a = 12
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
self.assertEqual(12, proto2.optionalgroup.a)
def testMergeFromRepeatedNestedMessage(self):
# Test merge with a repeated nested message.
proto1 = unittest_pb2.TestAllTypes()
m = proto1.repeated_nested_message.add()
m.bb = 123
m = proto1.repeated_nested_message.add()
m.bb = 321
proto2 = unittest_pb2.TestAllTypes()
m = proto2.repeated_nested_message.add()
m.bb = 999
proto2.MergeFrom(proto1)
self.assertEqual(999, proto2.repeated_nested_message[0].bb)
self.assertEqual(123, proto2.repeated_nested_message[1].bb)
self.assertEqual(321, proto2.repeated_nested_message[2].bb)
proto3 = unittest_pb2.TestAllTypes()
proto3.repeated_nested_message.MergeFrom(proto2.repeated_nested_message)
self.assertEqual(999, proto3.repeated_nested_message[0].bb)
self.assertEqual(123, proto3.repeated_nested_message[1].bb)
self.assertEqual(321, proto3.repeated_nested_message[2].bb)
def testMergeFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.MergeFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testMergeFromExtensionsSingular(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.optional_int32_extension] = 1
proto2 = unittest_pb2.TestAllExtensions()
proto2.MergeFrom(proto1)
self.assertEqual(
1, proto2.Extensions[unittest_pb2.optional_int32_extension])
def testMergeFromExtensionsRepeated(self):
proto1 = unittest_pb2.TestAllExtensions()
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(1)
proto1.Extensions[unittest_pb2.repeated_int32_extension].append(2)
proto2 = unittest_pb2.TestAllExtensions()
proto2.Extensions[unittest_pb2.repeated_int32_extension].append(0)
proto2.MergeFrom(proto1)
self.assertEqual(
3, len(proto2.Extensions[unittest_pb2.repeated_int32_extension]))
self.assertEqual(
0, proto2.Extensions[unittest_pb2.repeated_int32_extension][0])
self.assertEqual(
1, proto2.Extensions[unittest_pb2.repeated_int32_extension][1])
self.assertEqual(
2, proto2.Extensions[unittest_pb2.repeated_int32_extension][2])
def testMergeFromExtensionsNestedMessage(self):
proto1 = unittest_pb2.TestAllExtensions()
ext1 = proto1.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext1.add()
m.bb = 222
m = ext1.add()
m.bb = 333
proto2 = unittest_pb2.TestAllExtensions()
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
m = ext2.add()
m.bb = 111
proto2.MergeFrom(proto1)
ext2 = proto2.Extensions[
unittest_pb2.repeated_nested_message_extension]
self.assertEqual(3, len(ext2))
self.assertEqual(111, ext2[0].bb)
self.assertEqual(222, ext2[1].bb)
self.assertEqual(333, ext2[2].bb)
def testMergeFromBug(self):
message1 = unittest_pb2.TestAllTypes()
message2 = unittest_pb2.TestAllTypes()
# Cause optional_nested_message to be instantiated within message1, even
# though it is not considered to be "present".
message1.optional_nested_message
self.assertFalse(message1.HasField('optional_nested_message'))
# Merge into message2. This should not instantiate the field in message2.
message2.MergeFrom(message1)
self.assertFalse(message2.HasField('optional_nested_message'))
def testCopyFromSingularField(self):
# Test copy with just a singular field.
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto1.optional_string = 'important-text'
proto2 = unittest_pb2.TestAllTypes()
proto2.optional_string = 'value'
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.optional_int32)
self.assertEqual('important-text', proto2.optional_string)
def testCopyFromRepeatedField(self):
# Test copy with a repeated field.
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.repeated_int32.append(2)
proto2 = unittest_pb2.TestAllTypes()
proto2.repeated_int32.append(0)
proto2.CopyFrom(proto1)
self.assertEqual(1, proto2.repeated_int32[0])
self.assertEqual(2, proto2.repeated_int32[1])
def testCopyFromAllFields(self):
# With all fields set.
proto1 = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto1)
proto2 = unittest_pb2.TestAllTypes()
proto2.CopyFrom(proto1)
# Messages should be equal.
self.assertEqual(proto2, proto1)
# Serialized string should be equal too.
string1 = proto1.SerializeToString()
string2 = proto2.SerializeToString()
self.assertEqual(string1, string2)
def testCopyFromSelf(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.repeated_int32.append(1)
proto1.optional_int32 = 2
proto1.optional_string = 'important-text'
proto1.CopyFrom(proto1)
self.assertEqual(1, proto1.repeated_int32[0])
self.assertEqual(2, proto1.optional_int32)
self.assertEqual('important-text', proto1.optional_string)
def testCopyFromBadType(self):
# The python implementation doesn't raise an exception in this
# case. In theory it should.
if api_implementation.Type() == 'python':
return
proto1 = unittest_pb2.TestAllTypes()
proto2 = unittest_pb2.TestAllExtensions()
self.assertRaises(TypeError, proto1.CopyFrom, proto2)
def testDeepCopy(self):
proto1 = unittest_pb2.TestAllTypes()
proto1.optional_int32 = 1
proto2 = copy.deepcopy(proto1)
self.assertEqual(1, proto2.optional_int32)
proto1.repeated_int32.append(2)
proto1.repeated_int32.append(3)
container = copy.deepcopy(proto1.repeated_int32)
self.assertEqual([2, 3], container)
# TODO(anuraag): Implement deepcopy for repeated composite / extension dict
def testClear(self):
proto = unittest_pb2.TestAllTypes()
# C++ implementation does not support lazy fields right now so leave it
# out for now.
if api_implementation.Type() == 'python':
test_util.SetAllFields(proto)
else:
test_util.SetAllNonLazyFields(proto)
# Clear the message.
proto.Clear()
self.assertEqual(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllTypes()
self.assertEqual(proto, empty_proto)
# Test if extensions which were set are cleared.
proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(proto)
# Clear the message.
proto.Clear()
self.assertEqual(proto.ByteSize(), 0)
empty_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(proto, empty_proto)
def testDisconnectingBeforeClear(self):
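# Clear() must disconnect any child message handed out earlier: the old child
# keeps its values, but mutating it no longer affects the parent.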
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
proto.Clear()
self.assertTrue(nested is not proto.optional_nested_message)
nested.bb = 23
self.assertTrue(not proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
proto = unittest_pb2.TestAllTypes()
nested = proto.optional_nested_message
nested.bb = 5
foreign = proto.optional_foreign_message
foreign.c = 6
proto.Clear()
self.assertTrue(nested is not proto.optional_nested_message)
self.assertTrue(foreign is not proto.optional_foreign_message)
self.assertEqual(5, nested.bb)
self.assertEqual(6, foreign.c)
nested.bb = 15
foreign.c = 16
self.assertFalse(proto.HasField('optional_nested_message'))
self.assertEqual(0, proto.optional_nested_message.bb)
self.assertFalse(proto.HasField('optional_foreign_message'))
self.assertEqual(0, proto.optional_foreign_message.c)
def testOneOf(self):
proto = unittest_pb2.TestAllTypes()
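# Setting one member of a oneof implicitly clears whichever member was set before.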
proto.oneof_uint32 = 10
proto.oneof_nested_message.bb = 11
self.assertEqual(11, proto.oneof_nested_message.bb)
self.assertFalse(proto.HasField('oneof_uint32'))
nested = proto.oneof_nested_message
proto.oneof_string = 'abc'
self.assertEqual('abc', proto.oneof_string)
self.assertEqual(11, nested.bb)
self.assertFalse(proto.HasField('oneof_nested_message'))
def assertInitialized(self, proto):
self.assertTrue(proto.IsInitialized())
# Neither method should raise an exception.
proto.SerializeToString()
proto.SerializePartialToString()
def assertNotInitialized(self, proto):
self.assertFalse(proto.IsInitialized())
# "Partial" serialization doesn't care if message is uninitialized.
proto.SerializePartialToString()
def testIsInitialized(self):
# Trivial cases - all optional fields and extensions.
proto = unittest_pb2.TestAllTypes()
self.assertInitialized(proto)
proto = unittest_pb2.TestAllExtensions()
self.assertInitialized(proto)
# The case of uninitialized required fields.
proto = unittest_pb2.TestRequired()
self.assertNotInitialized(proto)
proto.a = proto.b = proto.c = 2
self.assertInitialized(proto)
# The case of uninitialized submessage.
proto = unittest_pb2.TestRequiredForeign()
self.assertInitialized(proto)
proto.optional_message.a = 1
self.assertNotInitialized(proto)
proto.optional_message.b = 0
proto.optional_message.c = 0
self.assertInitialized(proto)
# Uninitialized repeated submessage.
message1 = proto.repeated_message.add()
self.assertNotInitialized(proto)
message1.a = message1.b = message1.c = 0
self.assertInitialized(proto)
# Uninitialized repeated group in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.multi
message1 = proto.Extensions[extension].add()
message2 = proto.Extensions[extension].add()
self.assertNotInitialized(proto)
message1.a = 1
message1.b = 1
message1.c = 1
self.assertNotInitialized(proto)
message2.a = 2
message2.b = 2
message2.c = 2
self.assertInitialized(proto)
# Uninitialized nonrepeated message in an extension.
proto = unittest_pb2.TestAllExtensions()
extension = unittest_pb2.TestRequired.single
proto.Extensions[extension].a = 1
self.assertNotInitialized(proto)
proto.Extensions[extension].b = 2
proto.Extensions[extension].c = 3
self.assertInitialized(proto)
# Try passing an errors list.
errors = []
proto = unittest_pb2.TestRequired()
self.assertFalse(proto.IsInitialized(errors))
self.assertEqual(errors, ['a', 'b', 'c'])
@basetest.unittest.skipIf(
api_implementation.Type() != 'cpp' or api_implementation.Version() != 2,
'Errors are only available from the most recent C++ implementation.')
def testFileDescriptorErrors(self):
file_name = 'test_file_descriptor_errors.proto'
package_name = 'test_file_descriptor_errors.proto'
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.name = file_name
file_descriptor_proto.package = package_name
m1 = file_descriptor_proto.message_type.add()
m1.name = 'msg1'
# Compiles the proto into the C++ descriptor pool
descriptor.FileDescriptor(
file_name,
package_name,
serialized_pb=file_descriptor_proto.SerializeToString())
# Add a FileDescriptorProto that has duplicate symbols
another_file_name = 'another_test_file_descriptor_errors.proto'
file_descriptor_proto.name = another_file_name
m2 = file_descriptor_proto.message_type.add()
m2.name = 'msg2'
with self.assertRaises(TypeError) as cm:
descriptor.FileDescriptor(
another_file_name,
package_name,
serialized_pb=file_descriptor_proto.SerializeToString())
self.assertTrue(hasattr(cm, 'exception'), '%s not raised' %
getattr(cm.expected, '__name__', cm.expected))
self.assertIn('test_file_descriptor_errors.proto', str(cm.exception))
# Error message will say something about this definition being a
# duplicate, though we don't check the message exactly to avoid a
# dependency on the C++ logging code.
self.assertIn('test_file_descriptor_errors.msg1', str(cm.exception))
def testStringUTF8Encoding(self):
proto = unittest_pb2.TestAllTypes()
# Assignment of a unicode object to a field of type 'bytes' is not allowed.
self.assertRaises(TypeError,
setattr, proto, 'optional_bytes', u'unicode object')
# Check that the default value is of python's 'unicode' type.
self.assertEqual(type(proto.optional_string), unicode)
proto.optional_string = unicode('Testing')
self.assertEqual(proto.optional_string, str('Testing'))
# Assign a value of type 'str' which can be encoded in UTF-8.
proto.optional_string = str('Testing')
self.assertEqual(proto.optional_string, unicode('Testing'))
# Try to assign a 'str' value which contains bytes that aren't 7-bit ASCII.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', b'a\x80a')
if str is bytes: # PY2
# Assign a 'str' object which contains a UTF-8 encoded string.
self.assertRaises(ValueError,
setattr, proto, 'optional_string', 'Тест')
else:
proto.optional_string = 'Тест'
# No exception thrown.
proto.optional_string = 'abc'
def testStringUTF8Serialization(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message = unittest_mset_pb2.TestMessageSetExtension2
extension = extension_message.message_set_extension
test_utf8 = u'Тест'
test_utf8_bytes = test_utf8.encode('utf-8')
# 'Test' in another language, using UTF-8 charset.
proto.Extensions[extension].str = test_utf8
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
# Check byte size.
self.assertEqual(proto.ByteSize(), len(serialized))
raw = unittest_mset_pb2.RawMessageSet()
bytes_read = raw.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_read)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(1, len(raw.item))
# Check that the type_id is the same as the tag ID in the .proto file.
self.assertEqual(raw.item[0].type_id, 1547769)
# Check the actual bytes on the wire.
self.assertTrue(
raw.item[0].message.endswith(test_utf8_bytes))
bytes_read = message2.MergeFromString(raw.item[0].message)
self.assertEqual(len(raw.item[0].message), bytes_read)
self.assertEqual(type(message2.str), unicode)
self.assertEqual(message2.str, test_utf8)
# The pure Python API throws an exception on MergeFromString(),
# if any of the string fields of the message can't be UTF-8 decoded.
# The C++ implementation of the API has no way to check that on
# MergeFromString and thus has no way to throw the exception.
#
# The pure Python API always returns objects of type 'unicode' (UTF-8
# encoded), or 'bytes' (in 7 bit ASCII).
badbytes = raw.item[0].message.replace(
test_utf8_bytes, len(test_utf8_bytes) * b'\xff')
unicode_decode_failed = False
try:
message2.MergeFromString(badbytes)
except UnicodeDecodeError:
unicode_decode_failed = True
string_field = message2.str
self.assertTrue(unicode_decode_failed or type(string_field) is bytes)
def testBytesInTextFormat(self):
proto = unittest_pb2.TestAllTypes(optional_bytes=b'\x00\x7f\x80\xff')
self.assertEqual(u'optional_bytes: "\\000\\177\\200\\377"\n',
unicode(proto))
def testEmptyNestedMessage(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.MergeFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.CopyFrom(
unittest_pb2.TestAllTypes.NestedMessage())
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
bytes_read = proto.optional_nested_message.MergeFromString(b'')
self.assertEqual(0, bytes_read)
self.assertTrue(proto.HasField('optional_nested_message'))
proto = unittest_pb2.TestAllTypes()
proto.optional_nested_message.ParseFromString(b'')
self.assertTrue(proto.HasField('optional_nested_message'))
serialized = proto.SerializeToString()
proto2 = unittest_pb2.TestAllTypes()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertTrue(proto2.HasField('optional_nested_message'))
def testSetInParent(self):
proto = unittest_pb2.TestAllTypes()
self.assertFalse(proto.HasField('optionalgroup'))
proto.optionalgroup.SetInParent()
self.assertTrue(proto.HasField('optionalgroup'))
# Since we had so many tests for protocol buffer equality, we broke these out
# into separate TestCase classes.
class TestAllTypesEqualityTest(basetest.TestCase):
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
def testNotHashable(self):
self.assertRaises(TypeError, hash, self.first_proto)
def testSelfEquality(self):
self.assertEqual(self.first_proto, self.first_proto)
def testEmptyProtosEqual(self):
self.assertEqual(self.first_proto, self.second_proto)
class FullProtosEqualityTest(basetest.TestCase):
"""Equality tests using completely-full protos as a starting point."""
def setUp(self):
self.first_proto = unittest_pb2.TestAllTypes()
self.second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(self.first_proto)
test_util.SetAllFields(self.second_proto)
def testNotHashable(self):
self.assertRaises(TypeError, hash, self.first_proto)
def testNoneNotEqual(self):
self.assertNotEqual(self.first_proto, None)
self.assertNotEqual(None, self.second_proto)
def testNotEqualToOtherMessage(self):
third_proto = unittest_pb2.TestRequired()
self.assertNotEqual(self.first_proto, third_proto)
self.assertNotEqual(third_proto, self.second_proto)
def testAllFieldsFilledEquality(self):
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalar(self):
# Nonrepeated scalar field change should cause inequality.
self.first_proto.optional_int32 += 1
self.assertNotEqual(self.first_proto, self.second_proto)
# ...as should clearing a field.
self.first_proto.ClearField('optional_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedComposite(self):
# Change a nonrepeated composite field.
self.first_proto.optional_nested_message.bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Clear a field in the nested message.
self.first_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = (
self.second_proto.optional_nested_message.bb)
self.assertEqual(self.first_proto, self.second_proto)
# Remove the nested message entirely.
self.first_proto.ClearField('optional_nested_message')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedScalar(self):
# Change a repeated scalar field.
self.first_proto.repeated_int32.append(5)
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.ClearField('repeated_int32')
self.assertNotEqual(self.first_proto, self.second_proto)
def testRepeatedComposite(self):
# Change value within a repeated composite field.
self.first_proto.repeated_nested_message[0].bb += 1
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.repeated_nested_message[0].bb -= 1
self.assertEqual(self.first_proto, self.second_proto)
# Add a value to a repeated composite field.
self.first_proto.repeated_nested_message.add()
self.assertNotEqual(self.first_proto, self.second_proto)
self.second_proto.repeated_nested_message.add()
self.assertEqual(self.first_proto, self.second_proto)
def testNonRepeatedScalarHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated scalar field.
self.first_proto.ClearField('optional_int32')
self.second_proto.optional_int32 = 0
self.assertNotEqual(self.first_proto, self.second_proto)
def testNonRepeatedCompositeHasBits(self):
# Ensure that we test "has" bits as well as value for
# nonrepeated composite field.
self.first_proto.ClearField('optional_nested_message')
self.second_proto.optional_nested_message.ClearField('bb')
self.assertNotEqual(self.first_proto, self.second_proto)
self.first_proto.optional_nested_message.bb = 0
self.first_proto.optional_nested_message.ClearField('bb')
self.assertEqual(self.first_proto, self.second_proto)
class ExtensionEqualityTest(basetest.TestCase):
def testExtensionEquality(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(first_proto, second_proto)
test_util.SetAllExtensions(first_proto)
self.assertNotEqual(first_proto, second_proto)
test_util.SetAllExtensions(second_proto)
self.assertEqual(first_proto, second_proto)
# Ensure that we check value equality.
first_proto.Extensions[unittest_pb2.optional_int32_extension] += 1
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] -= 1
self.assertEqual(first_proto, second_proto)
# Ensure that we also look at "has" bits.
first_proto.ClearExtension(unittest_pb2.optional_int32_extension)
second_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertNotEqual(first_proto, second_proto)
first_proto.Extensions[unittest_pb2.optional_int32_extension] = 0
self.assertEqual(first_proto, second_proto)
# Ensure that differences in cached values
# don't matter if "has" bits are both false.
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
self.assertEqual(
0, first_proto.Extensions[unittest_pb2.optional_int32_extension])
self.assertEqual(first_proto, second_proto)
class MutualRecursionEqualityTest(basetest.TestCase):
def testEqualityWithMutualRecursion(self):
first_proto = unittest_pb2.TestMutualRecursionA()
second_proto = unittest_pb2.TestMutualRecursionA()
self.assertEqual(first_proto, second_proto)
first_proto.bb.a.bb.optional_int32 = 23
self.assertNotEqual(first_proto, second_proto)
second_proto.bb.a.bb.optional_int32 = 23
self.assertEqual(first_proto, second_proto)
class ByteSizeTest(basetest.TestCase):
def setUp(self):
self.proto = unittest_pb2.TestAllTypes()
self.extended_proto = more_extensions_pb2.ExtendedMessage()
self.packed_proto = unittest_pb2.TestPackedTypes()
self.packed_extended_proto = unittest_pb2.TestPackedExtensions()
def Size(self):
return self.proto.ByteSize()
def testEmptyMessage(self):
self.assertEqual(0, self.proto.ByteSize())
def testSizedOnKwargs(self):
# Use a separate message to ensure testing right after creation.
proto = unittest_pb2.TestAllTypes()
self.assertEqual(0, proto.ByteSize())
proto_kwargs = unittest_pb2.TestAllTypes(optional_int64 = 1)
# One byte for the tag, one to encode varint 1.
self.assertEqual(2, proto_kwargs.ByteSize())
def testVarints(self):
def Test(i, expected_varint_size):
self.proto.Clear()
self.proto.optional_int64 = i
# Add one to the varint size for the tag info
# for tag 1.
self.assertEqual(expected_varint_size + 1, self.Size())
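# Each varint byte carries 7 bits of payload, so (1 << (7 * k)) - 1 is the
# largest value that fits in k bytes; negative int64 values are sign-extended
# and always take the maximum of 10 bytes.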
Test(0, 1)
Test(1, 1)
for i, num_bytes in zip(range(7, 63, 7), range(1, 10000)):
Test((1 << i) - 1, num_bytes)
Test(-1, 10)
Test(-2, 10)
Test(-(1 << 63), 10)
def testStrings(self):
self.proto.optional_string = ''
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2, self.Size())
self.proto.optional_string = 'abc'
# Need one byte for tag info (tag #14), and one byte for length.
self.assertEqual(2 + len(self.proto.optional_string), self.Size())
self.proto.optional_string = 'x' * 128
# Need one byte for tag info (tag #14), and TWO bytes for length.
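# (Lengths of 128 or more need a second varint byte.)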
self.assertEqual(3 + len(self.proto.optional_string), self.Size())
def testOtherNumerics(self):
self.proto.optional_fixed32 = 1234
# One byte for tag and 4 bytes for fixed32.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_fixed64 = 1234
# One byte for tag and 8 bytes for fixed64.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_float = 1.234
# One byte for tag and 4 bytes for float.
self.assertEqual(5, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_double = 1.234
# One byte for tag and 8 bytes for double.
self.assertEqual(9, self.Size())
self.proto = unittest_pb2.TestAllTypes()
self.proto.optional_sint32 = 64
# One byte for tag and 2 bytes for zig-zag-encoded 64.
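# (ZigZag encoding maps 64 to 128, and the varint for 128 takes two bytes.)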
self.assertEqual(3, self.Size())
self.proto = unittest_pb2.TestAllTypes()
def testComposites(self):
# 3 bytes.
self.proto.optional_nested_message.bb = (1 << 14)
# Plus one byte for bb tag.
# Plus 1 byte for optional_nested_message serialized size.
# Plus two bytes for optional_nested_message tag.
self.assertEqual(3 + 1 + 1 + 2, self.Size())
def testGroups(self):
# 4 bytes.
self.proto.optionalgroup.a = (1 << 21)
# Plus two bytes for |a| tag.
# Plus 2 * two bytes for START_GROUP and END_GROUP tags.
self.assertEqual(4 + 2 + 2*2, self.Size())
def testRepeatedScalars(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsExtend(self):
self.proto.repeated_int32.extend([10, 128]) # 3 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
def testRepeatedScalarsRemove(self):
self.proto.repeated_int32.append(10) # 1 byte.
self.proto.repeated_int32.append(128) # 2 bytes.
# Also need 2 bytes for each entry for tag.
self.assertEqual(1 + 2 + 2*2, self.Size())
self.proto.repeated_int32.remove(128)
self.assertEqual(1 + 2, self.Size())
def testRepeatedComposites(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 7
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
def testRepeatedCompositesDelete(self):
# Empty message. 2 bytes tag plus 1 byte length.
foreign_message_0 = self.proto.repeated_nested_message.add()
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
foreign_message_1 = self.proto.repeated_nested_message.add()
foreign_message_1.bb = 9
self.assertEqual(2 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[0]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
# Now add a new message.
foreign_message_2 = self.proto.repeated_nested_message.add()
foreign_message_2.bb = 12
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
self.assertEqual(2 + 1 + 1 + 1 + 2 + 1 + 1 + 1, self.Size())
# 2 bytes tag plus 1 byte length plus 1 byte bb tag 1 byte int.
del self.proto.repeated_nested_message[1]
self.assertEqual(2 + 1 + 1 + 1, self.Size())
del self.proto.repeated_nested_message[0]
self.assertEqual(0, self.Size())
def testRepeatedGroups(self):
# 2-byte START_GROUP plus 2-byte END_GROUP.
group_0 = self.proto.repeatedgroup.add()
# 2-byte START_GROUP plus 2-byte |a| tag + 1-byte |a|
# plus 2-byte END_GROUP.
group_1 = self.proto.repeatedgroup.add()
group_1.a = 7
self.assertEqual(2 + 2 + 2 + 2 + 1 + 2, self.Size())
def testExtensions(self):
proto = unittest_pb2.TestAllExtensions()
self.assertEqual(0, proto.ByteSize())
extension = unittest_pb2.optional_int32_extension # Field #1, 1 byte.
proto.Extensions[extension] = 23
# 1 byte for tag, 1 byte for value.
self.assertEqual(2, proto.ByteSize())
def testCacheInvalidationForNonrepeatedScalar(self):
# Test non-extension.
self.proto.optional_int32 = 1
self.assertEqual(2, self.proto.ByteSize())
self.proto.optional_int32 = 128
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_int_extension
self.extended_proto.Extensions[extension] = 1
self.assertEqual(2, self.extended_proto.ByteSize())
self.extended_proto.Extensions[extension] = 128
self.assertEqual(3, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedScalar(self):
# Test non-extension.
self.proto.repeated_int32.append(1)
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_int32.append(1)
self.assertEqual(6, self.proto.ByteSize())
self.proto.repeated_int32[1] = 128
self.assertEqual(7, self.proto.ByteSize())
self.proto.ClearField('repeated_int32')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_int_extension
repeated = self.extended_proto.Extensions[extension]
repeated.append(1)
self.assertEqual(2, self.extended_proto.ByteSize())
repeated.append(1)
self.assertEqual(4, self.extended_proto.ByteSize())
repeated[1] = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForNonrepeatedMessage(self):
# Test non-extension.
self.proto.optional_foreign_message.c = 1
self.assertEqual(5, self.proto.ByteSize())
self.proto.optional_foreign_message.c = 128
self.assertEqual(6, self.proto.ByteSize())
self.proto.optional_foreign_message.ClearField('c')
self.assertEqual(3, self.proto.ByteSize())
self.proto.ClearField('optional_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
if api_implementation.Type() == 'python':
# This is only possible in the pure-Python implementation of the API.
child = self.proto.optional_foreign_message
self.proto.ClearField('optional_foreign_message')
child.c = 128
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.optional_message_extension
child = self.extended_proto.Extensions[extension]
self.assertEqual(0, self.extended_proto.ByteSize())
child.foreign_message_int = 1
self.assertEqual(4, self.extended_proto.ByteSize())
child.foreign_message_int = 128
self.assertEqual(5, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testCacheInvalidationForRepeatedMessage(self):
# Test non-extension.
child0 = self.proto.repeated_foreign_message.add()
self.assertEqual(3, self.proto.ByteSize())
self.proto.repeated_foreign_message.add()
self.assertEqual(6, self.proto.ByteSize())
child0.c = 1
self.assertEqual(8, self.proto.ByteSize())
self.proto.ClearField('repeated_foreign_message')
self.assertEqual(0, self.proto.ByteSize())
# Test within extension.
extension = more_extensions_pb2.repeated_message_extension
child_list = self.extended_proto.Extensions[extension]
child0 = child_list.add()
self.assertEqual(2, self.extended_proto.ByteSize())
child_list.add()
self.assertEqual(4, self.extended_proto.ByteSize())
child0.foreign_message_int = 1
self.assertEqual(6, self.extended_proto.ByteSize())
child0.ClearField('foreign_message_int')
self.assertEqual(4, self.extended_proto.ByteSize())
self.extended_proto.ClearExtension(extension)
self.assertEqual(0, self.extended_proto.ByteSize())
def testPackedRepeatedScalars(self):
self.assertEqual(0, self.packed_proto.ByteSize())
self.packed_proto.packed_int32.append(10) # 1 byte.
self.packed_proto.packed_int32.append(128) # 2 bytes.
# The tag is 2 bytes (the field number is 90), and the varint
# storing the length is 1 byte.
int_size = 1 + 2 + 3
self.assertEqual(int_size, self.packed_proto.ByteSize())
self.packed_proto.packed_double.append(4.2) # 8 bytes
self.packed_proto.packed_double.append(3.25) # 8 bytes
# 2 more tag bytes, 1 more length byte.
double_size = 8 + 8 + 3
self.assertEqual(int_size+double_size, self.packed_proto.ByteSize())
self.packed_proto.ClearField('packed_int32')
self.assertEqual(double_size, self.packed_proto.ByteSize())
def testPackedExtensions(self):
self.assertEqual(0, self.packed_extended_proto.ByteSize())
extension = self.packed_extended_proto.Extensions[
unittest_pb2.packed_fixed32_extension]
extension.extend([1, 2, 3, 4]) # 16 bytes
# Tag is 3 bytes.
self.assertEqual(19, self.packed_extended_proto.ByteSize())
# Issues to be sure to cover include:
# * Handling of unrecognized tags ("uninterpreted_bytes").
# * Handling of MessageSets.
# * Consistent ordering of tags in the wire format,
# including ordering between extensions and non-extension
# fields.
# * Consistent serialization of negative numbers, especially
# negative int32s.
# * Handling of empty submessages (with and without "has"
# bits set).
class SerializationTest(basetest.TestCase):
def testSerializeEmptyMessage(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllFields(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeAllExtensions(self):
first_proto = unittest_pb2.TestAllExtensions()
second_proto = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeWithOptionalGroup(self):
first_proto = unittest_pb2.TestAllTypes()
second_proto = unittest_pb2.TestAllTypes()
first_proto.optionalgroup.a = 242
serialized = first_proto.SerializeToString()
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual(first_proto, second_proto)
def testSerializeNegativeValues(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.optional_int32 = -1
first_proto.optional_int64 = -(2 << 40)
first_proto.optional_sint32 = -3
first_proto.optional_sint64 = -(4 << 40)
first_proto.optional_sfixed32 = -5
first_proto.optional_sfixed64 = -(6 << 40)
second_proto = unittest_pb2.TestAllTypes.FromString(
first_proto.SerializeToString())
self.assertEqual(first_proto, second_proto)
def testParseTruncated(self):
# This test is only applicable for the Python implementation of the API.
if api_implementation.Type() != 'python':
return
first_proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(first_proto)
serialized = first_proto.SerializeToString()
for truncation_point in xrange(len(serialized) + 1):
try:
second_proto = unittest_pb2.TestAllTypes()
unknown_fields = unittest_pb2.TestEmptyMessage()
pos = second_proto._InternalParse(serialized, 0, truncation_point)
# If we didn't raise an error then we read exactly the amount expected.
self.assertEqual(truncation_point, pos)
# Parsing to unknown fields should not throw if parsing to known fields
# did not.
try:
pos2 = unknown_fields._InternalParse(serialized, 0, truncation_point)
self.assertEqual(truncation_point, pos2)
except message.DecodeError:
self.fail('Parsing unknown fields failed when parsing known fields '
'did not.')
except message.DecodeError:
# Parsing unknown fields should also fail.
self.assertRaises(message.DecodeError, unknown_fields._InternalParse,
serialized, 0, truncation_point)
def testCanonicalSerializationOrder(self):
proto = more_messages_pb2.OutOfOrderFields()
# These are also their tag numbers. Even though we're setting these in
# reverse-tag order AND they're listed in reverse tag-order in the .proto
# file, they should nonetheless be serialized in tag order.
proto.optional_sint32 = 5
proto.Extensions[more_messages_pb2.optional_uint64] = 4
proto.optional_uint32 = 3
proto.Extensions[more_messages_pb2.optional_int64] = 2
proto.optional_int32 = 1
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((1, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(1, d.ReadInt32())
self.assertEqual((2, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(2, d.ReadInt64())
self.assertEqual((3, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(3, d.ReadUInt32())
self.assertEqual((4, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(4, d.ReadUInt64())
self.assertEqual((5, wire_format.WIRETYPE_VARINT), ReadTag())
self.assertEqual(5, d.ReadSInt32())
def testCanonicalSerializationOrderSameAsCpp(self):
# Copy of the same test we use for C++.
proto = unittest_pb2.TestFieldOrderings()
test_util.SetAllFieldsAndExtensions(proto)
serialized = proto.SerializeToString()
test_util.ExpectAllFieldsAndExtensionsInOrder(serialized)
def testMergeFromStringWhenFieldsAlreadySet(self):
first_proto = unittest_pb2.TestAllTypes()
first_proto.repeated_string.append('foobar')
first_proto.optional_int32 = 23
first_proto.optional_nested_message.bb = 42
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestAllTypes()
second_proto.repeated_string.append('baz')
second_proto.optional_int32 = 100
second_proto.optional_nested_message.bb = 999
bytes_parsed = second_proto.MergeFromString(serialized)
self.assertEqual(len(serialized), bytes_parsed)
# Ensure that we append to repeated fields.
self.assertEqual(['baz', 'foobar'], list(second_proto.repeated_string))
# Ensure that we overwrite nonrepeated scalars.
self.assertEqual(23, second_proto.optional_int32)
# Ensure that we recursively call MergeFromString() on
# submessages.
self.assertEqual(42, second_proto.optional_nested_message.bb)
def testMessageSetWireFormat(self):
proto = unittest_mset_pb2.TestMessageSet()
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension_message2 = unittest_mset_pb2.TestMessageSetExtension2
extension1 = extension_message1.message_set_extension
extension2 = extension_message2.message_set_extension
proto.Extensions[extension1].i = 123
proto.Extensions[extension2].str = 'foo'
# Serialize using the MessageSet wire format (this is specified in the
# .proto file).
serialized = proto.SerializeToString()
raw = unittest_mset_pb2.RawMessageSet()
self.assertEqual(False,
raw.DESCRIPTOR.GetOptions().message_set_wire_format)
self.assertEqual(
len(serialized),
raw.MergeFromString(serialized))
self.assertEqual(2, len(raw.item))
message1 = unittest_mset_pb2.TestMessageSetExtension1()
self.assertEqual(
len(raw.item[0].message),
message1.MergeFromString(raw.item[0].message))
self.assertEqual(123, message1.i)
message2 = unittest_mset_pb2.TestMessageSetExtension2()
self.assertEqual(
len(raw.item[1].message),
message2.MergeFromString(raw.item[1].message))
self.assertEqual('foo', message2.str)
# Deserialize using the MessageSet wire format.
proto2 = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(123, proto2.Extensions[extension1].i)
self.assertEqual('foo', proto2.Extensions[extension2].str)
# Check byte size.
self.assertEqual(proto2.ByteSize(), len(serialized))
self.assertEqual(proto.ByteSize(), len(serialized))
def testMessageSetWireFormatUnknownExtension(self):
# Create a message using the message set wire format with an unknown
# message.
raw = unittest_mset_pb2.RawMessageSet()
# Add an item.
item = raw.item.add()
item.type_id = 1545008
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12345
item.message = message1.SerializeToString()
# Add a second, unknown extension.
item = raw.item.add()
item.type_id = 1545009
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
message1 = unittest_mset_pb2.TestMessageSetExtension1()
message1.i = 12346
item.message = message1.SerializeToString()
# Add another unknown extension.
item = raw.item.add()
item.type_id = 1545010
message1 = unittest_mset_pb2.TestMessageSetExtension2()
message1.str = 'foo'
item.message = message1.SerializeToString()
serialized = raw.SerializeToString()
# Parse message using the message set wire format.
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(
len(serialized),
proto.MergeFromString(serialized))
# Check that the message parsed well.
extension_message1 = unittest_mset_pb2.TestMessageSetExtension1
extension1 = extension_message1.message_set_extension
self.assertEqual(12345, proto.Extensions[extension1].i)
def testUnknownFields(self):
proto = unittest_pb2.TestAllTypes()
test_util.SetAllFields(proto)
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
# Now test with a int64 field set.
proto = unittest_pb2.TestAllTypes()
proto.optional_int64 = 0x0fffffffffffffff
serialized = proto.SerializeToString()
# The empty message should be parsable with all of the fields
# unknown.
proto2 = unittest_pb2.TestEmptyMessage()
# Parsing this message should succeed.
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
def _CheckRaises(self, exc_class, callable_obj, exception):
"""This method checks if the excpetion type and message are as expected."""
try:
callable_obj()
except exc_class as ex:
# Check if the exception message is the right one.
self.assertEqual(exception, str(ex))
return
else:
raise self.failureException('%s not raised' % str(exc_class))
def testSerializeUninitialized(self):
proto = unittest_pb2.TestRequired()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertFalse(proto2.HasField('a'))
# proto2 ParseFromString does not check that required fields are set.
proto2.ParseFromString(partial)
self.assertFalse(proto2.HasField('a'))
proto.a = 1
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.b = 2
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto.c = 3
serialized = proto.SerializeToString()
# Shouldn't raise exceptions.
partial = proto.SerializePartialToString()
proto2 = unittest_pb2.TestRequired()
self.assertEqual(
len(serialized),
proto2.MergeFromString(serialized))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
self.assertEqual(
len(partial),
proto2.MergeFromString(partial))
self.assertEqual(1, proto2.a)
self.assertEqual(2, proto2.b)
self.assertEqual(3, proto2.c)
def testSerializeUninitializedSubMessage(self):
proto = unittest_pb2.TestRequiredForeign()
# Sub-message doesn't exist yet, so this succeeds.
proto.SerializeToString()
proto.optional_message.a = 1
proto.optional_message.b = 2
proto.optional_message.c = 3
proto.SerializeToString()
proto.repeated_message.add().a = 1
proto.repeated_message.add().b = 2
proto.repeated_message[0].b = 2
proto.repeated_message[0].c = 3
proto.repeated_message[1].a = 1
proto.repeated_message[1].c = 3
proto.SerializeToString()
def testSerializeAllPackedFields(self):
first_proto = unittest_pb2.TestPackedTypes()
second_proto = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(first_proto)
serialized = first_proto.SerializeToString()
self.assertEqual(first_proto.ByteSize(), len(serialized))
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testSerializeAllPackedExtensions(self):
first_proto = unittest_pb2.TestPackedExtensions()
second_proto = unittest_pb2.TestPackedExtensions()
test_util.SetAllPackedExtensions(first_proto)
serialized = first_proto.SerializeToString()
bytes_read = second_proto.MergeFromString(serialized)
self.assertEqual(second_proto.ByteSize(), bytes_read)
self.assertEqual(first_proto, second_proto)
def testMergePackedFromStringWhenSomeFieldsAlreadySet(self):
first_proto = unittest_pb2.TestPackedTypes()
first_proto.packed_int32.extend([1, 2])
first_proto.packed_double.append(3.0)
serialized = first_proto.SerializeToString()
second_proto = unittest_pb2.TestPackedTypes()
second_proto.packed_int32.append(3)
second_proto.packed_double.extend([1.0, 2.0])
second_proto.packed_sint32.append(4)
self.assertEqual(
len(serialized),
second_proto.MergeFromString(serialized))
self.assertEqual([3, 1, 2], second_proto.packed_int32)
self.assertEqual([1.0, 2.0, 3.0], second_proto.packed_double)
self.assertEqual([4], second_proto.packed_sint32)
def testPackedFieldsWireFormat(self):
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.extend([1, 2, 150, 3]) # 1 + 1 + 2 + 1 bytes
proto.packed_double.extend([1.0, 1000.0]) # 8 + 8 bytes
proto.packed_float.append(2.0) # 4 bytes, will be before double
serialized = proto.SerializeToString()
self.assertEqual(proto.ByteSize(), len(serialized))
d = _MiniDecoder(serialized)
ReadTag = d.ReadFieldNumberAndWireType
self.assertEqual((90, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(1+1+1+2, d.ReadInt32())
self.assertEqual(1, d.ReadInt32())
self.assertEqual(2, d.ReadInt32())
self.assertEqual(150, d.ReadInt32())
self.assertEqual(3, d.ReadInt32())
self.assertEqual((100, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(4, d.ReadInt32())
self.assertEqual(2.0, d.ReadFloat())
self.assertEqual((101, wire_format.WIRETYPE_LENGTH_DELIMITED), ReadTag())
self.assertEqual(8+8, d.ReadInt32())
self.assertEqual(1.0, d.ReadDouble())
self.assertEqual(1000.0, d.ReadDouble())
self.assertTrue(d.EndOfStream())
def testParsePackedFromUnpacked(self):
unpacked = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(unpacked)
packed = unittest_pb2.TestPackedTypes()
serialized = unpacked.SerializeToString()
self.assertEqual(
len(serialized),
packed.MergeFromString(serialized))
expected = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(expected)
self.assertEqual(expected, packed)
def testParseUnpackedFromPacked(self):
packed = unittest_pb2.TestPackedTypes()
test_util.SetAllPackedFields(packed)
unpacked = unittest_pb2.TestUnpackedTypes()
serialized = packed.SerializeToString()
self.assertEqual(
len(serialized),
unpacked.MergeFromString(serialized))
expected = unittest_pb2.TestUnpackedTypes()
test_util.SetAllUnpackedFields(expected)
self.assertEqual(expected, unpacked)
def testFieldNumbers(self):
proto = unittest_pb2.TestAllTypes()
self.assertEqual(unittest_pb2.TestAllTypes.NestedMessage.BB_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONAL_INT32_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.TestAllTypes.OPTIONALGROUP_FIELD_NUMBER, 16)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_MESSAGE_FIELD_NUMBER, 18)
self.assertEqual(
unittest_pb2.TestAllTypes.OPTIONAL_NESTED_ENUM_FIELD_NUMBER, 21)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATED_INT32_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.TestAllTypes.REPEATEDGROUP_FIELD_NUMBER, 46)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_MESSAGE_FIELD_NUMBER, 48)
self.assertEqual(
unittest_pb2.TestAllTypes.REPEATED_NESTED_ENUM_FIELD_NUMBER, 51)
def testExtensionFieldNumbers(self):
self.assertEqual(unittest_pb2.TestRequired.single.number, 1000)
self.assertEqual(unittest_pb2.TestRequired.SINGLE_FIELD_NUMBER, 1000)
self.assertEqual(unittest_pb2.TestRequired.multi.number, 1001)
self.assertEqual(unittest_pb2.TestRequired.MULTI_FIELD_NUMBER, 1001)
self.assertEqual(unittest_pb2.optional_int32_extension.number, 1)
self.assertEqual(unittest_pb2.OPTIONAL_INT32_EXTENSION_FIELD_NUMBER, 1)
self.assertEqual(unittest_pb2.optionalgroup_extension.number, 16)
self.assertEqual(unittest_pb2.OPTIONALGROUP_EXTENSION_FIELD_NUMBER, 16)
self.assertEqual(unittest_pb2.optional_nested_message_extension.number, 18)
self.assertEqual(
unittest_pb2.OPTIONAL_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 18)
self.assertEqual(unittest_pb2.optional_nested_enum_extension.number, 21)
self.assertEqual(unittest_pb2.OPTIONAL_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
21)
self.assertEqual(unittest_pb2.repeated_int32_extension.number, 31)
self.assertEqual(unittest_pb2.REPEATED_INT32_EXTENSION_FIELD_NUMBER, 31)
self.assertEqual(unittest_pb2.repeatedgroup_extension.number, 46)
self.assertEqual(unittest_pb2.REPEATEDGROUP_EXTENSION_FIELD_NUMBER, 46)
self.assertEqual(unittest_pb2.repeated_nested_message_extension.number, 48)
self.assertEqual(
unittest_pb2.REPEATED_NESTED_MESSAGE_EXTENSION_FIELD_NUMBER, 48)
self.assertEqual(unittest_pb2.repeated_nested_enum_extension.number, 51)
self.assertEqual(unittest_pb2.REPEATED_NESTED_ENUM_EXTENSION_FIELD_NUMBER,
51)
def testInitKwargs(self):
proto = unittest_pb2.TestAllTypes(
optional_int32=1,
optional_string='foo',
optional_bool=True,
optional_bytes=b'bar',
optional_nested_message=unittest_pb2.TestAllTypes.NestedMessage(bb=1),
optional_foreign_message=unittest_pb2.ForeignMessage(c=1),
optional_nested_enum=unittest_pb2.TestAllTypes.FOO,
optional_foreign_enum=unittest_pb2.FOREIGN_FOO,
repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_int32'))
self.assertTrue(proto.HasField('optional_string'))
self.assertTrue(proto.HasField('optional_bool'))
self.assertTrue(proto.HasField('optional_bytes'))
self.assertTrue(proto.HasField('optional_nested_message'))
self.assertTrue(proto.HasField('optional_foreign_message'))
self.assertTrue(proto.HasField('optional_nested_enum'))
self.assertTrue(proto.HasField('optional_foreign_enum'))
self.assertEqual(1, proto.optional_int32)
self.assertEqual('foo', proto.optional_string)
self.assertEqual(True, proto.optional_bool)
self.assertEqual(b'bar', proto.optional_bytes)
self.assertEqual(1, proto.optional_nested_message.bb)
self.assertEqual(1, proto.optional_foreign_message.c)
self.assertEqual(unittest_pb2.TestAllTypes.FOO,
proto.optional_nested_enum)
self.assertEqual(unittest_pb2.FOREIGN_FOO, proto.optional_foreign_enum)
self.assertEqual([1, 2, 3], proto.repeated_int32)
def testInitArgsUnknownFieldName(self):
def InitializeEmptyMessageWithExtraKeywordArg():
unused_proto = unittest_pb2.TestEmptyMessage(unknown='unknown')
self._CheckRaises(ValueError,
InitializeEmptyMessageWithExtraKeywordArg,
'Protocol message has no "unknown" field.')
def testInitRequiredKwargs(self):
proto = unittest_pb2.TestRequired(a=1, b=1, c=1)
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('a'))
self.assertTrue(proto.HasField('b'))
self.assertTrue(proto.HasField('c'))
self.assertTrue(not proto.HasField('dummy2'))
self.assertEqual(1, proto.a)
self.assertEqual(1, proto.b)
self.assertEqual(1, proto.c)
def testInitRequiredForeignKwargs(self):
proto = unittest_pb2.TestRequiredForeign(
optional_message=unittest_pb2.TestRequired(a=1, b=1, c=1))
self.assertTrue(proto.IsInitialized())
self.assertTrue(proto.HasField('optional_message'))
self.assertTrue(proto.optional_message.IsInitialized())
self.assertTrue(proto.optional_message.HasField('a'))
self.assertTrue(proto.optional_message.HasField('b'))
self.assertTrue(proto.optional_message.HasField('c'))
self.assertTrue(not proto.optional_message.HasField('dummy2'))
self.assertEqual(unittest_pb2.TestRequired(a=1, b=1, c=1),
proto.optional_message)
self.assertEqual(1, proto.optional_message.a)
self.assertEqual(1, proto.optional_message.b)
self.assertEqual(1, proto.optional_message.c)
def testInitRepeatedKwargs(self):
proto = unittest_pb2.TestAllTypes(repeated_int32=[1, 2, 3])
self.assertTrue(proto.IsInitialized())
self.assertEqual(1, proto.repeated_int32[0])
self.assertEqual(2, proto.repeated_int32[1])
self.assertEqual(3, proto.repeated_int32[2])
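# Checks that message-level and field-level options declared in the .proto files are exposed through the descriptors.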
class OptionsTest(basetest.TestCase):
def testMessageOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertEqual(True,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(False,
proto.DESCRIPTOR.GetOptions().message_set_wire_format)
def testPackedOptions(self):
proto = unittest_pb2.TestAllTypes()
proto.optional_int32 = 1
proto.optional_double = 3.0
for field_descriptor, _ in proto.ListFields():
self.assertEqual(False, field_descriptor.GetOptions().packed)
proto = unittest_pb2.TestPackedTypes()
proto.packed_int32.append(1)
proto.packed_double.append(3.0)
for field_descriptor, _ in proto.ListFields():
self.assertEqual(True, field_descriptor.GetOptions().packed)
self.assertEqual(reflection._FieldDescriptor.LABEL_REPEATED,
field_descriptor.label)
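# Tests for building message classes at runtime from Descriptor objects (reflection.MakeClass / MakeDescriptor).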
class ClassAPITest(basetest.TestCase):
def testMakeClassWithNestedDescriptor(self):
leaf_desc = descriptor.Descriptor('leaf', 'package.parent.child.leaf', '',
containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
child_desc = descriptor.Descriptor('child', 'package.parent.child', '',
containing_type=None, fields=[],
nested_types=[leaf_desc], enum_types=[],
extensions=[])
sibling_desc = descriptor.Descriptor('sibling', 'package.parent.sibling',
'', containing_type=None, fields=[],
nested_types=[], enum_types=[],
extensions=[])
parent_desc = descriptor.Descriptor('parent', 'package.parent', '',
containing_type=None, fields=[],
nested_types=[child_desc, sibling_desc],
enum_types=[], extensions=[])
message_class = reflection.MakeClass(parent_desc)
self.assertIn('child', message_class.__dict__)
self.assertIn('sibling', message_class.__dict__)
self.assertIn('leaf', message_class.child.__dict__)
def _GetSerializedFileDescriptor(self, name):
"""Get a serialized representation of a test FileDescriptorProto.
Args:
name: All calls to this must use a unique message name, to avoid
collisions in the cpp descriptor pool.
Returns:
A string containing the serialized form of a test FileDescriptorProto.
"""
file_descriptor_str = (
'message_type {'
' name: "' + name + '"'
' field {'
' name: "flat"'
' number: 1'
' label: LABEL_REPEATED'
' type: TYPE_UINT32'
' }'
' field {'
' name: "bar"'
' number: 2'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Bar"'
' }'
' nested_type {'
' name: "Bar"'
' field {'
' name: "baz"'
' number: 3'
' label: LABEL_OPTIONAL'
' type: TYPE_MESSAGE'
' type_name: "Baz"'
' }'
' nested_type {'
' name: "Baz"'
' enum_type {'
' name: "deep_enum"'
' value {'
' name: "VALUE_A"'
' number: 0'
' }'
' }'
' field {'
' name: "deep"'
' number: 4'
' label: LABEL_OPTIONAL'
' type: TYPE_UINT32'
' }'
' }'
' }'
'}')
file_descriptor = descriptor_pb2.FileDescriptorProto()
text_format.Merge(file_descriptor_str, file_descriptor)
return file_descriptor.SerializeToString()
def testParsingFlatClassWithExplicitClassDeclaration(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('A'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
class MessageClass(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = msg_descriptor
msg = MessageClass()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingFlatClass(self):
"""Test that the generated class can parse a flat message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('B'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'flat: 0 '
'flat: 1 '
'flat: 2 ')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.flat, [0, 1, 2])
def testParsingNestedClass(self):
"""Test that the generated class can parse a nested message."""
file_descriptor = descriptor_pb2.FileDescriptorProto()
file_descriptor.ParseFromString(self._GetSerializedFileDescriptor('C'))
msg_descriptor = descriptor.MakeDescriptor(
file_descriptor.message_type[0])
msg_class = reflection.MakeClass(msg_descriptor)
msg = msg_class()
msg_str = (
'bar {'
' baz {'
' deep: 4'
' }'
'}')
text_format.Merge(msg_str, msg)
self.assertEqual(msg.bar.baz.deep, 4)
if __name__ == '__main__':
basetest.main()
|
blazbratanic/protobuf
|
python/google/protobuf/internal/reflection_test.py
|
Python
|
bsd-3-clause
| 119,310
|
from mission.constants.config import PIPE_SEARCH_DEPTH
from mission.framework.combinators import *
from mission.framework.primitive import *
from mission.framework.position import *
from mission.framework.movement import *
from mission.framework.task import *
from mission.framework.timing import *
from mission.framework.wiggling import *
from mission.constants.config import *
from mission.missions.ozer_common import SequentialSuccess, Conditional, Retry
from mission.opt_aux.aux import *
from mission.missions.buoys import Scuttle
import aslam
import shm, time, math
import numpy as n
from auv_math.math_utils import rotate
class GoToPipe(Task):
"""
Move to and align with the pipe after buoys
"""
def on_first_run(self, *args, **kwargs):
pipe_results = shm.buoys_pipe_results.get()
self.task = Sequential(
Log('Returning to pipe position'),
GoToPosition(
pipe_results.north,
pipe_results.east,
depth=pipe_results.depth,
optimize=True,
),
Log('Aligning with pipe'),
Heading(pipe_results.heading),
)
def on_run(self, *args, **kwargs):
if not self.task.finished:
self.task()
else:
self.finish()
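# Runs a subtask alongside a timer and finishes as soon as either the subtask or the timer completes.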
class Timeout(Task):
def on_first_run(self, time, task, *args, **kwargs):
self.timer = Timer(time)
def on_run(self, time, task, *args, **kwargs):
task()
self.timer()
if task.finished:
self.finish()
elif self.timer.finished:
self.logw('Task timed out after {} seconds!'.format(time))
self.finish()
# Enforces a minimum depth: skips the subtask while the sub is shallower than the minimum and clamps any shallower desired depth back to the minimum.
class MinDepth(Task):
def on_run(self, min_depth, subtask):
actual = shm.kalman.depth.get()
if actual > min_depth:
subtask()
else:
self.logw('Actual depth {} less than minimum of {}; NOT running task!'.format(actual, min_depth))
desire = shm.desires.depth.get()
if desire < min_depth:
self.logw('Desired depth {} less than minimum of {}; overriding with minimum!'.format(desire, min_depth))
shm.desires.depth.set(min_depth)
if subtask.finished:
self.finish()
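# GrabHeading/GrabPosition stash the sub's current heading and position in shared memory so the
# Restore* tasks below can return to them between buoy attempts.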
class GrabHeading(Task):
def on_run(self):
heading = shm.kalman.heading.get()
self.logw('Grabbed current sub heading of {}'.format(heading))
shm.buoys_mission.heading.set(heading)
self.finish()
class GrabPosition(Task):
def on_run(self):
pos = aslam.sub.position()
self.logw('Grabbed position of N {}, E {}, D {}'.format(pos[0], pos[1], pos[2]))
shm.buoys_mission.north.set(pos[0])
shm.buoys_mission.east.set(pos[1])
shm.buoys_mission.depth.set(pos[2])
self.finish()
class RestoreHeading(Task):
def on_first_run(self):
self.saved = shm.buoys_mission.heading.get()
self.logw('Restoring sub heading of {}'.format(self.saved))
def on_run(self):
task = Heading(self.saved)
task()
if task.finished:
self.finish()
class RestorePosition(Task):
def on_first_run(self):
self.saved = [shm.buoys_mission.north.get(), shm.buoys_mission.east.get(), shm.buoys_mission.depth.get()]
self.logw('Restoring saved position of N {}, E {}, D {}'.format(*self.saved))
def on_run(self):
task = GoToPosition(self.saved[0], self.saved[1], depth = self.saved[2])
task()
if task.finished:
self.finish()
Scan = Sequential(
    MoveYRough(2.0),
    MoveYRough(-4.0),
    MoveYRough(4.0),
    MoveYRough(-4.0),
    MoveYRough(2.0)
)
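# ASLAM targeting helpers: a +/-0.2 m bounding box around a target position and a 5 cm per-axis tolerance.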
boundingBox = lambda pos: (pos - n.array([0.2, 0.2, 0.2]), pos + n.array([0.2, 0.2, 0.2]))
tolerance = n.array([0.05, 0.05, 0.05])
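# Guards a motion task with a touch sensor: finishes when the subtask completes or the sensor reads low.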
class TouchGuarded(Task):
def on_run(self, subtask, sensor):
subtask()
if subtask.finished or not sensor.get():
self.finish()
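# Backs away from the buoys while wiggling, goes to pipe-search depth, sidesteps away from the
# yellow buoy based on its left-to-right ordering, then moves to a point 1 m beyond the center buoy.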
class AvoidYellow(Task):
def on_first_run(self):
self.heading = shm.kalman.heading.get()
self.red_buoy = aslam.world.red_buoy.position()[:2]
self.green_buoy = aslam.world.green_buoy.position()[:2]
self.yellow_buoy = aslam.world.yellow_buoy.position()[:2]
self.all_buoys = [('red', self.red_buoy), ('green', self.green_buoy), ('yellow', self.yellow_buoy)]
self.sorted_buoys = sorted(self.all_buoys, key = lambda x: rotate(x[1], -self.heading)[1])
self.logi('Sorted buoys (left-to-right): {}'.format([x[0] for x in self.sorted_buoys]))
subtasks = []
subtasks.append(MasterConcurrent(HPRWiggle(), MoveXRough(-1.0)))
subtasks.append(Depth(PIPE_SEARCH_DEPTH))
if self.sorted_buoys[0][0] == 'yellow':
# yellow buoy far left, go right
subtasks.append(MoveYRough(1.0))
elif self.sorted_buoys[1][0] == 'yellow':
subtasks.append(MoveYRough(1.0))
else:
subtasks.append(MoveYRough(-1.0))
subtasks.append(MoveXRough(1.0))
center_buoy = n.array(self.sorted_buoys[1][1])
center_buoy += n.array(rotate((1, 0), self.heading)) # 1m beyond center buoy
subtasks.append(GoToPosition(center_buoy[0], center_buoy[1], depth=PIPE_SEARCH_DEPTH))
self.subtask = Sequential(*subtasks)
def on_run(self):
self.subtask()
if self.subtask.finished:
self.finish()
class AllBuoys(Task):
def desiredModules(self):
return [shm.vision_modules.Buoys]
def on_first_run(self):
self.has_made_progress = False
shm.navigation_settings.optimize.set(False)
delta_red = aslam.world.red_buoy.position() - aslam.sub.position()
delta_red /= n.linalg.norm(delta_red)
delta_red *= -1
delta_green = aslam.world.green_buoy.position() - aslam.sub.position()
delta_green /= n.linalg.norm(delta_green)
delta_green *= -1
delta_yellow = aslam.world.yellow_buoy.position() - aslam.sub.position()
delta_yellow /= n.linalg.norm(delta_yellow)
delta_yellow *= -1
subtasks = []
# subtasks.append(GoToPipe())
subtasks.append(MoveXRough(PIPE_TO_BUOYS_DIST))
subtasks.append(Depth(BUOY_SEARCH_DEPTH))
subtasks.append(GrabPosition())
subtasks.append(GrabHeading())
subtasks.append(Scan)
if 1:
subtasks += [
Timeout(20.0, SequentialSuccess(
aslam.Target(aslam.world.red_buoy, delta_red, tolerance, boundingBox(delta_red * 2), orient = True)),
RelativeToInitialDepth(0.05),
Timeout(5.0, TouchGuarded(MoveXRough(1.3), shm.gpio.wall_1)),
),
RestorePosition(),
RestoreHeading()
]
if 1:
subtasks += [
Timeout(20.0, SequentialSuccess(
aslam.Target(aslam.world.green_buoy, delta_green, tolerance, boundingBox(delta_green * 2), orient = True)),
RelativeToInitialDepth(0.1),
Timeout(5.0, TouchGuarded(MoveXRough(1.3), shm.gpio.wall_1)),
),
RestorePosition(),
RestoreHeading()
]
if 1:
subtasks += [
Timeout(20.0, SequentialSuccess(
aslam.Target(aslam.world.yellow_buoy, delta_yellow, tolerance, boundingBox(delta_yellow * 2), orient = True)),
RelativeToInitialDepth(0.1),
GuardedTimer(10.0, Scuttle(), aslam.SimpleTarget(aslam.world.yellow_buoy, delta_yellow)),
),
RestorePosition(),
RestoreHeading(),
AvoidYellow()
]
subtasks.append(RestoreHeading())
self.subtask = MinDepth(0.1, Sequential(*subtasks))
def on_run(self):
self.subtask()
if self.subtask.finished:
self.finish()
|
cuauv/software
|
mission/missions/old/2017/aslam_buoys.py
|
Python
|
bsd-3-clause
| 7,476
|
from gevent import monkey
monkey.patch_all()
import pytest
import gevent
import marshmallow
from channelstream.server_state import get_state
from channelstream.channel import Channel
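# End-to-end tests for the channelstream WSGI views, exercised through a dummy Pyramid request with a JSON body.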
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestConnectViews(object):
def test_bad_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect
dummy_request.json_body = {}
try:
connect(dummy_request)
except marshmallow.exceptions.ValidationError as exc:
assert exc.messages == {"username": ["Missing data for required field."]}
def test_good_json(self, dummy_request, test_uuids):
server_state = get_state()
from channelstream.wsgi_views.server import connect
dummy_request.json_body = {
"username": "username",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
assert server_state.channels == {}
result = connect(dummy_request)
assert len(server_state.channels.keys()) == 2
assert "username" in server_state.users
assert test_uuids[1] in server_state.connections
assert result["channels"] == ["a", "aB"]
assert result["state"] == {"bar": "baz", "key": "foo"}
assert result["conn_id"] == test_uuids[1]
channels_info = result["channels_info"]["channels"]
assert len(channels_info.keys()) == 2
assert channels_info["a"]["total_users"] == 1
assert channels_info["a"]["total_connections"] == 1
assert channels_info["a"]["users"] == ["username"]
assert channels_info["a"]["history"] == []
assert result["channels_info"]["users"] == [
{"state": {"bar": "baz", "key": "foo"}, "user": "username"}
]
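# The user_state view updates an already-connected user's state, so these tests register a user first via _connect_user.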
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestUserStateViews(object):
def test_bad_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
dummy_request.json_body = {}
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
user_state(dummy_request)
assert excinfo.value.messages == {"user": ["Missing data for required field."]}
def _connect_user(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
def test_not_found_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
dummy_request.json_body = {"user": "blabla"}
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
user_state(dummy_request)
assert excinfo.value.messages == {"user": ["Unknown user"]}
def test_good_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
self._connect_user(dummy_request, test_uuids)
dummy_request.json_body = {
"user": "test",
"user_state": {"bar": 2, "private": "im_private"},
"state_public_keys": ["avatar", "bar"],
}
result = user_state(dummy_request)
sorted_keys = sorted(["bar", "key", "private"])
assert sorted_keys == sorted(result["user_state"].keys())
assert result["user_state"]["private"] == "im_private"
sorted_changed = sorted([x["key"] for x in result["changed_state"]])
assert result["public_keys"] == ["avatar", "bar"]
assert sorted_changed == sorted(["bar", "private"])
def test_good_json_no_public_keys(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import user_state
self._connect_user(dummy_request, test_uuids)
dummy_request.json_body = {
"user": "test",
"user_state": {"bar": 2, "private": "im_private"},
}
result = user_state(dummy_request)
sorted_keys = sorted(["bar", "key", "private"])
assert sorted_keys == sorted(result["user_state"].keys())
assert result["user_state"]["private"] == "im_private"
assert result["public_keys"] == ["bar"]
sorted_changed = sorted([x["key"] for x in result["changed_state"]])
assert sorted_changed == sorted(["bar", "private"])
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestSubscribeViews(object):
def test_bad_json(self, dummy_request):
from channelstream.wsgi_views.server import subscribe
dummy_request.json_body = {}
try:
subscribe(dummy_request)
except marshmallow.exceptions.ValidationError as exc:
assert list(sorted(exc.messages.keys())) == ["channels", "conn_id"]
def test_good_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, subscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {
"conn_id": str(test_uuids[1]),
"channels": ["b"],
"channel_configs": {
"a": {"notify_presence": True},
"b": {"notify_presence": True},
},
}
result = subscribe(dummy_request)
assert sorted(result["channels"]) == sorted(["a", "aB", "b"])
assert result["channels_info"]["users"] == [
{"state": {"bar": "baz", "key": "foo"}, "user": "test"}
]
assert "a" in result["channels_info"]["channels"]
assert "b" in result["channels_info"]["channels"]
assert result["channels_info"]["channels"]["a"]["total_connections"] == 1
assert result["channels_info"]["channels"]["a"]["total_users"] == 1
assert result["channels_info"]["channels"]["a"]["history"] == []
assert result["channels_info"]["channels"]["a"]["users"] == ["test"]
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestUnsubscribeViews(object):
def test_bad_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import unsubscribe
dummy_request.json_body = {}
try:
unsubscribe(dummy_request)
except marshmallow.exceptions.ValidationError as exc:
assert list(sorted(exc.messages.keys())) == ["channels", "conn_id"]
def test_good_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, unsubscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB", "aC"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {
"conn_id": str(test_uuids[1]),
"channels": ["aC", "a"],
}
result = unsubscribe(dummy_request)
assert sorted(result["channels"]) == sorted(["aB"])
def test_non_existing_channel(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, unsubscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB", "aC"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {"conn_id": str(test_uuids[1]), "channels": ["d"]}
result = unsubscribe(dummy_request)
assert sorted(result["channels"]) == sorted(["a", "aB", "aC"])
def test_no_channels(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, unsubscribe
dummy_request.json_body = {
"username": "test",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {"conn_id": str(test_uuids[1]), "channels": ["a"]}
result = unsubscribe(dummy_request)
assert len(result["channels"]) == 0
assert result["channels_info"]["users"] == []
assert result["channels_info"]["channels"] == {}
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestInfoView(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import info
dummy_request.json_body = {}
result = info(dummy_request)
assert result["channels"] == {}
assert result["users"] == []
def test_subscribed_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, info
dummy_request.json_body = {
"username": "test1",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz"},
"state_public_keys": ["bar"],
"channels": ["a", "aB"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {
"username": "test2",
"conn_id": test_uuids[2],
"fresh_user_state": {"key": "foo1"},
"user_state": {"bar": "baz1"},
"state_public_keys": ["key"],
"channels": ["a", "c"],
"channel_configs": {"c": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = {}
result = info(dummy_request)
assert sorted(("a", "aB", "c")) == sorted(result["channels"].keys())
assert result["users"]
comp_a = sorted(result["channels"]["a"]["users"])
comp_b = sorted(["test1", "test2"])
assert comp_a == comp_b
assert result["channels"]["a"]["total_users"] == 2
assert result["channels"]["a"]["total_connections"] == 2
assert result["channels"]["c"]["users"] == ["test2"]
assert result["channels"]["c"]["total_users"] == 1
assert result["channels"]["c"]["total_connections"] == 1
assert result["channels"]["aB"]["users"] == ["test1"]
comp_a = sorted(result["users"], key=lambda x: x["user"])
comp_b = sorted(
[
{"state": {"bar": "baz", "key": "foo"}, "user": "test1"},
{"state": {"bar": "baz1", "key": "foo1"}, "user": "test2"},
],
key=lambda x: x["user"],
)
assert comp_a == comp_b
dummy_request.body = "NOTEMPTY"
dummy_request.json_body = {"info": {"channels": ["a"]}}
result = info(dummy_request)
assert "a" in result["channels"]
assert "aB" not in result["channels"]
def test_detailed_json(self, dummy_request, test_uuids):
from channelstream.wsgi_views.server import connect, info, message
dummy_request.json_body = {
"username": "test1",
"conn_id": str(test_uuids[1]),
"fresh_user_state": {"key": "foo"},
"user_state": {"bar": "baz", "private": "p1"},
"state_public_keys": ["bar"],
"channels": ["a", "aB", "c", "D"],
"channel_configs": {"a": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
dummy_request.json_body = [
{
"type": "message",
"user": "test1",
"channel": "a",
"message": {"text": "test"},
}
]
message(dummy_request)
gevent.sleep(0)
dummy_request.body = "value"
dummy_request.json_body = {
"info": {
"exclude_channels": ["c"],
"include_history": False,
"include_users": True,
"return_public_state": True,
"include_connections": True,
}
}
result = info(dummy_request)
assert sorted(result["channels"].keys()) == sorted(["a", "aB", "D"])
assert "private" not in result["users"][0]["state"]
assert len(result["channels"]["a"]["history"]) == 0
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestMessageViews(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import message
server_state = get_state()
dummy_request.json_body = {}
assert server_state.stats["total_unique_messages"] == 0
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
message(dummy_request)
assert excinfo.value.messages == {"_schema": ["Invalid input type."]}
def test_good_json_no_channel(self, dummy_request):
from channelstream.wsgi_views.server import message
server_state = get_state()
channel = Channel("test")
channel.store_history = True
server_state.channels[channel.name] = channel
msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test"},
}
dummy_request.json_body = [msg_payload]
assert server_state.stats["total_unique_messages"] == 0
assert len(channel.history) == 0
message(dummy_request)
# change context
gevent.sleep(0)
assert server_state.stats["total_unique_messages"] == 1
assert len(channel.history) == 1
msg = channel.history[0]
assert msg["uuid"] is not None
assert msg["user"] == msg_payload["user"]
assert msg["message"] == msg_payload["message"]
assert msg["type"] == msg_payload["type"]
assert msg["channel"] == msg_payload["channel"]
assert msg["timestamp"] is not None
def test_catchup_messages(self, dummy_request):
from channelstream.wsgi_views.server import message, connect
server_state = get_state()
dummy_request.json_body = {
"username": "test1",
"channels": ["test"],
"channel_configs": {"test": {"store_history": True, "history_size": 2}},
}
connect(dummy_request)
msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test3"},
}
dummy_request.json_body = [msg_payload]
message(dummy_request)
# Add a pm message addressed to a non-existent user.
wrong_user_msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test1"},
"pm_users": ["test2"],
}
msg_payload = {
"type": "message",
"user": "system",
"channel": "test",
"message": {"text": "test2"},
"pm_users": ["test1"],
}
dummy_request.json_body = [wrong_user_msg_payload, msg_payload]
message(dummy_request)
# change context
gevent.sleep(0)
connection = server_state.users["test1"].connections[0]
messages = connection.get_catchup_messages()
assert len(messages) == 2
assert messages[0]["timestamp"] > connection.last_active
assert messages[0]["message"]["text"] == "test3"
assert messages[1]["timestamp"] > connection.last_active
assert messages[1]["message"]["text"] == "test2"
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestMessageEditViews(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import message
dummy_request.json_body = {}
with pytest.raises(marshmallow.exceptions.ValidationError) as excinfo:
message(dummy_request)
assert excinfo.value.messages == {"_schema": ["Invalid input type."]}
def test_good_json_no_channel(self, dummy_request):
from channelstream.wsgi_views.server import message, messages_patch
server_state = get_state()
channel = Channel("test")
channel.store_history = True
server_state.channels[channel.name] = channel
msg_payload = {"user": "system", "channel": "test", "message": {"text": "test"}}
dummy_request.json_body = [msg_payload]
message(dummy_request)
# change context
gevent.sleep(0)
msg = channel.history[0]
assert msg["message"] == msg_payload["message"]
edit_payload = {
"uuid": msg["uuid"],
"user": "edited_system",
"channel": "test",
"timestamp": "2010-01-01T01:01",
"edited": "2010-01-01T01:02",
"message": {"text": "edited_message"},
}
dummy_request.json_body = [edit_payload]
response = messages_patch(dummy_request)[0]
gevent.sleep(0)
assert msg["user"] == response["user"]
assert msg["message"] == response["message"]
assert msg["edited"] == response["edited"]
assert msg["timestamp"] == response["timestamp"]
frame = channel.frames[0][1]
assert id(frame) == id(msg)
assert frame["user"] == response["user"]
assert frame["message"] == response["message"]
assert frame["edited"] == response["edited"]
assert frame["timestamp"] == response["timestamp"]
class TestMessageDeleteViews(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import messages_delete
dummy_request.json_body = []
result = messages_delete(dummy_request)
assert result == []
def test_good_json_no_channel(self, dummy_request):
from channelstream.wsgi_views.server import message, messages_delete
server_state = get_state()
channel = Channel("test")
channel.store_history = True
server_state.channels[channel.name] = channel
msg_payload = {"user": "system", "channel": "test", "message": {"text": "test"}}
dummy_request.json_body = [msg_payload]
message(dummy_request)
# change context
gevent.sleep(0)
msg = channel.history[0]
assert msg["message"] == msg_payload["message"]
dummy_request.json_body = [{"uuid": str(msg["uuid"]), "channel": "test"}]
response = messages_delete(dummy_request)
gevent.sleep(0)
assert response[0]["uuid"] == msg["uuid"]
assert len(channel.history) == 0
assert len(channel.frames) == 1
assert channel.frames[0][1]["type"] == "message:delete"
@pytest.mark.usefixtures("cleanup_globals", "pyramid_config")
class TestChannelConfigView(object):
def test_empty_json(self, dummy_request):
from channelstream.wsgi_views.server import channel_config
dummy_request.json_body = {}
result = channel_config(dummy_request)
assert result["channels"] == {}
assert result["users"] == []
def test_valid_json(self, dummy_request):
from channelstream.wsgi_views.server import channel_config
dummy_request.json_body = {
"chanx1": {
"notify_presence": True,
"store_history": True,
"history_size": 3,
"broadcast_presence_with_user_lists": True,
"notify_state": True,
"store_frames": False,
}
}
result = channel_config(dummy_request)
channel_settings = result["channels"]["chanx1"]["settings"]
assert channel_settings["notify_presence"] is True
assert channel_settings["store_history"] is True
assert channel_settings["history_size"] == 3
assert channel_settings["broadcast_presence_with_user_lists"] is True
assert channel_settings["notify_state"] is True
assert channel_settings["store_frames"] is False
|
AppEnlight/channelstream
|
tests/tests_views.py
|
Python
|
bsd-3-clause
| 21,292
|
import wx
import wx.glcanvas
import pyglet
import pyglet.gl as gl
import pyglet.gl
import motmot.wxvideo.wxvideo as wxvideo
# XXX TODO:
# check off-by-one error in width/coordinate settings (e.g. glOrtho call)
# allow sharing of OpenGL context between instances
NewImageReadyEvent = wx.NewEventType() # use to trigger GUI thread action from grab thread
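# Minimal adapter that registers a wx.glcanvas.GLCanvas with pyglet's context bookkeeping so pyglet
# can blit into the wx canvas; handles both the pyglet 1.0 and 1.1 current-context APIs.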
class PygWxContext:
_gl_begin = False
_workaround_unpack_row_length = False
def __init__(self, glcanvas ):
# glcanvas is instance of wx.glcanvas.GLCanvas
self.glcanvas = glcanvas
pyglet.gl._contexts.append( self )
def SetCurrent(self):
self.glcanvas.GetParent().Show()
if pyglet.version[:3] >= '1.1':
# tested on 1.1beta1
pyglet.gl.current_context = self
else:
# tested on 1.0
pyglet.gl._current_context = self
self.glcanvas.SetCurrent()
class DynamicImageCanvas(wx.glcanvas.GLCanvas):
"""Display image data to OpenGL using as few resources as possible"""
def _setcurrent(self,hack_ok=True):
self.wxcontext.SetCurrent()
def __init__(self, *args, **kw):
attribList = kw.get('attribList',None)
if attribList is None:
attribList = [
wx.glcanvas.WX_GL_RGBA,
wx.glcanvas.WX_GL_DOUBLEBUFFER, # force double buffering
wx.glcanvas.WX_GL_DEPTH_SIZE, 16,]
kw['attribList']=attribList
super(DynamicImageCanvas, self).__init__(*args,**kw)
self.init = False
self.Connect( -1, -1, NewImageReadyEvent, self.OnDraw )
self.flip_lr = False
self.fullcanvas = False
self.rotate_180 = False
wx.EVT_ERASE_BACKGROUND(self, self.OnEraseBackground)
wx.EVT_SIZE(self, self.OnSize)
wx.EVT_PAINT(self, self.OnPaint)
self._pygimage = None
self.wxcontext = PygWxContext( self )
self.wxcontext.SetCurrent()
def OnEraseBackground(self, event):
pass # Do nothing, to avoid flashing on MSW. (inherited from wxDemo)
def set_flip_LR(self,value):
self.flip_lr = value
self._reset_projection()
set_flip_LR.__doc__ = wxvideo.DynamicImageCanvas.set_flip_LR.__doc__
def set_fullcanvas(self,value):
self.fullcanvas = value
self._reset_projection()
def set_rotate_180(self,value):
self.rotate_180 = value
self._reset_projection()
set_rotate_180.__doc__ = wxvideo.DynamicImageCanvas.set_rotate_180.__doc__
def OnSize(self, event):
size = self.GetClientSize()
if self.GetContext():
self.wxcontext.SetCurrent()
gl.glViewport(0, 0, size.width, size.height)
event.Skip()
def OnPaint(self, event):
dc = wx.PaintDC(self)
self.wxcontext.SetCurrent()
if not self.init:
self.InitGL()
self.init = True
self.OnDraw()
def InitGL(self):
self.wxcontext.SetCurrent()
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
self._reset_projection()
self.extra_initgl()
def extra_initgl(self):
pass
def _reset_projection(self):
if self.fullcanvas:
if self._pygimage is None:
return
width, height = self._pygimage.width, self._pygimage.height
else:
size = self.GetClientSize()
width, height = size.width, size.height
b = 0
t = height
if self.flip_lr:
l = width
r = 0
else:
l = 0
r = width
if self.rotate_180:
l,r=r,l
b,t=t,b
if width==0 or height==0:
# prevent OpenGL error
return
self.wxcontext.SetCurrent()
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
gl.glOrtho(l,r,b,t, -1, 1)
gl.glMatrixMode(gl.GL_MODELVIEW)
def new_image(self, image):
self._pygimage = image
self._reset_projection() # always trigger re-calculation of projection - necessary if self.fullcanvas
def update_image(self, image):
"""update the image to be displayed"""
self.wxcontext.SetCurrent()
self._pygimage.view_new_array( image )
event = wx.CommandEvent(NewImageReadyEvent)
event.SetEventObject(self)
wx.PostEvent(self, event)
def core_draw(self):
if self._pygimage is not None:
self._pygimage.blit(0, 0, 0)
def OnDraw(self,event=None):
self.wxcontext.SetCurrent()
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
self.core_draw()
self.SwapBuffers()
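# Hedged usage sketch (added comment, not part of the original module): a
# consumer embeds DynamicImageCanvas in an existing wx frame, registers a
# pyglet-compatible image once with new_image(), then pushes numpy frames from
# the grab thread with update_image(). ArrayInterfaceImage below comes from the
# separate pygarrayimage package and is an assumption, not provided here.
#
#     import numpy as np
#     from pygarrayimage.arrayimage import ArrayInterfaceImage
#
#     canvas = DynamicImageCanvas(frame, -1)        # frame: an existing wx.Frame
#     arr = np.zeros((480, 640), dtype=np.uint8)
#     canvas.new_image(ArrayInterfaceImage(arr))    # register the image once
#     canvas.update_image(arr)                      # per frame, from the grab thread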
|
motmot/wxglvideo
|
motmot/wxglvideo/wxglvideo.py
|
Python
|
bsd-3-clause
| 4,761
|
# Standard library imports
import logging
# Third party imports
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.admin.utils import NestedObjects
from django.urls import reverse
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from github import Github
# Local application/library imports
from dojo.forms import GITHUBForm, DeleteGITHUBConfForm
from dojo.models import GITHUB_Conf
from dojo.utils import add_breadcrumb
logger = logging.getLogger(__name__)
@csrf_exempt
def webhook(request):
return HttpResponse('')
@user_passes_test(lambda u: u.is_staff)
def express_new_github(request):
return HttpResponse('')
@user_passes_test(lambda u: u.is_staff)
def new_github(request):
if request.method == 'POST':
gform = GITHUBForm(request.POST, instance=GITHUB_Conf())
if gform.is_valid():
try:
api_key = gform.cleaned_data.get('api_key')
g = Github(api_key)
user = g.get_user()
logger.debug('Using user ' + user.login)
new_j = gform.save(commit=False)
new_j.api_key = api_key
new_j.save()
messages.add_message(request,
messages.SUCCESS,
'Github Configuration Successfully Created.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('github', ))
except Exception as info:
logger.error(info)
messages.add_message(request,
messages.ERROR,
'Unable to authenticate on github.',
extra_tags='alert-danger')
return HttpResponseRedirect(reverse('github', ))
else:
gform = GITHUBForm()
add_breadcrumb(title="New Github Configuration", top_level=False, request=request)
return render(request, 'dojo/new_github.html',
{'gform': gform})
@user_passes_test(lambda u: u.is_staff)
def github(request):
confs = GITHUB_Conf.objects.all()
add_breadcrumb(title="Github List", top_level=not len(request.GET), request=request)
return render(request,
'dojo/github.html',
{'confs': confs,
})
@user_passes_test(lambda u: u.is_staff)
def delete_github(request, tid):
github_instance = get_object_or_404(GITHUB_Conf, pk=tid)
# eng = test.engagement
# TODO Make Form
form = DeleteGITHUBConfForm(instance=github_instance)
if request.method == 'POST':
if 'id' in request.POST and str(github_instance.id) == request.POST['id']:
form = DeleteGITHUBConfForm(request.POST, instance=github_instance)
if form.is_valid():
github_instance.delete()
messages.add_message(request,
messages.SUCCESS,
'Github Conf and relationships removed.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('github'))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([github_instance])
rels = collector.nested()
add_breadcrumb(title="Delete", top_level=False, request=request)
return render(request, 'dojo/delete_github.html',
{'inst': github_instance,
'form': form,
'rels': rels,
'deletable_objects': rels,
})
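# Hedged sketch (added comment, not from this file): one way these views could
# be wired into a URLconf. The concrete paths below are illustrative
# assumptions; only the route name 'github' is implied by the reverse() calls
# above.
#
#     from django.urls import path
#     from dojo.github_issue_link import views
#
#     urlpatterns = [
#         path('github', views.github, name='github'),
#         path('github/add', views.new_github, name='add_github'),
#         path('github/<int:tid>/delete', views.delete_github, name='delete_github'),
#         path('github/webhook', views.webhook, name='github_webhook'),
#     ]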
|
rackerlabs/django-DefectDojo
|
dojo/github_issue_link/views.py
|
Python
|
bsd-3-clause
| 3,843
|
import logging
from django.conf import settings
from django.db import models
from mkt.site.mail import send_mail
from mkt.site.models import ModelBase
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.websites.models import Website
log = logging.getLogger('z.abuse')
class AbuseReport(ModelBase):
# NULL if the reporter is anonymous.
reporter = models.ForeignKey(UserProfile, null=True,
blank=True, related_name='abuse_reported')
ip_address = models.CharField(max_length=255, default='0.0.0.0')
    # An abuse report can be for an addon, a user, or a website. Exactly one
    # of these should be non-null.
addon = models.ForeignKey(Webapp, null=True, related_name='abuse_reports')
user = models.ForeignKey(UserProfile, null=True,
related_name='abuse_reports')
website = models.ForeignKey(Website, null=True,
related_name='abuse_reports')
message = models.TextField()
read = models.BooleanField(default=False)
class Meta:
db_table = 'abuse_reports'
@property
def object(self):
return self.addon or self.user or self.website
def send(self):
obj = self.object
if self.reporter:
user_name = '%s (%s)' % (self.reporter.name, self.reporter.email)
else:
user_name = 'An anonymous coward'
if self.addon:
type_ = 'App'
elif self.user:
type_ = 'User'
else:
type_ = 'Website'
subject = u'[%s] Abuse Report for %s' % (type_, obj.name)
msg = u'%s reported abuse for %s (%s%s).\n\n%s' % (
user_name, obj.name, settings.SITE_URL, obj.get_url_path(),
self.message)
send_mail(subject, msg, recipient_list=(settings.ABUSE_EMAIL,))
@classmethod
def recent_high_abuse_reports(cls, threshold, period, addon_id=None):
"""
Returns AbuseReport objects for the given threshold over the given time
period (in days). Filters by addon_id if provided.
E.g. Greater than 5 abuse reports for all webapps in the past 7 days.
"""
abuse_sql = ['''
SELECT `abuse_reports`.*,
COUNT(`abuse_reports`.`addon_id`) AS `num_reports`
FROM `abuse_reports`
INNER JOIN `addons` ON (`abuse_reports`.`addon_id` = `addons`.`id`)
WHERE `abuse_reports`.`created` >= %s ''']
params = [period]
if addon_id:
abuse_sql.append('AND `addons`.`id` = %s ')
params.append(addon_id)
abuse_sql.append('GROUP BY addon_id HAVING num_reports > %s')
params.append(threshold)
return list(cls.objects.raw(''.join(abuse_sql), params))
def send_abuse_report(request, obj, message):
report = AbuseReport(ip_address=request.META.get('REMOTE_ADDR'),
message=message)
if request.user.is_authenticated():
report.reporter = request.user
if isinstance(obj, Webapp):
report.addon = obj
elif isinstance(obj, UserProfile):
report.user = obj
elif isinstance(obj, Website):
report.website = obj
report.save()
report.send()
# Trigger addon high abuse report detection task.
if isinstance(obj, Webapp):
from mkt.webapps.tasks import find_abuse_escalations
find_abuse_escalations.delay(obj.id)
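# Hedged usage sketch (added comment, not from this file): a view would
# typically call send_abuse_report() with the request, the reported object,
# and the message from a validated form. The form name below is hypothetical.
#
#     def report_app_abuse(request, app):
#         form = AbuseForm(request.POST)   # hypothetical form with a 'text' field
#         if form.is_valid():
#             send_abuse_report(request, app, form.cleaned_data['text'])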
|
eviljeff/zamboni
|
mkt/abuse/models.py
|
Python
|
bsd-3-clause
| 3,467
|
from itertools import product
from inspect import signature
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from ._core import VectorPlotter, variable_type, categorical_order
from . import utils
from .utils import _check_argument, adjust_legend_subtitles, _draw_figure
from .palettes import color_palette, blend_palette
from ._decorators import _deprecate_positional_args
from ._docstrings import (
DocstringComponents,
_core_docs,
)
__all__ = ["FacetGrid", "PairGrid", "JointGrid", "pairplot", "jointplot"]
_param_docs = DocstringComponents.from_nested_components(
core=_core_docs["params"],
)
class _BaseGrid:
"""Base class for grids of subplots."""
def set(self, **kwargs):
"""Set attributes on each subplot Axes."""
for ax in self.axes.flat:
if ax is not None: # Handle removed axes
ax.set(**kwargs)
return self
@property
def fig(self):
"""DEPRECATED: prefer the `figure` property."""
# Grid.figure is preferred because it matches the Axes attribute name.
        # But as the maintenance burden of having this property is minimal,
# let's be slow about formally deprecating it. For now just note its deprecation
# in the docstring; add a warning in version 0.13, and eventually remove it.
return self._figure
@property
def figure(self):
"""Access the :class:`matplotlib.figure.Figure` object underlying the grid."""
return self._figure
def savefig(self, *args, **kwargs):
"""
Save an image of the plot.
This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches="tight"
by default. Parameters are passed through to the matplotlib function.
"""
kwargs = kwargs.copy()
kwargs.setdefault("bbox_inches", "tight")
self.figure.savefig(*args, **kwargs)
class Grid(_BaseGrid):
"""A grid that can have multiple subplots and an external legend."""
_margin_titles = False
_legend_out = True
def __init__(self):
self._tight_layout_rect = [0, 0, 1, 1]
self._tight_layout_pad = None
# This attribute is set externally and is a hack to handle newer functions that
# don't add proxy artists onto the Axes. We need an overall cleaner approach.
self._extract_legend_handles = False
def tight_layout(self, *args, **kwargs):
"""Call fig.tight_layout within rect that exclude the legend."""
kwargs = kwargs.copy()
kwargs.setdefault("rect", self._tight_layout_rect)
if self._tight_layout_pad is not None:
kwargs.setdefault("pad", self._tight_layout_pad)
self._figure.tight_layout(*args, **kwargs)
def add_legend(self, legend_data=None, title=None, label_order=None,
adjust_subtitles=False, **kwargs):
"""Draw a legend, maybe placing it outside axes and resizing the figure.
Parameters
----------
legend_data : dict
Dictionary mapping label names (or two-element tuples where the
second element is a label name) to matplotlib artist handles. The
default reads from ``self._legend_data``.
title : string
Title for the legend. The default reads from ``self._hue_var``.
label_order : list of labels
The order that the legend entries should appear in. The default
reads from ``self.hue_names``.
adjust_subtitles : bool
If True, modify entries with invisible artists to left-align
the labels and set the font size to that of a title.
kwargs : key, value pairings
Other keyword arguments are passed to the underlying legend methods
on the Figure or Axes object.
Returns
-------
self : Grid instance
Returns self for easy chaining.
"""
# Find the data for the legend
if legend_data is None:
legend_data = self._legend_data
if label_order is None:
if self.hue_names is None:
label_order = list(legend_data.keys())
else:
label_order = list(map(utils.to_utf8, self.hue_names))
blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)
handles = [legend_data.get(l, blank_handle) for l in label_order]
title = self._hue_var if title is None else title
title_size = mpl.rcParams["legend.title_fontsize"]
# Unpack nested labels from a hierarchical legend
labels = []
for entry in label_order:
if isinstance(entry, tuple):
_, label = entry
else:
label = entry
labels.append(label)
# Set default legend kwargs
kwargs.setdefault("scatterpoints", 1)
if self._legend_out:
kwargs.setdefault("frameon", False)
kwargs.setdefault("loc", "center right")
# Draw a full-figure legend outside the grid
figlegend = self._figure.legend(handles, labels, **kwargs)
self._legend = figlegend
figlegend.set_title(title, prop={"size": title_size})
if adjust_subtitles:
adjust_legend_subtitles(figlegend)
# Draw the plot to set the bounding boxes correctly
_draw_figure(self._figure)
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self._figure.dpi
fig_width, fig_height = self._figure.get_size_inches()
self._figure.set_size_inches(fig_width + legend_width, fig_height)
# Draw the plot again to get the new transformations
_draw_figure(self._figure)
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self._figure.dpi
space_needed = legend_width / (fig_width + legend_width)
margin = .04 if self._margin_titles else .01
self._space_needed = margin + space_needed
right = 1 - self._space_needed
# Place the subplot axes to give space for the legend
self._figure.subplots_adjust(right=right)
self._tight_layout_rect[2] = right
else:
# Draw a legend in the first axis
ax = self.axes.flat[0]
kwargs.setdefault("loc", "best")
leg = ax.legend(handles, labels, **kwargs)
leg.set_title(title, prop={"size": title_size})
self._legend = leg
if adjust_subtitles:
adjust_legend_subtitles(leg)
return self
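    # Hedged usage sketch (added comment): with legend_out=True (the FacetGrid
    # default), add_legend() widens the figure and places the legend on the
    # center right. The `tips` DataFrame is an illustrative assumption.
    #
    #     g = FacetGrid(tips, col="time", hue="sex")
    #     g.map(plt.scatter, "total_bill", "tip")
    #     g.add_legend()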
def _update_legend_data(self, ax):
"""Extract the legend data from an axes object and save it."""
data = {}
# Get data directly from the legend, which is necessary
# for newer functions that don't add labeled proxy artists
if ax.legend_ is not None and self._extract_legend_handles:
handles = ax.legend_.legendHandles
labels = [t.get_text() for t in ax.legend_.texts]
data.update({l: h for h, l in zip(handles, labels)})
handles, labels = ax.get_legend_handles_labels()
data.update({l: h for h, l in zip(handles, labels)})
self._legend_data.update(data)
# Now clear the legend
ax.legend_ = None
def _get_palette(self, data, hue, hue_order, palette):
"""Get a list of colors for the hue variable."""
if hue is None:
palette = color_palette(n_colors=1)
else:
hue_names = categorical_order(data[hue], hue_order)
n_colors = len(hue_names)
# By default use either the current color palette or HUSL
if palette is None:
current_palette = utils.get_color_cycle()
if n_colors > len(current_palette):
colors = color_palette("husl", n_colors)
else:
colors = color_palette(n_colors=n_colors)
# Allow for palette to map from hue variable names
elif isinstance(palette, dict):
color_names = [palette[h] for h in hue_names]
colors = color_palette(color_names, n_colors)
# Otherwise act as if we just got a list of colors
else:
colors = color_palette(palette, n_colors)
palette = color_palette(colors, n_colors)
return palette
@property
def legend(self):
"""The :class:`matplotlib.legend.Legend` object, if present."""
try:
return self._legend
except AttributeError:
return None
_facet_docs = dict(
data=dedent("""\
data : DataFrame
Tidy ("long-form") dataframe where each column is a variable and each
row is an observation.\
"""),
rowcol=dedent("""\
row, col : vectors or keys in ``data``
Variables that define subsets to plot on different facets.\
"""),
rowcol_order=dedent("""\
{row,col}_order : vector of strings
Specify the order in which levels of the ``row`` and/or ``col`` variables
appear in the grid of subplots.\
"""),
col_wrap=dedent("""\
col_wrap : int
"Wrap" the column variable at this width, so that the column facets
span multiple rows. Incompatible with a ``row`` facet.\
"""),
share_xy=dedent("""\
share{x,y} : bool, 'col', or 'row' optional
If true, the facets will share y axes across columns and/or x axes
across rows.\
"""),
height=dedent("""\
height : scalar
Height (in inches) of each facet. See also: ``aspect``.\
"""),
aspect=dedent("""\
aspect : scalar
Aspect ratio of each facet, so that ``aspect * height`` gives the width
of each facet in inches.\
"""),
palette=dedent("""\
palette : palette name, list, or dict
Colors to use for the different levels of the ``hue`` variable. Should
be something that can be interpreted by :func:`color_palette`, or a
dictionary mapping hue levels to matplotlib colors.\
"""),
legend_out=dedent("""\
legend_out : bool
If ``True``, the figure size will be extended, and the legend will be
drawn outside the plot on the center right.\
"""),
margin_titles=dedent("""\
margin_titles : bool
If ``True``, the titles for the row variable are drawn to the right of
the last column. This option is experimental and may not work in all
cases.\
"""),
facet_kws=dedent("""\
facet_kws : dict
Additional parameters passed to :class:`FacetGrid`.
"""),
)
class FacetGrid(Grid):
"""Multi-plot grid for plotting conditional relationships."""
@_deprecate_positional_args
def __init__(
self, data, *,
row=None, col=None, hue=None, col_wrap=None,
sharex=True, sharey=True, height=3, aspect=1, palette=None,
row_order=None, col_order=None, hue_order=None, hue_kws=None,
dropna=False, legend_out=True, despine=True,
margin_titles=False, xlim=None, ylim=None, subplot_kws=None,
gridspec_kws=None, size=None
):
super(FacetGrid, self).__init__()
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Determine the hue facet layer information
hue_var = hue
if hue is None:
hue_names = None
else:
hue_names = categorical_order(data[hue], hue_order)
colors = self._get_palette(data, hue, hue_order, palette)
# Set up the lists of names for the row and column facet variables
if row is None:
row_names = []
else:
row_names = categorical_order(data[row], row_order)
if col is None:
col_names = []
else:
col_names = categorical_order(data[col], col_order)
# Additional dict of kwarg -> list of values for mapping the hue var
hue_kws = hue_kws if hue_kws is not None else {}
# Make a boolean mask that is True anywhere there is an NA
# value in one of the faceting variables, but only if dropna is True
none_na = np.zeros(len(data), bool)
if dropna:
row_na = none_na if row is None else data[row].isnull()
col_na = none_na if col is None else data[col].isnull()
hue_na = none_na if hue is None else data[hue].isnull()
not_na = ~(row_na | col_na | hue_na)
else:
not_na = ~none_na
# Compute the grid shape
ncol = 1 if col is None else len(col_names)
nrow = 1 if row is None else len(row_names)
self._n_facets = ncol * nrow
self._col_wrap = col_wrap
if col_wrap is not None:
if row is not None:
err = "Cannot use `row` and `col_wrap` together."
raise ValueError(err)
ncol = col_wrap
nrow = int(np.ceil(len(col_names) / col_wrap))
self._ncol = ncol
self._nrow = nrow
# Calculate the base figure size
# This can get stretched later by a legend
# TODO this doesn't account for axis labels
figsize = (ncol * height * aspect, nrow * height)
# Validate some inputs
if col_wrap is not None:
margin_titles = False
# Build the subplot keyword dictionary
subplot_kws = {} if subplot_kws is None else subplot_kws.copy()
gridspec_kws = {} if gridspec_kws is None else gridspec_kws.copy()
if xlim is not None:
subplot_kws["xlim"] = xlim
if ylim is not None:
subplot_kws["ylim"] = ylim
# --- Initialize the subplot grid
# Disable autolayout so legend_out works properly
with mpl.rc_context({"figure.autolayout": False}):
fig = plt.figure(figsize=figsize)
if col_wrap is None:
kwargs = dict(squeeze=False,
sharex=sharex, sharey=sharey,
subplot_kw=subplot_kws,
gridspec_kw=gridspec_kws)
axes = fig.subplots(nrow, ncol, **kwargs)
if col is None and row is None:
axes_dict = {}
elif col is None:
axes_dict = dict(zip(row_names, axes.flat))
elif row is None:
axes_dict = dict(zip(col_names, axes.flat))
else:
facet_product = product(row_names, col_names)
axes_dict = dict(zip(facet_product, axes.flat))
else:
# If wrapping the col variable we need to make the grid ourselves
if gridspec_kws:
warnings.warn("`gridspec_kws` ignored when using `col_wrap`")
n_axes = len(col_names)
axes = np.empty(n_axes, object)
axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)
if sharex:
subplot_kws["sharex"] = axes[0]
if sharey:
subplot_kws["sharey"] = axes[0]
for i in range(1, n_axes):
axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)
axes_dict = dict(zip(col_names, axes))
# --- Set up the class attributes
# Attributes that are part of the public API but accessed through
# a property so that Sphinx adds them to the auto class doc
self._figure = fig
self._axes = axes
self._axes_dict = axes_dict
self._legend = None
# Public attributes that aren't explicitly documented
# (It's not obvious that having them be public was a good idea)
self.data = data
self.row_names = row_names
self.col_names = col_names
self.hue_names = hue_names
self.hue_kws = hue_kws
# Next the private variables
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._margin_titles = margin_titles
self._margin_titles_texts = []
self._col_wrap = col_wrap
self._hue_var = hue_var
self._colors = colors
self._legend_out = legend_out
self._legend_data = {}
self._x_var = None
self._y_var = None
self._sharex = sharex
self._sharey = sharey
self._dropna = dropna
self._not_na = not_na
# --- Make the axes look good
self.set_titles()
self.tight_layout()
if despine:
self.despine()
if sharex in [True, 'col']:
for ax in self._not_bottom_axes:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
if sharey in [True, 'row']:
for ax in self._not_left_axes:
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
__init__.__doc__ = dedent("""\
Initialize the matplotlib figure and FacetGrid object.
This class maps a dataset onto multiple axes arrayed in a grid of rows
and columns that correspond to *levels* of variables in the dataset.
The plots it produces are often called "lattice", "trellis", or
"small-multiple" graphics.
It can also represent levels of a third variable with the ``hue``
parameter, which plots different subsets of data in different colors.
This uses color to resolve elements on a third dimension, but only
draws subsets on top of each other and will not tailor the ``hue``
parameter for the specific visualization the way that axes-level
functions that accept ``hue`` will.
The basic workflow is to initialize the :class:`FacetGrid` object with
the dataset and the variables that are used to structure the grid. Then
one or more plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map` or :meth:`FacetGrid.map_dataframe`. Finally, the
plot can be tweaked with other methods to do things like change the
axis labels, use different ticks, or add a legend. See the detailed
code examples below for more information.
.. warning::
When using seaborn functions that infer semantic mappings from a
dataset, care must be taken to synchronize those mappings across
            facets (e.g., by defining the ``hue`` mapping with a palette dict or
setting the data type of the variables to ``category``). In most cases,
it will be better to use a figure-level function (e.g. :func:`relplot`
or :func:`catplot`) than to use :class:`FacetGrid` directly.
See the :ref:`tutorial <grid_tutorial>` for more information.
Parameters
----------
{data}
row, col, hue : strings
Variables that define subsets of the data, which will be drawn on
separate facets in the grid. See the ``{{var}}_order`` parameters to
control the order of levels of this variable.
{col_wrap}
{share_xy}
{height}
{aspect}
{palette}
{{row,col,hue}}_order : lists
Order for the levels of the faceting variables. By default, this
will be the order that the levels appear in ``data`` or, if the
variables are pandas categoricals, the category order.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
{legend_out}
despine : boolean
Remove the top and right spines from the plots.
{margin_titles}
{{x, y}}lim: tuples
Limits for each of the axes on each facet (only relevant when
share{{x, y}} is True).
subplot_kws : dict
Dictionary of keyword arguments passed to matplotlib subplot(s)
methods.
gridspec_kws : dict
Dictionary of keyword arguments passed to
:class:`matplotlib.gridspec.GridSpec`
(via :meth:`matplotlib.figure.Figure.subplots`).
Ignored if ``col_wrap`` is not ``None``.
See Also
--------
PairGrid : Subplot grid for plotting pairwise relationships
relplot : Combine a relational plot and a :class:`FacetGrid`
displot : Combine a distribution plot and a :class:`FacetGrid`
catplot : Combine a categorical plot and a :class:`FacetGrid`
lmplot : Combine a regression plot and a :class:`FacetGrid`
Examples
--------
.. note::
These examples use seaborn functions to demonstrate some of the
advanced features of the class, but in most cases you will want
            to use figure-level functions (e.g. :func:`displot`, :func:`relplot`)
to make the plots shown here.
.. include:: ../docstrings/FacetGrid.rst
""").format(**_facet_docs)
def facet_data(self):
"""Generator for name indices and data subsets for each facet.
Yields
------
(i, j, k), data_ijk : tuple of ints, DataFrame
The ints provide an index into the {row, col, hue}_names attribute,
and the dataframe contains a subset of the full data corresponding
to each facet. The generator yields subsets that correspond with
the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`
is None.
"""
data = self.data
# Construct masks for the row variable
if self.row_names:
row_masks = [data[self._row_var] == n for n in self.row_names]
else:
row_masks = [np.repeat(True, len(self.data))]
# Construct masks for the column variable
if self.col_names:
col_masks = [data[self._col_var] == n for n in self.col_names]
else:
col_masks = [np.repeat(True, len(self.data))]
# Construct masks for the hue variable
if self.hue_names:
hue_masks = [data[self._hue_var] == n for n in self.hue_names]
else:
hue_masks = [np.repeat(True, len(self.data))]
# Here is the main generator loop
for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),
enumerate(col_masks),
enumerate(hue_masks)):
data_ijk = data[row & col & hue & self._not_na]
yield (i, j, k), data_ijk
def map(self, func, *args, **kwargs):
"""Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# How we use the function depends on where it comes from
func_module = str(getattr(func, "__module__", ""))
# Check for categorical plots without order information
if func_module == "seaborn.categorical":
if "order" not in kwargs:
warning = ("Using the {} function without specifying "
"`order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
if len(args) == 3 and "hue_order" not in kwargs:
warning = ("Using the {} function without specifying "
"`hue_order` is likely to produce an incorrect "
"plot.".format(func.__name__))
warnings.warn(warning)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not func_module.startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = utils.to_utf8(self.hue_names[hue_k])
# Get the actual data we are going to plot with
plot_data = data_ijk[list(args)]
if self._dropna:
plot_data = plot_data.dropna()
plot_args = [v for k, v in plot_data.iteritems()]
# Some matplotlib functions don't handle pandas objects correctly
if func_module.startswith("matplotlib"):
plot_args = [v.values for v in plot_args]
# Draw the plot
self._facet_plot(func, ax, plot_args, kwargs)
# Finalize the annotations and layout
self._finalize_grid(args[:2])
return self
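    # Hedged usage sketch (added comment): the minimal map() workflow described
    # in the class docstring, using an assumed long-form `tips` DataFrame.
    #
    #     g = FacetGrid(tips, row="sex", col="time", margin_titles=True)
    #     g.map(plt.hist, "total_bill", bins=20)
    #     g.set_axis_labels("Total bill ($)", "Count")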
def map_dataframe(self, func, *args, **kwargs):
"""Like ``.map`` but passes args as strings and inserts data in kwargs.
This method is suitable for plotting with functions that accept a
long-form DataFrame as a `data` keyword argument and access the
data in that DataFrame using string variable names.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. Unlike
the `map` method, a function used here must "understand" Pandas
objects. It also must plot to the currently active matplotlib Axes
and take a `color` keyword argument. If faceting on the `hue`
dimension, it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : object
Returns self.
"""
# If color was a keyword argument, grab it here
kw_color = kwargs.pop("color", None)
# Iterate over the data subsets
for (row_i, col_j, hue_k), data_ijk in self.facet_data():
# If this subset is null, move on
if not data_ijk.values.size:
continue
# Get the current axis
modify_state = not str(func.__module__).startswith("seaborn")
ax = self.facet_axis(row_i, col_j, modify_state)
# Decide what color to plot with
kwargs["color"] = self._facet_color(hue_k, kw_color)
# Insert the other hue aesthetics if appropriate
for kw, val_list in self.hue_kws.items():
kwargs[kw] = val_list[hue_k]
# Insert a label in the keyword arguments for the legend
if self._hue_var is not None:
kwargs["label"] = self.hue_names[hue_k]
# Stick the facet dataframe into the kwargs
if self._dropna:
data_ijk = data_ijk.dropna()
kwargs["data"] = data_ijk
# Draw the plot
self._facet_plot(func, ax, args, kwargs)
# For axis labels, prefer to use positional args for backcompat
# but also extract the x/y kwargs and use if no corresponding arg
axis_labels = [kwargs.get("x", None), kwargs.get("y", None)]
for i, val in enumerate(args[:2]):
axis_labels[i] = val
self._finalize_grid(axis_labels)
return self
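    # Hedged usage sketch (added comment): map_dataframe passes each facet's
    # subset via a `data=` kwarg, so variables are named as strings. The
    # `sns.scatterplot` reference assumes seaborn is imported as `sns`.
    #
    #     g = FacetGrid(tips, col="smoker")
    #     g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")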
def _facet_color(self, hue_index, kw_color):
color = self._colors[hue_index]
if kw_color is not None:
return kw_color
elif color is not None:
return color
def _facet_plot(self, func, ax, plot_args, plot_kwargs):
# Draw the plot
if str(func.__module__).startswith("seaborn"):
plot_kwargs = plot_kwargs.copy()
semantics = ["x", "y", "hue", "size", "style"]
for key, val in zip(semantics, plot_args):
plot_kwargs[key] = val
plot_args = []
plot_kwargs["ax"] = ax
func(*plot_args, **plot_kwargs)
# Sort out the supporting information
self._update_legend_data(ax)
def _finalize_grid(self, axlabels):
"""Finalize the annotations and layout."""
self.set_axis_labels(*axlabels)
self.tight_layout()
def facet_axis(self, row_i, col_j, modify_state=True):
"""Make the axis identified by these indices active and return it."""
# Calculate the actual indices of the axes to plot on
if self._col_wrap is not None:
ax = self.axes.flat[col_j]
else:
ax = self.axes[row_i, col_j]
# Get a reference to the axes object we want, and make it active
if modify_state:
plt.sca(ax)
return ax
def despine(self, **kwargs):
"""Remove axis spines from the facets."""
utils.despine(self._figure, **kwargs)
return self
def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
self._x_var = x_var
self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)
if y_var is not None:
self._y_var = y_var
self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)
return self
def set_xlabels(self, label=None, clear_inner=True, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = self._x_var
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
if clear_inner:
for ax in self._not_bottom_axes:
ax.set_xlabel("")
return self
def set_ylabels(self, label=None, clear_inner=True, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = self._y_var
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
if clear_inner:
for ax in self._not_left_axes:
ax.set_ylabel("")
return self
def set_xticklabels(self, labels=None, step=None, **kwargs):
"""Set x axis tick labels of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_xticks()
ax.set_xticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_xticklabels()]
if step is not None:
xticks = ax.get_xticks()[::step]
curr_labels = curr_labels[::step]
ax.set_xticks(xticks)
ax.set_xticklabels(curr_labels, **kwargs)
else:
ax.set_xticklabels(labels, **kwargs)
return self
def set_yticklabels(self, labels=None, **kwargs):
"""Set y axis tick labels on the left column of the grid."""
for ax in self.axes.flat:
curr_ticks = ax.get_yticks()
ax.set_yticks(curr_ticks)
if labels is None:
curr_labels = [l.get_text() for l in ax.get_yticklabels()]
ax.set_yticklabels(curr_labels, **kwargs)
else:
ax.set_yticklabels(labels, **kwargs)
return self
def set_titles(self, template=None, row_template=None, col_template=None,
**kwargs):
"""Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for all titles with the formatting keys {col_var} and
{col_name} (if using a `col` faceting variable) and/or {row_var}
and {row_name} (if using a `row` faceting variable).
row_template:
Template for the row variable when titles are drawn on the grid
margins. Must have {row_var} and {row_name} formatting keys.
col_template:
            Template for the column variable when titles are drawn on the grid
margins. Must have {col_var} and {col_name} formatting keys.
Returns
-------
self: object
Returns self.
"""
args = dict(row_var=self._row_var, col_var=self._col_var)
kwargs["size"] = kwargs.pop("size", mpl.rcParams["axes.labelsize"])
# Establish default templates
if row_template is None:
row_template = "{row_var} = {row_name}"
if col_template is None:
col_template = "{col_var} = {col_name}"
if template is None:
if self._row_var is None:
template = col_template
elif self._col_var is None:
template = row_template
else:
template = " | ".join([row_template, col_template])
row_template = utils.to_utf8(row_template)
col_template = utils.to_utf8(col_template)
template = utils.to_utf8(template)
if self._margin_titles:
# Remove any existing title texts
for text in self._margin_titles_texts:
text.remove()
self._margin_titles_texts = []
if self.row_names is not None:
# Draw the row titles on the right edge of the grid
for i, row_name in enumerate(self.row_names):
ax = self.axes[i, -1]
args.update(dict(row_name=row_name))
title = row_template.format(**args)
text = ax.annotate(
title, xy=(1.02, .5), xycoords="axes fraction",
rotation=270, ha="left", va="center",
**kwargs
)
self._margin_titles_texts.append(text)
if self.col_names is not None:
# Draw the column titles as normal titles
for j, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = col_template.format(**args)
self.axes[0, j].set_title(title, **kwargs)
return self
# Otherwise title each facet with all the necessary information
if (self._row_var is not None) and (self._col_var is not None):
for i, row_name in enumerate(self.row_names):
for j, col_name in enumerate(self.col_names):
args.update(dict(row_name=row_name, col_name=col_name))
title = template.format(**args)
self.axes[i, j].set_title(title, **kwargs)
elif self.row_names is not None and len(self.row_names):
for i, row_name in enumerate(self.row_names):
args.update(dict(row_name=row_name))
title = template.format(**args)
self.axes[i, 0].set_title(title, **kwargs)
elif self.col_names is not None and len(self.col_names):
for i, col_name in enumerate(self.col_names):
args.update(dict(col_name=col_name))
title = template.format(**args)
# Index the flat array so col_wrap works
self.axes.flat[i].set_title(title, **kwargs)
return self
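    # Hedged usage sketch (added comment): per-facet titles can be customized
    # with templates, e.g.
    #
    #     g.set_titles(col_template="{col_name} diners", row_template="{row_name}")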
def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):
"""Add a reference line(s) to each facet.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s). Pass ``color=None`` to
use ``hue`` mapping.
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`FacetGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
self.map(plt.axvline, x=x, **line_kws)
if y is not None:
            self.map(plt.axhline, y=y, **line_kws)
        return self
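    # Hedged usage sketch (added comment): draw a horizontal reference line at
    # the overall median on every facet.
    #
    #     g = FacetGrid(tips, col="time")
    #     g.map(plt.scatter, "total_bill", "tip")
    #     g.refline(y=tips["tip"].median())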
# ------ Properties that are part of the public API and documented by Sphinx
@property
def axes(self):
"""An array of the :class:`matplotlib.axes.Axes` objects in the grid."""
return self._axes
@property
def ax(self):
"""The :class:`matplotlib.axes.Axes` when no faceting variables are assigned."""
if self.axes.shape == (1, 1):
return self.axes[0, 0]
else:
err = (
"Use the `.axes` attribute when facet variables are assigned."
)
raise AttributeError(err)
@property
def axes_dict(self):
"""A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.
If only one of ``row`` or ``col`` is assigned, each key is a string
representing a level of that variable. If both facet dimensions are
assigned, each key is a ``({row_level}, {col_level})`` tuple.
"""
return self._axes_dict
# ------ Private properties, that require some computation to get
@property
def _inner_axes(self):
"""Return a flat array of the inner axes."""
if self._col_wrap is None:
return self.axes[:-1, 1:].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (
i % self._ncol
and i < (self._ncol * (self._nrow - 1))
and i < (self._ncol * (self._nrow - 1) - n_empty)
)
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _left_axes(self):
"""Return a flat array of the left column of axes."""
if self._col_wrap is None:
return self.axes[:, 0].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if not i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_left_axes(self):
"""Return a flat array of axes that aren't on the left column."""
if self._col_wrap is None:
return self.axes[:, 1:].flat
else:
axes = []
for i, ax in enumerate(self.axes):
if i % self._ncol:
axes.append(ax)
return np.array(axes, object).flat
@property
def _bottom_axes(self):
"""Return a flat array of the bottom row of axes."""
if self._col_wrap is None:
return self.axes[-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (
i >= (self._ncol * (self._nrow - 1))
or i >= (self._ncol * (self._nrow - 1) - n_empty)
)
if append:
axes.append(ax)
return np.array(axes, object).flat
@property
def _not_bottom_axes(self):
"""Return a flat array of axes that aren't on the bottom row."""
if self._col_wrap is None:
return self.axes[:-1, :].flat
else:
axes = []
n_empty = self._nrow * self._ncol - self._n_facets
for i, ax in enumerate(self.axes):
append = (
i < (self._ncol * (self._nrow - 1))
and i < (self._ncol * (self._nrow - 1) - n_empty)
)
if append:
axes.append(ax)
return np.array(axes, object).flat
class PairGrid(Grid):
"""Subplot grid for plotting pairwise relationships in a dataset.
This object maps each variable in a dataset onto a column and row in a
grid of multiple axes. Different axes-level plotting functions can be
used to draw bivariate plots in the upper and lower triangles, and the
    marginal distribution of each variable can be shown on the diagonal.
Several different common plots can be generated in a single line using
:func:`pairplot`. Use :class:`PairGrid` when you need more flexibility.
See the :ref:`tutorial <grid_tutorial>` for more information.
"""
@_deprecate_positional_args
def __init__(
self, data, *,
hue=None, hue_order=None, palette=None,
hue_kws=None, vars=None, x_vars=None, y_vars=None,
corner=False, diag_sharey=True, height=2.5, aspect=1,
layout_pad=.5, despine=True, dropna=False, size=None
):
"""Initialize the plot figure and PairGrid object.
Parameters
----------
data : DataFrame
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : string (variable name)
Variable in ``data`` to map plot aspects to different colors. This
variable will be excluded from the default x and y variables.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
hue_kws : dictionary of param -> list of values mapping
Other keyword arguments to insert into the plotting call to let
other plot attributes vary across levels of the hue variable (e.g.
the markers in a scatterplot).
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
layout_pad : scalar
Padding between axes; passed to ``fig.tight_layout``.
despine : boolean
Remove the top and right spines from the plots.
dropna : boolean
Drop missing values from the data before plotting.
See Also
--------
pairplot : Easily drawing common uses of :class:`PairGrid`.
FacetGrid : Subplot grid for plotting conditional relationships.
Examples
--------
.. include:: ../docstrings/PairGrid.rst
"""
super(PairGrid, self).__init__()
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(UserWarning(msg))
# Sort out the variables that define the grid
numeric_cols = self._find_numeric_cols(data)
if hue in numeric_cols:
numeric_cols.remove(hue)
if vars is not None:
x_vars = list(vars)
y_vars = list(vars)
if x_vars is None:
x_vars = numeric_cols
if y_vars is None:
y_vars = numeric_cols
if np.isscalar(x_vars):
x_vars = [x_vars]
if np.isscalar(y_vars):
y_vars = [y_vars]
self.x_vars = x_vars = list(x_vars)
self.y_vars = y_vars = list(y_vars)
self.square_grid = self.x_vars == self.y_vars
if not x_vars:
raise ValueError("No variables found for grid columns.")
if not y_vars:
raise ValueError("No variables found for grid rows.")
# Create the figure and the array of subplots
figsize = len(x_vars) * height * aspect, len(y_vars) * height
# Disable autolayout so legend_out works
with mpl.rc_context({"figure.autolayout": False}):
fig = plt.figure(figsize=figsize)
axes = fig.subplots(len(y_vars), len(x_vars),
sharex="col", sharey="row",
squeeze=False)
# Possibly remove upper axes to make a corner grid
# Note: setting up the axes is usually the most time-intensive part
# of using the PairGrid. We are foregoing the speed improvement that
# we would get by just not setting up the hidden axes so that we can
# avoid implementing fig.subplots ourselves. But worth thinking about.
self._corner = corner
if corner:
hide_indices = np.triu_indices_from(axes, 1)
for i, j in zip(*hide_indices):
axes[i, j].remove()
axes[i, j] = None
self._figure = fig
self.axes = axes
self.data = data
# Save what we are going to do with the diagonal
self.diag_sharey = diag_sharey
self.diag_vars = None
self.diag_axes = None
self._dropna = dropna
# Label the axes
self._add_axis_labels()
# Sort out the hue variable
self._hue_var = hue
if hue is None:
self.hue_names = hue_order = ["_nolegend_"]
self.hue_vals = pd.Series(["_nolegend_"] * len(data),
index=data.index)
else:
# We need hue_order and hue_names because the former is used to control
# the order of drawing and the latter is used to control the order of
# the legend. hue_names can become string-typed while hue_order must
# retain the type of the input data. This is messy but results from
# the fact that PairGrid can implement the hue-mapping logic itself
# (and was originally written exclusively that way) but now can delegate
# to the axes-level functions, while always handling legend creation.
# See GH2307
hue_names = hue_order = categorical_order(data[hue], hue_order)
if dropna:
# Filter NA from the list of unique hue names
hue_names = list(filter(pd.notnull, hue_names))
self.hue_names = hue_names
self.hue_vals = data[hue]
# Additional dict of kwarg -> list of values for mapping the hue var
self.hue_kws = hue_kws if hue_kws is not None else {}
self._orig_palette = palette
self._hue_order = hue_order
self.palette = self._get_palette(data, hue, hue_order, palette)
self._legend_data = {}
# Make the plot look nice
for ax in axes[:-1, :].flat:
if ax is None:
continue
for label in ax.get_xticklabels():
label.set_visible(False)
ax.xaxis.offsetText.set_visible(False)
ax.xaxis.label.set_visible(False)
for ax in axes[:, 1:].flat:
if ax is None:
continue
for label in ax.get_yticklabels():
label.set_visible(False)
ax.yaxis.offsetText.set_visible(False)
ax.yaxis.label.set_visible(False)
self._tight_layout_rect = [.01, .01, .99, .99]
self._tight_layout_pad = layout_pad
self._despine = despine
if despine:
utils.despine(fig=fig)
self.tight_layout(pad=layout_pad)
def map(self, func, **kwargs):
"""Plot with the same function in every subplot.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
row_indices, col_indices = np.indices(self.axes.shape)
indices = zip(row_indices.flat, col_indices.flat)
self._map_bivariate(func, indices, **kwargs)
return self
def map_lower(self, func, **kwargs):
"""Plot with a bivariate function on the lower diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
indices = zip(*np.tril_indices_from(self.axes, -1))
self._map_bivariate(func, indices, **kwargs)
return self
def map_upper(self, func, **kwargs):
"""Plot with a bivariate function on the upper diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
indices = zip(*np.triu_indices_from(self.axes, 1))
self._map_bivariate(func, indices, **kwargs)
return self
def map_offdiag(self, func, **kwargs):
"""Plot with a bivariate function on the off-diagonal subplots.
Parameters
----------
func : callable plotting function
Must take x, y arrays as positional arguments and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
if self.square_grid:
self.map_lower(func, **kwargs)
if not self._corner:
self.map_upper(func, **kwargs)
else:
indices = []
for i, (y_var) in enumerate(self.y_vars):
for j, (x_var) in enumerate(self.x_vars):
if x_var != y_var:
indices.append((i, j))
self._map_bivariate(func, indices, **kwargs)
return self
def map_diag(self, func, **kwargs):
"""Plot with a univariate function on each diagonal subplot.
Parameters
----------
func : callable plotting function
Must take an x array as a positional argument and draw onto the
"currently active" matplotlib Axes. Also needs to accept kwargs
called ``color`` and ``label``.
"""
# Add special diagonal axes for the univariate plot
if self.diag_axes is None:
diag_vars = []
diag_axes = []
for i, y_var in enumerate(self.y_vars):
for j, x_var in enumerate(self.x_vars):
if x_var == y_var:
# Make the density axes
diag_vars.append(x_var)
ax = self.axes[i, j]
diag_ax = ax.twinx()
diag_ax.set_axis_off()
diag_axes.append(diag_ax)
# Work around matplotlib bug
# https://github.com/matplotlib/matplotlib/issues/15188
if not plt.rcParams.get("ytick.left", True):
for tick in ax.yaxis.majorTicks:
tick.tick1line.set_visible(False)
# Remove main y axis from density axes in a corner plot
if self._corner:
ax.yaxis.set_visible(False)
if self._despine:
utils.despine(ax=ax, left=True)
# TODO add optional density ticks (on the right)
# when drawing a corner plot?
if self.diag_sharey and diag_axes:
# This may change in future matplotlibs
# See https://github.com/matplotlib/matplotlib/pull/9923
group = diag_axes[0].get_shared_y_axes()
for ax in diag_axes[1:]:
group.join(ax, diag_axes[0])
self.diag_vars = np.array(diag_vars, np.object_)
self.diag_axes = np.array(diag_axes, np.object_)
if "hue" not in signature(func).parameters:
return self._map_diag_iter_hue(func, **kwargs)
# Loop over diagonal variables and axes, making one plot in each
for var, ax in zip(self.diag_vars, self.diag_axes):
plot_kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
plot_kwargs["ax"] = ax
else:
plt.sca(ax)
vector = self.data[var]
if self._hue_var is not None:
hue = self.data[self._hue_var]
else:
hue = None
if self._dropna:
not_na = vector.notna()
if hue is not None:
not_na &= hue.notna()
vector = vector[not_na]
if hue is not None:
hue = hue[not_na]
plot_kwargs.setdefault("hue", hue)
plot_kwargs.setdefault("hue_order", self._hue_order)
plot_kwargs.setdefault("palette", self._orig_palette)
func(x=vector, **plot_kwargs)
ax.legend_ = None
self._add_axis_labels()
return self
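    # Hedged usage sketch (added comment): the common PairGrid recipe combines
    # map_diag and map_offdiag, then adds the legend. `penguins` and the `sns`
    # alias are illustrative assumptions.
    #
    #     g = PairGrid(penguins, hue="species")
    #     g.map_diag(sns.histplot)
    #     g.map_offdiag(sns.scatterplot)
    #     g.add_legend()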
def _map_diag_iter_hue(self, func, **kwargs):
"""Put marginal plot on each diagonal axes, iterating over hue."""
# Plot on each of the diagonal axes
fixed_color = kwargs.pop("color", None)
for var, ax in zip(self.diag_vars, self.diag_axes):
hue_grouped = self.data[var].groupby(self.hue_vals)
plot_kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
plot_kwargs["ax"] = ax
else:
plt.sca(ax)
for k, label_k in enumerate(self._hue_order):
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.Series([], dtype=float)
if fixed_color is None:
color = self.palette[k]
else:
color = fixed_color
if self._dropna:
data_k = utils.remove_na(data_k)
if str(func.__module__).startswith("seaborn"):
func(x=data_k, label=label_k, color=color, **plot_kwargs)
else:
func(data_k, label=label_k, color=color, **plot_kwargs)
self._add_axis_labels()
return self
def _map_bivariate(self, func, indices, **kwargs):
"""Draw a bivariate plot on the indicated axes."""
# This is a hack to handle the fact that new distribution plots don't add
# their artists onto the axes. This is probably superior in general, but
# we'll need a better way to handle it in the axisgrid functions.
from .distributions import histplot, kdeplot
if func is histplot or func is kdeplot:
self._extract_legend_handles = True
kws = kwargs.copy() # Use copy as we insert other kwargs
for i, j in indices:
x_var = self.x_vars[j]
y_var = self.y_vars[i]
ax = self.axes[i, j]
if ax is None: # i.e. we are in corner mode
continue
self._plot_bivariate(x_var, y_var, ax, func, **kws)
self._add_axis_labels()
if "hue" in signature(func).parameters:
self.hue_names = list(self._legend_data)
def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):
"""Draw a bivariate plot on the specified axes."""
if "hue" not in signature(func).parameters:
self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)
return
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = ax
else:
plt.sca(ax)
if x_var == y_var:
axes_vars = [x_var]
else:
axes_vars = [x_var, y_var]
if self._hue_var is not None and self._hue_var not in axes_vars:
axes_vars.append(self._hue_var)
data = self.data[axes_vars]
if self._dropna:
data = data.dropna()
x = data[x_var]
y = data[y_var]
if self._hue_var is None:
hue = None
else:
hue = data.get(self._hue_var)
kwargs.setdefault("hue", hue)
kwargs.setdefault("hue_order", self._hue_order)
kwargs.setdefault("palette", self._orig_palette)
func(x=x, y=y, **kwargs)
self._update_legend_data(ax)
def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):
"""Draw a bivariate plot while iterating over hue subsets."""
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = ax
else:
plt.sca(ax)
if x_var == y_var:
axes_vars = [x_var]
else:
axes_vars = [x_var, y_var]
hue_grouped = self.data.groupby(self.hue_vals)
for k, label_k in enumerate(self._hue_order):
kws = kwargs.copy()
# Attempt to get data for this level, allowing for empty
try:
data_k = hue_grouped.get_group(label_k)
except KeyError:
data_k = pd.DataFrame(columns=axes_vars,
dtype=float)
if self._dropna:
data_k = data_k[axes_vars].dropna()
x = data_k[x_var]
y = data_k[y_var]
for kw, val_list in self.hue_kws.items():
kws[kw] = val_list[k]
kws.setdefault("color", self.palette[k])
if self._hue_var is not None:
kws["label"] = label_k
if str(func.__module__).startswith("seaborn"):
func(x=x, y=y, **kws)
else:
func(x, y, **kws)
self._update_legend_data(ax)
def _add_axis_labels(self):
"""Add labels to the left and bottom Axes."""
for ax, label in zip(self.axes[-1, :], self.x_vars):
ax.set_xlabel(label)
for ax, label in zip(self.axes[:, 0], self.y_vars):
ax.set_ylabel(label)
if self._corner:
self.axes[0, 0].set_ylabel("")
def _find_numeric_cols(self, data):
"""Find which variables in a DataFrame are numeric."""
numeric_cols = []
for col in data:
if variable_type(data[col]) == "numeric":
numeric_cols.append(col)
return numeric_cols
class JointGrid(_BaseGrid):
"""Grid for drawing a bivariate plot with marginal univariate plots.
Many plots can be drawn by using the figure-level interface :func:`jointplot`.
Use this class directly when you need more flexibility.
"""
@_deprecate_positional_args
def __init__(
self, *,
x=None, y=None,
data=None,
height=6, ratio=5, space=.2,
dropna=False, xlim=None, ylim=None, size=None, marginal_ticks=False,
hue=None, palette=None, hue_order=None, hue_norm=None,
):
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Set up the subplot grid
f = plt.figure(figsize=(height, height))
gs = plt.GridSpec(ratio + 1, ratio + 1)
ax_joint = f.add_subplot(gs[1:, :-1])
ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)
ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)
self._figure = f
self.ax_joint = ax_joint
self.ax_marg_x = ax_marg_x
self.ax_marg_y = ax_marg_y
# Turn off tick visibility for the measure axis on the marginal plots
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)
plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)
# Turn off the ticks on the density axis for the marginal plots
if not marginal_ticks:
plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)
plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(), visible=False)
plt.setp(ax_marg_y.get_xticklabels(), visible=False)
plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)
plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)
ax_marg_x.yaxis.grid(False)
ax_marg_y.xaxis.grid(False)
# Process the input variables
p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))
plot_data = p.plot_data.loc[:, p.plot_data.notna().any()]
# Possibly drop NA
if dropna:
plot_data = plot_data.dropna()
def get_var(var):
vector = plot_data.get(var, None)
if vector is not None:
vector = vector.rename(p.variables.get(var, None))
return vector
self.x = get_var("x")
self.y = get_var("y")
self.hue = get_var("hue")
for axis in "xy":
name = p.variables.get(axis, None)
if name is not None:
getattr(ax_joint, f"set_{axis}label")(name)
if xlim is not None:
ax_joint.set_xlim(xlim)
if ylim is not None:
ax_joint.set_ylim(ylim)
# Store the semantic mapping parameters for axes-level functions
self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)
# Make the grid look nice
utils.despine(f)
if not marginal_ticks:
utils.despine(ax=ax_marg_x, left=True)
utils.despine(ax=ax_marg_y, bottom=True)
for axes in [ax_marg_x, ax_marg_y]:
for axis in [axes.xaxis, axes.yaxis]:
axis.label.set_visible(False)
f.tight_layout()
f.subplots_adjust(hspace=space, wspace=space)
def _inject_kwargs(self, func, kws, params):
"""Add params to kws if they are accepted by func."""
func_params = signature(func).parameters
for key, val in params.items():
if key in func_params:
kws.setdefault(key, val)
def plot(self, joint_func, marginal_func, **kwargs):
"""Draw the plot by passing functions for joint and marginal axes.
This method passes the ``kwargs`` dictionary to both functions. If you
need more control, call :meth:`JointGrid.plot_joint` and
:meth:`JointGrid.plot_marginals` directly with specific parameters.
Parameters
----------
joint_func, marginal_func : callables
Functions to draw the bivariate and univariate plots. See methods
referenced above for information about the required characteristics
of these functions.
kwargs
Additional keyword arguments are passed to both functions.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
self.plot_marginals(marginal_func, **kwargs)
self.plot_joint(joint_func, **kwargs)
return self
def plot_joint(self, func, **kwargs):
"""Draw a bivariate plot on the joint axes of the grid.
Parameters
----------
func : plotting callable
If a seaborn function, it should accept ``x`` and ``y``. Otherwise,
it must accept ``x`` and ``y`` vectors of data as the first two
positional arguments, and it must plot on the "current" axes.
If ``hue`` was defined in the class constructor, the function must
accept ``hue`` as a parameter.
kwargs
Keyword argument are passed to the plotting function.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
kwargs = kwargs.copy()
if str(func.__module__).startswith("seaborn"):
kwargs["ax"] = self.ax_joint
else:
plt.sca(self.ax_joint)
if self.hue is not None:
kwargs["hue"] = self.hue
self._inject_kwargs(func, kwargs, self._hue_params)
if str(func.__module__).startswith("seaborn"):
func(x=self.x, y=self.y, **kwargs)
else:
func(self.x, self.y, **kwargs)
return self
def plot_marginals(self, func, **kwargs):
"""Draw univariate plots on each marginal axes.
Parameters
----------
func : plotting callable
If a seaborn function, it should accept ``x`` and ``y`` and plot
when only one of them is defined. Otherwise, it must accept a vector
of data as the first positional argument and determine its orientation
using the ``vertical`` parameter, and it must plot on the "current" axes.
If ``hue`` was defined in the class constructor, it must accept ``hue``
as a parameter.
kwargs
Keyword argument are passed to the plotting function.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
seaborn_func = (
str(func.__module__).startswith("seaborn")
# deprecated distplot has a legacy API, special case it
and not func.__name__ == "distplot"
)
func_params = signature(func).parameters
kwargs = kwargs.copy()
if self.hue is not None:
kwargs["hue"] = self.hue
self._inject_kwargs(func, kwargs, self._hue_params)
if "legend" in func_params:
kwargs.setdefault("legend", False)
if "orientation" in func_params:
# e.g. plt.hist
orient_kw_x = {"orientation": "vertical"}
orient_kw_y = {"orientation": "horizontal"}
elif "vertical" in func_params:
# e.g. sns.distplot (also how did this get backwards?)
orient_kw_x = {"vertical": False}
orient_kw_y = {"vertical": True}
if seaborn_func:
func(x=self.x, ax=self.ax_marg_x, **kwargs)
else:
plt.sca(self.ax_marg_x)
func(self.x, **orient_kw_x, **kwargs)
if seaborn_func:
func(y=self.y, ax=self.ax_marg_y, **kwargs)
else:
plt.sca(self.ax_marg_y)
func(self.y, **orient_kw_y, **kwargs)
self.ax_marg_x.yaxis.get_label().set_visible(False)
self.ax_marg_y.xaxis.get_label().set_visible(False)
return self
def refline(
self, *, x=None, y=None, joint=True, marginal=True,
color='.5', linestyle='--', **line_kws
):
"""Add a reference line(s) to joint and/or marginal axes.
Parameters
----------
x, y : numeric
Value(s) to draw the line(s) at.
joint, marginal : bools
Whether to add the reference line(s) to the joint/marginal axes.
color : :mod:`matplotlib color <matplotlib.colors>`
Specifies the color of the reference line(s).
linestyle : str
Specifies the style of the reference line(s).
line_kws : key, value mappings
Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`
when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``
is not None.
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
line_kws['color'] = color
line_kws['linestyle'] = linestyle
if x is not None:
if joint:
self.ax_joint.axvline(x, **line_kws)
if marginal:
self.ax_marg_x.axvline(x, **line_kws)
if y is not None:
if joint:
self.ax_joint.axhline(y, **line_kws)
if marginal:
self.ax_marg_y.axhline(y, **line_kws)
return self
def set_axis_labels(self, xlabel="", ylabel="", **kwargs):
"""Set axis labels on the bivariate axes.
Parameters
----------
xlabel, ylabel : strings
Label names for the x and y variables.
kwargs : key, value mappings
Other keyword arguments are passed to the following functions:
- :meth:`matplotlib.axes.Axes.set_xlabel`
- :meth:`matplotlib.axes.Axes.set_ylabel`
Returns
-------
:class:`JointGrid` instance
Returns ``self`` for easy method chaining.
"""
self.ax_joint.set_xlabel(xlabel, **kwargs)
self.ax_joint.set_ylabel(ylabel, **kwargs)
return self
JointGrid.__init__.__doc__ = """\
Set up the grid of subplots and store data internally for easy plotting.
Parameters
----------
{params.core.xy}
{params.core.data}
height : number
Size of each side of the figure in inches (it will be square).
ratio : number
Ratio of joint axes height to marginal axes height.
space : number
Space between the joint and marginal axes
dropna : bool
If True, remove missing observations before plotting.
{{x, y}}lim : pairs of numbers
Set axis limits to these values before plotting.
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{params.core.hue}
Note: unlike in :class:`FacetGrid` or :class:`PairGrid`, the axes-level
functions must support ``hue`` to use it in :class:`JointGrid`.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
See Also
--------
{seealso.jointplot}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/JointGrid.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
@_deprecate_positional_args
def pairplot(
data, *,
hue=None, hue_order=None, palette=None,
vars=None, x_vars=None, y_vars=None,
kind="scatter", diag_kind="auto", markers=None,
height=2.5, aspect=1, corner=False, dropna=False,
plot_kws=None, diag_kws=None, grid_kws=None, size=None,
):
"""Plot pairwise relationships in a dataset.
By default, this function will create a grid of Axes such that each numeric
    variable in ``data`` will be shared across the y-axes across a single row and
the x-axes across a single column. The diagonal plots are treated
differently: a univariate distribution plot is drawn to show the marginal
distribution of the data in each column.
It is also possible to show a subset of variables or plot different
variables on the rows and columns.
This is a high-level interface for :class:`PairGrid` that is intended to
make it easy to draw a few common styles. You should use :class:`PairGrid`
directly if you need more flexibility.
Parameters
----------
data : `pandas.DataFrame`
Tidy (long-form) dataframe where each column is a variable and
each row is an observation.
hue : name of variable in ``data``
Variable in ``data`` to map plot aspects to different colors.
hue_order : list of strings
Order for the levels of the hue variable in the palette
palette : dict or seaborn color palette
Set of colors for mapping the ``hue`` variable. If a dict, keys
should be values in the ``hue`` variable.
vars : list of variable names
Variables within ``data`` to use, otherwise use every column with
a numeric datatype.
{x, y}_vars : lists of variable names
Variables within ``data`` to use separately for the rows and
columns of the figure; i.e. to make a non-square plot.
kind : {'scatter', 'kde', 'hist', 'reg'}
Kind of plot to make.
diag_kind : {'auto', 'hist', 'kde', None}
Kind of plot for the diagonal subplots. If 'auto', choose based on
whether or not ``hue`` is used.
markers : single matplotlib marker code or list
Either the marker to use for all scatterplot points or a list of markers
with a length the same as the number of levels in the hue variable so that
differently colored points will also have different scatterplot
markers.
height : scalar
Height (in inches) of each facet.
aspect : scalar
Aspect * height gives the width (in inches) of each facet.
corner : bool
If True, don't add axes to the upper (off-diagonal) triangle of the
grid, making this a "corner" plot.
dropna : boolean
Drop missing values from the data before plotting.
{plot, diag, grid}_kws : dicts
Dictionaries of keyword arguments. ``plot_kws`` are passed to the
bivariate plotting function, ``diag_kws`` are passed to the univariate
plotting function, and ``grid_kws`` are passed to the :class:`PairGrid`
constructor.
Returns
-------
grid : :class:`PairGrid`
Returns the underlying :class:`PairGrid` instance for further tweaking.
See Also
--------
PairGrid : Subplot grid for more flexible plotting of pairwise relationships.
JointGrid : Grid for plotting joint and marginal distributions of two variables.
Examples
--------
.. include:: ../docstrings/pairplot.rst
"""
# Avoid circular import
from .distributions import histplot, kdeplot
# Handle deprecations
if size is not None:
height = size
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
if not isinstance(data, pd.DataFrame):
raise TypeError(
"'data' must be pandas DataFrame object, not: {typefound}".format(
typefound=type(data)))
plot_kws = {} if plot_kws is None else plot_kws.copy()
diag_kws = {} if diag_kws is None else diag_kws.copy()
grid_kws = {} if grid_kws is None else grid_kws.copy()
# Resolve "auto" diag kind
if diag_kind == "auto":
if hue is None:
diag_kind = "kde" if kind == "kde" else "hist"
else:
diag_kind = "hist" if kind == "hist" else "kde"
# Set up the PairGrid
grid_kws.setdefault("diag_sharey", diag_kind == "hist")
grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
hue_order=hue_order, palette=palette, corner=corner,
height=height, aspect=aspect, dropna=dropna, **grid_kws)
# Add the markers here as PairGrid has figured out how many levels of the
# hue variable are needed and we don't want to duplicate that process
if markers is not None:
if kind == "reg":
# Needed until regplot supports style
if grid.hue_names is None:
n_markers = 1
else:
n_markers = len(grid.hue_names)
if not isinstance(markers, list):
markers = [markers] * n_markers
if len(markers) != n_markers:
raise ValueError(("markers must be a singleton or a list of "
"markers for each level of the hue variable"))
grid.hue_kws = {"marker": markers}
elif kind == "scatter":
if isinstance(markers, str):
plot_kws["marker"] = markers
elif hue is not None:
plot_kws["style"] = data[hue]
plot_kws["markers"] = markers
# Draw the marginal plots on the diagonal
diag_kws = diag_kws.copy()
diag_kws.setdefault("legend", False)
if diag_kind == "hist":
grid.map_diag(histplot, **diag_kws)
elif diag_kind == "kde":
diag_kws.setdefault("fill", True)
diag_kws.setdefault("warn_singular", False)
grid.map_diag(kdeplot, **diag_kws)
# Maybe plot on the off-diagonals
if diag_kind is not None:
plotter = grid.map_offdiag
else:
plotter = grid.map
if kind == "scatter":
from .relational import scatterplot # Avoid circular import
plotter(scatterplot, **plot_kws)
elif kind == "reg":
from .regression import regplot # Avoid circular import
plotter(regplot, **plot_kws)
elif kind == "kde":
from .distributions import kdeplot # Avoid circular import
plot_kws.setdefault("warn_singular", False)
plotter(kdeplot, **plot_kws)
elif kind == "hist":
from .distributions import histplot # Avoid circular import
plotter(histplot, **plot_kws)
# Add a legend
if hue is not None:
grid.add_legend()
grid.tight_layout()
return grid
@_deprecate_positional_args
def jointplot(
*,
x=None, y=None,
data=None,
kind="scatter", color=None, height=6, ratio=5, space=.2,
dropna=False, xlim=None, ylim=None, marginal_ticks=False,
joint_kws=None, marginal_kws=None,
hue=None, palette=None, hue_order=None, hue_norm=None,
**kwargs
):
# Avoid circular imports
from .relational import scatterplot
from .regression import regplot, residplot
from .distributions import histplot, kdeplot, _freedman_diaconis_bins
# Handle deprecations
if "size" in kwargs:
height = kwargs.pop("size")
msg = ("The `size` parameter has been renamed to `height`; "
"please update your code.")
warnings.warn(msg, UserWarning)
# Set up empty default kwarg dicts
joint_kws = {} if joint_kws is None else joint_kws.copy()
joint_kws.update(kwargs)
marginal_kws = {} if marginal_kws is None else marginal_kws.copy()
# Handle deprecations of distplot-specific kwargs
distplot_keys = [
"rug", "fit", "hist_kws", "norm_hist" "hist_kws", "rug_kws",
]
unused_keys = []
for key in distplot_keys:
if key in marginal_kws:
unused_keys.append(key)
marginal_kws.pop(key)
if unused_keys and kind != "kde":
msg = (
"The marginal plotting function has changed to `histplot`,"
" which does not accept the following argument(s): {}."
).format(", ".join(unused_keys))
warnings.warn(msg, UserWarning)
# Validate the plot kind
plot_kinds = ["scatter", "hist", "hex", "kde", "reg", "resid"]
_check_argument("kind", plot_kinds, kind)
# Raise early if using `hue` with a kind that does not support it
if hue is not None and kind in ["hex", "reg", "resid"]:
msg = (
f"Use of `hue` with `kind='{kind}'` is not currently supported."
)
raise ValueError(msg)
# Make a colormap based off the plot color
# (Currently used only for kind="hex")
if color is None:
color = "C0"
color_rgb = mpl.colors.colorConverter.to_rgb(color)
colors = [utils.set_hls_values(color_rgb, l=l) # noqa
for l in np.linspace(1, 0, 12)]
cmap = blend_palette(colors, as_cmap=True)
# Matplotlib's hexbin plot is not na-robust
if kind == "hex":
dropna = True
# Initialize the JointGrid object
grid = JointGrid(
data=data, x=x, y=y, hue=hue,
palette=palette, hue_order=hue_order, hue_norm=hue_norm,
dropna=dropna, height=height, ratio=ratio, space=space,
xlim=xlim, ylim=ylim, marginal_ticks=marginal_ticks,
)
if grid.hue is not None:
marginal_kws.setdefault("legend", False)
# Plot the data using the grid
if kind.startswith("scatter"):
joint_kws.setdefault("color", color)
grid.plot_joint(scatterplot, **joint_kws)
if grid.hue is None:
marg_func = histplot
else:
marg_func = kdeplot
marginal_kws.setdefault("warn_singular", False)
marginal_kws.setdefault("fill", True)
marginal_kws.setdefault("color", color)
grid.plot_marginals(marg_func, **marginal_kws)
elif kind.startswith("hist"):
# TODO process pair parameters for bins, etc. and pass
        # to both joint and marginal plots
joint_kws.setdefault("color", color)
grid.plot_joint(histplot, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
marg_x_kws = marginal_kws.copy()
marg_y_kws = marginal_kws.copy()
pair_keys = "bins", "binwidth", "binrange"
for key in pair_keys:
if isinstance(joint_kws.get(key), tuple):
x_val, y_val = joint_kws[key]
marg_x_kws.setdefault(key, x_val)
marg_y_kws.setdefault(key, y_val)
histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)
histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)
elif kind.startswith("kde"):
joint_kws.setdefault("color", color)
joint_kws.setdefault("warn_singular", False)
grid.plot_joint(kdeplot, **joint_kws)
marginal_kws.setdefault("color", color)
if "fill" in joint_kws:
marginal_kws.setdefault("fill", joint_kws["fill"])
grid.plot_marginals(kdeplot, **marginal_kws)
elif kind.startswith("hex"):
x_bins = min(_freedman_diaconis_bins(grid.x), 50)
y_bins = min(_freedman_diaconis_bins(grid.y), 50)
gridsize = int(np.mean([x_bins, y_bins]))
joint_kws.setdefault("gridsize", gridsize)
joint_kws.setdefault("cmap", cmap)
grid.plot_joint(plt.hexbin, **joint_kws)
marginal_kws.setdefault("kde", False)
marginal_kws.setdefault("color", color)
grid.plot_marginals(histplot, **marginal_kws)
elif kind.startswith("reg"):
marginal_kws.setdefault("color", color)
marginal_kws.setdefault("kde", True)
grid.plot_marginals(histplot, **marginal_kws)
joint_kws.setdefault("color", color)
grid.plot_joint(regplot, **joint_kws)
elif kind.startswith("resid"):
joint_kws.setdefault("color", color)
grid.plot_joint(residplot, **joint_kws)
x, y = grid.ax_joint.collections[0].get_offsets().T
marginal_kws.setdefault("color", color)
histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)
histplot(y=y, hue=hue, ax=grid.ax_marg_y, **marginal_kws)
return grid
jointplot.__doc__ = """\
Draw a plot of two variables with bivariate and univariate graphs.
This function provides a convenient interface to the :class:`JointGrid`
class, with several canned plot kinds. This is intended to be a fairly
lightweight wrapper; if you need more flexibility, you should use
:class:`JointGrid` directly.
Parameters
----------
{params.core.xy}
{params.core.data}
kind : {{ "scatter" | "kde" | "hist" | "hex" | "reg" | "resid" }}
Kind of plot to draw. See the examples for references to the underlying functions.
{params.core.color}
height : numeric
Size of the figure (it will be square).
ratio : numeric
Ratio of joint axes height to marginal axes height.
space : numeric
Space between the joint and marginal axes
dropna : bool
If True, remove observations that are missing from ``x`` and ``y``.
{{x, y}}lim : pairs of numbers
Axis limits to set before plotting.
marginal_ticks : bool
If False, suppress ticks on the count/density axis of the marginal plots.
{{joint, marginal}}_kws : dicts
Additional keyword arguments for the plot components.
{params.core.hue}
Semantic variable that is mapped to determine the color of plot elements.
{params.core.palette}
{params.core.hue_order}
{params.core.hue_norm}
kwargs
Additional keyword arguments are passed to the function used to
draw the plot on the joint Axes, superseding items in the
``joint_kws`` dictionary.
Returns
-------
{returns.jointgrid}
See Also
--------
{seealso.jointgrid}
{seealso.pairgrid}
{seealso.pairplot}
Examples
--------
.. include:: ../docstrings/jointplot.rst
""".format(
params=_param_docs,
returns=_core_docs["returns"],
seealso=_core_docs["seealso"],
)
| mwaskom/seaborn | seaborn/axisgrid.py | Python | bsd-3-clause | 87,264 |
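A minimal usage sketch for the JointGrid / jointplot API in the seaborn entry above, assuming seaborn and matplotlib are installed; the "penguins" dataset and its column names come from seaborn's bundled example data and are otherwise assumptions.

import seaborn as sns
import matplotlib.pyplot as plt

penguins = sns.load_dataset("penguins")

# High-level route: jointplot builds the grid and both plot layers in one call.
sns.jointplot(data=penguins, x="bill_length_mm", y="bill_depth_mm",
              hue="species", kind="scatter")

# Low-level route through JointGrid for finer control over each layer.
g = sns.JointGrid(data=penguins, x="bill_length_mm", y="bill_depth_mm")
g.plot_joint(sns.scatterplot)
g.plot_marginals(sns.histplot)
g.refline(x=45, y=17)  # reference lines on the joint and marginal axes

plt.show()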
from PIL import Image
from maskgen import exif
import numpy as np
import PIL
"""
Save the image as a PDF. If the image has an EXIF orientation and 'Image Rotated' is set, rotate the image according to the EXIF data.
"""
def transform(img,source,target, **kwargs):
if 'resolution' in kwargs:
res = float(int(kwargs['resolution']))
else:
res = 200.0
im = img.convert('RGB').to_array()
Image.fromarray(im).save(target,format='PDF',resolution=res)
return None,None
def operation():
return {'name':'OutputPDF',
'category':'Output',
'description':'Save an image as .pdf',
'software':'PIL',
'version':PIL.__version__,
'arguments':{
'resolution':{
'type':'int',
'defaultvalue':'100',
'description':'DPI'
}
},
'transitions': [
'image.image'
]
}
def suffix():
return '.pdf'
| rwgdrummer/maskgen | plugins/OutputPDF/__init__.py | Python | bsd-3-clause | 1,012 |
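A hedged sketch of invoking the OutputPDF plugin's transform() directly. The _ImageStub wrapper below is an invented stand-in for maskgen's own image object, which only needs to expose convert(mode) returning something with to_array(); the input and output file names are also made up.

import numpy as np
from PIL import Image

class _ArrayView(object):
    def __init__(self, arr):
        self._arr = arr
    def to_array(self):
        return self._arr

class _ImageStub(object):
    # Stand-in for the maskgen image wrapper expected by transform().
    def __init__(self, path):
        self._img = Image.open(path)
    def convert(self, mode):
        return _ArrayView(np.asarray(self._img.convert(mode)))

# Saves input.png as output.pdf at 150 DPI using the plugin's transform() above.
transform(_ImageStub("input.png"), "input.png", "output.pdf", resolution=150)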
# -*- coding: utf-8 -*-
"""
Kay preparse management command.
:Copyright: (c) 2009 Accense Technology, Inc.
Takashi Matsuo <tmatsuo@candit.jp>,
All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from os import listdir, path, mkdir
from werkzeug.utils import import_string
import kay
import kay.app
from kay.utils import local
from kay.utils.jinja2utils.compiler import compile_dir
from kay.management.utils import print_status
IGNORE_FILENAMES = {
'kay': ('debug', 'app_template'),
    'app': ('kay',),
}
def find_template_dir(target_path, ignore_filenames):
ret = []
for filename in listdir(target_path):
target_fullpath = path.join(target_path, filename)
if path.isdir(target_fullpath):
if filename.startswith(".") or filename in ignore_filenames:
continue
if filename == "templates":
ret.append(target_fullpath)
else:
ret = ret + find_template_dir(target_fullpath, ignore_filenames)
else:
continue
return ret
def do_preparse_bundle():
"""
Pre compile all the jinja2 templates in Kay itself.
"""
print_status("Compiling bundled templates...")
app = kay.app.get_application()
env = app.app.jinja2_env
for dir in find_template_dir(kay.KAY_DIR, ('debug','app_template')):
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
print_status("Finished compiling bundled templates...")
def do_preparse_apps():
"""
Pre compile all the jinja2 templates in your applications.
"""
from kay.conf import LazySettings
print_status("Compiling templates...")
application = kay.app.get_application()
applications = [application]
settings_treated = []
for key, settings_name in \
application.app.app_settings.PER_DOMAIN_SETTINGS.iteritems():
if not settings_name in settings_treated:
applications.append(kay.app.get_application(
settings=LazySettings(settings_module=settings_name)))
settings_treated.append(settings_name)
for app in applications:
compile_app_templates(app.app) # pass KayApp instance
for key, submount_app in app.mounts.iteritems():
if isinstance(submount_app, kay.app.KayApp):
compile_app_templates(submount_app)
print_status("Finished compiling templates...")
def prepare_destdir(dir):
def replace_dirname(orig):
if 'templates' in orig:
return orig.replace('templates', 'templates_compiled')
else:
return orig+'_compiled'
dest = replace_dirname(dir)
if path.isdir(dest):
for d, subdirs, files in os.walk(dest):
for f in files:
compiled_filename = "%s/%s" % (d, f)
orig_filename = compiled_filename.replace(dest, dir)
if not path.isfile(orig_filename):
os.unlink(compiled_filename)
print_status("%s does not exist. So, '%s' is removed." % (
orig_filename, compiled_filename))
else:
mkdir(dest)
return dest
def compile_app_templates(app):
env = app.jinja2_env
target_dirs = [dir for dir in app.app_settings.TEMPLATE_DIRS\
if os.path.isdir(dir)]
for app in app.app_settings.INSTALLED_APPS:
if app.startswith("kay."):
continue
mod = import_string(app)
target_dirs.extend(find_template_dir(os.path.dirname(mod.__file__),
                                         ('kay',)))
for dir in target_dirs:
dest = prepare_destdir(dir)
print_status("Now compiling templates in %s to %s." % (dir, dest))
compile_dir(env, dir, dest)
| IanLewis/kay | kay/management/preparse.py | Python | bsd-3-clause | 3,612 |
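A small illustration of find_template_dir() from the Kay entry above: it walks a tree and returns every sub-directory literally named "templates", skipping hidden directories and any name in ignore_filenames. The directory layout is invented for the example and the function is assumed to be importable from kay.management.preparse.

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "myapp", "templates"))
os.makedirs(os.path.join(root, "kay", "templates"))      # ignored by name
os.makedirs(os.path.join(root, ".hidden", "templates"))  # hidden, ignored

print(find_template_dir(root, ("kay",)))
# -> ['<root>/myapp/templates']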
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Deglitch utilities
=====================
"""
import numpy as np
import logging
_logger = logging.getLogger("sloth.math.deglitch")
def remove_spikes_medfilt1d(y_spiky, backend="silx", kernel_size=3, threshold=0.1):
"""Remove spikes in a 1D array using medfilt from silx.math
Parameters
----------
y_spiky : array
spiky data
backend : str, optional
library to use as backend
- 'silx' -> from silx.math.medianfilter import medfilt1d
- 'pymca' -> from PyMca5.PyMcaMath.PyMcaSciPy.signal import medfilt1d
- 'pandas' : TODO
kernel_size : int, optional
kernel size where to calculate median, must be odd [3]
threshold : float, optional
relative difference between filtered and spiky data [0.1]
Returns
-------
array
filtered array
"""
ynew = np.zeros_like(y_spiky)
if not (kernel_size % 2):
kernel_size += 1
_logger.warning("'kernel_size' must be odd -> adjusted to %d", kernel_size)
if backend == "silx":
return remove_spikes_silx(y_spiky, kernel_size=kernel_size, threshold=threshold)
elif backend == "pymca":
        return remove_spikes_pymca(y_spiky, kernel_size=kernel_size, threshold=threshold)
elif backend == "pandas":
return remove_spikes_pandas(y_spiky, window=kernel_size, threshold=threshold)
else:
_logger.warning("backend for medfilt1d not found! -> returning zeros")
return ynew
def remove_spikes_silx(y_spiky, kernel_size=3, threshold=0.1):
"""Remove spikes in a 1D array using medfilt from silx.math
Parameters
----------
y_spiky : array
spiky data
kernel_size : int, optional
kernel size where to calculate median, must be odd [3]
threshold : float, optional
difference between filtered and spiky data relative [0.1]
Returns
-------
array
filtered array
"""
ynew = np.zeros_like(y_spiky)
try:
from silx.math.medianfilter import medfilt1d
except ImportError:
_logger.warning("medfilt1d (from SILX) not found! -> returning zeros")
return ynew
y_filtered = medfilt1d(
y_spiky, kernel_size=kernel_size, conditional=True, mode="nearest", cval=0
)
diff = y_filtered - y_spiky
rel_diff = diff / y_filtered
ynew = np.where(abs(rel_diff) > threshold, y_filtered, y_spiky)
return ynew
def remove_spikes_pymca(y_spiky, kernel_size=9, threshold=0.66):
"""Remove spikes in a 1D array using medfilt from PyMca5.PyMcaMath.PyMcaScipy.signal
Parameters
----------
y_spiky : array
spiky data
kernel_size : int, optional
kernel size where to calculate median, should be odd [9]
threshold : float, optional
difference between filtered and spiky data in sigma units [0.66]
Returns
-------
array
filtered array
"""
ynew = np.zeros_like(y_spiky)
try:
from PyMca5.PyMcaMath.PyMcaSciPy.signal import medfilt1d
except ImportError:
_logger.warning("medfilt1d (from PyMca5) not found! -> returning zeros")
return ynew
y_filtered = medfilt1d(y_spiky, kernel_size)
diff = y_filtered - y_spiky
mean = diff.mean()
sigma = (y_spiky - mean) ** 2
sigma = np.sqrt(sigma.sum() / float(len(sigma)))
ynew = np.where(abs(diff) > threshold * sigma, y_filtered, y_spiky)
return ynew
def remove_spikes_pandas(y, window=3, threshold=3):
"""remove spikes using pandas
Taken from `https://ocefpaf.github.io/python4oceanographers/blog/2015/03/16/outlier_detection/`_
    .. note:: ``pd.rolling_median`` was removed in pandas > 0.17; with newer
        pandas one can simply do ``df.rolling(3, center=True).median()``.
        ``df.as_matrix()`` is likewise deprecated in favour of ``df.values``.
Parameters
----------
y : array 1D
window : int (optional)
window in rolling median [3]
threshold : int (optional)
number of sigma difference with original data
    Returns
    -------
    ynew : array, same shape as ``y``
"""
ynew = np.zeros_like(y)
try:
import pandas as pd
except ImportError:
_logger.error("pandas not found! -> returning zeros")
return ynew
df = pd.DataFrame(y)
try:
yf = (
pd.rolling_median(df, window=window, center=True)
.fillna(method="bfill")
.fillna(method="ffill")
)
diff = yf.as_matrix() - y
mean = diff.mean()
sigma = (y - mean) ** 2
sigma = np.sqrt(sigma.sum() / float(len(sigma)))
ynew = np.where(abs(diff) > threshold * sigma, yf.as_matrix(), y)
except Exception:
yf = (
df.rolling(window, center=True)
.median()
.fillna(method="bfill")
.fillna(method="ffill")
)
diff = yf.values - y
mean = diff.mean()
sigma = (y - mean) ** 2
sigma = np.sqrt(sigma.sum() / float(len(sigma)))
ynew = np.where(abs(diff) > threshold * sigma, yf.values, y)
# ynew = np.array(yf.values).reshape(len(x))
return ynew
| maurov/xraysloth | sloth/math/deglitch.py | Python | bsd-3-clause | 5,178 |
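A quick sketch of remove_spikes_medfilt1d() from the deglitch module above applied to a synthetic signal; the spike positions and amplitudes are made up, and the "silx" backend requires silx to be installed (otherwise the helper logs a warning and returns zeros).

import numpy as np

y = np.sin(np.linspace(0, 4 * np.pi, 200))
y[50] += 5.0    # inject two isolated spikes
y[120] -= 4.0

y_clean = remove_spikes_medfilt1d(y, backend="silx", kernel_size=5, threshold=0.1)
print(abs(y_clean[50]), abs(y_clean[120]))  # back close to the underlying sine values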
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['pdb']
__version__ = '0.9.0'
import sys
# backwards compatibility to support `from fairseq.meters import AverageMeter`
from fairseq.logging import meters, metrics, progress_bar # noqa
sys.modules['fairseq.meters'] = meters
sys.modules['fairseq.metrics'] = metrics
sys.modules['fairseq.progress_bar'] = progress_bar
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.tasks # noqa
import fairseq.benchmark # noqa
import fairseq.model_parallel # noqa
| hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/__init__.py | Python | bsd-3-clause | 802 |
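The sys.modules aliasing in the fairseq __init__ above is a general pattern for keeping a legacy import path alive after a module moves; a self-contained sketch using an invented alias name:

import sys
import json as _real_module

# Register the same module object under an additional, legacy name.
sys.modules["legacy_json"] = _real_module

import legacy_json  # resolves through the alias in sys.modules
assert legacy_json.dumps({"ok": True}) == _real_module.dumps({"ok": True})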
#from spectrum import datasets
from spectrum.datasets import *
def test_marple_data():
d = marple_data
assert len(d) == 64
def test_timeseries():
data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)
ts = TimeSeries(data, sampling=1)
#assert ts.N == 1024
#assert ts.sampling == 1024
ts.plot()
def test_data_cosine():
data = data_cosine(N=1024, A=0.1, sampling=1024, freq=200)
def test_datafile():
from spectrum.datasets import dolphin_filename
try:
spectrum_data("testdummy")
assert False
except:
assert True
| cokelaer/spectrum | test/test_datasets.py | Python | bsd-3-clause | 589 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
"""
from collections import namedtuple
from minghu6.internet.proxy_ip import proxy_ip
from minghu6.text.seq_enh import filter_invalid_char
URL_LIST_FILE_PATH = 'URList-{username:s}.txt'
UrlNameTuple = namedtuple('UrlNameTuple', ['url', 'title'])
| minghu6/csdn | csdn/csdn_offline/csdn_offline_common.py | Python | bsd-3-clause | 313 |
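A tiny illustration of the two module-level definitions above; the username, URL and title are made-up values.

entry = UrlNameTuple(url="https://blog.csdn.net/example/article/1", title="Example post")
print(entry.title, entry.url)

print(URL_LIST_FILE_PATH.format(username="someone"))  # URList-someone.txt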
"""Test segzify."""
| ne-sachirou/segzi-tools | segzify/tests/__init__.py | Python | bsd-3-clause | 20 |
# -*- coding: utf-8 -*-
'''
Copyright (c) 2013, LastSeal S.A.
Copyright (c) 2011-2012, Joaquin G. Duo
All rights reserved.
This code is distributed under BSD 3-clause License.
For details check the LICENSE file in the root of the project.
'''
class context_singleton(object):
'''
Singleton pattern decorator.
It provides a singleton for a determined class_ in a determined Context.
So for each Context there will be only once instance of the decorated class.
'''
def __init__(self, class_):
self.class_ = class_
def __call__(self, context=None, *a, **ad):
        if context is None:
            msg = "You should always provide a context for class: %r" % self.class_.__name__
            raise RuntimeError(msg)
if not context.has_config('singleton', self.class_):
context.set_config('singleton', self.class_(context=context, *a, **ad), self.class_)
return context.get_config('singleton', self.class_)
def smokeTestModule():
from simplerpc.common.context.base import ContextBase
class Context(ContextBase):
def _loadInitConfig(self):
pass
ctx = Context('smoke test')
@context_singleton
class Example(object):
def __init__(self, context):
pass
assert Example(ctx) == Example(ctx)
if __name__ == '__main__':
smokeTestModule()
| joaduo/python-simplerpc | simplerpc/common/abstract/decorators/context_singleton.py | Python | bsd-3-clause | 1,380 |
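A sketch of the per-context behaviour of @context_singleton from the entry above: the same context always yields the same instance, while different contexts get different ones. MiniContext is an invented stand-in for simplerpc's ContextBase, implementing only the three methods the decorator relies on.

class MiniContext(object):
    def __init__(self):
        self._cfg = {}
    def has_config(self, section, key):
        return (section, key) in self._cfg
    def set_config(self, section, value, key):
        self._cfg[(section, key)] = value
    def get_config(self, section, key):
        return self._cfg[(section, key)]

@context_singleton
class Service(object):
    def __init__(self, context):
        self.context = context

ctx_a, ctx_b = MiniContext(), MiniContext()
assert Service(ctx_a) is Service(ctx_a)      # cached per context
assert Service(ctx_a) is not Service(ctx_b)  # separate contexts, separate instances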
from django.db.models import Prefetch, prefetch_related_objects
from django.test import TestCase
from .models import Author, Book, Reader
class PrefetchRelatedObjectsTests(TestCase):
"""
Since prefetch_related_objects() is just the inner part of
prefetch_related(), only do basic tests to ensure its API hasn't changed.
"""
@classmethod
def setUpTestData(cls):
cls.book1 = Book.objects.create(title='Poems')
cls.book2 = Book.objects.create(title='Jane Eyre')
cls.book3 = Book.objects.create(title='Wuthering Heights')
cls.book4 = Book.objects.create(title='Sense and Sensibility')
cls.author1 = Author.objects.create(name='Charlotte', first_book=cls.book1)
cls.author2 = Author.objects.create(name='Anne', first_book=cls.book1)
cls.author3 = Author.objects.create(name='Emily', first_book=cls.book1)
cls.author4 = Author.objects.create(name='Jane', first_book=cls.book4)
cls.book1.authors.add(cls.author1, cls.author2, cls.author3)
cls.book2.authors.add(cls.author1)
cls.book3.authors.add(cls.author3)
cls.book4.authors.add(cls.author4)
cls.reader1 = Reader.objects.create(name='Amy')
cls.reader2 = Reader.objects.create(name='Belinda')
cls.reader1.books_read.add(cls.book1, cls.book4)
cls.reader2.books_read.add(cls.book2, cls.book4)
def test_unknown(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertRaises(AttributeError):
prefetch_related_objects([book1], 'unknown_attribute')
def test_m2m_forward(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], 'authors')
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_m2m_reverse(self):
author1 = Author.objects.get(id=self.author1.id)
with self.assertNumQueries(1):
prefetch_related_objects([author1], 'books')
with self.assertNumQueries(0):
self.assertEqual(set(author1.books.all()), {self.book1, self.book2})
def test_foreignkey_forward(self):
authors = list(Author.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(authors, 'first_book')
with self.assertNumQueries(0):
[author.first_book for author in authors]
def test_foreignkey_reverse(self):
books = list(Book.objects.all())
with self.assertNumQueries(1):
prefetch_related_objects(books, 'first_time_authors')
with self.assertNumQueries(0):
[list(book.first_time_authors.all()) for book in books]
def test_m2m_then_m2m(self):
"""
We can follow a m2m and another m2m.
"""
authors = list(Author.objects.all())
with self.assertNumQueries(2):
prefetch_related_objects(authors, 'books__read_by')
with self.assertNumQueries(0):
self.assertEqual(
[
[[str(r) for r in b.read_by.all()] for b in a.books.all()]
for a in authors
],
[
[['Amy'], ['Belinda']], # Charlotte - Poems, Jane Eyre
[['Amy']], # Anne - Poems
[['Amy'], []], # Emily - Poems, Wuthering Heights
                    [['Amy', 'Belinda']], # Jane - Sense and Sensibility
]
)
def test_prefetch_object(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2, self.author3})
def test_prefetch_object_to_attr(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects([book1], Prefetch('authors', to_attr='the_authors'))
with self.assertNumQueries(0):
self.assertEqual(set(book1.the_authors), {self.author1, self.author2, self.author3})
def test_prefetch_queryset(self):
book1 = Book.objects.get(id=self.book1.id)
with self.assertNumQueries(1):
prefetch_related_objects(
[book1],
Prefetch('authors', queryset=Author.objects.filter(id__in=[self.author1.id, self.author2.id]))
)
with self.assertNumQueries(0):
self.assertEqual(set(book1.authors.all()), {self.author1, self.author2})
| yephper/django | tests/prefetch_related/test_prefetch_related_objects.py | Python | bsd-3-clause | 4,853 |
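A hedged sketch of prefetch_related_objects() used outside a queryset, mirroring the tests above; Book and Author are the test models from that entry and a configured Django project is assumed.

from django.db.models import Prefetch, prefetch_related_objects

books = list(Book.objects.all())  # a plain list of instances, not a queryset

# One extra query fetches the related authors for every book in the list.
prefetch_related_objects(books, Prefetch('authors', to_attr='cached_authors'))

for book in books:
    print(book.title, [a.name for a in book.cached_authors])  # no further queries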
# Major, Minor
VERSION = (1, 4)
| duointeractive/python-bluefin | bluefin/__init__.py | Python | bsd-3-clause | 31 |
from zeit.cms.i18n import MessageFactory as _
from zope.cachedescriptors.property import Lazy as cachedproperty
import os.path
import zeit.cms.browser.view
import zeit.cms.content.interfaces
import zeit.cms.interfaces
import zeit.content.image.interfaces
import zeit.content.video.interfaces
import zeit.edit.browser.form
import zeit.edit.browser.landing
import zeit.edit.browser.view
import zeit.newsletter.interfaces
import zope.component
import zope.formlib.form
class LandingZoneBase(zeit.edit.browser.landing.LandingZone):
uniqueId = zeit.edit.browser.view.Form('uniqueId')
block_type = 'teaser'
def initialize_block(self):
content = zeit.cms.interfaces.ICMSContent(self.uniqueId)
self.block.reference = content
class GroupLandingZone(LandingZoneBase):
"""Handler to drop objects to the body's landing zone."""
order = 0
class TeaserLandingZone(LandingZoneBase):
"""Handler to drop objects after other objects."""
order = 'after-context'
class Teaser(zeit.cms.browser.view.Base):
@cachedproperty
def metadata(self):
return zeit.cms.content.interfaces.ICommonMetadata(
self.context.reference, None)
@cachedproperty
def image(self):
# XXX copy&paste&tweak of zeit.content.cp.browser.blocks.teaser.Display
content = self.context.reference
if content is None:
return
if zeit.content.video.interfaces.IVideoContent.providedBy(content):
return content.thumbnail
images = zeit.content.image.interfaces.IImages(content, None)
if images is None:
preview = zope.component.queryMultiAdapter(
(content, self.request), name='preview')
if preview:
return self.url(preview)
return
if not images.image:
return
group = images.image
for name in group:
basename, ext = os.path.splitext(name)
if basename.endswith('148x84'):
image = group[name]
return self.url(image, '@@raw')
class Advertisement(zeit.cms.browser.view.Base):
@cachedproperty
def image(self):
if not self.context.image:
return
return self.url(self.context.image, '@@raw')
class GroupTitle(zeit.edit.browser.form.InlineForm):
legend = None
prefix = 'group'
undo_description = _('edit group title')
form_fields = zope.formlib.form.FormFields(
zeit.newsletter.interfaces.IGroup).select('title')
class Empty(object):
def render(self):
return u''
| ZeitOnline/zeit.newsletter | src/zeit/newsletter/browser/edit.py | Python | bsd-3-clause | 2,579 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
# Note: this module is tested by a unit test config_validation_test.py,
# rather than recipe simulation tests.
_BISECT_CONFIG_SCHEMA = {
'command': {'type': 'string', 'required': True},
'good_revision': {'type': 'revision', 'required': True},
'bad_revision': {'type': 'revision', 'required': True},
'bisect_bot': {'type': 'string'},
'metric': {'type': 'string'},
'bug_id': {'type': 'integer'},
'repeat_count': {'type': 'integer'},
'max_time_minutes': {'type': 'integer'},
'bisect_mode': {'type': 'string',
'choices': ['mean', 'return_code', 'std_dev']},
'gs_bucket': {'type': 'string'},
'builder_host': {'type': 'string'},
'builder_port': {'type': 'integer'},
'test_type': {'type': 'string'},
'improvement_direction': {'type': 'integer'},
'recipe_tester_name': {'type': 'string'},
'try_job_id': {'type': 'integer'},
}
class ValidationFail(Exception):
"""An exception class that represents a failure to validate."""
def validate_bisect_config(config, schema=None):
"""Checks the correctness of the given bisect job config."""
schema = _BISECT_CONFIG_SCHEMA if schema is None else schema
for key in set(schema):
validate_key(config, schema, key)
if 'good_revision' in schema and 'bad_revision' in schema:
_validate_revisions(config.get('good_revision'), config.get('bad_revision'))
if 'bisect_mode' in schema and 'metric' in schema:
_validate_metric(config.get('bisect_mode'), config.get('metric'))
def validate_key(config, schema, key): # pragma: no cover
"""Checks the correctness of the given field in a config."""
if schema[key].get('required') and config.get(key) is None:
raise ValidationFail('Required key "%s" missing.' % key)
if config.get(key) is None:
return # Optional field.
value = config[key]
field_type = schema[key].get('type')
if field_type == 'string':
_validate_string(value, key)
elif field_type == 'integer':
_validate_integer(value, key)
elif field_type == 'revision':
_validate_revision(value, key)
elif field_type == 'boolean':
_validate_boolean(value, key)
if 'choices' in schema[key] and value not in schema[key]['choices']:
_fail(value, key)
def _fail(value, key):
raise ValidationFail('Invalid value %r for "%s".' % (value, key))
def _validate_string(value, key): # pragma: no cover
if not isinstance(value, basestring):
_fail(value, key)
def _validate_revision(value, key): # pragma: no cover
s = str(value)
if not (s.isdigit() or re.match('^[0-9A-Fa-f]{40}$', s)):
_fail(value, key)
def _validate_integer(value, key): # pragma: no cover
try:
int(value)
except ValueError:
_fail(value, key)
def _validate_boolean(value, key): # pragma: no cover
if value not in (True, False):
_fail(value, key)
def _validate_revisions(good_revision, bad_revision): # pragma: no cover
try:
earlier = int(good_revision)
later = int(bad_revision)
except ValueError:
return # The revisions could be sha1 hashes.
if earlier >= later:
    raise ValidationFail('Order of good_revision (%d) and bad_revision (%d) '
                         'is reversed.' % (earlier, later))
def _validate_metric(bisect_mode, metric): # pragma: no cover
if bisect_mode not in ('mean', 'std_dev'):
return
if not (isinstance(metric, basestring) and metric.count('/') == 1):
raise ValidationFail('Invalid value for "metric": %s' % metric)
| eunchong/build | scripts/slave/recipe_modules/auto_bisect/config_validation.py | Python | bsd-3-clause | 3,647 |
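An example of running the validator above on a minimal bisect config; the command and revision numbers are made-up values.

config = {
    'command': 'tools/perf/run_benchmark sunspider',
    'good_revision': '342670',
    'bad_revision': '342680',
    'repeat_count': 10,
    'bisect_mode': 'return_code',
}
try:
    validate_bisect_config(config)
    print('config ok')
except ValidationFail as exc:
    print('invalid config: %s' % exc)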
def print_from_queue(q):
"""
prints values read from queue q to
standard out.
"""
while True:
v = q.get()
if v is None:
# exit loop
return
else:
print (str(v))
class queue_to_file(object):
"""
    self.actuate(q) puts values read from the queue q
    into the file named self.filename
"""
def __init__(self, filename, timeout=0):
self.filename = filename
self.timeout = timeout
def actuate(self, q):
with open(self.filename, 'w') as the_file:
while True:
try:
v = q.get(timeout=self.timeout)
except:
# No more input for this actuator
return
if v is None:
# exit loop
return
else:
the_file.write(str(v) + '\n')
| AssembleSoftware/IoTPy | IoTPy/agent_types/actuators_simple.py | Python | bsd-3-clause | 930 |
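A small driver for the two actuators above: values pushed onto a queue are drained to stdout or to a file, and a None sentinel terminates each loop. The queue wiring and the output file name are assumptions about how callers use these helpers.

try:
    import queue            # Python 3
except ImportError:
    import Queue as queue   # Python 2

q = queue.Queue()
for v in [1, 2, 3, None]:   # None tells the consumer to stop
    q.put(v)
print_from_queue(q)          # prints 1, 2, 3 and returns

q2 = queue.Queue()
for v in ['a', 'b', None]:
    q2.put(v)
queue_to_file('values.txt', timeout=1).actuate(q2)  # writes 'a' and 'b' to values.txt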
from __future__ import absolute_import
from exam import fixture
from six.moves.urllib.parse import parse_qs, urlparse
from sentry.models import ApiApplication, ApiAuthorization, ApiGrant, ApiToken
from sentry.testutils import TestCase
class OAuthAuthorizeCodeTest(TestCase):
@fixture
def path(self):
return "/oauth/authorize/"
def setUp(self):
super(OAuthAuthorizeCodeTest, self).setUp()
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris="https://example.com"
)
def test_missing_response_type(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?redirect_uri={}&client_id={}".format(
self.path, "https://example.com", self.application.client_id
)
)
assert resp.status_code == 302
assert resp["Location"] == "https://example.com?error=unsupported_response_type"
def test_invalid_response_type(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=foobar&redirect_uri={}&client_id={}".format(
self.path, "https://example.com", self.application.client_id
)
)
assert resp.status_code == 302
assert resp["Location"] == "https://example.com?error=unsupported_response_type"
def test_missing_client_id(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=code&redirect_uri={}".format(self.path, "https://example.com")
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-error.html")
assert resp.context["error"] == "Missing or invalid <em>client_id</em> parameter."
def test_invalid_scope(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=code&client_id={}&scope=foo".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 302
assert resp["Location"] == "https://example.com?error=invalid_scope"
def test_invalid_redirect_uri(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=code&redirect_uri=https://google.com&client_id={}".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-error.html")
assert resp.context["error"] == "Missing or invalid <em>redirect_uri</em> parameter."
def test_minimal_params_approve_flow(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=code&client_id={}".format(self.path, self.application.client_id)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.application.get_default_redirect_uri()
assert grant.application == self.application
assert not grant.get_scopes()
assert resp.status_code == 302
assert resp["Location"] == u"https://example.com?code={}".format(grant.code)
authorization = ApiAuthorization.objects.get(user=self.user, application=self.application)
assert authorization.get_scopes() == grant.get_scopes()
def test_minimal_params_deny_flow(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=code&client_id={}".format(self.path, self.application.client_id)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "deny"})
assert resp.status_code == 302
assert resp["Location"] == "https://example.com?error=access_denied"
assert not ApiGrant.objects.filter(user=self.user).exists()
assert not ApiToken.objects.filter(user=self.user).exists()
def test_rich_params(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=code&client_id={}&scope=org%3Aread&state=foo".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.application.get_default_redirect_uri()
assert grant.application == self.application
assert grant.get_scopes() == ["org:read"]
assert resp.status_code == 302
# XXX: Compare parsed query strings to avoid ordering differences
# between py2/3
assert parse_qs(urlparse(resp["Location"]).query) == parse_qs(
u"state=foo&code={}".format(grant.code)
)
assert not ApiToken.objects.filter(user=self.user).exists()
def test_approve_flow_bypass_prompt(self):
self.login_as(self.user)
ApiAuthorization.objects.create(user=self.user, application=self.application)
resp = self.client.get(
u"{}?response_type=code&client_id={}".format(self.path, self.application.client_id)
)
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.application.get_default_redirect_uri()
assert grant.application == self.application
assert not grant.get_scopes()
assert resp.status_code == 302
assert resp["Location"] == u"https://example.com?code={}".format(grant.code)
def test_approve_flow_force_prompt(self):
self.login_as(self.user)
ApiAuthorization.objects.create(user=self.user, application=self.application)
resp = self.client.get(
u"{}?response_type=code&client_id={}&force_prompt=1".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
def test_approve_flow_requires_prompt_new_scope(self):
self.login_as(self.user)
authorization = ApiAuthorization.objects.create(
user=self.user, application=self.application, scope_list=["org:write"]
)
resp = self.client.get(
u"{}?response_type=code&client_id={}&scope=org:read".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
authorization = ApiAuthorization.objects.get(id=authorization.id)
assert sorted(authorization.get_scopes()) == ["org:read", "org:write"]
def test_approve_flow_non_scope_set(self):
self.login_as(self.user)
ApiAuthorization.objects.create(user=self.user, application=self.application)
resp = self.client.get(
u"{}?response_type=code&client_id={}&scope=member:read member:admin".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
assert resp.context["scopes"] == ["member:read", "member:admin"]
assert resp.context["permissions"] == [
"Read, write, and admin access to organization members."
]
def test_unauthenticated_basic_auth(self):
full_path = u"{}?response_type=code&client_id={}".format(
self.path, self.application.client_id
)
resp = self.client.get(full_path)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/login.html")
assert resp.context["banner"] == u"Connect Sentry to {}".format(self.application.name)
resp = self.client.post(
full_path, {"username": self.user.username, "password": "admin", "op": "login"}
)
self.assertRedirects(resp, full_path)
resp = self.client.get(full_path)
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(full_path, {"op": "approve"})
grant = ApiGrant.objects.get(user=self.user)
assert grant.redirect_uri == self.application.get_default_redirect_uri()
assert grant.application == self.application
assert not grant.get_scopes()
assert resp.status_code == 302
assert resp["Location"] == u"https://example.com?code={}".format(grant.code)
authorization = ApiAuthorization.objects.get(user=self.user, application=self.application)
assert authorization.get_scopes() == grant.get_scopes()
class OAuthAuthorizeTokenTest(TestCase):
@fixture
def path(self):
return "/oauth/authorize/"
def setUp(self):
super(OAuthAuthorizeTokenTest, self).setUp()
self.application = ApiApplication.objects.create(
owner=self.user, redirect_uris="https://example.com"
)
def test_missing_response_type(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?redirect_uri={}&client_id={}".format(
self.path, "https://example.com", self.application.client_id
)
)
assert resp.status_code == 302
assert resp["Location"] == "https://example.com?error=unsupported_response_type"
def test_invalid_response_type(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=foobar&redirect_uri={}&client_id={}".format(
self.path, "https://example.com", self.application.client_id
)
)
assert resp.status_code == 302
assert resp["Location"] == "https://example.com?error=unsupported_response_type"
def test_missing_client_id(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=token&redirect_uri={}".format(self.path, "https://example.com")
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-error.html")
assert resp.context["error"] == "Missing or invalid <em>client_id</em> parameter."
def test_invalid_scope(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=token&client_id={}&scope=foo".format(
self.path, self.application.client_id
)
)
assert resp.status_code == 302
assert resp["Location"] == "https://example.com#error=invalid_scope"
def test_minimal_params_approve_flow(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=token&client_id={}".format(self.path, self.application.client_id)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "approve"})
assert not ApiGrant.objects.filter(user=self.user).exists()
token = ApiToken.objects.get(user=self.user)
assert token.application == self.application
assert not token.get_scopes()
assert not token.refresh_token
assert resp.status_code == 302
location, fragment = resp["Location"].split("#", 1)
assert location == "https://example.com"
fragment = parse_qs(fragment)
assert fragment["access_token"] == [token.token]
assert fragment["token_type"] == ["bearer"]
assert "refresh_token" not in fragment
assert fragment["expires_in"]
assert fragment["token_type"] == ["bearer"]
def test_minimal_params_code_deny_flow(self):
self.login_as(self.user)
resp = self.client.get(
u"{}?response_type=token&client_id={}".format(self.path, self.application.client_id)
)
assert resp.status_code == 200
self.assertTemplateUsed("sentry/oauth-authorize.html")
assert resp.context["application"] == self.application
resp = self.client.post(self.path, {"op": "deny"})
assert resp.status_code == 302
location, fragment = resp["Location"].split("#", 1)
assert location == "https://example.com"
fragment = parse_qs(fragment)
assert fragment == {"error": ["access_denied"]}
assert not ApiToken.objects.filter(user=self.user).exists()
| beeftornado/sentry | tests/sentry/web/frontend/test_oauth_authorize.py | Python | bsd-3-clause | 13,174 |
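The tests above avoid brittle string comparisons of redirect URLs by parsing the query string, so parameter order does not matter; a standalone illustration of the same technique:

from six.moves.urllib.parse import parse_qs, urlparse

url_a = 'https://example.com/cb?state=foo&code=abc123'
url_b = 'https://example.com/cb?code=abc123&state=foo'

assert parse_qs(urlparse(url_a).query) == parse_qs(urlparse(url_b).query)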
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.test.client import RequestFactory
from mock import patch
from olympia import amo, core
from olympia.addons import forms
from olympia.addons.models import Addon, Category
from olympia.amo.tests import TestCase, addon_factory, req_factory_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import rm_local_tmp_dir
from olympia.tags.models import AddonTag, Tag
from olympia.users.models import UserProfile
class TestAddonFormSupport(TestCase):
def test_bogus_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'javascript://something.com'}, request=None)
assert not form.is_valid()
assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
def test_ftp_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'ftp://foo.com'}, request=None)
assert not form.is_valid()
assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
def test_http_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'http://foo.com'}, request=None)
assert form.is_valid()
class FormsTest(TestCase):
fixtures = ('base/addon_3615', 'base/addon_3615_categories',
'addons/denied')
def setUp(self):
super(FormsTest, self).setUp()
self.existing_name = 'Delicious Bookmarks'
self.non_existing_name = 'Does Not Exist'
self.error_msg = 'This name is already in use. Please choose another.'
self.request = req_factory_factory('/')
def test_locales(self):
form = forms.AddonFormDetails(request=self.request)
assert form.fields['default_locale'].choices[0][0] == 'af'
def test_slug_deny(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic({'slug': 'submit'}, request=self.request,
instance=delicious)
assert not form.is_valid()
assert form.errors['slug'] == (
[u'The slug cannot be "submit". Please choose another.'])
def test_name_trademark_mozilla(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Mozilla', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert not form.is_valid()
assert dict(form.errors['name'])['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or Firefox trademarks.')
def test_name_trademark_firefox(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Firefox', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert not form.is_valid()
assert dict(form.errors['name'])['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or Firefox trademarks.')
def test_name_trademark_allowed_for_prefix(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious for Mozilla', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert form.is_valid()
def test_name_no_trademark(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Dumdidum', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert form.is_valid()
def test_bogus_homepage(self):
form = forms.AddonFormDetails(
{'homepage': 'javascript://something.com'}, request=self.request)
assert not form.is_valid()
assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
def test_ftp_homepage(self):
form = forms.AddonFormDetails(
{'homepage': 'ftp://foo.com'}, request=self.request)
assert not form.is_valid()
assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
def test_homepage_is_not_required(self):
delicious = Addon.objects.get()
form = forms.AddonFormDetails(
{'default_locale': 'en-US'},
request=self.request, instance=delicious)
assert form.is_valid()
def test_slug_isdigit(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic({'slug': '123'}, request=self.request,
instance=delicious)
assert not form.is_valid()
assert form.errors['slug'] == (
[u'The slug cannot be "123". Please choose another.'])
class TestTagsForm(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestTagsForm, self).setUp()
self.addon = Addon.objects.get(pk=3615)
category = Category.objects.get(pk=22)
category.db_name = 'test'
category.save()
self.data = {
'summary': str(self.addon.summary),
'name': str(self.addon.name),
'slug': self.addon.slug,
}
self.user = self.addon.authors.all()[0]
core.set_user(self.user)
self.request = req_factory_factory('/')
def add_tags(self, tags):
data = self.data.copy()
data.update({'tags': tags})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.is_valid()
form.save(self.addon)
return form
def get_tag_text(self):
return [t.tag_text for t in self.addon.tags.all()]
def test_tags(self):
self.add_tags('foo, bar')
assert self.get_tag_text() == ['bar', 'foo']
def test_tags_xss(self):
self.add_tags('<script>alert("foo")</script>, bar')
assert self.get_tag_text() == ['bar', 'scriptalertfooscript']
def test_tags_case_spaces(self):
self.add_tags('foo, bar')
self.add_tags('foo, bar , Bar, BAR, b a r ')
assert self.get_tag_text() == ['b a r', 'bar', 'foo']
def test_tags_spaces(self):
self.add_tags('foo, bar beer')
assert self.get_tag_text() == ['bar beer', 'foo']
def test_tags_unicode(self):
self.add_tags(u'Österreich')
assert self.get_tag_text() == [u'Österreich'.lower()]
def add_restricted(self, *args):
if not args:
args = ['i_am_a_restricted_tag']
for arg in args:
tag = Tag.objects.create(tag_text=arg, restricted=True)
AddonTag.objects.create(tag=tag, addon=self.addon)
def test_tags_restricted(self):
self.add_restricted()
self.add_tags('foo, bar')
form = forms.AddonFormBasic(data=self.data, request=self.request,
instance=self.addon)
assert form.fields['tags'].initial == 'bar, foo'
assert self.get_tag_text() == ['bar', 'foo', 'i_am_a_restricted_tag']
self.add_tags('')
assert self.get_tag_text() == ['i_am_a_restricted_tag']
def test_tags_error(self):
self.add_restricted('i_am_a_restricted_tag', 'sdk')
data = self.data.copy()
data.update({'tags': 'i_am_a_restricted_tag'})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.errors['tags'][0] == (
'"i_am_a_restricted_tag" is a reserved tag and cannot be used.')
data.update({'tags': 'i_am_a_restricted_tag, sdk'})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.errors['tags'][0] == (
'"i_am_a_restricted_tag", "sdk" are reserved tags and'
' cannot be used.')
@patch('olympia.access.acl.action_allowed')
def test_tags_admin_restricted(self, action_allowed):
action_allowed.return_value = True
self.add_restricted('i_am_a_restricted_tag')
self.add_tags('foo, bar')
assert self.get_tag_text() == ['bar', 'foo']
self.add_tags('foo, bar, i_am_a_restricted_tag')
assert self.get_tag_text() == ['bar', 'foo', 'i_am_a_restricted_tag']
form = forms.AddonFormBasic(data=self.data, request=self.request,
instance=self.addon)
assert form.fields['tags'].initial == 'bar, foo, i_am_a_restricted_tag'
@patch('olympia.access.acl.action_allowed')
def test_tags_admin_restricted_count(self, action_allowed):
action_allowed.return_value = True
self.add_restricted()
self.add_tags('i_am_a_restricted_tag, %s' % (', '.join('tag-test-%s' %
i for i in range(0, 20))))
def test_tags_restricted_count(self):
self.add_restricted()
self.add_tags(', '.join('tag-test-%s' % i for i in range(0, 20)))
def test_tags_slugified_count(self):
self.add_tags(', '.join('tag-test' for i in range(0, 21)))
assert self.get_tag_text() == ['tag-test']
def test_tags_limit(self):
self.add_tags(' %s' % ('t' * 128))
def test_tags_long(self):
tag = ' -%s' % ('t' * 128)
data = self.data.copy()
data.update({"tags": tag})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert not form.is_valid()
assert form.errors['tags'] == [
'All tags must be 128 characters or less after invalid characters'
' are removed.']
class TestIconForm(TestCase):
fixtures = ['base/addon_3615']
# TODO: AddonFormMedia save() method could do with cleaning up
# so this isn't necessary
def setUp(self):
super(TestIconForm, self).setUp()
self.temp_dir = tempfile.mkdtemp(dir=settings.TMP_PATH)
self.addon = Addon.objects.get(pk=3615)
class DummyRequest:
FILES = None
self.request = DummyRequest()
self.icon_path = os.path.join(settings.TMP_PATH, 'icon')
if not os.path.exists(self.icon_path):
os.makedirs(self.icon_path)
def tearDown(self):
rm_local_tmp_dir(self.temp_dir)
super(TestIconForm, self).tearDown()
def get_icon_paths(self):
path = os.path.join(self.addon.get_icon_dir(), str(self.addon.id))
return ['%s-%s.png' % (path, size) for size in amo.ADDON_ICON_SIZES]
@patch('olympia.addons.models.Addon.get_icon_dir')
def testIconUpload(self, get_icon_dir):
# TODO(gkoberger): clarify this please.
# We no longer use AddonFormMedia to upload icons, so
# skipping until I can ask andym what the point of this
# test is. Additionally, it's called "TestIconRemoval",
# but it doesn't seem to remove icons.
return
get_icon_dir.return_value = self.temp_dir
for path in self.get_icon_paths():
assert not os.path.exists(path)
img = get_image_path('non-animated.png')
data = {'icon_upload': img, 'icon_type': 'text/png'}
self.request.FILES = {'icon_upload': open(img)}
form = forms.AddonFormMedia(data=data, request=self.request,
instance=self.addon)
assert form.is_valid()
form.save(self.addon)
for path in self.get_icon_paths():
assert os.path.exists(path)
@patch('olympia.amo.models.ModelBase.update')
def test_icon_modified(self, update_mock):
name = 'transparent.png'
form = forms.AddonFormMedia({'icon_upload_hash': name},
request=self.request,
instance=self.addon)
dest = os.path.join(self.icon_path, name)
with storage.open(dest, 'w') as f:
shutil.copyfileobj(open(get_image_path(name)), f)
assert form.is_valid()
form.save(addon=self.addon)
assert update_mock.called
class TestCategoryForm(TestCase):
def test_no_possible_categories(self):
Category.objects.create(type=amo.ADDON_SEARCH,
application=amo.FIREFOX.id)
addon = addon_factory(type=amo.ADDON_SEARCH)
request = req_factory_factory('/')
form = forms.CategoryFormSet(addon=addon, request=request)
apps = [f.app for f in form.forms]
assert apps == [amo.FIREFOX]
class TestThemeForm(TestCase):
# Don't save image, we use a fake one.
@patch('olympia.addons.forms.save_theme')
def test_long_author_or_display_username(self, mock_save_theme):
# Bug 1181751.
user = UserProfile.objects.create(email='foo@bar.com',
username='a' * 255,
display_name='b' * 255)
request = RequestFactory()
request.user = user
cat = Category.objects.create(type=amo.ADDON_PERSONA)
form = forms.ThemeForm({
'name': 'my theme',
'slug': 'my-theme',
'category': cat.pk,
'header': 'some_file.png',
'agreed': True,
'header_hash': 'hash',
'license': 1}, request=request)
assert form.is_valid()
# Make sure there's no database issue, like too long data for the
# author or display_username fields.
form.save()
|
lavish205/olympia
|
src/olympia/addons/tests/test_forms.py
|
Python
|
bsd-3-clause
| 13,654
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras.optimizers import SGD
from keras.optimizers import Adam
from keras.optimizers import adadelta
from keras.optimizers import rmsprop
from keras.layers import Layer
from keras import backend as K
K.set_image_dim_ordering('tf')
import socket
import os
# -------------------------------------------------
# Background config:
hostname = socket.gethostname()
if hostname == 'baymax':
path_var = 'baymax/'
elif hostname == 'walle':
path_var = 'walle/'
elif hostname == 'bender':
path_var = 'bender/'
else:
path_var = 'zhora/'
DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_128/train/'
# DATA_DIR = '/local_home/data/KITTI_data/'
HD_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_256/train/'
VAL_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_128/val/'
VAL_HD_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_256/val/'
TEST_DATA_DIR = '/local_home/JAAD_Dataset/iros/resized_imgs_128/test/'
MODEL_DIR = './../' + path_var + 'models'
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
CHECKPOINT_DIR = './../' + path_var + 'checkpoints'
if not os.path.exists(CHECKPOINT_DIR):
os.mkdir(CHECKPOINT_DIR)
ATTN_WEIGHTS_DIR = './../' + path_var + 'attn_weights'
if not os.path.exists(ATTN_WEIGHTS_DIR):
os.mkdir(ATTN_WEIGHTS_DIR)
GEN_IMAGES_DIR = './../' + path_var + 'generated_images'
if not os.path.exists(GEN_IMAGES_DIR):
os.mkdir(GEN_IMAGES_DIR)
CLA_GEN_IMAGES_DIR = GEN_IMAGES_DIR + '/cla_gen/'
if not os.path.exists(CLA_GEN_IMAGES_DIR):
os.mkdir(CLA_GEN_IMAGES_DIR)
LOG_DIR = './../' + path_var + 'logs'
if not os.path.exists(LOG_DIR):
os.mkdir(LOG_DIR)
TF_LOG_DIR = './../' + path_var + 'tf_logs'
if not os.path.exists(TF_LOG_DIR):
os.mkdir(TF_LOG_DIR)
TF_LOG_GAN_DIR = './../' + path_var + 'tf_gan_logs'
if not os.path.exists(TF_LOG_GAN_DIR):
os.mkdir(TF_LOG_GAN_DIR)
TEST_RESULTS_DIR = './../' + path_var + 'test_results'
if not os.path.exists(TEST_RESULTS_DIR):
os.mkdir(TEST_RESULTS_DIR)
PRINT_MODEL_SUMMARY = True
SAVE_MODEL = True
PLOT_MODEL = True
SAVE_GENERATED_IMAGES = True
SHUFFLE = True
VIDEO_LENGTH = 30
IMG_SIZE = (128, 128, 3)
ADVERSARIAL = False
BUF_SIZE = 10
LOSS_WEIGHTS = [1, 1]
ATTN_COEFF = 0
KL_COEFF = 0
# -------------------------------------------------
# Network configuration:
print ("Loading network/training configuration.")
print ("Config file: " + str(__name__))
BATCH_SIZE = 7
NB_EPOCHS_AUTOENCODER = 30
NB_EPOCHS_GAN = 0
OPTIM_A = Adam(lr=0.0001, beta_1=0.5)
OPTIM_G = Adam(lr=0.00001, beta_1=0.5)
# OPTIM_D = Adam(lr=0.000001, beta_1=0.5)
# OPTIM_D = SGD(lr=0.000001, momentum=0.5, nesterov=True)
OPTIM_D = rmsprop(lr=0.000001)
lr_schedule = [10, 20, 30] # epoch_step
def schedule(epoch_idx):
if (epoch_idx + 1) < lr_schedule[0]:
return 0.0001
elif (epoch_idx + 1) < lr_schedule[1]:
return 0.0001 # lr_decay_ratio = 10
elif (epoch_idx + 1) < lr_schedule[2]:
return 0.00001
return 0.000001
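# Hedged usage sketch (not part of the original config): schedule() above has
# the epoch -> learning-rate signature expected by Keras' LearningRateScheduler
# callback, so a training script importing this config could register it as a
# callback.  The LR_SCHEDULER name is an assumption; it would be wired up via
# model.fit(..., callbacks=[LR_SCHEDULER]).
from keras.callbacks import LearningRateScheduler
LR_SCHEDULER = LearningRateScheduler(schedule)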
|
AutonomyLab/deep_intent
|
code/autoencoder_model/scripts/config_nmta.py
|
Python
|
bsd-3-clause
| 3,079
|
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from actstream.models import Follow, Action, user_stream, actor_stream, model_stream
@login_required
def follow_unfollow(request, content_type_id, object_id, follow=True):
"""
    Creates a follow relationship such that ``request.user`` starts following the actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
actor = get_object_or_404(ctype.model_class(), pk=object_id)
lookup = {
'user': request.user,
'content_type': ctype,
'object_id': object_id,
}
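    # The two return statements below build throwaway HttpResponse subclasses
    # via type() so the empty response carries a 201 Created / 204 No Content
    # status code without defining dedicated response classes.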
if follow:
Follow.objects.get_or_create(**lookup)
return type('Created', (HttpResponse,), {'status_code':201})()
Follow.objects.get(**lookup).delete()
return type('Deleted', (HttpResponse,), {'status_code':204})()
@login_required
def stream(request):
"""
Index page for authenticated user's activity stream. (Eg: Your feed at github.com)
"""
return render_to_response('activity/actor.html', {
'ctype': ContentType.objects.get_for_model(request.user),
'actor':request.user,'action_list':user_stream(request.user)
}, context_instance=RequestContext(request))
def followers(request, content_type_id, object_id):
"""
Creates a listing of ``User``s that follow the actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
follows = Follow.objects.filter(content_type=ctype, object_id=object_id)
actor = get_object_or_404(ctype.model_class(), pk=object_id)
return render_to_response('activity/followers.html', {
'followers': [f.user for f in follows], 'actor':actor
}, context_instance=RequestContext(request))
def user(request, username):
"""
``User`` focused activity stream. (Eg: Profile page twitter.com/justquick)
"""
user = get_object_or_404(User, username=username)
return render_to_response('activity/actor.html', {
'ctype': ContentType.objects.get_for_model(User),
'actor':user,'action_list':actor_stream(user)
}, context_instance=RequestContext(request))
def detail(request, action_id):
"""
``Action`` detail view (pretty boring, mainly used for get_absolute_url)
"""
return render_to_response('activity/detail.html', {
'action': get_object_or_404(Action, pk=action_id)
}, context_instance=RequestContext(request))
def actor(request, content_type_id, object_id):
"""
``Actor`` focused activity stream for actor defined by ``content_type_id``, ``object_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
actor = get_object_or_404(ctype.model_class(), pk=object_id)
return render_to_response('activity/actor.html', {
'action_list': actor_stream(actor), 'actor':actor,'ctype':ctype
}, context_instance=RequestContext(request))
def model(request, content_type_id):
"""
    ``Model`` focused activity stream for the model class defined by ``content_type_id``
"""
ctype = get_object_or_404(ContentType, pk=content_type_id)
actor = ctype.model_class()
return render_to_response('activity/actor.html', {
'action_list': model_stream(actor),'ctype':ctype,'actor':ctype#._meta.verbose_name_plural.title()
}, context_instance=RequestContext(request))
|
netconstructor/django-activity-stream
|
actstream/views.py
|
Python
|
bsd-3-clause
| 3,684
|
import os
from setuptools import setup
from setuptools import find_packages
version = '0.1'
shortdesc = "Klarna Payment for bda.plone.shop"
setup(
name='bda.plone.klarnapayment',
version=version,
description=shortdesc,
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
author='Espen Moe-Nilssen',
author_email='espen@medialog.no',
license='GNU General Public Licence',
packages=find_packages('src'),
package_dir = {'': 'src'},
namespace_packages=['bda', 'bda.plone'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'Plone',
'bda.plone.shop',
'klarnacheckout',
],
extras_require={
'test': [
'plone.app.testing',
]
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
|
espenmn/bda.plone.klarnapayment
|
setup.py
|
Python
|
bsd-3-clause
| 1,088
|
import importlib
import inspect
from celery import task
from django.conf import settings
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk as es7_bulk
from elasticsearch.helpers.errors import BulkIndexError
from elasticsearch_dsl import Document, UpdateByQuery, analyzer, char_filter, token_filter
from kitsune.search import config
def _insert_custom_filters(analyzer_name, filter_list, char=False):
"""
Takes a list containing in-built filters (as strings), and the settings for custom filters
(as dicts). Turns the dicts into instances of `token_filter` or `char_filter` depending
on the value of the `char` argument.
"""
def mapping_func(position_filter_tuple):
position, filter = position_filter_tuple
if type(filter) is dict:
prefix = analyzer_name
default_filters = config.ES_DEFAULT_ANALYZER["char_filter" if char else "filter"]
if filter in default_filters:
# detect if this filter exists in the default analyzer
# if it does use the same name as the default
# to avoid defining the same filter for each locale
prefix = config.ES_DEFAULT_ANALYZER_NAME
position = default_filters.index(filter)
name = f'{prefix}_{position}_{filter["type"]}'
if char:
return char_filter(name, **filter)
return token_filter(name, **filter)
return filter
return list(map(mapping_func, enumerate(filter_list)))
def _create_synonym_graph_filter(synonym_file_name):
filter_name = f"{synonym_file_name}_synonym_graph"
return token_filter(
filter_name,
type="synonym_graph",
synonyms_path=f"synonyms/{synonym_file_name}.txt",
# we must use "true" instead of True to work around an elastic-dsl bug
expand="true",
lenient="true",
updateable="true",
)
def es_analyzer_for_locale(locale, search_analyzer=False):
"""Pick an appropriate analyzer for a given locale.
If no analyzer is defined for `locale` or the locale analyzer uses a plugin
    but plugin use is disabled in settings, return an analyzer named "default_sumo".
"""
name = ""
analyzer_config = config.ES_LOCALE_ANALYZERS.get(locale)
if not analyzer_config or (analyzer_config.get("plugin") and not settings.ES_USE_PLUGINS):
name = config.ES_DEFAULT_ANALYZER_NAME
analyzer_config = {}
# use default values from ES_DEFAULT_ANALYZER if not overridden
# using python 3.9's dict union operator
analyzer_config = config.ES_DEFAULT_ANALYZER | analyzer_config
# turn dictionaries into `char_filter` and `token_filter` instances
filters = _insert_custom_filters(name or locale, analyzer_config["filter"])
char_filters = _insert_custom_filters(
name or locale, analyzer_config["char_filter"], char=True
)
if search_analyzer:
# create a locale-specific search analyzer, even if the index-time analyzer is
# `sumo_default`. we do this so that we can adjust the synonyms used in any locale,
# even if it doesn't have a custom analysis chain set up, without having to re-index
name = locale + "_search_analyzer"
filters.append(_create_synonym_graph_filter(config.ES_ALL_SYNONYMS_NAME))
filters.append(_create_synonym_graph_filter(locale))
return analyzer(
name or locale,
tokenizer=analyzer_config["tokenizer"],
filter=filters,
char_filter=char_filters,
)
def es7_client(**kwargs):
"""Return an ES7 Elasticsearch client"""
# prefer a cloud_id if available
if es7_cloud_id := settings.ES7_CLOUD_ID:
kwargs.update({"cloud_id": es7_cloud_id, "http_auth": settings.ES7_HTTP_AUTH})
else:
kwargs.update({"hosts": settings.ES7_URLS})
return Elasticsearch(**kwargs)
def get_doc_types(paths=["kitsune.search.documents"]):
"""Return all registered document types"""
doc_types = []
modules = [importlib.import_module(path) for path in paths]
for module in modules:
for key in dir(module):
cls = getattr(module, key)
if (
inspect.isclass(cls)
and issubclass(cls, Document)
and cls != Document
and cls.__name__ != "SumoDocument"
):
doc_types.append(cls)
return doc_types
@task
def index_object(doc_type_name, obj_id):
"""Index an ORM object given an object id and a document type name."""
doc_type = next(cls for cls in get_doc_types() if cls.__name__ == doc_type_name)
model = doc_type.get_model()
try:
obj = model.objects.get(pk=obj_id)
except model.DoesNotExist:
# if the row doesn't exist in DB, it may have been deleted while this job
# was in the celery queue - this shouldn't be treated as a failure, so
# just return
return
if doc_type.update_document:
doc_type.prepare(obj).to_action("update", doc_as_upsert=True)
else:
doc_type.prepare(obj).to_action("index")
@task
def index_objects_bulk(
doc_type_name,
obj_ids,
timeout=settings.ES_BULK_DEFAULT_TIMEOUT,
elastic_chunk_size=settings.ES_DEFAULT_ELASTIC_CHUNK_SIZE,
):
"""Bulk index ORM objects given a list of object ids and a document type name."""
doc_type = next(cls for cls in get_doc_types() if cls.__name__ == doc_type_name)
db_objects = doc_type.get_queryset().filter(pk__in=obj_ids)
# prepare the docs for indexing
docs = [doc_type.prepare(obj) for obj in db_objects]
# set the appropriate action per document type
action = "index"
kwargs = {}
# If the `update_document` is true we are using update instead of index
if doc_type.update_document:
action = "update"
kwargs.update({"doc_as_upsert": True})
# if the request doesn't resolve within `timeout`,
# sleep for `timeout` then try again up to `settings.ES_BULK_MAX_RETRIES` times,
# before raising an exception:
success, errors = es7_bulk(
es7_client(
timeout=timeout,
retry_on_timeout=True,
initial_backoff=timeout,
max_retries=settings.ES_BULK_MAX_RETRIES,
),
(doc.to_action(action=action, is_bulk=True, **kwargs) for doc in docs),
chunk_size=elastic_chunk_size,
raise_on_error=False, # we'll raise the errors ourselves, so all the chunks get sent
)
errors = [
error
for error in errors
if not (error.get("delete") and error["delete"]["status"] in [400, 404])
]
if errors:
raise BulkIndexError(f"{len(errors)} document(s) failed to index.", errors)
@task
def remove_from_field(doc_type_name, field_name, field_value):
"""Remove a value from all documents in the doc_type's index."""
doc_type = next(cls for cls in get_doc_types() if cls.__name__ == doc_type_name)
script = (
f"if (ctx._source.{field_name}.contains(params.value)) {{"
f"ctx._source.{field_name}.remove(ctx._source.{field_name}.indexOf(params.value))"
f"}}"
)
update = UpdateByQuery(using=es7_client(), index=doc_type._index._name)
update = update.filter("term", **{field_name: field_value})
update = update.script(source=script, params={"value": field_value}, conflicts="proceed")
# refresh index to ensure search fetches all matches
doc_type._index.refresh()
update.execute()
@task
def delete_object(doc_type_name, obj_id):
"""Unindex an ORM object given an object id and document type name."""
doc_type = next(cls for cls in get_doc_types() if cls.__name__ == doc_type_name)
doc = doc_type()
doc.meta.id = obj_id
doc.to_action("delete")
|
mozilla/kitsune
|
kitsune/search/es7_utils.py
|
Python
|
bsd-3-clause
| 7,848
|
import os
# Django settings for mysite project.
DEBUG = True
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
SITE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
DATE_INPUT_FORMATS = ('%d/%m/%Y',)  # must be a tuple/list of format strings
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Zurich'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '05=^qgbhg3!6-dzb6#&2j^jmh-2fgc%22!z_!w*&8iy_m$2*$*'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'mysite.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(SITE_ROOT, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages'
],
'debug': DEBUG,
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'polls'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
sebnorth/extended_user
|
mysite/settings.py
|
Python
|
bsd-3-clause
| 5,917
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
help_txt = """
:help, show this help menu. :help [command] for detail
:dict [word], only find translation on dict.cn
:google [sentence], only find translation on google api
:lan2lan [sentence], translate from one language to another language
:add [word], add new word to your library
:del [word], delete word from your library
:list [number], list words in your library
:rating [number], list words in your library with a certain rating
:history [number], show your search history
:clear, clear your oldest 100 history
for more information, browse http://mardict.appspot.com
"""
help_dict = """
help on dict:
[usage] :dict word
[intro] translate your word only use dict.cn api
[eg] :dict hello
more on http://mardict.appspot.com/help/#dict
"""
help_google = """
help on google:
[usage] :google word
[intro] translate your word only use google api
[eg] :google google is a bitch
more on http://mardict.appspot.com/help/#google
"""
help_lan2lan = """
help on lan2lan:
[usage] :lan2lan word
[intro] translate from one language to another language by google translation api
[eg] :en2zh hello
more on http://mardict.appspot.com/help/#lan2lan
"""
help_history = """
help on history:
[usage] :history (number)
[intro] list your search history
[eg] :history 9
more on http://mardict.appspot.com/help/#history
"""
help_clear = """
help on clear:
[usage] :clear
[intro] clear your search history
more on http://mardict.appspot.com/help/#clear
"""
help_add = """
help on add:
[usage] :add (word)
[intro] add the new word to your library (for storing your unfamiliar words)
[eg] :add hello
more on http://mardict.appspot.com/help/#add
"""
help_del = """
help on del:
[usage] :del word
[intro] delete the word from your library
[eg] :del hello
more on http://mardict.appspot.com/help/#del
"""
help_list = """
help on list:
[usage] :list (number)
[intro] list a certain number of words from your library.
[eg] :list 9
this function is very complex, browse the website.
more on http://mardict.appspot.com/help/#list
"""
help_rating = """
help on rating:
[usage] :rating (number)
[intro] list a certain number of words from your library with a certain rating.
[eg] :rating 0 9
this function is very complex, browse the website.
more on http://mardict.appspot.com/help/#rating
"""
|
lepture/mardict
|
utils/helper.py
|
Python
|
bsd-3-clause
| 2,325
|
'''
TODO:
optimize adds, multiplies, 'or' and 'and' as they can accept more than two values
validate type info on specific functions
'''
from .matching import AstHandler, ParseError, DateTimeFunc
class AggregationParser(AstHandler):
FUNC_TO_ARGS = {'concat': '+', # more than 1
'strcasecmp': 2,
'substr': 3,
'toLower': 1,
'toUpper': 1,
'dayOfYear': 1,
'dayOfMonth': 1,
'dayOfWeek': 1,
'year': 1,
'month': 1,
'week': 1,
'hour': 1,
'minute': 1,
'second': 1,
'millisecond': 1,
'date': 1,
'cmp': 2,
'ifnull': 2}
SPECIAL_VALUES = {'False': False,
'false': False,
'True': True,
'true': True,
'None': None,
'null': None}
def handle_Str(self, node):
return node.s
def handle_Num(self, node):
return node.n
def handle_Name(self, node):
return self.SPECIAL_VALUES.get(node.id, '$' + node.id)
    def handle_NameConstant(self, node):
        return self.SPECIAL_VALUES.get(str(node.value), node.value)
def handle_Attribute(self, node):
return '${0}.{1}'.format(self.handle(node.value), node.attr).replace('$$', '$')
def handle_UnaryOp(self, op):
return {self.handle(op.op): self.handle(op.operand)}
def handle_IfExp(self, op):
return {'$cond': [self.handle(op.test),
self.handle(op.body),
self.handle(op.orelse)]}
def handle_Call(self, node):
name = node.func.id
if name == 'date':
return DateTimeFunc().handle_date(node)
if name not in self.FUNC_TO_ARGS:
raise ParseError('Unsupported function ({0}).'.format(name),
col_offset=node.col_offset)
if len(node.args) != self.FUNC_TO_ARGS[name] and \
self.FUNC_TO_ARGS[name] != '+' or len(node.args) == 0:
raise ParseError('Invalid number of arguments to function {0}'.format(name),
col_offset=node.col_offset)
# because of SERVER-9289 the following fails: {'$year': {'$add' :['$time_stamp', 1]}}
# wrapping both single arg functions in a list solves it: {'$year': [{'$add' :['$time_stamp', 1]}]}
return {'$' + node.func.id: list(map(self.handle, node.args))}
def handle_BinOp(self, node):
return {self.handle(node.op): [self.handle(node.left),
self.handle(node.right)]}
def handle_Not(self, not_node):
return '$not'
def handle_And(self, op):
return '$and'
def handle_Or(self, op):
return '$or'
def handle_BoolOp(self, op):
return {self.handle(op.op): list(map(self.handle, op.values))}
def handle_Compare(self, node):
if len(node.ops) != 1:
raise ParseError('Invalid number of comparators: {0}'.format(len(node.ops)),
col_offset=node.comparators[1].col_offset)
return {self.handle(node.ops[0]): [self.handle(node.left),
self.handle(node.comparators[0])]}
def handle_Gt(self, node):
return '$gt'
def handle_Lt(self,node):
return '$lt'
def handle_GtE(self, node):
return '$gte'
def handle_LtE(self, node):
return '$lte'
def handle_Eq(self, node):
return '$eq'
def handle_NotEq(self, node):
return '$ne'
def handle_Add(self, node):
return '$add'
def handle_Sub(self, node):
return '$subtract'
def handle_Mod(self, node):
return '$mod'
def handle_Mult(self, node):
return '$multiply'
def handle_Div(self, node):
return '$divide'
class AggregationGroupParser(AstHandler):
GROUP_FUNCTIONS = ['addToSet', 'push', 'first', 'last',
'max', 'min', 'avg', 'sum']
def handle_Call(self, node):
if len(node.args) != 1:
raise ParseError('The {0} group aggregation function accepts one argument'.format(node.func.id),
col_offset=node.col_offset)
if node.func.id not in self.GROUP_FUNCTIONS:
raise ParseError('Unsupported group function: {0}'.format(node.func.id),
col_offset=node.col_offset,
options=self.GROUP_FUNCTIONS)
return {'$' + node.func.id: AggregationParser().handle(node.args[0])}
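def _demo_translation():
    """Hedged usage sketch, not part of the original module: shows how the
    parser above might be driven.  It assumes AstHandler.handle() dispatches
    on the ast node type, as the recursive self.handle() calls suggest."""
    import ast
    expr = ast.parse('a + b * 2', mode='eval').body
    # expected result: {'$add': ['$a', {'$multiply': ['$b', 2]}]}
    return AggregationParser().handle(expr)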
|
alonho/pql
|
pql/aggregation.py
|
Python
|
bsd-3-clause
| 4,848
|
# -*- coding: utf-8 -*-
# TEST_UNICODE_LITERALS
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
from ...extern import six
from ..data_info import dtype_info_name
STRING_TYPE_NAMES = {(False, 'S'): 'str', # PY2
(False, 'U'): 'unicode',
(True, 'S'): 'bytes', # not PY2
(True, 'U'): 'str'}
DTYPE_TESTS = ((np.array(b'abcd').dtype, STRING_TYPE_NAMES[(not six.PY2, 'S')] + '4'),
(np.array(u'abcd').dtype, STRING_TYPE_NAMES[(not six.PY2, 'U')] + '4'),
('S4', STRING_TYPE_NAMES[(not six.PY2, 'S')] + '4'),
('U4', STRING_TYPE_NAMES[(not six.PY2, 'U')] + '4'),
(np.void, 'void'),
(np.int32, 'int32'),
(np.bool, 'bool'),
(bool, 'bool'),
(float, 'float64'),
('<f4', 'float32'),
('u8', 'uint64'),
('c16', 'complex128'),
('object', 'object'))
@pytest.mark.parametrize('input,output', DTYPE_TESTS)
def test_dtype_info_name(input, output):
"""
Test that dtype_info_name is giving the expected output
Here the available types::
'b' boolean
'i' (signed) integer
'u' unsigned integer
'f' floating-point
'c' complex-floating point
'O' (Python) objects
'S', 'a' (byte-)string
'U' Unicode
'V' raw data (void)
"""
assert dtype_info_name(input) == output
|
AustereCuriosity/astropy
|
astropy/utils/tests/test_data_info.py
|
Python
|
bsd-3-clause
| 1,575
|
from __future__ import division, print_function
from os.path import join, split, dirname
import os
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
def needs_mingw_ftime_workaround():
# We need the mingw workaround for _ftime if the msvc runtime version is
# 7.1 or above and we build with mingw ...
# ... but we can't easily detect compiler version outside distutils command
# context, so we will need to detect in randomkit whether we build with gcc
msver = get_msvc_build_version()
if msver and msver >= 8:
return True
return False
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
tc = testcode_wincrypt()
if config_cmd.try_run(tc):
libs.append('Advapi32')
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1'),
]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = []
# Configure mtrand
try:
import cffi
have_cffi = True
except ImportError:
have_cffi = False
if have_cffi:
#create the dll/so for the cffi version
if sys.platform == 'win32':
libs.append('Advapi32')
defs.append(('_MTRAND_DLL',None))
config.add_shared_library('_mtrand',
sources=[join('mtrand', x) for x in
['randomkit.c', 'distributions.c', 'initarray.c']],
build_info = {
'libraries': libs,
'depends': [join('mtrand', '*.h'),
],
'macros': defs,
}
)
else:
config.add_extension('mtrand',
sources=[join('mtrand', x) for x in
['mtrand.c', 'randomkit.c', 'initarray.c',
'distributions.c']]+[generate_libraries],
libraries=libs,
depends=[join('mtrand', '*.h'),
join('mtrand', '*.pyx'),
join('mtrand', '*.pxi'),],
define_macros=defs,
)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
NextThought/pypy-numpy
|
numpy/random/setup.py
|
Python
|
bsd-3-clause
| 3,228
|
"""Watch for changes in a collection of source files. If changes, run
the specified test runner (nosetests, by default).
"""
from argparse import ArgumentParser
import ConfigParser
import glob
import os
import stat
import subprocess
import sys
import time
class Nosy(object):
"""Watch for changes in all source files. If changes, run the
specified test runner (nosetests, by default).
"""
def __init__(self):
"""Return an instance with the default configuration, and a
command line parser.
"""
self.config = ConfigParser.SafeConfigParser()
self.config.add_section('nosy')
self.config.set('nosy', 'test_runner', 'nosetests')
self.config.set('nosy', 'base_path', '.')
self.config.set('nosy', 'glob_patterns', '')
self.config.set('nosy', 'exclude_patterns', '')
self.config.set('nosy', 'extra_paths', '')
self.config.set('nosy', 'options', '')
self.config.set('nosy', 'tests', '')
# paths config retained for backward compatibility; use
# extra_paths for any files or paths that aren't easily
# included via base_path, glob_patterns, and exclude_patterns
self.config.set('nosy', 'paths', '*.py')
self._build_cmdline_parser()
def _build_cmdline_parser(self):
self.parser = ArgumentParser(
description='Automatically run a command (nosetest, by default) '
'whenever source files change.')
self.parser.add_argument(
'-c', '--config', dest='config_file', default='setup.cfg',
help='configuration file path and name; defaults to %(default)s')
def parse_cmdline(self):
"""Parse the command line and set the config_file attribute.
"""
args = self.parser.parse_args()
self.config_file = args.config_file
def _read_config(self):
try:
self.config.readfp(open(self.config_file, 'rt'))
except IOError, msg:
self.parser.error("can't read config file:\n %s" % msg)
self.test_runner = self.config.get('nosy', 'test_runner')
self.base_path = self.config.get('nosy', 'base_path')
self.glob_patterns = self.config.get(
'nosy', 'glob_patterns').split()
self.exclude_patterns = self.config.get(
'nosy', 'exclude_patterns').split()
self.extra_paths = self.config.get('nosy', 'extra_paths').split()
self.cmd_opts = self.config.get('nosy', 'options')
self.cmd_args = self.config.get('nosy', 'tests')
# paths config retained for backward compatibility; use
# extra_paths for any files or paths that aren't easily
# included via base_path, glob_patterns, and
# exclude_patterns
self.paths = self.config.get('nosy', 'paths').split()
def _calc_extra_paths_checksum(self):
"""Return the checksum for the files given by the extra paths
pattern(s).
self.paths is included for backward compatibility.
"""
checksum = 0
for path in self.extra_paths + self.paths:
for file_path in glob.iglob(path):
stats = os.stat(file_path)
checksum += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]
return checksum
def _calc_exclusions(self, root):
"""Return a set of file paths to be excluded from the checksum
calculation.
"""
exclusions = set()
for pattern in self.exclude_patterns:
for file_path in glob.iglob(os.path.join(root, pattern)):
exclusions.add(file_path)
return exclusions
def _calc_dir_checksum(self, exclusions, root):
"""Return the checksum for the monitored files in the
specified directory tree.
"""
checksum = 0
for pattern in self.glob_patterns:
for file_path in glob.iglob(os.path.join(root, pattern)):
if file_path not in exclusions:
stats = os.stat(file_path)
checksum += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]
return checksum
def _checksum(self):
"""Return a checksum which indicates if any files in the paths
list have changed.
"""
checksum = self._calc_extra_paths_checksum()
for root, dirs, files in os.walk(self.base_path):
exclusions = self._calc_exclusions(root)
checksum += self._calc_dir_checksum(exclusions, root)
return checksum
def run(self):
"""Run specified test runner (default nosetests) whenever the
source files (default ./*.py) change.
Re-read the configuration before each run so that options and
arguments may be changed.
"""
checksum = 0
self._read_config()
while True:
if self._checksum() != checksum:
self._read_config()
checksum = self._checksum()
cmd = (self.test_runner.split() if ' ' in self.test_runner
else [self.test_runner])
try:
subprocess.call(
cmd
+ self.cmd_opts.replace('\\\n', '').split()
+ self.cmd_args.replace('\\\n', '').split())
except OSError, msg:
sys.stderr.write('Command error: %s: %s\n' % (msg, cmd))
sys.exit(2)
time.sleep(1)
def main():
nosy = Nosy()
nosy.parse_cmdline()
try:
nosy.run()
except KeyboardInterrupt:
sys.exit(130)
except SystemExit:
sys.exit(0)
if __name__ == '__main__':
main()
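# Hedged configuration example (not part of the original module): the section
# and option names below are exactly those read by Nosy._read_config(); the
# values are illustrative assumptions.  Placed in setup.cfg (or the file given
# via --config):
#
#   [nosy]
#   test_runner = nosetests
#   base_path = .
#   glob_patterns = *.py *.txt
#   exclude_patterns = .#* flycheck_*
#   extra_paths = setup.cfg
#   options = -x --with-doctest
#   tests = tests/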
|
dougbeal/nosy
|
nosy/nosy.py
|
Python
|
bsd-3-clause
| 5,722
|
from magicbot.state_machine import (
default_state,
state,
timed_state,
AutonomousStateMachine,
StateMachine,
IllegalCallError,
NoFirstStateError,
MultipleFirstStatesError,
MultipleDefaultStatesError,
InvalidStateName,
)
from magicbot.magic_tunable import setup_tunables
import pytest
def test_no_timed_state_duration():
with pytest.raises(TypeError):
class _TM(StateMachine):
@timed_state()
def tmp(self):
pass
def test_no_start_state():
class _TM(StateMachine):
pass
with pytest.raises(NoFirstStateError):
_TM()
def test_multiple_first_states():
class _TM(StateMachine):
@state(first=True)
def tmp1(self):
pass
@state(first=True)
def tmp2(self):
pass
with pytest.raises(MultipleFirstStatesError):
_TM()
def test_sm(wpitime):
class _TM(StateMachine):
def __init__(self):
self.executed = []
def some_fn(self):
self.executed.append("sf")
@state(first=True)
def first_state(self):
self.executed.append(1)
self.next_state("second_state")
@timed_state(duration=1, next_state="third_state")
def second_state(self):
self.executed.append(2)
@state
def third_state(self):
self.executed.append(3)
sm = _TM()
setup_tunables(sm, "cname")
sm.some_fn()
# should not be able to directly call
with pytest.raises(IllegalCallError):
sm.first_state()
assert sm.current_state == ""
assert not sm.is_executing
sm.engage()
assert sm.current_state == "first_state"
assert not sm.is_executing
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
# should not change
sm.engage()
assert sm.current_state == "second_state"
assert sm.is_executing
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
wpitime.step(1.5)
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
# should be done
sm.done()
assert sm.current_state == ""
assert not sm.is_executing
# should be able to start directly at second state
sm.engage(initial_state="second_state")
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
wpitime.step(1.5)
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
# test force
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
sm.engage(force=True)
assert sm.current_state == "first_state"
assert sm.is_executing
sm.execute()
sm.execute()
assert not sm.is_executing
assert sm.current_state == ""
assert sm.executed == ["sf", 1, 2, 3, 3, 2, 3, 3, 1]
def test_sm_inheritance():
class _TM1(StateMachine):
@state
def second_state(self):
self.done()
class _TM2(_TM1):
@state(first=True)
def first_state(self):
self.next_state("second_state")
sm = _TM2()
setup_tunables(sm, "cname")
sm.engage()
assert sm.current_state == "first_state"
sm.execute()
assert sm.current_state == "second_state"
sm.execute()
assert sm.current_state == ""
def test_must_finish(wpitime):
class _TM(StateMachine):
def __init__(self):
self.executed = []
@state(first=True)
def ordinary1(self):
self.next_state("ordinary2")
self.executed.append(1)
@state
def ordinary2(self):
self.next_state("must_finish")
self.executed.append(2)
@state(must_finish=True)
def must_finish(self):
self.executed.append("mf")
@state
def ordinary3(self):
self.executed.append(3)
self.next_state_now("timed_must_finish")
@timed_state(duration=1, must_finish=True)
def timed_must_finish(self):
self.executed.append("tmf")
sm = _TM()
setup_tunables(sm, "cname")
sm.engage()
sm.execute()
sm.execute()
assert sm.current_state == ""
assert not sm.is_executing
sm.engage()
sm.execute()
sm.engage()
sm.execute()
sm.execute()
sm.execute()
assert sm.current_state == "must_finish"
assert sm.is_executing
sm.next_state("ordinary3")
sm.engage()
sm.execute()
assert sm.current_state == "timed_must_finish"
sm.execute()
assert sm.is_executing
assert sm.current_state == "timed_must_finish"
for _ in range(7):
wpitime.step(0.1)
sm.execute()
assert sm.is_executing
assert sm.current_state == "timed_must_finish"
wpitime.step(1)
sm.execute()
assert not sm.is_executing
assert sm.executed == [1, 1, 2, "mf", "mf", 3] + ["tmf"] * 9
def test_autonomous_sm():
class _TM(AutonomousStateMachine):
i = 0
VERBOSE_LOGGING = False
@state(first=True)
def something(self):
self.i += 1
if self.i == 6:
self.done()
sm = _TM()
setup_tunables(sm, "cname")
sm.on_enable()
for _ in range(5):
sm.on_iteration(None)
assert sm.is_executing
sm.on_iteration(None)
assert not sm.is_executing
for _ in range(5):
sm.on_iteration(None)
assert not sm.is_executing
assert sm.i == 6
def test_autonomous_sm_end_timed_state(wpitime):
class _TM(AutonomousStateMachine):
i = 0
j = 0
VERBOSE_LOGGING = False
@state(first=True)
def something(self):
self.i += 1
if self.i == 3:
self.next_state("timed")
@timed_state(duration=1)
def timed(self):
self.j += 1
sm = _TM()
setup_tunables(sm, "cname")
sm.on_enable()
for _ in range(5):
wpitime.step(0.7)
sm.on_iteration(None)
assert sm.is_executing
for _ in range(5):
wpitime.step(0.7)
sm.on_iteration(None)
assert not sm.is_executing
assert sm.i == 3
assert sm.j == 2
def test_next_fn():
class _TM(StateMachine):
@state(first=True)
def first_state(self):
self.next_state(self.second_state)
@state
def second_state(self):
self.done()
sm = _TM()
setup_tunables(sm, "cname")
sm.engage()
assert sm.current_state == "first_state"
sm.execute()
assert sm.current_state == "second_state"
sm.engage()
sm.execute()
assert sm.current_state == ""
def test_next_fn2(wpitime):
class _TM(StateMachine):
@state
def second_state(self):
pass
@timed_state(first=True, duration=0.1, next_state=second_state)
def first_state(self):
pass
sm = _TM()
setup_tunables(sm, "cname")
sm.engage()
sm.execute()
assert sm.current_state == "first_state"
assert sm.is_executing
wpitime.step(0.5)
sm.engage()
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
sm.execute()
assert sm.current_state == ""
assert not sm.is_executing
def test_mixup():
from robotpy_ext.autonomous import state as _ext_state
from robotpy_ext.autonomous import timed_state as _ext_timed_state
with pytest.raises(RuntimeError) as exc_info:
class _SM1(StateMachine):
@_ext_state(first=True)
def the_state(self):
pass
assert isinstance(exc_info.value.__cause__, TypeError)
with pytest.raises(RuntimeError) as exc_info:
class _SM2(StateMachine):
@_ext_timed_state(first=True, duration=1)
def the_state(self):
pass
assert isinstance(exc_info.value.__cause__, TypeError)
def test_forbidden_state_names():
with pytest.raises(InvalidStateName):
class _SM(StateMachine):
@state
def done(self):
pass
def test_mixins():
class _SM1(StateMachine):
@state
def state1(self):
pass
class _SM2(StateMachine):
@state
def state2(self):
pass
class _SM(_SM1, _SM2):
@state(first=True)
def first_state(self):
pass
s = _SM()
states = s._StateMachine__states
assert "state1" in states
assert "state2" in states
assert "first_state" in states
def test_multiple_default_states():
class _SM(StateMachine):
@state(first=True)
def state(self):
pass
@default_state
def state1(self):
pass
@default_state
def state2(self):
pass
with pytest.raises(MultipleDefaultStatesError):
_SM()
def test_default_state_machine():
class _SM(StateMachine):
def __init__(self):
self.didOne = None
self.didDefault = None
self.defaultInit = None
self.didDone = None
@state(first=True)
def stateOne(self):
self.didOne = True
self.didDefault = False
self.didDone = False
@state
def doneState(self):
self.didOne = False
self.didDefault = False
self.didDone = True
self.done()
@default_state
def defaultState(self, initial_call):
self.didOne = False
self.didDefault = True
self.defaultInit = initial_call
self.didDone = False
sm = _SM()
setup_tunables(sm, "cname")
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == True
assert sm.didDone == False
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == False
assert sm.didDone == False
# do a thing
sm.engage()
sm.execute()
assert sm.didOne == True
assert sm.didDefault == False
assert sm.didDone == False
# should go back (test for initial)
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == True
assert sm.didDone == False
# should happen again (no initial)
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == False
assert sm.didDone == False
# do another thing
sm.engage()
sm.execute()
assert sm.didOne == True
assert sm.didDefault == False
assert sm.didDone == False
# should go back (test for initial)
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == True
assert sm.didDone == False
# should happen again (no initial)
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == False
assert sm.didDone == False
    # engage a state that will call done, check to see
# if we come back
sm.engage("doneState")
sm.execute()
assert sm.didOne == False
assert sm.didDefault == False
assert sm.defaultInit == False
assert sm.didDone == True
# should go back (test for initial)
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == True
assert sm.didDone == False
# should happen again (no initial)
sm.execute()
assert sm.didOne == False
assert sm.didDefault == True
assert sm.defaultInit == False
assert sm.didDone == False
def test_short_timed_state(wpitime):
"""
Tests two things:
- A timed state that expires before it executes
- Ensures that the default state won't execute if the machine is always
executing
"""
class _SM(StateMachine):
def __init__(self):
self.executed = []
@default_state
def d(self):
self.executed.append("d")
@state(first=True)
def a(self):
self.executed.append("a")
self.next_state("b")
@timed_state(duration=0.01)
def b(self):
self.executed.append("b")
def done(self):
super().done()
self.executed.append("d")
sm = _SM()
setup_tunables(sm, "cname")
assert sm.current_state == ""
assert not sm.is_executing
for _ in [1, 2, 3, 4]:
sm.engage()
sm.execute()
assert sm.current_state == "b"
wpitime.step(0.02)
sm.engage()
sm.execute()
assert sm.current_state == "b"
wpitime.step(0.02)
assert sm.executed == ["a", "b", "d", "a", "b", "d", "a", "b", "d", "a", "b"]
|
robotpy/robotpy-wpilib-utilities
|
tests/test_magicbot_sm.py
|
Python
|
bsd-3-clause
| 13,059
|
from mod_python import apache
from mod_python import util
import os.path
import urllib
import logging
debug = True
def handler(req):
"""
This is called by Apache and maps the request to the resource class.
    Process of mapping:
    1. Try to import a python script which handles this resource.
       The name will be determined by the *path_info* (see mod_python or apache cgi docs for details), while the last path part is treated as the resource ID.
       If no script was found, we return HTTP_NOT_FOUND
    2. Check if the request method is in the allowedMethodes list of the imported script.
       If not, we set the allowed methods and return HTTP_METHOD_NOT_ALLOWED
       If the imported script does not define an allowedMethodes list, we return HTTP_NOT_FOUND,
       assuming this is not a script to call but some other resource.
3. Parse the form data.
#TODO: add support for JSON and XML. Currently only url-form-data is supported.
4. Call METHOD(req, id, args)
req is the request object,
id is the parsed id or None
args is the mp_table object (may be empty)
returns the return code from the function
if the method is not defined, we return HTTP_NOT_IMPLEMENTED
"""
#Set log level here. For Production, disable both lines
logging.basicConfig(level=logging.DEBUG) #Used for debug, lot of data, not recommended for simple error search.
#logging.basicConfig(level=logging.INFO) #Used for error search with config.
# 1.
try:
(mtype, mid) = req.path_info.lstrip('/').split('/',1)
except ValueError, err:
mtype = req.path_info.lstrip('/')
mid = ''
try:
resourceModule = apache.import_module(mtype.strip('/').replace('/','.'), path=os.path.dirname(__file__))
except Exception, err:
if debug: raise
return apache.HTTP_NOT_FOUND
# 2.
try:
allowedMethodes = resourceModule.allowedMethodes
except AttributeError, err:
if debug: raise
        return apache.HTTP_NOT_FOUND
if not req.method in allowedMethodes:
req.allow_methods(resourceModule.allowedMethodes, 1)
return apache.HTTP_METHOD_NOT_ALLOWED
# 3.
if not 'form' in dir(req):
req.form = util.FieldStorage(req, True)
# 4.
try:
return getattr(resourceModule, req.method)(req, urllib.unquote(mid))
except AttributeError, err:
if debug: raise
return apache.HTTP_NOT_IMPLEMENTED
def writeError(req, error, message):
"""Send a error page to client. Replaces http error page."""
req.status = apache.HTTP_FORBIDDEN
req.content_type = 'text/plain'
req.write(message)
return apache.OK
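# Hedged illustration, not part of the original module: a minimal resource
# script that would satisfy steps 2 and 4 of handler() above.  In a real
# deployment these names would live in their own file (e.g. books.py imported
# via path_info); only the allowedMethodes list and the METHOD(req, id)
# signature are implied by the handler, the body below is an assumption.
allowedMethodes = ['GET']
def GET(req, mid):
    req.content_type = 'text/plain'
    req.write('requested resource: %s' % mid)
    return apache.OK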
|
spahan/unixdmoain
|
wwwlib/rest.py
|
Python
|
bsd-3-clause
| 2,782
|
def extractWwwTccedwardsCom(item):
'''
Parser for 'www.tccedwards.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractWwwTccedwardsCom.py
|
Python
|
bsd-3-clause
| 548
|
import unittest
from restkiss.preparers import Preparer, FieldsPreparer
class InstaObj(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class LookupDataTestCase(unittest.TestCase):
def setUp(self):
super(LookupDataTestCase, self).setUp()
self.preparer = FieldsPreparer(fields=None)
self.obj_data = InstaObj(
say='what',
count=453,
moof={
'buried': {
'id': 7,
'data': InstaObj(yes='no')
}
},
parent=None
)
self.dict_data = {
'hello': 'world',
'abc': 123,
'more': {
'things': 'here',
'nested': InstaObj(
awesome=True,
depth=3
),
},
'parent': None,
}
def test_dict_simple(self):
self.assertEqual(self.preparer.lookup_data('hello', self.dict_data), 'world')
self.assertEqual(self.preparer.lookup_data('abc', self.dict_data), 123)
def test_obj_simple(self):
self.assertEqual(self.preparer.lookup_data('say', self.obj_data), 'what')
self.assertEqual(self.preparer.lookup_data('count', self.obj_data), 453)
def test_dict_nested(self):
self.assertEqual(self.preparer.lookup_data('more.things', self.dict_data), 'here')
self.assertEqual(self.preparer.lookup_data('more.nested.depth', self.dict_data), 3)
def test_obj_nested(self):
self.assertEqual(self.preparer.lookup_data('moof.buried.id', self.obj_data), 7)
self.assertEqual(self.preparer.lookup_data('moof.buried.data.yes', self.obj_data), 'no')
def test_dict_miss(self):
with self.assertRaises(KeyError):
self.preparer.lookup_data('another', self.dict_data)
def test_obj_miss(self):
with self.assertRaises(AttributeError):
self.preparer.lookup_data('whee', self.obj_data)
def test_dict_nullable_fk(self):
self.assertEqual(self.preparer.lookup_data('parent.id', self.dict_data), None)
def test_obj_nullable_fk(self):
self.assertEqual(self.preparer.lookup_data('parent.id', self.obj_data), None)
def test_empty_lookup(self):
# We could possibly get here in the recursion.
self.assertEqual(self.preparer.lookup_data('', 'Last value'), 'Last value')
def test_complex_miss(self):
with self.assertRaises(AttributeError):
self.preparer.lookup_data('more.nested.nope', self.dict_data)
|
CraveFood/restkiss
|
tests/test_preparers.py
|
Python
|
bsd-3-clause
| 2,628
|
import sys
from arrowhead.core import Step
from arrowhead.core import ErrorArrow
from arrowhead.core import NormalArrow
from arrowhead.core import ValueArrow
def print_flow_state(flow, active_step_name=None, file=sys.stdout):
"""
Display the state of a given flow.
:param flow:
A Flow, instance or class
:param active_step_name:
(optional) name of the active step
:param file:
(optional) file to print to (defaults to sys.stdout)
This function actually prints() a developer-friendly version of the state
of the entire flow. The output is composed of many lines. The output will
contain all of the internal state of the flow (may print stuff like
passwords if you stored any).
"""
# show flow name
print("[{}]".format(flow.Meta.name).center(40, "~"), file=file)
# show flow global state
needs_header = True
for f_k, f_v in flow.__dict__.items():
# private stuff is private
if f_k.startswith("_"):
continue
# steps are handled later
if (isinstance(f_v, Step) or
(isinstance(f_v, type) and issubclass(f_v, Step))):
continue
# skip Meta
if f_k == 'Meta':
continue
if needs_header:
print("STATE:", file=file)
needs_header = False
print("{indent}{key}: {value!r}".format(
indent=" " * 4, key=f_k, value=f_v
), file=file)
# show a list of all the steps, their state as well as a marker that
# shows where we actively are
print("STEPS:", file=file)
for name in flow.Meta.steps.keys():
step = getattr(flow, name)
flags = []
if step.Meta.accepting:
flags.append('A')
if step.Meta.initial == name:
flags.append('I')
if flags:
rendered_flags = " ({})".format(''.join(flags))
else:
rendered_flags = ""
if step.Meta.name == active_step_name:
indent = " => "
else:
indent = " "
print("{indent}{step}{flags:4}".format(
indent=indent, flags=rendered_flags, step=step.Meta.label
), file=file)
needs_header = False
for s_k, s_v in step.__dict__.items():
if s_k.startswith("_"):
continue
# skip Meta
if s_k == 'Meta':
continue
if needs_header:
print("STATE:", file=file)
needs_header = False
print("{indent}{key}: {value!r}".format(
indent=" " * 8, key=s_k, value=s_v
), file=file)
print("." * 40, file=file)
def print_dot_graph(flow, active_step_name=None, file=sys.stdout):
"""
Print the dot(1) description of a given flow.
:param flow:
A Flow, instance or class
:param active_step_name:
(optional) name of the active step
:param file:
(optional) file to print to (defaults to sys.stdout)
"""
print('digraph {', file=file)
print('\tnode [shape=box, color=black];', file=file)
print('\tedge [arrowsize=0.5];', file=file)
print(file=file)
print('\tsubgraph {', file=file)
print('\t\tnode [shape=plaintext];', file=file)
# NOTE: levels + 2 because 0 and max are
# for _start and _end that are not
# represented anywhere in the flow. We
# just add them for graphviz
print('\t\t{};'.format(
' -> '.join(str(i) for i in range(flow.Meta.levels + 2))
), file=file)
print('\t}', file=file)
print(file=file)
# NOTE: levels + 2 as above
levels = {i: [] for i in range(flow.Meta.levels + 2)}
levels[0].append('_start')
# NOTE: levels + 1 is the last element
levels[flow.Meta.levels + 1].append('_end')
for step in flow.Meta.steps.values():
levels[step.Meta.level].append(step.Meta.name)
for level, steps in sorted(levels.items()):
print('\t{{ rank=same; {}; {}; }}'.format(
level, '; '.join(steps)
), file=file)
print(file=file)
if active_step_name == '_start':
print('\t_start [shape=circle, style=filled,'
' fillcolor=blue, label=""];', file=file)
else:
print('\t_start [shape=circle, style=filled,'
' fillcolor=black, label=""];', file=file)
for step in flow.Meta.steps.values():
if step.Meta.initial:
print('\t_start -> {};'.format(step.Meta.name), file=file)
print(file=file)
for step in flow.Meta.steps.values():
if active_step_name == step.Meta.name:
print('\t{} [shape={}, label="{}", style=filled, fillcolor=blue, fontcolor=white];'.format(
step.Meta.name, "box",
step.Meta.label.replace('"', '\\"')
), file=file)
else:
print('\t{} [shape={}, label="{}"];'.format(
step.Meta.name, "box",
step.Meta.label.replace('"', '\\"')
), file=file)
for arrow in step.Meta.arrows:
if isinstance(arrow, NormalArrow):
print('\t{} -> {};'.format(
step.Meta.name, arrow.target
), file=file)
elif isinstance(arrow, ValueArrow):
print('\t{} -> {} [label="{}", color=green];'.format(
step.Meta.name, arrow.target, arrow.value
), file=file)
elif isinstance(arrow, ErrorArrow):
print('\t{} -> {} [label="{}", color=red];'.format(
step.Meta.name, arrow.target, arrow.error.__name__
), file=file)
print(file=file)
if active_step_name == '_end':
print('\t_end [shape=doublecircle, style=filled, '
'fillcolor=blue, label=""];', file=file)
else:
print('\t_end [shape=doublecircle, style=filled, '
'fillcolor=black, label=""];', file=file)
for step in flow.Meta.steps.values():
if step.Meta.accepting:
print('\t{} -> _end;'.format(step.Meta.name), file=file)
print("}", file=file)
|
zyga/arrowhead
|
arrowhead/inspector.py
|
Python
|
bsd-3-clause
| 6,120
|
# proxy module
from __future__ import absolute_import
from mayavi.core.api import *
|
enthought/etsproxy
|
enthought/mayavi/core/api.py
|
Python
|
bsd-3-clause
| 84
|
__author__ = 'michael'
from django import template
from unobase import models as unobase_models
register = template.Library()
@register.inclusion_tag('blog/widgets/tag_cloud.html')
def tag_cloud(blog_slug):
tags = unobase_models.TagModel.get_distinct_tags('blogentry')
#set(unobase_models.TagModel.get_tags('blogentry'))
return {
'blog_slug': blog_slug,
'tags': tags
}
|
unomena/unobase
|
unobase/blog/templatetags/blog_widgets.py
|
Python
|
bsd-3-clause
| 407
|
"""
Google Code Wiki translator.
Syntax defined by http://code.google.com/p/support/wiki/WikiSyntax
Here called gwiki to make the dialect clear (g for google).
"""
import re, os, commands, sys
from common import default_movie, plain_exercise, insert_code_and_tex, \
fix_ref_section_chapter
from plaintext import plain_quiz
from misc import _abort
from doconce import errwarn
def gwiki_code(filestr, code_blocks, code_block_types,
tex_blocks, format):
filestr = insert_code_and_tex(filestr, code_blocks, tex_blocks, format)
c = re.compile(r'^!bc(.*?)\n', re.MULTILINE)
filestr = c.sub(r'{{{\n', filestr)
filestr = re.sub(r'!ec\n', r'}}}\n', filestr)
c = re.compile(r'^!bt\n', re.MULTILINE)
filestr = c.sub(r'{{{\n', filestr)
filestr = re.sub(r'!et\n', r'}}}\n', filestr)
return filestr
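
# Illustrative example: once insert_code_and_tex() has restored the stored
# code blocks, gwiki_code() rewrites a Doconce block such as
#
#     !bc pycod
#     print('hello')
#     !ec
#
# into the Google Code wiki verbatim form
#
#     {{{
#     print('hello')
#     }}}
#
# The same {{{ ... }}} wrapping is applied to !bt/!et (math) blocks.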
def gwiki_figure(m):
filename = m.group('filename')
link = filename if filename.startswith('http') else None
if not link and not os.path.isfile(filename):
raise IOError('no figure file %s' % filename)
basename = os.path.basename(filename)
stem, ext = os.path.splitext(basename)
root, ext = os.path.splitext(filename)
if link is None:
if not ext in '.png .gif .jpg .jpeg'.split():
# try to convert image file to PNG, using
# convert from ImageMagick:
cmd = 'convert %s png:%s' % (filename, root+'.png')
failure, output = commands.getstatusoutput(cmd)
if failure:
errwarn('\n**** Warning: could not run ' + cmd)
errwarn('Convert %s to PNG format manually' % filename)
_abort()
filename = root + '.png'
caption = m.group('caption')
# keep label if it's there:
caption = re.sub(r'label\{(.+?)\}', '(\g<1>)', caption)
errwarn("""
NOTE: Place %s at some place on the web and edit the
    .gwiki page, either manually (search for 'Figure: ')
or use the doconce script:
doconce gwiki_figsubst.py mydoc.gwiki URL
""" % filename)
result = r"""
---------------------------------------------------------------
Figure: %s
(the URL of the image file %s must be inserted here)
<wiki:comment>
Put the figure file %s on the web (e.g., as part of the
googlecode repository) and substitute the line above with the URL.
</wiki:comment>
---------------------------------------------------------------
""" % (caption, filename, filename)
return result
from common import table_analysis
def gwiki_table(table):
"""Native gwiki table."""
# add 2 chars for column width since we add boldface _..._
# in headlines:
column_width = [c+2 for c in table_analysis(table['rows'])]
# Does column and heading alignment matter?
# Not according to http://code.google.com/p/support/wiki/WikiSyntax#Tables
# but it is possible to use HTML code in gwiki (i.e., html_table)
# (think this was tried without success...)
s = '\n'
for i, row in enumerate(table['rows']):
if row == ['horizontal rule']:
continue
if i == 1 and \
table['rows'][i-1] == ['horizontal rule'] and \
table['rows'][i+1] == ['horizontal rule']:
headline = True
else:
headline = False
empty_row = max([len(column.strip())
for column in row]) == 0
if empty_row:
continue
for column, w in zip(row, column_width):
if headline:
if column:
c = ' %s ' % (('_'+ column + '_').center(w))
else:
c = ''
else:
c = ' %s ' % column.ljust(w)
s += ' || %s ' % c
s += ' ||\n'
s += '\n\n'
return s
def gwiki_author(authors_and_institutions, auth2index,
inst2index, index2inst, auth2email):
authors = []
for author, i, email in authors_and_institutions:
if email is None:
email_text = ''
else:
name, adr = email.split('@')
email_text = ' (%s at %s)' % (name, adr)
authors.append('_%s_%s' % (author, email_text))
if len(authors) == 1:
authors = authors[0]
elif len(authors) == 2:
authors = authors[0] + ' and ' + authors[1]
elif len(authors) > 2:
authors[-1] = 'and ' + authors[-1]
authors = ', '.join(authors)
else:
# no authors:
return ''
text = '\n\nBy ' + authors + '\n\n'
# we skip institutions in gwiki
return text
def wiki_ref_and_label_common(section_label2title, format, filestr):
filestr = fix_ref_section_chapter(filestr, format)
# remove label{...} from output
filestr = re.sub(r'label\{.+?\}', '', filestr) # all the remaining
# anchors in titles do not work...
# replace all references to sections:
for label in section_label2title:
title = section_label2title[label]
filestr = filestr.replace('ref{%s}' % label,
'[#%s]' % title.replace(' ', '_'))
from common import ref2equations
filestr = ref2equations(filestr)
# replace remaining ref{x} as x
filestr = re.sub(r'ref\{(.+?)\}', '\g<1>', filestr)
return filestr
def gwiki_ref_and_label(section_label2title, format, filestr):
return wiki_ref_and_label_common(section_label2title, format, filestr)
def define(FILENAME_EXTENSION,
BLANKLINE,
INLINE_TAGS_SUBST,
CODE,
LIST,
ARGLIST,
TABLE,
EXERCISE,
FIGURE_EXT,
CROSS_REFS,
INDEX_BIB,
TOC,
ENVIRS,
QUIZ,
INTRO,
OUTRO,
filestr):
# all arguments are dicts and accept in-place modifications (extensions)
FILENAME_EXTENSION['gwiki'] = '.gwiki' # output file extension
BLANKLINE['gwiki'] = '\n'
# replacement patterns for substitutions of inline tags
INLINE_TAGS_SUBST['gwiki'] = {
# use verbatim mode for math:
'math': r'\g<begin>`\g<subst>`\g<end>',
'math2': r'\g<begin>`\g<puretext>`\g<end>',
'emphasize': r'\g<begin>_\g<subst>_\g<end>',
'bold': r'\g<begin>*\g<subst>*\g<end>',
'verbatim': r'\g<begin>`\g<subst>`\g<end>',
#'linkURL': r'\g<begin>[\g<url> \g<link>]\g<end>',
'linkURL2': r'[\g<url> \g<link>]',
'linkURL3': r'[\g<url> \g<link>]',
'linkURL2v': r"[\g<url> `\g<link>`]",
'linkURL3v': r"[\g<url> `\g<link>`]",
'plainURL': r'\g<url>',
'colortext': r'<font color="\g<color>">\g<text></font>',
'chapter': r'= \g<subst> =',
'section': r'== \g<subst> ==',
'subsection': r'=== \g<subst> ===',
'subsubsection': r'==== \g<subst> ====\n',
# 'section': r'++++ \g<subst> ++++',
# 'subsection': r'++++++ \g<subst> ++++++',
# 'subsubsection': r'++++++++ \g<subst> ++++++++',
'paragraph': r'*\g<subst>*\g<space>',
#'title': r'#summary \g<subst>\n<wiki:toc max_depth="2" />',
'title': r'#summary \g<subst>\n',
'date': r'===== \g<subst> =====',
'author': gwiki_author, #r'===== \g<name>, \g<institution> =====',
# 'figure': r'<\g<filename>>',
'figure': gwiki_figure,
'movie': default_movie, # will not work for HTML movie player
'comment': '<wiki:comment> %s </wiki:comment>',
'abstract': r'\n*\g<type>.* \g<text>\g<rest>',
'linebreak': r'\g<text>' + '\n',
'non-breaking-space': ' ',
'ampersand2': r' \g<1>&\g<2>',
}
CODE['gwiki'] = gwiki_code
from html import html_table
#TABLE['gwiki'] = html_table
TABLE['gwiki'] = gwiki_table
# native list:
LIST['gwiki'] = {
'itemize': {'begin': '\n', 'item': '*', 'end': '\n\n'},
'enumerate': {'begin': '\n', 'item': '#', 'end': '\n\n'},
'description': {'begin': '\n', 'item': '* %s ', 'end': '\n\n'},
'separator': '\n'}
# (the \n\n for end is a hack because doconce.py avoids writing
# newline at the end of lists until the next paragraph is hit)
#LIST['gwiki'] = LIST['HTML'] # does not work well
# how to typeset description lists for function arguments, return
# values, and module/class variables:
ARGLIST['gwiki'] = {
'parameter': '*argument*',
'keyword': '*keyword argument*',
'return': '*return value(s)*',
'instance variable': '*instance variable*',
'class variable': '*class variable*',
'module variable': '*module variable*',
}
FIGURE_EXT['gwiki'] = {
'search': ('.png', '.gif', '.jpg', '.jpeg'),
'convert': ('.png', '.gif', '.jpg')}
CROSS_REFS['gwiki'] = gwiki_ref_and_label
from plaintext import plain_index_bib
EXERCISE['gwiki'] = plain_exercise
INDEX_BIB['gwiki'] = plain_index_bib
TOC['gwiki'] = lambda s, f: '<wiki: toc max_depth="2" />'
QUIZ['gwiki'] = plain_quiz
# document start:
INTRO['gwiki'] = ''
#INTRO['gwiki'] = '#summary YourOneLineSummary\n<wiki:toc max_depth="1" />\n'
|
dragly/doconce
|
lib/doconce/gwiki.py
|
Python
|
bsd-3-clause
| 9,341
|
import os
import shutil
import addSubproject
import option
import utility
import grapeGit as git
import grapeConfig
import grapeMenu
import checkout
# update your custom sparse checkout view
class UpdateView(option.Option):
"""
grape uv - Updates your active submodules and ensures you are on a consistent branch throughout your project.
Usage: grape-uv [-f ] [--checkSubprojects] [-b] [--skipSubmodules] [--allSubmodules]
[--skipNestedSubprojects] [--allNestedSubprojects] [--sync=<bool>]
[--add=<addedSubmoduleOrSubproject>...] [--rm=<removedSubmoduleOrSubproject>...]
Options:
-f Force removal of subprojects currently in your view that are taken out of the view as a
                              result of this call to uv.
--checkSubprojects Checks for branch model consistency across your submodules and subprojects, but does
not go through the 'which submodules do you want' script.
-b Automatically creates subproject branches that should be there according to your branching
model.
--allSubmodules Automatically add all submodules to your workspace.
--allNestedSubprojects Automatically add all nested subprojects to your workspace.
--sync=<bool> Take extra steps to ensure the branch you're on is up to date with origin,
either by pushing or pulling the remote tracking branch.
This will also checkout the public branch in a headless state prior to offering to create
a new branch (in repositories where the current branch does not exist).
[default: .grapeconfig.post-checkout.syncWithOrigin]
--add=<project> Submodule or subproject to add to the workspace. Can be defined multiple times.
      --rm=<project>            Submodule or subproject to remove from the workspace. Can be defined multiple times.
"""
def __init__(self):
super(UpdateView, self).__init__()
self._key = "uv"
self._section = "Workspace"
self._pushBranch = False
self._skipPush = False
def description(self):
return "Update the view of your current working tree"
@staticmethod
def defineActiveSubmodules(projectType="submodule"):
"""
Queries the user for the submodules (projectType == "submodule") or nested subprojects
(projectType == "nested subproject") they would like to activate.
"""
if projectType == "submodule":
allSubprojects = git.getAllSubmodules()
activeSubprojects = git.getActiveSubmodules()
if projectType == "nested subproject":
config = grapeConfig.grapeConfig()
allSubprojectNames = config.getAllNestedSubprojects()
allSubprojects = []
for project in allSubprojectNames:
allSubprojects.append(config.get("nested-%s" % project, "prefix"))
activeSubprojects = grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()
toplevelDirs = {}
toplevelActiveDirs = {}
toplevelSubs = []
for sub in allSubprojects:
# we are taking advantage of the fact that branchPrefixes are the same as directory prefixes for local
# top-level dirs.
prefix = git.branchPrefix(sub)
if sub != prefix:
toplevelDirs[prefix] = []
toplevelActiveDirs[prefix] = []
for sub in allSubprojects:
prefix = git.branchPrefix(sub)
if sub != prefix:
toplevelDirs[prefix].append(sub)
else:
toplevelSubs.append(sub)
for sub in activeSubprojects:
prefix = git.branchPrefix(sub)
if sub != prefix:
toplevelActiveDirs[prefix].append(sub)
included = {}
for directory, subprojects in toplevelDirs.items():
activeDir = toplevelActiveDirs[directory]
if len(activeDir) == 0:
defaultValue = "none"
elif set(activeDir) == set(subprojects):
defaultValue = "all"
else:
defaultValue = "some"
opt = utility.userInput("Would you like all, some, or none of the %ss in %s?" % (projectType,directory),
default=defaultValue)
if opt.lower()[0] == "a":
for subproject in subprojects:
included[subproject] = True
if opt.lower()[0] == "n":
for subproject in subprojects:
included[subproject] = False
if opt.lower()[0] == "s":
for subproject in subprojects:
included[subproject] = utility.userInput("Would you like %s %s? [y/n]" % (projectType, subproject),
'y' if (subproject in activeSubprojects) else 'n')
for subproject in toplevelSubs:
included[subproject] = utility.userInput("Would you like %s %s? [y/n]" % (projectType, subproject),
'y' if (subproject in activeSubprojects) else 'n')
return included
@staticmethod
def defineActiveNestedSubprojects():
"""
Queries the user for the nested subprojects they would like to activate.
"""
return UpdateView.defineActiveSubmodules(projectType="nested subproject")
def execute(self, args):
sync = args["--sync"].lower().strip()
sync = sync == "true" or sync == "yes"
args["--sync"] = sync
config = grapeConfig.grapeConfig()
origwd = os.getcwd()
wsDir = utility.workspaceDir()
os.chdir(wsDir)
base = git.baseDir()
if base == "":
return False
hasSubmodules = len(git.getAllSubmodules()) > 0 and not args["--skipSubmodules"]
includedSubmodules = {}
includedNestedSubprojectPrefixes = {}
allSubmodules = git.getAllSubmodules()
allNestedSubprojects = config.getAllNestedSubprojects()
addedSubmodules = []
addedNestedSubprojects = []
addedProjects = args["--add"]
notFound = []
for proj in addedProjects:
if proj in allSubmodules:
addedSubmodules.append(proj)
elif proj in allNestedSubprojects:
addedNestedSubprojects.append(proj)
else:
notFound.append(proj)
rmSubmodules = []
rmNestedSubprojects = []
rmProjects = args["--rm"]
for proj in rmProjects:
if proj in allSubmodules:
rmSubmodules.append(proj)
elif proj in allNestedSubprojects:
rmNestedSubprojects.append(proj)
else:
notFound.append(proj)
if notFound:
utility.printMsg("\"%s\" not found in submodules %s \nor\n nested subprojects %s" % (",".join(notFound),",".join(allSubmodules),",".join(allNestedSubprojects)))
return False
if not args["--checkSubprojects"]:
# get submodules to update
if hasSubmodules:
if args["--allSubmodules"]:
includedSubmodules = {sub:True for sub in allSubmodules}
elif args["--add"] or args["--rm"]:
includedSubmodules = {sub:True for sub in git.getActiveSubmodules()}
includedSubmodules.update({sub:True for sub in addedSubmodules})
includedSubmodules.update({sub:False for sub in rmSubmodules})
else:
includedSubmodules = self.defineActiveSubmodules()
# get subprojects to update
if not args["--skipNestedSubprojects"]:
nestedPrefixLookup = lambda x : config.get("nested-%s" % x, "prefix")
if args["--allNestedSubprojects"]:
includedNestedSubprojectPrefixes = {nestedPrefixLookup(sub):True for sub in allNestedSubprojects}
elif args["--add"] or args["--rm"]:
includedNestedSubprojectPrefixes = {sub:True for sub in grapeConfig.GrapeConfigParser.getAllActiveNestedSubprojectPrefixes()}
includedNestedSubprojectPrefixes.update({nestedPrefixLookup(sub):True for sub in addedNestedSubprojects})
includedNestedSubprojectPrefixes.update({nestedPrefixLookup(sub):False for sub in rmNestedSubprojects})
else:
includedNestedSubprojectPrefixes = self.defineActiveNestedSubprojects()
if hasSubmodules:
initStr = ""
deinitStr = ""
rmCachedStr = ""
resetStr = ""
for submodule, nowActive in includedSubmodules.items():
if nowActive:
initStr += ' %s' % submodule
else:
deinitStr += ' %s' % submodule
rmCachedStr += ' %s' % submodule
resetStr += ' %s' % submodule
if args["-f"] and deinitStr:
deinitStr = "-f"+deinitStr
utility.printMsg("Configuring submodules...")
utility.printMsg("Initializing submodules...")
git.submodule("init %s" % initStr.strip())
if deinitStr:
utility.printMsg("Deiniting submodules that were not requested... (%s)" % deinitStr)
done = False
while not done:
try:
git.submodule("deinit %s" % deinitStr.strip())
done = True
except git.GrapeGitError as e:
if "the following file has local modifications" in e.gitOutput:
print e.gitOutput
utility.printMsg("A submodule that you wanted to remove has local modifications. "
"Use grape uv -f to force removal.")
return False
elif "use 'rm -rf' if you really want to remove it including all of its history" in e.gitOutput:
if not args["-f"]:
raise e
# it is safe to move the .git of the submodule to the .git/modules area of the workspace...
module = None
for l in e.gitOutput.split('\n'):
if "Submodule work tree" in l and "contains a .git directory" in l:
module = l.split("'")[1]
break
if module:
src = os.path.join(module, ".git")
dest = os.path.join(wsDir, ".git", "modules", module)
utility.printMsg("Moving %s to %s"%(src, dest))
shutil.move(src, dest )
else:
raise e
else:
raise e
git.rm("--cached %s" % rmCachedStr)
git.reset(" %s" % resetStr)
if initStr:
utility.printMsg("Updating active submodules...(%s)" % initStr)
git.submodule("update")
# handle nested subprojects
if not args["--skipNestedSubprojects"]:
reverseLookupByPrefix = {nestedPrefixLookup(sub) : sub for sub in allNestedSubprojects}
userConfig = grapeConfig.grapeUserConfig()
updatedActiveList = []
for subproject, nowActive in includedNestedSubprojectPrefixes.items():
subprojectName = reverseLookupByPrefix[subproject]
section = "nested-%s" % reverseLookupByPrefix[subproject]
userConfig.ensureSection(section)
previouslyActive = userConfig.getboolean(section, "active")
previouslyActive = previouslyActive and os.path.exists(os.path.join(base, subproject, ".git"))
userConfig.set(section, "active", "True" if previouslyActive else "False")
if nowActive and previouslyActive:
updatedActiveList.append(subprojectName)
if nowActive and not previouslyActive:
utility.printMsg("Activating Nested Subproject %s" % subproject)
if not addSubproject.AddSubproject.activateNestedSubproject(subprojectName, userConfig):
utility.printMsg("Can't activate %s. Exiting..." % subprojectName)
return False
updatedActiveList.append(subprojectName)
if not nowActive and not previouslyActive:
pass
if not nowActive and previouslyActive:
#remove the subproject
subprojectdir = os.path.join(base, utility.makePathPortable(subproject))
proceed = args["-f"] or \
utility.userInput("About to delete all contents in %s. Any uncommitted changes, committed changes "
"that have not been pushed, or ignored files will be lost. Proceed?" %
subproject, 'n')
if proceed:
shutil.rmtree(subprojectdir)
userConfig.setActiveNestedSubprojects(updatedActiveList)
grapeConfig.writeConfig(userConfig, os.path.join(utility.workspaceDir(), ".git", ".grapeuserconfig"))
checkoutArgs = "-b" if args["-b"] else ""
safeSwitchWorkspaceToBranch( git.currentBranch(), checkoutArgs, sync)
os.chdir(origwd)
return True
@staticmethod
def getDesiredSubmoduleBranch(config):
publicBranches = config.getPublicBranchList()
currentBranch = git.currentBranch()
if currentBranch in publicBranches:
desiredSubmoduleBranch = config.getMapping("workspace", "submodulepublicmappings")[currentBranch]
else:
desiredSubmoduleBranch = currentBranch
return desiredSubmoduleBranch
def setDefaultConfig(self, config):
config.ensureSection("workspace")
config.set("workspace", "submodulepublicmappings", "?:master")
def ensureLocalUpToDateWithRemote(repo = '', branch = 'master'):
utility.printMsg( "Ensuring local branch %s in %s is up to date with origin" % (branch, repo))
with utility.cd(repo):
# attempt to fetch the requested branch
try:
git.fetch("origin", "%s:%s" % (branch, branch))
except:
# the branch may not exist, but this is ok
pass
if git.currentBranch() == branch:
return
if not git.hasBranch(branch):
# switch to corresponding public branch if the branch does not exist
public = grapeConfig.workspaceConfig().getPublicBranchFor(branch)
# figure out if this is a submodule
relpath = os.path.relpath(repo, utility.workspaceDir())
relpath = relpath.replace('\\',"/")
with utility.cd(utility.workspaceDir()):
# if this is a submodule, get the appropriate public mapping
if relpath in git.getAllSubmoduleURLMap().keys():
public = grapeConfig.workspaceConfig().getMapping("workspace", "submodulepublicmappings")[public]
utility.printMsg("Branch %s does not exist in %s, switching to %s and detaching" % (branch, repo, public))
git.checkout(public)
git.pull("origin %s" % (public))
git.checkout("--detach HEAD")
def cleanupPush(repo='', branch='', args='none'):
with utility.cd(repo):
utility.printMsg("Attempting push of local %s in %s" % (branch, repo))
git.push("origin %s" % branch)
def handleCleanupPushMRE(mre):
for e, repo, branch in zip(mre.exceptions(), mre.repos(), mre.branches()):
try:
raise e
except git.GrapeGitError as e2:
utility.printMsg("Local and remote versions of %s may have diverged in %s" % (branch, repo))
utility.printMsg("%s" % e2.gitOutput)
utility.printMsg("Use grape pull to merge the remote version into the local version.")
def handleEnsureLocalUpToDateMRE(mre):
_pushBranch = False
_skipPush = False
cleanupPushArgs = []
for e1, repo, branch in zip(mre.exceptions(), mre.repos(), mre.branches()):
try:
raise e1
except git.GrapeGitError as e:
if ("[rejected]" in e.gitOutput and "(non-fast-forward)" in e.gitOutput) or "Couldn't find remote ref" in e.gitOutput:
if "Couldn't find remote ref" in e.gitOutput:
if not _pushBranch:
utility.printMsg("No remote reference to %s in %s's origin. You may want to push this branch." % (branch, repo))
else:
utility.printMsg("Fetch of %s rejected as non-fast-forward in repo %s" % (branch, repo))
pushBranch = _pushBranch
if _skipPush:
pushBranch = False
elif not pushBranch:
pushBranch = utility.userInput("Would you like to push your local branch? \n"
"(select 'a' to say yes for (a)ll subprojects, 's' to (s)kip push for all subprojects)"
"\n(y,n,a,s)", 'y')
if str(pushBranch).lower()[0] == 'a':
_pushBranch = True
pushBranch = True
if str(pushBranch).lower()[0] == 's':
_skipPush = True
pushBranch = False
if pushBranch:
cleanupPushArgs.append((repo, branch, None))
else:
utility.printMsg("Skipping push of local %s in %s" % (branch, repo))
elif e.commError:
utility.printMsg("Could not update %s from origin due to a connectivity issue. Checking out most recent\n"
"local version. " % branch)
else:
raise(e)
# do another MRC launch to do any follow up pushes that were requested.
utility.MultiRepoCommandLauncher(cleanupPush, listOfRepoBranchArgTuples=cleanupPushArgs).launchFromWorkspaceDir(handleMRE=handleCleanupPushMRE)
return
def safeSwitchWorkspaceToBranch(branch, checkoutArgs, sync):
# Ensure local branches that you are about to check out are up to date with the remote
if sync:
launcher = utility.MultiRepoCommandLauncher(ensureLocalUpToDateWithRemote, branch = branch, globalArgs=[checkoutArgs])
launcher.launchFromWorkspaceDir(handleMRE=handleEnsureLocalUpToDateMRE)
# Do a checkout
# Pass False instead of sync since if sync is True ensureLocalUpToDateWithRemote will have already performed the fetch
launcher = utility.MultiRepoCommandLauncher(checkout.handledCheckout, branch = branch, globalArgs = [checkoutArgs, False])
launcher.launchFromWorkspaceDir(handleMRE=checkout.handleCheckoutMRE)
return
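

# ----------------------------------------------------------------------------
# Usage sketch, following the docopt usage in UpdateView above:
#
#     grape uv --allSubmodules --allNestedSubprojects --sync=True
#
# activates every submodule and nested subproject without prompting, then
# safeSwitchWorkspaceToBranch() checks the whole workspace out on a
# consistent, up-to-date branch.
# ----------------------------------------------------------------------------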
|
robinson96/GRAPE
|
vine/updateView.py
|
Python
|
bsd-3-clause
| 20,286
|
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import sy, sy.path
setup(
name='sy',
version=sy.__version__,
url='http://sy.afajl.com',
license='BSD',
author='Paul Diaconescu',
author_email='p@afajl.com',
description='Simple tools for system administration tasks',
long_description=sy.path.slurp('README.rst'),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Programming Language :: Python',
'Topic :: System :: Systems Administration',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=['sy', 'sy.net', 'sy.net.intf'],
package_data={
'sy': ['lib/*']
},
platforms='Python 2.4 and later on Unix',
install_requires=['logbook>=0.3', 'ipaddr>=2.0.0']
)
|
afajl/sy
|
setup.py
|
Python
|
bsd-3-clause
| 1,024
|
"""
WSGI config for goska project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "goska.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
jasminka/goska
|
goska/wsgi.py
|
Python
|
bsd-3-clause
| 385
|
from enable.api import ColorTrait
from .base_patch_stylus import BasePatchStylus
class RectangleStylus(BasePatchStylus):
""" A Flyweight object for drawing filled rectangles.
"""
edge_color = ColorTrait('black')
fill_color = ColorTrait('yellow')
def draw(self, gc, rect):
with gc:
gc.set_stroke_color(self.edge_color_)
            if self.fill_color != 'none':
gc.set_fill_color(self.fill_color_)
gc.fill_path()
gc.draw_rect([int(a) for a in rect])
|
tonysyu/deli
|
deli/stylus/rect_stylus.py
|
Python
|
bsd-3-clause
| 543
|
'''
Created on 14.07.2015
@author: Aaron Klein
'''
import numpy as np
from robo.task.base_task import BaseTask
class Hartmann6(BaseTask):
def __init__(self):
X_lower = np.array([0, 0, 0, 0, 0, 0])
X_upper = np.array([1, 1, 1, 1, 1, 1])
opt = np.array([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]])
fopt = np.array([[-3.32237]])
super(Hartmann6, self).__init__(X_lower, X_upper, opt, fopt)
self.alpha = [1.00, 1.20, 3.00, 3.20]
self.A = np.array([[10.00, 3.00, 17.00, 3.50, 1.70, 8.00],
[0.05, 10.00, 17.00, 0.10, 8.00, 14.00],
[3.00, 3.50, 1.70, 10.00, 17.00, 8.00],
[17.00, 8.00, 0.05, 10.00, 0.10, 14.00]])
self.P = 0.0001 * np.array([[1312, 1696, 5569, 124, 8283, 5886],
[2329, 4135, 8307, 3736, 1004, 9991],
[2348, 1451, 3522, 2883, 3047, 6650],
[4047, 8828, 8732, 5743, 1091, 381]])
def objective_function(self, x):
"""6d Hartmann test function
input bounds: 0 <= xi <= 1, i = 1..6
global optimum: (0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573),
min function value = -3.32237
"""
external_sum = 0
for i in range(4):
internal_sum = 0
for j in range(6):
internal_sum = internal_sum + self.A[i, j] * (x[:, j] - self.P[i, j]) ** 2
external_sum = external_sum + self.alpha[i] * np.exp(-internal_sum)
return -external_sum[:, np.newaxis]
def objective_function_test(self, x):
return self.objective_function(x)
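

if __name__ == "__main__":
    # Minimal usage sketch: evaluating at the known optimum should give a
    # value close to fopt = -3.32237 (assumes RoBO's BaseTask is installed).
    task = Hartmann6()
    x_opt = np.array([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]])
    print(task.objective_function(x_opt))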
|
aaronkl/RoBO
|
robo/task/synthetic_functions/hartmann6.py
|
Python
|
bsd-3-clause
| 1,747
|
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"%_!@u)(^e9boux@ji@sa-e*bv&uyb1tn8u%prm%0(#0!jx@@i*"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
'traceability',
'crispy_forms',
)
# Apps specific for this project go here.
LOCAL_APPS = (
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## SOUTH CONFIGURATION
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
# Database migration helpers:
'south',
)
# Don't need to use South when setting up a test database.
SOUTH_TESTS_MIGRATE = False
########## END SOUTH CONFIGURATION
|
vandorjw/django-traceability
|
demo/demo/settings/base.py
|
Python
|
bsd-3-clause
| 7,577
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Bridge for using cclib data in PyQuante (http://pyquante.sourceforge.net)."""
from __future__ import print_function
import sys
try:
from PyQuante.Molecule import Molecule
except ImportError:
# Fail silently for now.
pass
def makepyquante(atomcoords, atomnos, charge=0, mult=1):
"""Create a PyQuante Molecule.
>>> import numpy
>>> from PyQuante.hartree_fock import hf
>>> atomnos = numpy.array([1,8,1],"i")
>>> a = numpy.array([[-1,1,0],[0,0,0],[1,1,0]],"f")
>>> pyqmol = makepyquante(a,atomnos)
>>> en,orbe,orbs = hf(pyqmol)
    >>> print(int(en * 10) / 10.) # Should be around -73.8
-73.8
"""
return Molecule("notitle", list(zip(atomnos, atomcoords)), units="Angstrom",
charge=charge, multiplicity=mult)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Schamnad/cclib
|
src/cclib/bridge/cclib2pyquante.py
|
Python
|
bsd-3-clause
| 1,092
|
"""
Documents metadata tools.
"""
from __future__ import absolute_import, print_function, unicode_literals
import yaml
DELIMITER = '---'
def stripped(stream):
"""
Read stream to strip YAML header, returns stripped data.
"""
data = []
meta_started = False
meta_ended = False
# TODO: cleaner implementation?
for next_line in stream:
if meta_ended: # done with meta, collecting data
data.append(next_line)
else:
if next_line.startswith(DELIMITER):
if not meta_started: # meta start found!
meta_started = True
else: # meta end found!
meta_ended = True
else: # meta not found at all
if not meta_started:
data.append(next_line) # don't lose first line
meta_ended = True
# TODO: Avoid double memory use? Oh yes, I'm aware
# of premature optimization :)
return ''.join(data)
def extract(stream, **defaults):
"""
Read stream and extract YAML header.
"""
meta = Meta(**defaults)
meta.load(stream)
return meta
class Meta(dict):
"""
Metadata for Markdown files loaded from YAML headers.
"""
def load(self, stream):
meta_data = []
meta_opened = False
for next_line in stream:
if next_line.startswith(DELIMITER):
if meta_opened:
break # all meta is read, stop reading
else:
meta_opened = True # meta started
continue
elif not meta_opened:
break # no meta found
else:
meta_data.append(next_line)
if meta_data:
self.update(yaml.full_load(''.join(meta_data)))
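

# ----------------------------------------------------------------------------
# Minimal usage sketch:
#
#     import io
#     source = io.StringIO('---\ntitle: Hello\n---\n# Body\n')
#     meta = extract(source)    # -> Meta with {'title': 'Hello'}
#
# stripped() works on the same kind of stream and returns the document body
# with the YAML header removed.
# ----------------------------------------------------------------------------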
|
05bit/docta
|
docta/utils/meta.py
|
Python
|
bsd-3-clause
| 1,835
|
from canvas.exceptions import ServiceError, ValidationError
from canvas.economy import InvalidPurchase
from drawquest import knobs
from drawquest.apps.palettes.models import get_palette_by_name, all_palettes
from drawquest.signals import balance_changed
def balance(user):
return int(user.kv.stickers.currency.get() or 0)
def _adjust_balance(user, amount):
if amount >= 0:
user.kv.stickers.currency.increment(amount)
else:
result = user.kv.stickers.currency.increment_ifsufficient(amount)
if not result['success']:
raise InvalidPurchase("Insufficient balance.")
balance_changed.send(None, user=user)
publish_balance(user)
def publish_balance(user):
user.redis.coin_channel.publish({'balance': balance(user)})
def credit(user, amount):
_adjust_balance(user, amount)
def debit(user, amount):
_adjust_balance(user, -amount)
def credit_first_quest(user):
credit(user, knobs.REWARDS['first_quest'])
def credit_quest_of_the_day_completion(user):
credit(user, knobs.REWARDS['quest_of_the_day'])
def credit_archived_quest_completion(user):
credit(user, knobs.REWARDS['archived_quest'])
def credit_personal_share(user):
credit(user, knobs.REWARDS['personal_share'])
def credit_streak(user, streak):
credit(user, knobs.REWARDS['streak_{}'.format(streak)])
def credit_star(user):
user.kv.stickers_received.increment(1)
credit(user, knobs.REWARDS['star'])
def purchase_palette(user, palette):
if isinstance(palette, basestring):
        palette = get_palette_by_name(palette)
if palette in user.redis.palettes:
raise InvalidPurchase("You've already bought this palette.")
debit(user, palette.cost)
user.redis.palettes.unlock(palette)
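

# ----------------------------------------------------------------------------
# Usage sketch, assuming a User whose kv/redis backends are wired up and a
# palette actually named 'rainbow' exists:
#
#     credit_star(author)                   # author earns the 'star' reward
#     purchase_palette(buyer, 'rainbow')    # debits the cost, unlocks palette
#
# Every credit/debit goes through _adjust_balance(), which raises
# InvalidPurchase on insufficient funds and publishes the new balance on the
# user's coin channel.
# ----------------------------------------------------------------------------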
|
canvasnetworks/canvas
|
website/drawquest/economy.py
|
Python
|
bsd-3-clause
| 1,765
|
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this every time the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
"""Creates a database"""
def __init__(self, input_file, db_name, backend, image_dims, **kwargs):
"""
Arguments:
input_file -- read images and labels from this file
db_name -- save database to this location
backend -- database backend (lmdb/hdf5)
image_dims -- (height, width, channels)
Keyword Arguments:
image_folder -- prepend image paths with this folder
shuffle -- shuffle images before saving
resize_mode -- used in utils.image.resize_image()
encoding -- 'none', 'png' or 'jpg'
compression -- 'none' or 'gzip'
mean_file -- save mean file to this location
labels_file -- used to print category distribution
"""
# Take keyword arguments out of kwargs
self.image_folder = kwargs.pop('image_folder', None)
self.shuffle = kwargs.pop('shuffle', True)
        self.resize_mode = kwargs.pop('resize_mode', None)
self.encoding = kwargs.pop('encoding', None)
self.compression = kwargs.pop('compression', None)
self.mean_file = kwargs.pop('mean_file', None)
self.labels_file = kwargs.pop('labels_file', None)
super(CreateDbTask, self).__init__(**kwargs)
self.pickver_task_createdb = PICKLE_VERSION
self.input_file = input_file
self.db_name = db_name
self.backend = backend
if backend == 'hdf5':
# the list of hdf5 files is stored in a textfile
self.textfile = os.path.join(self.db_name, 'list.txt')
self.image_dims = image_dims
if image_dims[2] == 3:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = None
self.entries_count = None
self.distribution = None
self.create_db_log_file = "create_%s.log" % db_name
def __getstate__(self):
d = super(CreateDbTask, self).__getstate__()
if 'create_db_log' in d:
# don't save file handle
del d['create_db_log']
if 'labels' in d:
del d['labels']
return d
def __setstate__(self, state):
super(CreateDbTask, self).__setstate__(state)
if self.pickver_task_createdb <= 1:
if self.image_dims[2] == 1:
self.image_channel_order = None
elif self.encode:
self.image_channel_order = 'BGR'
else:
self.image_channel_order = 'RGB'
if self.pickver_task_createdb <= 2:
if hasattr(self, 'encode'):
if self.encode:
self.encoding = 'jpg'
else:
self.encoding = 'none'
delattr(self, 'encode')
else:
self.encoding = 'none'
self.pickver_task_createdb = PICKLE_VERSION
if not hasattr(self, 'backend') or self.backend is None:
self.backend = 'lmdb'
if not hasattr(self, 'compression') or self.compression is None:
self.compression = 'none'
@override
def name(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'Create DB (train)'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'Create DB (val)'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'Create DB (test)'
else:
return 'Create DB (%s)' % self.db_name
@override
def before_run(self):
super(CreateDbTask, self).before_run()
self.create_db_log = open(self.path(self.create_db_log_file), 'a')
@override
def html_id(self):
if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
return 'task-create_db-train'
elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
return 'task-create_db-val'
elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
return 'task-create_db-test'
else:
return super(CreateDbTask, self).html_id()
@override
def offer_resources(self, resources):
key = 'create_db_task_pool'
if key not in resources:
return None
for resource in resources[key]:
if resource.remaining() >= 1:
return {key: [(resource.identifier, 1)]}
return None
@override
def task_arguments(self, resources, env):
args = [sys.executable, os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(digits.__file__))),
'tools', 'create_db.py'),
self.path(self.input_file),
self.path(self.db_name),
self.image_dims[1],
self.image_dims[0],
'--backend=%s' % self.backend,
'--channels=%s' % self.image_dims[2],
'--resize_mode=%s' % self.resize_mode,
]
if self.mean_file is not None:
args.append('--mean_file=%s' % self.path(self.mean_file))
# Add a visual mean_file
args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
if self.image_folder:
args.append('--image_folder=%s' % self.image_folder)
if self.shuffle:
args.append('--shuffle')
if self.encoding and self.encoding != 'none':
args.append('--encoding=%s' % self.encoding)
if self.compression and self.compression != 'none':
args.append('--compression=%s' % self.compression)
if self.backend == 'hdf5':
args.append('--hdf5_dset_limit=%d' % 2**31)
return args
@override
def process_output(self, line):
from digits.webapp import socketio
self.create_db_log.write('%s\n' % line)
self.create_db_log.flush()
timestamp, level, message = self.preprocess_output_digits(line)
if not message:
return False
# progress
match = re.match(r'Processed (\d+)\/(\d+)', message)
if match:
self.progress = float(match.group(1))/int(match.group(2))
self.emit_progress_update()
return True
# distribution
match = re.match(r'Category (\d+) has (\d+)', message)
if match and self.labels_file is not None:
if not hasattr(self, 'distribution') or self.distribution is None:
self.distribution = {}
self.distribution[match.group(1)] = int(match.group(2))
data = self.distribution_data()
if data:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'distribution',
'data': data,
},
namespace='/jobs',
room=self.job_id,
)
return True
# result
match = re.match(r'(\d+) images written to database', message)
if match:
self.entries_count = int(match.group(1))
self.logger.debug(message)
return True
if level == 'warning':
self.logger.warning('%s: %s' % (self.name(), message))
return True
if level in ['error', 'critical']:
self.logger.error('%s: %s' % (self.name(), message))
self.exception = message
return True
return True
@override
def after_run(self):
from digits.webapp import socketio
super(CreateDbTask, self).after_run()
self.create_db_log.close()
if self.backend == 'lmdb':
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'exploration-ready',
},
namespace='/jobs',
room=self.job_id,
)
elif self.backend == 'hdf5':
# add more path information to the list of h5 files
lines = None
with open(self.path(self.textfile)) as infile:
lines = infile.readlines()
with open(self.path(self.textfile), 'w') as outfile:
for line in lines:
# XXX this works because the model job will be in an adjacent folder
outfile.write('%s\n' % os.path.join(
'..', self.job_id, self.db_name, line.strip()))
if self.mean_file:
socketio.emit('task update',
{
'task': self.html_id(),
'update': 'mean-image',
# XXX Can't use url_for here because we don't have a request context
'data': '/files/' + self.path('mean.jpg', relative=True),
},
namespace='/jobs',
room=self.job_id,
)
def get_labels(self):
"""
Read labels from labels_file and return them in a list
"""
# The labels might be set already
if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
return self._labels
assert hasattr(self, 'labels_file'), 'labels_file not set'
assert self.labels_file, 'labels_file not set'
assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
labels = []
with open(self.path(self.labels_file)) as infile:
for line in infile:
label = line.strip()
if label:
labels.append(label)
assert len(labels) > 0, 'no labels in labels_file'
self._labels = labels
return self._labels
def distribution_data(self):
"""
Returns distribution data for a C3.js graph
"""
if self.distribution is None:
return None
try:
labels = self.get_labels()
except AssertionError:
return None
if len(self.distribution.keys()) != len(labels):
return None
values = ['Count']
titles = []
for key, value in sorted(
self.distribution.items(),
key=operator.itemgetter(1),
reverse=True):
values.append(value)
titles.append(labels[int(key)])
return {
'data': {
'columns': [values],
'type': 'bar'
},
'axis': {
'x': {
'type': 'category',
'categories': titles,
}
},
}
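

# ----------------------------------------------------------------------------
# Illustrative example: with labels ['cat', 'dog'] and a parsed distribution
# of {'0': 120, '1': 80}, distribution_data() returns
#
#     {'data': {'columns': [['Count', 120, 80]], 'type': 'bar'},
#      'axis': {'x': {'type': 'category', 'categories': ['cat', 'dog']}}}
#
# i.e. the column/category structure expected by a C3.js bar chart.
# ----------------------------------------------------------------------------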
|
batra-mlp-lab/DIGITS
|
digits/dataset/tasks/create_db.py
|
Python
|
bsd-3-clause
| 11,328
|
from django.shortcuts import get_object_or_404
from apps.canvas_auth.models import User
from canvas.api_decorators import api_decorator
from canvas.metrics import Metrics
from canvas.models import Comment
from canvas.view_guards import require_user
urlpatterns = []
api = api_decorator(urlpatterns)
@api('hide_comment')
@require_user
def hide_comment(request, comment_id):
comment = get_object_or_404(Comment, pk=comment_id)
request.user.redis.hidden_comments.hide_comment(comment)
Metrics.downvote_action.record(request, comment=comment.id)
Metrics.hide_comment.record(request)
@api('hide_thread')
@require_user
def hide_thread(request, comment_id):
"""
`comment_id` may be the thread OP or any reply in it.
Also downvotes.
"""
comment = get_object_or_404(Comment, pk=comment_id)
request.user.redis.hidden_threads.hide_thread(comment)
sticker_count = comment.downvote(request.user, ip=request.META['REMOTE_ADDR'])
Metrics.downvote_action.record(request, count=sticker_count, comment=comment.id)
Metrics.hide_thread.record(request)
|
canvasnetworks/canvas
|
website/apps/comment_hiding/api.py
|
Python
|
bsd-3-clause
| 1,093
|
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Show how to access glyph outline description.
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
face = Face(b'./Vera.ttf')
face.set_char_size( 48*64 )
face.load_char('S')
slot = face.glyph
outline = slot.outline
points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
x, y = points['x'], points['y']
figure = plt.figure(figsize=(8,10))
axis = figure.add_subplot(111)
#axis.scatter(points['x'], points['y'], alpha=.25)
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if tags[j] & (1 << 0) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [Path.MOVETO,]
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([Path.LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([Path.CURVE3, Path.CURVE3])
else:
verts.append(segment[1])
codes.append(Path.CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ Path.CURVE3, Path.CURVE3])
verts.append(segment[-1])
codes.append(Path.CURVE3)
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
# Draw glyph lines
path = Path(VERTS, CODES)
glyph = patches.PathPatch(path, facecolor='.75', lw=1)
# Draw "control" lines
for i, code in enumerate(CODES):
if code == Path.CURVE3:
CODES[i] = Path.LINETO
path = Path(VERTS, CODES)
patch = patches.PathPatch(path, ec='.5', fill=False, ls='dashed', lw=1 )
axis.add_patch(patch)
axis.add_patch(glyph)
axis.set_xlim(x.min()-100, x.max()+100)
plt.xticks([])
axis.set_ylim(y.min()-100, y.max()+100)
plt.yticks([])
plt.show()
|
nlhepler/freetype-py3
|
examples/glyph-vector.py
|
Python
|
bsd-3-clause
| 2,878
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
from django.core.management import call_command
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Completely rebuilds the search index by removing the old data and then updating."
def add_arguments(self, parser):
parser.add_argument(
'--noinput', action='store_false', dest='interactive', default=True,
help='If provided, no prompts will be issued to the user and the data will be wiped out.'
)
parser.add_argument(
'-u', '--using', action='append', default=[],
help='Update only the named backend (can be used multiple times). '
'By default all backends will be updated.'
)
parser.add_argument(
'-k', '--workers', default=0, type=int,
            help='Allows the use of multiple workers to parallelize indexing. Requires multiprocessing.'
)
parser.add_argument(
'--nocommit', action='store_false', dest='commit',
default=True, help='Will pass commit=False to the backend.'
)
def handle(self, **options):
call_command('clear_index', **options)
call_command('update_index', **options)
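

# Usage sketch: the command simply chains clear_index and update_index with
# the same options, e.g.
#
#     python manage.py rebuild_index --noinput --workers 4
#
# wipes the configured backends and rebuilds the index without prompting.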
|
antonyr/django-haystack
|
haystack/management/commands/rebuild_index.py
|
Python
|
bsd-3-clause
| 1,324
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('latest_tweets', '0010_photo_unique'),
]
operations = [
migrations.AddField(
model_name='photo',
name='image_file',
field=models.ImageField(blank=True, upload_to='latest_tweets/photo'),
),
]
|
blancltd/django-latest-tweets
|
latest_tweets/migrations/0011_photo_image_file.py
|
Python
|
bsd-3-clause
| 434
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant des fonctions utiles à la manipulation de masques."""
def chaine_vers_liste(chaine):
"""Convertit une chaîne en sa liste de caractères"""
return list(chaine)
def liste_vers_chaine(liste):
"""Convertit une lsite de caractères en une chaîne"""
return "".join(liste)
def lstrip(liste):
"""Retire les espaces à gauche de la chaîne-liste"""
while liste and liste[0] == " ":
del liste[0]
return liste
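

# Minimal usage sketch:
#
#     >>> liste = chaine_vers_liste("  abc")
#     >>> lstrip(liste)
#     ['a', 'b', 'c']
#     >>> liste_vers_chaine(liste)
#     'abc'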
|
stormi/tsunami
|
src/primaires/interpreteur/masque/fonctions.py
|
Python
|
bsd-3-clause
| 2,032
|