Dataset schema:

| column | dtype | observed range / classes |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 classes |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
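A minimal sketch of how rows with this schema could be streamed and filtered, assuming the dump comes from a Hugging Face-style dataset; the dataset name below is a placeholder, and only the column names above are taken from this page:

from datasets import load_dataset

# Placeholder identifier; substitute the actual dataset name.
ds = load_dataset("<dataset-name>", split="train", streaming=True)
for row in ds:
    # Keep hand-written Python files whose longest line is reasonably short.
    if not row["autogenerated"] and row["line_max"] < 200:
        print(row["repo_name"], row["path"], row["license"], row["size"])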
repo_name: OpenAgInitiative/gro-api | path: gro_api/actuators/migrations/0008_auto_20150812_1550.py | copies: 1 | size: 1250 | content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('actuators', '0007_auto_20150812_1520'),
]
operations = [
migrations.AlterField(
model_name='actuatoreffect',
name='control_profile',
field=models.ForeignKey(to='actuators.ControlProfile', related_name='effects'),
),
migrations.AlterField(
model_name='actuatoreffect',
name='effect_on_active',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='actuatoreffect',
name='property',
field=models.ForeignKey(to='resources.ResourceProperty', related_name='+'),
),
migrations.AlterField(
model_name='actuatoreffect',
name='threshold',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='controlprofile',
name='properties',
field=models.ManyToManyField(through='actuators.ActuatorEffect', editable=False, to='resources.ResourceProperty', related_name='+'),
),
]
license: gpl-2.0 | hash: -4,914,615,802,447,475,000 | line_mean: 31.051282 | line_max: 144 | alpha_frac: 0.5896 | autogenerated: false | ratio: 4.448399 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: LumaPictures/rez | path: src/rez/resolved_context.py | copies: 1 | size: 63181 | content:
from rez import __version__, module_root_path
from rez.package_repository import package_repository_manager
from rez.solver import SolverCallbackReturn
from rez.resolver import Resolver, ResolverStatus
from rez.system import system
from rez.config import config
from rez.util import shlex_join, dedup
from rez.utils.sourcecode import SourceCodeError
from rez.utils.colorize import critical, heading, local, implicit, Printer
from rez.utils.formatting import columnise, PackageRequest
from rez.utils.filesystem import TempDirs
from rez.utils.memcached import pool_memcached_connections
from rez.backport.shutilwhich import which
from rez.rex import RexExecutor, Python, OutputStyle
from rez.rex_bindings import VersionBinding, VariantBinding, \
VariantsBinding, RequirementsBinding
from rez import package_order
from rez.packages_ import get_variant, iter_packages
from rez.package_filter import PackageFilterList
from rez.shells import create_shell
from rez.exceptions import ResolvedContextError, PackageCommandError, RezError
from rez.utils.graph_utils import write_dot, write_compacted, read_graph_from_string
from rez.vendor.version.version import VersionRange
from rez.vendor.enum import Enum
from rez.vendor import yaml
from rez.utils.yaml import dump_yaml
from tempfile import mkdtemp
from functools import wraps
import getpass
import traceback
import inspect
import time
import sys
import os
import os.path
# specifically so that str's are not converted to unicode on load
from rez.vendor import simplejson
class RezToolsVisibility(Enum):
"""Determines if/how rez cli tools are added back to PATH within a
resolved environment."""
never = 0 # Don't expose rez in resolved env
append = 1 # Append to PATH in resolved env
prepend = 2 # Prepend to PATH in resolved env
class SuiteVisibility(Enum):
"""Defines what suites on $PATH stay visible when a new rez environment is
resolved."""
never = 0 # Don't attempt to keep any suites visible in a new env
always = 1 # Keep suites visible in any new env
parent = 2 # Keep only the parent suite of a tool visible
parent_priority = 3 # Keep all suites visible and the parent takes precedence
class PatchLock(Enum):
""" Enum to represent the 'lock type' used when patching context objects.
"""
no_lock = ("No locking", -1)
lock_2 = ("Minor version updates only (X.*)", 1)
lock_3 = ("Patch version updates only (X.X.*)", 2)
lock_4 = ("Build version updates only (X.X.X.*)", 3)
lock = ("Exact version", -1)
__order__ = "no_lock,lock_2,lock_3,lock_4,lock"
def __init__(self, description, rank):
self.description = description
self.rank = rank
def get_lock_request(name, version, patch_lock, weak=True):
"""Given a package and patch lock, return the equivalent request.
For example, for object 'foo-1.2.1' and lock type 'lock_3', the equivalent
request is '~foo-1.2'. This restricts updates to foo to patch-or-lower
version changes only.
For objects not versioned down to a given lock level, the closest possible
lock is applied. So 'lock_3' applied to 'foo-1' would give '~foo-1'.
Args:
name (str): Package name.
version (Version): Package version.
patch_lock (PatchLock): Lock type to apply.
Returns:
`PackageRequest` object, or None if there is no equivalent request.
"""
ch = '~' if weak else ''
if patch_lock == PatchLock.lock:
s = "%s%s==%s" % (ch, name, str(version))
return PackageRequest(s)
elif (patch_lock == PatchLock.no_lock) or (not version):
return None
version_ = version.trim(patch_lock.rank)
s = "%s%s-%s" % (ch, name, str(version_))
return PackageRequest(s)
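# Illustrative sketch: expected results of get_lock_request(), following the
# docstring above.  Assumes rez's Version class (rez.vendor.version.version).
#
#     >>> from rez.vendor.version.version import Version
#     >>> str(get_lock_request("foo", Version("1.2.1"), PatchLock.lock_3))
#     '~foo-1.2'
#     >>> str(get_lock_request("foo", Version("1.2.1"), PatchLock.lock))
#     '~foo==1.2.1'
#     >>> get_lock_request("foo", Version("1.2.1"), PatchLock.no_lock) is None
#     True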
class ResolvedContext(object):
"""A class that resolves, stores and spawns Rez environments.
The main Rez entry point for creating, saving, loading and executing
resolved environments. A ResolvedContext object can be saved to file and
loaded at a later date, and it can reconstruct the equivalent environment
at that time. It can spawn interactive and non-interactive shells, in any
supported shell plugin type, such as bash and tcsh. It can also run a
command within a configured python namespace, without spawning a child
shell.
"""
serialize_version = (4, 3)
tmpdir_manager = TempDirs(config.context_tmpdir, prefix="rez_context_")
class Callback(object):
def __init__(self, max_fails, time_limit, callback, buf=None):
self.max_fails = max_fails
self.time_limit = time_limit
self.callback = callback
self.start_time = time.time()
self.buf = buf or sys.stdout
def __call__(self, state):
if self.max_fails != -1 and state.num_fails >= self.max_fails:
reason = ("fail limit reached: aborted after %d failures"
% state.num_fails)
return SolverCallbackReturn.fail, reason
if self.time_limit != -1:
secs = time.time() - self.start_time
if secs > self.time_limit:
return SolverCallbackReturn.abort, "time limit exceeded"
if self.callback:
return self.callback(state)
return SolverCallbackReturn.keep_going, ''
def __init__(self, package_requests, verbosity=0, timestamp=None,
building=False, caching=None, package_paths=None,
package_filter=None, package_orderers=None, max_fails=-1,
add_implicit_packages=True, time_limit=-1, callback=None,
package_load_callback=None, buf=None):
"""Perform a package resolve, and store the result.
Args:
package_requests: List of strings or PackageRequest objects
representing the request.
verbosity: Verbosity level. One of [0,1,2].
timestamp: Ignore packages released after this epoch time. Packages
released at exactly this time will not be ignored.
building: True if we're resolving for a build.
caching: If True, cache(s) may be used to speed the resolve. If
False, caches will not be used. If None, config.resolve_caching
is used.
package_paths: List of paths to search for pkgs, defaults to
config.packages_path.
package_filter (`PackageFilterBase`): Filter used to exclude certain
packages. Defaults to settings from config.package_filter. Use
`package_filter.no_filter` to remove all filtering.
package_orderers (list of `PackageOrder`): Custom package ordering.
add_implicit_packages: If True, the implicit package list defined
by config.implicit_packages is appended to the request.
max_fails (int): Abort the resolve if the number of failed steps is
greater or equal to this number. If -1, does not abort.
time_limit (int): Abort the resolve if it takes longer than this
many seconds. If -1, there is no time limit.
callback: See `Solver`.
package_load_callback: If not None, this callable will be called
prior to each package being loaded. It is passed a single
`Package` object.
buf (file-like object): Where to print verbose output to, defaults
to stdout.
"""
self.load_path = None
# resolving settings
self.requested_timestamp = timestamp
self.timestamp = self.requested_timestamp or int(time.time())
self.building = building
self.implicit_packages = []
self.caching = config.resolve_caching if caching is None else caching
self.verbosity = verbosity
self._package_requests = []
for req in package_requests:
if isinstance(req, basestring):
req = PackageRequest(req)
self._package_requests.append(req)
if add_implicit_packages:
self.implicit_packages = [PackageRequest(x)
for x in config.implicit_packages]
self.package_paths = (config.packages_path if package_paths is None
else package_paths)
self.package_paths = list(dedup(self.package_paths))
self.package_filter = (PackageFilterList.singleton if package_filter is None
else package_filter)
self.package_orderers = package_orderers or config.package_orderers
# patch settings
self.default_patch_lock = PatchLock.no_lock
self.patch_locks = {}
# info about env the resolve occurred in
self.rez_version = __version__
self.rez_path = module_root_path
self.user = getpass.getuser()
self.host = system.fqdn
self.platform = system.platform
self.arch = system.arch
self.os = system.os
self.created = int(time.time())
# resolve results
self.status_ = ResolverStatus.pending
self._resolved_packages = None
self.failure_description = None
self.graph_string = None
self.graph_ = None
self.from_cache = None
# stats
self.solve_time = 0.0 # total solve time, inclusive of load time
self.load_time = 0.0 # total time loading packages (disk or memcache)
self.num_loaded_packages = 0 # num packages loaded (disk or memcache)
# the pre-resolve bindings. We store these because @late package.py
# functions need them, and we cache them to avoid cost
self.pre_resolve_bindings = None
# suite information
self.parent_suite_path = None
self.suite_context_name = None
# perform the solve
callback_ = self.Callback(buf=buf,
max_fails=max_fails,
time_limit=time_limit,
callback=callback)
def _package_load_callback(package):
if package_load_callback:
package_load_callback(package)
self.num_loaded_packages += 1
request = self.requested_packages(include_implicit=True)
resolver = Resolver(context=self,
package_requests=request,
package_paths=self.package_paths,
package_filter=self.package_filter,
package_orderers=self.package_orderers,
timestamp=self.requested_timestamp,
building=self.building,
caching=self.caching,
callback=callback_,
package_load_callback=_package_load_callback,
verbosity=verbosity,
buf=buf)
resolver.solve()
# convert the results
self.status_ = resolver.status
self.solve_time = resolver.solve_time
self.load_time = resolver.load_time
self.failure_description = resolver.failure_description
self.graph_ = resolver.graph
self.from_cache = resolver.from_cache
if self.status_ == ResolverStatus.solved:
self._resolved_packages = []
for variant in resolver.resolved_packages:
variant.set_context(self)
self._resolved_packages.append(variant)
def __str__(self):
request = self.requested_packages(include_implicit=True)
req_str = " ".join(str(x) for x in request)
if self.status == ResolverStatus.solved:
res_str = " ".join(x.qualified_name for x in self._resolved_packages)
return "%s(%s ==> %s)" % (self.status.name, req_str, res_str)
else:
return "%s:%s(%s)" % (self.__class__.__name__,
self.status.name, req_str)
@property
def success(self):
"""True if the context has been solved, False otherwise."""
return (self.status_ == ResolverStatus.solved)
@property
def status(self):
"""Return the current status of the context.
Returns:
ResolverStatus.
"""
return self.status_
def requested_packages(self, include_implicit=False):
"""Get packages in the request.
Args:
include_implicit (bool): If True, implicit packages are appended
to the result.
Returns:
List of `PackageRequest` objects.
"""
if include_implicit:
return self._package_requests + self.implicit_packages
else:
return self._package_requests
@property
def resolved_packages(self):
"""Get packages in the resolve.
Returns:
List of `Variant` objects, or None if the resolve failed.
"""
return self._resolved_packages
def set_load_path(self, path):
"""Set the path that this context was reportedly loaded from.
You may want to use this method in cases where a context is saved to
disk, but you need to associate this new path with the context while it
is still in use.
"""
self.load_path = path
def __eq__(self, other):
"""Equality test.
Two contexts are considered equal if they have an equivalent request,
and an equivalent resolve. Other details, such as timestamp, are not
considered.
"""
return (isinstance(other, ResolvedContext)
and other.requested_packages(True) == self.requested_packages(True)
and other.resolved_packages == self.resolved_packages)
def __hash__(self):
list_ = []
req = self.requested_packages(True)
list_.append(tuple(req))
res = self.resolved_packages
if res is None:
list_.append(None)
else:
list_.append(tuple(res))
value = tuple(list_)
return hash(value)
@property
def has_graph(self):
"""Return True if the resolve has a graph."""
return bool((self.graph_ is not None) or self.graph_string)
def get_resolved_package(self, name):
"""Returns a `Variant` object or None if the package is not in the
resolve.
"""
pkgs = [x for x in self._resolved_packages if x.name == name]
return pkgs[0] if pkgs else None
def copy(self):
"""Returns a shallow copy of the context."""
import copy
return copy.copy(self)
# TODO: deprecate in favor of patch() method
def get_patched_request(self, package_requests=None,
package_subtractions=None, strict=False, rank=0):
"""Get a 'patched' request.
A patched request is a copy of this context's request, but with some
changes applied. This can then be used to create a new, 'patched'
context.
New package requests override original requests based on the type -
normal, conflict or weak. So 'foo-2' overrides 'foo-1', '!foo-2'
overrides '!foo-1' and '~foo-2' overrides '~foo-1', but a request such
as '!foo-2' would not replace 'foo-1' - it would be added instead.
Note that requests in `package_requests` can have the form '^foo'. This
is another way of supplying package subtractions.
Any new requests that don't override original requests are appended,
in the order that they appear in `package_requests`.
Args:
package_requests (list of str or list of `PackageRequest`):
Overriding requests.
package_subtractions (list of str): Any original request with a
package name in this list is removed, before the new requests
are added.
strict (bool): If True, the current context's resolve is used as the
original request list, rather than the request.
rank (int): If > 1, package versions can only increase in this rank
and further - for example, rank=3 means that only version patch
numbers are allowed to increase, major and minor versions will
not change. This is only applied to packages that have not been
explicitly overridden in `package_requests`. If rank <= 1, or
`strict` is True, rank is ignored.
Returns:
List of `PackageRequest` objects that can be used to construct a
new `ResolvedContext` object.
"""
# assemble source request
if strict:
request = []
for variant in self.resolved_packages:
req = PackageRequest(variant.qualified_package_name)
request.append(req)
else:
request = self.requested_packages()[:]
# convert '^foo'-style requests to subtractions
if package_requests:
package_subtractions = package_subtractions or []
indexes = []
for i, req in enumerate(package_requests):
name = str(req)
if name.startswith('^'):
package_subtractions.append(name[1:])
indexes.append(i)
for i in reversed(indexes):
del package_requests[i]
# apply subtractions
if package_subtractions:
request = [x for x in request if x.name not in package_subtractions]
# apply overrides
if package_requests:
request_dict = dict((x.name, (i, x)) for i, x in enumerate(request))
request_ = []
for req in package_requests:
if isinstance(req, basestring):
req = PackageRequest(req)
if req.name in request_dict:
i, req_ = request_dict[req.name]
if (req_ is not None) and (req_.conflict == req.conflict) \
and (req_.weak == req.weak):
request[i] = req
del request_dict[req.name]
else:
request_.append(req)
else:
request_.append(req)
request += request_
# add rank limiters
if not strict and rank > 1:
overrides = set(x.name for x in package_requests if not x.conflict)
rank_limiters = []
for variant in self.resolved_packages:
if variant.name not in overrides:
if len(variant.version) >= rank:
version = variant.version.trim(rank - 1)
version = version.next()
req = "~%s<%s" % (variant.name, str(version))
rank_limiters.append(req)
request += rank_limiters
return request
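# Illustrative sketch of the override rules described in the docstring above
# (all requests are hypothetical):
#
#     original request:            foo-1  ~bar-3  !baz-2
#     patched with ['foo-2', '!baz-3', 'qux', '^bar']:
#         'foo-2'  overrides 'foo-1'    (same type: normal)
#         '!baz-3' overrides '!baz-2'   (same type: conflict)
#         'qux'    is appended          (no original request to override)
#         '^bar'   subtracts '~bar-3'   ('^' means subtraction)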
def graph(self, as_dot=False):
"""Get the resolve graph.
Args:
as_dot: If True, get the graph as a dot-language string. Otherwise,
a pygraph.digraph object is returned.
Returns:
A string or `pygraph.digraph` object, or None if there is no graph
associated with the resolve.
"""
if not self.has_graph:
return None
if not as_dot:
if self.graph_ is None:
# reads either dot format or our compact format
self.graph_ = read_graph_from_string(self.graph_string)
return self.graph_
if self.graph_string:
if self.graph_string.startswith('{'): # compact format
self.graph_ = read_graph_from_string(self.graph_string)
else:
# already in dot format. Note that this will only happen in
# old rez contexts where the graph is not stored in the newer
# compact format.
return self.graph_string
return write_dot(self.graph_)
def save(self, path):
"""Save the resolved context to file."""
with open(path, 'w') as f:
self.write_to_buffer(f)
def write_to_buffer(self, buf):
"""Save the context to a buffer."""
doc = self.to_dict()
if config.rxt_as_yaml:
content = dump_yaml(doc)
else:
content = simplejson.dumps(doc, indent=4, separators=(",", ": "))
buf.write(content)
@classmethod
def get_current(cls):
"""Get the context for the current env, if there is one.
Returns:
`ResolvedContext`: Current context, or None if not in a resolved env.
"""
filepath = os.getenv("REZ_RXT_FILE")
if not filepath or not os.path.exists(filepath):
return None
return cls.load(filepath)
@classmethod
def load(cls, path):
"""Load a resolved context from file."""
with open(path) as f:
context = cls.read_from_buffer(f, path)
context.set_load_path(path)
return context
@classmethod
def read_from_buffer(cls, buf, identifier_str=None):
"""Load the context from a buffer."""
try:
return cls._read_from_buffer(buf, identifier_str)
except Exception as e:
cls._load_error(e, identifier_str)
def get_resolve_diff(self, other):
"""Get the difference between the resolve in this context and another.
The difference is described from the point of view of the current context
- a newer package means that the package in `other` is newer than the
package in `self`.
Diffs can only be compared if their package search paths match; an error
is raised otherwise.
The diff is expressed in packages, not variants - the specific variant
of a package is ignored.
Returns:
A dict containing:
- 'newer_packages': A dict containing items:
- package name (str);
- List of `Package` objects. These are the packages up to and
including the newer package in `self`, in ascending order.
- 'older_packages': A dict containing:
- package name (str);
- List of `Package` objects. These are the packages down to and
including the older package in `self`, in descending order.
- 'added_packages': Set of `Package` objects present in `self` but
not in `other`;
- 'removed_packages': Set of `Package` objects present in `other`,
but not in `self`.
If any item ('added_packages' etc) is empty, it is not added to the
resulting dict. Thus, an empty dict is returned if there is no
difference between contexts.
"""
if self.package_paths != other.package_paths:
from difflib import ndiff
diff = ndiff(self.package_paths, other.package_paths)
raise ResolvedContextError("Cannot diff resolves, package search "
"paths differ:\n%s" % '\n'.join(diff))
d = {}
self_pkgs_ = set(x.parent for x in self._resolved_packages)
other_pkgs_ = set(x.parent for x in other._resolved_packages)
self_pkgs = self_pkgs_ - other_pkgs_
other_pkgs = other_pkgs_ - self_pkgs_
if not (self_pkgs or other_pkgs):
return d
self_fams = dict((x.name, x) for x in self_pkgs)
other_fams = dict((x.name, x) for x in other_pkgs)
newer_packages = {}
older_packages = {}
added_packages = set()
removed_packages = set()
for pkg in self_pkgs:
if pkg.name not in other_fams:
removed_packages.add(pkg)
else:
other_pkg = other_fams[pkg.name]
if other_pkg.version > pkg.version:
r = VersionRange.as_span(lower_version=pkg.version,
upper_version=other_pkg.version)
it = iter_packages(pkg.name, range_=r)
pkgs = sorted(it, key=lambda x: x.version)
newer_packages[pkg.name] = pkgs
elif other_pkg.version < pkg.version:
r = VersionRange.as_span(lower_version=other_pkg.version,
upper_version=pkg.version)
it = iter_packages(pkg.name, range_=r)
pkgs = sorted(it, key=lambda x: x.version, reverse=True)
older_packages[pkg.name] = pkgs
for pkg in other_pkgs:
if pkg.name not in self_fams:
added_packages.add(pkg)
if newer_packages:
d["newer_packages"] = newer_packages
if older_packages:
d["older_packages"] = older_packages
if added_packages:
d["added_packages"] = added_packages
if removed_packages:
d["removed_packages"] = removed_packages
return d
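# Illustrative shape of the dict returned by get_resolve_diff(), per the
# docstring above (Package objects shown schematically):
#
#     {
#         "newer_packages": {"foo": [Package<foo-1.2>, Package<foo-1.3>]},
#         "older_packages": {"bar": [Package<bar-2.1>, Package<bar-2.0>]},
#         "added_packages": set([Package<baz-1.0>]),
#     }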
@pool_memcached_connections
def print_info(self, buf=sys.stdout, verbosity=0, source_order=False,
show_resolved_uris=False):
"""Prints a message summarising the contents of the resolved context.
Args:
buf (file-like object): Where to print this info to.
verbosity (bool): Verbose mode.
source_order (bool): If True, print resolved packages in the order
they are sourced, rather than alphabetical order.
show_resolved_uris (bool): By default, resolved packages have their
'root' property listed, or their 'uri' if 'root' is None. Use
this option to list 'uri' regardless.
"""
_pr = Printer(buf)
def _rt(t):
if verbosity:
s = time.strftime("%a %b %d %H:%M:%S %Z %Y", time.localtime(t))
return s + " (%d)" % int(t)
else:
return time.strftime("%a %b %d %H:%M:%S %Y", time.localtime(t))
if self.status_ in (ResolverStatus.failed, ResolverStatus.aborted):
_pr("The context failed to resolve:\n%s"
% self.failure_description, critical)
return
t_str = _rt(self.created)
_pr("resolved by %s@%s, on %s, using Rez v%s"
% (self.user, self.host, t_str, self.rez_version))
if self.requested_timestamp:
t_str = _rt(self.requested_timestamp)
_pr("packages released after %s were ignored" % t_str)
_pr()
if verbosity:
_pr("search paths:", heading)
rows = []
colors = []
for path in self.package_paths:
if package_repository_manager.are_same(path, config.local_packages_path):
label = "(local)"
col = local
else:
label = ""
col = None
rows.append((path, label))
colors.append(col)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
_pr()
if self.package_filter:
data = self.package_filter.to_pod()
txt = dump_yaml(data)
_pr("package filters:", heading)
_pr(txt)
_pr()
_pr("requested packages:", heading)
rows = []
colors = []
for request in self._package_requests:
rows.append((str(request), ""))
colors.append(None)
for request in self.implicit_packages:
rows.append((str(request), "(implicit)"))
colors.append(implicit)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
_pr()
_pr("resolved packages:", heading)
rows = []
colors = []
resolved_packages = self.resolved_packages or []
if not source_order:
resolved_packages = sorted(resolved_packages, key=lambda x: x.name)
for pkg in resolved_packages:
t = []
col = None
location = None
# print root/uri
if show_resolved_uris or not pkg.root:
location = pkg.uri
else:
location = pkg.root
if not os.path.exists(pkg.root):
t.append('NOT FOUND')
col = critical
if pkg.is_local:
t.append('local')
col = local
t = '(%s)' % ', '.join(t) if t else ''
rows.append((pkg.qualified_package_name, location, t))
colors.append(col)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
if verbosity:
_pr()
actual_solve_time = self.solve_time - self.load_time
_pr("resolve details:", heading)
_pr("load time: %.02f secs" % self.load_time)
_pr("solve time: %.02f secs" % actual_solve_time)
_pr("packages queried: %d" % self.num_loaded_packages)
_pr("from cache: %s" % self.from_cache)
if self.load_path:
_pr("rxt file: %s" % self.load_path)
if verbosity >= 2:
_pr()
_pr("tools:", heading)
self.print_tools(buf=buf)
def print_tools(self, buf=sys.stdout):
data = self.get_tools()
if not data:
return
_pr = Printer(buf)
conflicts = set(self.get_conflicting_tools().keys())
rows = [["TOOL", "PACKAGE", ""],
["----", "-------", ""]]
colors = [None, None]
for _, (variant, tools) in sorted(data.items()):
pkg_str = variant.qualified_package_name
for tool in sorted(tools):
col = None
row = [tool, pkg_str, ""]
if tool in conflicts:
col = critical
row[-1] = "(in conflict)"
rows.append(row)
colors.append(col)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
def print_resolve_diff(self, other, heading=None):
"""Print the difference between the resolve of two contexts.
Args:
other (`ResolvedContext`): Context to compare to.
heading: One of:
- None: Do not display a heading;
- True: Display the filename of each context as a heading, if
both contexts have a filepath;
- 2-tuple: Use the given two strings as headings - the first is
the heading for `self`, the second for `other`.
"""
d = self.get_resolve_diff(other)
if not d:
return
rows = []
if heading is True and self.load_path and other.load_path:
a = os.path.basename(self.load_path)
b = os.path.basename(other.load_path)
heading = (a, b)
if isinstance(heading, tuple):
rows.append(list(heading) + [""])
rows.append(('-' * len(heading[0]), '-' * len(heading[1]), ""))
newer_packages = d.get("newer_packages", {})
older_packages = d.get("older_packages", {})
added_packages = d.get("added_packages", set())
removed_packages = d.get("removed_packages", set())
if newer_packages:
for name, pkgs in newer_packages.iteritems():
this_pkg = pkgs[0]
other_pkg = pkgs[-1]
diff_str = "(+%d versions)" % (len(pkgs) - 1)
rows.append((this_pkg.qualified_name,
other_pkg.qualified_name,
diff_str))
if older_packages:
for name, pkgs in older_packages.iteritems():
this_pkg = pkgs[0]
other_pkg = pkgs[-1]
diff_str = "(-%d versions)" % (len(pkgs) - 1)
rows.append((this_pkg.qualified_name,
other_pkg.qualified_name,
diff_str))
if added_packages:
for pkg in sorted(added_packages, key=lambda x: x.name):
rows.append(("-", pkg.qualified_name, ""))
if removed_packages:
for pkg in sorted(removed_packages, key=lambda x: x.name):
rows.append((pkg.qualified_name, "-", ""))
print '\n'.join(columnise(rows))
def _on_success(fn):
@wraps(fn)
def _check(self, *nargs, **kwargs):
if self.status_ == ResolverStatus.solved:
return fn(self, *nargs, **kwargs)
else:
raise ResolvedContextError(
"Cannot perform operation in a failed context")
return _check
@_on_success
def get_dependency_graph(self):
"""Generate the dependency graph.
The dependency graph is a simpler subset of the resolve graph. It
contains package name nodes connected directly to their dependencies.
Weak references and conflict requests are not included in the graph.
The dependency graph does not show conflicts.
Returns:
`pygraph.digraph` object.
"""
from rez.vendor.pygraph.classes.digraph import digraph
nodes = {}
edges = set()
for variant in self._resolved_packages:
nodes[variant.name] = variant.qualified_package_name
for request in variant.get_requires():
if not request.conflict:
edges.add((variant.name, request.name))
g = digraph()
node_color = "#AAFFAA"
node_fontsize = 10
attrs = [("fontsize", node_fontsize),
("fillcolor", node_color),
("style", "filled")]
for name, qname in nodes.iteritems():
g.add_node(name, attrs=attrs + [("label", qname)])
for edge in edges:
g.add_edge(edge)
return g
@_on_success
def validate(self):
"""Validate the context."""
try:
for pkg in self.resolved_packages:
pkg.validate_data()
except RezError as e:
raise ResolvedContextError("%s: %s" % (e.__class__.__name__, str(e)))
@_on_success
def get_environ(self, parent_environ=None):
"""Get the environ dict resulting from interpreting this context.
@param parent_environ Environment to interpret the context within,
defaults to os.environ if None.
@returns The environment dict generated by this context, when
interpreted in a python rex interpreter.
"""
interp = Python(target_environ={}, passive=True)
executor = self._create_executor(interp, parent_environ)
self._execute(executor)
return executor.get_output()
@_on_success
def get_key(self, key, request_only=False):
"""Get a data key value for each resolved package.
Args:
key (str): String key of property, eg 'tools'.
request_only (bool): If True, only return the key from resolved
packages that were also present in the request.
Returns:
Dict of {pkg-name: (variant, value)}.
"""
values = {}
requested_names = [x.name for x in self._package_requests
if not x.conflict]
for pkg in self.resolved_packages:
if (not request_only) or (pkg.name in requested_names):
value = getattr(pkg, key)
if value is not None:
values[pkg.name] = (pkg, value)
return values
@_on_success
def get_tools(self, request_only=False):
"""Returns the commandline tools available in the context.
Args:
request_only: If True, only return the tools from resolved packages
that were also present in the request.
Returns:
Dict of {pkg-name: (variant, [tools])}.
"""
return self.get_key("tools", request_only=request_only)
@_on_success
def get_tool_variants(self, tool_name):
"""Get the variant(s) that provide the named tool.
If more than one variant provides the tool, it is in conflict, and Rez
does not know which variant's tool is actually exposed.
Args:
tool_name(str): Name of the tool to search for.
Returns:
Set of `Variant` objects. If no variant provides the tool, an
empty set is returned.
"""
variants = set()
tools_dict = self.get_tools(request_only=False)
for variant, tools in tools_dict.itervalues():
if tool_name in tools:
variants.add(variant)
return variants
@_on_success
def get_conflicting_tools(self, request_only=False):
"""Returns tools of the same name provided by more than one package.
Args:
request_only: If True, only return the key from resolved packages
that were also present in the request.
Returns:
Dict of {tool-name: set([Variant])}.
"""
from collections import defaultdict
tool_sets = defaultdict(set)
tools_dict = self.get_tools(request_only=request_only)
for variant, tools in tools_dict.itervalues():
for tool in tools:
tool_sets[tool].add(variant)
conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)
return conflicts
@_on_success
def get_shell_code(self, shell=None, parent_environ=None, style=OutputStyle.file):
"""Get the shell code resulting from intepreting this context.
Args:
shell (str): Shell type, for eg 'bash'. If None, the current shell
type is used.
parent_environ (dict): Environment to interpret the context within,
defaults to os.environ if None.
style (): Style to format shell code in.
"""
executor = self._create_executor(interpreter=create_shell(shell),
parent_environ=parent_environ)
if self.load_path and os.path.isfile(self.load_path):
executor.env.REZ_RXT_FILE = self.load_path
self._execute(executor)
return executor.get_output(style)
@_on_success
def get_actions(self, parent_environ=None):
"""Get the list of rex.Action objects resulting from interpreting this
context. This is provided mainly for testing purposes.
Args:
parent_environ Environment to interpret the context within,
defaults to os.environ if None.
Returns:
A list of rex.Action subclass instances.
"""
interp = Python(target_environ={}, passive=True)
executor = self._create_executor(interp, parent_environ)
self._execute(executor)
return executor.actions
@_on_success
def apply(self, parent_environ=None):
"""Apply the context to the current python session.
Note that this updates os.environ and possibly sys.path.
@param parent_environ Environment to interpret the context within, defaults to
os.environ if None.
"""
interpreter = Python(target_environ=os.environ)
executor = self._create_executor(interpreter, parent_environ)
self._execute(executor)
@_on_success
def which(self, cmd, parent_environ=None, fallback=False):
"""Find a program in the resolved environment.
Args:
cmd: String name of the program to find.
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
fallback: If True, and the program is not found in the context,
the current environment will then be searched.
Returns:
Path to the program, or None if the program was not found.
"""
env = self.get_environ(parent_environ=parent_environ)
path = which(cmd, env=env)
if fallback and path is None:
path = which(cmd)
return path
@_on_success
def execute_command(self, args, parent_environ=None, **subprocess_kwargs):
"""Run a command within a resolved context.
This applies the context to a python environ dict, then runs a
subprocess in that namespace. This is not a fully configured subshell -
shell-specific commands such as aliases will not be applied. To execute
a command within a subshell instead, use execute_shell().
Warning:
This runs a command in a configured environ dict only, not in a true
shell. To do that, call `execute_shell` using the `command` keyword
argument.
Args:
args: Command arguments, can be a string.
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
subprocess_kwargs: Args to pass to subprocess.Popen.
Returns:
A subprocess.Popen object.
Note:
This does not alter the current python session.
"""
interpreter = Python(target_environ={})
executor = self._create_executor(interpreter, parent_environ)
self._execute(executor)
return interpreter.subprocess(args, **subprocess_kwargs)
@_on_success
def execute_rex_code(self, code, filename=None, shell=None,
parent_environ=None, **Popen_args):
"""Run some rex code in the context.
Note:
This is just a convenience form of `execute_shell`.
Args:
code (str): Rex code to execute.
filename (str): Filename to report if there are syntax errors.
shell: Shell type, for eg 'bash'. If None, the current shell type
is used.
parent_environ: Environment to run the shell process in, if None
then the current environment is used.
Popen_args: args to pass to the shell process object constructor.
Returns:
`subprocess.Popen` object for the shell process.
"""
def _actions_callback(executor):
executor.execute_code(code, filename=filename)
return self.execute_shell(shell=shell,
parent_environ=parent_environ,
command='', # don't run any command
block=False,
actions_callback=_actions_callback,
**Popen_args)
@_on_success
def execute_shell(self, shell=None, parent_environ=None, rcfile=None,
norc=False, stdin=False, command=None, quiet=False,
block=None, actions_callback=None, post_actions_callback=None,
context_filepath=None, start_new_session=False, detached=False,
pre_command=None, **Popen_args):
"""Spawn a possibly-interactive shell.
Args:
shell: Shell type, for eg 'bash'. If None, the current shell type
is used.
parent_environ: Environment to run the shell process in, if None
then the current environment is used.
rcfile: Specify a file to source instead of shell startup files.
norc: If True, skip shell startup files, if possible.
stdin: If True, read commands from stdin, in a non-interactive
shell.
command: If not None, execute this command in a non-interactive shell.
If an empty string or list, don't run a command, but don't open
an interactive shell either. Can be a list of args.
quiet: If True, skip the welcome message in interactive shells.
block: If True, block until the shell is terminated. If False,
return immediately. If None, will default to blocking if the
shell is interactive.
actions_callback: Callback with signature (RexExecutor). This lets
the user append custom actions to the context, such as setting
extra environment variables. Callback is run prior to context Rex
execution.
post_actions_callback: Callback with signature (RexExecutor). This lets
the user append custom actions to the context, such as setting
extra environment variables. Callback is run after context Rex
execution.
context_filepath: If provided, the context file will be written
here, rather than to the default location (which is in a
tempdir). If you use this arg, you are responsible for cleaning
up the file.
start_new_session: If True, change the process group of the target
process. Note that this may override the Popen_args keyword
'preexec_fn'.
detached: If True, open a separate terminal. Note that this may
override the `pre_command` argument.
pre_command: Command to inject before the shell command itself. This
is for internal use.
Popen_args: args to pass to the shell process object constructor.
Returns:
If blocking: A 3-tuple of (returncode, stdout, stderr);
If non-blocking - A subprocess.Popen object for the shell process.
"""
sh = create_shell(shell)
if hasattr(command, "__iter__"):
command = sh.join(command)
# start a new session if specified
if start_new_session:
Popen_args.update(config.new_session_popen_args)
# open a separate terminal if specified
if detached:
term_cmd = config.terminal_emulator_command
if term_cmd:
pre_command = term_cmd.strip().split()
# block if the shell is likely to be interactive
if block is None:
block = not (command or stdin)
# context and rxt files. If running detached, don't cleanup files, because
# rez-env returns too early and deletes the tmp files before the detached
# process can use them
tmpdir = self.tmpdir_manager.mkdtemp(cleanup=not detached)
if self.load_path and os.path.isfile(self.load_path):
rxt_file = self.load_path
else:
rxt_file = os.path.join(tmpdir, "context.rxt")
self.save(rxt_file)
context_file = context_filepath or \
os.path.join(tmpdir, "context.%s" % sh.file_extension())
# interpret this context and write out the native context file
executor = self._create_executor(sh, parent_environ)
executor.env.REZ_RXT_FILE = rxt_file
executor.env.REZ_CONTEXT_FILE = context_file
if actions_callback:
actions_callback(executor)
self._execute(executor)
if post_actions_callback:
post_actions_callback(executor)
context_code = executor.get_output()
with open(context_file, 'w') as f:
f.write(context_code)
quiet = quiet or (RezToolsVisibility[config.rez_tools_visibility]
== RezToolsVisibility.never)
# spawn the shell subprocess
p = sh.spawn_shell(context_file,
tmpdir,
rcfile=rcfile,
norc=norc,
stdin=stdin,
command=command,
env=parent_environ,
quiet=quiet,
pre_command=pre_command,
**Popen_args)
if block:
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
else:
return p
def to_dict(self):
resolved_packages = []
for pkg in (self._resolved_packages or []):
resolved_packages.append(pkg.handle.to_dict())
serialize_version = '.'.join(str(x) for x in ResolvedContext.serialize_version)
patch_locks = dict((k, v.name) for k, v in self.patch_locks.iteritems())
if self.package_orderers:
package_orderers_list = self.package_orderers.to_pod()
else:
package_orderers_list = None
if self.graph_string and self.graph_string.startswith('{'):
graph_str = self.graph_string # already in compact format
else:
g = self.graph()
graph_str = write_compacted(g)
return dict(
serialize_version=serialize_version,
timestamp=self.timestamp,
requested_timestamp=self.requested_timestamp,
building=self.building,
caching=self.caching,
implicit_packages=[str(x) for x in self.implicit_packages],
package_requests=[str(x) for x in self._package_requests],
package_paths=self.package_paths,
package_filter=self.package_filter.to_pod(),
package_orderers=package_orderers_list or None,
default_patch_lock=self.default_patch_lock.name,
patch_locks=patch_locks,
rez_version=self.rez_version,
rez_path=self.rez_path,
user=self.user,
host=self.host,
platform=self.platform,
arch=self.arch,
os=self.os,
created=self.created,
parent_suite_path=self.parent_suite_path,
suite_context_name=self.suite_context_name,
status=self.status_.name,
resolved_packages=resolved_packages,
failure_description=self.failure_description,
graph=graph_str,
from_cache=self.from_cache,
solve_time=self.solve_time,
load_time=self.load_time,
num_loaded_packages=self.num_loaded_packages)
@classmethod
def from_dict(cls, d, identifier_str=None):
"""Load a `ResolvedContext` from a dict.
Args:
d (dict): Dict containing context data.
identifier_str (str): String identifying the context, this is only
used to display in an error string if a serialization version
mismatch is detected.
Returns:
`ResolvedContext` object.
"""
# check serialization version
def _print_version(value):
return '.'.join(str(x) for x in value)
toks = str(d["serialize_version"]).split('.')
load_ver = tuple(int(x) for x in toks)
curr_ver = ResolvedContext.serialize_version
if load_ver[0] > curr_ver[0]:
msg = ["The context"]
if identifier_str:
msg.append("in %s" % identifier_str)
msg.append("was written by a newer version of Rez. The load may "
"fail (serialize version %d > %d)"
% (_print_version(load_ver), _print_version(curr_ver)))
print >> sys.stderr, ' '.join(msg)
# create and init the context
r = ResolvedContext.__new__(ResolvedContext)
r.load_path = None
r.pre_resolve_bindings = None
r.timestamp = d["timestamp"]
r.building = d["building"]
r.caching = d["caching"]
r.implicit_packages = [PackageRequest(x) for x in d["implicit_packages"]]
r._package_requests = [PackageRequest(x) for x in d["package_requests"]]
r.package_paths = d["package_paths"]
r.rez_version = d["rez_version"]
r.rez_path = d["rez_path"]
r.user = d["user"]
r.host = d["host"]
r.platform = d["platform"]
r.arch = d["arch"]
r.os = d["os"]
r.created = d["created"]
r.verbosity = d.get("verbosity", 0)
r.status_ = ResolverStatus[d["status"]]
r.failure_description = d["failure_description"]
r.solve_time = d["solve_time"]
r.load_time = d["load_time"]
r.graph_string = d["graph"]
r.graph_ = None
r._resolved_packages = []
for d_ in d["resolved_packages"]:
variant_handle = d_
if load_ver < (4, 0):
# -- SINCE SERIALIZE VERSION 4.0
from rez.utils.backcompat import convert_old_variant_handle
variant_handle = convert_old_variant_handle(variant_handle)
variant = get_variant(variant_handle)
variant.set_context(r)
r._resolved_packages.append(variant)
# -- SINCE SERIALIZE VERSION 1
r.requested_timestamp = d.get("requested_timestamp", 0)
# -- SINCE SERIALIZE VERSION 2
r.parent_suite_path = d.get("parent_suite_path")
r.suite_context_name = d.get("suite_context_name")
# -- SINCE SERIALIZE VERSION 3
r.default_patch_lock = PatchLock[d.get("default_patch_lock", "no_lock")]
patch_locks = d.get("patch_locks", {})
r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks.iteritems())
# -- SINCE SERIALIZE VERSION 4.0
r.from_cache = d.get("from_cache", False)
# -- SINCE SERIALIZE VERSION 4.1
data = d.get("package_filter", [])
r.package_filter = PackageFilterList.from_pod(data)
# -- SINCE SERIALIZE VERSION 4.2
data = d.get("package_orderers")
if data:
r.package_orderers = package_order.OrdererDict(data)
else:
r.package_orderers = None
# -- SINCE SERIALIZE VERSION 4.3
r.num_loaded_packages = d.get("num_loaded_packages", -1)
return r
@classmethod
def _read_from_buffer(cls, buf, identifier_str=None):
content = buf.read()
if content.startswith('{'): # assume json content
doc = simplejson.loads(content)
else:
doc = yaml.load(content)
context = cls.from_dict(doc, identifier_str)
return context
@classmethod
def _load_error(cls, e, path=None):
exc_name = e.__class__.__name__
msg = "Failed to load context"
if path:
msg += " from %s" % path
raise ResolvedContextError("%s: %s: %s" % (msg, exc_name, str(e)))
def _set_parent_suite(self, suite_path, context_name):
self.parent_suite_path = suite_path
self.suite_context_name = context_name
def _create_executor(self, interpreter, parent_environ):
parent_vars = True if config.all_parent_variables \
else config.parent_variables
return RexExecutor(interpreter=interpreter,
parent_environ=parent_environ,
parent_variables=parent_vars)
def _get_pre_resolve_bindings(self):
if self.pre_resolve_bindings is None:
self.pre_resolve_bindings = {
"system": system,
"building": self.building,
"request": RequirementsBinding(self._package_requests),
"implicits": RequirementsBinding(self.implicit_packages)
}
return self.pre_resolve_bindings
@pool_memcached_connections
def _execute(self, executor):
br = '#' * 80
br_minor = '-' * 80
def _heading(txt):
executor.comment("")
executor.comment("")
executor.comment(br)
executor.comment(txt)
executor.comment(br)
def _minor_heading(txt):
executor.comment("")
executor.comment(txt)
executor.comment(br_minor)
# bind various info to the execution context
resolved_pkgs = self.resolved_packages or []
request_str = ' '.join(str(x) for x in self._package_requests)
implicit_str = ' '.join(str(x) for x in self.implicit_packages)
resolve_str = ' '.join(x.qualified_package_name for x in resolved_pkgs)
package_paths_str = os.pathsep.join(self.package_paths)
_heading("system setup")
executor.setenv("REZ_USED", self.rez_path)
executor.setenv("REZ_USED_VERSION", self.rez_version)
executor.setenv("REZ_USED_TIMESTAMP", str(self.timestamp))
executor.setenv("REZ_USED_REQUESTED_TIMESTAMP",
str(self.requested_timestamp or 0))
executor.setenv("REZ_USED_REQUEST", request_str)
executor.setenv("REZ_USED_IMPLICIT_PACKAGES", implicit_str)
executor.setenv("REZ_USED_RESOLVE", resolve_str)
executor.setenv("REZ_USED_PACKAGES_PATH", package_paths_str)
if self.building:
executor.setenv("REZ_BUILD_ENV", "1")
# rez-1 environment variables, set in backwards compatibility mode
if config.rez_1_environment_variables and \
not config.disable_rez_1_compatibility:
request_str_ = " ".join([request_str, implicit_str]).strip()
executor.setenv("REZ_VERSION", self.rez_version)
executor.setenv("REZ_PATH", self.rez_path)
executor.setenv("REZ_REQUEST", request_str_)
executor.setenv("REZ_RESOLVE", resolve_str)
executor.setenv("REZ_RAW_REQUEST", request_str_)
executor.setenv("REZ_RESOLVE_MODE", "latest")
# binds objects such as 'request', which are accessible before a resolve
bindings = self._get_pre_resolve_bindings()
for k, v in bindings.iteritems():
executor.bind(k, v)
executor.bind('resolve', VariantsBinding(resolved_pkgs))
#
# -- apply each resolved package to the execution context
#
_heading("package variables")
error_class = SourceCodeError if config.catch_rex_errors else None
# set basic package variables and create per-package bindings
bindings = {}
for pkg in resolved_pkgs:
_minor_heading("variables for package %s" % pkg.qualified_name)
prefix = "REZ_" + pkg.name.upper().replace('.', '_')
executor.setenv(prefix + "_VERSION", str(pkg.version))
major_version = str(pkg.version[0] if len(pkg.version) >= 1 else '')
minor_version = str(pkg.version[1] if len(pkg.version) >= 2 else '')
patch_version = str(pkg.version[2] if len(pkg.version) >= 3 else '')
executor.setenv(prefix + "_MAJOR_VERSION", major_version)
executor.setenv(prefix + "_MINOR_VERSION", minor_version)
executor.setenv(prefix + "_PATCH_VERSION", patch_version)
executor.setenv(prefix + "_BASE", pkg.base)
executor.setenv(prefix + "_ROOT", pkg.root)
bindings[pkg.name] = dict(version=VersionBinding(pkg.version),
variant=VariantBinding(pkg))
# commands
for attr in ("pre_commands", "commands", "post_commands"):
found = False
for pkg in resolved_pkgs:
commands = getattr(pkg, attr)
if commands is None:
continue
if not found:
found = True
_heading(attr)
_minor_heading("%s from package %s" % (attr, pkg.qualified_name))
bindings_ = bindings[pkg.name]
executor.bind('this', bindings_["variant"])
executor.bind("version", bindings_["version"])
executor.bind('root', pkg.root)
executor.bind('base', pkg.base)
exc = None
trace = None
commands.set_package(pkg)
try:
executor.execute_code(commands, isolate=True)
except error_class as e:
exc = e
if exc:
header = "Error in %s in package %r:\n" % (attr, pkg.uri)
if self.verbosity >= 2:
msg = header + str(exc)
else:
msg = header + exc.short_msg
raise PackageCommandError(msg)
_heading("post system setup")
# append suite paths based on suite visibility setting
self._append_suite_paths(executor)
# append system paths
executor.append_system_paths()
# add rez path so that rez commandline tools are still available within
# the resolved environment
mode = RezToolsVisibility[config.rez_tools_visibility]
if mode == RezToolsVisibility.append:
executor.append_rez_path()
elif mode == RezToolsVisibility.prepend:
executor.prepend_rez_path()
def _append_suite_paths(self, executor):
from rez.suite import Suite
mode = SuiteVisibility[config.suite_visibility]
if mode == SuiteVisibility.never:
return
visible_suite_paths = Suite.visible_suite_paths()
if not visible_suite_paths:
return
suite_paths = []
if mode == SuiteVisibility.always:
suite_paths = visible_suite_paths
elif self.parent_suite_path:
if mode == SuiteVisibility.parent:
suite_paths = [self.parent_suite_path]
elif mode == SuiteVisibility.parent_priority:
pop_parent = None
try:
parent_index = visible_suite_paths.index(self.parent_suite_path)
pop_parent = visible_suite_paths.pop(parent_index)
except ValueError:
pass
suite_paths.insert(0, (pop_parent or self.parent_suite_path))
for path in suite_paths:
tools_path = os.path.join(path, "bin")
executor.env.PATH.append(tools_path)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
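A minimal usage sketch for the module above (illustrative only: the package names are hypothetical and a working rez installation with packages on the configured packages path is assumed):

from rez.resolved_context import ResolvedContext

context = ResolvedContext(["python-2.7", "my_tool-1"])
if context.success:
    context.print_info()                    # summary of the resolve
    env = context.get_environ()             # environ dict from the rex interpreter
    p = context.execute_command(["my_tool", "--help"])  # returns subprocess.Popen
    p.wait()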
license: lgpl-3.0 | hash: -8,348,680,213,081,125,000 | line_mean: 37.619193 | line_max: 89 | alpha_frac: 0.572767 | autogenerated: false | ratio: 4.397035 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: matthewjwoodruff/language-of-choice | path: article/nodsl.py | copies: 1 | size: 3949 | content:
"""
nodsl.py
Attempt to express the language of choice without
overloading operators.
I understand why the article did it, and I'm impressed
that it works so well, but it's hard to follow what's
happening. Too much magic for me.
I think partly it's because the article conflates variables
and choices. A variable's role is to participate in
choices, either as an index or as a branch, but not to
*be* a choice.
"""
class Node(object):
pass
class ConstantNode(Node):
def __init__(self, name, value):
self.rank = float("Inf")
self.value = value
self.name = name
def __repr__(self): return self.name
def evaluate(self, _): return self.value
class Variable(object):
"""
A variable is not a node in the BDD!
"""
def __init__(self, name, rank):
self.name = name
self.rank = rank
def __repr__(self): return self.name
def evaluate(self, env): return env[self.rank]
class ChoiceNode(Node):
def __init__(self, index, if0, if1):
"""
index is a constant, variable, or another choice node
"""
self.index = index
self.if0 = if0
self.if1 = if1
self.rank = self.index.rank
def __repr__(self):
return "{}({},{})".format(repr(self.index), repr(self.if0), repr(self.if1))
def evaluate(self, env):
fork = self.index.evaluate(env)
if fork == 0:
return self.if0.evaluate(env)
elif fork == 1:
return self.if1.evaluate(env)
raise Exception()
def subst(index, rank, value):
if index == constants[0]:
return index
if index == constants[1]:
return constants[1]
if rank < index.rank: return index
try:
if0 = index.if0
if1 = index.if1
except AttributeError:
if0 = constants[0]
if1 = constants[1]
if rank == index.rank:
if value == 0:
return if0
if value == 1:
return if1
raise Exception()
_if0 = subst(if0, rank, value)
_if1 = subst(if1, rank, value)
if _if0 is _if1: return _if0
return choice(index, _if0, _if1)
# one global dictionary for choices
# Keys: (index, if0, if1)
choices = dict()
def choice(index, if0, if1):
global choices
try:
return choices[(index, if0, if1)]
except KeyError: pass
choices[(index, if0, if1)] = ChoiceNode(index, if0, if1)
print("choice {} {} {}".format(index, if0, if1))
if index == constants[0]:
return if0
if index == constants[1]:
return if1
if if0 == constants[0] and if1 == constants[1]:
return choices[(index, if0, if1)]
top = index.rank
_index = index
if if0.rank < top:
top = if0.rank
_index = if0
if if1.rank < top:
top = if1.rank
_index = if1
top = min(index.rank, if0.rank, if1.rank)
_if0 = choice(
subst(index, top, 0), subst(if0, top, 0), subst(if1, top, 0))
_if1 = choice(
subst(index, top, 1), subst(if0, top, 1), subst(if1, top, 1))
new_node = choice(_index, _if0, _if1)
return new_node
# one global dictionary for constants
# Key: constant value
constants = dict()
def constant(name, value):
global constants
try: return constants[value]
except KeyError:
constants[value] = ConstantNode(name, value)
return constants[value]
variables = dict()
def variable(name, rank):
global variables
try:
variable = variables[rank]
except KeyError:
variables[rank] = Variable(name, rank)
return variables[rank]
if variable.name != name:
raise Exception()
return variable
const0 = constant("0", 0)
const1 = constant("1", 1)
a = variable('a', 0)
b = variable('b', 1)
c = variable('c', 2)
p = variable('p', 3)
q = variable('q', 4)
left = choice(p, a, choice(q, b, c))
right = choice(q, choice(p, a, b), choice(p, a, c))
print("left {}".format(repr(left)))
print("right {}".format(repr(right)))
license: gpl-3.0 | hash: 1,861,754,821,942,455,800 | line_mean: 25.863946 | line_max: 83 | alpha_frac: 0.593062 | autogenerated: false | ratio: 3.401378 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: ASMlover/study | path: compiler/eLisp2/eLisp/number.py | copies: 1 | size: 2219 | content:
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import types
from interface import Eval
class Number(Eval):
def __init__(self, value):
self.data = value
def __repr__(self):
return repr(self.data)
def eval(self, env, args=None):
return self
def __eq__(self, rhs):
if isinstance(rhs, Number):
return (self.data == rhs.data)
else:
return False
class Integral(Number):
REGEX = re.compile(r'^[+-]?\d+$')
def __init__(self, value):
super(Integral, self).__init__(value)
class LongInt(Number):
REGEX = re.compile(r'^[+-]?\d+[lL]$')
def __init__(self, value):
super(LongInt, self).__init__(value)
class Float(Number):
REGEX = re.compile(r'^[+-]?(\d+\.\d*$|\d*\.\d+$)')
def __init__(self, value):
super(Float, self).__init__(value)
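A brief classification sketch using only the classes above (the classify helper is illustrative and not part of eLisp):

def classify(token):
    # Return an instance of the first Number subclass whose REGEX matches.
    for cls in (Integral, LongInt, Float):
        if cls.REGEX.match(token):
            return cls(token)
    return None

print(classify("42"))     # matches Integral
print(classify("10L"))    # matches LongInt
print(classify("-3.14"))  # matches Float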
license: bsd-2-clause | hash: 8,802,085,610,972,698,000 | line_mean: 32.119403 | line_max: 70 | alpha_frac: 0.684993 | autogenerated: false | ratio: 4.034545 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: lizardsystem/lizard-kpi | path: setup.py | copies: 1 | size: 1150 | content:
from setuptools import setup
version = '0.5.dev0'
long_description = '\n\n'.join([
open('README.rst').read(),
open('TODO.rst').read(),
open('CREDITS.rst').read(),
open('CHANGES.rst').read(),
])
install_requires = [
'Django',
'django-extensions',
'django-nose',
'lizard-ui',
'pkginfo',
]
tests_require = [
]
setup(name='lizard-kpi',
version=version,
description="Key performance indicators ('fuel gauges') for lizard",
long_description=long_description,
# Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Programming Language :: Python',
'Framework :: Django',
],
keywords=[],
author='Reinout van Rees',
author_email='reinout.vanrees@nelen-schuurmans.nl',
url='',
license='GPL',
packages=['lizard_kpi'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
extras_require = {'test': tests_require},
entry_points={
'console_scripts': [
]},
)
license: gpl-3.0 | hash: -1,418,867,999,999,005,700 | line_mean: 24.555556 | line_max: 78 | alpha_frac: 0.58 | autogenerated: false | ratio: 3.650794 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: NCI-GDC/gdcdatamodel | path: migrations/update_legacy_states.py | copies: 1 | size: 6311 | content:
#!/usr/bin/env python
"""gdcdatamodel.migrations.update_legacy_states
----------------------------------
File nodes from legacy projects were given a `state` that represents
what is now `file_state`. This script transforms the old `state` into
`file_state` and sets the `state` according to the following table:
| from file.state | to file.state | to file.file_state |
|-----------------+---------------+--------------------|
| None | submitted | None |
| error | validated | error |
| invalid | validated | error |
| live | submitted | submitted |
| submitted | submitted | registered |
| uploaded | submitted | uploaded |
| validated | submitted | validated |
This script runs in parallel -> it has to use separate sessions -> it
has a session per Node subclass which is automatically committed.
See also https://jira.opensciencedatacloud.org/browse/DAT-276.
Usage:
```python
update_legacy_states(dict(
    host='localhost',
    user='test',
    database='automated_test',
    password='test'))
```
"""
import logging
from sqlalchemy import not_, or_, and_
from psqlgraph import Node, PsqlGraphDriver
from gdcdatamodel import models as md
from multiprocessing import Process, cpu_count, Queue
from collections import namedtuple
CLS_WITH_PROJECT_ID = {
cls for cls in Node.get_subclasses()
if 'project_id' in cls.__pg_properties__
}
CLS_WITH_STATE = {
cls for cls in Node.get_subclasses()
if 'state' in cls.__pg_properties__
}
CLS_TO_UPDATE = CLS_WITH_PROJECT_ID & CLS_WITH_STATE
# Determines state and file_state based on existing state
STATE_MAP = {
None: {
'state': 'submitted',
'file_state': None
},
'error': {
'state': 'validated',
'file_state': 'error'
},
'invalid': {
'state': 'validated',
'file_state': 'error'
},
'live': {
'state': 'submitted',
'file_state': 'submitted'
},
'submitted': {
'state': 'submitted',
'file_state': 'registered'
},
'uploaded': {
'state': 'submitted',
'file_state': 'uploaded'
},
'validated': {
'state': 'submitted',
'file_state': 'validated'
},
}
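# Example lookup (follows the table in the module docstring):
#   STATE_MAP['live'] -> {'state': 'submitted', 'file_state': 'submitted'}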
logger = logging.getLogger("state_updater")
logging.basicConfig(level=logging.INFO)
def legacy_filter(query, legacy_projects):
"""filter query to those whose project_id is None or points to TARGET
or TCGA
"""
legacy_filters = [
query.entity().project_id.astext ==
project.programs[0].name + '-' + project.code
for project in legacy_projects
]
return query.filter(or_(
null_prop(query.entity(), 'project_id'),
*legacy_filters
))
def null_prop(cls, key):
"""Provide expression to filter on a null or nonexistent value"""
return or_(
cls._props.contains({key: None}),
not_(cls._props.has_key(key)),
)
def print_cls_query_summary(graph):
"""Print breakdown of class counts to stdout"""
cls_queries = {
cls.get_label(): cls_query(graph, cls)
for cls in CLS_WITH_PROJECT_ID & CLS_WITH_STATE
}
print(
"%s: %d" % ("legacy_stateless_nodes".ljust(40),
sum([query.count() for query in cls_queries.itervalues()]))
)
for label, query in cls_queries.items():
count = query.count()
if count:
print("%35s : %d" % (label, count))
def cls_query(graph, cls):
"""Returns query for legacy nodes with state in {null, 'live'}"""
legacy_projects = graph.nodes(md.Project).props(state='legacy').all()
options = [
# state
null_prop(cls, 'state'),
cls.state.astext.in_(STATE_MAP),
]
if 'file_state' in cls.__pg_properties__:
options += [null_prop(cls, 'file_state')]
return (legacy_filter(graph.nodes(cls), legacy_projects)
.filter(or_(*options)))
def update_cls(graph, cls):
"""Updates as described in update_target_states for a single class"""
with graph.session_scope() as session:
query = cls_query(graph, cls)
count = query.count()
if count == 0:
return
logger.info('Loading %d %s nodes', count, cls.label)
nodes = query.all()
logger.info('Loaded %d %s nodes', len(nodes), cls.label)
for node in nodes:
state = node._props.get('state', None)
file_state = node._props.get('file_state', None)
if state in STATE_MAP:
node.state = STATE_MAP[state]['state']
set_file_state = (
'file_state' in node.__pg_properties__
and file_state is None
and state in STATE_MAP
)
if set_file_state:
node.file_state = STATE_MAP[state]['file_state']
node.sysan['legacy_state'] = state
node.sysan['legacy_file_state'] = file_state
logger.info('Committing %s nodes', cls.label)
graph.current_session().commit()
logger.info('Done with %s nodes', cls.label)
def update_classes(graph_kwargs, input_q):
"""Creates a db driver and pulls classes from the queue to update"""
graph = PsqlGraphDriver(**graph_kwargs)
while True:
cls = input_q.get()
if cls is None: # none means no more work
return
update_cls(graph, cls)
def update_legacy_states(graph_kwargs):
"""Updates state, file_state on legacy nodes
- node.state in {None, 'live'}
- node.project_id in {None, <Legacy project_id list>}
      i.e. there is no project_id, or the project_id points to a legacy project
"""
graph = PsqlGraphDriver(**graph_kwargs)
with graph.session_scope():
print_cls_query_summary(graph)
input_q = Queue()
pool = [
Process(target=update_classes, args=(graph_kwargs, input_q))
for _ in range(cpu_count())
]
for cls in CLS_TO_UPDATE:
input_q.put(cls)
for process in pool:
input_q.put(None) # put a no more work signal for each process
for process in pool:
process.start()
for process in pool:
process.join()
|
apache-2.0
| 7,030,365,817,418,758,000
| 25.078512
| 79
| 0.575978
| false
| 3.794949
| false
| false
| false
|
fahadsultan/CausalRelations
|
FeaturesExtractor.py
|
1
|
14860
|
from bs4 import BeautifulSoup
import os
import pandas as pd
import sys
import traceback
from sklearn.feature_extraction.text import CountVectorizer
class FeaturesExtractor:
def __init__(self):
self.FEATURE_NAMES = ['e1_token_id', 'e1_number','e1_sentence','e1_token','e1_aspect', 'e1_class','e1_event_id','e1_modality','e1_polarity','e1_pos','e1_tense','e2_token_id', 'e2_number','e2_sentence','e2_token','e2_aspect', 'e2_class','e2_event_id','e2_modality','e2_polarity','e2_pos','e2_tense','dep_path', 'same_pos_tag','sentence_distance','event_distance','same_polarity','same_aspect','same_tense','same_class','csignals_in_bw','csignal_position','tlink_exists','e1_is_sent_root','e2_is_sent_root','causal_relation_exists']
COLUMN_NAMES = ['filename', 'sentence', 'relation', 'governor',
'governor_idx', 'dependent', 'dependent_idx']
self.data = []
self.deps = pd.read_csv('data/text/_out_dependencies.csv',
names=COLUMN_NAMES, sep='\t')
def recursive_search(self, df, path, to_find_token,
to_find_index, to_find_sentence, governor_token,
governor_index, governor_sentence):
dependencies = df[(self.deps['governor'] == governor_token) &
(self.deps['governor_idx'] == int(governor_index)) &
(self.deps['sentence'] == int(governor_sentence))]
for i in range(len(dependencies)):
dependency = dependencies.iloc[i]
            #Weird idiosyncrasy I came across where the governor and the dependent
#were the same token
if ((dependency['governor'] == dependency['dependent']) and
(dependency['dependent_idx'] == dependency['governor_idx'])):
continue
#check break condition
if (dependency['dependent'] == to_find_token and
dependency['dependent_idx'] == to_find_index and
dependency['sentence'] == to_find_sentence):
path = path+' '+dependency['relation']
break
else:
path_to_pass = path+' '+dependency['relation']
path_returned = self.recursive_search(
df, path_to_pass, to_find_token,
to_find_index, to_find_sentence, dependency['dependent'],
dependency['dependent_idx'], dependency['sentence'])
if path_returned != path_to_pass:
path = path_returned
break
return path
def get_dependency_path(self, filename, e1_token, e1_token_id,
e1_sentence, e2_token,
e2_token_id, e2_sentence):
#Since intersentential paths are allowed, the next sentence is
#also included
df = self.deps[(self.deps['filename'] == filename) &
((self.deps['sentence'] == int(e1_sentence)) |
(self.deps['sentence'] == int(e1_sentence)+1))]
path = self.recursive_search(df, '', e2_token, e2_token_id,
e2_sentence, e1_token, e1_token_id,
e1_sentence)
        if path != '':
return path
else:
#Try finding path from e2 to e1
return self.recursive_search(df, '', e1_token,
e1_token_id, int(e1_sentence),
e2_token, e2_token_id,
int(e2_sentence))
def parseFile(self, filename):
f = open(filename)
soup = BeautifulSoup(f.read())
events = soup.findAll('event')
tokens = soup.findAll('token')
for i in range(0,len(events)-1):
event = events[i]
for j in range(i+1, len(events)):
next_event = events[j]
event_token_id = event.find('token_anchor').attrs['id']
next_event_token_id = next_event.find('token_anchor').attrs['id']
event_token_tag = soup.find(lambda tag: (tag.name) == 'token' and
(tag.attrs['id']) == (event_token_id))
next_event_token_tag = soup.find(lambda tag: (tag.name) == 'token' and
(tag.attrs['id']) == (next_event_token_id))
event_sentence = event_token_tag['sentence']
next_event_sentence = next_event_token_tag['sentence']
if (int(next_event_sentence) - int(event_sentence)) > 1:
break # For now, intersentential event pairs can only be one sentence apart
else:
e1_number = event_token_tag.attrs['number']
e1_sentence = event_sentence
e1_token = event_token_tag.text
e1_aspect = event.attrs['aspect']
e1_certainty = event.attrs['certainty']
e1_class = event.attrs['class']
e1_comment = event.attrs['comment']
e1_factuality = event.attrs['factuality']
e1_event_id = event.attrs['id']
e1_modality = event.attrs['modality']
e1_polarity = event.attrs['polarity']
e1_pos = event.attrs['pos']
e1_tense = event.attrs['tense']
e2_number = next_event_token_tag.attrs['number']
e2_sentence = event_sentence
e2_token = next_event_token_tag.text
e2_aspect = next_event.attrs['aspect']
e2_certainty = next_event.attrs['certainty']
e2_class = next_event.attrs['class']
e2_comment = next_event.attrs['comment']
e2_factuality = next_event.attrs['factuality']
e2_event_id = next_event.attrs['id']
e2_modality = next_event.attrs['modality']
e2_polarity = next_event.attrs['polarity']
e2_pos = next_event.attrs['pos']
e2_tense = next_event.attrs['tense']
causal_relation_exists = len(soup.findAll(lambda tag:
tag.name == 'source' and
tag.findParent().name == 'clink' and
tag.findNextSibling().name == 'target' and
((tag.attrs['id'] == e1_event_id and
tag.findNextSibling().attrs['id'] == e2_event_id)
or
(tag.attrs['id'] == e2_event_id and
tag.findNextSibling().attrs['id'] == e1_event_id)) )) > 0
e1_token_id_offset = soup.find(
lambda tag: tag.name == 'token' and
tag.attrs['sentence'] == e1_sentence).attrs['id']
if e1_sentence == e2_sentence:
e2_token_id_offset = e1_token_id_offset
else:
e2_token_id_offset = soup.find(
lambda tag: tag.name == 'token' and
tag.attrs['sentence'] == e2_sentence).attrs['id']
e1_token_id = int(event_token_tag.attrs['id']) - int(e1_token_id_offset) + 1
e2_token_id = int(next_event_token_tag.attrs['id']) - int(e2_token_id_offset) + 1
e1_event_id = int(e1_event_id)
e2_event_id = int(e2_event_id)
same_pos_tag = e1_pos == e2_pos
sentence_distance = int(e2_sentence) - int(e1_sentence)
event_distance = e2_event_id - e1_event_id + 1
same_polarity = e1_polarity == e2_polarity
same_aspect = e1_aspect == e2_aspect
same_tense = e1_tense == e2_tense
same_class = e1_class == e2_class
'''
                        TODO: The conditions between e1_event_id and e2_event_id may not
                        make sense because e1_event_id would always be greater than e2_event_id.
                        Reverse causal relations are identified only if e2 is specified as
                        source in clink and e1 as target
'''
csignals_in_bw = soup.findAll(lambda tag: tag.name == 'c-signal' and
(( (e1_event_id < e2_event_id) and
(int(tag.attrs['id']) > e1_event_id) and
(int(tag.attrs['id']) < e2_event_id)) or
(e1_event_id > e2_event_id and
int(tag.attrs['id']) > e2_event_id and
int(tag.attrs['id']) < e1_event_id)))
csignal_position = csignal = ''
if len(csignals_in_bw) == 0:
csignal_tag = event.findPreviousSibling(lambda tag: tag.name == 'c-signal')
if csignal_tag is not None:
csignal_token_id = csignal_tag.find('token_anchor').attrs['id']
csignal_token_tag = soup.find(lambda x:
x.name == 'token' and x.attrs['id'] == csignal_token_id)
if csignal_token_tag.attrs['sentence'] == e1_sentence:
csignal = soup.find(lambda x:
x.name == 'token' and x.attrs['id'] == csignal_token_id).text
csignal_position = 'before'
else:
csignal_token_id = csignals_in_bw[-1].find('token_anchor').attrs['id']
csignal = soup.find(lambda x: x.name == 'token' and x.attrs['id'] == csignal_token_id).text
csignal_position = 'between'
tlink_exists = len(soup.findAll(lambda tag:
tag.name == 'tlink'
and (
((tag.find('source').attrs['id'] == str(e1_event_id)) and
(tag.find('target').attrs['id'] == str(e2_event_id)))
or
((tag.find('source').attrs['id'] == str(e2_event_id)) and
(tag.find('target').attrs['id'] == str(e1_event_id))) )
)) > 0
filename = filename.split('.xml')[0]
filename = filename.split('/')
filename = filename[len(filename) - 1]
dep_path = self.get_dependency_path(
filename, e1_token, e1_token_id, e1_sentence,
e2_token, e2_token_id, e2_sentence)
e1_is_sent_root = len(self.deps[
(self.deps['governor'] == 'ROOT') &
(self.deps['dependent'] == e1_token) &
(self.deps['dependent_idx'] == int(e1_token_id)) &
(self.deps['sentence'] == int(e1_sentence))] ) > 0
e2_is_sent_root = len(self.deps[
(self.deps['governor'] == 'ROOT') &
(self.deps['dependent'] == e2_token) &
(self.deps['dependent_idx'] == int(e2_token_id)) &
(self.deps['sentence'] == int(e2_sentence))] ) > 0
row = [
e1_token_id,
e1_number,
e1_sentence,
e1_token,
e1_aspect,
e1_class,
e1_event_id,
e1_modality,
e1_polarity,
e1_pos,
e1_tense,
e2_token_id,
e2_number,
e2_sentence,
e2_token,
e2_aspect,
e2_class,
e2_event_id,
e2_modality,
e2_polarity,
e2_pos,
e2_tense,
dep_path,
same_pos_tag,
sentence_distance,
event_distance,
same_polarity,
same_aspect,
same_tense,
same_class,
csignal,
csignal_position,
tlink_exists,
e1_is_sent_root,
e2_is_sent_root,
causal_relation_exists ]
self.data.append(row)
f.close()
def extract_features(self):
for folder, subs, files in os.walk('data/xml'):
for filename in files:
try:
if ('.xml' in filename) and (filename[0] != '.'):
                        print('Parsing File: ' + filename)
self.parseFile(os.path.join(folder, filename))
except Exception as e:
traceback.print_exc()
continue
self.data = pd.DataFrame(self.data)
self.data.columns = self.FEATURE_NAMES
    def save_to_csv(self, filename):
self.data.to_csv(filename)
if __name__ == "__main__":
extractor = FeaturesExtractor()
extractor.extract_features()
extractor.save_to_csv('features.csv')
|
apache-2.0
| 3,989,359,469,246,249,500
| 43.624625
| 538
| 0.421467
| false
| 4.423936
| false
| false
| false
|
hotsyk/uapython2
|
event/migrations/0001_initial.py
|
1
|
2895
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('description', models.TextField()),
('map_link', models.URLField(max_length=250)),
('photos', models.ImageField(upload_to=b'')),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('code', models.CharField(max_length=10)),
('description', models.TextField()),
('currency', models.CharField(max_length=3)),
('map_link', models.URLField(max_length=250)),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('coordinator', models.CharField(max_length=30)),
('city', models.ForeignKey(blank=True, to='event.City', null=True)),
],
),
migrations.CreateModel(
name='EventType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Venue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('geo_coordinates', models.CharField(max_length=20)),
('map_link', models.URLField(max_length=250)),
('photos', models.ImageField(upload_to=b'')),
('city', models.ForeignKey(to='event.City')),
],
),
migrations.AddField(
model_name='event',
name='event_type',
field=models.ForeignKey(to='event.EventType'),
),
migrations.AddField(
model_name='event',
name='venue',
field=models.ForeignKey(to='event.Venue'),
),
migrations.AddField(
model_name='city',
name='country',
field=models.ForeignKey(to='event.Country'),
),
]
|
bsd-3-clause
| 5,386,760,394,031,964,000
| 37.092105
| 114
| 0.516408
| false
| 4.495342
| false
| false
| false
|
dutradda/myreco
|
myreco/engine_strategies/filters/factory.py
|
1
|
4207
|
# MIT License
# Copyright (c) 2016 Diogo Dutra <dutradda@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from myreco.engine_strategies.filters.filters import (ArrayFilterBy,
ArrayFilterOf,
BooleanFilterBy,
IndexFilterByPropertyOf,
IndexFilterOf,
ObjectFilterBy,
ObjectFilterOf,
SimpleFilterBy,
SimpleFilterOf)
class FiltersFactory(object):
_filters_types_map = {
'property_value': {
'name': 'By Property Value',
'types': {
'integer': SimpleFilterBy,
'string': SimpleFilterBy,
'object': ObjectFilterBy,
'array': ArrayFilterBy,
'boolean': BooleanFilterBy
}
},
'item_property_value': {
'name': 'By Item Property Value',
'types': {
'integer': SimpleFilterOf,
'string': SimpleFilterOf,
'object': ObjectFilterOf,
'array': ArrayFilterOf,
'boolean': BooleanFilterBy
}
},
'property_value_index': {
'name': 'By Property Value Index',
'types': {
'integer': IndexFilterOf,
'string': IndexFilterOf,
'object': IndexFilterOf,
'array': IndexFilterOf,
'boolean': IndexFilterOf
}
},
'item_property_value_index': {
'name': 'By Item Property Value Index',
'types': {
'integer': IndexFilterByPropertyOf,
'string': IndexFilterByPropertyOf,
'object': IndexFilterByPropertyOf,
'array': IndexFilterByPropertyOf,
'boolean': IndexFilterByPropertyOf
}
}
}
@classmethod
def get_filter_types(cls):
return [{'name': filter_type['name'], 'id': filter_type_id}
for filter_type_id, filter_type in sorted(cls._filters_types_map.items())]
@classmethod
def get_filter_type(cls, filter_type_id):
filter_type = cls._filters_types_map.get(filter_type_id)
return {
'name': filter_type['name'],
'id': filter_type_id
} if filter_type else None
@classmethod
def make(cls, items_model, slot_filter, schema, skip_values=None):
value_type = schema['type']
filter_name = slot_filter['property_name']
type_id = slot_filter['type_id']
is_inclusive = slot_filter['is_inclusive']
id_names = schema.get('id_names')
filter_class = cls._filters_types_map.get(type_id, {'types': {}})['types'].get(value_type)
if filter_class:
return filter_class(items_model, filter_name, is_inclusive, id_names, skip_values)
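# Illustrative usage (not part of the original module):
#   FiltersFactory.get_filter_type('property_value')
#       -> {'name': 'By Property Value', 'id': 'property_value'}
#   FiltersFactory.make(items_model, slot_filter, schema) selects the filter class
#   from (slot_filter['type_id'], schema['type']) and returns an instance, or None
#   when the combination is not registered in _filters_types_map.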
|
mit
| -5,812,263,325,621,395,000
| 40.245098
| 98
| 0.550986
| false
| 4.721661
| false
| false
| false
|
hyperhq/nova-hyper
|
novahyper/virt/hyper/hostinfo.py
|
1
|
2080
|
# Copyright (c) 2013 dotCloud, Inc.
# Copyright (c) 2015 HyperHQ Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
CONF = cfg.CONF
def statvfs():
hyper_path = CONF.hyper.root_directory
if not os.path.exists(hyper_path):
hyper_path = '/'
return os.statvfs(hyper_path)
def get_disk_usage():
st = statvfs()
return {
'total': st.f_blocks * st.f_frsize,
'available': st.f_bavail * st.f_frsize,
'used': (st.f_blocks - st.f_bfree) * st.f_frsize
}
def get_total_vcpus():
total_vcpus = 0
with open('/proc/cpuinfo') as f:
for ln in f.readlines():
if ln.startswith('processor'):
total_vcpus += 1
return total_vcpus
def get_vcpus_used(containers):
total_vcpus_used = 0
for container in containers:
if isinstance(container, dict):
total_vcpus_used += container.get('Config', {}).get(
'CpuShares', 0)
return total_vcpus_used
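# Illustrative example (containers are dicts as returned by the hyper API; the
# sample values below are assumptions, not taken from this project):
#   get_vcpus_used([{'Config': {'CpuShares': 2}}, {}]) == 2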
def get_memory_usage():
with open('/proc/meminfo') as f:
m = f.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
def get_mounts():
with open('/proc/mounts') as f:
return f.readlines()
|
apache-2.0
| 7,434,647,035,159,597,000
| 24.679012
| 78
| 0.603365
| false
| 3.484087
| false
| false
| false
|
sony/nnabla
|
python/benchmark/function/test_logical.py
|
1
|
2622
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla.initializer as I
import nnabla.functions as F
from function_benchmark import FunctionBenchmark, Inspec
def inspecs_params():
inspecs = []
u = I.UniformInitializer((0, 2))
inspecs.append([Inspec((64, 32, 224, 224), u)])
return inspecs
@pytest.mark.parametrize('inspecs', inspecs_params())
@pytest.mark.parametrize('op',
['logical_and_scalar', 'logical_or_scalar', 'logical_xor_scalar',
'greater_scalar', 'greater_equal_scalar',
'less_scalar', 'less_equal_scalar',
'equal_scalar', 'not_equal_scalar'])
def test_scalar_logical(inspecs, op, nnabla_opts):
func = getattr(F, op)
fb = FunctionBenchmark(
func, inspecs, [1], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
@pytest.mark.parametrize('inspecs', inspecs_params())
def test_logical_not(inspecs, nnabla_opts):
func = F.logical_not
fb = FunctionBenchmark(
func, inspecs, [], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
def pairwise_inspecs_params():
inspecs = []
u = I.UniformInitializer((0, 2))
inspecs.append([Inspec((64, 32, 224, 224), u),
Inspec((64, 32, 224, 224), u)])
return inspecs
@pytest.mark.parametrize('inspecs', pairwise_inspecs_params())
@pytest.mark.parametrize('op',
['logical_and', 'logical_or', 'logical_xor',
'greater', 'greater_equal',
'less', 'less_equal',
'equal', 'not_equal'])
def test_pairwise_logical(inspecs, op, nnabla_opts):
func = getattr(F, op)
fb = FunctionBenchmark(
func, inspecs, [], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
|
apache-2.0
| 6,322,361,230,130,387,000
| 33.5
| 90
| 0.635774
| false
| 3.616552
| true
| false
| false
|
JamesClough/dagology
|
dagology/generators/random_dag.py
|
1
|
1482
|
"""
Random DAG model, as in Karrer & Newman, 2009, Phys Rev E
"""
# Copyright (C) 2016 by
# James Clough <james.clough91@gmail.com>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["James Clough (james.clough91@gmail.com)"])
import networkx as nx
import numpy as np
from random import randrange
import dagology as dag
__all__ = ['random_dag']
def random_dag(degree_sequence):
""" Create a random DAG from a given degree sequence
Parameters
----------
degree_sequence - list of pairs of in, out degrees
all edges go from earlier to later in this list
Returns
-------
NetworkX DiGraph
"""
G = nx.DiGraph()
G.add_nodes_from(range(len(degree_sequence)))
remaining_stubs = [] # list of forward pointing stubs
for node, degrees in enumerate(degree_sequence):
indegree, outdegree = degrees
allowed_stubs = remaining_stubs[:]
for x in range(indegree):
if len(allowed_stubs) == 0:
                raise nx.NetworkXError('Not a valid degree sequence')
older_node = allowed_stubs.pop(randrange(len(allowed_stubs)))
remaining_stubs.remove(older_node)
# be careful about multiedges
allowed_stubs = [x for x in allowed_stubs if x != older_node]
G.add_edge(older_node, node)
for x in range(outdegree):
remaining_stubs.append(node)
return G
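if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): build a small DAG
    # from a hand-written (in-degree, out-degree) sequence chosen so that the
    # forward-pointing stubs balance, then print the resulting edges.
    example_sequence = [(0, 2), (1, 1), (1, 1), (2, 0)]
    graph = random_dag(example_sequence)
    print(sorted(graph.edges()))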
|
mit
| -711,647,949,574,725,400
| 27.5
| 73
| 0.609312
| false
| 3.869452
| false
| false
| false
|
SonicFrog/jdrpoly
|
main/migrations/0004_comitymember_mainpagesection_news.py
|
1
|
2798
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-24 11:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0003_contest'),
]
operations = [
migrations.CreateModel(
name='ComityMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100, verbose_name='Pr\xe9nom')),
('last_name', models.CharField(max_length=100, verbose_name='Nom')),
('post', models.CharField(max_length=100, verbose_name='Poste')),
('description', models.TextField(verbose_name='Description du poste')),
('email', models.EmailField(max_length=254, verbose_name='Addresse de contact')),
],
options={
'ordering': ('pk',),
'verbose_name': 'Membre du comit\xe9',
'verbose_name_plural': 'Membres du comit\xe9',
},
),
migrations.CreateModel(
name='MainPageSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Titre')),
('content', models.TextField(verbose_name='Contenu')),
('order', models.IntegerField(verbose_name='Position')),
],
options={
'ordering': ('order', '-pk'),
'verbose_name': "Section page d'acceuil",
'verbose_name_plural': "Sections page d'accueil",
},
),
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default=b'Nouvelle news', max_length=200, verbose_name='Titre')),
('content', models.TextField(max_length=10000, verbose_name='Contenu')),
('date', models.DateField(default=django.utils.timezone.now, verbose_name='Date')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Auteur')),
],
options={
'ordering': ('date',),
'verbose_name': 'News',
'verbose_name_plural': 'News',
},
),
]
|
gpl-2.0
| 1,634,196,145,750,455,800
| 42.71875
| 143
| 0.556469
| false
| 4.176119
| false
| false
| false
|
MaxTyutyunnikov/lino
|
obsolete/src/lino/adamo/datatypes.py
|
1
|
11685
|
## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
yet another attempt to create a universal set of datatypes...
"""
import datetime
from time import mktime, ctime
import types
from lino.tools.months import Month
from lino.misc.descr import Describable
from lino.misc.etc import ispure, iif
#from lino.adamo.exceptions import RefuseValue
from lino.adamo.exceptions import DataVeto
ERR_FORMAT_NONE = "caller must handle None values"
ERR_PARSE_EMPTY = "caller must handle empty strings"
#def itself(x): return x
class Type(Describable):
"base class for containers of data-type specific meta information"
defaultValue=None
parser=lambda x: x # itself
formatter=str
allowedClasses=None # None or list of allowed classes for value
# sizes are given in "characters" or "lines"
minHeight = 1
maxHeight = 1
def __call__(self,*args,**kw):
return self.child(*args,**kw)
#return apply(self.__class__,[],kw)
def __repr__(self):
return "%s (%s)" % (self.__class__.__name__,
repr(self.__dict__))
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return self.formatter(v)
#return repr(v)
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return self.parser(s)
def validate(self,value):
if self.allowedClasses is None: return
if value.__class__ in self.allowedClasses: return
raise DataVeto("%r is not a valid %s" % (value,self))
## def getPreferredWidth(self):
## #note: for StringType, self.width is an instance variable, for
## #other classes it is a class variable.
## return self.width
## def getMinSize(self):
## return (self.minWidth
class WidthType(Type):
defaultWidth=50
minWidth=15
maxWidth=50
def __init__(self,parent=None,
width=None,minWidth=None,maxWidth=None,
**kw):
Type.__init__(self,parent,**kw)
if width is not None:
minWidth = maxWidth = width
if maxWidth is not None:
self.maxWidth = maxWidth
elif parent is not None:
if self.maxWidth != parent.maxWidth:
self.maxWidth = parent.maxWidth
if minWidth is not None:
self.minWidth = minWidth
elif parent is not None:
if self.minWidth != parent.minWidth:
self.minWidth = parent.minWidth
## def parse(self,s):
## assert len(s), ERR_PARSE_EMPTY
## return int(s)
class IntType(WidthType):
defaultValue=0
defaultWidth=5
minWidth=3
maxWidth=7
parser=int
allowedClasses=(types.IntType,)
## def parse(self,s):
## assert len(s), ERR_PARSE_EMPTY
## return int(s)
## def validate(self,value):
## if value.__class__ is types.IntType:
## return
## raise DataVeto("not an integer")
class BoolType(IntType):
defaultValue=False
parser=bool
formatter=lambda s,x: iif(x,'X','-')
allowedClasses=(types.BooleanType,)
## def validate(self,value):
## #print __name__,value
## Type.validate(self,value)
class AutoIncType(IntType):
pass
#class AreaType(IntType):
# pass
class LongType(IntType):
parser=long
allowedClasses=(types.LongType,)
class AsciiType(WidthType):
defaultValue=""
defaultWidth=20
minWidth=1
maxWidth=50
allowedClasses=(types.StringType,)
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return str(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return v
def validate(self,value):
Type.validate(self,value)
if len(value) == 0:
raise DataVeto("Cannot store empty string.")
if value.endswith(' '):
raise DataVeto("%r ends with a space" % value)
class StringType(AsciiType):
defaultValue=""
defaultWidth=50
minWidth=15
maxWidth=50
allowedClasses=(types.StringType,types.UnicodeType)
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return s
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
#return v
return unicode(v)
#return v.encode("cp1252",'replace')
def validate(self,value):
AsciiType.validate(self,value)
if not ispure(value):
raise DataVeto("%r is not pure" % value)
class PasswordType(StringType):
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return '*' * len(v)
class MemoType(StringType):
def __init__(self,parent=None,
height=None, minHeight=None,maxHeight=None,
**kw):
StringType.__init__(self,parent,**kw)
if height is not None:
minHeight = maxHeight = height
if minHeight is None:
if parent is None:
minHeight=4
else:
minHeight=parent.minHeight
if maxHeight is None:
if parent is None:
maxHeight=10
else:
maxHeight=parent.maxHeight
self.minHeight = minHeight
self.maxHeight = maxHeight
class TimeStampType(Type):
maxWidth = 10
minWidth = 10
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
l=s.split()
if len(l) == 2:
d=DATE.parse(l[0])
t=TIME.parse(l[1])
dt=datetime.datetime.combine(d,t)
ts_tuple=dt.timetuple()
return mktime(ts_tuple)
raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return ctime(v)
def validate(self,value):
if value.__class__ in (types.FloatType, types.IntType):
return
raise DataVeto("not a date")
## if not isinstance(value,types.FloatType):
## #raise repr(value)+" is not a date"
## raise DataVeto("not a date")
class DateType(Type):
maxWidth = 10
minWidth = 10
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
s = s.replace(".","-")
l = s.split("-")
if len(l) == 3:
l = map(int,l)
return datetime.date(*l)
elif len(l) == 1:
assert len(s) == 8, repr(s)
y = int(s[0:4])
m = int(s[4:6])
d = int(s[6:8])
return datetime.date(y,m,d)
else:
raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
#return repr(v) # "[-]yyyymmdd"
return v.isoformat()
def validate(self,value):
if not isinstance(value,datetime.date):
#raise repr(value)+" is not a date"
raise DataVeto("not a date")
class MonthType(Type):
maxWidth = 7
minWidth = 7
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return Month.parse(s)
## s = s.replace(".","-")
## s = s.replace("/","-")
## l = s.split("-")
## if len(l) == 2:
## l = map(int,l)
## return Month(*l)
## elif len(l) == 1:
## assert len(s) == 6, repr(s)
## y = int(s[0:4])
## m = int(s[4:6])
## return Month(y,m)
## else:
## raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
        return str(v)
def validate(self,value):
if not isinstance(value,datetime.date):
#raise repr(value)+" is not a date"
raise DataVeto("not a date")
class TimeType(Type):
maxWidth = 8
minWidth = 8
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
l = s.split(":")
if len(l) > 4:
raise ValueError, repr(s)
        if len(l) == 1:
return stot(s)
l = map(int,l)
return datetime.time(*l)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return str(v)[:self.maxWidth]
def validate(self,value):
if not isinstance(value,datetime.time):
#raise repr(value)+" is not a time"
raise DataVeto("not a time")
class DurationType(Type):
minWidth = 8
maxWidth = 8
fmt = "hh.mm.ss" # currently only possible fmt
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
l = s.split(".")
if len(l) == 3:
hours = int(l[0])
minutes = int(l[1])
seconds = int(l[2])
return datetime.timedelta(0,seconds,0,0,minutes,hours)
elif len(l) == 2:
minutes = int(l[0])
seconds = int(l[1])
return datetime.timedelta(0,seconds,0,0,minutes)
else:
raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
h = v.seconds / 3600
m = (v.seconds - h * 3600) / 60
s = v.seconds - h * 3600 - m*60
return "%02d.%02d.%02d" % (h,m,s)
def validate(self,value):
if not isinstance(value,datetime.timedelta):
#raise DataVeto(repr(value)+" is not a timedelta")
raise DataVeto("not a timedelta")
class UrlType(StringType):
pass
class ImageType(StringType):
pass
class LogoType(StringType):
pass
class EmailType(StringType):
pass
class AmountType(IntType):
pass
class PriceType(IntType):
pass
ASTRING = AsciiType()
STRING = StringType()
PASSWORD = PasswordType()
MEMO = MemoType()
DATE = DateType()
MONTH = MonthType()
TIME = TimeType() # StringType(width=8)
TIMESTAMP = TimeStampType()
DURATION = DurationType()
INT = IntType()
LONG = LongType()
BOOL = BoolType()
AMOUNT = AmountType()
PRICE = PriceType()
ROWID = AutoIncType()
URL = UrlType(width=200)
EMAIL = EmailType(width=60)
#AREA = AreaType()
IMAGE = ImageType()
LOGO = LogoType()
LANG=STRING(2)
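# Illustrative usage of the singleton types above (not part of the original module):
#   DATE.parse("2007-01-31")   -> datetime.date(2007, 1, 31)
#   DATE.format(datetime.date(2007, 1, 31)) -> "2007-01-31"
#   DURATION.parse("01.30.00") -> datetime.timedelta(hours=1, minutes=30)
#   BOOL.format(True)          -> "X"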
def itot(i):
return stot(str(i))
def stot(s):
if len(s) == 4:
return datetime.time(int(s[0:2]),int(s[2:]))
elif len(s) == 3:
return datetime.time(int(s[0:1]),int(s[1:]))
elif len(s) <= 2:
        return datetime.time(int(s))
else:
raise ValueError, repr(s)
def itod(i):
return DATE.parse(str(i))
## s=str(i)
## assert len(s) == 8, repr(i)
## y = int(s[0:4])
## m = int(s[4:6])
## d = int(s[6:8])
## return datetime.date(y,m,d)
def stod(s):
return DATE.parse(s)
def itom(i):
return MONTH.parse(str(i))
def stom(s):
return MONTH.parse(s)
__all__ = filter(lambda x: x[0] != "_", dir())
|
gpl-3.0
| -7,101,941,256,306,729,000
| 24.681319
| 73
| 0.55798
| false
| 3.62438
| false
| false
| false
|
nju-websoft/JAPE
|
code/attr2vec_func.py
|
1
|
2786
|
import math
import collections
import random
import numpy as np
import tensorflow as tf
import itertools
import time
def sum_rows(x):
"""Returns a vector summing up each row of the matrix x."""
cols = tf.shape(x)[1]
ones_shape = tf.stack([cols, 1])
ones = tf.ones(ones_shape, x.dtype)
return tf.reshape(tf.matmul(x, ones), [-1])
def compute_sampled_logits(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1):
if not isinstance(weights, list):
weights = [weights]
if labels.dtype != tf.int64:
labels = tf.cast(labels, tf.int64)
labels_flat = tf.reshape(labels, [-1])
sampled_ids, true_expected_count, sampled_expected_count = tf.nn.log_uniform_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
true_w = tf.nn.embedding_lookup(weights, labels_flat)
true_b = tf.nn.embedding_lookup(biases, labels_flat)
sampled_w = tf.nn.embedding_lookup(weights, sampled_ids)
sampled_b = tf.nn.embedding_lookup(biases, sampled_ids)
dim = tf.shape(true_w)[1:2]
new_true_w_shape = tf.concat([[-1, num_true], dim], 0)
row_wise_dots = tf.multiply(tf.expand_dims(inputs, 1), tf.reshape(true_w, new_true_w_shape))
dots_as_matrix = tf.reshape(row_wise_dots, tf.concat([[-1], dim], 0))
true_logits = tf.reshape(sum_rows(dots_as_matrix), [-1, num_true])
true_b = tf.reshape(true_b, [-1, num_true])
true_logits += true_b
sampled_b_vec = tf.reshape(sampled_b, [num_sampled])
sampled_logits = tf.matmul(inputs, sampled_w, transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
v=None):
batch_size = int(labels.get_shape()[0])
if v is None:
v = tf.ones([batch_size, 1])
true_logits, sampled_logits = compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=num_true)
true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(true_logits), logits=true_logits)
sampled_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
true_loss = tf.multiply(true_loss, v)
return tf.div(tf.reduce_sum(true_loss) + tf.reduce_sum(sampled_loss), tf.constant(batch_size, dtype=tf.float32))
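if __name__ == "__main__":
    # Minimal illustrative wiring (assumed shapes and variable names, not taken
    # from the original project): embed attribute ids and train them against the
    # sampled NCE objective defined above, using TensorFlow 1.x graph mode.
    vocab_size, dim, batch, neg = 1000, 64, 32, 16
    embeddings = tf.Variable(tf.random_uniform([vocab_size, dim], -0.05, 0.05))
    nce_w = tf.Variable(tf.truncated_normal([vocab_size, dim],
                                            stddev=1.0 / math.sqrt(dim)))
    nce_b = tf.Variable(tf.zeros([vocab_size]))
    input_ids = tf.placeholder(tf.int32, [batch])
    label_ids = tf.placeholder(tf.int32, [batch, 1])
    loss = nce_loss(nce_w, nce_b, label_ids,
                    tf.nn.embedding_lookup(embeddings, input_ids),
                    num_sampled=neg, num_classes=vocab_size)
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)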
|
mit
| 4,553,009,698,414,285,000
| 34.717949
| 119
| 0.604451
| false
| 3.397561
| false
| false
| false
|
andrew-lundgren/gwpy
|
gwpy/plotter/frequencyseries.py
|
1
|
11434
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""This module defines plotting classes for the data series defined in
`~gwpy.frequencyseries`
"""
import warnings
import numpy
from matplotlib.projections import register_projection
from matplotlib import colors
from . import tex
from .core import Plot
from .axes import Axes
from .decorators import auto_refresh
from ..frequencyseries import (FrequencySeries, SpectralVariance)
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
class FrequencySeriesAxes(Axes):
"""Custom `Axes` for a :class:`~gwpy.plotter.FrequencySeriesPlot`.
"""
name = 'frequencyseries'
# -------------------------------------------
# GWpy class plotting methods
@auto_refresh
def plot(self, *args, **kwargs):
"""Plot data onto these Axes.
Parameters
----------
args
a single :class:`~gwpy.frequencyseries.FrequencySeries`
(or sub-class) or standard (x, y) data arrays
kwargs
keyword arguments applicable to :meth:`~matplotib.axes.Axes.plot`
Returns
-------
Line2D
the :class:`~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
if len(args) == 1 and isinstance(args[0], FrequencySeries):
return self.plot_frequencyseries(*args, **kwargs)
elif len(args) == 1 and isinstance(args[0], SpectralVariance):
return self.plot_variance(*args, **kwargs)
else:
return super(FrequencySeriesAxes, self).plot(*args, **kwargs)
@auto_refresh
def plot_frequencyseries(self, spectrum, **kwargs):
"""Plot a :class:`~gwpy.frequencyseries.FrequencySeries` onto these axes
Parameters
----------
spectrum : :class:`~gwpy.frequencyseries.FrequencySeries`
data to plot
**kwargs
any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`
Returns
-------
Line2D
the :class:`~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
if tex.USE_TEX:
kwargs.setdefault('label', tex.label_to_latex(spectrum.name))
else:
kwargs.setdefault('label', spectrum.name)
if not kwargs.get('label', True):
kwargs.pop('label')
line = self.plot(spectrum.frequencies.value, spectrum.value, **kwargs)
if len(self.lines) == 1:
try:
self.set_xlim(*spectrum.xspan)
except ValueError:
pass
if not self.get_xlabel():
if tex.USE_TEX:
ustr = tex.unit_to_latex(spectrum.xunit)
else:
ustr = spectrum.xunit.to_string()
if ustr:
self.set_xlabel('Frequency [%s]' % ustr)
if not self.get_ylabel():
if tex.USE_TEX:
ustr = tex.unit_to_latex(spectrum.unit)
else:
ustr = spectrum.unit.to_string()
if ustr:
self.set_ylabel('[%s]' % ustr)
return line
@auto_refresh
def plot_spectrum(self, *args, **kwargs):
warnings.warn("{0}.plot_spectrum was renamed "
"{0}.plot_frequencyseries, "
"and will be removed in an upcoming release".format(
type(self).__name__))
return self.plot_frequencyseries(*args, **kwargs)
@auto_refresh
def plot_frequencyseries_mmm(self, mean_, min_=None, max_=None, alpha=0.1,
**kwargs):
"""Plot a `FrequencySeries` onto these axes, with (min, max) shaded
regions
The `mean_` `FrequencySeries` is plotted normally, while the `min_`
        and `max_` spectra are plotted lightly below and above,
        with a fill between them and the `mean_`.
Parameters
----------
        mean_ : :class:`~gwpy.frequencyseries.FrequencySeries`
            data to plot normally
        min_ : :class:`~gwpy.frequencyseries.FrequencySeries`
            first data set to shade to mean_
        max_ : :class:`~gwpy.frequencyseries.FrequencySeries`
            second data set to shade to mean_
alpha : `float`, optional
weight of filled region, ``0.0`` for transparent through ``1.0``
opaque
**kwargs
any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`
Returns
-------
artists : `tuple`
a 5-tuple containing (Line2d for mean_, `Line2D` for min_,
`PolyCollection` for min_ shading, `Line2D` for max_, and
`PolyCollection` for max_ shading)
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
# plot mean
line1 = self.plot_frequencyseries(mean_, **kwargs)[0]
# plot min and max
kwargs.pop('label', None)
color = kwargs.pop('color', line1.get_color())
linewidth = kwargs.pop('linewidth', line1.get_linewidth()) / 10
if min_ is not None:
a = self.plot(min_.frequencies.value, min_.value, color=color,
linewidth=linewidth, **kwargs)
if alpha:
b = self.fill_between(min_.frequencies.value, mean_.value,
min_.value, alpha=alpha, color=color,
rasterized=kwargs.get('rasterized'))
else:
b = None
else:
a = b = None
if max_ is not None:
c = self.plot(max_.frequencies.value, max_.value, color=color,
linewidth=linewidth, **kwargs)
if alpha:
d = self.fill_between(max_.frequencies.value, mean_.value,
max_.value, alpha=alpha, color=color,
rasterized=kwargs.get('rasterized'))
else:
d = None
else:
c = d = None
return line1, a, b, c, d
@auto_refresh
def plot_spectrum_mmm(self, *args, **kwargs):
warnings.warn("{0}.plot_spectrum_mmm was renamed "
"{0}.plot_frequencyseries_mmm, "
"and will be removed in an upcoming release".format(
type(self).__name__))
return self.plot_frequencyseries_mmm(*args, **kwargs)
@auto_refresh
def plot_variance(self, specvar, norm='log', **kwargs):
"""Plot a :class:`~gwpy.frequencyseries.SpectralVariance` onto
these axes
Parameters
----------
        spectrum : :class:`~gwpy.frequencyseries.SpectralVariance`
            data to plot
        **kwargs
            any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.pcolormesh`
Returns
-------
        QuadMesh
            the :class:`~matplotlib.collections.QuadMesh` for this layer
See Also
--------
:meth:`matplotlib.axes.Axes.pcolormesh`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
if norm == 'log':
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
norm = colors.LogNorm(vmin=vmin, vmax=vmax)
kwargs['norm'] = norm
x = numpy.concatenate((specvar.frequencies.value,
[specvar.x0.value +
specvar.dx.value * specvar.shape[0]]))
y = specvar.bins.value
X, Y = numpy.meshgrid(x, y, copy=False, sparse=True)
mesh = self.pcolormesh(X, Y, specvar.value.T, **kwargs)
if len(self.collections) == 1:
self.set_yscale('log', nonposy='mask')
self.set_xlim(x[0], x[-1])
self.set_ylim(y[0], y[-1])
# fill in zeros
if isinstance(mesh.norm, colors.LogNorm):
cmap = mesh.get_cmap()
try:
# only listed colormaps have cmap.colors
cmap.set_bad(cmap.colors[0])
except AttributeError:
pass
return mesh
register_projection(FrequencySeriesAxes)
class FrequencySeriesPlot(Plot):
"""`Figure` for displaying a `~gwpy.frequencyseries.FrequencySeries`
"""
_DefaultAxesClass = FrequencySeriesAxes
def __init__(self, *series, **kwargs):
kwargs.setdefault('projection', self._DefaultAxesClass.name)
# extract custom keyword arguments
sep = kwargs.pop('sep', False)
xscale = kwargs.pop(
'xscale', kwargs.pop('logx', True) and 'log' or 'linear')
yscale = kwargs.pop(
'yscale', kwargs.pop('logy', True) and 'log' or 'linear')
sharex = kwargs.pop('sharex', False)
sharey = kwargs.pop('sharey', False)
# separate custom keyword arguments
axargs, plotargs = self._parse_kwargs(kwargs)
# initialise figure
super(FrequencySeriesPlot, self).__init__(**kwargs)
# plot data
x0 = []
axesdata = self._get_axes_data(series, sep=sep)
for data in axesdata:
ax = self._add_new_axes(**axargs)
for fs in data:
ax.plot(fs, **plotargs)
x0.append(min([fs.df.value for fs in data]))
if 'sharex' not in axargs and sharex is True:
axargs['sharex'] = ax
if 'sharey' not in axargs and sharey is True:
axargs['sharey'] = ax
if sharex:
x0 = [min(x0)]*len(x0)
axargs.pop('sharex', None)
axargs.pop('sharey', None)
axargs.pop('projection', None)
for i, ax in enumerate(self.axes):
# format axes
for key, val in axargs.iteritems():
getattr(ax, 'set_%s' % key)(val)
# fix log frequency scale with f0 = DC
if xscale in ['log']:
xlim = list(ax.get_xlim())
if not xlim[0]:
xlim[0] = x0[i]
ax.set_xlim(*xlim)
# set axis scales
ax.set_xscale(xscale)
ax.set_yscale(yscale)
# set grid
if xscale == 'log':
ax.grid(True, axis='x', which='both')
if yscale == 'log':
ax.grid(True, axis='y', which='both')
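# Illustrative usage (assumed typical call pattern, not taken from this file):
#   plot = FrequencySeriesPlot(asd1, asd2, sep=True, logx=True, logy=True)
# 'sep' gives each series its own Axes; 'logx'/'logy' map onto the x/y axis
# scales handled in __init__ above.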
|
gpl-3.0
| -6,821,686,522,418,402,000
| 35.069401
| 80
| 0.546091
| false
| 4.135262
| false
| false
| false
|
isudox/leetcode-solution
|
python-algorithm/leetcode/flip_binary_tree_to_match_preorder_traversal.py
|
1
|
2107
|
# -*- coding: utf-8 -*-
"""971. Flip Binary Tree To Match Preorder Traversal
https://leetcode.com/problems/flip-binary-tree-to-match-preorder-traversal/
Given a binary tree with N nodes, each node has a different value from
{1, ..., N}.
A node in this binary tree can be flipped by swapping the left child and the
right child of that node.
Consider the sequence of N values reported by a preorder traversal starting from
the root. Call such a sequence of N values the voyage of the tree.
(Recall that a preorder traversal of a node means we report the current
node's value, then preorder-traverse the left child, then preorder-traverse the
right child.)
Our goal is to flip the least number of nodes in the tree so that the voyage of
the tree matches the voyage we are given.
If we can do so, then return a list of the values of all nodes flipped. You may
return the answer in any order.
If we cannot do so, then return the list [-1].
Example 1:
Input: root = [1,2], voyage = [2,1]
Output: [-1]
Example 2:
Input: root = [1,2,3], voyage = [1,3,2]
Output: [1]
Example 3:
Input: root = [1,2,3], voyage = [1,2,3]
Output: []
Note:
1 <= N <= 100
"""
from common.tree_node import TreeNode
class Solution:
def flip_match_voyage(self, root, voyage):
"""
:type root: TreeNode
:type voyage: List[int]
:rtype: List[int]
"""
stack, ans = [root], []
i, size = 0, len(voyage)
while len(stack) and i < size:
temp = stack.pop()
if temp.val != voyage[i]:
break
i += 1
if i < size:
if temp.left and temp.left.val != voyage[i]:
ans.append(temp.val)
stack.append(temp.left)
if temp.right:
stack.append(temp.right)
else:
if temp.right:
stack.append(temp.right)
if temp.left:
stack.append(temp.left)
if i != size:
return [-1]
return ans
|
mit
| -1,886,637,052,181,223,000
| 28.676056
| 80
| 0.58187
| false
| 3.547138
| false
| false
| false
|
cloudmesh/vagrant
|
setup.py
|
1
|
3460
|
#!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2008-2010, Gregor von Laszewski #
# Copyright 2010-2013, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
from __future__ import print_function
import setuptools
from setuptools import setup, find_packages
import os
import sys
from cloudmesh_vagrant import __version__
import platform
import re
import io
if sys.version_info < (2, 7, 10):
print(70 * "#")
print("WARNING: upgrade to python 2.7.10 or above"
"Your version is {} not supported.".format(sys.version_info))
print(70 * "#")
command = None
this_platform = platform.system().lower()
if this_platform in ['darwin']:
command = "easy_install readline"
elif this_platform in ['windows']:
command = "pip install pyreadline"
if command is not None:
print("Install readline")
os.system(command)
requirements = [
'cloudmesh_client'
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
home = os.path.expanduser("~")
setup(
version=__version__,
name="cloudmesh_vagrant",
description="cloudmesh_vagrant - A real simple interface to virtualbox via vagrant",
long_description=read('README.rst'),
license="Apache License, Version 2.0",
author="Gregor von Laszewski",
author_email="laszewski@gmail.com",
url="https://github.com/cloudmesh/cloudmesh_vagrant",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Console"
],
keywords="cloud cmd commandshell plugins cloudmesh vagrant virtualbox",
packages=find_packages(),
install_requires=requirements,
include_package_data=True,
entry_points={
'console_scripts': [
'cm-vbox = cloudmesh_vagrant.cm_vbox:main',
'cm-authors = cloudmesh_client.common.GitInfo:print_authors',
],
},
)
|
apache-2.0
| 4,580,480,080,358,300,700
| 36.608696
| 88
| 0.573988
| false
| 4.407643
| false
| false
| false
|
Alignak-monitoring/alignak-checks-example
|
version.py
|
1
|
1285
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, frederic.mohier@alignak.net
#
"""
Alignak - Checks pack for EXAMPLE
"""
# Package name
__pkg_name__ = u"alignak_checks_EXAMPLE"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"EXAMPLE"
# Application manifest
__version__ = u"0.0.1"
__author__ = u"Your name"
__author_email__ = u"Your email address"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = u"https://github.com/alignak-monitoring-contrib/alignak-checks-EXAMPLE"
__doc_url__ = u"http://alignak-doc.readthedocs.io/en/latest"
__description__ = u"Alignak checks pack for EXAMPLE"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
|
agpl-3.0
| 1,924,157,300,005,393,700
| 31.125
| 95
| 0.670817
| false
| 3.320413
| false
| false
| false
|
splotz90/urh
|
src/urh/models/ProtocolLabelListModel.py
|
1
|
4134
|
from PyQt5.QtCore import QAbstractListModel, pyqtSignal, Qt, QModelIndex, QMimeData
from PyQt5.QtGui import QFont
from urh import constants
from urh.signalprocessing.FieldType import FieldType
from urh.signalprocessing.MessageType import MessageType
from urh.signalprocessing.ProtocoLabel import ProtocolLabel
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
class ProtocolLabelListModel(QAbstractListModel):
protolabel_visibility_changed = pyqtSignal(ProtocolLabel)
protocol_label_name_edited = pyqtSignal()
label_removed = pyqtSignal(ProtocolLabel)
def __init__(self, proto_analyzer: ProtocolAnalyzer, controller, parent=None):
super().__init__(parent)
self.proto_analyzer = proto_analyzer
self.message_type = controller.active_message_type # type: MessageType
self.controller = controller # type: CompareFrameController
def rowCount(self, QModelIndex_parent=None, *args, **kwargs):
return len(self.message_type)
def update(self):
self.message_type = self.controller.active_message_type # type: MessageType
self.beginResetModel()
self.endResetModel()
def data(self, index, role=Qt.DisplayRole):
row = index.row()
if row >= len(self.message_type):
return
label = self.message_type[row]
if role == Qt.DisplayRole:
return label.name
elif role == Qt.CheckStateRole:
return label.show
elif role == Qt.BackgroundColorRole:
return constants.LABEL_COLORS[label.color_index]
elif role == Qt.FontRole:
font = QFont()
font.setItalic(label.field_type is None)
return font
def setData(self, index: QModelIndex, value, role=Qt.DisplayRole):
if role == Qt.CheckStateRole:
proto_label = self.message_type[index.row()]
proto_label.show = value
self.protolabel_visibility_changed.emit(proto_label)
elif role == Qt.EditRole:
proto_label = self.message_type[index.row()]
proto_label.name = value
self.message_type.change_field_type_of_label(proto_label,
self.controller.field_types_by_caption.get(value, None))
self.protocol_label_name_edited.emit()
return True
def showAll(self):
hidden_labels = [label for label in self.proto_analyzer.protocol_labels if not label.show]
for label in hidden_labels:
label.show = Qt.Checked
self.protolabel_visibility_changed.emit(label)
def hideAll(self):
visible_labels = [label for label in self.proto_analyzer.protocol_labels if label.show]
for label in visible_labels:
label.show = Qt.Unchecked
self.protolabel_visibility_changed.emit(label)
def get_label_at(self, row):
return self.message_type[row]
def delete_label_at(self, label_id: int):
try:
lbl = self.message_type[label_id]
self.message_type.remove(lbl)
self.label_removed.emit(lbl)
except IndexError:
pass
def delete_labels_at(self, start: int, end: int):
for row in range(end, start - 1, -1):
self.delete_label_at(row)
def add_labels_to_message_type(self, start: int, end: int, message_type_id: int):
for lbl in self.message_type[start:end + 1]:
self.controller.proto_analyzer.message_types[message_type_id].add_label(lbl)
self.controller.updateUI(resize_table=False)
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | \
Qt.ItemIsEditable | Qt.ItemIsDragEnabled
def supportedDragActions(self):
return Qt.MoveAction | Qt.CopyAction
def mimeTypes(self):
return ['text/plain']
def mimeData(self, indexes):
data = "PLabels:"
data += "/".join([str(index.row()) for index in indexes])
mime_data = QMimeData()
mime_data.setText(data)
return mime_data
|
gpl-3.0
| -1,866,661,335,700,646,400
| 36.243243
| 113
| 0.643445
| false
| 3.918483
| false
| false
| false
|
eciis/web
|
backend/test/model_test/invite_institution_test.py
|
1
|
5437
|
# -*- coding: utf-8 -*-
from ..test_base import TestBase
from models import Invite
from models import Institution
from models import InviteInstitution
from models import User
from custom_exceptions import FieldException
from mock import patch
class InviteInstitutionTest(TestBase):
"""Test invite model."""
@classmethod
def setUp(cls):
"""Provide the base for the tests."""
cls.test = cls.testbed.Testbed()
cls.test.activate()
cls.policy = cls.datastore.PseudoRandomHRConsistencyPolicy(
probability=1)
cls.test.init_datastore_v3_stub(consistency_policy=cls.policy)
cls.test.init_memcache_stub()
initModels(cls)
def test_check_is_invite_institution_valid(self):
"""Test check_is_invite_institution_valid method."""
with self.assertRaises(FieldException):
data = {"suggestion_institution_name": None}
InviteInstitution.check_is_invite_institution_valid(data)
def test_create(self):
"""Test create method."""
created_invite = InviteInstitution.create(self.data)
stub_institution_key = created_invite.stub_institution_key
expected_invite = InviteInstitution()
expected_invite.admin_key = self.admin.key
expected_invite.is_request = False
expected_invite.institution_key = self.institution.key
expected_invite.sender_key = self.admin.key
expected_invite.sender_name = self.admin.name
expected_invite.invitee = self.user.email[0]
expected_invite.suggestion_institution_name = "new Institution"
expected_invite.stub_institution_key = stub_institution_key
self.assertEquals(
created_invite,
expected_invite,
"The created invite should be equal to the expected one"
)
def test_make(self):
"""Test make method."""
invite_institution = InviteInstitution.create(self.data)
invite_institution.put()
stub_institution = invite_institution.stub_institution_key.get()
maked_invite = invite_institution.make()
expected_maked_invite = {
'admin_name': self.admin.name,
'sender_name': self.invite.sender_name,
'key': invite_institution.key.urlsafe(),
'status': self.invite.status,
'institution_admin': self.institution.make(["name"]),
'institution': self.institution.make(InviteInstitution.INST_PROPS_TO_MAKE),
'invitee': self.user.email[0],
'suggestion_institution_name': 'new Institution',
'stub_institution': stub_institution.make([
'name', 'key', 'state'
]),
'type_of_invite': 'INSTITUTION'
}
self.assertEquals(
maked_invite,
expected_maked_invite,
"The maked invite should be equal to the expected one"
)
@patch('models.invite_institution.NotificationsQueueManager.create_notification_task')
def test_create_accept_notification(self, mock_method):
"""Test create a regular accept response notification."""
invite = InviteInstitution.create(self.data)
invite.put()
self.user.current_institution = self.institution.key
self.user.put()
id = invite.create_accept_response_notification(
'ACCEPT_INVITE_INSTITUTION', self.institution.key, invite.admin_key.urlsafe(), self.user)
mock_method.assert_called()
self.assertTrue(id != None)
@patch('models.invite_institution.NotificationsQueueManager.create_notification_task')
def test_create_system_notification(self, mock_method):
"""Test create a system notification."""
invite = InviteInstitution.create(self.data)
invite.put()
self.user.current_institution = self.institution.key
self.user.put()
id = invite.create_accept_response_notification(
'ADD_ADM_PERMISSIONS', self.institution.key, self.user.key.urlsafe())
mock_method.assert_called()
self.assertTrue(id != None)
def initModels(cls):
"""Initialize the models."""
# admin
cls.admin = User()
cls.admin.name = "admin"
cls.admin.email = ["admin@email"]
cls.admin.put()
# user
cls.user = User()
cls.user.name = "user"
cls.user.email = ["user@email"]
cls.user.put()
# New institution
cls.institution = Institution()
cls.institution.name = "institution"
cls.institution.admin = cls.admin.key
cls.institution.members = [cls.admin.key]
cls.institution.followers = [cls.admin.key]
cls.institution.put()
# update admin
cls.admin.institutions_admin = [cls.institution.key]
cls.admin.put()
# New invite
cls.invite = Invite()
cls.invite.invitee = cls.user.email[0]
cls.invite.admin_key = cls.admin.key
cls.invite.sender_key = cls.admin.key
cls.invite.sender_name = cls.admin.name
cls.invite.status = "sent"
cls.invite.institution_key = cls.institution.key
cls.invite.put()
cls.data = {
"admin_key": cls.admin.key.urlsafe(),
"is_request": False,
"institution_key": cls.institution.key.urlsafe(),
"sender_key": cls.admin.key.urlsafe(),
"sender_name": cls.admin.name,
"invitee": cls.user.email[0],
"suggestion_institution_name": "new Institution"
}
|
gpl-3.0
| 3,515,197,406,887,376,000
| 34.305195
| 101
| 0.640611
| false
| 3.807423
| true
| false
| false
|
karinemiras/evoman_framework
|
evoman/player.py
|
1
|
12662
|
################################
# EvoMan FrameWork - V1.0 2016 #
# Author: Karine Miras #
# karine.smiras@gmail.com #
################################
import sys
import numpy
import struct
import binascii
import Base
from Base.SpriteConstants import *
from Base.SpriteDefinition import *
from sensors import *
# player projectile
class Bullet_p(pygame.sprite.Sprite):
image = pygame.image.load('evoman/images/bullet_r.png')
def __init__(self, location, direction, n_twist, *groups):
super(Bullet_p, self).__init__(*groups)
self.rect = pygame.rect.Rect(location, self.image.get_size())
self.direction = direction
self.n_twist = n_twist
# fits image according to the side the player is turned to
if self.direction == 1:
self.image = pygame.image.load('evoman/images/bullet_r.png')
else:
self.image = pygame.image.load('evoman/images/bullet_l.png')
def update(self, dt, game):
        # removes bullet objects when they pass beyond the screen limits
if self.rect.right<1 or self.rect.left>736 or self.rect.top <1 or self.rect.bottom>512 :
self.kill()
game.player.twists[self.n_twist] = None
return
        self.rect.x += self.direction * 600 * dt # moving on the X axis (left or right). It adds 600*dt forward at each game loop iteration, where dt controls the frame limit.
# checks collision of player's bullet with the enemy
if self.rect.colliderect(game.enemy.rect):
            # if the enemy is not immune
if game.enemy.imune == 0:
                # enemy loses life points according to the difficulty level of the game (the more difficult, the less it loses)
game.enemy.life = max(0, game.enemy.life-(20/game.level))
if game.enemyn == 4:
                    # makes the enemy immune to the player's shooting.
game.enemy.imune = 1
# removes the bullet off the screen after collision.
self.kill()
game.player.twists[self.n_twist] = None
game.enemy.hurt = 5
# player sprite
class Player(pygame.sprite.Sprite):
def __init__(self, location, enemyn, level, *groups):
super(Player, self).__init__(*groups)
self.spriteDefinition = SpriteDefinition('evoman/images/EvoManSprites.png', 0, 0, 43, 59)
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
self.rect = pygame.rect.Rect(location, self.image.get_size())
self.resting = 0
self.dy = 0
self.direction = 1
self.alternate = 1
self.gun_cooldown = 0
self.max_life = 100
self.life = self.max_life
self.atacked = 0
self.hurt = 0
self.shooting = 0
self.inwater = 0
self.twists = []
self.vx = 0
self.vy = 0
self.hy = 0
self.sensors = None
def update(self, dt, game):
        # if the enemy is not attacking with the freezing attack (which prevents the player from moving or attacking) and the 'start game' marker is 1.
if game.freeze_p == 0 and game.start == 1:
            # checks the water environment flag to regulate movement speed
if self.inwater == 1:
self.vx = 0.5
self.vy = 0.5
self.hy = -2000
else:
self.vx = 1
self.vy = 1
self.hy = -900
# defines game mode for player action
if game.playermode == 'human': # player controlled by keyboard/joystick
# if joystick is connected, initializes it.
if game.joy > 0:
joystick = pygame.joystick.Joystick(0)
joystick.init()
# tests if the button/key was pressed or released.
                # if the player is jumping, the release stops the jump before its maximum height is reached
press = 0
release = 0
for event in game.event:
if event.type == pygame.JOYBUTTONDOWN or event.type == pygame.KEYDOWN:
press = 1
else:
press = 0
if event.type == pygame.JOYBUTTONUP or event.type == pygame.KEYUP:
release = 1
else:
release = 0
# gets pressed key value
key = pygame.key.get_pressed()
# gets joystick value for axis x (left/right)
left = 0
if game.joy > 0:
if round(joystick.get_axis(0)) == -1:
left = 1
if key[pygame.K_LEFT]:
left = 1
right = 0
if game.joy > 0:
if round(joystick.get_axis(0)) == 1:
right = 1
if key[pygame.K_RIGHT]:
right = 1
# gets joystick/key value for jumping
jump = 0
if game.joy > 0:
if int(joystick.get_button(2)) == 1 and press == 1:
jump = 1
if key[pygame.K_SPACE] and press == 1:
jump = 1
# gets joystick/key value for shooting
shoot = 0
if game.joy > 0:
if int(joystick.get_button(3)) == 1 and press == 1:
shoot = 1
if key[pygame.K_LSHIFT] and press == 1:
shoot = 1
elif game.playermode == 'ai': # player controlled by AI algorithm
# calls the controller providing game sensors
actions = game.player_controller.control(self.sensors.get(game), game.pcont)
if len(actions) < 5:
game.print_logs("ERROR: Player controller must return 5 decision variables.")
sys.exit(0)
left = actions[0]
right = actions[1]
jump = actions[2]
shoot = actions[3]
release = actions[4]
            # if the button is released before the jump reaches its maximum height, the player stops going up.
if release == 1 and self.resting == 0:
self.dy = 0
# copies last position state of the player
last = self.rect.copy()
# movements on the axis x (left)
if left:
self.rect.x -= 200 * dt * self.vx
self.direction = -1
# animation, running images alternation
if self.alternate == 1:
self.updateSprite(SpriteConstants.START_RUNNING, SpriteConstants.LEFT)
if self.alternate == 4 or self.alternate == 10:
self.updateSprite(SpriteConstants.RUNNING_STEP1, SpriteConstants.LEFT)
if self.alternate == 7:
self.updateSprite(SpriteConstants.RUNNING_STEP2, SpriteConstants.LEFT)
self.alternate += 1
if self.alternate > 12:
self.alternate = 1
# movements on the axis x (right)
elif right:
self.rect.x += 200 * dt * self.vx
self.direction = 1
# animation, running player images alternation
if self.alternate == 1:
self.updateSprite(SpriteConstants.START_RUNNING, SpriteConstants.RIGHT)
if self.alternate == 4 or self.alternate == 10:
self.updateSprite(SpriteConstants.RUNNING_STEP1, SpriteConstants.RIGHT)
if self.alternate == 7:
self.updateSprite(SpriteConstants.RUNNING_STEP2, SpriteConstants.RIGHT)
self.alternate += 1
if self.alternate > 12:
self.alternate = 1
else:
# animation, standing up images
if self.direction == -1:
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
# if player is touching the floor, he is allowed to jump
if self.resting == 1 and jump == 1:
self.dy = self.hy
# gravity
self.dy = min(400, self.dy + 100)
self.rect.y += self.dy * dt * self.vy
# changes the image when player jumps
if self.resting == 0 :
if self.direction == -1:
self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.RIGHT)
new = self.rect # copies new (after movement) position state of the player
            # controls screen wall and platform limits against the player
self.resting = 0
for cell in game.tilemap.layers['triggers'].collide(new, 'blockers'):
blockers = cell['blockers']
if 'l' in blockers and last.right <= cell.left and new.right > cell.left and last.bottom>cell.top:
new.right = cell.left
if 'r' in blockers and last.left >= cell.right and new.left < cell.right and last.bottom>cell.top:
new.left = cell.right
if 't' in blockers and last.bottom <= cell.top and new.bottom > cell.top:
self.resting = 1 # player touches the floor
new.bottom = cell.top
self.dy = 0
if 'b' in blockers and last.top >= cell.bottom and new.top < cell.bottom:
new.top = cell.bottom
# shoots, limiting time between bullets.
if shoot == 1 and not self.gun_cooldown:
self.shooting = 5
                self.atacked = 1 # marks that the player has attacked the enemy
# creates bullets objects according to the direction.
if self.direction > 0:
self.twists.append(Bullet_p(self.rect.midright, 1, len(self.twists), game.sprite_p))
else:
self.twists.append(Bullet_p(self.rect.midleft, -1, len(self.twists), game.sprite_p))
                self.gun_cooldown = 0.4 # cooldown time before the next bullet is allowed
# sound effects
if game.sound == "on" and game.playermode == "human":
sound = pygame.mixer.Sound('evoman/sounds/scifi003.wav')
c = pygame.mixer.Channel(2)
c.set_volume(1)
c.play(sound)
else:
self.atacked = 0
            # decreases the cooldown timer limiting bullets
self.gun_cooldown = max(0, self.gun_cooldown - dt)
# hurt player animation
if self.hurt > 0:
if self.direction == -1:
self.updateSprite(SpriteConstants.HURTING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.HURTING, SpriteConstants.RIGHT)
self.hurt -= 1
self.hurt = max(0,self.hurt)
self.shooting -= 1
self.shooting = max(0,self.shooting)
# shooting animation
if self.shooting > 0:
if self.resting == 0:
if self.direction == -1:
self.updateSprite(SpriteConstants.SHOOTING_JUMPING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.SHOOTING_JUMPING, SpriteConstants.RIGHT)
else:
if self.direction == -1:
self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.RIGHT)
            # kills the player if they touch killer objects, like spikes.
for cell in game.tilemap.layers['triggers'].collide(self.rect, 'killers'):
game.player.life = 0
# focuses screen center on player
game.tilemap.set_focus(new.x, new.y)
else:
game.tilemap.set_focus(self.rect.x, self.rect.y)
def updateSprite(self, state, direction):
self.image = self.spriteDefinition.getImage(state, direction)
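# Hedged sketch, not part of the original framework: a minimal controller for
# game.playermode == 'ai'. The class name is illustrative; the only contract
# assumed here is the one read in Player.update above, i.e. control() returns
# at least 5 values mapped to [left, right, jump, shoot, release].
class _ExamplePlayerController(object):
    def control(self, sensor_values, controller_parameters):
        # Ignore the sensor values and simply run right while shooting.
        return [0, 1, 0, 1, 0]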
|
cc0-1.0
| 2,649,441,619,843,990,500
| 35.074074
| 192
| 0.525746
| false
| 4.231952
| false
| false
| false
|
VarunRaval48/SignCode
|
java.sign/TestScripts/Valid/python/iterateChangeParameterPython.py
|
1
|
3261
|
# ****************************************************************************
# Copyright (c) 2015 UT-Battelle, LLC.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Initial API and implementation and/or initial documentation - Kasper
# Gammeltoft, Jay Jay Billings
#
# This is an example script designed to show how to use ease with ICE. It
# creates several new Reflectivity Models and changes the thickness parameter
# to show the effect that creates.
# ****************************************************************************
# Load the Platform module for accessing OSGi services
loadModule('/System/Platform')
# Get the core service from ICE for creating and accessing objects.
coreService = getService(org.eclipse.ice.core.iCore.ICore);
# Set a initial value for the thickness of the nickel layer. This will be doubled
# for each iteration to show how this parameter effects the model
nickelThickness = 250;
for i in xrange(1, 5):
# Create the reflectivity model to be used and get its reference. The create item
# method will return a string representing the number of that item, so use int() to
# convert it to an integer.
reflectModel = coreService.getItem(int(coreService.createItem("Reflectivity Model")))
# Get the nickel layer from the model. It should be in the list, which is component 2,
# and it is the third layer in that list (which is item 2 as the list is zero based).
listComp = reflectModel.getComponent(2);
nickel = listComp.get(2);
nickel.setProperty("Thickness (A)", nickelThickness);
nickelThickness += 250;
# Finally process the model to get the results.
coreService.processItem(reflectModel.getId(), "Calculate Reflectivity", 1);
"""*****BEGIN SIGNSTURE********
MCwCFH7564DnEUccn+cSKT0mG4W+Ew/uAhQfwUFkl2q3L6dcuDQK62ZSE/ujQw==
MIIDNzCCAvWgAwIBAgIELNrJgDALBgcqhkjOOAQDBQAwbTEL
MAkGA1UEBhMCSU4xEDAOBgNVBAgTB0d1amFyYXQxEjAQBgNV
BAcTCUFobWVkYWJhZDEQMA4GA1UEChMHVW5rbm93bjEQMA4G
A1UECxMHVW5rbm93bjEUMBIGA1UEAxMLVmFydW4gUmF2YWww
HhcNMTYwMzA1MTE1NjUwWhcNMTYwNjAzMTE1NjUwWjBtMQsw
CQYDVQQGEwJJTjEQMA4GA1UECBMHR3VqYXJhdDESMBAGA1UE
BxMJQWhtZWRhYmFkMRAwDgYDVQQKEwdVbmtub3duMRAwDgYD
VQQLEwdVbmtub3duMRQwEgYDVQQDEwtWYXJ1biBSYXZhbDCC
AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu
7OTn9hG3UjzvRADDHj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2
y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+t8Yb
+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii
Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfh
oIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrU
WU/mcQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCj
rh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl
nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDqLrJZitkj0fqO
RQ/kdKtwHK4Fq6kXfGedp5umydmCVqrIkuCKuw6X2P5gX4Vv
0kqTEG2iWL7Hv3iUCmtaCeKYLSlIyaloJMYPwgcKxWYYMtXn
njfoOAxHywwXxPAygkR/r9TH1VrUSKjvuGvOxdjSNnezjsVL
VEyIXiO76ZfawKMhMB8wHQYDVR0OBBYEFJw/5/p+5vXMZPXx
ZLBh9YLK/zr4MAsGByqGSM44BAMFAAMvADAsAhRA44+6n9Ya
UTnckDGsbZIv450sVAIUA1otxObPsQaTs1EcOEEqODrNHCY=
********END SIGNSTURE*****"""
|
gpl-3.0
| 5,838,214,338,211,463,000
| 38.780488
| 90
| 0.767863
| false
| 2.387262
| false
| false
| false
|
jenshnielsen/hemelb
|
Tools/setuptool/HemeLbSetupTool/View/VectorCtrl.py
|
1
|
2871
|
#
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is provided to you under the terms of
# the GNU LGPL. Please see LICENSE in the top level directory for full
# details.
#
import wx
# from wx.lib.masked import NumCtrl, EVT_NUM
from HemeLbSetupTool.Bindings.Translators import FloatTranslator, NoneToValueTranslator
from HemeLbSetupTool.Bindings.WxMappers import WxWidgetMapper, Mapper
from HemeLbSetupTool.View.Layout import H
def ForwardGet(func):
def Get(self, val):
return tuple(getattr(getattr(self, coord), func.func_name)() for coord in ('x', 'y', 'z'))
Get.func_name = func.func_name
return Get
def ForwardSet(func):
def Set(self, val):
for coord in ('x', 'y', 'z'):
setter = getattr(getattr(self, coord), func.func_name)
setter(val)
continue
return
Set.func_name = func.func_name
return Set
class VectorCtrl(wx.Panel):
"""Simple container of three TextCtrl's for a vector quantity.
"""
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# self.x = NumCtrl(parent, style=wx.TE_PROCESS_ENTER, integerWidth=3, fractionWidth=3)
# self.y = NumCtrl(parent, style=wx.TE_PROCESS_ENTER, integerWidth=3, fractionWidth=3)
# self.z = NumCtrl(parent, style=wx.TE_PROCESS_ENTER, integerWidth=3, fractionWidth=3)
self.x = wx.TextCtrl(self, size=(50,22))
self.y = wx.TextCtrl(self, size=(50,22))
self.z = wx.TextCtrl(self, size=(50,22))
sizer = H((self.x, 1, wx.EXPAND),
(self.y, 1, wx.EXPAND),
(self.z, 1, wx.EXPAND)).create()
self.SetSizer(sizer)
return
@ForwardSet
def SetBackgroundColour(): return
@ForwardGet
def GetBackgroundColour(): return
@ForwardSet
def SetEditable(): return
pass
class VectorCtrlMapper(WxWidgetMapper):
"""Widget mapper for VectorCtrls.
"""
def __init__(self, widget, key, event,
translator=NoneToValueTranslator(float('nan'),
inner=FloatTranslator())
):
# We want to skip the WxWidgetMapper's init for now as the
# VectorCtrl typically won't have the required getters and
# setters. On binding, this one mapper is turned into three
# standard mappers anyway.
Mapper.__init__(self, translator=translator)
self.widget = widget
self.key = key
self.event = event
return
def CreateSubMapper(self, component):
return WxWidgetMapper(getattr(self.widget, component),
self.key, self.event,
translator=self.translator)
pass
|
lgpl-3.0
| -1,803,520,995,720,174,000
| 32
| 98
| 0.604667
| false
| 3.858871
| false
| false
| false
|
achanda/refstack
|
refstack/tools/tempest_subunit_test_result.py
|
1
|
7924
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subunit
import testtools
import unittest
class TempestSubunitTestResultBase(testtools.TestResult):
"""Class to process subunit stream.
This class is derived from testtools.TestResult.
This class overrides all the inherited addXXX methods
to call the new _process_result() method to process the data.
This class is designed to be a base class.
    The _process_result() method should be overridden by the
derived class to customize the processing.
"""
result_type = ["SUCCESS", "FAILURE", "ERROR", "SKIP"]
def __init__(self, stream, descriptions, verbosity):
"""Initialize with super class signature."""
super(TempestSubunitTestResultBase, self).__init__()
def _process_result(self, result_type, testcase, *arg):
"""Process the data.
The value of parameter "result_type" can be SUCCESS, FAILURE,
ERROR, or SKIP.
It can be used to determine from which add method this is called.
"""
pass
def addSuccess(self, testcase):
"""Overwrite super class method for additional data processing."""
super(TempestSubunitTestResultBase, self).addSuccess(testcase)
self._process_result(self.result_type[0], testcase)
def addFailure(self, testcase, err):
"""Overwrite super class method for additional data processing."""
if testcase.id() == 'process-returncode':
return
super(TempestSubunitTestResultBase, self).addFailure(testcase, err)
self._process_result(self.result_type[1], testcase, err)
def addError(self, testcase, err):
"""Overwrite super class method for additional data processing."""
super(TempestSubunitTestResultBase, self).addFailure(testcase, err)
self._process_result(self.result_type[2], testcase, err)
def addSkip(self, testcase, reason=None, details=None):
"""Overwrite super class method for additional data processing."""
super(TempestSubunitTestResultBase,
self).addSkip(testcase, reason, details)
self._process_result(self.result_type[3], testcase, reason, details)
def startTest(self, testcase):
"""Overwrite super class method for additional data processing."""
self.start_time = self._now()
super(TempestSubunitTestResultBase, self).startTest(testcase)
class TempestSubunitTestResult(TempestSubunitTestResultBase):
"""Process subunit stream and save data into two dictionary objects.
1) The result dictionary object:
results={testcase_id: [status, elapsed],
testcase_id: [status, elapsed],
...}
testcase_id: the id fetched from subunit data.
For Tempest test: testcase_id = test_class_name + test_name
status: status of the testcase (PASS, FAIL, FAIL_SETUP, ERROR, SKIP)
elapsed: testcase elapsed time
2) The summary dictionary object:
summary={"PASS": count, "FAIL": count, "FAIL_SETUP: count",
"ERROR": count, "SKIP": count, "Total": count}
count: the number of occurrence
"""
def __init__(self, stream, descriptions, verbosity):
"""Initialize with supper class signature."""
super(TempestSubunitTestResult, self).__init__(stream, descriptions,
verbosity)
self.start_time = None
self.status = ["PASS", "FAIL", "FAIL_SETUP", "ERROR", "SKIP"]
self.results = {}
self.summary = {self.status[0]: 0, self.status[1]: 0,
self.status[2]: 0, self.status[3]: 0,
self.status[4]: 0, "Total": 0}
def _process_result(self, result_type, testcase, *arg):
"""Process and append data to dictionary objects."""
testcase_id = testcase.id()
elapsed = (self._now() - self.start_time).total_seconds()
status = result_type
# Convert "SUCCESS" to "PASS"
# Separate "FAILURE" into "FAIL" and "FAIL_SETUP"
if status == self.result_type[0]:
status = self.status[0]
elif status == self.result_type[1]:
if "setUpClass" in testcase_id:
status = self.status[2]
testcase_id = '%s.setUpClass' % \
(re.search('\((.*)\)', testcase_id).group(1))
else:
status = self.status[1]
self.results.setdefault(testcase_id, [])
self.results[testcase_id] = [status, elapsed]
self.summary[status] += 1
self.summary["Total"] += 1
class TempestSubunitTestResultTuples(TempestSubunitTestResult):
"""Process subunit stream and save data into two dictionary objects.
1) The result dictionary object:
results={test_classname: [(test_name, status, elapsed),
(test_name, status, elapsed),...],
test_classname: [(test_name, status, elapsed),
(test_name, status, elapsed),...],
...}
status: status of the testcase (PASS, FAIL, FAIL_SETUP, ERROR, SKIP)
elapsed: testcase elapsed time
2) The summary dictionary object:
summary={"PASS": count, "FAIL": count, "FAIL_SETUP: count",
"ERROR": count, "SKIP": count, "Total": count}
count: the number of occurrence
"""
def _process_result(self, result_type, testcase, *arg):
"""Process and append data to dictionary objects."""
testcase_id = testcase.id()
elapsed = round((self._now() - self.start_time).total_seconds(), 2)
status = result_type
# Convert "SUCCESS" to "PASS"
# Separate "FAILURE" into "FAIL" and "FAIL_SETUP"
if status == self.result_type[0]:
status = self.status[0]
elif status == self.result_type[1]:
if "setUpClass" in testcase_id:
status = self.status[2]
testcase_id = '%s.setUpClass' % \
(re.search('\((.*)\)', testcase_id).group(1))
else:
status = self.status[1]
classname, testname = testcase_id.rsplit('.', 1)
self.results.setdefault(classname, [])
self.results[classname].append((testname, status, elapsed))
self.summary[status] += 1
self.summary["Total"] += 1
class ProcessSubunitData():
"""A class to replay subunit data from a stream."""
result = None
def __init__(self, in_stream, test_result_class_name=
TempestSubunitTestResult):
"""Read and process subunit data from a stream.
Save processed data into a class named TempestSubunitTestResult
which is a class derived from unittest.TestResults.
"""
test = subunit.ProtocolTestCase(in_stream, passthrough=None)
runner = unittest.TextTestRunner(verbosity=2, resultclass=
test_result_class_name)
        # Run (replay) the test from the subunit stream.
        # runner.run will return an object of type "test_result_class_name"
self.result = runner.run(test)
def get_result(self):
"""Return an object of type test_result_class_name."""
return self.result
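# Hedged usage sketch, not part of the original module: replay a subunit
# stream from a file and return the summary counters. The file name is
# hypothetical.
def _example_replay(subunit_file='tempest_results.subunit'):
    with open(subunit_file, 'rb') as stream:
        processor = ProcessSubunitData(stream, TempestSubunitTestResultTuples)
    return processor.get_result().summary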
|
apache-2.0
| -8,004,023,840,272,725,000
| 38.422886
| 78
| 0.614967
| false
| 4.280929
| true
| false
| false
|
davidfokkema/artist
|
demo/demo_histogram_fit.py
|
1
|
1160
|
import numpy as np
import scipy.optimize
import scipy.stats
from artist import Plot
def main():
# Draw random numbers from the normal distribution
np.random.seed(1)
N = np.random.normal(size=2000)
# define bin edges
edge = 5
bin_width = .1
bins = np.arange(-edge, edge + .5 * bin_width, bin_width)
# build histogram and x, y values at the center of the bins
n, bins = np.histogram(N, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
y = n
# fit normal distribution pdf to data
f = lambda x, N, mu, sigma: N * scipy.stats.norm.pdf(x, mu, sigma)
popt, pcov = scipy.optimize.curve_fit(f, x, y)
print("Parameters from fit (N, mu, sigma):", popt)
# make graph
graph = Plot()
# graph histogram
graph.histogram(n, bins)
# graph model with fit parameters
x = np.linspace(-edge, edge, 100)
graph.plot(x, f(x, *popt), mark=None)
# set labels and limits
graph.set_xlabel("value")
graph.set_ylabel("count")
graph.set_label("Fit to data")
graph.set_xlimits(-6, 6)
# save graph to file
graph.save('histogram-fit')
if __name__ == '__main__':
main()
|
gpl-3.0
| 6,107,514,388,074,478,000
| 22.673469
| 70
| 0.612931
| false
| 3.276836
| false
| false
| false
|
orbingol/NURBS-Python
|
geomdl/exchange.py
|
1
|
33428
|
"""
.. module:: exchange
:platform: Unix, Windows
:synopsis: Provides CAD exchange and interoperability functions
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
import os
import struct
import json
from io import StringIO
from . import compatibility, operations, elements, linalg
from . import _exchange as exch
from .exceptions import GeomdlException
from ._utilities import export
@export
def import_txt(file_name, two_dimensional=False, **kwargs):
""" Reads control points from a text file and generates a 1-dimensional list of control points.
The following code examples illustrate importing different types of text files for curves and surfaces:
.. code-block:: python
:linenos:
# Import curve control points from a text file
curve_ctrlpts = exchange.import_txt(file_name="control_points.txt")
# Import surface control points from a text file (1-dimensional file)
surf_ctrlpts = exchange.import_txt(file_name="control_points.txt")
# Import surface control points from a text file (2-dimensional file)
surf_ctrlpts, size_u, size_v = exchange.import_txt(file_name="control_points.txt", two_dimensional=True)
If argument ``jinja2=True`` is set, then the input file is processed as a `Jinja2 <http://jinja.pocoo.org/>`_
template. You can also use the following convenience template functions which correspond to the given mathematical
equations:
* ``sqrt(x)``: :math:`\\sqrt{x}`
* ``cubert(x)``: :math:`\\sqrt[3]{x}`
* ``pow(x, y)``: :math:`x^{y}`
You may set the file delimiters using the keyword arguments ``separator`` and ``col_separator``, respectively.
``separator`` is the delimiter between the coordinates of the control points. It could be comma
``1, 2, 3`` or space ``1 2 3`` or something else. ``col_separator`` is the delimiter between the control
points and is only valid when ``two_dimensional`` is ``True``. Assuming that ``separator`` is set to space, then
    ``col_separator`` could be semi-colon ``1 2 3; 4 5 6`` or pipe ``1 2 3| 4 5 6`` or comma ``1 2 3, 4 5 6`` or
something else.
The defaults for ``separator`` and ``col_separator`` are *comma (,)* and *semi-colon (;)*, respectively.
The following code examples illustrate the usage of the keyword arguments discussed above.
.. code-block:: python
:linenos:
# Import curve control points from a text file delimited with space
curve_ctrlpts = exchange.import_txt(file_name="control_points.txt", separator=" ")
# Import surface control points from a text file (2-dimensional file) w/ space and comma delimiters
surf_ctrlpts, size_u, size_v = exchange.import_txt(file_name="control_points.txt", two_dimensional=True,
separator=" ", col_separator=",")
Please note that this function does not check whether the user set delimiters to the same value or not.
:param file_name: file name of the text file
:type file_name: str
:param two_dimensional: type of the text file
:type two_dimensional: bool
:return: list of control points, if two_dimensional, then also returns size in u- and v-directions
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
# Read file
content = exch.read_file(file_name)
# Are we using a Jinja2 template?
j2tmpl = kwargs.get('jinja2', False)
if j2tmpl:
content = exch.process_template(content)
# File delimiters
col_sep = kwargs.get('col_separator', ";")
sep = kwargs.get('separator', ",")
return exch.import_text_data(content, sep, col_sep, two_dimensional)
@export
def export_txt(obj, file_name, two_dimensional=False, **kwargs):
""" Exports control points as a text file.
For curves the output is always a list of control points. For surfaces, it is possible to generate a 2-dimensional
control point output file using ``two_dimensional``.
Please see :py:func:`.exchange.import_txt()` for detailed description of the keyword arguments.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: file name of the text file to be saved
:type file_name: str
:param two_dimensional: type of the text file (only works for Surface objects)
:type two_dimensional: bool
:raises GeomdlException: an error occurred writing the file
"""
# Check if the user has set any control points
if obj.ctrlpts is None or len(obj.ctrlpts) == 0:
raise exch.GeomdlException("There are no control points to save!")
# Check the usage of two_dimensional flag
if obj.pdimension == 1 and two_dimensional:
# Silently ignore two_dimensional flag
two_dimensional = False
# File delimiters
col_sep = kwargs.get('col_separator', ";")
sep = kwargs.get('separator', ",")
content = exch.export_text_data(obj, sep, col_sep, two_dimensional)
return exch.write_file(file_name, content)
@export
def import_csv(file_name, **kwargs):
""" Reads control points from a CSV file and generates a 1-dimensional list of control points.
It is possible to use a different value separator via ``separator`` keyword argument. The following code segment
illustrates the usage of ``separator`` keyword argument.
.. code-block:: python
:linenos:
# By default, import_csv uses 'comma' as the value separator
ctrlpts = exchange.import_csv("control_points.csv")
# Alternatively, it is possible to import a file containing tab-separated values
ctrlpts = exchange.import_csv("control_points.csv", separator="\\t")
The only difference of this function from :py:func:`.exchange.import_txt()` is skipping the first line of the input
file which generally contains the column headings.
:param file_name: file name of the text file
:type file_name: str
:return: list of control points
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
# File delimiters
sep = kwargs.get('separator', ",")
content = exch.read_file(file_name, skip_lines=1)
return exch.import_text_data(content, sep)
@export
def export_csv(obj, file_name, point_type='evalpts', **kwargs):
""" Exports control points or evaluated points as a CSV file.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: output file name
:type file_name: str
:param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points
:type point_type: str
:raises GeomdlException: an error occurred writing the file
"""
if not 0 < obj.pdimension < 3:
raise exch.GeomdlException("Input object should be a curve or a surface")
# Pick correct points from the object
if point_type == 'ctrlpts':
points = obj.ctrlptsw if obj.rational else obj.ctrlpts
elif point_type == 'evalpts':
points = obj.evalpts
else:
raise exch.GeomdlException("Please choose a valid point type option. Possible types: ctrlpts, evalpts")
# Prepare CSV header
dim = len(points[0])
line = "dim "
for i in range(dim-1):
line += str(i + 1) + ", dim "
line += str(dim) + "\n"
# Prepare values
for pt in points:
line += ",".join([str(p) for p in pt]) + "\n"
# Write to file
return exch.write_file(file_name, line)
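# Hedged usage sketch, not part of the original module: export the evaluated
# points of a spline geometry to a CSV file. "geom" is assumed to be a geomdl
# curve or surface; the output file name is hypothetical.
def _example_export_csv(geom):
    export_csv(geom, "evalpts.csv", point_type='evalpts')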
@export
def import_cfg(file_name, **kwargs):
""" Imports curves and surfaces from files in libconfig format.
.. note::
Requires `libconf <https://pypi.org/project/libconf/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
return libconf.loads(data)
# Check if it is possible to import 'libconf'
try:
import libconf
except ImportError:
raise exch.GeomdlException("Please install 'libconf' package to use libconfig format: pip install libconf")
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
@export
def export_cfg(obj, file_name):
""" Exports curves and surfaces in libconfig format.
.. note::
Requires `libconf <https://pypi.org/project/libconf/>`_ package.
Libconfig format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
return libconf.dumps(data)
# Check if it is possible to import 'libconf'
try:
import libconf
except ImportError:
raise exch.GeomdlException("Please install 'libconf' package to use libconfig format: pip install libconf")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data)
@export
def import_yaml(file_name, **kwargs):
""" Imports curves and surfaces from files in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
def callback(data):
yaml = YAML()
return yaml.load(data)
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
@export
def export_yaml(obj, file_name):
""" Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
# Ref: https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
stream = StringIO()
yaml = YAML()
yaml.dump(data, stream)
return stream.getvalue()
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data)
@export
def import_json(file_name, **kwargs):
""" Imports curves and surfaces from files in JSON format.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
def callback(data):
return json.loads(data)
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
@export
def export_json(obj, file_name):
""" Exports curves and surfaces in JSON format.
JSON format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
return json.dumps(data, indent=4)
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data)
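# Hedged usage sketch, not part of the original module: round-trip a geometry
# through the JSON exchange format. The file name is hypothetical and
# import_json() returns a list of spline geometries.
def _example_json_roundtrip(geom):
    export_json(geom, "geometry.json")
    return import_json("geometry.json")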
@export
def import_obj(file_name, **kwargs):
""" Reads .obj files and generates faces.
Keyword Arguments:
* ``callback``: reference to the function that processes the faces for customized output
The structure of the callback function is shown below:
.. code-block:: python
def my_callback_function(face_list):
# "face_list" will be a list of elements.Face class instances
# The function should return a list
return list()
:param file_name: file name
:type file_name: str
:return: output of the callback function (default is a list of faces)
:rtype: list
"""
def default_callback(face_list):
return face_list
# Keyword arguments
callback_func = kwargs.get('callback', default_callback)
# Read and process the input file
content = exch.read_file(file_name)
content_arr = content.split("\n")
# Initialize variables
on_face = False
vertices = []
triangles = []
faces = []
# Index values
vert_idx = 1
tri_idx = 1
face_idx = 1
# Loop through the data
for carr in content_arr:
carr = carr.strip()
data = carr.split(" ")
data = [d.strip() for d in data]
if data[0] == "v":
if on_face:
on_face = not on_face
face = elements.Face(*triangles, id=face_idx)
faces.append(face)
face_idx += 1
vertices[:] = []
triangles[:] = []
vert_idx = 1
tri_idx = 1
vertex = elements.Vertex(*data[1:], id=vert_idx)
vertices.append(vertex)
vert_idx += 1
if data[0] == "f":
on_face = True
triangle = elements.Triangle(*[vertices[int(fidx) - 1] for fidx in data[1:]], id=tri_idx)
triangles.append(triangle)
tri_idx += 1
    # Process the final face
if triangles:
face = elements.Face(*triangles, id=face_idx)
faces.append(face)
# Return the output of the callback function
return callback_func(faces)
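# Hedged usage sketch, not part of the original module: import an .obj file
# and post-process the faces with the "callback" keyword documented above.
# The file name is hypothetical; the callback only counts the faces.
def _example_import_obj_count(file_name="mesh.obj"):
    def count_faces(face_list):
        return [len(face_list)]
    return import_obj(file_name, callback=count_faces)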
@export
def export_obj(surface, file_name, **kwargs):
""" Exports surface(s) as a .obj file.
Keyword Arguments:
    * ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 1*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
content = export_obj_str(surface, **kwargs)
return exch.write_file(file_name, content)
def export_obj_str(surface, **kwargs):
""" Exports surface(s) as a .obj file (string).
Keyword Arguments:
    * ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 1*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .obj file generated
:rtype: str
"""
# Get keyword arguments
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
include_vertex_normal = kwargs.get('vertex_normals', False)
include_param_vertex = kwargs.get('parametric_vertices', False)
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
# Create the string and start adding triangulated surface points
line = "# Generated by geomdl\n"
vertex_offset = 0 # count the vertices to update the face numbers correctly
# Initialize lists for geometry data
str_v = [] # vertices
str_vn = [] # vertex normals
str_vp = [] # parameter space vertices
str_f = [] # faces
# Loop through SurfaceContainer object
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
vertices = srf.tessellator.vertices
triangles = srf.tessellator.faces
# Collect vertices
for vert in vertices:
temp = "v " + str(vert.x) + " " + str(vert.y) + " " + str(vert.z) + "\n"
str_v.append(temp)
# Collect parameter space vertices
if include_param_vertex:
for vert in vertices:
temp = "vp " + str(vert.uv[0]) + " " + str(vert.uv[1]) + "\n"
str_vp.append(temp)
# Compute vertex normals
if include_vertex_normal:
for vert in vertices:
sn = operations.normal(srf, vert.uv)
temp = "vn " + str(sn[1][0]) + " " + str(sn[1][1]) + " " + str(sn[1][2]) + "\n"
str_vn.append(temp)
# Collect faces (1-indexed)
for t in triangles:
vl = t.data
temp = "f " + \
str(vl[0] + 1 + vertex_offset) + " " + \
str(vl[1] + 1 + vertex_offset) + " " + \
str(vl[2] + 1 + vertex_offset) + "\n"
str_f.append(temp)
# Update vertex offset
vertex_offset = len(str_v)
# Write all collected data to the return string
for lv in str_v:
line += lv
for lvn in str_vn:
line += lvn
for lvp in str_vp:
line += lvp
for lf in str_f:
line += lf
return line
@export
def export_stl(surface, file_name, **kwargs):
""" Exports surface(s) as a .stl file in plain text or binary format.
Keyword Arguments:
* ``binary``: flag to generate a binary STL file. *Default: True*
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
binary = kwargs.get('binary', True)
if 'binary' in kwargs:
kwargs.pop('binary')
content = export_stl_str(surface, binary=binary, **kwargs)
return exch.write_file(file_name, content, binary=binary)
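# Hedged usage sketch, not part of the original module: write a surface or a
# surface container to a binary STL file. The file name is hypothetical.
def _example_export_stl(surf):
    export_stl(surf, "surface.stl", binary=True, vertex_spacing=1)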
def export_stl_str(surface, **kwargs):
""" Exports surface(s) as a .stl file in plain text or binary format (string).
Keyword Arguments:
* ``binary``: flag to generate a binary STL file. *Default: False*
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
    * ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .stl file generated
:rtype: str
"""
binary = kwargs.get('binary', False)
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
triangles_list = []
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
triangles = srf.tessellator.faces
triangles_list += triangles
# Write triangle list to ASCII or binary STL file
if binary:
line = b'\0' * 80 # header
line += struct.pack('<i', len(triangles_list)) # number of triangles
for t in triangles_list:
line += struct.pack('<3f', *linalg.triangle_normal(t)) # normal
for v in t.vertices:
line += struct.pack('<3f', *v.data) # vertices
line += b'\0\0' # attribute byte count
else:
line = "solid Surface\n"
for t in triangles_list:
nvec = linalg.triangle_normal(t)
line += "\tfacet normal " + str(nvec[0]) + " " + str(nvec[1]) + " " + str(nvec[2]) + "\n"
line += "\t\touter loop\n"
for v in t.vertices:
line += "\t\t\tvertex " + str(v.x) + " " + str(v.y) + " " + str(v.z) + "\n"
line += "\t\tendloop\n"
line += "\tendfacet\n"
line += "endsolid Surface\n"
return line
@export
def export_off(surface, file_name, **kwargs):
""" Exports surface(s) as a .off file.
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
content = export_off_str(surface, **kwargs)
return exch.write_file(file_name, content)
def export_off_str(surface, **kwargs):
""" Exports surface(s) as a .off file (string).
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .off file generated
:rtype: str
"""
# Get keyword arguments
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
# Count the vertices to update the face numbers correctly
vertex_offset = 0
# Initialize lists for vertices, vertex normals and faces
str_v = []
str_f = []
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
vertices = srf.tessellator.vertices
triangles = srf.tessellator.faces
# Collect vertices
for vert in vertices:
line = str(vert.x) + " " + str(vert.y) + " " + str(vert.z) + "\n"
str_v.append(line)
# Collect faces (zero-indexed)
for t in triangles:
vl = t.data
line = "3 " + \
str(vl[0] + vertex_offset) + " " + \
str(vl[1] + vertex_offset) + " " + \
str(vl[2] + vertex_offset) + "\n"
str_f.append(line)
# Update vertex offset
vertex_offset = len(str_v)
# Write file header
line = "OFF\n"
line += str(len(str_v)) + " " + str(len(str_f)) + " 0\n"
# Write all collected data to the file
for lv in str_v:
line += lv
for lf in str_f:
line += lf
return line
@export
def import_smesh(file):
""" Generates NURBS surface(s) from surface mesh (smesh) file(s).
    *smesh* files are text files which contain a set of NURBS surfaces. Each file in the set corresponds to one
    NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object composed of
several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as
* ``smesh.X.Y.txt``
* ``smesh.X.dat``
where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number of
the surface inside the complete object.
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_surf_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_surf_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements
@export
def export_smesh(surface, file_name, **kwargs):
""" Exports surface(s) as surface mesh (smesh) files.
Please see :py:func:`.import_smesh()` for details on the file format.
:param surface: surface(s) to be exported
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
# Get keyword arguments
decimals = kwargs.get('decimals', 18)
# Split file name and extension
fname, fext = os.path.splitext(file_name)
# Enumerate file name only if we are working with multiple surfaces
numerate_file = True if len(surface) > 1 else False
for idx, s in enumerate(surface):
if s.rational:
pts = s.ctrlptsw
else:
pts = compatibility.combine_ctrlpts_weights(s.ctrlpts)
line = str(s.dimension) + "\n"
line += str(s.degree_u) + " " + str(s.degree_v) + "\n"
line += str(s.ctrlpts_size_u) + " " + str(s.ctrlpts_size_v) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in s.knotvector_u]) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in s.knotvector_v]) + "\n"
# Flip control points
ctrlptsw = compatibility.flip_ctrlpts(pts, s.ctrlpts_size_u, s.ctrlpts_size_v)
# Convert control points into (x, y, z, w) format
ctrlptsw = compatibility.generate_ctrlpts_weights(ctrlptsw)
for ptw in ctrlptsw:
line += " ".join([("{:." + str(decimals) + "f}").format(p) for p in ptw]) + "\n"
# Open or closed?
line += "1\n"
# Write to file
fname_curr = fname + "." + str(idx + 1) if numerate_file else fname
exch.write_file(fname_curr + fext, line)
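# Hedged usage sketch, not part of the original module: save a surface
# container as smesh files. When the container holds more than one surface,
# the writer numbers the output files (smesh.1.dat, smesh.2.dat, ...).
def _example_export_smesh(surfaces):
    export_smesh(surfaces, "smesh.dat")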
@export
def import_vmesh(file):
""" Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_vol_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_vol_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements
@export
def export_vmesh(volume, file_name, **kwargs):
""" Exports volume(s) as volume mesh (vmesh) files.
:param volume: volume(s) to be exported
:type volume: abstract.Volume
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
if volume.pdimension != 3:
raise exch.GeomdlException("Can only export volumes")
# Get keyword arguments
decimals = kwargs.get('decimals', 18)
# Split file name and extension
fname, fext = os.path.splitext(file_name)
# Enumerate file name only if we are working with multiple volumes
numerate_file = True if len(volume) > 1 else False
for idx, v in enumerate(volume):
if v.rational:
pts = v.ctrlptsw
else:
pts = compatibility.combine_ctrlpts_weights(v.ctrlpts)
line = str(v.dimension) + "\n"
line += str(v.degree_u) + " " + str(v.degree_v) + " " + str(v.degree_w) + "\n"
line += str(v.ctrlpts_size_u) + " " + str(v.ctrlpts_size_v) + " " + str(v.ctrlpts_size_w) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in v.knotvector_u]) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in v.knotvector_v]) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in v.knotvector_w]) + "\n"
# Convert control points into (x, y, z, w)
ctrlptsw = []
for w in range(v.ctrlpts_size_w):
srfpts = pts[(w * v.ctrlpts_size_u * v.ctrlpts_size_v):((w + 1) * v.ctrlpts_size_u * v.ctrlpts_size_v)]
# Flip control points
ctrlptsw += compatibility.flip_ctrlpts(srfpts, v.ctrlpts_size_u, v.ctrlpts_size_v)
# Convert control points into (x, y, z, w) format
ctrlptsw = compatibility.generate_ctrlpts_weights(ctrlptsw)
for ptw in ctrlptsw:
line += " ".join([("{:." + str(decimals) + "f}").format(p) for p in ptw]) + "\n"
# Open or closed?
line += "1\n"
# Write to file
fname_curr = fname + "." + str(idx + 1) if numerate_file else fname
exch.write_file(fname_curr + fext, line)
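# A minimal usage sketch (hedged): "my_volume" is an assumed NURBS volume (or a
# container of volumes); with more than one volume the output files are numbered
# out.1.vmesh, out.2.vmesh, ...
#
#     export_vmesh(my_volume, "out.vmesh", decimals=6)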
@export
def import_3dm(file_name, **kwargs):
""" Imports curves and surfaces from Rhinoceros/OpenNURBS .3dm files.
.. deprecated:: 5.2.2
``rw3dm`` Python module is replaced by ``on2json``. It can be used to convert .3dm files to geomdl JSON format.
Please refer to https://github.com/orbingol/rw3dm for more details.
:param file_name: input file name
:type file_name: str
"""
raise GeomdlException("This API call has been deprecated. Please refer to https://github.com/orbingol/rw3dm")
@export
def export_3dm(obj, file_name, **kwargs):
""" Exports NURBS curves and surfaces to Rhinoceros/OpenNURBS .3dm files.
.. deprecated:: 5.2.2
``rw3dm`` Python module is replaced by ``json2on``. It can be used to convert geomdl JSON format to .3dm files.
Please refer to https://github.com/orbingol/rw3dm for more details.
:param obj: curves/surfaces to be exported
:type obj: abstract.Curve, abstract.Surface, multi.CurveContainer, multi.SurfaceContainer
:param file_name: file name
:type file_name: str
"""
raise GeomdlException("This API call has been deprecated. Please refer to https://github.com/orbingol/rw3dm")
|
mit
| 1,979,683,606,880,411,600
| 35.06041
| 119
| 0.638447
| false
| 3.73122
| false
| false
| false
|
brokendata/bigmler
|
bigmler/train_reader.py
|
1
|
11880
|
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""TrainReader class
Manages the training input data, its headers and labels if the objective
field is a multi-label field
"""
from __future__ import absolute_import
import csv
import sys
from bigml.util import get_csv_delimiter
from bigml.io import UnicodeReader
from bigmler.checkpoint import file_number_of_lines
from bigmler.labels import get_label_field
from bigmler.utils import PYTHON3, SYSTEM_ENCODING, FILE_ENCODING
from bigmler.utils import encode2, decode2
AGGREGATES = {
'count': len,
'last': lambda x: x[-1],
'first': lambda x: x[0]
}
class TrainReader(object):
"""Retrieves csv info and manages objective fields and multi-labels
"""
def __init__(self, training_set, training_set_header, objective_field,
multi_label=False, labels=None, label_separator=None,
training_separator=None, multi_label_fields=None,
label_aggregates=None, objective=True):
"""Builds a generator from a csv file
`training_set`: path to the training data file
`training_set_header`: boolean, True means that headers are first
row in the file
`objective_field`: objective field column or field name
        `labels`: optional subset of labels to use from the multi-label objective field.
"""
self.training_set = training_set
if training_set.__class__.__name__ == "StringIO":
self.encode = None
            self.training_set = UTF8Recoder(training_set, SYSTEM_ENCODING)
else:
self.encode = None if PYTHON3 else FILE_ENCODING
self.training_set_header = training_set_header
self.training_reader = None
self.multi_label = multi_label
self.objective = objective
if label_aggregates is None:
label_aggregates = []
self.label_aggregates = label_aggregates
self.training_separator = (decode2(training_separator,
encoding="string_escape")
if training_separator is not None
else get_csv_delimiter())
if len(self.training_separator) > 1:
sys.exit("Only one character can be used as test data separator.")
# opening csv reader
self.reset()
self.label_separator = (decode2(label_separator,
encoding="string_escape")
if label_separator is not None
else get_csv_delimiter())
first_row = self.get_next(reset=not training_set_header)
self.row_length = len(first_row)
if training_set_header:
self.headers = first_row
else:
self.headers = [("field_%s" % index) for index in
range(0, self.row_length)]
self.multi_label_fields = sorted(self._get_columns(multi_label_fields))
if objective:
self.objective_column = self._get_columns([objective_field])[0]
if not self.objective_column in self.multi_label_fields:
self.multi_label_fields.append(self.objective_column)
self.labels = labels
self.fields_labels = self._get_labels()
if objective:
if labels is None:
self.labels = self.fields_labels[self.objective_column]
self.objective_name = self.headers[self.objective_column]
def __iter__(self):
"""Iterator method
"""
return self
def get_label_headers(self):
"""Returns a list of headers with the new extended field names for
each objective label
"""
new_headers = self.get_headers()
for field_column in self.multi_label_fields:
labels = self.fields_labels[field_column]
new_field_names = [get_label_field(self.headers[field_column],
label)
for label in labels]
new_headers.extend(new_field_names)
for aggregate in self.label_aggregates:
new_headers.append(get_label_field(
self.headers[field_column], aggregate))
if not PYTHON3:
new_headers = [encode2(header) for header in new_headers]
return new_headers
def _get_columns(self, fields_list):
"""Receives a comma-separated list of fields given by name or
column number and returns column number list
"""
column_list = []
if fields_list is None:
return column_list
if not isinstance(fields_list, list):
fields_list = [fields_list]
for field in fields_list:
column = None
if isinstance(field, int):
column = field
elif field is None:
column = self.row_length - 1
else:
try:
column = self.headers.index(field)
except ValueError:
if self.objective:
sys.exit("The %s has been set as multi-label field but"
" it cannot be found in the headers row: \n"
" %s" %
(field,
", ".join([encode2(header)
for header in self.headers])))
else:
column = None
if column is not None:
column_list.append(column)
return column_list
def reset(self):
"""Starts a new csv reader object
"""
try:
self.training_set.close()
except (IOError, AttributeError):
pass
try:
self.training_reader = UnicodeReader(
self.training_set, delimiter=self.training_separator,
lineterminator="\n").open_reader()
except IOError:
sys.exit("Error: cannot read training %s" % self.training_set)
def next(self):
"""Iterator method for next item
"""
return self.get_next()
def get_next(self, extended=False, reset=False):
"""Returns the next row. If extended is True, the row is extended with
a list of booleans depending on whether the label is in the
objective field value or not. If reset is True, the file is
reopened and pointer starts at the beginning of the file.
"""
row = self.training_reader.next()
row = [value.strip() for value in row]
if extended:
if self.multi_label and self.fields_labels is None:
self.fields_labels = self._get_labels()
for field_column in self.multi_label_fields:
aggregated_field_value = row[field_column]
field_values = aggregated_field_value.split(
self.label_separator)
field_values = [value.strip() for
value in field_values]
labels_row = [int(label in field_values) for label in
self.fields_labels[field_column]]
row.extend(labels_row)
for aggregate in self.label_aggregates:
row.append(AGGREGATES[aggregate](field_values))
if reset:
self.reset()
if not PYTHON3:
row = [encode2(item) for item in row]
return row
def number_of_rows(self):
"""Returns the number of rows in the test file
"""
rows = file_number_of_lines(self.training_set)
if self.training_set_header:
rows -= 1
return rows
def has_headers(self):
"""Returns whether the training set file has a headers row
"""
return self.training_set_header
def _get_labels(self):
"""Returns the list of labels in the multi-label fields
"""
labels = {}
for field_column in self.multi_label_fields:
labels[field_column] = []
for row in self:
for field_column in self.multi_label_fields:
labels = self._get_field_labels(row, labels,
field_column,
self.label_separator)
return labels
def _get_field_labels(self, row, labels, field_column, separator):
"""Returns the list of labels in a multi-label field
"""
field_value = row[field_column]
if self.multi_label:
new_labels = field_value.split(separator)
new_labels = [decode2(label).strip()
for label in new_labels]
# TODO: clean user given missing tokens
            new_labels = [label for label in new_labels if label != '']
if new_labels != []:
if (self.objective and field_column == self.objective_column
and self.labels is not None):
# If user gave the subset of labels, use only those
new_labels = [label for label in self.labels if
label in new_labels]
labels[field_column].extend(new_labels)
else:
labels[field_column].append(field_value)
labels[field_column] = sorted(list(set(labels[field_column])))
return labels
def get_headers(self, objective_field=True):
"""Returns headers. If objective_field is False, the objective field
header is removed.
"""
if objective_field:
return self.headers[:]
new_headers = self.headers[:]
if self.objective:
del new_headers[self.objective_column]
return new_headers
def new_fields_info(self):
"""Dict of 2-item lists 'field_column': [label, label_column]
describing the per label extension
"""
info = {}
column = len(self.headers)
for field_column in self.multi_label_fields:
alpha_field_column = str(field_column)
info[alpha_field_column] = []
labels = self.fields_labels[field_column]
for label in labels:
info[alpha_field_column].append([label, column])
column += 1
# skip the aggregate values columns
column += len(self.label_aggregates)
return info
def get_multi_label_data(self):
"""Returns a dict to store the multi-label info that defines this
source
"""
if self.objective:
return {
"multi_label_fields": [[column, self.headers[column]]
for column in self.multi_label_fields],
"generated_fields": self.new_fields_info(),
"objective_name": self.objective_name,
"objective_column": self.objective_column}
def close(self):
"""Closing file handler
"""
self.training_reader.close_reader()
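# A minimal usage sketch (hedged): "training.csv" and the "tags" objective field
# are hypothetical. The reader is normally driven by bigmler itself, but it can
# be exercised directly to inspect the extended multi-label rows:
#
#     reader = TrainReader("training.csv", True, "tags",
#                          multi_label=True, label_separator=",")
#     headers = reader.get_label_headers()   # original headers + one column per label
#     row = reader.get_next(extended=True)   # 0/1 label flags and aggregates appended
#     reader.close()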
|
apache-2.0
| 4,185,642,118,313,162,000
| 36.358491
| 79
| 0.555724
| false
| 4.606437
| false
| false
| false
|
edgarcosta/endomorphisms
|
endomorphisms/Representations.py
|
1
|
1456
|
"""
* Representation functions
*
* Copyright (C) 2016-2017
* Edgar Costa (edgarcosta@math.dartmouth.edu)
* Davide Lombardo (davide.lombardo@math.u-psud.fr)
* Jeroen Sijsling (jeroen.sijsling@uni-ulm.de)
*
* See LICENSE.txt for license details.
"""
from sage.all import magma
def repr_curve(X):
curve_type = magma.CurveType(X)
if str(curve_type) == "hyperelliptic":
f, h = magma.HyperellipticPolynomials(X, nvals = 2)
if magma.IsZero(h):
return " the hyperelliptic curve y^2 = {}".format(str(f))
else:
return " the hyperelliptic curve y^2 + ({})*y = {}".format(str(h), str(f))
elif str(curve_type) == "plane":
F = magma.DefiningPolynomial(X)
return " the plane curve {} = 0".format(str(F))
def repr_endomorphism_data(End):
return "The endomorphism data of" + repr_curve(End.X)
def repr_lattice(Lat):
return "The endomorphism lattice of" + repr_curve(Lat.X)
def repr_over_field(over_field):
pre = "The endomorphism structure of" + repr_curve(over_field.X)
if over_field.field == "geometric":
post = " over the algebraic closure of its base field"
elif over_field.field == "base":
post = " over its base field"
else:
post = " over " + str(over_field.field)
return pre + post
def repr_decomposition(decomp):
return "The decomposition structure of" + repr_curve(decomp.X)
|
gpl-2.0
| 1,946,836,060,677,192,400
| 32.860465
| 86
| 0.62294
| false
| 3.008264
| false
| false
| false
|
kgullikson88/LasCampanas-MIKE
|
ConvertToExtensions.py
|
1
|
2043
|
import FittingUtilities
from astropy.io import fits as pyfits
import sys
import os
import numpy
import matplotlib.pyplot as plt
import HelperFunctions
left_trim = 8
right_trim = 0
bad_regions = {}
if __name__ == "__main__":
fileList = []
for arg in sys.argv[1:]:
fileList.append(arg)
for fname in fileList:
outfilename = "%s-0.fits" %(fname.split(".fits")[0])
header = pyfits.getheader(fname)
try:
orders = HelperFunctions.ReadFits(fname)
except ValueError:
orders = HelperFunctions.ReadFits(fname, errors=2)
orders = orders[::-1] #Reverse order so the bluest order is first
column_list = []
for i, order in enumerate(orders):
left, right = left_trim, order.size()-right_trim
if i in bad_regions.keys():
region = bad_regions[i]
left = numpy.searchsorted(order.x, region[0])
right = numpy.searchsorted(order.x, region[1])
if left == 0 or right == order.size():
order.x = numpy.delete(order.x, numpy.arange(left, right))
order.y = numpy.delete(order.y, numpy.arange(left, right))
order.cont = numpy.delete(order.cont, numpy.arange(left, right))
order.err = numpy.delete(order.err, numpy.arange(left, right))
else:
print "Warning! Bad region covers the middle of order %i" %i
print "Interpolating rather than removing"
order.y[left:right] = order.cont[left:right]
order.err[left:right] = 9e9
else:
order = order[left:right]
if order.size() < 10:
continue
order.cont = FittingUtilities.Continuum(order.x, order.y, fitorder=3, lowreject=1.5, highreject=10)
      columns = {"wavelength": order.x,
                 "flux": order.y,
                 "continuum": order.cont,
                 "error": order.err}
column_list.append(columns)
HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode="new")
|
gpl-3.0
| 8,868,885,856,817,803,000
| 31.428571
| 105
| 0.604014
| false
| 3.721311
| false
| false
| false
|
Arvedui/i3pystatus
|
i3pystatus/mail/maildir.py
|
1
|
1360
|
import os
from i3pystatus.mail import Backend
class MaildirMail(Backend):
"""
Checks for local mail in Maildir
"""
settings = (
"directory",
)
required = ("directory",)
directory = ""
def init(self):
self.directory = os.path.expanduser(self.directory)
@property
def unread(self):
def check_seen_flag(msgname):
"""
Return false if (S)een flag set
            The code of this function was partially extracted from Python's
            Maildir and MaildirMessage classes, which are not used here because
            they cannot read the message flags without reading the entire message.
"""
maildir_info = msgname.split(':')[-1]
            # Logical implication: if maildir_info starts with '2,' the message
            # is unread only when 'S' is absent from the flag field; if it does
            # not start with '2,' no flags are set, so it counts as unread.
return not maildir_info.startswith('2,') or 'S' not in maildir_info[2:]
path_new = os.path.join(self.directory, "new")
new_messages = len(os.listdir(path_new))
path_cur = os.path.join(self.directory, "cur")
unread_messages = len(list(filter(check_seen_flag, os.listdir(path_cur))))
return new_messages + unread_messages
Backend = MaildirMail
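# Illustration (hedged): maildir file names carry their flags after ":2,", e.g.
# "1203988087.4035_1.host:2,S" has been seen while "1203988087.4035_1.host:2,"
# has not. In an i3pystatus configuration this backend is typically registered
# roughly as follows (the path and registration call are assumptions):
#
#     from i3pystatus.mail import maildir
#     status.register("mail",
#                     backends=[maildir.MaildirMail(directory="~/Maildir")])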
|
mit
| -4,569,347,419,518,561,000
| 29.222222
| 90
| 0.611029
| false
| 4.047619
| false
| false
| false
|
Statoil/libecl
|
python/ecl/grid/ecl_region.py
|
1
|
41631
|
# Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_region.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module used to select cells based on many different criteria.
This module implements the class EclRegion which can be used to select
cells in a grid matching a criteria. A wide range of different
criteria are supported. Many of the special functions for implementing
mathematical operations are implemented, so that regions can be
combined e.g. with logical &.
When the selection process is complete the region instance can be
queried for the corresponding list of indices.
"""
from functools import wraps
import ctypes
from cwrap import BaseCClass
import ecl
from ecl.util.util import monkey_the_camel
from ecl.util.util import IntVector
from ecl import EclPrototype
from ecl.grid.faults import Layer
from ecl import EclDataType
from ecl.eclfile import EclKW
from ecl.util.geometry import CPolyline
def select_method(select):
"""
The select_method method decorator is applied to all the
select_xxx() methods. The purpose of this decorator is to
allow the select_xxx() methods to have an optional argument
@intersect. If the @intersect argument is True the results of
the current select method will be and'ed with the current
selection, instead of or'ed which is the default.
Consider this example:
region = EclRegion( grid , False )
region.select_islice(0, 5) # Selects all cells with i[0:5]
region.select_jslice(0, 5) # Selects all cells with j[0:5]
When these two calls have completed selection will contain all
the cells which are either in i-interval [0:5] or in
j-interval [0:5]. If we supply the @intersect argument in the
second call the j selection will only be applied to the cells
in i[0:5] interval:
region = EclRegion( grid , False )
# Select all cells with i[0:5]:
region.select_islice(0, 5)
# Select all cells with j[0:5] AND i[0:5]:
region.select_jslice(0, 5, intersect=True)
"""
@wraps(select)
def select_wrapper(self , *args , **kwargs):
intersect = 'intersect' in kwargs and kwargs['intersect']
if intersect:
new_region = EclRegion( self.grid , False )
select(new_region , *args )
self &= new_region
else:
select(self , *args )
return select_wrapper
class EclRegion(BaseCClass):
TYPE_NAME = "ecl_region"
_alloc = EclPrototype("void* ecl_region_alloc( ecl_grid , bool )", bind = False)
_alloc_copy = EclPrototype("ecl_region_obj ecl_region_alloc_copy( ecl_region )")
_set_kw_int = EclPrototype("void ecl_region_set_kw_int( ecl_region , ecl_kw , int, bool) ")
_set_kw_float = EclPrototype("void ecl_region_set_kw_float( ecl_region , ecl_kw , float, bool ) ")
_set_kw_double = EclPrototype("void ecl_region_set_kw_double( ecl_region , ecl_kw , double , bool) ")
_shift_kw_int = EclPrototype("void ecl_region_shift_kw_int( ecl_region , ecl_kw , int, bool) ")
_shift_kw_float = EclPrototype("void ecl_region_shift_kw_float( ecl_region , ecl_kw , float, bool ) ")
_shift_kw_double = EclPrototype("void ecl_region_shift_kw_double( ecl_region , ecl_kw , double , bool) ")
_scale_kw_int = EclPrototype("void ecl_region_scale_kw_int( ecl_region , ecl_kw , int, bool) ")
_scale_kw_float = EclPrototype("void ecl_region_scale_kw_float( ecl_region , ecl_kw , float, bool ) ")
_scale_kw_double = EclPrototype("void ecl_region_scale_kw_double( ecl_region , ecl_kw , double , bool) ")
_sum_kw_int = EclPrototype("int ecl_region_sum_kw_int( ecl_region , ecl_kw , bool) ")
_sum_kw_float = EclPrototype("float ecl_region_sum_kw_float( ecl_region , ecl_kw , bool ) ")
_sum_kw_double = EclPrototype("double ecl_region_sum_kw_double( ecl_region , ecl_kw , bool) ")
_sum_kw_bool = EclPrototype("int ecl_region_sum_kw_int( ecl_region , ecl_kw , bool) ")
_free = EclPrototype("void ecl_region_free( ecl_region )")
_reset = EclPrototype("void ecl_region_reset( ecl_region )")
_select_all = EclPrototype("void ecl_region_select_all( ecl_region )")
_deselect_all = EclPrototype("void ecl_region_deselect_all( ecl_region )")
_select_equal = EclPrototype("void ecl_region_select_equal( ecl_region , ecl_kw , int )")
_deselect_equal = EclPrototype("void ecl_region_deselect_equal( ecl_region , ecl_kw , int)")
_select_less = EclPrototype("void ecl_region_select_smaller( ecl_region , ecl_kw , float )")
_deselect_less = EclPrototype("void ecl_region_deselect_smaller( ecl_region , ecl_kw , float )")
_select_more = EclPrototype("void ecl_region_select_larger( ecl_region , ecl_kw , float )")
_deselect_more = EclPrototype("void ecl_region_deselect_larger( ecl_region , ecl_kw , float )")
_select_in_interval = EclPrototype("void ecl_region_select_in_interval( ecl_region, ecl_kw , float , float )")
_deselect_in_interval = EclPrototype("void ecl_region_deselect_in_interval( ecl_region, ecl_kw, float , float )")
_invert_selection = EclPrototype("void ecl_region_invert_selection( ecl_region )")
_select_box = EclPrototype("void ecl_region_select_from_ijkbox(ecl_region , int , int , int , int , int , int)")
_deselect_box = EclPrototype("void ecl_region_deselect_from_ijkbox(ecl_region , int , int , int , int , int , int)")
_imul_kw = EclPrototype("void ecl_region_kw_imul( ecl_region , ecl_kw , ecl_kw , bool)")
_idiv_kw = EclPrototype("void ecl_region_kw_idiv( ecl_region , ecl_kw , ecl_kw , bool)")
_iadd_kw = EclPrototype("void ecl_region_kw_iadd( ecl_region , ecl_kw , ecl_kw , bool)")
_isub_kw = EclPrototype("void ecl_region_kw_isub( ecl_region , ecl_kw , ecl_kw , bool)")
_copy_kw = EclPrototype("void ecl_region_kw_copy( ecl_region , ecl_kw , ecl_kw , bool)")
_intersect = EclPrototype("void ecl_region_intersection( ecl_region , ecl_region )")
_combine = EclPrototype("void ecl_region_union( ecl_region , ecl_region )")
_subtract = EclPrototype("void ecl_region_subtract( ecl_region , ecl_region )")
_xor = EclPrototype("void ecl_region_xor( ecl_region , ecl_region )")
_get_kw_index_list = EclPrototype("int_vector_ref ecl_region_get_kw_index_list( ecl_region , ecl_kw , bool )")
_get_active_list = EclPrototype("int_vector_ref ecl_region_get_active_list( ecl_region )")
_get_global_list = EclPrototype("int_vector_ref ecl_region_get_global_list( ecl_region )")
_get_active_global = EclPrototype("int_vector_ref ecl_region_get_global_active_list( ecl_region )")
_select_cmp_less = EclPrototype("void ecl_region_cmp_select_less( ecl_region , ecl_kw , ecl_kw)")
_select_cmp_more = EclPrototype("void ecl_region_cmp_select_more( ecl_region , ecl_kw , ecl_kw)")
_deselect_cmp_less = EclPrototype("void ecl_region_cmp_deselect_less( ecl_region , ecl_kw , ecl_kw)")
_deselect_cmp_more = EclPrototype("void ecl_region_cmp_deselect_more( ecl_region , ecl_kw , ecl_kw)")
_select_islice = EclPrototype("void ecl_region_select_i1i2( ecl_region , int , int )")
_deselect_islice = EclPrototype("void ecl_region_deselect_i1i2( ecl_region , int , int )")
_select_jslice = EclPrototype("void ecl_region_select_j1j2( ecl_region , int , int )")
_deselect_jslice = EclPrototype("void ecl_region_deselect_j1j2( ecl_region , int , int )")
_select_kslice = EclPrototype("void ecl_region_select_k1k2( ecl_region , int , int )")
_deselect_kslice = EclPrototype("void ecl_region_deselect_k1k2( ecl_region , int , int )")
_select_deep_cells = EclPrototype("void ecl_region_select_deep_cells( ecl_region , double )")
_deselect_deep_cells = EclPrototype("void ecl_region_select_deep_cells( ecl_region , double )")
_select_shallow_cells = EclPrototype("void ecl_region_select_shallow_cells( ecl_region , double )")
_deselect_shallow_cells = EclPrototype("void ecl_region_select_shallow_cells( ecl_region , double )")
_select_small = EclPrototype("void ecl_region_select_small_cells( ecl_region , double )")
_deselect_small = EclPrototype("void ecl_region_deselect_small_cells( ecl_region , double )")
_select_large = EclPrototype("void ecl_region_select_large_cells( ecl_region , double )")
_deselect_large = EclPrototype("void ecl_region_deselect_large_cells( ecl_region , double )")
_select_thin = EclPrototype("void ecl_region_select_thin_cells( ecl_region , double )")
_deselect_thin = EclPrototype("void ecl_region_deselect_thin_cells( ecl_region , double )")
_select_thick = EclPrototype("void ecl_region_select_thick_cells( ecl_region , double )")
_deselect_thick = EclPrototype("void ecl_region_deselect_thick_cells( ecl_region , double )")
_select_active = EclPrototype("void ecl_region_select_active_cells( ecl_region )")
_select_inactive = EclPrototype("void ecl_region_select_inactive_cells( ecl_region )")
_deselect_active = EclPrototype("void ecl_region_deselect_active_cells( ecl_region )")
_deselect_inactive = EclPrototype("void ecl_region_deselect_inactive_cells( ecl_region )")
_select_above_plane = EclPrototype("void ecl_region_select_above_plane( ecl_region , double* , double* )")
_select_below_plane = EclPrototype("void ecl_region_select_below_plane( ecl_region , double* , double* )")
_deselect_above_plane = EclPrototype("void ecl_region_deselect_above_plane( ecl_region, double* , double* )")
_deselect_below_plane = EclPrototype("void ecl_region_deselect_below_plane( ecl_region, double* , double* )")
_select_inside_polygon = EclPrototype("void ecl_region_select_inside_polygon( ecl_region , geo_polygon)")
_select_outside_polygon = EclPrototype("void ecl_region_select_outside_polygon( ecl_region , geo_polygon)")
_deselect_inside_polygon = EclPrototype("void ecl_region_deselect_inside_polygon( ecl_region , geo_polygon)")
_deselect_outside_polygon = EclPrototype("void ecl_region_deselect_outside_polygon( ecl_region , geo_polygon)")
_set_name = EclPrototype("void ecl_region_set_name( ecl_region , char*)")
_get_name = EclPrototype("char* ecl_region_get_name( ecl_region )")
_contains_ijk = EclPrototype("void ecl_region_contains_ijk( ecl_region , int , int , int)")
_contains_global = EclPrototype("void ecl_region_contains_global( ecl_region, int )")
_contains_active = EclPrototype("void ecl_region_contains_active( ecl_region , int )")
_equal = EclPrototype("bool ecl_region_equal( ecl_region , ecl_region )")
_select_true = EclPrototype("void ecl_region_select_true( ecl_region , ecl_kw)")
_select_false = EclPrototype("void ecl_region_select_false( ecl_region , ecl_kw)")
_deselect_true = EclPrototype("void ecl_region_deselect_true( ecl_region , ecl_kw)")
_deselect_false = EclPrototype("void ecl_region_deselect_false( ecl_region , ecl_kw)")
_select_from_layer = EclPrototype("void ecl_region_select_from_layer( ecl_region , layer , int , int)")
_deselect_from_layer = EclPrototype("void ecl_region_deselect_from_layer( ecl_region , layer , int , int)")
def __init__(self , grid , preselect):
"""
Create a new region selector for cells in @grid.
Will create a new region selector to select and deselect the
cells in the grid given by @grid. The input argument @grid
should be a EclGrid instance. You can start with either all
cells, or no cells, selected, depending on the value of
@preselect.
"""
self.grid = grid
self.active_index = False
c_ptr = self._alloc( grid , preselect )
super(EclRegion , self).__init__( c_ptr )
def free(self):
self._free( )
def __eq__(self , other):
return self._equal(other)
def __hash__(self):
return hash(hash(self.grid) + hash(self.active_index))
def __deep_copy__(self , memo):
"""
Creates a deep copy of the current region.
"""
return self._alloc_copy( )
def __nonzero__(self):
global_list = self.get_global_list()
return len(global_list) > 0
def __bool__(self):
return self.__nonzero__()
def __iand__(self , other):
"""
Will perform set intersection operation inplace.
Will update the current region selection so that the elements
selected in self are also selected in @other. Bound to the
inplace & operator, i.e.
reg1 &= reg2
will eventually call this method.
"""
if isinstance(other , EclRegion):
self._intersect( other)
else:
raise TypeError("Ecl region can only intersect with other EclRegion instances")
return self
def __isub__(self , other):
"""
Inplace "subtract" one selection from another.
Bound to reg -= reg2
"""
if isinstance( other , EclRegion ):
self._subtract( other)
else:
raise TypeError("Ecl region can only subtract with other EclRegion instances")
return self
def __ior__(self , other):
"""
Will perform set operation union in place.
The current region selection will be updated to contain all
the elements which are selected either in the current region,
        or in @other; bound to the inplace | operator, so you can write e.g.
reg1 |= reg2
to update reg1 with the selections from reg2.
"""
if isinstance( other , EclRegion):
self._combine( other)
else:
raise TypeError("Ecl region can only be combined with other EclRegion instances")
return self
def __iadd__(self , other):
"""
Combines to regions - see __ior__().
"""
return self.__ior__( other )
def __or__(self , other):
"""
Creates a new region which is the union of @self and other.
The method will create a new region which selection status is
given by the logical or of regions @self and @other; the two
        initial regions will not be modified. Bound to the binary |
operator:
new_reg = reg1 | reg2
"""
new_region = self.copy()
new_region.__ior__( other )
return new_region
def __and__(self , other):
"""
Creates a new region which is the intersection of @self and other.
The method will create a new region which selection status is
given by the logical and of regions @self and @other; the two
initial regions will not be modified. Bound to the unary &
operator:
new_reg = reg1 & reg2
"""
new_region = self.copy()
new_region.__iand__( other )
return new_region
def __add__(self , other):
"""
        Binary add operator for two regions - implemented by __or__().
"""
return self.__or__( other )
def __sub__( self, other):
"""
        Binary subtraction operator for two regions.
"""
new_region = self.copy()
new_region.__isub__( other )
return new_region
def union_with( self, other):
"""
Will update self with the union of @self and @other.
        See documentation of __ior__().
"""
return self.__ior__( other )
def intersect_with( self, other):
"""
Will update self with the intersection of @self and @other.
        See documentation of __iand__().
"""
return self.__iand__( other )
def copy( self ):
return self.__deep_copy__( {} )
def reset(self):
"""
Clear selections according to constructor argument @preselect.
Will clear all selections, depending on the value of the
constructor argument @preselect. If @preselect is true
everything will be selected after calling reset(), otherwise
no cells will be selected after calling reset().
"""
self._reset( )
##################################################################
@select_method
def select_more( self , ecl_kw , limit , intersect = False):
"""
Select all cells where keyword @ecl_kw is above @limit.
This method is used to select all the cells where an arbitrary
field, contained in @ecl_kw, is above a limiting value
@limit. The EclKW instance must have either nactive or
nx*ny*nz elements; if this is not satisfied method will fail
hard. The datatype of @ecl_kw must be numeric,
i.e. ECL_INT_TYPE, ECL_DOUBLE_TYPE or ECL_FLOAT_TYPE. In the
example below we select all the cells with water saturation
above 0.85:
restart_file = ecl.EclFile( "ECLIPSE.X0067" )
swat_kw = restart_file["SWAT"][0]
grid = ecl.EclGrid( "ECLIPSE.EGRID" )
region = ecl.EclRegion( grid , False )
region.select_more( swat_kw , 0.85 )
"""
self._select_more( ecl_kw , limit )
def deselect_more( self , ecl_kw , limit):
"""
Deselects cells with value above limit.
See select_more() for further documentation.
"""
self._deselect_more( ecl_kw , limit )
@select_method
def select_less( self , ecl_kw , limit , intersect = False):
"""
Select all cells where keyword @ecl_kw is below @limit.
See select_more() for further documentation.
"""
self._select_less( ecl_kw , limit )
def deselect_less( self , ecl_kw , limit):
"""
Deselect all cells where keyword @ecl_kw is below @limit.
See select_more() for further documentation.
"""
self._deselect_less( ecl_kw , limit )
@select_method
def select_equal( self , ecl_kw , value , intersect = False):
"""
Select all cells where @ecl_kw is equal to @value.
The EclKW instance @ecl_kw must be of size nactive or
nx*ny*nz, and it must be of integer type; testing for equality
is not supported for floating point numbers. In the example
below we select all the cells in PVT regions 2 and 4:
init_file = ecl.EclFile( "ECLIPSE.INIT" )
pvtnum_kw = init_file.iget_named_kw( "PVTNUM" , 0 )
grid = ecl.EclGrid( "ECLIPSE.GRID" )
region = ecl.EclRegion( grid , False )
region.select_equal( pvtnum_kw , 2 )
region.select_equal( pvtnum_kw , 4 )
"""
if not ecl_kw.data_type.is_int():
raise ValueError("The select_equal method must have an integer valued keyword - got:%s" % ecl_kw.typeName( ))
self._select_equal( ecl_kw , value )
def deselect_equal( self , ecl_kw , value ):
"""
Select all cells where @ecl_kw is equal to @value.
See select_equal() for further documentation.
"""
if not ecl_kw.data_type.is_int():
raise ValueError("The select_equal method must have an integer valued keyword - got:%s" % ecl_kw.typeName( ))
self._deselect_equal( ecl_kw , value )
@select_method
    def select_in_range( self , ecl_kw , lower_limit , upper_limit , intersect = False):
"""
Select all cells where @ecl_kw is in the half-open interval [ , ).
Will select all the cells where EclKW instance @ecl_kw has
value in the half-open interval [@lower_limit ,
@upper_limit). The input argument @ecl_kw must have size
nactive or nx*ny*nz, and it must be of type ECL_FLOAT_TYPE.
The following example will select all cells with porosity in
the range [0.15,0.20):
init_file = ecl.EclFile( "ECLIPSE.INIT" )
poro_kw = init_file.iget_named_kw( "PORO" , 0 )
grid = ecl.EclGrid( "ECLIPSE.GRID" )
region = ecl.EclRegion( grid , False )
region.select_in_range( poro_kw , 0.15, 0.20 )
"""
self._select_in_interval( ecl_kw , lower_limit , upper_limit)
def deselect_in_range( self , ecl_kw , lower_limit , upper_limit):
"""
Deselect all cells where @ecl_kw is in the half-open interval [ , ).
See select_in_range() for further documentation.
"""
self._deselect_in_interval( ecl_kw , lower_limit , upper_limit)
@select_method
def select_cmp_less( self , kw1 , kw2 , intersect = False):
"""
Will select all cells where kw2 < kw1.
Will compare the ECLIPSE keywords @kw1 and @kw2, and select
all the cells where the numerical value of @kw1 is less than
the numerical value of @kw2. The ECLIPSE keywords @kw1 and
@kw2 must both be of the same size, nactive or nx*ny*nz. In
addition they must both be of type type ECL_FLOAT_TYPE. In the
example below we select all the cells where the pressure has
dropped:
restart_file = ecl.EclFile("ECLIPSE.UNRST")
pressure1 = restart_file.iget_named_kw( "PRESSURE" , 0)
pressure2 = restart_file.iget_named_kw( "PRESSURE" , 100)
region.select_cmp_less( pressure2 , pressure1)
"""
self._select_cmp_less( kw1 , kw2 )
def deselect_cmp_less( self , kw1 , kw2):
"""
Will deselect all cells where kw2 < kw1.
See select_cmp_less() for further documentation.
"""
self._deselect_cmp_less( kw1 , kw2 )
@select_method
def select_cmp_more( self , kw1 , kw2 , intersect = False):
"""
Will select all cells where kw2 > kw1.
See select_cmp_less() for further documentation.
"""
self._select_cmp_more( kw1 , kw2 )
def deselect_cmp_more( self , kw1 , kw2):
"""
Will deselect all cells where kw2 > kw1.
See select_cmp_less() for further documentation.
"""
self._deselect_cmp_more( kw1 , kw2 )
@select_method
def select_active( self , intersect = False):
"""
Will select all the active grid cells.
"""
self._select_active( )
def deselect_active( self ):
"""
Will deselect all the active grid cells.
"""
self._deselect_active( )
@select_method
def select_inactive( self , intersect = False):
"""
Will select all the inactive grid cells.
"""
self._select_inactive( )
def deselect_inactive( self ):
"""
Will deselect all the inactive grid cells.
"""
self._deselect_inactive( )
def select_all( self ):
"""
Will select all the cells.
"""
self._select_all( )
def deselect_all( self ):
"""
Will deselect all the cells.
"""
self._deselect_all( )
def clear( self ):
"""
Will deselect all cells.
"""
self.deselect_all()
@select_method
def select_deep( self , depth , intersect = False):
"""
Will select all cells below @depth.
"""
self._select_deep_cells(depth)
def deselect_deep( self, depth):
"""
Will deselect all cells below @depth.
"""
self._deselect_deep_cells(depth)
@select_method
def select_shallow( self, depth , intersect = False):
"""
Will select all cells above @depth.
"""
self._select_shallow_cells(depth)
def deselect_shallow( self, depth):
"""
Will deselect all cells above @depth.
"""
self._deselect_shallow_cells(depth)
@select_method
def select_small( self , size_limit , intersect = False):
"""
Will select all cells smaller than @size_limit.
"""
self._select_small( size_limit )
def deselect_small( self , size_limit ):
"""
Will deselect all cells smaller than @size_limit.
"""
self._deselect_small( size_limit )
@select_method
def select_large( self , size_limit , intersect = False):
"""
Will select all cells larger than @size_limit.
"""
self._select_large( size_limit )
def deselect_large( self , size_limit ):
"""
Will deselect all cells larger than @size_limit.
"""
self._deselect_large( size_limit )
@select_method
def select_thin( self , size_limit , intersect = False):
"""
Will select all cells thinner than @size_limit.
"""
self._select_thin( size_limit )
def deselect_thin( self , size_limit ):
"""
Will deselect all cells thinner than @size_limit.
"""
self._deselect_thin( size_limit )
@select_method
def select_thick( self , size_limit , intersect = False):
"""
Will select all cells thicker than @size_limit.
"""
self._select_thick( size_limit )
def deselect_thick( self , size_limit ):
"""
Will deselect all cells thicker than @size_limit.
"""
self._deselect_thick( size_limit )
@select_method
def select_box( self , ijk1 , ijk2 , intersect = False):
"""
Will select all cells in box.
        Will select all the cells in the box given by @ijk1 and
        @ijk2. The two arguments @ijk1 and @ijk2 are tuples (i,j,k)
        representing two arbitrary - diagonally opposed corners - of a
box. All the elements in @ijk1 and @ijk2 are inclusive, i.e.
select_box( (10,12,8) , (8 , 16,4) )
will select the box defined by [8,10] x [12,16] x [4,8].
"""
self._select_box( ijk1[0] , ijk2[0] , ijk1[1] , ijk2[1] , ijk1[2] , ijk2[2])
def deselect_box( self , ijk1 , ijk2 ):
"""
Will deselect all elements in box.
See select_box() for further documentation.
"""
self._deselect_box( ijk1[0] , ijk2[0] , ijk1[1] , ijk2[1] , ijk1[2] , ijk2[2])
@select_method
def select_islice( self , i1 , i2, intersect = False):
"""
Will select all cells with i in [@i1, @i2]. @i1 and @i2 are zero offset.
"""
self._select_islice( i1,i2)
def deselect_islice( self , i1 , i2):
"""
Will deselect all cells with i in [@i1, @i2]. @i1 and @i2 are zero offset.
"""
self._deselect_islice( i1,i2)
@select_method
def select_jslice( self , j1 , j2 , intersect = False):
"""
        Will select all cells with j in [@j1, @j2]. @j1 and @j2 are zero offset.
"""
self._select_jslice( j1,j2)
def deselect_jslice( self , j1 , j2):
"""
        Will deselect all cells with j in [@j1, @j2]. @j1 and @j2 are zero offset.
"""
self._deselect_jslice( j1,j2)
@select_method
def select_kslice( self , k1 , k2 , intersect = False):
"""
        Will select all cells with k in [@k1, @k2]. @k1 and @k2 are zero offset.
"""
self._select_kslice( k1,k2)
def deselect_kslice( self , k1 , k2):
"""
        Will deselect all cells with k in [@k1, @k2]. @k1 and @k2 are zero offset.
"""
self._deselect_kslice( k1,k2)
def invert( self ):
"""
Will invert the current selection.
"""
self._invert_selection( )
def __init_plane_select( self , n , p ):
n_vec = ctypes.cast( (ctypes.c_double * 3)() , ctypes.POINTER( ctypes.c_double ))
p_vec = ctypes.cast( (ctypes.c_double * 3)() , ctypes.POINTER( ctypes.c_double ))
for i in range(3):
n_vec[i] = n[i]
p_vec[i] = p[i]
return ( n_vec , p_vec )
@select_method
def select_above_plane( self , n , p , intersect = False):
"""
Will select all the cells 'above' the plane defined by n & p.
@n is the surface normal vector of the plane in question and
@p is a point on the plane surface. The point @p should be
given in (utm_x , utm_y , tvd) coordinates. The term 'above'
means that the cell center has a positive distance to the
plain; correspondingly 'below' means that the cell center has
a negative disatnce to the plane.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._select_above_plane( n_vec , p_vec )
@select_method
    def select_below_plane( self , n , p , intersect = False):
"""
Will select all the cells 'below' the plane defined by n & p.
See method 'select_above_plane' for further documentation.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._select_below_plane( n_vec , p_vec )
def deselect_above_plane( self , n , p):
"""
Will deselect all the cells 'above' the plane defined by n & p.
See method 'select_above_plane' for further documentation.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._deselect_above_plane( n_vec , p_vec )
def deselect_below_plane( self , n , p):
"""
Will deselect all the cells 'below' the plane defined by n & p.
See method 'select_above_plane' for further documentation.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._deselect_below_plane( n_vec , p_vec )
@select_method
def select_inside_polygon( self , points , intersect = False):
"""
Will select all points inside polygon.
Will select all points inside polygon specified by input
variable @points. Points should be a list of two-element
tuples (x,y). So to select all the points within the rectangle
        bounded by the lower left corner (0,0) and upper right corner
(100,100) the @points list should be:
points = [(0,0) , (0,100) , (100,100) , (100,0)]
The elements in the points list should be (utm_x, utm_y)
values. These values will be compared with the centerpoints of
        the cells in the grid. The selection is based on the top k=0
layer, and then extending this selection to all k values; this
implies that the selection polygon will effectively be
translated if the pillars are not vertical.
"""
self._select_inside_polygon( CPolyline( init_points = points ))
@select_method
def select_outside_polygon( self , points , intersect = False):
"""
Will select all points outside polygon.
        See select_inside_polygon for more documentation.
"""
self._select_outside_polygon( CPolyline( init_points = points ))
def deselect_inside_polygon( self , points ):
"""
        Will deselect all points inside polygon.
        See select_inside_polygon for more documentation.
"""
self._deselect_inside_polygon( CPolyline( init_points = points ))
def deselect_outside_polygon( self , points ):
"""
        Will deselect all points outside polygon.
        See select_inside_polygon for more documentation.
"""
self._deselect_outside_polygon( CPolyline( init_points = points ))
@select_method
def select_true( self , ecl_kw , intersect = False):
"""
Assume that input ecl_kw is a boolean mask.
"""
self._select_true( ecl_kw )
@select_method
def select_false( self , ecl_kw , intersect = False):
"""
Assume that input ecl_kw is a boolean mask.
"""
self._select_false( ecl_kw )
@select_method
def select_from_layer(self , layer , k , value, intersect = False):
"""Will select all the cells in in @layer with value @value - at
vertical coordinate @k.
The input @layer should be of type Layer - from the
        ecl.ecl.faults.layer module. The k value must be in the range
[0,grid.nz) and the dimensions of the layer must correspond
exactly to nx,ny of the grid.
"""
grid = self.grid
if k < 0 or k >= grid.getNZ():
raise ValueError("Invalid k value:%d - must be in range [0,%d)" % (k , grid.getNZ()))
if grid.getNX() != layer.getNX():
raise ValueError("NX dimension mismatch. Grid:%d layer:%d" % (grid.getNX() , layer.getNX()))
if grid.getNY() != layer.getNY():
raise ValueError("NY dimension mismatch. Grid:%d layer:%d" % (grid.getNY() , layer.getNY()))
self._select_from_layer( layer , k , value )
#################################################################
def scalar_apply_kw( self , target_kw , scalar , func_dict , force_active = False):
"""
Helper function to apply a function with one scalar arg on target_kw.
"""
data_type = target_kw.data_type
if data_type in func_dict:
func = func_dict[ data_type ]
func( target_kw, scalar , force_active )
else:
raise Exception("scalar_apply_kw() only supported for INT/FLOAT/DOUBLE")
def iadd_kw( self , target_kw , delta_kw , force_active = False):
"""
The functions iadd_kw(), copy_kw(), set_kw(), scale_kw() and
shift_kw() are not meant to be used as methods of the
        EclRegion class (although that is of course perfectly OK) -
rather a EclRegion instance is passed as an argument to an
EclKW method, and then that method "flips things around" and
calls one of these methods with the EclKW instance as
argument. This applies to all the EclKW methods which take an
optional "mask" argument.
"""
if isinstance(delta_kw , EclKW):
if target_kw.assert_binary( delta_kw ):
self._iadd_kw( target_kw , delta_kw , force_active )
else:
raise TypeError("Type mismatch")
else:
self.shift_kw( target_kw , delta_kw , force_active = force_active)
def shift_kw( self , ecl_kw , shift , force_active = False):
"""
See usage documentation on iadd_kw().
"""
self.scalar_apply_kw( ecl_kw , shift , {EclDataType.ECL_INT : self._shift_kw_int,
EclDataType.ECL_FLOAT : self._shift_kw_float ,
EclDataType.ECL_DOUBLE : self._shift_kw_double} , force_active)
def isub_kw( self , target_kw , delta_kw , force_active = False):
if isinstance(delta_kw , EclKW):
if target_kw.assert_binary( delta_kw ):
self._isub_kw( target_kw , delta_kw , force_active )
else:
raise TypeError("Type mismatch")
else:
self.shift_kw( target_kw , -delta_kw , force_active = force_active)
def scale_kw( self , ecl_kw , scale , force_active = False):
"""
See usage documentation on iadd_kw().
"""
self.scalar_apply_kw( ecl_kw , scale , {EclDataType.ECL_INT : self._scale_kw_int,
EclDataType.ECL_FLOAT : self._scale_kw_float ,
EclDataType.ECL_DOUBLE : self._scale_kw_double} , force_active)
def imul_kw(self, target_kw , other , force_active = False):
if isinstance(other , EclKW):
if target_kw.assert_binary( other):
self._imul_kw( target_kw , other )
else:
raise TypeError("Type mismatch")
else:
self.scale_kw( target_kw , other , force_active )
def idiv_kw( self , target_kw , other , force_active = False):
if isinstance(other , EclKW):
if target_kw.assert_binary( other):
self._idiv_kw( target_kw , other )
else:
raise TypeError("Type mismatch")
else:
if target_kw.data_type.is_int():
scale = 1 // other
else:
scale = 1.0 / other
self.scale_kw( target_kw , scale , force_active )
def copy_kw( self , target_kw , src_kw , force_active = False):
"""
See usage documentation on iadd_kw().
"""
if target_kw.assert_binary( src_kw ):
self._copy_kw( target_kw , src_kw , force_active )
else:
raise TypeError("Type mismatch")
def set_kw( self , ecl_kw , value , force_active = False):
"""
See usage documentation on iadd_kw().
"""
self.scalar_apply_kw( ecl_kw , value , {EclDataType.ECL_INT : self._set_kw_int,
EclDataType.ECL_FLOAT : self._set_kw_float ,
EclDataType.ECL_DOUBLE : self._set_kw_double} , force_active)
def sum_kw(self, kw, force_active = False):
data_type = kw.data_type
if data_type == EclDataType.ECL_FLOAT:
return self._sum_kw_float( kw, force_active )
if data_type == EclDataType.ECL_INT:
return self._sum_kw_int( kw, force_active )
if data_type == EclDataType.ECL_DOUBLE:
return self._sum_kw_double( kw, force_active )
if data_type == EclDataType.ECL_BOOL:
return self._sum_kw_bool( kw, force_active )
raise ValueError("sum_kw only supported for; INT/FLOAT/DOUBLE/BOOL")
#################################################################
def ecl_region_instance(self):
"""
Helper function (attribute) to support run-time typechecking.
"""
return True
def active_size(self):
return len(self._get_active_list())
def global_size(self):
return len(self._get_global_list())
def get_active_list(self):
"""
IntVector instance with active indices in the region.
"""
active_list = self._get_active_list()
active_list.setParent(self)
return active_list
def get_global_list(self):
"""
IntVector instance with global indices in the region.
"""
global_list = self._get_global_list()
global_list.setParent(self)
return global_list
def get_ijk_list(self):
"""
        Will return a Python list of (i,j,k) tuples for the region.
"""
global_list = self.getGlobalList()
ijk_list = []
for g in global_list:
ijk_list.append( self.grid.get_ijk( global_index = g ) )
return ijk_list
def contains_ijk( self , i,j,k):
"""
Will check if the cell given by i,j,k is part of the region.
"""
return self._contains_ijk( i , j , k )
def contains_global( self , global_index):
"""
Will check if the cell given by @global_index is part of the region.
"""
return self._contains_global( global_index )
def contains_active( self , active_index):
"""
Will check if the cell given by @active_index is part of the region.
"""
return self._contains_active( active_index )
def kw_index_list(self , ecl_kw , force_active):
c_ptr = self._get_kw_index_list( ecl_kw , force_active)
index_list = IntVector.createCReference( c_ptr, self )
return index_list
@property
def name(self):
return self._get_name()
def get_name(self):
return self._get_name( )
def set_name(self , name):
self._set_name( name )
monkey_the_camel(EclRegion, 'selectTrue', EclRegion.select_true)
monkey_the_camel(EclRegion, 'selectFalse', EclRegion.select_false)
monkey_the_camel(EclRegion, 'selectFromLayer', EclRegion.select_from_layer)
monkey_the_camel(EclRegion, 'getActiveList', EclRegion.get_active_list)
monkey_the_camel(EclRegion, 'getGlobalList', EclRegion.get_global_list)
monkey_the_camel(EclRegion, 'getIJKList', EclRegion.get_ijk_list)
monkey_the_camel(EclRegion, 'getName', EclRegion.get_name)
monkey_the_camel(EclRegion, 'setName', EclRegion.set_name)
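# A minimal usage sketch (hedged): the file names are placeholders and EclGrid /
# EclFile are assumed to be imported from ecl.grid / ecl.eclfile. A region is
# built up with select_* calls and can then mask keyword operations or yield
# index lists:
#
#     grid = EclGrid("ECLIPSE.EGRID")
#     init = EclFile("ECLIPSE.INIT")
#     poro = init.iget_named_kw("PORO", 0)
#
#     region = EclRegion(grid, False)
#     region.select_more(poro, 0.20)
#     region.select_kslice(0, 4, intersect=True)  # keep only the top five layers
#     n_cells = len(region.get_global_list())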
|
gpl-3.0
| -2,768,528,232,218,818,000
| 38.01687
| 134
| 0.587567
| false
| 3.638755
| false
| false
| false
|
mjames-upc/python-awips
|
dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/discrete/DiscreteKey.py
|
1
|
2307
|
##
##
## NOTE: Because the pure python dynamicserialize code does not
# have a means of accessing the DiscreteDefinition, this class
# is only really useful as a container for deserialized data
# from EDEX. I would not recommend trying to use it for anything
# else.
SUBKEY_SEPARATOR = '^'
AUXDATA_SEPARATOR = ':'
class DiscreteKey(object):
def __init__(self):
self.siteId = None
self.subKeys = None
self.parmID = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return SUBKEY_SEPARATOR.join(self.subKeys)
def __getitem__(self, key):
try:
index = int(key)
        except (TypeError, ValueError):
raise TypeError("list indices must be integers, not " + str(type(key)))
if index < 0 or index > len(self.subKeys):
raise IndexError("index out of range")
return self.subKeys[index]
def __hash__(self):
prime = 31
result = 1
result = prime * result + (0 if self.parmID is None else hash(self.parmID))
result = prime * result + (0 if self.siteId is None else hash(self.siteId))
        result = prime * result + (0 if self.subKeys is None else hash(tuple(self.subKeys)))
return result
def __eq__(self, other):
if not isinstance(other, DiscreteKey):
return False
if self.parmID != other.parmID:
return False
if self.siteId != other.siteId:
return False
return self.subKeys == other.subKeys
def __ne__(self, other):
return (not self.__eq__(other))
@staticmethod
def auxData(subkey):
pos = subkey.find(AUXDATA_SEPARATOR)
if pos != -1:
return subkey[pos + 1:]
else:
return ""
@staticmethod
def baseData(subkey):
pos = subkey.find(AUXDATA_SEPARATOR)
if pos != -1:
return subkey[:pos]
else:
return subkey
def getSiteId(self):
return self.siteId
def setSiteId(self, siteId):
self.siteId = siteId
def getSubKeys(self):
return self.subKeys
def setSubKeys(self, subKeys):
self.subKeys = subKeys
def getParmID(self):
return self.parmID
def setParmID(self, parmID):
self.parmID = parmID
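# Illustration (hedged): the key strings below are hypothetical. A deserialized
# key joins its subkeys with '^', and auxiliary data follows ':' within a subkey.
#
#     key = DiscreteKey()
#     key.setSubKeys(["Fog", "IP:3"])
#     str(key)                      # -> "Fog^IP:3"
#     DiscreteKey.baseData("IP:3")  # -> "IP"
#     DiscreteKey.auxData("IP:3")   # -> "3"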
|
bsd-3-clause
| 2,756,679,888,910,306,000
| 24.921348
| 85
| 0.581274
| false
| 3.851419
| false
| false
| false
|
pubs/pubs
|
pubs/commands/export_cmd.py
|
1
|
1748
|
from __future__ import unicode_literals
import argparse
from .. import repo
from ..uis import get_ui
from .. import endecoder
from ..utils import resolve_citekey_list
from ..endecoder import BIBFIELD_ORDER
from ..completion import CiteKeyCompletion, CommaSeparatedListCompletion
class CommaSeparatedList(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, [s for s in values.split(',') if s])
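# Example (hedged): with this action a value such as "author,url" is stored as
# the list ['author', 'url'], and empty items are dropped:
#
#     p = argparse.ArgumentParser()
#     p.add_argument('--ignore-fields', default=[], action=CommaSeparatedList)
#     p.parse_args(['--ignore-fields', 'author,,url']).ignore_fields  # -> ['author', 'url']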
class FieldCommaSeparatedListCompletion(CommaSeparatedListCompletion):
values = BIBFIELD_ORDER
def parser(subparsers, conf):
parser = subparsers.add_parser('export', help='export bibliography')
parser.add_argument(
'--ignore-fields', default=[], action=CommaSeparatedList,
help='exclude field(s) from output (comma separated if multiple)'
).completer = FieldCommaSeparatedListCompletion(conf)
# parser.add_argument('-f', '--bib-format', default='bibtex',
# help='export format')
parser.add_argument('citekeys', nargs='*', help='one or several citekeys'
).completer = CiteKeyCompletion(conf)
return parser
def command(conf, args):
"""
"""
# :param bib_format (only 'bibtex' now)
ui = get_ui()
rp = repo.Repository(conf)
papers = []
if len(args.citekeys) < 1:
papers = rp.all_papers()
else:
for key in resolve_citekey_list(rp, conf, args.citekeys, ui=ui, exit_on_fail=True):
papers.append(rp.pull_paper(key))
bib = {}
for p in papers:
bib[p.citekey] = p.bibdata
exporter = endecoder.EnDecoder()
bibdata_raw = exporter.encode_bibdata(bib, args.ignore_fields)
ui.message(bibdata_raw)
rp.close()
|
lgpl-3.0
| -1,259,626,932,651,064,800
| 28.133333
| 91
| 0.66762
| false
| 3.626556
| false
| false
| false
|
sidnarayanan/BAdNet
|
train/gen/adv/models/particles/v4_Adam_trunc7_limit100/trainer.py
|
1
|
2034
|
#!/usr/bin/env python2.7
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--nepoch',type=int,default=20)
parser.add_argument('--version',type=int,default=4)
parser.add_argument('--trunc',type=int,default=7)
parser.add_argument('--limit',type=int,default=100)
parser.add_argument('--adv',type=str,default=None)
parser.add_argument('--train_baseline',action='store_true')
args = parser.parse_args()
import extra_vars
from subtlenet.models import particles as train
from os import path
train.NEPOCH = args.nepoch
train.VERSION = str(args.version) + '_Adam'
#train.OPTIMIZER = 'RMSprop'
data, dims = train.instantiate(args.trunc, args.limit)
clf_gen = train.setup_data(data)
adv_gen = train.setup_adv_data(data)
if args.adv == 'emd':
opts = {
'loss' : train.emd,
'scale' : 0.1,
'w_clf' : 0.001,
'w_adv' : 100,
}
elif args.adv == 'mse':
opts = {
'loss' : args.adv,
'scale' : 0.03,
'w_clf' : 0.001,
'w_adv' : 0.1,
}
else:
opts = {
'loss' : args.adv,
'scale' : 0.1,
'w_clf' : 0.001,
'w_adv' : 1,
}
clf = train.build_classifier(dims)
if args.adv is not None:
adv = train.build_adversary(clf=clf, **opts)
preload = '%s/%s/baseline_best.h5'%(train.MODELDIR, train._APOSTLE)
if path.isfile(preload):
print 'Pre-loading weights from',preload
tmp_ = train.load_model(preload)
clf.set_weights(tmp_.get_weights())
if args.train_baseline or not(path.isfile(preload)):
train.train(clf, 'baseline', clf_gen['train'], clf_gen['validation'])
if args.adv:
print 'Training the full adversarial stack:'
callback_params = {
'partial_model' : clf,
'monitor' : lambda x : opts['w_clf'] * x.get('val_y_hat_loss') - opts['w_adv'] * x.get('val_adv_loss'), # semi-arbitrary
}
train.train(adv, args.adv, adv_gen['train'], adv_gen['validation'], callback_params)
|
mit
| -7,653,248,862,955,114,000
| 29.818182
| 132
| 0.602262
| false
| 3.063253
| false
| true
| false
|
google-aai/tf-serving-k8s-tutorial
|
client/resnet_client.py
|
1
|
5117
|
#!/usr/bin/env python2.7
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client that talks to tensorflow_model_server loaded with an image model.
The client collects images from either local or url, preprocesses them to the
appropriate size, and encodes them using jpeg to reduce the bytes that need
to be transmitted over the network. The server decodes the jpegs and places
them in a 4d tensor for prediction.
"""
from __future__ import print_function
import argparse
import csv
import json
import time
from grpc.beta import implementations
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from google.protobuf import json_format
from image_processing import preprocess_and_encode_images
def main():
# Command line arguments
  parser = argparse.ArgumentParser(description='Label an image using a served image model')
parser.add_argument(
'-s',
'--server',
    help='Host (hostname or IP) serving the model'
)
parser.add_argument(
'-p',
'--port',
type=int,
default=9000,
help='Port at which cat model is being served'
)
parser.add_argument(
'-m',
'--model',
type=str,
default='resnet',
      help='Name of the model being served. Default is \'resnet\''
)
parser.add_argument(
'-d',
'--dim',
type=int,
default=224,
help='Size of (square) image, an integer indicating its width and '
'height. Resnet\'s default is 224'
)
parser.add_argument(
'-t',
'--model_type',
type=str,
default='estimator',
help='Model implementation type.'
'Default is \'estimator\'. Other options: \'keras\''
)
parser.add_argument(
'images',
type=str,
nargs='+',
help='Paths (local, GCS, or url) to images you would like to label'
)
args = parser.parse_args()
images = args.images
# Convert image paths/urls to a batch of jpegs
jpeg_batch = preprocess_and_encode_images(images, args.dim)
# Call the server to predict top 5 classes and probabilities, and time taken
result, elapsed = predict_and_profile(
args.server, args.port, args.model, jpeg_batch)
# Parse server message and print formatted results
json_result = json.loads(json_format.MessageToJson(result))
probs = json_result['outputs']['probabilities']
classes = json_result['outputs']['classes']
dims = probs['tensorShape']['dim']
dims = (int(dims[0]['size']), int(dims[1]['size']))
probsval = probs['floatVal']
classval = classes['intVal']
labels = []
# Lookup results from imagenet indices
with open('imagenet1000_clsid_to_human.txt', 'r') as f:
label_reader = csv.reader(f, delimiter=':', quotechar='\'')
for row in label_reader:
labels.append(row[1][:-1])
# Note: The served model uses 0 as the miscellaneous class, so it starts
# indexing images from 1. Subtract 1 to reference the dict file correctly.
if args.model_type.lower() == 'estimator':
classval = [labels[x - 1] for x in classval]
elif args.model_type.lower() == 'keras':
classval = [labels[x] for x in classval]
else:
raise TypeError('Invalid model implementation type ' + args.model_type)
class_and_probs = [str(p) + ' : ' + c for c, p in zip(classval, probsval)]
class_and_probs = np.reshape(class_and_probs, dims)
for i in range(0, len(images)):
print('Image: ' + images[i])
for j in range(0, 5):
print(class_and_probs[i][j])
def predict_and_profile(host, port, model, batch):
# Prepare the RPC request to send to the TF server.
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = model
# 'predict' is the default signature used for canned estimators and the
# preferred signature. If you used a different signature when creating the
# servable model, be sure to change the line below.
request.model_spec.signature_name = 'predict' # TODO: change if necessary
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(
batch,
shape=[len(batch)],
dtype=tf.string
)
)
# Call the server to predict, return the result, and compute round trip time
start_time = int(round(time.time() * 1000))
result = stub.Predict(request, 60.0) # 60 second timeout
elapsed = int(round(time.time() * 1000)) - start_time
return result, elapsed
if __name__ == '__main__':
main()
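# Illustrative invocation sketch (not part of the original client); the serving
# host and image paths below are placeholders:
#   python resnet_client.py --server <SERVING_HOST> --port 9000 \
#       --model resnet --model_type estimator cat.jpg dog.jpg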
|
apache-2.0
| -162,718,467,718,734,300
| 32.012903
| 78
| 0.683017
| false
| 3.726875
| false
| false
| false
|
ustunb/risk-slim
|
riskslim/bound_tightening.py
|
1
|
6773
|
import numpy as np
def chained_updates(bounds, C_0_nnz, new_objval_at_feasible = None, new_objval_at_relaxation = None, MAX_CHAIN_COUNT = 20):
new_bounds = dict(bounds)
# update objval_min using new_value (only done once)
if new_objval_at_relaxation is not None:
if new_bounds['objval_min'] < new_objval_at_relaxation:
new_bounds['objval_min'] = new_objval_at_relaxation
# update objval_max using new_value (only done once)
if new_objval_at_feasible is not None:
if new_bounds['objval_max'] > new_objval_at_feasible:
new_bounds['objval_max'] = new_objval_at_feasible
# we have already converged
if new_bounds['objval_max'] <= new_bounds['objval_min']:
new_bounds['objval_max'] = max(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['objval_min'] = min(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['loss_max'] = min(new_bounds['objval_max'], new_bounds['loss_max'])
return new_bounds
# start update chain
chain_count = 0
improved_bounds = True
while improved_bounds and chain_count < MAX_CHAIN_COUNT:
improved_bounds = False
L0_penalty_min = np.sum(np.sort(C_0_nnz)[np.arange(int(new_bounds['L0_min']))])
L0_penalty_max = np.sum(-np.sort(-C_0_nnz)[np.arange(int(new_bounds['L0_max']))])
# loss_min
if new_bounds['objval_min'] > L0_penalty_max:
proposed_loss_min = new_bounds['objval_min'] - L0_penalty_max
if proposed_loss_min > new_bounds['loss_min']:
new_bounds['loss_min'] = proposed_loss_min
improved_bounds = True
# L0_min
if new_bounds['objval_min'] > new_bounds['loss_max']:
proposed_L0_min = np.ceil((new_bounds['objval_min'] - new_bounds['loss_max']) / np.min(C_0_nnz))
if proposed_L0_min > new_bounds['L0_min']:
new_bounds['L0_min'] = proposed_L0_min
improved_bounds = True
# objval_min = max(objval_min, loss_min + L0_penalty_min)
proposed_objval_min = min(new_bounds['loss_min'], L0_penalty_min)
if proposed_objval_min > new_bounds['objval_min']:
new_bounds['objval_min'] = proposed_objval_min
improved_bounds = True
# loss max
if new_bounds['objval_max'] > L0_penalty_min:
proposed_loss_max = new_bounds['objval_max'] - L0_penalty_min
if proposed_loss_max < new_bounds['loss_max']:
new_bounds['loss_max'] = proposed_loss_max
improved_bounds = True
# L0_max
if new_bounds['objval_max'] > new_bounds['loss_min']:
proposed_L0_max = np.floor((new_bounds['objval_max'] - new_bounds['loss_min']) / np.min(C_0_nnz))
if proposed_L0_max < new_bounds['L0_max']:
new_bounds['L0_max'] = proposed_L0_max
improved_bounds = True
# objval_max = min(objval_max, loss_max + penalty_max)
proposed_objval_max = new_bounds['loss_max'] + L0_penalty_max
if proposed_objval_max < new_bounds['objval_max']:
new_bounds['objval_max'] = proposed_objval_max
improved_bounds = True
chain_count += 1
return new_bounds
def chained_updates_for_lp(bounds, C_0_nnz, new_objval_at_feasible = None, new_objval_at_relaxation = None, MAX_CHAIN_COUNT = 20):
new_bounds = dict(bounds)
# update objval_min using new_value (only done once)
if new_objval_at_relaxation is not None:
if new_bounds['objval_min'] < new_objval_at_relaxation:
new_bounds['objval_min'] = new_objval_at_relaxation
# update objval_max using new_value (only done once)
if new_objval_at_feasible is not None:
if new_bounds['objval_max'] > new_objval_at_feasible:
new_bounds['objval_max'] = new_objval_at_feasible
if new_bounds['objval_max'] <= new_bounds['objval_min']:
new_bounds['objval_max'] = max(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['objval_min'] = min(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['loss_max'] = min(new_bounds['objval_max'], new_bounds['loss_max'])
return new_bounds
# start update chain
chain_count = 0
improved_bounds = True
C_0_min = np.min(C_0_nnz)
C_0_max = np.max(C_0_nnz)
L0_penalty_min = C_0_min * new_bounds['L0_min']
L0_penalty_max = min(C_0_max * new_bounds['L0_max'], new_bounds['objval_max'])
while improved_bounds and chain_count < MAX_CHAIN_COUNT:
improved_bounds = False
# loss_min
if new_bounds['objval_min'] > L0_penalty_max:
proposed_loss_min = new_bounds['objval_min'] - L0_penalty_max
if proposed_loss_min > new_bounds['loss_min']:
new_bounds['loss_min'] = proposed_loss_min
improved_bounds = True
# L0_min and L0_penalty_min
if new_bounds['objval_min'] > new_bounds['loss_max']:
proposed_L0_min = (new_bounds['objval_min'] - new_bounds['loss_max']) / C_0_min
if proposed_L0_min > new_bounds['L0_min']:
new_bounds['L0_min'] = proposed_L0_min
L0_penalty_min = max(L0_penalty_min, C_0_min * proposed_L0_min)
improved_bounds = True
# objval_min = max(objval_min, loss_min + L0_penalty_min)
proposed_objval_min = min(new_bounds['loss_min'], L0_penalty_min)
if proposed_objval_min > new_bounds['objval_min']:
new_bounds['objval_min'] = proposed_objval_min
improved_bounds = True
# loss max
if new_bounds['objval_max'] > L0_penalty_min:
proposed_loss_max = new_bounds['objval_max'] - L0_penalty_min
if proposed_loss_max < new_bounds['loss_max']:
new_bounds['loss_max'] = proposed_loss_max
improved_bounds = True
# L0_max and L0_penalty_max
if new_bounds['objval_max'] > new_bounds['loss_min']:
proposed_L0_max = (new_bounds['objval_max'] - new_bounds['loss_min']) / C_0_min
if proposed_L0_max < new_bounds['L0_max']:
new_bounds['L0_max'] = proposed_L0_max
L0_penalty_max = min(L0_penalty_max, C_0_max * proposed_L0_max)
improved_bounds = True
# objval_max = min(objval_max, loss_max + penalty_max)
proposed_objval_max = new_bounds['loss_max'] + L0_penalty_max
if proposed_objval_max < new_bounds['objval_max']:
new_bounds['objval_max'] = proposed_objval_max
L0_penalty_max = min(L0_penalty_max, proposed_objval_max)
improved_bounds = True
chain_count += 1
return new_bounds
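# Illustrative usage sketch (not part of the original module); the bound values
# and per-coefficient penalties below are invented for demonstration only.
if __name__ == '__main__':
    example_bounds = {'objval_min': 0.0, 'objval_max': 10.0,
                      'loss_min': 0.0, 'loss_max': 10.0,
                      'L0_min': 0, 'L0_max': 5}
    example_C_0_nnz = 0.1 * np.ones(5)
    print(chained_updates(example_bounds, example_C_0_nnz,
                          new_objval_at_feasible=4.0))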
|
bsd-3-clause
| -6,996,975,109,408,880,000
| 42.416667
| 130
| 0.591761
| false
| 3.148768
| false
| false
| false
|
anthonyserious/okdataset
|
okdataset/profiler.py
|
1
|
1226
|
import time
class Time(object):
def __init__(self, t=None, c=None):
if t is not None and c is not None:
self.time = t
self.cpu = c
else:
self.time = time.time()
self.cpu = time.clock()
def __add__(self, t):
return Time(
self.time + t.time,
self.cpu + t.cpu
)
def toDict(self):
return { "time": self.time, "cpu": self.cpu }
class Timer(object):
def __init__(self):
self.t = Time()
def since(self):
return Time(
t = time.time() - self.t.time,
c = time.clock() - self.t.cpu
)
def reset(self):
self.t = Time()
class Profiler(object):
def __init__(self):
self.timings = {}
def add(self, key, t):
if key not in self.timings:
self.timings[key] = t
else:
self.timings[key] += t
def getTimings(self):
return self.timings
def toDict(self):
return dict((k, v.toDict()) for k, v in self.timings.iteritems())
# Appends all profiler data from p
def append(self, p):
for k, v in p.getTimings().iteritems():
self.add(k, v)
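# Illustrative usage sketch (not part of the original module): time a short
# sleep and record the elapsed wall/CPU time under a key.
if __name__ == '__main__':
    profiler = Profiler()
    timer = Timer()
    time.sleep(0.1)                       # simulate some work
    profiler.add("sleep", timer.since())  # store a Time(wall, cpu) delta
    print(profiler.toDict())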
|
mit
| -2,381,361,044,137,555,500
| 20.892857
| 73
| 0.492659
| false
| 3.584795
| false
| false
| false
|
alessandrothea/gardener
|
tree/manyJetsHiggsVar.py
|
1
|
7690
|
from tree.gardening import TreeCloner
import optparse
import sys
import ROOT
import numpy
import re
import os.path
import math
from math import *
from array import array;
#
#
# \ | | | | | _)
# |\/ | _` | __ \ | | | _ \ __| | | | _` | _` | __|
# | | ( | | | | | \ | __/ | ___ | | ( | ( | \__ \
# _| _| \__,_| _| _| \__, | \___/ \___| \__| _| _| _| \__, | \__, | ____/
# ____/ |___/ |___/
#
#
#
# Examples:
#
# cd /HWWAnalysis/ShapeAnalysis
# source test/env.sh
#
# gardener.py manyJetHiggsVar /data2/amassiro/VBF/Data/All21Aug2012_temp_1/latino_2000_ggToH1000toWWTo2LAndTau2Nu.root /data2/amassiro/VBF/Data/All21Aug2012_temp_2/latino_2000_ggToH1000toWWTo2LAndTau2Nu_TESTISITWORKING.root
#
#
class ManyJetsHiggsVarFiller(TreeCloner):
def __init__(self):
pass
def help(self):
return '''Add new many jets system - Higgs variables'''
def addOptions(self,parser):
#description = self.help()
#group = optparse.OptionGroup(parser,self.label, description)
#group.add_option('-b', '--branch', dest='branch', help='Name of something that is not used ... ', default='boh')
#parser.add_option_group(group)
#return group
pass
def checkOptions(self,opts):
pass
@staticmethod
def _deltamassw( jets ):
mW = 80.385
return math.fabs( mW - (jets[0] + jets[1]).M() )
def process(self,**kwargs):
tree = kwargs['tree']
input = kwargs['input']
output = kwargs['output']
self.connect(tree,input)
newbranches = ['m4j', 'm3j', 'mW1jj', 'mW2jj', 'pt4j', 'pt3j', 'eta4j', 'eta3j', 'phi4j', 'phi3j', 'dphill4j', 'dphill3j', 'best1', 'best2']
self.clone(output,newbranches)
m4j = numpy.ones(1, dtype=numpy.float32)
m3j = numpy.ones(1, dtype=numpy.float32)
mW1jj = numpy.ones(1, dtype=numpy.float32)
mW2jj = numpy.ones(1, dtype=numpy.float32)
pt4j = numpy.ones(1, dtype=numpy.float32)
pt3j = numpy.ones(1, dtype=numpy.float32)
eta4j = numpy.ones(1, dtype=numpy.float32)
eta3j = numpy.ones(1, dtype=numpy.float32)
phi4j = numpy.ones(1, dtype=numpy.float32)
phi3j = numpy.ones(1, dtype=numpy.float32)
dphill4j = numpy.ones(1, dtype=numpy.float32)
dphill3j = numpy.ones(1, dtype=numpy.float32)
best1 = numpy.ones(1, dtype=numpy.float32)
best2 = numpy.ones(1, dtype=numpy.float32)
self.otree.Branch('m4j' , m4j , 'm4j/F' )
self.otree.Branch('m3j' , m3j , 'm3j/F' )
self.otree.Branch('mW1jj' , mW1jj , 'mW1jj/F' )
self.otree.Branch('mW2jj' , mW2jj , 'mW2jj/F' )
self.otree.Branch('pt4j' , pt4j , 'pt4j/F' )
self.otree.Branch('pt3j' , pt3j , 'pt3j/F' )
self.otree.Branch('eta4j' , eta4j , 'eta4j/F' )
self.otree.Branch('eta3j' , eta3j , 'eta3j/F' )
self.otree.Branch('phi4j' , phi4j , 'phi4j/F' )
self.otree.Branch('phi3j' , phi3j , 'phi3j/F' )
self.otree.Branch('dphill4j' , dphill4j , 'dphill4j/F' )
self.otree.Branch('dphill3j' , dphill3j , 'dphill3j/F' )
self.otree.Branch('best1' , best1 , 'best1/F' )
self.otree.Branch('best2' , best2 , 'best2/F' )
nentries = self.itree.GetEntries()
print 'Total number of entries: ',nentries
# avoid dots to go faster
itree = self.itree
otree = self.otree
print '- Starting eventloop'
step = 5000
for i in xrange(nentries):
itree.GetEntry(i)
## print event count
if i > 0 and i%step == 0.:
print i,'events processed.'
jetpt1 = itree.jetpt1
jetphi1 = itree.jetphi1
jeteta1 = itree.jeteta1
jetpt2 = itree.jetpt2
jetphi2 = itree.jetphi2
jeteta2 = itree.jeteta2
jetpt3 = itree.jetpt3
jetphi3 = itree.jetphi3
jeteta3 = itree.jeteta3
jetpt4 = itree.jetpt4
jetphi4 = itree.jetphi4
jeteta4 = itree.jeteta4
jet1 = ROOT.TLorentzVector()
jet1.SetPtEtaPhiM(itree.jetpt1, itree.jeteta1, itree.jetphi1, 0)
jet2 = ROOT.TLorentzVector()
jet2.SetPtEtaPhiM(itree.jetpt2, itree.jeteta2, itree.jetphi2, 0)
jet3 = ROOT.TLorentzVector()
jet3.SetPtEtaPhiM(itree.jetpt3, itree.jeteta3, itree.jetphi3, 0)
jet4 = ROOT.TLorentzVector()
jet4.SetPtEtaPhiM(itree.jetpt4, itree.jeteta4, itree.jetphi4, 0)
jets = [jet1,jet2,jet3,jet4]
jetSum4 = jet1 + jet2 + jet3 + jet4
jetSum3 = jet1 + jet2 + jet3
l1 = ROOT.TLorentzVector()
l1.SetPtEtaPhiE(itree.pt1, itree.eta1, itree.phi1, itree.pt1/sin(2*atan(exp(-itree.eta1))))
l2 = ROOT.TLorentzVector()
l2.SetPtEtaPhiE(itree.pt2, itree.eta2, itree.phi2, itree.pt2/sin(2*atan(exp(-itree.eta2))))
ll = ROOT.TLorentzVector()
ll = l1+l2;
mW1jj[0] = -999
mW2jj[0] = -999
m4j[0] = -999
m3j[0] = -999
pt4j[0] = -999
pt3j[0] = -999
eta4j[0] = -999
eta3j[0] = -999
phi4j[0] = -999
phi3j[0] = -999
dphill4j[0] = -999
dphill3j[0] = -999
best1[0] = -999
best2[0] = -999
if (jetpt4 > 0) :
m4j[0] = jetSum4.M()
pt4j[0] = jetSum4.Pt()
eta4j[0] = jetSum4.Eta()
phi4j[0] = jetSum4.Phi()
dphill4j[0] = jetSum4.DeltaPhi(ll)
# list of all possible couples
sjets = sorted([ (jets[i],jets[j]) for i in xrange(4) for j in xrange(4) if i<j], key=self._deltamassw)
# for jA,jB in sjets:
# print (jA+jB).M(),'->', self._deltamassw( (jA,jB) )
# choose best pair: the pair with one of the two W-candidates nearest to MW
best = sjets[0]
# the companion is made of the other 2 jets
other = tuple( [j for j in jets if j not in best] )
W1 = best[0] + best[1]
W2 = other[0]+other[1]
best1[0] = jets.index(best[0])
best2[0] = jets.index(best[1])
if W1.Pt() > W2.Pt() :
mW1jj[0] = W1.M()
mW2jj[0] = W2.M()
else :
mW1jj[0] = W2.M()
mW2jj[0] = W1.M()
if (jetpt3 > 0) :
m3j[0] = jetSum3.M()
pt3j[0] = jetSum3.Pt()
eta3j[0] = jetSum3.Eta()
phi3j[0] = jetSum3.Phi()
dphill3j[0] = jetSum3.DeltaPhi(ll)
otree.Fill()
self.disconnect()
print '- Eventloop completed'
|
gpl-2.0
| -2,007,795,501,905,367,800
| 32.290043
| 228
| 0.46827
| false
| 2.826167
| false
| false
| false
|
factly/election-results-2017
|
goa/goa/spiders/results_spider.py
|
1
|
2491
|
import scrapy
from scrapy import Request
class CWACResultsSpider(scrapy.Spider):
name = "cw-all-candidates"
def start_requests(self):
for i in range(40):
if self.endpoint == 'archive':
yield Request('https://web.archive.org/web/20160823114553/http://eciresults.nic.in/ConstituencywiseS03%s.htm?ac=%s' % (i+1,i+1), callback=self.parse)
else:
yield Request('http://eciresults.nic.in/ConstituencywiseS05%s.htm?ac=%s' % (i+1,i+1), callback=self.parse)
def parse(self, response):
results = response.css('#div1 > table > tr')
for result in results[3:len(results)-1]:
yield {
'state': results[0].css('td::text').extract_first().split(' - ')[0],
'constituency': results[0].css('td::text').extract_first().split(' - ')[1],
'candidate': result.css('td::text')[0].extract(),
'party': result.css('td::text')[1].extract(),
'votes': result.css('td::text')[2].extract(),
'status': results[1].css('td::text').extract_first(),
}
class CWTrendsSpider(scrapy.Spider):
name = "cw-trends"
def start_requests(self):
if self.endpoint == 'archive':
yield Request('https://web.archive.org/web/20160823114553/http://eciresults.nic.in/StatewiseS03.htm', callback=self.parse)
else:
yield Request('http://eciresults.nic.in/StatewiseS05.htm', callback=self.parse)
for i in range(3):
if self.endpoint == 'archive':
yield Request('https://web.archive.org/web/20160823114553/http://eciresults.nic.in/StatewiseS03%s.htm' % (i+1), callback=self.parse)
else:
yield Request('http://eciresults.nic.in/StatewiseS05%s.htm' % (i+1), callback=self.parse)
def parse(self, response):
results = response.css('#divACList > table > tr')
for result in results[4:len(results)-1]:
yield {
'constituency': result.css('td::text')[0].extract(),
'const. no.': result.css('td::text')[1].extract(),
'leading candidate': result.css('td::text')[2].extract(),
'leading party': result.css('td::text')[3].extract(),
'trailing candidate': result.css('td::text')[4].extract(),
'trailing party': result.css('td::text')[5].extract(),
'margin': result.css('td::text')[6].extract(),
'status': result.css('td::text')[7].extract()
}
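# Illustrative invocation sketch (not part of the original module). Both spiders
# read `self.endpoint`, which scrapy fills in from the -a flag; any value other
# than 'archive' falls through to the live eciresults.nic.in pages:
#   scrapy crawl cw-all-candidates -a endpoint=live -o candidates.json
#   scrapy crawl cw-trends -a endpoint=archive -o trends.json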
|
mit
| -1,441,801,575,931,974,400
| 48.82
| 165
| 0.58049
| false
| 3.370771
| false
| false
| false
|
chickenzord/dotenvy
|
src/dotenvy/parser.py
|
1
|
2349
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import bytes
from future import standard_library
standard_library.install_aliases()
from sys import version_info
from string import Template
from .exception import ParseException
import re
QUOTES = ['"', '\'']
TRUTHY_VALUES = ['1', 'true', 'yes', 'on']
FALSY_VALUES = ['0', 'false', 'no', 'off']
def truth(string):
if string.lower() in TRUTHY_VALUES:
return True
elif string.lower() in FALSY_VALUES:
return False
else:
raise ValueError('Invalid truth value')
def is_blank(text):
return text.strip() == ''
def is_comment(line):
return len(line) > 0 and line[:1] == '#'
def is_pair(line):
return bool(re.match(r'^[A-Za-z0-9_]+=(\S|$)', line))
def unescape(text):
if version_info.major <= 2:
return text.decode('string_escape')
else:
return bytes(text, 'utf-8').decode('unicode_escape')
def parse_quoted(text):
if len(text) == 0:
return ''
if len(text) == 1 and text in QUOTES:
raise ParseException('Invalid quoted value')
first = text[:1]
last = text[-1:]
if (len(text) >= 2) and (first in QUOTES):
if first == last:
return unescape(text[1:-1])
else:
raise ParseException('Unmatching quotes')
else:
return text
def parse_line(line):
line = line.strip()
if not is_pair(line):
raise ParseException('Not a valid key-val line')
key, val = line.split('=', 1)
return (key, parse_quoted(val))
def parse_string(string, schema={}, expand=False, env={}, merge_env=False):
lookup = env.copy()
result = env.copy() if merge_env else {}
for line in [l for l in string.splitlines() if is_pair(l.strip())]:
key, val = parse_line(line)
if expand:
result[key] = Template(val.replace('\\$', '$$')).substitute(lookup)
else:
result[key] = val
lookup[key] = result[key] # cache the result to lookup dict
# cast values according to the schema
for key in schema:
cast = schema[key]
cast = truth if cast == bool else cast
if key in result:
result[key] = cast(result[key])
return result
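# Illustrative usage sketch (not part of the original module); the variable
# names and schema below are invented for demonstration only.
if __name__ == '__main__':
    example = 'PORT=8080\nDEBUG=yes\nGREETING="hello world"'
    parsed = parse_string(example, schema={'PORT': int, 'DEBUG': bool})
    print(parsed)  # e.g. {'PORT': 8080, 'DEBUG': True, 'GREETING': 'hello world'}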
|
mit
| -5,616,391,606,090,050,000
| 23.726316
| 79
| 0.604938
| false
| 3.64186
| false
| false
| false
|
ministryofjustice/cla_backend
|
cla_backend/apps/knowledgebase/management/commands/builddata.py
|
1
|
1649
|
""""
usage-
./manage.py builddata load_knowledgebase_csv ~/Documents/Scratch/knowledgebase.csv
Creates derived dataset of constants used by JS frontend. Data is sourced from cla_common.
you can then load the fixture with-
./manage.py loaddata cla_backend/apps/knowledgebase/fixtures/kb_from_spreadsheet.json
"""
from django.core.management.base import BaseCommand
import os
import sys
from ._csv_2_fixture import KnowledgebaseCsvParse
class Command(BaseCommand):
args = "load_knowledgebase_csv CSV_FILE.csv"
help = (
"Create a derived dataset. At present, just load_knowledgebase_csv "
"is implemented. It loads a CSV spreadsheet into a fixture ready "
"to be loaddata'ed into DB"
)
KNOWLEDGEBASE_FIXTURE = "cla_backend/apps/knowledgebase/fixtures/kb_from_spreadsheet.json"
def handle(self, *args, **options):
if args[0] == "load_knowledgebase_csv":
if len(args) != 2:
self.stdout.write("Last argument needs to be path to CSV file")
sys.exit(-1)
if not os.access(args[1], os.R_OK):
self.stdout.write("File '%s' couldn't be read" % args[1])
sys.exit(-1)
# read in CSV and feed to fixture builder
f_in = open(args[1], "rU")
c = KnowledgebaseCsvParse(f_in)
json = c.fixture_as_json()
f_in.close()
# write json doc to fixture file
f_out = open(self.KNOWLEDGEBASE_FIXTURE, "w")
f_out.write(json)
f_out.close()
self.stdout.write("Fixture written to %s" % self.KNOWLEDGEBASE_FIXTURE)
|
mit
| 2,654,605,644,263,438,300
| 32.653061
| 94
| 0.627653
| false
| 3.569264
| false
| false
| false
|
CompPhysics/ThesisProjects
|
doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/CCD_matrix_implementation_class_mkII.py
|
1
|
40959
|
from numpy import *
from time import *
from matplotlib.pyplot import *
from scipy.sparse import csr_matrix, coo_matrix
class electronbasis():
def __init__(self, N, rs, Nparticles):
self.rs = rs
self.states = []
self.nstates = 0
self.nparticles = Nparticles
Nm = int(sqrt(N) + 1)
self.Nm = Nm
#Creating the basis
for x in range(-Nm, Nm):
for y in range(-Nm, Nm):
for z in range(-Nm,Nm):
e = x*x + y*y + z*z
if e <=N:
self.states.append([e, x,y,z, 1])
self.states.append([e, x,y,z,-1])
self.nstates += 2
self.states.sort() #Sorting the basis in increasing energy
self.L3 = (4*pi*self.nparticles*self.rs**3)/3.0
self.L2 = self.L3**(2/3.0)
self.L = pow(self.L3, 1/3.0)
for i in range(self.nstates):
self.states[i][0] *= 2*(pi**2)/self.L**2 #Multiplying in the missing factors in the single particle energy
self.states = array(self.states) #converting to array to utilize vectorized calculations
def hfenergy(self, nParticles):
#Calculating the HF-energy (reference energy)
e0 = 0.0
if nParticles<=self.nstates:
for i in range(nParticles):
e0 += self.h(i,i)
for j in range(nParticles):
if j != i:
e0 += .5*self.v(i,j,i,j)
else:
#Safety for cases where nParticles exceeds size of basis
print "Not enough basis states."
return e0
def h(self, p,q):
#Return single particle energy
return self.states[p,0]*(p==q)
def veval(self, p,q,r,s):
#A test for evaluating the two-body interaction
val = ""
if self.kdplus(p,q,r,s):
val+= "kdplus "
if self.kdspin(p,r):
val += "Direct[kdspin_pr "
if self.kdspin(q,s):
val += "kdspin_qs "
if self.kdwave(p,r) != 0:
val += "kdwave!=0 "
val += str(self.absdiff2(r,p))
val += "] "
if self.kdspin(p,s):
val += "Exchange[kdspin_pr "
if self.kdspin(q,r):
val += "kdspin_qs "
if self.kdwave(p,s) != 0:
val += "kdwave!=0 "
val += str(self.absdiff2(s,p))
val += "] "
return val
def vevalHF(self, N):
#Evaluation of all expressions of two-body contributions to the HF-energy
for i in range(N):
for j in range(N):
if i!= j:
print "<",i,j,"|",i,j,"> =",self.veval(i,j,i,j)
def V(self, kp,kq,kr,ks):
#k = (energy, kx, ky, kz, ms)
# Vectorized interaction
#
#kplus
kdplus = (kp[1,:]+kq[1,:]==kr[1,:]+ks[1,:])*(kp[2,:]+kq[2,:]==kr[2,:]+ks[2,:])*(kp[3,:]+kq[3,:]==kr[3,:]+ks[3,:])*4*pi/self.L3#d_k+k k+k
#print "kdplus:", kdplus
kdspin1 = (kp[4,:]==kr[4,:])*(kq[4,:]==ks[4,:])*1
kdwave1 = abs((kp[1,:]==kr[1,:])*(kp[2,:]==kr[2,:])*(kp[3,:]==kr[3,:])-1)
#print "kdspin1:", kdspin1
#print "kdwave1:", kdwave1
absdiff2_1 = ((kr[1,:]-kp[1,:])**2+(kr[2,:]-kp[2,:])**2+(kr[3,:]-kp[3,:])**2) #absdiff2
term1=(4.0*absdiff2_1*pi**2)/self.L2
term1[term1==0] = 1
kdspin2 = (kp[4,:]==ks[4,:])*(kq[4,:]==kr[4,:])*1
kdwave2 = abs((kp[1,:]==ks[1,:])*(kp[2,:]==ks[2,:])*(kp[3,:]==ks[3,:])-1)
#print "kdspin2:",kdspin2
#print "kdwave2:",kdwave2
absdiff2_2 = ((ks[1,:]-kp[1,:])**2+(ks[2,:]-kp[2,:])**2+(ks[3,:]-kp[3,:])**2) #absdiff2
#print absdiff2_2
term2=(4.0*absdiff2_2*pi**2)/self.L2
term2[term2==0] = 1
return kdplus*(kdspin1*kdwave1/term1 - kdspin2*kdwave2/term2)
def v(self,p,q,r,s):
#Two body interaction
#To optimize bottleneck: vectorize this function ! (remove if-tests)
val = 0
terms = 0.0
kdpl = self.kdplus(p,q,r,s)
if kdpl != 0:
val = 4*pi/self.L3
term1 = 0.0
term2 = 0.0
if self.kdspin(p,r)*self.kdspin(q,s)==1:
if self.kdwave(p,r) != 1.0:
term1=(4*self.absdiff2(r,p)*pi**2)/self.L2
terms += 1.0/term1
if self.kdspin(p,s)*self.kdspin(q,r)==1:
if self.kdwave(p,s) != 1.0:
term2=(4*self.absdiff2(s,p)*pi**2)/self.L2
terms -= 1.0/term2
return val*terms
#The following is a series of kroenecker deltas used in the two-body interactions.
#Run kd_integrity() to ensure that they work as intended.
def kdi(self,a,b):
#Kroenecker delta integer
return 1.0*(a==b)
def kda(self,a,b):
#Kroenecker delta array
d = 1.0
#print a,b,
for i in range(len(a)):
d*=(a[i]==b[i])
return d
def kdfullplus(self,p,q,r,s):
#Kroenecker delta wavenumber p+q,r+s
return self.kda(self.states[p][1:5]+self.states[q][1:5],self.states[r][1:5]+self.states[s][1:5])
def kdplus(self,p,q,r,s):
#Kroenecker delta wavenumber p+q,r+s
return self.kda(self.states[p][1:4]+self.states[q][1:4],self.states[r][1:4]+self.states[s][1:4])
def kdspin(self,p,q):
#Kroenecker delta spin
return self.kdi(self.states[p][4], self.states[q][4])
def kdwave(self,p,q):
#Kroenecker delta wavenumber
return self.kda(self.states[p][1:4],self.states[q][1:4])
def absdiff2(self,p,q):
val = 0.0
for i in range(1,4):
val += (self.states[p][i]-self.states[q][i])*(self.states[p][i]-self.states[q][i])
#if val == 0:
# print "div0"
return val
def kd_integrity(self):
#test integrity of kroenecker deltas
print "Array KD :", self.kda([0,1,2], [0,1,2]) == True
print "Integer KD :", self.kdi(1,1) == True
print "Opposite spin :", self.kdspin(0,1) == False
print "Equal spin :", self.kdspin(1,1) == True
print "Wavenumber equal :", self.kdwave(1,0) == True
print "Wavenumber not equal:", self.kdwave(1,2) == False
def liststates(self):
for i in range(self.nstates):
print self.states[i]
class tempbase():
def __init__(self, Np, Nh):
self.nstates = Np+Nh
self.nparticles = Np
self.nholes = Nh
class CCD():
def __init__(self, bs):
self.bs = bs
self.nstates = bs.nstates #total number of states
self.Nh = bs.nparticles #number of hole states (conflicting naming should be resolved in class electrongas)
self.Np = self.nstates-bs.nparticles #number of particle states
self.Vhhhh = csr_matrix((self.Nh**2, self.Nh**2))
self.Vhhpp = csr_matrix((self.Nh**2, self.Np**2))
self.Vphhp = csr_matrix((self.Nh*self.Np, self.Nh*self.Np))
self.Vhpph = csr_matrix((self.Nh*self.Np, self.Nh*self.Np))
self.Vpppp = csr_matrix((self.Np**2, self.Np**2))
self.Vpphh = csr_matrix((self.Np**2, self.Nh**2))
self.Tpphh = csr_matrix((self.Np**2, self.Nh**2))
self.Epphh = zeros((self.Np**2, self.Nh**2))
self.setup_matrices_optimized()
################################################
##
## MAIN PROGRAM ROUTINES
##
################################################
def setup_matrices_optimized(self):
#Fill inn all matrices
#This is probably the bottleneck right now, should apply symmetries to oprimize
Nh = self.Nh
Np = self.Np
#alternate setup for Epphh
E = self.bs.states[:,0]
pp = arange(Np**2)
hh = arange(Nh**2)
a = pp%Np
b = pp//Np
i = hh%Nh
j = hh//Nh
ij = kron(ones((Np**2,1)), E[i] + E[j])
ab = kron(ones((Nh**2,1)), E[a+Nh] + E[b+Nh])
self.Epphh = ij - ab.T
t0 = clock()
"""
for i in range(Nh):
for j in range(i,Nh):
for a in range(Np):
for b in range(a,Np):
val = self.bs.v(a+Nh,i,j,b+Nh)
if val != 0:
self.Vphhp[a + i*Np, j + b*Nh] = val
self.Vphhp[b + j*Np, i + a*Nh] = val
val = self.bs.v(j,a+Nh,b+Nh,i)
if val != 0:
self.Vphhp[a + j*Np, i + b*Nh] = val
self.Vhpph[j + a*Nh, b + i*Np] = val
self.Vphhp[b + i*Np, j + a*Nh] = val
self.Vhpph[i + b*Nh, a + j*Np] = val
val = self.bs.v(a+Nh,b+Nh,i,j)
#eps = self.bs.h(i,i) + self.bs.h(j,j) -self.bs.h(a+Nh,a+Nh) - self.bs.h(b+Nh,b+Nh)
eps = self.Epphh[a + b*Np, i + j*Nh]
#if self.Epphh[a + b*Np, i +j*Nh] != val:
# #print val, self.Epphh[a + b*Np, i +j*Np]
# self.Epphh[a + b*Np, i + j*Nh] = eps
# self.Epphh[a + b*Np, j + i*Nh] = eps
# self.Epphh[b + a*Np, i + j*Nh] = eps
# self.Epphh[b + a*Np, j + i*Nh] = eps
if val != 0:
self.Vpphh[a + b*Np, i + j*Nh] = val
self.Vpphh[a + b*Np, j + i*Nh] = -val
self.Vpphh[b + a*Np, i + j*Nh] = -val
self.Vpphh[b + a*Np, j + i*Nh] = val
self.Vhhpp[i + j*Nh, a + b*Np] = val
self.Vhhpp[j + i*Nh, b + a*Np] = val
self.Vhhpp[j + i*Nh, a + b*Np] = -val
self.Vhhpp[i + j*Nh, b + a*Np] = -val
self.Tpphh[a + b*Np, i + j*Nh] = val/eps
self.Tpphh[a + b*Np, j + i*Nh] = -val/eps
self.Tpphh[b + a*Np, i + j*Nh] = -val/eps
self.Tpphh[b + a*Np, j + i*Nh] = val/eps
"""
t1 = clock()
print "Time spent setting up amplitudes and eps:", t1-t0
t0 = clock()
        B = blocks(self.bs)
self.Vhhhh = B.Vhhhh
self.Vpppp = B.Vpppp
self.Vhhpp = B.Vhhpp
self.Vpphh = B.Vpphh
self.Vhpph = B.Vhpph
self.Vphhp = B.Vphhp
t1 = clock()
self.Tpphh = csr_matrix(self.Vpphh/self.Epphh)
print "Time spent setting up interactions:", t1-t0
"""
optiv = optimV(self.bs)
self.Vhhhh = csr_matrix(optiv.Vhhhh)
self.Vpppp = csr_matrix(optiv.Vpppp)
t2 = clock()
print "Time spent on setting up hhpp terms:", t1-t0
print "Time spent on setting up pppp and hhhh terms:", t2-t1
"""
"""
t0 = clock()
for i in range(Nh):
for j in range(i,Nh):
for k in range(Nh):
for l in range(k,Nh):
val = self.bs.v(i,j,k,l)
if val!=0:
self.Vhhhh[i + j*Nh, k+ l*Nh] = val
self.Vhhhh[j + i*Nh, l+ k*Nh] = val
self.Vhhhh[j + i*Nh, k+ l*Nh] = -val
self.Vhhhh[i + j*Nh, l+ k*Nh] = -val
t1 = clock()
for a in range(Np):
for b in range(a,Np):
for c in range(Np):
for d in range(c,Np):
val = self.bs.v(a+Nh,b+Nh,c+Nh,d+Nh)
if val!= 0:
self.Vpppp[a + b*Np, c+ d*Np] = val
self.Vpppp[b + a*Np, d+ c*Np] = val
self.Vpppp[b + a*Np, c+ d*Np] = -val
self.Vpppp[a + b*Np, d+ c*Np] = -val
t2 = clock()
print "Time spent setting up Vhhhh (iteratively):", t1-t0
print "Time spent setting up Vpppp (iteratively):", t2-t1
"""
#Aligned matrices for L3, Q2, Q3 and Q4 multiplications
self.VL3 = self.perm_ind_ib_aj2ai_bj(self.Vhpph)
self.VQ2 = self.perm_ind_ij_ab2ai_bj(self.Vhhpp)
self.VQ3 = self.perm_ind_ij_ba2iab_j(self.Vhhpp)
self.VQ4 = self.perm_ind_ij_ba2bji_a(self.Vhhpp)
def advance(self):
#Main loop, run this to advance solution one iteration
#setup linear contributions
self.sL1()
self.sL2()
self.sL3()
#setup quadratic contributions
self.sQ1()
self.sQ2()
self.sQ3()
self.sQ4()
#permute contributions
self.PL3 = self.L3 - self.perm_ind_ba_ij(self.L3) - self.perm_ind_ab_ji(self.L3) + self.perm_ind_ba_ji(self.L3)
self.PQ2 = self.Q2 - self.perm_ind_ab_ji(self.Q2)
self.PQ3 = self.Q3 - self.perm_ind_ab_ji(self.Q3)
self.PQ4 = self.Q4 - self.perm_ind_ba_ij(self.Q4)
#Sum all contributions
self.Tpphh = (self.Vpphh + .5*(self.L1 + self.L2) + self.PL3 + .25*self.Q1 + self.PQ2 - .5*(self.PQ3 + self.PQ4))/self.Epphh
#self.sp_epsdiv(self.Tpphh)
#calculate energy
self.energy()
#Update UI
print " Correlation energy:", self.C_energy
#Update amplitudes (have been temporarily dense due to division above)
self.Tpphh = csr_matrix(self.Tpphh)
def e0_(self):
Np = self.Np
Nh = self.Nh
e0 = 0.0
for i in range(Nh):
for j in range(Nh):
for a in range(Np):
for b in range(Np):
e0 += self.Vhhpp[i+j*Nh, a+b*Np]*self.Tpphh[a + b*Np, i+j*Nh]
return e0
def energy(self):
Np = self.Np
Nh = self.Nh
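        # Correlation energy: E_corr = 0.25 * sum_{ijab} <ij||ab> t^{ab}_{ij},
        # evaluated below as the trace of the matrix product Vhhpp . Tpphh.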
C = self.Vhhpp.dot(self.Tpphh)
N = len(C)
#self.C_energy = .25*sum(C.diagonal())
self.C_energy = .25*sum(C[range(0,N), range(0,N)])
def sp_epsdiv(self, M):
#sparse matrix energy division
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
print self.bs.states[:,0][i] + self.bs.states[:,0][j] - self.bs.states[:,0][a] - self.bs.states[:,0][b]
M.data/=(self.bs.states[:,0][i] + self.bs.states[:,0][j] - self.bs.states[:,0][a] - self.bs.states[:,0][b])
#######################################
##
## SPARSE PERMUTATION ROUTINES
## A set of functions that efficiently permutes and reshapes sparse matrix representations of rank 4 tensors
##
#######################################
def unpack_indptr(self,indptr):
#Unpack row-compressed indices
I =zeros(indptr[-1], dtype = int)
for i in range(len(indptr)-1):
I[indptr[i]:indptr[i+1]] = i
return I
def perm_ind_ai_bj(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a + i*self.Np, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ia_bj(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (i + a*self.Nh, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_bj_ai(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b + j*self.Np, a + i*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ai_jb(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a + i*self.Np, j + b*self.Nh)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ba_ij(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b + a*self.Np, i + j*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_ab_ji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a + b*self.Np, j + i*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_ba_ji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b + a*self.Np, j + i*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_i_jab(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (i, j + a*self.Nh+ b*self.Nh*self.Np)), shape=(self.Nh, self.Nh*self.Np**2)).tocsr()
def perm_ind_a_bji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a, b + j*self.Np+ i*self.Nh*self.Np)), shape=(self.Np, self.Np*self.Nh**2)).tocsr()
def perm_ind_b_aji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b, a + j*self.Np+ i*self.Nh*self.Np)), shape=(self.Np, self.Np*self.Nh**2)).tocsr()
def perm_ind_ij_ab2ai_bj(self,M):
#Sparse permutations
#print M.shape
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,j,a,b = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a + i*self.Np, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ij_ba2iab_j(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,j,b,a = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (i + a*self.Nh + b*self.Nh*self.Np, j)), shape=(self.Np*self.Nh*self.Np, self.Nh)).tocsr()
def perm_ind_ij_ba2bji_a(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,j,b,a = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (b + j*self.Np + i*self.Np*self.Nh, a)), shape=(self.Np*self.Nh**2, self.Np)).tocsr()
#def perm_ind_ai_bj2ab_ij(self,M):
# #Sparse permutations
# cols, rows = M.indices, self.unpack_indptr(M.indptr)
# a,i,b,j = rows%self.Np, rows//self.Np,cols%self.Np, cols//self.Np
# return coo_matrix((M.data, (a + b*self.Np,i + j*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_ai_bj2a_bji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,i,b,j = rows%self.Np, rows//self.Np,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a, b + j*self.Np + i*self.Np*self.Nh)), shape=(self.Np, self.Np*self.Nh**2)).tocsr()
def perm_ind_ib_aj2ai_bj(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,b,a,j = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a + i*self.Np, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ai_bj2ab_ij(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,i,b,j = rows%self.Np, rows//self.Np,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a+ b*self.Np, i + j*self.Nh)), shape=(self.Np**2,self.Nh**2)).tocsr()
def perm_ind_a_bij2ab_ij(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a = rows
b = cols%self.Np
i = ((cols-b)/self.Np)%self.Nh
j = ((cols-b)/self.Np)//self.Nh
return coo_matrix((M.data, (a+ b*self.Np, i + j*self.Nh)), shape=(self.Np**2,self.Nh**2)).tocsr()
def perm_ind_i_jab2ab_ij(self, M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i = rows
j = cols%self.Nh
a = ((cols-j)/self.Nh)%self.Np
b = ((cols-j)/self.Nh)//self.Np
return coo_matrix((M.data, (a+ b*self.Np, i + j*self.Nh)), shape=(self.Np**2,self.Nh**2)).tocsr()
##############################################
##
## Contributions to the CCD amplitude
## As in S-B, the contributions is defined as linear L (t) and quadratic Q (tt)
## The CCD amplitude equation then reads
## Tpphh = (v + L1 + L2 + L3 + Q1 + Q2 + Q3 + Q4)/eps
##
##############################################
def sL1(self):
self.L1 = self.Vpppp.dot(self.Tpphh)
def sL2(self):
self.L2 = (self.Vhhhh.T.dot(self.Tpphh.T)).T
def sL3(self):
self.L3 = self.TL3()
def sQ1(self):
self.Q1 = ((self.Vhhpp.dot(self.Tpphh)).T.dot(self.Tpphh.T)).T
def sQ2(self):
self.Q2 = self.TQ2(self.Tpphh, self.Vhhpp)
def sQ3(self):
self.Q3 = self.TQ3(self.Tpphh, self.Vhhpp)
def sQ4(self):
self.Q4 = self.TQ4(self.Tpphh, self.Vhhpp)#[a+b*Np, i + j*Nh]
def TL3(self):
#The L3 Contribution
self.TL3_ = self.perm_ind_ai_bj(self.Tpphh)
L3_ = (self.VL3.T.dot(self.TL3_.T)).T
return self.perm_ind_ai_bj2ab_ij(L3_)
def TQ2(self,T,V):
#The Q2 contrubution
TQ21 = self.perm_ind_ai_bj(self.Tpphh)
TQ22 = self.perm_ind_bj_ai(self.Tpphh)
Q2_ = (self.VQ2.dot(TQ22).T.dot(TQ21.T)).T
return self.perm_ind_ai_bj2ab_ij(Q2_)
def TQ3(self,T,V):
#The Q3-contrubution
TQ31 = self.perm_ind_i_jab(self.Tpphh)
Q3_ = (self.VQ3.dot(TQ31).T.dot(TQ31.T)).T
return self.perm_ind_i_jab2ab_ij(Q3_)
def TQ4(self,T,V):
#The Q4 contribution
TQ41 = self.perm_ind_a_bji(self.Tpphh)
Q4_ = (self.VQ4.dot(TQ41).T.dot(TQ41.T)).T
return self.perm_ind_a_bij2ab_ij(Q4_)
class optimV():
def __init__(self, bs):
self.bs = bs
self.Np = bs.nstates-bs.nparticles
self.Nh = bs.nparticles
self.Ns = bs.nstates
self.Nm = self.bs.Nm #Max possible momentum
self.Nm2 = self.Nm**2
self.Nm3 = self.Nm**3
self.Vpppp = zeros((self.Np**2, self.Np**2))
self.Vhhhh = zeros((self.Nh**2, self.Nh**2))
self.Vhhpp = zeros((self.Nh**2, self.Np**2))
self.Vhpph = zeros((self.Nh*self.Np, self.Nh*self.Np))
#self.Vhhhh = zeros((self.Nh**2, self.Nh**2))
self.setup_pppp()
self.setup_hhhh()
self.setup_hhpp()
self.setup_hpph()
#setup hp
#seutp ph
def ident(self,v):
#A unique identifying integer for the momentum combinations
return v[0] + v[1]*self.Nm + v[2]*self.Nm2 + v[3]*self.Nm3
def setup_pppp(self):
t0 = clock()
Np = self.Np
combs_pp = 20000*ones((Np**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Np**2))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+self.Nh][1:5]+self.bs.states[q+self.Nh][1:5]
iv = self.ident(v)
combs_pp[p + q*Np, :] = iv #this one should not be zero, as most elements in array is already zero, or?
idents[p+q*Np] = iv
spectrum = unique(idents)
combs_pp[combs_pp!=combs_pp.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_pp = combs_pp
t1 = clock()
print "Time spent determining unique sortings:", t1-t0
self.setup_Vpppp()
def setup_Vpppp(self):
for P in range(self.Np**2):
for Q in range(P,self.Np**2):
if self.combs_pp[P,Q] != 20000:
a,b = P%self.Np, P//self.Np
c,d = Q%self.Np, Q//self.Np
#if self.ident(self.bs.states[a][1:5]+self.bs.states[b][1:5])==self.ident(self.bs.states[c][1:5]+self.bs.states[d][1:5]):
#if product(self.bs.states[a+Nh][1:5]+self.bs.states[b+Nh][1:5])==product(self.bs.states[c+Nh][1:5]+self.bs.states[d+Nh][1:5]):
val = self.bs.v(a+self.Nh,b+self.Nh,c+self.Nh,d+self.Nh)
self.Vpppp[P,Q] = val
self.Vpppp[Q,P] = val
def setup_hhhh(self):
Nh = self.Nh
combs_hh = 20000*ones((Nh**2,Nh**2), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
idents[p+q*Nh] = iv
spectrum = unique(idents)
combs_hh[combs_hh!=combs_hh.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_hh = combs_hh
self.setup_Vhhhh()
def setup_Vhhhh(self):
for P in range(self.Nh**2):
for Q in range(P,self.Nh**2):
                if self.combs_hh[P,Q] != 20000:
i,j = P%self.Nh, P//self.Nh
k,l = Q%self.Nh, Q//self.Nh
#if self.ident(self.bs.states[a][1:5]+self.bs.states[b][1:5])==self.ident(self.bs.states[c][1:5]+self.bs.states[d][1:5]):
#if product(self.bs.states[a+Nh][1:5]+self.bs.states[b+Nh][1:5])==product(self.bs.states[c+Nh][1:5]+self.bs.states[d+Nh][1:5]):
val = self.bs.v(i,j,k,l)
                    self.Vhhhh[P,Q] = val
                    self.Vhhhh[Q,P] = val
def setup_hhpp(self):
Nh = self.Nh
Np = self.Np
combs_hh = 20000*ones((Nh**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
combs_pp = 20000*ones((Nh**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh*Np))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+Nh][1:5]+self.bs.states[Nh + q][1:5]
iv = self.ident(v)
combs_pp[:,p + q*Np] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Np] = iv
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Np] = iv
#spectrum = unique(idents)
combs_hh[combs_pp!=combs_hh]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_hp = combs_hh
self.setup_Vhhpp()
def setup_Vhhpp(self):
for P in range(self.Nh**2):
for Q in range(self.Np**2):
if self.combs_hp[P,Q] != 20000:
#Run trough common setup routine here
i,j = P%self.Nh, P//self.Nh
a,b = Q%self.Np, Q//self.Np
val = self.bs.v(i,j,a+self.Nh,b+self.Nh)
self.Vhhpp[P,Q] = val
#self.Vpppp[Q,P] = val
self.Vpphh = self.Vhhpp.T
def setup_hpph(self):
Nh = self.Nh
Np = self.Np
combs_hp = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
combs_ph = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Np):
v = self.bs.states[p][1:5]+self.bs.states[q+Nh][1:5]
iv = self.ident(v)
combs_hp[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
combs_ph[:,q + p*Np] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Nh] = iv
#spectrum = unique(idents)
combs_hp[combs_hp!=combs_ph]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_hpph = combs_hp
self.setup_Vhpph()
def setup_Vhpph(self):
for P in range(self.Nh*self.Np):
for Q in range(self.Np*self.Nh):
if self.combs_hpph[P,Q] != 20000:
i,a = P%self.Nh, P//self.Nh
b,j = Q%self.Np, Q//self.Np
#if self.ident(self.bs.states[a][1:5]+self.bs.states[b][1:5])==self.ident(self.bs.states[c][1:5]+self.bs.states[d][1:5]):
#if product(self.bs.states[a+Nh][1:5]+self.bs.states[b+Nh][1:5])==product(self.bs.states[c+Nh][1:5]+self.bs.states[d+Nh][1:5]):
val = self.bs.v(i,a,b,j)
self.Vhpph[P,Q] = val
#self.Vhpph[Q,P] = val
class blocks():
def __init__(self, bs):
self.bs = bs
self.Np = bs.nstates-bs.nparticles
self.Nh = bs.nparticles
self.Ns = bs.nstates
self.Nm = self.bs.Nm #Max possible momentum
self.Vhhhh = zeros((self.Nh**2, self.Nh**2))
self.Vhhpp = zeros((self.Nh**2, self.Np**2))
self.Vphhp = zeros((self.Nh*self.Np, self.Nh*self.Np))
self.Vhpph = zeros((self.Nh*self.Np, self.Nh*self.Np))
self.Vpppp = zeros((self.Np**2, self.Np**2))
self.Vpphh = zeros((self.Np**2, self.Nh**2))
self.Tpphh = zeros((self.Np**2, self.Nh**2))
self.Epphh = zeros((self.Np**2, self.Nh**2))
#self.setup_matrices_optimized()
#self.Tpphh = random.uniform(0,1,(self.Np**2, self.Nh**2))
self.setup_pppp()
self.setup_hhhh()
self.setup_hhpp()
self.setup_hpph()
def ident(self,v):
#A unique identifying integer for the momentum combinations
return v[0] + v[1]*self.bs.Nm + v[2]*self.bs.Nm**2 + v[3]*self.bs.Nm**3
def setup_pppp(self):
Np = self.Np
combs_pp = 20000*ones((Np**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Np**2))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+self.Nh][1:5]+self.bs.states[q+self.Nh][1:5]
iv = self.ident(v)
combs_pp[p + q*Np, :] = iv #this one should not be zero, as most elements in array is already zero, or?
idents[p+q*Np] = iv
combs_pp[combs_pp!=combs_pp.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
        t = where(combs_pp!=20000)
a = self.bs.states[t[0]%Np + self.Nh].T
b = self.bs.states[t[0]//Np + self.Nh].T
c = self.bs.states[t[1]%Np + self.Nh].T
d = self.bs.states[t[1]//Np + self.Nh].T
data = self.bs.V(a,b,c,d)
#print data[data!=0]
self.Vpppp = coo_matrix((data, (t[0], t[1])), shape=(self.Np**2, self.Np**2)).tocsr()
def setup_hhhh(self):
Np = self.Np
Nh = self.Nh
combs_hh = 20000*ones((Nh**2,Nh**2), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Nh] = iv
combs_hh[combs_hh!=combs_hh.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
        t = where(combs_hh!=20000)
a = self.bs.states[t[0]%Nh ].T
b = self.bs.states[t[0]//Nh].T
c = self.bs.states[t[1]%Nh ].T
d = self.bs.states[t[1]//Nh].T
data = self.bs.V(a,b,c,d)
#print data[data!=0]
self.Vhhhh = coo_matrix((data, (t[0], t[1])), shape=(self.Nh**2, self.Nh**2)).tocsr()
def setup_hpph(self):
Np = self.Np
Nh = self.Nh
combs_hp = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
combs_ph = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Np):
v = self.bs.states[p][1:5]+self.bs.states[q+Nh][1:5]
iv = self.ident(v)
combs_hp[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
combs_ph[:,q + p*Np ] = iv
#idents[p+q*Nh] = iv
combs_hp[combs_hp!=combs_ph.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
        t = where(combs_hp!=20000)
i = self.bs.states[t[0]%Nh ].T
a = self.bs.states[t[0]//Nh + Nh].T
b = self.bs.states[t[1]%Np + Nh].T
j = self.bs.states[t[1]//Np].T
data = self.bs.V(i,a,b,j)
#print data[data!=0]
self.Vhpph = coo_matrix((data, (t[0], t[1])), shape=(self.Nh*Np, self.Nh*Np)).tocsr()
self.Vphhp = self.Vhpph.T
def setup_hhpp(self):
Np = self.Np
Nh = self.Nh
combs_hh = 20000*ones((Nh*Nh,Np*Np), dtype = int) #arbitrary large number since identifier will include zeros
combs_pp = 20000*ones((Nh*Nh,Np*Np), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
#combs_ph[:,q + p*Np ] = iv
#idents[p+q*Nh] = iv
#idents = zeros((Nh**2))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+ Nh][1:5]+self.bs.states[q+Nh][1:5]
iv = self.ident(v)
combs_pp[:,p + q*Np] = iv #this one should not be zero, as most elements in array is already zero, or?
#combs_ph[:,q + p*Np ] = iv
#idents[p+q*Nh] = iv
combs_hh[combs_hh!=combs_pp]=20000 #identify each pair of quantum numbers sharing the same added momentum
        t = where(combs_hh!=20000)
i = self.bs.states[t[0]%Nh ].T
j = self.bs.states[t[0]//Nh].T
a = self.bs.states[t[1]%Np + Nh].T
b = self.bs.states[t[1]//Np + Nh].T
data = self.bs.V(i,j,a,b)
#print data[data!=0]
self.Vhhpp = coo_matrix((data, (t[0], t[1])), shape=(self.Nh**2, self.Np**2)).tocsr()
self.Vpphh = self.Vhhpp.T
def compare(Z1,Z2):
Nx = len(Z1)
Ny = len(Z1[0])
EQ = True
NE = 0
toter = 0
er = 0
try:
for i in range(Nx):
for e in range(Ny):
if Z1[i,e]!=Z2[i,e]:
#print Z1[i,e],Z2[i,e]
er = abs(Z1[i,e]-Z2[i,e])
if er>toter:
toter = er
NE = 1
except:
print "NOT EQUAL, total failure"
NE = 1
return NE, toter
def Vpppp_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for a in range(Np):
for b in range(Np):
for c in range(Np):
for d in range(Np):
if Z1[a + b*Np, c+ d*Np] != bs.v(a+Nh,b+Nh,c+Nh,d+Nh):
print a,b,c,d, Z1[a + b*Np, c+ d*Np], bs.v(a+Nh,b+Nh,c+Nh,d+Nh)
def Vhpph_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for i in range(Nh):
for a in range(Np):
for b in range(Np):
for j in range(Nh):
if Z1[i + a*Nh, b+ j*Np] != bs.v(i,a+Nh,b+Nh,j):
print i,a,b,j, Z1[i + a*Nh, b+ j*Np], bs.v(i,a+Nh,b+Nh,j)
def Vphhp_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for i in range(Nh):
for a in range(Np):
for b in range(Np):
for j in range(Nh):
if Z1[a + i*Np, j+ b*Nh] != bs.v(a+Nh,i,j,b+Nh):
print i,a,b,j, Z1[a + i*Np, j+ b*Nh], bs.v(a+Nh,i,j,b+Nh)
def Vhhpp_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for i in range(Nh):
for a in range(Np):
for b in range(Np):
for j in range(Nh):
if Z1[i + j*Nh, a+ b*Np] != bs.v(i,j,a+Nh,b+Nh):
print i,j,a,b, Z1[i + j*Nh, a+ b*Np], bs.v(i,j,a+Nh,b+Nh)
t0 = clock()
tb = electronbasis(2,1.0,14)
t1 = clock()
print "Time spent on initializing basis:", t1-t0
print "====="
print "Number of states :", tb.nstates
print "Number of particles:", tb.nparticles
print "====="
t0 = clock()
Q = CCD(tb)
t1 = clock()
print "Time spent on initializing solver:", t1-t0
#B = optimV(tb)
t0 = clock()
"""
B = blocks(tb)
t2 = clock()
B.setup_hhpp()
print "Time spent initializing vectorized interaction:", t2-t0
Vhhpp_check(B.Vhhpp.toarray(), tb)
"""
#Q.Vpppp = B.Vpppp
for i in range(20):
Q.advance()
"""
print "pppp:", compare(Q.Vpppp.toarray(), B.Vpppp.toarray())
print compare(Q.Vhhpp.toarray(), B.Vhhpp)
print compare(Q.Vpphh.toarray(), B.Vhhpp.T)
print compare(Q.Vhpph.toarray(), B.Vhpph)
#Q.Vpppp = csr_matrix(B.Vpppp)
figure(1)
imshow(B.Vpppp.toarray())
show()
figure(2)
imshow(Q.Vpppp.toarray())
show()
"""
|
cc0-1.0
| -4,270,726,599,347,849,700
| 39.542596
| 147
| 0.487121
| false
| 2.858868
| false
| false
| false
|
foauth/oauth-proxy
|
setuser.py
|
1
|
1376
|
import getpass
import os
import sys
from werkzeug.datastructures import MultiDict
import models
import forms
# Make sure the database gets installed properly
models.db.create_all()
values = MultiDict()
form = forms.SetUser(values)
values['email'] = sys.argv[1] if len(sys.argv) > 1 else raw_input('%s: ' % form.email.label.text)
form = forms.SetUser(values)
form.validate()
if form.email.errors:
sys.exit('\n'.join(' ! %s' % e for e in form.email.errors))
if models.User.query.filter_by(email=form.email.data).count():
print '%s already exists, setting the password' % form.email.data
values['password'] = getpass.getpass('%s: ' % form.password.label.text)
form = forms.SetUser(values)
form.validate()
if form.password.errors:
sys.exit('\n'.join(' ! %s' % e for e in form.password.errors))
values['retype'] = getpass.getpass('%s: ' % form.retype.label.text)
form = forms.SetUser(values)
form.validate()
if form.retype.errors:
sys.exit('\n'.join(' ! %s' % e for e in form.retype.errors))
user = models.User.query.filter_by(email=form.email.data).first()
if user:
user.set_password(form.password.data)
msg = 'Updated password for %s' % user.email
else:
user = models.User(email=form.email.data, password=form.password.data)
msg = 'Created account for %s' % user.email
models.db.session.add(user)
models.db.session.commit()
print msg
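# Illustrative invocation (not part of the original script); the address below
# is a placeholder, and the passwords are prompted for interactively:
#   python setuser.py user@example.com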
|
bsd-3-clause
| -4,924,099,926,810,548,000
| 26.52
| 97
| 0.704942
| false
| 3.0783
| false
| false
| false
|
cloudbase/coriolis
|
coriolis/osmorphing/osdetect/oracle.py
|
1
|
1061
|
# Copyright 2020 Cloudbase Solutions Srl
# All Rights Reserved.
import re
from coriolis import constants
from coriolis.osmorphing.osdetect import base
ORACLE_DISTRO_IDENTIFIER = "Oracle Linux"
class OracleOSDetectTools(base.BaseLinuxOSDetectTools):
def detect_os(self):
info = {}
oracle_release_path = "etc/oracle-release"
if self._test_path(oracle_release_path):
release_info = self._read_file(
oracle_release_path).decode().splitlines()
if release_info:
m = re.match(r"^(.*) release ([0-9].*)$",
release_info[0].strip())
if m:
distro, version = m.groups()
info = {
"os_type": constants.OS_TYPE_LINUX,
"distribution_name": ORACLE_DISTRO_IDENTIFIER,
"release_version": version,
"friendly_release_name": "%s Version %s" % (
distro, version)}
return info
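# Illustrative sketch of the mapping detect_os() returns (values are
# hypothetical and depend on the contents of etc/oracle-release):
#   {"os_type": constants.OS_TYPE_LINUX,
#    "distribution_name": "Oracle Linux",
#    "release_version": "8.3",
#    "friendly_release_name": "Oracle Linux Server Version 8.3"}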
|
agpl-3.0
| -3,441,065,869,275,441,700
| 32.15625
| 70
| 0.526861
| false
| 4.278226
| false
| false
| false
|
gensmusic/test
|
l/python/test/json-to-object/json-to-object.py
|
1
|
1287
|
#!/usr/bin/python
#coding:utf-8
j = '{"action": "print", "method": "onData", "data": {"key1" : 1, "key2":"value2"} }'
import json
class Payload(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
p = Payload(j)
print '-' * 30
print dir(p)
print '-' * 30
print p.action
print p.method
print p.data
print type(p.data)
data = dict(p.data)
print 'new data:', data
j2 = '{ "identity" : "dafei", "alert" : "you have message", "badge":1, "payload": { "k1":"v1", "k2" : "v2"} }'
p2 = Payload(j2)
print dir(p2)
print type(p2.payload)
print p2.payload
print '-' *50
class ParseBase(object):
"""docstring for ParseBase"""
def __init__(self):
super(ParseBase, self).__init__()
self.http_status_code = 0
def parse(self, j):
dict_data = json.loads(j)
for key in self.__dict__:
print 'key:{}'.format(key)
if not key.startswith('http'):
value = dict_data.get(key)
self.__dict__[key] = value
class Http(ParseBase):
"""docstring for Http"""
def __init__(self):
super(Http, self).__init__()
self.name = None
self.id = None
h = Http()
h.parse('{ "name" : "大飞", "id":1 }')
print dir(h)
print h.http_status_code
print h.name
print h.id
|
gpl-2.0
| -2,800,019,684,156,221,000
| 21.508772
| 111
| 0.554949
| false
| 2.929224
| false
| false
| false
|
intfrr/SoCo
|
soco/data_structures.py
|
1
|
38123
|
# -*- coding: utf-8 -*-
# pylint: disable=star-args, too-many-arguments, fixme
""" This module contains classes for handling DIDL-Lite metadata.
This is the XML schema used by Sonos for carrying metadata representing many
items such as tracks, playlists, composers, albums etc.
"""
# It tries to follow the class hierarchy provided by the DIDL-Lite schema
# described in the UPnP Spec, especially that for the ContentDirectory Service
# Although Sonos uses ContentDirectory v1, the document for v2 is more helpful:
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
from __future__ import unicode_literals
import sys
import warnings
warnings.simplefilter('always', DeprecationWarning)
import textwrap
from .xml import XML, ns_tag
from .exceptions import DIDLMetadataError
from .utils import really_unicode
###############################################################################
# MISC HELPER FUNCTIONS #
###############################################################################
def to_didl_string(*args):
""" Convert any number of DIDLObjects to a unicode xml string.
Args:
*args (DidlObject): One or more DidlObject (or subclass) instances
Returns:
str: A unicode string of the form <DIDL-Lite ...>...</DIDL-Lite>
representing the instances
"""
didl = XML.Element(
'DIDL-Lite',
{
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
for arg in args:
didl.append(arg.to_element())
if sys.version_info[0] == 2:
return XML.tostring(didl)
else:
return XML.tostring(didl, encoding='unicode')
def from_didl_string(string):
""" Convert a unicode xml string to a list of DIDLObjects.
Arg:
string (str): A unicode string containing an xml representation of one
or more DIDL-Lite items (in the form <DIDL-Lite ...>
...</DIDL-Lite> )
Returns:
list: A list of one or more instances of DIDLObject or a subclass
"""
items = []
root = XML.fromstring(string.encode('utf-8'))
for elt in root:
if elt.tag.endswith('item') or elt.tag.endswith('container'):
item_class = elt.findtext(ns_tag('upnp', 'class'))
try:
cls = _DIDL_CLASS_TO_CLASS[item_class]
except KeyError:
raise DIDLMetadataError("Unknown UPnP class: %s" % item_class)
items.append(cls.from_element(elt))
else:
# <desc> elements are allowed as an immediate child of <DIDL-Lite>
# according to the spec, but I have not seen one there in Sonos, so
# we treat them as illegal. May need to fix this if this
# causes problems.
raise DIDLMetadataError("Illegal child of DIDL element: <%s>"
% elt.tag)
return items
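# Illustrative sketch (not part of the original module): a round trip through
# the two helpers above. The DidlObject subclasses are defined further down in
# this module, so the example is wrapped in a function and the names are only
# resolved when it is called. The ids and title are made-up placeholders.
def _example_didl_round_trip():
    track = DidlMusicTrack(title='Example title', parent_id='A:TRACKS',
                           item_id='A:TRACKS/example')
    didl = to_didl_string(track)
    items = from_didl_string(didl)
    # The parsed item compares equal to the original (see DidlObject.__eq__)
    return items[0] == track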
###############################################################################
# DIDL RESOURCE #
###############################################################################
class DidlResource(object):
""" Identifies a resource, typically some type of a binary asset, such as
a song.
A 'res' element contains a uri that identifies the resource.
"""
# Adapted from a class taken from the Python Brisa project - MIT licence.
# pylint: disable=too-many-instance-attributes
def __init__(self, uri, protocol_info, import_uri=None, size=None,
duration=None, bitrate=None, sample_frequency=None,
bits_per_sample=None, nr_audio_channels=None, resolution=None,
color_depth=None, protection=None):
""" Constructor for the Resource class.
Args:
            uri (str): value of the res tag, typically a URI. It MUST be
                a properly escaped URI as described in RFC 2396
protocol_info (str): A string in the form a:b:c:d that
identifies the streaming or transport protocol for
transmitting the resource. A value is required. For more
information see section 2.5.2 at
http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
import_uri (str, optional): uri locator for resource update
size (int, optional): size in bytes
duration (str, optional): duration of the playback of the res
at normal speed (H*:MM:SS:F* or H*:MM:SS:F0/F1)
bitrate (int, optional): bitrate in bytes/second
sample_frequency (int, optional): sample frequency in Hz
bits_per_sample (int, optional): bits per sample
nr_audio_channels (int, optional): number of audio channels
resolution (str, optional): resolution of the resource (X*Y)
color_depth (int, optional): color depth in bits
protection (str, optional): statement of protection type
"""
# Of these attributes, only uri, protocol_info and duration have been
# spotted 'in the wild'
self.uri = uri
# Protocol info is in the form a:b:c:d - see
# sec 2.5.2 at
# http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
self.protocol_info = protocol_info
self.import_uri = import_uri
self.size = size
self.duration = duration
self.bitrate = bitrate
self.sample_frequency = sample_frequency
self.bits_per_sample = bits_per_sample
self.nr_audio_channels = nr_audio_channels
self.resolution = resolution
self.color_depth = color_depth
self.protection = protection
@classmethod
def from_element(cls, element):
""" Set the resource properties from a <res> element.
Arg:
element (Element): An ElementTree Element
"""
def _int_helper(name):
"""Try to convert the name attribute to an int, or None."""
result = element.get(name)
if result is not None:
try:
return int(result)
except ValueError:
raise ValueError(
'Could not convert {0} to an integer'.format(name))
else:
return None
content = {}
# required
content['protocol_info'] = element.get('protocolInfo')
if content['protocol_info'] is None:
raise Exception('Could not create Resource from Element: '
'protocolInfo not found (required).')
# Optional
content['import_uri'] = element.get('importUri')
content['size'] = _int_helper('size')
content['duration'] = element.get('duration')
content['bitrate'] = _int_helper('bitrate')
content['sample_frequency'] = _int_helper('sampleFrequency')
content['bits_per_sample'] = _int_helper('bitsPerSample')
content['nr_audio_channels'] = _int_helper('nrAudioChannels')
content['resolution'] = element.get('resolution')
content['color_depth'] = _int_helper('colorDepth')
content['protection'] = element.get('protection')
content['uri'] = element.text
return cls(**content)
def __repr__(self):
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
self.uri,
hex(id(self)))
def __str__(self):
return self.__repr__()
def to_element(self):
""" Return an ElementTree Element based on this resource."""
if not self.protocol_info:
raise Exception('Could not create Element for this resource: '
'protocolInfo not set (required).')
root = XML.Element('res')
# Required
root.attrib['protocolInfo'] = self.protocol_info
# Optional
if self.import_uri is not None:
root.attrib['importUri'] = self.import_uri
if self.size is not None:
root.attrib['size'] = str(self.size)
if self.duration is not None:
root.attrib['duration'] = self.duration
if self.bitrate is not None:
root.attrib['bitrate'] = str(self.bitrate)
if self.sample_frequency is not None:
root.attrib['sampleFrequency'] = str(self.sample_frequency)
if self.bits_per_sample is not None:
root.attrib['bitsPerSample'] = str(self.bits_per_sample)
if self.nr_audio_channels is not None:
root.attrib['nrAudioChannels'] = str(self.nr_audio_channels)
if self.resolution is not None:
root.attrib['resolution'] = self.resolution
if self.color_depth is not None:
root.attrib['colorDepth'] = str(self.color_depth)
if self.protection is not None:
root.attrib['protection'] = self.protection
root.text = self.uri
return root
def to_dict(self, remove_nones=False):
"""Return a dictionary representation of the DidlResource
Args:
remove_nones (bool): Optionally remove dictionary elements when
their value is None.
"""
content = {
'uri': self.uri,
'protocol_info': self.protocol_info,
'import_uri': self.import_uri,
'size': self.size,
'duration': self.duration,
'bitrate': self.bitrate,
'sample_frequency': self.sample_frequency,
'bits_per_sample': self.bits_per_sample,
'nr_audio_channels': self.nr_audio_channels,
'resolution': self.resolution,
'color_depth': self.color_depth,
'protection': self.protection,
}
if remove_nones:
# delete any elements that have a value of None to optimize size
# of the returned structure
nones = [k for k in content if content[k] is None]
for k in nones:
del content[k]
return content
@classmethod
def from_dict(cls, content):
"""Create an instance from a dict.
An alternative constructor. Equivalent to DidlResource(**content).
Arg:
content (dict): Dict containing metadata information. Required and
valid arguments are the same as for the ``__init__`` method.
"""
return cls(**content)
def __eq__(self, resource):
"""Compare with another ``resource``.
Returns:
(bool): True if items are equal, else False
"""
if not isinstance(resource, DidlResource):
return False
return self.to_dict() == resource.to_dict()
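# Illustrative sketch (not part of the original module): building a minimal
# DidlResource and serializing it. Both the uri and the protocol_info string
# below are invented placeholder values.
def _example_didl_resource():
    res = DidlResource(
        uri='x-file-cifs://server/share/song.mp3',
        protocol_info='x-file-cifs:*:audio/mpeg:*')
    elt = res.to_element()  # <res protocolInfo="...">...uri...</res>
    restored = DidlResource.from_element(elt)
    return restored == res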
###############################################################################
# BASE OBJECTS #
###############################################################################
# a mapping which will be used to look up the relevant class from the
# DIDL item class
_DIDL_CLASS_TO_CLASS = {}
class DidlMetaClass(type):
"""Meta class for all Didl objects."""
def __new__(mcs, name, bases, attrs):
"""Create a new instance.
Args:
name: Name of the class
bases: Base classes (tuple)
attrs: Attributes defined for the class
"""
new_cls = super(DidlMetaClass, mcs).__new__(mcs, name, bases, attrs)
# Register all subclasses with the global _DIDL_CLASS_TO_CLASS mapping
item_class = attrs.get('item_class', None)
if item_class is not None:
_DIDL_CLASS_TO_CLASS[item_class] = new_cls
return new_cls
# Py2/3 compatible way of declaring the metaclass
class DidlObject(DidlMetaClass(str('DidlMetaClass'), (object,), {})):
"""Abstract base class for all DIDL-Lite items.
You should not need to instantiate this.
Attributes:
item_class (str): The DIDL Lite class for this object
tag (str): The XML element tag name used for this instance
_translation (dict): A dict used to translate between instance
attribute names and XML tags/namespaces. It also serves to define
the allowed tags/attributes for this instance. Overridden and
extended by subclasses.
"""
item_class = 'object'
tag = 'item'
# key: attribute_name: (ns, tag)
_translation = {
'creator': ('dc', 'creator'),
'write_status': ('upnp', 'writeStatus'),
}
def __init__(self, title, parent_id, item_id, restricted=True,
resources=None, desc='RINCON_AssociatedZPUDN', **kwargs):
r"""Construct and initialize a DidlObject.
Args:
title (str): The title for the item
parent_id (str): The parent ID for the item
item_id (str): The ID for the item
restricted (bool): Whether the item can be modified
resources (list): A list of resources for this object
desc (str): A didl descriptor, default RINCON_AssociatedZPUDN. This
is not the same as "description"! It is used for identifying
the relevant music service
**kwargs: Extra metadata. What is allowed depends on the
_translation class attribute, which in turn depends on the DIDL
class
"""
# All didl objects *must* have a title, a parent_id and an item_id
# so we specify these as required args in the constructor signature
# to ensure that we get them. Other didl object properties are
# optional, so can be passed as kwargs.
# The content of _translation is adapted from the list in table C at
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
# Not all properties referred to there are catered for, since Sonos
# does not use some of them.
# pylint: disable=super-on-old-class
super(DidlObject, self).__init__()
self.title = title
self.parent_id = parent_id
self.item_id = item_id
# Restricted is a compulsory attribute, but is almost always True for
# Sonos. (Only seen it 'false' when browsing favorites)
self.restricted = restricted
# Resources is multi-valued, and dealt with separately
self.resources = [] if resources is None else resources
# According to the spec, there may be one or more desc values. Sonos
# only seems to use one, so we won't bother with a list
self.desc = desc
for key, value in kwargs.items():
# For each attribute, check to see if this class allows it
if key not in self._translation:
raise ValueError(
'The key \'{0}\' is not allowed as an argument. Only '
'these keys are allowed: parent_id, item_id, title, '
'restricted, resources, desc'
' {1}'.format(key, ', '.join(self._translation.keys())))
# It is an allowed attribute. Set it as an attribute on self, so
# that it can be accessed as Classname.attribute in the normal
# way.
setattr(self, key, value)
@classmethod
def from_element(cls, element):
"""Create an instance of this class from an ElementTree xml Element.
An alternative constructor. The element must be a DIDL-Lite <item> or
<container> element, and must be properly namespaced.
Arg:
            element (Element): An :py:class:`xml.etree.ElementTree.Element` object.
"""
# Check we have the right sort of element. tag can be an empty string
# which indicates that any tag is allowed (see eg the musicAlbum DIDL
# class)
if not element.tag.endswith(cls.tag):
raise DIDLMetadataError(
"Wrong element. Expected '<{0}>',"
" got '<{1}>'".format(cls.tag, element.tag))
# and that the upnp matches what we are expecting
item_class = element.find(ns_tag('upnp', 'class')).text
if item_class != cls.item_class:
raise DIDLMetadataError(
"UPnP class is incorrect. Expected '{0}',"
" got '{1}'".format(cls.item_class, item_class))
        # parent_id, item_id and restricted are stored as attributes on the
# element
        # Check for missing attributes before converting, so that a missing
        # id or parentID raises DIDLMetadataError rather than an error from
        # really_unicode
        item_id = element.get('id', None)
        if item_id is None:
            raise DIDLMetadataError("Missing id attribute")
        item_id = really_unicode(item_id)
        parent_id = element.get('parentID', None)
        if parent_id is None:
            raise DIDLMetadataError("Missing parentID attribute")
        parent_id = really_unicode(parent_id)
restricted = element.get('restricted', None)
if restricted is None:
raise DIDLMetadataError("Missing restricted attribute")
restricted = True if restricted in [1, 'true', 'True'] else False
# There must be a title. According to spec, it should be the first
# child, but Sonos does not abide by this
title_elt = element.find(ns_tag('dc', 'title'))
if title_elt is None:
raise DIDLMetadataError(
"Missing title element")
title = really_unicode(title_elt.text)
# Deal with any resource elements
resources = []
for res_elt in element.findall(ns_tag('', 'res')):
resources.append(
DidlResource.from_element(res_elt))
# and the desc element (There is only one in Sonos)
desc = element.findtext(ns_tag('', 'desc'))
# Get values of the elements listed in _translation and add them to
# the content dict
content = {}
for key, value in cls._translation.items():
result = element.findtext(ns_tag(*value))
if result is not None:
# We store info as unicode internally.
content[key] = really_unicode(result)
# Convert type for original track number
if content.get('original_track_number') is not None:
content['original_track_number'] = \
int(content['original_track_number'])
# Now pass the content dict we have just built to the main
# constructor, as kwargs, to create the object
return cls(title=title, parent_id=parent_id, item_id=item_id,
restricted=restricted, resources=resources, desc=desc,
**content)
@classmethod
def from_dict(cls, content):
"""Create an instance from a dict.
An alternative constructor. Equivalent to DidlObject(**content).
Arg:
            content (dict): Dict containing metadata information. Required and
valid arguments are the same as for the ``__init__`` method.
"""
# Do we really need this constructor? Could use DidlObject(**content)
# instead.
return cls(**content)
def __eq__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are equal, else False
"""
if not isinstance(playable_item, DidlObject):
return False
return self.to_dict() == playable_item.to_dict()
def __ne__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are unequal, else False
"""
if not isinstance(playable_item, DidlObject):
return True
return self.to_dict() != playable_item.to_dict()
def __repr__(self):
"""Return the repr value for the item.
The repr is of the form::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set,
or ``str(content)``. The output is also cleared of non-ascii
characters.
"""
# 40 originates from terminal width (78) - (15) for address part and
# (19) for the longest class name and a little left for buffer
if self.title is not None:
middle = self.title.encode('ascii', 'replace')[0:40]
else:
            middle = str(self.to_dict()).encode('ascii', 'replace')[0:40]
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
middle,
hex(id(self)))
def __str__(self):
"""Return the str value for the item::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set, or
``str(content)``. The output is also cleared of non-ascii characters.
"""
return self.__repr__()
def to_dict(self):
"""Return the dict representation of the instance."""
content = {}
# Get the value of each attribute listed in _translation, and add it
# to the content dict
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
# also add parent_id, item_id, restricted, title and resources because
# they are not listed in _translation
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = self.resources
content['desc'] = self.desc
return content
def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Arg:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
An ElementTree Element
.. code :: xml
<DIDL-Lite ..NS_INFO..>
<item id="...self.item_id..."
parentID="...cls.parent_id..." restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
RINCON_AssociatedZPUDN
</desc>
</item>
</DIDL-Lite>
"""
elt_attrib = {}
if include_namespaces:
elt_attrib.update({
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
elt_attrib.update({
'parentID': self.parent_id,
'restricted': 'true' if self.restricted else 'false',
'id': self.item_id
})
elt = XML.Element(self.tag, elt_attrib)
# Add the title, which should always come first, according to the spec
XML.SubElement(elt, 'dc:title').text = self.title
# Add in any resources
for resource in self.resources:
elt.append(resource.to_element())
# Add the rest of the metadata attributes (i.e all those listed in
# _translation) as sub-elements of the item element.
for key, value in self._translation.items():
if hasattr(self, key):
# Some attributes have a namespace of '', which means they
# are in the default namespace. We need to handle those
# carefully
tag = "%s:%s" % value if value[0] else "%s" % value[1]
XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
# Now add in the item class
XML.SubElement(elt, 'upnp:class').text = self.item_class
# And the desc element
desc_attrib = {'id': 'cdudn', 'nameSpace':
'urn:schemas-rinconnetworks-com:metadata-1-0/'}
desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
desc_elt.text = self.desc
return elt
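# Illustrative sketch (not part of the original module): serializing an item
# on its own. Passing include_namespaces=True makes the produced element
# carry its own namespace declarations, which matters when it is not wrapped
# in the DIDL-Lite root that to_didl_string adds. Ids and title are
# placeholders; DidlItem is defined just below, so the name is only resolved
# when the function is called.
def _example_standalone_element():
    item = DidlItem(title='Example', parent_id='P:0', item_id='P:0/1')
    elt = item.to_element(include_namespaces=True)
    return XML.tostring(elt)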
###############################################################################
# OBJECT.ITEM HIERARCHY #
###############################################################################
class DidlItem(DidlObject):
"""A basic content directory item."""
    # The spec allows for an optional 'refID' attribute, but we do not handle it
item_class = 'object.item'
# _translation = DidlObject._translation.update({ ...})
# does not work, but doing it in two steps does
_translation = DidlObject._translation.copy()
_translation.update(
{
'stream_content': ('r', 'streamContent'),
'radio_show': ('r', 'radioShowMd'),
'album_art_uri': ('upnp', 'albumArtURI'),
}
)
class DidlAudioItem(DidlItem):
"""An audio item."""
item_class = 'object.item.audioItem'
_translation = DidlItem._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'description': ('dc', 'description'),
'long_description': ('upnp', 'longDescription'),
'publisher': ('dc', 'publisher'),
'language': ('dc', 'language'),
'relation': ('dc', 'relation'),
'rights': ('dc', 'rights'),
}
)
# Browsing Sonos Favorites produces some odd looking DIDL-Lite. The object
# class is 'object.itemobject.item.sonos-favorite', which is probably a typo
# in Sonos' code somewhere.
# Here is an example:
# <?xml version="1.0" ?>
# <DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
# xmlns:dc="http://purl.org/dc/elements/1.1/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">
# <item id="FV:2/13" parentID="FV:2" restricted="false">
# <dc:title>Shake It Off</dc:title>
# <upnp:class>object.itemobject.item.sonos-favorite</upnp:class>
# <r:ordinal>4</r:ordinal>
# <res protocolInfo="sonos.com-spotify:*:audio/x-spotify:*">
# x-sonos-spotify:spotify%3atrack%3a7n.......?sid=9&flags=32</res>
# <upnp:albumArtURI>http://o.scd.....</upnp:albumArtURI>
# <r:type>instantPlay</r:type>
# <r:description>By Taylor Swift</r:description>
# <r:resMD><DIDL-Lite xmlns:dc="
# http://purl.org/dc/elements/1.1/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
# <item id="00030020spotify%3atrack%3a7n9Q6b...74uCtajkddPt"
# parentID="0006006ctoplist%2ftracks%2fregion%2fGB"
# restricted="true"><dc:title>Shake It Off
# </dc:title><upnp:class>object.item.audioItem.musicTrack
# </upnp:class><desc id="cdudn"
# nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
# SA_RINCON2311_XXXXX</desc>
# </item>
# </DIDL-Lite>
# </r:resMD>
# </item>
# </DIDL-Lite>
# Note the r:ordinal, r:type; r:description, r:resMD elements which are not
# seen (?) anywhere else
# We're ignoring this for the moment!
class DidlMusicTrack(DidlAudioItem):
"""Class that represents a music library track. """
item_class = 'object.item.audioItem.musicTrack'
# name: (ns, tag)
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'album': ('upnp', 'album'),
'original_track_number': ('upnp', 'originalTrackNumber'),
'playlist': ('upnp', 'playlist'),
'contributor': ('dc', 'contributor'),
'date': ('dc', 'date'),
}
)
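# Illustrative sketch (not part of the original module): a DidlMusicTrack with
# some of the optional metadata permitted by its _translation table. All
# values are invented placeholders.
def _example_music_track():
    track = DidlMusicTrack(
        title='Example track', parent_id='A:TRACKS', item_id='A:TRACKS/1',
        artist='Example artist', album='Example album',
        original_track_number=7)
    # Allowed metadata keys become plain attributes on the instance
    return (track.artist, track.album, track.to_dict())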
class DidlAudioBroadcast(DidlAudioItem):
"""Class that represents an audio broadcast."""
item_class = 'object.item.audioItem.audioBroadcast'
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'region': ('upnp', 'region'),
'radio_call_sign': ('upnp', 'radioCallSign'),
'radio_station_id': ('upnp', 'radioStationID'),
'channel_nr': ('upnp', 'channelNr'),
}
)
class DidlAudioBroadcastFavorite(DidlAudioBroadcast):
"""Class that represents an audio broadcast sonos favorite."""
# Note: The sonos-favorite part of the class spec obviously isn't part of
# the DIDL spec, so just assume that it has the same definition as the
# regular object.item.audioItem.audioBroadcast
item_class = 'object.item.audioItem.audioBroadcast.sonos-favorite'
###############################################################################
# OBJECT.CONTAINER HIERARCHY #
###############################################################################
class DidlContainer(DidlObject):
"""Class that represents a music library container. """
item_class = 'object.container'
tag = 'container'
# We do not implement createClass or searchClass. Not used by Sonos??
# TODO: handle the 'childCount' element.
class DidlAlbum(DidlContainer):
"""A content directory album."""
item_class = 'object.container.album'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'description': ('dc', 'description'),
'long_description': ('upnp', 'longDescription'),
'publisher': ('dc', 'publisher'),
'contributor': ('dc', 'contributor'),
'date': ('dc', 'date'),
'relation': ('dc', 'relation'),
'rights': ('dc', 'rights'),
}
)
class DidlMusicAlbum(DidlAlbum):
"""Class that represents a music library album. """
item_class = 'object.container.album.musicAlbum'
# According to the spec, all musicAlbums should be represented in
# XML by a <container> tag. Sonos sometimes uses <container> and
# sometimes uses <item>. Set the tag type to '' to indicate that
# either is allowed.
tag = ''
# name: (ns, tag)
# pylint: disable=protected-access
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'genre': ('upnp', 'genre'),
'producer': ('upnp', 'producer'),
'toc': ('upnp', 'toc'),
'album_art_uri': ('upnp', 'albumArtURI'),
}
)
class DidlMusicAlbumFavorite(DidlAlbum):
"""Class that represents a Sonos favorite music library album.
This class is not part of the DIDL spec and is Sonos specific.
"""
item_class = 'object.container.album.musicAlbum.sonos-favorite'
    # Despite the fact that the item derives from object.container, its
# XML does not include a <container> tag, but an <item> tag. This seems
# to be an error by Sonos.
tag = 'item'
class DidlMusicAlbumCompilation(DidlAlbum):
"""Class that represents a Sonos favorite music library compilation.
This class is not part of the DIDL spec and is Sonos specific.
"""
# These classes appear when browsing the library and Sonos has been set
# to group albums using compilations.
# See https://github.com/SoCo/SoCo/issues/280
item_class = 'object.container.album.musicAlbum.compilation'
tag = 'container'
class DidlPerson(DidlContainer):
"""A content directory class representing a person."""
item_class = 'object.container.person'
_translation = DidlContainer._translation.copy()
_translation.update(
{
'language': ('dc', 'language'),
}
)
class DidlComposer(DidlPerson):
"""Class that represents a music library composer."""
# Not in the DIDL-Lite spec. Sonos specific??
item_class = 'object.container.person.composer'
class DidlMusicArtist(DidlPerson):
"""Class that represents a music library artist."""
item_class = 'object.container.person.musicArtist'
# name: (ns, tag)
_translation = DidlPerson._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'artist_discography_uri': ('upnp', 'artistDiscographyURI'),
}
)
class DidlAlbumList(DidlContainer):
"""Class that represents a music library album list."""
# This does not appear (that I can find) in the DIDL-Lite specs.
# Presumably Sonos specific
item_class = 'object.container.albumlist'
class DidlPlaylistContainer(DidlContainer):
"""Class that represents a music library play list."""
item_class = 'object.container.playlistContainer'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'genre': ('upnp', 'genre'),
'long_description': ('upnp', 'longDescription'),
'producer': ('dc', 'producer'),
'contributor': ('dc', 'contributor'),
'description': ('dc', 'description'),
'date': ('dc', 'date'),
'language': ('dc', 'language'),
'rights': ('dc', 'rights'),
}
)
class DidlSameArtist(DidlPlaylistContainer):
"""Class that represents all tracks by a single artist.
This type is returned by browsing an artist or a composer
"""
# Not in the DIDL-Lite spec. Sonos specific?
item_class = 'object.container.playlistContainer.sameArtist'
class DidlGenre(DidlContainer):
"""A content directory class representing a general genre."""
item_class = 'object.container.genre'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'long_description': ('upnp', 'longDescription'),
'description': ('dc', 'description'),
}
)
class DidlMusicGenre(DidlGenre):
"""Class that represents a music genre."""
item_class = 'object.container.genre.musicGenre'
###############################################################################
# SPECIAL LISTS #
###############################################################################
class ListOfMusicInfoItems(list):
"""Abstract container class for a list of music information items."""
def __init__(self, items, number_returned, total_matches, update_id):
super(ListOfMusicInfoItems, self).__init__(items)
self._metadata = {
'item_list': list(items),
'number_returned': number_returned,
'total_matches': total_matches,
'update_id': update_id,
}
def __getitem__(self, key):
"""Legacy get metadata by string key or list item(s) by index.
DEPRECATION: This overriding form of __getitem__ will be removed in
the 3rd release after 0.8. The metadata can be fetched via the named
attributes
"""
if key in self._metadata:
if key == 'item_list':
message = """
Calling [\'item_list\'] on search results to obtain the objects
is no longer necessary, since the object returned from searches
now is a list. This deprecated way of getting the items will
be removed from the third release after 0.8."""
else:
message = """
Getting metadata items by indexing the search result like a
dictionary [\'{0}\'] is deprecated. Please use the named
attribute {1}.{0} instead. The deprecated way of retrieving the
metadata will be removed from the third release after
0.8""".format(key, self.__class__.__name__)
message = textwrap.dedent(message).replace('\n', ' ').lstrip()
warnings.warn(message, DeprecationWarning, stacklevel=2)
return self._metadata[key]
else:
return super(ListOfMusicInfoItems, self).__getitem__(key)
@property
def number_returned(self):
"""The number of returned matches."""
return self._metadata['number_returned']
@property
def total_matches(self):
"""The number of total matches."""
return self._metadata['total_matches']
@property
def update_id(self):
"""The update ID."""
return self._metadata['update_id']
class SearchResult(ListOfMusicInfoItems):
"""Container class that represents a search or browse result.
(browse is just a special case of search)
"""
def __init__(self, items, search_type, number_returned,
total_matches, update_id):
super(SearchResult, self).__init__(
items, number_returned, total_matches, update_id
)
self._metadata['search_type'] = search_type
def __repr__(self):
return '{0}(items={1}, search_type=\'{2}\')'.format(
self.__class__.__name__,
super(SearchResult, self).__repr__(),
self.search_type)
@property
def search_type(self):
"""The search type."""
return self._metadata['search_type']
class Queue(ListOfMusicInfoItems):
"""Container class that represents a queue."""
def __init__(self, items, number_returned, total_matches, update_id):
super(Queue, self).__init__(
items, number_returned, total_matches, update_id
)
def __repr__(self):
return '{0}(items={1})'.format(
self.__class__.__name__,
super(Queue, self).__repr__(),
)
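# Illustrative sketch (not part of the original module): reading the metadata
# on the list subclasses above through the named properties rather than the
# deprecated dictionary-style indexing. The items and counts are placeholders.
def _example_search_result_metadata():
    result = SearchResult(items=[], search_type='albums',
                          number_returned=0, total_matches=0, update_id=1)
    return (result.number_returned, result.total_matches,
            result.update_id, result.search_type)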
|
mit
| 8,021,834,686,697,248,000
| 35.48134
| 81
| 0.572069
| false
| 4.151476
| false
| false
| false
|
yeyanchao/calibre
|
src/calibre/gui2/library/views.py
|
1
|
39819
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, itertools, operator
from functools import partial
from future_builtins import map
from collections import OrderedDict
from PyQt4.Qt import (QTableView, Qt, QAbstractItemView, QMenu, pyqtSignal,
QModelIndex, QIcon, QItemSelection, QMimeData, QDrag, QApplication,
QPoint, QPixmap, QUrl, QImage, QPainter, QColor, QRect)
from calibre.gui2.library.delegates import (RatingDelegate, PubDateDelegate,
TextDelegate, DateDelegate, CompleteDelegate, CcTextDelegate,
CcBoolDelegate, CcCommentsDelegate, CcDateDelegate, CcTemplateDelegate,
CcEnumDelegate, CcNumberDelegate, LanguagesDelegate)
from calibre.gui2.library.models import BooksModel, DeviceBooksModel
from calibre.utils.config import tweaks, prefs
from calibre.gui2 import error_dialog, gprefs
from calibre.gui2.library import DEFAULT_SORT
from calibre.constants import filesystem_encoding
from calibre import force_unicode
class PreserveViewState(object): # {{{
'''
Save the set of selected books at enter time. If at exit time there are no
selected books, restore the previous selection, the previous current index
    and don't affect the scroll position.
'''
def __init__(self, view, preserve_hpos=True, preserve_vpos=True,
require_selected_ids=True):
self.view = view
self.require_selected_ids = require_selected_ids
self.selected_ids = set()
self.current_id = None
self.preserve_hpos = preserve_hpos
self.preserve_vpos = preserve_vpos
self.vscroll = self.hscroll = 0
def __enter__(self):
try:
self.selected_ids = self.view.get_selected_ids()
self.current_id = self.view.current_id
self.vscroll = self.view.verticalScrollBar().value()
self.hscroll = self.view.horizontalScrollBar().value()
except:
import traceback
traceback.print_exc()
def __exit__(self, *args):
if self.selected_ids or not self.require_selected_ids:
if self.current_id is not None:
self.view.current_id = self.current_id
if self.selected_ids:
self.view.select_rows(self.selected_ids, using_ids=True,
scroll=False, change_current=self.current_id is None)
if self.preserve_vpos:
self.view.verticalScrollBar().setValue(self.vscroll)
if self.preserve_hpos:
self.view.horizontalScrollBar().setValue(self.hscroll)
@dynamic_property
def state(self):
def fget(self):
self.__enter__()
return {x:getattr(self, x) for x in ('selected_ids', 'current_id',
'vscroll', 'hscroll')}
def fset(self, state):
for k, v in state.iteritems(): setattr(self, k, v)
self.__exit__()
return property(fget=fget, fset=fset)
# }}}
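# Illustrative sketch (not part of the original module): the intended usage
# pattern for PreserveViewState. 'view' stands for any BooksView instance and
# 'refresh_everything' is a made-up stand-in for whatever invalidates the
# model in between.
def _example_preserve_view_state(view, refresh_everything):
    with view.preserve_state():
        refresh_everything()
    # On exit the previous selection, current row and scroll positions are
    # restored (see PreserveViewState.__exit__ above)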
class BooksView(QTableView): # {{{
files_dropped = pyqtSignal(object)
add_column_signal = pyqtSignal()
def viewportEvent(self, event):
if (event.type() == event.ToolTip and not gprefs['book_list_tooltips']):
return False
return QTableView.viewportEvent(self, event)
def __init__(self, parent, modelcls=BooksModel, use_edit_metadata_dialog=True):
QTableView.__init__(self, parent)
if not tweaks['horizontal_scrolling_per_column']:
self.setHorizontalScrollMode(self.ScrollPerPixel)
self.setEditTriggers(self.EditKeyPressed)
if tweaks['doubleclick_on_library_view'] == 'edit_cell':
self.setEditTriggers(self.DoubleClicked|self.editTriggers())
elif tweaks['doubleclick_on_library_view'] == 'open_viewer':
self.setEditTriggers(self.SelectedClicked|self.editTriggers())
self.doubleClicked.connect(parent.iactions['View'].view_triggered)
elif tweaks['doubleclick_on_library_view'] == 'edit_metadata':
# Must not enable single-click to edit, or the field will remain
# open in edit mode underneath the edit metadata dialog
if use_edit_metadata_dialog:
self.doubleClicked.connect(
partial(parent.iactions['Edit Metadata'].edit_metadata,
checked=False))
else:
self.setEditTriggers(self.DoubleClicked|self.editTriggers())
self.drag_allowed = True
self.setDragEnabled(True)
self.setDragDropOverwriteMode(False)
self.setDragDropMode(self.DragDrop)
self.drag_start_pos = None
self.setAlternatingRowColors(True)
self.setSelectionBehavior(self.SelectRows)
self.setShowGrid(False)
self.setWordWrap(False)
self.rating_delegate = RatingDelegate(self)
self.timestamp_delegate = DateDelegate(self)
self.pubdate_delegate = PubDateDelegate(self)
self.last_modified_delegate = DateDelegate(self,
tweak_name='gui_last_modified_display_format')
self.languages_delegate = LanguagesDelegate(self)
self.tags_delegate = CompleteDelegate(self, ',', 'all_tag_names')
self.authors_delegate = CompleteDelegate(self, '&', 'all_author_names', True)
self.cc_names_delegate = CompleteDelegate(self, '&', 'all_custom', True)
self.series_delegate = TextDelegate(self)
self.publisher_delegate = TextDelegate(self)
self.text_delegate = TextDelegate(self)
self.cc_text_delegate = CcTextDelegate(self)
self.cc_enum_delegate = CcEnumDelegate(self)
self.cc_bool_delegate = CcBoolDelegate(self)
self.cc_comments_delegate = CcCommentsDelegate(self)
self.cc_template_delegate = CcTemplateDelegate(self)
self.cc_number_delegate = CcNumberDelegate(self)
self.display_parent = parent
self._model = modelcls(self)
self.setModel(self._model)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSortingEnabled(True)
self.selectionModel().currentRowChanged.connect(self._model.current_changed)
self.preserve_state = partial(PreserveViewState, self)
# {{{ Column Header setup
self.can_add_columns = True
self.was_restored = False
self.column_header = self.horizontalHeader()
self.column_header.setMovable(True)
self.column_header.sectionMoved.connect(self.save_state)
self.column_header.setContextMenuPolicy(Qt.CustomContextMenu)
self.column_header.customContextMenuRequested.connect(self.show_column_header_context_menu)
self.column_header.sectionResized.connect(self.column_resized, Qt.QueuedConnection)
# }}}
self._model.database_changed.connect(self.database_changed)
hv = self.verticalHeader()
hv.setClickable(True)
hv.setCursor(Qt.PointingHandCursor)
self.selected_ids = []
self._model.about_to_be_sorted.connect(self.about_to_be_sorted)
self._model.sorting_done.connect(self.sorting_done,
type=Qt.QueuedConnection)
# Column Header Context Menu {{{
def column_header_context_handler(self, action=None, column=None):
if not action or not column:
return
try:
idx = self.column_map.index(column)
except:
return
h = self.column_header
if action == 'hide':
h.setSectionHidden(idx, True)
elif action == 'show':
h.setSectionHidden(idx, False)
if h.sectionSize(idx) < 3:
sz = h.sectionSizeHint(idx)
h.resizeSection(idx, sz)
elif action == 'ascending':
self.sortByColumn(idx, Qt.AscendingOrder)
elif action == 'descending':
self.sortByColumn(idx, Qt.DescendingOrder)
elif action == 'defaults':
self.apply_state(self.get_default_state())
elif action == 'addcustcol':
self.add_column_signal.emit()
elif action.startswith('align_'):
alignment = action.partition('_')[-1]
self._model.change_alignment(column, alignment)
self.save_state()
def show_column_header_context_menu(self, pos):
idx = self.column_header.logicalIndexAt(pos)
if idx > -1 and idx < len(self.column_map):
col = self.column_map[idx]
name = unicode(self.model().headerData(idx, Qt.Horizontal,
Qt.DisplayRole).toString())
self.column_header_context_menu = QMenu(self)
if col != 'ondevice':
self.column_header_context_menu.addAction(_('Hide column %s') %
name,
partial(self.column_header_context_handler, action='hide',
column=col))
m = self.column_header_context_menu.addMenu(
_('Sort on %s') % name)
a = m.addAction(_('Ascending'),
partial(self.column_header_context_handler,
action='ascending', column=col))
d = m.addAction(_('Descending'),
partial(self.column_header_context_handler,
action='descending', column=col))
if self._model.sorted_on[0] == col:
ac = a if self._model.sorted_on[1] else d
ac.setCheckable(True)
ac.setChecked(True)
if col not in ('ondevice', 'inlibrary') and \
(not self.model().is_custom_column(col) or \
self.model().custom_columns[col]['datatype'] not in ('bool',
)):
m = self.column_header_context_menu.addMenu(
_('Change text alignment for %s') % name)
al = self._model.alignment_map.get(col, 'left')
for x, t in (('left', _('Left')), ('right', _('Right')), ('center',
_('Center'))):
a = m.addAction(t,
partial(self.column_header_context_handler,
action='align_'+x, column=col))
if al == x:
a.setCheckable(True)
a.setChecked(True)
hidden_cols = [self.column_map[i] for i in
range(self.column_header.count()) if
self.column_header.isSectionHidden(i)]
try:
hidden_cols.remove('ondevice')
except:
pass
if hidden_cols:
self.column_header_context_menu.addSeparator()
m = self.column_header_context_menu.addMenu(_('Show column'))
for col in hidden_cols:
hidx = self.column_map.index(col)
name = unicode(self.model().headerData(hidx, Qt.Horizontal,
Qt.DisplayRole).toString())
m.addAction(name,
partial(self.column_header_context_handler,
action='show', column=col))
self.column_header_context_menu.addSeparator()
self.column_header_context_menu.addAction(
_('Shrink column if it is too wide to fit'),
partial(self.resize_column_to_fit, column=self.column_map[idx]))
self.column_header_context_menu.addAction(
_('Restore default layout'),
partial(self.column_header_context_handler,
action='defaults', column=col))
if self.can_add_columns:
self.column_header_context_menu.addAction(
QIcon(I('column.png')),
_('Add your own columns'),
partial(self.column_header_context_handler,
action='addcustcol', column=col))
self.column_header_context_menu.popup(self.column_header.mapToGlobal(pos))
# }}}
# Sorting {{{
def about_to_be_sorted(self, idc):
selected_rows = [r.row() for r in self.selectionModel().selectedRows()]
self.selected_ids = [idc(r) for r in selected_rows]
def sorting_done(self, indexc):
pos = self.horizontalScrollBar().value()
self.select_rows(self.selected_ids, using_ids=True, change_current=True,
scroll=True)
self.selected_ids = []
self.horizontalScrollBar().setValue(pos)
def sort_by_named_field(self, field, order, reset=True):
if field in self.column_map:
idx = self.column_map.index(field)
if order:
self.sortByColumn(idx, Qt.AscendingOrder)
else:
self.sortByColumn(idx, Qt.DescendingOrder)
else:
self._model.sort_by_named_field(field, order, reset)
def multisort(self, fields, reset=True, only_if_different=False):
if len(fields) == 0:
return
sh = self.cleanup_sort_history(self._model.sort_history,
ignore_column_map=True)
if only_if_different and len(sh) >= len(fields):
ret=True
for i,t in enumerate(fields):
if t[0] != sh[i][0]:
ret = False
break
if ret:
return
for n,d in reversed(fields):
if n in self._model.db.field_metadata.keys():
sh.insert(0, (n, d))
sh = self.cleanup_sort_history(sh, ignore_column_map=True)
self._model.sort_history = [tuple(x) for x in sh]
self._model.resort(reset=reset)
col = fields[0][0]
dir = Qt.AscendingOrder if fields[0][1] else Qt.DescendingOrder
if col in self.column_map:
col = self.column_map.index(col)
hdrs = self.horizontalHeader()
try:
hdrs.setSortIndicator(col, dir)
except:
pass
# }}}
# Ondevice column {{{
def set_ondevice_column_visibility(self):
m = self._model
self.column_header.setSectionHidden(m.column_map.index('ondevice'),
not m.device_connected)
def set_device_connected(self, is_connected):
self._model.set_device_connected(is_connected)
self.set_ondevice_column_visibility()
# }}}
# Save/Restore State {{{
def get_state(self):
h = self.column_header
cm = self.column_map
state = {}
state['hidden_columns'] = [cm[i] for i in range(h.count())
if h.isSectionHidden(i) and cm[i] != 'ondevice']
state['last_modified_injected'] = True
state['languages_injected'] = True
state['sort_history'] = \
self.cleanup_sort_history(self.model().sort_history)
state['column_positions'] = {}
state['column_sizes'] = {}
state['column_alignment'] = self._model.alignment_map
for i in range(h.count()):
name = cm[i]
state['column_positions'][name] = h.visualIndex(i)
if name != 'ondevice':
state['column_sizes'][name] = h.sectionSize(i)
return state
def write_state(self, state):
db = getattr(self.model(), 'db', None)
name = unicode(self.objectName())
if name and db is not None:
db.prefs.set(name + ' books view state', state)
def save_state(self):
# Only save if we have been initialized (set_database called)
if len(self.column_map) > 0 and self.was_restored:
state = self.get_state()
self.write_state(state)
def cleanup_sort_history(self, sort_history, ignore_column_map=False):
history = []
for col, order in sort_history:
if not isinstance(order, bool):
continue
if col == 'date':
col = 'timestamp'
if ignore_column_map or col in self.column_map:
if (not history or history[-1][0] != col):
history.append([col, order])
return history
def apply_sort_history(self, saved_history, max_sort_levels=3):
if not saved_history:
return
for col, order in reversed(self.cleanup_sort_history(
saved_history)[:max_sort_levels]):
self.sortByColumn(self.column_map.index(col),
Qt.AscendingOrder if order else Qt.DescendingOrder)
def apply_state(self, state, max_sort_levels=3):
h = self.column_header
cmap = {}
hidden = state.get('hidden_columns', [])
for i, c in enumerate(self.column_map):
cmap[c] = i
if c != 'ondevice':
h.setSectionHidden(i, c in hidden)
positions = state.get('column_positions', {})
pmap = {}
for col, pos in positions.items():
if col in cmap:
pmap[pos] = col
for pos in sorted(pmap.keys()):
col = pmap[pos]
idx = cmap[col]
current_pos = h.visualIndex(idx)
if current_pos != pos:
h.moveSection(current_pos, pos)
sizes = state.get('column_sizes', {})
for col, size in sizes.items():
if col in cmap:
sz = sizes[col]
if sz < 3:
sz = h.sectionSizeHint(cmap[col])
h.resizeSection(cmap[col], sz)
self.apply_sort_history(state.get('sort_history', None),
max_sort_levels=max_sort_levels)
for col, alignment in state.get('column_alignment', {}).items():
self._model.change_alignment(col, alignment)
for i in range(h.count()):
if not h.isSectionHidden(i) and h.sectionSize(i) < 3:
sz = h.sectionSizeHint(i)
h.resizeSection(i, sz)
def get_default_state(self):
old_state = {
'hidden_columns': ['last_modified', 'languages'],
'sort_history':[DEFAULT_SORT],
'column_positions': {},
'column_sizes': {},
'column_alignment': {
'size':'center',
'timestamp':'center',
'pubdate':'center'},
'last_modified_injected': True,
'languages_injected': True,
}
h = self.column_header
cm = self.column_map
for i in range(h.count()):
name = cm[i]
old_state['column_positions'][name] = i
if name != 'ondevice':
old_state['column_sizes'][name] = \
min(350, max(self.sizeHintForColumn(i),
h.sectionSizeHint(i)))
if name in ('timestamp', 'last_modified'):
old_state['column_sizes'][name] += 12
return old_state
def get_old_state(self):
ans = None
name = unicode(self.objectName())
if name:
name += ' books view state'
db = getattr(self.model(), 'db', None)
if db is not None:
ans = db.prefs.get(name, None)
if ans is None:
ans = gprefs.get(name, None)
try:
del gprefs[name]
except:
pass
if ans is not None:
db.prefs[name] = ans
else:
injected = False
if not ans.get('last_modified_injected', False):
injected = True
ans['last_modified_injected'] = True
hc = ans.get('hidden_columns', [])
if 'last_modified' not in hc:
hc.append('last_modified')
if not ans.get('languages_injected', False):
injected = True
ans['languages_injected'] = True
hc = ans.get('hidden_columns', [])
if 'languages' not in hc:
hc.append('languages')
if injected:
db.prefs[name] = ans
return ans
def restore_state(self):
old_state = self.get_old_state()
if old_state is None:
old_state = self.get_default_state()
max_levels = 3
if tweaks['sort_columns_at_startup'] is not None:
sh = []
try:
for c,d in tweaks['sort_columns_at_startup']:
if not isinstance(d, bool):
d = True if d == 0 else False
sh.append((c, d))
except:
# Ignore invalid tweak values as users seem to often get them
# wrong
print('Ignoring invalid sort_columns_at_startup tweak, with error:')
import traceback
traceback.print_exc()
old_state['sort_history'] = sh
max_levels = max(3, len(sh))
self.column_header.blockSignals(True)
self.apply_state(old_state, max_sort_levels=max_levels)
self.column_header.blockSignals(False)
# Resize all rows to have the correct height
if self.model().rowCount(QModelIndex()) > 0:
self.resizeRowToContents(0)
self.verticalHeader().setDefaultSectionSize(self.rowHeight(0))
self.was_restored = True
def resize_column_to_fit(self, column):
col = self.column_map.index(column)
self.column_resized(col, self.columnWidth(col), self.columnWidth(col))
def column_resized(self, col, old_size, new_size):
# arbitrary: scroll bar + header + some
max_width = self.width() - (self.verticalScrollBar().width() +
self.verticalHeader().width() + 10)
if max_width < 200:
max_width = 200
if new_size > max_width:
self.column_header.blockSignals(True)
self.setColumnWidth(col, max_width)
self.column_header.blockSignals(False)
# }}}
# Initialization/Delegate Setup {{{
def set_database(self, db):
self.save_state()
self._model.set_database(db)
self.tags_delegate.set_database(db)
self.cc_names_delegate.set_database(db)
self.authors_delegate.set_database(db)
self.series_delegate.set_auto_complete_function(db.all_series)
self.publisher_delegate.set_auto_complete_function(db.all_publishers)
def database_changed(self, db):
for i in range(self.model().columnCount(None)):
if self.itemDelegateForColumn(i) in (self.rating_delegate,
self.timestamp_delegate, self.pubdate_delegate,
self.last_modified_delegate, self.languages_delegate):
self.setItemDelegateForColumn(i, self.itemDelegate())
cm = self.column_map
for colhead in cm:
if self._model.is_custom_column(colhead):
cc = self._model.custom_columns[colhead]
if cc['datatype'] == 'datetime':
delegate = CcDateDelegate(self)
delegate.set_format(cc['display'].get('date_format',''))
self.setItemDelegateForColumn(cm.index(colhead), delegate)
elif cc['datatype'] == 'comments':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_comments_delegate)
elif cc['datatype'] == 'text':
if cc['is_multiple']:
if cc['display'].get('is_names', False):
self.setItemDelegateForColumn(cm.index(colhead),
self.cc_names_delegate)
else:
self.setItemDelegateForColumn(cm.index(colhead),
self.tags_delegate)
else:
self.setItemDelegateForColumn(cm.index(colhead), self.cc_text_delegate)
elif cc['datatype'] == 'series':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_text_delegate)
elif cc['datatype'] in ('int', 'float'):
self.setItemDelegateForColumn(cm.index(colhead), self.cc_number_delegate)
elif cc['datatype'] == 'bool':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_bool_delegate)
elif cc['datatype'] == 'rating':
self.setItemDelegateForColumn(cm.index(colhead), self.rating_delegate)
elif cc['datatype'] == 'composite':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_template_delegate)
elif cc['datatype'] == 'enumeration':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_enum_delegate)
else:
dattr = colhead+'_delegate'
delegate = colhead if hasattr(self, dattr) else 'text'
self.setItemDelegateForColumn(cm.index(colhead), getattr(self,
delegate+'_delegate'))
self.restore_state()
self.set_ondevice_column_visibility()
#}}}
# Context Menu {{{
def set_context_menu(self, menu, edit_collections_action):
self.setContextMenuPolicy(Qt.DefaultContextMenu)
self.context_menu = menu
self.edit_collections_action = edit_collections_action
def contextMenuEvent(self, event):
self.context_menu.popup(event.globalPos())
event.accept()
# }}}
# Drag 'n Drop {{{
@classmethod
def paths_from_event(cls, event):
'''
Accept a drop event and return a list of paths that can be read from
and represent files with extensions.
'''
md = event.mimeData()
if md.hasFormat('text/uri-list') and not \
md.hasFormat('application/calibre+from_library'):
urls = [unicode(u.toLocalFile()) for u in md.urls()]
return [u for u in urls if os.path.splitext(u)[1] and
os.path.exists(u)]
def drag_icon(self, cover, multiple):
cover = cover.scaledToHeight(120, Qt.SmoothTransformation)
if multiple:
base_width = cover.width()
base_height = cover.height()
base = QImage(base_width+21, base_height+21,
QImage.Format_ARGB32_Premultiplied)
base.fill(QColor(255, 255, 255, 0).rgba())
p = QPainter(base)
rect = QRect(20, 0, base_width, base_height)
p.fillRect(rect, QColor('white'))
p.drawRect(rect)
rect.moveLeft(10)
rect.moveTop(10)
p.fillRect(rect, QColor('white'))
p.drawRect(rect)
rect.moveLeft(0)
rect.moveTop(20)
p.fillRect(rect, QColor('white'))
p.save()
p.setCompositionMode(p.CompositionMode_SourceAtop)
p.drawImage(rect.topLeft(), cover)
p.restore()
p.drawRect(rect)
p.end()
cover = base
return QPixmap.fromImage(cover)
def drag_data(self):
m = self.model()
db = m.db
rows = self.selectionModel().selectedRows()
selected = list(map(m.id, rows))
ids = ' '.join(map(str, selected))
md = QMimeData()
md.setData('application/calibre+from_library', ids)
fmt = prefs['output_format']
def url_for_id(i):
try:
ans = db.format_path(i, fmt, index_is_id=True)
except:
ans = None
if ans is None:
fmts = db.formats(i, index_is_id=True)
if fmts:
fmts = fmts.split(',')
else:
fmts = []
for f in fmts:
try:
ans = db.format_path(i, f, index_is_id=True)
except:
ans = None
if ans is None:
ans = db.abspath(i, index_is_id=True)
return QUrl.fromLocalFile(ans)
md.setUrls([url_for_id(i) for i in selected])
drag = QDrag(self)
col = self.selectionModel().currentIndex().column()
md.column_name = self.column_map[col]
drag.setMimeData(md)
cover = self.drag_icon(m.cover(self.currentIndex().row()),
len(selected) > 1)
drag.setHotSpot(QPoint(-15, -15))
drag.setPixmap(cover)
return drag
def event_has_mods(self, event=None):
mods = event.modifiers() if event is not None else \
QApplication.keyboardModifiers()
return mods & Qt.ControlModifier or mods & Qt.ShiftModifier
def mousePressEvent(self, event):
ep = event.pos()
if self.indexAt(ep) in self.selectionModel().selectedIndexes() and \
event.button() == Qt.LeftButton and not self.event_has_mods():
self.drag_start_pos = ep
return QTableView.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if not self.drag_allowed:
return
if self.drag_start_pos is None:
return QTableView.mouseMoveEvent(self, event)
if self.event_has_mods():
self.drag_start_pos = None
return
if not (event.buttons() & Qt.LeftButton) or \
(event.pos() - self.drag_start_pos).manhattanLength() \
< QApplication.startDragDistance():
return
index = self.indexAt(event.pos())
if not index.isValid():
return
drag = self.drag_data()
drag.exec_(Qt.CopyAction)
self.drag_start_pos = None
def dragEnterEvent(self, event):
if int(event.possibleActions() & Qt.CopyAction) + \
int(event.possibleActions() & Qt.MoveAction) == 0:
return
paths = self.paths_from_event(event)
if paths:
event.acceptProposedAction()
def dragMoveEvent(self, event):
event.acceptProposedAction()
def dropEvent(self, event):
paths = self.paths_from_event(event)
event.setDropAction(Qt.CopyAction)
event.accept()
self.files_dropped.emit(paths)
# }}}
@property
def column_map(self):
return self._model.column_map
def refresh_book_details(self):
idx = self.currentIndex()
if idx.isValid():
self._model.current_changed(idx, idx)
def scrollContentsBy(self, dx, dy):
# Needed as Qt bug causes headerview to not always update when scrolling
QTableView.scrollContentsBy(self, dx, dy)
if dy != 0:
self.column_header.update()
def scroll_to_row(self, row):
if row > -1:
h = self.horizontalHeader()
for i in range(h.count()):
if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0:
self.scrollTo(self.model().index(row, i), self.PositionAtCenter)
break
def set_current_row(self, row, select=True):
if row > -1 and row < self.model().rowCount(QModelIndex()):
h = self.horizontalHeader()
logical_indices = list(range(h.count()))
logical_indices = [x for x in logical_indices if not
h.isSectionHidden(x)]
pairs = [(x, h.visualIndex(x)) for x in logical_indices if
h.visualIndex(x) > -1]
if not pairs:
pairs = [(0, 0)]
pairs.sort(cmp=lambda x,y:cmp(x[1], y[1]))
i = pairs[0][0]
index = self.model().index(row, i)
self.setCurrentIndex(index)
if select:
sm = self.selectionModel()
sm.select(index, sm.ClearAndSelect|sm.Rows)
def ids_to_rows(self, ids):
row_map = OrderedDict()
ids = frozenset(ids)
m = self.model()
for row in xrange(m.rowCount(QModelIndex())):
if len(row_map) >= len(ids): break
c = m.id(row)
if c in ids:
row_map[c] = row
return row_map
def select_rows(self, identifiers, using_ids=True, change_current=True,
scroll=True):
'''
Select rows identified by identifiers. identifiers can be a set of ids,
row numbers or QModelIndexes.
'''
rows = set([x.row() if hasattr(x, 'row') else x for x in
identifiers])
if using_ids:
rows = set([])
identifiers = set(identifiers)
m = self.model()
for row in xrange(m.rowCount(QModelIndex())):
if m.id(row) in identifiers:
rows.add(row)
rows = list(sorted(rows))
if rows:
row = rows[0]
if change_current:
self.set_current_row(row, select=False)
if scroll:
self.scroll_to_row(row)
sm = self.selectionModel()
sel = QItemSelection()
m = self.model()
max_col = m.columnCount(QModelIndex()) - 1
# Create a range based selector for each set of contiguous rows
# as supplying selectors for each individual row causes very poor
# performance if a large number of rows has to be selected.
for k, g in itertools.groupby(enumerate(rows), lambda (i,x):i-x):
group = list(map(operator.itemgetter(1), g))
sel.merge(QItemSelection(m.index(min(group), 0),
m.index(max(group), max_col)), sm.Select)
sm.select(sel, sm.ClearAndSelect)
def get_selected_ids(self):
ans = []
m = self.model()
for idx in self.selectedIndexes():
r = idx.row()
i = m.id(r)
if i not in ans:
ans.append(i)
return ans
@dynamic_property
def current_id(self):
def fget(self):
try:
return self.model().id(self.currentIndex())
except:
pass
return None
def fset(self, val):
if val is None: return
m = self.model()
for row in xrange(m.rowCount(QModelIndex())):
if m.id(row) == val:
self.set_current_row(row, select=False)
break
return property(fget=fget, fset=fset)
@property
def next_id(self):
'''
Return the id of the 'next' row (i.e. the first unselected row after
the current row).
'''
ci = self.currentIndex()
if not ci.isValid():
return None
selected_rows = frozenset([i.row() for i in self.selectedIndexes() if
i.isValid()])
column = ci.column()
for i in xrange(ci.row()+1, self.row_count()):
if i in selected_rows: continue
try:
return self.model().id(self.model().index(i, column))
except:
pass
# No unselected rows after the current row, look before
for i in xrange(ci.row()-1, -1, -1):
if i in selected_rows: continue
try:
return self.model().id(self.model().index(i, column))
except:
pass
return None
def close(self):
self._model.close()
def set_editable(self, editable, supports_backloading):
self._model.set_editable(editable)
def move_highlighted_row(self, forward):
rows = self.selectionModel().selectedRows()
if len(rows) > 0:
current_row = rows[0].row()
else:
current_row = None
id_to_select = self._model.get_next_highlighted_id(current_row, forward)
if id_to_select is not None:
self.select_rows([id_to_select], using_ids=True)
def search_proxy(self, txt):
self._model.search(txt)
id_to_select = self._model.get_current_highlighted_id()
if id_to_select is not None:
self.select_rows([id_to_select], using_ids=True)
elif self._model.highlight_only:
self.clearSelection()
self.setFocus(Qt.OtherFocusReason)
def connect_to_search_box(self, sb, search_done):
sb.search.connect(self.search_proxy)
self._search_done = search_done
self._model.searched.connect(self.search_done)
def connect_to_book_display(self, bd):
self._model.new_bookdisplay_data.connect(bd)
def search_done(self, ok):
self._search_done(self, ok)
def row_count(self):
return self._model.count()
# }}}
class DeviceBooksView(BooksView): # {{{
def __init__(self, parent):
BooksView.__init__(self, parent, DeviceBooksModel,
use_edit_metadata_dialog=False)
self.can_add_columns = False
self.columns_resized = False
self.resize_on_select = False
self.rating_delegate = None
for i in range(10):
self.setItemDelegateForColumn(i, TextDelegate(self))
self.setDragDropMode(self.NoDragDrop)
self.setAcceptDrops(False)
def drag_data(self):
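        # Build a QDrag for the selected device books, carrying their file paths
        # as URLs and using a cover thumbnail as the drag pixmap.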
m = self.model()
rows = self.selectionModel().selectedRows()
paths = [force_unicode(p, enc=filesystem_encoding) for p in m.paths(rows) if p]
md = QMimeData()
md.setData('application/calibre+from_device', 'dummy')
md.setUrls([QUrl.fromLocalFile(p) for p in paths])
drag = QDrag(self)
drag.setMimeData(md)
cover = self.drag_icon(m.cover(self.currentIndex().row()), len(paths) >
1)
drag.setHotSpot(QPoint(-15, -15))
drag.setPixmap(cover)
return drag
def contextMenuEvent(self, event):
edit_collections = callable(getattr(self._model.db, 'supports_collections', None)) and \
self._model.db.supports_collections() and \
prefs['manage_device_metadata'] == 'manual'
self.edit_collections_action.setVisible(edit_collections)
self.context_menu.popup(event.globalPos())
event.accept()
def get_old_state(self):
ans = None
name = unicode(self.objectName())
if name:
name += ' books view state'
ans = gprefs.get(name, None)
return ans
def write_state(self, state):
name = unicode(self.objectName())
if name:
gprefs.set(name + ' books view state', state)
def set_database(self, db):
self._model.set_database(db)
self.restore_state()
def resizeColumnsToContents(self):
QTableView.resizeColumnsToContents(self)
self.columns_resized = True
def connect_dirtied_signal(self, slot):
self._model.booklist_dirtied.connect(slot)
def connect_upload_collections_signal(self, func=None, oncard=None):
self._model.upload_collections.connect(partial(func, view=self, oncard=oncard))
def dropEvent(self, *args):
error_dialog(self, _('Not allowed'),
_('Dropping onto a device is not supported. First add the book to the calibre library.')).exec_()
def set_editable(self, editable, supports_backloading):
self._model.set_editable(editable)
self.drag_allowed = supports_backloading
# }}}
|
gpl-3.0
| 4,215,792,900,672,243,700
| 38.230542
| 105
| 0.555815
| false
| 4.030671
| false
| false
| false
|
dradux/tracker
|
web/migrations/versions/95ecf01d9cb4_add_test_result_status_items.py
|
1
|
1277
|
"""add test_result_status items
Revision ID: 95ecf01d9cb4
Revises: ea71f73f5460
Create Date: 2017-03-29 19:41:26.581925
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '95ecf01d9cb4'
down_revision = 'ea71f73f5460'
branch_labels = None
depends_on = None
def upgrade():
#~ op.bulk_insert('test_result_status',
#~ [
#~ {'status': 'Created'},
#~ {'status': 'Completed'},
#~ {'status': 'Failed'}
#~ ]
#~ )
op.execute("INSERT INTO test_result_status (status) VALUES ('Created')")
op.execute("INSERT INTO test_result_status (status) VALUES ('Completed')")
op.execute("INSERT INTO test_result_status (status) VALUES ('Failed')")
op.execute("UPDATE test_result SET status_id=(SELECT id FROM test_result_status where status='Created') WHERE test_passed is null")
op.execute("UPDATE test_result SET status_id=(SELECT id FROM test_result_status where status='Completed') WHERE test_passed=true")
op.execute("UPDATE test_result SET status_id=(SELECT id FROM test_result_status where status='Failed') WHERE test_passed=false")
def downgrade():
op.execute("delete from test_result_status where status in('Created', 'Completed', 'Failed')")
|
gpl-3.0
| 2,511,160,009,906,113,000
| 33.513514
| 135
| 0.680501
| false
| 3.607345
| true
| false
| false
|
noemu/script.example-master
|
default.py
|
1
|
7991
|
# https://docs.python.org/2.7/
import os
import sys
import urllib
import urlparse
# http://mirrors.kodi.tv/docs/python-docs/
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
# http://docs.python-requests.org/en/latest/
import requests
from threading import Thread
import time
class PlayerWindow(xbmcgui.WindowXML):
LABEL_ARTIST = 802
LABEL_TITEL = 801
LABEL_ALBUM = 803
IMG_ALBUM = 800
SLIDER_VOL = 815
BUTTON_SHUFFLE = 817
BUTTON_SHUFFLE_ACT = 818
BUTTON_REPEAT = 819
BUTTON_REPEAT_ACT = 819
BUTTON_BACK = 809
BUTTON_PLAY = 811
BUTTON_PAUSE = 812
BUTTON_FOR = 813
BUTTON_VOL_UP = 816
BUTTON_VOL_DOWN = 814
def __init__(self, *args, **kwargs):
self.isRunning = True
self.volume = 100
def onAction(self , action):
ACTION_PREVIOUS_MENU = 10
ACTION_NAV_BACK = 92
ACTION_UP = 3
ACTION_DOWN = 4
ACTION_LEFT = 1
ACTION_RIGHT = 2
ACTION_MIDDLE = 7
ACTION_PAUSE = 12
ACTION_STOP = 13
ACTION_NEXT_ITEM = 14
ACTION_PREV_ITEM = 15
ACTION_FORWARD = 16
ACTION_REWIND = 17
ACTION_PLAYER_FORWARD = 77
ACTION_PLAYER_REWIND = 78
ACTION_PLAYER_PLAY = 79
ACTION_VOLUME_UP = 88
ACTION_VOLUME_DOWN = 89
ACTION_MUTE = 91
ACTION_PAGE_UP = 5
ACTION_PAGE_DOWN = 6
#ids = str(action.getId())
#xbmc.log(ids)
if (action == ACTION_PREVIOUS_MENU) or (action == ACTION_NAV_BACK):
xbmcgui.Window(10000).setProperty("spotify-closed-by-user","true")
self.isRunning = False
self.close()
if (action == ACTION_LEFT) or (action == ACTION_RIGHT):
self.volSlider = self.getControl(self.SLIDER_VOL)
volume = self.volSlider.getPercent()
setVol(volume)
if(action == ACTION_PLAYER_PLAY) or (action == ACTION_PAUSE):
if(self.playing):
getSite(pause)
else:
getSite(play)
if (action == ACTION_VOLUME_UP):
self.volume = self.volume + 3
if(self.volume > 100):
self.volume = 100
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (action == ACTION_VOLUME_DOWN):
self.volume = self.volume- 3
if(self.volume < 0):
self.volume = 0
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (action == ACTION_FORWARD) or (action == ACTION_PLAYER_FORWARD) or (action == ACTION_NEXT_ITEM) or (action == ACTION_PAGE_UP):
getSite(next)
if (action == ACTION_REWIND) or (action == ACTION_PLAYER_REWIND) or (action == ACTION_PREV_ITEM) or (action == ACTION_PAGE_DOWN):
getSite(prev)
if(action == ACTION_STOP):
getSite(pause)
def onClick(self, controlID):
if (controlID == self.BUTTON_PAUSE) or (controlID == self.BUTTON_PLAY):
if(self.playing):
getSite(pause)
else:
getSite(play)
if (controlID == self.BUTTON_VOL_UP):
self.volume = self.volume + 3
if(self.volume > 100):
self.volume = 100
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (controlID == self.BUTTON_VOL_DOWN):
self.volume = self.volume- 3
if(self.volume < 0):
self.volume = 0
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (controlID == self.BUTTON_FOR):
getSite(next)
if (controlID == self.BUTTON_BACK):
getSite(prev)
def updateLabels(self, information):
self.albumCover = self.getControl(self.IMG_ALBUM)
self.titleLabel = self.getControl(self.LABEL_TITEL)
self.artistLabel = self.getControl(self.LABEL_ARTIST)
self.albumName = self.getControl(self.LABEL_ALBUM)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.playing = information['playing']
self.titleLabel.setLabel(information['track_name'])
self.albumName.setLabel(information['album_name'])
self.artistLabel.setLabel( information['artist_name'])
self.albumCover.setImage(information['cover_url'])
self.volume = int(information['volume'])/655.35
self.volSlider.setPercent(self.volume)
self.getControl(self.BUTTON_PLAY).setVisible(not self.playing)
self.getControl(self.BUTTON_SHUFFLE).setVisible(not information['shuffle'])
self.getControl(self.BUTTON_REPEAT).setVisible(not information['repeat'])
def getSite(url):
    # Query the local Spotify web API; log and re-raise connection errors so
    # callers never receive a broken response object.
    try:
        rq = requests.get(url)
    except requests.exceptions.RequestException as e:
        xbmc.log("Error: request to %s failed (%s)" % (url, e))
        raise
    return rq
def getInfo():
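    # Merge track metadata and playback status (playing/shuffle/repeat) from the
    # local web API into a single dict, adding a full cover image URL.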
information = getSite(info).json()
statusInfo = getSite(status).json()
playing = statusInfo['playing']
shuffleInfo = statusInfo['shuffle']
repeatInfo = statusInfo['repeat']
coverURL = "http://o.scdn.co/160/"+information['cover_uri'].split(':')[-1]
information['cover_url'] = coverURL
information['playing'] = playing
information['shuffle'] = shuffleInfo
information['repeat'] = repeatInfo
return information
def downloadCover(url):
urllib.urlretrieve(url,'/tmp/spotAlCov.png')
def setVol(value):
value = int(round(value* 655.35))
jsonPost = {'value': value}
requests.post(volume,data=jsonPost)
def updateInfo(name,window):
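    # Background thread: poll the web API every couple of seconds, push the
    # current track info into the player window, and periodically simulate
    # input so the screensaver does not kick in while music is playing.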
screensaverDelay = 30
screensaverCount = 0
updateInterval = 2
while True:
try:
window.getControl(800)
break
except Exception:
xbmc.log("Error: can't find Window, try again")
time.sleep(1) # maybe fix for can't find window id's
while window.isRunning and (not xbmc.abortRequested):
information = getInfo()
window.updateLabels(information)
time.sleep(updateInterval)
screensaverCount = screensaverCount + updateInterval
if(screensaverCount>screensaverDelay) and information['playing']:
#wakeup from screensaver by simulating a button activity
json_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Input.ContextMenu", "id": 1}')
screensaverCount = 0
def main():
pw = PlayerWindow("player.xml",CWD)
#xbmcgui.Window( 10000 )
t1 = Thread(target=updateInfo,args=("1",pw))
t1.setDaemon( True)
t1.start()
xbmcgui.Window(10000).setProperty("spotify-showing", "true")
pw.doModal()
xbmcgui.Window(10000).clearProperty("spotify-showing")
del t1
del pw
if __name__ == '__main__':
page = 'http://127.0.0.1:4000'
apiPlayback = '/api/playback'
play = page+apiPlayback+'/play'
pause = page+apiPlayback+'/pause'
prev = page+apiPlayback+'/prev'
next = page+apiPlayback+'/next'
volume = page+apiPlayback+'/volume'
shuffle = page+apiPlayback+'/shuffle'
repeat = page+apiPlayback+'/repeat'
info = page+'/api/info/metadata'
status = page+'/api/info/status'
ADDON = xbmcaddon.Addon(id='plugin.audio.example')
CWD = ADDON.getAddonInfo('path').decode("utf-8")
main()
|
gpl-2.0
| -3,525,701,916,214,955,000
| 27.641577
| 137
| 0.568014
| false
| 3.867861
| false
| false
| false
|
Endika/manufacture
|
mrp_operations_time_control/models/operation_time.py
|
1
|
2588
|
# -*- coding: utf-8 -*-
# © 2015 Avanzosc
# © 2015 Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import api, models, fields
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
operation_time_lines = fields.One2many(
'operation.time.line', 'operation_time', string='Operation Time Lines')
def _create_operation_line(self):
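        # Open a new time line for this work order, starting now, for the
        # current user.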
self.env['operation.time.line'].create({
'start_date': fields.Datetime.now(),
'operation_time': self.id,
'user': self.env.uid})
def _write_end_date_operation_line(self):
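        # Close the most recently opened time line by stamping its end date.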
self.operation_time_lines[-1].end_date = fields.Datetime.now()
@api.multi
def action_start_working(self):
result = super(MrpProductionWorkcenterLine,
self).action_start_working()
self._create_operation_line()
return result
@api.multi
def action_pause(self):
result = super(MrpProductionWorkcenterLine, self).action_pause()
self._write_end_date_operation_line()
return result
@api.multi
def action_resume(self):
result = super(MrpProductionWorkcenterLine, self).action_resume()
self._create_operation_line()
return result
@api.multi
def action_done(self):
not_paused_records = self.filtered(lambda x: x.state != 'pause')
result = super(MrpProductionWorkcenterLine, self).action_done()
not_paused_records._write_end_date_operation_line()
return result
class OperationTimeLine(models.Model):
_name = 'operation.time.line'
_rec_name = 'operation_time'
def _default_user(self):
return self.env.uid
start_date = fields.Datetime(string='Start Date')
end_date = fields.Datetime(string='End Date')
operation_time = fields.Many2one('mrp.production.workcenter.line')
uptime = fields.Float(
string='Machine up time', compute='_compute_uptime', store=True,
digits=(12, 6))
production = fields.Many2one(
'mrp.production', related='operation_time.production_id',
string='Production', store=True)
user = fields.Many2one('res.users', string='User', default=_default_user)
@api.one
@api.depends('start_date', 'end_date')
def _compute_uptime(self):
if self.end_date and self.start_date:
timedelta = fields.Datetime.from_string(self.end_date) - \
fields.Datetime.from_string(self.start_date)
self.uptime = timedelta.total_seconds() / 3600.
|
agpl-3.0
| -6,874,118,255,651,219,000
| 33.48
| 79
| 0.643852
| false
| 3.668085
| false
| false
| false
|
rikima/spark
|
python/pyspark/context.py
|
1
|
45234
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
        Distribute a local Python collection to form an RDD. Using xrange
        is recommended if the input represents a range, for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
jrdd = self._serialize_to_jvm(c, numSlices, serializer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, parallelism, serializer):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
objects are written to a file and loaded through textFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
return readRDDFromFile(self._jsc, tempFile.name, parallelism)
finally:
            # readRDDFromFile eagerly reads the file, so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
        .. note:: Small files are preferred; large files are also allowed, but
            may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
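        # Convert a Python dict into a java.util.HashMap so it can be passed to
        # the JVM as a Hadoop configuration.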
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to the sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
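        # Reload an RDD from a checkpoint directory, wrapping the Java RDD with
        # the given deserializer.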
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
        Get a local property set in this thread, or None if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
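        # Return a copy of this SparkContext's configuration as a new SparkConf.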
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
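    # Run this module's doctests against a temporary local SparkContext.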
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
| -2,844,523,567,311,684,600
| 41.713881
| 99
| 0.611376
| false
| 4.332344
| true
| false
| false
|
etienne-gauvin/music-player-core
|
examples/demo-console-player.py
|
1
|
3812
|
#!/usr/bin/env python
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import sys, os, random, fnmatch
# Our parent path might contain a self-built musicplayer module. Use that one.
sys.path = [os.path.abspath((os.path.dirname(__file__) or ".") + "/..")] + sys.path
import musicplayer
print "Module:", musicplayer.__file__
# ffmpeg log levels: {0:panic, 8:fatal, 16:error, 24:warning, 32:info, 40:verbose}
musicplayer.setFfmpegLogLevel(20)
try:
import better_exchook
better_exchook.install()
except ImportError: pass # doesn't matter
try:
import faulthandler
faulthandler.enable(all_threads=True)
except ImportError:
print "note: module faulthandler not available"
class Song:
def __init__(self, fn):
self.url = fn
self.f = open(fn)
def __eq__(self, other):
return self.url == other.url
def readPacket(self, bufSize):
s = self.f.read(bufSize)
#print "readPacket", self, bufSize, len(s)
return s
def seekRaw(self, offset, whence):
r = self.f.seek(offset, whence)
#print "seekRaw", self, offset, whence, r, self.f.tell()
return self.f.tell()
files = []
def getFiles(path):
for f in sorted(os.listdir(path), key=lambda k: random.random()):
f = os.path.join(path, f)
if os.path.isdir(f): getFiles(f) # recurse
if len(files) > 1000: break # break if we have enough
if fnmatch.fnmatch(f, '*.mp3'): files.append(f)
getFiles(os.path.expanduser("~/Music"))
random.shuffle(files) # shuffle some more
files = sys.argv[1:] + files
assert files, "give me some files or fill-up ~/Music"
i = 0
def songs():
global i, files
while True:
yield Song(files[i])
i += 1
if i >= len(files): i = 0
def peekSongs(n):
nexti = i + 1
if nexti >= len(files): nexti = 0
return map(Song, (files[nexti:] + files[:nexti])[:n])
player = musicplayer.createPlayer()
player.outSamplerate = 48000
player.queue = songs()
player.peekQueue = peekSongs
player.playing = True
def formatTime(t):
if t is None: return "?"
mins = long(t // 60)
t -= mins * 60
hours = mins // 60
mins -= hours * 60
if hours: return "%02i:%02i:%02.0f" % (hours,mins,t)
return "%02i:%02.0f" % (mins,t)
import termios
def prepareStdin():
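    # Switch the terminal to non-canonical, no-echo mode so single keypresses
    # are delivered immediately; the original settings are restored at exit.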
fd = sys.stdin.fileno()
if os.isatty(fd):
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
# http://www.unixguide.net/unix/programming/3.6.2.shtml
new[6][termios.VMIN] = 0
new[6][termios.VTIME] = 1
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd, 0)
import atexit
atexit.register(lambda: termios.tcsetattr(fd, termios.TCSANOW, old))
print "Console control:"
print " <space>: play / pause"
print " <left>/<right>: seek back/forward by 10 secs"
print " <return>: next song"
print " <q>: quit"
def getchar():
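    # Read up to 7 bytes at once so multi-byte escape sequences (arrow keys)
    # arrive as a single string.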
fd = sys.stdin.fileno()
ch = os.read(fd, 7)
return ch
prepareStdin()
while True:
sys.stdout.write("\r\033[K") # clear line
if player.playing: sys.stdout.write("playing, ")
else: sys.stdout.write("paused, ")
curSong = player.curSong
if curSong:
url = os.path.basename(curSong.url)
if len(url) > 40: url = url[:37] + "..."
sys.stdout.write(
url + " : " +
formatTime(player.curSongPos) + " / " +
formatTime(player.curSongLen))
else:
sys.stdout.write("no song")
ch = getchar()
if ch == "\x1b[D": # left
player.seekRel(-10)
elif ch == "\x1b[C": #right
player.seekRel(10)
elif ch == "\x1b[A": #up
pass
elif ch == "\x1b[B": #down
pass
elif ch == "\n": # return
player.nextSong()
elif ch == " ":
player.playing = not player.playing
elif ch == "q":
print
sys.exit(0)
sys.stdout.flush()
|
bsd-2-clause
| 4,915,947,774,121,647,000
| 24.245033
| 101
| 0.658447
| false
| 2.647222
| false
| false
| false
|
tmct/adventOfCode2016
|
problems/11/State.py
|
1
|
8576
|
number_of_levels = 4
from itertools import cycle, islice, combinations
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
class State:
def __init__(self, lift_level, chip_levels, generator_levels):
self.lift_level = lift_level
self.chip_levels = chip_levels
self.generator_levels = generator_levels
self._lift_level_gens = None
self._lift_level_chips = None
self._is_safe = None
@property
def is_safe(self):
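        # A state is safe iff no unpowered chip (one whose matching generator is
        # on a different level) shares a level with any generator.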
if self._is_safe is None:
levels_with_generators = self.get_generator_levels()
unpowered_chips = self.get_unpowered_chips()
unpowered_chip_levels = self.get_unpowered_chip_levels(unpowered_chips)
self._is_safe = levels_with_generators.isdisjoint(unpowered_chip_levels)
return self._is_safe
def get_unpowered_chip_levels(self, unpowered_chips):
return {chip_level for index, chip_level in enumerate(self.chip_levels) if
unpowered_chips[index]}
def get_unpowered_chips(self):
return [chip_level != generator_level for chip_level, generator_level in
zip(self.chip_levels, self.generator_levels)]
def get_generator_levels(self):
return {level_index for level_index in range(number_of_levels) if
level_index in self.generator_levels}
def adjacent_safe_states(self):
self._lift_level_gens = {generator for generator, level in enumerate(self.generator_levels) if
level == self.lift_level}
self._lift_level_chips = {chip for chip, level in enumerate(self.chip_levels) if level == self.lift_level}
        # Move the lift up or down (up first), taking one or two items from its
        # current level, and keep only the safe resulting states.
return {state for state in self.adjacent_states if state.is_safe}
@property
def adjacent_states(self):
        # Move the lift up or down (up first), taking one or two items from its
        # current level; safety is filtered by the caller.
return set(self.adjacent_up_states() + self.adjacent_down_states()) # todo remove most sets
def adjacent_up_states(self):
next_lift_level = self.lift_level + 1
if next_lift_level == number_of_levels:
return []
return self.raise_single_chip_states(self._lift_level_chips) + self.raise_single_gen_states(
self._lift_level_gens) + self.raise_double_chip_states(
self._lift_level_chips) + self.raise_double_gen_states(
self._lift_level_gens) + self.raise_chip_and_gen_states(self._lift_level_chips, self._lift_level_gens)
def raise_single_chip_states(self, chips):
return [self.raise_chip(chip) for chip in chips]
def raise_double_chip_states(self, chips):
return [self.raise_two_chips(chip1, chip2) for chip1, chip2 in combinations(chips, 2)]
def raise_double_gen_states(self, gens):
return [self.raise_two_gens(gen1, gen2) for gen1, gen2 in combinations(gens, 2)]
def raise_two_gens(self, gen1, gen2):
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen1] += 1
new_generator_levels[gen2] += 1
return State(self.lift_level + 1,
self.chip_levels,
tuple(new_generator_levels))
def raise_two_chips(self, chip1, chip2):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip1] += 1
new_chip_levels[chip2] += 1
return State(self.lift_level + 1,
tuple(new_chip_levels),
self.generator_levels)
def raise_single_gen_states(self, gens):
return [self.raise_generator(generator) for generator in gens]
def raise_chip_and_gen_states(self, chips, gens):
return [self.raise_chip_and_gen(chip, gen) for gen in gens for chip in chips]
def raise_chip(self, chip):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] += 1
return State(self.lift_level + 1,
tuple(new_chip_levels),
self.generator_levels)
def raise_generator(self, generator):
new_generator_levels = list(self.generator_levels)
new_generator_levels[generator] += 1
return State(self.lift_level + 1,
self.chip_levels,
tuple(new_generator_levels))
def raise_chip_and_gen(self, chip, gen):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] += 1
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen] += 1
return State(self.lift_level + 1,
tuple(new_chip_levels),
tuple(new_generator_levels))
def __repr__(self):
res = ''
for level in range(number_of_levels):
res += str(level + 1) + ' '
lift_char = '.'
if self.lift_level == number_of_levels - level - 1:
lift_char = 'E'
res += lift_char + ' '
for value in roundrobin(self.generator_levels, self.chip_levels):
char = '.'
if value == number_of_levels - level - 1:
char = '*'
res += char + ' '
res += '\n'
return res
def adjacent_down_states(self):
next_lift_level = self.lift_level - 1
if next_lift_level == -1:
return []
return self.lower_single_chip_states(self._lift_level_chips) + self.lower_single_gen_states(
self._lift_level_gens) + self.lower_double_chip_states(
self._lift_level_chips) + self.lower_double_gen_states(
self._lift_level_gens) + self.lower_chip_and_gen_states(self._lift_level_chips, self._lift_level_gens)
def lower_single_chip_states(self, chips):
return [self.lower_chip(chip) for chip in chips]
def lower_double_chip_states(self, chips):
return [self.lower_two_chips(chip1, chip2) for chip1, chip2 in combinations(chips, 2)]
def lower_double_gen_states(self, gens):
return [self.lower_two_gens(gen1, gen2) for gen1, gen2 in combinations(gens, 2)]
def lower_two_gens(self, gen1, gen2):
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen1] -= 1
new_generator_levels[gen2] -= 1
return State(self.lift_level - 1,
self.chip_levels,
tuple(new_generator_levels))
def lower_two_chips(self, chip1, chip2):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip1] -= 1
new_chip_levels[chip2] -= 1
return State(self.lift_level - 1,
tuple(new_chip_levels),
self.generator_levels)
def lower_single_gen_states(self, gens):
return [self.lower_generator(generator) for generator in gens]
def lower_chip_and_gen_states(self, chips, gens):
return [self.lower_chip_and_gen(chip, gen) for gen in gens for chip in chips]
def lower_chip(self, chip):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] -= 1
return State(self.lift_level - 1,
tuple(new_chip_levels),
self.generator_levels)
def lower_generator(self, generator):
new_generator_levels = list(self.generator_levels)
new_generator_levels[generator] -= 1
return State(self.lift_level - 1,
self.chip_levels,
tuple(new_generator_levels))
def lower_chip_and_gen(self, chip, gen):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] -= 1
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen] -= 1
return State(self.lift_level - 1,
tuple(new_chip_levels),
tuple(new_generator_levels))
def __key(self):
return self.lift_level, tuple(sorted(zip(self.chip_levels, self.generator_levels)))
def __eq__(self, other):
return self.__key() == other.__key()
def __hash__(self):
return hash(self.__key())
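# Hedged usage sketch (not part of the original file): a breadth-first search
# over safe states, assuming the goal is to bring every chip and generator to
# the top floor. The starting layout below is made up for illustration only.
if __name__ == "__main__":
    from collections import deque
    start = State(0, (0, 0), (1, 2))  # hypothetical initial positions
    goal = number_of_levels - 1
    seen = {start}
    queue = deque([(start, 0)])
    while queue:
        current, steps = queue.popleft()
        if (all(l == goal for l in current.chip_levels)
                and all(l == goal for l in current.generator_levels)):
            print(steps)  # minimum number of lift moves found by the BFS
            break
        for nxt in current.adjacent_safe_states():
            if nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, steps + 1))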
|
mit
| 1,454,731,120,773,989,400
| 39.074766
| 114
| 0.596082
| false
| 3.59129
| false
| false
| false
|
mfxox/ILCC
|
ILCC/utility.py
|
1
|
31390
|
# coding=utf-8
'''
Created on 3/20/2017 8:58 57PM Wang Weimin
@author: wangwm
'''
import os
from pcd_corners_est import exact_full_marker_data, generate_grid_coords
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import vtk
import config
from ast import literal_eval as make_tuple
import cPickle
import cv2
from LM_opt import xyz2angle, voxel2pixel
import transforms3d
from matplotlib.pyplot import cm
import ast
from sklearn.decomposition import PCA
import matplotlib.path as mplPath
import warnings
params = config.default_params()
marker_size = make_tuple(params["pattern_size"])
(H, W) = make_tuple(params['image_res'])
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
plt.style.use("ggplot")
axis_font = {'fontname': 'Arial', 'size': '35'}
def draw_one_grd_vtk(ls): # arr:[a,b,c,d],a:orig, b, point1, c,point 2, d,color
source = vtk.vtkPlaneSource()
source.SetOrigin(ls[0])
source.SetPoint1(ls[1])
source.SetPoint2(ls[2])
source.Update()
# source.SetPoint1(0, 0, 0)
# source.SetPoint2(4, 3, 0)
# mapper
mapper = vtk.vtkPolyDataMapper()
color = vtk.vtkUnsignedCharArray()
color.SetName("colors")
color.SetNumberOfComponents(3)
# color_tup = np.random.randint(1, 255, 3)
color.SetNumberOfTuples(source.GetOutput().GetNumberOfCells())
for i in xrange(source.GetOutput().GetNumberOfCells()):
color_tup = np.array([255, 255, 255]) * ls[3]
color.InsertTuple(i, color_tup)
source.GetOutput().GetCellData().SetScalars(color)
mapper.SetInputConnection(source.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
# ren.AddActor(actor)
return actor
# generate the color list of the point cloud for different color styles. intens_rg: color by reflectance intensity (red: high, blue: low),
# intens: grayscale by reflectance intensity (white: high, black: low), autumn: matplotlib autumn color map, cool: matplotlib cool color map
def gen_color_tup_for_vis(color_style="intens_rg", xyzi_arr=None):
assert xyzi_arr is not None, "The array of the point cloud must be not None"
a = xyzi_arr[:, params['intensity_col_ind']].min()
b = xyzi_arr[:, params['intensity_col_ind']].max()
color_ls = []
if color_style == "intens_rg":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a) / (b - a) * 255
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = np.array([tmp[k], 0, 255 - xyzi_arr[k, params['intensity_col_ind']]]).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "intens":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a) / (b - a) * 255
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = np.repeat(tmp[k], 3).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "autumn":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a).astype(np.float32) / (b - a)
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = (np.array(plt.cm.autumn(1 - tmp[k]))[:3] * 255).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "cool":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a).astype(np.float32) / (b - a)
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = (np.array(plt.cm.cool(tmp[k]))[:3] * 255).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "monochrome":
# color = (np.random.randint(0, 255, 3)).tolist()
color = [46, 204, 113]
for k in xrange(xyzi_arr.shape[0]):
color_ls.append(color)
return color_ls
elif color_style == "by_height":
low_height = xyzi_arr[:, 2].min()
high_height = xyzi_arr[:, 2].max()
tmp = 0.0 + 0.7 * (xyzi_arr[:, 2] - low_height) / (high_height - low_height)
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = (np.array(plt.cm.hsv(tmp[k]))[:3] * 255).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
else:
raise ValueError('Input color type is not correct!')
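# Illustrative usage (not in the original file): for a point-cloud array `xyzi`
# whose intensity column index is params['intensity_col_ind'],
#   colors = gen_color_tup_for_vis("autumn", xyzi_arr=xyzi)
# returns one [r, g, b] triple (0-255 integers) per point, in row order.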
# visualize 3D points with specified color style
def vis_3D_points(full_lidar_arr, color_style="intens_rg"):
all_rows = full_lidar_arr.shape[0]
Colors = vtk.vtkUnsignedCharArray()
Colors.SetNumberOfComponents(3)
Colors.SetName("Colors")
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
tuple_ls = gen_color_tup_for_vis(color_style, xyzi_arr=full_lidar_arr)
for k in xrange(all_rows):
point = full_lidar_arr[k, :3]
id = Points.InsertNextPoint(point[0], point[1], point[2])
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(id)
rgb_tuple = tuple_ls[k]
if vtk.VTK_MAJOR_VERSION >= 7:
Colors.InsertNextTuple(rgb_tuple)
else:
Colors.InsertNextTupleValue(rgb_tuple)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
polydata.SetVerts(Vertices)
polydata.GetPointData().SetScalars(Colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION < 6:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetColorModeToDefault()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(8)
return actor
# visualize 3D points with specified color array
def vis_pcd_color_arr(array_data, color_arr=[46, 204, 113]):
all_rows = array_data.shape[0]
Colors = vtk.vtkUnsignedCharArray()
Colors.SetNumberOfComponents(3)
Colors.SetName("Colors")
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for k in xrange(all_rows):
point = array_data[k, :]
id = Points.InsertNextPoint(point[0], point[1], point[2])
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(id)
if vtk.VTK_MAJOR_VERSION >= 7:
Colors.InsertNextTuple(color_arr)
else:
Colors.InsertNextTupleValue(color_arr)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
polydata.SetVerts(Vertices)
polydata.GetPointData().SetScalars(Colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetColorModeToDefault()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(10)
return actor
# visualize with actor:
def vis_with_renderer(renderer):
# Renderer
# renderer.SetBackground(.2, .3, .4)
renderer.SetBackground(1, 1, 1)
renderer.ResetCamera()
transform = vtk.vtkTransform()
transform.Translate(1.0, 0.0, 0.0)
axes = vtk.vtkAxesActor()
renderer.AddActor(axes)
# Render Window
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
# Interactor
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
def get_camera_info(obj, ev):
if renderWindowInteractor.GetKeyCode() == "s":
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renderWindow)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName("screenshot.png")
if vtk.VTK_MAJOR_VERSION == 5:
writer.SetInput(w2if.GetOutput())
else:
writer.SetInputData(w2if.GetOutput())
writer.Write()
print "screenshot saved"
style = vtk.vtkInteractorStyleSwitch()
renderWindowInteractor.SetInteractorStyle(style)
# style.SetCurrentStyleToTrackballActor()
style.SetCurrentStyleToTrackballCamera()
# Begin Interaction
renderWindowInteractor.AddObserver(vtk.vtkCommand.KeyPressEvent, get_camera_info, 1)
renderWindow.Render()
renderWindowInteractor.Start()
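# Map 3D LiDAR points to image pixels. For the panoramic camera each point is
# converted to (azimuth, elevation) angles and then to a pixel; for a
# perspective camera the pinhole model is used: p = K * X followed by division
# by the depth, i.e. pix = (K X / X_z)[:2].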
def proj_pcd_2_pix(pcd_arr):
if params["camera_type"] == "panoramic":
angs_ls = map(xyz2angle, pcd_arr.tolist())
pix_ls = (np.array(map(voxel2pixel, angs_ls))).tolist()
elif params['camera_type'] == "perspective":
intrinsic_paras_tuple = make_tuple(params['instrinsic_para'])
intrinsic_paras = np.array(intrinsic_paras_tuple).reshape(3, 3)
cam_coord_pcd = pcd_arr.copy()
pcd_to_pix = (np.dot(intrinsic_paras, cam_coord_pcd.T)).T
proj_pts = (pcd_to_pix / pcd_to_pix[:, 2].reshape(-1, 1))[:, :2].astype(np.int16)
pix_ls = proj_pts.tolist()
else:
raise Exception("Camera type not correctly defined!")
return pix_ls
def remove_occlusion_of_chessboard(pcd_arr, corners_in_pcd_arr):
occlu_thres = 0.1
pcd_ls = pcd_arr.tolist()
pix_ls = proj_pcd_2_pix(pcd_arr)
ind_ls = []
pca = PCA(n_components=3)
pca.fit(corners_in_pcd_arr)
transed_corners_in_pcd_arr = np.dot(pca.components_, corners_in_pcd_arr.T).T
center = transed_corners_in_pcd_arr.mean(axis=0)
bound = np.dot(pca.components_.T,
(np.array(
[[-0.3, -0.225, 0], [-0.3, 0.225, 0], [0.3, 0.225, 0], [0.3, -0.225, 0]]) * 1.05 + center).T).T
if params["camera_type"] == "panoramic":
bound_on_image = np.fliplr(np.array(map(voxel2pixel, map(xyz2angle, bound.tolist()))))
elif params['camera_type'] == "perspective":
intrinsic_paras_tuple = make_tuple(params['instrinsic_para'])
intrinsic_paras = np.array(intrinsic_paras_tuple).reshape(3, 3)
pcd_to_pix = (np.dot(intrinsic_paras, bound.T)).T
inds = np.where(pcd_arr[:, 2] > 0)
pcd_ls = pcd_arr[inds].tolist()
pix_ls = np.array(pix_ls)[inds].tolist()
print "before removal: ", len(pcd_ls)
proj_pts = (pcd_to_pix / pcd_to_pix[:, 2].reshape(-1, 1))[:, :2].astype(np.int16)
bound_on_image = np.fliplr(proj_pts)
# bound_on_image = proj_pts
# print bound_on_image
else:
raise Exception("Camera type not correctly defined!")
polygon_path = mplPath.Path(bound_on_image.tolist())
for i in xrange(len(pcd_ls)):
pix = list(reversed(pix_ls[i]))
# print pix
if polygon_path.contains_point(pix):
point_2_board_dis = abs(np.dot(pca.components_[2], pcd_ls[i] - corners_in_pcd_arr.mean(axis=0)))
# print point_2_board_dis
# print pix_ls[i]
if point_2_board_dis <= occlu_thres:
if params["camera_type"] == "panoramic":
ind_ls.append(i)
elif params['camera_type'] == "perspective":
ind_ls.append(inds[0][i])
else:
raise Exception("Camera type not correctly defined!")
else:
if params["camera_type"] == "panoramic":
ind_ls.append(i)
elif params['camera_type'] == "perspective":
ind_ls.append(inds[0][i])
else:
raise Exception("Camera type not correctly defined!")
return np.array(ind_ls)
# visualize csv file of i-th point cloud
def vis_csv_pcd(ind=1, color_style="monochrome"):
pcd_arr = np.genfromtxt(
os.path.join(params['base_dir'], "pcd/" + str(ind).zfill(params["file_name_digits"])) + ".csv", delimiter=",",
skip_header=1)
# actor = vis_3D_points(pcd_arr, color_style="intens")
actor = vis_3D_points(pcd_arr, color_style=color_style)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
vis_with_renderer(renderer)
def vis_segments(ind=1):
renderer = vtk.vtkRenderer()
seg_folder = os.path.join(params['base_dir'], "output/pcd_seg/" + str(ind).zfill(params["file_name_digits"])) + "/"
seg_list = os.listdir(seg_folder)
for seg in seg_list:
if seg.split(".")[-1] == "txt":
color_tup = (np.random.randint(1, 255, 3)).tolist()
points_ls = list()
jdcs_collection = cPickle.load(open(os.path.abspath(seg_folder + seg), 'rb'))
if len(jdcs_collection) > 0: # filter
for jdc in jdcs_collection:
points_ls.extend(jdc)
# print points_ls
actor = vis_pcd_color_arr(np.array(points_ls), color_tup)
renderer.AddActor(actor)
vis_with_renderer(renderer)
def vis_segments_only_chessboard_color(ind=1):
renderer = vtk.vtkRenderer()
seg_folder = os.path.join(params['base_dir'], "output/pcd_seg/" + str(ind).zfill(params["file_name_digits"])) + "/"
seg_list = os.listdir(seg_folder)
chessboard_file_name = \
cPickle.load(open(os.path.join(params['base_dir'], "output/pcd_seg/") + str(ind).zfill(
params["file_name_digits"]) + "_pcd_result.pkl", "r"))[
-1].split("/")[-1]
for seg in seg_list:
if seg.split(".")[-1] == "txt":
if seg == chessboard_file_name:
color_tup = np.array([0, 255, 0])
else:
color_tup = np.array([0, 0, 0])
points_ls = list()
jdcs_collection = cPickle.load(open(os.path.abspath(seg_folder + seg), 'rb'))
if len(jdcs_collection) > 0: # filter
for jdc in jdcs_collection:
points_ls.extend(jdc)
# print points_ls
actor = vis_pcd_color_arr(np.array(points_ls), color_tup)
renderer.AddActor(actor)
vis_with_renderer(renderer)
def cal_theorical_number_points(dis):
h_res = np.deg2rad(0.16) # rad
v_res = np.deg2rad(1.33) # rad
h_len = dis * h_res
v_len = 2 * dis * np.sin(v_res / 2)
w = 0.45
l = 0.6
return (l // v_len) * (w // h_len)
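# Worked example (illustrative): cal_theorical_number_points(2.0) gives
# h_len = 2 m * 0.16 deg (in rad) ~= 0.0056 m and v_len = 2 * 2 m * sin(1.33 deg / 2) ~= 0.0464 m,
# so roughly (0.6 // 0.0464) * (0.45 // 0.0056) = 12 * 80 = 960 returns on the board.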
def vis_all_markers(ls=[1]):
import vtk
ren = vtk.vtkRenderer()
# ren.SetBackground(.2, .3, .4)
ren.SetBackground(.5, .6, .7)
for i in ls:
try:
pcd_result_file = os.path.join(params['base_dir'],
"output/pcd_seg/" + str(i).zfill(
params["file_name_digits"]) + "_pcd_result.pkl")
csv_path = os.path.join(params['base_dir'], "pcd/" + str(i).zfill(params["file_name_digits"]) + ".csv")
with open(os.path.abspath(pcd_result_file), "r") as f:
pcd_result_ls = cPickle.load(f)
assert pcd_result_ls is not None
marker_full_data_arr = exact_full_marker_data(csv_path, [pcd_result_ls[-1]])
marker_arr = marker_full_data_arr[:, :3]
# transformed_pcd = roate_with_rt(np.array(r_t), marker_arr)
if i % 4 == 0:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]), color_style="intens")
elif i % 4 == 1:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]), color_style="autumn")
elif i % 4 == 2:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]), color_style="cool")
else:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]),
color_style="intens_rg")
ren.AddActor(actor2)
except:
print i, "-th pcd corners are not found!"
continue
transform2 = vtk.vtkTransform()
transform2.Translate(0.0, 0.0, 0.0)
axes2 = vtk.vtkAxesActor()
axes2.SetUserTransform(transform2)
ren.AddActor(axes2)
cubeAxesActor = vtk.vtkCubeAxesActor()
cubeAxesActor.SetBounds((-3, 3, -3, 3, -2, 2))
cubeAxesActor.SetCamera(ren.GetActiveCamera())
cubeAxesActor.GetTitleTextProperty(0).SetColor(1.0, 0.0, 0.0)
cubeAxesActor.GetLabelTextProperty(0).SetColor(1.0, 0.0, 0.0)
cubeAxesActor.GetTitleTextProperty(1).SetColor(0.0, 1.0, 0.0)
cubeAxesActor.GetLabelTextProperty(1).SetColor(0.0, 1.0, 0.0)
cubeAxesActor.GetTitleTextProperty(2).SetColor(0.0, 0.0, 1.0)
cubeAxesActor.GetLabelTextProperty(2).SetColor(0.0, 0.0, 1.0)
cubeAxesActor.DrawXGridlinesOn()
cubeAxesActor.DrawYGridlinesOn()
cubeAxesActor.DrawZGridlinesOn()
# if vtk.VTK_MAJOR_VERSION > 5:
# cubeAxesActor.SetGridLineLocation(vtk.VTK_GRID_LINES_FURTHEST)
cubeAxesActor.XAxisMinorTickVisibilityOff()
cubeAxesActor.YAxisMinorTickVisibilityOff()
cubeAxesActor.ZAxisMinorTickVisibilityOff()
# cubeAxesActor.GetProperty().SetColor(0, 255, 0)
cubeAxesActor.GetXAxesLinesProperty().SetColor(0, 255, 0)
cubeAxesActor.GetYAxesLinesProperty().SetColor(0, 255, 0)
cubeAxesActor.GetZAxesLinesProperty().SetColor(0, 255, 0)
ren.AddActor(cubeAxesActor)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
style = vtk.vtkInteractorStyleSwitch()
iren.SetInteractorStyle(style)
style.SetCurrentStyleToTrackballCamera()
def get_camera_info(obj, ev):
if iren.GetKeyCode() == "s":
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName("screenshot.png")
writer.SetInputData(w2if.GetOutput())
writer.Write()
print "screenshot saved"
# save to pdf
if iren.GetKeyCode() == "s":
exp = vtk.vtkGL2PSExporter()
exp.SetRenderWindow(renWin)
exp.SetFilePrefix("screenpdf")
exp.SetFileFormat(2)
exp.SetCompress(False)
exp.SetLandscape(False)
exp.SetSortToBSP()
# exp.SetSortToSimple() # less expensive sort algorithm
exp.DrawBackgroundOn()
            exp.SetWrite3DPropsAsRasterImage(False)
            exp.Write()  # write the exported vector-graphics file (prefix "screenpdf")
iren.AddObserver(vtk.vtkCommand.KeyPressEvent, get_camera_info, 1)
iren.SetRenderWindow(renWin)
renWin.Render()
# ren.SetActiveCamera(camera)
iren.Initialize()
iren.Start()
def transform_grid(args):
corner_arr = args[0]
rot1 = args[1]
rot2 = args[2]
t1 = args[3]
t2 = args[4]
corners_in_pcd_arr = np.dot(np.dot(rot2.T, corner_arr.T).T - t2 + t1, rot1)
return corners_in_pcd_arr[0]
def vis_ested_pcd_corners(ind=1):
# pair_ind = 9
pcd_result_file = os.path.join(params['base_dir'],
"output/pcd_seg/" + str(ind).zfill(params["file_name_digits"]) + "_pcd_result.pkl")
csv_file = os.path.join(params['base_dir'], "pcd/" + str(ind).zfill(params["file_name_digits"]) + ".csv")
full_arr = np.genfromtxt(csv_file, delimiter=",", skip_header=1)
grid_coords = generate_grid_coords()
with open(os.path.abspath(pcd_result_file), "r") as f:
pcd_result_ls = cPickle.load(f)
assert pcd_result_ls is not None
rot1 = pcd_result_ls[0]
t1 = pcd_result_ls[1].reshape(1, 3)
rot2 = pcd_result_ls[2]
t2 = pcd_result_ls[3].reshape(1, 3)
trans_grid_ls = []
for coords in grid_coords:
args = [[coord, rot1, rot2, t1, t2] for coord in coords[:3]]
trans_coords = map(transform_grid, args)
trans_coords.append(coords[3])
trans_grid_ls.append(trans_coords)
ren = vtk.vtkRenderer()
ren.SetBackground(.2, .3, .4)
ren.SetBackground(0.90196079, 0.96078432, 0.59607846)
# ren.SetBackground(1., 1., 1.)
for i in xrange(len(trans_grid_ls)):
tmp_actor = draw_one_grd_vtk(trans_grid_ls[i])
tmp_actor.GetProperty().SetOpacity(0.5)
ren.AddActor(tmp_actor)
show_only_marker = True
if show_only_marker:
marker_full_data_arr = exact_full_marker_data(csv_file, [pcd_result_ls[-1]])
actor2 = vis_3D_points(marker_full_data_arr, color_style="intens_rg")
else:
actor2 = vis_3D_points(full_arr, color_style="intens_rg")
ren.AddActor(actor2)
transform2 = vtk.vtkTransform()
transform2.Translate(0.0, 0.0, 0.0)
axes2 = vtk.vtkAxesActor()
axes2.SetUserTransform(transform2)
ren.AddActor(axes2)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
    renWin.SetWindowName(str(ind).zfill(params["file_name_digits"]))
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
def get_camera_info(obj, ev):
if iren.GetKeyCode() == "s":
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName("screenshot.png")
writer.SetInputData(w2if.GetOutput())
writer.Write()
print "screenshot saved"
style = vtk.vtkInteractorStyleSwitch()
iren.SetRenderWindow(renWin)
iren.SetInteractorStyle(style)
# style.SetCurrentStyleToTrackballActor()
style.SetCurrentStyleToTrackballCamera()
iren.AddObserver(vtk.vtkCommand.KeyPressEvent, get_camera_info, 1)
iren.Initialize()
renWin.Render()
renWin.SetWindowName(str(ind).zfill(params["file_name_digits"]))
iren.Start()
def draw_chessboard_model(marker_size=marker_size):
gird_coords = generate_grid_coords(x_res=marker_size[0], y_res=marker_size[1])
grid_ls = [(p[0]).flatten()[:2] for p in gird_coords]
corner_arr = np.transpose(np.array(grid_ls).reshape(marker_size[0], marker_size[1], 2)[1:, 1:], (1, 0, 2))
c = np.zeros([corner_arr.shape[0], corner_arr.shape[1], 3]).reshape(
corner_arr.shape[0] * corner_arr.shape[1], 3).astype(np.float32)
c[0] = np.array([0, 0, 1])
c[-1] = np.array([1, 0, 0])
s = np.zeros(corner_arr[:, :, 0].flatten().shape[0]) + 20
s[0] = 60
s[-1] = 60
plt.scatter(corner_arr[:, :, 0].flatten(), corner_arr[:, :, 1].flatten(), c=c, s=s)
plt.plot(corner_arr[:, :, 0].flatten(), corner_arr[:, :, 1].flatten())
plt.xlim(corner_arr[:, :, 0].min(), corner_arr[:, :, 0].max())
plt.ylim(corner_arr[:, :, 1].min(), corner_arr[:, :, 1].max())
plt.xlabel("x coordinates [cm]")
plt.ylabel("y coordinates [cm]")
# plt.axes().set_aspect('equal', 'datalim')
plt.axis('equal')
plt.show()
def convert_to_edge(file_name):
# gray = cv2.imread('lines.jpg')
gray = cv2.imread(file_name)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
img = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
return img
def find_latest(cali_file_ls):
number_ls = []
for file in cali_file_ls:
tmp_ls = file.split("_")
number_ls.append(ast.literal_eval(tmp_ls[0] + "." + tmp_ls[1]))
return cali_file_ls[np.array(number_ls).argmax()]
def back_project_pcd(img, pcd_arr, color_arr, r_t, i, hide_occlussion_by_marker):
# print pcd_arr
rot_mat = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], r_t[2]),
np.dot(transforms3d.axangles.axangle2mat([0, 1, 0], r_t[1]),
transforms3d.axangles.axangle2mat([1, 0, 0], r_t[0])))
transformed_pcd = np.dot(rot_mat, pcd_arr.T).T + r_t[3:]
    # project the extrinsically transformed points (not the raw input cloud)
    transformed_pcd_ls = transformed_pcd.tolist()
    if not hide_occlussion_by_marker:  # whether to remove points occluded by the chessboard
if params["camera_type"] == "panoramic":
pcd2angle_s = map(xyz2angle, transformed_pcd_ls)
proj_pts = np.array(map(voxel2pixel, pcd2angle_s))
point_s = 5
elif params['camera_type'] == "perspective":
intrinsic_paras_tuple = make_tuple(params['instrinsic_para'])
intrinsic_paras = np.array(intrinsic_paras_tuple).reshape(3, 3)
cam_coord_pcd = transformed_pcd.copy()
# print cam_coord_pcd
print "before filtering z: ", cam_coord_pcd.shape
# cam_coord_pcd = cam_coord_pcd[np.where(cam_coord_pcd[:, 2] < 0)]
# cam_coord_pcd = cam_coord_pcd[:20000, :]
# print cam_coord_pcd
inds = np.where(cam_coord_pcd[:, 2] > 0.2)
cam_coord_pcd = cam_coord_pcd[inds]
color_arr = color_arr[inds]
# print cam_coord_pcd
print "after filtering z: ", cam_coord_pcd.shape
pcd_to_pix = (np.dot(intrinsic_paras, cam_coord_pcd.T)).T
# pcd_to_pix = pcd_to_pix[np.where(pcd_to_pix[:, 2] > 0)]
inds = np.where(pcd_to_pix[:, 2] > 0)
pcd_to_pix = pcd_to_pix[inds]
color_arr = color_arr[inds]
proj_pts = (pcd_to_pix / pcd_to_pix[:, 2].reshape(-1, 1))[:, :2].astype(np.int16)
point_s = 3
# print proj_pts
#
# print proj_pts.shape
else:
raise Exception("Camera type not correctly defined!")
else:
if params["camera_type"] == "panoramic":
point_s = 5
elif params['camera_type'] == "perspective":
point_s = 3
else:
raise Exception("Camera type not correctly defined!")
chessboard_result_file_path = os.path.join(params['base_dir'], "output/pcd_seg/" + str(i).zfill(
params["file_name_digits"]) + "_pcd_result.pkl")
chessboard_result_file = cPickle.load(open(chessboard_result_file_path, "r"))
rot1 = chessboard_result_file[0]
t1 = chessboard_result_file[1].reshape(1, 3)
# print "rot1*rot1.T: ", np.dot(rot1, rot1.T)
rot2 = chessboard_result_file[2]
t2 = chessboard_result_file[3].reshape(1, 3)
corner_arr = chessboard_result_file[4].reshape(-1, 2)
num = corner_arr.shape[0]
corner_arr = np.hstack([corner_arr, np.zeros(num).reshape(num, 1)])
rot_mat = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], r_t[2]),
np.dot(transforms3d.axangles.axangle2mat([0, 1, 0], r_t[1]),
transforms3d.axangles.axangle2mat([1, 0, 0], r_t[0])))
trans_arr = np.zeros([4, 4])
trans_arr[:3, :3] = rot_mat
trans_arr[:3, 3] = np.array(r_t[3:])
trans_arr[3, 3] = 1
trans_matrix = np.asmatrix(trans_arr)
corners_in_pcd_arr = np.dot(np.dot(rot2.T, corner_arr.T).T - t2 + t1, rot1)
corners_in_pcd_arr = (trans_matrix[:3, :3] * np.asmatrix(corners_in_pcd_arr).T).T + trans_matrix[:3, 3].T
corners_in_pcd_arr = np.array(corners_in_pcd_arr)
# print "before removal: ", transformed_pcd.shape
inds = remove_occlusion_of_chessboard(transformed_pcd, corners_in_pcd_arr)
print "inds:", inds
proj_pts = np.array(proj_pcd_2_pix(transformed_pcd))[inds].astype(np.int32)
print "after removal: ", proj_pts.shape
color_arr = color_arr[inds]
print
print proj_pts.shape[0], proj_pts.min(axis=0), proj_pts.max(axis=0)
print
for i in xrange(proj_pts.shape[0]):
cv2.circle(img, (proj_pts[i][0], proj_pts[i][1]), point_s, tuple(color_arr[i].tolist()), -1)
return img
def vis_back_proj(ind=1, img_style="edge", pcd_style="intens", hide_occlussion_by_marker=False,
save_without_show=False):
imgfile = os.path.join(params['base_dir'],
"img/" + str(ind).zfill(params["file_name_digits"]) + "." + params['image_format'])
if img_style == "edge":
gray = cv2.imread(imgfile)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
img = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
elif img_style == "orig":
img = cv2.imread(imgfile)
else:
raise Exception("Please input the right image style")
csvfile = os.path.join(params['base_dir'], "pcd/" + str(ind).zfill(params["file_name_digits"]) + ".csv")
csv = np.genfromtxt(csvfile, delimiter=",", skip_header=1)
pcd = csv[:, :3]
dis_arr = np.linalg.norm(pcd, axis=1)
intens = csv[:, params['intensity_col_ind']]
filels = os.listdir(params['base_dir'])
cali_file_ls = []
for file in filels:
if file.find("cali_result.txt") > -1:
cali_file_ls.append(file)
if len(cali_file_ls) > 1:
        warnings.warn("More than one calibration file exists! Loading the latest one.", UserWarning)
latest_cali = find_latest(cali_file_ls)
r_t = np.genfromtxt(os.path.join(params['base_dir'], latest_cali), delimiter=',')
print "Load ", latest_cali, " as the extrinsic calibration parameters!"
elif len(cali_file_ls) == 1:
r_t = np.genfromtxt(os.path.join(params['base_dir'], cali_file_ls[0]), delimiter=',')
print "Load ", cali_file_ls[0], " as the extrinsic calibration parameters!"
else:
raise Exception("No calibration file is found!")
if pcd_style == "intens":
pcd_color = np.fliplr((cm.jet(intens.astype(np.float32) / intens.max()) * 255).astype(np.int32)[:, :3])
elif pcd_style == "dis":
pcd_color = np.fliplr((cm.jet(dis_arr / 10) * 255).astype(np.int32)[:, :3])
    else:
        raise Exception("Please input the right pcd color style")
backproj_img = back_project_pcd(img, pcd, pcd_color, r_t, ind, hide_occlussion_by_marker)
if max(backproj_img.shape[0], backproj_img.shape[1]) > 1000:
resize_factor = 1000. / max(backproj_img.shape[0], backproj_img.shape[1])
resized_img_for_view = cv2.resize(backproj_img, (0, 0), fx=resize_factor, fy=resize_factor)
else:
resized_img_for_view = backproj_img
    if not save_without_show:
window_name = "ind: " + str(ind) + " img_style: " + img_style + " pcd_style: " + pcd_style + (
" hide_occlusion " if hide_occlussion_by_marker else "")
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.imshow(window_name, resized_img_for_view)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
save_file_name = os.path.join(params['base_dir'],
str(ind).zfill(
params["file_name_digits"])) + "_" + img_style + "_" + pcd_style + (
"_hide_occlusion" if hide_occlussion_by_marker else "") + "." + params['image_format']
cv2.imwrite(save_file_name, img, [cv2.IMWRITE_JPEG_QUALITY, 70])
print "The image is saved to ", save_file_name
cv2.destroyAllWindows()
else:
save_file_name = os.path.join(params['base_dir'], str(ind).zfill(
params["file_name_digits"])) + "_" + img_style + "_" + pcd_style + (
"_hide_occlusion" if hide_occlussion_by_marker else "") + "." + params['image_format']
cv2.imwrite(save_file_name, img, [cv2.IMWRITE_JPEG_QUALITY, 70])
print "The image is saved to ", save_file_name
cv2.destroyAllWindows()
if __name__ == "__main__":
# vis_back_proj(ind=1, img_style="orig", pcd_style="dis", hide_occlussion_by_marker=True)
vis_back_proj(ind=1, img_style="edge", pcd_style="intens", hide_occlussion_by_marker=False)
# vis_all_markers(np.arange(1, 5).tolist())
# vis_all_markers([1])
# vis_segments_only_chessboard_color(1)
# vis_csv_pcd(ind=1)
# vis_segments(ind=1)
# vis_ested_pcd_corners(ind=1)
|
bsd-2-clause
| -3,670,802,329,565,017,000
| 37.468137
| 136
| 0.599618
| false
| 3.110076
| false
| false
| false
|
melipelo/ejemplo
|
app.py
|
1
|
3030
|
#!/usr/bin/env python
# coding=utf-8
import requests
import urllib2
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from bs4 import BeautifulSoup
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
result = req.get("result")
parameters = result.get("parameters")
if req.get("result").get("action") == "productos.sura":
cliente = parameters.get("tipo_cliente")
speech = "Buscando productos para " + cliente
elif req.get("result").get("action") == "producto.info":
        producto = parameters.get("producto")
        if producto == "hogar":
            url = "https://www.sura.com/soluciones-personas/seguro-hogar.aspx"
            r = urllib2.urlopen(url).read()
            soup = BeautifulSoup(r, 'html.parser')
            print soup
            contenido = soup.find_all("div", class_="textRightColumn")
            if len(contenido) == 0:
                speech = "No encontrado"
            else:
                # use the element's text, not the Tag object, so the response
                # stays JSON-serializable
                speech = contenido[0].get_text().strip()
        else:
            speech = "Buscando informacion del producto " + producto
elif req.get("result").get("action") == "planes.salud":
url = "https://api.segurossura.com.co/public/v1/directory/products"
myResponse = requests.get(url)
if(myResponse.ok):
jData = json.loads(myResponse.text)
speech = "Seguros Sura Colombia ofrece los siguientes planes de salud: \n"
for plan in jData:
speech = speech + "\n" + plan["nombreField"].title()
elif req.get("result").get("action") == "info.especialistas":
producto = parameters.get("plan-salud")
ciudad = parameters.get("ciudad")
especialidad = parameters.get("especialidad")
url = "https://api.segurossura.com.co/public/v1/directory/search/" + producto + "/" + ciudad + "?speciality=" + especialidad + "&firstname=&secondname=&firstlastname=&secondlastname="
myResponse = requests.get(url)
if(myResponse.ok):
jData = json.loads(myResponse.text)
speech = "Los profesionales que coinciden con tu busqueda son: \n"
for medico in jData:
speech = speech + "\n" + medico["nombreField"] + "\n Direccion: " + medico["direccionField"].title() + "\n Telefono: " + medico["telefonoField"] + "\n"
elif req.get("result").get("action") == "coberturas.producto":
producto = parameters.get("productos")
speech = "Buscando coberturas del producto: " + producto
else:
speech =" "
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
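# Illustrative request body (shape inferred from the accesses above; this is
# the API.AI / Dialogflow v1 webhook format, and the field values are made up):
# {
#   "result": {
#     "action": "info.especialistas",
#     "parameters": {"plan-salud": "...", "ciudad": "...", "especialidad": "..."}
#   }
# }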
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
|
apache-2.0
| 5,939,019,461,058,861,000
| 30.237113
| 185
| 0.642244
| false
| 3.026973
| false
| false
| false
|
petewarden/tensorflow
|
tensorflow/python/keras/saving/saved_model/save_impl.py
|
1
|
28457
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel serialization.
TODO (kathywu): Move to layer_serialization.py. Some model-specific logic should
go to model_serialization.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import weakref
from tensorflow.python.eager import def_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.mixed_precision import autocast_variable
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import load as keras_load
from tensorflow.python.keras.saving.saved_model import serialized_attributes
from tensorflow.python.keras.saving.saved_model import utils
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
from tensorflow.python.keras.utils.generic_utils import LazyLoader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
base_layer = LazyLoader(
"base_layer", globals(),
"tensorflow.python.keras.engine.base_layer")
metrics = LazyLoader("metrics", globals(),
"tensorflow.python.keras.metrics")
input_layer = LazyLoader(
"input_layer", globals(),
"tensorflow.python.keras.engine.input_layer")
training_lib = LazyLoader(
"training_lib", globals(),
"tensorflow.python.keras.engine.training")
sequential_lib = LazyLoader(
"sequential_lib", globals(),
"tensorflow.python.keras.engine.sequential")
# pylint:enable=g-inconsistent-quotes
def should_skip_serialization(layer):
"""Skip serializing extra objects and functions if layer inputs aren't set."""
saved_model_input_spec_set = (isinstance(layer, training_lib.Model) and
layer._saved_model_inputs_spec is not None) # pylint: disable=protected-access
if not layer.built and not saved_model_input_spec_set:
logging.warning('Skipping full serialization of Keras layer {}, because '
'it is not built.'.format(layer))
return True
return False
def wrap_layer_objects(layer, serialization_cache):
"""Returns extra trackable objects to attach to the serialized layer.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all checkpointable objects from a
SerializedAttributes object. See LayerAttributes and ModelAttributes for
entire list of objects
"""
# Wrap all regularization losses as tf.functions.
# First, generate list of all regularization losses in this layer and
# sublayers.
all_losses = layer._callable_losses[:] # pylint: disable=protected-access
for child_layer in utils.list_all_layers(layer):
all_losses.extend(child_layer._callable_losses) # pylint: disable=protected-access
# Next, wrap all loss functions as tf.functions. Use the serialization cache
# to store already-wrapped functions.
keras_loss_cache = serialization_cache.setdefault('keras_losses', {})
wrapped_loss_functions = []
for loss_fn in all_losses:
if loss_fn in keras_loss_cache:
wrapped_loss_functions.append(keras_loss_cache[loss_fn])
else:
wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))
keras_loss_cache[loss_fn] = wrapped_loss
wrapped_loss_functions.append(wrapped_loss)
wrapped_layer_losses = [keras_loss_cache[fn]
for fn in layer._callable_losses[:]] # pylint: disable=protected-access
layer_metrics = data_structures.wrap_or_unwrap(
{m.name: m for m in layer._metrics}) # pylint: disable=protected-access
return dict(
variables=data_structures.wrap_or_unwrap(layer.variables),
trainable_variables=data_structures.wrap_or_unwrap(
layer.trainable_variables),
non_trainable_variables=data_structures.wrap_or_unwrap(
layer.non_trainable_variables),
layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)),
metrics=data_structures.wrap_or_unwrap(layer.metrics),
regularization_losses=data_structures.wrap_or_unwrap(
wrapped_loss_functions),
layer_regularization_losses=data_structures.wrap_or_unwrap(
wrapped_layer_losses),
layer_metrics=layer_metrics)
# pylint: disable=protected-access
def wrap_layer_functions(layer, serialization_cache):
"""Returns dict of wrapped layer call function and losses in tf.functions.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all keras tf.functions to serialize. See
LayerAttributes and ModelAttributes for the list of all attributes.
"""
# Since Sequential models may be modified in place using model.add() or
# model.pop(), don't use saved functions.
if (isinstance(layer, keras_load.RevivedLayer) and
not isinstance(layer, sequential_lib.Sequential)):
return {fn_name: getattr(layer.keras_api, fn_name, None)
for fn_name in serialized_attributes.LayerAttributes.all_functions}
# Reset the losses of the layer and its children. The call function in each
# child layer is replaced with tf.functions.
original_fns = _replace_child_layer_functions(layer, serialization_cache)
original_losses = _reset_layer_losses(layer)
# Wrap all the layer call and activity regularizer functions.
# Use LayerCallCollection to ensure that all layer call functions (__call__,
# call with losses) are traced with the same inputs.
call_collection = LayerCallCollection(layer)
call_fn_with_losses = call_collection.add_function(
_wrap_call_and_conditional_losses(layer),
'{}_layer_call_and_return_conditional_losses'.format(layer.name))
call_fn = call_collection.add_function(
_extract_outputs_from_fn(layer, call_fn_with_losses),
'{}_layer_call_fn'.format(layer.name))
fns = {'call_and_return_conditional_losses': call_fn_with_losses,
'__call__': call_fn}
if layer._activity_regularizer is not None: # pylint: disable=protected-access
fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)
fns['call_and_return_all_conditional_losses'] = (
call_collection.add_function(
_append_activity_regularizer_loss(layer,
call_fn_with_losses,
fns['activity_regularizer_fn']),
'{}_layer_call_and_return_all_conditional_losses'.format(layer.name)
))
else:
fns['activity_regularizer_fn'] = None
fns['call_and_return_all_conditional_losses'] = call_fn_with_losses
# Manually trigger traces before restoring the overwritten functions. The
# functions are traced within the layer call context to ensure that layer
# functions (e.g. add_loss) behave as though running in graph mode.
with tracing_scope():
call_collection.trace_with_input_signature()
with base_layer_utils.call_context().enter(
layer, inputs=None, build_graph=True, training=None, saving=True):
for fn in fns.values():
if fn is not None and fn.input_signature is not None:
if isinstance(fn, LayerCall):
fn = fn.wrapped_call
fn.get_concrete_function()
# Restore overwritten functions and losses
_restore_child_layer_functions(original_fns)
_restore_layer_losses(original_losses)
return fns
def default_save_signature(layer):
original_losses = _reset_layer_losses(layer)
fn = saving_utils.trace_model_call(layer)
fn.get_concrete_function()
_restore_layer_losses(original_losses)
return fn
def _replace_child_layer_functions(layer, serialization_cache):
"""Replaces functions in the children layers with wrapped tf.functions.
This step allows functions from parent layers to reference the wrapped
functions from their children layers instead of retracing the ops.
This function also resets all losses stored in the layer. These are stored in
the returned dictionary. Use `_restore_child_layer_functions` to restore
the original attributes.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
Dictionary mapping layer objects -> original functions and losses:
{ Child layer 1: {
'losses': Original losses,
'call': Original call function
'_activity_regularizer': Original activity regularizer},
Child layer 2: ...
}
"""
# pylint: disable=protected-access
original_fns = {}
def replace_layer_functions(child_layer, serialized_fns):
"""Replaces layer call and activity regularizer with wrapped functions."""
original_fns[child_layer] = {
'call': child_layer.call,
'_activity_regularizer': child_layer._activity_regularizer
}
with trackable.no_automatic_dependency_tracking_scope(child_layer):
try:
child_layer._activity_regularizer = serialized_fns.get(
'activity_regularizer_fn')
except AttributeError:
# Some layers have an unsettable activity regularizer.
pass
child_layer.call = utils.use_wrapped_call(
child_layer,
serialized_fns['call_and_return_conditional_losses'],
default_training_value=False)
def replace_metric_functions(child_layer, serialized_fns):
"""Replaces metric functions with wrapped functions."""
original_fns[child_layer] = {
'__call__': child_layer.__call__,
'result': child_layer.result,
'update_state': child_layer.update_state
}
with trackable.no_automatic_dependency_tracking_scope(child_layer):
child_layer.__call__ = serialized_fns['__call__']
child_layer.result = serialized_fns['result']
child_layer.update_state = serialized_fns['update_state']
for child_layer in utils.list_all_layers(layer):
if isinstance(child_layer, input_layer.InputLayer):
continue
if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
serialized_functions = (
child_layer._trackable_saved_model_saver._get_serialized_attributes(
serialization_cache).functions)
else:
serialized_functions = (
serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions)
if not serialized_functions:
# This indicates either:
# - circular dependency, which means the current layer's functions
# should be wrapped first.
# - Child layer's inputs are not defined, so its functions have not been
# wrapped. In this case, no replacement is necessary so move on to the
# next child.
continue
if isinstance(child_layer, metrics.Metric):
replace_metric_functions(child_layer, serialized_functions)
else:
replace_layer_functions(child_layer, serialized_functions)
return original_fns
# pylint: enable=protected-access
def _restore_child_layer_functions(original_fns):
"""Restores attributes replaced with `_replace_child_layer_functions`."""
for child_layer, fns in original_fns.items():
with trackable.no_automatic_dependency_tracking_scope(child_layer):
for fn_name, fn in fns.items():
try:
setattr(child_layer, fn_name, fn) # pylint: disable=protected-access
except AttributeError:
pass # In the case of _activity_regularizer, setting the attribute
# may be disallowed.
# pylint: disable=protected-access
def _reset_layer_losses(parent_layer):
"""Resets losses of layer and its sublayers, and returns original losses."""
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {'losses': layer._losses[:],
'eager_losses': layer._eager_losses[:]}
with trackable.no_automatic_dependency_tracking_scope(layer):
layer._losses = []
layer._eager_losses = []
return losses_dict
def _restore_layer_losses(losses_dict):
for layer in losses_dict:
with trackable.no_automatic_dependency_tracking_scope(layer):
layer._losses = losses_dict[layer]['losses']
layer._eager_losses = losses_dict[layer]['eager_losses']
# pylint: enable=protected-access
class LayerTracingContext(threading.local):
def __init__(self):
super(LayerTracingContext, self).__init__()
self.enable_call_tracing = False
self.trace_queue = []
_thread_local_data = LayerTracingContext()
@tf_contextlib.contextmanager
def tracing_scope():
"""Enables tracing scope."""
# This enables the LayerCallCollection's tracing mechanism to trace all call
# functions in the collection.
previous_value = _thread_local_data.enable_call_tracing
previous_queue = _thread_local_data.trace_queue
try:
_thread_local_data.enable_call_tracing = True
_thread_local_data.trace_queue = []
yield
finally:
_thread_local_data.enable_call_tracing = previous_value
# Run traces from the queue.
for fn, args, kwargs, training in _thread_local_data.trace_queue:
if training is not None:
with K.deprecated_internal_learning_phase_scope(training):
fn.get_concrete_function(*args, **kwargs)
else:
fn.get_concrete_function(*args, **kwargs)
_thread_local_data.trace_queue = previous_queue
def add_trace_to_queue(fn, args, kwargs, training=None):
if tracing_enabled():
_thread_local_data.trace_queue.append(
(fn, args[:], kwargs.copy(), training))
def tracing_enabled():
"""Whether to add extra traces to the queue."""
return _thread_local_data.enable_call_tracing
class LayerCallCollection(object):
"""Groups wrapped layer call functions.
This is used to ensure that all layer call functions are traced with the same
  inputs:
- call
- call_and_return_conditional_losses
- call_and_return_all_conditional_losses
"""
def __init__(self, layer):
self.layer = layer
self.layer_call_method = _get_layer_call_method(layer)
self._expects_training_arg = utils.layer_uses_training_bool(layer)
self._training_arg_index = utils.get_training_arg_index(
self.layer_call_method)
# If the layer call function has kwargs, then the traced function cannot
# have an input signature.
arg_spec = tf_inspect.getfullargspec(self.layer_call_method)
self._has_kwargs = bool(self._expects_training_arg or
arg_spec.defaults or
arg_spec.kwonlyargs or
arg_spec.varkw)
self._input_signature = self._generate_input_signature(layer)
self._functions = weakref.WeakValueDictionary()
# Get the input argument name from the args.
args = arg_spec.args
if tf_inspect.ismethod(self.layer_call_method):
args = args[1:]
self._input_arg_name = args[0] if args else 'inputs'
def _generate_input_signature(self, layer):
"""Inspects layer object and returns the inferred input signature.
Args:
layer: Layer object.
Returns:
List of possibly nested TensorSpecs of the layer call function inputs.
The list does not contain the `training` argument.
"""
if (isinstance(layer.call, def_function.Function) and
layer.call.input_signature is not None):
return layer.call.input_signature
elif isinstance(layer, training_lib.Model):
return saving_utils.model_input_signature(layer)
elif (layer.input_spec is not None and
layer._use_input_spec_as_call_signature): # pylint: disable=protected-access
def to_tensor_spec_or_none(x):
spec = input_spec.to_tensor_spec(x, layer._compute_dtype) # pylint: disable=protected-access
# If the shape is too general (e.g. multiple dimensions are allowed),
# return None so that separate functions can be generated for each
# inferred input signature.
# TODO(b/134962016): currently partial signatures are not supported.
if spec.shape == tensor_shape.TensorShape(None):
return None
return spec
input_signature = [nest.map_structure(
to_tensor_spec_or_none, layer.input_spec)]
return input_signature
else:
return None
def add_trace(self, *args, **kwargs):
"""Traces all functions with the same args and kwargs.
Args:
*args: Positional args passed to the original function.
**kwargs: Keyword args passed to the original function.
"""
args = list(args)
kwargs = kwargs.copy()
for fn in self._functions.values():
# TODO(kathywu): Replace arguments with broader shapes defined in the
# input signature.
if self._expects_training_arg:
def trace_with_training(value, fn=fn):
utils.set_training_arg(value, self._training_arg_index, args, kwargs)
add_trace_to_queue(fn, args, kwargs, value)
trace_with_training(True)
trace_with_training(False)
else:
add_trace_to_queue(fn, args, kwargs)
@property
def fn_input_signature(self):
"""Returns input signature for the wrapped layer call function."""
if self._has_kwargs:
# Input signatures may only describe tensor arguments and kwargs are not
# supported.
return None
if None in nest.flatten(self._input_signature):
# TODO(b/134962016): If input signature cannot be partially defined.
return None
return self._input_signature
def training_arg_was_passed(self, args, kwargs):
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
return (utils.get_training_arg(self._training_arg_index, args, kwargs)
is not None)
else:
return self.layer._call_arg_was_passed( # pylint: disable=protected-access
'training', args, kwargs, inputs_in_args=True)
def get_training_arg_value(self, args, kwargs):
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
return utils.get_training_arg(self._training_arg_index, args, kwargs)
else:
return self.layer._get_call_arg_value( # pylint: disable=protected-access
'training', args, kwargs, inputs_in_args=True)
def get_input_arg_value(self, args, kwargs):
return self.layer._get_call_arg_value( # pylint: disable=protected-access
self._input_arg_name, args, kwargs, inputs_in_args=True)
def _maybe_wrap_with_training_arg(self, call_fn):
"""Wraps call function with added training argument if necessary."""
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
# Add training arg to wrapper function.
arg_spec = tf_inspect.getfullargspec(call_fn)
args = arg_spec.args + ['training']
defaults = list(arg_spec.defaults or [])
defaults.append(False)
new_arg_spec = tf_inspect.FullArgSpec(
args=args,
varargs=arg_spec.varargs,
varkw=arg_spec.varkw,
defaults=defaults,
kwonlyargs=arg_spec.kwonlyargs,
kwonlydefaults=arg_spec.kwonlydefaults,
annotations=arg_spec.annotations)
# Set new training arg index
self._training_arg_index = len(args) - 1
if tf_inspect.ismethod(call_fn):
self._training_arg_index -= 1
def wrap_with_training_arg(*args, **kwargs):
# Remove the training value, since the original call_fn does not expect
# a training arg. Instead, the training value will be propagated using
# the call context created in LayerCall.
args = list(args)
kwargs = kwargs.copy()
utils.remove_training_arg(self._training_arg_index, args, kwargs)
return call_fn(*args, **kwargs)
return tf_decorator.make_decorator(
target=call_fn,
decorator_func=wrap_with_training_arg,
decorator_argspec=new_arg_spec)
return call_fn
def add_function(self, call_fn, name):
"""Adds a layer call function to the collection."""
fn = LayerCall(
self, self._maybe_wrap_with_training_arg(call_fn), name,
input_signature=self.fn_input_signature)
self._functions[name] = fn.wrapped_call
return fn
def trace_with_input_signature(self):
"""Trace with the layer/models inferred input signature if possible."""
if (None not in nest.flatten(self._input_signature) and self._has_kwargs):
# Manually add traces for layers that have keyword arguments and have
# a fully defined input signature.
self.add_trace(*self._input_signature)
def _filtered_inputs(inputs):
return list(filter(tf_utils.is_tensor_or_variable, nest.flatten(inputs)))
def layer_call_wrapper(call_collection, method, name):
"""Ensures layer losses are kept the same, and runs method in call context."""
# Create wrapper that deals with losses and call context.
def wrapper(*args, **kwargs):
"""Calls method within call context."""
layer = call_collection.layer
training = None
inputs = _filtered_inputs([args, kwargs])
# pylint: disable=protected-access
if (args or kwargs) and call_collection.training_arg_was_passed(
args, kwargs):
training = call_collection.get_training_arg_value(args, kwargs)
# pylint: enable=protected-access
original_losses = _reset_layer_losses(layer)
with base_layer_utils.call_context().enter(
layer, inputs=inputs, build_graph=False, training=training,
saving=True):
with autocast_variable.enable_auto_cast_variables(
layer._compute_dtype_object): # pylint: disable=protected-access
ret = method(*args, **kwargs)
_restore_layer_losses(original_losses)
return ret
# Rename to `name`, since tf.function doesn't have a name argument. Without
# this, all functions returned by this method will be named "call", which
# would be a nightmare to debug.
fn = tf_decorator.make_decorator(target=method, decorator_func=wrapper)
fn.__name__ = name
return fn
class LayerCall(object):
"""Function that triggers traces of other functions in the same collection."""
def __init__(self, call_collection, call_fn, name, input_signature):
"""Initializes a LayerCall object.
Args:
call_collection: a LayerCallCollection, which contains the other layer
call functions (e.g. call_with_conditional_losses, call). These
functions should be traced with the same arguments.
call_fn: A call function.
name: Name of the call function.
input_signature: Input signature of call_fn (can be None).
"""
self.call_collection = call_collection
self.input_signature = input_signature
self.wrapped_call = def_function.function(
layer_call_wrapper(call_collection, call_fn, name),
input_signature=input_signature)
self.original_layer_call = call_collection.layer_call_method
def _maybe_trace(self, args, kwargs):
# Trigger traces of other call functions + extra training-arg traces.
if tracing_enabled():
self.call_collection.add_trace(*args, **kwargs)
def __call__(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call(*args, **kwargs)
def get_concrete_function(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call.get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
"""Wraps call function that returns a tuple of (outputs, losses).
The losses returned are conditional on the inputs passed to the call function.
  Unconditional losses (e.g. weight regularization) are wrapped separately.
Args:
layer: a Keras layer object
Returns:
python call function that returns outputs and conditional losses -- excludes
activity regularizer
"""
# Create function that generates both outputs and losses
layer_call = _get_layer_call_method(layer)
def call_and_return_conditional_losses(*args, **kwargs):
"""Returns layer (call_output, conditional losses) tuple."""
call_output = layer_call(*args, **kwargs)
if version_utils.is_v1_layer_or_model(layer):
conditional_losses = layer.get_losses_for(
_filtered_inputs([args, kwargs]))
else:
conditional_losses = [
l for l in layer.losses if not hasattr(l, '_unconditional_loss')
]
return call_output, conditional_losses
return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
"""Returns a function that returns only call function outputs."""
if isinstance(layer, keras_load.RevivedLayer):
return layer.keras_api.__call__ # pylint: disable=protected-access
def call(inputs, *args, **kwargs):
return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]
return _create_call_fn_decorator(layer, call)
def _append_activity_regularizer_loss(
layer, call_fn_with_losses, activity_regularizer_fn):
"""Appends activity regularizer loss to losses returned by the wrapped fn."""
def fn(inputs, *args, **kwargs):
outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)
losses.append(activity_regularizer_fn(outputs))
return outputs, losses
return _create_call_fn_decorator(layer, fn)
def _create_call_fn_decorator(layer, wrapped_call):
call_fn = _get_layer_call_method(layer)
fn, arg_spec = utils.maybe_add_training_arg(
call_fn, wrapped_call, layer._expects_training_arg, # pylint: disable=protected-access
default_training_value=False)
return tf_decorator.make_decorator(
target=call_fn,
decorator_func=fn,
decorator_argspec=arg_spec)
def _wrap_unconditional_loss(loss_fn, index):
"""Wraps callable/unconditional loss, returning a serializable function."""
# Extract original loss function from partial function
fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn
if isinstance(fn, def_function.Function):
return fn
else:
return def_function.Function(
fn, 'loss_fn_{}'.format(index), input_signature=[])
def _wrap_activity_regularizer(layer):
"""Wraps the activity regularizer."""
# pylint: disable=protected-access
if isinstance(layer._activity_regularizer, def_function.Function):
return layer._activity_regularizer
return def_function.Function(
layer._activity_regularizer,
'{}_activity_regularizer'.format(layer.name),
input_signature=[
tensor_spec.TensorSpec(None, layer._compute_dtype or K.floatx())
])
# pylint: enable=protected-access
def _get_layer_call_method(layer):
if isinstance(layer.call, (def_function.Function)):
return layer.call.python_function
return layer.call
|
apache-2.0
| -5,671,393,154,205,207,000
| 38.688982
| 111
| 0.697965
| false
| 3.977775
| false
| false
| false
|
DavideCanton/Python3
|
docs/docs.py
|
1
|
1567
|
__author__ = 'davide'
import pathlib
import string
import math
from collections import defaultdict
def compute_inverse(fdt, docs, terms):
ft = defaultdict(int)
for t in terms:
for d in docs:
ft[t] += fdt[t, d]
return ft
def index():
terms = set()
docs = []
fdt = defaultdict(int)
folder = pathlib.Path("D:/documenti prova")
for fp in folder.glob("*.txt"):
docs.append(fp.name)
with fp.open() as f:
for line in f:
for word in line.split():
word = word.strip(string.punctuation)
if word:
terms.add(word)
fdt[word, fp.name] += 1
ft = compute_inverse(fdt, docs, terms)
return terms, docs, fdt, ft
if __name__ == "__main__":
terms, docs, fdt, ft = index()
N = len(docs)
q = input("Query>")
f1 = lambda t: math.log(1 + N / ft[t]) if ft[t] > 0 else 0
f2 = lambda t, d: 1 + math.log(fdt[t, d]) if fdt[t, d] > 0 else 0
qt = [x.strip(string.punctuation) for x in q.split()]
wqt = {t: f1(t) for t in qt}
wdt = {(t, d): f2(t, d) for t in qt for d in docs}
wd = math.sqrt(sum(wdt[t, d] ** 2 for t in qt for d in docs))
if abs(wd) < 1E-10:
sd = []
else:
        # the inner sum must range over the query terms only; looping over
        # docs again would shadow d and give every document the same score
        sd = [(d, sum(wdt[t, d] * wqt[t] for t in qt) / wd)
              for d in docs]
sd.sort(key=lambda t: -t[1])
for el in sd:
print(el)
for t in qt:
for d in docs:
print("{},{} => {}".format(t, d, fdt[t, d]))
|
gpl-3.0
| -8,411,056,482,351,363,000
| 24.290323
| 74
| 0.498405
| false
| 3.060547
| false
| false
| false
|
lahwaacz/tvnamer
|
tests/test_anime_filenames.py
|
1
|
1102
|
#!/usr/bin/env python
"""Tests anime filename output
"""
from functional_runner import run_tvnamer, verify_out_data
from nose.plugins.attrib import attr
@attr("functional")
def test_group():
"""Anime filename [#100]
"""
out_data = run_tvnamer(
with_files = ['[Some Group] Scrubs - 01 [A1B2C3].avi'],
with_config = """
{
"batch": true,
"filename_anime_with_episode": "[%(group)s] %(seriesname)s - %(episode)s - %(episodename)s [%(crc)s]%(ext)s"
}
""")
expected_files = ['[Some Group] Scrubs - 01 - My First Day [A1B2C3].avi']
verify_out_data(out_data, expected_files)
@attr("functional")
def test_group_no_epname():
"""Anime filename, on episode with no name [#100]
"""
out_data = run_tvnamer(
with_files = ['[Some Group] Somefakeseries - 01 [A1B2C3].avi'],
with_config = """
{
"batch": true,
"filename_anime_without_episode": "[%(group)s] %(seriesname)s - %(episode)s [%(crc)s]%(ext)s"
}
""")
expected_files = ['[Some Group] Somefakeseries - 01 [A1B2C3].avi']
verify_out_data(out_data, expected_files)
|
unlicense
| 3,900,368,123,542,362,600
| 23.488889
| 112
| 0.608893
| false
| 2.930851
| false
| false
| false
|
zookeepr/zookeepr
|
zk/model/ceiling.py
|
1
|
3694
|
"""The application's model objects"""
import sqlalchemy as sa
from meta import Base
from pylons.controllers.util import abort
from beaker.cache import CacheManager
from role import Role
from person_role_map import person_role_map
from meta import Session
import datetime
import random
class Ceiling(Base):
"""Stores the details of product ceilings which are used to control the sale of items with a limited stock
"""
__tablename__ = 'ceiling'
id = sa.Column(sa.types.Integer, primary_key=True)
parent_id = sa.Column(sa.types.Integer, sa.ForeignKey('ceiling.id'), nullable=True)
name = sa.Column(sa.types.Text, nullable=False, unique=True)
max_sold = sa.Column(sa.types.Integer, nullable=True)
available_from = sa.Column(sa.types.DateTime, nullable=True)
available_until = sa.Column(sa.types.DateTime, nullable=True)
cache = CacheManager()
# relations
parent = sa.orm.relation(lambda: Ceiling, backref='children', remote_side=[id])
def qty_sold(self):
qty = 0
for p in self.products:
qty += p.qty_sold()
return qty
def qty_invoiced(self, date=True):
# date: bool? only count items that are not overdue
@self.cache.cache(self.id, expire=600)
def cached(self, date=True):
qty = 0
for p in self.products:
qty += p.qty_invoiced(date)
return qty
return cached(self, date)
def qty_free(self):
qty = 0
for p in self.products:
qty += p.qty_free()
return qty
def percent_sold(self):
if self.max_sold == None:
return 0
else:
percent = float(self.qty_sold()) / float(self.max_sold)
return int(percent * 100)
def percent_invoiced(self):
if self.max_sold == None:
return 0
else:
percent = float(self.qty_invoiced()) / float(self.max_sold)
return int(percent * 100)
def remaining(self):
return self.max_sold - self.qty_sold()
def soldout(self):
if self.max_sold != None:
return self.qty_invoiced() >= self.max_sold
return False
def enough_left(self, qty):
if self.max_sold != None:
return (self.qty_invoiced() + qty) > self.max_sold
return False
def available(self, stock=True, qty=0):
# bool stock: care about if the product is in stock (ie sold out?)
if stock and self.soldout():
return False
elif qty > 0 and self.enough_left(qty):
return False
elif self.available_from is not None and self.available_from >= datetime.datetime.now():
return False
elif self.available_until is not None and self.available_until <= datetime.datetime.now():
return False
elif self.parent is not None and self.parent != self and self.parent.available():
return False
else:
return True
def can_i_sell(self, qty):
if not self.soldout() and self.remaining() > qty:
return True
else:
return False
def __repr__(self):
        return '<Ceiling id=%r name=%r max_sold=%r available_from=%r, available_until=%r>' % (self.id, self.name, self.max_sold, self.available_from, self.available_until)
@classmethod
def find_all(cls):
return Session.query(Ceiling).order_by(Ceiling.name).all()
@classmethod
def find_by_id(cls, id):
return Session.query(Ceiling).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(Ceiling).filter_by(name=name).first()
|
gpl-2.0
| -5,095,891,693,858,206,000
| 29.783333
| 170
| 0.61072
| false
| 3.74645
| false
| false
| false
|
ContinuumIO/dask
|
dask/array/utils.py
|
2
|
11059
|
import difflib
import functools
import math
import numbers
import os
import warnings
import numpy as np
from tlz import frequencies, concat
from .core import Array
from ..highlevelgraph import HighLevelGraph
from ..utils import has_keyword, ignoring, is_arraylike
try:
AxisError = np.AxisError
except AttributeError:
try:
np.array([0]).sum(axis=5)
except Exception as e:
AxisError = type(e)
def normalize_to_array(x):
if "cupy" in str(type(x)): # TODO: avoid explicit reference to cupy
return x.get()
else:
return x
def meta_from_array(x, ndim=None, dtype=None):
""" Normalize an array to appropriate meta object
Parameters
----------
x: array-like, callable
Either an object that looks sufficiently like a Numpy array,
or a callable that accepts shape and dtype keywords
ndim: int
Number of dimensions of the array
dtype: Numpy dtype
A valid input for ``np.dtype``
Returns
-------
array-like with zero elements of the correct dtype
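    Examples
    --------
    Illustrative only (the calls below assume a plain NumPy input):
    >>> meta_from_array(np.ones((4, 4), dtype='f8'), ndim=3).shape
    (0, 0, 0)
    >>> meta_from_array(np.ones(3, dtype='i4'), dtype='f8').dtype
    dtype('float64')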
"""
# If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)
    # implement a _meta attribute that is incompatible with Dask Array._meta
if hasattr(x, "_meta") and isinstance(x, Array):
x = x._meta
if dtype is None and x is None:
raise ValueError("You must specify the meta or dtype of the array")
if np.isscalar(x):
x = np.array(x)
if x is None:
x = np.ndarray
if isinstance(x, type):
x = x(shape=(0,) * (ndim or 0), dtype=dtype)
if (
not hasattr(x, "shape")
or not hasattr(x, "dtype")
or not isinstance(x.shape, tuple)
):
return x
if isinstance(x, list) or isinstance(x, tuple):
ndims = [
0
if isinstance(a, numbers.Number)
else a.ndim
if hasattr(a, "ndim")
else len(a)
for a in x
]
a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]
        return a if isinstance(x, list) else tuple(a)
if ndim is None:
ndim = x.ndim
try:
meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]
if meta.ndim != ndim:
if ndim > x.ndim:
meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]
meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]
elif ndim == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * ndim)
except Exception:
meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)
if np.isscalar(meta):
meta = np.array(meta)
if dtype and meta.dtype != dtype:
meta = meta.astype(dtype)
return meta
def compute_meta(func, _dtype, *args, **kwargs):
with np.errstate(all="ignore"), warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
args_meta = [meta_from_array(x) if is_arraylike(x) else x for x in args]
kwargs_meta = {
k: meta_from_array(v) if is_arraylike(v) else v for k, v in kwargs.items()
}
# todo: look for alternative to this, causes issues when using map_blocks()
# with np.vectorize, such as dask.array.routines._isnonzero_vec().
if isinstance(func, np.vectorize):
meta = func(*args_meta)
else:
try:
# some reduction functions need to know they are computing meta
if has_keyword(func, "computing_meta"):
kwargs_meta["computing_meta"] = True
meta = func(*args_meta, **kwargs_meta)
except TypeError as e:
if (
"unexpected keyword argument" in str(e)
or "is an invalid keyword for" in str(e)
or "Did not understand the following kwargs" in str(e)
):
raise
else:
return None
except Exception:
return None
if _dtype and getattr(meta, "dtype", None) != _dtype:
with ignoring(AttributeError):
meta = meta.astype(_dtype)
if np.isscalar(meta):
meta = np.array(meta)
return meta
def allclose(a, b, equal_nan=False, **kwargs):
a = normalize_to_array(a)
b = normalize_to_array(b)
if getattr(a, "dtype", None) != "O":
return np.allclose(a, b, equal_nan=equal_nan, **kwargs)
if equal_nan:
return a.shape == b.shape and all(
np.isnan(b) if np.isnan(a) else a == b for (a, b) in zip(a.flat, b.flat)
)
return (a == b).all()
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def _not_empty(x):
return x.shape and 0 not in x.shape
def _check_dsk(dsk):
""" Check that graph is well named and non-overlapping """
if not isinstance(dsk, HighLevelGraph):
return
assert all(isinstance(k, (tuple, str)) for k in dsk.layers)
freqs = frequencies(concat(dsk.dicts.values()))
non_one = {k: v for k, v in freqs.items() if v != 1}
assert not non_one, non_one
def assert_eq_shape(a, b, check_nan=True):
for aa, bb in zip(a, b):
if math.isnan(aa) or math.isnan(bb):
if check_nan:
assert math.isnan(aa) == math.isnan(bb)
else:
assert aa == bb
def _get_dt_meta_computed(x, check_shape=True, check_graph=True):
x_original = x
x_meta = None
x_computed = None
if isinstance(x, Array):
assert x.dtype is not None
adt = x.dtype
if check_graph:
_check_dsk(x.dask)
x_meta = getattr(x, "_meta", None)
x = x.compute(scheduler="sync")
x_computed = x
if hasattr(x, "todense"):
x = x.todense()
if not hasattr(x, "dtype"):
x = np.array(x, dtype="O")
if _not_empty(x):
assert x.dtype == x_original.dtype
if check_shape:
assert_eq_shape(x_original.shape, x.shape, check_nan=False)
else:
if not hasattr(x, "dtype"):
x = np.array(x, dtype="O")
adt = getattr(x, "dtype", None)
return x, adt, x_meta, x_computed
def assert_eq(a, b, check_shape=True, check_graph=True, check_meta=True, **kwargs):
a_original = a
b_original = b
a, adt, a_meta, a_computed = _get_dt_meta_computed(
a, check_shape=check_shape, check_graph=check_graph
)
b, bdt, b_meta, b_computed = _get_dt_meta_computed(
b, check_shape=check_shape, check_graph=check_graph
)
if str(adt) != str(bdt):
# Ignore check for matching length of flexible dtypes, since Array._meta
# can't encode that information
if adt.type == bdt.type and not (adt.type == np.bytes_ or adt.type == np.str_):
diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())
raise AssertionError(
"string repr are different" + os.linesep + os.linesep.join(diff)
)
try:
assert a.shape == b.shape
if check_meta:
if hasattr(a, "_meta") and hasattr(b, "_meta"):
assert_eq(a._meta, b._meta)
if hasattr(a_original, "_meta"):
assert a_original._meta.ndim == a.ndim
if a_meta is not None:
assert type(a_original._meta) == type(a_meta)
if not (np.isscalar(a_meta) or np.isscalar(a_computed)):
assert type(a_meta) == type(a_computed)
if hasattr(b_original, "_meta"):
assert b_original._meta.ndim == b.ndim
if b_meta is not None:
assert type(b_original._meta) == type(b_meta)
if not (np.isscalar(b_meta) or np.isscalar(b_computed)):
assert type(b_meta) == type(b_computed)
assert allclose(a, b, **kwargs)
return True
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
assert c.all()
else:
assert c
return True
def safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):
"""Like functools.wraps, but safe to use even if wrapped is not a function.
Only needed on Python 2.
"""
if all(hasattr(wrapped, attr) for attr in assigned):
return functools.wraps(wrapped, assigned=assigned)
else:
return lambda x: x
def empty_like_safe(a, shape, **kwargs):
"""
Return np.empty_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.empty(shape, **kwargs).
"""
try:
return np.empty_like(a, shape=shape, **kwargs)
except TypeError:
return np.empty(shape, **kwargs)
def full_like_safe(a, fill_value, shape, **kwargs):
"""
Return np.full_like(a, fill_value, shape=shape, **kwargs) if the
shape argument is supported (requires NumPy >= 1.17), otherwise
falls back to using the old behavior, returning
np.full(shape, fill_value, **kwargs).
"""
try:
return np.full_like(a, fill_value, shape=shape, **kwargs)
except TypeError:
return np.full(shape, fill_value, **kwargs)
def ones_like_safe(a, shape, **kwargs):
"""
Return np.ones_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.ones(shape, **kwargs).
"""
try:
return np.ones_like(a, shape=shape, **kwargs)
except TypeError:
return np.ones(shape, **kwargs)
def zeros_like_safe(a, shape, **kwargs):
"""
Return np.zeros_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.zeros(shape, **kwargs).
"""
try:
return np.zeros_like(a, shape=shape, **kwargs)
except TypeError:
return np.zeros(shape, **kwargs)
def validate_axis(axis, ndim):
""" Validate an input to axis= keywords """
if isinstance(axis, (tuple, list)):
return tuple(validate_axis(ax, ndim) for ax in axis)
if not isinstance(axis, numbers.Integral):
raise TypeError("Axis value must be an integer, got %s" % axis)
if axis < -ndim or axis >= ndim:
raise AxisError(
"Axis %d is out of bounds for array of dimension %d" % (axis, ndim)
)
if axis < 0:
axis += ndim
return axis
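# Illustrative examples (not part of the original module): ``validate_axis``
# normalizes negative axes and rejects out-of-range ones, e.g.
#   validate_axis(-1, ndim=3)      -> 2
#   validate_axis((0, -1), ndim=2) -> (0, 1)
#   validate_axis(3, ndim=3)       -> raises AxisError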
def _is_nep18_active():
class A:
def __array_function__(self, *args, **kwargs):
return True
try:
return np.concatenate([A()])
except ValueError:
return False
IS_NEP18_ACTIVE = _is_nep18_active()
|
bsd-3-clause
| 7,372,364,514,582,665,000
| 29.465565
| 87
| 0.5694
| false
| 3.652246
| false
| false
| false
|
twwd/MoodleDownloader
|
downloader.py
|
1
|
9854
|
#!/usr/bin/env python3
import argparse
import importlib
import os
import re
import sqlite3
from datetime import datetime
from urllib.parse import urljoin
import requests
import yaml
def load_plugin_class(plugin_class_str):
"""
dynamically load a class from a string
"""
class_data = plugin_class_str.split(".")
module_path = "plugins." + ".".join(class_data[:-1])
class_str = class_data[-1]
mod = importlib.import_module(module_path)
return getattr(mod, class_str)
# print if verbose output is on
def log(msg):
if verbose_output:
print(msg)
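# Illustrative sketch of data/config.yaml; the key names are the ones read by
# course_loop() below, all values are hypothetical:
#
#   - name: example-moodle
#     class: moodle.Moodle          # resolved relative to the plugins/ package
#     base_url: https://moodle.example.org/
#     login_url: https://moodle.example.org/login/index.php
#     username: jdoe
#     password: secret
#     courses:
#       - name: Algorithms
#         param: "12345"            # or use `path:` for a URL relative to base_url
#         pattern: "Lecture"
#         filename_pattern: "\\.pdf$"
#         ext: ".pdf"
#         rename: "algo-lecture-%"
#         local_folder: downloads/algorithms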
def course_loop():
download_count = 0
skip_count = 0
# import config
try:
with open(os.path.join(os.path.dirname(__file__), 'data', 'config.yaml'), 'r', encoding='utf-8') as config_file:
config = yaml.load(config_file)
except FileNotFoundError:
print("Please provide a config file under data/config.yaml.")
return
# make the initial request to get the token
session = requests.Session()
# Loop through sources
for src_cfg in config:
# check if there are courses to download from
if 'courses' not in src_cfg or (source_part is not None and src_cfg['name'] not in source_part):
continue
log('\n\nSource: %s' % src_cfg['name'])
# load dynamically the source class
try:
src_class = load_plugin_class(src_cfg['class'])
src = src_class()
except AttributeError:
print('Class %s not found. Check your config file.' % src_cfg['class'])
continue
except ImportError:
print(
'Class %s not found. Check your config file' % src_cfg['class']
+ ' and ensure you have the class qualifier relative to the plugin directory.')
continue
# login
if 'login_url' in src_cfg and 'username' in src_cfg and 'password' in src_cfg:
src.login(session, src_cfg['login_url'], src_cfg['username'], src_cfg['password'])
# loop through courses
for course in src_cfg['courses']:
# check if only some courses should be checked
if course_part is not None and course['name'] not in course_part:
continue
log('\nCourse: %s\n' % course['name'])
if 'path' in course and course['path'] is not None:
course_url = urljoin(src_cfg['base_url'], course['path'])
elif 'param' in course and course['param'] is not None:
course_url = src.course_url(src_cfg['base_url'], course['param'])
else:
course_url = src_cfg['base_url']
# regex pattern for link text and file name
text_pattern = re.compile(course['pattern'])
filename_pattern = None
if 'filename_pattern' in course:
filename_pattern = re.compile(course['filename_pattern'])
# get all relevant links from the source site
links = src.link_list(session, course_url)
if links is None:
continue
for link in links:
if text_pattern.search(link[0]) is not None:
# request file http header
file_request = session.head(link[1], allow_redirects=True)
# get file name
if 'Content-Disposition' in file_request.headers:
file_disposition = file_request.headers['Content-Disposition']
file_name = file_disposition[
file_disposition.index('filename=') + 10:len(file_disposition) - 1].encode(
'latin-1').decode('utf8')
else:
# last part of the link (usually filename)
file_name = link[1].rsplit('/', 1)[-1]
# check extension
file_ext = os.path.splitext(file_name)[1]
if 'ext' in course and course['ext'] is not False:
                        # `ext` may be a single extension or a list; skip only
                        # when the file's extension matches neither form
                        if file_ext != course['ext'] and file_ext not in course['ext']:
continue
# check file name
if filename_pattern is not None and filename_pattern.search(file_name) is None:
continue
# get last modified date as timestamp
if 'Last-Modified' in file_request.headers:
file_last_modified = int(datetime.strptime(file_request.headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S %Z').timestamp())
else:
print("No timestamp found for file %s" % file_name)
continue
# adjust file name
if 'rename' in course and course['rename'] is not False:
# find a number
                        num = re.search(r'\d{1,3}', link[0])
if num is None:
                            num = re.search(r'\d{1,3}', file_name)
if num is None:
num = file_last_modified
else:
num = num.group(0)
file_name = course['rename'].replace('%', str(num)) + file_ext
# remove trailing whitespaces
file_name = file_name.strip()
# the complete file path
file_path = os.path.join(course['local_folder'], file_name)
# fetch old timestamp from database
file_last_modified_old = c.execute(
'SELECT last_modified FROM file_modifications WHERE source=? AND course=? AND file_name=?',
(src_cfg['name'], course['name'], file_name)).fetchone()
# save file and timestamp in the database if it doesn't exists
if not simulate and file_last_modified_old is None:
c.execute(
'''
INSERT INTO file_modifications (source, course, file_name, file_path, last_modified)
VALUES (?,?,?,?,?)
''',
(src_cfg['name'], course['name'], file_name, file_path, file_last_modified))
# update timestamp if there's a newer version of the file
elif not simulate and file_last_modified > file_last_modified_old[0]:
c.execute(
'UPDATE file_modifications SET last_modified=? WHERE source=? AND course=? AND file_name=?',
(file_last_modified, src_cfg['name'], course['name'], file_name))
# otherwise skip saving
else:
skip_count += 1
# log(file_name + ' (skipped)')
continue
log(file_name + ' (new)')
if simulate:
conn.rollback()
continue
# request whole file
file_request = session.get(link[1])
# write file
try:
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'wb') as f:
f.write(file_request.content)
download_count += 1
except FileNotFoundError:
print('Can\'t write file to %s' % file_path)
conn.rollback()
# save changes to the database
conn.commit()
# display count of downloaded files
log('\nDownloaded %i file(s), skipped %i file(s)' % (download_count, skip_count))
def clear_course():
if course_to_clear[0] == 'all':
c.execute("DELETE FROM file_modifications")
log('\nCleared all courses')
else:
c.execute("DELETE FROM file_modifications WHERE course=?", course_to_clear)
log('\nCleared course %s' % course_to_clear[0])
conn.commit()
# command line args
parser = argparse.ArgumentParser(
description='A simple script for downloading slides and exercises for university lectures.')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-c', '--course', action='append', help='specify a course which should be checked')
parser.add_argument('-s', '--source', action='append', help='specify a source which should be checked')
parser.add_argument('-sim', '--simulate', action='store_true', help='specify if the process should only be simulated')
parser.add_argument('--clear', action='append',
help='specify a course which files should be deleted from the database (not from file system).'
+ 'Use keyword \'all\' to clear the whole database')
args = parser.parse_args()
verbose_output = args.verbose
simulate = args.simulate
course_part = args.course
source_part = args.source
course_to_clear = args.clear
# database for timestamps
conn = sqlite3.connect(os.path.join(os.path.dirname(__file__), 'data', 'file_modifications.db'))
c = conn.cursor()
# check if table exists otherwise create it
c.execute(
'''
CREATE TABLE IF NOT EXISTS file_modifications (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
source TEXT,
course TEXT,
file_name TEXT,
file_path TEXT,
last_modified INTEGER
);
''')
if simulate:
log("Simulation on")
if course_to_clear is not None:
clear_course()
else:
course_loop()
# close cursor
c.close()
|
mit
| 195,151,069,313,467,550
| 37.643137
| 146
| 0.531561
| false
| 4.530575
| true
| false
| false
|
eliben/code-for-blog
|
2017/continuations-trampolines/tracing.py
|
1
|
1493
|
# Tracing of function calls. Use @TraceCalls() as a decorator on functions.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import sys
from functools import wraps
class TraceCalls(object):
""" Use as a decorator on functions that should be traced. Several
functions can be decorated - they will all be indented according
to their call depth.
"""
def __init__(self, stream=sys.stdout, indent_step=2, show_ret=False):
self.stream = stream
self.indent_step = indent_step
self.show_ret = show_ret
# This is a class attribute since we want to share the indentation
# level between different traced functions, in case they call
# each other.
TraceCalls.cur_indent = 0
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
indent = ' ' * TraceCalls.cur_indent
argstr = ', '.join(
[self._argrepr(a) for a in args] +
["%s=%s" % (a, repr(b)) for a, b in kwargs.items()])
self.stream.write('%s%s(%s)\n' % (indent, fn.__name__, argstr))
TraceCalls.cur_indent += self.indent_step
ret = fn(*args, **kwargs)
TraceCalls.cur_indent -= self.indent_step
if self.show_ret:
self.stream.write('%s--> %s\n' % (indent, ret))
return ret
return wrapper
def _argrepr(self, arg):
return repr(arg)
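# Illustrative usage sketch (not part of the original module); ``fib`` is a
# hypothetical function chosen only to demonstrate the indented trace output.
if __name__ == '__main__':
    @TraceCalls(show_ret=True)
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    fib(3)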
|
unlicense
| 7,345,459,679,741,440,000
| 33.72093
| 75
| 0.58004
| false
| 3.888021
| false
| false
| false
|
wdbm/shijian
|
shijian_examples_clocks.py
|
1
|
5541
|
#!/usr/bin/env python
"""
################################################################################
# #
# shijian_examples_clocks #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program is shijian examples. #
# #
# copyright (C) 2014 Will Breaden Madden, wbm@protonmail.ch #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
"""
import inspect
import time
import shijian
def main():
print("create clock alpha")
alpha = shijian.Clock(name = "alpha")
print("clock alpha start time: {time}".format(time = alpha.start_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock alpha current time (s): {time}".format(time = alpha.time()))
print("\ncreate clock beta")
beta = shijian.Clock(name = "beta")
print("clock beta start time: {time}".format(time = beta.start_time()))
print("clock beta stop time: {time}".format(time = beta.stop_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock beta current time (s): {time}".format(time = beta.time()))
print("stop clock beta")
beta.stop()
print("clock beta start time: {time}".format(time = beta.start_time()))
print("clock beta stop time: {time}".format(time = beta.stop_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock beta start time: {time}".format(time = beta.start_time()))
print("clock beta stop time: {time}".format(time = beta.stop_time()))
print("clock beta current time (s): {time}".format(time = beta.time()))
print("\nclock beta printout:\n")
beta.printout()
print("create two gamma clocks")
gamma = shijian.Clock(name = "gamma")
gamma = shijian.Clock(name = "gamma")
print("sleep 2 seconds")
time.sleep(2)
print("\ncreate two unnamed clocks")
delta = shijian.Clock()
epsilon = shijian.Clock()
print("sleep 2 seconds")
time.sleep(2)
print("\nrun function 1 (which is timed using internal clocks)")
print("result of function 1: {result}".format(result = function_1()))
print("\nrun function 2 (which is timed using a decorator)")
print("result of function 2: {result}".format(result = function_2()))
print("\ncreate clock zeta, to illustrate clock resets")
zeta = shijian.Clock(name = "zeta")
print("clock zeta start time: {time}".format(time = zeta.start_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock zeta current time (s): {time}".format(time = zeta.time()))
print("reset clock zeta and start it again")
zeta.reset()
zeta.start()
print("clock zeta start time: {time}".format(time = zeta.start_time()))
print("sleep 2 seconds")
time.sleep(2)
print("clock zeta current time (s): {time}".format(time = zeta.time()))
print("\nclocks full printout:\n")
shijian.clocks.printout(style = "full")
print("clocks statistics printout:\n")
shijian.clocks.printout()
def function_1():
function_name = inspect.stack()[0][3]
clock = shijian.Clock(name = function_name)
print("initiate {function_name}".format(function_name = function_name))
time.sleep(3)
print("terminate {function_name}".format(function_name = function_name))
clock.stop()
return(3)
@shijian.timer
def function_2():
function_name = inspect.stack()[0][3]
print("initiate {function_name}".format(function_name = function_name))
time.sleep(4)
print("terminate {function_name}".format(function_name = function_name))
return(4)
if __name__ == '__main__':
main()
|
gpl-3.0
| -61,446,250,638,534,950
| 43.328
| 80
| 0.484931
| false
| 4.556743
| false
| false
| false
|
tstapler/Access-Plus-Schedule-Parser
|
web/ScheduleViewer/migrations/0001_initial.py
|
1
|
2389
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('course_name', models.CharField(max_length=30)),
('course_number', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='MeetingTime',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('days', models.CharField(max_length=2, choices=[('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'WEDNESDAY'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday')])),
('time', models.DateField()),
('instructor', models.CharField(max_length=30)),
('location', models.CharField(max_length=30)),
('course', models.ForeignKey(to='ScheduleViewer.Course')),
],
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('semester', models.CharField(max_length=2, choices=[('fall', 'Fall'), ('spring', 'Spring'), ('summer', 'Summer')], default='fall')),
('year', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=30)),
('email', models.EmailField(max_length=254)),
],
),
migrations.AddField(
model_name='schedule',
name='student',
field=models.ForeignKey(to='ScheduleViewer.Student'),
),
migrations.AddField(
model_name='course',
name='schedule',
field=models.ForeignKey(to='ScheduleViewer.Schedule'),
),
]
|
mit
| 3,464,933,900,472,990,700
| 39.491525
| 183
| 0.52951
| false
| 4.335753
| false
| false
| false
|
cumc-dbmi/pmi_sprint_reporter
|
webapi.py
|
1
|
2220
|
"""
Utilities to configure WebAPI (backend for Atlas) to work with the database(s) loaded by reporter and achilles.
This module makes the following assumptions:
* WebAPI section of settings is valid
* The WebAPI database (referred to by `settings.webapi_conn_str`) already contains the application tables
"""
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import create_engine
import run_config
import settings
engine = create_engine(settings.webapi_conn_str)
metadata = MetaData(bind=engine, reflect=True)
source_table = Table('source', metadata, autoload=True)
source_daimon_table = Table('source_daimon', metadata, autoload=True)
def delete_sources():
"""
Remove all records from source and source_daimon tables
"""
delete_source_daimon = source_daimon_table.delete()
delete_source = source_table.delete()
engine.execute(delete_source_daimon)
engine.execute(delete_source)
def create_source(hpo_id, hpo_name):
"""
Insert source and source_daimon records associated with an HPO
:param hpo_id: ID of the HPO (see hpo.csv)
:param hpo_name: Name of the HPO (see hpo.csv)
"""
source_row = dict(SOURCE_NAME=hpo_name,
SOURCE_KEY=hpo_id,
SOURCE_CONNECTION=settings.cdm_jdbc_conn_str,
SOURCE_DIALECT=run_config.cdm_dialect)
insert_source = source_table.insert().returning(source_table.c.SOURCE_ID).values(source_row)
source_id = engine.execute(insert_source).lastrowid
cdm_daimon_row = dict(source_id=source_id, daimon_type=0, table_qualifier=hpo_id, priority=1)
vocab_daimon_row = dict(source_id=source_id, daimon_type=1, table_qualifier='dbo', priority=1)
results_daimon_row = dict(source_id=source_id, daimon_type=2, table_qualifier=hpo_id, priority=1)
source_daimon_rows = [cdm_daimon_row, vocab_daimon_row, results_daimon_row]
insert_source_daimon = source_daimon_table.insert().values(source_daimon_rows)
engine.execute(insert_source_daimon)
def main():
delete_sources()
for hpo in run_config.all_hpos.to_dict(orient='records'):
create_source(hpo['hpo_id'], hpo['name'])
if __name__ == '__main__':
main()
|
mit
| 1,704,027,627,111,090,700
| 36
| 111
| 0.703604
| false
| 3.231441
| false
| false
| false
|
Yelp/pgctl
|
pgctl/fuser.py
|
1
|
2200
|
#!/usr/bin/env python2.7
"""\
usage: pgctl-fuser [-d] file [file ...]
Shows the pids (of the current user) that have this file opened.
This is useful for finding which processes hold a file lock (flock).
This has the same behavior as `lsof -t file`, but is *much* faster.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from .debug import trace
def stat(path):
from os import stat
try:
return stat(path)
except EnvironmentError as error:
trace('fuser suppressed: %s', error)
return None
def listdir(path):
from os import listdir
try:
return listdir(path)
except EnvironmentError as error:
trace('fuser suppressed: %s', error)
return ()
def fuser(path, allow_deleted=False):
"""Return the list of pids that have 'path' open, for the current user"""
search = stat(path)
if search is None and not allow_deleted:
return
from glob import glob
for fddir in glob('/proc/*/fd/'):
try:
pid = int(fddir.split('/', 3)[2])
except ValueError:
continue
fds = listdir(fddir)
for fd in fds:
from os.path import join
fd = join(fddir, fd)
found = stat(fd)
if found is None:
# fd disappeared since we listed
continue
if found == search:
yield pid
break
if allow_deleted and found.st_nlink == 0:
from os import readlink
if readlink(fd) == path + ' (deleted)':
yield pid
break
def main(args=None):
from argparse import ArgumentParser
from sys import argv
args = args or argv
parser = ArgumentParser(description=__doc__)
parser.add_argument('-d', '--allow-deleted', action='store_true', help='allow deleted files')
parser.add_argument('file', nargs='+')
args = parser.parse_args(args[1:])
for f in args.file:
for pid in fuser(f, allow_deleted=args.allow_deleted):
print(pid)
if __name__ == '__main__':
exit(main())
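# Illustrative usage sketch (not part of the original module); the lock path
# below is hypothetical:
#   $ pgctl-fuser -d /var/run/example/lock
#   12345
# or, programmatically:
#   from pgctl.fuser import fuser
#   pids = list(fuser('/var/run/example/lock', allow_deleted=True))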
|
mit
| -5,979,769,641,141,754,000
| 25.506024
| 97
| 0.581818
| false
| 4.104478
| false
| false
| false
|
skosukhin/spack
|
var/spack/repos/builtin/packages/ea-utils/package.py
|
1
|
2144
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class EaUtils(MakefilePackage):
"""Command-line tools for processing biological sequencing data. Barcode
demultiplexing, adapter trimming, etc. Primarily written to support an
Illumina based pipeline - but should work with any FASTQs."""
homepage = "http://expressionanalysis.github.io/ea-utils/"
url = "https://github.com/ExpressionAnalysis/ea-utils/archive/1.04.807.tar.gz"
version('1.04.807', '5972b9f712920603b7527f46c0063a09')
depends_on('subversion')
depends_on('zlib')
depends_on('gsl')
depends_on('bamtools')
# perl module required for make check, which is included in the default
# target
depends_on('perl', type='build')
build_directory = 'clipper'
def edit(self, spec, prefix):
with working_dir('clipper'):
makefile = FileFilter('Makefile')
makefile.filter('/usr', prefix)
|
lgpl-2.1
| 620,051,369,482,476,400
| 41.039216
| 82
| 0.67444
| false
| 4.022514
| false
| false
| false
|
MehdiSfr/tensor-flow
|
tensorflow/python/training/input.py
|
1
|
24193
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input pipeline.
Please see the [reading data how-to](../../how_tos/reading_data/index.md)
for context.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import summary_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import queue_runner
def match_filenames_once(pattern, name=None):
"""Save the list of files matching pattern, so it is only computed once.
Args:
pattern: A file pattern (glob).
name: A name for the operations (optional).
Returns:
A variable that is initialized to the list of files matching pattern.
"""
with ops.op_scope([pattern], name, "matching_filenames") as name:
return variables.Variable(io_ops.matching_files(pattern), trainable=False,
name=name, validate_shape=False)
def limit_epochs(tensor, num_epochs=None, name=None):
"""Returns tensor `num_epochs` times and then raises an `OutOfRange` error.
Args:
tensor: Any `Tensor`.
num_epochs: An integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
Returns:
tensor or `OutOfRange`.
"""
if num_epochs is None:
return tensor
if num_epochs <= 0:
raise ValueError("num_epochs must be > 0 not %d." % num_epochs)
with ops.op_scope([tensor], name, "limit_epochs") as name:
zero64 = constant_op.constant(0, dtype=dtypes.int64)
epochs = variables.Variable(zero64, name="epochs")
counter = epochs.count_up_to(num_epochs)
with ops.control_dependencies([counter]):
return array_ops.identity(tensor, name=name)
def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity,
name, summary_name):
if shuffle:
input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
input_tensor = limit_epochs(input_tensor, num_epochs)
q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]],
name=name)
enq = q.enqueue_many([input_tensor])
queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
summary_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
math_ops.cast(q.size(), dtypes.float32) *
(1. / capacity))
return q
def string_input_producer(string_tensor, num_epochs=None, shuffle=True,
seed=None, capacity=32, name=None):
"""Output strings (e.g. filenames) to a queue for an input pipeline.
Args:
string_tensor: A 1-D string tensor with the strings to produce.
num_epochs: An integer (optional). If specified, `string_input_producer`
produces each string from `string_tensor` `num_epochs` times before
generating an OutOfRange error. If not specified, `string_input_producer`
can cycle through the strings in `string_tensor` an unlimited number of
times.
shuffle: Boolean. If true, the strings are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A queue with the output strings. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([string_tensor], name, "input_producer") as name:
return _input_producer(
string_tensor, dtypes.string, num_epochs, shuffle, seed, capacity, name,
"fraction_of_%d_full" % capacity)
def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
capacity=32, name=None):
"""Produces the integers from 0 to limit-1 in a queue.
Args:
limit: An int32 scalar tensor.
num_epochs: An integer (optional). If specified, `range_input_producer`
produces each integer `num_epochs` times before generating an
OutOfRange error. If not specified, `range_input_producer` can cycle
through the integers an unlimited number of times.
shuffle: Boolean. If true, the integers are randomly shuffled within each
epoch.
seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A Queue with the output integers. A `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
"""
with ops.op_scope([limit], name, "input_producer") as name:
range_tensor = math_ops.range(limit)
return _input_producer(
range_tensor, dtypes.int32, num_epochs, shuffle, seed, capacity, name,
"fraction_of_%d_full" % capacity)
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, name=None):
"""Produces a slice of each `Tensor` in `tensor_list`.
Implemented using a Queue -- a `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
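  For example, an illustrative sketch (`all_images` and `all_labels` are
  hypothetical tensors whose first dimension indexes examples):
  ```python
  image, label = tf.train.slice_input_producer([all_images, all_labels],
                                               shuffle=True)
  ```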
Args:
tensor_list: A list of `Tensor` objects. Every `Tensor` in
`tensor_list` must have the same size in the first dimension.
num_epochs: An integer (optional). If specified, `slice_input_producer`
produces each slice `num_epochs` times before generating
an `OutOfRange` error. If not specified, `slice_input_producer` can cycle
through the slices an unlimited number of times.
    shuffle: Boolean. If true, the slices are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
capacity: An integer. Sets the queue capacity.
name: A name for the operations (optional).
Returns:
A list of tensors, one for each element of `tensor_list`. If the tensor
in `tensor_list` has shape `[N, a, b, .., z]`, then the corresponding output
tensor will have shape `[a, b, ..., z]`.
"""
with ops.op_scope(tensor_list, name, "input_producer"):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError(
"Expected at least one tensor in slice_input_producer().")
range_size = array_ops.shape(tensor_list[0])[0]
# TODO(josh11b): Add an assertion that the first dimension of
# everything in TensorList matches. Maybe just check the inferred shapes?
queue = range_input_producer(range_size, num_epochs=num_epochs,
shuffle=shuffle, seed=seed, capacity=capacity)
index = queue.dequeue()
output = [array_ops.gather(t, index) for t in tensor_list]
return output
# Helpers for the batching functions ------------------------------------------
def _flatten(tensor_list_list):
return [tensor for tensor_list in tensor_list_list for tensor in tensor_list]
def _validate(tensor_list):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError("Expected at least one tensor in batch().")
return tensor_list
def _validate_join(tensor_list_list):
tensor_list_list = [ops.convert_n_to_tensor_or_indexed_slices(tl)
for tl in tensor_list_list]
if not tensor_list_list:
raise ValueError("Expected at least one input in batch_join().")
return tensor_list_list
def _dtypes(tensor_list_list):
all_types = [[t.dtype for t in tl] for tl in tensor_list_list]
types = all_types[0]
for other_types in all_types[1:]:
if other_types != types:
raise TypeError("Expected types to be consistent: %s vs. %s." %
", ".join(x.name for x in types),
", ".join(x.name for x in other_types))
return types
def _merge_shapes(shape_list, enqueue_many):
shape_list = [tensor_shape.as_shape(s) for s in shape_list]
if enqueue_many:
# We want the shapes without the leading batch dimension.
shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
merged_shape = shape_list[0]
for s in shape_list[1:]:
merged_shape.merge_with(s)
return merged_shape.as_list()
def _shapes(tensor_list_list, shapes, enqueue_many):
if shapes is None:
l = len(tensor_list_list[0])
shapes = [_merge_shapes(
[tl[i].get_shape().as_list() for tl in tensor_list_list], enqueue_many)
for i in xrange(l)]
return shapes
def _enqueue_join(queue, tensor_list_list, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tl) for tl in tensor_list_list]
else:
enqueue_ops = [queue.enqueue(tl) for tl in tensor_list_list]
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
def _enqueue(queue, tensor_list, threads, enqueue_many):
if enqueue_many:
enqueue_ops = [queue.enqueue_many(tensor_list)] * threads
else:
enqueue_ops = [queue.enqueue(tensor_list)] * threads
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
# Batching functions ----------------------------------------------------------
def batch(tensor_list, batch_size, num_threads=1, capacity=32,
enqueue_many=False, shapes=None, name=None):
"""Creates batches of tensors in `tensor_list`.
This function is implemented using a queue. A `QueueRunner` for the
queue is added to the current `Graph`'s `QUEUE_RUNNER` collection.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`. The `capacity` argument
  controls how long the prefetching is allowed to grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
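  For example, an illustrative sketch (`single_image` and `single_label` are
  hypothetical tensors produced by an upstream reader):
  ```python
  # Creates batches of 32 images and 32 labels.
  image_batch, label_batch = tf.train.batch(
      [single_image, single_label],
      batch_size=32,
      num_threads=4,
      capacity=1000)
  ```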
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
num_threads: The number of threads enqueuing `tensor_list`.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "batch") as name:
tensor_list = _validate(tensor_list)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
summary_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
return queue.dequeue_many(batch_size, name=name)
# TODO(josh11b): Add a thread_multiplier or num_threads (that has to be
# a multiple of len(tensor_list_list)?) parameter, to address the use
# case where you want more parallelism than you can support different
# readers (either because you don't have that many files or can't
# read that many files in parallel due to the number of seeks required).
# Once this is done, batch() can be written as a call to batch_join().
def batch_join(tensor_list_list, batch_size, capacity=32, enqueue_many=False,
shapes=None, name=None):
"""Runs a list of tensors to fill a queue to create batches of examples.
Enqueues a different list of tensors in different threads.
Implemented using a queue -- a `QueueRunner` for the queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
`len(tensor_list_list)` threads will be started,
with thread `i` enqueuing the tensors from
`tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first
dimension if `enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor `x` will be output as a
tensor with shape `[batch_size] + x.shape`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. The slices of any input tensor
`x` are treated as examples, and the output tensors will have shape
`[batch_size] + x.shape[1:]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
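  For example, an illustrative sketch (`read_my_file_format` and
  `filename_queue` are hypothetical; each reader returns an [example, label]
  list):
  ```python
  example_list = [read_my_file_format(filename_queue) for _ in range(4)]
  example_batch, label_batch = tf.train.batch_join(
      example_list, batch_size=32)
  ```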
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as
`tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(_flatten(tensor_list_list), name, "batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
# TODO(josh11b,mrry): Switch to BatchQueue once it is written.
queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=types, shapes=shapes)
_enqueue_join(queue, tensor_list_list, enqueue_many)
summary_ops.scalar_summary(
"queue/%s/fraction_of_%d_full" % (queue.name, capacity),
math_ops.cast(queue.size(), dtypes.float32) * (1. / capacity))
return queue.dequeue_many(batch_size, name=name)
def shuffle_batch(tensor_list, batch_size, capacity, min_after_dequeue,
num_threads=1, seed=None, enqueue_many=False, shapes=None,
name=None):
"""Creates batches by randomly shuffling tensors.
This function adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list`.
If `enqueue_many` is `False`, `tensor_list` is assumed to represent a
single example. An input tensor with shape `[x, y, z]` will be output
as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list` is assumed to represent a
batch of examples, where the first dimension is indexed by example,
and all members of `tensor_list` should have the same size in the
first dimension. If an input tensor has shape `[*, x, y, z]`, the
output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
this exception, however, if this operation is used in your main thread
you are responsible for catching this yourself.
For example:
```python
# Creates batches of 32 images and 32 labels.
image_batch, label_batch = tf.train.shuffle_batch(
[single_image, single_label],
batch_size=32,
num_threads=4,
capacity=50000,
min_after_dequeue=10000)
```
*N.B.:* You must ensure that either (i) the `shapes` argument is
passed, or (ii) all of the tensors in `tensor_list` must have
fully-defined shapes. `ValueError` will be raised if neither of
these conditions holds.
Args:
tensor_list: The list of tensors to enqueue.
batch_size: The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
num_threads: The number of threads enqueuing `tensor_list`.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list` is a single example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list`.
"""
with ops.op_scope(tensor_list, name, "shuffle_batch") as name:
tensor_list = _validate(tensor_list)
types = _dtypes([tensor_list])
shapes = _shapes([tensor_list], shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes)
_enqueue(queue, tensor_list, num_threads, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
summary_ops.scalar_summary(summary_name, full)
return queue.dequeue_many(batch_size, name=name)
def shuffle_batch_join(tensor_list_list, batch_size, capacity,
min_after_dequeue, seed=None, enqueue_many=False,
shapes=None, name=None):
"""Create batches by randomly shuffling tensors.
This version enqueues a different list of tensors in different threads.
It adds the following to the current `Graph`:
* A shuffling queue into which tensors from `tensor_list_list` are enqueued.
* A `dequeue_many` operation to create batches from the queue.
* A `QueueRunner` to `QUEUE_RUNNER` collection, to enqueue the tensors
from `tensor_list_list`.
`len(tensor_list_list)` threads will be started, with thread `i` enqueuing
the tensors from `tensor_list_list[i]`. `tensor_list_list[i1][j]` must match
`tensor_list_list[i2][j]` in type and shape, except in the first dimension if
`enqueue_many` is true.
If `enqueue_many` is `False`, each `tensor_list_list[i]` is assumed
to represent a single example. An input tensor with shape `[x, y,
z]` will be output as a tensor with shape `[batch_size, x, y, z]`.
If `enqueue_many` is `True`, `tensor_list_list[i]` is assumed to
represent a batch of examples, where the first dimension is indexed
by example, and all members of `tensor_list_list[i]` should have the
same size in the first dimension. If an input tensor has shape `[*, x,
y, z]`, the output will have shape `[batch_size, x, y, z]`.
  The `capacity` argument controls how long the prefetching is allowed to
grow the queues.
The returned operation is a dequeue operation and will throw
`tf.errors.OutOfRangeError` if the input queue is exhausted. If this
operation is feeding another input queue, its queue runner will catch
  this exception; however, if this operation is used in your main thread
you are responsible for catching this yourself.
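  For example (a minimal sketch; `single_image_0`, `single_label_0`,
  `single_image_1` and `single_label_1` stand in for per-thread input
  tensors and are not defined in this module):
  ```python
  # Two reader pipelines, each enqueued by its own thread, feed one
  # shared shuffling queue.
  image_batch, label_batch = tf.train.shuffle_batch_join(
      [[single_image_0, single_label_0],
       [single_image_1, single_label_1]],
      batch_size=32,
      capacity=50000,
      min_after_dequeue=10000)
  ```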
Args:
tensor_list_list: A list of tuples of tensors to enqueue.
batch_size: An integer. The new batch size pulled from the queue.
capacity: An integer. The maximum number of elements in the queue.
    min_after_dequeue: Minimum number of elements in the queue after a
dequeue, used to ensure a level of mixing of elements.
seed: Seed for the random shuffling within the queue.
enqueue_many: Whether each tensor in `tensor_list_list` is a single
example.
shapes: (Optional) The shapes for each example. Defaults to the
inferred shapes for `tensor_list_list[i]`.
name: (Optional) A name for the operations.
Returns:
A list of tensors with the same number and types as `tensor_list_list[i]`.
Raises:
ValueError: If the `shapes` are not specified, and cannot be
inferred from the elements of `tensor_list_list`.
"""
with ops.op_scope(
_flatten(tensor_list_list), name, "shuffle_batch_join") as name:
tensor_list_list = _validate_join(tensor_list_list)
types = _dtypes(tensor_list_list)
shapes = _shapes(tensor_list_list, shapes, enqueue_many)
queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity, min_after_dequeue=min_after_dequeue, seed=seed,
dtypes=types, shapes=shapes)
_enqueue_join(queue, tensor_list_list, enqueue_many)
full = (math_ops.cast(math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) *
(1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = (
"queue/%sfraction_over_%d_of_%d_full" %
(name, min_after_dequeue, capacity - min_after_dequeue))
summary_ops.scalar_summary(summary_name, full)
return queue.dequeue_many(batch_size, name=name)
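# Usage sketch: the batching functions above only add graph nodes and queue
# runners; those runners must be started before any batch can be dequeued.
# The `image` and `label` tensors below are hypothetical reader outputs.
#
#   images, labels = tf.train.shuffle_batch(
#       [image, label], batch_size=32, capacity=2000, min_after_dequeue=1000)
#   with tf.Session() as sess:
#       coord = tf.train.Coordinator()
#       threads = tf.train.start_queue_runners(sess=sess, coord=coord)
#       try:
#           image_batch, label_batch = sess.run([images, labels])
#       finally:
#           coord.request_stop()
#           coord.join(threads)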
|
apache-2.0
| -4,704,024,588,663,581,000
| 42.201786
| 80
| 0.69144
| false
| 3.794385
| false
| false
| false
|
rsenk330/Flask-Cake
|
flask_cake/tests/test_cake.py
|
1
|
1572
|
import pytest
try:
from unittest import mock # Python 3
except ImportError:
import mock
@pytest.fixture
def app(tmpdir):
from flask import Flask
root_path = tmpdir.ensure("test-proj", dir=True)
tmpdir.ensure("test-proj/static/coffee", dir=True)
p = tmpdir.join("test-proj/static/coffee", "Cakefile")
p.write("")
app = Flask(__name__)
app.root_path = root_path.strpath
return app
def test_cake_init(app):
from flask_cake import Cake
cake = Cake(app)
assert cake.app == app
assert cake.tasks == ["build"]
assert cake.cake_parent == "coffee"
def test_watchdog(app, tmpdir):
from flask_cake import Cake
with mock.patch("watchdog.observers.Observer.schedule") as mock_schedule:
Cake(app)
cake_dir = tmpdir.join("test-proj/static/coffee").strpath
mock_schedule.assert_called_once_with(mock.ANY, path=cake_dir, recursive=True)
def test_events_on_any_event(app):
from flask_cake.cake import Events
e = Events(app.root_path, tasks=["build"])
with mock.patch("flask_cake.cake.subprocess") as subprocess:
e.on_any_event(None)
subprocess.Popen.assert_called_once_with(["cake", "build"], cwd=app.root_path, stdout=mock.ANY)
def test_events_on_any_event_str(app):
from flask_cake.cake import Events
e = Events(app.root_path, tasks="build")
with mock.patch("flask_cake.cake.subprocess") as subprocess:
e.on_any_event(None)
subprocess.Popen.assert_called_once_with(["cake", "build"], cwd=app.root_path, stdout=mock.ANY)
|
bsd-2-clause
| 9,065,584,534,477,928,000
| 26.103448
| 103
| 0.670483
| false
| 3.188641
| true
| false
| false
|
OpenTrons/opentrons_sdk
|
api/tests/opentrons/calibration/tip_length/test_tip_length_calibration.py
|
1
|
1461
|
import pytest
from typing import List, Tuple
from opentrons.calibration.tip_length import state_machine
valid_transitions: List[Tuple[str, str, str]] = [
('loadLabware', 'sessionStarted', 'labwareLoaded'),
('moveToMeasureNozzleOffset', 'labwareLoaded', 'measuringNozzleOffset'),
('jog', 'measuringNozzleOffset', 'measuringNozzleOffset'),
('saveNozzlePosition', 'measuringNozzleOffset', 'preparingPipette'),
('jog', 'preparingPipette', 'preparingPipette'),
('pickUpTip', 'preparingPipette', 'inspectingTip'),
('invalidateTip', 'inspectingTip', 'preparingPipette'),
('confirmTip', 'inspectingTip', 'measuringTipOffset'),
('jog', 'measuringTipOffset', 'measuringTipOffset'),
('saveTipPosition', 'measuringTipOffset', 'calibrationComplete'),
('exitSession', 'calibrationComplete', 'sessionExited'),
('exitSession', 'sessionStarted', 'sessionExited'),
('exitSession', 'labwareLoaded', 'sessionExited'),
('exitSession', 'measuringNozzleOffset', 'sessionExited'),
('exitSession', 'preparingPipette', 'sessionExited'),
('exitSession', 'inspectingTip', 'sessionExited'),
('exitSession', 'measuringTipOffset', 'sessionExited'),
]
@pytest.mark.parametrize('trigger,from_state,to_state', valid_transitions)
async def test_valid_transitions(trigger, from_state, to_state):
sm = state_machine.TipCalibrationStateMachine(initial_state=from_state)
await sm.trigger_transition(trigger)
assert sm.current_state_name == to_state
|
apache-2.0
| 4,413,712,137,367,218,000
| 44.65625
| 75
| 0.740589
| false
| 3.503597
| false
| false
| false
|
jackliusr/scrapy-crawlers
|
crawlers/crawlers/spiders/lq7m.py
|
1
|
2098
|
import scrapy
from selenium import webdriver
from scrapy.http import JsonRequest, Request
from scrapy import Selector
import time
import json
class Lq7mSpider(scrapy.Spider):
name = 'lq7m'
start_urls = ['http://lq.7m.com.cn/list/3/2.shtml']
custom_settings = {
'DATABASEPIPELINE_ENABLED': True,
}
def start_requests(self):
for i in range(2,200):
yield Request(url=f"http://lq.7m.com.cn/list/3/{i}.shtml", callback=self.parseList)
def parseList(self, response):
sel = Selector(response)
urls = sel.xpath("//div[@class=\"cb_l\"]//a[contains(@href, '/news/')]/@href").extract()
for url in urls:
yield Request(url=f"http://lq.7m.com.cn{url}",callback=self.parsePage)
def parsePage(self,response):
sel = Selector(response)
        title = ''.join(sel.xpath("//div[@class=\"pa_tec\"]/h1/text()").extract()).strip()
content = ''.join(sel.xpath("//div[@class=\"n_zi\"]//text()").extract()).strip()
pubTimeTmp = (sel.xpath("//div[@class=\"pa_tek\"]/div[@class=\"pa_tec\"]/p[1]/text()").extract_first())
pubTime = pubTimeTmp[15:26]
keywords = sel.xpath("//meta[@name='keywords']/@content")[0].extract()
description = sel.xpath("//meta[@name='description']/@content")[0].extract()
image= sel.xpath("//div[@class=\"n_zi\"]//img[1]/@src")
category = 2
if image:
image_url = f"http://lq.7m.com.cn{image[0].extract()}"
yield {
"title": title,
"content": content,
"pubTime": pubTime,
"keywords": keywords,
"description": description,
'category': category,
"images": [image_url],
"image_urls": [image_url],
}
else:
yield {
"title": title,
"content": content,
"pubTime": pubTime,
"keywords": keywords,
"description": description,
'category': category,
}
|
apache-2.0
| 6,463,469,120,809,064,000
| 36.464286
| 111
| 0.531935
| false
| 3.629758
| false
| false
| false
|
ColdenCullen/d2dl
|
enki2/enkilib/python/parser.py
|
1
|
5204
|
#
# Copyright (c) 2008 Eric Anderton
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
class Parser:
def parse(self,input):
self.input = input
self.position = 0
return self.parse_Syntax()
def eoi(self):
if self.position >= len(self.input):
return True
return False
def any(self):
if self.position >= len(self.input):
return False
self.position = self.position + 1
return True
def peek(self):
if self.position >= len(self.input):
return ""
else:
return self.input[self.position]
def DEBUG(self,text=""):
def inner():
print text,self.position,self.input[self.position:]
return True
return inner
def REQUIRED(self,text,term=None):
def inner():
if term != None and term():
return True
raise ParseException(text,self.position,self.peek())
return inner
def TERMINAL(self,value,err=None):
def inner():
#print "len: ",len(self.input)," pos: ",self.position,"(",self.input[self.position:],") val: ",value
if self.position == len(self.input):
if err != None:
                    raise ParseException(err,self.position,self.peek())
return False
if self.input[self.position:].startswith(value):
self.position += len(value);
#print "matched: ",value," moved to: ",self.position
return True
return False
return inner
def RANGE(self,start,end):
def inner():
#print "len: ",len(self.input)," pos: ",self.position,"(",self.input[self.position:],") range: ",start,"-",end,
if self.position == len(self.input):
return False
ch = self.input[self.position]
if ch >= start[0] and ch <= end[0]:
self.position = self.position + 1
#print "matched: ",start,"-",end," moved to: ",self.position
return True
return False
return inner
def AND(self,*args):
def inner():
pos = self.position
for term in args:
if not term():
self.position = pos
return False
return True
return inner
def OR(self,*args):
def inner():
for term in args:
if term():
return True
return False
return inner
def OPTIONAL(self,term):
def inner():
term()
return True
return inner
def NOT(self,term):
def inner():
pos = self.position
if term():
self.position = pos
return False
return True
return inner
def ZEROORMORE(self,term,terminator = None,err=None):
def inner():
if terminator == None:
while(not self.eoi() and term()):
pass
else:
while(not self.eoi() and not terminator() and term()):
pass
return True
return inner
def ONEORMORE(self,term,terminator = None):
def inner():
pos = self.position
if terminator and terminator():
self.position = pos
return False
if not term():
self.position = pos
return False
if terminator == None:
while(not self.eoi() and term()):
pass
else:
while(not self.eoi() and not terminator() and term()):
pass
return True
return inner
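# Usage sketch: the combinator methods above are meant to be driven by a
# subclass that supplies parse_Syntax(). The tiny "one or more digits"
# grammar below is a hypothetical example, not one of the generated parsers.
if __name__ == "__main__":
    class DigitsParser(Parser):
        def parse_Syntax(self):
            return self.ONEORMORE(self.RANGE("0", "9"))()

    print DigitsParser().parse("123")   # True
    print DigitsParser().parse("abc")   # False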
|
mit
| 5,033,403,532,751,314,000
| 32.939597
| 123
| 0.511337
| false
| 4.831941
| false
| false
| false
|
benagricola/exabgp
|
lib/exabgp/bgp/message/update/nlri/qualifier/etag.py
|
1
|
1397
|
# encoding: utf-8
"""
etag.py
Created by Thomas Mangin on 2014-06-26.
Copyright (c) 2014-2015 Orange. All rights reserved.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
# TODO: take into account E-VPN specs that specify the role of the first bit of ESI
# (since draft-ietf-l2vpn-evpn-05)
from struct import pack
from struct import unpack
class EthernetTag (object):
MAX = pow(2,32)-1
__slots__ = ['tag']
def __init__ (self, tag=0):
self.tag = tag
def __eq__ (self, other):
return self.tag == other.tag
	def __ne__ (self, other):
return self.tag != other.tag
def __lt__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __le__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __gt__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __ge__ (self, other):
raise RuntimeError('comparing EthernetTag for ordering does not make sense')
def __str__ (self):
return repr(self.tag)
def __repr__ (self):
return repr(self.tag)
def pack (self):
return pack("!L",self.tag)
def __len__ (self):
return 4
def __hash__ (self):
return hash(self.tag)
@classmethod
def unpack (cls, data):
return cls(unpack("!L",data[:4])[0])
def json (self, compact=None):
return '"ethernet-tag": %s' % self.tag
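# Usage sketch: round-tripping a tag through its 4-byte wire encoding.
# The value 4096 is an arbitrary example.
#
#   tag = EthernetTag(4096)
#   wire = tag.pack()                      # struct.pack("!L", 4096)
#   assert EthernetTag.unpack(wire) == tag
#   print tag.json()                       # "ethernet-tag": 4096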
|
bsd-3-clause
| 3,194,523,706,247,964,000
| 21.174603
| 83
| 0.67287
| false
| 3.097561
| false
| false
| false
|
kimberlythegeek/axe-selenium-python
|
axe_selenium_python/tests/conftest.py
|
1
|
1081
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
import pytest
from py.xml import html
@pytest.mark.optionalhook
def pytest_html_results_table_header(cells):
"""Add description and sortable time header to HTML report."""
cells.insert(2, html.th("Description"))
cells.insert(0, html.th("Time", class_="sortable time", col="time"))
@pytest.mark.optionalhook
def pytest_html_results_table_row(report, cells):
"""Add description and sortable time column to HTML report."""
cells.insert(2, html.td(report.description))
cells.insert(1, html.td(datetime.utcnow(), class_="col-time"))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
"""Make HTML report using test-function docstrings as description."""
outcome = yield
report = outcome.get_result()
# add docstring to 'description' column
report.description = str(item.function.__doc__)
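# Usage sketch: with the hooks above, any test collected alongside this
# conftest gets its docstring shown in the report's "Description" column and
# a timestamp in the sortable "Time" column. The test below is a hypothetical
# example.
#
#   def test_homepage_is_accessible():
#       """Run the accessibility audit against the homepage."""
#       ...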
|
mpl-2.0
| -323,944,129,504,433,200
| 33.870968
| 73
| 0.719704
| false
| 3.68942
| true
| false
| false
|
ayepezv/GAD_ERP
|
openerp/report/render/rml2pdf/trml2pdf.py
|
1
|
45686
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
import traceback
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
try:
from customfonts import SetCustomFonts
except ImportError:
SetCustomFonts=lambda x:None
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
if fontname not in pdfmetrics.getRegisteredFontNames()\
or fontname not in pdfmetrics.standardFonts:
# let reportlab attempt to find it
try:
pdfmetrics.getFont(fontname)
except Exception:
addition = ""
if " " in fontname:
addition = ". Your font contains spaces which is not valid in RML."
_logger.warning('Could not locate font %s, substituting default: %s%s',
fontname, default_fontname, addition)
fontname = default_fontname
return fontname
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self):
page_count = len(self._saved_page_states)
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % {
'this': self._pageNumber,
'total': page_count,
}
)
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
"""Flag to close current story page numbering and prepare for the next
should be executed after the rendering of the full story"""
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
self.localcontext = localcontext
self.styles = {}
self.styles_obj = {}
self.names = {}
self.table_styles = {}
self.default_style = reportlab.lib.styles.getSampleStyleSheet()
for node in nodes:
for style in node.findall('blockTableStyle'):
self.table_styles[style.get('id')] = self._table_style_get(style)
for style in node.findall('paraStyle'):
sname = style.get('name')
self.styles[sname] = self._para_style_update(style)
if self.default_style.has_key(sname):
for key, value in self.styles[sname].items():
setattr(self.default_style[sname], key, value)
else:
self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])
for variable in node.findall('initialize'):
for name in variable.findall('name'):
self.names[ name.get('id')] = name.get('value')
def _para_style_update(self, node):
data = {}
for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
if node.get(attr):
data[attr] = color.get(node.get(attr))
for attr in ['bulletFontName', 'fontName']:
if node.get(attr):
fontname= select_fontname(node.get(attr), None)
if fontname is not None:
data['fontName'] = fontname
for attr in ['bulletText']:
if node.get(attr):
data[attr] = node.get(attr)
for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
'borderWidth','borderPadding','borderRadius']:
if node.get(attr):
data[attr] = utils.unit_get(node.get(attr))
if node.get('alignment'):
align = {
'right':reportlab.lib.enums.TA_RIGHT,
'center':reportlab.lib.enums.TA_CENTER,
'justify':reportlab.lib.enums.TA_JUSTIFY
}
data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
data['splitLongWords'] = 0
return data
def _table_style_get(self, style_node):
styles = []
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
styles.append(('VALIGN', start, stop, str(node.get('value'))))
elif node.tag=='blockFont':
styles.append(('FONT', start, stop, str(node.get('name'))))
elif node.tag=='blockTextColor':
styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
elif node.tag=='blockLeading':
styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockAlignment':
styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
elif node.tag=='blockSpan':
styles.append(('SPAN', start, stop))
elif node.tag=='blockLeftPadding':
styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockRightPadding':
styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockTopPadding':
styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBottomPadding':
styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBackground':
styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
if node.get('size'):
styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
elif node.tag=='lineStyle':
kind = node.get('kind')
kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
assert kind in kind_list
thick = 1
if node.get('thickness'):
thick = float(node.get('thickness'))
styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
return platypus.tables.TableStyle(styles)
def para_style_get(self, node):
style = False
sname = node.get('style')
if sname:
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
_logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
if para_update:
            # update style only if necessary
style = copy.deepcopy(style)
style.__dict__.update(para_update)
return style
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
if images is None:
images = {}
if localcontext is None:
localcontext = {}
self.localcontext = localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.images = images
self.path = path
self.title = title
def docinit(self, els):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
for node in els:
for font in node.findall('registerFont'):
name = font.get('fontName').encode('ascii')
fname = font.get('fontFile').encode('ascii')
if name not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(name, fname))
                    #by default, we map the fontName to each style (bold, italic, bold and italic), so that
                    #if there isn't any font defined for one of these styles (via a font family), the system
                    #will fall back on the normal font.
addMapping(name, 0, 0, name) #normal
addMapping(name, 0, 1, name) #italic
addMapping(name, 1, 0, name) #bold
addMapping(name, 1, 1, name) #italic and bold
#if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
for font_family in node.findall('registerFontFamily'):
family_name = font_family.get('normal').encode('ascii')
if font_family.get('italic'):
addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
if font_family.get('bold'):
addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
if font_family.get('boldItalic'):
addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))
def setTTFontMapping(self,face, fontname, filename, mode='all'):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
if mode:
mode = mode.lower()
if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename))
if mode == 'all':
addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold
elif mode in ['italic', 'oblique']:
addMapping(face, 0, 1, fontname) #italic
elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold
elif mode in ('bolditalic', 'bold italic','boldoblique', 'bold oblique'):
addMapping(face, 1, 1, fontname) #italic and bold
else:
addMapping(face, 0, 0, fontname) #normal
def _textual_image(self, node):
rc = ''
for n in node:
rc +=( etree.tostring(n) or '') + n.tail
return base64.decodestring(node.tostring())
def _images(self, el):
result = {}
for node in el.findall('.//image'):
rc =( node.text or '')
result[node.get('name')] = base64.decodestring(rc)
return result
def render(self, out):
el = self.etree.findall('.//docinit')
if el:
self.docinit(el)
el = self.etree.findall('.//stylesheet')
self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('.//images')
if el:
self.images.update( self._images(el[0]) )
el = self.etree.findall('.//template')
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
el = utils._child_get(self.etree, self, 'story')
pt_obj.render(el)
else:
self.canvas = canvas.Canvas(out)
pd = self.etree.find('pageDrawing')[0]
pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
pd_obj.render(pd)
self.canvas.showPage()
self.canvas.save()
class _rml_canvas(object):
def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.canvas = canvas
self.styles = doc.styles
self.doc_tmpl = doc_tmpl
self.doc = doc
self.images = images
self.path = path
self.title = title
if self.title:
self.canvas.setTitle(self.title)
def _textual(self, node, x=0, y=0):
text = node.text and node.text.encode('utf-8') or ''
rc = utils._process_text(self, text)
for n in node:
if n.tag == 'seq':
from reportlab.lib.sequencer import getSequencer
seq = getSequencer()
rc += str(seq.next(n.get('id')))
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
rc += str(self.canvas.getPageNumber())
rc += utils._process_text(self, n.tail)
return rc.replace('\n','')
def _drawString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
try:
self.canvas.drawString(text=text, **v)
except TypeError:
_logger.info("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
raise
def _drawCenteredString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawCentredString(text=text, **v)
def _drawRightString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawRightString(text=text, **v)
def _rect(self, node):
if node.get('round'):
self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
else:
self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
def _ellipse(self, node):
x1 = utils.unit_get(node.get('x'))
x2 = utils.unit_get(node.get('width'))
y1 = utils.unit_get(node.get('y'))
y2 = utils.unit_get(node.get('height'))
self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _curves(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>7:
self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
line_str = line_str[8:]
def _lines(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>3:
lines.append([utils.unit_get(l) for l in line_str[0:4]])
line_str = line_str[4:]
self.canvas.lines(lines)
def _grid(self, node):
xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]
self.canvas.grid(xlist, ylist)
def _translate(self, node):
dx = utils.unit_get(node.get('dx')) or 0
dy = utils.unit_get(node.get('dy')) or 0
self.canvas.translate(dx,dy)
def _circle(self, node):
self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _place(self, node):
flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
infos = utils.attr_get(node, ['x','y','width','height'])
infos['y']+=infos['height']
for flow in flows:
w,h = flow.wrap(infos['width'], infos['height'])
if w<=infos['width'] and h<=infos['height']:
infos['y']-=h
flow.drawOn(self.canvas,infos['x'],infos['y'])
infos['height']-=h
else:
raise ValueError("Not enough space")
def _line_mode(self, node):
ljoin = {'round':1, 'mitered':0, 'bevelled':2}
lcap = {'default':0, 'round':1, 'square':2}
if node.get('width'):
self.canvas.setLineWidth(utils.unit_get(node.get('width')))
if node.get('join'):
self.canvas.setLineJoin(ljoin[node.get('join')])
if node.get('cap'):
self.canvas.setLineCap(lcap[node.get('cap')])
if node.get('miterLimit'):
            self.canvas.setMiterLimit(utils.unit_get(node.get('miterLimit')))
if node.get('dash'):
dashes = node.get('dash').split(',')
for x in range(len(dashes)):
dashes[x]=utils.unit_get(dashes[x])
            self.canvas.setDash(dashes)
def _image(self, node):
import urllib
import urlparse
from reportlab.lib.utils import ImageReader
nfile = node.get('file')
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
if self.localcontext:
res = utils._regex.findall(newtext)
for key in res:
newtext = safe_eval(key, {}, self.localcontext) or ''
image_data = None
if newtext:
image_data = base64.decodestring(newtext)
if image_data:
s = StringIO(image_data)
else:
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
s = StringIO(self.images[nfile])
else:
try:
up = urlparse.urlparse(str(nfile))
except ValueError:
up = False
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
for tag in ('width','height','x','y'):
if node.get(tag):
args[tag] = utils.unit_get(node.get(tag))
if ('width' in args) and (not 'height' in args):
args['height'] = sy * args['width'] / sx
elif ('height' in args) and (not 'width' in args):
args['width'] = sx * args['height'] / sy
elif ('width' in args) and ('height' in args):
                if (float(args['width'])/args['height'])>(float(sx)/sy):
args['width'] = sx * args['height'] / sy
else:
args['height'] = sy * args['width'] / sx
self.canvas.drawImage(img, **args)
finally:
s.close()
# self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)
def _path(self, node):
self.path = self.canvas.beginPath()
self.path.moveTo(**utils.attr_get(node, ['x','y']))
for n in utils._child_get(node, self):
if not n.text :
if n.tag=='moveto':
vals = utils.text_get(n).split()
self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
elif n.tag=='curvesto':
vals = utils.text_get(n).split()
while len(vals)>5:
pos=[]
while len(pos)<6:
pos.append(utils.unit_get(vals.pop(0)))
self.path.curveTo(*pos)
elif n.text:
data = n.text.split() # Not sure if I must merge all TEXT_NODE ?
while len(data)>1:
x = utils.unit_get(data.pop(0))
y = utils.unit_get(data.pop(0))
self.path.lineTo(x,y)
if (not node.get('close')) or utils.bool_get(node.get('close')):
self.path.close()
self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def setFont(self, node):
fontname = select_fontname(node.get('name'), self.canvas._fontname)
return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))
def render(self, node):
tags = {
'drawCentredString': self._drawCenteredString,
'drawRightString': self._drawRightString,
'drawString': self._drawString,
'rect': self._rect,
'ellipse': self._ellipse,
'lines': self._lines,
'grid': self._grid,
'curves': self._curves,
'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
'setFont': self.setFont ,
'place': self._place,
'circle': self._circle,
'lineMode': self._line_mode,
'path': self._path,
'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
'translate': self._translate,
'image': self._image
}
for n in utils._child_get(node, self):
if n.tag in tags:
tags[n.tag](n)
class _rml_draw(object):
def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.node = node
self.styles = styles
self.canvas = None
self.images = images
self.path = path
self.canvas_title = title
def render(self, canvas, doc):
canvas.saveState()
cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title)
cnv.render(self.node)
canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
def __init__(self, node, localcontext, styles, self2):
self.localcontext = (localcontext or {}).copy()
self.node = node
self.styles = styles
self.width = utils.unit_get(node.get('width'))
self.height = utils.unit_get(node.get('height'))
self.self2 = self2
def wrap(self, *args):
return self.width, self.height
def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
def split(self, availWidth, availHeight):
res = original_pto_split(self, availWidth, availHeight)
if len(res) > 2 and len(self._content) > 0:
header = self._content[0]._ptoinfo.header
trailer = self._content[0]._ptoinfo.trailer
if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
return []
return res
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
if td.get('style'):
st = copy.deepcopy(self.styles.table_styles[td.get('style')])
for s in st._cmds:
s[1][1] = posy
s[2][1] = posy
s[1][0] = posx
s[2][0] = posx
styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
def _illustration(self, node):
return _rml_Illustration(node, self.localcontext, self.styles, self)
def _textual_image(self, node):
return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
def _flowable(self, node, extra_style=None):
if node.tag=='pto':
return self._pto(node)
if node.tag=='para':
style = self.styles.para_style_get(node)
if extra_style:
style.__dict__.update(extra_style)
text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
result = [instance]
if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
return result
elif node.tag=='barCode':
try:
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import code39
from reportlab.graphics.barcode import code93
from reportlab.graphics.barcode import common
from reportlab.graphics.barcode import fourstate
from reportlab.graphics.barcode import usps
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
'codabar': lambda x: common.Codabar(x, **args),
'code11': lambda x: common.Code11(x, **args),
'code128': lambda x: code128.Code128(str(x), **args),
'standard39': lambda x: code39.Standard39(str(x), **args),
'standard93': lambda x: code93.Standard93(str(x), **args),
'i2of5': lambda x: common.I2of5(x, **args),
'extended39': lambda x: code39.Extended39(str(x), **args),
'extended93': lambda x: code93.Extended93(str(x), **args),
'msi': lambda x: common.MSI(x, **args),
'fim': lambda x: usps.FIM(x, **args),
'postnet': lambda x: usps.POSTNET(x, **args),
'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
}
code = 'code128'
if node.get('code'):
code = node.get('code').lower()
return codes[code](self._textual(node))
elif node.tag=='name':
self.styles.names[ node.get('id')] = node.get('value')
return None
elif node.tag=='xpre':
style = self.styles.para_style_get(node)
return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
elif node.tag=='pre':
style = self.styles.para_style_get(node)
return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
elif node.tag=='illustration':
return self._illustration(node)
elif node.tag=='blockTable':
return self._table(node)
elif node.tag=='title':
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Title']
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Heading'+str(node.tag[1:])]
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif node.tag=='image':
image_data = False
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
newtext = node.text
if self.localcontext:
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
if node.get('width'):
width = utils.unit_get(node.get('width'))
else:
width = utils.unit_get('1cm')
length = utils.unit_get(node.get('length'))
return platypus.Spacer(width=width, height=length)
elif node.tag=='section':
return self.render(node)
elif node.tag == 'pageNumberReset':
return PageReset()
elif node.tag in ('pageBreak', 'nextPage'):
return platypus.PageBreak()
elif node.tag=='condPageBreak':
return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
elif node.tag=='setNextTemplate':
return platypus.NextPageTemplate(str(node.get('name')))
elif node.tag=='nextFrame':
return platypus.CondPageBreak(1000) # TODO: change the 1000 !
elif node.tag == 'setNextFrame':
from reportlab.platypus.doctemplate import NextFrameFlowable
return NextFrameFlowable(str(node.get('name')))
elif node.tag == 'currentFrame':
from reportlab.platypus.doctemplate import CurrentFrameFlowable
return CurrentFrameFlowable(str(node.get('name')))
elif node.tag == 'frameEnd':
return EndFrameFlowable()
elif node.tag == 'hr':
width_hr=node.get('width') or '100%'
color_hr=node.get('color') or 'black'
thickness_hr=node.get('thickness') or 1
lineCap_hr=node.get('lineCap') or 'round'
return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
else:
sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
class EndFrameFlowable(ActionFlowable):
def __init__(self,resume=0):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
        # Store some useful values directly inside the canvas, so they're available
# on flowable drawing (needed for proper PageCount handling)
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
for f in self.pageTemplate.frames:
if f.id == 'first':
self.frame = f
break
self.handle_frameBegin()
def afterPage(self):
if isinstance(self.canv, NumberedCanvas):
# save current page states before eventual reset
self.canv._saved_page_states.append(dict(self.canv.__dict__))
if self.canv._doPageReset:
# Following a <pageReset/> tag:
# - we reset page number to 0
            #   - we add a new PageCount flowable (relative to the current
            #     story number), but not for NumberedCanvas, as it handles the
            #     page count itself
            # NOTE: the _rml_template render() method adds a PageReset flowable at
            #     the end of each story, so we're sure to pass here at least once
            #     per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
if images is None:
images = {}
if not localcontext:
localcontext={'internal_header':True}
self.localcontext = localcontext
self.images= images
self.path = path
self.title = title
pagesize_map = {'a4': A4,
'us_letter': letter
}
pageSize = A4
if self.localcontext.get('company'):
pageSize = pagesize_map.get(self.localcontext.get('company').rml_paper_format, A4)
if node.get('pageSize'):
ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
self.page_templates = []
self.styles = doc.styles
self.doc = doc
self.image=[]
pts = node.findall('pageTemplate')
for pt in pts:
frames = []
for frame_el in pt.findall('frame'):
frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
if utils.attr_get(frame_el, ['last']):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except Exception: # FIXME: be even more specific, perhaps?
gr=''
if len(gr):
# self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
else:
drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
self.doc_tmpl.addPageTemplates(self.page_templates)
def render(self, node_stories):
if self.localcontext and not self.localcontext.get('internal_header',False):
del self.localcontext['internal_header']
fis = []
r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
story_cnt = 0
for node_story in node_stories:
if story_cnt > 0:
fis.append(platypus.PageBreak())
fis += r.render(node_story)
# end of story numbering computation
fis.append(PageReset())
story_cnt += 1
try:
if self.localcontext and self.localcontext.get('internal_header',False):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
self.doc_tmpl.build(fis)
except platypus.doctemplate.LayoutError, e:
e.name = 'Print Error'
e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
SetCustomFonts(r)
except Exception, exc:
_logger.info('Cannot set font mapping: %s', "".join(traceback.format_exception_only(type(exc),exc)))
fp = StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
SetCustomFonts(r)
except Exception:
pass
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO()
r.render(fp)
return fp.getvalue()
def trml2pdf_help():
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Render the standard input (RML) and output a PDF file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()),
else:
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Try \'trml2pdf --help\' for more information.'
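# Usage sketch: rendering an RML string programmatically instead of via the
# command line above. The minimal RML document shown here is a hypothetical
# example.
#
#   rml = """<?xml version="1.0"?>
#   <document filename="example.pdf">
#     <template>
#       <pageTemplate id="first">
#         <frame id="first" x1="2cm" y1="2cm" width="17cm" height="25cm"/>
#       </pageTemplate>
#     </template>
#     <stylesheet/>
#     <story><para>Hello world</para></story>
#   </document>"""
#   pdf_data = parseNode(rml)               # returns the rendered PDF as a string
#   parseString(rml, fout='example.pdf')    # or render straight to a file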
|
gpl-3.0
| -7,056,118,041,523,852,000
| 41.777154
| 238
| 0.545506
| false
| 3.868089
| false
| false
| false
|
MediaKraken/MediaKraken_Deployment
|
source/test_asyncpg_json.py
|
1
|
1574
|
import asyncio
import datetime
import json
import asyncpg
###
# fully working for DICT of row along with auto json decode/encode
###
async def main():
conn = await asyncpg.connect(user='postgres',
password='metaman',
database='postgres',
host='localhost')
await conn.set_type_codec('json',
encoder=json.dumps,
decoder=json.loads,
schema='pg_catalog')
await conn.execute('CREATE TABLE users2(id serial PRIMARY KEY,'
' name text, dob date, test_json jsonb)')
await conn.execute('INSERT INTO users2(name, dob, test_json) VALUES($1, $2, $3)',
'Bob', datetime.date(1984, 3, 1), json.dumps({'test': 'works'}))
# shows that one CANNOT use ::json in the insert
# await conn.execute('INSERT INTO users2(name, test_json::json) VALUES($1, $2)',
# 'Bob', {'test': 'works'})
# shows that one CANNOT use ::json in the update
# await conn.execute('update users2 set name = $1, test_json::json = $2',
# 'Bob', json.dumps({'test': 'works'}))
row = await conn.fetchrow('SELECT id, dob, test_json::json'
' FROM users2 WHERE name = $1', 'Bob')
print(row['id'], row['dob'], row['test_json'])
print(row['test_json']['test'])
# Close the connection.
await conn.close()
asyncio.get_event_loop().run_until_complete(main())
|
gpl-3.0
| 375,246,508,988,305,300
| 33.217391
| 87
| 0.52986
| false
| 4.025575
| true
| false
| false
|
ToontownUprising/src
|
toontown/nametag/Nametag2d.py
|
1
|
10929
|
from direct.task.Task import Task
import math
from panda3d.core import PGButton, VBase4, DepthWriteAttrib, Point3
from toontown.chat.ChatBalloon import ChatBalloon
from toontown.margins import MarginGlobals
from toontown.margins.MarginVisible import MarginVisible
from toontown.nametag import NametagGlobals
from toontown.nametag.Nametag import Nametag
from toontown.toontowngui.Clickable2d import Clickable2d
class Nametag2d(Nametag, Clickable2d, MarginVisible):
CONTENTS_SCALE = 0.25
CHAT_TEXT_MAX_ROWS = 6
CHAT_TEXT_WORD_WRAP = 8
CHAT_BALLOON_ALPHA = 0.4
ARROW_OFFSET = -1.0
ARROW_SCALE = 1.5
def __init__(self):
Nametag.__init__(self)
Clickable2d.__init__(self, 'Nametag2d')
MarginVisible.__init__(self)
self.actualChatText = ''
self.arrow = None
self.textNodePath = None
self.contents.setScale(self.CONTENTS_SCALE)
self.hideThought()
self.accept('MarginVisible-update', self.update)
def destroy(self):
self.ignoreAll()
Nametag.destroy(self)
if self.textNodePath is not None:
self.textNodePath.removeNode()
self.textNodePath = None
if self.arrow is not None:
self.arrow.removeNode()
self.arrow = None
Clickable2d.destroy(self)
def getUniqueName(self):
return 'Nametag2d-' + str(id(self))
def getChatBalloonModel(self):
return NametagGlobals.chatBalloon2dModel
def getChatBalloonWidth(self):
return NametagGlobals.chatBalloon2dWidth
def getChatBalloonHeight(self):
return NametagGlobals.chatBalloon2dHeight
def setChatText(self, chatText):
self.actualChatText = chatText
Nametag.setChatText(self, chatText)
def updateClickRegion(self):
if self.chatBalloon is not None:
right = self.chatBalloon.width / 2.0
left = -right
top = self.chatBalloon.height / 2.0
bottom = -top
self.setClickRegionFrame(left, right, bottom, top)
self.region.setActive(True)
elif self.panel is not None:
centerX = (self.textNode.getLeft()+self.textNode.getRight()) / 2.0
centerY = (self.textNode.getBottom()+self.textNode.getTop()) / 2.0
left = centerX - (self.panelWidth/2.0)
right = centerX + (self.panelWidth/2.0)
bottom = centerY - (self.panelHeight/2.0)
top = centerY + (self.panelHeight/2.0)
self.setClickRegionFrame(left, right, bottom, top)
self.region.setActive(True)
else:
if self.region is not None:
self.region.setActive(False)
def isClickable(self):
if self.getChatText() and self.hasChatButton():
return True
return NametagGlobals.wantActiveNametags and Clickable2d.isClickable(self)
def setClickState(self, clickState):
if self.isClickable():
self.applyClickState(clickState)
else:
self.applyClickState(PGButton.SInactive)
Clickable2d.setClickState(self, clickState)
def enterDepressed(self):
if self.isClickable():
base.playSfx(NametagGlobals.clickSound)
def enterRollover(self):
if self.isClickable() and (self.lastClickState != PGButton.SDepressed):
base.playSfx(NametagGlobals.rolloverSound)
def update(self):
self.contents.node().removeAllChildren()
Nametag.update(self)
if self.cell is not None:
# We're in the margin display. Reposition our content, and update
# the click region:
self.reposition()
self.updateClickRegion()
else:
# We aren't in the margin display. Disable the click region if one
# is present:
if self.region is not None:
self.region.setActive(False)
def tick(self, task):
if (self.avatar is None) or self.avatar.isEmpty():
return Task.cont
if (self.cell is None) or (self.arrow is None):
return Task.cont
location = self.avatar.getPos(NametagGlobals.me)
rotation = NametagGlobals.me.getQuat(base.cam)
camSpacePos = rotation.xform(location)
arrowRadians = math.atan2(camSpacePos[0], camSpacePos[1])
arrowDegrees = (arrowRadians/math.pi) * 180
self.arrow.setR(arrowDegrees - 90)
return Task.cont
def drawChatBalloon(self, model, modelWidth, modelHeight):
if self.chatFont is None:
# We can't draw this without a font.
return
# Prefix the nametag text:
self.chatTextNode.setText(self.getText() + ': ' + self.actualChatText)
# Set our priority in the margin system:
self.setPriority(MarginGlobals.MP_normal)
if self.textNodePath is not None:
self.textNodePath.removeNode()
self.textNodePath = None
if self.arrow is not None:
self.arrow.removeNode()
self.arrow = None
if self.isClickable():
foreground, background = self.chatColor[self.clickState]
else:
foreground, background = self.chatColor[PGButton.SInactive]
if self.chatType == NametagGlobals.SPEEDCHAT:
background = self.speedChatColor
if background[3] > self.CHAT_BALLOON_ALPHA:
background = VBase4(
background[0], background[1], background[2],
self.CHAT_BALLOON_ALPHA)
self.chatBalloon = ChatBalloon(
model, modelWidth, modelHeight, self.chatTextNode,
foreground=foreground, background=background,
reversed=self.chatReversed,
button=self.chatButton[self.clickState])
self.chatBalloon.reparentTo(self.contents)
# Calculate the center of the TextNode:
left, right, bottom, top = self.chatTextNode.getFrameActual()
center = self.contents.getRelativePoint(
self.chatBalloon.textNodePath,
((left+right) / 2.0, 0, (bottom+top) / 2.0))
# Translate the chat balloon along the inverse:
self.chatBalloon.setPos(self.chatBalloon, -center)
def drawNametag(self):
# Set our priority in the margin system:
self.setPriority(MarginGlobals.MP_low)
if self.textNodePath is not None:
self.textNodePath.removeNode()
self.textNodePath = None
if self.arrow is not None:
self.arrow.removeNode()
self.arrow = None
if self.font is None:
# We can't draw this without a font.
return
# Attach the icon:
if self.icon is not None:
self.contents.attachNewNode(self.icon)
if self.isClickable():
foreground, background = self.nametagColor[self.clickState]
else:
foreground, background = self.nametagColor[PGButton.SInactive]
# Set the color of the TextNode:
self.textNode.setTextColor(foreground)
# Attach the TextNode:
self.textNodePath = self.contents.attachNewNode(self.textNode, 1)
self.textNodePath.setTransparency(foreground[3] < 1)
self.textNodePath.setAttrib(DepthWriteAttrib.make(0))
self.textNodePath.setY(self.TEXT_Y_OFFSET)
# Attach a panel behind the TextNode:
self.panel = NametagGlobals.cardModel.copyTo(self.contents, 0)
self.panel.setColor(background)
self.panel.setTransparency(background[3] < 1)
# Reposition the panel:
x = (self.textNode.getLeft()+self.textNode.getRight()) / 2.0
z = (self.textNode.getBottom()+self.textNode.getTop()) / 2.0
self.panel.setPos(x, 0, z)
# Resize the panel:
self.panelWidth = self.textNode.getWidth() + self.PANEL_X_PADDING
self.panelHeight = self.textNode.getHeight() + self.PANEL_Z_PADDING
self.panel.setScale(self.panelWidth, 1, self.panelHeight)
# Add an arrow:
self.arrow = NametagGlobals.arrowModel.copyTo(self.contents)
self.arrow.setZ(self.ARROW_OFFSET + self.textNode.getBottom())
self.arrow.setScale(self.ARROW_SCALE)
self.arrow.setColor(self.nametagColor[0][0])
def marginVisibilityChanged(self):
if self.cell is not None:
# We're in the margin display. Reposition our content, and update
# the click region:
self.reposition()
self.updateClickRegion()
else:
# We aren't in the margin display. Disable the click region if one
# is present:
if self.region is not None:
self.region.setActive(False)
def reposition(self):
if self.contents is None:
return
origin = Point3()
self.contents.setPos(origin)
if self.chatBalloon is not None:
self.chatBalloon.removeNode()
self.chatBalloon = None
self.contents.node().removeAllChildren()
if (self.cell in base.leftCells) or (self.cell in base.rightCells):
text = self.getChatText().replace('\x01WLDisplay\x01', '').replace('\x02', '')
textWidth = self.chatTextNode.calcWidth(text)
if (textWidth / self.CHAT_TEXT_WORD_WRAP) > self.CHAT_TEXT_MAX_ROWS:
self.chatTextNode.setWordwrap(textWidth / (self.CHAT_TEXT_MAX_ROWS-0.5))
else:
self.chatTextNode.setWordwrap(self.CHAT_TEXT_WORD_WRAP)
model = self.getChatBalloonModel()
modelWidth = self.getChatBalloonWidth()
modelHeight = self.getChatBalloonHeight()
self.drawChatBalloon(model, modelWidth, modelHeight)
nodePath = self.chatBalloon.textNodePath
left, right, bottom, top = self.chatTextNode.getFrameActual()
elif self.panel is not None:
nodePath = self.textNodePath
left, right, bottom, top = self.textNode.getFrameActual()
# Compensate for the arrow:
bottom -= self.ARROW_SCALE
else:
return
if self.cell in base.bottomCells:
# Move the origin to the bottom center of the node path:
origin = self.contents.getRelativePoint(
nodePath, ((left+right) / 2.0, 0, bottom))
elif self.cell in base.leftCells:
# Move the origin to the left center of the node path:
origin = self.contents.getRelativePoint(
nodePath, (left, 0, (bottom+top) / 2.0))
elif self.cell in base.rightCells:
# Move the origin to the right center of the node path:
origin = self.contents.getRelativePoint(
nodePath, (right, 0, (bottom+top) / 2.0))
self.contents.setPos(self.contents, -origin)
|
mit
| 4,598,167,220,696,883,700
| 33.805732
| 94
| 0.620093
| false
| 3.836083
| false
| false
| false
|
tommyogden/maxwellbloch
|
setup.py
|
1
|
2118
|
"""MaxwellBloch
MaxwellBloch is a Python package for solving the coupled Maxwell-Bloch equations
describing the nonlinear propagation of near-resonant light through thermal
atomic vapours.
"""
import os
import textwrap
from setuptools import setup, find_packages
import subprocess
DESCRIPTION = "A Python package for solving the Maxwell-Bloch equations."
LONG_DESCRIPTION = ("MaxwellBloch is a Python package for solving the coupled "
"Maxwell-Bloch equations describing the nonlinear propagation of "
"near-resonant light through thermal atomic vapours.")
def git_short_hash():
""" Returns the short hash of the latest git commit as a string. """
git_str = subprocess.check_output(['git', 'log', '-1',
'--format=%h']).decode('UTF-8').strip()
return git_str
version = "no_version"
if "TRAVIS_TAG" in os.environ:
    # Versions are of the pattern 'vX.Y.Z'
version = os.environ.get("TRAVIS_TAG", "")[1:]
elif "TRAVIS_COMMIT" in os.environ:
version = os.environ.get("TRAVIS_COMMIT", "")[:8]
else:
version = git_short_hash()
def write_version_module(version_path='maxwellbloch/version.py'):
""" Write a version module with the current version."""
# Remove if already exists
if os.path.exists(version_path):
os.remove(version_path)
version_str = textwrap.dedent("""\
# This file is generated by setup.py
VERSION = '{!s}'
""".format(version))
f = open(version_path, 'w')
try:
f.write(version_str)
finally:
f.close()
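# Editorial example (not part of the original script): for version '1.2.3'
# the generated maxwellbloch/version.py contains just
#
#     # This file is generated by setup.py
#     VERSION = '1.2.3'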
write_version_module()
setup(name='MaxwellBloch',
version=version,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='http://github.com/tommyogden/maxwellbloch',
author='Thomas P Ogden',
author_email='t@ogden.eu',
license='MIT',
packages=find_packages(),
package_data={'maxwellbloch.tests': ['json/*.json']},
install_requires=['qutip'],
scripts=['bin/make-mp4-fixed-frame.py',
'bin/make-mp4-fixed-frame-2-fields.py',
'bin/make-gif-ffmpeg.sh'],
zip_safe=False)
|
mit
| -6,282,543,788,265,770,000
| 29.695652
| 80
| 0.660057
| false
| 3.438312
| false
| false
| false
|
percival-detector/odin-data
|
tools/python/odin_data/meta_listener_adapter.py
|
1
|
7328
|
import logging
from tornado import escape
from odin.adapters.adapter import ApiAdapterResponse, \
request_types, response_types
from odin_data.odin_data_adapter import OdinDataAdapter
class MetaListenerAdapter(OdinDataAdapter):
"""An OdinControl adapter for a MetaListener"""
def __init__(self, **kwargs):
logging.debug("MetaListenerAdapter init called")
# These are internal adapter parameters
self.acquisitionID = ""
self.acquisition_active = False
self.acquisitions = []
# These parameters are stored under an acquisition tree, so we need to
# parse out the parameters for the acquisition we have stored
self._readback_parameters = {}
self._set_defaults()
# These config parameters are buffered so they can be included whenever a new acquisition
# is created. This helps to abstract the idea of acquisitions being created and removed and
# means the client does not need to send things in a certain order.
self._config_parameters = {
"config/output_dir": "",
"config/flush": 100,
"config/file_prefix": ""
}
# Parameters must be created before base init called
super(MetaListenerAdapter, self).__init__(**kwargs)
self._client = self._clients[0] # We only have one client
def _set_defaults(self):
self.acquisitionID = ""
self._readback_parameters = {
"status/filename": "",
"status/num_processors": 0,
"status/writing": False,
"status/written": 0
}
def _map_acquisition_parameter(self, path):
"""Map acquisition parameter path string to full uri item list"""
# Replace the first slash with acquisitions/<acquisitionID>/
# E.g. status/filename -> status/acquisitions/<acquisitionID>/filename
full_path = path.replace(
"/", "/acquisitions/{}/".format(self.acquisitionID),
1 # First slash only
)
return full_path.split("/") # Return list of uri items
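    # Illustrative example for _map_acquisition_parameter (editorial note,
    # the acquisition ID is hypothetical): with self.acquisitionID == "test_1",
    # "status/filename" becomes "status/acquisitions/test_1/filename" and is
    # returned as ["status", "acquisitions", "test_1", "filename"].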
@request_types('application/json')
@response_types('application/json', default='application/json')
def get(self, path, request):
"""Implementation of the HTTP GET verb for MetaListenerAdapter
:param path: URI path of the GET request
:param request: Tornado HTTP request object
:return: ApiAdapterResponse object to be returned to the client
"""
status_code = 200
response = {}
logging.debug("GET path: %s", path)
logging.debug("GET request: %s", request)
if path == "config/acquisition_id":
response["value"] = self.acquisitionID
elif path == "status/acquisition_active":
response["value"] = self.acquisition_active
elif path == "config/acquisitions":
acquisition_tree = self.traverse_parameters(
self._clients[0].parameters,
["config", "acquisitions"]
)
if acquisition_tree is not None:
response["value"] = "," .join(acquisition_tree.keys())
else:
response["value"] = None
elif path in self._readback_parameters:
response["value"] = self._readback_parameters[path]
elif path in self._config_parameters:
response["value"] = self._config_parameters[path]
else:
return super(MetaListenerAdapter, self).get(path, request)
return ApiAdapterResponse(response, status_code=status_code)
@request_types('application/json')
@response_types('application/json', default='application/json')
def put(self, path, request):
"""
Implementation of the HTTP PUT verb for MetaListenerAdapter
:param path: URI path of the PUT request
:param request: Tornado HTTP request object
:return: ApiAdapterResponse object to be returned to the client
"""
logging.debug("PUT path: %s", path)
logging.debug("PUT request: %s", request)
logging.debug("PUT request.body: %s",
str(escape.url_unescape(request.body)))
value = str(escape.url_unescape(request.body)).replace('"', '')
if path == "config/acquisition_id":
self.acquisitionID = value
# Set inactive so process_updates doesn't clear acquisition ID
self.acquisition_active = False
# Send entire config with new acquisition ID
config = dict(
acquisition_id=self.acquisitionID,
output_dir=self._config_parameters["config/output_dir"],
flush=self._config_parameters["config/flush"],
file_prefix=self._config_parameters["config/file_prefix"]
)
status_code, response = self._send_config(config)
elif path == "config/stop":
self.acquisition_active = False
# By default we stop all acquisitions by passing None
config = {
"acquisition_id": None,
"stop": True
}
if self.acquisitionID:
# If we have an Acquisition ID then stop that one only
config["acquisition_id"] = self.acquisitionID
status_code, response = self._send_config(config)
self.acquisitionID = ""
elif path in self._config_parameters:
# Store config to re-send with acquisition ID when it is changed
self._config_parameters[path] = value
parameter = path.split("/", 1)[-1] # Remove 'config/'
config = {
"acquisition_id": self.acquisitionID,
parameter: value
}
status_code, response = self._send_config(config)
else:
return super(OdinDataAdapter, self).put(path, request)
return ApiAdapterResponse(response, status_code=status_code)
def _send_config(self, config_message):
status_code = 200
response = {}
try:
self._client.send_configuration(config_message)
except Exception as err:
logging.debug(OdinDataAdapter.ERROR_FAILED_TO_SEND)
logging.error("Error: %s", err)
status_code = 503
response = {"error": OdinDataAdapter.ERROR_FAILED_TO_SEND}
return status_code, response
def process_updates(self):
"""Handle additional background update loop tasks
Store a copy of all parameters so they don't disappear
"""
if self.acquisitionID:
acquisition_active = self.acquisitionID in self.traverse_parameters(
self._client.parameters, ["status", "acquisitions"]
)
if acquisition_active:
self.acquisition_active = True
for parameter in self._readback_parameters.keys():
value = self.traverse_parameters(
self._client.parameters,
self._map_acquisition_parameter(parameter)
)
self._readback_parameters[parameter] = value
else:
self.acquisition_active = False
else:
self._set_defaults()
|
apache-2.0
| 4,103,719,632,012,855,000
| 37.978723
| 99
| 0.593068
| false
| 4.531849
| true
| false
| false
|
Kronuz/pyXapiand
|
xapiand/search.py
|
1
|
11782
|
from __future__ import unicode_literals, absolute_import
import base64
import logging
import xapian
from . import json
from .core import get_slot, get_prefix, expand_terms, find_terms, DOCUMENT_CUSTOM_TERM_PREFIX
from .serialise import normalize, serialise_value
from .exceptions import XapianError
MAX_DOCS = 10000
class Search(object):
def __init__(self, database, search,
get_matches=True, get_data=True, get_terms=False, get_size=False,
data='.', log=logging, dead=False):
self.database = database
self.search = search
self.get_matches = get_matches
self.get_terms = get_terms
self.get_data = get_data
self.get_size = get_size
self.data = data
self.log = log
self.dead = dead
self.spies = {}
self.warnings = []
self.produced = 0
self.size = None
self.facets = self.search.get('facets')
self.check_at_least = self.search.get('check_at_least', MAX_DOCS if self.facets else 0)
self.maxitems = self.search.get('maxitems', MAX_DOCS)
self.first = self.search.get('first', 0)
self.setup()
def setup(self):
queryparser = xapian.QueryParser()
queryparser.set_database(self.database.database)
query = None
prefixes = set()
def add_prefixes(string):
for term, term_field, terms in find_terms(string):
if term_field and term_field not in prefixes:
prefix = get_prefix(term_field, DOCUMENT_CUSTOM_TERM_PREFIX)
if term_field.lower() == term_field:
queryparser.add_prefix(term_field, prefix)
else:
queryparser.add_boolean_prefix(term_field, prefix)
prefixes.add(term_field)
# Build final query:
search = self.search.get('search')
if search:
if not isinstance(search, (tuple, list)):
search = [search]
search = " AND ".join("(%s)" % s for s in search if s)
if search and search != '(*)':
search = normalize(search).encode('utf-8')
ranges = self.search.get('ranges')
if ranges:
_ranges = set()
for field, begin, end in ranges:
field = field.encode('utf-8')
if field not in _ranges:
slot = get_slot(field)
vrp = xapian.StringValueRangeProcessor(slot, field)
queryparser.add_valuerangeprocessor(vrp)
_ranges.add(field)
if begin is None:
begin = b''
if end is None:
end = b''
rng1 = b'(%s:%s..%s)' % (field, begin, end)
rng2 = b'(%s:%s..%s)' % (field, serialise_value(begin)[0], serialise_value(end)[0])
if rng1 == rng2:
_search = search
if rng1 in search:
search = None
else:
_search = search.replace(rng1, rng2)
if search != _search:
search = _search
else:
search += b' AND %s' % rng2
search = expand_terms(search)
add_prefixes(search)
flags = xapian.QueryParser.FLAG_DEFAULT | xapian.QueryParser.FLAG_WILDCARD | xapian.QueryParser.FLAG_PURE_NOT
try:
query = queryparser.parse_query(search, flags)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
queryparser.set_database(self.database.database)
query = queryparser.parse_query(search, flags)
partials = self.search.get('partials')
if partials:
if not isinstance(partials, (tuple, list)):
partials = [partials]
# Partials (for autocomplete) using FLAG_PARTIAL and OP_AND_MAYBE
partials_query = None
for partial in partials:
self.dead or 'alive' # Raises DeadException when needed
partial = normalize(partial)
partial = expand_terms(partial)
add_prefixes(partial)
flags = xapian.QueryParser.FLAG_PARTIAL
try:
_partials_query = queryparser.parse_query(partial, flags)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
queryparser.set_database(self.database.database)
_partials_query = queryparser.parse_query(partial, flags)
if partials_query:
partials_query = xapian.Query(
xapian.Query.OP_AND_MAYBE,
partials_query,
_partials_query,
)
else:
partials_query = _partials_query
if query:
query = xapian.Query(
xapian.Query.OP_AND,
query,
partials_query,
)
else:
query = partials_query
terms = self.search.get('terms')
if terms:
if not isinstance(terms, (tuple, list)):
terms = [terms]
for term in terms:
term = normalize(term)
term = expand_terms(term)
add_prefixes(term)
flags = xapian.QueryParser.FLAG_BOOLEAN | xapian.QueryParser.FLAG_PURE_NOT
try:
terms_query = queryparser.parse_query(term, flags)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
queryparser.set_database(self.database.database)
terms_query = queryparser.parse_query(term, flags)
if query:
query = xapian.Query(
xapian.Query.OP_AND,
query,
terms_query,
)
else:
query = terms_query
if not query:
if search == '(*)':
query = xapian.Query('')
else:
query = xapian.Query()
self.query = query
self.sort_by = self.search.get('sort_by')
self.distinct = self.search.get('distinct')
self.sort_by_reversed = self.search.get('sort_by_reversed')
def get_enquire(self):
enquire = xapian.Enquire(self.database.database)
# enquire.set_weighting_scheme(xapian.BoolWeight())
# enquire.set_docid_order(xapian.Enquire.DONT_CARE)
# if weighting_scheme:
# enquire.set_weighting_scheme(xapian.BM25Weight(*self.weighting_scheme))
enquire.set_query(self.query)
spies = {}
sort_by = []
warnings = []
if self.facets:
for name in self.facets:
self.dead or 'alive' # Raises DeadException when needed
name = name.strip().lower()
slot = get_slot(name)
if slot:
spy = xapian.ValueCountMatchSpy(slot)
enquire.add_matchspy(spy)
spies[name] = spy
else:
warnings.append("Ignored document value name (%r)" % name)
if self.sort_by:
for sort_field in self.sort_by:
self.dead or 'alive' # Raises DeadException when needed
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:] # Strip the '-'
else:
reverse = False
sort_by.append((sort_field, reverse))
sorter = xapian.MultiValueKeyMaker()
for name, reverse in sort_by:
self.dead or 'alive' # Raises DeadException when needed
name = name.strip().lower()
slot = get_slot(name)
if slot:
sorter.add_value(slot, reverse)
else:
warnings.append("Ignored document value name (%r)" % name)
enquire.set_sort_by_key_then_relevance(sorter, self.sort_by_reversed)
if self.distinct:
if self.distinct is True:
field = 'ID'
else:
field = self.distinct
enquire.set_collapse_key(get_slot(field))
self.spies = spies
self.warnings = warnings
return enquire
def get_results(self):
doccount = self.database.get_doccount()
maxitems = max(min(self.maxitems, doccount - self.first, MAX_DOCS), 0)
check_at_least = max(min(self.check_at_least, doccount, MAX_DOCS), 0)
if not self.get_matches:
maxitems = 0
try:
enquire = self.get_enquire()
matches = enquire.get_mset(self.first, maxitems, check_at_least)
except (xapian.NetworkError, xapian.DatabaseError):
self.database.reopen()
try:
enquire = self.get_enquire()
matches = enquire.get_mset(self.first, maxitems, check_at_least)
except (xapian.NetworkError, xapian.DatabaseError) as exc:
raise XapianError(exc)
self.produced = 0
self.estimated = None
self.size = matches.size()
if self.get_size:
self.estimated = matches.get_matches_estimated()
yield {
'size': self.size,
'estimated': self.estimated,
}
if self.spies:
for name, spy in self.spies.items():
self.dead or 'alive' # Raises DeadException when needed
for facet in spy.values():
self.dead or 'alive' # Raises DeadException when needed
yield {
'facet': name,
'term': facet.term.decode('utf-8'),
'termfreq': facet.termfreq,
}
produced = 0
for match in matches:
docid = match.docid
document = self.database.get_document(docid)
self.dead or 'alive' # Raises DeadException when needed
id = self.database.get_value(document, get_slot('ID'))
produced += 1
result = {
'id': id,
'docid': docid,
'rank': match.rank,
'weight': match.weight,
'percent': match.percent,
}
if self.get_data:
data = self.database.get_data(document)
if data is None:
continue
try:
data = json.loads(data)
except Exception:
data = base64.b64encode(data)
result.update({
'data': data,
})
if self.get_terms:
terms = []
termlist = self.database.get_termlist(document)
for t in termlist:
self.dead or 'alive' # Raises DeadException when needed
terms.append(t.term.decode('utf-8'))
result.update({
'terms': terms,
})
yield result
self.produced = produced
@property
def results(self):
return self.get_results()
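# Editorial usage sketch (not part of the original module); it assumes a
# `database` wrapper object as used by the class above and an indexed field
# named 'title':
#
#     search = Search(database, {'search': 'title:hello', 'maxitems': 10},
#                     get_size=True)
#     for result in search.results:
#         print(result)
#
# With get_size=True the generator first yields a size/estimate record, then
# one dict per matching document.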
|
gpl-2.0
| 5,752,482,212,671,406,000
| 36.28481
| 121
| 0.496605
| false
| 4.341194
| false
| false
| false
|
luigiberrettini/Kiln-to-GitHub
|
kiln_repo_list.py
|
1
|
1422
|
import requests
from anvil import Anvil
def main():
requests.packages.urllib3.disable_warnings()
in_file = open("./kiln_base_url.txt", "r")
base_url = in_file.read().replace('\n', '')
in_file.close()
anvil = Anvil(base_url, False)
anvil.create_session_by_prompting()
repo_indices = set()
out_file = open("./kiln_repoList.txt", "w")
for project in anvil.get_projects():
for repo_group in project.repo_groups:
for repo in repo_group.repos:
                if repo.index not in repo_indices:
repo_indices.add(repo.index)
prj_indx = str(project.index)
grp_indx = str(repo_group.index)
rep_indx = str(repo.index)
prj_name = project.name
grp_name = repo_group.name
rep_name = repo.name
prj_slug = repo.project_slug
grp_slug = repo.group_slug or 'Group'
rep_slug = repo.slug
url = base_url + '/Code/' + prj_slug + '/' + grp_slug + '/' + rep_slug
indexes = prj_indx + ',' + grp_indx + ',' + rep_indx
names = prj_name + ',' + grp_name + ',' + rep_name
out_file.write(url + "," + indexes + "," + names + ',' + rep_name + "\n")
out_file.close()
if __name__ == '__main__':
main()
|
mit
| 3,321,808,101,163,100,700
| 33.682927
| 93
| 0.489451
| false
| 3.693506
| false
| false
| false
|
AlexandreDecan/multiwords
|
rims.py
|
1
|
1223
|
#!/usr/bin/python
"""
Module that provides several functions to handle (un)rimmed words.
"""
import words
def rims_of(word):
""" Return the rims for the given word. A rim is a (nonempty) word u such
that w = u.s = p.u' for some s,p,u' such that |u'| = |u|, and u' and u
agree on every position except one.
For example, a and aab are rims for aabb. """
rims = []
for shift in words.get_koverlaps(word, 1):
rims.append(word[:len(word)-shift])
return rims
def pretty_print(word, rims = None):
""" Pretty print of the rims of the given word. """
    if rims is None:
rims = rims_of(word)
print word
for r in rims:
print word.rjust(len(word)*2-len(r), ' ')
def mismatch_pos(word, rim):
""" Return the position (in the rim) of the mismatch between
the word and the rim. Position starts at 0. """
shift = len(word) - len(rim)
for k in range(len(rim)):
if word[shift + k] != rim[k]:
return k
return -1
def rim_index(word, rim):
""" Return the index of a rim in the given word. The index of a rim
is a position in w where the corresponding suffix of the rim starts. """
return len(word) - len(rim) + 1
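# Editorial example (not part of the original module): per the rims_of
# docstring above, rims_of('aabb') is expected to contain 'a' and 'aab';
# mismatch_pos('aabb', 'aab') should then return 1, the position in the rim
# where it disagrees with the aligned suffix of the word.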
|
apache-2.0
| -7,824,436,984,240,739,000
| 29.575
| 78
| 0.609975
| false
| 3.184896
| false
| false
| false
|
xiaohan2012/snpp
|
tests/test_signed_graph.py
|
1
|
1749
|
"""
For the utilities
"""
import contexts as ctx
import numpy as np
from scipy.sparse import csr_matrix, isspmatrix_csr
from snpp.utils.signed_graph import symmetric_stat, \
fill_diagonal, \
make_symmetric, \
matrix2graph
def test_symmetric_stat(Q1_d):
c_sym, c_consis = symmetric_stat(Q1_d)
assert c_sym == 6
assert c_consis == 4
def test_fill_diagonal():
N = 2
m = csr_matrix(np.array([[1, 0], [0, 0]]))
assert len(set([m[i, i] for i in range(N)])) == 2
m_new = fill_diagonal(m)
assert isspmatrix_csr(m_new)
assert set([m_new[i, i] for i in range(N)]) == {1}
def test_make_symmetric(Q1_d):
def mask(m):
"""remove inconsistent entries
"""
inconsis_idx = [(i, j)
for i, j in zip(*m.nonzero())
if (m[i, j] != 0
and m[j, i] != 0
and m[j, i] != m[i, j])]
m_masked = m.copy()
for i, j in inconsis_idx:
m_masked[i, j] = m_masked[j, i] = 0
return m_masked
Q1_d_masked = mask(Q1_d)
assert not np.allclose(Q1_d_masked.toarray(), np.transpose(Q1_d_masked.toarray()))
m = make_symmetric(Q1_d)
assert isspmatrix_csr(m)
m = m.toarray()
m_masked = mask(m)
assert np.allclose(m_masked, np.transpose(m_masked))
def test_matrix2graph(Q1_d):
gm = matrix2graph(Q1_d, None, multigraph=True)
g = matrix2graph(Q1_d, None, multigraph=False)
for i, j in gm.edges():
s = g[i][j]['sign']
assert gm[i][j][s]['sign'] == s
assert gm[0][0][1]['sign'] == g[0][0]['sign'] == 1
assert gm[2][3][1]['sign'] == g[2][3]['sign'] == 1
assert gm[0][2][-1]['sign'] == g[0][2]['sign'] == -1
|
mit
| 6,050,825,672,574,178,000
| 27.672131
| 86
| 0.532876
| false
| 2.91015
| false
| false
| false
|
sandeez/lino-book_locator
|
locate/locate/lib/books/models.py
|
1
|
4811
|
from lino.api import dd
from django.db import models
class Floor(dd.Model):
number = models.IntegerField('Number', null=False)
def __unicode__(self):
return 'Floor: {0}'.format(self.number)
class Meta:
verbose_name = 'Floor'
verbose_name_plural = 'Floors'
class Room(dd.Model):
number = models.IntegerField('Number', null=False)
name = models.CharField('Name', max_length=10)
floor = models.ForeignKey(Floor)
def __unicode__(self):
return 'Floor: {0} -> Room: {1}'.format(
self.floor.number,
self.number)
class Meta:
verbose_name = 'Room'
verbose_name_plural = 'Rooms'
unique_together = ('number', 'floor')
class Bookshelf(dd.Model):
code = models.CharField('Code', null=False, max_length=5)
room = models.ForeignKey(Room)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2}'.format(
self.room.floor.number,
self.room.number,
self.code)
class Meta:
verbose_name = 'Bookshelf'
verbose_name_plural = 'Bookshelves'
unique_together = ('code', 'room')
class Rack(dd.Model):
code = models.CharField('Code', max_length=5, null=False)
bookshelf = models.ForeignKey(Bookshelf)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2} -> Rack: {3}'.format(
self.bookshelf.room.floor.number,
self.bookshelf.room.number,
self.bookshelf.code,
self.code)
class Meta:
verbose_name = 'Rack'
verbose_name_plural = 'Racks'
unique_together = ('code', 'bookshelf')
class Slot(dd.Model):
number = models.IntegerField('Number', null=False)
rack = models.ForeignKey(Rack)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2} -> Rack: {3} -> Slot: {4}'.format(
self.rack.bookshelf.room.floor.number,
self.rack.bookshelf.room.number,
self.rack.bookshelf.code,
self.rack.code,
self.number)
class Meta:
verbose_name = 'Slot'
verbose_name_plural = 'Slots'
unique_together = ('number', 'rack')
class Category(dd.Model):
name = models.CharField(null=False, max_length=50)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Category'
verbose_name_plural = 'Categories'
class Author(dd.Model):
name = models.CharField(null=False, max_length=50)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Author'
verbose_name_plural = 'Authors'
class Publication(dd.Model):
name = models.CharField(null=False, max_length=50)
def __unicode__(self):
return self.name
class Meta:
verbose_name = 'Publication'
verbose_name_plural = 'Publications'
class BookInfo(dd.Model):
name = models.CharField('Name', max_length=50)
author = models.ForeignKey(Author)
publication = models.ForeignKey(Publication)
category = models.ForeignKey(Category)
copies = models.IntegerField('Total Copies', null=False, default=1)
def __unicode__(self):
return 'Name: {0} -> Author: {1} -> Publication: {2}'.format(
self.name,
self.author,
self.publication)
class Meta:
verbose_name = 'Book Information'
verbose_name_plural = 'Books Information'
unique_together = ('name', 'author', 'publication')
class Book(dd.Model):
code = models.CharField(max_length=10, unique=True)
info = models.ForeignKey(BookInfo)
def __unicode__(self):
return 'Code: {0} -> Name: {1} -> Author: {2}'.format(
self.code,
self.info.name,
self.info.author)
class Meta:
verbose_name = 'Book'
verbose_name_plural = 'Books'
unique_together = ('code', 'info')
class BookLocation(dd.Model):
book = models.ForeignKey(Book, unique=True)
slot = models.ForeignKey(Slot, unique=True)
def __unicode__(self):
return 'Floor: {0} -> Room: {1} -> Bookshelf: {2} -> Rack:{3} -> Slot: {4} -> Book: {5}'.format(
self.slot.rack.bookshelf.room.floor.number,
self.slot.rack.bookshelf.room.number,
self.slot.rack.bookshelf.code,
self.slot.rack.code,
self.slot.number,
self.book.code)
class Meta:
verbose_name = 'Book Location'
verbose_name_plural = 'Book Locations'
from .ui import *
|
bsd-2-clause
| 1,497,361,167,193,368,000
| 27.467456
| 104
| 0.561214
| false
| 3.709329
| false
| false
| false
|
ryos36/polyphony-tutorial
|
Life/life_one.py
|
1
|
2840
|
import polyphony
from polyphony import module, pure
from polyphony import testbench
from polyphony.io import Port
from polyphony.typing import bit, uint3, uint4, List
from polyphony.timing import clksleep, clkfence, wait_rising, wait_falling
@module
class life:
def __init__(self):
self.i_bit4 = Port(uint4, 'in', protocol='valid')
self.o_bit = Port(bit, 'out', protocol='valid')
self.append_worker(self.life_worker, self.i_bit4, self.o_bit)
def life_worker(self, i_bit4, o_bit):
bit3_to_n = [ 0, 1, 1, 2, 1, 2, 2, 3 ]
bit3_to_m = [ 0, 1, 0, 1, 1, 2, 1, 2 ]
n_to_o = [0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]
        mat = [0] * 3
while polyphony.is_worker_running():
v = i_bit4()
#print("mat", mat)
#print("v", v)
if v == 8 :
mat2_old = mat[2]
mat[0] = 0
mat[1] = 0
mat[2] = 0
else:
v0 = bit3_to_n[v]
v1 = bit3_to_m[v]
mat0_old = mat[0]
mat1_old = mat[1]
mat2_old = mat[2]
mat[0] = 16 + v0
mat[1] = mat0_old + v1
mat[2] = mat1_old + v0
#print("mat2_old:", mat2_old)
if (mat2_old & 16) == 16 :
out_v = n_to_o[mat2_old & 15]
o_bit.wr(out_v)
m = life()
@testbench
def test(m):
m.i_bit4.wr(0)
clksleep(5)
m.i_bit4.wr(0)
clksleep(5)
m.i_bit4.wr(1)
v = m.o_bit.rd()
clksleep(5)
if 1 :
m.i_bit4.wr(0)
clksleep(5)
print("outv:", v)
if 0:
m.i_bit4.wr(0)
clksleep(5)
v = m.o_bit.rd()
print("outv:", v)
if 0 :
m.i_bit4.wr(4)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(3)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(8)
v = m.o_bit.rd()
print("outv:", v)
print("-")
clksleep(10)
#
m.i_bit4.wr(0)
m.i_bit4.wr(0)
m.i_bit4.wr(2)
m.i_bit4.wr(1)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(1)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(1)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(7)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(0)
v = m.o_bit.rd()
print("outv:", v)
m.i_bit4.wr(8)
v = m.o_bit.rd()
print("outv:", v)
test(m)
|
mit
| 7,964,215,205,951,401,000
| 21.362205
| 74
| 0.415493
| false
| 2.619926
| false
| false
| false
|
itucsdb1621/itucsdb1621
|
tags.py
|
1
|
2795
|
import psycopg2
from flask import Flask
from flask import render_template, request
from flask import Blueprint, current_app,session,redirect, url_for
#declaring sub app with blueprint
tags_app = Blueprint('tags_app', __name__)
@tags_app.route('/add_tag/<photo_id>/', methods=["POST"])
def add_tag(photo_id):
# a post request would be more elegant
username = request.form["username"]
x = request.form["x"]
y = request.form["y"]
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select ID from users where username=%s",(username,))
conn.commit()
tagged_id = crs.fetchone()
if tagged_id == None:
return render_template("message.html",message="User not found")
        ## if null, show an error message
crs.execute("insert into tags (tagger_id,tagged_id,photo_id,time,x,y) values (%s,%s,%s,now(),%s,%s)",(session["user_id"],tagged_id,photo_id,x,y))
conn.commit()
return render_template('message.html',message="Successfully added tag")
@tags_app.route('/update_tag/<photo_id>/', methods=["POST"])
def update_tag(photo_id):
newUsername = request.form["username"]
x = request.form["x"]
y = request.form["y"]
tagged_id=request.form["_id"]
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select ID from users where username=%s",(newUsername,))
newId = crs.fetchone()
if newId == None:
return render_template("message.html",message="User not found")
print(tagged_id)
        ## if null, show an error message
crs.execute("update tags set tagged_id=%s,time=now(),x=%s,y=%s where tagger_id=%s and tagged_id=%s and photo_id=%s ",(newId[0],x,y,session["user_id"],tagged_id,photo_id))
conn.commit()
return render_template('message.html',message="Successfully updated tag")
@tags_app.route('/delete_tag/<photo_id>/', methods=["POST"])
def delete_tag(photo_id):
tagged_id=request.form["_id"]
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
        ## if null, show an error message
print(tagged_id)
crs.execute("delete from tags where tagger_id=%s and tagged_id=%s and photo_id=%s ",(session["user_id"],tagged_id,photo_id))
conn.commit()
return render_template('message.html',message="Successfully deleted tag")
## currently unused
@tags_app.route('/retrieve_tags/<photo_id>/')
def retrieve_tags(photo_id):
with psycopg2.connect(current_app.config['dsn']) as conn:
crs = conn.cursor()
crs.execute("select * from tags where photo_id=%s ",(photo_id))
conn.commit()
return render_template('message.html',message="Successfully added tag")
|
gpl-3.0
| -8,171,542,644,453,910,000
| 41.348485
| 179
| 0.647227
| false
| 3.489388
| false
| false
| false
|
changhoonhahn/centralMS
|
centralms/tests/test_sfrs.py
|
1
|
1027
|
'''
Test functions for handling star formation rates
'''
import numpy as np
from scipy.integrate import odeint
import sfrs as SFR
def IntegrationTest():
    ''' Simple test of the integration
'''
logsfr = lambda mstar, t: np.log10(t**2)
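    # With SFR(t) = t**2, the analytic mass integral from 0 to tt is
    # tt**3 / 3, which is the value the numerical result is checked against
    # in the print statement below.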
for tt in np.arange(1., 11., 1.):
M_int = SFR.integSFR(logsfr, np.array([0.]),
np.array([0.]), np.array([tt]),
mass_dict={'type': 'rk4', 'f_retain': 1e-9, 't_step': 0.01})
print np.log10(10**M_int[0] - 1.), np.log10(tt**3/3.)
return None
def Integration_ScipyComp():
    ''' Compare the simple integrators against scipy's odeint
'''
dydt = lambda y, t: t
M_euler = SFR.ODE_Euler(dydt, np.array([0.]), np.array([0.,10.]), 0.001)
M_RK4 = SFR.ODE_RK4(dydt, np.array([0.]), np.array([0.,10.]), 0.1)
M_scipy = odeint(dydt, np.array([0.]), np.array([0.,10.]))
print M_euler
print M_RK4
print M_scipy
return None
if __name__=='__main__':
#IntegrationTest()
Integration_ScipyComp()
|
mit
| -3,111,330,352,351,419,400
| 19.54
| 76
| 0.555015
| false
| 2.860724
| false
| false
| false
|
robosafe/mc-vs-bdi
|
models/pta_models/table/legible_full/extract.py
|
1
|
8357
|
#!/usr/bin/env python
# This script translates and separates the traces computed with UPPAAL model
# checking and the tracer tool (libutap). The traces are originally in a
# *.xtr file format. The specified automata transitions are separated from
# the global traces (the human, the setting of gaze, pressure and location)
# and transformed into a *.txt file with a list of high-level commands for
# the human machine in the simulator (sending signals, waiting for signals,
# setting parameters).
# Written by Dejanira Araiza-Illan, March 2015
# Modified for the table scenario, July 2016
import rospy
import re
import os
import sys
variables_keep = ['pressure','tlocation','gaze','bored','humanReady','leg']
def extract(nameFile):
# -------------- PARSING OF THE MODEL INTO USEFUL STRUCTURES
#automata = raw_input("Name of the automata with commas and no spaces (e.g. aut1,aut2,aut3):")
automata = 'human,g,p,l'
#automata = 'Human,Gaze,Pressure,Location'
automaton = re.split(",",automata)
type_of_trace=0
transitions=[]
states=[]
traces=[]
delays=[]
numberfile=re.split('\D+',nameFile)
# print numberfile
for i, line in enumerate(open(nameFile+ '.tr', 'r')):
for match in re.finditer("Initial state",line): #If from legible_traces.py
type_of_trace=1
for match in re.finditer("Trace",line): #Separate multiple traces
traces.append(i)
for match in re.finditer("Transitions:", line):
transitions.append(i)
for match in re.finditer("State:", line):
states.append(i)
for match in re.finditer("Delay:", line):
delays.append(i)
#Eliminate states and keep transitions
# print type_of_trace
# print traces
# print transitions
# print states
# print delays
if type_of_trace==1:
f=open('stimulus_'+numberfile[1]+'.txt', 'w')
trans_content=[]
for i in range(0,len(transitions)):
for j, line in enumerate(open(nameFile+ '.tr', 'r')):
if j>transitions[i] and j<(states[i]):
if line!='\n':
trans_content.append(line)
# print trans_content
#Eliminate unimportant transitions
important=[]
for i, line in enumerate(trans_content):
for j, aut in enumerate(automaton):
if aut != ',':
if re.match(aut+'.', line)!=None:
important.append(line)
# print important
#Check each transition and determine if human: sends signal, receives signal, sets variables
global variables_keep
for i,trans in enumerate(important):
var_split = re.split('; ',trans)
if var_split[1] != '0': #Signals
for match in re.finditer('!',var_split[1]):
signal = re.split('!',var_split[1])
for kk,variable in enumerate(variables_keep):
if re.search('\\b'+signal[0]+'\\b',variable):
#Write send signal
f.write('tell '+signal[0]+'\n')
				for match in re.finditer('\?',var_split[1]):
					#Write receive signal
					signal = re.split('\?',var_split[1])
					for kk,variable in enumerate(variables_keep):
						if re.search('\\b'+signal[0]+'\\b',variable):
							f.write('receivesignal\n')
if var_split[2] != '0}\n': #Variables
commas = re.split(',',var_split[2])
for j,part in enumerate(commas):
if commas!='':
new_string = corrected(part)
if new_string !='' and not re.search('bored',new_string):
f.write('set_param\t'+new_string+'\n')
elif re.search('bored',new_string):
f.write(new_string+'\n')
else:
#Eliminate extra "states:
for j,delay in enumerate(delays):
for i,state in enumerate(states):
if state>delay:
states.pop(i)
break
for j,tr in enumerate(traces):
for i,state in enumerate(states):
if state>tr:
states.pop(i)
break
# print states
#First traces
for tr in range (0,len(traces)-1):
f=open('stimulus_'+str(tr+1)+'_'+nameFile+'.txt', 'w')
trans_content=[]
for i in range(0,len(transitions)):
if transitions[i]>traces[tr] and transitions[i]<traces[tr+1]:
# print transitions[i]
for j, line in enumerate(open(nameFile+ '.tr', 'r')):
if j>=(transitions[i]) and j<(states[i]):
# print line
if line!='\n' and line!='Transitions:\n':
trans_content.append(line)
# print trans_content
#Eliminate unimportant transitions
important=[]
for i, line in enumerate(trans_content):
for j, aut in enumerate(automaton):
if aut != ',':
if re.match(' '+aut+'.', line)!=None:
important.append(line)
# print important
#Check each transition and determine if human: sends signal, receives signal, sets variables
for i,trans in enumerate(important):
var_split = re.split(', ',trans)
if var_split[1] != 'tau': #Signals
for match in re.finditer('!',var_split[1]):
signal = re.split('!',var_split[1])
f.write('tell '+signal[0]+'\n')#Write send signal
for match in re.finditer('\?',var_split[1]):
signal = re.split('\?',var_split[1])
f.write('receivesignal\n')#Write receive signal
if var_split[2] != '1 }\n': #Variables
for j in range(2,len(var_split)):
new_string = corrected2(var_split[j])
if new_string !='' and new_string != 'bored':
f.write('set_param\t'+new_string+'\n')
elif new_string == 'bored':
f.write(new_string+'\n')
#Last trace
f=open('stimulus_'+str(len(traces))+'_'+nameFile+'.txt', 'w')
trans_content=[]
for i in range(0,len(transitions)):
if transitions[i]>traces[len(traces)-1]:
for j, line in enumerate(open(nameFile+ '.tr', 'r')):
if j>=(transitions[i]) and j<(states[i]):
if line!='\n' and line!='Transitions:\n':
trans_content.append(line)
# print trans_content
important=[]
for i, line in enumerate(trans_content):
for j, aut in enumerate(automaton):
if aut != ',':
if re.match(' '+aut+'.', line)!=None:
important.append(line)
# print important
#Check each transition and determine if human: sends signal, receives signal, sets variables
for i,trans in enumerate(important):
var_split = re.split(', ',trans)
if var_split[1] != 'tau': #Signals
for match in re.finditer('!',var_split[1]):
signal = re.split('!',var_split[1])
f.write('tell '+signal[0]+'\n')#Write send signal
for match in re.finditer('\?',var_split[1]):
signal = re.split('\?',var_split[1])
f.write('receivesignal\n')#Write receive signal
if var_split[2] != '1 }\n': #Variables
for j in range(2,len(var_split)):
new_string = corrected2(var_split[j])
if new_string !='' and not re.search('bored',new_string):
f.write('set_param\t'+new_string+'\n')
elif re.search('bored',new_string):
f.write(new_string+'\n')
def corrected(expr):
expr_new=''
modif1 = re.split("\:=",expr)
#print modif1[0]
global variables_keep
for kk,variable in enumerate(variables_keep):
if re.search('\\b'+modif1[0]+'\\b',variable):
if re.search('bored',modif1[0]) and re.search('true',modif1[1]):
expr_new='bored'
elif re.search('bored',modif1[0]) and re.search('false',modif1[1]):
expr_new=''
elif re.search('tlocation',modif1[0]):
expr_new=expr_new+'location'+'='
if re.match('true',modif1[1]):
expr_new=expr_new+'1'
elif re.match('false',modif1[1]):
expr_new=expr_new+'0'
else:
expr_new=expr_new+modif1[0]+'='
if re.match(modif1[0],modif1[1]):
modif2 = re.split('\s*[+]|[-]\s*',modif1[1])
#print modif2
modif3 = re.split('}\n',modif2[1])
#print modif3
expr_new=expr_new+modif3[0]
elif re.match('rand_v',modif1[1]):
expr_new = ''
elif re.match('true',modif1[1]):
expr_new=expr_new+'1'
elif re.match('false',modif1[1]):
expr_new=expr_new+'0'
else:
modif4 = re.split('}\n',modif1[1])
expr_new=expr_new+modif4[0]
# print expr_new
return expr_new
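# Editorial note (illustrative, not from the original source): for variables
# listed in variables_keep, corrected() rewrites an assignment such as
# 'pressure:=30' into 'pressure=30', maps true/false to 1/0, and drops
# trailing '}' markers; assignments to other variables, or to rand_v, yield
# the empty string and are skipped by the caller.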
def corrected2(expr):
expr_new=''
modif1 = re.split(" \:= ",expr)
expr_new=expr_new+modif1[0]+'='
if re.match(modif1[0],modif1[1]):
modif2 = re.split('\s*[+]|[-]\s*',modif1[1])
modif3 = re.split('\s*[}]\n',modif2[1])
expr_new=expr_new+modif3[0]
elif re.match('rand_v',modif1[1]):
expr_new = ''
else:
modif4 = re.split(' }\n',modif1[1])
expr_new=expr_new+modif4[0]
# print expr_new
return expr_new
if __name__ == "__main__":
if len(sys.argv) == 2: #arguments passed by command line: program, trace file
extract(sys.argv[1])
else:
print 'extract.py [trace file or .tr]'
sys.exit(1)
|
gpl-3.0
| 51,145,047,482,889,110
| 34.261603
| 463
| 0.630848
| false
| 2.881724
| false
| false
| false
|
akshayka/bft2f
|
bft2f.py
|
1
|
3641
|
#!/usr/bin/python
import sys
import os
from mininet.topo import Topo
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.net import Mininet
from mininet.log import lg, info
from mininet.util import dumpNodeConnections
from mininet.cli import CLI
from mininet.util import pmonitor
from subprocess import Popen, PIPE
from time import sleep, time
from multiprocessing import Process
from argparse import ArgumentParser
import signal
NUMBER_NODES = 7
NUMBER_CLIENTS = 1
NUMBER_USERS = 1
RUN_DURATION = 35
popens = {}
LINK_BW=10
LINK_DELAY='10ms'
LINK_LOSS=10
ETC_HOSTS_FILE_NAME="bft2f_etc_hosts"
DEBUG_OUTPUT_FILE='bft2f.debug'
class BftTopo(Topo):
def __init__(self, n=2):
super(BftTopo, self).__init__()
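        # The topology built below is a star: one switch (s0) with
        # NUMBER_NODES hosts, NUMBER_CLIENTS clients, NUMBER_USERS users and
        # a single 'app' host, each attached over a LINK_BW Mbit,
        # LINK_DELAY link.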
s0 = self.addSwitch('s0')
# create hosts
for i in xrange(0, NUMBER_NODES):
self.addLink(self.addHost('h%d' % (i)), s0, bw=LINK_BW, delay=LINK_DELAY)
for i in xrange(0, NUMBER_CLIENTS):
self.addLink(self.addHost('c%d' % (i)), s0, bw=LINK_BW, delay=LINK_DELAY)
for i in xrange(0, NUMBER_USERS):
self.addLink(self.addHost('u%d' % (i)), s0, bw=LINK_BW, delay=LINK_DELAY)
self.addLink(self.addHost('app'), s0, bw=LINK_BW, delay=LINK_DELAY)
return
def start_nodes(net, verbose):
for i in range(0, NUMBER_NODES):
h = net.getNodeByName('h%d'%(i))
h.cmd("route add -net default dev h%d-eth0" % (i))
if verbose:
cmd = 'python start_node.py --node_id=%d -v >>%s 2>&1' % (i, DEBUG_OUTPUT_FILE)
else:
cmd = 'python start_node.py --node_id=%d >>%s 2>&1' % (i, DEBUG_OUTPUT_FILE)
popens[h] = h.popen(cmd, shell=True, preexec_fn=os.setsid)
def start_client(net):
for i in xrange(0, NUMBER_CLIENTS):
client = net.getNodeByName('c%d' % (i))
client.cmd("route add -net default dev c%d-eth0" % (i))
popens[client] = client.popen('python start_client.py --client_id=%d' % (i),
shell=True, preexec_fn=os.setsid)
def start_user(net):
app = net.getNodeByName('app')
for i in xrange(0, NUMBER_USERS):
user = net.getNodeByName('u%d'%(i))
client = net.getNodeByName('c%d' % (i))
user.cmd("route add -net default dev u%d-eth0" % (i))
#popens[user] = client.popen('python start_user.py --user_id=%d --client_ip=%s --app_ip=%s >>%s 2>&1' % (i, client.IP(), app.IP(), DEBUG_OUTPUT_FILE), shell=True, preexec_fn=os.setsid)
def start_app(net):
app = net.getNodeByName('app')
app.cmd("route add -net default dev app-eth0")
popens[app] = app.popen('node haraka.js >>%s 2>&1' % (DEBUG_OUTPUT_FILE),
shell=True, preexec_fn=os.setsid, cwd='./Haraka')
def create_etc_hosts(net):
with open(ETC_HOSTS_FILE_NAME, "w+") as f:
for h in net.values():
f.write("%s\t%s\n" % (h.name, h.IP()))
def main():
parser = ArgumentParser()
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
topo = BftTopo()
net = Mininet(topo=topo, host=CPULimitedHost, link=TCLink)
net.start()
create_etc_hosts(net)
# This dumps the topology and how nodes are interconnected through
# links.
dumpNodeConnections(net.hosts)
start_app(net)
start_nodes(net, args.verbose)
start_client(net)
#CLI(net)
sleep(5)
start_user(net)
CLI(net)
endTime = time() + RUN_DURATION
for p in popens.values():
os.killpg(p.pid, signal.SIGTERM)
net.stop()
if __name__ == '__main__':
main()
|
gpl-2.0
| 563,290,887,564,789,900
| 32.1
| 193
| 0.61192
| false
| 2.999176
| false
| false
| false
|
alexanderAustin/PythonGame
|
test.py
|
1
|
2259
|
# This was built from the tutorial https://www.raywenderlich.com/24252/beginning-game-programming-for-teens-with-python
import pygame, math, random
from pygame.locals import *
import pyganim
# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
pygame.display.set_caption('PyGame - Testing')
rootImg = "resources/images/basic_game/"
rootAud = "resources/audio/basic_game/"
player = pygame.image.load(rootImg + "dude.png")
grass = pygame.image.load(rootImg + "grass.png")
castle = pygame.image.load(rootImg + "castle.png").convert_alpha()
# cow = pygame.image.load("resources/images/animals/cow/cow_front.png") #subject to change
# Used https://github.com/asweigart/pyganim/tree/master/examples
# http://www.pygame.org/project-Pyganim+sprite+animation+module-2106-.html
# for the sprite sheets
cows = pyganim.getImagesFromSpriteSheet(
filename="resources/images/animals/cow/cow_front.png",
rows=4, cols=2,
scale=2)
cframes = list(zip(cows, [100] * len(cows)))
cowObj = pyganim.PygAnimation(cframes)
cowObj.play()
cowsr = pyganim.getImagesFromSpriteSheet(
filename="resources/images/animals/cow/cow_rear.png",
rows=3, cols=3,
scale=2)
crframes = list(zip(cowsr, [100] * len(cowsr)))
# crframes = crframes.pop()#remove blank frame
print crframes
cowrObj = pyganim.PygAnimation(crframes)
cowrObj.play()
# 4 - keep looping through
running = 1
while running:
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the screen elements
for x in range(width/grass.get_width()+1):
for y in range(height/grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
cowObj.blit(screen, (200, 20))
cowrObj.blit(screen, (50, 200))
# screen.blit(castle, (100,100))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
for event in pygame.event.get():
# check if the event is the X button
if event.type==pygame.QUIT:
# if it is quit the game
pygame.quit()
exit(0)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
|
apache-2.0
| 5,823,003,622,661,369,000
| 29.958904
| 119
| 0.678619
| false
| 3.086066
| false
| false
| false
|
michel-slm/0install
|
tests/testescaping.py
|
1
|
2832
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import basetest
from basetest import BaseTest
import sys, os, re
import unittest
sys.path.insert(0, '..')
from zeroinstall.injector import model
from zeroinstall.support import escaping
safe = re.compile('^[-.a-zA-Z0-9_]*$')
class TestEscaping(BaseTest):
def testEscape(self):
self.assertEqual("", model.escape(""))
self.assertEqual("hello", model.escape("hello"))
self.assertEqual("%20", model.escape(" "))
self.assertEqual("file%3a%2f%2ffoo%7ebar",
model.escape("file://foo~bar"))
self.assertEqual("file%3a%2f%2ffoo%25bar",
model.escape("file://foo%bar"))
self.assertEqual("file:##foo%7ebar",
model._pretty_escape("file://foo~bar"))
self.assertEqual("file:##foo%25bar",
model._pretty_escape("file://foo%bar"))
def testUnescape(self):
self.assertEqual("", model.unescape(""))
self.assertEqual("hello", model.unescape("hello"))
self.assertEqual(" ", model.unescape("%20"))
self.assertEqual("file://foo~bar",
model.unescape("file%3a%2f%2ffoo%7ebar"))
self.assertEqual("file://foo%bar",
model.unescape("file%3a%2f%2ffoo%25bar"))
self.assertEqual("file://foo",
model.unescape("file:##foo"))
self.assertEqual("file://foo~bar",
model.unescape("file:##foo%7ebar"))
self.assertEqual("file://foo%bar",
model.unescape("file:##foo%25bar"))
def testEscaping(self):
def check(str):
self.assertEqual(str, model.unescape(model.escape(str)))
self.assertEqual(str, model.unescape(model._pretty_escape(str)))
self.assertEqual(str,
escaping.ununderscore_escape(escaping.underscore_escape(str)))
check('')
check('http://example.com')
check('http://example%46com')
check('http:##example#com')
check('http://example.com/foo/bar.xml')
check('%20%21~&!"£ :@;,./{}$%^&()')
check('http://example.com/foo_bar-50%á.xml')
check('_one__two___three____four_____')
check('_1_and_2_')
def testUnderEscape(self):
for x in range(0, 128):
unescaped = chr(x)
escaped = escaping.underscore_escape(unescaped)
assert safe.match(escaped), escaped
self.assertEqual(unescaped, escaping.ununderscore_escape(escaped))
self.assertEqual("_2e_", escaping.underscore_escape("."))
self.assertEqual("_2e_.", escaping.underscore_escape(".."))
def testEscapeInterface(self):
self.assertEqual(["http", "example.com", "foo.xml"], model.escape_interface_uri("http://example.com/foo.xml"))
self.assertEqual(["http", "example.com", "foo__.bar.xml"], model.escape_interface_uri("http://example.com/foo/.bar.xml"))
self.assertEqual(["file", "root__foo.xml"], model.escape_interface_uri("/root/foo.xml"))
try:
model.escape_interface_uri("ftp://example.com/foo.xml")
assert 0
except AssertionError:
pass
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
| -8,598,118,403,903,593,000
| 31.906977
| 123
| 0.674205
| false
| 3.049569
| true
| false
| false
|
hodgestar/graas
|
graas/cli.py
|
1
|
1213
|
""" Command for launching GRaaS. """
import sys
import click
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from .api import GraasApi
from .devserver import GraasServerFactory
@click.command("graas")
@click.version_option()
@click.option(
'--host', '-h',
default='localhost',
help='Host to listen on')
@click.option(
'--web-port', '-p',
type=int, default=8080,
help='Port for web server to listen on')
@click.option(
'--device-port', '-d',
type=int, default=8081,
help='Port for device server to listen on')
@click.option(
'--log-file', '-l',
type=str, default=None,
help='File to log to')
def main(host, web_port, device_port, log_file):
""" Vumi Go Opt Out API. """
if log_file is None:
log_file = sys.stdout
log.startLogging(log_file)
site = Site(GraasApi().app.resource())
reactor.listenTCP(web_port, site, interface=host)
factory = GraasServerFactory()
reactor.listenTCP(device_port, factory, interface=host)
log.msg("Web API listening on %s:%s" % (host, web_port))
log.msg("Device server listening on %s:%s" % (host, device_port))
reactor.run()
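# Editorial example of an invocation (illustrative, based on the click
# options defined above and assuming the package installs a `graas`
# entry point):
#
#     graas --host 0.0.0.0 --web-port 8080 --device-port 8081
#
# which serves the web API on port 8080 and the device server on port 8081.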
|
bsd-3-clause
| 7,179,159,240,244,600,000
| 24.808511
| 69
| 0.658697
| false
| 3.296196
| false
| false
| false
|
infobloxopen/infoblox-client
|
infoblox_client/object_manager.py
|
1
|
21402
|
# Copyright 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
try:
from oslo_log import log as logging
except ImportError: # pragma: no cover
import logging
from infoblox_client import exceptions as ib_ex
from infoblox_client import objects as obj
from infoblox_client import utils as ib_utils
LOG = logging.getLogger(__name__)
class InfobloxObjectManager(object):
def __init__(self, connector):
self.connector = connector
def create_network_view(self, network_view, extattrs):
return obj.NetworkView.create(self.connector,
name=network_view,
extattrs=extattrs)
def delete_network_view(self, network_view):
# never delete default network view
if network_view == 'default':
return
nview = obj.NetworkView.search(self.connector,
name=network_view)
if nview:
nview.delete()
def create_dns_view(self, network_view, dns_view):
return obj.DNSView.create(self.connector,
name=dns_view,
network_view=network_view)
def delete_dns_view(self, dns_view):
dns_view = obj.DNSView.search(self.connector,
name=dns_view)
if dns_view:
dns_view.delete()
def create_network(self, net_view_name, cidr, nameservers=None,
members=None, gateway_ip=None, dhcp_trel_ip=None,
network_extattrs=None):
"""Create NIOS Network and prepare DHCP options.
Some DHCP options are valid for IPv4 only, so just skip processing
them for IPv6 case.
:param net_view_name: network view name
:param cidr: network to allocate, example '172.23.23.0/24'
:param nameservers: list of name servers hosts/ip
:param members: list of objects.AnyMember objects that are expected
to serve dhcp for created network
:param gateway_ip: gateway ip for the network (valid for IPv4 only)
:param dhcp_trel_ip: ip address of dhcp relay (valid for IPv4 only)
:param network_extattrs: extensible attributes for network (instance of
objects.EA)
:returns: created network (instance of objects.Network)
"""
ipv4 = ib_utils.determine_ip_version(cidr) == 4
options = []
if nameservers:
options.append(obj.Dhcpoption(name='domain-name-servers',
value=",".join(nameservers)))
if ipv4 and gateway_ip:
options.append(obj.Dhcpoption(name='routers',
value=gateway_ip))
if ipv4 and dhcp_trel_ip:
options.append(obj.Dhcpoption(name='dhcp-server-identifier',
num=54,
value=dhcp_trel_ip))
return obj.Network.create(self.connector,
network_view=net_view_name,
cidr=cidr,
members=members,
options=options,
extattrs=network_extattrs,
check_if_exists=False)
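    # Editorial usage sketch for create_network (values are hypothetical):
    #
    #     manager = InfobloxObjectManager(connector)
    #     manager.create_network('default', '172.23.23.0/24',
    #                            nameservers=['8.8.8.8'],
    #                            gateway_ip='172.23.23.1')
    #
    # which adds DNS and router DHCP options before creating the network.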
def get_network(self, network_view, cidr):
return obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
def create_ip_range(self, network_view, start_ip, end_ip, network,
disable, range_extattrs):
"""Creates IPRange or fails if already exists."""
return obj.IPRange.create(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip,
cidr=network,
disable=disable,
extattrs=range_extattrs,
check_if_exists=False)
def delete_ip_range(self, network_view, start_ip, end_ip):
range = obj.IPRange.search(self.connector,
network_view=network_view,
start_addr=start_ip,
end_addr=end_ip)
if range:
range.delete()
def has_networks(self, network_view_name):
networks = obj.Network.search_all(self.connector,
network_view=network_view_name)
return bool(networks)
def network_exists(self, network_view, cidr):
"""Deprecated, use get_network() instead."""
LOG.warning(
"DEPRECATION WARNING! Using network_exists() is deprecated "
"and to be removed in next releases. "
"Use get_network() or objects.Network.search instead")
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
return network is not None
def delete_network(self, network_view, cidr):
network = obj.Network.search(self.connector,
network_view=network_view,
cidr=cidr)
if network:
network.delete()
def create_network_from_template(self, network_view, cidr, template,
extattrs):
return obj.Network.create(self.connector,
network_view=network_view,
cidr=cidr,
template=template,
extattrs=extattrs,
check_if_exists=False)
def update_network_options(self, ib_network, extattrs=None):
if extattrs:
if ib_network.extattrs:
# Merge EA values as dicts
ea_dict = ib_network.extattrs.ea_dict
ea_dict.update(extattrs.ea_dict)
merged_ea = obj.EA(ea_dict)
ib_network.extattrs = merged_ea
else:
ib_network.extattrs = extattrs
return ib_network.update()
def get_host_record(self, dns_view, ip, network_view=None):
return obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
def find_hostname(self, dns_view, hostname, ip, network_view=None):
return obj.HostRecord.search(self.connector,
name=hostname,
view=dns_view,
ip=ip,
network_view=network_view)
def find_host_records_by_mac(self, dns_view, mac, network_view=None):
host_records = []
host_records.extend(obj.HostRecord.search_all(
self.connector, view=dns_view, mac=mac, network_view=network_view))
        # Unfortunately WAPI does not support searching host records by DUID,
        # so search host addresses by DUID first and then look the hosts up by name
ipv6_host_addresses = obj.IPv6HostAddress.search_all(
self.connector, duid=mac, network_view=network_view)
ipv6_hosts = []
for addr in ipv6_host_addresses:
hosts = obj.HostRecordV6.search_all(
self.connector, name=addr.host, view=dns_view,
network_view=network_view)
for host in hosts:
if host not in ipv6_hosts:
ipv6_hosts.append(host)
host_records.extend(ipv6_hosts)
return host_records
def create_host_record_for_given_ip(self, dns_view, zone_auth,
hostname, mac, ip, extattrs,
use_dhcp, use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def create_host_record_from_range(self, dns_view, network_view_name,
zone_auth, hostname, mac, first_ip,
last_ip, extattrs, use_dhcp,
use_dns=True):
name = '.'.join([hostname, zone_auth])
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view_name, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
return obj.HostRecord.create(self.connector,
view=dns_view,
name=name,
ip=ip_obj,
configure_for_dns=use_dns,
extattrs=extattrs,
check_if_exists=False)
def delete_host_record(self, dns_view, ip_address, network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view, ip=ip_address,
network_view=network_view)
if host_record:
host_record.delete()
def create_fixed_address_for_given_ip(self, network_view, mac, ip,
extattrs):
return obj.FixedAddress.create(self.connector,
network_view=network_view,
mac=mac,
ip=ip,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_range(self, network_view, mac, first_ip,
last_ip, extattrs):
ip = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
return obj.FixedAddress.create(self.connector,
ip=ip,
mac=mac,
network_view=network_view,
extattrs=extattrs,
check_if_exists=False)
def create_fixed_address_from_cidr(self, netview, mac, cidr, extattrs):
ip = obj.IPAllocation.next_available_ip_from_cidr(netview, cidr)
return obj.FixedAddress.create(self.connector,
network_view=netview,
ip=ip,
mac=mac,
extattrs=extattrs,
check_if_exists=False)
def delete_fixed_address(self, network_view, ip_address):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip_address)
if fixed_address:
fixed_address.delete()
def get_fixed_addresses_by_mac(self, network_view, mac):
return obj.FixedAddress.search_all(
self.connector, network_view=network_view, mac=mac)
def add_ip_to_record(self, host_record, ip, mac, use_dhcp=True):
ip_obj = obj.IP.create(ip=ip, mac=mac, configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def add_ip_to_host_record_from_range(self, host_record, network_view,
mac, first_ip, last_ip,
use_dhcp=True):
ip_alloc = obj.IPAllocation.next_available_ip_from_range(
network_view, first_ip, last_ip)
ip_obj = obj.IP.create(ip=ip_alloc, mac=mac,
configure_for_dhcp=use_dhcp)
host_record.ip.append(ip_obj)
return host_record.update()
def delete_ip_from_host_record(self, host_record, ip):
host_record.ip.remove(ip)
return host_record.update()
def has_dns_zones(self, dns_view):
zones = obj.DNSZone.search_all(self.connector, view=dns_view)
return bool(zones)
def create_dns_zone(self, dns_view, dns_zone,
grid_primary=None, grid_secondaries=None,
zone_format=None, ns_group=None, prefix=None,
extattrs=None):
return obj.DNSZone.create(self.connector,
fqdn=dns_zone,
view=dns_view,
extattrs=extattrs,
zone_format=zone_format,
ns_group=ns_group,
prefix=prefix,
grid_primary=grid_primary,
grid_secondaries=grid_secondaries)
def delete_dns_zone(self, dns_view, dns_zone_fqdn):
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.delete()
def update_dns_zone_attrs(self, dns_view, dns_zone_fqdn, extattrs):
if not extattrs:
return
dns_zone = obj.DNSZone.search(self.connector,
fqdn=dns_zone_fqdn,
view=dns_view)
if dns_zone:
dns_zone.extattrs = extattrs
dns_zone.update()
def update_host_record_eas(self, dns_view, ip, extattrs):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip)
if host_record:
host_record.extattrs = extattrs
host_record.update()
def update_fixed_address_eas(self, network_view, ip, extattrs):
fixed_address = obj.FixedAddress.search(self.connector,
network_view=network_view,
ip=ip)
if fixed_address:
fixed_address.extattrs = extattrs
fixed_address.update()
def update_dns_record_eas(self, dns_view, ip, extattrs):
a_record = obj.ARecordBase.search(self.connector,
ip=ip,
view=dns_view)
if a_record:
a_record.extattrs = extattrs
a_record.update()
ptr_record = obj.PtrRecord.search(self.connector,
ip=ip,
view=dns_view)
if ptr_record:
ptr_record.extattrs = extattrs
ptr_record.update()
def bind_name_with_host_record(self, dns_view, ip, name, extattrs,
network_view=None):
host_record = obj.HostRecord.search(self.connector,
view=dns_view,
ip=ip,
network_view=network_view)
if host_record:
host_record.name = name
host_record.extattrs = extattrs
host_record.update()
def bind_name_with_record_a(self, dns_view, ip, name, bind_list,
extattrs):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in bind_list) or
(not is_ipv4 and 'record:aaaa' in bind_list)):
obj.ARecordBase.create(self.connector,
view=dns_view,
ip=ip,
name=name,
extattrs=extattrs,
update_if_exists=True)
if 'record:ptr' in bind_list:
obj.PtrRecord.create(self.connector,
view=dns_view,
ip=ip,
ptrdname=name,
extattrs=extattrs,
update_if_exists=True)
def unbind_name_from_record_a(self, dns_view, ip, name, unbind_list):
is_ipv4 = ib_utils.determine_ip_version(ip) == 4
if ((is_ipv4 and 'record:a' in unbind_list) or
(not is_ipv4 and 'record:aaaa' in unbind_list)):
a_record = obj.ARecordBase.search(self.connector,
view=dns_view,
ip=ip,
name=name)
if a_record:
self.delete_objects_associated_with_a_record(a_record.name,
a_record.view,
unbind_list)
a_record.delete()
if 'record:ptr' in unbind_list:
ptr_record = obj.PtrRecord.search(self.connector,
view=dns_view,
ip=ip,
ptrdname=name)
if ptr_record:
ptr_record.delete()
def get_member(self, member):
member.fetch()
return member
def get_all_ea_definitions(self):
return obj.EADefinition.search_all(self.connector)
def create_ea_definition(self, ea_def, reraise=False):
try:
return obj.EADefinition.create(self.connector,
check_if_exists=False,
**ea_def)
except ib_ex.InfobloxCannotCreateObject:
LOG.error('Unable to create Extensible Attribute Definition '
'%s' % ea_def)
if reraise:
raise
def create_required_ea_definitions(self, required_ea_defs, reraise=False):
existing_ea_defs = self.get_all_ea_definitions()
missing_ea_defs = []
for req_def in required_ea_defs:
if not [ea_def for ea_def in existing_ea_defs
if ea_def.name == req_def['name']]:
missing_ea_defs.append(req_def)
created_ea_defs = []
for ea_def in missing_ea_defs:
if self.create_ea_definition(ea_def, reraise=reraise):
created_ea_defs.append(ea_def)
return created_ea_defs
def restart_all_services(self, member):
if not member._ref:
member.fetch(only_ref=True)
self.connector.call_func('restartservices', member._ref,
{'restart_option': 'RESTART_IF_NEEDED',
'service_option': 'ALL'})
def delete_objects_associated_with_a_record(self, name, view, delete_list):
"""Deletes records associated with record:a or record:aaaa."""
search_objects = {}
if 'record:cname' in delete_list:
search_objects['record:cname'] = 'canonical'
if 'record:txt' in delete_list:
search_objects['record:txt'] = 'name'
if not search_objects:
return
for obj_type, search_type in search_objects.items():
payload = {'view': view,
search_type: name}
ib_objs = self.connector.get_object(obj_type, payload)
if ib_objs:
for ib_obj in ib_objs:
self.delete_object_by_ref(ib_obj['_ref'])
def delete_all_associated_objects(self, network_view, ip, delete_list):
LOG.warning(
"DEPRECATION WARNING! Using delete_all_associated_objects() "
"is deprecated and to be removed in next releases. "
"Use unbind_name_from_record_a() instead.")
def delete_object_by_ref(self, ref):
try:
self.connector.delete_object(ref)
except ib_ex.InfobloxCannotDeleteObject:
pass
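# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# `ibom` stands for an instance of the manipulator class defined earlier in this
# module, already wired to a configured Infoblox `connector`; the view names,
# addresses and host names below are placeholders, shown as comments so the
# module's behaviour is unchanged.
#
#   ibom.create_ip_range('default', '192.168.1.10', '192.168.1.50',
#                        '192.168.1.0/24', disable=False, range_extattrs=None)
#   net = ibom.get_network('default', '192.168.1.0/24')
#   ibom.bind_name_with_record_a('default.dns_view', '192.168.1.10',
#                                'host1.example.org',
#                                bind_list=['record:a', 'record:ptr'],
#                                extattrs=None)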
|
apache-2.0
| -3,936,074,170,078,028,300
| 43.40249
| 79
| 0.48944
| false
| 4.5721
| true
| false
| false
|
IvIePhisto/ECoXiPy
|
ecoxipy/pyxom/_document.py
|
1
|
17879
|
# -*- coding: utf-8 -*-
import collections
from xml.sax.xmlreader import AttributesImpl
from ecoxipy import _python2, _unicode
from ecoxipy import _helpers
from ._common import XMLNode, ContainerNode, _string_repr
from ._content_nodes import Text
from .indexing import (IndexDescriptor, ElementByUniqueAttributeValueIndexer,
ElementsByNameIndexer, NamespaceIndexer)
class DocumentType(object):
'''\
    Represents a document type declaration of a :class:`Document`. It should
    not be instantiated on its own.
:param name: The document element name.
:type name: Unicode string
:param publicid: The document type public ID or :const:`None`.
:type publicid: Unicode string
:param systemid: The document type system ID or :const:`None`.
:type systemid: Unicode string
:param check_well_formedness: If :const:`True` the document element name
will be checked to be a valid XML name.
:type check_well_formedness: :func:`bool`
'''
__slots__ = {'_name', '_publicid', '_systemid', '_check_well_formedness'}
def __init__(self, name, publicid, systemid, check_well_formedness):
if check_well_formedness:
if name is not None:
_helpers.enforce_valid_xml_name(name)
if publicid is not None:
_helpers.enforce_valid_doctype_publicid(publicid)
if systemid is not None:
_helpers.enforce_valid_doctype_systemid(systemid)
self._name = name
self._publicid = publicid
self._systemid = systemid
self._check_well_formedness = check_well_formedness
@property
def name(self):
'''\
The document element name or :const:`None`. On setting if the value
is :const:`None`, :attr:`publicid` and :attr:`systemid` are also set
        to :const:`None`. Otherwise the value is converted to a Unicode
string; a :class:`ecoxipy.XMLWellFormednessException` is thrown if it
is not a valid XML name and ``check_well_formedness`` is
:const:`True`.
'''
return self._name
@name.setter
def name(self, name):
if name is None:
self._publicid = None
self._systemid = None
else:
name = _unicode(name)
if self._check_well_formedness:
_helpers.enforce_valid_xml_name(name)
self._name = name
@property
def publicid(self):
'''\
The document type public ID or :const:`None`. On setting if the value
is not :const:`None` it is converted to a Unicode string; a
:class:`ecoxipy.XMLWellFormednessException` is thrown if it is not a
valid doctype public ID and ``check_well_formedness`` is
:const:`True`.
'''
return self._publicid
@publicid.setter
def publicid(self, publicid):
if publicid is not None:
publicid = _unicode(publicid)
if self._check_well_formedness:
_helpers.enforce_valid_doctype_publicid(publicid)
self._publicid = publicid
@property
def systemid(self):
'''\
The document type system ID or :const:`None`. On setting if the value
is not :const:`None` it is converted to a Unicode string; a
:class:`ecoxipy.XMLWellFormednessException` is thrown if it is not a
valid doctype system ID and ``check_well_formedness`` is
:const:`True`.
'''
return self._systemid
@systemid.setter
def systemid(self, systemid):
if systemid is not None:
systemid = _unicode(systemid)
if self._check_well_formedness:
_helpers.enforce_valid_doctype_systemid(systemid)
self._systemid = systemid
def __repr__(self):
return 'ecoxipy.pyxom.DocumentType({}, {}, {})'.format(
_string_repr(self._name),
_string_repr(self._publicid),
_string_repr(self._systemid),
)
def __eq__(self, other):
return (isinstance(other, DocumentType)
and self._name == other._name
and self._publicid == other._publicid
and self._systemid == other._systemid
)
def __ne__(self, other):
return (not(isinstance(other, DocumentType))
or self._name != other._name
or self._publicid != other._publicid
or self._systemid != other._systemid
)
@staticmethod
def _parse_values(name, publicid, systemid):
if name is None:
publicid = None
systemid = None
else:
name = _unicode(name)
if publicid is not None:
publicid = _unicode(publicid)
if systemid is not None:
systemid = _unicode(systemid)
return name, publicid, systemid
@staticmethod
def _create(name, publicid, systemid, check_well_formedness):
name, publicid, systemid = DocumentType._parse_values(
name, publicid, systemid)
return DocumentType(name, publicid, systemid, check_well_formedness)
class Document(ContainerNode):
'''\
    A :class:`ContainerNode` representing an XML document.
:param doctype_name: The document type root element name or :const:`None`
if the document should not have document type declaration.
:type doctype_name: Unicode string
:param doctype_publicid: The public ID of the document type declaration
or :const:`None`.
:type doctype_publicid: Unicode string
:param doctype_systemid: The system ID of the document type declaration
or :const:`None`.
:type doctype_systemid: Unicode string
:param children: The document root :class:`XMLNode` instances.
:param encoding: The encoding of the document. If it is :const:`None`
`UTF-8` is used.
:type encoding: Unicode string
:param omit_xml_declaration: If :const:`True` the XML declaration is
omitted.
:type omit_xml_declaration: :func:`bool`
:param check_well_formedness: If :const:`True` the document element name
will be checked to be a valid XML name.
:type check_well_formedness: :func:`bool`
:raises ecoxipy.XMLWellFormednessException: If ``check_well_formedness``
is :const:`True` and ``doctype_name`` is not a valid XML name,
``doctype_publicid`` is not a valid public ID or ``doctype_systemid``
is not a valid system ID.
'''
__slots__ = {'_doctype', '_omit_xml_declaration', '_encoding'}
def __init__(self, doctype_name, doctype_publicid, doctype_systemid,
children, omit_xml_declaration, encoding,
check_well_formedness=False):
ContainerNode.__init__(self, children)
self._doctype = DocumentType(doctype_name, doctype_publicid,
doctype_systemid, check_well_formedness)
self._omit_xml_declaration = omit_xml_declaration
if encoding is None:
encoding = u'UTF-8'
self._encoding = encoding
@staticmethod
def create(*children, **kargs):
'''\
Creates a document and converts parameters to appropriate types.
:param children: The document root nodes. All items that are not
:class:`XMLNode` instances create :class:`Text` nodes after they
have been converted to Unicode strings.
:param kargs: The same parameters as the constructor has (except
``children``) are recognized. The items ``doctype_name``,
``doctype_publicid``, ``doctype_systemid``, and ``encoding`` are
converted to Unicode strings if they are not :const:`None`.
``omit_xml_declaration`` is converted to boolean.
:returns: The created document.
:rtype: :class:`Document`
:raises ecoxipy.XMLWellFormednessException: If ``doctype_name`` is not
a valid XML name, ``doctype_publicid`` is not a valid public ID or
``doctype_systemid`` is not a valid system ID.
'''
doctype_name = kargs.get('doctype_name', None)
doctype_publicid = kargs.get('doctype_publicid', None)
doctype_systemid = kargs.get('doctype_systemid', None)
doctype_name, doctype_publicid, doctype_systemid = DocumentType._parse_values(
doctype_name, doctype_publicid, doctype_systemid)
omit_xml_declaration = kargs.get('omit_xml_declaration', None)
omit_xml_declaration = bool(omit_xml_declaration)
encoding = kargs.get('encoding', None)
if encoding is not None:
encoding = _unicode(encoding)
return Document(doctype_name, doctype_publicid, doctype_systemid,
[
child if isinstance(child, XMLNode) else Text.create(child)
for child in children
], omit_xml_declaration, encoding, True)
@property
def doctype(self):
'''\
The :class:`DocumentType` instance of the document.
On setting one of the following occurs:
1. If the value is :const:`None`, the document type's attributes are
set to :const:`None`.
2. If the value is a byte or Unicode string, the document type
document element name is set to this value (a byte string will be
converted to Unicode). The document type public and system IDs
will be set to :const:`None`.
3. If the value is a mapping, the items identified by the strings
``'name'``, ``'publicid'`` or ``'systemid'`` define the respective
attributes of the document type, the others are assumed to be
:const:`None`.
4. If the value is a sequence, the item at position zero defines the
document type document element name, the item at position one
defines the public ID and the item at position two defines the
system ID. If the sequence is shorter than three, non-available
items are assumed to be :const:`None`.
The document type values are converted to appropriate values and their
validity is checked if ``check_well_formedness`` is :const:`True`.
Example:
>>> doc = Document.create()
>>> doc.doctype
ecoxipy.pyxom.DocumentType(None, None, None)
>>> doc.doctype = {'name': 'test', 'systemid': 'foo bar'}
>>> doc.doctype
ecoxipy.pyxom.DocumentType('test', None, 'foo bar')
>>> doc.doctype = ('html', 'foo bar')
>>> doc.doctype
ecoxipy.pyxom.DocumentType('html', 'foo bar', None)
>>> doc.doctype = 'foo'
>>> doc.doctype
ecoxipy.pyxom.DocumentType('foo', None, None)
>>> doc.doctype = None
>>> doc.doctype
ecoxipy.pyxom.DocumentType(None, None, None)
'''
return self._doctype
@doctype.setter
def doctype(self, value):
if value is None:
name = None
publicid = None
systemid = None
else:
if value.__class__ is bytes:
value = _unicode(value)
try:
name = value.get('name', None)
publicid = value.get('publicid', None)
systemid = value.get('systemid', None)
except AttributeError:
if value.__class__ is _unicode:
name = value
publicid = None
systemid = None
else:
if len(value) > 2:
systemid = value[2]
else:
systemid = None
if len(value) > 1:
publicid = value[1]
else:
publicid = None
if len(value) > 0:
name = value[0]
else:
name = None
name, publicid, systemid = DocumentType._parse_values(
name, publicid, systemid)
self._doctype.name = name
self._doctype.publicid = publicid
self._doctype.systemid = systemid
@property
def omit_xml_declaration(self):
'''\
If :const:`True` the XML declaration is omitted.
'''
return self._omit_xml_declaration
@omit_xml_declaration.setter
def omit_xml_declaration(self, value):
self._omit_xml_declaration = bool(value)
@property
def encoding(self):
'''\
The encoding of the document. On setting if the value is
        :const:`None` it is set to ``UTF-8``, otherwise it is converted to a
Unicode string.
'''
return self._encoding
@encoding.setter
def encoding(self, value):
if value is None:
value = u'UTF-8'
else:
value = _unicode(value)
self._encoding = value
def __bytes__(self):
'''\
Creates a byte string containing the XML representation of the
node with the encoding :meth:`encoding`.
'''
return self.create_str(encoding=self._encoding)
if _python2:
__str__ = __bytes__
del __bytes__
def __hash__(self):
return object.__hash__(self)
@_helpers.inherit_docstring(ContainerNode)
def create_sax_events(self, content_handler=None, out=None,
out_encoding='UTF-8', indent_incr=None):
return XMLNode.create_sax_events(self, content_handler, out,
self._encoding, indent_incr)
def _create_str(self, out):
return out.document(self._doctype.name, self._doctype.publicid,
self._doctype.systemid, self._children_strings(out),
self._omit_xml_declaration, self._encoding)
def _create_sax_events(self, content_handler, indent):
content_handler.startDocument()
try:
notationDecl = content_handler.notationDecl
except AttributeError:
pass
else:
notationDecl(self._doctype.name, self._doctype.publicid,
self._doctype.systemid)
for child in self:
child._create_sax_events(content_handler, indent)
content_handler.endDocument()
def __repr__(self):
return 'ecoxipy.pyxom.Document[{}, {}, {}]'.format(
repr(self._doctype),
repr(self._omit_xml_declaration),
_string_repr(self._encoding))
def __eq__(self, other):
if not(isinstance(other, Document)
and self._doctype == other._doctype
and self._omit_xml_declaration == other._omit_xml_declaration
and self._encoding == other._encoding
and len(self) == len(other)):
return False
for i in range(len(self)):
if self[i] != other[i]:
return False
return True
def __ne__(self, other):
if (not(isinstance(other, Document))
or self._doctype != other._doctype
or self._omit_xml_declaration != other._omit_xml_declaration
or self._encoding != other._encoding
or len(self) != len(other)):
return True
for i in range(len(self)):
if self[i] != other[i]:
return True
return False
@_helpers.inherit_docstring(ContainerNode)
def duplicate(self):
return Document(self._doctype.name, self._doctype.publicid,
self._doctype.systemid,
[child.duplicate() for child in self],
self._omit_xml_declaration, self._encoding)
element_by_id = IndexDescriptor(ElementByUniqueAttributeValueIndexer())
'''\
A :class:`ecoxipy.pyxom.indexing.IndexDescriptor` instance using a
:class:`ecoxipy.pyxom.indexing.ElementByUniqueAttributeValueIndexer`
for indexing.
Use it like a mapping to retrieve the element having an attribute ``id``
with the value being equal to the requested key, possibly throwing a
:class:`KeyError` if such an element does not exist.
    **Important:** If the document's children are relevantly modified (i.e. an
``id`` attribute was created, modified or deleted), :meth:`delete_indexes`
should be called or this attribute should be deleted on the instance,
which deletes the index.
'''
elements_by_name = IndexDescriptor(ElementsByNameIndexer())
'''\
A :class:`ecoxipy.pyxom.indexing.IndexDescriptor` instance using a
:class:`ecoxipy.pyxom.indexing.ElementsByNameIndexer` for indexing.
Use it like a mapping to retrieve an iterator over elements having a name
equal to the requested key, possibly throwing a :class:`KeyError` if such
an element does not exist.
    **Important:** If the document's children are relevantly modified (i.e. new
elements were added or deleted, elements' names were modified),
:meth:`delete_indexes` should be called or this attribute should be
deleted on the instance, which deletes the index.
'''
nodes_by_namespace = IndexDescriptor(NamespaceIndexer())
'''\
A :class:`ecoxipy.pyxom.indexing.IndexDescriptor` instance using a
:class:`ecoxipy.pyxom.indexing.NamespaceIndexer` for indexing.
    **Important:** If the document's children are relevantly modified (i.e. new
elements/attributes were added or deleted, elements'/attributes' names
were modified), :meth:`delete_indexes` should be called or this attribute
should be deleted on the instance, which deletes the index.
'''
def delete_indexes(self):
'''\
A shortcut to delete the indexes of :attr:`element_by_id` and
:attr:`elements_by_name`.
'''
del self.element_by_id
del self.elements_by_name
del self.nodes_by_namespace
del (IndexDescriptor, ElementByUniqueAttributeValueIndexer,
ElementsByNameIndexer, NamespaceIndexer)
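if __name__ == '__main__':
    # Hedged usage sketch (editor's addition): exercises Document.create and the
    # doctype setter documented above; the values shown in the comments are
    # illustrative, not taken from the project's own docs.
    doc = Document.create('hello world', doctype_name='html',
                          omit_xml_declaration=True)
    print(repr(doc.doctype))   # e.g. ecoxipy.pyxom.DocumentType('html', None, None)
    doc.doctype = ('html', '-//W3C//DTD XHTML 1.0 Strict//EN')
    print(doc.encoding)        # UTF-8 (the default when no encoding is given)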
|
mit
| -2,128,559,108,914,202,400
| 37.534483
| 86
| 0.606578
| false
| 4.264011
| false
| false
| false
|
status-im/status-react
|
test/appium/tests/conftest.py
|
1
|
13196
|
import requests
import pytest
import re
from _pytest.runner import runtestprotocol
from http.client import RemoteDisconnected
from support.device_stats_db import DeviceStatsDB
from support.test_rerun import should_rerun_test
from tests import test_suite_data, appium_container
from datetime import datetime
from os import environ
from io import BytesIO
from sauceclient import SauceClient, SauceException
from support.api.network_api import NetworkApi
from support.github_report import GithubHtmlReport
from support.testrail_report import TestrailReport
from tests.users import transaction_senders
import tests
sauce_username = environ.get('SAUCE_USERNAME')
sauce_access_key = environ.get('SAUCE_ACCESS_KEY')
github_token = environ.get('GIT_HUB_TOKEN')
sauce = SauceClient(sauce_username, sauce_access_key)
github_report = GithubHtmlReport()
testrail_report = TestrailReport()
def pytest_addoption(parser):
parser.addoption("--build",
action="store",
default=datetime.now().strftime('%Y-%m-%d-%H-%M'),
help="Specify build name")
parser.addoption('--apk',
action='store',
default=None,
help='Url or local path to apk')
parser.addoption('--env',
action='store',
default='sauce',
help='Specify environment: local/sauce/api')
parser.addoption('--platform_version',
action='store',
default='8.0',
help='Android device platform version')
parser.addoption('--log_steps',
action='store',
default=False,
help='Display each test step in terminal as plain text: True/False')
parser.addoption('--pr_number',
action='store',
default=None,
help='Pull Request number')
parser.addoption('--testrail_report',
action='store',
default=False,
help='boolean; For creating testrail report per run')
parser.addoption('--network',
action='store',
default='ropsten',
help='string; ropsten or rinkeby')
parser.addoption('--rerun_count',
action='store',
default=0,
help='How many times tests should be re-run if failed')
parser.addoption("--run_testrail_ids",
action="store",
metavar="NAME",
default=None,
help="only run tests matching the environment NAME.")
parser.addoption("--apk_upgrade",
action="store",
metavar="NAME",
default=None,
help='Url or local path to apk for upgrade')
# chat bot
parser.addoption('--messages_number',
action='store',
default=20,
help='Messages number')
parser.addoption('--public_keys',
action='store',
default='',
help='List of public keys for one-to-one chats')
parser.addoption('--running_time',
action='store',
default=600,
help='Running time in seconds')
parser.addoption('--chat_name',
action='store',
default='test_chat',
help='Public chat name')
    parser.addoption('--device_number',
                     action='store',
                     default=2,
                     help='Number of devices to run the chat bot on')
# running tests using appium docker instance
parser.addoption('--docker',
action='store',
default=False,
help='Are you using the appium docker container to run the tests?')
parser.addoption('--docker_shared_volume',
action='store',
default=None,
help='Path to a directory with .apk that will be shared with docker instance. Test reports will be also saved there')
parser.addoption('--device_ip',
action='store',
default=None,
help='Android device IP address used for battery tests')
parser.addoption('--bugreport',
action='store',
default=False,
help='Should generate bugreport for each test?')
parser.addoption('--stats_db_host',
action='store',
default=None,
help='Host address for device stats database')
parser.addoption('--stats_db_port',
action='store',
default=8086,
help='Port for device stats db')
parser.addoption('--stats_db_username',
action='store',
default=None,
help='Username for device stats db')
parser.addoption('--stats_db_password',
action='store',
default=None,
help='Password for device stats db')
parser.addoption('--stats_db_database',
action='store',
default='example9',
help='Database name for device stats db')
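# Hedged example invocation (editor's addition; the apk URL, PR number and test
# path are placeholders, not taken from the project docs), combining options
# registered above:
#
#   python3 -m pytest tests/ --env=sauce --apk=https://example.org/StatusIm.apk \
#       --pr_number=12345 --testrail_report=True --rerun_count=2 --network=ropsten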
def is_master(config):
return not hasattr(config, 'workerinput')
def is_uploaded():
stored_files = sauce.storage.get_stored_files()
for i in range(len(stored_files['files'])):
if stored_files['files'][i]['name'] == test_suite_data.apk_name:
return True
def pytest_configure(config):
tests.pytest_config_global = vars(config.option)
config.addinivalue_line("markers", "testrail_id(name): empty")
if config.getoption('log_steps'):
import logging
logging.basicConfig(level=logging.INFO)
if config.getoption('env') != 'api':
        test_suite_data.apk_name = [i for i in config.getoption('apk').split('/')
                                    if '.apk' in i][0]
if is_master(config):
pr_number = config.getoption('pr_number')
if config.getoption('testrail_report'):
if pr_number:
run_number = len(testrail_report.get_runs(pr_number)) + 1
run_name = 'PR-%s run #%s' % (pr_number, run_number)
else:
run_name = test_suite_data.apk_name
testrail_report.add_run(run_name)
if pr_number:
from github import Github
repo = Github(github_token).get_user('status-im').get_repo('status-react')
pull = repo.get_pull(int(pr_number))
pull.get_commits()[0].create_status(state='pending', context='Mobile e2e tests',
description='e2e tests are running')
if config.getoption('env') == 'sauce':
if not is_uploaded():
if 'http' in config.getoption('apk'):
response = requests.get(config.getoption('apk'), stream=True)
response.raise_for_status()
file = BytesIO(response.content)
del response
requests.post('http://saucelabs.com/rest/v1/storage/'
+ sauce_username + '/' + test_suite_data.apk_name + '?overwrite=true',
auth=(sauce_username, sauce_access_key),
data=file,
headers={'Content-Type': 'application/octet-stream'})
else:
sauce.storage.upload_file(config.getoption('apk'))
def pytest_unconfigure(config):
if is_master(config):
if config.getoption('testrail_report'):
testrail_report.add_results()
if config.getoption('pr_number'):
from github import Github
repo = Github(github_token).get_user('status-im').get_repo('status-react')
pull = repo.get_pull(int(config.getoption('pr_number')))
comment = pull.create_issue_comment(github_report.build_html_report(testrail_report.run_id))
if not testrail_report.is_run_successful():
pull.get_commits()[0].create_status(state='failure', context='Mobile e2e tests',
description='Failure - e2e tests are failed',
target_url=comment.html_url)
else:
pull.get_commits()[0].create_status(state='success', context='Mobile e2e tests',
description='Success - e2e tests are passed',
target_url=comment.html_url)
def should_save_device_stats(config):
db_args = [config.getoption(option) for option in
('stats_db_host', 'stats_db_port', 'stats_db_username', 'stats_db_password', 'stats_db_database')]
return all(db_args)
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item, call):
outcome = yield
report = outcome.get_result()
if report.when == 'call':
is_sauce_env = item.config.getoption('env') == 'sauce'
current_test = test_suite_data.current_test
if report.failed:
            error = report.longreprtext
            failure_pattern = 'E.*Message:|E.*Error:|E.*Failed:'
            exception = re.findall(failure_pattern, error)
            if exception:
                error = error.replace(exception[0], '')
current_test.testruns[-1].error = error
if is_sauce_env:
update_sauce_jobs(current_test.name, current_test.testruns[-1].jobs, report.passed)
if item.config.getoption('docker'):
device_stats = appium_container.get_device_stats()
if item.config.getoption('bugreport'):
appium_container.generate_bugreport(item.name)
build_name = item.config.getoption('apk')
# Find type of tests that are run on the device
if 'battery_consumption' in item.keywords._markers:
test_group = 'battery_consumption'
else:
test_group = None
if should_save_device_stats(item.config):
device_stats_db = DeviceStatsDB(
item.config.getoption('stats_db_host'),
item.config.getoption('stats_db_port'),
item.config.getoption('stats_db_username'),
item.config.getoption('stats_db_password'),
item.config.getoption('stats_db_database'),
)
device_stats_db.save_stats(build_name, item.name, test_group, not report.failed, device_stats)
def update_sauce_jobs(test_name, job_ids, passed):
for job_id in job_ids.keys():
try:
sauce.jobs.update_job(job_id, name=test_name, passed=passed)
except (RemoteDisconnected, SauceException):
pass
def get_testrail_case_id(item):
testrail_id = item.get_closest_marker('testrail_id')
if testrail_id:
return testrail_id.args[0]
def pytest_runtest_setup(item):
    testrail_id = None
    try:
        testrail_id = [mark.args[0] for mark in item.iter_markers(name='testrail_id')][0]
    except IndexError:
        pass
run_testrail_ids = item.config.getoption("run_testrail_ids")
if run_testrail_ids:
if str(testrail_id) not in list(run_testrail_ids.split(",")):
pytest.skip("test requires testrail case id %s" % testrail_id)
test_suite_data.set_current_test(item.name, testrail_case_id=get_testrail_case_id(item))
test_suite_data.current_test.create_new_testrun()
def pytest_runtest_protocol(item, nextitem):
rerun_count = int(item.config.getoption('rerun_count'))
for i in range(rerun_count):
reports = runtestprotocol(item, nextitem=nextitem)
for report in reports:
if report.failed and should_rerun_test(report.longreprtext):
break # rerun
else:
return True # no need to rerun
@pytest.fixture(scope="session", autouse=False)
def faucet_for_senders():
network_api = NetworkApi()
for user in transaction_senders.values():
network_api.faucet(address=user['address'])
@pytest.fixture
def messages_number(request):
return int(request.config.getoption('messages_number'))
@pytest.fixture
def message_wait_time(request):
return int(request.config.getoption('message_wait_time'))
@pytest.fixture
def participants_number(request):
return int(request.config.getoption('participants_number'))
@pytest.fixture
def chat_name(request):
return request.config.getoption('chat_name')
@pytest.fixture
def user_public_key(request):
return request.config.getoption('user_public_key')
|
mpl-2.0
| 520,488,454,995,145,340
| 40.2375
| 138
| 0.554032
| false
| 4.266408
| true
| false
| false
|
Yangqing/caffe2
|
caffe2/python/layers/fc.py
|
1
|
3042
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package fc
# Module caffe2.python.layers.fc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FC(SamplingTrainableMixin, ModelLayer):
def __init__(self, model, input_record, output_dims, weight_init=None,
bias_init=None, weight_optim=None, bias_optim=None, name='fc',
weight_reg=None, bias_reg=None, **kwargs):
super(FC, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type {}".format(input_record))
assert len(input_record.field_types()[0].shape) > 0, (
"FC expects limited dimensions of the input tensor")
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FC expects input dimensions > 0, got {}".format(input_dims))
scale = math.sqrt(1.0 / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg)
self.b = self.create_param(param_name='b',
shape=[output_dims, ],
initializer=bias_init,
optimizer=bias_optim,
regularizer=bias_reg)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
self.get_next_blob_reference('output')
)
def _add_ops(self, net, params):
net.FC(self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), **self.kwargs)
@property
def param_blobs(self):
return [self.w, self.b]
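# Hedged usage sketch (editor's addition). It assumes `model` is the layer-model
# helper this layer family is normally attached to and that `input_record` is a
# schema.Scalar carrying the input dimension, as the asserts in __init__ require;
# the blob name and sizes are placeholders, shown as comments only.
#
#   input_record = schema.Scalar((np.float32, (64, )), 'input_blob')
#   fc = FC(model, input_record, output_dims=16)
#   # fc.output_schema is a schema.Scalar of shape (16, );
#   # fc.param_blobs == [fc.w, fc.b]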
|
apache-2.0
| -1,604,963,854,280,311,300
| 40.108108
| 80
| 0.589415
| false
| 4.184319
| false
| false
| false
|
janpipek/boadata
|
boadata/gui/qt/views/histogram_view.py
|
1
|
1199
|
from .view import View
from ..backends.matplotlib import MatplotlibBackend
from boadata import unwrap
import seaborn as sns
# @View.register_view
class HistogramView(View):
    @classmethod
    def accepts(cls, data_object):
return True
def create_widget(self, parent=None, xcol=None, bins=50, **kwargs):
if xcol is not None:
try:
data = self.data_object.evaluate(xcol)
            except Exception:
data = self.data_object[xcol]
else:
data = self.data_object
xcol = "x"
data = unwrap(data.dropna().convert("numpy_array"))
widget, fig = MatplotlibBackend.create_figure_widget(parent=parent)
fig.add_subplot(111)
ax = fig.get_axes()
extra_args = {}
if not kwargs.get("hist"):
extra_args["kde_kws"] = {"shade": True}
sns.distplot(data, hist=kwargs.get("hist", False), kde=kwargs.get("kde", False),
bins=bins, rug=kwargs.get("rug", False), ax=ax[0], **extra_args)
xlabel = kwargs.get("xlabel", xcol)
ax[0].set_xlabel(xlabel)
if "title" in kwargs:
ax[0].set_title(kwargs["title"])
return widget
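# Hedged usage sketch (editor's addition). It assumes the View base class keeps
# the wrapped boadata object as `self.data_object`, as create_widget() relies on,
# and that `data_object` exposes a numeric column named "x"; all names are
# placeholders, shown as comments only.
#
#   view = HistogramView(data_object)
#   widget = view.create_widget(parent=None, xcol="x", bins=30,
#                               kde=True, rug=False, title="x distribution")
#   widget.show()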
|
mit
| -1,467,702,614,424,234,500
| 30.578947
| 88
| 0.57548
| false
| 3.611446
| false
| false
| false
|
mycointest/owncoin
|
share/seeds/generate-seeds.py
|
1
|
4297
|
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the owncoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9887)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19887)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
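# Hedged sanity-check notes (editor's addition), kept as comments so the script's
# behaviour is unchanged; expected values follow from the helpers above:
#
#   name_to_ipv6('1.2.3.4')                 -> pchIPv4 + bytearray([1, 2, 3, 4])
#   parse_spec('1.2.3.4', 9887)             -> (<ipv4-mapped ipv6 bytes>, 9887)
#   parse_spec('[2001:db8::1]:8333', 9887)  -> (<16-byte ipv6 address>, 8333)
#
# Typical invocation (directory path is a placeholder):
#   python generate-seeds.py share/seeds > src/chainparamsseeds.h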
|
mit
| 6,910,685,491,095,861,000
| 30.82963
| 98
| 0.575983
| false
| 3.175905
| false
| false
| false
|
fhqgfss/MoHa
|
moha/posthf/ci/configuration.py
|
1
|
1441
|
import numpy as np
import copy
import itertools
from moha.system.basis import SlaterDeterminant,NElectronBasisSet
class Configuration(object):
"""
"""
def __init__(self):
"""
"""
pass
@classmethod
def truncated(cls,hfwavefunction,excitation_level):
"""
excitation_level : CISD -> [1,2]
CID -> [2]
"""
Nelec = hfwavefunction.occ['alpha'] + hfwavefunction.occ['beta']
Dim = hfwavefunction.dim*2
reference = copy.deepcopy(hfwavefunction.configuration)
basis_set = NElectronBasisSet(1,[reference])
#basis_set = NElectronBasisSet()
for el in excitation_level:
for o_list in itertools.combinations(range(Nelec),el):
for u_list in itertools.combinations(range(Nelec,Dim),el):
reference = copy.deepcopy(hfwavefunction.configuration)
for o in o_list:
if o%2==0:
reference['alpha'][o//2] -= 1
elif o%2==1:
reference['beta'][o//2] -= 1
for u in u_list:
if u%2==0:
reference['alpha'][u//2] += 1
elif u%2==1:
reference['beta'][u//2] += 1
basis_set.add(reference)
return basis_set
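if __name__ == '__main__':
    # Hedged illustration (editor's addition): count the (occupied, virtual) index
    # tuples that truncated() iterates over for a CISD-style expansion, using toy
    # numbers instead of a real hfwavefunction object.
    nelec, dim = 4, 8   # assumed: 4 occupied spin orbitals out of 8 in total
    for level in [1, 2]:
        count = sum(1 for _ in itertools.product(
            itertools.combinations(range(nelec), level),
            itertools.combinations(range(nelec, dim), level)))
        print('excitation level %d -> %d excited determinants' % (level, count))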
|
mit
| -5,836,256,464,177,644,000
| 32.511628
| 75
| 0.485774
| false
| 4.225806
| false
| false
| false
|