| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""
Build extension modules, package and install Fatiando.
"""
import sys
import os
from setuptools import setup, Extension, find_packages
import numpy
# Get the version number and setup versioneer
import versioneer
versioneer.VCS = 'git'
versioneer.versionfile_source = 'fatiando/_version.py'
versioneer.versionfile_build = 'fatiando/_version.py'
versioneer.tag_prefix = 'v'
versioneer.parentdir_prefix = '.'
NAME = 'fatiando'
FULLNAME = 'Fatiando a Terra'
DESCRIPTION = "Modeling and inversion for geophysics"
AUTHOR = "Leonardo Uieda"
AUTHOR_EMAIL = 'leouieda@gmail.com'
MAINTAINER = AUTHOR
MAINTAINER_EMAIL = AUTHOR_EMAIL
VERSION = versioneer.get_version()
CMDCLASS = versioneer.get_cmdclass()
with open("README.rst") as f:
LONG_DESCRIPTION = ''.join(f.readlines())
PACKAGES = find_packages(exclude=['doc', 'ci', 'cookbook', 'gallery'])
LICENSE = "BSD 3-clause"
URL = "http://www.fatiando.org"
PLATFORMS = "Any"
SCRIPTS = []
PACKAGE_DATA = {'fatiando': [os.path.join('data', '*')]}
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Topic :: Scientific/Engineering",
"Topic :: Software Development :: Libraries",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: {}".format(LICENSE),
]
KEYWORDS = 'geophysics modeling inversion gravimetry seismic magnetometry'
# If running setup.py with --cython, then set things up to generate the Cython
# .c files. If not, then compile the pre-converted C files.
USE_CYTHON = '--cython' in sys.argv
ext = '.pyx' if USE_CYTHON else '.c'
libs = []
if os.name == 'posix':
libs.append('m')
C_EXT = [[['fatiando', 'seismic', '_ttime2d'], {}],
[['fatiando', 'seismic', '_wavefd'], {}],
[['fatiando', 'gravmag', '_polyprism'], {}],
[['fatiando', 'gravmag', '_sphere'], {}],
[['fatiando', 'gravmag', '_prism'], {}],
]
extensions = []
for e, extra_args in C_EXT:
extensions.append(
Extension('.'.join(e), [os.path.join(*e) + ext],
libraries=libs,
include_dirs=[numpy.get_include()],
**extra_args))
if USE_CYTHON:
sys.argv.remove('--cython')
from Cython.Build import cythonize
extensions = cythonize(extensions)
if __name__ == '__main__':
setup(name=NAME,
fullname=FULLNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
license=LICENSE,
url=URL,
platforms=PLATFORMS,
scripts=SCRIPTS,
packages=PACKAGES,
ext_modules=extensions,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
cmdclass=CMDCLASS)
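# Usage sketch (hypothetical commands, not part of the original file): the
# custom '--cython' switch regenerates the C sources from the .pyx files
# before building; without it, the pre-converted .c files are compiled.
#
#   python setup.py build_ext --inplace --cython   # regenerate C via Cython
#   python setup.py build_ext --inplace            # use the shipped .c files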
|
mtb-za/fatiando
|
setup.py
|
Python
|
bsd-3-clause
| 2,945
|
# -*- coding: utf-8 -*-
from django.utils import baseconv
from django.template.defaultfilters import slugify
import time
def slugify_uniquely(value, model, slugfield="slug"):
"""
    Returns a slug based on the given value that is unique within the model's table
"""
suffix = 0
potential = base = slugify(value)
if len(potential) == 0:
potential = 'null'
while True:
if suffix:
potential = "-".join([base, str(suffix)])
if not model.objects.filter(**{slugfield: potential}).count():
return potential
suffix += 1
def ref_uniquely(project, model, field='ref'):
"""
    Returns a unique reference code based on base62 and time.
"""
# this prevents concurrent and inconsistent references.
time.sleep(0.001)
new_timestamp = lambda: int("".join(str(time.time()).split(".")))
while True:
potential = baseconv.base62.encode(new_timestamp())
params = {field: potential, 'project': project}
if not model.objects.filter(**params).exists():
return potential
time.sleep(0.0002)
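# Usage sketch, assuming a hypothetical Django model 'Post' with a 'slug'
# CharField and a hypothetical 'Ticket' model with 'ref' and 'project' fields:
#
#   slug = slugify_uniquely("My Post", Post)   # "my-post", "my-post-1", ...
#   code = ref_uniquely(project, Ticket)       # base62 code, unique per project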
|
niwinz/Green-Mine
|
src/greenmine/core/utils/slug.py
|
Python
|
bsd-3-clause
| 1,106
|
#
# An attempt at re-implementing LZJB compression in native Python.
#
# Created in May 2014 by Emil Brink <emil@obsession.se>. See LICENSE.
#
# ---------------------------------------------------------------------
#
# Copyright (c) 2014-2016, Emil Brink
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided
# that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
BYTE_BITS = 8
MATCH_BITS = 6
MATCH_MIN = 3
MATCH_MAX = (1 << MATCH_BITS) + (MATCH_MIN - 1)
MATCH_RANGE = range(MATCH_MIN, MATCH_MAX + 1) # Length 64, fine on 2.x.
OFFSET_MASK = (1 << (16 - MATCH_BITS)) - 1
LEMPEL_SIZE = 1024
def size_encode(size, dst=None):
"""
Encodes the given size in little-endian variable-length encoding.
The dst argument can be an existing bytearray to append the size. If it's
omitted (or None), a new bytearray is created and used.
Returns the destination bytearray.
"""
if dst is None:
dst = bytearray()
done = False
while not done:
dst.append(size & 0x7f)
size >>= 7
done = size == 0
dst[-1] |= 0x80
return dst
def size_decode(src):
"""
Decodes a size (encoded with size_encode()) from the start of src.
Returns a tuple (size, len) where size is the size that was decoded,
and len is the number of bytes from src that were consumed.
"""
dst_size = 0
pos = 0
# Extract prefixed encoded size, if present.
val = 1
while True:
c = src[pos]
pos += 1
if c & 0x80:
dst_size += val * (c & 0x7f)
break
dst_size += val * c
val <<= 7
return dst_size, pos
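# Worked example of the size encoding (7 data bits per byte, little-endian,
# high bit set on the final byte):
#
#   size_encode(300)                         # -> bytearray(b'\x2c\x82'), since 300 = 44 + 2 * 128
#   size_decode(bytearray(b'\x2c\x82'))      # -> (300, 2)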
def lzjb_compress(src, dst=None):
"""
Compresses src, the source bytearray.
If dst is not None, it's assumed to be the output bytearray and bytes are appended to it using dst.append().
If it is None, a new bytearray is created.
The destination bytearray is returned.
"""
if dst is None:
dst = bytearray()
lempel = [0] * LEMPEL_SIZE
copymap = 0
copymask = 1 << (BYTE_BITS - 1)
pos = 0 # Current input offset.
while pos < len(src):
copymask <<= 1
if copymask == (1 << BYTE_BITS):
copymask = 1
copymap = len(dst)
dst.append(0)
if pos > len(src) - MATCH_MAX:
dst.append(src[pos])
pos += 1
continue
hsh = (src[pos] << 16) + (src[pos + 1] << 8) + src[pos + 2]
hsh += hsh >> 9
hsh += hsh >> 5
hsh &= LEMPEL_SIZE - 1
offset = (pos - lempel[hsh]) & OFFSET_MASK
lempel[hsh] = pos
cpy = pos - offset
if cpy >= 0 and cpy != pos and src[pos:pos + 3] == src[cpy:cpy + 3]:
dst[copymap] |= copymask
for mlen in MATCH_RANGE:
if src[pos + mlen] != src[cpy + mlen]:
break
dst.append(((mlen - MATCH_MIN) << (BYTE_BITS - MATCH_BITS)) | (offset >> BYTE_BITS))
dst.append(offset & 255)
pos += mlen
else:
dst.append(src[pos])
pos += 1
return dst
def lzjb_decompress(src, dlen, dst=None):
"""
Decompresses src, a bytearray of compressed data.
The dst argument can be an optional bytearray which will have the output appended.
If it's None, a new bytearray is created.
The output bytearray is returned.
"""
if dst is None:
dst = bytearray()
pos = 0
dpos = 0
copymap = 0
copymask = 1 << (BYTE_BITS - 1)
while pos < len(src):
copymask <<= 1
if copymask == (1 << BYTE_BITS):
copymask = 1
copymap = src[pos]
pos += 1
if copymap & copymask:
mlen = (src[pos] >> (BYTE_BITS - MATCH_BITS)) + MATCH_MIN
offset = ((src[pos] << BYTE_BITS) | src[pos + 1]) & OFFSET_MASK
pos += 2
cpy = dpos - offset
if cpy < 0:
return None
while mlen > 0 and dpos < dlen:
dst.append(dst[cpy])
dpos += 1
cpy += 1
mlen -= 1
elif dpos < dlen:
dst.append(src[pos])
dpos += 1
pos += 1
return dst
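# Round-trip sketch: lzjb_decompress() needs the original length, so a simple
# container (an assumption here, not part of the format itself) can prepend it
# with size_encode():
#
#   data = bytearray(b"abcabcabcabcabcabc")
#   packed = lzjb_compress(data, size_encode(len(data)))
#   dlen, used = size_decode(packed)
#   assert lzjb_decompress(packed[used:], dlen) == data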
|
hiliev/py-zfs-rescue
|
zfs/lzjb.py
|
Python
|
bsd-3-clause
| 5,428
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-12-27 09:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('heatcontrol', '0015_heatcontrol_profile_add_holes'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='end',
field=models.TimeField(blank=True, null=True),
),
]
|
rkojedzinszky/thermo-center
|
heatcontrol/migrations/0016_heatcontrol_profile_end_null.py
|
Python
|
bsd-3-clause
| 473
|
from __future__ import absolute_import
import hashlib
import jwt
from six.moves.urllib.parse import quote
from sentry.shared_integrations.exceptions import ApiError
def percent_encode(val):
# see https://en.wikipedia.org/wiki/Percent-encoding
return quote(val.encode("utf8", errors="replace")).replace("%7E", "~").replace("/", "%2F")
def get_query_hash(uri, method, query_params=None):
# see
# https://developer.atlassian.com/static/connect/docs/latest/concepts/understanding-jwt.html#qsh
uri = uri.rstrip("/")
method = method.upper()
if query_params is None:
query_params = {}
sorted_query = []
for k, v in sorted(query_params.items()):
# don't include jwt query param
if k != "jwt":
if isinstance(v, list):
                param_val = ",".join([percent_encode(val) for val in v])
else:
param_val = percent_encode(v)
sorted_query.append("%s=%s" % (percent_encode(k), param_val))
query_string = "%s&%s&%s" % (method, uri, "&".join(sorted_query))
return hashlib.sha256(query_string.encode("utf8")).hexdigest()
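# Illustration with hypothetical values: for uri="/rest/api/issue/",
# method="get" and query_params={"fields": "summary"}, the canonical string is
#
#   "GET&/rest/api/issue&fields=summary"
#
# and the returned qsh is its SHA-256 hex digest.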
def get_jira_auth_from_request(request):
# https://developer.atlassian.com/static/connect/docs/latest/concepts/authentication.html
# Extract the JWT token from the request's jwt query
# parameter or the authorization header.
token = request.GET.get("jwt")
if token is None:
raise ApiError("No token parameter")
# Decode the JWT token, without verification. This gives
# you a header JSON object, a claims JSON object, and a signature.
decoded = jwt.decode(token, verify=False)
# Extract the issuer ('iss') claim from the decoded, unverified
# claims object. This is the clientKey for the tenant - an identifier
# for the Atlassian application making the call
issuer = decoded["iss"]
# Look up the sharedSecret for the clientKey, as stored
# by the add-on during the installation handshake
from sentry_plugins.jira_ac.models import JiraTenant
jira_auth = JiraTenant.objects.get(client_key=issuer)
# Verify the signature with the sharedSecret and
# the algorithm specified in the header's alg field.
decoded_verified = jwt.decode(token, jira_auth.secret)
    # Verify the query has not been tampered with, by creating a query hash
    # and comparing it against the qsh claim on the verified token.
# TODO: probably shouldn't need to hardcode get... for post maybe
# the secret should just be a hidden field in the form ?
qsh = get_query_hash(request.path, "GET", request.GET)
# qsh = get_query_hash(request.path, request.method, request.GET)
if qsh != decoded_verified["qsh"]:
raise ApiError("Query hash mismatch")
return jira_auth
|
beeftornado/sentry
|
src/sentry_plugins/jira_ac/utils.py
|
Python
|
bsd-3-clause
| 2,784
|
"""
Clickjacking Protection Middleware.
This module provides a middleware that implements protection against a
malicious site loading resources from your site in a hidden frame.
"""
from django.conf import settings
class XFrameOptionsMiddleware(object):
"""
Middleware that sets the X-Frame-Options HTTP header in HTTP responses.
Does not set the header if it's already set or if the response contains
a xframe_options_exempt value set to True.
By default, sets the X-Frame-Options header to 'SAMEORIGIN', meaning the
response can only be loaded on a frame within the same site. To prevent the
response from being loaded in a frame in any site, set X_FRAME_OPTIONS in
    your project's Django settings to 'DENY'.
Note: older browsers will quietly ignore this header, thus other
clickjacking protection techniques should be used if protection in those
browsers is required.
https://en.wikipedia.org/wiki/Clickjacking#Server_and_client
"""
def process_response(self, request, response):
# Don't set it if it's already in the response
if response.get('X-Frame-Options') is not None:
return response
# Don't set it if they used @xframe_options_exempt
if getattr(response, 'xframe_options_exempt', False):
return response
response['X-Frame-Options'] = self.get_xframe_options_value(request,
response)
return response
def get_xframe_options_value(self, request, response):
"""
        Gets the value to set for the X-Frame-Options header.
        By default this uses the value of the X_FRAME_OPTIONS Django setting.
        If not found in settings, defaults to 'SAMEORIGIN'.
This method can be overridden if needed, allowing it to vary based on
the request or response.
"""
return getattr(settings, 'X_FRAME_OPTIONS', 'SAMEORIGIN').upper()
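# Usage sketch with the standard Django hooks this middleware checks:
#
#   # settings.py
#   X_FRAME_OPTIONS = 'DENY'
#
#   # views.py: sets response.xframe_options_exempt = True for one view
#   from django.views.decorators.clickjacking import xframe_options_exempt
#
#   @xframe_options_exempt
#   def embeddable_widget(request):
#       ...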
|
bretlowery/snakr
|
lib/django/middleware/clickjacking.py
|
Python
|
bsd-3-clause
| 1,983
|
from django.conf.urls.defaults import url, patterns
urlpatterns = patterns('tutorial.views',
url(r'^$', 'tutorial', name='tutorial'),
)
|
mozilla/FlightDeck
|
apps/tutorial/urls.py
|
Python
|
bsd-3-clause
| 141
|
from __future__ import absolute_import
from __future__ import unicode_literals
from base64 import b64encode
from django_digest.test.methods import WWWAuthenticateError, BaseAuth
class BasicAuth(BaseAuth):
def authorization(self, request, response):
if response is not None:
challenges = self._authenticate_headers(response)
if 'Basic' not in challenges:
raise WWWAuthenticateError(
'Basic authentication unsupported for %s to %r.' %
(response.request['REQUEST_METHOD'],
response.request['PATH_INFO'])
)
return 'Basic %s' % b64encode((self.username + ':' + self.password).encode('utf-8')).decode('utf-8')
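# For example, username "user" with password "pass" yields the header value
# "Basic dXNlcjpwYXNz" (standard Basic scheme: base64 of "user:pass").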
|
dimagi/django-digest
|
django_digest/test/methods/basic.py
|
Python
|
bsd-3-clause
| 744
|
#(c) 2016 by Authors
#This file is a part of ABruijn program.
#Released under the BSD license (see LICENSE file)
"""
Runs polishing binary in parallel and concatenates output
"""
from __future__ import absolute_import
from __future__ import division
import logging
import subprocess
import os
from collections import defaultdict
from flye.polishing.alignment import (make_alignment, get_contigs_info,
merge_chunks, split_into_chunks)
from flye.utils.sam_parser import SynchronizedSamReader
from flye.polishing.bubbles import make_bubbles
import flye.utils.fasta_parser as fp
from flye.utils.utils import which
import flye.config.py_cfg as cfg
from flye.six import iteritems
from flye.six.moves import range
POLISH_BIN = "flye-modules"
logger = logging.getLogger()
class PolishException(Exception):
pass
def check_binaries():
if not which(POLISH_BIN):
raise PolishException("polishing binary was not found. "
"Did you run 'make'?")
try:
devnull = open(os.devnull, "w")
subprocess.check_call([POLISH_BIN, "polisher", "-h"], stderr=devnull)
except subprocess.CalledProcessError as e:
if e.returncode == -9:
logger.error("Looks like the system ran out of memory")
raise PolishException(str(e))
except OSError as e:
raise PolishException(str(e))
def polish(contig_seqs, read_seqs, work_dir, num_iters, num_threads, error_mode,
output_progress):
"""
High-level polisher interface
"""
logger_state = logger.disabled
if not output_progress:
logger.disabled = True
subs_matrix = os.path.join(cfg.vals["pkg_root"],
cfg.vals["err_modes"][error_mode]["subs_matrix"])
hopo_matrix = os.path.join(cfg.vals["pkg_root"],
cfg.vals["err_modes"][error_mode]["hopo_matrix"])
stats_file = os.path.join(work_dir, "contigs_stats.txt")
prev_assembly = contig_seqs
contig_lengths = None
coverage_stats = None
for i in range(num_iters):
logger.info("Polishing genome (%d/%d)", i + 1, num_iters)
#split into 1Mb chunks to reduce RAM usage
#slightly vary chunk size between iterations
CHUNK_SIZE = 1000000 - (i % 2) * 100000
chunks_file = os.path.join(work_dir, "chunks_{0}.fasta".format(i + 1))
chunks = split_into_chunks(fp.read_sequence_dict(prev_assembly),
CHUNK_SIZE)
fp.write_fasta_dict(chunks, chunks_file)
####
logger.info("Running minimap2")
alignment_file = os.path.join(work_dir, "minimap_{0}.bam".format(i + 1))
make_alignment(chunks_file, read_seqs, num_threads,
work_dir, error_mode, alignment_file,
reference_mode=True, sam_output=True)
#####
logger.info("Separating alignment into bubbles")
contigs_info = get_contigs_info(chunks_file)
bubbles_file = os.path.join(work_dir,
"bubbles_{0}.fasta".format(i + 1))
coverage_stats, mean_aln_error = \
make_bubbles(alignment_file, contigs_info, chunks_file,
error_mode, num_threads,
bubbles_file)
logger.info("Alignment error rate: %f", mean_aln_error)
consensus_out = os.path.join(work_dir, "consensus_{0}.fasta".format(i + 1))
polished_file = os.path.join(work_dir, "polished_{0}.fasta".format(i + 1))
if os.path.getsize(bubbles_file) == 0:
logger.info("No reads were aligned during polishing")
if not output_progress:
logger.disabled = logger_state
open(stats_file, "w").write("#seq_name\tlength\tcoverage\n")
open(polished_file, "w")
return polished_file, stats_file
#####
logger.info("Correcting bubbles")
_run_polish_bin(bubbles_file, subs_matrix, hopo_matrix,
consensus_out, num_threads, output_progress)
polished_fasta, polished_lengths = _compose_sequence(consensus_out)
merged_chunks = merge_chunks(polished_fasta)
fp.write_fasta_dict(merged_chunks, polished_file)
#Cleanup
os.remove(chunks_file)
os.remove(bubbles_file)
os.remove(consensus_out)
os.remove(alignment_file)
contig_lengths = polished_lengths
prev_assembly = polished_file
#merge information from chunks
contig_lengths = merge_chunks(contig_lengths, fold_function=sum)
coverage_stats = merge_chunks(coverage_stats,
fold_function=lambda l: sum(l) // len(l))
with open(stats_file, "w") as f:
f.write("#seq_name\tlength\tcoverage\n")
for ctg_id in contig_lengths:
f.write("{0}\t{1}\t{2}\n".format(ctg_id,
contig_lengths[ctg_id], coverage_stats[ctg_id]))
if not output_progress:
logger.disabled = logger_state
return prev_assembly, stats_file
def generate_polished_edges(edges_file, gfa_file, polished_contigs, work_dir,
error_mode, num_threads):
"""
Generate polished graph edges sequences by extracting them from
polished contigs
"""
logger.debug("Generating polished GFA")
alignment_file = os.path.join(work_dir, "edges_aln.bam")
polished_dict = fp.read_sequence_dict(polished_contigs)
make_alignment(polished_contigs, [edges_file], num_threads,
work_dir, error_mode, alignment_file,
reference_mode=True, sam_output=True)
aln_reader = SynchronizedSamReader(alignment_file,
polished_dict,
cfg.vals["max_read_coverage"])
aln_by_edge = defaultdict(list)
#getting one best alignment for each contig
while not aln_reader.is_eof():
_, ctg_aln = aln_reader.get_chunk()
for aln in ctg_aln:
aln_by_edge[aln.qry_id].append(aln)
aln_reader.close()
MIN_CONTAINMENT = 0.9
updated_seqs = 0
edges_dict = fp.read_sequence_dict(edges_file)
for edge in edges_dict:
if edge in aln_by_edge:
main_aln = aln_by_edge[edge][0]
map_start = main_aln.trg_start
map_end = main_aln.trg_end
for aln in aln_by_edge[edge]:
if aln.trg_id == main_aln.trg_id and aln.trg_sign == main_aln.trg_sign:
map_start = min(map_start, aln.trg_start)
map_end = max(map_end, aln.trg_end)
new_seq = polished_dict[main_aln.trg_id][map_start : map_end]
if main_aln.qry_sign == "-":
new_seq = fp.reverse_complement(new_seq)
#print edge, main_aln.qry_len, len(new_seq), main_aln.qry_start, main_aln.qry_end
if len(new_seq) / aln.qry_len > MIN_CONTAINMENT:
edges_dict[edge] = new_seq
updated_seqs += 1
    #writes fasta file with polished edges
#edges_polished = os.path.join(work_dir, "polished_edges.fasta")
#fp.write_fasta_dict(edges_dict, edges_polished)
#writes gfa file with polished edges
with open(os.path.join(work_dir, "polished_edges.gfa"), "w") as gfa_polished, \
open(gfa_file, "r") as gfa_in:
for line in gfa_in:
if line.startswith("S"):
seq_id = line.split()[1]
coverage_tag = line.split()[3]
gfa_polished.write("S\t{0}\t{1}\t{2}\n"
.format(seq_id, edges_dict[seq_id], coverage_tag))
else:
gfa_polished.write(line)
logger.debug("%d sequences remained unpolished",
len(edges_dict) - updated_seqs)
os.remove(alignment_file)
def filter_by_coverage(args, stats_in, contigs_in, stats_out, contigs_out):
"""
Filters out contigs with low coverage
"""
SUBASM_MIN_COVERAGE = 1
HARD_MIN_COVERAGE = cfg.vals["hard_minimum_coverage"]
RELATIVE_MIN_COVERAGE = cfg.vals["relative_minimum_coverage"]
ctg_stats = {}
sum_cov = 0
sum_length = 0
with open(stats_in, "r") as f:
for line in f:
if line.startswith("#"): continue
tokens = line.split("\t")
ctg_id, ctg_len, ctg_cov = tokens[0], int(tokens[1]), int(tokens[2])
ctg_stats[ctg_id] = (ctg_len, ctg_cov)
sum_cov += ctg_cov * ctg_len
sum_length += ctg_len
mean_coverage = int(sum_cov / sum_length)
coverage_threshold = None
if args.read_type == "subasm":
coverage_threshold = SUBASM_MIN_COVERAGE
elif args.meta:
coverage_threshold = HARD_MIN_COVERAGE
else:
coverage_threshold = int(round(mean_coverage /
RELATIVE_MIN_COVERAGE))
coverage_threshold = max(HARD_MIN_COVERAGE, coverage_threshold)
logger.debug("Mean contig coverage: %d, selected threshold: %d",
mean_coverage, coverage_threshold)
filtered_num = 0
filtered_seq = 0
good_fasta = {}
for hdr, seq in fp.stream_sequence(contigs_in):
if ctg_stats[hdr][1] >= coverage_threshold:
good_fasta[hdr] = seq
else:
filtered_num += 1
filtered_seq += ctg_stats[hdr][0]
logger.debug("Filtered %d contigs of total length %d",
filtered_num, filtered_seq)
fp.write_fasta_dict(good_fasta, contigs_out)
with open(stats_out, "w") as f:
f.write("#seq_name\tlength\tcoverage\n")
for ctg_id in good_fasta:
f.write("{0}\t{1}\t{2}\n".format(ctg_id,
ctg_stats[ctg_id][0], ctg_stats[ctg_id][1]))
def _run_polish_bin(bubbles_in, subs_matrix, hopo_matrix,
consensus_out, num_threads, output_progress):
"""
Invokes polishing binary
"""
cmdline = [POLISH_BIN, "polisher", "--bubbles", bubbles_in, "--subs-mat", subs_matrix,
"--hopo-mat", hopo_matrix, "--out", consensus_out,
"--threads", str(num_threads)]
if not output_progress:
cmdline.append("--quiet")
try:
subprocess.check_call(cmdline)
except subprocess.CalledProcessError as e:
if e.returncode == -9:
logger.error("Looks like the system ran out of memory")
raise PolishException(str(e))
except OSError as e:
raise PolishException(str(e))
def _compose_sequence(consensus_file):
"""
Concatenates bubbles consensuses into genome
"""
consensuses = defaultdict(list)
coverage = defaultdict(list)
with open(consensus_file, "r") as f:
header = True
for line in f:
if header:
tokens = line.strip().split(" ")
ctg_id = tokens[0][1:]
ctg_pos = int(tokens[1])
coverage[ctg_id].append(int(tokens[2]))
else:
consensuses[ctg_id].append((ctg_pos, line.strip()))
header = not header
polished_fasta = {}
polished_stats = {}
for ctg_id, seqs in iteritems(consensuses):
sorted_seqs = [p[1] for p in sorted(seqs, key=lambda p: p[0])]
concat_seq = "".join(sorted_seqs)
#mean_coverage = sum(coverage[ctg_id]) / len(coverage[ctg_id])
polished_fasta[ctg_id] = concat_seq
polished_stats[ctg_id] = len(concat_seq)
return polished_fasta, polished_stats
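# Usage sketch (paths and settings are illustrative; error_mode must be a key
# of cfg.vals["err_modes"]):
#
#   polished, stats = polish(contig_seqs="contigs.fasta",
#                            read_seqs=["reads.fastq.gz"],
#                            work_dir="polish_dir", num_iters=2,
#                            num_threads=8, error_mode="nano",
#                            output_progress=True)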
|
fenderglass/ABruijn
|
flye/polishing/polish.py
|
Python
|
bsd-3-clause
| 11,547
|
import os
from traits.api import HasTraits
from traitsui.api import View, Item
from enable.savage.trait_defs.ui.svg_button import SVGButton
pause_icon = os.path.join(os.path.dirname(__file__), 'player_pause.svg')
resume_icon = os.path.join(os.path.dirname(__file__), 'player_play.svg')
class SVGDemo(HasTraits):
pause = SVGButton('Pause', filename=pause_icon,
toggle_filename=resume_icon,
toggle_state=True,
toggle_label='Resume',
toggle_tooltip='Resume',
tooltip='Pause', toggle=True)
trait_view = View(Item('pause'))
SVGDemo().configure_traits()
|
tommy-u/enable
|
examples/savage/toggle_demo.py
|
Python
|
bsd-3-clause
| 673
|
"""
BNF reference: http://theory.lcs.mit.edu/~rivest/sexp.txt
<sexp> :: <string> | <list>
<string> :: <display>? <simple-string> ;
<simple-string> :: <raw> | <token> | <base-64> | <hexadecimal> |
<quoted-string> ;
<display> :: "[" <simple-string> "]" ;
<raw> :: <decimal> ":" <bytes> ;
<decimal> :: <decimal-digit>+ ;
-- decimal numbers should have no unnecessary leading zeros
<bytes> -- any string of bytes, of the indicated length
<token> :: <tokenchar>+ ;
<base-64> :: <decimal>? "|" ( <base-64-char> | <whitespace> )* "|" ;
<hexadecimal> :: "#" ( <hex-digit> | <white-space> )* "#" ;
<quoted-string> :: <decimal>? <quoted-string-body>
<quoted-string-body> :: "\"" <bytes> "\""
<list> :: "(" ( <sexp> | <whitespace> )* ")" ;
<whitespace> :: <whitespace-char>* ;
<token-char> :: <alpha> | <decimal-digit> | <simple-punc> ;
<alpha> :: <upper-case> | <lower-case> | <digit> ;
<lower-case> :: "a" | ... | "z" ;
<upper-case> :: "A" | ... | "Z" ;
<decimal-digit> :: "0" | ... | "9" ;
<hex-digit> :: <decimal-digit> | "A" | ... | "F" | "a" | ... | "f" ;
<simple-punc> :: "-" | "." | "/" | "_" | ":" | "*" | "+" | "=" ;
<whitespace-char> :: " " | "\t" | "\r" | "\n" ;
<base-64-char> :: <alpha> | <decimal-digit> | "+" | "/" | "=" ;
<null> :: "" ;
"""
from pyparsing import *
from base64 import b64decode
import pprint
def verifyLen(s, l, t):
t = t[0]
if t.len is not None:
t1len = len(t[1])
if t1len != t.len:
raise ParseFatalException(s, l, "invalid data of length %d, expected %s" % (t1len, t.len))
return t[1]
# define punctuation literals
LPAR, RPAR, LBRK, RBRK, LBRC, RBRC, VBAR = map(Suppress, "()[]{}|")
decimal = Regex(r'0|[1-9]\d*').setParseAction(lambda t: int(t[0]))
hexadecimal = ("#" + OneOrMore(Word(hexnums)) + "#") \
.setParseAction(lambda t: int("".join(t[1:-1]), 16))
bytes = Word(printables)
raw = Group(decimal("len") + Suppress(":") + bytes).setParseAction(verifyLen)
token = Word(alphanums + "-./_:*+=")
base64_ = Group(
Optional(decimal | hexadecimal, default=None)("len") + VBAR + OneOrMore(Word(alphanums + "+/=")).setParseAction(
lambda t: b64decode("".join(t))) + VBAR).setParseAction(verifyLen)
qString = Group(Optional(decimal, default=None)("len") +
dblQuotedString.setParseAction(removeQuotes)).setParseAction(verifyLen)
simpleString = base64_ | raw | decimal | token | hexadecimal | qString
# extended definitions
decimal = Regex(r'-?0|[1-9]\d*').setParseAction(lambda t: int(t[0]))
real = Regex(r"[+-]?\d+\.\d*([eE][+-]?\d+)?").setParseAction(lambda tokens: float(tokens[0]))
token = Word(alphanums + "-./_:*+=!<>")
simpleString = real | base64_ | raw | decimal | token | hexadecimal | qString
display = LBRK + simpleString + RBRK
string_ = Optional(display) + simpleString
uLisp_parse = Forward()
sexpList = Group(LPAR + ZeroOrMore(uLisp_parse) + RPAR)
uLisp_parse << ( string_ | sexpList )
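# Usage sketch (illustrative input; the result shape follows pyparsing's Group):
#
#   result = uLisp_parse.parseString('(define x 42)')
#   print(result.asList())   # roughly [['define', 'x', 42]]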
|
bossiernesto/uLisp
|
uLisp/parser/uLispParser.py
|
Python
|
bsd-3-clause
| 2,979
|
import asyncio
from unittest.mock import MagicMock
def SimpleCoroutineMock(f=lambda *args, **kwargs: None):
builder = CoroutineMockBuilder()
return builder.addDelegate(f).build().mock()
class CoroutineMock(object):
# Handy for debugging failing tests in the debugger.
__blocking_dict = {}
def __init__(self, returnSequence, block:asyncio.Event):
self.__startingEvent = asyncio.Event()
self.__endingEvent = asyncio.Event()
self.__returnSequence = tuple(returnSequence)
if (len(self.__returnSequence) < 1):
self.__returnSequence = (lambda *args, **kwargs: None, )
self.__returnSequenceLen = len(self.__returnSequence)
self.__block = block
self.__mock = self.__createMock()
# It's easier to find a dictionary that is an instance variable than
# one that is a class static, so just make an instance variable that
# references the shared dictionary.
self.__blocking_dict = CoroutineMock.__blocking_dict
def __createMock(self):
returnIndex = 0
async def cr(*args, **kwargs):
nonlocal returnIndex
try:
self.__endingEvent.clear()
self.__startingEvent.set()
if (self.__block is not None):
self.__blocking_dict[id(self)] = self
try:
await self.__block.wait()
finally:
del self.__blocking_dict[id(self)]
self.__block.clear()
returnFunc = self.__returnSequence[returnIndex % self.__returnSequenceLen]
returnIndex += 1
return returnFunc(*args, **kwargs)
finally:
self.__startingEvent.clear()
self.__endingEvent.set()
return MagicMock(wraps=cr)
def start(self):
return self.__startingEvent
def end(self):
return self.__endingEvent
def unblock(self):
self.__block.set()
def mock(self):
return self.__mock
async def waitForSingleCall(self):
await self.start().wait()
self.unblock()
await self.end().wait()
class CoroutineMockBuilder(object):
def __init__(self):
self.__block = None
self.__returnSequence = []
def blocks(self):
return self.blocksOn(asyncio.Event())
def blocksOn(self, event:asyncio.Event):
self.__block = event
return self
def exception(self, e, repeats=1):
def r(*args, **kwargs):
raise e
self.__returnSequence.extend([r] * repeats)
return self
def returns(self, v, repeats=1):
def r(*args, **kwargs):
return v
self.__returnSequence.extend([r] * repeats)
return self
def addDelegate(self, f, repeats=1):
self.__returnSequence.extend([f] * repeats)
return self
def build(self):
return CoroutineMock(self.__returnSequence, self.__block)
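# Usage sketch inside an asyncio test (names are illustrative):
#
#   async def test_mock():
#       cm = CoroutineMockBuilder().returns(42).build()
#       assert await cm.mock()("any", key="word") == 42
#
#       blocking = CoroutineMockBuilder().blocks().returns(1).build()
#       task = asyncio.ensure_future(blocking.mock()())
#       await blocking.waitForSingleCall()   # start -> unblock -> end
#       assert task.result() == 1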
|
cbrichford/async-mock
|
async_mock/coroutine.py
|
Python
|
bsd-3-clause
| 3,037
|
from collections import OrderedDict
import unittest
import numpy
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import CategoricalHyperparameter
from autosklearn.pipeline.components.classification.liblinear_svc import LibLinear_SVC
from autosklearn.pipeline.components.classification.lda import LDA
from autosklearn.pipeline.components.feature_preprocessing.pca import PCA
from autosklearn.pipeline.components.feature_preprocessing.truncatedSVD import TruncatedSVD
from autosklearn.pipeline.components.feature_preprocessing.no_preprocessing import NoPreprocessing
from autosklearn.pipeline.components.feature_preprocessing.random_trees_embedding \
import RandomTreesEmbedding
import autosklearn.pipeline.create_searchspace_util
class TestCreateClassificationSearchspace(unittest.TestCase):
_multiprocess_can_split_ = True
def test_get_match_array_sparse_and_dense(self):
# preproc is empty
preprocessors = OrderedDict()
preprocessors['pca'] = PCA
classifiers = OrderedDict()
classifiers['lda'] = LDA
# Sparse + dense
class Preprocessors(object):
@classmethod
def get_available_components(self, *args, **kwargs):
return preprocessors
class Classifiers(object):
@classmethod
def get_available_components(self, *args, **kwargs):
return classifiers
# Dense
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, PCA), (1, LDA)), dataset_properties={'sparse': True})
self.assertEqual(numpy.sum(m), 0)
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, PCA), (1, LDA)), dataset_properties={'sparse': False})
self.assertEqual(m, [[1]])
# Sparse
preprocessors['tSVD'] = TruncatedSVD
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)),
dataset_properties={'sparse': True})
self.assertEqual(m[0], [0]) # pca
self.assertEqual(m[1], [1]) # svd
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)),
dataset_properties={'sparse': False})
self.assertEqual(m[0], [1]) # pca
self.assertEqual(m[1], [0]) # svd
preprocessors['none'] = NoPreprocessing
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)),
dataset_properties={'sparse': True})
self.assertEqual(m[0, :], [0]) # pca
self.assertEqual(m[1, :], [1]) # tsvd
self.assertEqual(m[2, :], [0]) # none
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, LDA)),
dataset_properties={'sparse': False})
self.assertEqual(m[0, :], [1]) # pca
self.assertEqual(m[1, :], [0]) # tsvd
self.assertEqual(m[2, :], [1]) # none
classifiers['libsvm'] = LibLinear_SVC
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, Classifiers)),
dataset_properties={'sparse': False})
self.assertListEqual(list(m[0, :]), [1, 1]) # pca
self.assertListEqual(list(m[1, :]), [0, 0]) # tsvd
self.assertListEqual(list(m[2, :]), [1, 1]) # none
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, Classifiers)),
dataset_properties={'sparse': True})
self.assertListEqual(list(m[0, :]), [0, 0]) # pca
self.assertListEqual(list(m[1, :]), [1, 1]) # tsvd
self.assertListEqual(list(m[2, :]), [0, 1]) # none
# Do fancy 3d stuff
preprocessors['random_trees'] = RandomTreesEmbedding
m = autosklearn.pipeline.create_searchspace_util.get_match_array(
pipeline=((0, Preprocessors), (1, Preprocessors), (2, Classifiers)),
dataset_properties={'sparse': False})
# PCA followed by truncated SVD is forbidden
self.assertEqual(list(m[0].flatten()), [1, 1, 0, 0, 1, 1, 0, 1])
# Truncated SVD is forbidden
self.assertEqual(list(m[1].flatten()), [0, 0, 0, 0, 0, 0, 0, 0])
# Truncated SVD is forbidden after no_preprocessing
self.assertEqual(list(m[2].flatten()), [1, 1, 0, 0, 1, 1, 0, 1])
# PCA is forbidden, truncatedSVD allowed after random trees embedding
# lda only allowed after truncatedSVD
self.assertEqual(list(m[3].flatten()), [0, 0, 1, 1, 0, 1, 0, 1])
def test_get_match_array_signed_unsigned_and_binary(self):
pass
@unittest.skip("Not currently working.")
def test_add_forbidden(self):
m = numpy.ones([2, 3])
preprocessors_list = ['pa', 'pb']
classifier_list = ['ca', 'cb', 'cc']
cs = ConfigurationSpace()
preprocessor = CategoricalHyperparameter(name='feature_preprocessor',
choices=preprocessors_list)
classifier = CategoricalHyperparameter(name='classifier',
choices=classifier_list)
cs.add_hyperparameter(preprocessor)
cs.add_hyperparameter(classifier)
new_cs = autosklearn.pipeline.create_searchspace_util.add_forbidden(
conf_space=cs, node_0_list=preprocessors_list,
node_1_list=classifier_list, matches=m,
node_0_name='feature_preprocessor', node_1_name="classifier")
self.assertEqual(len(new_cs.forbidden_clauses), 0)
self.assertIsInstance(new_cs, ConfigurationSpace)
m[1, 1] = 0
new_cs = autosklearn.pipeline.create_searchspace_util.add_forbidden(
conf_space=cs, node_0_list=preprocessors_list,
node_1_list=classifier_list, matches=m,
node_0_name='feature_preprocessor', node_1_name="classifier")
self.assertEqual(len(new_cs.forbidden_clauses), 1)
self.assertEqual(new_cs.forbidden_clauses[0].components[0].value, 'cb')
self.assertEqual(new_cs.forbidden_clauses[0].components[1].value, 'pb')
self.assertIsInstance(new_cs, ConfigurationSpace)
|
automl/auto-sklearn
|
test/test_pipeline/test_create_searchspace_util_classification.py
|
Python
|
bsd-3-clause
| 6,398
|
from __future__ import unicode_literals
from tests.utils import ConverterTestCase
class EnumTestCase(ConverterTestCase):
def test_empty(self):
self.assertGeneratedOutput(
"""
enum Bar {
};
""",
"""
from enum import Enum
class Bar(Enum):
pass
"""
)
def test_without_values(self):
self.assertGeneratedOutput(
"""
enum Bar {
TOP,
RIGHT,
BOTTOM,
LEFT
};
""",
"""
from enum import Enum
class Bar(Enum):
TOP = 0
RIGHT = 1
BOTTOM = 2
LEFT = 3
"""
)
def test_values(self):
self.assertGeneratedOutput(
"""
enum Bar {
TOP = 37,
RIGHT = 42,
BOTTOM = 55,
LEFT = 69
};
""",
"""
from enum import Enum
class Bar(Enum):
TOP = 37
RIGHT = 42
BOTTOM = 55
LEFT = 69
"""
)
def test_initial_values(self):
self.assertGeneratedOutput(
"""
enum Bar {
TOP = 37,
RIGHT,
BOTTOM,
LEFT
};
""",
"""
from enum import Enum
class Bar(Enum):
TOP = 37
RIGHT = 38
BOTTOM = 39
LEFT = 40
"""
)
def test_multiple_initial_values(self):
self.assertGeneratedOutput(
"""
enum Bar {
TOP = 37,
RIGHT,
BOTTOM = 42,
LEFT
};
""",
"""
from enum import Enum
class Bar(Enum):
TOP = 37
RIGHT = 38
BOTTOM = 42
LEFT = 43
"""
)
def test_expressions_for_values(self):
self.assertGeneratedOutput(
"""
enum Bar {
TOP = 1 << 0,
RIGHT = 1 << 1,
BOTTOM = 1 << 2,
LEFT = 1 << 3
};
""",
"""
from enum import Enum
class Bar(Enum):
TOP = 1
RIGHT = 2
BOTTOM = 4
LEFT = 8
"""
)
def test_local_enum_reference(self):
self.assertGeneratedOutput(
"""
enum Bar {
TOP,
RIGHT,
BOTTOM,
LEFT
};
void test() {
Bar position = TOP;
}
""",
"""
from enum import Enum
class Bar(Enum):
TOP = 0
RIGHT = 1
BOTTOM = 2
LEFT = 3
def test():
position = Bar.TOP
"""
)
|
pybee/seasnake
|
tests/test_enum.py
|
Python
|
bsd-3-clause
| 3,328
|
"""
ios.py
Handle arguments, configuration file
@author: K.Edeline
"""
import sys
import argparse
import configparser
import logging
import shutil
class IOManager(object):
"""
extend me
"""
#DEFAULT_CONFIG_LOC="/tmp/deploypl.ini"
PKG_FILE = "packages.txt"
def __init__(self, child=None, **kwargs):
super().__init__(**kwargs)
        if child is None:
            raise IOManagerException("Child class not found")
self.child = child
self.args = None
self.config = None
self.logger = None
def load_inputs(self):
self.arguments()
if "start" in self.args.cmd:
self.configuration()
def load_outputs(self, decoy=False):
self.log(decoy=decoy)
########################################################
# ARGPARSE
########################################################
def arguments(self):
"""
Parse arguments
Used mostly to provide the location of the config file.
"""
parser = argparse.ArgumentParser(description='PlanetLab C&C server')
parser.add_argument('cmd', type=str,
choices=["start", "stop", "restart", "status"])
parser.add_argument('-l' , '--log-file', type=str, default="deploypl.log",
help='log file location (default: deploypl.log)')
parser.add_argument('-c' , '--config', type=str,
#default=IOManager.DEFAULT_CONFIG_LOC,
help='configuration file location')
parser.add_argument('-d' , '--debug', action='store_true',
help='increase log output level')
parser.add_argument('-v' , '--verbose', action='store_true',
help='status print node descriptions')
parser.add_argument('-vv' , '--vverbose', action='store_true',
help='print info about non-usable nodes')
parser.add_argument('-n' , '--names', action='store_true',
help='status print node names, not addresses')
self.args = parser.parse_args()
return self.args
########################################################
# CONFIGPARSER
########################################################
def configuration(self):
"""
Parse configuration file
"""
        if self.args is None or self.args.config is None:
            raise IOManagerException("Arguments not found")
self.config = configparser.ConfigParser()
parsed = self.config.read(self.args.config)
if not parsed:
print("Configuration file not found:", self.args.config)
sys.exit(1)
# copy cfg file to /tmp/
#if self.args.config != IOManager.DEFAULT_CONFIG_LOC:
# shutil.copyfile(self.args.config, IOManager.DEFAULT_CONFIG_LOC)
# Load config
self._load_config()
return self.config
def _load_config(self):
"""
Load configuration
"""
self.slice = self.config["core"]["slice"]
self.user = self.config["core"]["user"]
# PL settings
self._nodedir = self._to_absolute(self.config["core"]["nodes_dir"])
self._datadir = self._to_absolute(self.config["core"]["data_dir"])
self._logdir = self._to_absolute(self.config["core"]["log_dir"])
self._rawfile = self._to_absolute(self.config["core"]["raw_nodes"],
root=self._nodedir)
self.userdir = self._to_absolute(self.user, root=self._logdir)
self.pkgfile = self._to_absolute(IOManager.PKG_FILE, root=self.userdir)
self.threadlimit = int(self.config["core"]["thread_limit"])
self.sshlimit = int(self.config["core"]["ssh_limit"])
self.sshkeyloc = self.config["core"]["ssh_keyloc"]
self.period = int(self.config["core"]["probing_period"])
self.initialdelay = (self.config["core"]["initial_delay"] == 'yes')
self._package_list()
def _package_list(self):
"""
load pkg list from file
"""
self.pkglist = []
if not self.userdir:
return
def pkgs(line):
return (line and not line.startswith(';'))
with open(self.pkgfile, 'r') as f:
lines = map(str.rstrip, f.readlines())
self.pkglist = list(filter(pkgs, lines))
def _to_absolute(self, path, root=None):
"""
Convert path to absolute if it's not already
"""
if not path:
return None
if path.startswith("/"):
return path
if not root:
root = self.cwd
return "/".join([root, path])
########################################################
# LOGGING
########################################################
def log(self, decoy=False, console=False, logfile=True, errfile=False):
"""
load logging facility
"""
if decoy:
            decoy_logger = lambda *args, **kwargs: None
self.debug = self.info \
= self.warn \
= self.error \
= self.critical \
= decoy_logger
return
        if self.args is None:
            raise IOManagerException("Arguments not found")
        if self.config is None:
            raise IOManagerException("Configuration not found")
# create logger
self.logger = logging.getLogger(self.child.__class__.__name__)
self.logger.setLevel(logging.DEBUG)
# console handler and set level to debug
if console:
ch = logging.StreamHandler()
ch.setLevel(logging.INFO if self.args.debug else logging.ERROR)
# XXX
#filehandler = logging.handlers.TimedRotatingFileHandler('/tmp/daemon.log',
# when='midnight',interval=1,backupCount=10)
# log file handler
if logfile:
fh = logging.FileHandler(self._to_absolute(self.args.log_file,
root=self._logdir))
fh.setLevel(logging.DEBUG if self.args.debug else logging.INFO)
# error file handler
if errfile:
eh = logging.FileHandler(self._to_absolute(self.args.error_file,
root=self._logdir))
eh.setLevel(logging.ERROR)
# add formatter to handlers & handlers to logger
formatter = logging.Formatter("%(asctime)s : %(levelname)-5s : %(message)s",
"%Y-%m-%d %H:%M:%S")
if console:
ch.setFormatter(formatter)
self.logger.addHandler(ch)
if logfile:
fh.setFormatter(formatter)
self.logger.addHandler(fh)
if errfile:
eh.setFormatter(formatter)
self.logger.addHandler(eh)
# log functions
self.debug = self.logger.debug
self.info = self.logger.info
self.warn = self.logger.warn
self.error = self.logger.error
self.critical = self.logger.critical
return self.logger
class IOManagerException(Exception):
"""
IOManagerException(Exception)
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
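# Usage sketch ("extend me"): a daemon subclass passes itself as child so the
# logger is named after the concrete class (names below are illustrative):
#
#   class DeployDaemon(IOManager):
#       def __init__(self):
#           super().__init__(child=self)
#
#   daemon = DeployDaemon()
#   daemon.load_inputs()     # argparse; reads config when cmd is "start"
#   daemon.load_outputs()    # sets up logging; then daemon.info("ready")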
|
ekorian/deploypl
|
deployer/ios.py
|
Python
|
bsd-3-clause
| 7,259
|
from django.views.generic import TemplateView
from django.shortcuts import render_to_response
from django.template import RequestContext
from braces.views import LoginRequiredMixin
class DashboardView(LoginRequiredMixin, TemplateView):
template_name = 'dashboard.html'
def get(self, *args, **kwargs):
return render_to_response(self.template_name, {}, context_instance=RequestContext(self.request))
def get_context_data(self, **kwargs):
context_data = super(DashboardView, self).get_context_data(**kwargs)
context_data['number_livestock'] = None
return context_data
|
savioabuga/phoenix
|
phoenix/dashboard/views.py
|
Python
|
bsd-3-clause
| 614
|
from btmux_template_io.item_table import ITEM_TABLE
from btmux_template_io.parsers.ssw.crit_mapping import PHYSICAL_WEAPON_MAP, \
EQUIPMENT_MAP
from . ammo import add_ammo
from . common import add_crits_from_locations
from . weapons import add_weapon
def populate_equipment(xml_root, unit_obj):
"""
Equipment is the general term for an item within a mech. Weapons,
ammo, melee weapons, etc.
:param lxml.etree.Element xml_root: The root of the XML doc.
:param btmux_template_io.unit.BTMuxUnit unit_obj: The unit instance
being populated.
"""
equipment_elements = xml_root.xpath('baseloadout/equipment')
for equip_e in equipment_elements:
e_type = equip_e.xpath('type')[0].text
e_name = equip_e.xpath('name')[0].text
if e_type in ['energy', 'ballistic', 'missile']:
add_weapon(equip_e, unit_obj)
elif 'Anti-Missile' in e_name and '@' not in e_name:
# These are of type equipment, but BTMux handles them like weapons.
add_weapon(equip_e, unit_obj)
elif e_type == 'ammunition':
add_ammo(equip_e, unit_obj)
elif e_type == 'physical':
_add_equipment(equip_e, unit_obj, PHYSICAL_WEAPON_MAP)
elif e_type in ['equipment', 'CASE', 'TargetingComputer']:
_add_equipment(equip_e, unit_obj, EQUIPMENT_MAP)
else:
raise ValueError("Invalid equipment type: %s" % e_type)
def _add_equipment(equip_e, unit_obj, map_dict):
ssw_name = equip_e.xpath('name')[0].text
try:
mapped_add_special = map_dict[ssw_name].get('add_special')
except KeyError:
raise ValueError("Unknown equipment type: %s" % ssw_name)
if mapped_add_special:
unit_obj.specials.add(mapped_add_special)
btmux_name = map_dict[ssw_name]['name']
if not btmux_name:
# Probably something like a SearchLight, which has no crit in BTMux.
return
data_dict = ITEM_TABLE[btmux_name]
if 'tons_per_crit' in data_dict:
crits_per_item = int(round(
float(unit_obj.weight) / data_dict['tons_per_crit'], 0))
else:
crits_per_item = data_dict.get('crits', 1)
add_special = data_dict.get('add_special')
if add_special:
unit_obj.specials.add(add_special)
add_crits_from_locations(
equip_e,
btmux_name,
unit_obj,
crits_per_item=crits_per_item)
|
gtaylor/btmux_template_io
|
btmux_template_io/parsers/ssw/populators/equipment.py
|
Python
|
bsd-3-clause
| 2,429
|
"""The image module provides basic functions for working with images in nipy.
Functions are provided to load, save and create image objects, along with
iterators to easily slice through volumes.
load : load an image from a file
save : save an image to a file
fromarray : create an image from a numpy array
Examples
--------
See documentation for load and save functions for 'working' examples.
"""
import os
import numpy as np
import nipy.io.imageformats as formats
from nipy.core.api import Image, is_image
from nifti_ref import (coordmap_from_affine, coerce_coordmap,
ijk_from_fps, fps_from_ijk)
def load(filename):
"""Load an image from the given filename.
Parameters
----------
filename : string
Should resolve to a complete filename path.
Returns
-------
image : An `Image` object
If successful, a new `Image` object is returned.
See Also
--------
save_image : function for saving images
fromarray : function for creating images from numpy arrays
Examples
--------
>>> from nipy.io.api import load_image
>>> from nipy.testing import anatfile
>>> img = load_image(anatfile)
>>> img.shape
(33, 41, 25)
"""
img = formats.load(filename)
aff = img.get_affine()
shape = img.get_shape()
hdr = img.get_header()
# Get info from NIFTI header, if present, to tell which axes are
# which. This is a NIFTI-specific kludge, that might be abstracted
# out into the image backend in a general way. Similarly for
# getting zooms
try:
fps = hdr.get_dim_info()
except (TypeError, AttributeError):
fps = (None, None, None)
ijk = ijk_from_fps(fps)
try:
zooms = hdr.get_zooms()
except AttributeError:
zooms = np.ones(len(shape))
aff = _match_affine(aff, len(shape), zooms)
coordmap = coordmap_from_affine(aff, ijk)
img = Image(img.get_data(), coordmap)
img.header = hdr
return img
def _match_affine(aff, ndim, zooms=None):
''' Fill or prune affine to given number of dimensions
>>> aff = np.arange(16).reshape(4,4)
>>> _match_affine(aff, 3)
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> _match_affine(aff, 2)
array([[ 0., 1., 3.],
[ 4., 5., 7.],
[ 0., 0., 1.]])
>>> _match_affine(aff, 4)
array([[ 0., 1., 2., 0., 3.],
[ 4., 5., 6., 0., 7.],
[ 8., 9., 10., 0., 11.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
>>> aff = np.arange(9).reshape(3,3)
>>> _match_affine(aff, 2)
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
'''
if aff.shape[0] != aff.shape[1]:
raise ValueError('Need square affine')
aff_dim = aff.shape[0] - 1
if ndim == aff_dim:
return aff
aff_diag = np.ones(ndim+1)
    if zooms is not None:
n = min(len(zooms), ndim)
aff_diag[:n] = zooms[:n]
mod_aff = np.diag(aff_diag)
n = min(ndim, aff_dim)
# rotations zooms shears
mod_aff[:n,:n] = aff[:n,:n]
# translations
mod_aff[:n,-1] = aff[:n,-1]
return mod_aff
def save(img, filename, dtype=None):
"""Write the image to a file.
Parameters
----------
img : An `Image` object
filename : string
Should be a valid filename.
Returns
-------
image : An `Image` object
See Also
--------
load_image : function for loading images
fromarray : function for creating images from numpy arrays
Examples
--------
>>> import os
>>> import numpy as np
>>> from tempfile import mkstemp
>>> from nipy.core.api import fromarray
>>> from nipy.io.api import save_image
>>> data = np.zeros((91,109,91), dtype=np.uint8)
>>> img = fromarray(data, 'kji', 'zxy')
>>> fd, fname = mkstemp(suffix='.nii.gz')
>>> saved_img = save_image(img, fname)
>>> saved_img.shape
(91, 109, 91)
>>> os.unlink(fname)
>>> fd, fname = mkstemp(suffix='.img.gz')
>>> saved_img = save_image(img, fname)
>>> saved_img.shape
(91, 109, 91)
>>> os.unlink(fname)
>>> fname = 'test.mnc'
>>> saved_image = save_image(img, fname)
Traceback (most recent call last):
...
ValueError: Cannot save file type "minc"
Notes
-----
Filetype is determined by the file extension in 'filename'. Currently the
following filetypes are supported:
* Nifti single file : ['.nii', '.nii.gz']
* Nifti file pair : ['.hdr', '.hdr.gz']
* Analyze file pair : ['.img', 'img.gz']
"""
# Get header from image
try:
original_hdr = img.header
except AttributeError:
original_hdr = None
# Make NIFTI compatible version of image
newcmap, order = coerce_coordmap(img.coordmap)
Fimg = Image(np.transpose(np.asarray(img), order), newcmap)
# Expand or contract affine to 4x4 (3 dimensions)
rzs = Fimg.affine[:-1,:-1]
zooms = np.sqrt(np.sum(rzs * rzs, axis=0))
aff = _match_affine(Fimg.affine, 3, zooms)
ftype = _type_from_filename(filename)
if ftype.startswith('nifti1'):
klass = formats.Nifti1Image
elif ftype == 'analyze':
klass = formats.Spm2AnalyzeImage
else:
raise ValueError('Cannot save file type "%s"' % ftype)
# make new image
out_img = klass(data=np.asarray(Fimg),
affine=aff,
header=original_hdr)
hdr = out_img.get_header()
# work out phase, freqency, slice from coordmap names
ijk = newcmap.input_coords.coord_names
fps = fps_from_ijk(ijk)
# put fps into header if possible
try:
hdr.set_dim_info(*fps)
except AttributeError:
pass
# Set zooms
hdr.set_zooms(zooms)
# save to disk
out_img.to_filename(filename)
return Fimg
def _type_from_filename(filename):
''' Return image type determined from filename
Filetype is determined by the file extension in 'filename'.
Currently the following filetypes are supported:
* Nifti single file : ['.nii', '.nii.gz']
* Nifti file pair : ['.hdr', '.hdr.gz']
* Analyze file pair : ['.img', '.img.gz']
>>> _type_from_filename('test.nii')
'nifti1single'
>>> _type_from_filename('test')
'nifti1single'
>>> _type_from_filename('test.hdr')
'nifti1pair'
>>> _type_from_filename('test.hdr.gz')
'nifti1pair'
>>> _type_from_filename('test.img.gz')
'analyze'
>>> _type_from_filename('test.mnc')
'minc'
'''
if filename.endswith('.gz'):
filename = filename[:-3]
elif filename.endswith('.bz2'):
filename = filename[:-4]
_, ext = os.path.splitext(filename)
if ext in ('', '.nii'):
return 'nifti1single'
if ext == '.hdr':
return 'nifti1pair'
if ext == '.img':
return 'analyze'
if ext == '.mnc':
return 'minc'
raise ValueError('Strange file extension "%s"' % ext)
def as_image(image_input):
''' Load image from filename or pass through image instance
Parameters
----------
image_input : str or Image instance
image or string filename of image. If a string, load image and
return. If an image, pass through without modification
Returns
-------
img : Image or Image-like instance
Input object if `image_input` seemed to be an image, loaded Image
object if `image_input` was a string.
Raises
------
TypeError : if neither string nor image-like passed
Examples
--------
>>> from nipy.testing import anatfile
>>> from nipy.io.api import load_image
>>> img = as_image(anatfile)
>>> img2 = as_image(img)
>>> img2 is img
True
'''
if is_image(image_input):
return image_input
if isinstance(image_input, basestring):
return load(image_input)
raise TypeError('Expecting an image-like object or filename string')
|
yarikoptic/NiPy-OLD
|
nipy/io/files.py
|
Python
|
bsd-3-clause
| 8,110
|
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
default_app_config = 'yepes.contrib.slugs.apps.SlugsConfig'
|
samuelmaudo/yepes
|
yepes/contrib/slugs/__init__.py
|
Python
|
bsd-3-clause
| 125
|
"""Management command for uploading master json data for OCW courses"""
from django.core.management import BaseCommand
from course_catalog.etl.deduplication import generate_duplicates_yaml
class Command(BaseCommand):
"""Print course duplicates yaml"""
help = "Print course duplicates yaml"
def handle(self, *args, **options):
self.stdout.write(generate_duplicates_yaml())
|
mitodl/open-discussions
|
course_catalog/management/commands/print_course_duplicates_yaml.py
|
Python
|
bsd-3-clause
| 397
|
import re
from django.conf import settings
from django.utils.html import strip_tags
import amo
from amo.helpers import absolutify
from amo.urlresolvers import reverse
from amo.utils import urlparams, epoch
from tags.models import Tag
from versions.compare import version_int
# For app version major.minor matching.
m_dot_n_re = re.compile(r'^\d+\.\d+$')
def addon_to_dict(addon, disco=False, src='api'):
"""
Renders an addon in JSON for the API.
"""
v = addon.current_version
url = lambda u, **kwargs: settings.SITE_URL + urlparams(u, **kwargs)
if disco:
learnmore = settings.SERVICES_URL + reverse('discovery.addons.detail',
args=[addon.slug])
learnmore = urlparams(learnmore, src='discovery-personalrec')
else:
learnmore = url(addon.get_url_path(), src=src)
d = {
'id': addon.id,
'name': unicode(addon.name) if addon.name else None,
'guid': addon.guid,
'status': amo.STATUS_CHOICES_API[addon.status],
'type': amo.ADDON_SLUGS_UPDATE[addon.type],
'authors': [{'id': a.id, 'name': unicode(a.name),
'link': absolutify(a.get_url_path(src=src))}
for a in addon.listed_authors],
'summary': (
strip_tags(unicode(addon.summary)) if addon.summary else None),
'description': strip_tags(unicode(addon.description)),
'icon': addon.icon_url,
'learnmore': learnmore,
'reviews': url(addon.reviews_url),
'total_dls': addon.total_downloads,
'weekly_dls': addon.weekly_downloads,
'adu': addon.average_daily_users,
'created': epoch(addon.created),
'last_updated': epoch(addon.last_updated),
'homepage': unicode(addon.homepage) if addon.homepage else None,
'support': unicode(addon.support_url) if addon.support_url else None,
}
if addon.is_persona():
d['theme'] = addon.persona.theme_data
if v:
d['version'] = v.version
d['platforms'] = [unicode(a.name) for a in v.supported_platforms]
d['compatible_apps'] = v.compatible_apps.values()
if addon.eula:
d['eula'] = unicode(addon.eula)
if addon.developer_comments:
d['dev_comments'] = unicode(addon.developer_comments)
if addon.takes_contributions:
contribution = {
'link': url(addon.contribution_url, src=src),
'meet_developers': url(addon.meet_the_dev_url(), src=src),
'suggested_amount': addon.suggested_amount,
}
d['contribution'] = contribution
if addon.type == amo.ADDON_PERSONA:
d['previews'] = [addon.persona.preview_url]
else:
d['previews'] = [p.as_dict(src=src) for p in addon.all_previews]
return d
def extract_from_query(term, filter, regexp, end_of_word_boundary=True):
"""
This pulls out a keyword filter from a search term and returns the value
for the filter and a new term with the filter removed.
E.g. term="yslow version:3", filter='version', regexp='\w+' will result in
a return value of: (yslow, 3).
"""
re_string = r'\b%s:\s*(%s)' % (filter, regexp)
if end_of_word_boundary:
re_string += r'\b'
match = re.search(re_string, term)
if match:
term = term.replace(match.group(0), '').strip()
value = match.group(1)
else:
value = None
return (term, value)
def extract_filters(term, opts=None):
"""
Pulls all the filtering options out of the term and returns a cleaned term
and a dictionary of filter names and filter values. Term filters override
filters found in opts.
"""
opts = opts or {}
filters = {}
params = {}
# Type filters.
term, addon_type = extract_from_query(term, 'type', '\w+')
addon_type = addon_type or opts.get('addon_type')
if addon_type:
try:
atype = int(addon_type)
if atype in amo.ADDON_SEARCH_TYPES:
filters['type'] = atype
except ValueError:
# `addon_type` is not a digit.
# Try to find it in `ADDON_SEARCH_SLUGS`.
atype = amo.ADDON_SEARCH_SLUGS.get(addon_type.lower())
if atype:
filters['type'] = atype
# Platform filters.
term, platform = extract_from_query(term, 'platform', '\w+')
params['platform'] = platform or opts.get('platform')
# Version filters.
term, version = extract_from_query(term, 'version', '[0-9.]+')
params['version'] = version or opts.get('version')
# Tag filters.
term, tag = extract_from_query(term, 'tag', '\w+')
if tag:
tag = Tag.objects.filter(tag_text=tag).values_list('tag_text',
flat=True)
if tag:
filters['tags__in'] = list(tag)
return (term, filters, params)
def filter_version(version, app_id):
"""
Returns filters that can be sent to ES for app version ranges.
    If the version is an alpha, beta, or pre-release, this does an exact match.
Otherwise it will query where max >= M.Na and min <= M.N.
"""
low = version_int(version)
return {'appversion.%s.min__lte' % app_id: low}
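# Illustrative only: for a hypothetical app_id of 1 and version '4.0' this
# returns {'appversion.1.min__lte': version_int('4.0')}, i.e. a single
# lower-bound filter on the denormalized appversion field.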
|
SuriyaaKudoIsc/olympia
|
apps/api/utils.py
|
Python
|
bsd-3-clause
| 5,273
|
import zeit.newsletter.testing
class MetadataTest(zeit.newsletter.testing.SeleniumTestCase):
def test_form_should_save_entered_data_on_blur(self):
s = self.selenium
self.open('/repository/newsletter/@@checkout')
s.waitForElementPresent('id=metadata.subject')
s.assertValue('id=metadata.subject', '')
s.type('id=metadata.subject', 'flubber\t')
s.waitForElementNotPresent('css=.field.dirty')
# Re-open the page and verify that the data is still there
s.clickAndWait('link=Edit contents')
s.waitForElementPresent('id=metadata.subject')
s.assertValue('id=metadata.subject', 'flubber')
|
ZeitOnline/zeit.newsletter
|
src/zeit/newsletter/browser/tests/test_form.py
|
Python
|
bsd-3-clause
| 668
|
from .Base_Action import *
class ProfileAction(Base_Action):
def __init__(self, action_xml, root_action=None):
super(self.__class__, self).__init__(action_xml, root_action)
self.shouldUseLaunchSchemeArgsEnv = self.contents.get('shouldUseLaunchSchemeArgsEnv');
self.savedToolIdentifier = self.contents.get('savedToolIdentifier');
self.useCustomWorkingDirectory = self.contents.get('useCustomWorkingDirectory');
self.buildConfiguration = self.contents.get('buildConfiguration');
self.debugDocumentVersioning = self.contents.get('debugDocumentVersioning');
|
samdmarshall/pyxcscheme
|
pyxcscheme/ProfileAction.py
|
Python
|
bsd-3-clause
| 611
|
class Error ( Exception ):
"""Exception class for Address exceptions"""
def __init__( self, message ) :
Exception.__init__(self,message)
|
SPlanzer/AIMS
|
ElectoralAddress/Error.py
|
Python
|
bsd-3-clause
| 159
|
__version__ = '0.1.0'
from .reports import Report
|
grantmcconnaughey/django-reports
|
djreports/__init__.py
|
Python
|
bsd-3-clause
| 51
|
#!/usr/bin/env python
from __future__ import absolute_import
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "service.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
peragro/peragro-rest
|
manage.py
|
Python
|
bsd-3-clause
| 289
|
from django import forms
from dragnet.dll.models import File, Comment
class FileForm(forms.ModelForm):
"""Using a model form to expedite the creation of DLL records"""
class Meta:
model = File
exclude = ('date_created', 'date_modified', 'created_by',
'modified_by', )
class CommentForm(forms.ModelForm):
"""Comment form for DLL comments"""
class Meta:
model = Comment
exclude = ('user', 'date', 'dll')
class SearchForm(forms.Form):
term = forms.CharField()
|
mozilla/dragnet
|
dragnet/dll/forms.py
|
Python
|
bsd-3-clause
| 534
|
"""
To start UNO for both Calc and Writer:
(Note that if you use the current_document command, it will open Calc's current document, since --calc is the first switch passed)
libreoffice "--accept=socket,host=localhost,port=18100;urp;StarOffice.ServiceManager" --norestore --nofirststartwizard --nologo --calc --writer
To start UNO without opening a libreoffice instance, use the --headless switch:
(Note that this doesn't allow using the current_document command)
libreoffice --headless "--accept=socket,host=localhost,port=18100;urp;StarOffice.ServiceManager" --norestore --nofirststartwizard --nologo --calc --writer
"""
from uno import getComponentContext
from com.sun.star.connection import ConnectionSetupException
from com.sun.star.awt.FontWeight import BOLD
import sys
# For saving the file
from com.sun.star.beans import PropertyValue
from uno import systemPathToFileUrl
class Message(object):
connection_setup_exception = "Error: Please start the uno bridge first."
# Connect to libreoffice using UNO
UNO_PORT = 18100
try:
localContext = getComponentContext()
resolver = localContext.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", localContext)
context = resolver.resolve(
"uno:socket,host=localhost,port=%d;urp;StarOffice.ComponentContext" % UNO_PORT)
except ConnectionSetupException:
print("%s\n" % Message.connection_setup_exception)
sys.exit(1)
# Get the desktop service
desktop = context.ServiceManager.createInstanceWithContext("com.sun.star.frame.Desktop", context)
class Interface(object):
variables = {}
@staticmethod
def current_document():
"""current_document()"""
return desktop.getCurrentComponent()
@staticmethod
def load_document(path):
"""load_document(['path'])"""
url = systemPathToFileUrl(path)
return desktop.loadComponentFromURL(url ,"_blank", 0, ())
@staticmethod
def new_document():
"""new_document()"""
return desktop.loadComponentFromURL("private:factory/scalc","_blank", 0, ())
@staticmethod
def current_sheet(document):
"""[document].current_sheet()"""
return document.getCurrentController().getActiveSheet()
@staticmethod
def save_as(document, path):
"""[document].save_as(['path'])"""
url = systemPathToFileUrl(path)
# Set file to overwrite
property_value = PropertyValue()
property_value.Name = 'Overwrite'
        property_value.Value = True  # the Overwrite media descriptor property expects a boolean
properties = (property_value,)
# Save to file
document.storeAsURL(url, properties)
return True
@staticmethod
def fetch_cell(sheet, cell_range):
"""[sheet].fetch_cell(['A1'])"""
return sheet.getCellRangeByName(cell_range)
@staticmethod
def set_text(cell, string):
"""[cell].set_text(['string'])"""
if (string.startswith('"') and string.endswith('"')) or \
(string.startswith("'") and string.endswith("'")):
string = string[1:-1]
cell.setString(string)
return True
@staticmethod
def get_text(cell):
"""[cell].get_text()"""
return cell.getString()
@staticmethod
def weight(cell, bold):
"""[cell].weight(['bold'])"""
if bold.strip("'").strip('"') == "bold":
cell.CharWeight = BOLD
return True
else:
return False
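# A minimal end-to-end sketch (assumes the UNO bridge above connected
# successfully; the output path is illustrative):
def _example():
    document = Interface.new_document()
    sheet = Interface.current_sheet(document)
    cell = Interface.fetch_cell(sheet, 'A1')
    Interface.set_text(cell, 'hello')
    Interface.weight(cell, 'bold')
    Interface.save_as(document, '/tmp/example.ods')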
|
Risto-Stevcev/iac-protocol
|
iac/app/libreoffice/calc.py
|
Python
|
bsd-3-clause
| 3,484
|
#-*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '0.9a4.dev1' # pragma: nocover
|
BertrandBordage/django-filer
|
filer/__init__.py
|
Python
|
bsd-3-clause
| 118
|
import copy
import pandas as pd
from threeML.plugins.SpectrumLike import SpectrumLike
from threeML.utils.OGIP.response import InstrumentResponse
from threeML.utils.spectrum.binned_spectrum import (
BinnedSpectrumWithDispersion,
ChannelSet,
)
__instrument_name = "General binned spectral data with energy dispersion"
class DispersionSpectrumLike(SpectrumLike):
def __init__(
self,
name,
observation,
background=None,
background_exposure=None,
verbose=True,
tstart=None,
tstop=None,
):
"""
        A plugin for generic spectral data with energy dispersion. It accepts an observed
        binned spectrum and a background binned spectrum, or a plugin holding the background data.
In the case of a binned background spectrum, the background model is profiled
out and the appropriate profile-likelihood is used to fit the total spectrum. In this
case, caution must be used when there are zero background counts in bins as the
profiled background parameters (one per channel) will then have zero information from which to
constrain the background. It is recommended to bin the spectrum such that there is one background count
per channel.
If either an SpectrumLike or XYLike instance is provided as background, it is assumed that this is the
background data and the likelihood model from this plugin is used to simultaneously fit the background
and source.
:param name: the plugin name
:param observation: the observed spectrum
:param background: the background spectrum or a plugin from which the background will be modeled
        :param background_exposure: (optional) adjust the background exposure if the modeled
        background data comes from an XYLike plugin
:param verbose: turn on/off verbose logging
"""
assert isinstance(
observation, BinnedSpectrumWithDispersion
), "observed spectrum is not an instance of BinnedSpectrumWithDispersion"
assert (
observation.response is not None
), "the observed spectrum does not have a response"
# assign the response to the plugins
self._rsp = observation.response # type: InstrumentResponse
super(DispersionSpectrumLike, self).__init__(
name=name,
observation=observation,
background=background,
background_exposure=background_exposure,
verbose=verbose,
tstart=tstart,
tstop=tstop,
)
def set_model(self, likelihoodModel):
"""
Set the model to be used in the joint minimization.
"""
# Store likelihood model
self._like_model = likelihoodModel
# We assume there are no extended sources, since we cannot handle them here
assert self._like_model.get_number_of_extended_sources() == 0, (
"OGIP-like plugins do not support " "extended sources"
)
# Get the differential flux function, and the integral function, with no dispersion,
# we simply integrate the model over the bins
differential_flux, integral = self._get_diff_flux_and_integral(self._like_model)
self._rsp.set_function(integral)
def _evaluate_model(self):
"""
evaluates the full model over all channels
:return:
"""
return self._rsp.convolve()
def get_simulated_dataset(self, new_name=None, **kwargs):
"""
Returns another DispersionSpectrumLike instance where data have been obtained by randomizing the current expectation from the
model, as well as from the background (depending on the respective noise models)
:return: a DispersionSpectrumLike simulated instance
"""
# pass the response thru to the constructor
return super(DispersionSpectrumLike, self).get_simulated_dataset(
new_name=new_name, **kwargs
)
def get_pha_files(self):
info = {}
# we want to pass copies so that
# the user doesn't grab the instance
# and try to modify things. protection
info["pha"] = copy.copy(self._observed_spectrum)
if self._background_spectrum is not None:
info["bak"] = copy.copy(self._background_spectrum)
info["rsp"] = copy.copy(self._rsp)
return info
def display_rsp(self):
"""
Display the currently loaded full response matrix, i.e., RMF and ARF convolved
:return:
"""
self._rsp.plot_matrix()
@property
def response(self):
return self._rsp
def _output(self):
# type: () -> pd.Series
super_out = super(DispersionSpectrumLike, self)._output() # type: pd.Series
the_df = pd.Series({"response": self._rsp.rsp_filename})
return super_out.append(the_df)
def write_pha(self, filename, overwrite=False, force_rsp_write=False):
"""
Writes the observation, background and (optional) rsp to PHAII fits files
:param filename: base file name to write out
:param overwrite: if you would like to force overwriting of the files
:param force_rsp_write: force the writing of an rsp even if not required
"""
# we need to pass up the variables to an OGIPLike
# so that we have the proper variable name
# a local import here because OGIPLike is dependent on this
from threeML.plugins.OGIPLike import OGIPLike
ogiplike = OGIPLike.from_general_dispersion_spectrum(self)
ogiplike.write_pha(
file_name=filename, overwrite=overwrite, force_rsp_write=force_rsp_write
)
@staticmethod
def _build_fake_observation(
fake_data, channel_set, source_errors, source_sys_errors, is_poisson, **kwargs
):
"""
This is the fake observation builder for SpectrumLike which builds data
for a binned spectrum without dispersion. It must be overridden in child classes.
:param fake_data: series of values... they are ignored later
:param channel_set: a channel set
:param source_errors:
:param source_sys_errors:
:param is_poisson:
:return:
"""
assert (
"response" in kwargs
), "A response was not provided. Cannor build synthetic observation"
response = kwargs.pop("response")
observation = BinnedSpectrumWithDispersion(
fake_data,
exposure=1.0,
response=response,
count_errors=source_errors,
sys_errors=source_sys_errors,
quality=None,
scale_factor=1.0,
is_poisson=is_poisson,
mission="fake_mission",
instrument="fake_instrument",
tstart=0.0,
tstop=1.0,
)
return observation
@classmethod
def from_function(
cls,
name,
source_function,
response,
source_errors=None,
source_sys_errors=None,
background_function=None,
background_errors=None,
background_sys_errors=None,
):
"""
Construct a simulated spectrum from a given source function and (optional) background function. If source and/or background errors are not supplied, the likelihood is assumed to be Poisson.
:param name: simulated data set name
:param source_function: astromodels function
:param response: 3ML Instrument response
:param source_errors: (optional) gaussian source errors
:param source_sys_errors: (optional) systematic source errors
:param background_function: (optional) astromodels background function
:param background_errors: (optional) gaussian background errors
:param background_sys_errors: (optional) background systematic errors
:return: simulated DispersionSpectrumLike plugin
"""
channel_set = ChannelSet.from_instrument_response(response)
energy_min, energy_max = channel_set.bin_stack.T
# pass the variables to the super class
return super(DispersionSpectrumLike, cls).from_function(
name,
source_function,
energy_min,
energy_max,
source_errors,
source_sys_errors,
background_function,
background_errors,
background_sys_errors,
response=response,
)
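# A usage sketch (names are hypothetical; `my_response` must be a 3ML
# InstrumentResponse, and Powerlaw comes from astromodels, which this
# module does not import itself):
#
#   from astromodels import Powerlaw
#   sim = DispersionSpectrumLike.from_function(
#       'sim', source_function=Powerlaw(), response=my_response)
#   clone = sim.get_simulated_dataset('sim_copy')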
|
giacomov/3ML
|
threeML/plugins/DispersionSpectrumLike.py
|
Python
|
bsd-3-clause
| 8,596
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2020-02-21 17:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
import django.db.models.deletion
import sentry.db.models.fields.foreignkey
class Migration(migrations.Migration):
# This flag is used to mark that a migration shouldn't be automatically run in
# production. We set this to True for operations that we think are risky and want
# someone from ops to run manually and monitor.
# General advice is that if in doubt, mark your migration as `is_dangerous`.
# Some things you should always mark as dangerous:
# - Large data migrations. Typically we want these to be run manually by ops so that
# they can be monitored. Since data migrations will now hold a transaction open
# this is even more important.
# - Adding columns to highly active tables, even ones that are NULL.
is_dangerous = False
# This flag is used to decide whether to run this migration in a transaction or not.
# By default we prefer to run in a transaction, but for migrations where you want
# to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll
# want to create an index concurrently when adding one to an existing table.
atomic = True
dependencies = [
('sentry', '0045_remove_incidentactivity_event_stats_snapshot'),
]
operations = [
migrations.AlterField(
model_name='exporteddata',
name='file',
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(db_constraint=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='sentry.File'),
),
migrations.AlterField(
model_name='exporteddata',
name='user',
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
|
beeftornado/sentry
|
src/sentry/migrations/0046_auto_20200221_1735.py
|
Python
|
bsd-3-clause
| 1,992
|
from django.conf.urls.defaults import *
from corehq import AccountingAdminInterfaceDispatcher
from corehq.apps.accounting.views import *
urlpatterns = patterns('corehq.apps.accounting.views',
url(r'^$', 'accounting_default', name='accounting_default'),
url(r'^accounts/(\d+)/$', ManageBillingAccountView.as_view(), name=ManageBillingAccountView.urlname),
url(r'^accounts/new/$', NewBillingAccountView.as_view(), name=NewBillingAccountView.urlname),
url(r'^subscriptions/(\d+)/$', EditSubscriptionView.as_view(), name=EditSubscriptionView.urlname),
url(r'^accounts/new_subscription/$', NewSubscriptionViewNoDefaultDomain.as_view(),
name=NewSubscriptionViewNoDefaultDomain.urlname),
url(r'^accounts/new_subscription/(\d+)/$', NewSubscriptionView.as_view(), name=NewSubscriptionView.urlname),
url(r'^software_plans/new/$', NewSoftwarePlanView.as_view(), name=NewSoftwarePlanView.urlname),
url(r'^software_plans/(\d+)/$', EditSoftwarePlanView.as_view(), name=EditSoftwarePlanView.urlname),
url(AccountingAdminInterfaceDispatcher.pattern(), AccountingAdminInterfaceDispatcher.as_view(),
name=AccountingAdminInterfaceDispatcher.name()),
url(r'^pricing_table/(?P<product>[\w-]+)/(?P<locale>[\w-]+)/$', 'pricing_table_json', name='pricing_table_json'),
)
|
gmimano/commcaretest
|
corehq/apps/accounting/urls.py
|
Python
|
bsd-3-clause
| 1,305
|
from django.conf import settings
def MAX_USERNAME_LENGTH():
return getattr(settings, "MAX_USERNAME_LENGTH", 255)
def MAX_EMAIL_LENGTH():
return getattr(settings, "MAX_EMAIL_LENGTH", 255)
def REQUIRE_UNIQUE_EMAIL():
return getattr(settings, "REQUIRE_UNIQUE_EMAIL", True)
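# Usage sketch: these helpers are callables rather than module constants so
# the settings are read lazily; with no overrides in Django settings,
# MAX_USERNAME_LENGTH() returns 255 and REQUIRE_UNIQUE_EMAIL() returns True.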
|
madssj/django-longer-username-and-email
|
longerusernameandemail/__init__.py
|
Python
|
bsd-3-clause
| 288
|
"""Auto-generated file, do not edit by hand. PE metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_PE = PhoneMetadata(id='PE', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='1(?:05|1[67])', possible_number_pattern='\\d{3}', example_number='105', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:05|1[67])', possible_number_pattern='\\d{3}', example_number='105', possible_length=(3,)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(),
short_data=True)
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/phonenumbers/shortdata/region_PE.py
|
Python
|
bsd-3-clause
| 816
|
def extractDhragonisslytherinWordpressCom(item):
'''
Parser for 'dhragonisslytherin.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDhragonisslytherinWordpressCom.py
|
Python
|
bsd-3-clause
| 576
|
"""Auto-generated file, do not edit by hand. NF metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NF = PhoneMetadata(id='NF', country_code=672, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[13]\\d{5}', possible_number_pattern='\\d{5,6}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:1(?:06|17|28|39)|3[012]\\d)\\d{3}', possible_number_pattern='\\d{5,6}', example_number='106609'),
mobile=PhoneNumberDesc(national_number_pattern='38\\d{4}', possible_number_pattern='\\d{5,6}', example_number='381234'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
number_format=[NumberFormat(pattern='(\\d{2})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['1']),
NumberFormat(pattern='(\\d)(\\d{5})', format='\\1 \\2', leading_digits_pattern=['3'])])
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/phonenumbers/data/region_NF.py
|
Python
|
bsd-3-clause
| 1,648
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe SujetAide, détaillée plus bas."""
from abstraits.obase import BaseObj
from primaires.format.description import Description
from primaires.format.fonctions import supprimer_accents, couper_phrase
class SujetAide(BaseObj):
"""Classe représentant un sujet d'aide.
Un sujet d'aide est une aide disponible in-game sur un sujet précis.
Il peut être consultable par un certain groupe de personnes (seulement
les administrateurs du jeu, par exemple) et peut être lié à d'autres
sujets.
Ses attributs sont :
cle -- la clé identifiant le sujet pour les immortels
titre -- le titre du sujet
contenu -- le contenu du sujet d'aide
mots_cles -- des mots-clés pointant vers ce sujet
str_groupe -- une chaîne décrivant le groupe autorisé
sujets_lies -- les sujets liés (des objets SujetAide contenus
dans une liste)
"""
enregistrer = True
_nom = "sujet_aide"
_version = 1
def __init__(self, cle):
"""Constructeur du sujet d'aide."""
BaseObj.__init__(self)
self.cle = cle
self.titre = "un sujet d'aide"
self.pere = None
self.contenu = Description(parent=self, scriptable=False)
self.mots_cles = []
self._str_groupe = "joueur"
self.__sujets_lies = []
self.__sujets_fils = []
self._construire()
def __getnewargs__(self):
return ("", )
def __str__(self):
return "aide:" + self.titre
@property
def str_mots_cles(self):
return ", ".join(self.mots_cles) or "aucun mot-clé"
def _get_str_groupe(self):
return self._str_groupe or "aucun"
def _set_str_groupe(self, nom_groupe):
self._str_groupe = nom_groupe
str_groupe = property(_get_str_groupe, _set_str_groupe)
@property
def grp(self):
        groupe = type(self).importeur.interpreteur.groupes[self._str_groupe]
return groupe
@property
def sujets_lies(self):
"""Retourne une liste déréférencée des sujets liés."""
return [s for s in self.__sujets_lies if s is not None]
@property
def str_sujets_lies(self):
"""Retourne une chaîne contenant les sujets liés."""
return ", ".join([s.titre for s in self.sujets_lies]) or \
"aucun sujet lié"
@property
def sujets_fils(self):
"""Retourne une liste déréférencée des sujets fils."""
return [s for s in self.__sujets_fils if s is not None]
@property
def tab_sujets_fils(self):
"""Retourne un tableau des sujets fils."""
lignes = []
taille = max([len(s.titre) for s in self.sujets_fils] or (10, ))
if taille > 30:
taille = 30
sep = "+" + 17 * "-" + "+" + (taille + 2) * "-" + "+"
en_tete = sep + "\n" + "| |tit|" + "Sujet".ljust(15) + "|ff| |"
en_tete += " |tit|" + "Titre".ljust(taille) + "|ff| |\n" + sep
for s in self.sujets_fils:
ligne = "| |ent|" + s.cle.ljust(15) + "|ff| | "
ligne += couper_phrase(s.titre, taille).ljust(taille) + " |"
lignes.append(ligne)
if lignes:
return en_tete + "\n" + "\n".join(lignes) + "\n" + sep
else:
return "|att|Aucun sujet affilié.|ff|"
def sommaire(self, personnage, indent=""):
"""Renvoie le sommaire du sujet, si sommaire il y a."""
ret = ""
i = 1
for sujet in self.sujets_fils:
if importeur.interpreteur.groupes.explorer_groupes_inclus(
personnage.grp, sujet.str_groupe):
ret += "\n" + indent + str(i) + ". |cmd|"
ret += sujet.titre.capitalize() + "|ff|"
            if sujet.sujets_fils:
ret += sujet.sommaire(personnage, \
indent=indent+"{}.".format(i))
i += 1
return ret
def est_lie(self, sujet):
"""Retourne True si le sujet est lié, False sinon."""
return sujet in self.__sujets_lies and self in sujet.__sujets_lies
def ajouter_lie(self, sujet):
"""Lie un sujet au courant."""
self.__sujets_lies.append(sujet)
sujet.__sujets_lies.append(self)
def supprimer_lie(self, sujet):
"""Supprime un sujet de la liste des sujets liés."""
self.__sujets_lies.remove(sujet)
sujet.__sujets_lies.remove(self)
def est_fils(self, sujet):
"""Retourne True si le sujet est fils de celui-ci, False sinon."""
return sujet in self.__sujets_fils and sujet.pere is self
def ajouter_fils(self, sujet):
"""Ajoute le sujet aux fils."""
self.__sujets_fils.append(sujet)
sujet.pere = self
def supprimer_fils(self, sujet):
"""Supprime le sujet des fils."""
self.__sujets_fils.remove(sujet)
sujet.pere = None
def echanger_fils(self, sujet, bas=False):
"""Change un fils de place vers le haut ou le bas de la liste."""
i = self.sujets_fils.index(sujet)
if i == 0 and not bas:
raise ValueError("le sujet est déjà en haut de la liste")
elif i == len(self.__sujets_fils) - 1 and bas:
raise ValueError("le sujet est déjà en bas de la liste")
del self.__sujets_fils[i]
if not bas:
self.__sujets_fils.insert(i - 1, sujet)
else:
self.__sujets_fils.insert(i + 1, sujet)
def vider(self):
"""Prépare la destruction du sujet."""
for s in self.sujets_fils:
s.pere = self.pere
if self.pere:
self.pere.ajouter_fils(s)
if self.pere is not None:
self.pere.supprimer_fils(self)
for s in self.sujets_lies:
s.supprimer_lie(self)
def afficher_pour(self, personnage):
"""Affiche le sujet d'aide pour personnage."""
nb_ti = int((31 - len(self.titre)) / 2)
ret = "|tit|" + "-" * nb_ti + "= " + self.titre.capitalize()
ret += " =" + "-" * nb_ti
ret += "|ff|\n"
if self.sujets_fils:
ret += "\nSommaire :"
ret += self.sommaire(personnage) + "\n"
ret += "\n" + self.afficher_contenu(personnage)
if self.sujets_lies:
sujets_lies = []
for sujet in self.sujets_lies:
if self.importeur.interpreteur.groupes. \
explorer_groupes_inclus(personnage.grp,
sujet.str_groupe):
sujets_lies.append(sujet)
if sujets_lies:
s = len(sujets_lies) > 1 and "s" or ""
ret += "\n\nSujet{s} lié{s} : |ent|".format(s=s)
ret += "|ff|, |ent|".join([s.titre for s in sujets_lies])
ret += "|ff|."
return ret
def afficher_contenu(self, personnage, ident="", sp="|sp|"):
"""Affiche le contenu de self et ses sujets fils."""
ret = str(self.contenu)
for i, s in enumerate(self.sujets_fils):
if importeur.interpreteur.groupes.explorer_groupes_inclus(
personnage.grp, s.str_groupe):
ret += "\n" + sp + "\n|tit|" + ident + str(i + 1) + ". " + \
s.titre.capitalize() + "|ff|"
ret += "\n\n" + s.afficher_contenu(personnage,
ident=ident + "{}.".format(i + 1), sp="\n\n")
return ret
|
stormi/tsunami
|
src/primaires/information/sujet.py
|
Python
|
bsd-3-clause
| 9,045
|
import copy
import types
from django.core.urlresolvers import reverse
from django.db.models.query import QuerySet
registry = []
def register(*args):
"""
Register urls, views, model instances and QuerySets to be potential
pages for menu items.
Example::
import simplemenu
simplemenu.register(
'package.module.view',
('package.module.view','name'),
FlatPage.objects.all(),
(FlatPage.objects.all(),'attr_containing_name'),
Products.objects.get(pk=1),
)
"""
registry.extend(args)
class PageWrapper(object):
"""
A helper-object to wrap the pages, which might be django models or
strings.
"""
def __init__(self, urlobj_or_str, name=None):
if isinstance(urlobj_or_str, types.StringTypes):
self.urlobj = None
self.urlstr = urlobj_or_str
else:
self.urlobj = urlobj_or_str
self.urlstr = str()
self._name = name
def name(self):
if self._name:
name = self._name
elif self.urlobj:
name = unicode(self.urlobj)
elif "/" in self.urlstr:
name = self.urlstr
else:
name = self.urlstr.rsplit('.', 1)[-1]
name = name.replace("_", " ").capitalize()
return name
def url(self):
if self.urlobj:
url = self.urlobj.get_absolute_url()
elif "/" in self.urlstr:
url = self.urlstr
else:
url = reverse(self.urlstr)
return url
def strkey(self):
"""
Generates somewhat unique string id of the wrappee.
"""
if self.urlobj:
return "%s.%s.pk%s" % (self.urlobj.__module__,
self.urlobj.__class__.__name__,
self.urlobj.pk)
else:
return self.urlstr
def get_registered_pages():
"""
Returns all registered pages wrapped in PageWrapper helper-object
evaluating all QuerySets along the way.
"""
pages = []
for reg in map(copy.deepcopy, registry):
name = None
if isinstance(reg, types.TupleType):
reg, name = reg
if isinstance(reg, QuerySet):
        # Name is the given attr if possible, otherwise just use unicode(obj)
if not name:
f = lambda obj: PageWrapper(obj, unicode(obj))
else:
f = lambda obj: PageWrapper(obj, getattr(obj, name, unicode(obj)))
# evaluating QuerySet objects by iteration
pages.extend(map(f, reg))
else:
pages.append(PageWrapper(reg, name))
return pages
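# A short usage sketch (FlatPage is illustrative; any object exposing a
# get_absolute_url method can be registered):
#
#   import simplemenu
#   from django.contrib.flatpages.models import FlatPage
#   simplemenu.register('package.module.view', FlatPage.objects.all())
#   for page in get_registered_pages():
#       print page.name(), page.url(), page.strkey()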
|
elpaso/django-simplemenu
|
simplemenu/pages.py
|
Python
|
bsd-3-clause
| 2,730
|
#!/usr/bin/env python
from setuptools import setup
setup(name='tagdog',
version='0.2',
description='Tag media files',
author='Albert Pham',
author_email='the.sk89q@gmail.com',
url='https://github.com/sk89q/TagDog',
install_requires=[
'titlecase',
'mutagen',
'pyechonest'
],
scripts=['tagdog.py']
)
|
sk89q/TagDog
|
setup.py
|
Python
|
bsd-3-clause
| 383
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 0, ar_order = 12);
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_LinearTrend/cycle_7/ar_12/test_artificial_32_Anscombe_LinearTrend_7_12_0.py
|
Python
|
bsd-3-clause
| 264
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to find the closest match of a string in a list"""
from __future__ import unicode_literals
import re
import difflib
import six
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance
find_best_control_match_cutoff = .6
#====================================================================
class MatchError(IndexError):
"""A suitable match could not be found"""
def __init__(self, items = None, tofind = ''):
"""Init the parent with the message"""
self.tofind = tofind
self.items = items
if self.items is None:
self.items = []
IndexError.__init__(self,
"Could not find '{0}' in '{1}'".format(tofind, self.items))
_cache = {}
# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
"""Get the match ratio of how each item in texts compared to match_against"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
for text in texts:
if (text, match_against) in _cache:
ratios[text] = _cache[(text, match_against)]
        elif (match_against, text) in _cache:
ratios[text] = _cache[(match_against, text)]
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(match_against), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text] = ratio
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
_cache[(match_against, text)] = ratios[text]
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
return ratios, best_ratio, best_text
#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
"""Return the item that best matches the search_text
* **search_text** The text to search for
* **item_texts** The list of texts to search through
* **items** The list of items corresponding (1 to 1)
to the list of texts to search through.
    * **limit_ratio** How well the text has to match the best match.
      If the best match scores lower than this, it is not
      considered a match and a MatchError is raised (default = .5)
"""
search_text = _cut_at_eol(_cut_at_tab(search_text))
text_item_map = UniqueDict()
# Clean each item, make it unique and map to
# to the item index
for text, item in zip(item_texts, items):
text_item_map[_cut_at_eol(_cut_at_tab(text))] = item
ratios, best_ratio, best_text = \
_get_match_ratios(text_item_map.keys(), search_text)
if best_ratio < limit_ratio:
raise MatchError(items = text_item_map.keys(), tofind = search_text)
return text_item_map[best_text]
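# Illustrative behaviour (items are hypothetical):
#
#   >>> find_best_match('Cancel', ['OK', 'Cancel', 'Apply'], [1, 2, 3])
#   2
#
# A search text that matches nothing above limit_ratio raises MatchError.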
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_after_eol = re.compile(r"\n.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)
def _cut_at_tab(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first tab
return _after_tab.sub("", text)
def _cut_at_eol(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first EOL
return _after_eol.sub("", text)
def _clean_non_chars(text):
"""Remove non word characters"""
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
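# e.g. _clean_non_chars(u"&Save As...") returns u"SaveAs" (illustrative).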
def is_above_or_to_left(ref_control, other_ctrl):
"""Return true if the other_ctrl is above or to the left of ref_control"""
text_r = other_ctrl.rectangle()
ctrl_r = ref_control.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
return False
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
return False
# text control top left corner is below control
# top left corner - so not to the above or left :)
if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left:
return False
return True
#====================================================================
distance_cutoff = 999
def get_non_text_control_name(ctrl, controls, text_ctrls):
"""
    Return name candidates for this control by finding the closest
    text controls above and to its left
"""
names = []
# simply look for an instance of the control in the list,
# we don't use list.index() method as it invokes __eq__
ctrl_index = 0
for i, c in enumerate(controls):
if c is ctrl:
ctrl_index = i
break
ctrl_friendly_class_name = ctrl.friendly_class_name()
if ctrl_index != 0:
prev_ctrl = controls[ctrl_index-1]
prev_ctrl_text = prev_ctrl.window_text()
if prev_ctrl.friendly_class_name() == "Static" and \
prev_ctrl.is_visible() and prev_ctrl_text and \
is_above_or_to_left(ctrl, prev_ctrl):
names.append(
prev_ctrl_text +
ctrl_friendly_class_name)
best_name = ''
    closest = distance_cutoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.rectangle()
ctrl_r = ctrl.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
continue
# calculate the distance between the controls
# at first I just calculated the distance from the top left
# corner of one control to the top left corner of the other control
# but this was not best, so as a text control should either be above
# or to the left of the control I get the distance between
# the top left of the non text control against the
# Top-Right of the text control (text control to the left)
# Bottom-Left of the text control (text control above)
# then I get the min of these two
# We do not actually need to calculate the difference here as we
# only need a comparative number. As long as we find the closest one
# the actual distance is not all that important to us.
        # this reduced the unit test run time on my machine by about
        # 1 second (from 61 s to 60 s)
# (x^2 + y^2)^.5
#distance = (
# (text_r.left - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.bottom - ctrl_r.top) ** 2) \
# ** .5 # ^.5
#distance2 = (
# (text_r.right - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.top - ctrl_r.top) ** 2) \
# ** .5 # ^.5
distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)
distance = min(distance, distance2)
# UpDown control should use Static text only because edit box text is often useless
if ctrl_friendly_class_name == "UpDown" and \
text_ctrl.friendly_class_name() == "Static" and distance < closest:
# TODO: use search in all text controls for all non-text ones
# (like Dijkstra algorithm vs Floyd one)
closest = distance
ctrl_text = text_ctrl.window_text()
if ctrl_text is None:
# the control probably doesn't exist so skip it
continue
best_name = ctrl_text + ctrl_friendly_class_name
# if this distance was closer than the last one
elif distance < closest:
closest = distance
#if text_ctrl.window_text() == '':
# best_name = ctrl_friendly_class_name + ' '.join(text_ctrl.texts()[1:2])
#else:
ctrl_text = text_ctrl.window_text()
if ctrl_text is None:
# the control probably doesn't exist so skip it
continue
best_name = ctrl_text + ctrl_friendly_class_name
names.append(best_name)
return names
#====================================================================
def get_control_names(control, allcontrols, textcontrols):
"""Returns a list of names for this control"""
names = []
# if it has a reference control - then use that
#if hasattr(control, 'ref') and control.ref:
# control = control.ref
    # Add the control based on its friendly class name
friendly_class_name = control.friendly_class_name()
names.append(friendly_class_name)
    # if it has some character text then add it based on that
    # and based on that with the friendly class name appended
cleaned = control.window_text()
# Todo - I don't like the hardcoded classnames here!
if cleaned and control.has_title:
names.append(cleaned)
names.append(cleaned + friendly_class_name)
elif control.has_title and friendly_class_name != 'TreeView':
try:
for text in control.texts()[1:]:
names.append(friendly_class_name + text)
except Exception:
#import traceback
#from .actionlogger import ActionLogger
pass #ActionLogger().log('Warning! Cannot get control.texts()') #\nTraceback:\n' + traceback.format_exc())
# so find the text of the nearest text visible control
non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# it didn't have visible text
else:
# so find the text of the nearest text visible control
non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# return the names - and make sure there are no duplicates or empty values
cleaned_names = set(names) - set([None, ""])
return cleaned_names
#====================================================================
class UniqueDict(dict):
"""A dictionary subclass that handles making its keys unique"""
def __setitem__(self, text, item):
"""Set an item of the dictionary"""
# this text is already in the map
# so we need to make it unique
if text in self:
# find next unique text after text1
unique_text = text
counter = 2
while unique_text in self:
unique_text = text + str(counter)
counter += 1
# now we also need to make sure the original item
# is under text0 and text1 also!
if text + '0' not in self:
dict.__setitem__(self, text+'0', self[text])
dict.__setitem__(self, text+'1', self[text])
# now that we don't need original 'text' anymore
# replace it with the uniq text
text = unique_text
# add our current item
dict.__setitem__(self, text, item)
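    # Illustrative behaviour: after two inserts under the same key, the
    # first item stays reachable as 'OK', 'OK0' and 'OK1', while the
    # second is stored as 'OK2'.
    #
    #   >>> d = UniqueDict()
    #   >>> d['OK'] = 'first'
    #   >>> d['OK'] = 'second'
    #   >>> sorted(d.keys())
    #   ['OK', 'OK0', 'OK1', 'OK2']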
def find_best_matches(
self,
search_text,
clean = False,
ignore_case = False):
"""Return the best matches for search_text in the items
* **search_text** the text to look for
* **clean** whether to clean non text characters out of the strings
* **ignore_case** compare strings case insensitively
"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
if ignore_case:
search_text = search_text.lower()
ratio_calc.set_seq1(search_text)
ratios = {}
best_ratio = 0
best_texts = []
ratio_offset = 1
if clean:
ratio_offset *= .9
if ignore_case:
ratio_offset *= .9
for text_ in self:
# make a copy of the text as we need the original later
text = text_
if clean:
text = _clean_non_chars(text)
if ignore_case:
text = text.lower()
# check if this item is in the cache - if yes, then retrieve it
if (text, search_text) in _cache:
ratios[text_] = _cache[(text, search_text)]
            elif (search_text, text) in _cache:
ratios[text_] = _cache[(search_text, text)]
# not in the cache - calculate it and add it to the cache
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
                # run progressively more expensive ratio checks, bailing
                # out as soon as one shows the cutoff cannot be reached
ratio = ratio_calc.real_quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.ratio() * ratio_offset
# save the match we got and store it in the cache
ratios[text_] = ratio
_cache[(text, search_text)] = ratio
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(search_text), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text_] = ratio
#print "%5s" %("%0.2f"% ratio), search_text, `text`
# if this is the best so far then update best stats
if ratios[text_] > best_ratio and \
ratios[text_] >= find_best_control_match_cutoff:
best_ratio = ratios[text_]
best_texts = [text_]
elif ratios[text_] == best_ratio:
best_texts.append(text_)
#best_ratio *= ratio_offset
return best_ratio, best_texts
#====================================================================
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# get the visible text controls so that we can get
# the closest text if the control has no text
text_ctrls = [ctrl_ for ctrl_ in controls
if ctrl_.can_be_label and ctrl_.is_visible() and ctrl_.window_text()]
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls, text_ctrls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map
#====================================================================
def find_best_control_matches(search_text, controls):
"""Returns the control that is the the best match to search_text
This is slightly differnt from find_best_match in that it builds
up the list of text items to search through using information
from each control. So for example for there is an OK, Button
then the following are all added to the search list:
"OK", "Button", "OKButton"
But if there is a ListView (which do not have visible 'text')
then it will just add "ListView".
"""
name_control_map = build_unique_dict(controls)
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl, controls)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
search_text = six.text_type(search_text)
best_ratio, best_texts = name_control_map.find_best_matches(search_text)
best_ratio_ci, best_texts_ci = \
name_control_map.find_best_matches(search_text, ignore_case = True)
best_ratio_clean, best_texts_clean = \
name_control_map.find_best_matches(search_text, clean = True)
best_ratio_clean_ci, best_texts_clean_ci = \
name_control_map.find_best_matches(
search_text, clean = True, ignore_case = True)
if best_ratio_ci > best_ratio:
best_ratio = best_ratio_ci
best_texts = best_texts_ci
if best_ratio_clean > best_ratio:
best_ratio = best_ratio_clean
best_texts = best_texts_clean
if best_ratio_clean_ci > best_ratio:
best_ratio = best_ratio_clean_ci
best_texts = best_texts_clean_ci
if best_ratio < find_best_control_match_cutoff:
raise MatchError(items = name_control_map.keys(), tofind = search_text)
return [name_control_map[best_text] for best_text in best_texts]
#
#def GetControlMatchRatio(text, ctrl):
# # get the texts for the control
# ctrl_names = get_control_names(ctrl)
#
# #get the best match for these
# matcher = UniqueDict()
# for name in ctrl_names:
# matcher[name] = ctrl
#
# best_ratio, unused = matcher.find_best_matches(text)
#
# return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
# name_control_map = UniqueDict()
#
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
#
# match_ratios, best_ratio, best_text = \
# _get_match_ratios(name_control_map.keys(), search_text)
#
# return match_ratios, best_ratio, best_text,
|
pywinauto/pywinauto
|
pywinauto/findbestmatch.py
|
Python
|
bsd-3-clause
| 20,676
|
# -*- coding: utf-8 -*-
__version__ = '1.3.7'
default_app_config = 'aldryn_redirects.apps.AldrynRedirects'
|
aldryn/aldryn-redirects
|
aldryn_redirects/__init__.py
|
Python
|
bsd-3-clause
| 109
|
import json
import shutil
import sys
import warnings
from itertools import zip_longest
import s3fs
from smart_open import open
from tqdm import tqdm
def session_type():
if 'IPython' not in sys.modules:
# IPython hasn't been imported, definitely not
return "python"
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
if getattr(get_ipython(), 'kernel', None) is not None:
return "kernel"
return "ipython"
def make_tqdm_iterator(**kwargs):
options = {
"file": sys.stdout,
"leave": True
}
options.update(kwargs)
if session_type() == 'kernel':
# from IPython import display
# capture_stderr = StringIO()
# with RedirectStdStreams(stderr=capture_stderr):
# try:
# iterator = tqdm_notebook(**options)
# except:
# failed = True
# else:
# failed = False
# err_out = capture_stderr.getvalue()
# capture_stderr.close()
# if failed or err_out.lower().find("widget javascript not detected") \
# >-1:
# display.clear_output(wait=True)
# iterator = tqdm(**options)
iterator = tqdm(**options)
else:
iterator = tqdm(**options)
return iterator
def get_relationship_variable_id(path):
_, r = path[0]
child_link_name = r.child_variable.id
for _, r in path[1:]:
parent_link_name = child_link_name
child_link_name = '%s.%s' % (r.parent_entity.id,
parent_link_name)
return child_link_name
def find_descendents(cls):
"""
A generator which yields all descendent classes of the given class
(including the given class)
Args:
cls (Class): the class to find descendents of
"""
yield cls
for sub in cls.__subclasses__():
for c in find_descendents(sub):
yield c
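# A small illustrative example of the traversal order:
#
#   >>> class A(object): pass
#   >>> class B(A): pass
#   >>> class C(B): pass
#   >>> [cls.__name__ for cls in find_descendents(A)]
#   ['A', 'B', 'C']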
def check_schema_version(cls, cls_type):
if isinstance(cls_type, str):
if cls_type == 'entityset':
from featuretools.entityset.serialize import SCHEMA_VERSION
version_string = cls.get('schema_version')
elif cls_type == 'features':
from featuretools.feature_base.features_serializer import SCHEMA_VERSION
version_string = cls.features_dict['schema_version']
current = SCHEMA_VERSION.split('.')
saved = version_string.split('.')
    warning_text_upgrade = ('The schema version of the saved %s '
                            '(%s) is greater than the latest supported (%s). '
                            'You may need to upgrade featuretools. Attempting to load %s ...'
                            % (cls_type, version_string, SCHEMA_VERSION, cls_type))
    for c_num, s_num in zip_longest(current, saved, fillvalue=0):
        # compare version components numerically, not lexicographically
        c_num, s_num = int(c_num), int(s_num)
        if c_num > s_num:
            break
        elif c_num < s_num:
            warnings.warn(warning_text_upgrade)
            break
    warning_text_outdated = ('The schema version of the saved %s '
                             '(%s) is no longer supported by this version '
                             'of featuretools. Attempting to load %s ...'
                             % (cls_type, version_string, cls_type))
    # Check if saved has older major version.
    if int(current[0]) > int(saved[0]):
        warnings.warn(warning_text_outdated)
def use_smartopen_es(file_path, path, transport_params=None, read=True):
if read:
with open(path, "rb", transport_params=transport_params) as fin:
with open(file_path, 'wb') as fout:
shutil.copyfileobj(fin, fout)
else:
with open(file_path, 'rb') as fin:
with open(path, 'wb', transport_params=transport_params) as fout:
shutil.copyfileobj(fin, fout)
def use_s3fs_es(file_path, path, read=True):
s3 = s3fs.S3FileSystem(anon=True)
if read:
s3.get(path, file_path)
else:
s3.put(file_path, path)
def use_smartopen_features(path, features_dict=None, transport_params=None, read=True):
if read:
with open(path, 'r', encoding='utf-8', transport_params=transport_params) as f:
features_dict = json.load(f)
return features_dict
else:
with open(path, "w", transport_params=transport_params) as f:
json.dump(features_dict, f)
def use_s3fs_features(file_path, features_dict=None, read=True):
s3 = s3fs.S3FileSystem(anon=True)
if read:
with s3.open(file_path, "r", encoding='utf-8') as f:
features_dict = json.load(f)
return features_dict
else:
with s3.open(file_path, "w") as f:
features = json.dumps(features_dict, ensure_ascii=False)
f.write(features)
|
Featuretools/featuretools
|
featuretools/utils/gen_utils.py
|
Python
|
bsd-3-clause
| 4,834
|
import numpy as nm
try:
import matplotlib.pyplot as plt
import matplotlib as mpl
except (ImportError, RuntimeError):
plt = mpl = None
#print 'matplotlib import failed!'
from sfepy.base.base import output, pause
def spy(mtx, eps=None, color='b', **kwargs):
"""
Show sparsity structure of a `scipy.sparse` matrix.
"""
aux = mtx.tocoo()
ij, val = nm.concatenate((aux.row[:,nm.newaxis],
aux.col[:,nm.newaxis]), 1), aux.data
n_item = aux.getnnz()
n_row, n_col = aux.shape
if eps is not None:
output('using eps =', eps)
ij = nm.compress(nm.absolute(val) > eps, ij, 0)
n_item = ij.shape[0]
else:
output('showing all')
output('n_item:', n_item)
if n_item:
args = {'marker' : '.', 'markersize' : 0.5, 'markeredgewidth' : 0.5}
args.update(kwargs)
plt.plot(ij[:,1] + 0.5, ij[:,0] + 0.5, color, linestyle='None',
**args)
plt.axis([-0.5, n_row+0.5, -0.5, n_col+0.5])
plt.axis('image')
plt.xlabel(r'%d x %d: %d nnz, %.2f%% fill'
% (n_row, n_col, n_item, 100. * n_item /
(float(n_row) * float(n_col))))
ax = plt.gca()
ax.set_ylim(ax.get_ylim()[::-1])
def spy_and_show(mtx, **kwargs):
spy(mtx, **kwargs)
plt.show()
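# Usage sketch (the random sparse matrix here is purely illustrative):
#
#   import scipy.sparse as sps
#   mtx = sps.rand(100, 100, density=0.02, format='csr')
#   spy_and_show(mtx, color='r')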
##
# 13.12.2005, c
def print_matrix_diff( title, legend, mtx1, mtx2, mtx_da, mtx_dr, iis ):
import copy
print '%s: ir, ic, %s, %s, adiff, rdiff' % ((title,) + tuple( legend ))
aux = copy.copy(mtx_da)
aux.data = nm.ones(mtx_da.data.shape[0])
irs, ics = aux.nonzero()
for ii in iis:
ir, ic = irs[ii], ics[ii]
print '%5d %5d %11.4e %11.4e %9.2e %9.2e'\
% (ir, ic, mtx1[ir,ic], mtx2[ir,ic], mtx_da[ir,ic], mtx_dr[ir,ic] )
print 'total: %d' % len( iis )
##
# 13.12.2005, c
# 14.12.2005
# 15.12.2005
# 18.07.2007
def plot_matrix_diff( mtx1, mtx2, delta, legend, mode ):
eps = 1e-16
print nm.amin( mtx1.data ), nm.amin( mtx2.data )
print nm.amax( mtx1.data ), nm.amax( mtx2.data )
mtx_da = mtx1.copy() # To preserve structure of mtx1.
mtx_da.data[:] = nm.abs( mtx1.data - mtx2.data )
mtx_dr = mtx_da.copy()
mtx_dr.data[:] = -1
iin = nm.where( nm.abs( mtx1.data ) > eps )[0]
mtx_dr.data[iin] = mtx_da.data[iin] / nm.abs( mtx1.data[iin] )
print nm.amin( mtx_da.data ), nm.amax( mtx_da.data )
print nm.amin( mtx_dr.data ), nm.amax( mtx_dr.data )
epsilon = max( 1e-5, 10 * delta )
print 'epsilon:', epsilon
pause()
ija = nm.where( mtx_da.data > epsilon )[0]
print_matrix_diff( '--- absolute diff', legend,
mtx1, mtx2, mtx_da, mtx_dr, ija )
pause()
iin = nm.where( nm.abs( mtx1.data ) > epsilon )[0]
ij = nm.where( nm.abs( mtx_dr.data[iin] ) > epsilon )[0]
ij = iin[ij]
print_matrix_diff( '--- relative diff', legend,
mtx1, mtx2, mtx_da, mtx_dr, ij )
pause()
ijb = nm.intersect1d( ija, ij )
print_matrix_diff( '--- a-r', legend,
mtx1, mtx2, mtx_da, mtx_dr, ijb )
pause()
ii = nm.argsort( mtx_dr.data[ijb] )
n_s = min( 20, len( ii ) )
ijbs = ijb[ii[-1:-n_s-1:-1]]
print_matrix_diff( '--- a-r 20 biggest (by r)', legend,
mtx1, mtx2, mtx_da, mtx_dr, ijbs )
pause()
if mode < 2: return
h = 100
plt.figure( h ); plt.clf()
plt.axes( [0.04, 0.6, 0.3, 0.3], frameon = True )
spy( mtx_da, epsilon )
plt.title( 'absolute diff' )
plt.axes( [0.68, 0.6, 0.3, 0.3], frameon = True )
iia = nm.where( mtx_dr.data )[0]
mtx_dr.data[nm.setdiff1d( iia, iin )] = 0.0
spy( mtx_dr, epsilon )
plt.title( 'relative diff' )
plt.axes( [0.36, 0.6, 0.3, 0.3], frameon = True )
mtx = mtx_dr.copy()
mtx.data[:] = 0.0
ii = nm.intersect1d( nm.where( mtx_dr.data > epsilon )[0],
nm.where( mtx_da.data > epsilon )[0] )
mtx.data[ii] = 1.0
spy( mtx, epsilon )
plt.title( 'a-r intersection' )
plt.axes( [0.04, 0.08, 0.42, 0.42], frameon = True )
spy( mtx1, epsilon )
plt.title( legend[0] )
plt.axes( [0.54, 0.08, 0.42, 0.42], frameon = True )
spy( mtx2, epsilon )
plt.title( legend[1] )
plt.show()
##
# 02.05.2006, c
def set_axes_font_size( ax, size ):
labels = ax.get_xticklabels() + ax.get_yticklabels()
for label in labels:
label.set_size( size )
##
# 27.09.2006, c
def font_size( size ):
return mpl.font_manager.FontProperties( size = size )
##
# 28.08.2007, c
def iplot( *args, **kwargs ):
plt.ion()
plt.plot( *args, **kwargs )
plt.draw()
plt.ioff()
pause()
|
RexFuzzle/sfepy
|
sfepy/base/plotutils.py
|
Python
|
bsd-3-clause
| 4,703
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-16 12:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20161116_1209'),
]
operations = [
migrations.AddField(
model_name='workertype',
name='is_active',
field=models.BooleanField(default=True),
),
]
|
smn/blinky
|
blinky/core/migrations/0005_workertype_is_active.py
|
Python
|
bsd-3-clause
| 455
|
from neurotune.controllers import SineWaveController
import sys
from neurotune import evaluators
from neurotune import optimizers
from neurotune import utils
if __name__ == "__main__":
showPlots = not ("-nogui" in sys.argv)
verbose = not ("-silent" in sys.argv)
sim_vars = {"amp": 65, "period": 250, "offset": -10}
min_constraints = [60, 150, -20]
max_constraints = [100, 300, 20]
swc = SineWaveController(1000, 0.1)
times, volts = swc.run_individual(sim_vars, showPlots, False)
weights = {"value_200": 1.0, "value_400": 1.0, "value_812": 1.0}
data_analysis = evaluators.PointBasedAnalysis(volts, times)
targets = data_analysis.analyse(weights.keys())
print("Target data: %s" % targets)
# make an evaluator
my_evaluator = evaluators.PointValueEvaluator(
controller=swc, parameters=sim_vars.keys(), weights=weights, targets=targets
)
population_size = 20
max_evaluations = 300
num_selected = 10
num_offspring = 5
mutation_rate = 0.5
num_elites = 1
# make an optimizer
my_optimizer = optimizers.CustomOptimizerA(
max_constraints,
min_constraints,
my_evaluator,
population_size=population_size,
max_evaluations=max_evaluations,
num_selected=num_selected,
num_offspring=num_offspring,
num_elites=num_elites,
mutation_rate=mutation_rate,
seeds=None,
verbose=verbose,
)
# run the optimizer
best_candidate, fitness = my_optimizer.optimize(do_plot=False, seed=1234567)
keys = list(sim_vars.keys())
for i in range(len(best_candidate)):
sim_vars[keys[i]] = best_candidate[i]
fit_times, fit_volts = swc.run_individual(sim_vars, showPlots, False)
if showPlots:
utils.plot_generation_evolution(sim_vars.keys(), sim_vars)
|
NeuralEnsemble/neurotune
|
examples/example_4/SineWavePointOptimizer.py
|
Python
|
bsd-3-clause
| 1,854
|
import json
from django.core import serializers
from django.core.serializers.json import DjangoJSONEncoder
from .base import Binding
from ..generic.websockets import WebsocketDemultiplexer
from ..sessions import enforce_ordering
class WebsocketBinding(Binding):
"""
Websocket-specific outgoing binding subclass that uses JSON encoding
and the built-in JSON/WebSocket multiplexer.
To implement outbound, implement:
- group_names, which returns a list of group names to send to
To implement inbound, implement:
- has_permission, which says if the user can do the action on an instance
Optionally also implement:
- serialize_data, which returns JSON-safe data from a model instance
- create, which takes incoming data and makes a model instance
- update, which takes incoming data and a model instance and applies one to the other
"""
# Mark as abstract
model = None
# Stream multiplexing name
stream = None
# Decorators
strict_ordering = False
slight_ordering = False
# Outbound
@classmethod
def encode(cls, stream, payload):
return WebsocketDemultiplexer.encode(stream, payload)
def serialize(self, instance, action):
payload = {
"action": action,
"pk": instance.pk,
"data": self.serialize_data(instance),
"model": self.model_label,
}
return payload
def serialize_data(self, instance):
"""
Serializes model data into JSON-compatible types.
"""
if self.fields == ['__all__']:
fields = None
else:
fields = self.fields
data = serializers.serialize('json', [instance], fields=fields)
return json.loads(data)[0]['fields']
# Inbound
@classmethod
def get_handler(cls):
"""
Adds decorators to trigger_inbound.
"""
# Get super-handler
handler = super(WebsocketBinding, cls).get_handler()
# Ordering decorators
if cls.strict_ordering:
return enforce_ordering(handler, slight=False)
elif cls.slight_ordering:
return enforce_ordering(handler, slight=True)
else:
return handler
def deserialize(self, message):
"""
        You must hook this up behind a deserializer, so we expect the
        JSON to have been decoded already.
"""
action = message['action']
pk = message.get('pk', None)
data = message.get('data', None)
return action, pk, data
def _hydrate(self, pk, data):
"""
Given a raw "data" section of an incoming message, returns a
DeserializedObject.
"""
s_data = [
{
"pk": pk,
"model": self.model_label,
"fields": data,
}
]
# TODO: Avoid the JSON roundtrip by using encoder directly?
return list(serializers.deserialize("json", json.dumps(s_data)))[0]
def create(self, data):
self._hydrate(None, data).save()
def update(self, pk, data):
instance = self.model.objects.get(pk=pk)
hydrated = self._hydrate(pk, data)
for name in data.keys():
if name in self.fields or self.fields == ['__all__']:
setattr(instance, name, getattr(hydrated.object, name))
instance.save()
class WebsocketBindingWithMembers(WebsocketBinding):
"""
    Outgoing binding subclass based on WebsocketBinding.
    Additionally enables sending of member variables, properties and methods.
    Member methods can only have self as a required argument.
    Just add the name of the member to the send_members list.
Example:
class MyModel(models.Model):
my_field = models.IntegerField(default=0)
my_var = 3
@property
def my_property(self):
return self.my_var + self.my_field
def my_function(self):
            return self.my_var - self.my_field
class MyBinding(BindingWithMembersMixin, WebsocketBinding):
model = MyModel
stream = 'mystream'
send_members = ['my_var', 'my_property', 'my_function']
"""
model = None
send_members = []
encoder = DjangoJSONEncoder()
def serialize_data(self, instance):
data = super(WebsocketBindingWithMembers, self).serialize_data(instance)
member_data = {}
for m in self.send_members:
member = instance
for s in m.split('.'):
member = getattr(member, s)
if callable(member):
member_data[m.replace('.', '__')] = member()
else:
member_data[m.replace('.', '__')] = member
member_data = json.loads(self.encoder.encode(member_data))
# the update never overwrites any value from data,
# because an object can't have two attributes with the same name
data.update(member_data)
return data
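# A minimal sketch of a concrete binding built on WebsocketBinding.
# Everything below is illustrative only: the Note model, the stream
# name, the group name and the permission rule are hypothetical and
# not part of this module.
#
# class NoteBinding(WebsocketBinding):
#     model = Note
#     stream = 'notes'
#     fields = ['title']
#
#     @classmethod
#     def group_names(cls, instance):
#         # every change to a Note is sent to this group
#         return ['notes-updates']
#
#     def has_permission(self, user, action, pk):
#         # only logged-in users may create/update/delete
#         return user.is_authenticated()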
|
linuxlewis/channels
|
channels/binding/websockets.py
|
Python
|
bsd-3-clause
| 5,020
|
"""
pakbase module
This module contains the base package class from which
all of the other packages inherit.
"""
from __future__ import print_function
import os
import webbrowser as wb
import numpy as np
from numpy.lib.recfunctions import stack_arrays
from .modflow.mfparbc import ModflowParBc as mfparbc
from .utils import Util2d, Util3d, Transient2d, MfList, check
class Package(object):
"""
Base package class from which most other packages are derived.
"""
def __init__(self, parent, extension='glo', name='GLOBAL', unit_number=1,
extra='', filenames=None, allowDuplicates=False):
"""
Package init
"""
self.parent = parent # To be able to access the parent modflow object's attributes
if (not isinstance(extension, list)):
extension = [extension]
self.extension = []
self.file_name = []
for idx, e in enumerate(extension):
self.extension.append(e)
file_name = self.parent.name + '.' + e
if filenames is not None:
try:
if filenames[idx] is not None:
file_name = filenames[idx]
except:
pass
self.file_name.append(file_name)
self.fn_path = os.path.join(self.parent.model_ws, self.file_name[0])
if (not isinstance(name, list)):
name = [name]
self.name = name
if (not isinstance(unit_number, list)):
unit_number = [unit_number]
self.unit_number = unit_number
if (not isinstance(extra, list)):
self.extra = len(self.unit_number) * [extra]
else:
self.extra = extra
self.url = 'index.html'
self.allowDuplicates = allowDuplicates
self.acceptable_dtypes = [int, np.float32, str]
return
def __repr__(self):
s = self.__doc__
exclude_attributes = ['extension', 'heading', 'name', 'parent', 'url']
for attr, value in sorted(self.__dict__.items()):
if not (attr in exclude_attributes):
                if isinstance(value, list):
                    if len(value) == 1:
                        s = s + ' {0:s} = {1:s}\n'.format(attr, str(value[0]))
                    else:
                        s = s + ' {0:s} (list, items = {1:d})\n'.format(
                            attr, len(value))
                elif isinstance(value, np.ndarray):
                    s = s + ' {0:s} (array, shape = {1:s})\n'.format(
                        attr, value.shape.__str__()[1:-1])
                else:
                    s = s + ' {0:s} = {1:s} ({2:s})\n'.format(
                        attr, str(value), str(type(value))[7:-2])
        return s
    def __getitem__(self, item):
        if hasattr(self, 'stress_period_data'):
            # this check is needed because stress_period_data is also used
            # in Oc and Oc88 but is not a MfList there
            if isinstance(self.stress_period_data, MfList):
                if not isinstance(item, list) and not isinstance(item, tuple):
                    assert item in list(
                        self.stress_period_data.data.keys()), \
                        "package.__getitem__() kper " + str(item) + \
                        " not in data.keys()"
                    return self.stress_period_data[item]
                else:
                    if item[1] not in self.dtype.names:
                        raise Exception(
                            "package.__getitem__(): item '" + str(item) +
                            "' not in dtype names " + str(self.dtype.names))
                    assert item[0] in list(
                        self.stress_period_data.data.keys()), \
                        "package.__getitem__() kper " + str(item[0]) + \
                        " not in data.keys()"
                    if self.stress_period_data.vtype[item[0]] == np.recarray:
                        return self.stress_period_data[item[0]][item[1]]
def __setitem__(self, key, value):
raise NotImplementedError("package.__setitem__() not implemented")
def __setattr__(self, key, value):
var_dict = vars(self)
if key in list(var_dict.keys()):
old_value = var_dict[key]
if isinstance(old_value, Util2d):
value = Util2d(self.parent, old_value.shape,
old_value.dtype, value,
name=old_value.name,
fmtin=old_value.format.fortran,
locat=old_value.locat,
array_free_format=old_value.format.array_free_format)
elif isinstance(old_value, Util3d):
value = Util3d(self.parent, old_value.shape,
old_value.dtype, value,
name=old_value.name_base,
fmtin=old_value.fmtin,
locat=old_value.locat,
array_free_format=old_value.array_free_format)
elif isinstance(old_value, Transient2d):
value = Transient2d(self.parent, old_value.shape,
old_value.dtype, value,
name=old_value.name_base,
fmtin=old_value.fmtin,
locat=old_value.locat)
elif isinstance(old_value, MfList):
value = MfList(self, dtype=old_value.dtype,
data=value)
elif isinstance(old_value, list):
if len(old_value) > 0:
if isinstance(old_value[0], Util3d):
new_list = []
for vo, v in zip(old_value, value):
new_list.append(Util3d(self.parent, vo.shape,
vo.dtype, v,
name=vo.name_base,
fmtin=vo.fmtin,
locat=vo.locat))
value = new_list
elif isinstance(old_value[0], Util2d):
new_list = []
for vo, v in zip(old_value, value):
new_list.append(Util2d(self.parent, vo.shape,
vo.dtype, v,
name=vo.name,
fmtin=vo.fmtin,
locat=vo.locat))
value = new_list
super(Package, self).__setattr__(key, value)
def export(self, f, **kwargs):
from flopy import export
return export.utils.package_helper(f, self, **kwargs)
@staticmethod
def add_to_dtype(dtype, field_names, field_types):
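        # extend an existing numpy dtype with extra (name, type) fields;
        # a single field_type is broadcast across all field_names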
if not isinstance(field_names, list):
field_names = [field_names]
if not isinstance(field_types, list):
field_types = [field_types] * len(field_names)
newdtypes = [dtype]
for field_name, field_type in zip(field_names, field_types):
tempdtype = np.dtype([(field_name, field_type)])
newdtypes.append(tempdtype)
newdtype = sum((dtype.descr for dtype in newdtypes), [])
newdtype = np.dtype(newdtype)
return newdtype
def check(self, f=None, verbose=True, level=1):
"""
Check package data for common errors.
Parameters
----------
f : str or file handle
String defining file name or file handle for summary file
            of check method output. If a string is passed a file handle
is created. If f is None, check method does not write
results to a summary file. (default is None)
verbose : bool
Boolean flag used to determine if check method results are
written to the screen
level : int
Check method analysis level. If level=0, summary checks are
performed. If level=1, full checks are performed.
Returns
-------
None
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow.load('model.nam')
>>> m.dis.check()
"""
chk = None
if self.__dict__.get('stress_period_data', None) is not None and \
self.name[0] != 'OC':
spd_inds_valid = True
chk = check(self, f=f, verbose=verbose, level=level)
for per in self.stress_period_data.data.keys():
if isinstance(self.stress_period_data.data[per], np.recarray):
spd = self.stress_period_data.data[per]
inds = (spd.k, spd.i, spd.j) if self.parent.structured \
else (spd.node)
# General BC checks
# check for valid cell indices
spd_inds_valid = chk._stress_period_data_valid_indices(spd)
# first check for and list nan values
chk._stress_period_data_nans(spd)
if spd_inds_valid:
# next check for BCs in inactive cells
chk._stress_period_data_inactivecells(spd)
# More specific BC checks
# check elevations in the ghb, drain, and riv packages
if self.name[0] in check.bc_stage_names.keys():
# check that bc elevations are above model cell bottoms
# also checks for nan values
elev_name = chk.bc_stage_names[self.name[0]]
botms = self.parent.dis.botm.array[inds]
chk.stress_period_data_values(spd, spd[elev_name] < botms,
col=elev_name,
error_name='BC elevation below cell bottom',
error_type='Error')
chk.summarize()
# check property values in upw and lpf packages
elif self.name[0] in ['UPW', 'LPF']:
chk = check(self, f=f, verbose=verbose, level=level)
active = chk.get_active()
# check for confined layers above convertable layers
confined = False
thickstrt = False
for option in self.options:
if option.lower() == 'thickstrt':
thickstrt = True
for i, l in enumerate(self.laytyp.array.tolist()):
if l == 0 or l < 0 and thickstrt:
confined = True
continue
if confined and l > 0:
chk._add_to_summary(type='Warning',
desc='\r LAYTYP: unconfined (convertible) ' + \
'layer below confined layer')
# check for zero or negative values of hydraulic conductivity, anisotropy,
# and quasi-3D confining beds
kparams = {'hk': 'horizontal hydraulic conductivity',
'vka': 'vertical hydraulic conductivity'}
for kp, name in kparams.items():
chk.values(self.__dict__[kp].array,
active & (self.__dict__[kp].array <= 0),
'zero or negative {} values'.format(name), 'Error')
# check for negative hani
chk.values(self.__dict__['hani'].array,
active & (self.__dict__['hani'].array < 0),
'negative horizontal anisotropy values', 'Error')
def check_thresholds(array, active, thresholds, name):
"""Checks array against min and max threshold values."""
mn, mx = thresholds
chk.values(array, active & (array < mn),
'{} values below checker threshold of {}'
.format(name, mn), 'Warning')
chk.values(array, active & (array > mx),
'{} values above checker threshold of {}'
.format(name, mx), 'Warning')
# check for unusually high or low values of hydraulic conductivity
            # convert vertical anisotropy to Kv for checking
            if self.layvka.sum() > 0:
                vka = self.vka.array.copy()
                for l in range(vka.shape[0]):
                    vka[l] *= self.hk.array[l] \
                        if self.layvka.array[l] != 0 else 1
check_thresholds(vka, active,
chk.property_threshold_values['vka'],
kparams.pop('vka'))
for kp, name in kparams.items():
check_thresholds(self.__dict__[kp].array, active,
chk.property_threshold_values[kp],
name)
# check vkcb if there are any quasi-3D layers
if self.parent.dis.laycbd.sum() > 0:
# pad non-quasi-3D layers in vkcb array with ones so they won't fail checker
vkcb = self.vkcb.array.copy()
for l in range(self.vkcb.shape[0]):
if self.parent.dis.laycbd[l] == 0:
                        # assign 1 instead of zero as a default value that
                        # won't violate the checker (allows for the same
                        # structure as the other checks)
                        vkcb[l, :, :] = 1
chk.values(vkcb, active & (vkcb <= 0),
'zero or negative quasi-3D confining bed Kv values',
'Error')
check_thresholds(vkcb, active,
chk.property_threshold_values['vkcb'],
'quasi-3D confining bed Kv')
if not np.all(self.parent.dis.steady): # only check storage if model is transient
# do the same for storage if the model is transient
sarrays = {'ss': self.ss.array, 'sy': self.sy.array}
if 'STORAGECOEFFICIENT' in self.options: # convert to specific for checking
                    chk._add_to_summary(
                        type='Warning',
                        desc='\r STORAGECOEFFICIENT option is activated, '
                             'storage values are read as storage coefficients')
sarrays['ss'] /= self.parent.dis.thickness.array
sarrays['sy'] /= self.parent.dis.thickness.array
chk.values(sarrays['ss'], active & (sarrays['ss'] < 0),
'zero or negative specific storage values', 'Error')
check_thresholds(sarrays['ss'], active,
chk.property_threshold_values['ss'],
'specific storage')
# only check specific yield for convertible layers
inds = np.array(
                        [True if l > 0 or l < 0 and 'THICKSTRT' in self.options
else False for l in self.laytyp])
sarrays['sy'] = sarrays['sy'][inds, :, :]
active = active[inds, :, :]
chk.values(sarrays['sy'], active & (sarrays['sy'] < 0),
'zero or negative specific yield values', 'Error')
check_thresholds(sarrays['sy'], active,
chk.property_threshold_values['sy'],
'specific yield')
chk.summarize()
else:
txt = 'check method not implemented for {} Package.'.format(
self.name[0])
if f is not None:
if isinstance(f, str):
pth = os.path.join(self.parent.model_ws, f)
f = open(pth, 'w')
f.write(txt)
f.close()
if verbose:
print(txt)
return chk
    def level1_arraylist(self, idx, v, name, txt):
        # append a fixed-width table of the cell indices in idx and the
        # corresponding values in v to txt (indices are printed 1-based)
        ndim = v.ndim
        if ndim == 3:
            kon = -1
            for [k, i, j] in idx:
                if k > kon:
                    kon = k
                    txt += ' {:>10s}{:>10s}{:>10s}{:>15s}\n'.format(
                        'layer', 'row', 'column',
                        name[k].lower().replace(' layer ', ''))
                txt += ' {:10d}{:10d}{:10d}{:15.7g}\n'.format(
                    k + 1, i + 1, j + 1, v[k, i, j])
        elif ndim == 2:
            txt += ' {:>10s}{:>10s}{:>15s}\n'.format(
                'row', 'column', name[0].lower().replace(' layer ', ''))
            for [i, j] in idx:
                txt += ' {:10d}{:10d}{:15.7g}\n'.format(i + 1, j + 1, v[i, j])
        elif ndim == 1:
            txt += ' {:>10s}{:>15s}\n'.format('number', name[0])
            for i in idx:
                txt += ' {:10d}{:15.7g}\n'.format(i + 1, v[i])
        return txt
def plot(self, **kwargs):
"""
Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
package input data
Parameters
----------
**kwargs : dict
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
                MODFLOW zero-based layer number to return. If None, then
                all layers will be included. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is
zero)
key : str
MfList dictionary key. (default is None)
Returns
----------
axes : list
            An empty list is returned if filename_base is not None. Otherwise
            a list of matplotlib.pyplot.axis objects is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.dis.plot()
"""
# valid keyword arguments
if 'kper' in kwargs:
kper = kwargs.pop('kper')
else:
kper = 0
if 'filename_base' in kwargs:
fileb = kwargs.pop('filename_base')
else:
fileb = None
if 'mflay' in kwargs:
mflay = kwargs.pop('mflay')
else:
mflay = None
if 'file_extension' in kwargs:
fext = kwargs.pop('file_extension')
fext = fext.replace('.', '')
else:
fext = 'png'
if 'key' in kwargs:
key = kwargs.pop('key')
else:
key = None
if 'initial_fig' in kwargs:
ifig = int(kwargs.pop('initial_fig'))
else:
ifig = 0
inc = self.parent.nlay
if mflay is not None:
inc = 1
axes = []
for item, value in self.__dict__.items():
caxs = []
if isinstance(value, MfList):
if self.parent.verbose:
print('plotting {} package MfList instance: {}'.format(
self.name[0], item))
if key is None:
names = ['{} location stress period {} layer {}'.format(
self.name[0], kper + 1, k + 1)
for k in range(self.parent.nlay)]
colorbar = False
else:
names = ['{} {} data stress period {} layer {}'.format(
self.name[0], key, kper + 1, k + 1)
for k in range(self.parent.nlay)]
colorbar = True
fignum = list(range(ifig, ifig + inc))
ifig = fignum[-1] + 1
caxs.append(value.plot(key, names, kper,
filename_base=fileb,
file_extension=fext, mflay=mflay,
fignum=fignum, colorbar=colorbar,
**kwargs))
elif isinstance(value, Util3d):
if self.parent.verbose:
print('plotting {} package Util3d instance: {}'.format(
self.name[0], item))
# fignum = list(range(ifig, ifig + inc))
fignum = list(range(ifig, ifig + value.shape[0]))
ifig = fignum[-1] + 1
caxs.append(
value.plot(filename_base=fileb, file_extension=fext,
mflay=mflay,
fignum=fignum, colorbar=True))
elif isinstance(value, Util2d):
if len(value.shape) == 2:
if self.parent.verbose:
print('plotting {} package Util2d instance: {}'.format(
self.name[0], item))
fignum = list(range(ifig, ifig + 1))
ifig = fignum[-1] + 1
caxs.append(
value.plot(filename_base=fileb,
file_extension=fext,
fignum=fignum, colorbar=True))
elif isinstance(value, Transient2d):
if self.parent.verbose:
print(
'plotting {} package Transient2d instance: {}'.format(
self.name[0], item))
fignum = list(range(ifig, ifig + inc))
ifig = fignum[-1] + 1
caxs.append(
value.plot(filename_base=fileb, file_extension=fext,
kper=kper,
fignum=fignum, colorbar=True))
elif isinstance(value, list):
for v in value:
if isinstance(v, Util3d):
if self.parent.verbose:
print(
'plotting {} package Util3d instance: {}'.format(
self.name[0], item))
fignum = list(range(ifig, ifig + inc))
ifig = fignum[-1] + 1
caxs.append(
v.plot(filename_base=fileb,
file_extension=fext,
mflay=mflay,
fignum=fignum, colorbar=True))
else:
pass
        # unroll nested lists of axes into a single list of axes
if isinstance(caxs, list):
for c in caxs:
if isinstance(c, list):
for cc in c:
axes.append(cc)
else:
axes.append(c)
else:
axes.append(caxs)
return axes
def to_shapefile(self, filename, **kwargs):
"""
Export 2-D, 3-D, and transient 2-D model data to shapefile (polygons).
Adds an attribute for each layer in each data array
Parameters
----------
filename : str
Shapefile name to write
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.lpf.to_shapefile('test_hk.shp')
"""
import warnings
warnings.warn("to_shapefile() is deprecated. use .export()")
self.export(filename)
def webdoc(self):
if self.parent.version == 'mf2k':
wb.open(
'http://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/' + self.url)
elif self.parent.version == 'mf2005':
wb.open(
'http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/' + self.url)
elif self.parent.version == 'ModflowNwt':
wb.open(
'http://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/' + self.url)
def write_file(self, check=False):
"""
Every Package needs its own write_file function
"""
print('IMPLEMENTATION ERROR: write_file must be overloaded')
return
@staticmethod
def load(model, pack_type, f, nper=None, pop_key_list=None, check=True,
unitnumber=None, ext_unit_dict=None):
"""
The load method has not been implemented for this package.
"""
bc_pack_types = []
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# check for parameters
nppak = 0
if "parameter" in line.lower():
t = line.strip().split()
# assert int(t[1]) == 0,"Parameters are not supported"
nppak = np.int(t[1])
mxl = 0
if nppak > 0:
mxl = np.int(t[2])
if model.verbose:
print(' Parameters detected. Number of parameters = ',
nppak)
line = f.readline()
# dataset 2a
t = line.strip().split()
ipakcb = 0
try:
ipakcb = int(t[1])
except:
pass
options = []
aux_names = []
if len(t) > 2:
it = 2
while it < len(t):
toption = t[it]
            if toption.lower() == 'noprint':
options.append(toption)
elif 'aux' in toption.lower():
options.append(' '.join(t[it:it + 2]))
aux_names.append(t[it + 1].lower())
it += 1
it += 1
# set partype
# and read phiramp for modflow-nwt well package
partype = ['cond']
if "modflowwel" in str(pack_type).lower():
partype = ['flux']
if "nwt" in model.version.lower() and 'flopy.modflow.mfwel.modflowwel'.lower() in str(pack_type).lower():
specify = False
ipos = f.tell()
line = f.readline()
# test for specify keyword if a NWT well file - This is a temporary hack
if 'specify' in line.lower():
specify = True
t = line.strip().split()
phiramp = np.float32(t[1])
try:
phiramp_unit = np.int32(t[2])
except:
phiramp_unit = 2
options.append('specify {} {} '.format(phiramp, phiramp_unit))
else:
f.seek(ipos)
elif 'flopy.modflow.mfchd.modflowchd'.lower() in str(
pack_type).lower():
partype = ['shead', 'ehead']
# read parameter data
if nppak > 0:
dt = pack_type.get_empty(1, aux_names=aux_names,
structured=model.structured).dtype
pak_parms = mfparbc.load(f, nppak, dt, model.verbose)
# pak_parms = mfparbc.load(f, nppak, len(dt.names))
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# read data for every stress period
bnd_output = None
stress_period_data = {}
for iper in range(nper):
if model.verbose:
                print("   loading " + str(pack_type) +
                      " for kper {0:5d}".format(iper + 1))
line = f.readline()
if line == '':
break
t = line.strip().split()
itmp = int(t[0])
itmpp = 0
try:
itmpp = int(t[1])
except:
pass
if itmp == 0:
bnd_output = None
current = pack_type.get_empty(itmp, aux_names=aux_names,
structured=model.structured)
elif itmp > 0:
current = pack_type.get_empty(itmp, aux_names=aux_names,
structured=model.structured)
for ibnd in range(itmp):
line = f.readline()
if "open/close" in line.lower():
# need to strip out existing path seps and
# replace current-system path seps
raw = line.strip().split()
fname = raw[1]
if '/' in fname:
raw = fname.split('/')
elif '\\' in fname:
raw = fname.split('\\')
else:
raw = [fname]
fname = os.path.join(*raw)
oc_filename = os.path.join(model.model_ws, fname)
                        msg = "Package.load() error: open/close filename " + \
                              oc_filename + " not found"
                        assert os.path.exists(oc_filename), msg
try:
current = np.genfromtxt(oc_filename,
dtype=current.dtype)
current = current.view(np.recarray)
except Exception as e:
raise Exception(
"Package.load() error loading open/close file " + oc_filename + \
" :" + str(e))
                        msg = "Package.load() error: open/close rec array " + \
                              "from file " + oc_filename + " shape (" + \
                              str(current.shape) + \
                              ") does not match itmp: {0:d}".format(itmp)
                        assert current.shape[0] == itmp, msg
break
try:
t = line.strip().split()
current[ibnd] = tuple(t[:len(current.dtype.names)])
except:
t = []
for ivar in range(len(current.dtype.names)):
istart = ivar * 10
istop = istart + 10
t.append(line[istart:istop])
current[ibnd] = tuple(t[:len(current.dtype.names)])
# convert indices to zero-based
if model.structured:
current['k'] -= 1
current['i'] -= 1
current['j'] -= 1
else:
current['node'] -= 1
bnd_output = np.recarray.copy(current)
else:
bnd_output = np.recarray.copy(current)
for iparm in range(itmpp):
line = f.readline()
t = line.strip().split()
pname = t[0].lower()
iname = 'static'
try:
tn = t[1]
c = tn.lower()
instance_dict = pak_parms.bc_parms[pname][1]
if c in instance_dict:
iname = c
else:
iname = 'static'
except:
pass
par_dict, current_dict = pak_parms.get(pname)
data_dict = current_dict[iname]
par_current = pack_type.get_empty(par_dict['nlst'],
aux_names=aux_names)
# get appropriate parval
if model.mfpar.pval is None:
parval = np.float(par_dict['parval'])
else:
try:
parval = np.float(model.mfpar.pval.pval_dict[pname])
except:
parval = np.float(par_dict['parval'])
# fill current parameter data (par_current)
for ibnd, t in enumerate(data_dict):
par_current[ibnd] = tuple(t[:len(par_current.dtype.names)])
if model.structured:
par_current['k'] -= 1
par_current['i'] -= 1
par_current['j'] -= 1
else:
par_current['node'] -= 1
for ptype in partype:
par_current[ptype] *= parval
if bnd_output is None:
bnd_output = np.recarray.copy(par_current)
else:
bnd_output = stack_arrays((bnd_output, par_current),
asrecarray=True, usemask=False)
if bnd_output is None:
stress_period_data[iper] = itmp
else:
stress_period_data[iper] = bnd_output
dtype = pack_type.get_empty(0, aux_names=aux_names,
structured=model.structured).dtype
# set package unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=pack_type.ftype())
if ipakcb > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
model.add_pop_key_list(ipakcb)
pak = pack_type(model, ipakcb=ipakcb,
stress_period_data=stress_period_data,
dtype=dtype, options=options,
unitnumber=unitnumber, filenames=filenames)
if check:
pak.check(f='{}.chk'.format(pak.name[0]),
verbose=pak.parent.verbose, level=0)
return pak
|
bdestombe/flopy-1
|
flopy/pakbase.py
|
Python
|
bsd-3-clause
| 35,922
|
#!/usr/bin/env python
"""
A script for automated nagging emails based on passed in queries
These can be collated into several 'queries' through the use of multiple query files with
a 'query_name' param set eg: 'Bugs tracked for Firefox Beta (13)'
Once the bugs have been collected from Bugzilla they are sorted into buckets, cc'd to the
assignee's manager, and sent to the assignee(s) or the need-info? requestee for each query
"""
import sys
import os
import smtplib
import subprocess
import tempfile
import collections
from datetime import datetime
from argparse import ArgumentParser
from auto_nag.bugzilla.agents import BMOAgent
import phonebook
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates'))
REPLY_TO_EMAIL = 'release-mgmt@mozilla.com'
DEFAULT_CC = ['release-mgmt@mozilla.com']
EMAIL_SUBJECT = ''
SMTP = 'smtp.mozilla.org'
# TODO - Sort by who a bug is blocked on (thanks @dturner)
# TODO - write tests!
# TODO - look into knocking out duplicated bugs in queries -- perhaps print out if there are dupes in queries when queries > 1
# TODO - should compare bugmail from API results to phonebook bugmail in to_lower()
def get_last_manager_comment(comments, manager, person):
# go through in reverse order to get most recent
for comment in comments[::-1]:
if person is not None:
if comment.creator.name == manager['mozillaMail'] or comment.creator.name == manager['bugzillaEmail']:
return comment.creation_time.replace(tzinfo=None)
return None
def get_last_assignee_comment(comments, person):
# go through in reverse order to get most recent
for comment in comments[::-1]:
if person is not None:
if comment.creator.name == person['mozillaMail'] or comment.creator.name == person['bugzillaEmail']:
return comment.creation_time.replace(tzinfo=None)
return None
def query_url_to_dict(url):
    if ';' in url:
fields_and_values = url.split("?")[1].split(";")
else:
fields_and_values = url.split("?")[1].split("&")
d = collections.defaultdict(list)
for pair in fields_and_values:
(key, val) = pair.split("=")
if key != "list_id":
d[key].append(val)
return d
def generateEmailOutput(subject, queries, template, people, show_comment=False,
manager_email=None, rollup=False, rollupEmail=None):
cclist = []
toaddrs = []
template_params = {}
# stripping off the templates dir, just in case it gets passed in the args
template = env.get_template(template.replace('templates/', '', 1))
def addToAddrs(bug):
if bug.assigned_to.name in people.people_by_bzmail:
person = dict(people.people_by_bzmail[bug.assigned_to.name])
if person['mozillaMail'] not in toaddrs:
toaddrs.append(person['mozillaMail'])
for query in queries.keys():
# Avoid dupes in the cclist from several queries
query_cc = queries[query].get('cclist', [])
for qcc in query_cc:
if qcc not in cclist:
cclist.append(qcc)
if query not in template_params:
template_params[query] = {'buglist': []}
if len(queries[query]['bugs']) != 0:
for bug in queries[query]['bugs']:
if 'show_summary' in queries[query]:
if queries[query]['show_summary'] == '1':
summary = bug.summary
else:
summary = ""
else:
summary = ""
template_params[query]['buglist'].append(
{
'id': bug.id,
'summary': summary,
# 'comment': bug.comments[-1].creation_time.replace(tzinfo=None),
'assignee': bug.assigned_to.real_name,
'flags': bug.flags
}
)
# more hacking for JS special casing
if bug.assigned_to.name == 'general@js.bugs' and 'nihsanullah@mozilla.com' not in toaddrs:
toaddrs.append('nihsanullah@mozilla.com')
# if needinfo? in flags, add the flag.requestee to the toaddrs instead of bug assignee
if bug.flags:
for flag in bug.flags:
if 'needinfo' in flag.name and flag.status == '?':
try:
person = dict(people.people_by_bzmail[str(flag.requestee)])
if person['mozillaMail'] not in toaddrs:
toaddrs.append(person['mozillaMail'])
except:
if str(flag.requestee) not in toaddrs:
toaddrs.append(str(flag.requestee))
else:
addToAddrs(bug)
else:
addToAddrs(bug)
message_body = template.render(queries=template_params, show_comment=show_comment)
if manager_email is not None and manager_email not in cclist:
cclist.append(manager_email)
    # no need to send to and cc the same address when there is more than
    # one recipient; iterate over a copy since we mutate the list
    if len(toaddrs) > 1:
        for email in toaddrs[:]:
            if email in cclist:
                toaddrs.remove(email)
if cclist == ['']:
cclist = None
if rollup:
joined_to = ",".join(rollupEmail)
else:
joined_to = ",".join(toaddrs)
message = (
"From: %s\r\n" % REPLY_TO_EMAIL
+ "To: %s\r\n" % joined_to
+ "CC: %s\r\n" % ",".join(cclist)
+ "Subject: %s\r\n" % subject
+ "\r\n"
+ message_body)
toaddrs = toaddrs + cclist
return toaddrs, message
def sendMail(toaddrs, msg, username, password, dryrun=False):
if dryrun:
print "\n****************************\n* DRYRUN: not sending mail *\n****************************\n"
print msg
else:
server = smtplib.SMTP_SSL(SMTP, 465)
server.set_debuglevel(1)
server.login(username, password)
# note: toaddrs is required for transport agents, the msg['To'] header is not modified
server.sendmail(username, toaddrs, msg)
server.quit()
if __name__ == '__main__':
parser = ArgumentParser(__doc__)
parser.set_defaults(
dryrun=False,
username=None,
password=None,
roll_up=False,
show_comment=False,
email_cc_list=None,
queries=[],
days_since_comment=-1,
verbose=False,
keywords=None,
email_subject=None,
no_verification=False,
)
parser.add_argument("-d", "--dryrun", dest="dryrun", action="store_true",
help="just do the query, and print emails to console without emailing anyone")
parser.add_argument("-m", "--mozilla-email", dest="mozilla_mail",
help="specify a specific address for sending email"),
parser.add_argument("-p", "--email-password", dest="email_password",
help="specify a specific password for sending email")
parser.add_argument("-b", "--bz-api-key", dest="bz_api_key",
help="Bugzilla API key")
parser.add_argument("-t", "--template", dest="template",
required=True,
help="template to use for the buglist output")
parser.add_argument("-e", "--email-cc-list", dest="email_cc_list",
action="append",
help="email addresses to include in cc when sending mail")
parser.add_argument("-q", "--query", dest="queries",
action="append",
required=True,
help="a file containing a dictionary of a bugzilla query")
parser.add_argument("-k", "--keyword", dest="keywords",
action="append",
help="keywords to collate buglists")
parser.add_argument("-s", "--subject", dest="email_subject",
required=True,
help="The subject of the email being sent")
parser.add_argument("-r", "--roll-up", dest="roll_up", action="store_true",
help="flag to get roll-up output in one email instead of creating multiple emails")
parser.add_argument("--show-comment", dest="show_comment", action="store_true",
help="flag to display last comment on a bug in the message output")
parser.add_argument("--days-since-comment", dest="days_since_comment",
help="threshold to check comments against to take action based on days since comment")
parser.add_argument("--verbose", dest="verbose", action="store_true",
help="turn on verbose output")
parser.add_argument("--no-verification", dest="no_verification", action="store_true",
help="don't wait for human verification of every email")
options, args = parser.parse_known_args()
people = phonebook.PhonebookDirectory(dryrun=options.dryrun)
try:
int(options.days_since_comment)
except:
if options.days_since_comment is not None:
parser.error("Need to provide a number for days \
since last comment value")
if options.email_cc_list is None:
options.email_cc_list = DEFAULT_CC
# Load our agent for BMO
bmo = BMOAgent(options.bz_api_key)
# Get the buglist(s)
collected_queries = {}
for query in options.queries:
# import the query
if os.path.exists(query):
info = {}
execfile(query, info)
query_name = info['query_name']
if query_name not in collected_queries:
collected_queries[query_name] = {
'channel': info.get('query_channel', ''),
'bugs': [],
'show_summary': info.get('show_summary', 0),
'cclist': options.email_cc_list,
}
if 'cc' in info:
for c in info.get('cc').split(','):
collected_queries[query_name]['cclist'].append(c)
if 'query_params' in info:
print "Gathering bugs from query_params in %s" % query
collected_queries[query_name]['bugs'] = bmo.get_bug_list(info['query_params'])
elif 'query_url' in info:
print "Gathering bugs from query_url in %s" % query
collected_queries[query_name]['bugs'] = bmo.get_bug_list(query_url_to_dict(info['query_url']))
# print "DEBUG: %d bug(s) found for query %s" % \
# (len(collected_queries[query_name]['bugs']), info['query_url'])
else:
print "Error - no valid query params or url in the config file"
sys.exit(1)
else:
print "Not a valid path: %s" % query
total_bugs = 0
for channel in collected_queries.keys():
total_bugs += len(collected_queries[channel]['bugs'])
print "Found %s bugs total for %s queries" % (total_bugs, len(collected_queries.keys()))
print "Queries to collect: %s" % collected_queries.keys()
managers = people.managers
manual_notify = {}
counter = 0
    # note: reads `bug` from the enclosing loop below
    def add_to_managers(manager_email, query, info={}):
if manager_email not in managers:
managers[manager_email] = {}
managers[manager_email]['nagging'] = {query: {'bugs': [bug],
'show_summary': info.get('show_summary', 0),
'cclist': info.get('cclist', [])}, }
return
if 'nagging' in managers[manager_email]:
if query in managers[manager_email]['nagging']:
managers[manager_email]['nagging'][query]['bugs'].append(bug)
if options.verbose:
print "Adding %s to %s in nagging for %s" % \
(bug.id, query, manager_email)
else:
managers[manager_email]['nagging'][query] = {
'bugs': [bug],
'show_summary': info.get('show_summary', 0),
'cclist': info.get('cclist', [])
}
if options.verbose:
print "Adding new query key %s for bug %s in nagging \
and %s" % (query, bug.id, manager_email)
else:
managers[manager_email]['nagging'] = {query: {'bugs': [bug],
'show_summary': info.get('show_summary', 0),
'cclist': info.get('cclist', [])}, }
if options.verbose:
print "Creating query key %s for bug %s in nagging and \
%s" % (query, bug.id, manager_email)
for query, info in collected_queries.items():
if len(collected_queries[query]['bugs']) != 0:
manual_notify[query] = {'bugs': [], 'show_summary': info.get('show_summary', 0)}
for b in collected_queries[query]['bugs']:
counter = counter + 1
send_mail = True
bug = bmo.get_bug(b.id)
manual_notify[query]['bugs'].append(bug)
assignee = bug.assigned_to.name
if "@" not in assignee:
print "Error - email address expect. Found '" + assignee + "' instead"
print "Check that the authentication worked correctly"
sys.exit(1)
if assignee in people.people_by_bzmail:
person = dict(people.people_by_bzmail[assignee])
else:
person = None
# kick bug out if days since comment check is on
if options.days_since_comment != -1:
# try to get last_comment by assignee & manager
if person is not None:
last_comment = get_last_assignee_comment(bug.comments, person)
if 'manager' in person and person['manager'] is not None:
manager_email = person['manager']['dn'].split('mail=')[1].split(',')[0]
manager = people.people[manager_email]
last_manager_comment = get_last_manager_comment(bug.comments,
people.people_by_bzmail[manager['bugzillaEmail']],
person)
# set last_comment to the most recent of last_assignee and last_manager
if last_manager_comment is not None and last_comment is not None and last_manager_comment > last_comment:
last_comment = last_manager_comment
# otherwise just get the last comment
else:
last_comment = bug.comments[-1].creation_time.replace(tzinfo=None)
if last_comment is not None:
timedelta = datetime.now() - last_comment
if timedelta.days <= int(options.days_since_comment):
if options.verbose:
print "Skipping bug %s since it's had an assignee or manager comment within the past %s days" % (bug.id, options.days_since_comment)
send_mail = False
counter = counter - 1
manual_notify[query]['bugs'].remove(bug)
else:
if options.verbose:
print "This bug needs notification, it's been %s since last comment of note" % timedelta.days
if send_mail:
if 'nobody' in assignee:
if options.verbose:
print "No one assigned to: %s, will be in the manual notification list..." % bug.id
# TODO - get rid of this, SUCH A HACK!
elif 'general@js.bugs' in assignee:
if options.verbose:
print "No one assigned to JS bug: %s, adding to Naveed's list..." % bug.id
add_to_managers('nihsanullah@mozilla.com', query, info)
else:
if bug.assigned_to.real_name is not None:
if person is not None:
# check if assignee is already a manager, add to their own list
                                if person['mozillaMail'] in managers:
add_to_managers(person['mozillaMail'], query, info)
# otherwise we search for the assignee's manager
else:
# check for manager key first, a few people don't have them
if 'manager' in person and person['manager'] is not None:
manager_email = person['manager']['dn'].split('mail=')[1].split(',')[0]
if manager_email in managers:
add_to_managers(manager_email, query, info)
elif manager_email in people.vices:
# we're already at the highest level we'll go
if assignee in managers:
add_to_managers(assignee, query, info)
else:
if options.verbose:
print "%s has a V-level for a manager, and is not in the manager list" % assignee
managers[person['mozillaMail']] = {}
add_to_managers(person['mozillaMail'], query, info)
else:
# try to go up one level and see if we find a manager
if manager_email in people.people:
person = dict(people.people[manager_email])
manager_email = person['manager']['dn'].split('mail=')[1].split(',')[0]
if manager_email in managers:
add_to_managers(manager_email, query, info)
else:
print "Manager could not be found: %s" % manager_email
# if you don't have a manager listed, but are an employee, we'll nag you anyway
else:
add_to_managers(person['mozillaMail'], query, info)
print "%s's entry doesn't list a manager! Let's ask them to update phonebook but in the meantime they get the email directly." % person['name']
if options.roll_up:
# only send one email
toaddrs, msg = generateEmailOutput(subject=options.email_subject,
queries=manual_notify,
template=options.template,
people=people,
show_comment=options.show_comment,
rollup=options.roll_up,
rollupEmail=options.email_cc_list)
if options.email_password is None or options.mozilla_mail is None:
print "Please supply a username/password (-m, -p) for sending email"
sys.exit(1)
if not options.dryrun:
print "SENDING EMAIL"
sendMail(toaddrs, msg, options.mozilla_mail, options.email_password, options.dryrun)
else:
# Get yr nag on!
for email, info in managers.items():
inp = ''
if 'nagging' in info:
toaddrs, msg = generateEmailOutput(
subject=options.email_subject,
manager_email=email,
queries=info['nagging'],
people=people,
template=options.template,
show_comment=options.show_comment)
            while not options.no_verification:
print "\nRelMan Nag is ready to send the following email:\n<------ MESSAGE BELOW -------->"
print msg
print "<------- END MESSAGE -------->\nWould you like to send now?"
inp = raw_input('\n Please select y/Y to send, v/V to edit, or n/N to skip and continue to next email: ')
if inp != 'v' and inp != 'V':
break
tempfilename = tempfile.mktemp()
temp_file = open(tempfilename, 'w')
temp_file.write(msg)
temp_file.close()
subprocess.call(['vi', tempfilename])
temp_file = open(tempfilename, 'r')
msg = temp_file.read()
toaddrs = msg.split("To: ")[1].split("\r\n")[0].split(',') + msg.split("CC: ")[1].split("\r\n")[0].split(',')
os.remove(tempfilename)
if inp == 'y' or inp == 'Y' or options.no_verification:
if options.email_password is None or options.mozilla_mail is None:
print "Please supply a username/password (-m, -p) for sending email"
sys.exit(1)
if not options.dryrun:
print "SENDING EMAIL"
sendMail(toaddrs, msg, options.mozilla_mail, options.email_password, options.dryrun)
sent_bugs = 0
for query, info in info['nagging'].items():
sent_bugs += len(info['bugs'])
# take sent bugs out of manual notification list
for bug in info['bugs']:
manual_notify[query]['bugs'].remove(bug)
counter = counter - sent_bugs
if not options.roll_up:
emailed_bugs = []
# Send RelMan the manual notification list only when there are bugs that didn't go out
msg_body = """\n******************************************\nNo nag emails were generated for these bugs because
they are either assigned to no one or to non-employees (though ni? on non-employees will get nagged).
\nYou will need to look at the following bugs:\n******************************************\n\n"""
for k, v in manual_notify.items():
if len(v['bugs']) != 0:
for bug in v['bugs']:
if bug.id not in emailed_bugs:
if k not in msg_body:
msg_body += "\n=== %s ===\n" % k
emailed_bugs.append(bug.id)
msg_body += "http://bugzil.la/" + "%s -- assigned to: %s\n -- Last commented on: %s\n" % (bug.id, bug.assigned_to.real_name, bug.comments[-1].creation_time.replace(tzinfo=None))
msg = ("From: %s\r\n" % REPLY_TO_EMAIL
+ "To: %s\r\n" % REPLY_TO_EMAIL
+ "Subject: RelMan Attention Needed: %s\r\n" % options.email_subject
+ "\r\n"
+ msg_body)
sendMail(['release-mgmt@mozilla.com'], msg, options.mozilla_mail, options.email_password, options.dryrun)
|
anoopvalluthadam/bztools
|
auto_nag/scripts/email_nag.py
|
Python
|
bsd-3-clause
| 24,245
|
import unittest
import operator
import six
from six.moves import range,reduce
import arybo.lib.mba_exprs as EX
from arybo.lib import MBA
from pytanque import expand_esf_inplace, simplify_inplace
class MBAExprsTest:
def setUp(self):
self.mba1 = MBA(1)
self.mba4 = MBA(4)
self.mba4.use_esf = False
self.mba8 = MBA(8)
self.mba8.use_esf = False
self.x4 = self.mba4.var('x')
self.y4 = self.mba4.var('y')
self.z4 = self.mba4.var('z')
self.x4_expr = EX.ExprBV(self.x4)
self.y4_expr = EX.ExprBV(self.y4)
self.z4_expr = EX.ExprBV(self.z4)
self.x8 = self.mba8.var('x')
self.y8 = self.mba8.var('y')
self.z8 = self.mba8.var('z')
self.x8_expr = EX.ExprBV(self.x8)
self.y8_expr = EX.ExprBV(self.y8)
self.z8_expr = EX.ExprBV(self.z8)
def exprEqual(self, expr, e):
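        # evaluate the arybo expression and compare it, after optional esf
        # expansion and simplification, to the expected bit-vector value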
expr = EX.eval_expr(expr, self.use_esf)
if self.use_esf:
expand_esf_inplace(expr.vec)
simplify_inplace(expr.vec)
simplify_inplace(e.vec)
simplify_inplace(expr.vec)
self.assertEqual(expr, e)
def test_leaves(self):
self.exprEqual(self.x4_expr, self.x4)
self.exprEqual(EX.ExprCst(0xff, 4), self.mba4.from_cst(0xff))
def test_unary(self):
self.exprEqual(
EX.ExprNot(self.x4_expr),
~self.x4)
self.exprEqual(
EX.ExprBroadcast(EX.ExprCst(1, 4), 0, 4),
self.mba4.from_cst(0xf))
self.exprEqual(
EX.ExprBroadcast(EX.ExprCst(1, 4), 1, 4),
self.mba4.from_cst(0))
def test_unaryops(self):
ops = (
(EX.ExprXor, operator.xor),
(EX.ExprAnd, operator.and_),
(EX.ExprOr, operator.or_),
)
args_expr = (self.x4_expr, self.y4_expr, self.z4_expr)
args = (self.x4, self.y4, self.z4)
for op in ops:
self.exprEqual(
op[0](*args_expr),
reduce(op[1], args))
def test_binaryops(self):
ops = (
(EX.ExprAdd, operator.add),
(EX.ExprSub, operator.sub),
(EX.ExprMul, operator.mul),
)
args_expr = (self.x4_expr, self.y4_expr, self.z4_expr)
args = (self.x4, self.y4, self.z4)
for op in ops:
self.exprEqual(
reduce(op[0], args_expr),
reduce(op[1], args))
E0 = EX.ExprAdd(self.x8_expr, self.x8_expr)
self.exprEqual(EX.ExprAdd(E0, E0), self.x8 << 2)
for i in range(1,16):
self.exprEqual(EX.ExprDiv(self.x4_expr, EX.ExprCst(i, 4)), self.x4.udiv(i))
def test_rotate_binop(self):
E0 = EX.ExprRor(
EX.ExprAdd(self.x4_expr, self.y4_expr),
EX.ExprCst(1, 4))
self.exprEqual(E0, (self.x4+self.y4).ror(1))
E0 = EX.ExprRor(
EX.ExprSub(self.x4_expr, self.y4_expr),
EX.ExprCst(1, 4))
self.exprEqual(E0, (self.x4-self.y4).ror(1))
E0 = EX.ExprRor(
EX.ExprMul(self.x4_expr, self.y4_expr),
EX.ExprCst(1, 4))
self.exprEqual(E0, (self.x4*self.y4).ror(1))
E0 = EX.ExprRol(
EX.ExprAdd(self.x4_expr, self.y4_expr),
EX.ExprCst(1, 4))
self.exprEqual(E0, (self.x4+self.y4).rol(1))
E0 = EX.ExprRol(
EX.ExprSub(self.x4_expr, self.y4_expr),
EX.ExprCst(1, 4))
self.exprEqual(E0, (self.x4-self.y4).rol(1))
E0 = EX.ExprRol(
EX.ExprMul(self.x4_expr, self.y4_expr),
EX.ExprCst(1, 4))
self.exprEqual(E0, (self.x4*self.y4).rol(1))
def test_logical(self):
ops = (
(EX.ExprLShr, operator.rshift),
(EX.ExprShl, operator.lshift),
(EX.ExprRol, lambda x,n: x.rol(n)),
(EX.ExprRor, lambda x,n: x.ror(n))
)
for op in ops:
for s in range(5):
self.exprEqual(
op[0](self.x4_expr, EX.ExprCst(s, 4)),
op[1](self.x4, s))
def test_zx_sx(self):
self.exprEqual(
EX.ExprZX(EX.ExprCst(0xf, 4), 8),
self.mba8.from_cst(0x0f))
self.exprEqual(
EX.ExprSX(EX.ExprCst(0x8, 4), 8),
self.mba8.from_cst(0xf8))
def test_extract_contract(self):
self.exprEqual(
EX.ExprSlice(self.x8_expr, slice(0, 4)),
self.x4)
self.exprEqual(
EX.ExprConcat(EX.ExprCst(0xf, 4), EX.ExprCst(0, 4)),
self.mba8.from_cst(0x0f))
def test_cmp(self):
e = EX.ExprCond(EX.ExprCmpEq(self.x8_expr, EX.ExprCst(10, 8)),
self.y8_expr,
self.z8_expr)
e = EX.eval_expr(e)
for i in range(256):
eref = self.z8 if i != 10 else self.y8
self.assertEqual(e.eval({self.x8: i}), eref.vec)
class MBAExprsTestNoEsf(MBAExprsTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.use_esf = False
class MBAExprsTestEsf(MBAExprsTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.use_esf = True
if __name__ == "__main__":
unittest.main()
|
quarkslab/arybo
|
tests/arybo/mba_exprs.py
|
Python
|
bsd-3-clause
| 5,358
|
#!/usr/bin/python
"""
This is a tool to verify checksum hashes produced by LOCKSS against hashes
provided by a BagIt manifest document.
Invoke with -h for usage help.
Written by Stephen Eisenhauer
At University of North Texas Libraries
On 2013-04-17
Notes:
* The LOCKSS hash list will have more entries than we actually care about
(checksums for Apache directory listing pages, etc.), so we should just
go down the list of bag manifest entries and ensure that everything
there is also present (and identical) in the LOCKSS list.
"""
import argparse
import os
import re
import urllib
def load_lockss_hashes(hashcus_path):
prefix = None
hashes = dict()
f = open(hashcus_path, 'r')
for line in f:
m = re.match('[0-9A-F]{32} (.+)', line)
if m:
if not prefix:
prefix = len(m.group(1)) + 1
continue
hashes[m.group(1)[prefix:]] = line[:32]
f.close()
print "Found %d hashes in HashCUS file" % len(hashes)
return hashes
def compare_manifest_hashes(manifest_path, hashes):
records = 0
errors = 0
f = open(manifest_path, 'r')
for line in f:
m = re.match('[0-9a-f]{32} (.+)', line)
if m:
records += 1
path = urllib.quote(m.group(1), safe="%/:=&?~#+!$,;'@()*[]")
            if path not in hashes:
print "No LOCKSS hash found for path: %s" % path
errors += 1
elif line[:32].upper() != hashes[path]:
print "Hash mismatch: %s != %s for path %s" % (line[:32], hashes[path], path)
errors += 1
f.close()
print "Compared %d records, encountered %d errors." % (records, errors)
def _make_arg_parser():
parser = argparse.ArgumentParser(
description='Compare a LOCKSS hash list to a bag manifest.')
parser.add_argument('HashCUS',
help="path to the HashCUS.txt file downloaded from LOCKSS")
parser.add_argument('manifest',
help="path to the bag manifest (e.g. mybag/manifest-md5.txt")
return parser
if __name__ == "__main__":
parser = _make_arg_parser()
args = parser.parse_args()
    hashcus_path = os.path.abspath(args.HashCUS)
manifest_path = os.path.abspath(args.manifest)
    hashes = load_lockss_hashes(hashcus_path)
compare_manifest_hashes(manifest_path, hashes)
|
MetaArchive/metaarchive-qa-tools
|
lockss-manifest-validate/lockss-manifest-validate.py
|
Python
|
bsd-3-clause
| 2,391
|
import json
import os
import six
import tensorflow as tf
from PIL import Image
from luminoth.tools.dataset.readers import InvalidDataDirectory
from luminoth.tools.dataset.readers.object_detection import (
ObjectDetectionReader
)
from luminoth.utils.dataset import read_xml, read_image
WNIDS_FILE = 'data/imagenet_wnids.json'
class ImageNetReader(ObjectDetectionReader):
def __init__(self, data_dir, split, **kwargs):
super(ImageNetReader, self).__init__(**kwargs)
self._split = split
self._data_dir = data_dir
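        # expected ILSVRC DET layout under data_dir (paths built below):
        #   ImageSets/DET/<split>.txt
        #   Data/DET/<split>/...
        #   Annotations/DET/<split>/...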
self._imagesets_path = os.path.join(self._data_dir, 'ImageSets', 'DET')
self._images_path = os.path.join(self._data_dir, 'Data', 'DET',)
self._annotations_path = os.path.join(
self._data_dir, 'Annotations', 'DET'
)
self.yielded_records = 0
self.errors = 0
# Validate Imagenet structure in `data_dir`.
self._validate_structure()
# Load wnids from file.
wnids_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
WNIDS_FILE
)
with tf.gfile.GFile(wnids_path) as wnidsjson:
self._wnids = json.load(wnidsjson)
def get_total(self):
return sum(1 for _ in self._get_record_names())
def get_classes(self):
return sorted(list(self._wnids.values()))
def iterate(self):
for image_id in self._get_record_names():
if self._stop_iteration():
return
if self._should_skip(image_id):
continue
try:
annotation_path = self._get_image_annotation(image_id)
image_path = self._get_image_path(image_id)
# Read both the image and the annotation into memory.
annotation = read_xml(annotation_path)
image = read_image(image_path)
except tf.errors.NotFoundError:
tf.logging.debug(
'Error reading image or annotation for "{}".'.format(
image_id))
self.errors += 1
continue
objects = annotation.get('object')
if objects is None:
# If there's no bounding boxes, we don't want it.
continue
image_pil = Image.open(six.BytesIO(image))
width = image_pil.width
height = image_pil.height
gt_boxes = []
for b in annotation['object']:
try:
label_id = self.classes.index(self._wnids[b['name']])
except ValueError:
continue
(xmin, ymin, xmax, ymax) = self._adjust_bbox(
xmin=int(b['bndbox']['xmin']),
ymin=int(b['bndbox']['ymin']),
xmax=int(b['bndbox']['xmax']),
ymax=int(b['bndbox']['ymax']),
old_width=int(annotation['size']['width']),
old_height=int(annotation['size']['height']),
new_width=width, new_height=height
)
gt_boxes.append({
'label': label_id,
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
})
if len(gt_boxes) == 0:
continue
record = {
'width': width,
'height': height,
'depth': 3,
'filename': annotation['filename'],
'image_raw': image,
'gt_boxes': gt_boxes,
}
self._will_add_record(record)
self.yielded_records += 1
yield record
def _validate_structure(self):
if not tf.gfile.Exists(self._data_dir):
raise InvalidDataDirectory(
'"{}" does not exist.'.format(self._data_dir)
)
if not tf.gfile.Exists(self._imagesets_path):
raise InvalidDataDirectory('ImageSets path is missing')
if not tf.gfile.Exists(self._images_path):
raise InvalidDataDirectory('Images path is missing')
if not tf.gfile.Exists(self._annotations_path):
raise InvalidDataDirectory('Annotations path is missing')
def _get_split_path(self):
return os.path.join(
self._imagesets_path, '{}.txt'.format(self._split)
)
def _get_image_path(self, image_id):
return os.path.join(
self._images_path, '{}.JPEG'.format(image_id)
)
def _get_image_annotation(self, image_id):
return os.path.join(self._annotations_path, '{}.xml'.format(image_id))
def _get_record_names(self):
split_path = self._get_split_path()
if not tf.gfile.Exists(split_path):
raise ValueError('"{}" not found'.format(self._split))
with tf.gfile.GFile(split_path) as f:
for line in f:
# The images in 'extra' directories don't have annotations.
if 'extra' in line:
continue
filename = line.split()[0]
filename = os.path.join(self._split, filename)
yield filename.strip()
def _adjust_bbox(self, xmin, ymin, xmax, ymax, old_width, old_height,
new_width, new_height):
# TODO: consider reusing luminoth.utils.image.adjust_bboxes instead of
# this, but note it uses tensorflow, and using tf and np here may
# introduce too many problems.
xmin = (xmin / old_width) * new_width
ymin = (ymin / old_height) * new_height
xmax = (xmax / old_width) * new_width
ymax = (ymax / old_height) * new_height
return xmin, ymin, xmax, ymax
|
tryolabs/luminoth
|
luminoth/tools/dataset/readers/object_detection/imagenet.py
|
Python
|
bsd-3-clause
| 5,855
|
from abc import ABCMeta, abstractmethod
import six
from django.db.models import Q
from dimagi.utils.chunked import chunked
class DomainFilter(six.with_metaclass(ABCMeta)):
@abstractmethod
def get_filters(self, domain_name):
"""Return a list of filters. Each filter will be applied to a queryset independently
of the others."""
raise NotImplementedError()
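# Sketch of how a consumer is expected to apply these filters
# (illustrative only; `queryset` and `dump_rows` are hypothetical):
#
#     for q_filter in domain_filter.get_filters(domain_name):
#         dump_rows(queryset.filter(q_filter))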
class SimpleFilter(DomainFilter):
def __init__(self, filter_kwarg):
self.filter_kwarg = filter_kwarg
def get_filters(self, domain_name):
return [Q(**{self.filter_kwarg: domain_name})]
class UsernameFilter(DomainFilter):
def get_filters(self, domain_name):
"""
:return: A generator of filters each filtering for at most 500 users.
"""
from corehq.apps.users.dbaccessors.all_commcare_users import get_all_usernames_by_domain
usernames = get_all_usernames_by_domain(domain_name)
for chunk in chunked(usernames, 500):
filter = Q()
for username in chunk:
filter |= Q(username__iexact=username)
yield filter
class UserIDFilter(DomainFilter):
def __init__(self, user_id_field, include_web_users=True):
self.user_id_field = user_id_field
self.include_web_users = include_web_users
def get_filters(self, domain_name):
"""
:return: A generator of filters each filtering for at most 1000 users.
"""
from corehq.apps.users.dbaccessors.all_commcare_users import get_all_user_ids_by_domain
user_ids = get_all_user_ids_by_domain(domain_name, include_web_users=self.include_web_users)
for chunk in chunked(user_ids, 1000):
query_kwarg = '{}__in'.format(self.user_id_field)
yield Q(**{query_kwarg: chunk})
|
qedsoftware/commcare-hq
|
corehq/apps/dump_reload/sql/filters.py
|
Python
|
bsd-3-clause
| 1,812
|
# -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from p101stat.app import create_app
from p101stat.database import db as _db
from p101stat.settings import TestConfig
from .factories import IdolFactory
@pytest.yield_fixture(scope='function')
def app():
"""An application for the tests."""
_app = create_app(TestConfig)
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture(scope='function')
def testapp(app):
"""A Webtest app."""
return TestApp(app)
@pytest.yield_fixture(scope='function')
def db(app):
"""A database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
@pytest.fixture
def idol(db):
"""A idol for the tests."""
idol = IdolFactory()
db.session.commit()
return idol
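# Hedged usage sketch (not part of this conftest): a test consumes these
# fixtures by naming them as arguments; pytest wires `idol` through `db`
# and `app` automatically. The `id` attribute is an assumption about the
# Idol model:
#
#   def test_idol_is_persisted(idol):
#       assert idol.id is not None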
|
pmrowla/p101stat
|
tests/conftest.py
|
Python
|
bsd-3-clause
| 969
|
"""
Turn entities to and from various representations.
This is the base Class and interface Class used to
transform strings of various forms to model objects
and model objects to strings of various forms.
"""
from tiddlyweb.serializer import NoSerializationError
from tiddlyweb.model.tiddler import string_to_tags_list
class SerializationInterface(object):
"""
A Serialization is a collection of methods that
either turn an input string into the object named
by the method, or turn the object into a string
form.
The interface is fairly simple: For the data
entities that exist in the TiddlyWeb system there
    (optionally) exist <entity>_as and as_<entity> methods
in each Serialization.
*_as returns a string form of the entity, perhaps as
HTML, Text, YAML, Atom, whatever the Serialization does.
as_* takes a provided entity and string and updates
the skeletal entity to represent the information
contained in the string (in the Serialization format).
    There are also three supporting methods, list_tiddlers(),
    list_recipes() and list_bags(), that provide convenient
    ways of presenting a collection of the corresponding
    entities in the Serialization form. A string is returned.
If a method doesn't exist a NoSerializationError is raised
and the calling code is expected to do something intelligent
when trapping it.
"""
def __init__(self, environ=None):
if environ is None:
environ = {}
self.environ = environ
def recipe_as(self, recipe):
"""
Serialize a Recipe into this serializer's form.
"""
raise NoSerializationError
def as_recipe(self, recipe, input_string):
"""
Take input_string, which is a serialized recipe
and turn it into a Recipe (if possible).
"""
raise NoSerializationError
def bag_as(self, bag):
"""
Serialize a Bag into this serializer's form.
"""
raise NoSerializationError
def as_bag(self, bag, input_string):
"""
Take input_string, which is a serialized bag
and turn it into a Bag (if possible).
"""
raise NoSerializationError
def tiddler_as(self, tiddler):
"""
        Serialize a Tiddler into this serializer's form.
"""
raise NoSerializationError
def as_tiddler(self, tiddler, input_string):
"""
Take input_string, which is a serialized tiddler
and turn it into a Tiddler (if possible).
"""
raise NoSerializationError
def list_tiddlers(self, bag):
"""
Provided a bag, output the included tiddlers.
"""
raise NoSerializationError
def list_recipes(self, recipes):
"""
        Provided a list of Recipe objects, make a serialized
        list of those recipes (e.g. a list of HTML
        links).
"""
raise NoSerializationError
def list_bags(self, bags):
"""
        Provided a list of Bag objects, make a serialized
        list of those bags (e.g. a list of HTML
        links).
"""
raise NoSerializationError
def as_tags(self, string):
"""
        Not called directly, but made public for future
        use. Turn a string into a list of tags.
"""
return string_to_tags_list(string)
def tags_as(self, tags):
"""
        Not called directly, but made public for future
        use. Turn a list of tags into a serialized list.
"""
tag_string_list = []
for tag in tags:
if ' ' in tag:
tag = '[[%s]]' % tag
tag_string_list.append(tag)
return u' '.join(tag_string_list)
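# Hedged sketch (not part of tiddlyweb): a minimal concrete Serialization
# overrides only the methods it supports, leaving the rest to raise
# NoSerializationError. The tiddler attributes used here (.title, .text,
# .tags) are assumptions for illustration.
class _ExampleTextSerialization(SerializationInterface):
    def tiddler_as(self, tiddler):
        tags = self.tags_as(tiddler.tags)
        return u'%s\ntags: %s\n\n%s' % (tiddler.title, tags, tiddler.text)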
|
funkyeah/tiddlyweb
|
tiddlyweb/serializations/__init__.py
|
Python
|
bsd-3-clause
| 3,732
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.0.11832 on 2017-03-22.
# 2017, SMART Health IT.
import io
import json
import os
import unittest
from . import riskassessment
from .fhirdate import FHIRDate
class RiskAssessmentTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("RiskAssessment", js["resourceType"])
return riskassessment.RiskAssessment(js)
def testRiskAssessment1(self):
inst = self.instantiate_from("riskassessment-example-cardiac.json")
self.assertIsNotNone(inst, "Must have instantiated a RiskAssessment instance")
self.implRiskAssessment1(inst)
js = inst.as_json()
self.assertEqual("RiskAssessment", js["resourceType"])
inst2 = riskassessment.RiskAssessment(js)
self.implRiskAssessment1(inst2)
def implRiskAssessment1(self, inst):
self.assertEqual(inst.id, "cardiac")
self.assertEqual(inst.identifier.system, "http://example.org")
self.assertEqual(inst.identifier.use, "official")
self.assertEqual(inst.identifier.value, "risk-assessment-cardiac")
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2014-07-19T16:04:00Z").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2014-07-19T16:04:00Z")
self.assertEqual(inst.prediction[0].outcome.text, "Heart Attack")
self.assertEqual(inst.prediction[0].probabilityDecimal, 0.02)
self.assertEqual(inst.prediction[0].whenRange.high.code, "a")
self.assertEqual(inst.prediction[0].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[0].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[0].whenRange.high.value, 49)
self.assertEqual(inst.prediction[0].whenRange.low.code, "a")
self.assertEqual(inst.prediction[0].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[0].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[0].whenRange.low.value, 39)
self.assertEqual(inst.status, "final")
self.assertEqual(inst.text.status, "additional")
def testRiskAssessment2(self):
inst = self.instantiate_from("riskassessment-example-population.json")
self.assertIsNotNone(inst, "Must have instantiated a RiskAssessment instance")
self.implRiskAssessment2(inst)
js = inst.as_json()
self.assertEqual("RiskAssessment", js["resourceType"])
inst2 = riskassessment.RiskAssessment(js)
self.implRiskAssessment2(inst2)
def implRiskAssessment2(self, inst):
self.assertEqual(inst.id, "population")
self.assertEqual(inst.status, "final")
self.assertEqual(inst.text.status, "generated")
def testRiskAssessment3(self):
inst = self.instantiate_from("riskassessment-example-prognosis.json")
self.assertIsNotNone(inst, "Must have instantiated a RiskAssessment instance")
self.implRiskAssessment3(inst)
js = inst.as_json()
self.assertEqual("RiskAssessment", js["resourceType"])
inst2 = riskassessment.RiskAssessment(js)
self.implRiskAssessment3(inst2)
def implRiskAssessment3(self, inst):
self.assertEqual(inst.id, "prognosis")
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2010-11-22").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2010-11-22")
self.assertEqual(inst.prediction[0].outcome.coding[0].code, "249943000:363698007=72098002,260868000=6934004")
self.assertEqual(inst.prediction[0].outcome.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.prediction[0].outcome.text, "permanent weakness of the left arm")
self.assertEqual(inst.prediction[0].qualitativeRisk.coding[0].code, "moderate")
self.assertEqual(inst.prediction[0].qualitativeRisk.coding[0].display, "moderate likelihood")
self.assertEqual(inst.prediction[0].qualitativeRisk.coding[0].system, "http://hl7.org/fhir/risk-probability")
self.assertEqual(inst.status, "final")
self.assertEqual(inst.text.status, "additional")
def testRiskAssessment4(self):
inst = self.instantiate_from("riskassessment-example.json")
self.assertIsNotNone(inst, "Must have instantiated a RiskAssessment instance")
self.implRiskAssessment4(inst)
js = inst.as_json()
self.assertEqual("RiskAssessment", js["resourceType"])
inst2 = riskassessment.RiskAssessment(js)
self.implRiskAssessment4(inst2)
def implRiskAssessment4(self, inst):
self.assertEqual(inst.comment, "High degree of certainty")
self.assertEqual(inst.id, "genetic")
self.assertEqual(inst.method.coding[0].code, "BRCAPRO")
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2006-01-13T23:01:00Z").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2006-01-13T23:01:00Z")
self.assertEqual(inst.prediction[0].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[0].probabilityDecimal, 0.000168)
self.assertEqual(inst.prediction[0].whenRange.high.code, "a")
self.assertEqual(inst.prediction[0].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[0].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[0].whenRange.high.value, 53)
self.assertEqual(inst.prediction[1].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[1].probabilityDecimal, 0.000368)
self.assertEqual(inst.prediction[1].whenRange.high.code, "a")
self.assertEqual(inst.prediction[1].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[1].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[1].whenRange.high.value, 57)
self.assertEqual(inst.prediction[1].whenRange.low.code, "a")
self.assertEqual(inst.prediction[1].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[1].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[1].whenRange.low.value, 54)
self.assertEqual(inst.prediction[2].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[2].probabilityDecimal, 0.000594)
self.assertEqual(inst.prediction[2].whenRange.high.code, "a")
self.assertEqual(inst.prediction[2].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[2].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[2].whenRange.high.value, 62)
self.assertEqual(inst.prediction[2].whenRange.low.code, "a")
self.assertEqual(inst.prediction[2].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[2].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[2].whenRange.low.value, 58)
self.assertEqual(inst.prediction[3].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[3].probabilityDecimal, 0.000838)
self.assertEqual(inst.prediction[3].whenRange.high.code, "a")
self.assertEqual(inst.prediction[3].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[3].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[3].whenRange.high.value, 67)
self.assertEqual(inst.prediction[3].whenRange.low.code, "a")
self.assertEqual(inst.prediction[3].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[3].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[3].whenRange.low.value, 63)
self.assertEqual(inst.prediction[4].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[4].probabilityDecimal, 0.001089)
self.assertEqual(inst.prediction[4].whenRange.high.code, "a")
self.assertEqual(inst.prediction[4].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[4].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[4].whenRange.high.value, 72)
self.assertEqual(inst.prediction[4].whenRange.low.code, "a")
self.assertEqual(inst.prediction[4].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[4].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[4].whenRange.low.value, 68)
self.assertEqual(inst.prediction[5].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[5].probabilityDecimal, 0.001327)
self.assertEqual(inst.prediction[5].whenRange.high.code, "a")
self.assertEqual(inst.prediction[5].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[5].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[5].whenRange.high.value, 77)
self.assertEqual(inst.prediction[5].whenRange.low.code, "a")
self.assertEqual(inst.prediction[5].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[5].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[5].whenRange.low.value, 73)
self.assertEqual(inst.prediction[6].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[6].probabilityDecimal, 0.00153)
self.assertEqual(inst.prediction[6].whenRange.high.code, "a")
self.assertEqual(inst.prediction[6].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[6].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[6].whenRange.high.value, 82)
self.assertEqual(inst.prediction[6].whenRange.low.code, "a")
self.assertEqual(inst.prediction[6].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[6].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[6].whenRange.low.value, 78)
self.assertEqual(inst.prediction[7].outcome.text, "Breast Cancer")
self.assertEqual(inst.prediction[7].probabilityDecimal, 0.001663)
self.assertEqual(inst.prediction[7].whenRange.high.code, "a")
self.assertEqual(inst.prediction[7].whenRange.high.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[7].whenRange.high.unit, "years")
self.assertEqual(inst.prediction[7].whenRange.high.value, 88)
self.assertEqual(inst.prediction[7].whenRange.low.code, "a")
self.assertEqual(inst.prediction[7].whenRange.low.system, "http://unitsofmeasure.org")
self.assertEqual(inst.prediction[7].whenRange.low.unit, "years")
self.assertEqual(inst.prediction[7].whenRange.low.value, 83)
self.assertEqual(inst.status, "final")
self.assertEqual(inst.text.status, "generated")
|
all-of-us/raw-data-repository
|
rdr_service/lib_fhir/fhirclient_3_0_0/models/riskassessment_tests.py
|
Python
|
bsd-3-clause
| 11,177
|
from django.conf import settings
from django.http import Http404
import os
import shutil
import time
import re
import urlparse
import urllib
from subprocess import call
from exceptions import ImageMagickException, ImageMagickConversionError, ImageMagickOSFileError
from cache_util import file_hash
from django.db.models import ImageField
from django.db.models.fields.files import ImageFieldFile
#Settings
ALLOWED_PARAMS = getattr(settings, "ALLOWED_PARAMS", "adaptive-resize resize extent gravity strip thumbnail trim quality crop liquid-rescale scale rotate shave unsharp watermark".split())
ERROR_IMAGE_URL = getattr(settings, 'ERROR_IMAGE_URL', '')
IMAGEUTIL_CACHE_PATH = getattr(settings, "IMAGEUTIL_CACHE_PATH", "imageutil_cache/")
IMAGEMAGICK_CONVERT_PATH = getattr(settings, 'IMAGEMAGICK_CONVERT_PATH', 'convert')
IMAGEUTIL_SHORTCUTS = getattr(settings, "IMAGEUTIL_SHORTCUTS", {})
#If a conversion uses more than 10mb of memory, IM will quit (a time limit can be re-enabled below).
IMAGEMAGICK_ALWAYS_PASS = getattr(settings, "IMAGEMAGICK_ALWAYS_PASS", "-limit area 10mb") # -limit time 5")
#no settings
_IMAGEUTIL_CACHE_ROOT = os.path.join(settings.MEDIA_ROOT, IMAGEUTIL_CACHE_PATH)
def convert(original_image_path, arg):
"""
Takes the file name (relative to MEDIA_ROOT), and a specification of the conversion.
Returns a URL to retrieve the converted file.
See http://www.imagemagick.org/script/command-line-options.php for the possible options.
    Does the conversion, if it's not cached, and caches it in MEDIA_ROOT/IMAGEUTIL_CACHE_PATH.
Pseudocode for filter:
1. generate the result filename.
2. does it exist? Yes = return it. No = create it.
3. do the conversion; save the file as the result filename.
@accepts:
original_image_path - string - filename of the image; if the file specified lives outside MEDIA_ROOT ImageMagickException will be raised
arg - string - list of arguments. all arguments must be included in ALLOWED_PARAMS, otherwise, ImageMagickException will be raised
@returns:
string - image url
"""
try:
# check that all arguments are in ALLOWED_PARAMS
# we are assuming that all of the params that actually determine action start with dash
panic = [a for a in arg.split() if (a.startswith("-") and not a[1:] in ALLOWED_PARAMS)]
if panic:
raise ImageMagickException("One of the arguments is not in a whitelist. List of arguments supplied: %s" % panic)
arg = IMAGEUTIL_SHORTCUTS.get(arg, arg)
if not original_image_path:
raise ImageMagickOSFileError('No file specified')
if isinstance(original_image_path, ImageField):
original_image_path = original_image_path.path
if isinstance(original_image_path, ImageFieldFile):
original_image_path = original_image_path.path
if not (isinstance(original_image_path, str) or isinstance(original_image_path, unicode)):
raise ImageMagickException('Original image path is a %s, but it must be a string or unicode.' % str(type(original_image_path)))
op = os.path.abspath(os.path.join(settings.MEDIA_ROOT, original_image_path))
        if not op.startswith(os.path.normpath(settings.MEDIA_ROOT)): # someone's trying to access an image outside MEDIA_ROOT; good luck with that!
raise ImageMagickException("Image not under media root")
if arg == "":
#no processing to do.
return urllib.quote(urlparse.urljoin(settings.MEDIA_URL,os.path.relpath(op, settings.MEDIA_ROOT)))
#generate the cache filename
try:
#this depends on the file existing, so we needn't check elsewhere
ophash = file_hash(op)
except OSError, exc:
raise ImageMagickOSFileError(*exc.args)
try:
foldername, filename = op.rsplit(os.path.sep, 1)
except ValueError:
foldername, filename = '', op
try:
name, extension = filename.rsplit(".", 1)
except ValueError:
raise ImageMagickException("Filename does not include extension")
arg_hash = hash(arg)
destination_filename = "o%sa%s.%s" % (ophash, arg_hash, extension)
rel_destination_folder = os.path.join(IMAGEUTIL_CACHE_PATH, filename)
abs_destination_folder = os.path.join(_IMAGEUTIL_CACHE_ROOT, filename)
rel_destination_file = os.path.join(rel_destination_folder, destination_filename)
abs_destination_file = os.path.join(abs_destination_folder, destination_filename)
url = urllib.quote(urlparse.urljoin(settings.MEDIA_URL, rel_destination_file))
#is it in the cache? then return it
if os.path.exists(abs_destination_file):
os.utime(abs_destination_file, None) #update the modified timestamp (for cache purposes)
return url
if not os.path.exists(abs_destination_folder):
os.makedirs(abs_destination_folder)
        # collapse whitespace runs; empty args seem to break 'convert'
        arg = re.sub("\s+", " ", IMAGEMAGICK_ALWAYS_PASS + " " + arg).strip()
arglist = [IMAGEMAGICK_CONVERT_PATH, op,] + arg.split(' ') + [abs_destination_file,]
try:
status = call(arglist)
except OSError:
raise OSError, "Check if your IMAGEMAGICK_CONVERT_PATH is correct. It is currently set to %s" % IMAGEMAGICK_CONVERT_PATH
if status == 0:
return url
else:
cmd = ' '.join(arglist)
raise ImageMagickException, "Error converting %s: ImageMagick returned status %s (command was '%s')" % (op, status, cmd)
except ImageMagickException, e:
# something went wrong. return a filler image or nothing.
# TODO - log, or process the error somehow.
if settings.DEBUG:
raise e
else:
return urllib.quote(ERROR_IMAGE_URL)
def tidy_cache(age=60*60*24*7): #1 week
"""
Walks settings.IMAGE_CACHE_ROOT, deleting all files with a last modified date of more than `age` seconds ago.
"""
cutoff = time.time()-age #num secs since epoch
for path, folders, files in os.walk(_IMAGEUTIL_CACHE_ROOT):
for f in files:
fullpath = os.path.join(path,f)
if os.path.getmtime(fullpath) < cutoff:
os.remove(fullpath)
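# Hedged usage sketch (hypothetical path and arguments):
#
#   url = convert('uploads/photo.jpg', '-resize 100x100 -strip')
#
# On the first call this shells out to ImageMagick and writes the result
# under MEDIA_ROOT/IMAGEUTIL_CACHE_PATH; later calls with the same source
# hash and arguments return the cached file's URL directly.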
|
gregplaysguitar/glamkit
|
glamkit/incubated/imageutil/imagemagick_util.py
|
Python
|
bsd-3-clause
| 6,569
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class ContactFormConfig(AppConfig):
"""The default AppConfig for admin which does autodiscovery."""
name = 'django_contact'
verbose_name = _("Contact")
|
arkanister/django-contact-form-site
|
django_contact/apps.py
|
Python
|
bsd-3-clause
| 255
|
"""
A *lock* defines access to a particular subsystem or property of
Evennia. For example, the "owner" property can be implemented as a
lock. So can the ability to lift an object, or to ban users.
A lock consists of three parts:
- access_type - this defines what kind of access this lock regulates. This
  is just a string.
- function call - this is one or many calls to functions that will determine
if the lock is passed or not.
- lock function(s). These are regular python functions with a special
set of allowed arguments. They should always return a boolean depending
on if they allow access or not.
A lock function is defined by existing in one of the modules
listed by settings.LOCK_FUNC_MODULES. It should also always
take four arguments looking like this:
funcname(accessing_obj, accessed_obj, *args, **kwargs):
[...]
The accessing object is the object wanting to gain access.
The accessed object is the object this lock resides on
args and kwargs will hold optional arguments and/or keyword arguments
to the function as a list and a dictionary respectively.
Example:
def perm(accessing_obj, accessed_obj, *args, **kwargs):
"Checking if the object has a particular, desired permission"
if args:
desired_perm = args[0]
return desired_perm in accessing_obj.permissions.all()
return False
Lock functions should most often be pretty general and ideally possible to
re-use and combine in various ways to build clever locks.
Lock definition ("Lock string")
A lock definition is a string with a special syntax. It is added to
each object's lockhandler, making that lock available from then on.
The lock definition looks like this:
'access_type:[NOT] func1(args)[ AND|OR][NOT] func2() ...'
That is, the access_type, a colon followed by calls to lock functions
combined with AND or OR. NOT negates the result of the following call.
Example:
We want to limit who may edit a particular object (let's call this access_type
for 'edit', it depends on what the command is looking for). We want this to
only work for those with the Permission 'Builders'. So we use our lock
function above and define it like this:
'edit:perm(Builders)'
Here, the lock-function perm() will be called with the string
'Builders' (accessing_obj and accessed_obj are added automatically,
you only need to add the args/kwargs, if any).
If we wanted to make sure the accessing object was BOTH a Builders and a
GoodGuy, we could use AND:
'edit:perm(Builders) AND perm(GoodGuy)'
To allow EITHER Builders OR GoodGuys, we replace AND with OR. perm() is just
one example, the lock function can do anything and compare any properties of
the calling object to decide if the lock is passed or not.
'lift:attrib(very_strong) AND NOT attrib(bad_back)'
To make these work, add the string to the lockhandler of the object you want
to apply the lock to:
obj.lockhandler.add('edit:perm(Builders)')
From then on, a command that wants to check for 'edit' access on this
object would do something like this:
if not target_obj.lockhandler.check(caller, 'edit'):
caller.msg("Sorry, you cannot edit that.")
All objects also have a shortcut called 'access' that is recommended to
use instead:
if not target_obj.access(caller, 'edit'):
caller.msg("Sorry, you cannot edit that.")
Permissions
Permissions are just text strings stored in a comma-separated list on
typeclassed objects. The default perm() lock function uses them,
taking into account settings.PERMISSION_HIERARCHY. Also, the
restricted @perm command sets them, but otherwise they are identical
to any other identifier you can use.
"""
from __future__ import print_function
from builtins import object
import re
import inspect
from django.conf import settings
from evennia.utils import logger, utils
from django.utils.translation import ugettext as _
__all__ = ("LockHandler", "LockException")
WARNING_LOG = "lockwarnings.log"
#
# Exception class. This will be raised
# by errors in lock definitions.
#
class LockException(Exception):
"""
Raised during an error in a lock.
"""
pass
#
# Cached lock functions
#
_LOCKFUNCS = {}
def _cache_lockfuncs():
"""
Updates the cache.
"""
global _LOCKFUNCS
_LOCKFUNCS = {}
for modulepath in settings.LOCK_FUNC_MODULES:
_LOCKFUNCS.update(utils.callables_from_module(modulepath))
#
# pre-compiled regular expressions
#
_RE_FUNCS = re.compile(r"\w+\([^)]*\)")
_RE_SEPS = re.compile(r"(?<=[ )])AND(?=\s)|(?<=[ )])OR(?=\s)|(?<=[ )])NOT(?=\s)")
# matches the literal '%s' placeholders plus the boolean operators
_RE_OK = re.compile(r"%s|and|or|not")
#
#
# Lock handler
#
#
class LockHandler(object):
"""
This handler should be attached to all objects implementing
permission checks, under the property 'lockhandler'.
"""
def __init__(self, obj):
"""
Loads and pre-caches all relevant locks and their functions.
Args:
obj (object): The object on which the lockhandler is
defined.
"""
if not _LOCKFUNCS:
_cache_lockfuncs()
self.obj = obj
self.locks = {}
self.reset()
def __str__(self):
return ";".join(self.locks[key][2] for key in sorted(self.locks))
def _log_error(self, message):
"Try to log errors back to object"
raise LockException(message)
def _parse_lockstring(self, storage_lockstring):
"""
Helper function. This is normally only called when the
lockstring is cached and does preliminary checking. locks are
stored as a string
        atype:[NOT] lock()[ AND|OR [NOT] lock() ...];atype:...
Args:
            storage_lockstring (str): The lockstring to parse.
"""
locks = {}
if not storage_lockstring:
return locks
duplicates = 0
elist = [] # errors
wlist = [] # warnings
for raw_lockstring in storage_lockstring.split(';'):
if not raw_lockstring:
continue
lock_funcs = []
try:
access_type, rhs = (part.strip() for part in raw_lockstring.split(':', 1))
except ValueError:
logger.log_trace()
return locks
# parse the lock functions and separators
funclist = _RE_FUNCS.findall(rhs)
evalstring = rhs
for pattern in ('AND', 'OR', 'NOT'):
evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(), evalstring)
nfuncs = len(funclist)
for funcstring in funclist:
funcname, rest = (part.strip().strip(')') for part in funcstring.split('(', 1))
func = _LOCKFUNCS.get(funcname, None)
if not callable(func):
elist.append(_("Lock: lock-function '%s' is not available.") % funcstring)
continue
args = list(arg.strip() for arg in rest.split(',') if arg and not '=' in arg)
kwargs = dict([arg.split('=', 1) for arg in rest.split(',') if arg and '=' in arg])
lock_funcs.append((func, args, kwargs))
evalstring = evalstring.replace(funcstring, '%s')
if len(lock_funcs) < nfuncs:
continue
try:
# purge the eval string of any superfluous items, then test it
evalstring = " ".join(_RE_OK.findall(evalstring))
eval(evalstring % tuple(True for func in funclist), {}, {})
except Exception:
elist.append(_("Lock: definition '%s' has syntax errors.") % raw_lockstring)
continue
if access_type in locks:
duplicates += 1
wlist.append(_("LockHandler on %(obj)s: access type '%(access_type)s' changed from '%(source)s' to '%(goal)s' " % \
{"obj":self.obj, "access_type":access_type, "source":locks[access_type][2], "goal":raw_lockstring}))
locks[access_type] = (evalstring, tuple(lock_funcs), raw_lockstring)
if wlist:
# a warning text was set, it's not an error, so only report
logger.log_file("\n".join(wlist), WARNING_LOG)
if elist:
# an error text was set, raise exception.
raise LockException("\n".join(elist))
# return the gathered locks in an easily executable form
return locks
def _cache_locks(self, storage_lockstring):
"""
Store data
"""
self.locks = self._parse_lockstring(storage_lockstring)
def _save_locks(self):
"""
Store locks to obj
"""
self.obj.lock_storage = ";".join([tup[2] for tup in self.locks.values()])
def cache_lock_bypass(self, obj):
"""
We cache superuser bypass checks here for efficiency. This
needs to be re-run when a player is assigned to a character.
We need to grant access to superusers. We need to check both
directly on the object (players), through obj.player and using
        the get_player() method (this sits on serversessions, for some
        rare cases where a check is done before the login process has
        been fully finalized).
Args:
obj (object): This is checked for the `is_superuser` property.
"""
self.lock_bypass = hasattr(obj, "is_superuser") and obj.is_superuser
def add(self, lockstring):
"""
Add a new lockstring to handler.
Args:
lockstring (str): A string on the form
`"<access_type>:<functions>"`. Multiple access types
should be separated by semicolon (`;`).
Returns:
success (bool): The outcome of the addition, `False` on
error.
"""
# sanity checks
for lockdef in lockstring.split(';'):
            if not ':' in lockdef:
self._log_error(_("Lock: '%s' contains no colon (:).") % lockdef)
return False
access_type, rhs = [part.strip() for part in lockdef.split(':', 1)]
if not access_type:
self._log_error(_("Lock: '%s' has no access_type (left-side of colon is empty).") % lockdef)
return False
if rhs.count('(') != rhs.count(')'):
self._log_error(_("Lock: '%s' has mismatched parentheses.") % lockdef)
return False
if not _RE_FUNCS.findall(rhs):
self._log_error(_("Lock: '%s' has no valid lock functions.") % lockdef)
return False
# get the lock string
storage_lockstring = self.obj.lock_storage
if storage_lockstring:
storage_lockstring = storage_lockstring + ";" + lockstring
else:
storage_lockstring = lockstring
        # caching the locks will get rid of any duplicates
self._cache_locks(storage_lockstring)
self._save_locks()
return True
def replace(self, lockstring):
"""
Replaces the lockstring entirely.
Args:
lockstring (str): The new lock definition.
Return:
success (bool): False if an error occurred.
Raises:
LockException: If a critical error occurred.
If so, the old string is recovered.
"""
old_lockstring = str(self)
self.clear()
try:
return self.add(lockstring)
except LockException:
self.add(old_lockstring)
raise
def get(self, access_type=None):
"""
Get the full lockstring or the lockstring of a particular
access type.
Args:
access_type (str, optional):
Returns:
lockstring (str): The matched lockstring, or the full
lockstring if no access_type was given.
"""
if access_type:
return self.locks.get(access_type, ["", "", ""])[2]
return str(self)
def remove(self, access_type):
"""
Remove a particular lock from the handler
Args:
access_type (str): The type of lock to remove.
Returns:
success (bool): If the access_type was not found
in the lock, this returns `False`.
"""
if access_type in self.locks:
del self.locks[access_type]
self._save_locks()
return True
return False
delete = remove # alias for historical reasons
def clear(self):
"""
Remove all locks in the handler.
"""
self.locks = {}
self.lock_storage = ""
self._save_locks()
def reset(self):
"""
        Set the reset flag, so the lock will be re-cached at next
checking. This is usually called by @reload.
"""
self._cache_locks(self.obj.lock_storage)
self.cache_lock_bypass(self.obj)
def check(self, accessing_obj, access_type, default=False, no_superuser_bypass=False):
"""
Checks a lock of the correct type by passing execution off to
the lock function(s).
Args:
accessing_obj (object): The object seeking access.
access_type (str): The type of access wanted.
default (bool, optional): If no suitable lock type is
found, default to this result.
no_superuser_bypass (bool): Don't use this unless you
                really, really need to; it makes superusers susceptible
to the lock check.
Notes:
            A lock is executed in the following way:
Parsing the lockstring, we (during cache) extract the valid
lock functions and store their function objects in the right
order along with their args/kwargs. These are now executed in
sequence, creating a list of True/False values. This is put
into the evalstring, which is a string of AND/OR/NOT entries
separated by placeholders where each function result should
go. We just put those results in and evaluate the string to
get a final, combined True/False value for the lockstring.
The important bit with this solution is that the full
            lockstring is never blindly evaluated, and thus there (should
            be) no way to sneak malign code into it. Only "safe" lock
functions (as defined by your settings) are executed.
"""
try:
# check if the lock should be bypassed (e.g. superuser status)
if accessing_obj.locks.lock_bypass and not no_superuser_bypass:
return True
except AttributeError:
# happens before session is initiated.
if not no_superuser_bypass and ((hasattr(accessing_obj, 'is_superuser') and accessing_obj.is_superuser)
or (hasattr(accessing_obj, 'player') and hasattr(accessing_obj.player, 'is_superuser') and accessing_obj.player.is_superuser)
or (hasattr(accessing_obj, 'get_player') and (not accessing_obj.get_player() or accessing_obj.get_player().is_superuser))):
return True
# no superuser or bypass -> normal lock operation
if access_type in self.locks:
# we have a lock, test it.
evalstring, func_tup, raw_string = self.locks[access_type]
# execute all lock funcs in the correct order, producing a tuple of True/False results.
true_false = tuple(bool(tup[0](accessing_obj, self.obj, *tup[1], **tup[2])) for tup in func_tup)
# the True/False tuple goes into evalstring, which combines them
# with AND/OR/NOT in order to get the final result.
return eval(evalstring % true_false)
else:
return default
def _eval_access_type(self, accessing_obj, locks, access_type):
"""
Helper method for evaluating the access type using eval().
Args:
accessing_obj (object): Object seeking access.
locks (dict): The pre-parsed representation of all access-types.
access_type (str): An access-type key to evaluate.
"""
evalstring, func_tup, raw_string = locks[access_type]
true_false = tuple(tup[0](accessing_obj, self.obj, *tup[1], **tup[2])
for tup in func_tup)
return eval(evalstring % true_false)
def check_lockstring(self, accessing_obj, lockstring, no_superuser_bypass=False,
default=False, access_type=None):
"""
Do a direct check against a lockstring ('atype:func()..'),
without any intermediary storage on the accessed object.
Args:
accessing_obj (object or None): The object seeking access.
Importantly, this can be left unset if the lock functions
don't access it, no updating or storage of locks are made
against this object in this method.
lockstring (str): Lock string to check, on the form
`"access_type:lock_definition"` where the `access_type`
part can potentially be set to a dummy value to just check
a lock condition.
no_superuser_bypass (bool, optional): Force superusers to heed lock.
default (bool, optional): Fallback result to use if `access_type` is set
but no such `access_type` is found in the given `lockstring`.
            access_type (str, optional): If set, only this access_type will be looked up
among the locks defined by `lockstring`.
Return:
access (bool): If check is passed or not.
"""
try:
if accessing_obj.locks.lock_bypass and not no_superuser_bypass:
return True
except AttributeError:
            # mirror the superuser-bypass fallback used in check()
            if not no_superuser_bypass and ((hasattr(accessing_obj, 'is_superuser') and accessing_obj.is_superuser)
or (hasattr(accessing_obj, 'player') and hasattr(accessing_obj.player, 'is_superuser') and accessing_obj.player.is_superuser)
or (hasattr(accessing_obj, 'get_player') and (not accessing_obj.get_player() or accessing_obj.get_player().is_superuser))):
return True
if not ":" in lockstring:
lockstring = "%s:%s" % ("_dummy", lockstring)
locks = self._parse_lockstring(lockstring)
if access_type:
if not access_type in locks:
return default
else:
return self._eval_access_type(
accessing_obj, locks, access_type)
for access_type in locks:
return self._eval_access_type(accessing_obj, locks, access_type)
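def _example_evalstring():
    """
    Hedged sketch (not part of the handler): the evalstring mechanism
    described in check(). The cached evalstring keeps '%s' placeholders
    where each lock function's boolean result is substituted before the
    combined expression is evaluated.
    """
    evalstring = "%s and not %s"          # cached form of 'func1() AND NOT func2()'
    true_false = (True, False)            # hypothetical results of func1, func2
    return eval(evalstring % true_false)  # -> True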
def _test():
# testing
class TestObj(object):
pass
import pdb
obj1 = TestObj()
obj2 = TestObj()
#obj1.lock_storage = "owner:dbref(#4);edit:dbref(#5) or perm(Wizards);examine:perm(Builders);delete:perm(Wizards);get:all()"
#obj1.lock_storage = "cmd:all();admin:id(1);listen:all();send:all()"
obj1.lock_storage = "listen:perm(Immortals)"
pdb.set_trace()
obj1.locks = LockHandler(obj1)
obj2.permissions.add("Immortals")
obj2.id = 4
#obj1.locks.add("edit:attr(test)")
print("comparing obj2.permissions (%s) vs obj1.locks (%s)" % (obj2.permissions, obj1.locks))
print(obj1.locks.check(obj2, 'owner'))
print(obj1.locks.check(obj2, 'edit'))
print(obj1.locks.check(obj2, 'examine'))
print(obj1.locks.check(obj2, 'delete'))
print(obj1.locks.check(obj2, 'get'))
print(obj1.locks.check(obj2, 'listen'))
|
ergodicbreak/evennia
|
evennia/locks/lockhandler.py
|
Python
|
bsd-3-clause
| 19,738
|
"""Kraken Framework."""
import logging
import os
__all__ = ['core', 'helpers', 'plugins', 'ui']
krakenPath = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..'))
if os.environ.get('KRAKEN_PATH', None) is None:
os.environ['KRAKEN_PATH'] = krakenPath
krakenExtsPath = os.path.join(krakenPath, 'Exts')
if 'FABRIC_EXTS_PATH' in os.environ:
if krakenExtsPath not in os.environ['FABRIC_EXTS_PATH']:
os.environ['FABRIC_EXTS_PATH'] = krakenExtsPath + os.pathsep + os.environ['FABRIC_EXTS_PATH']
krakenPresetsPath = os.path.join(krakenPath, 'Presets', 'DFG')
if 'FABRIC_DFG_PATH' in os.environ:
if krakenPresetsPath not in os.environ['FABRIC_DFG_PATH']:
os.environ['FABRIC_DFG_PATH'] = krakenPresetsPath + os.pathsep + os.environ['FABRIC_DFG_PATH']
logging.basicConfig(format='[KRAKEN] %(levelname)s: %(message)s', level=logging.INFO)
# Custom INFORM level used by the UI to add labels to the status bar.
logging.INFORM = 25
logging.addLevelName(logging.INFORM, 'INFORM')
logging.Logger.inform = lambda inst, msg, *args, **kwargs: inst.log(logging.INFORM, msg, *args, **kwargs)
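# Hedged usage sketch: INFORM (25) sits between INFO (20) and WARNING (30),
# so it passes the basicConfig threshold set above, and any logger gains an
# .inform() method:
#
#   logging.getLogger(__name__).inform('Rig build complete')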
|
goshow-jp/Kraken
|
Python/kraken/__init__.py
|
Python
|
bsd-3-clause
| 1,147
|
"""
Simple Scatter Plot with Labels
===============================
This example shows a basic scatter plot with labels created with Altair.
"""
# category: scatter plots
import altair as alt
import pandas as pd
data = pd.DataFrame({
'x': [1, 3, 5, 7, 9],
'y': [1, 3, 5, 7, 9],
'label': ['A', 'B', 'C', 'D', 'E']
})
points = alt.Chart(data).mark_point().encode(
x='x:Q',
y='y:Q'
)
text = points.mark_text(
align='left',
baseline='middle',
dx=7
).encode(
text='label'
)
points + text
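# Note: layering with `+` returns an alt.LayerChart; the explicit
# equivalent is alt.layer(points, text).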
|
ellisonbg/altair
|
altair/vegalite/v2/examples/scatter_with_labels.py
|
Python
|
bsd-3-clause
| 517
|
from datetime import datetime, timedelta
from rdr_service.dao.ghost_check_dao import GhostCheckDao
from tests.helpers.unittest_base import BaseTestCase
class GhostCheckDaoTest(BaseTestCase):
def test_loads_only_vibrent(self):
"""We might accidentally start flagging CE participants as ghosts if they're returned"""
vibrent_participant = self.data_generator.create_database_participant(participantOrigin='vibrent')
self.data_generator.create_database_participant(participantOrigin='careevolution')
self.data_generator.create_database_participant(participantOrigin='anotherplatform')
participants = GhostCheckDao.get_participants_needing_checked(
session=self.data_generator.session,
earliest_signup_time=datetime.now() - timedelta(weeks=1)
)
self.assertEqual(1, len(participants), 'Should only be the Vibrent participant')
self.assertEqual(vibrent_participant.participantId, participants[0].participantId)
def test_ghost_flag_returned(self):
"""Ensure we get back the ghost data field"""
ghost_participant = self.data_generator.create_database_participant(
participantOrigin='vibrent',
isGhostId=True
)
self.data_generator.create_database_participant(
participantOrigin='vibrent',
isGhostId=None
)
self.data_generator.create_database_participant(
participantOrigin='vibrent',
isGhostId=False
)
results = GhostCheckDao.get_participants_needing_checked(
session=self.data_generator.session,
earliest_signup_time=datetime.now() - timedelta(weeks=1)
)
for participant in results:
if participant.participantId == ghost_participant.participantId:
self.assertTrue(participant.isGhostId)
else:
self.assertFalse(participant.isGhostId)
|
all-of-us/raw-data-repository
|
tests/dao_tests/test_ghost_check_dao.py
|
Python
|
bsd-3-clause
| 1,959
|
"""
General Character commands usually available to all characters
"""
from django.conf import settings
from evennia.utils import utils, prettytable
from evennia.commands.default.muxcommand import MuxCommand
# limit symbol import for API
__all__ = ("CmdHome", "CmdLook", "CmdNick",
"CmdInventory", "CmdGet", "CmdDrop", "CmdGive",
"CmdSay", "CmdPose", "CmdAccess")
class CmdHome(MuxCommand):
"""
move to your character's home location
Usage:
home
Teleports you to your home location.
"""
key = "home"
locks = "cmd:perm(home) or perm(Builders)"
arg_regex = r"$"
def func(self):
"Implement the command"
caller = self.caller
home = caller.home
if not home:
caller.msg("You have no home!")
elif home == caller.location:
caller.msg("You are already home!")
else:
caller.move_to(home)
caller.msg("There's no place like home ...")
class CmdLook(MuxCommand):
"""
look at location or object
Usage:
look
look <obj>
look *<player>
Observes your location or objects in your vicinity.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"""
Handle the looking.
"""
caller = self.caller
args = self.args
if args:
            # Use search to handle duplicate/nonexistent results.
looking_at_obj = caller.search(args, use_nicks=True)
if not looking_at_obj:
return
else:
looking_at_obj = caller.location
if not looking_at_obj:
caller.msg("You have no location to look at!")
return
if not hasattr(looking_at_obj, 'return_appearance'):
# this is likely due to us having a player instead
looking_at_obj = looking_at_obj.character
if not looking_at_obj.access(caller, "view"):
caller.msg("Could not find '%s'." % args)
return
# get object's appearance
caller.msg(looking_at_obj.return_appearance(caller))
# the object's at_desc() method.
looking_at_obj.at_desc(looker=caller)
class CmdNick(MuxCommand):
"""
define a personal alias/nick
Usage:
nick[/switches] <nickname> = [<string>]
alias ''
Switches:
object - alias an object
player - alias a player
clearall - clear all your aliases
list - show all defined aliases (also "nicks" works)
Examples:
nick hi = say Hello, I'm Sarah!
nick/object tom = the tall man
A 'nick' is a personal shortcut you create for your own use. When
you enter the nick, the alternative string will be sent instead.
The switches control in which situations the substitution will
happen. The default is that it will happen when you enter a
command. The 'object' and 'player' nick-types kick in only when
you use commands that requires an object or player as a target -
you can then use the nick to refer to them.
Note that no objects are actually renamed or changed by this
command - the nick is only available to you. If you want to
permanently add keywords to an object for everyone to use, you
need build privileges and to use the @alias command.
"""
key = "nick"
aliases = ["nickname", "nicks", "@nick", "alias"]
locks = "cmd:all()"
def func(self):
"Create the nickname"
caller = self.caller
switches = self.switches
nicks = caller.nicks.get(return_obj=True)
if 'list' in switches:
table = prettytable.PrettyTable(["{wNickType",
"{wNickname",
"{wTranslates-to"])
for nick in utils.make_iter(nicks):
table.add_row([nick.db_category, nick.db_key, nick.db_strvalue])
string = "{wDefined Nicks:{n\n%s" % table
caller.msg(string)
return
if 'clearall' in switches:
caller.nicks.clear()
caller.msg("Cleared all aliases.")
return
if not self.args or not self.lhs:
caller.msg("Usage: nick[/switches] nickname = [realname]")
return
nick = self.lhs
real = self.rhs
if real == nick:
caller.msg("No point in setting nick same as the string to replace...")
return
# check so we have a suitable nick type
if not any(True for switch in switches if switch in ("object", "player", "inputline")):
switches = ["inputline"]
string = ""
for switch in switches:
oldnick = caller.nicks.get(key=nick, category=switch)
if not real:
# removal of nick
if oldnick:
# clear the alias
string += "\nNick '%s' (= '%s') was cleared." % (nick, oldnick)
caller.nicks.delete(nick, category=switch)
else:
string += "\nNo nick '%s' found, so it could not be removed." % nick
else:
# creating new nick
if oldnick:
string += "\nNick %s changed from '%s' to '%s'." % (nick, oldnick, real)
else:
string += "\nNick set: '%s' = '%s'." % (nick, real)
caller.nicks.add(nick, real, category=switch)
caller.msg(string)
class CmdInventory(MuxCommand):
"""
view inventory
Usage:
inventory
inv
Shows your inventory.
"""
key = "inventory"
aliases = ["inv", "i"]
locks = "cmd:all()"
arg_regex = r"$"
def func(self):
"check inventory"
items = self.caller.contents
if not items:
string = "You are not carrying anything."
else:
table = prettytable.PrettyTable(["name", "desc"])
table.header = False
table.border = False
for item in items:
table.add_row(["{C%s{n" % item.name, item.db.desc and item.db.desc or ""])
string = "{wYou are carrying:\n%s" % table
self.caller.msg(string)
class CmdGet(MuxCommand):
"""
pick up something
Usage:
get <obj>
Picks up an object from your location and puts it in
your inventory.
"""
key = "get"
aliases = "grab"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"implements the command."
caller = self.caller
if not self.args:
caller.msg("Get what?")
return
#print "general/get:", caller, caller.location, self.args, caller.location.contents
obj = caller.search(self.args, location=caller.location)
if not obj:
return
if caller == obj:
caller.msg("You can't get yourself.")
return
if not obj.access(caller, 'get'):
if obj.db.get_err_msg:
caller.msg(obj.db.get_err_msg)
else:
caller.msg("You can't get that.")
return
obj.move_to(caller, quiet=True)
caller.msg("You pick up %s." % obj.name)
caller.location.msg_contents("%s picks up %s." %
(caller.name,
obj.name),
exclude=caller)
# calling hook method
obj.at_get(caller)
class CmdDrop(MuxCommand):
"""
drop something
Usage:
drop <obj>
Lets you drop an object from your inventory into the
location you are currently in.
"""
key = "drop"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"Implement command"
caller = self.caller
if not self.args:
caller.msg("Drop what?")
return
# Because the DROP command by definition looks for items
# in inventory, call the search function using location = caller
obj = caller.search(self.args, location=caller,
nofound_string="You aren't carrying %s." % self.args,
multimatch_string="You carry more than one %s:" % self.args)
if not obj:
return
obj.move_to(caller.location, quiet=True)
caller.msg("You drop %s." % (obj.name,))
caller.location.msg_contents("%s drops %s." %
(caller.name, obj.name),
exclude=caller)
# Call the object script's at_drop() method.
obj.at_drop(caller)
class CmdGive(MuxCommand):
"""
give away something to someone
Usage:
give <inventory obj> = <target>
    Gives an item from your inventory to another character,
placing it in their inventory.
"""
key = "give"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"Implement give"
caller = self.caller
if not self.args or not self.rhs:
caller.msg("Usage: give <inventory object> = <target>")
return
to_give = caller.search(self.lhs, location=caller,
nofound_string="You aren't carrying %s." % self.lhs,
multimatch_string="You carry more than one %s:" % self.lhs)
target = caller.search(self.rhs)
if not (to_give and target):
return
if target == caller:
caller.msg("You keep %s to yourself." % to_give.key)
return
if not to_give.location == caller:
caller.msg("You are not holding %s." % to_give.key)
return
# give object
caller.msg("You give %s to %s." % (to_give.key, target.key))
to_give.move_to(target, quiet=True)
target.msg("%s gives you %s." % (caller.key, to_give.key))
class CmdDesc(MuxCommand):
"""
describe yourself
Usage:
desc <description>
Add a description to yourself. This
will be visible to people when they
look at you.
"""
key = "desc"
locks = "cmd:all()"
arg_regex = r"\s|$"
def func(self):
"add the description"
if not self.args:
self.caller.msg("You must add a description.")
return
self.caller.db.desc = self.args.strip()
self.caller.msg("You set your description.")
class CmdSay(MuxCommand):
"""
speak as your character
Usage:
say <message>
Talk to those in your current location.
"""
key = "say"
aliases = ['"', "'"]
locks = "cmd:all()"
def func(self):
"Run the say command"
caller = self.caller
if not self.args:
caller.msg("Say what?")
return
speech = self.args
# calling the speech hook on the location
speech = caller.location.at_say(caller, speech)
# Feedback for the object doing the talking.
caller.msg('You say, "%s{n"' % speech)
# Build the string to emit to neighbors.
emit_string = '%s says, "%s{n"' % (caller.name,
speech)
caller.location.msg_contents(emit_string,
exclude=caller)
class CmdPose(MuxCommand):
"""
strike a pose
Usage:
pose <pose text>
pose's <pose text>
Example:
pose is standing by the wall, smiling.
-> others will see:
Tom is standing by the wall, smiling.
Describe an action being taken. The pose text will
automatically begin with your name.
"""
key = "pose"
aliases = [":", "emote"]
locks = "cmd:all()"
def parse(self):
"""
Custom parse the cases where the emote
        starts with some special character, such
        as 's, in which case we don't want to separate
the caller's name and the emote with a
space.
"""
args = self.args
if args and not args[0] in ["'", ",", ":"]:
args = " %s" % args.strip()
self.args = args
def func(self):
"Hook function"
if not self.args:
msg = "What do you want to do?"
self.caller.msg(msg)
else:
msg = "%s%s" % (self.caller.name, self.args)
self.caller.location.msg_contents(msg)
class CmdAccess(MuxCommand):
"""
show your current game access
Usage:
access
This command shows you the permission hierarchy and
which permission groups you are a member of.
"""
key = "access"
aliases = ["groups", "hierarchy"]
locks = "cmd:all()"
arg_regex = r"$"
def func(self):
"Load the permission groups"
caller = self.caller
hierarchy_full = settings.PERMISSION_HIERARCHY
string = "\n{wPermission Hierarchy{n (climbing):\n %s" % ", ".join(hierarchy_full)
#hierarchy = [p.lower() for p in hierarchy_full]
if self.caller.player.is_superuser:
cperms = "<Superuser>"
pperms = "<Superuser>"
else:
cperms = ", ".join(caller.permissions.all())
pperms = ", ".join(caller.player.permissions.all())
string += "\n{wYour access{n:"
string += "\nCharacter {c%s{n: %s" % (caller.key, cperms)
if hasattr(caller, 'player'):
string += "\nPlayer {c%s{n: %s" % (caller.player.key, pperms)
caller.msg(string)
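# Hedged usage sketch (not part of this module): these command classes are
# typically collected into a command set before they become available
# in-game, e.g.:
#
#   from evennia import CmdSet
#
#   class CharacterCmdSet(CmdSet):
#       def at_cmdset_creation(self):
#           self.add(CmdLook())
#           self.add(CmdSay())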
|
mrkulk/text-world
|
evennia/commands/default/general.py
|
Python
|
bsd-3-clause
| 13,667
|
from corehq.dbaccessors.couchapps.all_docs import \
get_all_doc_ids_for_domain_grouped_by_db, get_doc_count_by_type, \
delete_all_docs_by_doc_type, get_doc_count_by_domain_type
from dimagi.utils.couch.database import get_db
from django.test import TestCase
class AllDocsTest(TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
cls.main_db = get_db(None)
cls.users_db = get_db('users')
cls.doc_types = ('Application', 'CommCareUser')
delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
cls.domain1 = 'all-docs-domain1'
cls.domain2 = 'all-docs-domain2'
cls.main_db_doc = {'_id': 'main_db_doc', 'doc_type': 'Application'}
cls.users_db_doc = {'_id': 'users_db_doc', 'doc_type': 'CommCareUser'}
for doc_type in cls.doc_types:
for domain in (cls.domain1, cls.domain2):
db_alias = 'main' if doc_type == 'Application' else 'users'
doc_id = '{}_db_doc_{}'.format(db_alias, domain)
doc = {'_id': doc_id, 'doc_type': doc_type, 'domain': domain}
if doc_type == 'Application':
cls.main_db.save_doc(doc)
else:
cls.users_db.save_doc(doc)
@classmethod
def tearDownClass(cls):
delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
def test_get_all_doc_ids_for_domain_grouped_by_db(self):
self.assertEqual(
{key.uri: list(value) for key, value in
get_all_doc_ids_for_domain_grouped_by_db(self.domain1)},
{get_db(None).uri: ['main_db_doc_all-docs-domain1'],
get_db('users').uri: ['users_db_doc_all-docs-domain1'],
get_db('meta').uri: [],
get_db('fixtures').uri: [],
get_db('domains').uri: [],
get_db('apps').uri: []}
)
def test_get_doc_count_by_type(self):
self.assertEqual(get_doc_count_by_type(get_db(None), 'Application'), 2)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'CommCareUser'), 2)
self.assertEqual(get_doc_count_by_type(get_db(None), 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_type(get_db('users'), 'Application'), 0)
def test_get_doc_count_by_domain_type(self):
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'Application'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain2, 'Application'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), 'other', 'Application'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain2, 'CommCareUser'), 1)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), 'other', 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'CommCareUser'), 0)
self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'Application'), 0)
|
qedsoftware/commcare-hq
|
corehq/couchapps/tests/test_all_docs.py
|
Python
|
bsd-3-clause
| 3,246
|
# ~*~ coding: utf-8 ~*~
"""
tests.marshmallow.test_extension
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for the :class:`MarshmallowAwareApp` to ensure that it will properly
register the extension and can be used, as well as testing the top level
schema.
"""
import pytest
from flask_marshmallow import fields
from fleaker import Schema
from fleaker.marshmallow import MarshmallowAwareApp, marsh
SERVER_NAME = 'localhost'
def _create_app():
"""Create the app for testing."""
app = MarshmallowAwareApp.create_app('tests.marshmallow')
app.config['SERVER_NAME'] = SERVER_NAME
@app.route('/test')
def test():
"""Test route for Flask URL generation."""
return b'test'
return app
def test_marshmallow_extension_creation():
"""Ensure creating the MM Aware app registers the extension."""
app = _create_app()
# now check for the proper extension
assert 'flask-marshmallow' in app.extensions
assert app.extensions['flask-marshmallow'] is marsh
def test_marshmallow_extension_url_for():
"""Ensure that the UrlFor field with Flask-Marshmallow works."""
app = _create_app()
class TestSchema(Schema):
"""Only has a link field"""
link = fields.UrlFor('test', _external=False)
ext_link = fields.UrlFor('test', _scheme='https', _external=True)
schema = TestSchema()
# not in an app context, should fail
with pytest.raises(RuntimeError):
schema.dump({})
with app.app_context():
data = schema.dump({}).data
assert data['link'] == '/test'
assert data['ext_link'] == 'https://{}/test'.format(SERVER_NAME)
|
croscon/fleaker
|
tests/marshmallow/test_extension.py
|
Python
|
bsd-3-clause
| 1,644
|
"""
DataLab survey class. Gets data from any survey
available through the NOAO datalab-client.
"""
import pdb
import numpy as np
import warnings
from astropy.table import Table
from astropy import units
import sys, os
try:
from dl import queryClient as qc, authClient as ac
from dl.helpers.utils import convert
except:
print("Warning: datalab-client is not installed or will not properly connect")
from frb.surveys import surveycoord
class DL_Survey(surveycoord.SurveyCoord):
"""
A survey class for all databases hosted
by NOAO's DataLab. Inherits from SurveyCoord
"""
def __init__(self, coord, radius, **kwargs):
surveycoord.SurveyCoord.__init__(self, coord, radius, **kwargs)
        #Define photometric band names.
self.token = ac.login('anonymous')
self.bands = []
#Instantiate sia service
self.svc = None
#Generate query
self.query = None
self.qc_profile = None
def _parse_cat_band(self,band):
return None, None, None
def _gen_cat_query(self,query_fields=None):
pass
def _select_best_img(self,imgTable,verbose,timeout=120):
pass
def get_catalog(self, query=None, query_fields=None, print_query=False,timeout=120):
"""
Get catalog sources around the given coordinates
within self.radius.
Args:
query (str, optional): SQL query to generate the catalog
query_fields (list, optional): Over-ride list of items to query
            print_query (bool): Print the SQL query generated
            timeout (int, optional): Query timeout in seconds
Returns:
astropy.table.Table: Catalog of sources obtained from the SQL query.
"""
qc.set_profile(self.qc_profile)
# Generate the query
if query is None:
self._gen_cat_query(query_fields)
query = self.query
if print_query:
print(query)
        # Run the query
result = qc.query(self.token, sql=query,timeout=timeout)
self.catalog = convert(result,outfmt="table")
self.catalog.meta['radius'] = self.radius
self.catalog.meta['survey'] = self.survey
# Validate
self.validate_catalog()
# Return
return self.catalog.copy()
def get_image(self, imsize, band, timeout=120, verbose=False):
"""
Get images from the catalog if available
for a given fov and band.
Args:
imsize (Quantity): FOV for the desired image
band (str): Band for the image (e.g. 'r')
timeout (int, optional): Time to wait in seconds before timing out
            verbose (bool, optional): Print the number of images found
Returns:
HDU: Image header data unit
"""
ra = self.coord.ra.value
dec = self.coord.dec.value
fov = imsize.to(units.deg).value
if band.lower() not in self.bands and band not in self.bands:
raise TypeError("Allowed filters (case-insensitive) for {:s} photometric bands are {}".format(self.survey,self.bands))
table_cols, col_vals, bandstr = self._parse_cat_band(band)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
imgTable = self.svc.search((ra,dec), (fov/np.cos(dec*np.pi/180), fov), verbosity=2).to_table()
if verbose:
print("The full image list contains", len(imgTable), "entries")
#Select band
selection = imgTable['obs_bandpass'].astype(str)==bandstr
        #Apply any additional survey-specific column filters
for column, value in zip(table_cols,col_vals):
selection = selection & ((imgTable[column].astype(str)==value))
imgTable = imgTable[selection]
        if len(imgTable) > 0:
            imagedat = self._select_best_img(imgTable, verbose=verbose, timeout=timeout)
img_hdu = imagedat[0]
else:
print('No image available')
img_hdu = None
return img_hdu
def get_cutout(self, imsize, band=None):
"""
Get cutout (and header)
Args:
imsize (Quantity): e.g 10*units.arcsec
band (str): e.g. 'r'
Returns:
ndarray, Header: cutout image, cutout image header
"""
self.cutout_size = imsize
if band is None:
if "r" in self.bands:
band = "r"
            else:
band = self.bands[-1]
warnings.warn("Retrieving cutout in {:s} band".format(band))
img_hdu = self.get_image(imsize, band)
if img_hdu is not None:
self.cutout = img_hdu.data
self.cutout_hdr = img_hdu.header
else:
self.cutout = None
self.cutout_hdr = None
return self.cutout, self.cutout_hdr
def _default_query_str(query_fields, database, coord, radius):
"""
Generates default query string for a catalog search.
Args:
query_fields (list of str): A list of query fields to
retrieve from the database
database (str): Name of the database
coord (astropy.coordinates.SkyCoord): Central coordinate of the search
radius (astropy.units.Quantity or Angle): Search radius
Returns:
str: A query to be fed to datalab's SQL client
"""
query_field_str = ""
for field in query_fields:
query_field_str += " {:s},".format(field)
# Remove last comma
query_field_str = query_field_str[:-1]
default_query = """SELECT{:s}
FROM {:s}
WHERE q3c_radial_query(ra,dec,{:f},{:f},{:f})
""".format(query_field_str,database,coord.ra.value,
coord.dec.value,radius.to(units.deg).value)
return default_query
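# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates how _default_query_str assembles a q3c cone-search query.
# The field list and the table name "ls_dr9.tractor" are illustrative
# assumptions, not values defined in this module.
if __name__ == '__main__':
    from astropy.coordinates import SkyCoord
    example_coord = SkyCoord(ra=183.979, dec=-13.006, unit='deg')
    example_query = _default_query_str(['ra', 'dec', 'flux_r'],
                                       'ls_dr9.tractor',
                                       example_coord,
                                       5 * units.arcsec)
    print(example_query)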
|
FRBs/DM
|
frb/surveys/dlsurvey.py
|
Python
|
bsd-3-clause
| 5,919
|
from __future__ import absolute_import, division, print_function
from itertools import chain
from dynd import nd
import datashape
from datashape.internal_utils import IndexCallable
from datashape import discover
from functools import partial
from ..dispatch import dispatch
from blaze.expr import Projection, Field
from blaze.expr import Expr, UnaryOp
from .utils import validate, coerce, coerce_to_ordered, ordered_index
from ..utils import partition_all
__all__ = ['DataDescriptor', 'discover', 'compute_up']
def isdimension(ds):
return isinstance(ds, (datashape.Var, datashape.Fixed))
class DataDescriptor(object):
"""
Standard interface to data storage
Data descriptors provide read and write access to common data storage
systems like csv, json, HDF5, and SQL.
They provide Pythonic iteration over these resources as well as efficient
chunked access with DyND arrays.
Data Descriptors implement the following methods:
__iter__ - iterate over storage, getting results as Python objects
chunks - iterate over storage, getting results as DyND arrays
    extend - insert new data into storage (if possible).
             Consumes a sequence of core Python objects
    extend_chunks - insert new data into storage (if possible).
             Consumes a sequence of DyND arrays
as_dynd - load entire dataset into memory as a DyND array
"""
def extend(self, rows):
""" Extend data with many rows
"""
rows = iter(rows)
row = next(rows)
rows = chain([row], rows)
if not validate(self.schema, row):
raise ValueError('Invalid data:\n\t %s \nfor dshape \n\t%s' %
(str(row), self.schema))
if isinstance(row, dict):
rows = map(partial(coerce_to_ordered, self.schema), rows)
self._extend(rows)
def extend_chunks(self, chunks):
def dtype_of(chunk):
return str(len(chunk) * self.schema)
self._extend_chunks((nd.array(chunk, type=dtype_of(chunk))
for chunk in chunks))
def _extend_chunks(self, chunks):
self.extend((row for chunk in chunks
for row in nd.as_py(chunk, tuple=True)))
def chunks(self, **kwargs):
def dshape(chunk):
return str(len(chunk) * self.dshape.subshape[0])
for chunk in self._chunks(**kwargs):
yield nd.array(chunk, type=dshape(chunk))
def _chunks(self, blen=100):
return partition_all(blen, iter(self))
def as_dynd(self):
return self.dynd[:]
def as_py(self):
if isdimension(self.dshape[0]):
return tuple(self)
else:
return tuple(nd.as_py(self.as_dynd(), tuple=True))
def __array__(self):
return nd.as_numpy(self.as_dynd())
def __getitem__(self, key):
return self.get_py(key)
@property
def dynd(self):
return IndexCallable(self.get_dynd)
def get_py(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_py'):
result = self._get_py(key)
elif hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
else:
raise AttributeError("Data Descriptor defines neither "
"_get_py nor _get_dynd. Can not index")
return coerce(subshape, result)
def get_dynd(self, key):
key = ordered_index(key, self.dshape)
subshape = self.dshape._subshape(key)
if hasattr(self, '_get_dynd'):
result = self._get_dynd(key)
elif hasattr(self, '_get_py'):
result = nd.array(self._get_py(key), type=str(subshape))
        else:
            raise AttributeError("Data Descriptor defines neither "
                                 "_get_py nor _get_dynd. Cannot index")
# Currently nd.array(result, type=discover(result)) is oddly slower
# than just nd.array(result) , even though no type coercion should be
# necessary. As a short-term solution we check if this is the case and
# short-circuit the `type=` call
# This check can be deleted once these two run at similar speeds
ds_result = discover(result)
        if (subshape == ds_result or
            (isdimension(subshape[0]) and isdimension(ds_result[0]) and
             subshape.subshape[0] == ds_result.subshape[0])):
return nd.array(result)
else:
return nd.array(result, type=str(subshape))
def __iter__(self):
if not isdimension(self.dshape[0]):
raise TypeError("Data Descriptor not iterable, has dshape %s" %
self.dshape)
schema = self.dshape.subshape[0]
try:
seq = self._iter()
except NotImplementedError:
seq = iter(nd.as_py(self.as_dynd(), tuple=True))
        for block in partition_all(100, seq):
            x = coerce(len(block) * schema, block)
            for row in x:
                yield row
def _iter(self):
raise NotImplementedError()
_dshape = None
@property
def dshape(self):
return datashape.dshape(self._dshape or datashape.Var() * self.schema)
_schema = None
@property
def schema(self):
if self._schema:
return datashape.dshape(self._schema)
if isdimension(self.dshape[0]):
return self.dshape.subarray(1)
raise TypeError('Datashape is not indexable to schema\n%s' %
self.dshape)
@property
def columns(self):
rec = self.schema[0]
if isinstance(rec, datashape.Record):
return rec.names
else:
raise TypeError('Columns attribute only valid on tabular '
'datashapes of records, got %s' % self.dshape)
@dispatch((Expr, UnaryOp), DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return compute_up(t, iter(ddesc)) # use Python streaming by default
@dispatch(Projection, DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return ddesc[:, t.fields]
@dispatch(Field, DataDescriptor)
def compute_up(t, ddesc, **kwargs):
return ddesc[:, t.fields[0]]
@dispatch(DataDescriptor)
def discover(dd):
return dd.dshape
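# --- Hedged usage sketch (not part of the original module) ---
# A minimal in-memory descriptor built on the interface above: it only
# supplies a schema and an _iter() method, and inherits iteration,
# chunking and indexing from DataDescriptor. The class name and the
# sample data are illustrative assumptions.
class ListDescriptor(DataDescriptor):
    def __init__(self, data, schema):
        self.data = data
        self._schema = schema
    def _iter(self):
        return iter(self.data)
# Expected usage:
#     dd = ListDescriptor([(1, 'Alice'), (2, 'Bob')],
#                         '{id: int32, name: string}')
#     list(dd)  # -> [(1, 'Alice'), (2, 'Bob')]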
|
vitan/blaze
|
blaze/data/core.py
|
Python
|
bsd-3-clause
| 6,508
|
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class AuthTest(AcceptanceTestCase):
def enter_auth(self, username, password):
        # disable captcha as it makes these tests flaky (and requires waiting
# on external resources)
with self.settings(RECAPTCHA_PUBLIC_KEY=None):
self.browser.get('/auth/login/')
self.browser.find_element_by_id('id_username').send_keys(username)
self.browser.find_element_by_id('id_password').send_keys(password)
self.browser.find_element_by_xpath("//button[contains(text(), 'Login')]").click()
def test_renders(self):
self.browser.get('/auth/login/')
self.browser.snapshot(name='login')
def test_no_credentials(self):
self.enter_auth('', '')
self.browser.snapshot(name='login fields required')
def test_invalid_credentials(self):
self.enter_auth('bad-username', 'bad-username')
self.browser.snapshot(name='login fields invalid')
def test_success(self):
email = 'dummy@example.com'
password = 'dummy'
user = self.create_user(email=email)
user.set_password(password)
user.save()
self.enter_auth(email, password)
self.browser.snapshot(name='login success')
|
mitsuhiko/sentry
|
tests/acceptance/test_auth.py
|
Python
|
bsd-3-clause
| 1,321
|
import os
from celery import Celery
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'songaday_searcher.settings')
app = Celery('songaday_searcher')
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
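# --- Hedged usage sketch (not part of the original module) ---
# With autodiscover_tasks() above, any app in INSTALLED_APPS that exposes
# a tasks.py module is scanned for tasks. A hypothetical songs/tasks.py
# (the app name and task body are illustrative assumptions) might look like:
#
#     from celery import shared_task
#
#     @shared_task
#     def reindex_song(song_id):
#         """Rebuild the search-index entry for one song."""
#         ...
#
# Callers would then enqueue it with reindex_song.delay(song_id).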
|
zaneswafford/songaday_searcher
|
songaday_searcher/celery.py
|
Python
|
bsd-3-clause
| 352
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=32, FREQ='D', seed=0, trendtype="Lag1Trend", cycle_length=0, transform="Anscombe", sigma=0.0, exog_count=20, ar_order=12)
|
antoinecarme/pyaf
|
tests/artificial/transf_Anscombe/trend_Lag1Trend/cycle_0/ar_12/test_artificial_32_Anscombe_Lag1Trend_0_12_20.py
|
Python
|
bsd-3-clause
| 263
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import integrate
from .kern import Kern
from ...core.parameterization import Param
from ...util.linalg import tdot
from ... import util
from ...util.config import config # for assessing whether to use cython
from paramz.caching import Cache_this
from paramz.transformations import Logexp
try:
from . import stationary_cython
except ImportError:
print('warning in stationary: failed to import cython module: falling back to numpy')
config.set('cython', 'working', 'false')
class Stationary(Kern):
"""
Stationary kernels (covariance functions).
    Stationary covariance functions depend only on r, where r is defined as
    .. math::
        r(x, x') = \\sqrt{ \\sum_{q=1}^Q (x_q - x'_q)^2 }
    The covariance function k(x, x') can then be written k(r).
    In this implementation, r is scaled by the lengthscale parameter(s):
    .. math::
        r(x, x') = \\sqrt{ \\sum_{q=1}^Q \\frac{(x_q - x'_q)^2}{\ell_q^2} }.
    By default, there's only one lengthscale: separate lengthscales for each
    dimension can be enabled by setting ARD=True.
    To implement a stationary covariance function using this class, one need
    only define the covariance function k(r), and its derivative.
```
def K_of_r(self, r):
return foo
def dK_dr(self, r):
return bar
```
The lengthscale(s) and variance parameters are added to the structure automatically.
"""
def __init__(self, input_dim, variance, lengthscale, ARD, active_dims, name, useGPU=False):
super(Stationary, self).__init__(input_dim, active_dims, name,useGPU=useGPU)
self.ARD = ARD
if not ARD:
if lengthscale is None:
lengthscale = np.ones(1)
else:
lengthscale = np.asarray(lengthscale)
assert lengthscale.size == 1, "Only 1 lengthscale needed for non-ARD kernel"
else:
if lengthscale is not None:
lengthscale = np.asarray(lengthscale)
assert lengthscale.size in [1, input_dim], "Bad number of lengthscales"
if lengthscale.size != input_dim:
lengthscale = np.ones(input_dim)*lengthscale
else:
lengthscale = np.ones(self.input_dim)
self.lengthscale = Param('lengthscale', lengthscale, Logexp())
self.variance = Param('variance', variance, Logexp())
assert self.variance.size==1
self.link_parameters(self.variance, self.lengthscale)
def K_of_r(self, r):
raise NotImplementedError("implement the covariance function as a fn of r to use this class")
def dK_dr(self, r):
raise NotImplementedError("implement derivative of the covariance function wrt r to use this class")
@Cache_this(limit=3, ignore_args=())
def dK2_drdr(self, r):
raise NotImplementedError("implement second derivative of covariance wrt r to use this method")
@Cache_this(limit=3, ignore_args=())
def dK2_drdr_diag(self):
"Second order derivative of K in r_{i,i}. The diagonal entries are always zero, so we do not give it here."
raise NotImplementedError("implement second derivative of covariance wrt r_diag to use this method")
@Cache_this(limit=3, ignore_args=())
def K(self, X, X2=None):
"""
Kernel function applied on inputs X and X2.
In the stationary case there is an inner function depending on the
distances from X to X2, called r.
        K(X, X2) = K_of_r(r(X, X2))
"""
r = self._scaled_dist(X, X2)
return self.K_of_r(r)
@Cache_this(limit=3, ignore_args=())
def dK_dr_via_X(self, X, X2):
"""
compute the derivative of K wrt X going through X
"""
#a convenience function, so we can cache dK_dr
return self.dK_dr(self._scaled_dist(X, X2))
@Cache_this(limit=3, ignore_args=())
def dK2_drdr_via_X(self, X, X2):
#a convenience function, so we can cache dK_dr
return self.dK2_drdr(self._scaled_dist(X, X2))
def _unscaled_dist(self, X, X2=None):
"""
Compute the Euclidean distance between each row of X and X2, or between
each pair of rows of X if X2 is None.
"""
#X, = self._slice_X(X)
if X2 is None:
Xsq = np.sum(np.square(X),1)
r2 = -2.*tdot(X) + (Xsq[:,None] + Xsq[None,:])
            util.diag.view(r2)[:,] = 0.  # force diagonal to be zero: sometimes numerically a little negative
r2 = np.clip(r2, 0, np.inf)
return np.sqrt(r2)
else:
#X2, = self._slice_X(X2)
X1sq = np.sum(np.square(X),1)
X2sq = np.sum(np.square(X2),1)
r2 = -2.*np.dot(X, X2.T) + X1sq[:,None] + X2sq[None,:]
r2 = np.clip(r2, 0, np.inf)
return np.sqrt(r2)
@Cache_this(limit=3, ignore_args=())
def _scaled_dist(self, X, X2=None):
"""
Efficiently compute the scaled distance, r.
..math::
r = \sqrt( \sum_{q=1}^Q (x_q - x'q)^2/l_q^2 )
        Note that if there is only one lengthscale, l comes outside the sum. In
        this case we compute the unscaled distance first (in a separate
        function for caching) and divide by the lengthscale afterwards.
"""
if self.ARD:
if X2 is not None:
X2 = X2 / self.lengthscale
return self._unscaled_dist(X/self.lengthscale, X2)
else:
return self._unscaled_dist(X, X2)/self.lengthscale
def Kdiag(self, X):
ret = np.empty(X.shape[0])
ret[:] = self.variance
return ret
def update_gradients_diag(self, dL_dKdiag, X):
"""
Given the derivative of the objective with respect to the diagonal of
the covariance matrix, compute the derivative wrt the parameters of
        this kernel and store it in the <parameter>.gradient field.
See also update_gradients_full
"""
self.variance.gradient = np.sum(dL_dKdiag)
self.lengthscale.gradient = 0.
def update_gradients_full(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective wrt the covariance matrix
(dL_dK), compute the gradient wrt the parameters of this kernel,
and store in the parameters object as e.g. self.variance.gradient
"""
self.variance.gradient = np.sum(self.K(X, X2)* dL_dK)/self.variance
#now the lengthscale gradient(s)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
if self.ARD:
tmp = dL_dr*self._inv_dist(X, X2)
if X2 is None: X2 = X
if config.getboolean('cython', 'working'):
self.lengthscale.gradient = self._lengthscale_grads_cython(tmp, X, X2)
else:
self.lengthscale.gradient = self._lengthscale_grads_pure(tmp, X, X2)
else:
r = self._scaled_dist(X, X2)
self.lengthscale.gradient = -np.sum(dL_dr*r)/self.lengthscale
def _inv_dist(self, X, X2=None):
"""
        Compute the elementwise inverse of the distance matrix, except on the
        diagonal, where we return zero (the distance on the diagonal is zero).
        This term appears in derivatives.
"""
dist = self._scaled_dist(X, X2).copy()
return 1./np.where(dist != 0., dist, np.inf)
def _lengthscale_grads_pure(self, tmp, X, X2):
return -np.array([np.sum(tmp * np.square(X[:,q:q+1] - X2[:,q:q+1].T)) for q in range(self.input_dim)])/self.lengthscale**3
def _lengthscale_grads_cython(self, tmp, X, X2):
N,M = tmp.shape
Q = self.input_dim
X, X2 = np.ascontiguousarray(X), np.ascontiguousarray(X2)
grads = np.zeros(self.input_dim)
stationary_cython.lengthscale_grads(N, M, Q, tmp, X, X2, grads)
return -grads/self.lengthscale**3
def gradients_X(self, dL_dK, X, X2=None):
"""
Given the derivative of the objective wrt K (dL_dK), compute the derivative wrt X
"""
if config.getboolean('cython', 'working'):
return self._gradients_X_cython(dL_dK, X, X2)
else:
return self._gradients_X_pure(dL_dK, X, X2)
def gradients_XX(self, dL_dK, X, X2=None):
"""
        Given the derivative of the objective wrt K (dL_dK), compute the second derivative of K wrt X and X2:
        returns the full covariance matrix [QxQ] of the input dimension for each pair of vectors, thus
the returned array is of shape [NxNxQxQ].
..math:
\frac{\partial^2 K}{\partial X2 ^2} = - \frac{\partial^2 K}{\partial X\partial X2}
..returns:
dL2_dXdX2: [NxMxQxQ] in the cov=True case, or [NxMxQ] in the cov=False case,
            for X [NxQ] and X2 [MxQ] (X2 is X if X2 is None)
Thus, we return the second derivative in X2.
"""
# According to multivariable chain rule, we can chain the second derivative through r:
# d2K_dXdX2 = dK_dr*d2r_dXdX2 + d2K_drdr * dr_dX * dr_dX2:
invdist = self._inv_dist(X, X2)
invdist2 = invdist**2
dL_dr = self.dK_dr_via_X(X, X2) #* dL_dK # we perform this product later
tmp1 = dL_dr * invdist
        dL_drdr = self.dK2_drdr_via_X(X, X2) #* dL_dK # we perform this product later
tmp2 = dL_drdr*invdist2
l2 = np.ones(X.shape[1])*self.lengthscale**2 #np.multiply(np.ones(X.shape[1]) ,self.lengthscale**2)
if X2 is None:
X2 = X
tmp1 -= np.eye(X.shape[0])*self.variance
else:
tmp1[invdist2==0.] -= self.variance
#grad = np.empty((X.shape[0], X2.shape[0], X2.shape[1], X.shape[1]), dtype=np.float64)
dist = X[:,None,:] - X2[None,:,:]
dist = (dist[:,:,:,None]*dist[:,:,None,:])
I = np.ones((X.shape[0], X2.shape[0], X2.shape[1], X.shape[1]))*np.eye((X2.shape[1]))
grad = (((dL_dK*(tmp1*invdist2 - tmp2))[:,:,None,None] * dist)/l2[None,None,:,None]
- (dL_dK*tmp1)[:,:,None,None] * I)/l2[None,None,None,:]
return grad
def gradients_XX_diag(self, dL_dK_diag, X):
"""
Given the derivative of the objective dL_dK, compute the second derivative of K wrt X:
..math:
\frac{\partial^2 K}{\partial X\partial X}
..returns:
dL2_dXdX: [NxQxQ]
"""
dL_dK_diag = dL_dK_diag.copy().reshape(-1, 1, 1)
assert (dL_dK_diag.size == X.shape[0]) or (dL_dK_diag.size == 1), "dL_dK_diag has to be given as row [N] or column vector [Nx1]"
l4 = np.ones(X.shape[1])*self.lengthscale**2
return dL_dK_diag * (np.eye(X.shape[1]) * -self.dK2_drdr_diag()/(l4))[None, :,:]# np.zeros(X.shape+(X.shape[1],))
#return np.ones(X.shape) * d2L_dK * self.variance/self.lengthscale**2 # np.zeros(X.shape)
def _gradients_X_pure(self, dL_dK, X, X2=None):
invdist = self._inv_dist(X, X2)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
tmp = invdist*dL_dr
if X2 is None:
tmp = tmp + tmp.T
X2 = X
#The high-memory numpy way:
#d = X[:, None, :] - X2[None, :, :]
#grad = np.sum(tmp[:,:,None]*d,1)/self.lengthscale**2
#the lower memory way with a loop
grad = np.empty(X.shape, dtype=np.float64)
for q in range(self.input_dim):
np.sum(tmp*(X[:,q][:,None]-X2[:,q][None,:]), axis=1, out=grad[:,q])
return grad/self.lengthscale**2
def _gradients_X_cython(self, dL_dK, X, X2=None):
invdist = self._inv_dist(X, X2)
dL_dr = self.dK_dr_via_X(X, X2) * dL_dK
tmp = invdist*dL_dr
if X2 is None:
tmp = tmp + tmp.T
X2 = X
X, X2 = np.ascontiguousarray(X), np.ascontiguousarray(X2)
grad = np.zeros(X.shape)
stationary_cython.grad_X(X.shape[0], X.shape[1], X2.shape[0], X, X2, tmp, grad)
return grad/self.lengthscale**2
def gradients_X_diag(self, dL_dKdiag, X):
return np.zeros(X.shape)
def input_sensitivity(self, summarize=True):
return self.variance*np.ones(self.input_dim)/self.lengthscale**2
class Exponential(Stationary):
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Exponential'):
super(Exponential, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.exp(-r)
def dK_dr(self, r):
return -self.K_of_r(r)
# def sde(self):
# """
# Return the state space representation of the covariance.
# """
# F = np.array([[-1/self.lengthscale]])
# L = np.array([[1]])
# Qc = np.array([[2*self.variance/self.lengthscale]])
# H = np.array([[1]])
# Pinf = np.array([[self.variance]])
# # TODO: return the derivatives as well
#
# return (F, L, Qc, H, Pinf)
class OU(Stationary):
"""
OU kernel:
.. math::
k(r) = \\sigma^2 \exp(- r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^{\text{input_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} }
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='OU'):
super(OU, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.exp(-r)
def dK_dr(self,r):
return -1.*self.variance*np.exp(-r)
class Matern32(Stationary):
"""
Matern 3/2 kernel:
.. math::
k(r) = \\sigma^2 (1 + \\sqrt{3} r) \exp(- \sqrt{3} r) \\ \\ \\ \\ \\text{ where } r = \sqrt{\sum_{i=1}^{\\text{input_dim}} \\frac{(x_i-y_i)^2}{\ell_i^2} }
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat32'):
super(Matern32, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * (1. + np.sqrt(3.) * r) * np.exp(-np.sqrt(3.) * r)
def dK_dr(self,r):
return -3.*self.variance*r*np.exp(-np.sqrt(3.)*r)
def Gram_matrix(self, F, F1, F2, lower, upper):
"""
Return the Gram matrix of the vector of functions F with respect to the
RKHS norm. The use of this function is limited to input_dim=1.
:param F: vector of functions
:type F: np.array
:param F1: vector of derivatives of F
:type F1: np.array
:param F2: vector of second derivatives of F
:type F2: np.array
:param lower,upper: boundaries of the input domain
:type lower,upper: floats
"""
assert self.input_dim == 1
def L(x, i):
return(3. / self.lengthscale ** 2 * F[i](x) + 2 * np.sqrt(3) / self.lengthscale * F1[i](x) + F2[i](x))
n = F.shape[0]
G = np.zeros((n, n))
for i in range(n):
for j in range(i, n):
G[i, j] = G[j, i] = integrate.quad(lambda x : L(x, i) * L(x, j), lower, upper)[0]
Flower = np.array([f(lower) for f in F])[:, None]
F1lower = np.array([f(lower) for f in F1])[:, None]
return(self.lengthscale ** 3 / (12.*np.sqrt(3) * self.variance) * G + 1. / self.variance * np.dot(Flower, Flower.T) + self.lengthscale ** 2 / (3.*self.variance) * np.dot(F1lower, F1lower.T))
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale.values)
foo = np.sqrt(3.)/lengthscale
F = np.array([[0, 1], [-foo**2, -2*foo]])
L = np.array([[0], [1]])
Qc = np.array([[12.*np.sqrt(3) / lengthscale**3 * variance]])
H = np.array([[1, 0]])
Pinf = np.array([[variance, 0],
[0, 3.*variance/(lengthscale**2)]])
# Allocate space for the derivatives
dF = np.empty([F.shape[0],F.shape[1],2])
dQc = np.empty([Qc.shape[0],Qc.shape[1],2])
dPinf = np.empty([Pinf.shape[0],Pinf.shape[1],2])
# The partial derivatives
dFvariance = np.zeros([2,2])
dFlengthscale = np.array([[0,0],
[6./lengthscale**3,2*np.sqrt(3)/lengthscale**2]])
dQcvariance = np.array([12.*np.sqrt(3)/lengthscale**3])
dQclengthscale = np.array([-3*12*np.sqrt(3)/lengthscale**4*variance])
dPinfvariance = np.array([[1,0],[0,3./lengthscale**2]])
dPinflengthscale = np.array([[0,0],
[0,-6*variance/lengthscale**3]])
# Combine the derivatives
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinfvariance
dPinf[:,:,1] = dPinflengthscale
return (F, L, Qc, H, Pinf, dF, dQc, dPinf)
class Matern52(Stationary):
"""
Matern 5/2 kernel:
.. math::
k(r) = \sigma^2 (1 + \sqrt{5} r + \\frac53 r^2) \exp(- \sqrt{5} r)
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Mat52'):
super(Matern52, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance*(1+np.sqrt(5.)*r+5./3*r**2)*np.exp(-np.sqrt(5.)*r)
def dK_dr(self, r):
return self.variance*(10./3*r -5.*r -5.*np.sqrt(5.)/3*r**2)*np.exp(-np.sqrt(5.)*r)
def Gram_matrix(self, F, F1, F2, F3, lower, upper):
"""
Return the Gram matrix of the vector of functions F with respect to the RKHS norm. The use of this function is limited to input_dim=1.
:param F: vector of functions
:type F: np.array
:param F1: vector of derivatives of F
:type F1: np.array
:param F2: vector of second derivatives of F
:type F2: np.array
:param F3: vector of third derivatives of F
:type F3: np.array
:param lower,upper: boundaries of the input domain
:type lower,upper: floats
"""
assert self.input_dim == 1
def L(x,i):
return(5*np.sqrt(5)/self.lengthscale**3*F[i](x) + 15./self.lengthscale**2*F1[i](x)+ 3*np.sqrt(5)/self.lengthscale*F2[i](x) + F3[i](x))
n = F.shape[0]
G = np.zeros((n,n))
for i in range(n):
for j in range(i,n):
G[i,j] = G[j,i] = integrate.quad(lambda x : L(x,i)*L(x,j),lower,upper)[0]
G_coef = 3.*self.lengthscale**5/(400*np.sqrt(5))
Flower = np.array([f(lower) for f in F])[:,None]
F1lower = np.array([f(lower) for f in F1])[:,None]
F2lower = np.array([f(lower) for f in F2])[:,None]
orig = 9./8*np.dot(Flower,Flower.T) + 9.*self.lengthscale**4/200*np.dot(F2lower,F2lower.T)
orig2 = 3./5*self.lengthscale**2 * ( np.dot(F1lower,F1lower.T) + 1./8*np.dot(Flower,F2lower.T) + 1./8*np.dot(F2lower,Flower.T))
return(1./self.variance* (G_coef*G + orig + orig2))
class ExpQuad(Stationary):
"""
The Exponentiated quadratic covariance function.
.. math::
        k(r) = \sigma^2 \exp \\left( - \\frac{1}{2} r^2 \\right)
notes::
- Yes, this is exactly the same as the RBF covariance function, but the
RBF implementation also has some features for doing variational kernels
(the psi-statistics).
"""
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='ExpQuad'):
super(ExpQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.exp(-0.5 * r**2)
def dK_dr(self, r):
return -r*self.K_of_r(r)
class Cosine(Stationary):
def __init__(self, input_dim, variance=1., lengthscale=None, ARD=False, active_dims=None, name='Cosine'):
super(Cosine, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
def K_of_r(self, r):
return self.variance * np.cos(r)
def dK_dr(self, r):
return -self.variance * np.sin(r)
class RatQuad(Stationary):
"""
Rational Quadratic Kernel
.. math::
k(r) = \sigma^2 \\bigg( 1 + \\frac{r^2}{2} \\bigg)^{- \\alpha}
"""
def __init__(self, input_dim, variance=1., lengthscale=None, power=2., ARD=False, active_dims=None, name='RatQuad'):
super(RatQuad, self).__init__(input_dim, variance, lengthscale, ARD, active_dims, name)
self.power = Param('power', power, Logexp())
self.link_parameters(self.power)
def K_of_r(self, r):
r2 = np.square(r)
# return self.variance*np.power(1. + r2/2., -self.power)
return self.variance*np.exp(-self.power*np.log1p(r2/2.))
def dK_dr(self, r):
r2 = np.square(r)
# return -self.variance*self.power*r*np.power(1. + r2/2., - self.power - 1.)
        return -self.variance*self.power*r*np.exp(-(self.power+1)*np.log1p(r2/2.))
def update_gradients_full(self, dL_dK, X, X2=None):
super(RatQuad, self).update_gradients_full(dL_dK, X, X2)
r = self._scaled_dist(X, X2)
r2 = np.square(r)
# dK_dpow = -self.variance * np.power(2., self.power) * np.power(r2 + 2., -self.power) * np.log(0.5*(r2+2.))
dK_dpow = -self.variance * np.exp(self.power*(np.log(2.)-np.log1p(r2+1)))*np.log1p(r2/2.)
grad = np.sum(dL_dK*dK_dpow)
self.power.gradient = grad
def update_gradients_diag(self, dL_dKdiag, X):
super(RatQuad, self).update_gradients_diag(dL_dKdiag, X)
self.power.gradient = 0.
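# --- Hedged usage sketch (not part of the original module) ---
# Following the recipe in the Stationary docstring, a new stationary
# kernel only needs K_of_r and dK_dr. This gamma-exponential kernel,
# k(r) = sigma^2 * exp(-r**gamma) with gamma held fixed, is an
# illustrative assumption, not a kernel shipped in this file.
class GammaExponential(Stationary):
    def __init__(self, input_dim, variance=1., lengthscale=None, gamma=1.5,
                 ARD=False, active_dims=None, name='GammaExp'):
        super(GammaExponential, self).__init__(input_dim, variance,
                                               lengthscale, ARD,
                                               active_dims, name)
        self.gamma = gamma  # held fixed here, not linked as a Param
    def K_of_r(self, r):
        return self.variance * np.exp(-r**self.gamma)
    def dK_dr(self, r):
        # d/dr [sigma^2 exp(-r^gamma)] = -gamma * r^(gamma-1) * K(r)
        return -self.gamma * r**(self.gamma - 1.) * self.K_of_r(r)
# Expected usage (illustrative):
#     k = GammaExponential(input_dim=2, variance=1.0, lengthscale=0.5)
#     K = k.K(np.random.randn(5, 2))   # 5x5 covariance matrix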
|
avehtari/GPy
|
GPy/kern/src/stationary.py
|
Python
|
bsd-3-clause
| 21,805
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferScene
Gaffer.Metadata.registerNode(
GafferScene.SceneSwitch,
"description",
"""
	Chooses between multiple input scenes, passing through the
chosen input to the output.
""",
plugs = {
"index" : [
"description",
"""
The index of the input which is passed through. A value
of 0 chooses the first input, 1 the second and so on. Values
larger than the number of available inputs wrap back around to
the beginning.
"""
]
}
)
GafferUI.PlugValueWidget.registerCreator( GafferScene.SceneSwitch, "in[0-9]*", None )
|
goddardl/gaffer
|
python/GafferSceneUI/SceneSwitchUI.py
|
Python
|
bsd-3-clause
| 2,393
|
from . import *
class TestTemplateUse(TestCase):
def test_resized_img_src(self):
@self.app.route('/resized_img_src')
def use():
return render_template_string('''
<img src="{{ resized_img_src('cc.png') }}" />
'''.strip())
res = self.client.get('/resized_img_src')
self.assert200(res)
self.assertIn('src="/imgsizer/cc.png?', res.data)
def test_url_for(self):
@self.app.route('/url_for')
def use():
return render_template_string('''
<img src="{{ url_for('images', filename='cc.png') }}" />
'''.strip())
res = self.client.get('/url_for')
self.assert200(res)
self.assertIn('src="/imgsizer/cc.png?', res.data)
|
knadir/Flask-Images
|
tests/test_template_use.py
|
Python
|
bsd-3-clause
| 780
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from cStringIO import StringIO
from datetime import datetime
import errno
import hashlib
import os.path
import posixpath
import re
import shutil
import sys
import unicodedata
from genshi.builder import tag
from trac.admin import AdminCommandError, IAdminCommandProvider, PrefixList, \
console_datetime_format, get_dir_list
from trac.config import BoolOption, IntOption
from trac.core import *
from trac.mimeview import *
from trac.perm import PermissionError, IPermissionPolicy
from trac.resource import *
from trac.search import search_to_sql, shorten_result
from trac.util import content_disposition, create_zipinfo, get_reporter_id
from trac.util.datefmt import datetime_now, format_datetime, from_utimestamp, \
to_datetime, to_utimestamp, utc
from trac.util.text import exception_to_unicode, path_to_unicode, \
pretty_size, print_table, stripws, unicode_unquote
from trac.util.translation import _, tag_
from trac.web import HTTPBadRequest, IRequestHandler, RequestDone
from trac.web.chrome import (INavigationContributor, add_ctxtnav, add_link,
add_stylesheet, web_context, add_warning)
from trac.web.href import Href
from trac.wiki.api import IWikiSyntaxProvider
from trac.wiki.formatter import format_to
class InvalidAttachment(TracError):
"""Exception raised when attachment validation fails."""
class IAttachmentChangeListener(Interface):
"""Extension point interface for components that require
notification when attachments are created or deleted."""
def attachment_added(attachment):
"""Called when an attachment is added."""
def attachment_deleted(attachment):
"""Called when an attachment is deleted."""
def attachment_reparented(attachment, old_parent_realm, old_parent_id):
"""Called when an attachment is reparented."""
class IAttachmentManipulator(Interface):
"""Extension point interface for components that need to
manipulate attachments.
Unlike change listeners, a manipulator can reject changes being
committed to the database."""
def prepare_attachment(req, attachment, fields):
"""Not currently called, but should be provided for future
compatibility."""
def validate_attachment(req, attachment):
"""Validate an attachment after upload but before being stored
in Trac environment.
Must return a list of ``(field, message)`` tuples, one for
each problem detected. ``field`` can be any of
``description``, ``username``, ``filename``, ``content``, or
`None` to indicate an overall problem with the
attachment. Therefore, a return value of ``[]`` means
everything is OK."""
class ILegacyAttachmentPolicyDelegate(Interface):
"""Interface that can be used by plugins to seamlessly participate
to the legacy way of checking for attachment permissions.
This should no longer be necessary once it becomes easier to
setup fine-grained permissions in the default permission store.
"""
def check_attachment_permission(action, username, resource, perm):
"""Return the usual `True`/`False`/`None` security policy
decision appropriate for the requested action on an
attachment.
:param action: one of ATTACHMENT_VIEW, ATTACHMENT_CREATE,
ATTACHMENT_DELETE
:param username: the user string
:param resource: the `~trac.resource.Resource` for the
attachment. Note that when
ATTACHMENT_CREATE is checked, the
resource ``.id`` will be `None`.
:param perm: the permission cache for that username and resource
"""
class AttachmentModule(Component):
implements(IRequestHandler, INavigationContributor, IWikiSyntaxProvider,
IResourceManager)
realm = 'attachment'
is_valid_default_handler = False
change_listeners = ExtensionPoint(IAttachmentChangeListener)
manipulators = ExtensionPoint(IAttachmentManipulator)
CHUNK_SIZE = 4096
max_size = IntOption('attachment', 'max_size', 262144,
"""Maximum allowed file size (in bytes) for attachments.""")
max_zip_size = IntOption('attachment', 'max_zip_size', 2097152,
"""Maximum allowed total size (in bytes) for an attachment list to be
downloadable as a `.zip`. Set this to -1 to disable download as `.zip`.
(''since 1.0'')""")
render_unsafe_content = BoolOption('attachment', 'render_unsafe_content',
'false',
"""Whether attachments should be rendered in the browser, or
only made downloadable.
Pretty much any file may be interpreted as HTML by the browser,
which allows a malicious user to attach a file containing cross-site
scripting attacks.
For public sites where anonymous users can create attachments it is
recommended to leave this option disabled.""")
# INavigationContributor methods
def get_active_navigation_item(self, req):
return req.args.get('realm')
def get_navigation_items(self, req):
return []
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/(raw-|zip-)?attachment/([^/]+)(?:/(.*))?$',
req.path_info)
if match:
format, realm, path = match.groups()
if format:
req.args['format'] = format[:-1]
req.args['realm'] = realm
if path:
req.args['path'] = path
return True
def process_request(self, req):
parent_id = None
parent_realm = req.args.get('realm')
path = req.args.get('path')
filename = None
if not parent_realm or not path:
raise HTTPBadRequest(_('Bad request'))
if parent_realm == 'attachment':
raise TracError(tag_("%(realm)s is not a valid parent realm",
realm=tag.code(parent_realm)))
parent_realm = Resource(parent_realm)
action = req.args.get('action', 'view')
if action == 'new':
parent_id = path.rstrip('/')
else:
last_slash = path.rfind('/')
if last_slash == -1:
parent_id, filename = path, ''
else:
parent_id, filename = path[:last_slash], path[last_slash + 1:]
parent = parent_realm(id=parent_id)
if not resource_exists(self.env, parent):
raise ResourceNotFound(
_("Parent resource %(parent)s doesn't exist",
parent=get_resource_name(self.env, parent)))
# Link the attachment page to parent resource
parent_name = get_resource_name(self.env, parent)
parent_url = get_resource_url(self.env, parent, req.href)
add_link(req, 'up', parent_url, parent_name)
add_ctxtnav(req, _('Back to %(parent)s', parent=parent_name),
parent_url)
if not filename: # there's a trailing '/'
if req.args.get('format') == 'zip':
self._download_as_zip(req, parent)
elif action != 'new':
return self._render_list(req, parent)
attachment = Attachment(self.env, parent.child(self.realm, filename))
if req.method == 'POST':
if action == 'new':
data = self._do_save(req, attachment)
elif action == 'delete':
self._do_delete(req, attachment)
else:
raise HTTPBadRequest(_("Invalid request arguments."))
elif action == 'delete':
data = self._render_confirm_delete(req, attachment)
elif action == 'new':
data = self._render_form(req, attachment)
else:
data = self._render_view(req, attachment)
add_stylesheet(req, 'common/css/code.css')
return 'attachment.html', data, None
# IWikiSyntaxProvider methods
def get_wiki_syntax(self):
return []
def get_link_resolvers(self):
yield ('raw-attachment', self._format_link)
yield ('attachment', self._format_link)
# Public methods
def viewable_attachments(self, context):
"""Return the list of viewable attachments in the given context.
:param context: the `~trac.mimeview.api.RenderingContext`
corresponding to the parent
`~trac.resource.Resource` for the attachments
"""
parent = context.resource
attachments = []
for attachment in Attachment.select(self.env, parent.realm, parent.id):
if 'ATTACHMENT_VIEW' in context.perm(attachment.resource):
attachments.append(attachment)
return attachments
def attachment_data(self, context):
"""Return a data dictionary describing the list of viewable
attachments in the current context.
"""
attachments = self.viewable_attachments(context)
parent = context.resource
total_size = sum(attachment.size for attachment in attachments)
new_att = parent.child(self.realm)
return {'attach_href': get_resource_url(self.env, new_att,
context.href),
'download_href': get_resource_url(self.env, new_att,
context.href, format='zip')
if total_size <= self.max_zip_size else None,
'can_create': 'ATTACHMENT_CREATE' in context.perm(new_att),
'attachments': attachments,
'parent': context.resource}
def get_history(self, start, stop, realm):
"""Return an iterable of tuples describing changes to attachments on
a particular object realm.
The tuples are in the form (change, realm, id, filename, time,
description, author). `change` can currently only be `created`.
FIXME: no iterator
"""
for realm, id, filename, ts, description, author in \
self.env.db_query("""
SELECT type, id, filename, time, description, author
FROM attachment WHERE time > %s AND time < %s AND type = %s
""", (to_utimestamp(start), to_utimestamp(stop), realm)):
time = from_utimestamp(ts or 0)
yield ('created', realm, id, filename, time, description, author)
def get_timeline_events(self, req, resource_realm, start, stop):
"""Return an event generator suitable for ITimelineEventProvider.
Events are changes to attachments on resources of the given
`resource_realm.realm`.
"""
for change, realm, id, filename, time, descr, author in \
self.get_history(start, stop, resource_realm.realm):
attachment = resource_realm(id=id).child(self.realm, filename)
if 'ATTACHMENT_VIEW' in req.perm(attachment):
yield ('attachment', time, author, (attachment, descr), self)
def render_timeline_event(self, context, field, event):
attachment, descr = event[3]
if field == 'url':
return self.get_resource_url(attachment, context.href)
elif field == 'title':
name = get_resource_name(self.env, attachment.parent)
title = get_resource_summary(self.env, attachment.parent)
return tag_("%(attachment)s attached to %(resource)s",
attachment=tag.em(os.path.basename(attachment.id)),
resource=tag.em(name, title=title))
elif field == 'description':
return format_to(self.env, None, context.child(attachment.parent),
descr)
def get_search_results(self, req, resource_realm, terms):
"""Return a search result generator suitable for ISearchSource.
Search results are attachments on resources of the given
`resource_realm.realm` whose filename, description or author match
the given terms.
"""
with self.env.db_query as db:
sql_query, args = search_to_sql(
db, ['filename', 'description', 'author'], terms)
for id, time, filename, desc, author in db("""
SELECT id, time, filename, description, author
FROM attachment WHERE type = %s AND """ + sql_query,
(resource_realm.realm,) + args):
attachment = resource_realm(id=id).child(self.realm, filename)
if 'ATTACHMENT_VIEW' in req.perm(attachment):
yield (get_resource_url(self.env, attachment, req.href),
get_resource_shortname(self.env, attachment),
from_utimestamp(time), author,
shorten_result(desc, terms))
# IResourceManager methods
def get_resource_realms(self):
yield self.realm
def get_resource_url(self, resource, href, **kwargs):
"""Return an URL to the attachment itself.
A `format` keyword argument equal to `'raw'` will be converted
to the raw-attachment prefix.
"""
if not resource.parent:
return None
format = kwargs.get('format')
prefix = 'attachment'
if format in ('raw', 'zip'):
kwargs.pop('format')
prefix = format + '-attachment'
parent_href = unicode_unquote(get_resource_url(self.env,
resource.parent(version=None), Href('')))
if not resource.id:
# link to list of attachments, which must end with a trailing '/'
# (see process_request)
return href(prefix, parent_href, '', **kwargs)
else:
return href(prefix, parent_href, resource.id, **kwargs)
def get_resource_description(self, resource, format=None, **kwargs):
if not resource.parent:
return _("Unparented attachment %(id)s", id=resource.id)
if format == 'compact':
return '%s (%s)' % (resource.id,
get_resource_name(self.env, resource.parent))
elif format == 'summary':
return Attachment(self.env, resource).description
if resource.id:
return _("Attachment '%(id)s' in %(parent)s", id=resource.id,
parent=get_resource_name(self.env, resource.parent))
else:
return _("Attachments of %(parent)s",
parent=get_resource_name(self.env, resource.parent))
def resource_exists(self, resource):
try:
attachment = Attachment(self.env, resource)
return os.path.exists(attachment.path)
except ResourceNotFound:
return False
# Internal methods
def _do_save(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_CREATE')
parent_resource = attachment.resource.parent
if 'cancel' in req.args:
req.redirect(get_resource_url(self.env, parent_resource, req.href))
upload = req.args.getfirst('attachment')
if not hasattr(upload, 'filename') or not upload.filename:
raise TracError(_("No file uploaded"))
if hasattr(upload.file, 'fileno'):
size = os.fstat(upload.file.fileno())[6]
else:
upload.file.seek(0, 2) # seek to end of file
size = upload.file.tell()
upload.file.seek(0)
if size == 0:
raise TracError(_("Can't upload empty file"))
# Maximum attachment size (in bytes)
max_size = self.max_size
if 0 <= max_size < size:
raise TracError(_("Maximum attachment size: %(num)s",
num=pretty_size(max_size)), _("Upload failed"))
filename = _normalized_filename(upload.filename)
if not filename:
raise TracError(_("No file uploaded"))
# Now the filename is known, update the attachment resource
attachment.filename = filename
attachment.description = req.args.get('description', '')
attachment.author = get_reporter_id(req, 'author')
attachment.ipnr = req.remote_addr
# Validate attachment
valid = True
for manipulator in self.manipulators:
for field, message in manipulator.validate_attachment(req,
attachment):
valid = False
if field:
add_warning(req,
_('Attachment field %(field)s is invalid: %(message)s',
field=field, message=message))
else:
add_warning(req,
_('Invalid attachment: %(message)s', message=message))
if not valid:
# Display the attach form with pre-existing data
# NOTE: Local file path not known, file field cannot be repopulated
add_warning(req, _('Note: File must be selected again.'))
data = self._render_form(req, attachment)
data['is_replace'] = req.args.get('replace')
return data
if req.args.get('replace'):
try:
old_attachment = Attachment(self.env,
attachment.resource(id=filename))
if not (req.authname and req.authname != 'anonymous'
and old_attachment.author == req.authname) \
and 'ATTACHMENT_DELETE' \
not in req.perm(attachment.resource):
raise PermissionError(msg=_("You don't have permission to "
"replace the attachment %(name)s. You can only "
"replace your own attachments. Replacing other's "
"attachments requires ATTACHMENT_DELETE permission.",
name=filename))
if (not attachment.description.strip() and
old_attachment.description):
attachment.description = old_attachment.description
old_attachment.delete()
except TracError:
pass # don't worry if there's nothing to replace
attachment.insert(filename, upload.file, size)
req.redirect(get_resource_url(self.env, attachment.resource(id=None),
req.href))
def _do_delete(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_DELETE')
parent_href = get_resource_url(self.env, attachment.resource.parent,
req.href)
if 'cancel' in req.args:
req.redirect(parent_href)
attachment.delete()
req.redirect(parent_href)
def _render_confirm_delete(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_DELETE')
return {'mode': 'delete',
'title': _('%(attachment)s (delete)',
attachment=get_resource_name(self.env,
attachment.resource)),
'attachment': attachment}
def _render_form(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_CREATE')
return {'mode': 'new', 'author': get_reporter_id(req),
'attachment': attachment, 'max_size': self.max_size}
def _download_as_zip(self, req, parent, attachments=None):
if attachments is None:
attachments = self.viewable_attachments(web_context(req, parent))
total_size = sum(attachment.size for attachment in attachments)
if total_size > self.max_zip_size:
raise TracError(_("Maximum total attachment size: %(num)s",
num=pretty_size(self.max_zip_size)), _("Download failed"))
req.send_response(200)
req.send_header('Content-Type', 'application/zip')
filename = 'attachments-%s-%s.zip' % \
(parent.realm, re.sub(r'[/\\:]', '-', unicode(parent.id)))
req.send_header('Content-Disposition',
content_disposition('inline', filename))
from zipfile import ZipFile, ZIP_DEFLATED
buf = StringIO()
zipfile = ZipFile(buf, 'w', ZIP_DEFLATED)
for attachment in attachments:
zipinfo = create_zipinfo(attachment.filename,
mtime=attachment.date,
comment=attachment.description)
try:
with attachment.open() as fd:
zipfile.writestr(zipinfo, fd.read())
except ResourceNotFound:
pass # skip missing files
zipfile.close()
zip_str = buf.getvalue()
req.send_header("Content-Length", len(zip_str))
req.end_headers()
req.write(zip_str)
raise RequestDone()
def _render_list(self, req, parent):
data = {
'mode': 'list',
'attachment': None, # no specific attachment
'attachments': self.attachment_data(web_context(req, parent))
}
return 'attachment.html', data, None
def _render_view(self, req, attachment):
req.perm(attachment.resource).require('ATTACHMENT_VIEW')
can_delete = 'ATTACHMENT_DELETE' in req.perm(attachment.resource)
req.check_modified(attachment.date, str(can_delete))
data = {'mode': 'view',
'title': get_resource_name(self.env, attachment.resource),
'attachment': attachment}
with attachment.open() as fd:
mimeview = Mimeview(self.env)
# MIME type detection
str_data = fd.read(1000)
fd.seek(0)
mime_type = mimeview.get_mimetype(attachment.filename, str_data)
# Eventually send the file directly
format = req.args.get('format')
if format == 'zip':
self._download_as_zip(req, attachment.resource.parent,
[attachment])
elif format in ('raw', 'txt'):
if not self.render_unsafe_content:
# Force browser to download files instead of rendering
# them, since they might contain malicious code enabling
# XSS attacks
req.send_header('Content-Disposition', 'attachment')
if format == 'txt':
mime_type = 'text/plain'
elif not mime_type:
mime_type = 'application/octet-stream'
if 'charset=' not in mime_type:
charset = mimeview.get_charset(str_data, mime_type)
mime_type = mime_type + '; charset=' + charset
req.send_file(attachment.path, mime_type)
# add ''Plain Text'' alternate link if needed
if (self.render_unsafe_content and
mime_type and not mime_type.startswith('text/plain')):
plaintext_href = get_resource_url(self.env,
attachment.resource,
req.href, format='txt')
add_link(req, 'alternate', plaintext_href, _('Plain Text'),
mime_type)
# add ''Original Format'' alternate link (always)
raw_href = get_resource_url(self.env, attachment.resource,
req.href, format='raw')
add_link(req, 'alternate', raw_href, _('Original Format'),
mime_type)
self.log.debug("Rendering preview of file %s with mime-type %s",
attachment.filename, mime_type)
data['preview'] = mimeview.preview_data(
web_context(req, attachment.resource), fd,
os.fstat(fd.fileno()).st_size, mime_type,
attachment.filename, raw_href, annotations=['lineno'])
return data
def _format_link(self, formatter, ns, target, label):
link, params, fragment = formatter.split_link(target)
ids = link.split(':', 2)
attachment = None
if len(ids) == 3:
known_realms = ResourceSystem(self.env).get_known_realms()
# new-style attachment: TracLinks (filename:realm:id)
if ids[1] in known_realms:
attachment = Resource(ids[1], ids[2]).child(self.realm,
ids[0])
else: # try old-style attachment: TracLinks (realm:id:filename)
if ids[0] in known_realms:
attachment = Resource(ids[0], ids[1]).child(self.realm,
ids[2])
else: # local attachment: TracLinks (filename)
attachment = formatter.resource.child(self.realm, link)
if attachment and 'ATTACHMENT_VIEW' in formatter.perm(attachment):
try:
model = Attachment(self.env, attachment)
raw_href = get_resource_url(self.env, attachment,
formatter.href, format='raw')
if ns.startswith('raw'):
return tag.a(label, class_='attachment',
href=raw_href + params,
title=get_resource_name(self.env, attachment))
href = get_resource_url(self.env, attachment, formatter.href)
title = get_resource_name(self.env, attachment)
return tag(tag.a(label, class_='attachment', title=title,
href=href + params),
tag.a(u'\u200b', class_='trac-rawlink',
href=raw_href + params, title=_("Download")))
except ResourceNotFound:
pass
# FIXME: should be either:
#
# model = Attachment(self.env, attachment)
# if model.exists:
# ...
#
# or directly:
#
# if attachment.exists:
#
# (related to #4130)
return tag.a(label, class_='missing attachment')
class Attachment(object):
"""Represents an attachment (new or existing).
:since 1.0.5: `ipnr` is deprecated and will be removed in 1.3.1
"""
realm = AttachmentModule.realm
@property
def resource(self):
return Resource(self.parent_realm, self.parent_id) \
.child(self.realm, self.filename)
def __init__(self, env, parent_realm_or_attachment_resource,
parent_id=None, filename=None):
if isinstance(parent_realm_or_attachment_resource, Resource):
resource = parent_realm_or_attachment_resource
self.parent_realm = resource.parent.realm
self.parent_id = unicode(resource.parent.id)
self.filename = resource.id
else:
self.parent_realm = parent_realm_or_attachment_resource
self.parent_id = unicode(parent_id)
self.filename = filename
self.env = env
if self.filename:
self._fetch(self.filename)
else:
self.filename = None
self.description = None
self.size = None
self.date = None
self.author = None
self.ipnr = None
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def _from_database(self, filename, description, size, time, author, ipnr):
self.filename = filename
self.description = description
self.size = int(size) if size else 0
self.date = from_utimestamp(time or 0)
self.author = author
self.ipnr = ipnr
def _fetch(self, filename):
for row in self.env.db_query("""
SELECT filename, description, size, time, author, ipnr
FROM attachment WHERE type=%s AND id=%s AND filename=%s
ORDER BY time
""", (self.parent_realm, unicode(self.parent_id), filename)):
self._from_database(*row)
break
else:
self.filename = filename
raise ResourceNotFound(_("Attachment '%(title)s' does not exist.",
title=self.title),
_('Invalid Attachment'))
# _get_path() and _get_hashed_filename() are class methods so that they
# can be used in db28.py.
@classmethod
def _get_path(cls, env_path, parent_realm, parent_id, filename):
"""Get the path of an attachment.
WARNING: This method is used by db28.py for moving attachments from
the old "attachments" directory to the "files" directory. Please check
all changes so that they don't break the upgrade.
"""
path = os.path.join(env_path, 'files', 'attachments',
parent_realm)
hash = hashlib.sha1(parent_id.encode('utf-8')).hexdigest()
path = os.path.join(path, hash[0:3], hash)
if filename:
path = os.path.join(path, cls._get_hashed_filename(filename))
return os.path.normpath(path)
_extension_re = re.compile(r'\.[A-Za-z0-9]+\Z')
@classmethod
def _get_hashed_filename(cls, filename):
"""Get the hashed filename corresponding to the given filename.
WARNING: This method is used by db28.py for moving attachments from
the old "attachments" directory to the "files" directory. Please check
all changes so that they don't break the upgrade.
"""
hash = hashlib.sha1(filename.encode('utf-8')).hexdigest()
match = cls._extension_re.search(filename)
return hash + match.group(0) if match else hash
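    # Hedged illustration (comment only, not in the original source): for
    # parent_realm='ticket', parent_id='123', filename='patch.diff',
    # _get_path() yields something like
    #     <env>/files/attachments/ticket/40b/40bd...(sha1 of '123')/
    #         9c99...(sha1 of 'patch.diff').diff
    # i.e. both path segments are SHA-1 hexdigests, with the original
    # file extension preserved on the hashed filename.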
@property
def path(self):
return self._get_path(self.env.path, self.parent_realm, self.parent_id,
self.filename)
@property
def title(self):
return '%s:%s: %s' % (self.parent_realm, self.parent_id, self.filename)
def delete(self):
"""Delete the attachment, both the record in the database and
the file itself.
"""
assert self.filename, "Cannot delete non-existent attachment"
with self.env.db_transaction as db:
db("""
DELETE FROM attachment WHERE type=%s AND id=%s AND filename=%s
""", (self.parent_realm, self.parent_id, self.filename))
path = self.path
if os.path.isfile(path):
try:
os.unlink(path)
except OSError as e:
self.env.log.error("Failed to delete attachment "
"file %s: %s",
path,
exception_to_unicode(e, traceback=True))
raise TracError(_("Could not delete attachment"))
self.env.log.info("Attachment removed: %s", self.title)
for listener in AttachmentModule(self.env).change_listeners:
listener.attachment_deleted(self)
def reparent(self, new_realm, new_id):
assert self.filename, "Cannot reparent non-existent attachment"
new_id = unicode(new_id)
new_path = self._get_path(self.env.path, new_realm, new_id,
self.filename)
# Make sure the path to the attachment is inside the environment
# attachments directory
attachments_dir = os.path.join(os.path.normpath(self.env.path),
'files', 'attachments')
commonprefix = os.path.commonprefix([attachments_dir, new_path])
if commonprefix != attachments_dir:
raise TracError(_('Cannot reparent attachment "%(att)s" as '
'%(realm)s:%(id)s is invalid',
att=self.filename, realm=new_realm, id=new_id))
if os.path.exists(new_path):
raise TracError(_('Cannot reparent attachment "%(att)s" as '
'it already exists in %(realm)s:%(id)s',
att=self.filename, realm=new_realm, id=new_id))
with self.env.db_transaction as db:
db("""UPDATE attachment SET type=%s, id=%s
WHERE type=%s AND id=%s AND filename=%s
""", (new_realm, new_id, self.parent_realm, self.parent_id,
self.filename))
dirname = os.path.dirname(new_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
path = self.path
if os.path.isfile(path):
try:
os.rename(path, new_path)
except OSError as e:
self.env.log.error("Failed to move attachment file %s: %s",
path,
exception_to_unicode(e, traceback=True))
raise TracError(_("Could not reparent attachment %(name)s",
name=self.filename))
old_realm, old_id = self.parent_realm, self.parent_id
self.parent_realm, self.parent_id = new_realm, new_id
self.env.log.info("Attachment reparented: %s", self.title)
for listener in AttachmentModule(self.env).change_listeners:
if hasattr(listener, 'attachment_reparented'):
listener.attachment_reparented(self, old_realm, old_id)
def insert(self, filename, fileobj, size, t=None):
"""Create a new Attachment record and save the file content.
"""
self.size = int(size) if size else 0
self.filename = None
if t is None:
t = datetime_now(utc)
elif not isinstance(t, datetime): # Compatibility with 0.11
t = to_datetime(t, utc)
self.date = t
parent_resource = Resource(self.parent_realm, self.parent_id)
if not resource_exists(self.env, parent_resource):
raise ResourceNotFound(
_("%(parent)s doesn't exist, can't create attachment",
parent=get_resource_name(self.env, parent_resource)))
# Make sure the path to the attachment is inside the environment
# attachments directory
attachments_dir = os.path.join(os.path.normpath(self.env.path),
'files', 'attachments')
dir = self.path
commonprefix = os.path.commonprefix([attachments_dir, dir])
if commonprefix != attachments_dir:
raise TracError(_('Cannot create attachment "%(att)s" as '
'%(realm)s:%(id)s is invalid',
att=filename, realm=self.parent_realm,
id=self.parent_id))
if not os.access(dir, os.F_OK):
os.makedirs(dir)
filename, targetfile = self._create_unique_file(dir, filename)
with targetfile:
with self.env.db_transaction as db:
db("INSERT INTO attachment VALUES (%s,%s,%s,%s,%s,%s,%s,%s)",
(self.parent_realm, self.parent_id, filename, self.size,
to_utimestamp(t), self.description, self.author,
self.ipnr))
shutil.copyfileobj(fileobj, targetfile)
self.filename = filename
self.env.log.info("New attachment: %s by %s", self.title,
self.author)
for listener in AttachmentModule(self.env).change_listeners:
listener.attachment_added(self)
@classmethod
def select(cls, env, parent_realm, parent_id):
"""Iterator yielding all `Attachment` instances attached to
resource identified by `parent_realm` and `parent_id`.
:returns: a tuple containing the `filename`, `description`, `size`,
`time`, `author` and `ipnr`.
:since 1.0.5: use of `ipnr` is deprecated and will be removed in 1.3.1
"""
for row in env.db_query("""
SELECT filename, description, size, time, author, ipnr
FROM attachment WHERE type=%s AND id=%s ORDER BY time
""", (parent_realm, unicode(parent_id))):
attachment = Attachment(env, parent_realm, parent_id)
attachment._from_database(*row)
yield attachment
@classmethod
def delete_all(cls, env, parent_realm, parent_id):
"""Delete all attachments of a given resource.
"""
attachment_dir = None
with env.db_transaction as db:
for attachment in cls.select(env, parent_realm, parent_id):
attachment_dir = os.path.dirname(attachment.path)
attachment.delete()
if attachment_dir:
try:
os.rmdir(attachment_dir)
except OSError as e:
env.log.error("Can't delete attachment directory %s: %s",
attachment_dir,
exception_to_unicode(e, traceback=True))
@classmethod
def reparent_all(cls, env, parent_realm, parent_id, new_realm, new_id):
"""Reparent all attachments of a given resource to another resource."""
attachment_dir = None
with env.db_transaction as db:
for attachment in list(cls.select(env, parent_realm, parent_id)):
attachment_dir = os.path.dirname(attachment.path)
attachment.reparent(new_realm, new_id)
if attachment_dir:
try:
os.rmdir(attachment_dir)
except OSError as e:
env.log.error("Can't delete attachment directory %s: %s",
attachment_dir,
exception_to_unicode(e, traceback=True))
def open(self):
path = self.path
self.env.log.debug('Trying to open attachment at %s', path)
try:
fd = open(path, 'rb')
except IOError:
raise ResourceNotFound(_("Attachment '%(filename)s' not found",
filename=self.filename))
return fd
def _create_unique_file(self, dir, filename):
parts = os.path.splitext(filename)
flags = os.O_CREAT + os.O_WRONLY + os.O_EXCL
if hasattr(os, 'O_BINARY'):
flags += os.O_BINARY
idx = 1
while 1:
path = os.path.join(dir, self._get_hashed_filename(filename))
try:
return filename, os.fdopen(os.open(path, flags, 0666), 'w')
except OSError as e:
if e.errno != errno.EEXIST:
raise
idx += 1
# A sanity check
if idx > 100:
raise Exception('Failed to create unique name: ' + path)
filename = '%s.%d%s' % (parts[0], idx, parts[1])
class LegacyAttachmentPolicy(Component):
implements(IPermissionPolicy)
delegates = ExtensionPoint(ILegacyAttachmentPolicyDelegate)
realm = AttachmentModule.realm
# IPermissionPolicy methods
_perm_maps = {
'ATTACHMENT_CREATE': {'ticket': 'TICKET_APPEND', 'wiki': 'WIKI_MODIFY',
'milestone': 'MILESTONE_MODIFY'},
'ATTACHMENT_VIEW': {'ticket': 'TICKET_VIEW', 'wiki': 'WIKI_VIEW',
'milestone': 'MILESTONE_VIEW'},
'ATTACHMENT_DELETE': {'ticket': 'TICKET_ADMIN', 'wiki': 'WIKI_DELETE',
'milestone': 'MILESTONE_DELETE'},
}
def check_permission(self, action, username, resource, perm):
perm_map = self._perm_maps.get(action)
if not perm_map or not resource or resource.realm != self.realm:
return
legacy_action = perm_map.get(resource.parent.realm)
if legacy_action:
decision = legacy_action in perm(resource.parent)
if not decision:
self.log.debug('LegacyAttachmentPolicy denied %s access to '
'%s. User needs %s',
username, resource, legacy_action)
return decision
else:
for d in self.delegates:
decision = d.check_attachment_permission(action, username,
resource, perm)
if decision is not None:
return decision
class AttachmentAdmin(Component):
"""trac-admin command provider for attachment administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('attachment list', '<realm:id>',
"""List attachments of a resource
The resource is identified by its realm and identifier.""",
self._complete_list, self._do_list)
yield ('attachment add', '<realm:id> <path> [author] [description]',
"""Attach a file to a resource
The resource is identified by its realm and identifier. The
attachment will be named according to the base name of the file.
""",
self._complete_add, self._do_add)
yield ('attachment remove', '<realm:id> <name>',
"""Remove an attachment from a resource
The resource is identified by its realm and identifier.""",
self._complete_remove, self._do_remove)
yield ('attachment export', '<realm:id> <name> [destination]',
"""Export an attachment from a resource to a file or stdout
The resource is identified by its realm and identifier. If no
destination is specified, the attachment is output to stdout.
""",
self._complete_export, self._do_export)
def get_realm_list(self):
rs = ResourceSystem(self.env)
return PrefixList([each + ":" for each in rs.get_known_realms()])
def split_resource(self, resource):
result = resource.split(':', 1)
if len(result) != 2:
raise AdminCommandError(_("Invalid resource identifier '%(id)s'",
id=resource))
return result
def get_attachment_list(self, resource):
(realm, id) = self.split_resource(resource)
return [a.filename for a in Attachment.select(self.env, realm, id)]
def _complete_list(self, args):
if len(args) == 1:
return self.get_realm_list()
def _complete_add(self, args):
if len(args) == 1:
return self.get_realm_list()
elif len(args) == 2:
return get_dir_list(args[1])
def _complete_remove(self, args):
if len(args) == 1:
return self.get_realm_list()
elif len(args) == 2:
return self.get_attachment_list(args[0])
def _complete_export(self, args):
if len(args) < 3:
return self._complete_remove(args)
elif len(args) == 3:
return get_dir_list(args[2])
def _do_list(self, resource):
(realm, id) = self.split_resource(resource)
print_table([(a.filename, pretty_size(a.size), a.author,
format_datetime(a.date, console_datetime_format),
a.description)
for a in Attachment.select(self.env, realm, id)],
[_('Name'), _('Size'), _('Author'), _('Date'),
_('Description')])
def _do_add(self, resource, path, author='trac', description=''):
(realm, id) = self.split_resource(resource)
attachment = Attachment(self.env, realm, id)
attachment.author = author
attachment.description = description
filename = _normalized_filename(os.path.basename(path))
with open(path, 'rb') as f:
attachment.insert(filename, f, os.path.getsize(path))
def _do_remove(self, resource, name):
(realm, id) = self.split_resource(resource)
attachment = Attachment(self.env, realm, id, name)
attachment.delete()
def _do_export(self, resource, name, destination=None):
(realm, id) = self.split_resource(resource)
attachment = Attachment(self.env, realm, id, name)
if destination is not None:
if os.path.isdir(destination):
destination = os.path.join(destination, name)
if os.path.isfile(destination):
raise AdminCommandError(_("File '%(name)s' exists",
name=path_to_unicode(destination)))
with attachment.open() as input:
output = open(destination, "wb") if destination is not None \
else sys.stdout
try:
shutil.copyfileobj(input, output)
finally:
if destination is not None:
output.close()
_control_codes_re = re.compile(
'[' +
''.join(filter(lambda c: unicodedata.category(c) == 'Cc',
map(unichr, xrange(0x10000)))) +
']')
def _normalized_filename(filepath):
# We try to normalize the filename to unicode NFC if we can.
# Files uploaded from OS X might be in NFD.
if not isinstance(filepath, unicode):
filepath = unicode(filepath, 'utf-8')
filepath = unicodedata.normalize('NFC', filepath)
# Replace control codes with spaces, e.g. NUL, LF, DEL, U+009F
filepath = _control_codes_re.sub(' ', filepath)
# Replace backslashes with slashes if filename is Windows full path
if filepath.startswith('\\') or re.match(r'[A-Za-z]:\\', filepath):
filepath = filepath.replace('\\', '/')
# We want basename to be delimited by only slashes on all platforms
filename = posixpath.basename(filepath)
filename = stripws(filename)
return filename
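# --- Illustrative sketch (not part of Trac): a standalone rendering of the
# hashed-path scheme implemented by _get_hashed_filename() above. Only the
# standard library is assumed.
import hashlib
import re

_ext_re = re.compile(r'\.[A-Za-z0-9]+\Z')

def hashed_filename_demo(filename):
    # SHA-1 of the UTF-8 encoded name, keeping a recognizable extension.
    digest = hashlib.sha1(filename.encode('utf-8')).hexdigest()
    match = _ext_re.search(filename)
    return digest + match.group(0) if match else digest

# hashed_filename_demo(u'report.pdf') -> 40 hex chars + '.pdf'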
|
walty8/trac
|
trac/attachment.py
|
Python
|
bsd-3-clause
| 47,208
|
import time
from prometheus_client import Counter, Histogram
from prometheus_client import start_http_server
from flask import request
FLASK_REQUEST_LATENCY = Histogram('flask_request_latency_seconds', 'Flask Request Latency',
['method', 'endpoint'])
FLASK_REQUEST_COUNT = Counter('flask_request_count', 'Flask Request Count',
['method', 'endpoint', 'http_status'])
def before_request():
request.start_time = time.time()
def after_request(response):
request_latency = time.time() - request.start_time
FLASK_REQUEST_LATENCY.labels(request.method, request.path).observe(request_latency)
FLASK_REQUEST_COUNT.labels(request.method, request.path, response.status_code).inc()
return response
def monitor(app, port=8000, addr=''):
app.before_request(before_request)
app.after_request(after_request)
start_http_server(port, addr)
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
monitor(app, port=8000)
@app.route('/')
def index():
return "Hello"
# Run the application!
app.run()
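# --- Usage sketch (not part of the module): the counters above can be
# inspected without the HTTP server via prometheus_client's text
# exposition; the label values here are made up for the demo.
#   from prometheus_client import generate_latest
#   FLASK_REQUEST_COUNT.labels('GET', '/', 200).inc()
#   print(generate_latest().decode('utf-8'))  # includes flask_request_count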
|
sbarratt/flask-prometheus
|
flask_prometheus/__init__.py
|
Python
|
bsd-3-clause
| 1,126
|
from datetime import datetime
import functools
import os
import uuid
from time import time
from django.conf import settings
from django_statsd.clients import statsd
from mock import Mock
from requests import post
from suds import client as sudsclient
from suds.transport import Reply
from suds.transport.http import HttpTransport
from solitude.logger import getLogger
from .constants import (ACCESS_DENIED, HEADERS_SERVICE, INTERNAL_ERROR,
SERVICE_UNAVAILABLE)
from .errors import AuthError, BangoError, BangoFormError, ProxyError
root = os.path.join(settings.ROOT, 'lib', 'bango', 'wsdl', settings.BANGO_ENV)
wsdl = {
'exporter': 'file://' + os.path.join(root, 'mozilla_exporter.wsdl'),
'billing': 'file://' + os.path.join(root, 'billing_configuration.wsdl'),
'direct': 'file://' + os.path.join(root, 'direct_billing.wsdl'),
}
# Add in the whitelist of supported methods here.
exporter = [
'AcceptSBIAgreement',
'CreateBangoNumber',
'CreateBankDetails',
'CreatePackage',
'DeleteVATNumber',
'GetAcceptedSBIAgreement',
'GetPackage',
'GetSBIAgreement',
'MakePremiumPerAccess',
'SetVATNumber',
'UpdateAddressDetails',
'UpdateFinanceEmailAddress',
'UpdateRating',
'UpdateSupportEmailAddress',
]
billing = [
'CreateBillingConfiguration',
]
direct = [
'DoRefund',
'GetRefundStatus',
]
# Status codes from the proxy that raise an error and stop processing.
FATAL_PROXY_STATUS_CODES = (404, 500,)
# Turn the method into the appropriate name. If the Bango WSDL diverges this will
# need to change.
def get_request(name):
return name + 'Request'
def get_response(name):
return name + 'Response'
def get_result(name):
return name + 'Result'
log = getLogger('s.bango')
class Client(object):
def __getattr__(self, attr):
for name, methods in (['exporter', exporter],
['billing', billing],
['direct', direct]):
if attr in methods:
return functools.partial(self.call, attr, wsdl=str(name))
raise AttributeError('Unknown request: %s' % attr)
def call(self, name, data, wsdl='exporter'):
client = self.client(wsdl)
package = client.factory.create(get_request(name))
for k, v in data.iteritems():
setattr(package, k, v)
package.username = settings.BANGO_AUTH.get('USER', '')
package.password = settings.BANGO_AUTH.get('PASSWORD', '')
# Actually call Bango.
with statsd.timer('solitude.bango.request.%s' % name.lower()):
response = getattr(client.service, name)(package)
self.is_error(response.responseCode, response.responseMessage)
return response
def client(self, name):
return sudsclient.Client(wsdl[name])
def is_error(self, code, message):
# Count the numbers of responses we get.
statsd.incr('solitude.bango.response.%s' % code.lower())
# If there was an error raise it.
if code == ACCESS_DENIED:
raise AuthError(ACCESS_DENIED, message)
# These are fatal Bango errors that the data can't really do much
# about.
elif code in (INTERNAL_ERROR, SERVICE_UNAVAILABLE):
raise BangoError(code, message)
# Assume that all other errors are errors from the data.
elif code != 'OK':
raise BangoFormError(code, message)
class Proxy(HttpTransport):
def send(self, request):
response = post(settings.BANGO_PROXY,
data=request.message,
headers={HEADERS_SERVICE: request.url},
verify=False)
if response.status_code in FATAL_PROXY_STATUS_CODES:
msg = ('Proxy returned: %s from: %s' %
(response.status_code, request.url))
log.error(msg)
raise ProxyError(msg)
return Reply(response.status_code, {}, response.content)
class ClientProxy(Client):
def client(self, name):
return sudsclient.Client(wsdl[name], transport=Proxy())
# Add in your mock method data here. If the method only returns a
# responseCode and a responseMessage, there's no need to add the method.
#
# Use of time() for ints means that tests work and so do requests from the
# command line using mock. As long as you don't do them too fast.
ltime = lambda: str(int(time() * 1000000))[8:]
mock_data = {
'CreateBangoNumber': {
'bango': 'some-bango-number',
},
'CreatePackage': {
'packageId': ltime,
'adminPersonId': ltime,
'supportPersonId': ltime,
'financePersonId': ltime
},
'UpdateSupportEmailAddress': {
'personId': ltime,
'personPassword': 'xxxxx',
},
'UpdateFinanceEmailAddress': {
'personId': ltime,
'personPassword': 'xxxxx',
},
'CreateBillingConfiguration': {
'billingConfigurationId': uuid.uuid4,
},
'GetAcceptedSBIAgreement': {
'sbiAgreementAccepted': True,
'acceptedSBIAgreement': '2013-01-23 00:00:00',
'sbiAgreementExpires': '2014-01-23 00:00:00'
},
'GetSBIAgreement': {
'sbiAgreement': 'Blah...',
        # Although it's a date, the WSDL has this as a date time.
'sbiAgreementValidFrom': '2010-08-31T00:00:00',
},
'DoRefund': {
'refundTransactionId': uuid.uuid4
},
'GetPackage': {
'adminEmailAddress': 'admin@email.com',
'supportEmailAddress': 'support@email.com',
'financeEmailAddress': 'finance@email.com',
'paypalEmailAddress': 'paypal@email.com',
'vendorName': 'Some Vendor',
'companyName': 'Some Company',
'address1': 'Address 1',
'address2': 'Address 2',
'addressCity': 'City',
'addressState': 'State',
'addressZipCode': '90210',
'addressPhone': '1234567890',
'addressFax': '1234567890',
'vatNumber': '1234567890',
'countryIso': 'BMU',
'currencyIso': 'EUR',
'homePageURL': 'http://mozilla.org',
'eventNotificationEnabled': False,
'eventNotificationURL': '',
'status': 'LIC',
'sbiAgreementAccepted': True,
'acceptedSBIAgreement': datetime.today,
'sbiAgreementExpires': datetime.today,
}
}
class ClientMock(Client):
def mock_results(self, key, data=None):
"""
Returns result for a key. Data can be passed in to override mock_data.
"""
result = data or mock_data.get(key, {}).copy()
for key, value in (['responseCode', 'OK'], ['responseMessage', '']):
if key not in result:
result[key] = value
return result
def call(self, name, data, wsdl=''):
"""
This fakes out the client and just looks up the values in mock_results
for that service.
"""
bango = dict_to_mock(self.mock_results(name), callables=True)
self.is_error(bango.responseCode, bango.responseMessage)
return bango
def response_to_dict(resp):
"""Converts a suds response into a dictionary suitable for JSON"""
return dict((k, getattr(resp, k)) for k in resp.__keylist__)
def dict_to_mock(data, callables=False):
"""
Converts a dictionary into a suds like mock.
callables: will call any value if its callable, default False.
"""
result = Mock()
result.__keylist__ = data.keys()
for k, v in data.iteritems():
if callables and callable(v):
v = v()
setattr(result, k, v)
return result
def get_client():
"""
Use this to get the right client and communicate with Bango.
"""
if settings.BANGO_MOCK:
return ClientMock()
if settings.BANGO_PROXY:
return ClientProxy()
return Client()
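# --- Illustrative sketch (assumes settings.BANGO_MOCK is true): the mock
# client path above answers from mock_data, so no Bango credentials are
# needed; the request fields are made up for the demo.
#   client = get_client()                        # -> ClientMock
#   resp = client.CreatePackage({'packageName': 'demo'})
#   assert resp.responseCode == 'OK'
#   resp.packageId                               # ltime()-generated id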
|
muffinresearch/solitude
|
lib/bango/client.py
|
Python
|
bsd-3-clause
| 7,873
|
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db import models
import mozdns
from mozdns.domain.models import Domain
from mozdns.view.models import View
from mozdns.mixins import ObjectUrlMixin, DisplayMixin
from mozdns.validation import validate_first_label, validate_name
from mozdns.validation import validate_ttl
class LabelDomainMixin(models.Model):
"""
This class provides common functionality that many DNS record
classes share. This includes a foreign key to the ``domain`` table
and a ``label`` CharField.
If you plan on using the ``unique_together`` constraint on a Model
that inherits from ``LabelDomainMixin``, you must include ``domain`` and
``label`` explicitly if you need them to.
All common records have a ``fqdn`` field. This field is updated
every time the object is saved::
fqdn = name + domain.name
or if name == ''
fqdn = domain.name
This field makes searching for records much easier. Instead of
looking at ``obj.label`` together with ``obj.domain.name``, you can
just search the ``obj.fqdn`` field.
"the total number of octets that represent a name (i.e., the sum of
    all label octets and label lengths) is limited to 255" - RFC 4343
"""
domain = models.ForeignKey(Domain, null=False, help_text="FQDN of the "
"domain after the short hostname. "
"(Ex: <i>Vlan</i>.<i>DC</i>.mozilla.com)")
# "The length of any one label is limited to between 1 and 63 octets."
    # -- RFC 2181
label = models.CharField(max_length=63, blank=True, null=True,
validators=[validate_first_label],
help_text="Short name of the fqdn")
fqdn = models.CharField(max_length=255, blank=True, null=True,
validators=[validate_name], db_index=True)
class Meta:
abstract = True
class ViewMixin(models.Model):
    def validate_views(instance, views):
        # clean_views expects the full list, so a single call suffices
        instance.clean_views(views)
views = models.ManyToManyField(
View, blank=True, validators=[validate_views]
)
class Meta:
abstract = True
def clean_views(self, views):
"""cleaned_data is the data that is going to be called with for
updating an existing or creating a new object. Classes should implement
this function according to their specific needs.
"""
for view in views:
if hasattr(self, 'domain'):
self.check_no_ns_soa_condition(self.domain, view=view)
if hasattr(self, 'reverse_domain'):
self.check_no_ns_soa_condition(self.reverse_domain, view=view)
def check_no_ns_soa_condition(self, domain, view=None):
if domain.soa:
fail = False
root_domain = domain.soa.root_domain
if root_domain and not root_domain.nameserver_set.exists():
fail = True
elif (view and
not root_domain.nameserver_set.filter(views=view).exists()):
fail = True
if fail:
raise ValidationError(
"The zone you are trying to assign this record into does "
"not have an NS record, thus cannnot support other "
"records.")
class MozdnsRecord(ViewMixin, DisplayMixin, ObjectUrlMixin):
ttl = models.PositiveIntegerField(default=3600, blank=True, null=True,
validators=[validate_ttl],
help_text="Time to Live of this record")
description = models.CharField(max_length=1000, blank=True, null=True,
help_text="A description of this record.")
# fqdn = label + domain.name <--- see set_fqdn
def __str__(self):
self.set_fqdn()
return self.bind_render_record()
def __repr__(self):
return "<{0} '{1}'>".format(self.rdtype, str(self))
class Meta:
abstract = True
@classmethod
def get_api_fields(cls):
"""
The purpose of this is to help the API decide which fields to expose
        to the user when they are creating and updating an object. This
        function should be implemented in inheriting models and overridden to
provide additional fields. Tastypie ignores any relational fields on
the model. See the ModelResource definitions for view and domain
fields.
"""
return ['fqdn', 'ttl', 'description', 'views']
def clean(self):
# The Nameserver and subclasses of BaseAddressRecord do not call this
# function
self.set_fqdn()
self.check_TLD_condition()
self.check_no_ns_soa_condition(self.domain)
self.check_for_delegation()
if self.rdtype != 'CNAME':
self.check_for_cname()
def delete(self, *args, **kwargs):
if self.domain.soa:
self.domain.soa.schedule_rebuild()
from mozdns.utils import prune_tree
call_prune_tree = kwargs.pop('call_prune_tree', True)
objs_domain = self.domain
super(MozdnsRecord, self).delete(*args, **kwargs)
if call_prune_tree:
prune_tree(objs_domain)
def save(self, *args, **kwargs):
self.full_clean()
if self.pk:
# We need to get the domain from the db. If it's not our current
# domain, call prune_tree on the domain in the db later.
db_domain = self.__class__.objects.get(pk=self.pk).domain
if self.domain == db_domain:
db_domain = None
else:
db_domain = None
no_build = kwargs.pop("no_build", False)
super(MozdnsRecord, self).save(*args, **kwargs)
if no_build:
pass
else:
# Mark the soa
if self.domain.soa:
self.domain.soa.schedule_rebuild()
if db_domain:
from mozdns.utils import prune_tree
prune_tree(db_domain)
def set_fqdn(self):
try:
if self.label == '':
self.fqdn = self.domain.name
else:
self.fqdn = "{0}.{1}".format(self.label,
self.domain.name)
except ObjectDoesNotExist:
return
def check_for_cname(self):
"""
"If a CNAME RR is preent at a node, no other data should be
present; this ensures that the data for a canonical name and its
aliases cannot be different."
-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_
Call this function in models that can't overlap with an existing
CNAME.
"""
CNAME = mozdns.cname.models.CNAME
if hasattr(self, 'label'):
if CNAME.objects.filter(domain=self.domain,
label=self.label).exists():
raise ValidationError("A CNAME with this name already exists.")
else:
if CNAME.objects.filter(label='', domain=self.domain).exists():
raise ValidationError("A CNAME with this name already exists.")
def check_for_delegation(self):
"""
If an object's domain is delegated it should not be able to
be changed. Delegated domains cannot have objects created in
them.
"""
try:
if not self.domain.delegated:
return
except ObjectDoesNotExist:
return
if not self.pk: # We don't exist yet.
raise ValidationError("No objects can be created in the {0}"
"domain. It is delegated."
.format(self.domain.name))
def check_TLD_condition(self):
domain = Domain.objects.filter(name=self.fqdn)
if not domain:
return
if self.label == '' and domain[0] == self.domain:
return # This is allowed
else:
raise ValidationError("You cannot create an record that points "
"to the top level of another domain.")
|
rtucker-mozilla/mozilla_inventory
|
mozdns/models.py
|
Python
|
bsd-3-clause
| 8,260
|
from pandac.PandaModules import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.task import Task
from .DistributedNodeAI import DistributedNodeAI
from .CartesianGridBase import CartesianGridBase
class DistributedCartesianGridAI(DistributedNodeAI, CartesianGridBase):
notify = directNotify.newCategory("DistributedCartesianGridAI")
RuleSeparator = ":"
def __init__(self, air, startingZone, gridSize, gridRadius, cellWidth,
style="Cartesian"):
DistributedNodeAI.__init__(self, air)
self.style = style
self.startingZone = startingZone
self.gridSize = gridSize
self.gridRadius = gridRadius
self.cellWidth = cellWidth
# Keep track of all AI objects added to the grid
self.gridObjects = {}
self.updateTaskStarted = 0
def delete(self):
DistributedNodeAI.delete(self)
self.stopUpdateGridTask()
def isGridParent(self):
# If this distributed object is a DistributedGrid return 1.
# 0 by default
return 1
def getCellWidth(self):
return self.cellWidth
def getParentingRules(self):
self.notify.debug("calling getter")
rule = ("%i%s%i%s%i" % (self.startingZone, self.RuleSeparator,
self.gridSize, self.RuleSeparator,
self.gridRadius))
return [self.style, rule]
# Reparent and setLocation on av to DistributedOceanGrid
def addObjectToGrid(self, av, useZoneId=-1, startAutoUpdate=True):
self.notify.debug("setting parent to grid %s" % self)
avId = av.doId
# Create a grid parent
#gridParent = self.attachNewNode("gridParent-%s" % avId)
#self.gridParents[avId] = gridParent
self.gridObjects[avId] = av
# Put the avatar on the grid
self.handleAvatarZoneChange(av, useZoneId)
if (not self.updateTaskStarted) and startAutoUpdate:
self.startUpdateGridTask()
def removeObjectFromGrid(self, av):
# TODO: WHAT LOCATION SHOULD WE SET THIS TO?
#av.wrtReparentTo(self.parentNP)
#av.setLocation(self.air.districtId, 1000)
# Remove grid parent for this av
avId = av.doId
if avId in self.gridObjects:
del self.gridObjects[avId]
# Stop task if there are no more av's being managed
if len(self.gridObjects) == 0:
self.stopUpdateGridTask()
#####################################################################
# updateGridTask
# This task is similar to the processVisibility task for the local client.
# A couple differences:
# - we are not doing setInterest on the AI (that is a local client
# specific call).
# - we assume that the moving objects on the grid are parented to a
# gridParent, and are broadcasting their position relative to that
# gridParent. This makes the task's math easy. Just check to see
# when our position goes out of the current grid cell. When it does,
# call handleAvatarZoneChange
def startUpdateGridTask(self):
self.stopUpdateGridTask()
self.updateTaskStarted = 1
taskMgr.add(self.updateGridTask, self.taskName("updateGridTask"))
def stopUpdateGridTask(self):
taskMgr.remove(self.taskName("updateGridTask"))
self.updateTaskStarted = 0
def updateGridTask(self, task=None):
# Run through all grid objects and update their parents if needed
missingObjs = []
for avId in self.gridObjects.keys():
av = self.gridObjects[avId]
# handle a missing object after it is already gone?
if (av.isEmpty()):
task.setDelay(1.0)
del self.gridObjects[avId]
continue
pos = av.getPos()
if ((pos[0] < 0 or pos[1] < 0) or
(pos[0] > self.cellWidth or pos[1] > self.cellWidth)):
# we are out of the bounds of this current cell
self.handleAvatarZoneChange(av)
# Do this every second, not every frame
if (task):
task.setDelay(1.0)
return Task.again
def handleAvatarZoneChange(self, av, useZoneId=-1):
# Calculate zone id
# Get position of av relative to this grid
if (useZoneId == -1):
pos = av.getPos(self)
zoneId = self.getZoneFromXYZ(pos)
else:
# zone already calculated, position of object might not
# give the correct zone
pos = None
zoneId = useZoneId
if not self.isValidZone(zoneId):
self.notify.warning(
"%s handleAvatarZoneChange %s: not a valid zone (%s) for pos %s" %(self.doId, av.doId, zoneId, pos))
return
# Set the location on the server.
# setLocation will update the gridParent
av.b_setLocation(self.doId, zoneId)
def handleSetLocation(self, av, parentId, zoneId):
pass
#if (av.parentId != parentId):
# parent changed, need to look up instance tree
# to see if avatar's named area location information
# changed
#av.requestRegionUpdateTask(regionegionUid)
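# --- Illustrative sketch (standalone): the cell-exit test used by
# updateGridTask above; positions are relative to the grid parent.
def out_of_cell(pos, cell_width):
    return (pos[0] < 0 or pos[1] < 0 or
            pos[0] > cell_width or pos[1] > cell_width)

# out_of_cell((5.0, 12.5), 10.0) -> True, triggers handleAvatarZoneChange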
|
brakhane/panda3d
|
direct/src/distributed/DistributedCartesianGridAI.py
|
Python
|
bsd-3-clause
| 5,309
|
from .AlFeatureTemplate import AlFeatureTemplate
from .sensorCountRoutine import AlFeatureSensorCountRoutine
import numpy as np
class AlFeatureSensorCount(AlFeatureTemplate):
def __init__(self, normalize=False):
"""
Initialization of Template Class
:return:
"""
AlFeatureTemplate.__init__(self,
name='sensorCount',
description='Number of Events in the window related to the sensor',
per_sensor=True,
enabled=True,
routine=AlFeatureSensorCountRoutine())
# Normalize the number between 0 to 1
self.normalize = normalize
def get_feature_value(self, data_list, cur_index, window_size, sensor_name=None):
"""
Counts the number of occurrence of the sensor specified in current window.
:param data_list: list of sensor data
:param cur_index: current data record index
:param window_size: window size
:param sensor_name: name of sensor
:return: a double value
"""
if self.normalize:
return np.float(self.routine.sensor_count[sensor_name])/(window_size * 2)
else:
return np.float(self.routine.sensor_count[sensor_name])
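# --- Illustrative sketch (standalone): the normalization used above, with
# plain float standing in for np.float.
def normalized_count(count, window_size):
    # Scales the per-sensor event count by twice the window size.
    return float(count) / (window_size * 2)

# normalized_count(15, 30) -> 0.25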
|
TinghuiWang/ActivityLearning
|
actlearn/feature/sensorCount.py
|
Python
|
bsd-3-clause
| 1,358
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from collections import OrderedDict
import pytest
import numpy as np
import pandas as pd
from pandas import Index, Series, DataFrame, date_range
from pandas.core.indexes.datetimes import Timestamp
from pandas.compat import range
from pandas import compat
import pandas.io.formats.printing as printing
from pandas.util.testing import (assert_series_equal,
ensure_clean)
import pandas.util.testing as tm
from .common import TestData
class SharedWithSparse(object):
"""
A collection of tests Series and SparseSeries can share.
In generic tests on this class, use ``self._assert_series_equal()``
which is implemented in sub-classes.
"""
def _assert_series_equal(self, left, right):
"""Dispatch to series class dependent assertion"""
raise NotImplementedError
def test_scalarop_preserve_name(self):
result = self.ts * 2
assert result.name == self.ts.name
def test_copy_name(self):
result = self.ts.copy()
assert result.name == self.ts.name
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
assert self.ts.index.name is None
assert self.ts is self.ts
cp = self.ts.copy()
cp.index.name = 'foo'
printing.pprint_thing(self.ts.index.name)
assert self.ts.index.name is None
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
assert result.name == self.ts.name
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
assert result.name == self.ts.name
result = self.ts.mul(self.ts)
assert result.name == self.ts.name
result = self.ts * self.ts[:-2]
assert result.name == self.ts.name
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
assert result.name is None
result = self.ts.add(cp)
assert result.name is None
ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']
ops = ops + ['r' + op for op in ops]
for op in ops:
# names match, preserve
s = self.ts.copy()
result = getattr(s, op)(s)
assert result.name == self.ts.name
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'changed'
result = getattr(s, op)(cp)
assert result.name is None
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
assert result.name == self.ts.name
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
assert result.name == self.ts.name
result = self.ts[[0, 2, 4]]
assert result.name == self.ts.name
result = self.ts[5:10]
assert result.name == self.ts.name
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
assert result.name == self.ts.name
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
assert result.name == self.ts.name
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
assert result.name == self.ts.name
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = self.series_klass(d)
expected = self.series_klass(d, index=sorted(d.keys()))
self._assert_series_equal(result, expected)
result = self.series_klass(d, index=['b', 'c', 'd', 'a'])
expected = self.series_klass([1, 2, np.nan, 0],
index=['b', 'c', 'd', 'a'])
self._assert_series_equal(result, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = self.series_klass(data)
expected = self.series_klass(dict(compat.iteritems(data)))
self._assert_series_equal(series, expected)
def test_constructor_ordereddict(self):
# GH3283
data = OrderedDict(
('col%s' % i, np.random.random()) for i in range(12))
series = self.series_klass(data)
expected = self.series_klass(list(data.values()), list(data.keys()))
self._assert_series_equal(series, expected)
# Test with subclass
class A(OrderedDict):
pass
series = self.series_klass(A(data))
self._assert_series_equal(series, expected)
def test_constructor_dict_multiindex(self):
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
result = self.series_klass(d)
expected = self.series_klass(
[x[1] for x in _d],
index=pd.MultiIndex.from_tuples([x[0] for x in _d]))
self._assert_series_equal(result, expected)
d['z'] = 111.
_d.insert(0, ('z', d['z']))
result = self.series_klass(d)
expected = self.series_klass([x[1] for x in _d],
index=pd.Index([x[0] for x in _d],
tupleize_cols=False))
result = result.reindex(index=expected.index)
self._assert_series_equal(result, expected)
def test_constructor_dict_timedelta_index(self):
# GH #12169 : Resample category data with timedelta index
# construct Series from dict as data and TimedeltaIndex as index
# will result NaN in result Series data
expected = self.series_klass(
data=['A', 'B', 'C'],
index=pd.to_timedelta([0, 10, 20], unit='s')
)
result = self.series_klass(
data={pd.to_timedelta(0, unit='s'): 'A',
pd.to_timedelta(10, unit='s'): 'B',
pd.to_timedelta(20, unit='s'): 'C'},
index=pd.to_timedelta([0, 10, 20], unit='s')
)
self._assert_series_equal(result, expected)
def test_from_array_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.series_klass.from_array([1, 2, 3])
class TestSeriesMisc(TestData, SharedWithSparse):
series_klass = Series
# SharedWithSparse tests use generic, series_klass-agnostic assertion
_assert_series_equal = staticmethod(tm.assert_series_equal)
def test_tab_completion(self):
# GH 9910
s = Series(list('abcd'))
# Series of str values should have .str but not .dt/.cat in __dir__
assert 'str' in dir(s)
assert 'dt' not in dir(s)
assert 'cat' not in dir(s)
        # similarly for .dt
s = Series(date_range('1/1/2015', periods=5))
assert 'dt' in dir(s)
assert 'str' not in dir(s)
assert 'cat' not in dir(s)
        # Similarly for .cat, but with the twist that str and dt should be
        # there if the categories are of that type; first cat and str.
s = Series(list('abbcd'), dtype="category")
assert 'cat' in dir(s)
assert 'str' in dir(s) # as it is a string categorical
assert 'dt' not in dir(s)
# similar to cat and str
s = Series(date_range('1/1/2015', periods=5)).astype("category")
assert 'cat' in dir(s)
assert 'str' not in dir(s)
assert 'dt' in dir(s) # as it is a datetime categorical
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
pytest.raises(TypeError, hash, s_empty)
pytest.raises(TypeError, hash, s)
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_iter(self):
for i, val in enumerate(self.series):
assert val == self.series[i]
for i, val in enumerate(self.ts):
assert val == self.ts[i]
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
assert getkeys() is self.ts.index
def test_values(self):
tm.assert_almost_equal(self.ts.values, self.ts, check_dtype=False)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
assert val == self.series[idx]
for idx, val in compat.iteritems(self.ts):
assert val == self.ts[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(self.series.iteritems(), 'reverse')
def test_items(self):
for idx, val in self.series.items():
assert val == self.series[idx]
for idx, val in self.ts.items():
assert val == self.ts[idx]
        # assert is lazy (generators don't define reverse, lists do)
assert not hasattr(self.series.items(), 'reverse')
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with pytest.raises(AttributeError):
s.info()
def test_copy(self):
for deep in [None, False, True]:
s = Series(np.arange(10), dtype='float64')
# default deep is True
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[::2] = np.NaN
if deep is None or deep is True:
# Did not modify original Series
assert np.isnan(s2[0])
assert not np.isnan(s[0])
else:
# we DID modify the original Series
assert np.isnan(s2[0])
assert np.isnan(s[0])
# GH 11794
# copy of tz-aware
expected = Series([Timestamp('2012/01/01', tz='UTC')])
expected2 = Series([Timestamp('1999/01/01', tz='UTC')])
for deep in [None, False, True]:
s = Series([Timestamp('2012/01/01', tz='UTC')])
if deep is None:
s2 = s.copy()
else:
s2 = s.copy(deep=deep)
s2[0] = pd.Timestamp('1999/01/01', tz='UTC')
# default deep is True
if deep is None or deep is True:
# Did not modify original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected)
else:
# we DID modify the original Series
assert_series_equal(s2, expected2)
assert_series_equal(s, expected2)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
assert s.dropna().sum('rows') == 3
assert s._get_axis_number('rows') == 0
assert s._get_axis_name('rows') == 'index'
def test_class_axis(self):
# https://github.com/pandas-dev/pandas/issues/18147
Series.index # no exception!
def test_numpy_unique(self):
# it works!
np.unique(self.ts)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.idxmax()]
result = tsdf.apply(f)
expected = tsdf.max()
tm.assert_series_equal(result, expected)
# .item()
s = Series([1])
result = s.item()
assert result == 1
assert s.item() == s.iloc[0]
# using an ndarray like function
s = Series(np.random.randn(10))
result = Series(np.ones_like(s))
expected = Series(1, index=range(10), dtype='float64')
tm.assert_series_equal(result, expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'), s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0, 1., -1], index=list('abc'))
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.], index=['b']))
result = np.compress(s < -1, s)
        # result is empty, with Index(dtype=object) the same as the original
exp = Series([], dtype='float64', index=Index([], dtype='object'))
tm.assert_series_equal(result, exp)
s = Series([0, 1., -1], index=[.1, .2, .3])
result = np.compress(s > 0, s)
tm.assert_series_equal(result, Series([1.], index=[.2]))
result = np.compress(s < -1, s)
        # result is empty, with Float64Index the same as the original
exp = Series([], dtype='float64', index=Index([], dtype='float64'))
tm.assert_series_equal(result, exp)
def test_str_attribute(self):
# GH9068
methods = ['strip', 'rstrip', 'lstrip']
s = Series([' jack', 'jill ', ' jesse ', 'frank'])
for method in methods:
expected = Series([getattr(str, method)(x) for x in s.values])
assert_series_equal(getattr(Series.str, method)(s.str), expected)
# str accessor only valid with string values
s = Series(range(5))
with tm.assert_raises_regex(AttributeError,
'only use .str accessor'):
s.str.repeat(2)
def test_empty_method(self):
s_empty = pd.Series()
assert s_empty.empty
for full_series in [pd.Series([1]), pd.Series(index=[1])]:
assert not full_series.empty
def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip('IPython', minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; s = pd.Series()"
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter('ignore'):
list(ip.Completer.completions('s.', 1))
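# --- Illustrative sketch (standalone, classic non-copy-on-write pandas):
# the deep-vs-shallow behavior exercised by test_copy above.
import numpy as np
import pandas as pd

s = pd.Series(np.arange(3, dtype='float64'))
shallow = s.copy(deep=False)
shallow[0] = np.nan
assert np.isnan(s[0])        # shallow copy shares the underlying data

s2 = pd.Series(np.arange(3, dtype='float64'))
deep = s2.copy()             # deep=True is the default
deep[0] = np.nan
assert not np.isnan(s2[0])   # deep copy leaves its source untouched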
|
winklerand/pandas
|
pandas/tests/series/test_api.py
|
Python
|
bsd-3-clause
| 14,504
|
# c: 19.05.2008, r: 19.05.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
dim = 2
field_1 = {
'name' : 'scalar_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 1,
}
field_2 = {
'name' : 'vector_field',
'dtype' : 'real',
'shape' : 'vector',
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
'us' : ('unknown field', 'scalar_field', 0),
'ts' : ('test field', 'scalar_field', 'us'),
'ps1' : ('parameter field', 'scalar_field', 'us'),
'ps2' : ('parameter field', 'scalar_field', 'us'),
'uv' : ('unknown field', 'vector_field', 1),
'tv' : ('test field', 'vector_field', 'uv'),
'pv1' : ('parameter field', 'vector_field', 'uv'),
'pv2' : ('parameter field', 'vector_field', 'uv'),
}
regions = {
'Omega' : ('all', {}),
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
material_1 = {
'name' : 'm',
'function' : 'get_pars',
}
fe = {
'chunk_size' : 100
}
##
# c: 19.05.2008, r: 19.05.2008
def get_pars( ts, coor, mode=None, region=None, ig=None, term = None ):
if mode == 'qp':
n_nod, dim = coor.shape
sym = (dim + 1) * dim / 2
if term == 'biot':
val = nm.zeros( (sym, 1), dtype = nm.float64 )
val[:dim] = 0.132
val[dim:sym] = 0.092
elif term == 'biot_m':
val = 1.0 / nm.array( [3.8], dtype = nm.float64 )
elif term == 'permeability':
val = nm.eye( dim, dtype = nm.float64 )
else:
raise ValueError
return {'val' : nm.tile(val, (coor.shape[0], 1, 1))}
functions = {
'get_pars' : (get_pars,),
}
# (eval term prefix, parameter corresponding to test variable, 'd' variables,
# 'dw' variables (test must be paired with unknown, which should be at
# index 2!), mat mode)
test_terms = [
('%s_biot.i1.Omega( m.val, %s, %s )',
('dw', 'ps1', ('pv1', 'ps1'), ('pv1', 'ts', 'us', 'uv', 'tv'), 'biot')),
('%s_biot.i1.Omega( m.val, %s, %s )',
('dw', 'pv1', ('pv1', 'ps1'), ('tv', 'ps1', 'uv', 'us', 'ts'), 'biot')),
('%s_diffusion.i1.Omega( m.val, %s, %s )',
('dw', 'ps1', ('ps1', 'ps2'), ('ts', 'ps1', 'us'), 'permeability')),
('%s_volume_dot_w.i1.Omega( m.val, %s, %s )',
('dw', 'ps1', ('ps1', 'ps2'), ('ts', 'ps1', 'us'), 'biot_m')),
]
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy.base.base import debug, pause
##
# c: 19.05.2008
class Test( TestCommon ):
##
# c: 19.05.2008, r: 19.05.2008
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf(conf, init_equations=False)
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
##
# c: 19.05.2008, r: 19.05.2008
def test_consistency_d_dw( self ):
from sfepy.fem import Function, Variables
ok = True
pb = self.problem
for aux in test_terms:
term_template, (prefix, par_name, d_vars, dw_vars, mat_mode) = aux
print term_template, prefix, par_name, d_vars, dw_vars, mat_mode
term1 = term_template % ((prefix,) + d_vars)
variables = Variables.from_conf(self.conf.variables, pb.fields)
for var_name in d_vars:
var = variables[var_name]
n_dof = var.field.n_nod * var.field.shape[0]
aux = nm.arange( n_dof, dtype = nm.float64 )
var.data_from_data(aux)
pb.materials['m'].function.set_extra_args(term = mat_mode)
if prefix == 'd':
val1 = pb.evaluate(term1, var_dict=variables.as_dict())
else:
val1 = pb.evaluate(term1, call_mode='d_eval',
var_dict=variables.as_dict())
self.report( '%s: %s' % (term1, val1) )
term2 = term_template % (('dw',) + dw_vars[:2])
vec, vv = pb.evaluate(term2, mode='weak',
var_dict=variables.as_dict(),
ret_variables=True)
pvec = vv.get_state_part_view(vec, dw_vars[2])
val2 = nm.dot( variables[par_name](), pvec )
self.report( '%s: %s' % (term2, val2) )
err = nm.abs( val1 - val2 ) / nm.abs( val1 )
_ok = err < 1e-12
self.report( 'relative difference: %e -> %s' % (err, _ok) )
ok = ok and _ok
return ok
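# --- Note (sketch): the identity checked above is d_term(p, u) == p . r,
# where r is the residual assembled by the paired dw_ term in weak mode.
# A toy analogue with a linear form r = A u:
#   import numpy as nm
#   A = nm.array([[2., 0.], [0., 3.]])
#   u = nm.array([1., 2.]); p = nm.array([4., 5.])
#   val_d  = p.dot(A).dot(u)        # direct bilinear evaluation ("d_")
#   r      = A.dot(u)               # weak-mode residual ("dw_")
#   val_dw = nm.dot(p, r)
#   abs(val_d - val_dw) < 1e-12     # the consistency bound used above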
|
olivierverdier/sfepy
|
tests/test_term_consistency.py
|
Python
|
bsd-3-clause
| 4,677
|
from builtins import str
from builtins import range
from builtins import object
import json
import os
import solnlib.utils as utils
from splunktaucclib.global_config import GlobalConfig, GlobalConfigSchema
'''
Usage Examples:
setup_util = Setup_Util(uri, session_key)
setup_util.get_log_level()
setup_util.get_proxy_settings()
setup_util.get_credential_account("my_account_name")
setup_util.get_customized_setting("my_customized_field_name")
'''
'''
setting object structure.
It is stored in self.__cached_global_settings
Note, this structure is only maintained in this util.
setup_util transforms global settings from the OS environment or from UCC into this structure.
{
"proxy_settings": {
"proxy_enabled": False/True,
"proxy_url": "example.com",
"proxy_port": "1234",
"proxy_username": "",
"proxy_password": "",
"proxy_type": "http",
"proxy_rdns": False/True
},
"log_settings": {
"loglevel": "DEBUG"
},
"credential_settings": [{
"name": "account_id",
"username": "example_account",
"password": "example_password"
}, { # supported by ucc, not seen any usage in AoB
"api_key": "admin",
"api_uuid": "admin",
"endpoint": "some url",
"name": "account1"
}],
"customized_settings": {
"text_name": "content",
"pass_name": "password",
"checkbox": 0/1
}
}
'''
GLOBAL_SETTING_KEY = "global_settings"
AOB_TEST_FLAG = 'AOB_TEST'
PROXY_SETTINGS = "proxy_settings"
LOG_SETTINGS = "log_settings"
CREDENTIAL_SETTINGS = "credential_settings"
CUSTOMIZED_SETTINGS = "customized_settings"
UCC_PROXY = "proxy"
UCC_LOGGING = "logging"
UCC_CUSTOMIZED = "additional_parameters"
UCC_CREDENTIAL = "account"
CONFIGS = [CREDENTIAL_SETTINGS]
SETTINGS = [PROXY_SETTINGS, LOG_SETTINGS, CUSTOMIZED_SETTINGS]
PROXY_ENABLE_KEY = 'proxy_enabled'
PROXY_RDNS_KEY = 'proxy_rdns'
LOG_LEVEL_KEY = 'loglevel'
LOG_LEVEL_KEY_ENV = 'log_level'
TYPE_CHECKBOX = "checkbox"
ALL_SETTING_TYPES = ['text', 'password', 'checkbox', 'dropdownlist', 'multi_dropdownlist', 'radiogroup']
def get_schema_path():
dirname = os.path.dirname
    basedir = dirname(dirname(dirname(dirname(dirname(__file__)))))
return os.path.join(basedir, 'appserver', 'static', 'js', 'build', 'globalConfig.json')
class Setup_Util(object):
def __init__(self, uri, session_key, logger=None):
self.__uri = uri
self.__session_key = session_key
self.__logger = logger
self.scheme, self.host, self.port = utils.extract_http_scheme_host_port(
self.__uri)
self.__cached_global_settings = {}
self.__global_config = None
def init_global_config(self):
if self.__global_config is not None:
return
schema_file = get_schema_path()
if not os.path.isfile(schema_file):
self.log_error("Global config JSON file not found!")
self.__global_config = None
else:
with open(get_schema_path()) as f:
json_schema = ''.join([l for l in f])
self.__global_config = GlobalConfig(self.__uri, self.__session_key,
GlobalConfigSchema(json.loads(json_schema)))
def log_error(self, msg):
if self.__logger:
self.__logger.error(msg)
def log_info(self, msg):
if self.__logger:
self.__logger.info(msg)
def log_debug(self, msg):
if self.__logger:
self.__logger.debug(msg)
def _parse_conf(self, key):
if os.environ.get(AOB_TEST_FLAG, 'false') == 'true':
global_settings = self._parse_conf_from_env(json.loads(os.environ.get(GLOBAL_SETTING_KEY, '{}')))
return global_settings.get(key)
else:
return self._parse_conf_from_global_config(key)
def _parse_conf_from_env(self, global_settings):
'''
        This is run in the test environment.
'''
if not self.__cached_global_settings:
# format the settings, the setting from env is from global_setting
# meta
self.__cached_global_settings = {}
for s_k, s_v in list(global_settings.items()):
if s_k == PROXY_SETTINGS:
proxy_enabled = s_v.get(PROXY_ENABLE_KEY)
proxy_rdns = s_v.get(PROXY_RDNS_KEY)
if type(proxy_enabled) != bool:
s_v[PROXY_ENABLE_KEY] = utils.is_true(proxy_enabled)
if type(proxy_rdns) != bool:
s_v[PROXY_RDNS_KEY] = utils.is_true(proxy_rdns)
self.__cached_global_settings[PROXY_SETTINGS] = s_v
elif s_k == LOG_SETTINGS:
self.__cached_global_settings[LOG_SETTINGS] = {
LOG_LEVEL_KEY: s_v.get(LOG_LEVEL_KEY_ENV)
}
elif s_k == CREDENTIAL_SETTINGS:
# add account id to accounts
for i in range(0, len(s_v)):
s_v[i]['name'] = 'account' + str(i)
self.__cached_global_settings[CREDENTIAL_SETTINGS] = s_v
else: # should be customized settings
self.__cached_global_settings[CUSTOMIZED_SETTINGS] = {}
for s in s_v:
field_type = s.get('type')
if not field_type:
self.log_error(
'unknown type for customized var:{}'.format(s))
continue
self.__cached_global_settings['customized_settings'][s.get('name', '')] = self._transform(
s.get("value", ""), field_type)
return self.__cached_global_settings
def _parse_conf_from_global_config(self, key):
if self.__cached_global_settings and key in self.__cached_global_settings:
return self.__cached_global_settings.get(key)
self.init_global_config()
if self.__global_config is None:
return None
if key in CONFIGS:
accounts = self.__global_config.configs.load().get(UCC_CREDENTIAL, [])
if accounts:
for account in accounts:
if 'disabled' in account:
del account['disabled']
self.__cached_global_settings[CREDENTIAL_SETTINGS] = accounts
elif key in SETTINGS:
settings = self.__global_config.settings.load()
self.__cached_global_settings.update({UCC_PROXY: None, UCC_LOGGING: None, UCC_CUSTOMIZED: None})
customized_setting = {}
for setting in settings.get('settings', []):
# filter out disabled setting page and 'disabled' field
if setting.get('disabled', False):
continue
if setting['name'] == UCC_LOGGING:
self.__cached_global_settings[LOG_SETTINGS] = {
LOG_LEVEL_KEY: setting.get(LOG_LEVEL_KEY)
}
elif setting['name'] == UCC_PROXY:
if 'disabled' in setting:
del setting['disabled']
setting[PROXY_ENABLE_KEY] = utils.is_true(setting.get(PROXY_ENABLE_KEY, '0'))
setting[PROXY_RDNS_KEY] = utils.is_true(setting.get(PROXY_RDNS_KEY, '0'))
self.__cached_global_settings[PROXY_SETTINGS] = setting
else: # should be customized settings
if 'disabled' in setting:
del setting['disabled']
customized_setting.update(setting)
self.__cached_global_settings[CUSTOMIZED_SETTINGS] = customized_setting
return self.__cached_global_settings.get(key)
def get_log_level(self):
log_level = "INFO"
log_settings = self._parse_conf(LOG_SETTINGS)
if log_settings is None:
self.log_info("Log level is not set, use default INFO")
else:
log_level = log_settings.get(LOG_LEVEL_KEY, None)
if not log_level:
self.log_info("Log level is not set, use default INFO")
log_level = "INFO"
return log_level
def get_proxy_settings(self):
proxy_settings = self._parse_conf(PROXY_SETTINGS)
if proxy_settings is None:
self.log_info("Proxy is not set!")
return {}
proxy_enabled = proxy_settings.get(PROXY_ENABLE_KEY)
if not proxy_enabled:
self.log_info("Proxy is not enabled!")
return {}
proxy_settings = {
"proxy_url": proxy_settings.get("proxy_url", ""),
"proxy_port": proxy_settings.get("proxy_port", None),
"proxy_username": proxy_settings.get("proxy_username", ""),
"proxy_password": proxy_settings.get("proxy_password", ""),
"proxy_type": proxy_settings.get("proxy_type", ""),
"proxy_rdns": proxy_settings.get("proxy_rdns")
}
self._validate_proxy_settings(proxy_settings)
return proxy_settings
def get_credential_by_id(self, account_id):
credential_settings = self._parse_conf(CREDENTIAL_SETTINGS)
for account in credential_settings:
if account.get('name', None) == account_id:
return account
self.log_error("Credential account with account id {} can not be found".format(account_id))
return None
def get_credential_by_username(self, username):
credential_settings = self._parse_conf(CREDENTIAL_SETTINGS)
for account in credential_settings:
if account.get('username', None) == username:
return account
self.log_error("Credential account with username {} can not be found".format(username))
return None
def get_customized_setting(self, key):
customized_settings = self._parse_conf(CUSTOMIZED_SETTINGS)
if customized_settings is None:
self.log_info("Customized setting is not set")
return None
if key not in customized_settings:
self.log_info("Customized key can not be found")
return None
customized_setting = customized_settings.get(key, None)
if customized_setting is None:
self.log_error("Cannot find customized setting with key %s" % key)
return customized_setting
def _validate_proxy_settings(self, proxy_settings):
if proxy_settings:
if proxy_settings.get('proxy_url') == "":
raise Exception("Proxy host must not be empty!")
proxy_port = proxy_settings.get('proxy_port')
if proxy_port is None or not proxy_port.isdigit():
raise Exception("Proxy port must be a number!")
def _transform(self, value, field_type):
'''
        This method is only used when parsing customized global params from env.
        Only the checkbox type needs transforming; other types are extracted automatically when json.loads is applied.
:param value:
:param field_type: can be checkbox, text, password, dropdownlist, multi_dropdownlist, radiogroup
:return:
'''
if field_type == TYPE_CHECKBOX:
return utils.is_true(value)
elif field_type in ALL_SETTING_TYPES:
return value
else:
raise Exception("Type of this customized setting is corrupted. Value: {}, type: {}"
.format(value, field_type))
'''
    # The following methods are used by AoB internally;
    # users should not call them directly.
    # These methods return a structure similar to the ucc libs.
the output of config is like
{
"account": [
{
"username": "admin",
"credential": "a",
"name": "ddddd",
"disabled": false
}
]
}
the output of settings is like
{
"settings": [
{
"additional_parameters": {
"checkbox": "1",
"text": "msn",
"disabled": false
}
},
{
"proxy": {
"proxy_type": "http",
"proxy_port": "9999",
"proxy_url": "localhost",
"proxy_rdns": "1",
"disabled": false,
"proxy_password": "a",
"proxy_username": "admin",
"proxy_enabled": "1"
}
},
{
"logging": {
"loglevel": "ERROR",
"disabled": false
}
}
]
}
'''
def get_ucc_log_setting(self):
return {UCC_LOGGING: self._parse_conf(LOG_SETTINGS)}
def get_ucc_proxy_setting(self):
p = dict(self.get_proxy_settings())
p[PROXY_ENABLE_KEY] = True if p else False
return {
UCC_PROXY: p
}
def get_ucc_customized_setting(self):
customized_settings = self._parse_conf(CUSTOMIZED_SETTINGS)
if customized_settings:
return {
UCC_CUSTOMIZED: customized_settings
}
else:
return {}
# account belongs to the configs
def get_ucc_account_config(self):
return {
UCC_CREDENTIAL: self._parse_conf(CREDENTIAL_SETTINGS)
}
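# --- Illustrative sketch (hypothetical uri/session_key): in AoB test mode
# the util reads settings from the environment, per _parse_conf_from_env
# above.
#   import json, os
#   os.environ['AOB_TEST'] = 'true'
#   os.environ['global_settings'] = json.dumps(
#       {'log_settings': {'log_level': 'DEBUG'}})
#   Setup_Util(uri, session_key).get_log_level()  # -> 'DEBUG'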
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunk_aoblib/setup_util.py
|
Python
|
isc
| 13,234
|
# -*- coding: utf-8 -*-
import abc
import math
import six
import sqlalchemy as sa
from marshmallow_sqlalchemy.convert import ModelConverter
from marshmallow_pagination import pages
converter = ModelConverter()
def convert_value(row, attr):
field = converter._get_field_class_for_property(attr.property)
value = getattr(row, attr.key)
return field()._serialize(value, None, None)
class BasePaginator(six.with_metaclass(abc.ABCMeta, object)):
def __init__(self, cursor, per_page, count=None):
self.cursor = cursor
self.count = count or self._count()
self.per_page = per_page or self.count
def _count(self):
return self.cursor.count()
@abc.abstractproperty
def page_type(self):
pass
@property
def pages(self):
if self.per_page:
return int(math.ceil(self.count / self.per_page))
return 0
@abc.abstractproperty
def get_page(self):
pass
class OffsetPaginator(BasePaginator):
"""Paginator based on offsets and limits. Not performant for large result sets.
"""
page_type = pages.OffsetPage
def get_page(self, page, eager=True):
offset, limit = self.per_page * (page - 1), self.per_page
return self.page_type(self, page, self._fetch(offset, limit, eager=eager))
def _fetch(self, offset, limit, eager=True):
offset += (self.cursor._offset or 0)
if self.cursor._limit:
limit = min(limit, self.cursor._limit - offset)
query = self.cursor.offset(offset).limit(limit)
return query.all() if eager else query
class SeekPaginator(BasePaginator):
"""Paginator using keyset pagination for performance on large result sets.
See http://use-the-index-luke.com/no-offset for details.
"""
page_type = pages.SeekPage
def __init__(self, cursor, per_page, index_column, sort_column=None, count=None):
self.index_column = index_column
self.sort_column = sort_column
super(SeekPaginator, self).__init__(cursor, per_page, count=count)
def get_page(self, last_index=None, sort_index=None, eager=True):
limit = self.per_page
return self.page_type(self, self._fetch(last_index, sort_index, limit, eager=eager))
def _fetch(self, last_index, sort_index=None, limit=None, eager=True):
cursor = self.cursor
direction = self.sort_column[1] if self.sort_column else sa.asc
lhs, rhs = (), ()
if sort_index is not None:
lhs += (self.sort_column[0], )
rhs += (sort_index, )
if last_index is not None:
lhs += (self.index_column, )
rhs += (last_index, )
lhs = sa.tuple_(*lhs)
rhs = sa.tuple_(*rhs)
if rhs.clauses:
filter = lhs > rhs if direction == sa.asc else lhs < rhs
cursor = cursor.filter(filter)
query = cursor.order_by(direction(self.index_column)).limit(limit)
return query.all() if eager else query
def _get_index_values(self, result):
"""Get index values from last result, to be used in seeking to the next
page. Optionally include sort values, if any.
"""
ret = {'last_index': convert_value(result, self.index_column)}
if self.sort_column:
key = 'last_{0}'.format(self.sort_column[0].key)
ret[key] = convert_value(result, self.sort_column[0])
return ret
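
# --- Usage sketch (not part of the original module) ---
# Illustrative only, assuming a SQLAlchemy session, a hypothetical ``User``
# model whose primary key ``User.id`` is indexed, and that the page objects
# expose their rows as ``results``:
#
#   offset_paginator = OffsetPaginator(session.query(User), per_page=25)
#   page2 = offset_paginator.get_page(2)      # rows 26-50 via OFFSET/LIMIT
#
#   seek_paginator = SeekPaginator(session.query(User), per_page=25,
#                                  index_column=User.id)
#   first = seek_paginator.get_page()         # first page, no OFFSET needed
#   after = first.results[-1].id
#   second = seek_paginator.get_page(last_index=after)   # WHERE id > :after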
|
jmcarp/marshmallow-pagination
|
marshmallow_pagination/paginators.py
|
Python
|
mit
| 3,436
|
import sys
import uos
try:
uos.VfsFat
except AttributeError:
print("SKIP")
sys.exit()
class RAMFS:
SEC_SIZE = 512
def __init__(self, blocks):
self.data = bytearray(blocks * self.SEC_SIZE)
def readblocks(self, n, buf):
#print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
for i in range(len(buf)):
buf[i] = self.data[n * self.SEC_SIZE + i]
def writeblocks(self, n, buf):
#print("writeblocks(%s, %x)" % (n, id(buf)))
for i in range(len(buf)):
self.data[n * self.SEC_SIZE + i] = buf[i]
def ioctl(self, op, arg):
#print("ioctl(%d, %r)" % (op, arg))
if op == 4: # BP_IOCTL_SEC_COUNT
return len(self.data) // self.SEC_SIZE
if op == 5: # BP_IOCTL_SEC_SIZE
return self.SEC_SIZE
try:
bdev = RAMFS(48)
except MemoryError:
print("SKIP")
sys.exit()
uos.VfsFat.mkfs(bdev)
assert b"FOO_FILETXT" not in bdev.data
assert b"hello!" not in bdev.data
vfs = uos.VfsFat(bdev, "/ramdisk")
f = vfs.open("foo_file.txt", "w")
f.write("hello!")
f.close()
f2 = vfs.open("foo_file.txt")
print(f2.read())
f2.close()
assert b"FOO_FILETXT" in bdev.data
assert b"hello!" in bdev.data
|
danicampora/micropython
|
tests/extmod/vfs_fat_ramdisk.py
|
Python
|
mit
| 1,230
|
# -*- encoding: utf-8 -*-
import StringIO
import xlsxwriter
"""
Web app module.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
def generate_cwr_report_excel(cwr):
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output, {'in_memory': True})
_generate_cwr_report_excel_general(workbook, cwr)
for group in cwr.transmission.groups:
_generate_cwr_report_excel_group(workbook, group)
workbook.close()
output.seek(0)
return output.read()
def _generate_cwr_report_excel_group(workbook, group):
results_sheet = workbook.add_worksheet(group.group_header.transaction_type)
bold = workbook.add_format({'bold': 1})
row = 1
col = 0
for transaction in group.transactions:
for record in transaction:
results_sheet.write(row, col + 1, record.record_type)
row += 1
def _generate_cwr_report_excel_general(workbook, cwr):
results_sheet = workbook.add_worksheet('General info')
bold = workbook.add_format({'bold': 1})
header = cwr.transmission.header
trailer = cwr.transmission.trailer
row = 1
col = 0
results_sheet.write(row, col, 'Sender ID', bold)
results_sheet.write(row, col + 1, header.sender_id)
row += 1
results_sheet.write(row, col, 'Sender Name', bold)
results_sheet.write(row, col + 1, header.sender_name)
row += 1
    results_sheet.write(row, col, 'Sender Type', bold)
    # was header.sender_name (an apparent copy-paste slip); assumes the header
    # object exposes a sender_type attribute
    results_sheet.write(row, col + 1, header.sender_type)
row += 1
row += 1
results_sheet.write(row, col, 'Creation Date', bold)
results_sheet.write(row, col + 1, header.creation_date_time)
row += 1
results_sheet.write(row, col, 'Transmission Date', bold)
results_sheet.write(row, col + 1, header.transmission_date)
row += 1
row += 1
results_sheet.write(row, col, 'EDI Standard', bold)
results_sheet.write(row, col + 1, header.edi_standard)
row += 1
results_sheet.write(row, col, 'Character Set', bold)
results_sheet.write(row, col + 1, header.character_set)
row += 1
row += 1
results_sheet.write(row, col, 'Counts', bold)
row += 1
results_sheet.write(row, col, 'Groups', bold)
results_sheet.write(row, col + 1, trailer.group_count)
row += 1
results_sheet.write(row, col, 'Transactions', bold)
results_sheet.write(row, col + 1, trailer.transaction_count)
row += 1
results_sheet.write(row, col, 'Records', bold)
results_sheet.write(row, col + 1, trailer.record_count)
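
# --- Usage sketch (not part of the original module) ---
# Illustrative only, assuming ``cwr`` is an already-parsed CWR transmission
# object (e.g. produced by a CWR parsing library) exposing the
# ``transmission.header/trailer/groups`` attributes used above:
#
#   xlsx_bytes = generate_cwr_report_excel(cwr)
#   with open('cwr_report.xlsx', 'wb') as f:
#       f.write(xlsx_bytes)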
|
weso/CWR-WebClient
|
cwr_webclient/report/cwr.py
|
Python
|
mit
| 2,566
|
# -*- coding: utf-8 -*-
'''
Created on 2015-08-24
@author: hustcc
'''
import datetime
import time
# Current time; usable for a MySQL DATETIME column
def now_datetime_string():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def now_datetime():
return datetime.datetime.now()
def now_date_string():
return datetime.datetime.now().strftime("%Y-%m-%d")
def now_timestamp():
return time.time()
if __name__ == '__main__':
print(now_datetime())
print(now_timestamp())
print(now_date_string())
|
NetEaseGame/git-webhook
|
app/utils/DateUtil.py
|
Python
|
mit
| 535
|
#!/usr/bin/env python
import numpy as np
import pycuda.driver as drv
from neon.backends.nervanagpu import NervanaGPU
from openai_gemm import matmul
ng = NervanaGPU()
print drv.Context.get_current().get_device().name()
config = (
# m, n, k, AT, BT (row order)
( 16, 1760, 1760, False, False),
( 32, 1760, 1760, False, False),
( 64, 1760, 1760, False, False),
( 128, 1760, 1760, False, False),
( 7000, 1760, 1760, False, False),
( 16, 2048, 2048, False, False),
( 32, 2048, 2048, False, False),
( 64, 2048, 2048, False, False),
( 128, 2048, 2048, False, False),
( 7000, 2048, 2048, False, False),
( 16, 2560, 2560, False, False),
( 32, 2560, 2560, False, False),
( 64, 2560, 2560, False, False),
( 128, 2560, 2560, False, False),
( 7000, 2560, 2560, False, False),
( 16, 4096, 4096, False, False),
( 32, 4096, 4096, False, False),
( 64, 4096, 4096, False, False),
( 128, 4096, 4096, False, False),
( 7000, 4096, 4096, False, False),
( 16, 1760, 1760, False, True),
( 32, 1760, 1760, False, True),
( 64, 1760, 1760, False, True),
( 128, 1760, 1760, False, True),
( 7000, 1760, 1760, False, True),
( 16, 2048, 2048, False, True),
( 32, 2048, 2048, False, True),
( 64, 2048, 2048, False, True),
( 128, 2048, 2048, False, True),
( 7000, 2048, 2048, False, True),
( 16, 2560, 2560, False, True),
( 32, 2560, 2560, False, True),
( 64, 2560, 2560, False, True),
( 128, 2560, 2560, False, True),
( 7000, 2560, 2560, False, True),
( 16, 4096, 4096, False, True),
( 32, 4096, 4096, False, True),
( 64, 4096, 4096, False, True),
( 128, 4096, 4096, False, True),
( 7000, 4096, 4096, False, True),
( 7133, 1760, 1760, True , False),
( 7133, 2048, 2048, True , False),
( 7133, 2560, 2560, True , False),
( 7133, 4096, 4096, True , False),
( 9124, 5124, 1760, False, False),
( 9124, 5124, 2048, False, False),
( 9124, 5124, 2560, False, False),
( 9124, 5124, 4096, False, False),
( 9124, 5124, 1760, False, True),
( 9124, 5124, 2048, False, True),
( 9124, 5124, 2560, False, True),
( 9124, 5124, 4096, False, True),
( 8457, 35, 1760, False, False),
( 8457, 35, 2048, False, False),
( 8457, 35, 2560, False, False),
( 8457, 35, 4096, False, False),
( 8457, 35, 1760, False, True),
( 8457, 35, 2048, False, True),
( 8457, 35, 2560, False, True),
( 8457, 35, 4096, False, True),
( 16, 7680, 2560, False, False),
( 32, 7680, 2560, False, False),
( 64, 7680, 2560, False, False),
( 128, 7680, 2560, False, False),
( 16, 7680, 2560, False, True),
( 32, 7680, 2560, False, True),
( 64, 7680, 2560, False, True),
( 128, 7680, 2560, False, True),
( 16, 3072, 1024, False, False),
( 32, 3072, 1024, False, False),
( 64, 3072, 1024, False, False),
( 128, 3072, 1024, False, False),
( 16, 3072, 1024, False, True),
( 32, 3072, 1024, False, True),
( 64, 3072, 1024, False, True),
( 128, 3072, 1024, False, True),
( 7435, 3072, 1024, True , False),
( 5481, 7680, 2560, True , False),
# (60000, 32, 32, True , False),
# (60000, 256, 256, True , False),
# ( 4096, 4096, 32, True , False),
# ( 3456, 3456, 32, True , False),
# ( 896, 896, 32, True , False),
)
print "| M| N| K| Op|OpenAI_32|cuBLAS_32|ratio_32|OpenAI_16|cuBLAS_16|ratio_16|"
print "|------|------|------|---|---------|---------|--------|---------|---------|--------|"
for m, n, k, at, bt in config:
dimA = (k,m) if at else (m,k)
dimB = (n,k) if bt else (k,n)
dimC = (m,n)
opA = 'T' if at else 'N'
opB = 'T' if bt else 'N'
op = opA + opB
dtype_data = list()
for dtype in ( np.float32, np.float16 ): #np.float32, np.float16,
A = ng.empty(dimA, dtype=dtype)
B = ng.empty(dimB, dtype=dtype)
C = ng.empty(dimC, dtype=dtype)
if at: A = A.T
if bt: B = B.T
data = matmul(A, B, C, bench=True)
# if dtype is np.float16:
# print ""
# for d in sorted(data):
# print "%7.3f %5.0f %22s %5d" % d
cublas = data.pop()
openai = sorted(data)[0]
text = "%9.0f|%9.0f|%8.1f" % (openai[1], cublas[1], openai[1] / cublas[1])
dtype_data.append(text)
print "|%6d|%6d|%6d|%3s|%s|" % (m, n, k, op, "|".join(dtype_data))
|
ekelsen/openai-gemm
|
benchmark.py
|
Python
|
mit
| 4,622
|
#!/usr/bin/env python
#
# XRootD
#
# XRootD package installer.
#
# Author M Mottram - 15/04/2016 <m.mottram@qmul.ac.uk> : First revision
#######################################################################
import localpackage
import os
import stat
import shutil
class XRootD(localpackage.LocalPackage):
""" Base XRootD installer."""
def __init__(self, name, system, version):
""" Initialise the XRootD package."""
super(XRootD, self).__init__(name, system)
self._version = version
def get_tar_name(self):
""" Return the tarball name"""
return "xrootd-%s.tar.gz" % self._version
# Functions to override
def get_dependencies(self):
""" Return the dependency names as a list of names."""
return ["openssl-dev", "cmake-2.8.12"]
def _is_downloaded(self):
""" Check the tarball has been downloaded"""
return self._system.file_exists(self.get_tar_name())
def _is_installed(self):
""" Check the script has been marked as executable."""
return self._system.file_exists("xrootd", os.path.join(self.get_install_path(), "bin")) and \
bool(os.stat(os.path.join(self.get_install_path(), "bin/xrootd")).st_mode & stat.S_IXUSR)
def _download(self):
""" Download XRootD"""
self._system.download_file("http://xrootd.org/download/v%s/%s" % (self._version,
self.get_tar_name()))
def _install(self):
""" Mark the script as executable"""
source_path = os.path.join(self._system.get_install_path(), "%s-source" % self._name)
self._system.untar_file(self.get_tar_name(), source_path, 1)
if not os.path.exists(self.get_install_path()):
os.makedirs(self.get_install_path())
cmake_opts = [source_path,
"-DCMAKE_INSTALL_PREFIX=%s" % self.get_install_path(),
"-DENABLE_PERL=FALSE"]
cmake_command = "cmake"
if self._dependency_paths["cmake-2.8.12"] is not None:
cmake_command = "%s/bin/cmake" % self._dependency_paths["cmake-2.8.12"]
self._system.configure_command(cmake_command, cmake_opts, self.get_install_path(),
config_type="xrootd")
self._system.execute_command("make", [], self.get_install_path())
self._system.execute_command("make", ["install"], self.get_install_path())
shutil.rmtree(source_path)
|
mjmottram/snoing
|
packages/xrootd.py
|
Python
|
mit
| 2,509
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Hello Flask
Latest version can be found at https://github.com/dakside/pydemo
References:
Python documentation:
https://docs.python.org/
Flask documentation:
http://flask.pocoo.org/
PEP 0008 - Style Guide for Python Code
https://www.python.org/dev/peps/pep-0008/
PEP 0257 - Python Docstring Conventions:
https://www.python.org/dev/peps/pep-0257/
@author: Le Tuan Anh <tuananh.ke@gmail.com>
'''
# Copyright (c) 2016, Le Tuan Anh <tuananh.ke@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "Le Tuan Anh <tuananh.ke@gmail.com>"
__copyright__ = "Copyright 2016, pydemo"
__credits__ = []
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Le Tuan Anh"
__email__ = "<tuananh.ke@gmail.com>"
__status__ = "Prototype"
########################################################################
from flask import Flask
from flask import render_template
########################################################################
app = Flask(__name__)
@app.route('/')
def rootpage():
title = "My First Flask App that uses template"
content = "<h1>Flask is beautiful</h1>Lorem ipsum dolor sit amet"
return render_template('layout.html', title=title, content=content)
if __name__ == '__main__':
app.debug = True # To show error message
app.run()
# app.run(host='0.0.0.0') to listen on all public IPs
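
# --- Template sketch (not part of the original file) ---
# The view above renders 'layout.html'. A minimal templates/layout.html that
# matches the variables it passes could look like this (an assumption, not
# the original template):
#
#   <!doctype html>
#   <title>{{ title }}</title>
#   <body>{{ content|safe }}</body>
#
# The ``|safe`` filter is needed because ``content`` carries raw HTML and
# Flask's Jinja environment autoescapes by default.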
|
dakside/pydemo
|
flask/hellojinja/app.py
|
Python
|
mit
| 2,459
|
"""
Functional test
Anonymous Epic
Storyboard is defined within the comments of the program itself
"""
import unittest
from flask import url_for
from biblib.views.http_errors import NO_PERMISSION_ERROR
from biblib.tests.stubdata.stub_data import UserShop, LibraryShop
from biblib.tests.base import TestCaseDatabase, MockSolrBigqueryService, MockEndPoint
class TestAnonymousEpic(TestCaseDatabase):
"""
Base class used to test the Big Share Admin Epic
"""
def test_anonymous_epic(self):
"""
Carries out the epic 'Anonymous', where a user tries to access a
        private library and also a public library. The user also (artificially)
tries to access any other endpoints that do not have any scopes set
:return: no return
"""
# Define two sets of stub data
# user: who makes a library (e.g., Dave the librarian)
# anon: someone using the BBB client
user_anonymous = UserShop()
user_dave = UserShop()
library_dave_private = LibraryShop(public=False)
library_dave_public = LibraryShop(public=True)
# Dave makes two libraries
# One private library
# One public library
url = url_for('userview')
response = self.client.post(
url,
data=library_dave_private.user_view_post_data_json,
headers=user_dave.headers
)
library_id_private = response.json['id']
self.assertEqual(response.status_code, 200, response)
response = self.client.post(
url,
data=library_dave_public.user_view_post_data_json,
headers=user_dave.headers
)
library_id_public = response.json['id']
self.assertEqual(response.status_code, 200, response)
# Anonymous user tries to access the private library. But cannot.
url = url_for('libraryview', library=library_id_private)
with MockSolrBigqueryService(number_of_bibcodes=0) as BQ, \
MockEndPoint([user_dave, user_anonymous]) as EP:
response = self.client.get(
url,
headers=user_anonymous.headers
)
self.assertEqual(response.status_code, NO_PERMISSION_ERROR['number'])
self.assertEqual(response.json['error'], NO_PERMISSION_ERROR['body'])
# Anonymous user tries to access the public library. And can.
url = url_for('libraryview', library=library_id_public)
with MockSolrBigqueryService(number_of_bibcodes=0) as BQ, \
MockEndPoint([user_dave, user_anonymous]) as EP:
response = self.client.get(
url,
headers=user_anonymous.headers
)
self.assertEqual(response.status_code, 200)
self.assertIn('documents', response.json)
def test_scopes(self):
"""
        Separately test the number of end points that are scopeless. This will
        only fail during staging, when the scopes are all set to be open. In
        the production system, there is only one end point that will be
        scopeless.
"""
response = self.client.get('/resources')
end_points = []
for end_point in response.json.keys():
if len(response.json[end_point]['scopes']) == 0:
end_points.append(end_point)
self.assertEqual(1, len(end_points))
self.assertEqual('/libraries/<string:library>', end_points[0])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
adsabs/biblib-service
|
biblib/tests/functional_tests/test_anonymous_epic.py
|
Python
|
mit
| 3,528
|
"""
neighs information
------------------
Auxiliary class to manage the information of the neighbourhood
returned by the retrievers.
Due to the complexity of the structure it is convenient to put it all
together in a single class and manage in a centralized way all the different
interactions with neighs_info in the whole package.
possible inputs
---------------
* integer {neighs}
* list of integers {neighs}
* list of lists of integers {neighs for some iss}
* list of lists of lists of integers {neighs for some iss and ks}
* numpy array 1d, 2d, 3d {neighs}
* tuple of neighs
standard storing
----------------
- neighs:
- array 3d (ks, iss, neighs)
- lists [ks][iss][neighs]
- list arrays: [ks](iss, neighs), [ks][iss](neighs)
- sp_relative_pos:
- array 3d (ks, iss, neighs)
- lists [ks][iss][neighs]
- list arrays [ks](iss, neighs), [ks][iss](neighs)
standard output
---------------
- neighs:
- array 3d (ks, iss, neighs)
- lists [ks][iss][neighs]
- list arrays: [ks](iss, neighs), [ks][iss](neighs)
Parameters
----------
staticneighs: all the ks have the same information. They are static.
It is useful information for the getters. The information is stored with
deep=2.
staticneighs_set: same as staticneighs, but recording how the information was
    actually set. If True it was set with deep=2; if False, with deep=3.
constant_neighs: all the iss have the same number of neighs for all ks.
level: the format level expected. First one is only neighs, second one has
different iss and the third one different ks.
_kret: maximum number of perturbations of the system. It could be useful for
    open systems expressed in a staticneighs way to find errors or delimit
    output.
n: maximum number of id of elements retrieved.
"""
import numpy as np
from copy import deepcopy
import warnings
warnings.filterwarnings("always")
from auxiliar_joinning_neighs import join_neighsinfo_AND_static_dist,\
join_neighsinfo_OR_static_dist, join_neighsinfo_XOR_static_dist,\
join_neighsinfo_AND_static_notdist, join_neighsinfo_OR_static_notdist,\
join_neighsinfo_XOR_static_notdist, join_neighsinfo_AND_notstatic_dist,\
join_neighsinfo_OR_notstatic_dist, join_neighsinfo_XOR_notstatic_dist,\
join_neighsinfo_AND_notstatic_notdist,\
join_neighsinfo_OR_notstatic_notdist, join_neighsinfo_XOR_notstatic_notdist
pos_structure = [None, 'raw', 'tuple', 'tuple_only', 'tuple_tuple',
'list_tuple_only', 'tuple_list_tuple']
pos_levels = [None, 0, 1, 2, 3]
pos_format_set_iss = [None, "general", "null", "int", "list"]
pos_types_neighs = [None, "general", "list", "array", "slice"]
pos_types_rel_pos = [None, "general", "list", "array"]
inttypes = [int, np.int32, np.int64]
class Neighs_Info:
"""Class to store, move and manage the neighbourhood information retrieved.
"""
type_ = "pySpatialTools.Neighs_Info"
def __init__(self, constant_neighs=False, kret=1, format_structure=None,
n=0, format_get_info=None, format_get_k_info=None,
format_set_iss=None, staticneighs=None, ifdistance=None,
type_neighs=None, type_sp_rel_pos=None, format_level=None):
"""The instanciation of the container object for all the neighbourhood
information.
Parameters
----------
constant_neighs: boolean (default=False)
if there are always the same number of neighs across all the
possible neighs.
kret: int (default=1)
the total perturbations applied (maximum k size).
format_structure: str, optional (default=None)
the type of structure in which we are going to set the
neighbourhood information.
n: int (default=0)
the maximum number of possible neighs code.
format_get_info: str optional (default=None)
in which format the information is returned to the user.
format_get_k_info: str optional (default=None)
in which format of the ks we set.
format_set_iss: str optional (default=None)
in which format of elements iss we set.
staticneighs: boolean (default=None)
if there is constant neighbourhood across the perturbations.
ifdistance: boolean (default=None)
if we set the distance or the relative position information.
type_neighs: str optional (default=None)
the type of object describing the neighs of the neighbourhood.
type_sp_rel_pos: str optional (default=None)
the type of object describing the relative position of the
neighbourhood.
format_level: int (default=None)
the level in which the information of the neighborhood will be set.
"""
## Initialize class
self._set_init()
## Extra info
self._constant_neighs = constant_neighs
# Constrain information
self._kret = kret
self._n = n
# Setting and formatting information
self.format_set_info = format_structure, type_neighs, type_sp_rel_pos,\
format_set_iss
self.format_get_info = format_get_info, format_get_k_info
## Formatters
# Global information
self._format_globalpars(staticneighs, ifdistance, format_level)
# Format setters
self._format_setters(format_structure, type_neighs,
type_sp_rel_pos, format_set_iss)
# Format getters
self._format_getters(format_get_info, format_get_k_info)
# Format joining
self._format_joining_functions()
def __iter__(self):
"""Get information sequentially.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i`.
sp_relpos: list or np.ndarray
the relative position information for each element `i`.
ks: list or np.ndarray
the perturbations indices associated with the returned information.
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
for i in range(len(self.ks)):
yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\
[self.ks[i]], self.iss
def empty(self):
"""If it is empty."""
return not self.any()
def any(self):
"""If it is not empty."""
boolean = True
if type(self.idxs) == np.ndarray:
boolean = all(self.idxs.shape)
elif type(self.idxs) == list:
sh = np.array(self.idxs).shape
if len(sh) >= 2:
boolean = np.all(sh)
return boolean
def reset(self):
"""Reset all the class to empty all the neighbourhood information."""
self._set_init()
def copy(self):
"""Deep copy of the container."""
return deepcopy(self)
@property
def shape(self):
"""Return the number of indices, neighbours and ks considered. For
irregular cases the neighbours number is set as None.
Returns
-------
sh0: int
the number of elements we want to get their neighbourhood.
sh1: int
            the number of neighs each element has, if it is constant.
sh2: int
the number of perturbations applied.
"""
if not self._setted:
return None, None, None
if type(self.idxs) == slice:
sh0 = len(self.iss)
step = self.idxs.step
            # the slice length itself; the ks count is reported separately
            # below as sh2 (the original overwrote sh1 with len(self.ks),
            # which looked like a leftover line)
            sh1 = (self.idxs.stop + step - 1 - self.idxs.start) / step
elif type(self.idxs) == np.ndarray:
sh0 = 0 if self.idxs is None else len(self.idxs)
sh1 = 0 if self.idxs is None else self.idxs.shape[1]
elif type(self.idxs) == list:
sh0 = len(self.idxs)
sh1 = len(self.idxs[0])
sh2 = len(self.ks) if self.ks is not None else None
return sh0, sh1, sh2
###########################################################################
############################ GENERAL SETTINGS #############################
###########################################################################
def set_information(self, k_perturb=0, n=0):
"""Set specific global information.
Parameters
----------
kret: int (default=0)
the total perturbations applied (maximum k size).
n: int (default=0)
the maximum number of possible neighs code.
"""
self._n = n
self._kret = k_perturb
def _set_ks_static(self, ks):
"""External set ks for staticneighs.
Parameters
----------
ks: list or np.ndarray
the perturbations indices associated with the stored information.
"""
self.ks = ks
if np.max(self.ks) > self._kret:
self._kret = np.max(self.ks)
def _set_ks_dynamic(self, ks):
"""External set ks for non-staticneighs.
Parameters
----------
ks: list or np.ndarray
the perturbations indices associated with the stored information.
"""
assert(len(ks) == len(self.idxs))
self.ks = ks
if np.max(self.ks) > self._kret:
self._kret = np.max(self.ks)
def direct_set(self, neighs, sp_relative_pos=None):
"""Direct set of neighs_info.
Parameters
----------
neighs: list or np.ndarray
the neighs information for each element `i` and for each
perturbation `k`.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
self.idxs = neighs
self.sp_relative_pos = sp_relative_pos
self.assert_goodness()
def reset_functions(self):
"""Reset the function regarding the parameters set."""
if type(self.idxs) == list:
type_neighs = 'list'
elif type(self.idxs) == slice:
type_neighs = 'slice'
elif type(self.idxs) == np.ndarray:
type_neighs = 'array'
if type(self.sp_relative_pos) == list:
type_sp_rel_pos = 'list'
elif type(self.sp_relative_pos) == np.ndarray:
type_sp_rel_pos = 'array'
else:
type_sp_rel_pos = None
self.set_types(type_neighs, type_sp_rel_pos)
def reset_structure(self, format_structure):
"""Reset structure regarding the parameters set and the
`format_structure` input.
Parameters
----------
format_structure: str, optional
the type of structure in which we are going to set the
neighbourhood information.
"""
assert(format_structure in pos_structure)
_, aux1, aux2, aux3 = self.format_set_info
self.format_set_info = format_structure, aux1, aux2, aux3
self.reset_format()
def reset_level(self, format_level):
"""Reset level regarding the parameters set and the new input.
Parameters
----------
format_level: int
the level in which the information of the neighborhood will be set.
"""
assert(format_level in pos_levels)
self.level = format_level
self.reset_format()
def reset_format(self):
"""Reset format regarding the parameters set."""
## Formatters
self._format_setters(*self.format_set_info)
self._format_getters(*self.format_get_info)
self._format_joining_functions()
def set_types(self, type_neighs=None, type_sp_rel_pos=None):
"""Set type of objects in which the information will be given.
Parameters
----------
type_neighs: str optional (default=None)
the type of object describing the neighs of the neighbourhood.
type_sp_rel_pos: str optional (default=None)
the type of object describing the relative position of the
neighbourhood.
"""
## 1. Set set_sp_rel_pos
self.type_neighs, self.type_sp_rel_pos = type_neighs, type_sp_rel_pos
if self.ifdistance is False:
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
else:
self.get_sp_rel_pos = self._general_get_rel_pos
if self.level < 2:
self.get_sp_rel_pos = self._static_get_rel_pos
if type_sp_rel_pos is None or type_sp_rel_pos == 'general':
self.set_sp_rel_pos = self._general_set_rel_pos
elif type_sp_rel_pos == 'array':
if self.level is None:
self.set_sp_rel_pos = self._set_rel_pos_general_array
elif self.level == 0:
self.set_sp_rel_pos = self._set_rel_pos_dim
elif self.level == 1:
self.set_sp_rel_pos = self._array_only_set_rel_pos
elif self.level == 2:
self.set_sp_rel_pos = self._array_array_set_rel_pos
elif self.level == 3:
self.set_sp_rel_pos = self._array_array_array_set_rel_pos
elif type_sp_rel_pos == 'list':
if self.level is None:
self.set_sp_rel_pos = self._set_rel_pos_general_list
elif self.level == 0:
self.set_sp_rel_pos = self._set_rel_pos_dim
elif self.level == 1:
self.set_sp_rel_pos = self._list_only_set_rel_pos
elif self.level == 2:
self.set_sp_rel_pos = self._list_list_only_set_rel_pos
elif self.level == 3:
self.set_sp_rel_pos = self._list_list_set_rel_pos
## 2. Set set_neighs
if type_neighs is None or type_neighs == 'general':
self.set_neighs = self._general_set_neighs
elif type_neighs == 'array':
# Format get neighs
if self.staticneighs:
self.get_neighs = self._get_neighs_array_static
else:
self.get_neighs = self._get_neighs_array_dynamic
# Format set neighs
if self.level is None:
self.set_neighs = self._set_neighs_general_array
elif self.level == 0:
self.set_neighs = self._set_neighs_number
elif self.level == 1:
self.set_neighs = self._set_neighs_array_lvl1
elif self.level == 2:
self.set_neighs = self._set_neighs_array_lvl2
elif self.level == 3:
self.set_neighs = self._set_neighs_array_lvl3
elif type_neighs == 'list':
# Format get neighs
if self._constant_neighs:
if self.staticneighs:
self.get_neighs = self._get_neighs_array_static
else:
self.get_neighs = self._get_neighs_array_dynamic
else:
if self.staticneighs:
self.get_neighs = self._get_neighs_list_static
else:
self.get_neighs = self._get_neighs_list_dynamic
# Format set neighs
if self.level is None:
self.set_neighs = self._set_neighs_general_list
elif self.level == 0:
self.set_neighs = self._set_neighs_number
elif self.level == 1:
self.set_neighs = self._set_neighs_list_only
elif self.level == 2:
self.set_neighs = self._set_neighs_list_list
elif self.level == 3:
self.set_neighs = self._set_neighs_list_list_list
elif type_neighs == 'slice':
self.set_neighs = self._set_neighs_slice
self.get_neighs = self._get_neighs_slice
self.staticneighs_set = True
def set_structure(self, format_structure=None):
"""Set the structure in which the neighbourhood information will be
given.
Parameters
----------
format_structure: str, optional (default=None)
the type of structure in which we are going to set the
neighbourhood information.
"""
if format_structure is None:
self._set_info = self._set_general
elif format_structure == 'raw':
self._set_info = self._set_raw_structure
self.ifdistance = False
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
elif format_structure == 'tuple':
self._set_info = self._set_tuple_structure
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
elif format_structure == 'tuple_only':
self._set_info = self._set_tuple_only_structure
elif format_structure == 'tuple_k':
self._set_info = self._set_tuple_k_structure
elif format_structure == 'tuple_tuple':
self._set_info = self._set_tuple_tuple_structure
elif format_structure == 'list_tuple_only':
# assert(self.level == 2)
self._set_info = self._set_list_tuple_only_structure
self.staticneighs_set = False
if self.level != 2:
raise Exception("Not correct inputs.")
else:
self.level = 3
elif format_structure == 'tuple_list_tuple':
# assert(self.level == 2)
self._set_info = self._set_tuple_list_tuple_structure
self.staticneighs_set = False
if self.level != 2:
raise Exception("Not correct inputs.")
else:
self.level = 3
###########################################################################
################################# FORMATS #################################
###########################################################################
############################### Formatters ################################
###########################################################################
def _format_globalpars(self, staticneighs, ifdistance, format_level):
"""Global information non-mutable and mutable in order to force or keep
other information and functions.
Parameters
----------
staticneighs: boolean
if there is constant neighbourhood across the perturbations.
ifdistance: boolean
if we set the distance or the relative position information.
format_level: int
the level in which the information of the neighborhood will be set.
"""
## Basic information how it will be input neighs_info
self.level = format_level
## Global known information about relative position
self.ifdistance = ifdistance
## Global known information about get information
self.staticneighs = staticneighs
## Setting changable information about static neighs setting
self.staticneighs_set = None
if self.level is None:
self.staticneighs_set = None
elif self.level <= 2:
self.staticneighs_set = True
if self.level == 3:
self.staticneighs_set = False
def _format_setters(self, format_structure, type_neighs=None,
type_sp_rel_pos=None, format_set_iss=None):
"""Format the setter functions.
Parameters
----------
format_structure: str, optional
the type of structure in which we are going to set the
neighbourhood information.
type_neighs: str optional (default=None)
the type of object describing the neighs of the neighbourhood.
type_sp_rel_pos: str optional (default=None)
the type of object describing the relative position of the
neighbourhood.
format_set_iss: str optional (default=None)
in which format of elements iss we set.
"""
## 1. Format structure
self.set_structure(format_structure)
## 2. Set types
self.set_types(type_neighs, type_sp_rel_pos)
## 3. Post-format
if self._constant_neighs:
self._main_postformat = self._cte_postformat
else:
self._main_postformat = self._null_postformat
self._iss_postformat = self._assert_iss_postformat
self._ks_postformat = self._assert_ks_postformat
if self._constant_neighs and type_neighs != 'slice':
self._idxs_postformat = self._idxs_postformat_array
else:
self._idxs_postformat = self._idxs_postformat_null
## 4. Format iss
self._format_set_iss(format_set_iss)
## 5. Format set ks
if self.staticneighs:
self.set_ks = self._set_ks_static
else:
self.set_ks = self._set_ks_dynamic
## 6. General set
self.set = self._general_set
def _format_set_iss(self, format_set_iss=None):
"""Format the setter iss function.
Parameters
----------
format_set_iss: str optional (default=None)
in which format of elements iss we set.
"""
## Format iss
if format_set_iss is None or format_set_iss == 'general':
self._set_iss = self._general_set_iss
elif format_set_iss == 'null':
self._set_iss = self._null_set_iss
elif format_set_iss == 'int':
self._set_iss = self._int_set_iss
elif format_set_iss == 'list':
self._set_iss = self._list_set_iss
def _format_getters(self, format_get_info=None, format_get_k_info=None):
"""Function to program this class according to the stored idxs.
Parameters
----------
format_get_info: str optional (default=None)
in which format the information is returned to the user.
format_get_k_info: str optional (default=None)
in which format of the ks we set.
"""
## Get info setting
if format_get_k_info is None:
self.get_k = self._general_get_k
elif format_get_k_info == "default":
self.get_k = self._default_get_k
elif format_get_k_info == "general":
self.get_k = self._general_get_k
elif format_get_k_info == "list":
self.get_k = self._list_get_k
elif format_get_k_info == "integer":
self.get_k = self._integer_get_k
## Get information setting
if format_get_info is None:
self.get_information = self._general_get_information
elif format_get_info == "default":
self.get_information = self._default_get_information
elif format_get_info == "general":
self.get_information = self._general_get_information
## Other getters
if self.staticneighs:
self.get_copy_iss = self._staticneighs_get_copy_iss
self.get_copy_iss_by_ind = self._staticneighs_get_copy_iss_by_ind
else:
self.get_copy_iss = self._notstaticneighs_get_copy_iss
self.get_copy_iss_by_ind =\
self._notstaticneighs_get_copy_iss_by_ind
def _postformat(self):
"""Format properly."""
self._main_postformat()
self._iss_postformat()
self._assert_ks_postformat()
self._idxs_postformat()
def _cte_postformat(self):
"""To array because of constant neighs."""
# if type(self.idxs) == list:
# self.idxs = np.array(self.idxs)
if self.sp_relative_pos is not None:
if type(self.sp_relative_pos) == list:
self.sp_relative_pos = np.array(self.sp_relative_pos)
def _assert_iss_postformat(self):
"""Assert if the iss is correctly formatted, if not, format properly.
"""
if type(self.idxs) in [list, np.ndarray]:
# print self.idxs, self.iss, self.set_neighs
if self.staticneighs:
### WARNING: Redefinition of iss.
if len(self.idxs) != len(self.iss):
if len(self.idxs[0]) == len(self.iss):
self.idxs = self.idxs[0]
else:
self.iss = range(len(self.idxs))
else:
assert(all([len(k) == len(self.idxs[0]) for k in self.idxs]))
def _assert_ks_postformat(self):
"""Assert proper postformatting for the ks."""
if type(self.idxs) in [list, np.ndarray]:
if self.ks is None:
if self.staticneighs:
pass
else:
self.ks = range(len(self.idxs))
if self.staticneighs:
pass
else:
# print self.ks, self.idxs, self.set_neighs, self.set_sp_rel_pos
assert(len(self.ks) == len(self.idxs))
## Defining functions
if self.sp_relative_pos is not None and self.staticneighs:
self.get_sp_rel_pos = self._static_get_rel_pos
elif not self.staticneighs:
if type(self.sp_relative_pos) == list:
self.get_sp_rel_pos = self._dynamic_rel_pos_list
else:
self.get_sp_rel_pos = self._dynamic_rel_pos_array
if self.sp_relative_pos is None:
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
## Ensure correct k_ret
        if self.ks is not None and np.max(self.ks) > self._kret:
            self._kret = np.max(self.ks)
# def _array_ele_postformat(self, ele):
# return np.array(ele)
#
# def _null_ele_postformat(self, ele):
# return ele
def _null_postformat(self):
"""Not change anything."""
pass
def _idxs_postformat_array(self):
"""The neighs information postformatting. It format in an array-form
the neighs stored in the instance.
"""
self.idxs = np.array(self.idxs)
def _idxs_postformat_null(self):
"""The neighs information postformatting. It doesnt change the format.
"""
pass
###########################################################################
################################## SETS ###################################
###########################################################################
########################### Setters candidates ############################
###########################################################################
def _general_set(self, neighs_info, iss=None):
"""General set.
Parameters
----------
neighs_info: int, float, slice, np.ndarray, list, tuple or instance
the neighbourhood information given with the proper indicated
structure.
iss: list or np.ndarray (default=None)
the indices of the elements we stored their neighbourhood.
"""
## Set function
self._preset(neighs_info, iss)
## Post-set functions
self._postset()
self.assert_goodness()
def _preset(self, neighs_info, iss=None):
"""Set the class.
Parameters
----------
neighs_info: int, float, slice, np.ndarray, list, tuple or instance
the neighbourhood information given with the proper indicated
structure.
iss: list or np.ndarray (default=None)
the indices of the elements we stored their neighbourhood.
"""
self._reset_stored()
self._set_iss(iss)
self._set_info(neighs_info)
self._postformat()
def _postset(self):
"""Postsetting class."""
if type(self.idxs) == np.ndarray:
pass
if type(self.idxs) == slice:
self.get_neighs = self._get_neighs_slice
elif type(self.idxs) == np.ndarray:
# if len(self.idxs.shape) == 3 and self.ks is None:
# self.ks = list(range(len(self.idxs)))
# else:
# self.staticneighs_set = True
if self.staticneighs:
self.get_neighs = self._get_neighs_array_static
else:
self.get_neighs = self._get_neighs_array_dynamic
elif type(self.idxs) == list:
if self.staticneighs:
self.get_neighs = self._get_neighs_list_static
else:
self.get_neighs = self._get_neighs_list_dynamic
## Format coreget by iss
if type(self.idxs) == slice:
self._staticneighs_get_corestored_by_inds =\
self._staticneighs_get_corestored_by_inds_slice
self._notstaticneighs_get_corestored_by_inds =\
self._notstaticneighs_get_corestored_by_inds_slice
else:
self._staticneighs_get_corestored_by_inds =\
self._staticneighs_get_corestored_by_inds_notslice
self._notstaticneighs_get_corestored_by_inds =\
self._notstaticneighs_get_corestored_by_inds_notslice
def _set_init(self):
"""Reset variables to default."""
## Main information
self.idxs = None
self.sp_relative_pos = None
## Auxiliar information
self.ks = None
self.iss = [0]
## Class structural information
self._setted = False
self._constant_rel_pos = False
self.staticneighs = None
self.staticneighs_set = None
def _reset_stored(self):
"""Reset the stored parameters and neighbourhood information."""
## Main information
self.idxs = None
self.sp_relative_pos = None
self._setted = False
self.ks = None
self.iss = [0]
def _set_general(self, neighs_info):
"""Setting neighs info with heterogenous ways to do it.
Parameters
----------
neighs_info: int, float, slice, np.ndarray, list, tuple or instance
the neighbourhood information given with the proper indicated
structure. The standards of the inputs are:
* neighs [int, float, list, slice or np.ndarray]
* (i, k)
* (neighs, k)
* (neighs_info, k) where neighs_info is a tuple which could
contain (neighs, dists) or (neighs,)
* neighs_info in the form of pst.Neighs_Info
"""
## 0. Format inputs
# If int is a neighs
if type(neighs_info) in [int, float, np.int32, np.int64, np.float]:
self._set_neighs_number(neighs_info)
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
# If slice is a neighs
elif type(neighs_info) == slice:
self._set_neighs_slice(neighs_info)
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
# If array is a neighs
elif type(neighs_info) == np.ndarray:
self._set_neighs_general_array(neighs_info)
self.set_sp_rel_pos = self._null_set_rel_pos
self.get_sp_rel_pos = self._null_get_rel_pos
# If int could be neighs or list of tuples
elif type(neighs_info) == list:
self._set_structure_list(neighs_info)
# If tuple there are more information than neighs
elif type(neighs_info) == tuple:
self._set_structure_tuple(neighs_info)
else:
assert(type(neighs_info).__name__ == 'instance')
## Substitution main information
self.idxs = neighs_info.idxs
self.ks = neighs_info.ks
self.iss = neighs_info.iss
## Copying class information
self._constant_neighs = neighs_info._constant_neighs
self._kret = neighs_info._kret
self._n = neighs_info._n
self.format_set_info = neighs_info.format_set_info
self.format_get_info = neighs_info.format_get_info
self._format_globalpars(neighs_info.staticneighs,
neighs_info.ifdistance, neighs_info.level)
self._format_setters(*neighs_info.format_set_info)
self._format_getters(*neighs_info.format_get_info)
self._format_joining_functions()
############################## Set Structure ##############################
###########################################################################
def _set_raw_structure(self, key):
"""Set the neighbourhood information in a form of raw structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* neighs{any form}
"""
self.set_neighs(key)
self.ifdistance = False
def _set_structure_tuple(self, key):
"""Set the neighbourhood information in a form of tuple general.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* (neighs, )
* (neighs_info{any form}, ks)
            * (neighs_info{list of tuple only}, ks)
* (neighs{any form}, sp_relative_pos{any form})
* ((neighs{any form}, sp_relative_pos{any form}), ks)
            * (neighs_info{list of tuple only}, ks)
"""
if len(key) == 2:
msg = "Ambiguous input in `set` function of pst.Neighs_Info."
warnings.warn(msg, SyntaxWarning)
if type(key[0]) == tuple:
self.ks = list(np.array([key[1]]).ravel())
self._set_structure_tuple(key[0])
else:
aux_bool = type(key[0]) in [np.ndarray, list]
if type(key[0]) == list and type(key[0][0]) == tuple:
self._set_tuple_list_tuple_structure(key)
elif type(key[0]) == type(key[1]) and aux_bool:
if len(key[0]) == len(key[1]):
self._set_tuple_only_structure(key)
else:
self.ks = list(np.array(key[1]))
self.set_neighs(key[0])
else:
self._set_tuple_only_structure(key)
else:
self.set_neighs(key[0])
def _set_tuple_structure(self, key):
"""Set the neighbourhood information in a form of tuple structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* (neighs_info{any form}, ks)
"""
if len(key) == 2:
self.ks = list(np.array(key[1]))
self.set_neighs(key[0])
def _set_tuple_only_structure(self, key):
"""Set the neighbourhood information in a form of tuple only structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* (neighs{any form}, sp_relative_pos{any form})
"""
self.set_neighs(key[0])
if len(key) == 2:
self.set_sp_rel_pos(key[1])
elif len(key) > 2:
raise TypeError("Not correct input.")
def _set_tuple_tuple_structure(self, key):
"""Set the neighbourhood information in a form of tuple tuple
structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* ((neighs{any form}, sp_relative_pos{any form}), ks)
"""
if len(key) == 2:
ks = [key[1]] if type(key[1]) == int else key[1]
self.ks = list(np.array([ks]).ravel())
self._set_tuple_only_structure(key[0])
# def _set_tuple_list_tuple_only(self, key):
# """
    # * (neighs_info{list of tuple only}, ks)
# """
# self.ks = list(np.array(key[1]))
# self._set_list_tuple_only_structure(key[0])
def _set_tuple_k_structure(self, key):
"""Set the neighbourhood information in a form of tuple structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* (idxs, ks)
"""
self.ks = [key[1]] if type(key[1]) == int else key[1]
self.set_neighs(key[0])
def _set_structure_list(self, key):
"""Set the neighbourhood information in a form of general list
structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* [neighs_info{tuple form}]
"""
if len(key) == 0:
self.set_neighs = self._set_neighs_general_list
self.set_neighs(key)
elif type(key[0]) == tuple:
self._set_info = self._set_list_tuple_only_structure
self._set_info(key)
elif type(key[0]) == list:
if self._constant_neighs:
if self.staticneighs:
self.get_neighs = self._get_neighs_array_static
else:
self.get_neighs = self._get_neighs_array_dynamic
else:
if self.staticneighs:
self.get_neighs = self._get_neighs_list_static
else:
self.get_neighs = self._get_neighs_list_dynamic
# Format set neighs
self.set_neighs = self._set_neighs_general_list
self.set_neighs(key)
elif type(key[0]) == np.ndarray:
self.set_neighs = self._general_set_neighs
self.set_neighs(np.array(key))
elif type(key[0]) in [int, float, np.int32, np.int64]:
self.level = 1
self._set_info = self._set_raw_structure
self.ifdistance = False
self.set_sp_rel_pos = self._null_set_rel_pos
if self.staticneighs:
self.get_neighs = self._get_neighs_array_static
else:
self.get_neighs = self._get_neighs_array_dynamic
# Format set neighs
self.set_neighs = self._set_neighs_array_lvl1
self.set_neighs(np.array(key))
def _set_list_tuple_only_structure(self, key):
"""Set the neighbourhood information in a form of list tuple only
structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
* [(neighs{any form}, sp_relative_pos{any form})]
"""
## Change to list and whatever it was
self.set_neighs([e[0] for e in key])
self.set_sp_rel_pos([e[1] for e in key])
def _set_tuple_list_tuple_structure(self, key):
"""Set the neighbourhood information in a form of tuple, list tuple
structure.
Parameters
----------
neighs_info: tuple
the neighborhood information for each element `i` and perturbations
`k`. The standards to set that information are:
            * (neighs_info{list of tuple only}, ks)
"""
self.ks = [key[1]] if type(key[1]) == int else key[1]
if not self.staticneighs:
assert(len(key[0]) == len(self.ks))
self._set_list_tuple_only_structure(key[0])
############################### Set Neighs ################################
###########################################################################
## After that has to be set:
# - self.idxs
# - self.ks
#
def _general_set_neighs(self, key):
"""General setting of only neighs.
Parameters
----------
neighs: list or np.ndarray
the neighs information for each element `i`. The standards to set
that information are:
* neighs {number form}
* neighs {list form}
* neighs {array form}
"""
if type(key) == list:
self._set_neighs_general_list(key)
elif type(key) == np.ndarray:
self._set_neighs_general_array(key)
elif type(key) in inttypes:
self._set_neighs_number(key)
else:
# print key
raise TypeError("Incorrect neighs input in pst.Neighs_Info")
def _set_neighs_number(self, key):
"""Only one neighbor expressed in a number way.
Parameters
----------
neighs: int
the neighborhood information for each element `i`. The standards to
set that information are:
* indice{int form}
"""
if self.staticneighs:
self.idxs = np.array([[key]]*len(self.iss))
else:
if self.ks is None:
self.ks = range(1)
len_ks = len(self.ks)
self.idxs = np.array([[[key]]*len(self.iss)]*len_ks)
self._constant_neighs = True
self._setted = True
def _set_neighs_slice(self, key):
"""Set neighs in a slice-form.
Parameters
----------
neighs: slice
the neighs information for each element `i`. The standards to set
that information are:
* indices{slice form}
"""
## Condition to use slice type
self._constant_neighs = True
self.ks = range(1) if self.ks is None else self.ks
## Possible options
if key is None:
self.idxs = slice(0, self._n, 1)
elif isinstance(key, slice):
start = 0 if key.start is None else key.start
            stop = self._n if key.stop is None else key.stop
            # assume 10**16 was intended as a "practically unbounded" sentinel
            # (the original compared against 10*16 == 160); compare the
            # normalized stop so a None key.stop cannot leak back in
            stop = self._n if stop > 10**16 else stop
step = 1 if key.step is None else key.step
self.idxs = slice(start, stop, step)
elif type(key) in inttypes:
self.idxs = slice(0, key, 1)
elif type(key) == tuple:
self.idxs = slice(key[0], key[1], 1)
self._setted = True
def _set_neighs_array_lvl1(self, key):
"""Set neighs as a array level 1 form.
Parameters
----------
neighs: np.ndarray
the neighs information for each element `i`. The standards to set
that information are:
* indices{np.ndarray form} shape: (neighs)
"""
#sh = key.shape
## If only array of neighs
if self.staticneighs:
self.idxs = np.array([key for i in range(len(self.iss))])
else:
self.ks = range(1) if self.ks is None else self.ks
len_ks = len(self.ks)
self.idxs = np.array([[key for i in range(len(self.iss))]
for i in range(len_ks)])
self._setted = True
def _set_neighs_array_lvl2(self, key):
"""Set neighs as array level 2 form.
Parameters
----------
neighs: np.ndarray
the neighs information for each element `i`. The standards to set
that information are:
* indices{np.ndarray form} shape: (iss, neighs)
"""
sh = key.shape
## If only iss and neighs
self.idxs = key
if self.staticneighs:
self.idxs = np.array(key)
else:
len_ks = len(self.ks) if self.ks is not None else 1
self.ks = range(1) if self.ks is None else self.ks
self.idxs = np.array([key for k in range(len_ks)])
self._setted = True
if sh[0] != len(self.iss):
self.iss = list(range(sh[0]))
def _set_neighs_array_lvl3(self, key):
"""Set neighs as array level 3 form.
Parameters
----------
neighs: np.ndarray
the neighs information for each element `i`. The standards to set
that information are:
* indices{np.ndarray form} shape: (ks, iss, neighs)
"""
self.idxs = np.array(key)
self.ks = range(len(self.idxs)) if self.ks is None else self.ks
if self.staticneighs:
self.idxs = np.array(key[0])
if len(self.idxs) != len(self.iss):
self.iss = list(range(len(self.idxs)))
else:
if len(self.idxs[0]) != len(self.iss):
self.iss = list(range(len(self.idxs[0])))
self._setted = True
def _set_neighs_general_array(self, key):
"""Set neighs as a general array form.
Parameters
----------
neighs: np.ndarray
the neighs information for each element `i`. The standards to set
that information are:
* indices{np.ndarray form} shape: (neighs)
* indices{np.ndarray form} shape: (iss, neighs)
* indices{np.ndarray form} shape: (ks, iss, neighs)
"""
key = np.array([key]) if type(key) in inttypes else key
sh = key.shape
## If only array of neighs
if len(sh) == 0:
self._set_neighs_number(key)
# self._setted = False
# if self.staticneighs:
# self.idxs = np.array([[]])
# else:
# self.idxs = np.array([[[]]])
elif len(sh) == 1:
self._set_neighs_array_lvl1(key)
## If only iss and neighs
elif len(sh) == 2:
self._set_neighs_array_lvl2(key)
elif len(sh) == 3:
self._set_neighs_array_lvl3(key)
def _set_neighs_general_list(self, key):
"""Set neighs as a general list form.
Parameters
----------
neighs: list
the neighs information for each element `i`. The standards to set
that information are:
* indices {list of list form [neighs]} [neighs]
* [neighs_info{array-like form}, ...] [iss][neighs]
* [neighs_info{array-like form}, ...] [ks][iss][neighs]
"""
        ### WARNING: DOES NOT WORK WITH EMPTY NEIGHS
if '__len__' not in dir(key):
self._set_neighs_number(key)
else:
if len(key) == 0:
self._set_neighs_list_only(key)
elif '__len__' not in dir(key[0]):
self._set_neighs_list_only(key)
else:
if all([len(key[i]) == 0 for i in range(len(key))]):
self._setted = False
if self.staticneighs:
self.idxs = np.array([[]])
else:
self.idxs = np.array([[[]]])
elif '__len__' not in dir(key[0][0]):
self._set_neighs_list_list(key)
else:
self._set_neighs_list_list_list(key)
def _set_neighs_list_only(self, key):
"""Set the level 1 list
Parameters
----------
neighs: list
the neighs information for each element `i`. The standards to set
that information are:
* indices {list of list form [neighs]} [neighs]
"""
self._set_neighs_array_lvl1(np.array(key))
def _set_neighs_list_list(self, key):
"""Set the level 2 list.
Parameters
----------
neighs: list
the neighs information for each element `i`. The standards to set
that information are:
* [neighs_info{array-like form}, ...] [iss][neighs]
"""
if self._constant_neighs:
key = np.array(key)
if self.staticneighs:
self.idxs = key
self.ks = range(1) if self.ks is None else self.ks
else:
self.ks = range(1) if self.ks is None else self.ks
len_ks = len(self.ks)
self.idxs = [key for k in range(len_ks)]
if type(key) == np.ndarray:
self.idxs = np.array(self.idxs)
        if len(self.iss) != len(key):
            self.iss = range(len(key))
# if len(self.idxs[0]) > 0:
# self.iss = list(range(len(self.idxs)))
self._setted = True
def _set_neighs_list_list_list(self, key):
"""Set neighs as a level 3 list form.
Parameters
----------
neighs: list
the neighs information for each element `i`. The standards to set
that information are:
* [neighs_info{array-like form}, ...] [ks][iss][neighs]
"""
self.ks = list(range(len(key))) if self.ks is None else self.ks
if self._constant_neighs:
self.idxs = np.array(key)
else:
self.idxs = key
if len(self.idxs[0]) != len(self.iss):
self.iss = list(range(len(self.idxs[0])))
if self.staticneighs:
self.idxs = self.idxs[0]
self._setted = True
########################### Set Sp_relative_pos ###########################
###########################################################################
def _general_set_rel_pos(self, rel_pos):
"""Set the general relative position.
Parameters
----------
rel_pos: int, float, list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* None
* list of arrays len(iss) -> unique rel_pos for ks
* list of lists of arrays -> complete
"""
if rel_pos is None or self.ifdistance is False:
self._null_set_rel_pos(rel_pos)
self.get_sp_rel_pos = self._null_get_rel_pos
elif type(rel_pos) == list:
self._set_rel_pos_general_list(rel_pos)
elif type(rel_pos) == np.ndarray:
self._set_rel_pos_general_array(rel_pos)
elif type(rel_pos) in [float, int, np.float, np.int32, np.int64]:
self._set_rel_pos_number(rel_pos)
else:
# print rel_pos
msg = "Incorrect relative position input in pst.Neighs_Info"
raise TypeError(msg)
def _set_rel_pos_general_list(self, rel_pos):
"""Set of relative position in a general list form.
Parameters
----------
rel_pos: list
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* None
* list of arrays len(iss) -> unique rel_pos for ks
* list of lists of arrays -> complete
"""
if self.level is not None:
if self.level == 0:
self._set_rel_pos_dim(rel_pos)
elif self.level == 1:
self._list_only_set_rel_pos(rel_pos)
elif self.level == 2:
self._list_list_only_set_rel_pos(rel_pos)
elif self.level == 3:
self._list_list_set_rel_pos(rel_pos)
else:
if len(rel_pos) == 0:
self._set_rel_pos_number(rel_pos)
elif type(rel_pos[0]) not in [list, np.ndarray]:
self._list_only_set_rel_pos(rel_pos)
else:
if len(rel_pos[0]) == 0:
self._list_only_set_rel_pos(rel_pos)
elif type(rel_pos[0][0]) not in [list, np.ndarray]:
self._list_only_set_rel_pos(rel_pos)
else:
if len(rel_pos[0][0]) == 0:
self._list_list_only_set_rel_pos(rel_pos)
elif type(rel_pos[0][0][0]) not in [list, np.ndarray]:
self._list_list_only_set_rel_pos(rel_pos)
else:
self._list_list_set_rel_pos(rel_pos)
def _null_set_rel_pos(self, rel_pos):
"""Not consider the input.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
"""
self.get_sp_rel_pos = self._null_get_rel_pos
def _set_rel_pos_number(self, rel_pos):
"""Number set pos.
Parameters
----------
rel_pos: int or float
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* int or float
"""
self.sp_relative_pos = self._set_rel_pos_dim([rel_pos])
def _set_rel_pos_dim(self, rel_pos):
"""Set rel pos with zero level.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* rel_pos{array or list form} [dim]
"""
        if '__len__' not in dir(rel_pos):
rel_pos = np.array([rel_pos])
if self.staticneighs:
rel_pos_f = []
for i in range(len(self.idxs)):
rel_pos_i = [rel_pos for nei in range(len(self.idxs[i]))]
rel_pos_f.append(rel_pos_i)
else:
rel_pos_f = []
for k in range(len(self.idxs)):
rel_pos_k = []
for i in range(len(self.idxs[k])):
n_nei = len(self.idxs[k][i])
rel_pos_k.append([rel_pos for nei in range(n_nei)])
rel_pos_f.append(rel_pos_k)
if self._constant_neighs:
rel_pos_f = np.array(rel_pos_f)
self.sp_relative_pos = rel_pos_f
# self.sp_relative_pos = np.array([[[rel_pos]]])
# self.get_sp_rel_pos = self._constant_get_rel_pos
# self.staticneighs = True
def _set_rel_pos_general_array(self, rel_pos):
"""Array set rel pos.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* rel_pos{np.ndarray form} shape: (neighs, dim)
* rel_pos{np.ndarray form} shape: (iss, neighs, dim)
* rel_pos{np.ndarray form} shape: (ks, iss, neighs, dim)
"""
n_shape = len(rel_pos.shape)
if n_shape == 2:
self._array_only_set_rel_pos(rel_pos)
elif n_shape == 3:
self._array_array_set_rel_pos(rel_pos)
elif n_shape == 4:
self._array_array_array_set_rel_pos(rel_pos)
def _array_only_set_rel_pos(self, rel_pos):
"""Set the array form relative position.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* Array only. [nei][dim] or [nei]
"""
## Preformatting
rel_pos = np.array(rel_pos)
if len(rel_pos.shape) == 1:
rel_pos = rel_pos.reshape((len(rel_pos), 1))
n_iss = len(self.iss)
sp_relative_pos = np.array([rel_pos for i in range(n_iss)])
## Not staticneighs
if not self.staticneighs:
n_k = len(self.idxs)
sp_relative_pos = np.array([sp_relative_pos for i in range(n_k)])
self.sp_relative_pos = sp_relative_pos
def _array_array_set_rel_pos(self, rel_pos):
"""Set the array-array (level 2) relative position.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
            * Array of arrays. [iss][nei][dim] or [iss][nei].
"""
# self.staticneighs = True
if self.staticneighs:
self.sp_relative_pos = np.array(rel_pos)
else:
len_ks = 1 if self.ks is None else len(self.ks)
self.sp_relative_pos = np.array([rel_pos for k in range(len_ks)])
def _array_array_array_set_rel_pos(self, rel_pos):
"""Set the level 3 array relative position.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
            * Array of arrays. [ks][iss][nei][dim] or [ks][iss][nei].
"""
if self.staticneighs:
self.sp_relative_pos = rel_pos[0]
else:
self.sp_relative_pos = rel_pos
def _list_only_set_rel_pos(self, rel_pos):
"""List only relative pos. Every iss and ks has the same neighs with
the same relative information.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* [nei][dim] or [nei]
"""
self._array_only_set_rel_pos(rel_pos)
def _list_list_only_set_rel_pos(self, rel_pos):
"""List list only relative pos. Every ks has the same neighs with the
same relative information.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
            * [iss][nei][dim] or [iss][nei]
"""
if self.staticneighs is not True:
assert(self.ks is not None)
n_ks = len(self.ks)
self.sp_relative_pos = [rel_pos]*n_ks
else:
self.sp_relative_pos = rel_pos
def _list_list_set_rel_pos(self, rel_pos):
"""List list list relative pos.
Parameters
----------
rel_pos: list or np.ndarray
the relative position of the neighbourhood respect the centroid.
The standard inputs form are:
* [ks][iss][nei][dim] or [ks][iss][nei]
"""
if self.staticneighs:
self.sp_relative_pos = rel_pos[0]
else:
self.sp_relative_pos = rel_pos
############################### Setter iss ################################
###########################################################################
def _general_set_iss(self, iss):
"""General set iss input.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
if type(iss) == int:
self._int_set_iss(iss)
elif type(iss) in [list, np.ndarray]:
self._list_set_iss(iss)
else:
if type(self.idxs) in [list, np.ndarray]:
if self.staticneighs:
if len(self.iss) != len(self.idxs):
self.iss = range(len(self.idxs))
else:
if len(self.iss) != len(self.idxs[0]):
self.iss = range(len(self.idxs[0]))
def _int_set_iss(self, iss):
"""Input iss always integer.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
self.iss = [iss]
def _list_set_iss(self, iss):
"""Input iss always array-like.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
self.iss = list(iss)
def _null_set_iss(self, iss):
"""Not consider the input.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
pass
###########################################################################
################################## GETS ###################################
###########################################################################
############################# Getter rel_pos ##############################
###########################################################################
def _general_get_rel_pos(self, k_is=[0]):
"""Get the relative position.
Parameters
----------
ks: int, slice, list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
if self.sp_relative_pos is None:
return self._null_get_rel_pos(k_is)
elif self.staticneighs:
return self._static_get_rel_pos(k_is)
# elif self._constant_rel_pos:
# return self._constant_get_rel_pos(k_is)
else:
if type(self.sp_relative_pos) == list:
return self._dynamic_rel_pos_list(k_is)
else:
return self._dynamic_rel_pos_array(k_is)
def _null_get_rel_pos(self, k_is=[0]):
"""Get the relative position.
Parameters
----------
ks: int, slice, list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
return [[None]*len(self.iss)]*len(k_is)
# def _constant_get_rel_pos(self, k_is=[0]):
# neighs = self.get_neighs(k_is)
# rel_pos = []
# for k in range(len(neighs)):
# rel_pos_k = []
# for i in range(len(neighs[k])):
# rel_pos_k.append(len(neighs[k][i])*[self.sp_relative_pos])
# rel_pos.append(rel_pos_k)
# if self._constant_neighs:
# rel_pos = np.array(rel_pos)
# return rel_pos
def _static_get_rel_pos(self, k_is=[0]):
"""Get the relative position.
Parameters
----------
ks: int, slice, list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
return [self.sp_relative_pos for k in k_is]
# def _static_rel_pos_list(self, k_is=[0]):
# return self.sp_relative_pos*len(k_is)
#
# def _static_rel_pos_array(self, k_is=[0]):
# return np.array([self.sp_relative_pos for i in range(len(k_is))])
def _dynamic_rel_pos_list(self, k_is=[0]):
"""Get the relative position.
Parameters
----------
ks: int, slice, list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
# [[e[k_i] for e in self.sp_relative_pos] for k_i in k_is]
return [self.sp_relative_pos[i] for i in k_is]
def _dynamic_rel_pos_array(self, k_is=[0]):
"""Get the relative position.
Parameters
----------
ks: int, slice, list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
# [[e[k_i] for e in self.sp_relative_pos] for k_i in k_is]
return [self.sp_relative_pos[i] for i in k_is]
################################ Getters k ################################
###########################################################################
def _general_get_k(self, k=None):
"""General get k.
Parameters
----------
ks: int, slice, list or np.ndarray
            the perturbation indices, unformatted.
Returns
-------
ks: int, slice, list or np.ndarray
            the perturbation indices, formatted.
"""
## Format k
if k is None:
ks = self._default_get_k()
elif type(k) in [np.ndarray, list]:
ks = self._list_get_k(k)
elif type(k) in inttypes:
ks = self._integer_get_k(k)
return ks
def _default_get_k(self, k=None):
"""Default get ks.
Parameters
----------
ks: int, slice, list or np.ndarray
            the perturbation indices, unformatted.
Returns
-------
ks: int, slice, list or np.ndarray
            the perturbation indices, formatted.
"""
if self.ks is None:
return [0]
else:
return self.ks
def _integer_get_k(self, k):
"""Integer get k.
Parameters
----------
ks: int, slice, list or np.ndarray
            the perturbation indices, unformatted.
Returns
-------
ks: int, slice, list or np.ndarray
            the perturbation indices, formatted.
"""
if type(k) == list:
return [self._integer_get_k(e)[0] for e in k]
if k >= 0 and k <= self._kret:
ks = [k]
else:
raise TypeError("k index out of bounds.")
return ks
def _list_get_k(self, k):
"""List get k.
Parameters
----------
ks: int, slice, list or np.ndarray
            the perturbation indices, unformatted.
Returns
-------
ks: int, slice, list or np.ndarray
            the perturbation indices, formatted.
"""
ks = [self._integer_get_k(k_i)[0] for k_i in k]
return ks
def _get_k_indices(self, ks):
"""List of indices of ks.
Parameters
----------
ks: int, slice, list or np.ndarray
the perturbations indices associated with the returned information.
Returns
-------
idx_ks: list
            the indices associated with the given perturbation indices,
            preserving the index order.
"""
if self.staticneighs:
idx_ks = ks
else:
idx_ks = [self.ks.index(e) for e in ks]
return idx_ks
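    # Illustrative example (hypothetical values): for a non-static
    # instance with self.ks = [3, 5], requesting ks = [5] yields
    # idx_ks = [1], i.e. the position of each requested perturbation
    # inside self.ks; static instances return ks unchanged.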
############################ Getters information ##########################
###########################################################################
def _general_get_information(self, k=None):
"""Get information stored in this class.
Parameters
----------
ks: int, slice, list or np.ndarray (default=None)
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
ks: int, slice, list or np.ndarray (default=None)
the perturbations indices associated with the returned information.
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
## Format k
ks = self.get_k(k)
idx_ks = self._get_k_indices(ks)
## Get iss
iss = self.iss
## Format idxs
assert(type(idx_ks) == list)
neighs = self.get_neighs(idx_ks)
sp_relative_pos = self.get_sp_rel_pos(idx_ks)
self.check_output_standards(neighs, sp_relative_pos, ks, iss)
# print '3'*50, neighs, sp_relative_pos, ks, iss
return neighs, sp_relative_pos, ks, iss
def _default_get_information(self, k=None):
"""For the unset instances.
Parameters
----------
ks: int, slice, list or np.ndarray (default=None)
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
ks: int, slice, list or np.ndarray (default=None)
the perturbations indices associated with the returned information.
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
raise Exception("Information not set in pst.Neighs_Info.")
################################ Get neighs ###############################
def _get_neighs_general(self, k_is=[0]):
"""General getting neighs.
Parameters
----------
ks: int, slice, list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
if type(self.idxs) == slice:
neighs = self._get_neighs_slice(k_is)
elif type(self.idxs) == np.ndarray:
if self.staticneighs:
neighs = self._get_neighs_array_static(k_is)
else:
neighs = self._get_neighs_array_dynamic(k_is)
elif type(self.idxs) == list:
if self.staticneighs:
neighs = self._get_neighs_list_static(k_is)
else:
neighs = self._get_neighs_list_dynamic(k_is)
# else:
# self._default_get_neighs()
return neighs
def _get_neighs_slice(self, k_is=[0]):
"""Getting neighs from slice.
Parameters
----------
ks: slice (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
neighs = [np.array([range(self.idxs.start, self.idxs.stop,
self.idxs.step)
for j in range(len(self.iss))])
for i in range(len(k_is))]
neighs = np.array(neighs)
return neighs
def _get_neighs_array_dynamic(self, k_is=[0]):
"""Getting neighs from array.
Parameters
----------
ks: np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
neighs = self.idxs[k_is, :, :]
return neighs
def _get_neighs_array_static(self, k_is=[0]):
"""Getting neighs from array.
Parameters
----------
ks: np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
neighs = [self.idxs for i in range(len(k_is))]
neighs = np.array(neighs)
return neighs
def _get_neighs_list_dynamic(self, k_is=[0]):
"""Getting neighs from list.
Parameters
----------
ks: list (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
neighs = [self.idxs[k_i] for k_i in k_is]
return neighs
def _get_neighs_list_static(self, k_is=[0]):
"""Getting neighs from list.
Parameters
----------
ks: list or np.ndarray (default=[0])
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
neighs = [self.idxs for k_i in k_is]
return neighs
def _default_get_neighs(self, k_i=0):
"""Default get neighs (when it is not set)
Parameters
----------
ks: int, list or np.ndarray (default=0)
the perturbations indices associated with the returned information.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k` required in the input.
"""
raise Exception("Information not set in pst.Neighs_Info.")
########################## Get by coreinfo by iss #########################
## Get the neighs_info copy object with same information but iss reduced.
## Format into get_copy_iss and get_copy_iss_by_ind
def _staticneighs_get_copy_iss(self, iss):
"""Get the neighs_info copy object with same information but iss
reduced.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
Returns
-------
neighs_info: pst.Neighs_Info
the neighbourhood information of the elements `i` for the
perturbations `k`.
"""
inds = self._get_indices_from_iss(iss)
return self._staticneighs_get_copy_iss_by_ind(inds)
def _notstaticneighs_get_copy_iss(self, iss):
"""Get the neighs_info copy object with same information but iss
reduced.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
Returns
-------
neighs_info: pst.Neighs_Info
the neighbourhood information of the elements `i` for the
perturbations `k`.
"""
inds = self._get_indices_from_iss(iss)
return self._notstaticneighs_get_copy_iss_by_ind(inds)
def _staticneighs_get_copy_iss_by_ind(self, indices):
"""Get the neighs_info copy object with same information but iss
reduced.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
Returns
-------
neighs_info: pst.Neighs_Info
the neighbourhood information of the elements `i` for the
perturbations `k`.
"""
indices = [indices] if type(indices) == int else indices
iss = [self.iss[i] for i in indices]
idxs, sp_relpos = self._staticneighs_get_corestored_by_inds(indices)
## Copy of information in new container
neighs_info = self.copy()
neighs_info.idxs = idxs
neighs_info.sp_relative_pos = sp_relpos
neighs_info.iss = iss
return neighs_info
def _notstaticneighs_get_copy_iss_by_ind(self, indices):
"""Get the neighs_info copy object with same information but iss
reduced.
Parameters
----------
inds: list
the indices of the elements codes we stored their neighbourhood.
Returns
-------
neighs_info: pst.Neighs_Info
the neighbourhood information of the elements `i` for the
perturbations `k`.
"""
indices = [indices] if type(indices) == int else indices
iss = [self.iss[i] for i in indices]
idxs, sp_relpos = self._notstaticneighs_get_corestored_by_inds(indices)
## Copy of information in new container
neighs_info = self.copy()
neighs_info.idxs = idxs
neighs_info.sp_relative_pos = sp_relpos
neighs_info.iss = iss
return neighs_info
## Auxiliar functions
def _staticneighs_get_corestored_by_inds_notslice(self, inds):
"""Get the neighborhood information from the indices.
Parameters
----------
inds: list
the indices of the elements codes we stored their neighbourhood.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` and for each
perturbation `k`.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
inds = [inds] if type(inds) == int else inds
idxs = [self.idxs[i] for i in inds]
idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs
if self.sp_relative_pos is not None:
sp_relative_pos = [self.sp_relative_pos[i] for i in inds]
else:
sp_relative_pos = None
return idxs, sp_relative_pos
def _notstaticneighs_get_corestored_by_inds_notslice(self, inds):
"""Get the neighborhood information from the indices.
Parameters
----------
inds: list
the indices of the elements codes we stored their neighbourhood.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` and for each
perturbation `k`.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
inds = [inds] if type(inds) == int else inds
idxs = []
for k in range(len(self.idxs)):
idxs.append([self.idxs[k][i] for i in inds])
idxs = np.array(idxs) if type(self.idxs) == np.ndarray else idxs
if self.sp_relative_pos is not None:
sp_relative_pos = []
for k in range(len(self.sp_relative_pos)):
sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]
else:
sp_relative_pos = None
return idxs, sp_relative_pos
def _staticneighs_get_corestored_by_inds_slice(self, inds):
"""Get the neighborhood information from the indices.
Parameters
----------
inds: list
the indices of the elements codes we stored their neighbourhood.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` and for each
perturbation `k`.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
inds = [inds] if type(inds) == int else inds
idxs = self.idxs
if self.sp_relative_pos is not None:
sp_relative_pos = [self.sp_relative_pos[i] for i in inds]
else:
sp_relative_pos = None
return idxs, sp_relative_pos
def _notstaticneighs_get_corestored_by_inds_slice(self, inds):
"""Get the neighborhood information from the indices.
Parameters
----------
inds: list
the indices of the elements codes we stored their neighbourhood.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` and for each
perturbation `k`.
sp_relpos: list or np.ndarray (default=None)
the relative position information for each element `i` and for each
perturbation `k`.
"""
inds = [inds] if type(inds) == int else inds
idxs = self.idxs
if self.sp_relative_pos is not None:
sp_relative_pos = []
for k in range(len(self.sp_relative_pos)):
sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]
else:
sp_relative_pos = None
return idxs, sp_relative_pos
def _get_indices_from_iss(self, iss):
"""Indices of iss from self.iss.
Parameters
----------
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
Returns
-------
inds: list
the indices of the elements codes we stored their neighbourhood.
"""
iss = [iss] if type(iss) not in [np.ndarray, list] else iss
        if self.iss is not None:
            inds = []
            for i in iss:
                inds.append(list(self.iss).index(i))
        else:
            inds = iss
return inds
###########################################################################
################################ CHECKERS #################################
###########################################################################
### Only activate that in a testing process
def assert_goodness(self):
"""Assert standarts of storing."""
if self._setted:
self.assert_stored_iss()
self.assert_stored_ks()
## Check idxs
self.assert_stored_idxs()
## Check sp_relative_pos
self.assert_stored_sp_rel_pos()
def assert_stored_sp_rel_pos(self):
"""Definition of the standart store for sp_relative_pos."""
# ## Temporal
# if self.sp_relative_pos is not None:
# if self._constant_neighs:
# if self.staticneighs:
# assert(len(np.array(self.sp_relative_pos).shape) == 3)
# else:
# assert(len(np.array(self.sp_relative_pos).shape) == 4)
# #################
array_types = [list, np.ndarray]
if self.sp_relative_pos is not None:
assert(type(self.sp_relative_pos) in [list, np.ndarray])
# if type(self.sp_relative_pos) in [float, int, np.int32, np.int64]:
# ### Probably redundant
# # it is needed or possible this situation?
# pass
# if self.ks is None:
# assert(self.staticneighs)
# assert(len(self.sp_relative_pos) == len(self.iss))
if self.staticneighs:
assert(len(self.sp_relative_pos) == len(self.iss))
## Assert deep 3
if len(self.iss):
assert(type(self.sp_relative_pos[0]) in array_types)
else:
assert(self.ks is not None)
assert(len(self.sp_relative_pos) == len(self.ks))
if type(self.sp_relative_pos[0]) in array_types:
if not self.staticneighs:
assert(len(self.sp_relative_pos[0]) == len(self.iss))
if len(self.sp_relative_pos[0]) > 0:
assert(type(self.sp_relative_pos[0][0]) in array_types)
def assert_stored_iss(self):
"""Definition of the standart store for iss."""
assert(type(self.iss) == list)
assert(len(self.iss) > 0)
def assert_stored_ks(self):
"""Definition of the standart store for ks."""
assert(self.ks is None or type(self.ks) in [list, np.ndarray])
if self.ks is not None:
assert(type(self.ks[0]) in inttypes)
def assert_stored_idxs(self):
"""Definition of the standart store for sp_relative_pos."""
if type(self.idxs) == list:
assert(type(self.idxs[0]) in [list, np.ndarray])
if not self.staticneighs:
assert(type(self.idxs[0][0]) in [list, np.ndarray])
else:
if '__len__' in dir(self.idxs[0]):
if len(self.idxs[0]):
assert(type(self.idxs[0][0]) in inttypes)
else:
assert(not any(self.idxs[0]))
elif type(self.idxs) == np.ndarray:
if self.staticneighs:
assert(len(self.idxs.shape) == 2)
else:
assert(len(self.idxs.shape) == 3)
# if self.ks is not None and not self.staticneighs:
# assert(len(self.idxs) == len(self.ks))
# else:
# assert(len(self.idxs.shape) == 2)
if self.staticneighs:
assert(len(self.idxs) == len(self.iss))
else:
assert(len(self.idxs[0]) == len(self.iss))
elif type(self.idxs) == slice:
pass
else:
### Probably redundant (Only testing purposes)
# print type(self.idxs), self.idxs
types = str(type(self.idxs))
raise Exception("Not proper type in self.idxs. Type: %s." % types)
def check_output_standards(self, neighs, sp_relative_pos, ks, iss):
"""Check output standarts.
Parameters
----------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k`.
sp_relpos: list or np.ndarray
the relative position information for each element `i` for each
perturbation `k`.
ks: list or np.ndarray
the perturbations indices associated with the returned information.
iss: list or np.ndarray
the indices of the elements we stored their neighbourhood.
"""
self.check_output_neighs(neighs, ks)
self.check_output_rel_pos(sp_relative_pos, ks)
assert(len(iss) == len(self.iss))
def check_output_neighs(self, neighs, ks):
"""Check standart outputs of neighs.
Parameters
----------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k`.
ks: list or np.ndarray
the perturbations indices associated with the returned information.
"""
if type(neighs) == list:
assert(len(neighs) == len(ks))
#assert(type(neighs[0]) == list)
assert(len(neighs[0]) == len(self.iss))
elif type(neighs) == np.ndarray:
assert(len(neighs.shape) == 3)
assert(len(neighs) == len(ks))
assert(neighs.shape[1] == len(self.iss))
else:
### Probably redundant (Only testing purposes)
# print neighs
types = str(type(neighs))
raise Exception("Not correct neighs output.Type: %s." % types)
def check_output_rel_pos(self, sp_relative_pos, ks):
"""Check standart outputs of rel_pos.
Parameters
----------
sp_relpos: list or np.ndarray
the relative position information for each element `i` for each
perturbation `k`.
ks: list or np.ndarray
the perturbations indices associated with the returned information.
"""
assert(type(sp_relative_pos) in [np.ndarray, list])
assert(len(sp_relative_pos) == len(ks))
assert(len(sp_relative_pos[0]) == len(self.iss))
    ########################### Joining functions #############################
def _format_joining_functions(self):
"""Format the joining functions to use."""
## TODO: Extend to n possible neighs_info elements
if self.staticneighs:
if self.ifdistance:
self.join_neighs_and = join_neighsinfo_AND_static_dist
self.join_neighs_or = join_neighsinfo_OR_static_dist
self.join_neighs_xor = join_neighsinfo_XOR_static_dist
else:
self.join_neighs_and = join_neighsinfo_AND_static_notdist
self.join_neighs_or = join_neighsinfo_OR_static_notdist
self.join_neighs_xor = join_neighsinfo_XOR_static_notdist
else:
if self.ifdistance:
self.join_neighs_and = join_neighsinfo_AND_notstatic_dist
self.join_neighs_or = join_neighsinfo_OR_notstatic_dist
self.join_neighs_xor = join_neighsinfo_XOR_notstatic_dist
else:
self.join_neighs_and = join_neighsinfo_AND_notstatic_notdist
self.join_neighs_or = join_neighsinfo_OR_notstatic_notdist
self.join_neighs_xor = join_neighsinfo_XOR_notstatic_notdist
def join_neighs(self, neighs_info, mode='and', joiner_pos=None):
"""General joining function.
Parameters
----------
neighs_info: pst.Neighs_Info
the neighbourhood information of the other neighs we want to join.
mode: str optional ['and', 'or', 'xor']
the type of joining process we want to do.
joiner_pos: function (default=None)
the function to join the relative positions of the different
neighbourhood.
Returns
-------
new_neighs_info: pst.Neighs_Info
the neighbourhood information of joined neighbourhood.
"""
assert(mode in ['and', 'or', 'xor'])
if mode == 'and':
if self.ifdistance:
new_neighs_info = self.join_neighs_and(self, neighs_info,
joiner_pos)
else:
new_neighs_info = self.join_neighs_and(self, neighs_info)
elif mode == 'or':
if self.ifdistance:
new_neighs_info = self.join_neighs_or(self, neighs_info,
joiner_pos)
else:
new_neighs_info = self.join_neighs_or(self, neighs_info)
elif mode == 'xor':
if self.ifdistance:
new_neighs_info = self.join_neighs_xor(self, neighs_info,
joiner_pos)
else:
new_neighs_info = self.join_neighs_xor(self, neighs_info)
return new_neighs_info
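    # Illustrative usage (hypothetical objects): intersecting two
    # neighbourhoods computed for the same elements and the same ks,
    #     joined = nei_a.join_neighs(nei_b, mode='and')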
###############################################################################
######################### Auxiliar inspect functions ##########################
###############################################################################
def ensuring_neighs_info(neighs_info, k):
"""Ensuring that the neighs_info is in Neighs_Info object container.
Parameters
----------
neighs_info: pst.Neighs_Info or tuple
the neighbourhood information.
k: list
the list of perturbation indices.
Returns
-------
neighs_info: pst.Neighs_Info
the properly formatted neighbourhood information.
"""
if not type(neighs_info).__name__ == 'instance':
parameters = inspect_raw_neighs(neighs_info, k=k)
parameters['format_structure'] = 'tuple_k'
neighs_info_object = Neighs_Info(**parameters)
neighs_info_object.set((neighs_info, k))
neighs_info = neighs_info_object
return neighs_info
def inspect_raw_neighs(neighs_info, k=0):
"""Useful class to inspect a raw structure neighs, in order to set
some parts of the class in order to a proper settting adaptation.
Parameters
----------
neighs_info: pst.Neighs_Info or tuple
the neighbourhood information.
k: int or list (default=0)
the list of perturbation indices.
Returns
-------
parameters: dict
the parameters to reinstantiate the neighbourhood information
properly.
"""
deep = find_deep(neighs_info)
k = [k] if type(k) == int else k
parameters = {'format_structure': 'raw'}
parameters['format_level'] = deep
if deep == 3:
assert(np.max(k) <= len(neighs_info))
parameters['kret'] = len(neighs_info)
parameters['staticneighs'] = False
else:
parameters['staticneighs'] = True
parameters['kret'] = np.max(k)
return parameters
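# Illustrative example (hypothetical input): a level-2 raw structure
# ([iss][neighs]) is static, so only `kret` has to bound the requested k:
#     inspect_raw_neighs([[0, 1], [1, 2]], k=0)
#     # -> {'format_structure': 'raw', 'format_level': 2,
#     #     'staticneighs': True, 'kret': 0}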
def find_deep(neighs_info):
"""Find deep from a raw structure.
Parameters
----------
neighs_info: tuple
the neighbourhood information.
Returns
-------
deep: int
the level in which the information is provided.
"""
if '__len__' not in dir(neighs_info):
deep = 0
else:
if len(neighs_info) == 0:
deep = 1
elif '__len__' not in dir(neighs_info[0]):
deep = 1
else:
logi = [len(neighs_info[i]) == 0 for i in range(len(neighs_info))]
if all(logi):
deep = 2
elif '__len__' not in dir(neighs_info[0][0]):
deep = 2
else:
deep = 3
return deep
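# Doctest-style examples (illustrative): the deep level follows nesting,
#     find_deep(0)                          # -> 0 (scalar)
#     find_deep([0, 1])                     # -> 1 ([neighs])
#     find_deep([[0, 1], [2]])              # -> 2 ([iss][neighs])
#     find_deep([[[0], [1]], [[2], [3]]])   # -> 3 ([ks][iss][neighs])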
def neighsinfo_features_preformatting_tuple(key, k_perturb):
"""Preformatting tuple.
Parameters
----------
neighs_info: tuple
        the neighborhood information. Assumed tuple input:
* idxs, ks
k_perturb: int
the number of perturbations.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k`.
ks: list or np.ndarray
the perturbations indices associated with the returned information.
sp_relpos: list or np.ndarray
the relative position information for each element `i` for each
perturbation `k`.
"""
deep = find_deep(key[0])
if deep == 1:
ks = [key[1]] if type(key[1]) == int else key[1]
i, k, d = neighsinfo_features_preformatting_list(key[0], ks)
else:
neighs_info = Neighs_Info()
neighs_info.set_information(k_perturb)
neighs_info.set(key)
# Get information
i, d, k, _ = neighs_info.get_information()
return i, k, d
def neighsinfo_features_preformatting_list(key, k_perturb):
"""Preformatting list.
Parameters
----------
    neighs_info: list
        the neighborhood information, given as a level-1 list of
        indices.
k_perturb: int
the number of perturbations.
Returns
-------
neighs: list or np.ndarray
the neighs information for each element `i` for each possible
perturbation `k`.
ks: list or np.ndarray
the perturbations indices associated with the returned information.
sp_relpos: list or np.ndarray
the relative position information for each element `i` for each
perturbation `k`.
"""
kn = range(k_perturb+1) if type(k_perturb) == int else k_perturb
key = [[idx] for idx in key]
i, k, d = np.array([key]*len(kn)), kn, [[None]*len(key)]*len(kn)
return i, k, d
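# Illustrative example (hypothetical input): a level-1 list of indices is
# replicated over the perturbations, with no relative positions:
#     neighsinfo_features_preformatting_list([7, 9], 1)
#     # -> (array([[[7], [9]], [[7], [9]]]), [0, 1],
#     #     [[None, None], [None, None]])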
###############################################################################
####################### Complementary Joining function ########################
###############################################################################
def join_by_iss(list_neighs_info):
"""Joinning by iss.
Parameters
----------
list_neighs_info: list of pst.Neighs_Info
the list of different neighbourhood information, with overlapping
set of iss.
Returns
-------
neighs_info: tuple
the joined neighbourhood information.
"""
## Computation
if len(list_neighs_info) == 1:
return list_neighs_info[0]
static = list_neighs_info[0].staticneighs
ifdistance = list_neighs_info[0].sp_relative_pos is not None
    assert(all([(nei.sp_relative_pos is not None) == ifdistance
                for nei in list_neighs_info]))
    assert(all([nei.staticneighs == static for nei in list_neighs_info]))
ks = list_neighs_info[0].ks
# print ks
# print [nei.ks for nei in list_neighs_info]
assert(all([len(nei.ks) == len(ks) for nei in list_neighs_info]))
assert(all([nei.ks == ks for nei in list_neighs_info]))
if static:
sp_relative_pos = None if not ifdistance else []
iss, idxs = [], []
for nei in list_neighs_info:
if type(nei.idxs) != slice:
idxs += list(nei.idxs)
else:
idxs.append(nei.idxs)
iss += nei.iss
if ifdistance:
sp_relative_pos += list(nei.sp_relative_pos)
else:
sp_relative_pos = None if not ifdistance else []
iss = list(np.hstack([nei.iss for nei in list_neighs_info]))
idxs = []
for k in range(len(ks)):
idxs_k = []
sp_relative_pos_k = None if not ifdistance else []
for nei in list_neighs_info:
idxs_k += list(nei.idxs[k])
if ifdistance:
sp_relative_pos_k += list(nei.sp_relative_pos[k])
idxs.append(idxs_k)
if ifdistance:
sp_relative_pos.append(sp_relative_pos_k)
constant = list_neighs_info[0]._constant_neighs
    assert(all([nei._constant_neighs == constant for nei in list_neighs_info]))
if constant:
idxs = np.array(idxs)
## Formatting
level = 2 if static else 3
_, type_neighs, type_sp_rel_pos, _ = list_neighs_info[0].format_set_info
format_get_info, format_get_k_info = list_neighs_info[0].format_get_info
type_neighs = 'array' if constant else 'list'
nei = Neighs_Info(constant_neighs=constant, format_structure='tuple_only',
format_get_info=None, format_get_k_info=None,
format_set_iss='list', staticneighs=static,
ifdistance=ifdistance, type_neighs=type_neighs,
format_level=level)
neighs_nfo = (idxs, sp_relative_pos) if ifdistance else (idxs,)
nei.set(neighs_nfo, iss)
nei.set_ks(ks)
return nei
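# Illustrative usage sketch (hypothetical objects): given two Neighs_Info
# instances nei_a and nei_b sharing the same ks and staticneighs flag but
# covering disjoint iss, e.g. nei_a.iss = [0, 1] and nei_b.iss = [2],
#     joined = join_by_iss([nei_a, nei_b])
# returns a single Neighs_Info whose iss is [0, 1, 2] and whose idxs and
# sp_relative_pos are concatenated along the iss axis.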
|
tgquintela/pythonUtils
|
pythonUtils/NeighsManager/neighs_info.py
|
Python
|
mit
| 99,145
|
from cadnano.gui.views.styles import *
from PyQt5.QtGui import QColor, QFont, QFontMetricsF
# Path Sizing
VIRTUALHELIXHANDLEITEM_RADIUS = 30
VIRTUALHELIXHANDLEITEM_STROKE_WIDTH = 2
PATH_BASE_WIDTH = 20 # used to size bases (grid squares, handles, etc)
PATH_HELIX_HEIGHT = 2 * PATH_BASE_WIDTH # staple + scaffold
PATH_HELIX_PADDING = 50 # gap between PathHelix objects in path view
PATH_GRID_STROKE_WIDTH = 0.5
SLICE_HANDLE_STROKE_WIDTH = 1
PATH_STRAND_STROKE_WIDTH = 3
PATH_STRAND_HIGHLIGHT_STROKE_WIDTH = 8
PATH_SELECTBOX_STROKE_WIDTH = 1.5
PCH_BORDER_PADDING = 1
PATH_BASE_HL_STROKE_WIDTH = 2 # PathTool highlight box
MINOR_GRID_STROKE_WIDTH = 0.5
MAJOR_GRID_STROKE_WIDTH = 0.5
OLIGO_LEN_BELOW_WHICH_HIGHLIGHT = 20
OLIGO_LEN_ABOVE_WHICH_HIGHLIGHT = 49
# Path Drawing
PATH_XOVER_LINE_SCALE_X = 0.035
PATH_XOVER_LINE_SCALE_Y = 0.035
# Path Colors
SCAFFOLD_BKG_FILL = QColor(230, 230, 230)
ACTIVE_SLICE_HANDLE_FILL = QColor(255, 204, 153, 128) # ffcc99
ACTIVE_SLICE_HANDLE_STROKE = QColor(204, 102, 51, 128) # cc6633
MINOR_GRID_STROKE = QColor(204, 204, 204) # cccccc
MAJOR_GRID_STROKE = QColor(153, 153, 153) # 999999
SCAF_STROKE = QColor(0, 102, 204) # 0066cc
HANDLE_FILL = QColor(0, 102, 204) # 0066cc
PXI_SCAF_STROKE = QColor(0, 102, 204, 153)
PXI_STAP_STROKE = QColor(204, 0, 0, 153)
PXI_DISAB_STROKE = QColor(204, 204, 204, 255)
RED_STROKE = QColor(204, 0, 0)
ERASE_FILL = QColor(204, 0, 0, 63)
FORCE_FILL = QColor(0, 255, 255, 63)
BREAK_FILL = QColor(204, 0, 0, 255)
COLORBOX_FILL = QColor(204, 0, 0)
COLORBOX_STROKE = QColor(102, 102, 102)
STAP_COLORS = [QColor(204, 0, 0),
QColor(247, 67, 8),
QColor(247, 147, 30),
QColor(170, 170, 0),
QColor(87, 187, 0),
QColor(0, 114, 0),
QColor(3, 182, 162),
QColor(23, 0, 222),
QColor(115, 0, 222),
QColor(184, 5, 108),
QColor(51, 51, 51),
QColor(136, 136, 136)]
SCAF_COLORS = [QColor(0, 102, 204)]
# QColor(64, 138, 212),
# QColor(0, 38, 76),
# QColor(23, 50, 76),
# QColor(0, 76, 153)]
DEFAULT_STAP_COLOR = "#888888"
DEFAULT_SCAF_COLOR = "#0066cc"
SELECTED_COLOR = QColor(255, 51, 51)
# brightColors = [QColor() for i in range(10)]
# for i in range(len(brightColors)):
# brightColors[i].setHsvF(i/12.0, 1.0, 1.0)
# bright_palette = Palette(brightColors)
# cadnn1_palette = Palette(cadnn1Colors)
# default_palette = cadnn1_palette
SELECTIONBOX_PEN_WIDTH = 2.5
# Loop/Insertion path details
INSERTWIDTH = 2
SKIPWIDTH = 2
# Add Sequence Tool
INVALID_DNA_COLOR = QColor(204, 0, 0)
UNDERLINE_INVALID_DNA = True
#Z values
#bottom
ZACTIVESLICEHANDLE = 10
ZPATHHELIXGROUP = 20
ZPATHHELIX = 30
ZPATHSELECTION = 40
ZXOVERITEM = 90
ZPATHTOOL = 130
ZSTRANDITEM = 140
ZENDPOINTITEM = 150
ZINSERTHANDLE = 160
#top
# Sequence / font stuff
SEQUENCEFONT = None
SEQUENCEFONTH = 15
SEQUENCEFONTCHARWIDTH = 12
SEQUENCEFONTCHARHEIGHT = 12
SEQUENCEFONTEXTRAWIDTH = 3
SEQUENCETEXTXCENTERINGOFFSET = 0
def setFontMetrics():
""" Application must be running before you mess
too much with Fonts in Qt5
"""
    global SEQUENCEFONT
    global SEQUENCEFONTH
global SEQUENCEFONTMETRICS
global SEQUENCEFONTCHARWIDTH
global SEQUENCEFONTCHARHEIGHT
global SEQUENCEFONTEXTRAWIDTH
global SEQUENCETEXTXCENTERINGOFFSET
global SEQUENCETEXTYCENTERINGOFFSET
SEQUENCEFONT = QFont("Monaco")
if hasattr(QFont, 'Monospace'):
SEQUENCEFONT.setStyleHint(QFont.Monospace)
SEQUENCEFONT.setFixedPitch(True)
SEQUENCEFONTH = int(PATH_BASE_WIDTH / 3.)
SEQUENCEFONT.setPixelSize(SEQUENCEFONTH)
SEQUENCEFONTMETRICS = QFontMetricsF(SEQUENCEFONT)
SEQUENCEFONTCHARWIDTH = SEQUENCEFONTMETRICS.width("A")
SEQUENCEFONTCHARHEIGHT = SEQUENCEFONTMETRICS.height()
SEQUENCEFONTEXTRAWIDTH = PATH_BASE_WIDTH - SEQUENCEFONTCHARWIDTH
SEQUENCEFONT.setLetterSpacing(QFont.AbsoluteSpacing,
SEQUENCEFONTEXTRAWIDTH)
SEQUENCETEXTXCENTERINGOFFSET = SEQUENCEFONTEXTRAWIDTH / 4.
SEQUENCETEXTYCENTERINGOFFSET = PATH_BASE_WIDTH * 0.6
#end def
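# Illustrative usage (assumption: a QApplication must already exist for
# font metrics to be measurable), e.g. during application startup:
#     from PyQt5.QtWidgets import QApplication
#     app = QApplication([])
#     setFontMetrics()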
XOVER_LABEL_FONT = QFont(THE_FONT, THE_FONT_SIZE, QFont.Bold)
VIRTUALHELIXHANDLEITEM_FONT = QFont(THE_FONT, 3*THE_FONT_SIZE, QFont.Bold)
XOVER_LABEL_COLOR = QColor(0,0,0)
|
amylittleyang/OtraCAD
|
cadnano25/cadnano/gui/views/pathview/pathstyles.py
|
Python
|
mit
| 4,318
|
import pytest
from plumbum.colorlib.styles import ANSIStyle, Color, AttributeNotFound, ColorNotFound
from plumbum.colorlib.names import color_html, FindNearest
class TestNearestColor:
def test_exact(self):
assert FindNearest(0,0,0).all_fast() == 0
for n,color in enumerate(color_html):
# Ignoring duplicates
if n not in (16, 21, 46, 51, 196, 201, 226, 231, 244):
rgb = (int(color[1:3],16), int(color[3:5],16), int(color[5:7],16))
assert FindNearest(*rgb).all_fast() == n
def test_nearby(self):
assert FindNearest(1,2,2).all_fast() == 0
assert FindNearest(7,7,9).all_fast() == 232
def test_simplecolor(self):
assert FindNearest(1,2,4).only_basic() == 0
assert FindNearest(0,255,0).only_basic() == 2
assert FindNearest(100,100,0).only_basic() == 3
assert FindNearest(140,140,140).only_basic() == 7
class TestColorLoad:
def test_rgb(self):
blue = Color(0,0,255) # Red, Green, Blue
assert blue.rgb == (0,0,255)
def test_simple_name(self):
green = Color.from_simple('green')
assert green.number == 2
def test_different_names(self):
assert Color('Dark Blue') == Color('Dark_Blue')
assert Color('Dark_blue') == Color('Dark_Blue')
assert Color('DARKBLUE') == Color('Dark_Blue')
assert Color('DarkBlue') == Color('Dark_Blue')
assert Color('Dark Green') == Color('Dark_Green')
def test_loading_methods(self):
assert Color("Yellow") == Color.from_full("Yellow")
assert (Color.from_full("yellow").representation !=
Color.from_simple("yellow").representation)
class TestANSIColor:
@classmethod
def setup_class(cls):
ANSIStyle.use_color = True
def test_ansi(self):
assert str(ANSIStyle(fgcolor=Color('reset'))) == '\033[39m'
assert str(ANSIStyle(fgcolor=Color.from_full('green'))) == '\033[38;5;2m'
assert str(ANSIStyle(fgcolor=Color.from_simple('red'))) == '\033[31m'
class TestNearestColorExhaustive:
def test_allcolors(self):
myrange = (0,1,2,5,17,39,48,73,82,140,193,210,240,244,250,254,255)
for r in myrange:
for g in myrange:
for b in myrange:
near = FindNearest(r,g,b)
assert near.all_slow() == near.all_fast(), 'Tested: {0}, {1}, {2}'.format(r,g,b)
|
vodik/plumbum
|
tests/test_color.py
|
Python
|
mit
| 2,444
|
#----------------------------------------------------------------------
# Copyright (c) 2014 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
from sfa.trust.abac_credential import ABACCredential
import json
import re
# Factory for creating credentials of different sorts by type.
# Specifically, this factory can create standard SFA credentials
# and ABAC credentials from XML strings based on their identifying content
class CredentialFactory:
UNKNOWN_CREDENTIAL_TYPE = 'geni_unknown'
# Static Credential class method to determine the type of a credential
# string depending on its contents
@staticmethod
def getType(credString):
        credString_nowhitespace = re.sub(r'\s', '', credString)
if credString_nowhitespace.find('<type>abac</type>') > -1:
return ABACCredential.ABAC_CREDENTIAL_TYPE
elif credString_nowhitespace.find('<type>privilege</type>') > -1:
return Credential.SFA_CREDENTIAL_TYPE
else:
st = credString_nowhitespace.find('<type>')
end = credString_nowhitespace.find('</type>', st)
return credString_nowhitespace[st + len('<type>'):end]
# return CredentialFactory.UNKNOWN_CREDENTIAL_TYPE
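    # Illustrative example: whitespace is stripped before matching, so a
    # credential containing '<type> abac </type>' is still detected:
    #     CredentialFactory.getType('...<type> abac </type>...')
    #     # -> ABACCredential.ABAC_CREDENTIAL_TYPE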
# Static Credential class method to create the appropriate credential
# (SFA or ABAC) depending on its type
@staticmethod
def createCred(credString=None, credFile=None):
if not credString and not credFile:
raise Exception("CredentialFactory.createCred called with no argument")
if credFile:
try:
credString = open(credFile).read()
except Exception, e:
logger.info("Error opening credential file %s: %s" % credFile, e)
return None
# Try to treat the file as JSON, getting the cred_type from the struct
try:
credO = json.loads(credString, encoding='ascii')
if credO.has_key('geni_value') and credO.has_key('geni_type'):
cred_type = credO['geni_type']
credString = credO['geni_value']
except Exception, e:
# It wasn't a struct. So the credString is XML. Pull the type directly from the string
logger.debug("Credential string not JSON: %s" % e)
cred_type = CredentialFactory.getType(credString)
if cred_type == Credential.SFA_CREDENTIAL_TYPE:
try:
cred = Credential(string=credString)
return cred
except Exception, e:
if credFile:
msg = "credString started: %s" % credString[:50]
raise Exception("%s not a parsable SFA credential: %s. " % (credFile, e) + msg)
else:
raise Exception("SFA Credential not parsable: %s. Cred start: %s..." % (e, credString[:50]))
elif cred_type == ABACCredential.ABAC_CREDENTIAL_TYPE:
try:
cred = ABACCredential(string=credString)
return cred
except Exception, e:
if credFile:
raise Exception("%s not a parsable ABAC credential: %s" % (credFile, e))
else:
raise Exception("ABAC Credential not parsable: %s. Cred start: %s..." % (e, credString[:50]))
else:
raise Exception("Unknown credential type '%s'" % cred_type)
if __name__ == "__main__":
c2 = open('/tmp/sfa.xml').read()
cred1 = CredentialFactory.createCred(credFile='/tmp/cred.xml')
cred2 = CredentialFactory.createCred(credString=c2)
print "C1 = %s" % cred1
print "C2 = %s" % cred2
c1s = cred1.dump_string()
print "C1 = %s" % c1s
# print "C2 = %s" % cred2.dump_string()
|
yippeecw/sfa
|
sfa/trust/credential_factory.py
|
Python
|
mit
| 5,023
|
from __future__ import absolute_import, division
from klein.app import Klein, run, route, resource
from klein._plating import Plating
from ._version import __version__ as _incremental_version
# Make it a str, for backwards compatibility
__version__ = _incremental_version.base()
__author__ = "The Klein contributors (see AUTHORS)"
__license__ = "MIT"
__copyright__ = "Copyright 2016 {0}".format(__author__)
__all__ = [
'Klein',
'Plating',
'__author__',
'__copyright__',
'__license__',
'__version__',
'resource',
'route',
'run',
]
|
joac/klein
|
src/klein/__init__.py
|
Python
|
mit
| 572
|
from dask.callbacks import Callback
from os import getcwd, remove
from os.path import join, exists
from dask.diagnostics import ProgressBar
from dask.multiprocessing import get as get_proc
import toolz
import json
class NekCallback(Callback):
def __init__(self, case):
self.case = case
self.cwd = getcwd()
self.cache = {}
if exists(join(self.cwd, "HALT")):
remove(join(self.cwd, "HALT"))
def _posttask(self, key, result, dsk, state, id):
self.cache.update(state['cache'])
with open(join(self.cwd, "{}.cache".format(self.case["prefix"])), "w") as f:
json.dump(self.cache, f)
if exists(join(self.cwd, "HALT")):
for k in state['ready']:
state['cache'][k] = None
for k in state['waiting']:
state['cache'][k] = None
state['ready'] = []
state['waiting'] = []
return
def run_all(values, base, get=get_proc, num_workers=4):
full_dask = toolz.merge(val.dask for val in values)
full_keys = [val._key for val in values]
cache = {}
if exists("{}.cache".format(base["prefix"])):
with open("{}.cache".format(base["prefix"]), "r") as f:
cache = json.load(f)
full_dask.update(cache)
with ProgressBar(), NekCallback(base) as rprof:
res = get(full_dask, full_keys, cache=cache, num_workers=num_workers, optimize_graph=False)
return res
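# Illustrative usage sketch (hypothetical names): `values` are dask-backed
# objects built from a shared `base` case dict carrying a "prefix" key, so
# the merged graph can resume from "<prefix>.cache" and honour a HALT file:
#     results = run_all([sim_a, sim_b], base={"prefix": "run0"})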
|
NekBox/nekpy
|
nekpy/dask/runner.py
|
Python
|
mit
| 1,462
|
from .base import *
from .controller import *
|
jdzero/foundation
|
foundation/backend/views/__init__.py
|
Python
|
mit
| 46
|
"""
WSGI config for geology project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geology.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
MuhammadSohaib/colorado-geology-geodjango
|
geology/geology/wsgi.py
|
Python
|
mit
| 1,562
|
import theano
import numpy
import scipy
from theano import tensor
from blocks.bricks import Initializable, Linear
from blocks.bricks.parallel import Parallel
from blocks.bricks.base import lazy, application
from blocks.bricks.attention import (
GenericSequenceAttention, SequenceContentAttention,
ShallowEnergyComputer)
from blocks.utils import (put_hook, ipdb_breakpoint, shared_floatx,
shared_floatx_nans)
from lvsr.expressions import conv1d
floatX = theano.config.floatX
import logging
logger = logging.getLogger(__name__)
class Conv1D(Initializable):
def __init__(self, num_filters, filter_length, **kwargs):
self.num_filters = num_filters
self.filter_length = filter_length
super(Conv1D, self).__init__(**kwargs)
def _allocate(self):
self.parameters = [shared_floatx_nans((self.num_filters, self.filter_length),
name="filters")]
def _initialize(self):
self.weights_init.initialize(self.parameters[0], self.rng)
def apply(self, input_):
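        # Note: with border_mode="full" the output length along the last
        # axis is input_length + filter_length - 1; callers trim conv_n
        # samples from each side afterwards (see compute_energies in
        # SequenceContentAndConvAttention).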
return conv1d(input_, self.parameters[0], border_mode="full")
class SequenceContentAndConvAttention(GenericSequenceAttention, Initializable):
@lazy()
def __init__(self, match_dim, conv_n, conv_num_filters=1,
state_transformer=None,
attended_transformer=None, energy_computer=None,
prior=None, energy_normalizer=None, **kwargs):
super(SequenceContentAndConvAttention, self).__init__(**kwargs)
if not state_transformer:
state_transformer = Linear(use_bias=False)
self.match_dim = match_dim
self.state_transformer = state_transformer
self.state_transformers = Parallel(input_names=self.state_names,
prototype=state_transformer,
name="state_trans")
if not attended_transformer:
# Only this contributor to the match vector
# is allowed to have biases
attended_transformer = Linear(name="preprocess")
if not energy_normalizer:
energy_normalizer = 'softmax'
self.energy_normalizer = energy_normalizer
if not energy_computer:
energy_computer = ShallowEnergyComputer(
name="energy_comp",
use_bias=self.energy_normalizer != 'softmax')
self.filter_handler = Linear(name="handler", use_bias=False)
self.attended_transformer = attended_transformer
self.energy_computer = energy_computer
if not prior:
prior = dict(type='expanding', initial_begin=0, initial_end=10000,
min_speed=0, max_speed=0)
self.prior = prior
self.conv_n = conv_n
self.conv_num_filters = conv_num_filters
self.conv = Conv1D(conv_num_filters, 2 * conv_n + 1)
self.children = [self.state_transformers, self.attended_transformer,
self.energy_computer, self.filter_handler, self.conv]
def _push_allocation_config(self):
self.state_transformers.input_dims = self.state_dims
self.state_transformers.output_dims = [self.match_dim
for name in self.state_names]
self.attended_transformer.input_dim = self.attended_dim
self.attended_transformer.output_dim = self.match_dim
self.energy_computer.input_dim = self.match_dim
self.energy_computer.output_dim = 1
self.filter_handler.input_dim = self.conv_num_filters
self.filter_handler.output_dim = self.match_dim
@application
def compute_energies(self, attended, preprocessed_attended,
previous_weights, states):
if not preprocessed_attended:
preprocessed_attended = self.preprocess(attended)
transformed_states = self.state_transformers.apply(as_dict=True,
**states)
# Broadcasting of transformed states should be done automatically
match_vectors = sum(transformed_states.values(),
preprocessed_attended)
conv_result = self.conv.apply(previous_weights)
match_vectors += self.filter_handler.apply(
conv_result[:, :, self.conv_n:-self.conv_n]
.dimshuffle(0, 2, 1)).dimshuffle(1, 0, 2)
energies = self.energy_computer.apply(match_vectors).reshape(
match_vectors.shape[:-1], ndim=match_vectors.ndim - 1)
return energies
@staticmethod
def mask_row(offset, length, empty_row):
return tensor.set_subtensor(empty_row[offset:offset+length], 1)
@application(outputs=['weighted_averages', 'weights', 'energies', 'step'])
def take_glimpses(self, attended, preprocessed_attended=None,
attended_mask=None, weights=None, step=None, **states):
# Cut the considered window.
p = self.prior
length = attended.shape[0]
prior_type = p.get('type', 'expanding')
if prior_type=='expanding':
begin = p['initial_begin'] + step[0] * p['min_speed']
end = p['initial_end'] + step[0] * p['max_speed']
begin = tensor.maximum(0, tensor.minimum(length - 1, begin))
end = tensor.maximum(0, tensor.minimum(length, end))
additional_mask = None
elif prior_type.startswith('window_around'):
#check whether we want the mean or median!
if prior_type == 'window_around_mean':
position_in_attended = tensor.arange(length, dtype=floatX)[None, :]
expected_last_source_pos = (weights * position_in_attended).sum(axis=1)
elif prior_type == 'window_around_median':
ali_to_05 = tensor.extra_ops.cumsum(weights, axis=1) - 0.5
ali_to_05 = (ali_to_05>=0)
ali_median_pos = ali_to_05[:,1:] - ali_to_05[:,:-1]
expected_last_source_pos = tensor.argmax(ali_median_pos, axis=1)
expected_last_source_pos = theano.gradient.disconnected_grad(
expected_last_source_pos)
else:
raise ValueError
#the window taken around each element
begins = tensor.floor(expected_last_source_pos - p['before'])
ends = tensor.ceil(expected_last_source_pos + p['after'])
#the global window to optimize computations
begin = tensor.maximum(0, begins.min()).astype('int64')
end = tensor.minimum(length, ends.max()).astype('int64')
#the new mask, already cut to begin:end
position_in_attended_cut = tensor.arange(
begin * 1., end * 1., 1., dtype=floatX)[None, :]
additional_mask = ((position_in_attended_cut > begins[:,None]) *
(position_in_attended_cut < ends[:,None]))
else:
raise Exception("Unknown prior type: %s", prior_type)
begin = tensor.floor(begin).astype('int64')
end = tensor.ceil(end).astype('int64')
attended_cut = attended[begin:end]
preprocessed_attended_cut = (preprocessed_attended[begin:end]
if preprocessed_attended else None)
attended_mask_cut = (
(attended_mask[begin:end] if attended_mask else None)
* (additional_mask.T if additional_mask else 1))
weights_cut = weights[:, begin:end]
# Call
energies_cut = self.compute_energies(attended_cut, preprocessed_attended_cut,
weights_cut, states)
weights_cut = self.compute_weights(energies_cut, attended_mask_cut)
weighted_averages = self.compute_weighted_averages(weights_cut, attended_cut)
# Paste
new_weights = new_energies = tensor.zeros_like(weights.T)
new_weights = tensor.set_subtensor(new_weights[begin:end],
weights_cut)
new_energies = tensor.set_subtensor(new_energies[begin:end],
energies_cut)
return weighted_averages, new_weights.T, new_energies.T, step + 1
@take_glimpses.property('inputs')
def take_glimpses_inputs(self):
return (['attended', 'preprocessed_attended',
'attended_mask', 'weights', 'step'] +
self.state_names)
@application
def compute_weights(self, energies, attended_mask):
if self.energy_normalizer == 'softmax':
logger.debug("Using softmax attention weights normalization")
energies = energies - energies.max(axis=0)
unnormalized_weights = tensor.exp(energies)
elif self.energy_normalizer == 'logistic':
logger.debug("Using smoothfocus (logistic sigm) "
"attention weights normalization")
unnormalized_weights = tensor.nnet.sigmoid(energies)
elif self.energy_normalizer == 'relu':
logger.debug("Using ReLU attention weights normalization")
unnormalized_weights = tensor.maximum(energies/1000., 0.0)
else:
raise Exception("Unknown energey_normalizer: {}"
.format(self.energy_computer))
if attended_mask:
unnormalized_weights *= attended_mask
# If mask consists of all zeros use 1 as the normalization coefficient
normalization = (unnormalized_weights.sum(axis=0) +
tensor.all(1 - attended_mask, axis=0))
return unnormalized_weights / normalization
@application
def initial_glimpses(self, batch_size, attended):
return ([tensor.zeros((batch_size, self.attended_dim))]
+ 2 * [tensor.concatenate([
tensor.ones((batch_size, 1)),
tensor.zeros((batch_size, attended.shape[0] - 1))],
axis=1)]
+ [tensor.zeros((batch_size,), dtype='int64')])
@initial_glimpses.property('outputs')
def initial_glimpses_outputs(self):
        return ['weighted_averages', 'weights', 'energies', 'step']
@application(inputs=['attended'], outputs=['preprocessed_attended'])
def preprocess(self, attended):
return self.attended_transformer.apply(attended)
def get_dim(self, name):
if name in ['weighted_averages']:
return self.attended_dim
if name in ['weights', 'energies', 'step']:
return 0
return super(SequenceContentAndConvAttention, self).get_dim(name)
|
rizar/attention-lvcsr
|
lvsr/bricks/attention.py
|
Python
|
mit
| 10,681
|
from django.conf.urls import url, patterns
urlpatterns = patterns(
"phileo.views",
url(r"^like/(?P<content_type_id>\d+):(?P<object_id>\d+)/$", "like_toggle", name="phileo_like_toggle")
)
|
rizumu/pinax-likes
|
phileo/urls.py
|
Python
|
mit
| 197
|