code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Driver script for a single echoRD test run ("gen_test3213").
mcinif='mcini_weihera'  # model-configuration initialisation module name
mcpick='gen_weihera.pickle'  # pickled model-configuration state to load
runname='gen_test3213'  # unique name identifying this run's outputs
pathdir='/beegfs/work/ka_oj4748/echoRD'  # location of the echoRD package
wdir='/beegfs/work/ka_oj4748/gen_tests'  # working directory for run results
update_prec=0.06  # precipitation update value -- TODO confirm units with echoRD docs
legacy_pick=True  # read the pickle using the legacy format
import sys
sys.path.append(pathdir)  # make run_echoRD importable from the package dir
import run_echoRD as rE
rE.echoRD_job(mcinif=mcinif,mcpick=mcpick,runname=runname,wdir=wdir,pathdir=pathdir,update_prec=update_prec,legacy_pick=legacy_pick)
| cojacoo/testcases_echoRD | gen_test3213a.py | Python | gpl-3.0 | 383 |
# This file is part of EAP.
#
# EAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# EAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with EAP. If not, see <http://www.gnu.org/licenses/>.
import random
import operator
import numpy
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
def if_then_else(condition, out1, out2):
    """Return *out1* when *condition* is truthy, otherwise *out2*."""
    if condition:
        return out1
    return out2
# Initialize Multiplexer problem input and output vectors
MUX_SELECT_LINES = 3
MUX_IN_LINES = 2 ** MUX_SELECT_LINES
MUX_TOTAL_LINES = MUX_SELECT_LINES + MUX_IN_LINES
# input : [A0 A1 A2 D0 D1 D2 D3 D4 D5 D6 D7] for a 8-3 mux
# Each row of ``inputs`` is the MUX_TOTAL_LINES-bit binary expansion of the
# row index (most-significant bit first); ``outputs`` holds the data line
# selected by the three address bits.
inputs = [[0] * MUX_TOTAL_LINES for i in range(2 ** MUX_TOTAL_LINES)]
outputs = [None] * (2 ** MUX_TOTAL_LINES)
for i in range(2 ** MUX_TOTAL_LINES):
    # Fill the input bits: position j holds the (MUX_TOTAL_LINES-1-j)-th
    # binary digit of i.  Bitwise extraction keeps everything integer,
    # whereas the former ``divisor /= 2`` produced floats under Python 3.
    for j in range(MUX_TOTAL_LINES):
        if i & (1 << (MUX_TOTAL_LINES - 1 - j)):
            inputs[i][j] = 1
    # Determine the corresponding output: the select value (A0 weighted 1,
    # A1 weighted 2, A2 weighted 4) chooses which data line is echoed.
    indexOutput = MUX_SELECT_LINES
    for j, k in enumerate(inputs[i][:MUX_SELECT_LINES]):
        indexOutput += k * 2**j
    outputs[i] = inputs[i][indexOutput]
# Primitive set: one boolean input terminal per multiplexer line (IN0..IN10).
pset = gp.PrimitiveSet("MAIN", MUX_TOTAL_LINES, "IN")
pset.addPrimitive(operator.and_, 2)
pset.addPrimitive(operator.or_, 2)
pset.addPrimitive(operator.not_, 1)
pset.addPrimitive(if_then_else, 3)
# Boolean constants available as terminals.
pset.addTerminal(1)
pset.addTerminal(0)
# Single-objective maximisation: number of correct truth-table rows.
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Individuals are full trees of depth 2 to 4.
toolbox.register("expr", gp.genFull, pset=pset, min_=2, max_=4)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)
def evalMultiplexer(individual):
    """Score an individual by the number of truth-table rows it reproduces.

    The tree is compiled to a callable and evaluated against every row of
    the multiplexer truth table; the fitness is the count of rows where the
    program's output matches the expected output, returned as a one-element
    tuple (DEAP fitness convention).
    """
    program = toolbox.compile(expr=individual)
    correct = 0
    for row, expected in zip(inputs, outputs):
        if program(*row) == expected:
            correct += 1
    return (correct,)
toolbox.register("evaluate", evalMultiplexer)
# Tournament selection with fairly strong selection pressure.
toolbox.register("select", tools.selTournament, tournsize=7)
toolbox.register("mate", gp.cxOnePoint)
# Mutation replaces a subtree with a freshly grown tree of depth 0-2.
toolbox.register("expr_mut", gp.genGrow, min_=0, max_=2)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
def main():
    """Run the evolutionary loop on the multiplexer problem.

    Returns the final population, the statistics object, and the
    hall-of-fame containing the single best individual seen.
    """
    # random.seed(10)
    population = toolbox.population(n=40)
    hall_of_fame = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    for label, reducer in (("avg", numpy.mean), ("std", numpy.std),
                           ("min", numpy.min), ("max", numpy.max)):
        stats.register(label, reducer)
    # 0.8 crossover probability, 0.1 mutation probability, 40 generations.
    algorithms.eaSimple(population, toolbox, 0.8, 0.1, 40, stats,
                        halloffame=hall_of_fame)
    return population, stats, hall_of_fame
if __name__ == "__main__":
    main()
| marcioweck/PSSLib | reference/deap/examples/gp/multiplexer.py | Python | lgpl-3.0 | 3,307 |
from rest_framework import viewsets, permissions
import models
import serializers
class PageViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.
    Only admin users may access this endpoint (see ``permission_classes``).
    """
    queryset = models.Page.objects.all()
    serializer_class = serializers.PageSerializer
    permission_classes = (permissions.IsAdminUser, )
    def pre_save(self, obj):
        # Stamp the requesting user as the page owner before saving.
        # NOTE(review): ``pre_save`` is a DRF 2.x hook and is NOT called by
        # DRF >= 3.0 (use ``perform_create``/``perform_update`` there) --
        # confirm the installed rest_framework version.
        obj.owner = self.request.user
class PostViewSet(viewsets.ModelViewSet):
    """
    This viewset automatically provides `list`, `create`, `retrieve`,
    `update` and `destroy` actions.
    Only admin users may access this endpoint (see ``permission_classes``).
    """
    queryset = models.Post.objects.all()
    serializer_class = serializers.PostSerializer
    permission_classes = (permissions.IsAdminUser, )
    def pre_save(self, obj):
        # Stamp the requesting user as the post owner before saving.
        # NOTE(review): ``pre_save`` is a DRF 2.x hook and is NOT called by
        # DRF >= 3.0 (use ``perform_create``/``perform_update`` there) --
        # confirm the installed rest_framework version.
        obj.owner = self.request.user
| esitamon/django-skeleton | app/website/api.py | Python | gpl-3.0 | 840 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# #############################################################################
# Create the data
# Euler's number; used as the scale basis for the mixture density below.
e = np.exp(1)
np.random.seed(4)  # fixed seed so the generated figures are reproducible
def pdf(x):
    """Density of an equal-weight mixture of two zero-mean normals.

    One component is narrow (scale 0.25/e) and one is wide (scale 4/e),
    giving a sharply peaked but heavy-tailed weighting for the point cloud.
    """
    narrow = stats.norm(scale=0.25 / e).pdf(x)
    wide = stats.norm(scale=4 / e).pdf(x)
    return 0.5 * (narrow + wide)
# NOTE: the order of these np.random calls is significant -- it fixes the
# random stream drawn after np.random.seed(4) above.
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
# Weight each sample by the mixture density; squashing z (note pdf(5 * z))
# makes the cloud very flat in that direction.
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
# Linearly mix the axes to produce correlated coordinates (a, b, c).
a = x + y
b = 2 * y
c = a - b + z
# Normalise a and b to comparable variance.
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
# #############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
    """Scatter the module-level point cloud (a, b, c) in 3D and overlay the
    plane spanned by the two leading principal components.

    :param fig_num: matplotlib figure number to (re)use.
    :param elev: elevation viewing angle, in degrees.
    :param azim: azimuth viewing angle, in degrees.
    """
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
    # Plot every 10th point to keep rendering fast; colour encodes density.
    ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
    Y = np.c_[a, b, c]
    # Using SciPy's SVD, this would be:
    # _, pca_score, Vt = scipy.linalg.svd(Y, full_matrices=False)
    pca = PCA(n_components=3)
    pca.fit(Y)
    V = pca.components_.T
    # Scale the principal axes up for display.
    x_pca_axis, y_pca_axis, z_pca_axis = 3 * V
    # Build 2x2 coordinate grids spanning the first two principal axes.
    x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
    y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
    z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
    x_pca_plane.shape = (2, 2)
    y_pca_plane.shape = (2, 2)
    z_pca_plane.shape = (2, 2)
    ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
    # Hide tick labels: orientation, not scale, is the point of the figure.
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
# First viewpoint: highlights how flat the cloud is along its third axis.
elev = -40
azim = -80
plot_figs(1, elev, azim)
# Second viewpoint of the same cloud from a different angle.
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| glemaitre/scikit-learn | examples/decomposition/plot_pca_3d.py | Python | bsd-3-clause | 2,311 |
from collections import deque
import io
import itertools
import logging
import multiprocessing
import multiprocessing.pool
import numpy
import PIL.Image
import PIL.ImageFile
from smqtk.algorithms.descriptor_generator import \
DescriptorGenerator, \
DFLT_DESCRIPTOR_FACTORY
from smqtk.utils.bin_utils import report_progress
try:
import caffe
except ImportError, ex:
logging.getLogger(__name__).warning("Failed to import caffe module: %s",
str(ex))
caffe = None
__author__ = 'paul.tunison@kitware.com, jacob.becker@kitware.com'
__all__ = [
"CaffeDescriptorGenerator",
]
class CaffeDescriptorGenerator (DescriptorGenerator):
    """
    Compute images against a Caffe model, extracting a layer as the content
    descriptor.
    """

    @classmethod
    def is_usable(cls):
        # Usable only when the optional caffe module imported successfully.
        valid = caffe is not None
        if not valid:
            cls.logger().debug("Caffe python module cannot be imported")
        return valid

    def __init__(self, network_prototxt_filepath, network_model_filepath,
                 image_mean_filepath,
                 return_layer='fc7',
                 batch_size=1, use_gpu=False, gpu_device_id=0,
                 network_is_bgr=True, data_layer='data',
                 load_truncated_images=False, pixel_rescale=None,
                 input_scale=None):
        """
        Create a Caffe CNN descriptor generator
        :param network_prototxt_filepath: Path to the text file defining the
            network layout.
        :type network_prototxt_filepath: str
        :param network_model_filepath: The path to the trained ``.caffemodel``
            file to use.
        :type network_model_filepath: str
        :param image_mean_filepath: Path to the image mean ``.binaryproto``
            file, a ``.npy`` file, or a file-like object that could otherwise be
            passed to ``numpy.load``
        :type image_mean_filepath: str | file | StringIO.StringIO
        :param return_layer: The label of the layer we take data from to compose
            output descriptor vector.
        :type return_layer: str
        :param batch_size: The maximum number of images to process in one feed
            forward of the network. This is especially important for GPUs since
            they can only process a batch that will fit in the GPU memory space.
        :type batch_size: int
        :param use_gpu: If Caffe should try to use the GPU
        :type use_gpu: bool
        :param gpu_device_id: Integer ID of the GPU device to use. Only used if
            ``use_gpu`` is True.
        :type gpu_device_id: int
        :param network_is_bgr: If the network is expecting BGR format pixels.
            For example, the BVLC default caffenet does (thus the default is
            True).
        :type network_is_bgr: bool
        :param data_layer: String label of the network's data layer.
            We assume its 'data' by default.
        :type data_layer: str
        :param load_truncated_images: If we should be lenient and force loading
            of truncated image bytes. This is False by default.
        :type load_truncated_images: bool
        :param pixel_rescale: Re-scale image pixel values before being
            transformed by caffe (before mean subtraction, etc)
            into the given tuple ``(min, max)`` range. By default, images are
            loaded in the ``[0, 255]`` range. Refer to the image mean being used
            for desired input pixel scale.
        :type pixel_rescale: None | (float, float)
        :param input_scale: Optional floating-point scalar value to scale values
            of caffe network input data AFTER mean subtraction. This value is
            directly multiplied against the pixel values.
        :type input_scale: None | float
        """
        super(CaffeDescriptorGenerator, self).__init__()
        self.network_prototxt_filepath = str(network_prototxt_filepath)
        self.network_model_filepath = str(network_model_filepath)
        self.image_mean_filepath = image_mean_filepath
        self.return_layer = str(return_layer)
        self.batch_size = int(batch_size)
        self.use_gpu = bool(use_gpu)
        self.gpu_device_id = int(gpu_device_id)
        self.network_is_bgr = bool(network_is_bgr)
        self.data_layer = str(data_layer)
        self.load_truncated_images = bool(load_truncated_images)
        self.pixel_rescale = pixel_rescale
        self.input_scale = input_scale
        assert self.batch_size > 0, \
            "Batch size must be greater than 0 (got %d)" \
            % self.batch_size
        # Fix: the check is >= 0, so the message must not claim "> 0".
        assert self.gpu_device_id >= 0, \
            "GPU Device ID must be greater than or equal to 0 (got %d)" \
            % self.gpu_device_id
        # Network setup variables
        self.network = None
        self.net_data_shape = ()
        self.transformer = None
        self._setup_network()

    def __getstate__(self):
        # Pickle state is exactly the construction configuration; network
        # objects are rebuilt in __setstate__.
        return self.get_config()

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._setup_network()

    def _setup_network(self):
        """
        Initialize Caffe and the network
        """
        if self.use_gpu:
            self._log.debug("Using GPU")
            caffe.set_device(self.gpu_device_id)
            caffe.set_mode_gpu()
        else:
            self._log.debug("using CPU")
            caffe.set_mode_cpu()
        # Questions:
        #   - ``caffe.TEST`` indicates phase of either TRAIN or TEST
        self._log.debug("Initializing network")
        self.network = caffe.Net(self.network_prototxt_filepath,
                                 self.network_model_filepath,
                                 caffe.TEST)
        # Assuming the network has a 'data' layer and notion of data shape
        self.net_data_shape = self.network.blobs[self.data_layer].data.shape
        self._log.debug("Network data shape: %s", self.net_data_shape)
        # Creating input data transformer
        self._log.debug("Initializing data transformer")
        self.transformer = caffe.io.Transformer(
            {self.data_layer: self.network.blobs[self.data_layer].data.shape}
        )
        self._log.debug("Initializing data transformer -> %s",
                        self.transformer.inputs)
        self._log.debug("Loading image mean")
        try:
            a = numpy.load(self.image_mean_filepath)
        except IOError:
            self._log.debug("Image mean file not a numpy array, assuming "
                            "protobuf binary.")
            blob = caffe.proto.caffe_pb2.BlobProto()
            with open(self.image_mean_filepath, 'rb') as f:
                blob.ParseFromString(f.read())
            a = numpy.array(caffe.io.blobproto_to_array(blob))
            assert a.shape[0] == 1, \
                "Input image mean blob protobuf consisted of more than one " \
                "image. Not sure how to handle this yet."
            a = a.reshape(a.shape[1:])
        # Fix: formerly "% a[0]" which attempted to %d-format a 2D array and
        # would itself raise when the assertion fired.
        assert a.shape[0] in [1, 3], \
            "Currently asserting that we either get 1 or 3 channel images. " \
            "Got a %d channel image." % a.shape[0]
        # Per-channel mean (average over both spatial dimensions).
        a_mean = a.mean(1).mean(1)
        self._log.debug("Initializing data transformer -- mean")
        self.transformer.set_mean(self.data_layer, a_mean)
        self._log.debug("Initializing data transformer -- transpose")
        self.transformer.set_transpose(self.data_layer, (2, 0, 1))
        if self.network_is_bgr:
            self._log.debug("Initializing data transformer -- channel swap")
            self.transformer.set_channel_swap(self.data_layer, (2, 1, 0))
        if self.input_scale:
            self._log.debug("Initializing data transformer -- input scale")
            self.transformer.set_input_scale(self.data_layer, self.input_scale)

    def get_config(self):
        """
        Return a JSON-compliant dictionary that could be passed to this class's
        ``from_config`` method to produce an instance with identical
        configuration.
        In the common case, this involves naming the keys of the dictionary
        based on the initialization argument names as if it were to be passed
        to the constructor via dictionary expansion.
        :return: JSON type compliant configuration dictionary.
        :rtype: dict
        """
        return {
            "network_prototxt_filepath": self.network_prototxt_filepath,
            "network_model_filepath": self.network_model_filepath,
            "image_mean_filepath": self.image_mean_filepath,
            "return_layer": self.return_layer,
            "batch_size": self.batch_size,
            "use_gpu": self.use_gpu,
            "gpu_device_id": self.gpu_device_id,
            "network_is_bgr": self.network_is_bgr,
            "data_layer": self.data_layer,
            "load_truncated_images": self.load_truncated_images,
            "pixel_rescale": self.pixel_rescale,
            # Fix: this key was previously omitted, breaking config
            # round-trip and pickling (__getstate__ returns this dict and
            # _setup_network reads self.input_scale after __setstate__).
            "input_scale": self.input_scale,
        }

    def valid_content_types(self):
        """
        :return: A set valid MIME type content types that this descriptor can
            handle.
        :rtype: set[str]
        """
        return {
            'image/tiff',
            'image/png',
            'image/jpeg',
        }

    def _compute_descriptor(self, data):
        # Never reached: both public compute methods below are overridden to
        # funnel through the batch implementation instead.
        raise NotImplementedError("Shouldn't get here as "
                                  "compute_descriptor[_async] is being "
                                  "overridden")

    def compute_descriptor(self, data, descr_factory=DFLT_DESCRIPTOR_FACTORY,
                           overwrite=False):
        """
        Given some kind of data, return a descriptor element containing a
        descriptor vector.
        This abstract super method should be invoked for common error checking.
        :raises RuntimeError: Descriptor extraction failure of some kind.
        :raises ValueError: Given data element content was not of a valid type
            with respect to this descriptor.
        :param data: Some kind of input data for the feature descriptor.
        :type data: smqtk.representation.DataElement
        :param descr_factory: Factory instance to produce the wrapping
            descriptor element instance. In-Memory descriptor factory by
            default.
        :type descr_factory: smqtk.representation.DescriptorElementFactory
        :param overwrite: Whether or not to force re-computation of a descriptor
            vector for the given data even when there exists a precomputed
            vector in the generated DescriptorElement as generated from the
            provided factory. This will overwrite the persistently stored vector
            if the provided factory produces a DescriptorElement implementation
            with such storage.
        :type overwrite: bool
        :return: Result descriptor element. UUID of this output descriptor is
            the same as the UUID of the input data element.
        :rtype: smqtk.representation.DescriptorElement
        """
        # Single-element convenience wrapper around the batch path.
        m = self.compute_descriptor_async([data], descr_factory, overwrite,
                                          procs=1)
        return m[data]

    def compute_descriptor_async(self, data_iter,
                                 descr_factory=DFLT_DESCRIPTOR_FACTORY,
                                 overwrite=False, procs=None, **kwds):
        """
        Asynchronously compute feature data for multiple data items.
        :param data_iter: Iterable of data elements to compute features for.
            These must have UIDs assigned for feature association in return
            value.
        :type data_iter: collections.Iterable[smqtk.representation.DataElement]
        :param descr_factory: Factory instance to produce the wrapping
            descriptor element instances. In-Memory descriptor factory by
            default.
        :type descr_factory: smqtk.representation.DescriptorElementFactory
        :param overwrite: Whether or not to force re-computation of a descriptor
            vectors for the given data even when there exists precomputed
            vectors in the generated DescriptorElements as generated from the
            provided factory. This will overwrite the persistently stored
            vectors if the provided factory produces a DescriptorElement
            implementation such storage.
        :type overwrite: bool
        :param procs: Optional specification of how many processors to use
            when pooling sub-tasks. If None, we attempt to use all available
            cores.
        :type procs: int
        :raises ValueError: An input DataElement was of a content type that we
            cannot handle.
        :return: Mapping of input DataElement instances to the computed
            descriptor element.
            DescriptorElement UUID's are congruent with the UUID of the data
            element it is the descriptor of.
        :rtype: dict[smqtk.representation.DataElement,
                     smqtk.representation.DescriptorElement]
        """
        # Create DescriptorElement instances for each data elem.
        #: :type: dict[collections.Hashable, smqtk.representation.DataElement]
        data_elements = {}
        #: :type: dict[collections.Hashable, smqtk.representation.DescriptorElement]
        descr_elements = {}
        self._log.debug("Checking content types; aggregating data/descriptor "
                        "elements.")
        prog_rep_state = [0] * 7
        for d in data_iter:
            ct = d.content_type()
            if ct not in self.valid_content_types():
                # Fix: message previously had an unbalanced parenthesis.
                raise ValueError("Cannot compute descriptor of content type "
                                 "'%s' (DE: %s)" % (ct, d))
            data_elements[d.uuid()] = d
            descr_elements[d.uuid()] = descr_factory.new_descriptor(self.name, d.uuid())
            report_progress(self._log.debug, prog_rep_state, 1.0)
        self._log.debug("Given %d unique data elements", len(data_elements))
        # Reduce procs down to the number of elements to process if its smaller
        if len(data_elements) < (procs or multiprocessing.cpu_count()):
            procs = len(data_elements)
        if procs == 0:
            raise ValueError("No data elements provided")
        # For thread safely, only use .append() and .popleft() (queue)
        uuid4proc = deque()

        def check_get_uuid(d):
            # Queue UUIDs whose descriptors still need computing.
            if overwrite or not d.has_vector():
                # noinspection PyUnresolvedReferences
                uuid4proc.append(d.uuid())

        p = multiprocessing.pool.ThreadPool(procs)
        try:
            p.map(check_get_uuid, descr_elements.itervalues())
        finally:
            p.close()
            p.join()
        del p
        self._log.debug("%d descriptors already computed",
                        len(data_elements) - len(uuid4proc))
        if uuid4proc:
            self._log.debug("Converting deque to tuple for segmentation")
            uuid4proc = tuple(uuid4proc)
            # Split UUIDs into groups equal to our batch size, and an option
            # tail group that is less than our batch size.
            tail_size = len(uuid4proc) % self.batch_size
            batch_groups = (len(uuid4proc) - tail_size) // self.batch_size
            self._log.debug("Processing %d batches of size %d", batch_groups,
                            self.batch_size)
            if tail_size:
                self._log.debug("Processing tail group of size %d", tail_size)
            if batch_groups:
                for g in xrange(batch_groups):
                    self._log.debug("Starting batch: %d of %d",
                                    g + 1, batch_groups)
                    batch_uuids = \
                        uuid4proc[g*self.batch_size:(g+1)*self.batch_size]
                    self._process_batch(batch_uuids, data_elements,
                                        descr_elements, procs)
            if tail_size:
                batch_uuids = uuid4proc[-tail_size:]
                self._log.debug("Starting tail batch (size=%d)",
                                len(batch_uuids))
                self._process_batch(batch_uuids, data_elements, descr_elements,
                                    procs)
        self._log.debug("forming output dict")
        return dict((data_elements[k], descr_elements[k])
                    for k in data_elements)

    def _process_batch(self, uuids4proc, data_elements, descr_elements, procs):
        """
        Run a number of data elements through the network, based on the number
        of UUIDs given, returning the vectors of
        :param uuids4proc: UUIDs of the source data to run in the network as a
            batch.
        :type uuids4proc: collections.Sequence[collections.Hashable]
        :param data_elements: Mapping of UUID to data element for input data.
        :type data_elements: dict[collections.Hashable,
                                  smqtk.representation.DataElement]
        :param descr_elements: Mapping of UUID to descriptor element based on
            input data elements.
        :type descr_elements: dict[collections.Hashable,
                                   smqtk.representation.DescriptorElement]
        :param procs: The number of asynchronous processes to run for loading
            images. This may be None to just use all available cores.
        :type procs: None | int
        """
        self._log.debug("Updating network data layer shape (%d images)",
                        len(uuids4proc))
        self.network.blobs[self.data_layer].reshape(len(uuids4proc),
                                                    *self.net_data_shape[1:4])
        self._log.debug("Loading image pixel arrays")
        uid_num = len(uuids4proc)
        # Load/pre-process images in parallel worker processes.
        p = multiprocessing.Pool(procs)
        img_arrays = p.map(
            _process_load_img_array,
            zip(
                (data_elements[uid] for uid in uuids4proc),
                itertools.repeat(self.transformer, uid_num),
                itertools.repeat(self.data_layer, uid_num),
                itertools.repeat(self.load_truncated_images, uid_num),
                itertools.repeat(self.pixel_rescale, uid_num),
            )
        )
        p.close()
        p.join()
        self._log.debug("Loading image bytes into network layer '%s'",
                        self.data_layer)
        self.network.blobs[self.data_layer].data[...] = img_arrays
        self._log.debug("Moving network forward")
        self.network.forward()
        self._log.debug("extracting return layer '%s' into descriptors",
                        self.return_layer)
        for uid, v in zip(uuids4proc, self.network.blobs[self.return_layer].data):
            if v.ndim > 1:
                # In case caffe generates multidimensional array (rows, 1, 1)
                descr_elements[uid].set_vector(numpy.ravel(v))
            else:
                descr_elements[uid].set_vector(v)
def _process_load_img_array((data_element, transformer,
                             data_layer, load_truncated_images,
                             pixel_rescale)):
    """
    Helper function for multiprocessing image data loading

    Note: uses Python 2 tuple parameter unpacking (removed in Python 3 by
    PEP 3113) so a single zipped argument tuple can be fed via ``Pool.map``.

    :param data_element: DataElement providing the bytes
    :type data_element: smqtk.representation.DataElement
    :param transformer: Caffe Transformer instance for pre-processing
    :type transformer: caffe.io.Transformer
    :param data_layer: String label of the network data layer to preprocess
        for.
    :type data_layer: str
    :param load_truncated_images: If PIL should be allowed to load truncated
        image data. If false, and exception will be raised when encountering
        such imagery.
    :type load_truncated_images: bool
    :param pixel_rescale: Optional ``(min, max)`` range to linearly rescale
        pixel values into before transformation, or None for no rescaling.
    :type pixel_rescale: None | (float, float)
    :return: Pre-processed numpy array.
    """
    PIL.ImageFile.LOAD_TRUNCATED_IMAGES = load_truncated_images
    img = PIL.Image.open(io.BytesIO(data_element.get_bytes()))
    if img.mode != "RGB":
        # Normalize greyscale/palette/alpha imagery to 3-channel RGB.
        img = img.convert("RGB")
    # Caffe natively uses float types (32-bit)
    try:
        # This can fail if the image is truncated and we're not allowing the
        # loading of those images
        img_a = numpy.asarray(img, numpy.float32)
    except:
        # Log which element failed, then re-raise for the caller to handle.
        logging.getLogger(__name__).error(
            "Failed array-ifying data element. Image may be truncated: %s",
            data_element
        )
        raise
    assert img_a.ndim == 3, \
        "Loaded invalid RGB image with shape %s" \
        % img_a.shape
    if pixel_rescale:
        # Linearly map the default [0, 255] pixel range onto [pmin, pmax].
        pmin, pmax = min(pixel_rescale), max(pixel_rescale)
        r = pmax - pmin
        img_a = (img_a / (255. / r)) + pmin
    img_at = transformer.preprocess(data_layer, img_a)
    return img_at
| Purg/SMQTK | python/smqtk/algorithms/descriptor_generator/caffe_descriptor.py | Python | bsd-3-clause | 20,609 |
import hashlib
import os
import os.path
import re
import zipfile
from io import BytesIO
from urllib.parse import urlencode
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.core.paginator import Paginator
from django.http import (Http404, HttpResponse, HttpResponseRedirect,
JsonResponse)
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.encoding import force_bytes
from django.utils.functional import Promise, cached_property
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView, View
from polib import pofile
from . import get_version as get_rosetta_version
from .access import can_translate, can_translate_language
from .conf import settings as rosetta_settings
from .poutil import find_pos, pagination_range, timestamp_with_timezone
from .signals import entry_changed, post_save
from .storage import get_storage
from .translate_utils import TranslationException, translate
def get_app_name(path):
return path.split('/locale')[0].split('/')[-1]
class LoginURL(Promise):
"""
Tests friendly login URL, url is resolved at runtime.
"""
def __str__(self):
return rosetta_settings.LOGIN_URL
@method_decorator(never_cache, 'dispatch')
@method_decorator(
user_passes_test(lambda user: can_translate(user), LoginURL()), 'dispatch'
)
class RosettaBaseMixin(object):
"""A mixin class for Rosetta's class-based views. It provides:
* security (see class decorators)
* a property for the 'po_filter' url argument
"""
def dispatch(self, *args, **kwargs):
return super(RosettaBaseMixin, self).dispatch(*args, **kwargs)
@cached_property
def po_filter(self):
"""Return the filter applied to all of the .po files under consideration
to determine which file is currently being translated. Options are:
'all', 'django', 'third-party', 'project'.
If the filter isn't in this list, throw a 404.
"""
po_filter = self.kwargs.get('po_filter')
if po_filter not in {'all', 'django', 'third-party', 'project'}:
raise Http404
return po_filter
class RosettaFileLevelMixin(RosettaBaseMixin):
"""Mixin for dealing with views that work specifically with a single
.po file. In addition to what the super class brings, it adds the following
properties:
* language_id (e.g. 'fr'); derived from url, and validated
* po_file_path (filesystem path to catalog)
* po_file (pofile object)
* po_file_is_writable (bool: do we have filesystem write perms to file)
"""
def _request_request(self, key, default=None):
if key in self.request.GET:
return self.request.GET.get(key)
elif key in self.request.POST:
return self.request.POST.get(key)
return default
@cached_property
def language_id(self):
"""Determine/return the language id from the url kwargs, after
validating that:
1. the language is in rosetta_settings.ROSETTA_LANGUAGES, and
2. the current user is permitted to translate that language
(If either of the above fail, throw a 404.)
"""
# (Formerly known as "rosetta_i18n_lang_code")
lang_id = self.kwargs['lang_id']
if lang_id not in {lang[0] for lang in rosetta_settings.ROSETTA_LANGUAGES}:
raise Http404
if not can_translate_language(self.request.user, lang_id):
raise Http404
return lang_id
@cached_property
def po_file_path(self):
"""Based on the url kwargs, infer and return the path to the .po file to
be shown/updated.
Throw a 404 if a file isn't found.
"""
# This was formerly referred to as 'rosetta_i18n_fn'
idx = self.kwargs['idx']
idx = int(idx) # idx matched url re expression; calling int() is safe
third_party_apps = self.po_filter in ('all', 'third-party')
django_apps = self.po_filter in ('all', 'django')
project_apps = self.po_filter in ('all', 'project')
po_paths = find_pos(
self.language_id,
project_apps=project_apps,
django_apps=django_apps,
third_party_apps=third_party_apps,
)
po_paths.sort(key=get_app_name)
try:
path = po_paths[idx]
except IndexError:
raise Http404
return path
@cached_property
def po_file(self):
"""Return the parsed .po file that is currently being translated/viewed.
(Note that this parsing also involves marking up each entry with a hash
of its contents.)
"""
if self.po_file_is_writable:
# If we can write changes to file, then we pull it up fresh with
# each request.
# XXX: brittle; what if this path doesn't exist? Isn't a .po file?
po_file = pofile(
self.po_file_path, wrapwidth=rosetta_settings.POFILE_WRAP_WIDTH
)
for entry in po_file:
# Entry is an object representing a single entry in the catalog.
# We iterate through the *entire catalog*, pasting a hashed
# value of the meat of each entry on its side in an attribute
# called "md5hash".
str_to_hash = (
str(entry.msgid) + str(entry.msgstr) + str(entry.msgctxt or '')
).encode('utf8')
entry.md5hash = hashlib.md5(str_to_hash).hexdigest()
else:
storage = get_storage(self.request)
po_file = storage.get(self.po_file_cache_key, None)
if not po_file:
po_file = pofile(self.po_file_path)
for entry in po_file:
# Entry is an object representing a single entry in the
# catalog. We iterate through the entire catalog, pasting
# a hashed value of the meat of each entry on its side in
# an attribute called "md5hash".
str_to_hash = (
str(entry.msgid) + str(entry.msgstr) + str(entry.msgctxt or '')
).encode('utf8')
entry.md5hash = hashlib.new('md5', str_to_hash).hexdigest()
storage.set(self.po_file_cache_key, po_file)
return po_file
@cached_property
def po_file_cache_key(self):
"""Return the cache key used to save/access the .po file (when actually
persisted in cache).
"""
return 'po-file-%s' % self.po_file_path
@cached_property
def po_file_is_writable(self):
"""Return True if we're able (in terms of file system permissions) to
write out changes to the .po file we're translating.
"""
# (This was formerly called 'rosetta_i18n_write'.)
return os.access(self.po_file_path, os.W_OK)
class TranslationFileListView(RosettaBaseMixin, TemplateView):
"""Lists the languages, the gettext catalog files that can be translated,
and their translation progress for a filtered list of apps/projects.
"""
http_method_names = ['get']
template_name = 'rosetta/file-list.html'
def get_context_data(self, **kwargs):
context = super(TranslationFileListView, self).get_context_data(**kwargs)
third_party_apps = self.po_filter in ('all', 'third-party')
django_apps = self.po_filter in ('all', 'django')
project_apps = self.po_filter in ('all', 'project')
languages = []
has_pos = False
for language in rosetta_settings.ROSETTA_LANGUAGES:
if not can_translate_language(self.request.user, language[0]):
continue
po_paths = find_pos(
language[0],
project_apps=project_apps,
django_apps=django_apps,
third_party_apps=third_party_apps,
)
po_files = [
(get_app_name(lang), os.path.realpath(lang), pofile(lang))
for lang in po_paths
]
po_files.sort(key=lambda app: app[0])
languages.append((language[0], _(language[1]), po_files))
has_pos = has_pos or bool(po_paths)
context['version'] = get_rosetta_version()
context['languages'] = languages
context['has_pos'] = has_pos
context['po_filter'] = self.po_filter
return context
class TranslationFormView(RosettaFileLevelMixin, TemplateView):
"""Show a form with a page's worth of messages to be translated; handle its
submission by updating cached pofile and, if possible, writing out changes
to existing .po file.
Query strings that affect what's shown:
* msg_filter: filters which messages are displayed. One of 'all', 'fuzzy',
'translated', and 'untranslated'
* ref_lang: specifies which language should be shown as the source. Only
applicable when REF_LANG setting is set to True
* page: which page (number) should be shown of the paginated results (with
msg_filter or query applied)
* query: a search string, where only matches are shown. Fields that are
searched include: source, translated text, "occurence" file path, or
context hints.
"""
# Note: due to the unorthodox nature of the form itself, we're not using
# Django's generic FormView as our base class.
http_method_names = ['get', 'post']
template_name = 'rosetta/form.html'
def fix_nls(self, in_, out_):
"""Fixes submitted translations by filtering carriage returns and pairing
newlines at the begging and end of the translated string with the original
"""
if 0 == len(in_) or 0 == len(out_):
return out_
if "\r" in out_ and "\r" not in in_:
out_ = out_.replace("\r", '')
if "\n" == in_[0] and "\n" != out_[0]:
out_ = "\n" + out_
elif "\n" != in_[0] and "\n" == out_[0]:
out_ = out_.lstrip()
if 0 == len(out_):
pass
elif "\n" == in_[-1] and "\n" != out_[-1]:
out_ = out_ + "\n"
elif "\n" != in_[-1] and "\n" == out_[-1]:
out_ = out_.rstrip()
return out_
    def post(self, request, *args, **kwargs):
        """The only circumstances when we POST is to submit the main form, both
        updating translations (if any changed) and advancing to the next page of
        messages.
        There is no notion of validation of this content; as implemented, unknown
        fields are ignored and a generic failure message is shown.
        Submitted changes are saved out to the specified .po file on the
        filesystem if that file is writable, otherwise the cached version of the
        file is updated (so it can be downloaded). Then the user is redirected
        to the next page of messages (if there is one; otherwise they're
        redirected back to the current page).
        """
        # The message text inputs are captured as hashes of their initial
        # contents, preceded by "m_". Messages with plurals end with their
        # variation number.
        single_text_input_regex = re.compile(r'^m_([0-9a-f]+)$')
        plural_text_input_regex = re.compile(r'^m_([0-9a-f]+)_([0-9]+)$')
        # Tracks whether any submitted field matched a catalog entry; decides
        # below whether the .po file must be saved (or re-cached).
        file_change = False
        for field_name, new_msgstr in request.POST.items():
            md5hash = None
            if plural_text_input_regex.match(field_name):
                md5hash, plural_id = plural_text_input_regex.match(field_name).groups()
                md5hash = str(md5hash)
                # polib parses .po files into unicode strings, but
                # doesn't bother to convert plural indexes to int,
                # so we need unicode here.
                plural_id = str(plural_id)
                # Above no longer true as of Polib 1.0.4
                if plural_id and plural_id.isdigit():
                    plural_id = int(plural_id)
            elif single_text_input_regex.match(field_name):
                md5hash = str(single_text_input_regex.match(field_name).groups()[0])
                plural_id = None
            if md5hash is not None: # Empty string should be processed!
                entry = self.po_file.find(md5hash, 'md5hash')
                # If someone did a makemessage, some entries might
                # have been removed, so we need to check.
                if entry:
                    old_msgstr = entry.msgstr
                    if plural_id is not None: # 0 is ok!
                        entry.msgstr_plural[plural_id] = self.fix_nls(
                            entry.msgid_plural, new_msgstr
                        )
                    else:
                        entry.msgstr = self.fix_nls(entry.msgid, new_msgstr)
                    # Sync the entry's "fuzzy" flag with its form checkbox.
                    is_fuzzy = bool(self.request.POST.get('f_%s' % md5hash, False))
                    old_fuzzy = 'fuzzy' in entry.flags
                    if old_fuzzy and not is_fuzzy:
                        entry.flags.remove('fuzzy')
                    elif not old_fuzzy and is_fuzzy:
                        entry.flags.append('fuzzy')
                    # NOTE(review): set even when nothing changed for this
                    # entry, so any submission matching at least one entry
                    # triggers a save -- confirm this is intended.
                    file_change = True
                    if old_msgstr != new_msgstr or old_fuzzy != is_fuzzy:
                        entry_changed.send(
                            sender=entry,
                            user=request.user,
                            old_msgstr=old_msgstr,
                            old_fuzzy=old_fuzzy,
                            pofile=self.po_file_path,
                            language_code=self.language_id,
                        )
                else:
                    messages.error(
                        self.request,
                        _(
                            "Some items in your last translation block couldn't "
                            "be saved: this usually happens when the catalog file "
                            "changes on disk after you last loaded it."
                        ),
                    )
        if file_change and self.po_file_is_writable:
            # Stamp catalog metadata (who / with what / when) before saving.
            try:
                self.po_file.metadata['Last-Translator'] = "{} {} <{}>".format(
                    getattr(self.request.user, 'first_name', 'Anonymous'),
                    getattr(self.request.user, 'last_name', 'User'),
                    getattr(self.request.user, 'email', 'anonymous@user.tld'),
                )
                self.po_file.metadata['X-Translated-Using'] = u"django-rosetta %s" % (
                    get_rosetta_version()
                )
                self.po_file.metadata['PO-Revision-Date'] = timestamp_with_timezone()
            except UnicodeDecodeError:
                pass
            try:
                self.po_file.save()
                po_filepath, ext = os.path.splitext(self.po_file_path)
                if rosetta_settings.AUTO_COMPILE:
                    self.po_file.save_as_mofile(po_filepath + '.mo')
                post_save.send(
                    sender=None, language_code=self.language_id, request=self.request
                )
                # Try auto-reloading via the WSGI daemon mode reload mechanism
                should_try_wsgi_reload = (
                    rosetta_settings.WSGI_AUTO_RELOAD
                    and 'mod_wsgi.process_group' in self.request.environ
                    and self.request.environ.get('mod_wsgi.process_group', None)
                    and 'SCRIPT_FILENAME' in self.request.environ
                    and int(self.request.environ.get('mod_wsgi.script_reloading', 0))
                )
                if should_try_wsgi_reload:
                    try:
                        # Touching the WSGI script file triggers a daemon restart.
                        os.utime(self.request.environ.get('SCRIPT_FILENAME'), None)
                    except OSError:
                        pass
                # Try auto-reloading via uwsgi daemon reload mechanism
                if rosetta_settings.UWSGI_AUTO_RELOAD:
                    try:
                        import uwsgi
                        uwsgi.reload() # pretty easy right?
                    except Exception:
                        pass # we may not be running under uwsgi :P
                # XXX: It would be nice to add a success message here!
            except Exception as e:
                messages.error(self.request, e)
        if file_change and not self.po_file_is_writable:
            # Read-only catalog: keep the edits in the per-user cache so they
            # can still be downloaded later.
            storage = get_storage(self.request)
            storage.set(self.po_file_cache_key, self.po_file)
        # Reconstitute url to redirect to. Start with determining whether the
        # page number can be incremented.
        paginator = Paginator(self.get_entries(), rosetta_settings.MESSAGES_PER_PAGE)
        try:
            page = int(self._request_request('page', 1))
        except ValueError:
            page = 1 # fall back to page 1
        else:
            if not (0 < page <= paginator.num_pages):
                page = 1
        if page < paginator.num_pages:
            page += 1
        query_string_args = {
            'msg_filter': self.msg_filter,
            'query': self.query,
            'ref_lang': self.ref_lang,
            'page': page,
        }
        # Winnow down the query string args to non-blank ones
        query_string_args = {k: v for k, v in query_string_args.items() if v}
        return HttpResponseRedirect(
            "{url}?{qs}".format(
                url=reverse('rosetta-form', kwargs=self.kwargs),
                qs=urlencode_safe(query_string_args),
            )
        )
    def get_context_data(self, **kwargs):
        """Assemble the template context for one page of messages: paginated
        entries, reference-language texts, pagination data and assorted
        constants used by the form template.
        """
        context = super(TranslationFormView, self).get_context_data(**kwargs)
        entries = self.get_entries()
        paginator = Paginator(entries, rosetta_settings.MESSAGES_PER_PAGE)
        # Handle REF_LANG setting; mark up our entries with the reg lang's
        # corresponding translations
        LANGUAGES = list(rosetta_settings.ROSETTA_LANGUAGES)
        if rosetta_settings.ENABLE_REFLANG:
            if self.ref_lang_po_file:
                for o in paginator.object_list:
                    ref_entry = self.ref_lang_po_file.find(o.msgid)
                    if ref_entry and ref_entry.msgstr:
                        o.ref_txt = ref_entry.msgstr
                    else:
                        o.ref_txt = o.msgid
            else:
                # No usable reference catalog: fall back to showing msgids.
                for o in paginator.object_list:
                    o.ref_txt = o.msgid
            # XXX: having "MSGID" at the end of the dropdown is really odd, no?
            # Why not instead do this?
            # LANGUAGES = [('', '----')] + list(settings.LANGUAGES)
            LANGUAGES.append(('msgid', 'MSGID'))
        # Determine page number & how pagination links should be displayed
        try:
            page = int(self._request_request('page', 1))
        except ValueError:
            page = 1 # fall back to page 1
        else:
            if not (0 < page <= paginator.num_pages):
                page = 1
        needs_pagination = paginator.num_pages > 1
        if needs_pagination:
            if paginator.num_pages >= 10:
                page_range = pagination_range(1, paginator.num_pages, page)
            else:
                page_range = range(1, 1 + paginator.num_pages)
        rosetta_messages = paginator.page(page).object_list
        # Handle MAIN_LANGUAGE setting, if applicable; mark up each entry
        # in the pagination window with the "main language"'s string.
        main_language_id = rosetta_settings.MAIN_LANGUAGE
        main_language = None
        if main_language_id and main_language_id != self.language_id:
            # Translate from id to language name
            for language in rosetta_settings.ROSETTA_LANGUAGES:
                if language[0] == main_language_id:
                    main_language = _(language[1])
                    break
        if main_language:
            main_lang_po_path = self.po_file_path.replace(
                '/%s/' % self.language_id, '/%s/' % main_language_id
            )
            # XXX: brittle; what if this path doesn't exist? Isn't a .po file?
            main_lang_po = pofile(main_lang_po_path)
            for message in rosetta_messages:
                # NOTE(review): find() can return None for msgids missing from
                # the main-language catalog, which would raise AttributeError
                # here -- confirm every msgid exists in that catalog.
                message.main_lang = main_lang_po.find(message.msgid).msgstr
        # Collect some constants for the template
        rosetta_i18n_lang_name = str(
            dict(rosetta_settings.ROSETTA_LANGUAGES).get(self.language_id)
        )
        # "bidi" as in "bi-directional"
        rosetta_i18n_lang_bidi = self.language_id.split('-')[0] in settings.LANGUAGES_BIDI
        query_string_args = {}
        if self.msg_filter:
            query_string_args['msg_filter'] = self.msg_filter
        if self.query:
            query_string_args['query'] = self.query
        if self.ref_lang:
            query_string_args['ref_lang'] = self.ref_lang
        # Base for pagination links; the page num itself is added in template
        pagination_query_string_base = urlencode_safe(query_string_args)
        # Base for msg filter links; it doesn't make sense to persist page
        # numbers in these links. We just pass in ref_lang, if it's set.
        filter_query_string_base = urlencode_safe(
            {k: v for k, v in query_string_args.items() if k == 'ref_lang'}
        )
        deepl_language_code = None
        if rosetta_settings.DEEPL_LANGUAGES:
            deepl_language_code = rosetta_settings.DEEPL_LANGUAGES.get(
                self.language_id, None
            )
        context.update(
            {
                'version': get_rosetta_version(),
                'LANGUAGES': LANGUAGES,
                'rosetta_settings': rosetta_settings,
                'rosetta_i18n_lang_name': rosetta_i18n_lang_name,
                'rosetta_i18n_lang_code': self.language_id,
                'rosetta_i18n_lang_code_normalized': self.language_id.replace('_', '-'),
                'rosetta_i18n_lang_bidi': rosetta_i18n_lang_bidi,
                'rosetta_i18n_filter': self.msg_filter,
                'rosetta_i18n_write': self.po_file_is_writable,
                'rosetta_messages': rosetta_messages,
                'page_range': needs_pagination and page_range,
                'needs_pagination': needs_pagination,
                'main_language': main_language,
                'rosetta_i18n_app': get_app_name(self.po_file_path),
                'page': page,
                'query': self.query,
                'pagination_query_string_base': pagination_query_string_base,
                'filter_query_string_base': filter_query_string_base,
                'paginator': paginator,
                'rosetta_i18n_pofile': self.po_file,
                'ref_lang': self.ref_lang,
                'deepl_language_code': deepl_language_code,
            }
        )
        return context
@cached_property
def ref_lang(self):
"""Return the language id for the "reference language" (the language to
be translated *from*, if not English).
Throw a 404 if it's not in rosetta_settings.ROSETTA_LANGUAGES.
"""
ref_lang = self._request_request('ref_lang', 'msgid')
if ref_lang != 'msgid':
allowed_languages = {lang[0] for lang in rosetta_settings.ROSETTA_LANGUAGES}
if ref_lang not in allowed_languages:
raise Http404
return ref_lang
@cached_property
def ref_lang_po_file(self):
"""Return a parsed .po file object for the "reference language", if one
exists, otherwise None.
"""
ref_pofile = None
if rosetta_settings.ENABLE_REFLANG and self.ref_lang != 'msgid':
replacement = '{separator}locale{separator}{ref_lang}'.format(
separator=os.sep, ref_lang=self.ref_lang
)
pattern = r'\{separator}locale\{separator}[a-z]{{2}}'.format(separator=os.sep)
ref_fn = re.sub(pattern, replacement, self.po_file_path)
try:
ref_pofile = pofile(ref_fn)
except IOError:
# there's a syntax error in the PO file and polib can't
# open it. Let's just do nothing and thus display msgids.
# XXX: :-/
pass
return ref_pofile
@cached_property
def msg_filter(self):
"""Validate/return msg_filter from request (e.g. 'fuzzy', 'untranslated'),
or a default.
If a query is also specified in the request, then return None.
"""
if self.query:
msg_filter = None
else:
msg_filter = self._request_request('msg_filter', 'all')
available_msg_filters = {'untranslated', 'translated', 'fuzzy', 'all'}
if msg_filter not in available_msg_filters:
msg_filter = 'all'
return msg_filter
@cached_property
def query(self):
"""Strip and return the query (for searching the catalog) from the
request, or None.
"""
return self._request_request('query', '').strip() or None
def get_entries(self):
"""Return a list of the entries (messages) that would be part of the
current "view"; that is, all of the ones from this .po file matching the
current query or msg_filter.
"""
if self.query:
# Scenario #1: terms matching a search query
rx = re.compile(re.escape(self.query), re.IGNORECASE)
def concat_entry(e):
return (
str(e.msgstr)
+ str(e.msgid)
+ str(e.msgctxt)
+ str(e.comment)
+ u''.join([o[0] for o in e.occurrences])
+ str(e.msgid_plural)
+ u''.join(e.msgstr_plural.values())
)
entries = [
e_
for e_ in self.po_file
if not e_.obsolete and rx.search(concat_entry(e_))
]
else:
# Scenario #2: filtered list of messages
if self.msg_filter == 'untranslated':
entries = self.po_file.untranslated_entries()
elif self.msg_filter == 'translated':
entries = self.po_file.translated_entries()
elif self.msg_filter == 'fuzzy':
entries = [e_ for e_ in self.po_file.fuzzy_entries() if not e_.obsolete]
else:
# ("all")
entries = [e_ for e_ in self.po_file if not e_.obsolete]
return entries
class TranslationFileDownload(RosettaFileLevelMixin, View):
    """Download a zip file for a specific catalog including both the raw (.po)
    and compiled (.mo) files, either as they exist on disk, or, if what's on
    disk is unwritable (permissions-wise), return what's in the cache.
    """
    http_method_names = [u'get']
    def get(self, request, *args, **kwargs):
        # Serve a zip containing the .po and a freshly-compiled .mo; on any
        # failure, bounce back to the project file list.
        try:
            # NOTE(review): splitting on '/' assumes POSIX-style paths and
            # would misbehave on Windows -- consider os.sep/os.path instead.
            if len(self.po_file_path.split('/')) >= 5:
                offered_fn = '_'.join(self.po_file_path.split('/')[-5:])
            else:
                offered_fn = self.po_file_path.split('/')[-1]
            po_fn = str(self.po_file_path.split('/')[-1])
            mo_fn = str(po_fn.replace('.po', '.mo')) # not so smart, huh
            zipdata = BytesIO()
            with zipfile.ZipFile(zipdata, mode="w") as zipf:
                zipf.writestr(po_fn, str(self.po_file).encode("utf8"))
                zipf.writestr(mo_fn, self.po_file.to_binary())
            zipdata.seek(0)
            response = HttpResponse(zipdata.read())
            filename = 'filename=%s.%s.zip' % (offered_fn, self.language_id)
            response['Content-Disposition'] = 'attachment; %s' % filename
            response['Content-Type'] = 'application/x-zip'
            return response
        except Exception:
            # XXX: should add a message!
            return HttpResponseRedirect(
                reverse('rosetta-file-list', kwargs={'po_filter': 'project'})
            )
@user_passes_test(lambda user: can_translate(user), LoginURL())
def translate_text(request):
    """AJAX endpoint: translate GET parameter `text` from language `from` to
    language `to` via the configured backend and return a JSON payload with
    either the translation or an error message.
    """
    source_language = request.GET.get('from', None)
    target_language = request.GET.get('to', None)
    text = request.GET.get('text', None)
    if source_language == target_language:
        # Same language on both sides: echo the text back untranslated.
        payload = {'success': True, 'translation': text}
    else:
        try:
            translation = translate(text, source_language, target_language)
            payload = {'success': True, 'translation': translation}
        except TranslationException as e:
            payload = {'success': False, 'error': str(e)}
    return JsonResponse(payload)
def urlencode_safe(query):
    """URL-encode a mapping after coercing every value to bytes, so values
    containing non-ASCII text survive encoding.
    """
    coerced = {key: force_bytes(value) for key, value in query.items()}
    return urlencode(coerced)
| mbi/django-rosetta | rosetta/views.py | Python | mit | 29,017 |
import datetime
import random
import numpy as np
from hyperopt import hp, fmin, tpe
import os
import sys
from sklearn.linear_model import Ridge
sys.path.insert(0, os.getcwd())
import qml_workdir.classes.config
from qml.cv import QCV
from qml.helpers import get_engine
from qml.models import QXgb, QAvg, QRankedAvg, QRankedByLineAvg, QStackModel
from qml_workdir.classes.models import qm
if __name__ == "__main__":
    # Build level-3 ensembles by averaging random subsets of the best
    # level-2 models and recording each ensemble's cross-validation score.
    _, conn = get_engine()
    cv = QCV(qm)
    # Only level-2 models whose CV score beats this threshold are candidates.
    CV_SCORE_TO_SELECT = 0.53745
    # Abort a candidate ensemble's CV early once its score exceeds this.
    CV_SCORE_TO_STOP = 0.542
    ROUNDS = 1000
    # Top-20 models (by CV score) per (data_id, cls, descr) group.
    res = conn.execute(
        """
        select data_id, cls, descr,
          substring_index(group_concat(model_id order by cv_score), ',', 20) as models
        from qml_results r
        inner join qml_models m using(model_id)
        where m.level=2 and cv_score < {}
        group by data_id, cls, descr
    """.format(CV_SCORE_TO_SELECT)
    ).fetchall()
    results = []
    for r in res:
        for m in r['models'].split(','):
            results.append([int(m), r['data_id'], 1000])
    for i in range(5000):
        # Pick a random subset of 2..20 candidate models for this ensemble.
        random.shuffle(results)
        models = list(results[:random.randint(2, 20)])
        models = sorted(models, key=lambda x: (x[0], x[1]))
        # NOTE(review): the loop runs 5000 iterations but the progress line
        # prints out of ROUNDS (1000) -- one of the two values is stale;
        # confirm which is intended.
        print('{}/{}'.format(i, ROUNDS), models)
        model_id = qm.add_by_params(
            QAvg(models)
        )
        print(cv.cross_val(model_id, -1, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP))
        conn.execute("update qml_models set level=3 where model_id={}".format(model_id))
        # model_id = qm.add_by_params(
        #     QAvg(models, is_geom=True)
        # )
        # print(cv.cross_val(model_id, -1, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP))
        # conn.execute("update qml_models set level=3 where model_id={}".format(model_id))
        #
        # model_id = qm.add_by_params(
        #     QRankedAvg(models)
        # )
        # print(cv.cross_val(model_id, -1, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP))
        # conn.execute("update qml_models set level=3 where model_id={}".format(model_id))
        # model_id = qm.add_by_params(
        #     QRankedByLineAvg(models)
        # )
        # print(cv.cross_val(model_id, -1, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP))
        # conn.execute("update qml_models set level=3 where model_id={}".format(model_id))
        # if len(models) > 8:
        #     model_id2 = qm.add_by_params(
        #         Ridge(alpha=0.01),
        #     )
        #     model_id = qm.add_by_params(
        #         QStackModel(models, second_layer_model=model_id2, nsplits=2)
        #     )
        #     print(cv.cross_val(model_id, -1, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP))
        #     conn.execute("update qml_models set level=3 where model_id={}".format(model_id))
        #
        #
        # model_id2 = qm.add_by_params(
        #     Ridge(alpha=0.05),
        # )
        # model_id = qm.add_by_params(
        #     QStackModel(models, second_layer_model=model_id2, nsplits=2)
        # )
        # print(cv.cross_val(model_id, -1, early_stop_cv=lambda x: x>CV_SCORE_TO_STOP))
        # conn.execute("update qml_models set level=3 where model_id={}".format(model_id))
    # Promote any remaining level-1 ensemble-type models to level 3.
    conn.execute(
        "update qml_models set level=3 where level=1 and cls in ('qavg', 'qrankedavg', 'QRankedByLineAvg', 'QStackModel')")
| quantum13/mlbootcamp5 | qml_workdir/ensembling/level3_model02s.py | Python | mit | 3,403 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
import pycurl
import unittest
import nose.tools
import nose.plugins.skip
from . import util
class GlobalInitTest(unittest.TestCase):
    """Tests for pycurl.global_init/global_cleanup flag handling."""
    def test_global_init_default(self):
        # initialize libcurl with DEFAULT flags
        pycurl.global_init(pycurl.GLOBAL_DEFAULT)
        pycurl.global_cleanup()
    def test_global_init_ack_eintr(self):
        # the GLOBAL_ACK_EINTR flag was introduced in libcurl-7.30, but can also
        # be backported for older versions of libcurl at the distribution level
        if util.pycurl_version_less_than(7, 30) and not hasattr(pycurl, 'GLOBAL_ACK_EINTR'):
            raise nose.plugins.skip.SkipTest('libcurl < 7.30.0 or no GLOBAL_ACK_EINTR')
        # initialize libcurl with the GLOBAL_ACK_EINTR flag
        pycurl.global_init(pycurl.GLOBAL_ACK_EINTR)
        pycurl.global_cleanup()
    @nose.tools.raises(ValueError)
    def test_global_init_bogus(self):
        # initialize libcurl with bogus flags; the ValueError this raises is
        # asserted by the decorator above
        pycurl.global_init(0xffff)
| buaabyl/pycurl-win32 | tests/global_init_test.py | Python | lgpl-2.1 | 1,063 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
if sys.version > '3':
basestring = str
from pyspark import since, keyword_only, SparkContext
from pyspark.ml.base import Estimator, Model, Transformer
from pyspark.ml.param import Param, Params
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py, _py2java
@inherit_doc
class Pipeline(Estimator, MLReadable, MLWritable):
"""
A simple pipeline, which acts as an estimator. A Pipeline consists
of a sequence of stages, each of which is either an
:py:class:`Estimator` or a :py:class:`Transformer`. When
:py:meth:`Pipeline.fit` is called, the stages are executed in
order. If a stage is an :py:class:`Estimator`, its
:py:meth:`Estimator.fit` method will be called on the input
dataset to fit a model. Then the model, which is a transformer,
will be used to transform the dataset as the input to the next
stage. If a stage is a :py:class:`Transformer`, its
:py:meth:`Transformer.transform` method will be called to produce
the dataset for the next stage. The fitted model from a
:py:class:`Pipeline` is a :py:class:`PipelineModel`, which
consists of fitted models and transformers, corresponding to the
pipeline stages. If stages is an empty list, the pipeline acts as an
identity transformer.
.. versionadded:: 1.3.0
"""
    # Param declaring the ordered list of pipeline stages.
    stages = Param(Params._dummy(), "stages", "a list of pipeline stages")
    @keyword_only
    def __init__(self, stages=None):
        """
        __init__(self, stages=None)
        """
        super(Pipeline, self).__init__()
        kwargs = self._input_kwargs
        self.setParams(**kwargs)
    @since("1.3.0")
    def setStages(self, value):
        """
        Set pipeline stages.
        :param value: a list of transformers or estimators
        :return: the pipeline instance
        """
        return self._set(stages=value)
    @since("1.3.0")
    def getStages(self):
        """
        Get pipeline stages.
        :return: the list of stages set on this pipeline
        """
        return self.getOrDefault(self.stages)
    @keyword_only
    @since("1.3.0")
    def setParams(self, stages=None):
        """
        setParams(self, stages=None)
        Sets params for Pipeline.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
def _fit(self, dataset):
stages = self.getStages()
for stage in stages:
if not (isinstance(stage, Estimator) or isinstance(stage, Transformer)):
raise TypeError(
"Cannot recognize a pipeline stage of type %s." % type(stage))
indexOfLastEstimator = -1
for i, stage in enumerate(stages):
if isinstance(stage, Estimator):
indexOfLastEstimator = i
transformers = []
for i, stage in enumerate(stages):
if i <= indexOfLastEstimator:
if isinstance(stage, Transformer):
transformers.append(stage)
dataset = stage.transform(dataset)
else: # must be an Estimator
model = stage.fit(dataset)
transformers.append(model)
if i < indexOfLastEstimator:
dataset = model.transform(dataset)
else:
transformers.append(stage)
return PipelineModel(transformers)
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance.
:param extra: extra parameters
:returns: new instance
"""
if extra is None:
extra = dict()
that = Params.copy(self, extra)
stages = [stage.copy(extra) for stage in that.getStages()]
return that.setStages(stages)
    @since("2.0.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        # Use the JVM-backed writer only when every stage is Java-writable;
        # otherwise fall back to the pure-Python pipeline writer.
        allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(self.getStages())
        if allStagesAreJava:
            return JavaMLWriter(self)
        return PipelineWriter(self)
    @classmethod
    @since("2.0.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return PipelineReader(cls)
    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java Pipeline, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        # Create a new instance of this stage.
        py_stage = cls()
        # Load information from java_stage to the instance.
        py_stages = [JavaParams._from_java(s) for s in java_stage.getStages()]
        py_stage.setStages(py_stages)
        py_stage._resetUid(java_stage.uid())
        return py_stage
    def _to_java(self):
        """
        Transfer this instance to a Java Pipeline. Used for ML persistence.
        :return: Java object equivalent to this instance.
        """
        gateway = SparkContext._gateway
        cls = SparkContext._jvm.org.apache.spark.ml.PipelineStage
        java_stages = gateway.new_array(cls, len(self.getStages()))
        for idx, stage in enumerate(self.getStages()):
            java_stages[idx] = stage._to_java()
        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.Pipeline", self.uid)
        _java_obj.setStages(java_stages)
        return _java_obj
    def _make_java_param_pair(self, param, value):
        """
        Makes a Java param pair.
        """
        sc = SparkContext._active_spark_context
        param = self._resolveParam(param)
        java_param = sc._jvm.org.apache.spark.ml.param.Param(param.parent, param.name, param.doc)
        if isinstance(value, Params) and hasattr(value, "_to_java"):
            # Convert JavaEstimator/JavaTransformer object or Estimator/Transformer object which
            # implements `_to_java` method (such as OneVsRest, Pipeline object) to java object.
            # used in the case of an estimator having another estimator as a parameter
            # the reason why this is not in _py2java in common.py is that importing
            # Estimator and Model in common.py results in a circular import with inherit_doc
            java_value = value._to_java()
        else:
            java_value = _py2java(sc, value)
        return java_param.w(java_value)
    def _transfer_param_map_to_java(self, pyParamMap):
        """
        Transforms a Python ParamMap into a Java ParamMap.
        """
        paramMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap")
        for param in self.params:
            if param in pyParamMap:
                pair = self._make_java_param_pair(param, pyParamMap[param])
                paramMap.put([pair])
        return paramMap
    def _transfer_param_map_from_java(self, javaParamMap):
        """
        Transforms a Java ParamMap into a Python ParamMap.
        """
        sc = SparkContext._active_spark_context
        paramMap = dict()
        for pair in javaParamMap.toList():
            param = pair.param()
            if self.hasParam(str(param.name())):
                java_obj = pair.value()
                if sc._jvm.Class.forName("org.apache.spark.ml.PipelineStage").isInstance(java_obj):
                    # Note: JavaParams._from_java support both JavaEstimator/JavaTransformer class
                    # and Estimator/Transformer class which implements `_from_java` static method
                    # (such as OneVsRest, Pipeline class).
                    py_obj = JavaParams._from_java(java_obj)
                else:
                    py_obj = _java2py(sc, java_obj)
                paramMap[self.getParam(param.name())] = py_obj
        return paramMap
@inherit_doc
class PipelineWriter(MLWriter):
    """
    (Private) Specialization of :py:class:`MLWriter` for :py:class:`Pipeline` types
    """
    def __init__(self, instance):
        super(PipelineWriter, self).__init__()
        self.instance = instance
    def saveImpl(self, path):
        # Validate that every stage is persistable before writing anything.
        stages = self.instance.getStages()
        PipelineSharedReadWrite.validateStages(stages)
        PipelineSharedReadWrite.saveImpl(self.instance, stages, self.sc, path)
@inherit_doc
class PipelineReader(MLReader):
    """
    (Private) Specialization of :py:class:`MLReader` for :py:class:`Pipeline` types
    """
    def __init__(self, cls):
        super(PipelineReader, self).__init__()
        self.cls = cls
    def load(self, path):
        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
        # Catalogs without the Python 'language' marker were written by the
        # JVM side and are loaded through the Java reader.
        if 'language' not in metadata['paramMap'] or metadata['paramMap']['language'] != 'Python':
            return JavaMLReader(self.cls).load(path)
        else:
            uid, stages = PipelineSharedReadWrite.load(metadata, self.sc, path)
            return Pipeline(stages=stages)._resetUid(uid)
@inherit_doc
class PipelineModelWriter(MLWriter):
    """
    (Private) Specialization of :py:class:`MLWriter` for :py:class:`PipelineModel` types
    """
    def __init__(self, instance):
        super(PipelineModelWriter, self).__init__()
        self.instance = instance
    def saveImpl(self, path):
        stages = self.instance.stages
        PipelineSharedReadWrite.validateStages(stages)
        PipelineSharedReadWrite.saveImpl(self.instance, stages, self.sc, path)
@inherit_doc
class PipelineModelReader(MLReader):
    """
    (Private) Specialization of :py:class:`MLReader` for :py:class:`PipelineModel` types
    """
    def __init__(self, cls):
        super(PipelineModelReader, self).__init__()
        self.cls = cls
    def load(self, path):
        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
        # Same dispatch as PipelineReader.load: JVM-written vs Python-written.
        if 'language' not in metadata['paramMap'] or metadata['paramMap']['language'] != 'Python':
            return JavaMLReader(self.cls).load(path)
        else:
            uid, stages = PipelineSharedReadWrite.load(metadata, self.sc, path)
            return PipelineModel(stages=stages)._resetUid(uid)
@inherit_doc
class PipelineModel(Model, MLReadable, MLWritable):
    """
    Represents a compiled pipeline with transformers and fitted models.
    .. versionadded:: 1.3.0
    """
    def __init__(self, stages):
        super(PipelineModel, self).__init__()
        # Fitted transformers, in execution order.
        self.stages = stages
    def _transform(self, dataset):
        # Feed the dataset through each fitted stage in sequence.
        for t in self.stages:
            dataset = t.transform(dataset)
        return dataset
    @since("1.4.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance.
        :param extra: extra parameters
        :returns: new instance
        """
        if extra is None:
            extra = dict()
        stages = [stage.copy(extra) for stage in self.stages]
        return PipelineModel(stages)
    @since("2.0.0")
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        # Prefer the JVM writer when every stage is Java-writable.
        allStagesAreJava = PipelineSharedReadWrite.checkStagesForJava(self.stages)
        if allStagesAreJava:
            return JavaMLWriter(self)
        return PipelineModelWriter(self)
    @classmethod
    @since("2.0.0")
    def read(cls):
        """Returns an MLReader instance for this class."""
        return PipelineModelReader(cls)
    @classmethod
    def _from_java(cls, java_stage):
        """
        Given a Java PipelineModel, create and return a Python wrapper of it.
        Used for ML persistence.
        """
        # Load information from java_stage to the instance.
        py_stages = [JavaParams._from_java(s) for s in java_stage.stages()]
        # Create a new instance of this stage.
        py_stage = cls(py_stages)
        py_stage._resetUid(java_stage.uid())
        return py_stage
    def _to_java(self):
        """
        Transfer this instance to a Java PipelineModel. Used for ML persistence.
        :return: Java object equivalent to this instance.
        """
        gateway = SparkContext._gateway
        cls = SparkContext._jvm.org.apache.spark.ml.Transformer
        java_stages = gateway.new_array(cls, len(self.stages))
        for idx, stage in enumerate(self.stages):
            java_stages[idx] = stage._to_java()
        _java_obj =\
            JavaParams._new_java_obj("org.apache.spark.ml.PipelineModel", self.uid, java_stages)
        return _java_obj
@inherit_doc
class PipelineSharedReadWrite():
"""
Functions for :py:class:`MLReader` and :py:class:`MLWriter` shared between
:py:class:`Pipeline` and :py:class:`PipelineModel`
.. versionadded:: 2.3.0
"""
    @staticmethod
    def checkStagesForJava(stages):
        # True iff every stage can be persisted through the JVM writer.
        return all(isinstance(stage, JavaMLWritable) for stage in stages)
@staticmethod
def validateStages(stages):
"""
Check that all stages are Writable
"""
for stage in stages:
if not isinstance(stage, MLWritable):
raise ValueError("Pipeline write will fail on this pipeline " +
"because stage %s of type %s is not MLWritable",
stage.uid, type(stage))
    @staticmethod
    def saveImpl(instance, stages, sc, path):
        """
        Save metadata and stages for a :py:class:`Pipeline` or :py:class:`PipelineModel`
        - save metadata to path/metadata
        - save stages to stages/IDX_UID
        """
        stageUids = [stage.uid for stage in stages]
        # 'language' marks the catalog as Python-written so the readers know
        # not to route it through the JVM loader.
        jsonParams = {'stageUids': stageUids, 'language': 'Python'}
        DefaultParamsWriter.saveMetadata(instance, path, sc, paramMap=jsonParams)
        stagesDir = os.path.join(path, "stages")
        for index, stage in enumerate(stages):
            stage.write().save(PipelineSharedReadWrite
                               .getStagePath(stage.uid, index, len(stages), stagesDir))
    @staticmethod
    def load(metadata, sc, path):
        """
        Load metadata and stages for a :py:class:`Pipeline` or :py:class:`PipelineModel`
        :return: (UID, list of stages)
        """
        stagesDir = os.path.join(path, "stages")
        stageUids = metadata['paramMap']['stageUids']
        stages = []
        for index, stageUid in enumerate(stageUids):
            stagePath = \
                PipelineSharedReadWrite.getStagePath(stageUid, index, len(stageUids), stagesDir)
            stage = DefaultParamsReader.loadParamsInstance(stagePath, sc)
            stages.append(stage)
        return (metadata['uid'], stages)
@staticmethod
def getStagePath(stageUid, stageIdx, numStages, stagesDir):
"""
Get path for saving the given stage.
"""
stageIdxDigits = len(str(numStages))
stageDir = str(stageIdx).zfill(stageIdxDigits) + "_" + stageUid
stagePath = os.path.join(stagesDir, stageDir)
return stagePath
| kevinyu98/spark | python/pyspark/ml/pipeline.py | Python | apache-2.0 | 15,535 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# web-of-science documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# Bug fix: "import web-of-science" is a SyntaxError -- hyphens are not legal
# in Python identifiers. The importable package is presumably named with
# underscores (the usual cookiecutter slug conversion) -- TODO confirm the
# actual package directory name on disk.
import web_of_science
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'web-of-science'
copyright = u'2015, Daniel Chen'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = web_of_science.__version__
# The full version, including alpha/beta/rc tags.
release = web_of_science.__version__
# NOTE(review): everything below is unmodified cookiecutter/Sphinx boilerplate;
# the commented-out entries document the defaults they would override.
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'web-of-sciencedoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'web-of-science.tex',
     u'web-of-science Documentation',
     u'Daniel Chen', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'web-of-science',
     u'web-of-science Documentation',
     [u'Daniel Chen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'web-of-science',
     u'web-of-science Documentation',
     u'Daniel Chen',
     'web-of-science',
     'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| chendaniely/web-of-science | docs/conf.py | Python | mit | 8,474 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=unused-variable
import os
import re
import sys
import logging
import importlib
from datetime import datetime
from logging.handlers import SMTPHandler
import jinja2
from celery import Celery
from werkzeug.urls import iri_to_uri
from werkzeug.utils import import_string
from werkzeug.middleware.proxy_fix import ProxyFix
from werkzeug.exceptions import HTTPException
from flask import Flask, current_app, request, g, jsonify
from flask import json as flask_json
from flask import sessions as flask_sessions
import flask_babel
from flask_login import LoginManager, current_user
from flask.logging import default_handler
from flask_wtf.csrf import CSRFProtect, CSRFError
from flask_cors import CORS
from pony.flask import Pony
from mini_fiction import models # pylint: disable=unused-import
from mini_fiction import database, tasks, context_processors, ratelimit
from mini_fiction.bl import init_bl
from mini_fiction.logic import frontend
__all__ = ['create_app']
class LazySecureCookieSession(flask_sessions.SecureCookieSession):
    """Session that ignores writes which do not change the stored value.

    Flask-Login touches the session even when nothing changed; skipping
    no-op writes avoids needlessly reissuing the session cookie.
    """
    def __setitem__(self, key, value):
        # Persist only actual changes.
        if self.get(key) == value:
            return
        super().__setitem__(key, value)
class LazySecureCookieSessionInterface(flask_sessions.SecureCookieSessionInterface):
    # Produce LazySecureCookieSession objects so that no-op writes do not
    # mark the session as modified (and thus do not resend the cookie).
    session_class = LazySecureCookieSession
def create_app():
    """Build and fully configure the Flask application.

    Loads the settings module named by the MINIFICTION_SETTINGS environment
    variable, then runs every configure_* step. The ordering of the steps
    matters in places (see inline comments).
    """
    select_default_settings()
    config_obj = import_string(os.environ.get('MINIFICTION_SETTINGS'))
    app = Flask(
        __name__,
        static_folder=config_obj.STATIC_ROOT,
        static_url_path=config_obj.STATIC_URL,
    )
    app.config.from_object(config_obj)
    default_handler.setLevel(app.config['LOGLEVEL'])
    logging.basicConfig(level=app.config['LOGLEVEL'], format=app.config['LOGFORMAT'])
    # Avoid re-sending the session cookie when the session did not change.
    app.session_interface = LazySecureCookieSessionInterface()
    if app.config['UMASK'] is not None:
        # UMASK may be given as an octal string (e.g. "022") in the config.
        if isinstance(app.config['UMASK'], str):
            app.config['UMASK'] = int(app.config['UMASK'], 8)
        os.umask(app.config['UMASK'])
    if app.config['TESTING']:
        if not os.path.isdir(app.config['TESTING_DIRECTORY']):
            os.makedirs(app.config['TESTING_DIRECTORY'])
        elif os.listdir(app.config['TESTING_DIRECTORY']):
            raise RuntimeError('Testing directory %r is not empty' % app.config['TESTING_DIRECTORY'])
    # Flask's after_request/teardown_request hooks are executed in the reversed
    # order of their declaration. after_request_callbacks must be executed
    # outside of the db_session context (after commit), so attach it before Pony ORM
    configure_after_request_callbacks(app)
    Pony(app)  # binds db_session to before_request/teardown_request
    init_bl()
    configure_user_agent(app)
    configure_i18n(app)
    configure_cache(app)
    configure_rate_limit(app)
    configure_forms(app)
    configure_users(app)
    configure_error_handlers(app)
    configure_views(app)
    configure_admin_views(app)
    configure_staticfiles(app)
    configure_ajax(app)
    configure_errorpages(app)
    configure_templates(app)
    if not app.config['SPHINX_DISABLED']:
        configure_search(app)
    configure_celery(app)
    configure_captcha(app)
    configure_story_voting(app)
    configure_misc(app)
    configure_development(app)
    configure_frontend(app)
    configure_sidebar(app)
    app.context_processor(templates_context)
    init_plugins(app)
    database.configure_for_app(app)
    CORS(app, resources={r"/static/*": {"origins": "*"}})
    return app
def select_default_settings():
    """Choose a default settings module when MINIFICTION_SETTINGS is unset.

    A ``local_settings.py`` in the current directory takes precedence;
    otherwise the builtin settings module matching FLASK_ENV is used.
    """
    if os.environ.get('MINIFICTION_SETTINGS'):
        return
    flask_env = os.environ.get('FLASK_ENV')
    if os.path.isfile(os.path.join(os.getcwd(), 'local_settings.py')):
        default = 'local_settings.Test' if flask_env == 'test' else 'local_settings.Local'
    elif flask_env == 'test':  # see tests/conftest.py
        default = 'mini_fiction.settings.Test'
    elif flask_env == 'development':  # uses .env file if started by mini_fiction command
        default = 'mini_fiction.settings.Development'
    else:
        default = 'mini_fiction.settings.Config'
    os.environ.setdefault('MINIFICTION_SETTINGS', default)
def configure_after_request_callbacks(app):
    """Run callbacks queued on ``g.after_request_callbacks`` at teardown."""
    # We have to use teardown_request instead of after_request because Pony ORM uses it
    @app.teardown_request
    def call_after_request_callbacks(exc=None):
        # Skip the callbacks entirely when the request failed.
        if exc is not None:
            return
        callbacks = getattr(g, 'after_request_callbacks', ())
        for func, args, kwargs in callbacks:
            func(*args, **kwargs)
def configure_user_agent(app):
    # pylint: disable=E1101
    """Compose the HTTP User-Agent string used for outgoing requests.

    A configured USER_AGENT wins outright; otherwise the string is built
    from platform, interpreter and package version data, with an optional
    USER_AGENT_POSTFIX appended.
    """
    configured = app.config.get('USER_AGENT')
    if configured:
        app.user_agent = str(configured)
        return
    import platform
    import urllib.request as urequest
    import mini_fiction
    system = platform.system() or 'NA'
    machine = platform.machine() or 'NA'
    release = platform.release() or 'NA'
    impl = platform.python_implementation() or 'Python'
    py_version = platform.python_version()
    impl_version = py_version
    if impl == 'PyPy':
        # CPython's version equals the implementation version, but PyPy
        # reports its own version separately.
        info = sys.pypy_version_info
        impl_version = '%d.%d.%d' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            impl_version += info.releaselevel
    app.user_agent = (
        'mini_fiction/%s (%s %s %s) Python/%s %s/%s urllib/%s'
        % (mini_fiction.__version__, system, machine, release,
           py_version, impl, impl_version, urequest.__version__)
    )
    postfix = app.config.get('USER_AGENT_POSTFIX')
    if postfix:
        postfix = str(postfix).strip()
        if postfix:
            app.user_agent += ' ' + postfix
def configure_i18n(app):
    """Set up Flask-Babel locale/timezone selection.

    The locale comes from the ``locale`` cookie (when it names a configured
    locale) or from the Accept-Language header; the timezone comes from the
    current user's profile. Outside of a request context, ``g.locale`` /
    ``g.timezone`` must be set by the caller beforehand.
    """
    babel = flask_babel.Babel(app)
    @babel.localeselector
    def get_locale():
        if not request:
            if hasattr(g, 'locale'):
                return g.locale
            raise RuntimeError('Babel is used outside of request context, please set g.locale')
        locales = app.config['LOCALES'].keys()
        locale = request.cookies.get('locale')
        if locale in locales:
            return locale
        return request.accept_languages.best_match(locales)
    @babel.timezoneselector
    def get_timezone():
        if not request:
            if hasattr(g, 'timezone'):
                return g.timezone
            raise RuntimeError('Babel is used outside of request context, please set g.timezone')
        return current_user.timezone or None
    @app.before_request
    def before_request():
        # Cache the resolved values on g so request code reads them cheaply.
        g.locale = flask_babel.get_locale()
        g.timezone = flask_babel.get_timezone()
def configure_cache(app):
    """Instantiate the cache backend selected by CACHE_TYPE / CACHE_PARAMS.

    CACHE_TYPE is either one of the known short names, a dotted path to a
    custom cache class, or "null" (the default NullCache).
    """
    kwargs = dict(app.config['CACHE_PARAMS'])
    cache_type = app.config['CACHE_TYPE']
    known_backends = {
        'null': 'cachelib.base.NullCache',
        'memcached': 'cachelib.memcached.MemcachedCache',
        'redis': 'cachelib.redis.RedisCache',
        'filesystem': 'cachelib.file.FileSystemCache',
        'uwsgi': 'cachelib.uwsgi.UWSGICache',
        'simple': 'cachelib.simple.SimpleCache',
    }
    if cache_type in known_backends:
        cache_class = known_backends[cache_type]
    elif '.' in cache_type:
        # A dotted path is treated as a custom cache class.
        cache_class = cache_type
    else:
        raise ValueError(f'Unknown cache type: {cache_type!r}')
    app.cache = import_string(cache_class)(**kwargs)
def configure_rate_limit(app):
    """Attach a Redis-backed rate limiter, or a no-op one when unconfigured."""
    backend = app.config.get('RATE_LIMIT_BACKEND')
    limiter_cls = ratelimit.RedisRateLimiter if backend else ratelimit.NullRateLimiter
    app.rate_limiter = limiter_cls(app)
def configure_forms(app):
    # Enable global CSRF protection for all form submissions; individual
    # blueprints opt out via app.csrf.exempt(...).
    app.csrf = CSRFProtect(app)
def configure_users(app):
    """Set up Flask-Login and the session-token user loader."""
    app.login_manager = LoginManager(app)
    app.login_manager.login_view = 'auth.login'
    app.login_manager.anonymous_user = models.AnonymousUser
    @app.login_manager.user_loader
    def load_user(token):
        # Tokens have the form "<user_id>#...": the id prefix selects the
        # user, and the full token must still match Author.get_id() so that
        # stale/forged tokens are rejected.
        try:
            user_id = token.split('#', 1)[0]
        except Exception:
            return
        user = models.Author.get(id=user_id, is_active=1)
        if not user or user.get_id() != token:
            return
        if user.id != current_app.config['SYSTEM_USER_ID']:
            # Throttle last_visit writes to at most once per minute.
            tm = datetime.utcnow()
            if not user.last_visit or (tm - user.last_visit).total_seconds() >= 60:
                user.last_visit = tm
        return user
def templates_context():
    """Build the global Jinja context from template tags and processors."""
    context = dict(current_app.templatetags)
    for processor in context_processors.context_processors:
        extra = processor()
        if extra:
            context.update(extra)
    return context
def configure_error_handlers(app):
    """Email unhandled errors to ADMINS, enriched with request/user info."""
    class RequestErrorFormatter(logging.Formatter):
        # Attach the client address and the current user to each log record
        # before formatting; reading the user requires a db_session.
        def format(self, record):
            from pony.orm import db_session
            record.remote_addr = request.remote_addr
            with db_session:
                record.user_id = current_user.id if current_user.is_authenticated else None
                record.username = current_user.username if current_user.is_authenticated else None
            return super().format(record)
    # The mail handler is attached only when both the recipient list and the
    # SMTP parameters are configured.
    if app.config['ADMINS'] and app.config['ERROR_EMAIL_HANDLER_PARAMS']:
        params = dict(app.config['ERROR_EMAIL_HANDLER_PARAMS'])
        params['toaddrs'] = app.config['ADMINS']
        params['fromaddr'] = app.config['ERROR_EMAIL_FROM']
        params['subject'] = app.config['ERROR_EMAIL_SUBJECT']
        smtp_handler = SMTPHandler(**params)
        smtp_handler.setLevel(logging.ERROR)
        smtp_handler.setFormatter(RequestErrorFormatter(app.config['ERROR_LOGFORMAT']))
        app.logger.addHandler(smtp_handler)
def configure_views(app):
    """Register all public blueprints and their URL prefixes."""
    from mini_fiction.views import index, auth, story, chapter, editlog, search, author, stream, object_lists
    from mini_fiction.views import story_comment, story_local_comment, feeds, staticpages, news, news_comment
    from mini_fiction.views import notifications, abuse, sitemap, tags
    from mini_fiction.views import misc
    app.register_blueprint(index.bp)
    app.register_blueprint(auth.bp, url_prefix='/accounts')
    app.register_blueprint(story.bp, url_prefix='/story')
    app.register_blueprint(chapter.bp)  # /story/%d/chapter/* and /chapter/*
    app.register_blueprint(editlog.bp, url_prefix='/editlog')
    app.register_blueprint(search.bp, url_prefix='/search')
    # Search is read-only, so it does not need CSRF tokens.
    app.csrf.exempt(search.bp)
    app.register_blueprint(author.bp, url_prefix='/accounts')
    app.register_blueprint(stream.bp, url_prefix='/stream')
    app.register_blueprint(object_lists.bp)
    app.register_blueprint(story_comment.bp)
    app.register_blueprint(story_local_comment.bp)
    app.register_blueprint(feeds.bp, url_prefix='/feeds')
    app.register_blueprint(staticpages.bp)
    app.register_blueprint(news.bp, url_prefix='/news')
    app.register_blueprint(news_comment.bp)
    app.register_blueprint(notifications.bp, url_prefix='/notifications')
    app.register_blueprint(abuse.bp, url_prefix='/abuse')
    app.register_blueprint(sitemap.bp)
    app.register_blueprint(tags.bp)
    app.add_url_rule('/dump/', 'dump', misc.dump)
def configure_admin_views(app):
    """Register the admin blueprints under the /admin/ URL prefix."""
    from mini_fiction.views.admin import index, characters, charactergroups
    from mini_fiction.views.admin import logopics, htmlblocks, staticpages, news, abuse_reports, votes
    from mini_fiction.views.admin import authors, registrations, tag_categories, tags
    app.register_blueprint(index.bp, url_prefix='/admin')
    app.register_blueprint(logopics.bp, url_prefix='/admin/logopics')
    app.register_blueprint(htmlblocks.bp, url_prefix='/admin/htmlblocks')
    app.register_blueprint(characters.bp, url_prefix='/admin/characters')
    app.register_blueprint(charactergroups.bp, url_prefix='/admin/charactergroups')
    app.register_blueprint(staticpages.bp, url_prefix='/admin/staticpages')
    app.register_blueprint(news.bp, url_prefix='/admin/news')
    app.register_blueprint(abuse_reports.bp, url_prefix='/admin/abuse_reports')
    app.register_blueprint(votes.bp, url_prefix='/admin/votes')
    app.register_blueprint(authors.bp, url_prefix='/admin/authors')
    app.register_blueprint(registrations.bp, url_prefix='/admin/registrations')
    app.register_blueprint(tag_categories.bp, url_prefix='/admin/tag_categories')
    app.register_blueprint(tags.bp, url_prefix='/admin/tags')
def configure_staticfiles(app):
    """Serve uploaded media and (optionally) local static overrides."""
    from mini_fiction.views import misc
    app.add_url_rule('/{}/<path:filename>'.format(app.config['MEDIA_URL'].strip('/')), 'media', misc.media)
    # LOCALSTATIC_* allows site-specific assets outside the package tree.
    if app.config['LOCALSTATIC_ROOT']:
        app.add_url_rule('/{}/<path:filename>'.format(app.config['LOCALSTATIC_URL'].strip('/')), 'localstatic', misc.localstatic)
def configure_ajax(app):
    """Detect AJAX requests and adapt responses for the client-side JS."""
    @app.before_request
    def is_request_ajax():
        # The frontend marks its fetches with the X-AJAX header (or the
        # ?isajax=1 query parameter as a fallback).
        g.is_ajax = request.headers.get('X-AJAX') == '1' or request.args.get('isajax') == '1'
    @app.after_request
    def ajax_template_response(response):
        if response.headers.get('Vary'):
            response.headers['Vary'] = 'X-AJAX, ' + response.headers['Vary']
        else:
            response.headers['Vary'] = 'X-AJAX'
        if not getattr(g, 'is_ajax', False):
            return response
        # Templates rendered for AJAX produce JSON bodies but keep the HTML
        # content type; fix up the content type here.
        if not response.direct_passthrough and response.data and response.data.startswith(b'{') and response.content_type == 'text/html; charset=utf-8':
            response.content_type = 'application/json'
            # for github-fetch polyfill:
            response.headers['X-Request-URL'] = iri_to_uri(request.url)
        elif response.status_code == 302:
            # fetch() follows redirects transparently, so tunnel the redirect
            # target through a JSON payload the client-side JS understands.
            response.data = flask_json.dumps({'page_content': {'redirect': response.headers.get('Location')}})
            response.content_type = 'application/json'
            response.status_code = 200
        return response
def configure_errorpages(app):
    """Register error handlers that render HTML pages or, for AJAX requests,
    JSON payloads carrying a modal dialog."""
    from flask import make_response, render_template
    def _error_common(template, template_modal, code, e):
        # g.is_ajax is not always set at this point (errors can happen before
        # the before_request hooks run), so detect AJAX requests directly.
        is_ajax = request.headers.get('X-AJAX') == '1' or request.args.get('isajax') == '1'
        if is_ajax:
            html = render_template(template_modal, error=e, error_code=code)
            response = jsonify({'page_content': {'modal': html}})
            response.status_code = code
            # for github-fetch polyfill:
            response.headers['X-Request-URL'] = iri_to_uri(request.url)
            return response
        html = render_template(template, error=e, error_code=code)
        return html, code
    def _page403(e):
        return _error_common('403.html', '403_modal.html', 403, e)
    def _page404(e):
        return _error_common('404.html', '404_modal.html', 404, e)
    def _page500(e):
        return _error_common('500.html', '500_modal.html', 500, e)
    def _page_rate_limit(e):
        # Bug fix: for non-AJAX requests _error_common returns an
        # (html, code) tuple, which has no .headers attribute; normalize it
        # to a real response object before attaching Retry-After.
        response = make_response(
            _error_common('rate_limit.html', 'rate_limit_modal.html', 429, e))
        if e.ttl > 0:
            response.headers['Retry-After'] = str(e.ttl)
        return response
    def _pagecsrf(e):
        return _error_common('csrf.html', 'csrf_modal.html', 400, e)
    def _pageall(e):
        # Informational/redirect codes are passed through untouched.
        if e.code and e.code < 400:
            return e
        return _error_common('error.html', 'error_modal.html', e.code or 500, e)
    app.errorhandler(403)(_page403)
    app.errorhandler(404)(_page404)
    app.errorhandler(500)(_page500)
    app.errorhandler(ratelimit.RateLimitExceeded)(_page_rate_limit)
    app.errorhandler(CSRFError)(_pagecsrf)
    app.errorhandler(HTTPException)(_pageall)
def configure_templates(app):
    """Register template tags, filters and local template overrides."""
    from mini_fiction.templatetags import random_stories, logopic, submitted_stories_count
    from mini_fiction.templatetags import story_comments_delta, html_block, hook, shown_newsitem
    from mini_fiction.templatetags import get_comment_threshold, notifications, misc
    from mini_fiction.templatetags import i18n, generate_captcha, unread_abuse_reports_count
    from mini_fiction.templatetags import registry
    app.templatetags = dict(registry.tags)
    app.jinja_env.filters['tojson_raw'] = flask_json.dumps  # unlike |tojson, does not escape &, < and >
    if app.config['LOCALTEMPLATES']:
        # Local template directories take precedence over packaged templates.
        paths = app.config['LOCALTEMPLATES']
        if isinstance(paths, str):
            paths = [paths]
        loaders = []
        for x in paths:
            loaders.append(jinja2.FileSystemLoader(os.path.abspath(x)))
        loaders.append(app.jinja_loader)
        app.jinja_loader = jinja2.ChoiceLoader(loaders)
    from mini_fiction.templatefilters import timesince
    from mini_fiction.templatefilters import registry as filters_registry
    app.jinja_env.filters.update(filters_registry.filters)
def configure_search(app):
    # Connection pool for the Sphinx full-text search daemon (SphinxQL).
    from mini_fiction.apis import amsphinxql
    app.sphinx = amsphinxql.SphinxPool(app.config['SPHINX_CONFIG']['connection_params'])
def configure_celery(app):
    """Create the Celery instance and make tasks run in an app context."""
    app.celery = Celery('mini_fiction', broker=app.config['CELERY_CONFIG']['broker_url'])
    app.celery.conf.update(app.config['CELERY_CONFIG'])
    TaskBase = app.celery.Task
    class ContextTask(TaskBase):
        # Wrap every task execution in a Flask application context so tasks
        # can use current_app, config, etc.
        abstract = True
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    app.celery.Task = ContextTask
    tasks.apply_for_app(app)
def configure_captcha(app):
    """Instantiate the captcha backend named by CAPTCHA_CLASS.

    CAPTCHA_CLASS is a dotted "module.ClassName" path; when it is absent or
    not a dotted path, captcha support is disabled.
    """
    dotted_path = app.config.get('CAPTCHA_CLASS')
    if not dotted_path or '.' not in dotted_path:
        app.captcha = None
        return
    module_name, class_name = dotted_path.rsplit('.', 1)
    captcha_cls = getattr(importlib.import_module(module_name), class_name)
    app.captcha = captcha_cls(app)
def configure_story_voting(app):
    """Instantiate the story voting backend named by STORY_VOTING_CLASS.

    STORY_VOTING_CLASS is a dotted "module.ClassName" path; when it is
    absent or not a dotted path, story voting is disabled.
    """
    dotted_path = app.config.get('STORY_VOTING_CLASS')
    if not dotted_path or '.' not in dotted_path:
        app.story_voting = None
        return
    module_name, class_name = dotted_path.rsplit('.', 1)
    voting_cls = getattr(importlib.import_module(module_name), class_name)
    app.story_voting = voting_cls(app)
def configure_misc(app):
    """Disable client caching by default and honor reverse-proxy headers."""
    @app.after_request
    def disable_cache(response):
        # Responses can opt in to caching by setting cache_control_exempt.
        if not getattr(response, 'cache_control_exempt', False):
            response.cache_control.max_age = 0
            response.cache_control.private = True
        return response
    # Trust X-Forwarded-* headers from the configured number of proxies so
    # request.remote_addr reflects the real client address.
    if app.config['PROXIES_COUNT'] > 0:
        app.wsgi_app = ProxyFix(
            app.wsgi_app,
            x_for=app.config['PROXIES_COUNT'],
            x_proto=app.config['PROXIES_COUNT'],
        )
def configure_development(app):
    """Enable the Flask debug toolbar and Pony ORM query recording."""
    if app.config.get('DEBUG_TB_ENABLED'):
        import time
        from flask_debugtoolbar import DebugToolbarExtension
        DebugToolbarExtension(app)
        if (
            'mini_fiction.utils.debugtoolbar.PonyDebugPanel' in app.config.get('DEBUG_TB_PANELS', ()) and
            (app.debug or app.config.get('PONYORM_RECORD_QUERIES'))
        ):
            from mini_fiction.utils import debugtoolbar as ponydbg
            from flask_debugtoolbar import module
            # Wrap Pony's low-level _exec_sql so each SQL statement and its
            # duration are recorded for the debug toolbar panel.
            old_exec_sql = database.db._exec_sql
            def my_exec_sql(sql, arguments=None, *args, **kwargs):
                t = time.time()
                result = old_exec_sql(sql, arguments, *args, **kwargs)
                t = time.time() - t
                ponydbg.record_query({
                    'statement': sql,
                    'parameters': arguments or (),
                    'duration': t,
                })
                return result
            database.db._exec_sql = my_exec_sql
            # Reset the recorded queries at the start of every request.
            app.before_request(ponydbg.clear_queries)
            app.csrf.exempt(module)
def configure_frontend(app: Flask) -> None:
    # Expose frontend asset helpers (CSS/JS/favicon bundles) to templates.
    app.add_template_global(frontend.stylesheets, name='stylesheets')
    app.add_template_global(frontend.scripts, name='scripts')
    app.add_template_global(frontend.favicon_bundle, name='favicon_bundle')
def configure_sidebar(app: "Flask"):
    """Resolve INDEX_SIDEBAR dotted paths into callables on app.index_sidebar.

    INDEX_SIDEBAR maps block names (identifier-like strings) to dotted
    "module.function" paths rendering each sidebar block.
    """
    app.index_sidebar = {}
    for block_name, func_path in app.config['INDEX_SIDEBAR'].items():
        if re.match(r'^[A-Za-z][A-Za-z0-9_]*$', block_name) is None:
            raise ValueError('Invalid sidebar block name: {!r}'.format(block_name))
        module_name, func_name = func_path.rsplit('.', 1)
        app.index_sidebar[block_name] = getattr(
            importlib.import_module(module_name), func_name)
def init_plugins(app):
    """Import every configured plugin and let it register its hooks.

    Entries in PLUGINS are either "module" (entry point defaults to
    ``configure_app``) or "module:function".
    """
    app.plugins = []
    app.hooks = {}
    with app.app_context():
        for plugin_module in app.config['PLUGINS']:
            if ':' in plugin_module:
                plugin_module, func_name = plugin_module.split(':', 1)
            else:
                func_name = 'configure_app'
            plugin = importlib.import_module(plugin_module)
            app.plugins.append(plugin)
            # The entry point receives register_hook to attach callbacks.
            getattr(plugin, func_name)(register_hook)
def register_hook(name, func):
    """Register *func* as a callback for the hook called *name*.

    Bug fix: the previous code tested the literal string ``'name'`` for
    membership instead of the ``name`` argument, so the condition was always
    true and every registration reset the callback list, silently dropping
    previously registered hooks with the same name.
    """
    current_app.hooks.setdefault(name, []).append(func)
| andreymal/mini_fiction | mini_fiction/application.py | Python | gpl-3.0 | 21,619 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A function to build an object detection box coder from configuration."""
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.box_coders import square_box_coder
from object_detection.protos import box_coder_pb2
def build(box_coder_config):
  """Builds a box coder object based on the box coder config.

  Args:
    box_coder_config: A box_coder.proto object containing the config for the
      desired box coder.

  Returns:
    BoxCoder based on the config.

  Raises:
    ValueError: On empty box coder proto.
  """
  if not isinstance(box_coder_config, box_coder_pb2.BoxCoder):
    raise ValueError('box_coder_config not of type box_coder_pb2.BoxCoder.')
  # Resolve the populated oneof field once and dispatch on it.
  oneof_name = box_coder_config.WhichOneof('box_coder_oneof')
  if oneof_name == 'faster_rcnn_box_coder':
    cfg = box_coder_config.faster_rcnn_box_coder
    return faster_rcnn_box_coder.FasterRcnnBoxCoder(
        scale_factors=[cfg.y_scale, cfg.x_scale,
                       cfg.height_scale, cfg.width_scale])
  if oneof_name == 'mean_stddev_box_coder':
    return mean_stddev_box_coder.MeanStddevBoxCoder()
  if oneof_name == 'square_box_coder':
    cfg = box_coder_config.square_box_coder
    return square_box_coder.SquareBoxCoder(
        scale_factors=[cfg.y_scale, cfg.x_scale, cfg.length_scale])
  raise ValueError('Empty box coder.')
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/object_detection/builders/box_coder_builder.py | Python | bsd-2-clause | 2,313 |
# -*- coding: utf-8 -*-
"""Manages settings and config file.
Manages the settings of the program.
Settings are loaded from a configuration file. If they don't exists, default
values are provided.
When an option is set, the config file is updated.
Before any use, the module must be initialized by calling ``load()``.
"""
try:
import configparser
except ImportError:
import ConfigParser as configparser # Python2
import logging
import os.path
import sys
from . import path as bajoo_path
_logger = logging.getLogger(__name__)
# Default config dict. Values not present in this dict are not valid.
# Each entry contains the type expected, and the default value.
_default_config = {
'auto_update': {'type': bool, 'default': True},
'autorun': {'type': bool, 'default': True},
'lang': {'type': str, 'default': None},
# TODO: set default debug_mode to False for stable release
'debug_mode': {'type': bool, 'default': True},
'exclude_hidden_files': {'type': bool, 'default': True},
'log_levels': {'type': dict, 'default': {}},
# Can be "no_proxy", "system_settings" or "manual_settings"
'proxy_mode': {'type': str, 'default': 'system_settings'},
# can be "SOCKS4", "SOCKS5" or "HTTP".
'proxy_type': {'type': str, 'default': 'HTTP'},
'proxy_url': {'type': str, 'default': None},
'proxy_port': {'type': int, 'default': None},
'proxy_user': {'type': str, 'default': None},
'proxy_password': {'type': str, 'default': None},
'contextual_icon': {'type': bool, 'default': True},
'notifications': {'type': bool, 'default': True},
'download_max_speed': {'type': float, 'default': None},
'upload_max_speed': {'type': float, 'default': None},
# These credentials are valid, but are intended for test purpose only.
# They can be revoked at any moment. If you want to develop your own
# client, either for distributing it or for your personal use, please
# contact us at <support-en@bajoo.fr> and we will be happy to provide you
# some credentials.
'client_id': {'type': str, 'default': '24e1aba5d72046dd8db4410176613698'},
'client_secret': {'type': str,
'default': '4f2c73941fda45eaa5e72a0ade8e7b80'},
'identity_api_url': {'type': str, 'default': 'https://beta.bajoo.fr'},
'storage_api_url': {'type': str, 'default': 'https://storage.bajoo.fr/v1'},
}
# Actual config parser
_config_parser = configparser.ConfigParser()
_config_parser.add_section('config')
def _get_config_file_path():
    # The config file lives in the per-user Bajoo configuration directory.
    return os.path.join(bajoo_path.get_config_dir(), 'bajoo.ini')
def load():
    """Find and load the config file.

    This function must be called before any use of the module. A missing or
    unreadable file is logged but not fatal: defaults are used instead.
    """
    global _config_parser
    path = _get_config_file_path()
    parsed_files = _config_parser.read(path)
    if not parsed_files:
        _logger.warning('Unable to load config file: %s' % path)
def _parse_dict_entry(dict_str):
    """Parse a dict config entry of the form 'key=value;key2=value2'.

    Values that look like integers are converted to int; malformed pairs
    are skipped with a warning.
    """
    result = {}
    for pair in filter(None, dict_str.split(';')):
        try:
            (k, v) = pair.split('=')
            try:
                v = int(v)
            except ValueError:
                pass  # v is not a number.
            result[k] = v
        except ValueError:
            _logger.warning('Unable to parse pair key=value: "%s"' % pair)
    return result


def get(key, unicode=False):
    """Find and return a configuration entry

    If the entry is not specified in the config file, a default value is
    returned.

    Args:
        key (string): the entry key.
        unicode (boolean): if True, and if the value is a string, it will be
            returned in unicode. Note that it's only used with Python2.
    Returns:
        The corresponding value found.
    Raises:
        KeyError: if the config entry doesn't exists.
    """
    if key not in _default_config:
        raise KeyError
    entry_type = _default_config[key]['type']
    try:
        if entry_type is bool:
            return _config_parser.getboolean('config', key)
        if entry_type is int:
            return _config_parser.getint('config', key)
        if entry_type is dict:
            # Dict entries are in the form 'key=value;key2=value2'
            return _parse_dict_entry(_config_parser.get('config', key))
        value = _config_parser.get('config', key)
        # Fixed `is 2` -> `== 2`: identity comparison with an int literal is
        # a CPython implementation detail (and a SyntaxWarning since 3.8).
        if unicode and sys.version_info[0] == 2:
            return value.decode('utf-8')
        return value
    except configparser.NoOptionError:
        return _default_config[key]['default']
    except ValueError:
        # `Logger.warn` is a deprecated alias; use `warning`.
        _logger.warning('Invalid config value for key %s. '
                        'Returns default value instead' % key)
        return _default_config[key]['default']
def set(key, value):
    """Set a configuration entry.

    Args:
        key (string): the entry key.
        value: the new value to set. If unicode (Python 2), it will be
            converted into str using utf-8 encoding.
    Raises:
        KeyError: if the config entry is not valid.
    """
    if key not in _default_config:
        raise KeyError(key)
    if value is None:
        _config_parser.remove_option('config', key)
    else:
        # BUGFIX: encode unicode values *before* the generic str()
        # conversion. The previous order called str(value) first, which made
        # the unicode branch unreachable and raised UnicodeEncodeError under
        # Python 2 for non-ASCII unicode values.
        if sys.version_info[0] == 2 and isinstance(value, unicode):  # noqa: F821
            value = value.encode('utf-8')
        elif not isinstance(value, str):
            value = str(value)
        _config_parser.set('config', key, value)
    config_file_path = _get_config_file_path()
    try:
        # Note: configParser.write use the default string type
        with open(config_file_path, 'w') as config_file:
            _config_parser.write(config_file)
        _logger.debug('Config file modified.')
    except IOError:
        _logger.warning('Unable to write in the config file', exc_info=True)
def main():
    """Manual smoke test: exercise load/get/set from the command line."""
    logging.basicConfig()
    load()

    value = get('debug_mode')
    print('"debug_mode" config is %s (type %s).' % (value, type(value)))

    # Unknown keys must raise KeyError.
    try:
        get('foo')
    except KeyError:
        print("The key foo doesn't exists, as expected")

    set('debug_mode', False)
    value = get('debug_mode')
    print('debug_mode should now be false: %s (type %s)'
          % (value, type(value)))


if __name__ == "__main__":
    main()
| Bajoo/client-pc | bajoo/common/config.py | Python | gpl-3.0 | 6,387 |
from citrination_client.search.pif.query.core.base_object_query import BaseObjectQuery
from citrination_client.search.pif.query.core.field_query import FieldQuery
from citrination_client.search.pif.query.core.value_query import ValueQuery
class ProcessStepQuery(BaseObjectQuery):
    """Query against a single process step."""

    def __init__(self, logic=None, weight=None, simple=None, simple_weight=None, extract_as=None, extract_all=None,
                 extract_when_missing=None, tags=None, length=None, offset=None, name=None, details=None,
                 query=None, **kwargs):
        """Constructor.

        :param logic: Logic for this filter: "MUST", "MUST_NOT", "SHOULD" or "OPTIONAL".
        :param weight: Weight of the query.
        :param simple: String with the query to run against all fields.
        :param simple_weight: Dictionary of relative paths to their weights for simple queries.
        :param extract_as: String with the alias to save this field under.
        :param extract_all: Boolean setting whether all values in an array should be extracted.
        :param extract_when_missing: Value returned when a value to extract is missing
            (and the overall query is still satisfied).
        :param tags: One or more :class:`FieldQuery` operations against the tags field.
        :param length: One or more :class:`FieldQuery` operations against the length field.
        :param offset: One or more :class:`FieldQuery` operations against the offset field.
        :param name: One or more :class:`FieldQuery` operations against the name field.
        :param details: One or more :class:`ValueQuery` operations against the details of the step.
        :param query: One or more :class:`ProcessStepQuery` objects with nested queries.
        """
        super(ProcessStepQuery, self).__init__(
            logic=logic, weight=weight, simple=simple, simple_weight=simple_weight, extract_as=extract_as,
            extract_all=extract_all, extract_when_missing=extract_when_missing, tags=tags, length=length,
            offset=offset, **kwargs)
        # Assigning through the properties below normalizes raw values into
        # the corresponding query objects.
        self._name = None
        self.name = name
        self._details = None
        self.details = details
        self._query = None
        self.query = query

    def _read_name(self):
        return self._name

    def _write_name(self, value):
        self._name = self._get_object(FieldQuery, value)

    def _clear_name(self):
        self._name = None

    # Query against the step name (:class:`FieldQuery`).
    name = property(_read_name, _write_name, _clear_name)

    def _read_details(self):
        return self._details

    def _write_details(self, value):
        self._details = self._get_object(ValueQuery, value)

    def _clear_details(self):
        self._details = None

    # Query against the step details (:class:`ValueQuery`).
    details = property(_read_details, _write_details, _clear_details)

    def _read_query(self):
        return self._query

    def _write_query(self, value):
        self._query = self._get_object(ProcessStepQuery, value)

    def _clear_query(self):
        self._query = None

    # Nested :class:`ProcessStepQuery` objects.
    query = property(_read_query, _write_query, _clear_query)
| CitrineInformatics/python-citrination-client | citrination_client/search/pif/query/core/process_step_query.py | Python | apache-2.0 | 3,084 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=no-init,attribute-defined-outside-init
import systemtesting
from mantid import *
from mantid.simpleapi import *
import math
class MagnetismReflectometryReductionTest(systemtesting.MantidSystemTest):
    """Standard reduction of REF_M run 24949 compared against a reference file."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        # Reduction settings gathered in a dict so the call site stays short.
        options = dict(
            NormalizationRunNumber=24945,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap', 'Axes'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MagnetismReflectometryReductionConstQTest(systemtesting.MantidSystemTest):
    """Same reduction as the standard test but with constant-Q binning."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        options = dict(
            NormalizationRunNumber=24945,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=True,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Spot-check a single reflectivity point rather than a full file diff.
        refl = mtd["r_24949"].dataY(0)
        return math.fabs(refl[1] - 0.648596877775159) < 0.002
class MagnetismReflectometryReductionSkipRebinTest(systemtesting.MantidSystemTest):
    """Reduction with the final rebin disabled (FinalRebin=False)."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        options = dict(
            NormalizationRunNumber=24945,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            FinalRebin=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Without the final rebin the first Q value should not sit at QMin.
        q_values = mtd["r_24949"].dataX(0)
        return math.fabs(q_values[0] - 0.005) > 0.001
class MagnetismReflectometryReductionConstQWLCutTest(systemtesting.MantidSystemTest):
    """Constant-Q reduction with the time axis expressed in wavelength."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        options = dict(
            NormalizationRunNumber=24945,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            # Axis in wavelength units, hence the small step and range values.
            UseWLTimeAxis=True,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=0.007,
            TimeAxisRange=[4.5, 10.5],
            SpecularPixel=126.9,
            ConstantQBinning=True,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Spot-check a single reflectivity point rather than a full file diff.
        refl = mtd["r_24949"].dataY(0)
        return math.fabs(refl[1] - 0.648596877775159) < 0.002
class MRFilterCrossSectionsTest(systemtesting.MantidSystemTest):
    """Data loading and cross-section extraction, passing the workspace by name."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        options = dict(
            NormalizationRunNumber=24945,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=False,
        )
        # Pass the workspace *name* (string) rather than the object itself.
        MagnetismReflectometryReduction(InputWorkspace=str(xs_group[0]),
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap', 'Axes'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MRFilterCrossSectionsWithWorkspaceTest(systemtesting.MantidSystemTest):
    """Cross-section extraction starting from an already-loaded workspace."""

    def runTest(self):
        ws_input = LoadEventNexus(Filename="REF_M_24949",
                                  NXentryName="entry-Off_Off",
                                  OutputWorkspace="r_24949")
        # The (older) test file lacks polarizer/analyzer logs, so add them.
        # A value of 0 also exercises the edge case where there is no
        # analyzer or polarizer, which should behave like a simple load.
        for device in ('polarizer', 'analyzer'):
            AddSampleLog(Workspace=ws_input, LogName=device,
                         LogText="0",
                         LogType='Number Series', LogUnit='')
        xs_group = MRFilterCrossSections(InputWorkspace=ws_input)
        options = dict(
            NormalizationRunNumber=24945,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap', 'Axes'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MRNormaWorkspaceTest(systemtesting.MantidSystemTest):
    """Reduction using a pre-loaded normalization workspace instead of a run number."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        ws_norm = LoadEventNexus(Filename="REF_M_24945",
                                 NXentryName="entry-Off_Off",
                                 OutputWorkspace="r_24945")
        options = dict(
            NormalizationWorkspace=ws_norm,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap', 'Axes'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MRDIRPIXTest(systemtesting.MantidSystemTest):
    """Reduction with the direct-beam pixel (DIRPIX) overwritten."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        ws_norm = LoadEventNexus(Filename="REF_M_24945",
                                 NXentryName="entry-Off_Off",
                                 OutputWorkspace="r_24945")
        # The logs have DANGLE0 = 4.50514 and DIRPIX = 204.
        # DIRPIX and the specular pixel are both shifted by +10 pixels here
        # (214 / 136.9), so the scattering angle is unchanged.
        options = dict(
            NormalizationWorkspace=ws_norm,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=136.9,
            UseSANGLE=False,
            DirectPixelOverwrite=214,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MRDANGLE0Test(systemtesting.MantidSystemTest):
    """Reduction with DANGLE0 overwritten to compensate a shifted specular pixel."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        ws_norm = LoadEventNexus(Filename="REF_M_24945",
                                 NXentryName="entry-Off_Off",
                                 OutputWorkspace="r_24945")
        # Compute a DANGLE0 that cancels out the 1-pixel shift of the
        # specular pixel (127.9 instead of 126.9).
        theta_shifted = MRGetTheta(Workspace=xs_group[0], UseSANGLE=False, SpecularPixel=127.9)
        theta_nominal = MRGetTheta(Workspace=xs_group[0], UseSANGLE=False, SpecularPixel=126.9)
        dangle0 = xs_group[0].getRun()['DANGLE0'].getStatistics().mean
        dangle0 += (theta_shifted-theta_nominal)*2.0*180./math.pi
        options = dict(
            NormalizationWorkspace=ws_norm,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=127.9,
            UseSANGLE=False,
            DAngle0Overwrite=dangle0,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MROutputTest(systemtesting.MantidSystemTest):
    """Test the MR output algorithm."""

    def runTest(self):
        xs_group = MRFilterCrossSections(Filename="REF_M_24949")
        ws_norm = LoadEventNexus(Filename="REF_M_24945",
                                 NXentryName="entry-Off_Off",
                                 OutputWorkspace="r_24945")
        options = dict(
            NormalizationWorkspace=ws_norm,
            SignalPeakPixelRange=[125, 129],
            SubtractSignalBackground=True,
            SignalBackgroundPixelRange=[15, 105],
            ApplyNormalization=True,
            NormPeakPixelRange=[201, 205],
            SubtractNormBackground=True,
            NormBackgroundPixelRange=[10, 127],
            CutLowResDataAxis=True,
            LowResDataAxisPixelRange=[91, 161],
            CutLowResNormAxis=True,
            LowResNormAxisPixelRange=[86, 174],
            CutTimeAxis=True,
            UseWLTimeAxis=False,
            QMin=0.005,
            QStep=-0.01,
            TimeAxisStep=40,
            TimeAxisRange=[25000, 54000],
            SpecularPixel=126.9,
            ConstantQBinning=False,
        )
        MagnetismReflectometryReduction(InputWorkspace=xs_group[0],
                                        OutputWorkspace="r_24949",
                                        **options)

    def validate(self):
        # Be more tolerant with the output, mainly because of the errors.
        # The following tolerance check the errors up to the third digit.
        self.disableChecking.extend(['Instrument', 'Sample', 'SpectraMap', 'Axes'])
        return "r_24949", 'MagnetismReflectometryReductionTest.nxs'
class MRInspectionTest(systemtesting.MantidSystemTest):
    """Check that MRInspectData flags a reflected-beam run as such."""

    def runTest(self):
        workspace = LoadEventNexus(Filename="REF_M_24949",
                                   NXentryName="entry-Off_Off",
                                   OutputWorkspace="r_24949")
        MRInspectData(Workspace=workspace)

    def validate(self):
        # The run is a reflected beam, so is_direct_beam must be "False".
        run = mtd["r_24949"].getRun()
        return run.getProperty("is_direct_beam").value == "False"
class MRInspectionOverwritesTest(systemtesting.MantidSystemTest):
    """Check MRInspectData with DIRPIX/DANGLE0 overwrites supplied."""

    def runTest(self):
        workspace = LoadEventNexus(Filename="REF_M_24949",
                                   NXentryName="entry-Off_Off",
                                   OutputWorkspace="r_24949")
        MRInspectData(Workspace=workspace, DirectPixelOverwrite=208.0,
                      DAngle0Overwrite=5.0)

    def validate(self):
        # The run is a reflected beam, so is_direct_beam must be "False".
        run = mtd["r_24949"].getRun()
        return run.getProperty("is_direct_beam").value == "False"
class MRGetThetaTest(systemtesting.MantidSystemTest):
    """ Test that the MRGetTheta algorithm produces correct results """
    def runTest(self):
        # Expected angles (in radians) are hard-coded from a reference
        # computation for run REF_M_24949; do not "simplify" the arithmetic.
        nxs_data = LoadEventNexus(Filename="REF_M_24949",
                                  NXentryName="entry-Off_Off",
                                  OutputWorkspace="r_24949")
        # SANGLE-based theta, with and without an extra offset of pi radians.
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data, UseSANGLE=True), 0.606127/180.0*math.pi)
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data, UseSANGLE=True, AngleOffset=math.pi), 180.606127/180.0*math.pi)
        # DANGLE-based theta from an explicit specular pixel.
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data, SpecularPixel=126.1), 0.61249193272/180.0*math.pi)
        # In the present case, DANGLE = DANGLE0, so we expect 0 if nothing else is passed
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data), 0.0)
        # The logs have DANGLE0 = 4.50514 and DIRPIX = 204
        # Setting DIRPIX without setting a specular pixel shouldn't change anything
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data, DirectPixelOverwrite=145), 0.0)
        # Setting DIRPIX and the specular pixel with move things
        # Move everything by 4 pixels and we should get the same answer (which depends only on the difference of the two)
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data, DirectPixelOverwrite=208, SpecularPixel=130.1), 0.61249193272/180.0*math.pi)
        # Shifting DANGLE0 by 180 degrees shifts theta by pi/2 (theta is half
        # the detector angle change).
        dangle0 = nxs_data.getRun()['DANGLE0'].value[0]
        self.assertAlmostEqual(MRGetTheta(Workspace=nxs_data, DAngle0Overwrite=dangle0+180.0), math.pi/2.0)
    def validate(self):
        # All checks are done with assertions in runTest().
        return True
| mganeva/mantid | Testing/SystemTests/tests/analysis/MagnetismReflectometryReductionTest.py | Python | gpl-3.0 | 25,025 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Dave Lasley <dave@laslabs.com>
# Copyright: 2015 LasLabs, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models
class MedicalMedicamentAttributeColor(models.Model):
    """Color variant of the medicament physical attribute model."""
    _name = 'medical.medicament.attribute.color'
    _description = 'Medical Medicament Physical Attributes - Color'
    # All fields/behaviour come from the shared abstract attribute model.
    _inherit = 'medical.medicament.attribute.abstract'
| ShaheenHossain/eagle-medical | medical_medicament_attributes/models/medical_medicament_attribute_color.py | Python | agpl-3.0 | 1,207 |
__author__ = 'tiefkuehlpizze'
__version__ = '0.1'
# Submodules exported by the package; consumed by the star-import below.
__all__ = [
    'exception',
    'session',
    'client',
    'user',
    'anime',
    'manga',
    'character',
    'staff',
    'studio',
    'review',
    'forum',
    'anilist',
]
# NOTE(review): star-imports the package into itself; with __all__ listing
# submodule names this pulls every listed submodule into the package
# namespace. Presumably relies on Python 2 import semantics — confirm before
# porting to Python 3 (an explicit relative import would be safer).
from pyanilist import *
| Tiefkuehlpizze/pyanilist | pyanilist/__init__.py | Python | gpl-3.0 | 260 |
from KTrade import Trade
from KTradeReader import TradeReader
from KPortfolio import Portfolio
class ResultsViewer(object):
''' Responsible for presenting the results.
'''
def __init__(self, filePath):
self.filePath = filePath
def PrintPortfolioInformation(self):
trades = TradeReader.TradesFromFile(self.filePath)
portfolio = Portfolio(trades)
print '{0:15}{1:17}{2:25}'.format('Stock', 'Quantity', 'RPL')
print 40*'-'
for stockName in portfolio.StockNames():
position = portfolio.PositionByStockName(stockName)
print '{0:10}{1:10}{2:15}'.format(stockName, int(position.Quantity()), int(position.RPL()))
print 40*'-'
print 'Total RPL: {0:15}{1:10}'.format('', int(portfolio.RPL()))
viewer = ResultsViewer('trades.csv')
viewer.PrintPortfolioInformation() | Kolguyev/samples | compute_rpl/main.py | Python | gpl-2.0 | 865 |
from c2corg_api.search import create_search, elasticsearch_config, \
get_text_query_on_title
from c2corg_api.views.document_listings import get_documents
from elasticsearch_dsl.search import MultiSearch
def search_for_types(search_types, search_term, limit, lang):
    """Get results for all given types.
    """
    if not search_types:
        return {}

    document_id = try_to_parse_document_id(search_term)

    if document_id is not None:
        # A numeric search term is treated as a direct document-id lookup,
        # repeated for every requested type (total unknown -> None).
        per_type_results = [([document_id], None)] * len(search_types)
    else:
        # Otherwise run a single ElasticSearch multi-search.
        per_type_results = do_multi_search_for_types(
            search_types, search_term, limit, lang)

    results = {}
    for (key, get_documents_config), (document_ids, total) in \
            zip(search_types, per_type_results):
        # The ids/total are bound as defaults so the callback is independent
        # of the loop variables; it is invoked synchronously below.
        def search_documents(_, __, ids=document_ids, count=total):
            return ids, count

        results[key] = get_documents(
            get_documents_config, {'lang': lang}, search_documents)
    return results
def do_multi_search_for_types(search_types, search_term, limit, lang):
    """ Executes a multi-search for all document types in a single request
    and returns a list of tuples (document_ids, total) containing the results
    for each type.
    """
    multi_search = MultiSearch(index=elasticsearch_config['index'])

    for _, get_documents_config in search_types:
        # Only document ids are requested from ES (fields([])).
        type_search = create_search(get_documents_config.document_type). \
            query(get_text_query_on_title(search_term, lang)). \
            fields([]). \
            extra(from_=0, size=limit)
        multi_search = multi_search.add(type_search)

    responses = multi_search.execute()

    return [
        ([int(hit.meta.id) for hit in response], response.hits.total)
        for response in responses
    ]
def try_to_parse_document_id(search_term):
    """Interpret the search term as a numeric document id.

    Returns the id as an int, or ``None`` when the term is not an integer
    (or not a string/number at all).
    """
    try:
        parsed_id = int(search_term)
    except (ValueError, TypeError):
        return None
    return parsed_id
| c2corg/v6_api | c2corg_api/search/search.py | Python | agpl-3.0 | 2,314 |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed block chain with the 82 inputs will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
base_relative_locktime = 10
seq_disable_flag = 1 << 31
seq_random_high_bit = 1 << 25
seq_type_flag = 1 << 22
seq_random_low_bit = 1 << 18

# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively
# in the nSequence field.
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the
# indicated bits set if their indices are 1.


def _build_relative_locktime(b31, b25, b22, b18):
    """Return base_relative_locktime with each flag OR'd in when its
    selector index is 1."""
    rlt = base_relative_locktime
    if b31:
        rlt |= seq_disable_flag
    if b25:
        rlt |= seq_random_high_bit
    if b22:
        rlt |= seq_type_flag
    if b18:
        rlt |= seq_random_low_bit
    return rlt


# range() instead of the Python-2-only xrange(): identical iteration,
# portable to Python 3.
relative_locktimes = [[[[_build_relative_locktime(b31, b25, b22, b18)
                         for b18 in range(2)]
                        for b22 in range(2)]
                       for b25 in range(2)]
                      for b31 in range(2)]
def all_rlt_txs(txarray):
    """Flatten a 2x2x2x2 nested tx array into a flat 16-element list.

    Order matches nested iteration over (b31, b25, b22, b18), most
    significant selector bit first — the same order the nested arrays are
    built in.
    """
    # range() instead of the Python-2-only xrange(); a comprehension
    # replaces the four nested loops.
    return [txarray[b31][b25][b22][b18]
            for b31 in range(2)
            for b25 in range(2)
            for b22 in range(2)
            for b18 in range(2)]
class BIP68_112_113Test(ComparisonTestFramework):
    def __init__(self):
        # A single node is enough: the test drives it through the comparison
        # framework's P2P connection (see run_test / setup_network).
        self.num_nodes = 1
    def setup_network(self):
        # Must set the blockversion for this test
        # (-blockversion=4: the node mines version-4 blocks, so version-bits
        # signalling is fully controlled by create_test_block() below).
        self.nodes = start_nodes(1, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
                                 binary=[self.options.testbinary])
    def run_test(self):
        """Framework entry point: run the scenarios yielded by get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        test.run()
    def send_generic_input_tx(self, node, coinbases):
        """Spend one coinbase (popped from `coinbases`) to our own address.

        Returns the result of sendrawtransaction (the new transaction id).
        """
        # Slightly below the 50-coin coinbase value to leave room for the fee.
        amount = Decimal("49.99")
        return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(rawtx))
tx.deserialize(f)
return tx
def sign_transaction(self, node, unsignedtx):
rawtx = ToHex(unsignedtx)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in xrange(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
    def create_test_block(self, txs, version = 536870912):
        """Build a solved block on top of self.tip containing `txs`.

        The default version 536870912 (0x20000000) is the version-bits base;
        the block timestamp is 600s after self.last_block_time.
        """
        block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
        block.nVersion = version
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()
        return block
    def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
        """Build 16 signed txs as a 2x2x2x2 nested list [b31][b25][b22][b18].

        Each tx spends the next entry of `bip68inputs` and gets
        nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
        and nVersion = txversion.
        """
        txs = []
        assert(len(bip68inputs) >= 16)
        i = 0
        for b31 in xrange(2):
            b25txs = []
            for b25 in xrange(2):
                b22txs = []
                for b22 in xrange(2):
                    b18txs = []
                    for b18 in xrange(2):
                        tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
                        # Consume inputs in flat order while nesting outputs.
                        i += 1
                        tx.nVersion = txversion
                        tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        b18txs.append(self.sign_transaction(self.nodes[0], tx))
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
    def create_bip112special(self, input, txversion):
        """Build a signed tx whose scriptSig pushes -1 before OP_CSV.

        Used to test a negative argument to OP_CSV (OP_NOP3 is the
        CHECKSEQUENCEVERIFY opcode per BIP 112).
        """
        tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
        tx.nVersion = txversion
        signtx = self.sign_transaction(self.nodes[0], tx)
        # Prepend "-1 OP_CSV OP_DROP" to the already-signed scriptSig.
        signtx.vin[0].scriptSig = CScript([-1, OP_NOP3, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
        return signtx
    def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
        """Build 16 signed txs (2x2x2x2 nested list) with OP_CSV scriptSigs.

        If `varyOP_CSV` is true, nSequence is fixed to
        base_relative_locktime + locktime_delta and the OP_CSV argument is
        varied over relative_locktimes; otherwise the OP_CSV argument is
        fixed and nSequence is varied instead.
        """
        txs = []
        assert(len(bip112inputs) >= 16)
        i = 0
        for b31 in xrange(2):
            b25txs = []
            for b25 in xrange(2):
                b22txs = []
                for b22 in xrange(2):
                    b18txs = []
                    for b18 in xrange(2):
                        tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
                            tx.vin[0].nSequence = base_relative_locktime + locktime_delta
                        else: # vary nSequence instead, OP_CSV is fixed
                            tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        tx.nVersion = txversion
                        signtx = self.sign_transaction(self.nodes[0], tx)
                        # Prepend "<locktime> OP_CSV OP_DROP" to the signed
                        # scriptSig (OP_NOP3 = CHECKSEQUENCEVERIFY, BIP 112).
                        if (varyOP_CSV):
                            signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_NOP3, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        else:
                            signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_NOP3, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        b18txs.append(signtx)
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
    def get_tests(self):
        """Generator of TestInstances driving BIP68/112/113 soft-fork tests.

        Walks the BIP9 'csv' deployment through DEFINED -> STARTED ->
        LOCKED_IN -> ACTIVE, then verifies each BIP's consensus rules both
        before and after activation.  The trailing ``# n`` comments number
        the yielded TestInstances; yield order is the test contract.
        """
        long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
        self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
        self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
        self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
        self.tipheight = 82 # height of the next block to build
        self.last_block_time = long_past_time
        self.tip = int ("0x" + self.nodes[0].getbestblockhash() + "L", 0)
        self.nodeaddress = self.nodes[0].getnewaddress()
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
        test_blocks = self.generate_blocks(61, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 1
        # Advanced from DEFINED to STARTED, height = 143
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
        # Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False) # 2
        # Failed to advance past STARTED, height = 287
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
        # 108 out of 144 signal bit 0 to achieve lock-in
        # using a variety of bits to simulate multiple parallel softforks
        test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
        test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
        test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
        test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
        yield TestInstance(test_blocks, sync_every_block=False) # 3
        # Advanced from STARTED to LOCKED_IN, height = 431
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
        # 140 more version 4 blocks
        test_blocks = self.generate_blocks(140, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 4
        ### Inputs at height = 572
        # Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
        # Note we reuse inputs for v1 and v2 txs so must test these separately
        # 16 normal inputs
        bip68inputs = []
        for i in xrange(16):
            bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
        # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112basicinputs = []
        for j in xrange(2):
            inputs = []
            for i in xrange(16):
                inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
            bip112basicinputs.append(inputs)
        # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112diverseinputs = []
        for j in xrange(2):
            inputs = []
            for i in xrange(16):
                inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
            bip112diverseinputs.append(inputs)
        # 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
        bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
        # 1 normal input
        bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
        self.nodes[0].setmocktime(self.last_block_time + 600)
        inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
        self.nodes[0].setmocktime(0)
        self.tip = int("0x" + inputblockhash + "L", 0)
        self.tipheight += 1
        self.last_block_time += 600
        assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
        # 2 more version 4 blocks
        test_blocks = self.generate_blocks(2, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 5
        # Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
        # Test both version 1 and version 2 transactions for all tests
        # BIP113 test transaction will be modified before each use to put in appropriate block time
        bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
        bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
        bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
        bip113tx_v2.nVersion = 2
        # For BIP68 test all 16 relative sequence locktimes
        bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
        bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
        # For BIP112 test:
        # 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
        bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
        # 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
        bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
        bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
        # sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
        bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
        # sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
        bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
        bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
        # -1 OP_CSV OP_DROP input
        bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
        bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
        ### TESTING ###
        ##################################
        ### Before Soft Forks Activate ###
        ##################################
        # All txs should pass
        ### Version 1 txs ###
        success_txs = []
        # add BIP113 tx and -1 CSV tx
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
        success_txs.append(bip113signed1)
        success_txs.append(bip112tx_special_v1)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### Version 2 txs ###
        success_txs = []
        # add BIP113 tx and -1 CSV tx
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
        success_txs.append(bip113signed2)
        success_txs.append(bip112tx_special_v2)
        # add BIP 68 txs
        success_txs.extend(all_rlt_txs(bip68txs_v2))
        # add BIP 112 with seq=10 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
        # try BIP 112 with seq=9 txs
        success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
        success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
        test_blocks = self.generate_blocks(1, 4)
        yield TestInstance(test_blocks, sync_every_block=False) # 8
        assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
        #################################
        ### After Soft Forks Activate ###
        #################################
        ### BIP 113 ###
        # BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
        bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
        # BIP 113 tests should now pass if the locktime is < MTP
        bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
        bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
        bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
        bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
        for bip113tx in [bip113signed1, bip113signed2]:
            yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
            self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Next block height = 580 after 4 blocks of random version
        test_blocks = self.generate_blocks(4, 1234)
        yield TestInstance(test_blocks, sync_every_block=False) # 13
        ### BIP 68 ###
        ### Version 1 txs ###
        # All still pass
        success_txs = []
        success_txs.extend(all_rlt_txs(bip68txs_v1))
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### Version 2 txs ###
        bip68success_txs = []
        # All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
        bip68timetxs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
        for tx in bip68timetxs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
        bip68heighttxs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
        for tx in bip68heighttxs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
        # Advance one block to 581
        test_blocks = self.generate_blocks(1, 1234)
        yield TestInstance(test_blocks, sync_every_block=False) # 24
        # Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
        bip68success_txs.extend(bip68timetxs)
        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        for tx in bip68heighttxs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
        # Advance one block to 582
        test_blocks = self.generate_blocks(1, 1234)
        yield TestInstance(test_blocks, sync_every_block=False) # 30
        # All BIP 68 txs should pass
        bip68success_txs.extend(bip68heighttxs)
        yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### BIP 112 ###
        ### Version 1 txs ###
        # -1 OP_CSV tx should fail
        yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
        success_txs = []
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
                    success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
        fail_txs = []
        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
                    fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
        ### Version 2 txs ###
        # -1 OP_CSV tx should fail
        yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
        success_txs = []
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
                    success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
        # All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
        fail_txs = []
        fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
        # If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
        fail_txs = []
        for b25 in xrange(2):
            for b22 in xrange(2):
                for b18 in xrange(2):
                    fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
        # If sequencelock types mismatch, tx should fail
        fail_txs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
                fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
        for tx in fail_txs:
            yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
        # Remaining txs should pass, just test masking works properly
        success_txs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
                success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
        yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Additional test, of checking that comparison of two time types works properly
        time_txs = []
        for b25 in xrange(2):
            for b18 in xrange(2):
                tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
                tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
                signtx = self.sign_transaction(self.nodes[0], tx)
                time_txs.append(signtx)
        yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        ### Missing aspects of test
        ## Testing empty stack fails
# Script entry point: run the comptool-based test harness.
if __name__ == '__main__':
    BIP68_112_113Test().main()
| jimmykiselak/lbrycrd | qa/rpc-tests/bip68-112-113-p2p.py | Python | mit | 27,304 |
""" Use the following config with caution.
"""
from parsl.config import Config
from parsl.executors.ipp import IPyParallelExecutor
from parsl.executors.ipp_controller import Controller
# Parsl configuration: a single local IPyParallel executor that attaches to
# (and reuses) an already-running controller instead of starting a fresh one.
config = Config(
    executors=[
        IPyParallelExecutor(
            # label by which apps/DFK select this executor
            label='local_ipp_reuse',
        ),
    ],
    # reuse=True: connect to an existing IPyParallel controller ("use with caution")
    controller=Controller(reuse=True),
)
| swift-lang/swift-e-lab | parsl/configs/local_ipp_reuse.py | Python | apache-2.0 | 344 |
#!/usr/bin/env python
# Simple script for shutting down the raspberry Pi at the press of a button.
# by Inderpreet Singh
import RPi.GPIO as GPIO
import time
import os
# Use the Broadcom SOC Pin numbers
# Setup the Pin with Internal pullups enabled and PIN in reading mode.
GPIO.setmode(GPIO.BCM)
GPIO.setup(4, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# Our function on what to do when the button is pressed
def Shutdown(channel):
    # channel: BCM pin number that fired the edge (passed by RPi.GPIO callback)
    # NOTE(review): the shutdown command is issued before the print/pi-blaster
    # lines, so those may not complete before the process is terminated.
    os.system("sudo shutdown -h now")
    print "edge detected!"
    os.system('echo "1=0" > /dev/pi-blaster')
## this won't work, since shutdown will terminate this process while it still waits :-/
# time.sleep(60)
# GPIO.setup(4, GPIO.OUT, pull_up_down = GPIO.PUD_DOWN)
# Add our function to execute when the button pressed event happens
# (FALLING edge: the pin idles high via the internal pull-up; bouncetime
# debounces the mechanical button for 2000 ms)
GPIO.add_event_detect(4, GPIO.FALLING, callback = Shutdown, bouncetime = 2000)
# Now wait!
while 1:
    time.sleep(1)
| JohSchneider/piboy-zero | software/powerbutton/power.py | Python | mit | 901 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from subways import views as subways
# URL routes for the subways app (legacy Django patterns() syntax).
# map_name / here / there are word-or-dot path segments captured as kwargs.
urlpatterns = patterns('',
    # render the named subway map
    url(r'^(?P<map_name>[\w.]+)/$', subways.map, {'template_name': "subways/subwayMap.htm"}, name='subway_map'),
    # show the longest possible ride on the named map
    url(r'^longest_ride/(?P<map_name>[\w.]+)/$', subways.longest_ride, {'template_name': "subways/subwayMap.htm"}, name='subway_longest_ride'),
    # show a ride between two stations ("here" -> "there") on the named map
    url(r'^ride/(?P<map_name>[\w.]+)/(?P<here>[\w.]+)/(?P<there>[\w.]+)/$', subways.ride, {'template_name': "subways/subwayMap.htm"}, name='subway_ride'),
)
| mouradmourafiq/django-subways | subways/urls.py | Python | bsd-2-clause | 564 |
# -*- coding: utf-8 -*-
from webob import Request
from webob import Response
from webob import exc
from .compat import b
def input_app(environ, start_response):
    """Minimal WSGI app returning small HTML fragments keyed on the path.

    '/'        -> a text <input> element
    '/submit'  -> a submit button
    '/html...' -> a tiny success page
    anything else -> an empty body
    """
    resp = Response()
    req = Request(environ)
    if req.path_info == '/':
        resp.body = b('<input name="youyou" type="text" value="" />')
    elif req.path_info == '/submit':
        resp.body = b('<input type="submit" value="OK" />')
    elif req.path_info.startswith('/html'):
        resp.body = b('<html><p>Success</p></html>')
    else:
        # Bug fix: Response.body must be bytes (WebOb raises on native str
        # under Python 3); use the same compat b() helper as the branches above.
        resp.body = b('')
    return resp(environ, start_response)
def application(environ, start_response):
    """WSGI app whose tiny HTML body depends on the request method."""
    request = Request(environ)
    if request.method == 'GET':
        payload = b('<pre>Yeah !</pre>')
    else:
        payload = b('<a href="/plop">Yeah !</a>')
    resp = Response()
    resp.body = payload
    return resp(environ, start_response)
def secure_application(environ, start_response):
    """Like application(), but rejects requests lacking REMOTE_USER with 401."""
    if 'REMOTE_USER' in environ:
        return application(environ, start_response)
    return exc.HTTPUnauthorized('vomis')(environ, start_response)
| ekeyme/pyquery | tests/apps.py | Python | bsd-3-clause | 1,068 |
#/usr/bin/python
# -*- coding: utf-8 -*-
#
# parse Gcode
#
"""Module to simulate a plotter"""
import threading
import sys
import logging
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
import pygame
import time
class PlotterSimulator(threading.Thread):
    """Pygame simulation of a hanging-wire plotter.

    Runs its own render thread (started in __init__); the controller and
    parser push state changes in via controller_cb()/parser_cb().
    """
    def __init__(self, automatic):
        """automatic indicates whether the user has to press a key on every update cycle"""
        threading.Thread.__init__(self)
        self.automatic = automatic
        # initialize pygame
        pygame.init()
        pygame.display.set_caption("Plotter Simulator")
        self.background = pygame.display.set_mode((1024, 600))
        # font to use in text surface
        self.font = pygame.font.Font(None, 28)
        # left 324px column: textual status display
        self.text_surface = self.background.subsurface((0, 0, 324, 600))
        # drawing subsurface, where grid, plot and pen are blited to
        self.draw_surface = self.background.subsurface((324, 0, 700, 600))
        # surface to draw grid on (black is the transparent colorkey)
        self.grid_surface = pygame.Surface((700, 600))
        self.grid_surface.set_colorkey((0, 0, 0))
        self.grid_surface.fill((0, 0, 0))
        self.draw_surface.blit(self.grid_surface, (0, 0))
        # surface to draw the wires on
        self.plot_surface = pygame.Surface((700, 600))
        self.plot_surface.set_colorkey((0, 0, 0))
        self.plot_surface.fill((0, 0, 0))
        self.draw_surface.blit(self.plot_surface, (0, 0))
        # surface to draw the pen on
        self.pen_surface = pygame.Surface((700, 600))
        self.pen_surface.set_colorkey((0, 0, 0))
        self.pen_surface.fill((0, 0, 0))
        self.draw_surface.blit(self.pen_surface, (0, 0))
        # draw grid and display
        pygame.display.flip()
        self.draw_grid()
        # controller related (populated later by set_controller())
        self.controller = None
        self.step_counter = 0
        self.width = None
        self.height = None
        self.scale = None
        self.old_position = None
        self.new_position = None
        self.draw_scale = None
        # parser related (populated later by set_parser())
        self.parser = None
        self.command_counter = 0
        self.start_time = time.time()
        # pen related
        self.pen_color = (0, 0, 0)
        # start thread (render loop in run())
        self.stop_flag = False
        self.start()
    def set_controller(self, controller):
        """called to set controller object; caches its geometry for drawing"""
        self.controller = controller
        self.width = controller.transformer.width
        # NOTE(review): height is set from transformer.width as well — confirm
        # whether this should be transformer.height.
        self.height = controller.transformer.width
        self.scale = controller.transformer.scale
        # scale factor from plotter units to surface pixels
        self.draw_scale = self.scale * self.draw_surface.get_width() / self.width
        self.old_position = (controller.position.X, controller.position.Y)
        self.new_position = (controller.position.X, controller.position.Y)
    def set_parser(self, parser):
        """called to set parser object"""
        self.parser = parser
    def controller_cb(self, *args):
        """called from controller to inform about changes"""
        self.old_position = self.new_position
        self.step_counter += 1
        # set pen color according to z position,
        # z below zero indicates drawing
        self.pen_color = (32, 32, 32)
        if self.controller.position.Z < 0.0 :
            self.pen_color = (0, 255, 0)
        self.new_position = (self.controller.position.X * self.draw_scale, self.controller.position.Y * self.draw_scale)
        self.update()
    def parser_cb(self, *args):
        """called from parser to inform about changes"""
        self.command_counter += 1
        self.update()
    def draw_grid(self):
        """
        draw grid on pygame window
        first determine, which axis are to draw
        second determine what the min_position and max_positions of each motor are
        surface.X : self.motors["X"].min_position <-> surface.get_width() = self.motors["X"].max_position
        surface.Y : self.motors["Y"].min_position <-> surface.get_height() = self.motors["Y"].max_position
        """
        self.grid_surface.fill((0, 0, 0))
        width = self.grid_surface.get_width()
        height = self.grid_surface.get_height()
        color = pygame.Color(0, 50, 0, 255)
        # NOTE(review): the vertical lines iterate over height and the
        # horizontal lines over width — the range bounds look swapped; verify.
        for x_steps in range(0, height, 10):
            pygame.draw.line(self.grid_surface, color, (x_steps, 0), (x_steps, height), 1)
        for y_steps in range(0, width, 10):
            pygame.draw.line(self.grid_surface, color, (0, y_steps), (width, y_steps), 1)
        # thicker lines through origin
        color = pygame.Color(0, 100, 0, 255)
        pygame.draw.line(self.grid_surface, color, (width / 2, 0), (width / 2, height))
        pygame.draw.line(self.grid_surface, color, (0, height / 2), (width, height / 2))
    def draw_motors(self):
        """
        paints motors on surface
        origin for plotter is in the middle / bottom of the page, thats (0,0)
        """
        self.plot_surface.fill((0, 0, 0))
        # motor anchor points in the top corners of the drawing surface
        position_a = (15, 0)
        position_b = (self.plot_surface.get_width() - 15, 0)
        pygame.draw.circle(self.plot_surface, (255, 255, 255), position_a, 15, 1)
        pygame.draw.circle(self.plot_surface, (255, 255, 255), position_b, 15, 1)
        # the two wires from the motors to the current pen position
        pygame.draw.line(self.plot_surface, (255, 0, 0), position_a, self.new_position, 1)
        pygame.draw.line(self.plot_surface, (0, 0, 255), position_b, self.new_position, 1)
    def draw_pen(self):
        """
        paints motors on surface
        origin for plotter is in the middle / bottom of the page, thats (0,0)
        dont blank this surface on every update
        """
        # only if z > 0.0 use a solid color
        pygame.draw.line(self.pen_surface, self.pen_color, self.old_position, self.new_position, 1)
    def update_text(self):
        """display textual informations"""
        font_height = self.font.get_height()
        textcolor = (255, 255, 255)
        self.text_surface.fill((0, 0, 0))
        # one status line per row; row index = multiplier on font_height
        text = self.font.render("Max-X : %0.2f" % self.controller.stats.max_x, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 0 + 1))
        text = self.font.render("Max-Y : %0.2f" % self.controller.stats.max_y, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 1 + 1))
        text = self.font.render("Width : %05s" % self.width, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 2 + 1))
        text = self.font.render("Height: %05s" % self.height, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 3 + 1))
        text = self.font.render("Scale : %05s" % self.scale, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 4 + 1))
        text = self.font.render("Motor Positions:", 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 5 + 1))
        text = self.font.render("X : %05s" % (self.controller.motors["X"].position * self.scale), 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 6 + 1))
        text = self.font.render("Y : %05s" % (self.controller.motors["Y"].position * self.scale), 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 7 + 1))
        text = self.font.render("Z : %05s" % (self.controller.motors["Z"].position * self.scale), 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 8 + 1))
        text = self.font.render("Tranformer Positions:", 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 9 + 1))
        text = self.font.render("A : %05s" % self.controller.transformer.get_motor_A(), 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 10 + 1))
        text = self.font.render("B : %05s" % self.controller.transformer.get_motor_B(), 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 11 + 1))
        text = self.font.render("Controller Positions:", 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 12 + 1))
        text = self.font.render("X: %0.2f" % self.controller.position.X, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 13 + 1))
        text = self.font.render("Y: %0.2f" % self.controller.position.Y, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 14 + 1))
        text = self.font.render("Z: %0.2f" % self.controller.position.Z, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 15 + 1))
        text = self.font.render("C-Steps: %05s" % self.step_counter, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 16 + 1))
        text = self.font.render("P-Commands: %05s" % self.command_counter, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 17 + 1))
        text = self.font.render("Elapsed Time: %s s" % int(time.time() - self.start_time), 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 18 + 1))
        text = self.font.render("Last Command", 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 19 + 1))
        text = self.font.render("%s" % self.parser.command, 1, textcolor)
        self.text_surface.blit(text, (0, font_height * 20 + 1))
    def update(self):
        """do pygame update stuff"""
        self.draw_motors()
        self.draw_pen()
    def run(self):
        """do pygame update stuff in endless loop"""
        # draw grid surface only the first time, this surface will not change
        self.draw_grid()
        clock = pygame.time.Clock()
        while self.stop_flag is False:
            clock.tick(30) # not more than 60 frames per seconds
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.QUIT:
                    sys.exit(0)
            keyinput = pygame.key.get_pressed()
            if keyinput is not None:
                # print keyinput
                if keyinput[pygame.K_ESCAPE]:
                    sys.exit(1)
            self.draw_surface.fill((0, 0, 0))
            self.update_text()
            # blit subsurfaces
            self.draw_surface.blit(self.grid_surface, (0, 0))
            self.draw_surface.blit(self.plot_surface, (0, 0))
            self.draw_surface.blit(self.pen_surface, (0, 0))
            pygame.display.flip()
| gunny26/python-gcode | PlotterSimulator.py | Python | gpl-2.0 | 10,231 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import sentry.db.models.fields.gzippeddict
class Migration(migrations.Migration):
    # Initial migration for the nodestore app: creates the Node table.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Node',
            fields=[
                # 40-char primary key (SHA1-hex sized), supplied by the app
                ('id', models.CharField(max_length=40, serialize=False, primary_key=True)),
                # gzipped, pickled dict payload
                ('data', sentry.db.models.fields.gzippeddict.GzippedDictField()),
                # indexed so stale nodes can be queried/swept by age
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
            ],
        ),
    ]
| beeftornado/sentry | src/sentry/nodestore/migrations/0001_initial.py | Python | bsd-3-clause | 654 |
from __future__ import division
from __future__ import print_function
from datetime import datetime
import sys, string, os
import logging
sys.path.insert(0, "/home/lom/users/cll/pytroll/install/lib/python2.6/site-packages")
from mpop.satellites import GeostationaryFactory
from mpop.projector import get_area_def
from mpop.utils import debug_on
from pyresample import plot
import numpy as np
from pydecorate import DecoratorAGG
import aggdraw
from PIL import ImageFont, ImageDraw
from os.path import exists
from os import makedirs
from mpop.imageo.HRWimage import HRWimage, HRW_2dfield, HRWstreamplot, HRWscatterplot
from datetime import timedelta
from wind_shift import read_HRW
import scp_settings
scpOutputDir = scp_settings.scpOutputDir
scpID = scp_settings.scpID
# debug_on()
# ---- user-tunable options (alternatives kept as commented-out lines) ----
#plot_modes = ['channel','pressure','correlation','conf_nwp','conf_no_nwp', 'stream']
#plot_modes = ['pressure']
#plot_modes = ['channel']
#plot_modes = ['stream']
plot_modes = ['pressure', 'stream'] #
#plot_modes = ['scatter']
#interpol_method='RBF'
#interpol_method="cubic + nearest"
interpol_method=None
detailed = True
# delay: minutes subtracted from the most recent SEVIRI slot when no
# date/time is given on the command line
delay=5
add_title=True
title_color=(255,255,255)
#layer=''
add_rivers=False
add_borders=False
legend=False
ntimes=1
print("timesteps ", ntimes)
# quality thresholds (percent) used when filtering HRW winds
min_correlation = 85
min_conf_nwp = 80
min_conf_no_nwp = 80
#cloud_type = [5,6,7,8,9,10,11,12,13,14]
cloud_type = None
# height levels to plot: L(ow)/M(iddle)/H(igh), or A for all
#levels = ['L','M','H']
#levels = ['A']
#levels = ['H']
#levels = ['M']
levels = ['L']
area="ccs4"
#area="alps95"
#area="EuropeCanaryS95"
#area="EuropeCanary95"
#area="ticino"
# ------------------- end of input options -------------------------------
# ------------------- end of input options -------------------------------
# plot modes that are rendered via HRWimage()
HRWimages = ['channel','pressure','correlation','conf_nwp','conf_no_nwp']
# Date/time selection: either exactly "YYYY MM DD HH MM" on the command line,
# or fall back to the most recent SEVIRI slot minus the configured delay.
if len(sys.argv) > 1:
    if len(sys.argv) < 6:
        # bug fix: inspect was referenced here but never imported at module
        # level, which made this error path crash with a NameError
        import inspect
        print("*** ")
        print("*** Warning, please specify date and time completely, e.g.")
        print("*** python "+inspect.getfile(inspect.currentframe())+" 2014 07 23 16 10 ")
        print("*** ")
        quit() # quit at this point
    else:
        year = int(sys.argv[1])
        month = int(sys.argv[2])
        day = int(sys.argv[3])
        hour = int(sys.argv[4])
        minute = int(sys.argv[5])
else:
    if True: # automatic choice of last 5min
        from my_msg_module import get_last_SEVIRI_date
        datetime1 = get_last_SEVIRI_date(True)
        if delay != 0:
            datetime1 -= timedelta(minutes=delay)
        year = datetime1.year
        month = datetime1.month
        day = datetime1.day
        hour = datetime1.hour
        minute = datetime1.minute
    else: # fixed date for test reasons
        year=2014 # 2014 09 15 21 35
        month= 7 # 2014 07 23 18 30
        day= 23
        hour= 18
        minute=00
# read data for the current time
time_slot = datetime(year, month, day, hour, minute)
#print time_slot
#m_per_s_to_knots = 1.944
#for wid in range(len(global_data['HRW'].HRW_detailed.wind_id)):
#    print '%6s %3d %10.7f %10.7f %7.2f %7.1f %8.1f' % (global_data['HRW'].HRW_detailed.channel[wid], global_data['HRW'].HRW_detailed.wind_id[wid], \
#        global_data['HRW'].HRW_detailed.lon[wid], global_data['HRW'].HRW_detailed.lat[wid], \
#        global_data['HRW'].HRW_detailed.wind_speed[wid]*m_per_s_to_knots, \
#        global_data['HRW'].HRW_detailed.wind_direction[wid], global_data['HRW'].HRW_detailed.pressure[wid])
obj_area = get_area_def(area)
# zero-padded date/time strings used in titles and file names
yearS = str(year)
#yearS = yearS[2:]
monthS = "%02d" % month
dayS = "%02d" % day
hourS = "%02d" % hour
minS = "%02d" % minute
dateS = yearS+'-'+monthS+'-'+dayS
timeS = hourS+':'+minS+" UTC"
#output_dir='/data/COALITION2/PicturesSatellite/'+yearS+'-'+monthS+'-'+dayS+'/'+yearS+'-'+monthS+'-'+dayS+'_HRW_'+area+'/'
#output_dir='/data/cinesat/out/'
output_dir='./pics/'
if not exists(output_dir):
    print('... create output directory: ' + output_dir)
    makedirs(output_dir)
image_type ='.png'
# preparation for adding the title
if add_title:
    # get font for title
    fontsize=18
    font = ImageFont.truetype("/usr/openv/java/jre/lib/fonts/LucidaTypewriterBold.ttf", fontsize)
if detailed:
    print("*** plot detailed winds")
    detailed_str = 'detailed' # hrw_channels=None, min_correlation=None, cloud_type=None, style='barbs'
    detailed_char = 'd'
else:
    print("*** plot basic winds")
    detailed_str = 'basic'
    detailed_char = 'b'
# read HRW wind vectors (applies the quality-threshold filters defined above)
print("... read HRW data")
sat_nr = 9
global_data = read_HRW("meteosat", str(sat_nr).zfill(2), "seviri", time_slot, ntimes, \
                       min_correlation=min_correlation, min_conf_nwp=min_conf_nwp, \
                       min_conf_no_nwp=min_conf_no_nwp, cloud_type=cloud_type)
# loop over height levels
for level in levels:

    # defaults, overwritten below depending on the level;
    # vmax is the colour-scale maximum wind speed for stream plots
    level_str=''
    vmax=60
    if level=='L':
        level_str='low '
        vmax=20
    if level=='M':
        level_str='middle '
        vmax=40
    if level=='H':
        level_str='high '
        vmax=60

    print("... make plot for level " + level_str)
    print("... filter "+detailed_str+" data for level ", level)

    # choose basic or detailed (and get a fresh copy)
    if detailed:
        HRW_data = global_data['HRW'].HRW_detailed.filter(level=level)
    else:
        HRW_data = global_data['HRW'].HRW_basic.filter(level=level)

    # strip the "A" marker from the level name used in file names
    level = level.replace("A","")

    for plot_mode in plot_modes:

        print(" create HRW plot, plot mode = ", plot_mode)

        if plot_mode == 'stream':
            layer=' 3rd layer'
        else:
            layer=' 2nd layer'

        # get y position and layer string for the title
        if layer.find('2nd') != -1:
            y_pos_title=20
        elif layer.find('3rd') != -1:
            y_pos_title=40
        else:
            y_pos_title=5

        # the title prefix is the date/time string (the layer string above
        # is only used to pick the vertical title position)
        layer = dateS+' '+timeS
        if len(layer) > 0:
            layer=layer+':'

        if plot_mode in HRWimages:
            PIL_image = HRWimage( HRW_data, obj_area, color_mode=plot_mode, legend=legend) #
            # possible options: color_mode='pressure', legend=False, hrw_channels=None, min_correlation=None, cloud_type=None, style='barbs'
            # short code of the colour mode used in the output file name
            if plot_mode=='pressure':
                color_char='p'
            elif plot_mode=='channel':
                color_char='c'
            elif plot_mode=='correlation':
                color_char='r'
            elif plot_mode=='conf_nwp':
                color_char='cnwp'
            elif plot_mode=='conf_no_nwp':
                color_char='cnnwp'
            outputFile = output_dir+'/MSG_hrw'+detailed_char+color_char+level+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS
            title = layer+' '+detailed_str+' high resolution '+level_str+'winds' # [white v. weak, green weak, yellow med., red strong]
        elif plot_mode == 'stream':
            # get gridded wind field
            u2d, v2d = HRW_2dfield( HRW_data, obj_area, level=level, interpol_method=interpol_method )
            # create PIL image
            PIL_image = HRWstreamplot( u2d, v2d, obj_area, HRW_data.interpol_method, color_mode='speed', vmax=vmax) # , legend=True, legend_loc=3
            outputFile = output_dir+'/MSG_stream'+detailed_char+level+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS
            title = layer+' '+level_str+'High Resolution Winds stream plot' # [white v. weak, green weak, yellow med., red strong]
        elif plot_mode == 'scatter':
            # scatter plot draws its own title; clear it afterwards so no
            # text is painted onto the image below
            title = "MSG-"+str(sat_nr-7) +' '+level_str+'HRW scatter' +', '+ dateS+' '+hourS+':'+minS+'UTC, '+area
            PIL_image = HRWscatterplot( HRW_data, title=title)
            title=''
            outputFile = output_dir+'/MSG_HRWscat'+detailed_char+level+'-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS
        else:
            print("*** Error in plot_hrw.py")
            print(" unknown plot_mode")
            quit()

        # create decorator
        dc = DecoratorAGG(PIL_image)
        draw = ImageDraw.Draw(PIL_image)

        if add_borders:
            from pycoast import ContourWriterAGG
            # define contour writer for coasts, borders, rivers
            cw = ContourWriterAGG('/data/OWARNA/hau/maps_pytroll/')
            # define area
            from mpop.projector import get_area_def
            # obj_area = get_area_def('ccs4')
            proj4_string = obj_area.proj4_string
            # e.g. proj4_string = '+proj=geos +lon_0=0.0 +a=6378169.00 +b=6356583.80 +h=35785831.0'
            area_extent = obj_area.area_extent
            # e.g. area_extent = (-5570248.4773392612, -5567248.074173444, 5567248.074173444, 5570248.4773392612)
            area_def = (proj4_string, area_extent)
            resolution='h'
            cw.add_borders(PIL_image, area_def, outline=(255, 0, 0), resolution=resolution, width=1) #, outline_opacity=0

        if add_title:
            draw = ImageDraw.Draw(PIL_image)
            draw.text((0, y_pos_title),title, title_color, font=font)

        print('... save image as ', outputFile+image_type)
        PIL_image.save(outputFile+image_type)

        # copy to another place (disabled)
        if False:
            import subprocess
            # if in_msg.verbose:
            #     print "... secure copy "+outputFile+ " to "+in_msg.scpOutputDir
            print("scp "+scpID+" "+outputFile+image_type +" "+" "+scpOutputDir+" 2>&1 &")
            subprocess.call("scp "+scpID+" "+outputFile+image_type +" "+" "+scpOutputDir+" 2>&1 &", shell=True)
            # if in_msg.compress_to_8bit:
            #     if in_msg.verbose:
            #         print "... secure copy "+outputFile.replace(".png","-fs8.png")+ " to "+in_msg.scpOutputDir
            #     subprocess.call("scp "+in_msg.scpID+" "+outputFile.replace(".png","-fs8.png")+" "+in_msg.scpOutputDir+" 2>&1 &", shell=True)

        # make composite with IR10.8 / HRV background and scp it (disabled)
        if False:
            import subprocess
            if plot_mode in ['channel','pressure']:
                product = 'hrw'+detailed_char+color_char
            elif plot_mode == 'stream':
                product = 'stream'+detailed_char
            ir_file = output_dir+'/MSG_ir108-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS+".png"
            hrv_file = output_dir+'/MSG_HRV-' +area+'_'+yearS[2:]+monthS+dayS+hourS+minS+".png"
            ir_outfile = output_dir+'/MSG_'+product+'-ir108-'+area+'_'+yearS[2:]+monthS+dayS+hourS+minS+".png"
            hrv_outfile = output_dir+'/MSG_'+product+'-HRV-' +area+'_'+yearS[2:]+monthS+dayS+hourS+minS+".png"
            print("/usr/bin/composite "+outputFile+image_type+" "+ir_file+" "+" "+ir_outfile+" && sleep 1")
            subprocess.call("/usr/bin/composite "+outputFile+image_type+" "+ir_file +" "+" "+ir_outfile +" 2>&1 && sleep 1 ", shell=True)
            print("/usr/bin/composite "+outputFile+image_type+" "+hrv_file+" "+" "+hrv_outfile+" && sleep 1")
            subprocess.call("/usr/bin/composite "+outputFile+image_type+" "+hrv_file+" "+" "+hrv_outfile+" 2>&1 && sleep 1 ", shell=True)
            if True:
                print("scp "+scpID+" "+ir_outfile +" "+" "+scpOutputDir+" 2>&1 &")
                subprocess.call("scp "+scpID+" "+ir_outfile +" "+" "+scpOutputDir+" 2>&1 && sleep 1", shell=True)
                print("scp "+scpID+" "+hrv_outfile +" "+" "+scpOutputDir+" 2>&1 &")
                subprocess.call("scp "+scpID+" "+hrv_outfile +" "+" "+scpOutputDir+" 2>&1 && sleep 1", shell=True)

        # run the generic postprocessing chain instead (disabled)
        if False:
            from get_input_msg import get_input_msg
            in_msg = get_input_msg('input_template')
            from postprocessing import postprocessing
            #in_msg.postprocessing_areas=['ccs4']
            in_msg.scpOutput = True
            in_msg.datetime = global_data.time_slot
            in_msg.outputDir = output_dir
            in_msg.postprocessing_composite=["hrwdp"+level+"-streamd"+level+"-HRV", "hrwdp"+level+"-streamd"+level+"-ir108"]
            postprocessing(in_msg, global_data.time_slot, global_data.number, 'ccs4')
| meteoswiss-mdr/monti-pytroll | scripts/plot_hrw.py | Python | lgpl-3.0 | 12,338 |
import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
    """Return the snapshot test path executed by the woodpecker runner.

    Starting from the "template1" formation, the path exercises the
    create / revert / batch-delete life cycle of vm1's root-volume
    snapshots.
    """
    steps = [
        [TestAction.create_volume_snapshot, "vm1-root", "snapshot1"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.start_vm, "vm1"],
        [TestAction.create_volume_snapshot, "vm1-root", "snapshot2"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.use_volume_snapshot, "snapshot2"],
        [TestAction.start_vm, "vm1"],
        [TestAction.batch_delete_volume_snapshot, ["snapshot2", "snapshot1"]],
    ]
    return {"initial_formation": "template1", "path_list": steps}
| zstackorg/zstack-woodpecker | integrationtest/vm/multihosts/snapshots/paths/path7.py | Python | apache-2.0 | 717 |
import json
from urllib import quote_plus
from urlparse import urljoin
import requests
# HTTP verbs exposed as (uppercase) attributes on an EndPoint; any other
# attribute name is treated as a further URL path segment.
METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS']
class Method(object):
    """A callable HTTP method bound to a single URL.

    Wraps one of the :mod:`requests` module functions together with the
    target URL and the API-wide auth/header settings, so calling the
    instance performs the request and returns the response.
    """

    def __init__(self, method, url, auth, serialize_payload, headers):
        # `method` is the verb name ('GET', ...); resolve it to the
        # matching requests function (requests.get, requests.post, ...)
        self.method = getattr(requests, method.lower())
        self.url = url
        self.auth = auth
        self.serialize_payload = serialize_payload
        self.headers = headers

    def __call__(self, *args, **kwargs):
        if self.method in (requests.get, requests.options):
            # GET/OPTIONS carry their arguments in the query string
            kw = {'params': kwargs}
        else:
            # TODO: assertions for the payload
            # other verbs send a body: positional args take precedence
            # over keyword args, optionally JSON-serialized
            data = args or kwargs
            if self.serialize_payload:
                data = json.dumps(data)
            kw = {'data': data}
        return self.method(
            self.url,
            auth=self.auth,
            headers=self.headers,
            **kw
        )
class EndPoint(object):
    """One URL of the API.

    Sub-paths are reachable through attribute or item access; attribute
    names listed in METHODS (uppercase HTTP verbs) yield a callable
    Method instead of a deeper EndPoint.
    """

    def __init__(self, api, url):
        self.api = api
        self.url = url

    def __getitem__(self, item):
        # ensure a trailing slash so urljoin appends rather than replaces
        base = self.url if self.url.endswith('/') else self.url + '/'
        return EndPoint(
            self.api,
            urljoin(base, quote_plus(unicode(item))),
        )

    def __getattr__(self, attr):
        if attr not in METHODS:
            # not an HTTP verb: treat it as another path segment
            return self[attr]
        return Method(
            method=attr,
            url=self.url,
            auth=self.api.auth,
            serialize_payload=self.api.serialize_payload,
            headers=self.api.headers,
        )
class Api(EndPoint):
    """Root endpoint of a REST API.

    Holds the authentication, payload-serialization and header settings
    shared by every EndPoint/Method derived from it.
    """

    def __init__(self, base_url, auth=None, serialize_payload=True, headers=None):
        super(Api, self).__init__(self, base_url)
        self.auth = auth
        self.serialize_payload = serialize_payload
        # JSON defaults, optionally overridden/extended by the caller
        merged = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
        }
        if headers:
            merged.update(headers)
        self.headers = merged

    def __call__(self, url):
        """Return an EndPoint for an absolute `url` on this API."""
        return EndPoint(self, url)
| olegpidsadnyi/restart | restart/api.py | Python | mit | 2,054 |
'''
Created on May 7, 2011
@author: jake
'''
from decimal import Decimal
from warnings import warn
import moneyed
from django.test import TestCase
from moneyed import Money
from .testapp.forms import MoneyForm, MoneyModelForm
from .testapp.models import ModelWithVanillaMoneyField
class MoneyFormTestCase(TestCase):
    """Validation behaviour of the plain (non-model) MoneyForm."""

    def testRender(self):
        warn('Rendering depends on localization.', DeprecationWarning)

    def testValidate(self):
        money = Money(Decimal(10), moneyed.SEK)
        form = MoneyForm({"money_0": money.amount, "money_1": money.currency})
        self.assertTrue(form.is_valid())
        cleaned = form.cleaned_data['money']
        self.assertTrue(isinstance(cleaned, Money))
        self.assertEquals(cleaned.amount, Decimal("10"))
        self.assertEquals(cleaned.currency, moneyed.SEK)
        self.assertEquals(cleaned, money)

    def testAmountIsNotANumber(self):
        form = MoneyForm({"money_0": "xyz*|\\", "money_1": moneyed.SEK})
        self.assertFalse(form.is_valid())

    def testAmountExceedsMaxValue(self):
        form = MoneyForm({"money_0": 10000, "money_1": moneyed.SEK})
        self.assertFalse(form.is_valid())

    def testAmountExceedsMinValue(self):
        form = MoneyForm({"money_0": 1, "money_1": moneyed.SEK})
        self.assertFalse(form.is_valid())

    def testNonExistentCurrency(self):
        # EUR is not among the form's allowed currencies
        money = Money(Decimal(10), moneyed.EUR)
        form = MoneyForm({"money_0": money.amount, "money_1": money.currency})
        self.assertFalse(form.is_valid())
class MoneyModelFormTestCase(TestCase):
    """Round-trip a MoneyModelForm through save() and the database."""

    def testSave(self):
        money = Money(Decimal("10"), moneyed.SEK)
        form = MoneyModelForm({"money_0": money.amount, "money_1": money.currency})
        self.assertTrue(form.is_valid())
        instance = form.save()
        fetched = ModelWithVanillaMoneyField.objects.get(pk=instance.pk)
        self.assertEqual(money, fetched.money)
| renefs87/project-payment-manager | app/djmoney/tests/form_tests.py | Python | gpl-3.0 | 1,880 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
import time
from datetime import datetime
import sys
from ics import Calendar, Event
import progressbar
import logging
import sys
import os
# Enable verbose logging when running interactively (stdin attached to a
# terminal); cron/pipe runs only log warnings and above.
# (The old code initialised `debug` three times and carried dead `pass`
# statements in both branches.)
debug = os.isatty(sys.stdin.fileno())
logging.basicConfig(stream=sys.stderr,
                    level=logging.DEBUG if debug else logging.WARNING)
# campusM credentials used for the SSO login below
username = ''
password = ''
#location of the ics file, it's recommended to write to a web folder
ical_loc = '/var/www/kieran/'

# log in via the university's LDAP endpoint; the session cookies are
# required for all subsequent timetable requests
r = requests.post('https://hull.ombiel.co.uk/campusm/ldap/282',
                  params={'username': username, 'password': password})
cookie = dict(a=r.cookies['a'], __a=r.cookies['__a'])
r = r.json()

# build a possessive form of the student's name, e.g. "Smith's" / "Jones'"
surname = r['surname']
if surname[-1] != "s":
    surname = surname + "'s"
else:
    surname = surname + "'"
student_name = r['firstname'] + ' ' + surname
logging.info('Downloading timetable for: ' + student_name)

c = Calendar()

# the timetable is fetched week by week (every 7th day-of-year) from the
# current week up to the end of `endyear`
cyear = datetime.today().year
endyear = 2016
ids = []  # event ids already added (avoids duplicates across weeks)
cday = datetime.today().timetuple().tm_yday
day = cday - cday % 7 + 5  # start at the beginning of the current week
#day = 4
totaldays = (endyear - cyear + 1) * 372  # rough upper bound for the progress bar
if debug:
    bar = progressbar.ProgressBar(max_value=totaldays,
                                  redirect_stdout=True)
prog = day
year = cyear
# fetch one week of timetable per request until endyear is reached
while year <= endyear:
    logging.debug('Day: ' + str(day) + ' Year: ' + str(year))
    if debug:
        bar.update(prog)
    # the API expects YYYYDDD (year plus zero-padded day of year)
    date = str(year) + str(day).zfill(3)
    url = 'https://hull.ombiel.co.uk/campusm/sso/calendar/course_timetable/' + date
    r = requests.get(url, cookies=cookie)
    r = r.json()
    day = day + 7
    prog = prog + 7
    if day > 364:
        # wrap to the first week of the next year
        day = 4
        year += 1
    locode = ''
    i = 1
    for event in r['events']:
        if event['id'] not in ids:
            e = Event()
            i = i + 1
            ids.append(event['id'])
            # this code is really bad, like I don't even. Should probably
            # refactor this into something better. It basically finds events
            # with the same time and merges them (room codes like "LT-A."
            # are trimmed so the letters of merged rooms can be appended).
            locode = event['locCode']
            if locode[-1:] == "." and locode[-2:-1].isalpha():
                locode = locode[:-2]
            if locode[:-1].isalpha():
                locode = locode[:-1]
            x = 1  # NOTE(review): unused
            length = len(r['events'])
            if i + 1 < length:
                for otherevent in r['events']:
                    # same module, same start, different room: merge rooms
                    if (event['locCode'][:-2] == otherevent['locCode'][:-2]
                            and event['desc2'] == otherevent['desc2']
                            and event['start'] == otherevent['start']
                            and event['locCode'] != otherevent['locCode']):
                        locode = locode + otherevent['locCode'][-2:-1]
                        ids.append(otherevent['id'])
            # prefix the type of lecture (the "[...]" marker) to the title
            if "[" in event['desc2']:
                class_name = event['desc2'].split('[')
                e.name = '[' + class_name[1] + ' ' \
                    + (class_name[0])[:-2] + ' (' + event['desc1'] + ')'
            else:
                class_name = event['desc2']
                e.name = class_name + ' (' + event['desc1'] + ')'
            # the merging mess is over; fill in the remaining event fields
            logging.debug(e.name + ' - ' + locode)
            e.begin = event['start']
            e.end = event['end']
            e.description = event.get('teacherName', '')
            e.location = locode
            c.events.append(e)
# write it all to file; use context managers so the handles are closed
# (the old code leaked the file object from open(...).writelines(...))
icalfile = ical_loc + username + '.ics'
with open(icalfile, 'w') as outfile:
    outfile.writelines(c)
# re-read and inject a calendar name and refresh interval after the
# first line of the ics file
with open(icalfile, 'r') as infile:
    lines = infile.readlines()
lines[1] = lines[1] + 'X-WR-CALNAME: ' + student_name \
    + ' Uni Timetable\nX-PUBLISHED-TTL:PT12H'
with open(icalfile, 'w') as outfile:
    for line in lines:
        outfile.write(line)
if debug:
    bar.update(totaldays)
| Epictek/Hull-Timetable-ical | app.py | Python | gpl-2.0 | 3,990 |
# NSDict - Generate dictionaries from Nationstates API queries and dumps
# Copyright 2017 Khronion <khronion@gmail.com>
#
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the software. If not, see <http://www.gnu.org/licenses/>.
import xml.etree.cElementTree as ElementTree
import xml.etree.ElementTree
import collections
import gzip
class EntityDict(collections.OrderedDict):
    """Extended ordered dict that contains every game entity (nation or
    region) and all their attributes, keyed by lower-cased name."""

    class Entity(object):
        """Simple utility class to allow object notation access to data"""

        def __init__(self, attr, kind=''):
            # kind: tag name of the entity ('NATION', 'REGION', ...)
            self.kind = kind
            # expose every parsed attribute as an object attribute
            self.__dict__.update(attr)

    def __init__(self, dump, entity_tag, entity_name, int_types, float_types):
        """Parse `dump` and populate self.

        `dump` may be a raw XML string (API query), the path of a gzipped
        dump file, or the path of a plain XML file -- tried in that order.
        `entity_tag` is the XML tag of one entity, `entity_name` the child
        tag holding its name; tags listed in `int_types`/`float_types` are
        converted to int/float, everything else is stored as text.
        """
        super().__init__()
        try:
            self.entity_tree = ElementTree.fromstring(dump)
        except xml.etree.ElementTree.ParseError:
            try:
                with gzip.open(dump) as entities_xml:
                    try:
                        self.entity_tree = ElementTree.parse(entities_xml).getroot()
                    except xml.etree.ElementTree.ParseError:
                        raise RuntimeError("Invalid dump or API query.")
            except OSError:
                # not gzipped: fall back to a plain XML file
                with open(dump) as entities_xml:
                    try:
                        self.entity_tree = ElementTree.parse(entities_xml).getroot()
                    except xml.etree.ElementTree.ParseError:
                        raise RuntimeError("Invalid dump or API query.")
        for entity in self.entity_tree.iter(entity_tag):
            attributes = {}
            name = None
            # Store all attributes in dictionary. This approach guarantees
            # new shards are automatically included.
            for attribute in entity:
                if attribute.tag == entity_name:
                    name = attribute.text.lower()
                elif attribute.tag in int_types:  # deal with ints
                    attributes[attribute.tag.lower()] = int(attribute.text)
                elif attribute.tag in float_types:  # deal with floats
                    attributes[attribute.tag.lower()] = float(attribute.text)
                else:
                    attributes[attribute.tag.lower()] = attribute.text
                # handle special cases (subclass hook, per attribute)
                self._custom_attribute_process(attribute, attributes)
            # store data (subclass hook, per entity)
            self._post_entity_process(attributes)
            self[name] = self.Entity(attributes, entity_tag)
        # handle special cases (subclass hook, once after parsing)
        self._post_process()

    def _custom_attribute_process(self, attribute, attributes):
        """This can be overriden to do processing after an attribute is parsed"""
        pass

    def _post_entity_process(self, attributes):
        """This can be overriden to do processing on a dictionary of attributes generated after an entity is parsed"""
        pass

    def _post_process(self):
        """This can be overriden to do processing once EntityDict is done populating itself from the xml"""
        pass
class NationDict(EntityDict):
    """Ordered mapping of nation name -> Entity parsed from a nations dump."""

    def __init__(self, dump):
        super().__init__(
            dump, 'NATION', 'NAME',
            ['POPULATION', 'FACTBOOKS', 'DISPATCHES', 'FIRSTLOGIN', 'LASTLOGIN'],
            ['TAX'])

    def _custom_attribute_process(self, attribute, attributes):
        """Expand nation attributes that need more than plain text storage."""
        tag = attribute.tag
        if tag == 'ENDORSEMENTS':
            # comma-separated list of endorsing nations (may be empty)
            text = attribute.text
            attributes['endorsements'] = text.split(',') if text is not None else []
        elif tag == 'FREEDOM':
            attributes['freedom'] = {child.tag: child.text for child in attribute}
        elif tag == 'FREEDOMSCORES':
            attributes['freedomscores'] = {child.tag: int(child.text) for child in attribute}
        elif tag == 'GOVT':
            attributes['govt'] = {child.tag.lower(): float(child.text) for child in attribute}
        elif tag == 'DEATHS':
            deaths = {}
            for cause in attribute:
                # the cause name lives in the (single) XML attribute
                key = cause.attrib[list(cause.attrib.keys())[0]]
                deaths[key] = float(cause.text)
            attributes['deaths'] = deaths
class RegionDict(EntityDict):
    """Ordered mapping of region name -> Entity parsed from a regions dump.

    After parsing it also derives world-level statistics (total
    population, update window) and a nation -> region reverse lookup.
    """

    def __init__(self, dump):
        super().__init__(dump, 'REGION', 'NAME',
                         ['NUMNATIONS', 'LASTUPDATE', 'DELEGATEVOTES', 'FIRSTLOGIN', 'LASTLOGIN'],
                         ['TAX', 'PUBLICSECTOR'])

    def _custom_attribute_process(self, attribute, attributes):
        """Expand region attributes that need more than plain text storage."""
        if attribute.tag == 'EMBASSIES':
            attributes['embassies'] = []
            for embassy in attribute:
                attributes['embassies'].append(embassy.text)
        # special RO (regional officer) logic
        elif attribute.tag == 'OFFICERS':
            attributes['officers'] = []
            for officer_element in attribute:
                # BUG FIX: the old code iterated the (empty) dict instead
                # of the XML element and assigned into the Element itself,
                # so officer entries were never parsed correctly.
                officer = {}
                for officer_attribute in officer_element:
                    officer[officer_attribute.tag.lower()] = officer_attribute.text
                if len(officer) > 0:
                    attributes['officers'].append(self.Entity(officer, 'OFFICER'))
        # special nations logic: colon-separated list (may be empty)
        elif attribute.tag == 'NATIONS':
            if attribute.text is not None:
                attributes['nations'] = attribute.text.split(":")
            else:
                attributes['nations'] = []
        # special delegate logic: the API reports '0' for "no delegate"
        elif attribute.tag == 'DELEGATE':
            if attribute.text == '0':
                attributes['delegate'] = None
            else:
                attributes['delegate'] = attribute.text

    def _post_entity_process(self, attributes):
        """Handle cumulative population generation.

        cumulative_population is the number of nations in all regions that
        appear before this one in the dump (i.e. that update earlier).
        """
        try:
            last = self[next(reversed(self))]
            attributes['cumulative_population'] = last.cumulative_population + last.numnations
        except StopIteration:
            # first region in the dump
            attributes['cumulative_population'] = 0

    def _post_process(self):
        """Derive useful world analyses from dataset."""
        # NOTE(review): this is the cumulative population *before* the last
        # region, i.e. it excludes the final region's own nation count.
        self.total_population = next(reversed(self.values())).cumulative_population
        self.total_regions = len(self)
        self.update_start = list(self.items())[0][1].lastupdate
        self.update_end = list(self.items())[-1][1].lastupdate
        self.update_length = list(self.items())[-1][1].lastupdate - list(self.items())[0][1].lastupdate
        # nation name -> region name, for quick residency lookups
        self.reverse_lookup = collections.OrderedDict()
        for region in self:
            for nation in self[region].nations:
                self.reverse_lookup[nation] = region
##
# Copyright 2019-2020 Bart Oldeman, McGill University, Compute Canada
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing the Intel compiler suite, implemented as an easyblock
@author: Bart Oldeman (McGill University, Compute Canada)
"""
import os
from easybuild.easyblocks.icc import EB_icc
from easybuild.easyblocks.ifort import EB_ifort
class EB_iccifort(EB_ifort, EB_icc):
    """
    Class that can be used to install iccifort
    """

    def sanity_check_step(self):
        """Custom sanity check paths for iccifort."""
        EB_icc.sanity_check_step(self)
        EB_ifort.sanity_check_step(self)

    def make_module_extra(self):
        """Also define $EBROOT*/$EBVERSION* variables for icc and ifort."""
        txt = super(EB_iccifort, self).make_module_extra()
        for env_var, value in (('EBROOTICC', self.installdir),
                               ('EBROOTIFORT', self.installdir),
                               ('EBVERSIONICC', self.version),
                               ('EBVERSIONIFORT', self.version)):
            txt += self.module_generator.set_environment(env_var, value)
        return txt

    def make_module_req_guess(self):
        """Paths to consider for environment variables in the module file.

        Use EB_icc because its make_module_req_guess deliberately omits
        'include' for CPATH: including it causes problems, e.g. with
        complex.h and std::complex
        (cfr. https://software.intel.com/en-us/forums/intel-c-compiler/topic/338378)
        whereas EB_ifort adds 'include', which is only needed when icc and
        ifort are separate installations.
        """
        guesses = EB_icc.make_module_req_guess(self)
        # Remove entries from LIBRARY_PATH that icc & co already know about
        # at compile time. Only done for merged iccifort installations so
        # that, for split installations, icc can still find ifort libraries
        # and vice versa.
        if self.comp_libs_subdir is not None:
            lib_subdirs = ('lib', 'compiler/lib/intel64', 'lib/ia32', 'lib/intel64')
            compiler_library_paths = [os.path.join(self.comp_libs_subdir, subdir)
                                      for subdir in lib_subdirs]
            guesses['LIBRARY_PATH'] = [p for p in guesses['LIBRARY_PATH']
                                       if p not in compiler_library_paths]
        return guesses
| pescobar/easybuild-easyblocks | easybuild/easyblocks/i/iccifort.py | Python | gpl-2.0 | 3,264 |
#
# helpers.py
#
# Copyright (C) 2011, 2013, 2015 Uli Fouquet
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Helpers for trivial jobs.
"""
import base64
import cssutils
import logging
import os
import re
import shutil
import tempfile
import zipfile
from bs4 import BeautifulSoup, UnicodeDammit
try:
from cStringIO import StringIO # Python 2.x
except ImportError: # pragma: no cover
from io import StringIO # Python 3.x
from pkg_resources import iter_entry_points
try:
from urlparse import urlparse # Python 2.x
except ImportError: # pragma: no cover
from urllib.parse import urlparse # Python 3.x
from six import string_types
try:
basestring = basestring # Python 2.x
except NameError: # pragma: no cover
basestring = (str, bytes) # Python 3.x
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy an entire directory tree rooted at `src`. The
    destination directory, named by `dst`, might exist already; if
    not, then it will be created as well as missing parent
    directories. Permissions and times of directories are copied with
    :func:`shutil.copystat`, individual files are copied using
    :func:`shutil.copy2`.

    If `symlinks` is true, symbolic links in the source tree are
    represented as symbolic links in the new tree; if false or
    omitted, the contents of the linked files are copied to the new
    tree.

    If ignore is given, it must be a callable that will receive as its
    arguments the directory being visited by :func:`shutil.copytree`,
    and a list of its contents, as returned by
    :func:`os.listdir`. Since :func:`copytree` is called recursively,
    the ignore callable will be called once for each directory that is
    copied. The callable must return a sequence of directory and file
    names relative to the current directory (i.e. a subset of the
    items in its second argument); these names will then be ignored in
    the copy process. :func:`shutil.ignore_patterns` can be used to
    create such a callable that ignores names based on glob-style
    patterns.

    If exception(s) occur, a :exc:`shutil.Error` is raised with a list
    of reasons.

    Raises :exc:`ValueError` if `dst` equals `src` or lies inside it
    (copying a tree into itself would recurse forever).

    .. note:: This is a plain copy of the :func:`shutil.copytree`
              implementation as provided with Python >= 2.6. There is,
              however, one difference: this version will try to go on
              if the destination directory already exists.

              It is the callers responsibility to make sure that the
              `dst` directory is in a proper state for
              :func:`copytree`.
    """
    # Refuse to copy a tree into itself or into one of its own
    # subdirectories. (The old substring test `src in dst` gave false
    # positives for unrelated paths, e.g. src='/a/b', dst='/x/a/b'.)
    src_abs = os.path.abspath(src)
    dst_abs = os.path.abspath(dst)
    if dst_abs == src_abs or dst_abs.startswith(src_abs + os.sep):
        raise ValueError("Cannot copy %s to %s: trees are nested" % (
            src, dst))
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()
    try:
        os.makedirs(dst)
    except os.error:
        # dst (or a parent) already exists -- that is explicitly allowed
        pass
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore)
            else:
                shutil.copy2(srcname, dstname)
            # XXX What about devices, sockets etc.?
        except (IOError, os.error) as why:
            errors.append((srcname, dstname, str(why)))
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except (shutil.Error) as why:  # pragma: no cover
            errors.append((srcname, dstname, str(why)))
    try:
        shutil.copystat(src, dst)
    except (OSError) as why:  # pragma: no cover
        # BUG FIX: was errors.extend(...), which flattened the tuple into
        # three separate entries instead of one (src, dst, why) record.
        errors.append((src, dst, str(why)))
    if errors:
        raise shutil.Error(errors)
def copy_to_secure_location(src):
    """Copy `src` to a temporary location.

    If `src` is a file, the complete directory containing this file
    will be copied. If `src` is a directory this directory will be
    copied.

    Returns the path of the newly created directory.

    To copy the filetree we use :func:`copytree` with no additional
    parameters. That means that symlinks won't be copied and other
    restrictions apply. See :func:`shutil.copytree` docs to check.
    """
    source_dir = os.path.dirname(src) if os.path.isfile(src) else src
    target_dir = tempfile.mkdtemp()
    copytree(source_dir, target_dir)
    return target_dir
def get_entry_points(group):
    """Get all entry point plugins registered for group `group`.

    The found entry points are returned as a dict with ``<NAME>`` as
    key and ``<PLUGIN>`` as value where ``<NAME>`` is the name under
    which the respective plugin was registered with setuptools and
    ``<PLUGIN>`` is the registered component itself.
    """
    return dict(
        (entry_point.name, entry_point.load())
        for entry_point in iter_entry_points(group=group))
def unzip(path, dst_dir):
    """Unzip the files stored in zipfile `path` in `dst_dir`.

    `dst_dir` is the directory where all contents of the ZIP file is
    stored into. Missing intermediate directories are created, so
    archives that do not contain explicit directory entries (which is
    common) are extracted correctly as well.
    """
    zf = zipfile.ZipFile(path)
    try:
        # Create all dirs stored explicitly in the archive (sorted so
        # parents come before their children).
        dirs = sorted([name for name in zf.namelist() if name.endswith('/')])
        for dir in dirs:
            new_dir = os.path.join(dst_dir, dir)
            if not os.path.isdir(new_dir):
                os.makedirs(new_dir)
        # Create all files, making any missing parent directories first
        # (the old code failed for nested files without directory entries).
        for name in zf.namelist():
            if name.endswith('/'):
                continue
            target = os.path.join(dst_dir, name)
            parent = os.path.dirname(target)
            if parent and not os.path.isdir(parent):
                os.makedirs(parent)
            outfile = open(target, 'wb')
            try:
                outfile.write(zf.read(name))
                outfile.flush()
            finally:
                outfile.close()
    finally:
        # close the archive even if extraction fails (old code leaked it)
        zf.close()
    return
def zip(path):
    """Create a ZIP file out of `path`.

    If `path` points to a file then a ZIP archive is created with this
    file in compressed form in a newly created directory. The name of
    the created zipfile is the basename of the input file with a
    ``.zip`` extension appended.

    If `path` points to a directory then files and directories
    _inside_ this directory are added to the archive.

    Also empty directories are added although it cannot be guaranteed
    that these entries are recovered correctly later on with all tools
    and utilities on all platforms.

    .. note:: It is the callers responsibility to remove the directory
              the zipfile is created in after usage.

    .. note:: The function name shadows the builtin :func:`zip`; it is
              kept for backward compatibility with existing callers.
    """
    if not os.path.isdir(path) and not os.path.isfile(path):
        raise ValueError('Must be an existing path or directory: %s' % path)
    # Strip trailing path separators so basename() and the arcname
    # slicing below behave identically for 'dir' and 'dir/' (the old
    # code produced an archive literally named '.zip' in that case).
    normalized = path.rstrip(os.sep)
    if normalized:
        path = normalized
    new_dir = tempfile.mkdtemp()
    basename = os.path.basename(path)
    new_path = os.path.join(new_dir, basename) + '.zip'
    zout = zipfile.ZipFile(new_path, 'w', zipfile.ZIP_DEFLATED)
    if os.path.isfile(path):
        zout.write(path, basename)
        zout.close()
        return new_path
    for root, dirs, files in os.walk(path):
        for dir in dirs:
            # XXX: Maybe the wrong way to store directories?
            dir_path = os.path.join(root, dir)
            arc_name = dir_path[len(path) + 1:] + '/'
            info = zipfile.ZipInfo(arc_name)
            zout.writestr(info, '')
        for file in files:
            file_path = os.path.join(root, file)
            arc_name = file_path[len(path) + 1:]
            zout.write(file_path, arc_name)
    zout.close()
    return new_path
def remove_file_dir(path):
    """Remove a directory.

    If `path` points to a file, the directory containing the file is
    removed. If `path` is a directory, this directory is removed.
    Non-string or non-existing paths are silently ignored.
    """
    if not isinstance(path, string_types):
        return
    if not os.path.exists(path):
        return
    target = os.path.dirname(path) if os.path.isfile(path) else path
    assert target not in ['/', '/tmp']  # Safety belt
    shutil.rmtree(target)
    return
# Regular expressions used by extract_css() to normalize inline styles.
RE_CSS_TAG = re.compile('(.+?)(\.?\s*){')        # leading tag name of a CSS rule
RE_CSS_STMT_START = re.compile('\s*(.*?{.*?)')   # start of a CSS statement
RE_CURLY_OPEN = re.compile('{([^ ])')
RE_CURLY_CLOSE = re.compile('([^ ])}')
RE_EMPTY_COMMENTS = re.compile('/\*\s*\*/')      # empty /* */ comments
# Matches CDATA sections (optionally wrapped in CSS comments) around
# embedded HTML comments; group 7 is the payload that is kept.
RE_CDATA_MASSAGE = '(((/\*)?<!\[CDATA\[(\*/)?)((.*?)<!--)?'
RE_CDATA_MASSAGE += '(.*?)(-->(.*?))?((/\*)?]]>(\*/)?))'

# (pattern, replacement) pairs applied to HTML before parsing.
MARKUP_MASSAGE = [
    (re.compile('(<[^<>]*)/>'), lambda x: x.group(1) + ' />'),
    (re.compile('<!\s+([^<>]*)>'),
     lambda x: '<!' + x.group(1) + '>')
]

# The CDATA massage consists of the markup fixes plus CDATA stripping.
# Use a copy so the extend() below does not also mutate MARKUP_MASSAGE
# (the old code aliased the two lists).
CDATA_MASSAGE = list(MARKUP_MASSAGE)
CDATA_MASSAGE.extend([
    (re.compile(RE_CDATA_MASSAGE, re.M + re.S),
     lambda match: match.group(7))])
def extract_css(html_input, basename='sample.html', prettify_html=False):
    """Scan `html_input` and replace all styles with single link to a CSS
    file.

    Returns tuple ``<MODIFIED_HTML>, <CSS-CODE>``.

    If the `html_input` contains any ``<style>`` tags, their content
    is aggregated and returned in ``<CSS-CODE>``. If no styles are
    found, ``<CSS-CODE>`` is ``None``.

    The tags are all stripped from `html` input and replaced by a link
    to a stylesheet file named ``<basename>.css``. Any extension in
    `basename` is stripped. So ``sample.html`` as `basename` will
    result in a link to ``sample.css``. The same applies for a
    `basename` ``sample.css`` or ``sample``. The modified HTML code is
    returned as first item of the result tuple.

    If `prettify_html` is True, the generated HTML code is prettified
    by BeautifulSoup. This might result in unexpected, visible gaps in
    rendered output.
    """
    # create HTML massage that removes CDATA and HTML comments in styles
    for fix, m in CDATA_MASSAGE:
        html_input = fix.sub(m, html_input)
    soup = BeautifulSoup(html_input, 'html.parser')
    # Aggregate the text of every <style> tag into one CSS blob.
    css = '\n'.join([style.text for style in soup.findAll('style')])
    if '<style>' in css:
        # Leftover literal markers from nested/duplicated style tags.
        css = css.replace('<style>', '\n')
    # lowercase leading tag names
    css = re.sub(
        RE_CSS_TAG,
        lambda match:
        match.group(1).lower() + match.group(2) + '{', css)
    # set indent of all CSS statement lines to nil.
    css = re.sub(RE_CSS_STMT_START,
                 lambda match: '\n' + match.group(1), css)
    # insert spaces after and before curly brackets.
    css = re.sub(RE_CURLY_OPEN, lambda match: '{ ' + match.group(1), css)
    css = re.sub(RE_CURLY_CLOSE, lambda match: match.group(1) + ' }', css)
    css_name = os.path.splitext(basename)[0] + '.css'
    # Remove empty style comments
    css = re.sub(RE_EMPTY_COMMENTS, lambda match: '', css)
    if css.startswith('\n'):
        css = css[1:]
    for num, style in enumerate(soup.findAll('style')):
        if num == 0 and css != '':
            # replace first style with link to stylesheet
            # if there are any styles contained
            new_tag = soup.new_tag(
                'link', rel='stylesheet', type='text/css', href=css_name)
            style.replace_with(new_tag)
        else:
            # Drop every further style tag entirely.
            style.extract()
    if css == '':
        css = None
    if prettify_html:
        return soup.prettify(), css
    # UnicodeDammit normalizes the serialized soup back into text.
    return UnicodeDammit(str(soup)).markup, css
# Heading text of form '<h1>1.1Heading' where the leading section number
# is not separated from the heading text.
RE_HEAD_NUM = re.compile('(<h[1-6][^>]*>\s*)(([\d\.]+)+)([^\d])',
                         re.M | re.S)
def cleanup_html(html_input, basename,
                 fix_head_nums=True, fix_img_links=True, fix_sdfields=True):
    """Clean up HTML code.

    If `fix_head_nums` is ``True``, heading contents of style
    ``1.1Heading`` (number glued to the heading text) get their number
    wrapped in a ``<span class="u-o-headnum">`` tag.

    If `fix_img_links` is ``True``, :func:`rename_html_img_links` is
    applied to the result; if `fix_sdfields` is ``True``, all
    ``<sdfield>`` tags are renamed to ``<span>`` via
    :func:`rename_sdfield_tags`.

    Returns a tuple ``(<HTML_OUTPUT>, <IMG_NAME_MAP>)``, where
    ``<IMG_NAME_MAP>`` maps old image filenames to new ones (see
    :func:`rename_html_img_links`).
    """
    img_name_map = {}
    if fix_img_links is True:
        html_input, img_name_map = rename_html_img_links(html_input, basename)
    if fix_sdfields is True:
        html_input = rename_sdfield_tags(html_input)
    if fix_head_nums is not True:
        return html_input, img_name_map
    def wrap_headnum(match):
        # Wrap only the numeric prefix; the first non-digit char
        # (group 4) is re-emitted unchanged after the span.
        return '%s<span class="u-o-headnum">%s</span>%s' % (
            match.group(1), match.group(3), match.group(4))
    return re.sub(RE_HEAD_NUM, wrap_headnum, html_input), img_name_map
def cleanup_css(css_input, minified=True):
    """Cleanup CSS code delivered in `css_input`, a string.

    Returns 2-item tuple ``(<CSS>, <ERRORS>)`` where ``<CSS>`` is the
    cleaned and minimized CSS code and ``<ERRORS>`` is a multiline
    string containing warnings and errors occured during processing
    the CSS.

    By default the ``<CSS>`` returned is minified to reduce network
    load, etc. If you want pretty non-minified output, set `minified`
    to ``False``.

    We expect and return texts, not bytestreams.
    """
    # Collect cssutils warnings/errors in a string buffer by hooking a
    # temporary handler into the root logger.
    local_log = StringIO()
    handler = logging.StreamHandler(local_log)
    handler.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    # NOTE: the former `handler.propagate = False` was removed -- that
    # attribute belongs to Logger objects, not handlers, so it was a no-op.
    handler.setLevel(logging.WARNING)
    logger = logging.getLogger()
    logger.addHandler(handler)
    try:
        cssutils.log.setLog(logger)
        cssutils.ser.prefs.useDefaults()
        if minified is True:
            cssutils.ser.prefs.useMinified()
        sheet = cssutils.parseString(css_input)
        local_log.flush()
        encoding = sheet.encoding or 'utf-8'
        css_text = sheet.cssText.decode(encoding)
        return css_text, local_log.getvalue()
    finally:
        # Bug fix: remove our handler again. It used to stay attached to
        # the root logger forever, so every call leaked a handler and
        # produced duplicated log output.
        logger.removeHandler(handler)
def rename_html_img_links(html_input, basename):
    """Rename the ``src`` attribute of all ``<img>`` tags based on `basename`.

    Each local ``src`` is rewritten to ``<BASENAME>_<NUM>.<EXT>`` where
    ``<BASENAME>`` is `basename` without extension (dots replaced by
    underscores), ``<NUM>`` a counter starting at 1 and ``<EXT>`` the
    original filename extension. Repeated references to the same image
    get the same new name; links to external sources (http etc.) are
    left untouched.

    Returns a tuple ``<HTML_OUTPUT>, <NAME_MAP>`` with the modified
    HTML and a dict mapping old filenames to new ones. No real files
    are renamed here -- callers can use the map to do that.

    Expects and returns text, not bytes.
    """
    soup = BeautifulSoup(html_input, 'html.parser')
    name_map = {}
    counter = 1
    stem = os.path.splitext(basename)[0].replace('.', '_')
    for img in soup.findAll('img'):
        old_src = img.get('src', None)
        if old_src is None:
            continue
        if old_src in name_map.keys():
            # Same image referenced again -- reuse the earlier name.
            img['src'] = name_map[old_src]
            continue
        if urlparse(old_src)[0] not in ['file', '']:
            # only handle local files
            continue
        ext = os.path.splitext(old_src)[1] if '.' in old_src else ''
        new_src = '%s_%s%s' % (stem, counter, ext)
        counter += 1
        img['src'] = new_src
        name_map[old_src] = new_src
    return soup.decode(), name_map
# OpenOffice emits <sdfield ...> tags; match them case-insensitively.
RE_SDFIELD_OPEN = re.compile('<sdfield([^>]*)>', re.M | re.S | re.I)
RE_SDFIELD_CLOSE = re.compile('</sdfield>', re.M | re.S | re.I)
def rename_sdfield_tags(html_input):
    """Rename all ``<sdfield>`` tags to ``<span class="sdfield">``.

    Any attributes of the original tag are preserved. `html_input`
    must be a text, not a bytes stream.
    """
    def open_tag(match):
        # Keep the original attribute string, prepend our marker class.
        return '<span %s%s>' % ('class="sdfield"', match.group(1))
    result = RE_SDFIELD_OPEN.sub(open_tag, html_input)
    return RE_SDFIELD_CLOSE.sub('</span>', result)
def base64url_encode(string):
    """Return the base64url encoding of `string`.

    base64url is plain base64 with ``/`` and ``+`` in the result
    replaced by ``_`` and ``-``, which makes it suitable for building
    file system paths from binary data.
    """
    raw = string
    if isinstance(raw, str):
        try:
            raw = raw.encode("latin-1")
        except UnicodeDecodeError:  # pragma: no cover
            # Python 2.x: `str` is already a byte string.
            pass
    encoded = base64.urlsafe_b64encode(raw)
    if isinstance(encoded, str):  # pragma: no cover
        # Python 2.x returns text already.
        return encoded
    # Python 3.x: turn the `bytes` result into text.
    return encoded.decode("ascii")
def base64url_decode(string):
    """Decode the base64url encoded `string` back into text.

    .. seealso:: base64url_encode
    """
    decoded = base64.urlsafe_b64decode(string)
    if isinstance(decoded, str):  # pragma: no cover
        # Python 2.x already yields text.
        return decoded
    # Python 3.x: map the raw bytes back via latin-1.
    return decoded.decode("latin-1")
def string_to_bool(string):
    """Turn `string` into a boolean value.

    ``yes``, ``1`` and ``true`` (any case) map to ``True``; ``no``,
    ``0`` and ``false`` map to ``False``. Booleans passed in by
    accident are returned unchanged. Everything else yields ``None``.
    """
    if not isinstance(string, string_types):
        if string is True or string is False:
            return string
        return None
    mapping = {'yes': True, '1': True, 'true': True,
               'no': False, '0': False, 'false': False}
    return mapping.get(string.lower(), None)
def strict_string_to_bool(string):
    """Like `string_to_bool`, but raise `ValueError` instead of
    returning ``None`` when no boolean can be parsed from `string`.
    """
    value = string_to_bool(string)
    if value is not None:
        return value
    raise ValueError(
        '%s is not a valid boolean. Use "yes" or "no".' % string)
def string_to_stringtuple(string, strict=False):
    """Convert a single string into a tuple of strings.

    The input string is expected to contain comma-separated string
    values. The single values are stripped (whitespaces removed at
    beginning and ending).

      >>> string_to_stringtuple('foo, bar,baz')
      ('foo', 'bar', 'baz')

    Empty entries (``',,,,'``, ``'a, ,b'`` and similar) are filtered
    out.

    This function is _not_ 'strict' by default. If `strict` is set to
    ``True`` it does not accept empty strings or ``None`` as input and
    raises `ValueError` instead.
    """
    if not string:
        if strict:
            raise ValueError('`string` must contain at least some string')
        else:
            return ()
    # Bug fix: strip each entry *before* filtering. Previously the
    # truthiness check ran on the unstripped value, so whitespace-only
    # entries ('a, ,b') survived as empty strings in the result,
    # contradicting the documented behavior.
    return tuple(x.strip() for x in string.split(',') if x.strip())
def filelike_cmp(file1, file2, chunksize=512):
    """Tell whether `file1` and `file2` have equal content.

    Both arguments may be file paths or file-like objects already open
    for reading. If both are paths, consider `filecmp.cmp` from the
    standard library instead. `chunksize` is the comparison chunk size
    in bytes.

    Returns ``True`` if the contents are equal, ``False`` otherwise.
    """
    def is_path(obj):
        return isinstance(obj, string_types) or isinstance(obj, bytes)
    stream1 = open(file1, 'rb') if is_path(file1) else file1
    stream2 = open(file2, 'rb') if is_path(file2) else file2
    stream1.seek(0)  # always read from the beginning, especially when
    stream2.seek(0)  # this function is used inside loops.
    try:
        while True:
            block1 = stream1.read(chunksize)
            block2 = stream2.read(chunksize)
            # Normalize text chunks to bytes so that mixing a text
            # stream with a binary one still compares correctly.
            try:
                block1 = block1.encode('utf-8')
            except AttributeError:  # pragma: no cover
                # already a bytes object, or py2.x
                pass
            try:
                block2 = block2.encode('utf-8')
            except AttributeError:  # pragma: no cover
                # already a bytes object, or py2.x
                pass
            if block1 != block2:
                return False
            if not block1:
                return True
    finally:
        # Only close streams we opened ourselves.
        if is_path(file1):
            stream1.close()
        if is_path(file2):
            stream2.close()
def write_filelike(file_obj, path, chunksize=512):
    """Write contents of `file_obj` to `path`.

    `file_obj` can be a string or some file-like object. If it is a
    file-like object, it must be opened for reading.

    Content is written in chunks of `chunksize`.
    """
    f1 = file_obj
    if isinstance(file_obj, string_types):
        f1 = StringIO(file_obj)
    elif isinstance(file_obj, bytes):  # pragma: no cover
        f1 = StringIO(file_obj.decode('utf-8'))
    f2 = open(path, 'w')
    try:
        while True:
            # Bug fix: honour the `chunksize` parameter -- the chunk
            # size used to be hard-coded to 512.
            chunk = f1.read(chunksize)
            if not chunk:
                break
            f2.write(chunk)
    finally:
        f2.close()
    return
| ulif/ulif.openoffice | src/ulif/openoffice/helpers.py | Python | gpl-2.0 | 22,408 |
#!/usr/bin/python
# Copyright (c) 2015, Michael LeBeane
# The University of Texas at Austin
# The Laboratory for Computer Architecture (LCA)
# All rights reserved.
#
# Redistribution of this source or derived binaries is not authorized without
# the express written consent of the original copyright holders.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AN
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, math, re, collections, re, csv, argparse, subprocess
import buildstack, sniper_lib
from multiprocessing.pool import ThreadPool
# Parts of this function was modified from the Sniper simulator's McPAT plugin.
def run_mcpat(input_dir, mcpatdir, stats, CORES, HW_THREADS):
    """Run McPAT over every input file in `input_dir`, return a power trace.

    Python 2 code (print statements, dict.itervalues).

    :param input_dir: directory of McPAT input files; each filename carries
        its sample timestamp between ``_`` and ``.``.
    :param mcpatdir: directory containing the ``mcpat`` binary.
    :param stats: nested mapping ``stats['CPU<n>'][timestamp]`` providing a
        ``busy_cycles`` counter per hardware thread.
    :param CORES: number of physical cores.
    :param HW_THREADS: total number of hardware threads (>= CORES).
    :returns: dict ``power[timestamp][component][powertype]``.
    """
    # Component stack (name, minimum plotted fraction, label); retained
    # from the Sniper plotting code this function was derived from.
    all_items = [
      [ 'core', .01, 'core-ooo' ],
      [ 'ifetch', .01, 'core-ifetch' ],
      [ 'alu', .01, 'core-alu-complex' ],
      [ 'int', .01, 'core-alu-int' ],
      [ 'fp', .01, 'core-alu-fp' ],
      [ 'mem', .01, 'core-mem' ],
      [ 'icache', .01, 'core-icache' ],
      [ 'dcache', .01, 'core-dcache' ],
      [ 'l2', .01, 'l2' ],
      [ 'l3', .01, 'l3' ],
      [ 'noc', .01, 'noc' ],
      [ 'other', .01, 'other' ],
    ]
    # Invoke the McPAT binary on one input file and capture its stdout.
    def mcpat_run(inputfile,mcpatdir):
        return subprocess.check_output("LD_LIBRARY_PATH=$LD_LIBRARY_PATH:" + mcpatdir + " " + mcpatdir + "/mcpat -print_level 5 -opt_for_clk 1 -infile " + inputfile, shell=True)
    # Aggregate McPAT's parsed per-component output into a flat
    # {stack-label: power} dict, either summed over all cores or for a
    # single core index.
    def power_stack(power_dat, scale=[1.0], powertype = 'total', core = 'all', nocollapse = False):
        def getpower(powers, index=-1, key = None):
            def getcomponent(suffix):
                if key: return powers.get(key+'/'+suffix, 0)
                else: return powers.get(suffix, 0)
            # NOTE(review): `index` is immediately forced to -1, so the
            # scale[index] branch below is dead code and `scale` is never
            # actually consulted -- confirm whether that is intended.
            index = -1
            if index == -1:
                scale_factor = 1.0
            else:
                scale_factor = scale[index]
            if powertype == 'dynamic':
                return getcomponent('Runtime Dynamic')
            elif powertype == 'static':
                return getcomponent('Subthreshold Leakage') * scale_factor + getcomponent('Subthreshold Leakage with power gating') * (1 - scale_factor) + getcomponent('Gate Leakage')
            elif powertype == 'total':
                dyn=getcomponent('Runtime Dynamic')
                sub_leak = getcomponent('Subthreshold Leakage') * scale_factor
                sub_leak_gate = getcomponent('Subthreshold Leakage with power gating') * (1 - scale_factor)
                gate_leak = getcomponent('Gate Leakage')
                return dyn + sub_leak + sub_leak_gate + gate_leak
            elif powertype == 'area':
                return getcomponent('Area') + getcomponent('Area Overhead')
            else:
                raise ValueError('Unknown powertype %s' % powertype)
        if core == "all":
            data = {
                'l2': 5 * sum([ getpower(cache,index) for index,cache in enumerate(power_dat.get('L2', []) )]) # shared L2
                    + 5 * sum([ getpower(core, index, 'L2') for index,core in enumerate(power_dat['Core']) ]), # private L2
                'l3': 5 * sum([ getpower(cache) for index,cache in enumerate(power_dat.get('L3', [])) ]),
                'core-ooo': sum([ getpower(core, index, 'Execution Unit/Instruction Scheduler')
                    + getpower(core, index, 'Execution Unit/Register Files')
                    + getpower(core, index, 'Execution Unit/Results Broadcast Bus')
                    + getpower(core, index, 'Renaming Unit')
                    for index,core in enumerate(power_dat['Core'])
                ]),
                'core-ifetch': sum([ getpower(core, index, 'Instruction Fetch Unit/Branch Predictor')
                    + getpower(core, index, 'Instruction Fetch Unit/Branch Target Buffer')
                    + getpower(core, index, 'Instruction Fetch Unit/Instruction Buffer')
                    + getpower(core, index, 'Instruction Fetch Unit/Instruction Decoder')
                    for index,core in enumerate(power_dat['Core'])
                ]),
                'core-icache': sum([ getpower(core, index, 'Instruction Fetch Unit/Instruction Cache') for index, core in enumerate(power_dat['Core']) ]),
                'core-dcache': sum([ getpower(core, index, 'Load Store Unit/Data Cache') for index,core in enumerate(power_dat['Core'] )]),
                'core-alu-complex': sum([ getpower(core, index, 'Execution Unit/Complex ALUs') for index,core in enumerate(power_dat['Core']) ]),
                'core-alu-fp': sum([ getpower(core, index, 'Execution Unit/Floating Point Units') for index,core in enumerate(power_dat['Core'] )]),
                'core-alu-int': sum([ getpower(core, index, 'Execution Unit/Integer ALUs') for index,core in enumerate(power_dat['Core']) ]),
                'core-mem': sum([ getpower(core, index, 'Load Store Unit/LoadQ')
                    + getpower(core, index, 'Load Store Unit/StoreQ')
                    + getpower(core, index, 'Memory Management Unit')
                    for index,core in enumerate(power_dat['Core'])
                ]),
            }
            #data['other'] = getpower(power_dat["Processor"]) - (sum(data.values()))# - data['dram'])
        else:
            data = {
                'l2': 5 * getpower(power_dat['Core'][core], -1, 'L2'), # private L2
                'core-ooo': getpower(power_dat['Core'][core], -1, 'Execution Unit/Instruction Scheduler')
                    + getpower(power_dat['Core'][core], -1, 'Execution Unit/Register Files')
                    + getpower(power_dat['Core'][core], -1, 'Execution Unit/Results Broadcast Bus')
                    + getpower(power_dat['Core'][core], -1, 'Renaming Unit'),
                'core-ifetch': getpower(power_dat['Core'][core], -1, 'Instruction Fetch Unit/Branch Predictor')
                    + getpower(power_dat['Core'][core], -1, 'Instruction Fetch Unit/Branch Target Buffer')
                    + getpower(power_dat['Core'][core], -1, 'Instruction Fetch Unit/Instruction Buffer')
                    + getpower(power_dat['Core'][core], -1, 'Instruction Fetch Unit/Instruction Decoder'),
                'core-icache': getpower(power_dat['Core'][core], -1, 'Instruction Fetch Unit/Instruction Cache'),
                'core-dcache': getpower(power_dat['Core'][core], -1, 'Load Store Unit/Data Cache'),
                'core-alu-complex': getpower(power_dat['Core'][core], -1, 'Execution Unit/Complex ALUs'),
                'core-alu-fp': getpower(power_dat['Core'][core], -1, 'Execution Unit/Floating Point Units'),
                'core-alu-int': getpower(power_dat['Core'][core], -1, 'Execution Unit/Integer ALUs'),
                'core-mem': getpower(power_dat['Core'][core], -1, 'Load Store Unit/LoadQ')
                    + getpower(power_dat['Core'][core], -1, 'Load Store Unit/StoreQ')
                    + getpower(power_dat['Core'][core], -1, 'Memory Management Unit'),
            }
        return data
    onlyfiles = [ f for f in os.listdir(input_dir) if os.path.isfile(os.path.join(input_dir,f)) ]
    WORKER_THREADS = 1
    power_threads = [None] * len(onlyfiles)
    pt_results = [None] * WORKER_THREADS
    power = {}
    #TODO: Mutlithreaded McPAT invocation occasionally causes DB corruption
    # disabled for now
    # Py2 idiom: take the timestamp keys from an arbitrary CPU entry.
    timestamps = stats.itervalues().next().keys()
    START_TIME = timestamps[0]
    pool = ThreadPool(processes=WORKER_THREADS)
    for i in range(0, len(onlyfiles), WORKER_THREADS):
        print "launching mcpat thread"
        for j in range(0,WORKER_THREADS):
            if (i + j) < len(onlyfiles):
                pt_results[j] = pool.apply_async(mcpat_run, (input_dir + onlyfiles[i + j],mcpatdir))
        for j in range(0,WORKER_THREADS):
            if (i + j) < len(onlyfiles):
                power_threads[i + j] = pt_results[j].get()
        print WORKER_THREADS, " mcpat runs finished" , i
    for i,files in enumerate(onlyfiles):
        # The timestamp is encoded in the filename between '_' and '.'.
        result = re.findall('\_(.*?)\.', files)
        timestamp = float(result[0]) + START_TIME
        power[timestamp] = {}
        # McPAT separates per-component sections with a row of 89 '*'.
        components = power_threads[i].split('*'*89)[2:-1]
        # Parse output
        power_dat = {}
        for component in components:
            lines = component.strip().split('\n')
            componentname = lines[0].strip().strip(':')
            values = {}
            prefix = []; spaces = []
            for line in lines[1:]:
                if not line.strip():
                    continue
                elif '=' in line:
                    res = re.match(' *([^=]+)= *([-+0-9.e]+)(nan)?', line)
                    if res:
                        name = ('/'.join(prefix + [res.group(1)])).strip()
                        if res.groups()[-1] == 'nan':
                            # Result is -nan. Happens for instance with 'Subthreshold Leakage with power gating'
                            # on components with 0 area, such as the Instruction Scheduler for in-order cores
                            value = 0.
                        else:
                            try:
                                value = float(res.group(2))
                            except:
                                print >> sys.stderr, 'Invalid float:', line, res.groups()
                                raise
                        values[name] = value
                else:
                    # Indentation encodes nesting: maintain a prefix stack
                    # keyed by the number of leading spaces.
                    res = re.match('^( *)([^:(]*)', line)
                    if res:
                        j = len(res.group(1))
                        while(spaces and j <= spaces[-1]):
                            spaces = spaces[:-1]
                            prefix = prefix[:-1]
                        spaces.append(j)
                        name = res.group(2).strip()
                        prefix.append(name)
            if componentname in ('Core', 'L2', 'L3'):
                # Translate whatever level we used for NUCA back into NUCA
                outputname = componentname
                if outputname not in power_dat:
                    power_dat[outputname] = []
                power_dat[outputname].append(values)
            else:
                assert componentname not in power_dat
                power_dat[componentname] = values
        if not power_dat:
            raise ValueError('No valid McPAT output found')
        # Now, we will massage the power consumption based on how idle/active the core was for this quanta of time
        # For core level stats, we need to merge all the HW_Threads into their shared physical resources
        # scale represents precent active
        threads_per_core = HW_THREADS / CORES
        core_id = 0
        active = []
        for k in range(0,CORES,1):
            active.append(0)
            # Busy fraction: max busy_cycles over the HW threads mapped to
            # this core. 2400000000.0 looks like an assumed 2.4 GHz clock
            # -- TODO confirm against the simulated configuration.
            active[k] =max([stats["CPU" + str(j)][timestamp]["busy_cycles"] for j in range(k, HW_THREADS, CORES)]) / 2400000000.0
        # Plot stack
        print_stack = 1
        power[timestamp]["TOTAL"] = {}
        power[timestamp]["TOTAL"]["static"] = 0
        # TODO: this is very primitive, need a better way to model idle states
        # NOTE(review): 2.2 W active / 1.00 W idle static power appear to be
        # hard-coded calibration constants for the modeled core -- confirm.
        for k in range(0,CORES,1):
            power[timestamp]["CPU" + str(k)] = {}
            power[timestamp]["CPU" + str(k)]["dynamic"] = power_stack(power_dat, active, "dynamic", k)
            power[timestamp]["CPU" + str(k)]["static"] = active[k] * 2.2 + (1-active[k]) * 1.00
            power[timestamp]["TOTAL"]["static"] += active[k] * 2.2 + (1-active[k]) * 1.00
        power[timestamp]["TOTAL"]["dynamic"] = power_stack(power_dat, active, "dynamic", "all")
    return power
# Run in standalone script mode
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Runs McPAT and reports results")
    parser.add_argument("input_dir", help="directory containg McPAT inputs")
    parser.add_argument("mcpatdir", help="directory containing McPAT")
    parser.add_argument("stats", help="stats dictionary")
    # Bug fix: argparse yields strings by default, but run_mcpat() does
    # arithmetic (HW_THREADS / CORES, range(...)) with these values, so
    # they must be parsed as integers.
    parser.add_argument("cores", type=int, help="hw cores")
    parser.add_argument("threads", type=int, help="threads per core")
    args = parser.parse_args()
    run_mcpat(args.input_dir, args.mcpatdir, args.stats, args.cores, args.threads)
| mlebeane/wattwatcher | run_mcpat.py | Python | bsd-3-clause | 11,231 |
"""
Plugin for ResolveUrl
Copyright (C) 2015 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from resolveurl.plugins.lib import aadecode
from resolveurl import common
from resolveurl.plugins.lib import helpers
from resolveurl.resolver import ResolveUrl, ResolverError
class VideowoodResolver(ResolveUrl):
    # Resolver registration data used by the resolveurl framework.
    name = "videowood"
    domains = ['videowood.tv']
    pattern = r'(?://|\.)(videowood\.tv)/(?:embed/|video/)([0-9a-z]+)'

    def get_media_url(self, host, media_id):
        """Fetch the embed page and extract the direct stream URL.

        The page hides the URL inside an aaencoded script; raises
        ResolverError when the video is gone or no link can be found.
        """
        web_url = self.get_url(host, media_id)
        headers = {'Referer': web_url, 'User-Agent': common.FF_USER_AGENT}
        html = self.net.http_GET(web_url, headers=headers).content
        try:
            html = html.encode('utf-8')
        except:
            pass
        if "This video doesn't exist." in html:
            raise ResolverError('The requested video was not found.')
        # The aaencoded payload follows the split('|') call in the page.
        aa_match = re.search(r"split\('\|'\)\)\)\s*(.*?)</script>", html)
        if not aa_match:
            raise ResolverError('Video Link Not Found')
        decoded = aadecode.decode(aa_match.group(1))
        url_match = re.search("'([^']+)", decoded)
        if not url_match:
            raise ResolverError('Video Link Not Found')
        stream_url = url_match.group(1)
        return stream_url + helpers.append_headers({'User-Agent': common.FF_USER_AGENT})

    def get_url(self, host, media_id):
        """Build the embed-page URL for this host/media id."""
        return self._default_get_url(host, media_id, 'http://{host}/embed/{media_id}')
| dknlght/dkodi | src/script.module.resolveurl/lib/resolveurl/plugins/videowood.py | Python | gpl-2.0 | 2,046 |
# Loosely based on https://github.com/Skarlso/SublimeGmailPlugin by Skarlso
import sublime
import sublime_plugin
from smtplib import SMTP
from email.mime.text import MIMEText
from email.header import Header
# from email.headerregistry import Address
# from email.utils import parseaddr, formataddr
# Plugin configuration. Every message field has a default; fields whose
# "interactive" flag is True are prompted for in Sublime's input panel
# before sending, the rest silently use their default value.
config = {
    # TODO You need to change these values
    # Default value for each field of the email message
    "default_value": {
        "smtp_login": "example@gmail.com",
        "smtp_passwd": "c1everP@ssword",
        "from": "example@gmail.com",
        "display_name": u"Firstname Lastname",
        "recipients": "first@recipient.com; second@recipient.com; third@recipient.com",
        "subject": u"Sent from SublimeText"
    },
    # TODO Set to "true" to be prompted to edit the value, "false" to silently use the default_value from above
    "interactive": {
        "smtp_login": False,
        "smtp_passwd": False,
        "from": False,
        "display_name": True,
        "recipients": False,
        "subject": True
    },
    # The prompt message to the user for each field
    "prompt": {
        "smtp_login": "GMail User ID",
        "smtp_passwd": "GMail Password",
        "from": "Sender e-mail address",
        "display_name": "Sender's display name",
        "recipients": "Recipients (semicolon or comma separated list)",
        "subject": "Subject"
    }
}
class GmailCommand(sublime_plugin.TextCommand):
    """Send the currently selected text as an e-mail through GMail.

    Collects all non-empty selections, prompts for the fields marked
    interactive in `config`, then ships the message via GMail's SMTP
    server (STARTTLS on port 587).
    """

    def run(self, edit):
        """Sublime entry point: gather selections, start the prompt chain."""
        # Collect all the text regions and send together
        text = ''
        for region in self.view.sel():
            if not region.empty():
                # Get the selected text
                text = '%s%s\n\n' % (text, self.view.substr(region))
        # Only send an email if there is some content to send
        if text:
            self.values = {}
            self.values['body'] = text
            self.stack = ["smtp_login", "smtp_passwd", "from", "display_name", "recipients", "subject"]
            self.handle_input()
        else:
            sublime.status_message('Please select some text to send (via gmail)')

    def handle_input(self, key=None, value=None):
        """Consume one field from self.stack, prompting if configured.

        Re-entered (directly or via the input-panel callback) until the
        stack is empty, then schedules the actual send.
        """
        if key:
            self.values[key] = value
        if len(self.stack) == 0:
            # All fields collected; send asynchronously so the UI thread
            # is not blocked by the SMTP round-trips.
            sublime.set_timeout_async(lambda: self.send_email(), 0)
        else:
            key = self.stack.pop(0)
            if config['interactive'][key]:
                # Ask the user; the callback re-enters with the value.
                on_done = lambda s: self.handle_input(key, s)
                sublime.active_window().show_input_panel(config['prompt'][key], config['default_value'][key], on_done, None, None)
            else:
                # Silently use the configured default.
                self.values[key] = config['default_value'][key]
                self.handle_input()

    def send_email(self):
        """Build the MIME message and send it via GMail's SMTP server."""
        # Parse the recipients list
        recipients = self.values['recipients']
        recipient_list = [recipients]
        for sep in [';', ',']:
            if sep in recipients:
                recipient_list = recipients.split(sep)
                break
        # Robustness: strip whitespace around each address (the defaults
        # use '; '-separated lists) and drop empty entries.
        recipient_list = [r.strip() for r in recipient_list if r.strip()]
        msg = MIMEText(str(self.values['body']), 'plain', 'UTF-8')
        msg['From'] = "\"%s\" <%s>" % (Header(str(self.values['display_name']), 'utf-8'), self.values['from'])
        msg['To'] = ', '.join(recipient_list)
        msg['Subject'] = Header(str(self.values['subject']), 'UTF-8')
        try:
            # Bug fix: this module does `from smtplib import SMTP`, so the
            # former `smtplib.SMTP(...)` raised a NameError that the broad
            # except below silently swallowed -- no mail was ever sent.
            mailServer = SMTP("smtp.gmail.com", 587)
            mailServer.ehlo()
            mailServer.starttls()
            mailServer.ehlo()
            mailServer.login(self.values['smtp_login'], self.values['smtp_passwd'])
            # Envelope sender is the plain address; msg['From'] carries
            # the display name.
            mailServer.sendmail(self.values['from'], recipient_list, msg.as_string())
            mailServer.close()
        except Exception as exc:
            message = "There was an error sending the email to: %s " % recipients
            print(message)
            # Surface the actual failure instead of hiding it completely.
            print(exc)
            sublime.status_message(message)
| jbjornson/SublimeGMail | GMail.py | Python | mit | 4,056 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SubnetsOperations(object):
"""SubnetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-06-01"
self.config = config
    def _delete_initial(
            self, resource_group_name, virtual_network_name, subnet_name, custom_headers=None, raw=False, **operation_config):
        """Issue the raw DELETE request for a subnet.

        AutoRest-generated helper used by :meth:`delete`: performs a single
        HTTP round-trip and leaves long-running-operation polling to the
        caller. Raises ``CloudError`` on any unexpected status code.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 200/202: delete accepted (async in progress); 204: already gone.
        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, virtual_network_name, subnet_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified subnet.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param subnet_name: The name of the subnet.
        :type subnet_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the server-side delete; raw=True hands the initial
        # response to the poller constructed below.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            subnet_name=subnet_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        # A successful delete returns no body; only wrap the response when
        # the caller asked for the raw variant.
        def get_long_running_output(response):
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Choose the polling strategy: ARM LRO polling (default), no
        # polling, or a caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def get(
            self, resource_group_name, virtual_network_name, subnet_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified subnet by virtual network and resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param subnet_name: The name of the subnet.
        :type subnet_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: Subnet or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_06_01.models.Subnet or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # $expand is optional; only send the query argument when provided.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        # Attach a unique client request id so the call can be correlated
        # with server-side diagnostics.
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Any non-200 status is surfaced as a CloudError that carries the
        # server-side request id for support.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Subnet', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def _create_or_update_initial(
            self, resource_group_name, virtual_network_name, subnet_name, subnet_parameters, custom_headers=None, raw=False, **operation_config):
        """Initial PUT of the create-or-update long-running operation.
        Returns the deserialized ``Subnet`` for a 200/201 response (or the
        raw response when ``raw`` is True). The public ``create_or_update``
        wraps this call in an LROPoller.
        """
        # Construct URL (shares metadata with the public create_or_update).
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(subnet_parameters, 'Subnet')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 200 = updated existing subnet, 201 = created new subnet; anything
        # else is raised as a CloudError.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Subnet', response)
        if response.status_code == 201:
            deserialized = self._deserialize('Subnet', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def create_or_update(
            self, resource_group_name, virtual_network_name, subnet_name, subnet_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a subnet in the specified virtual network.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param subnet_name: The name of the subnet.
        :type subnet_name: str
        :param subnet_parameters: Parameters supplied to the create or update
         subnet operation.
        :type subnet_parameters: ~azure.mgmt.network.v2017_06_01.models.Subnet
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns Subnet or
         ClientRawResponse<Subnet> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_06_01.models.Subnet]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_06_01.models.Subnet]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Fire the initial PUT; raw=True so the poller below can read the raw
        # response to drive the long-running operation.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            virtual_network_name=virtual_network_name,
            subnet_name=subnet_name,
            subnet_parameters=subnet_parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        # Completion callback: deserialize the final Subnet payload, wrapping
        # it in ClientRawResponse when the caller asked for the raw response.
        def get_long_running_output(response):
            deserialized = self._deserialize('Subnet', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        # Select the polling strategy: ARM LRO polling by default, a no-op
        # poller, or a caller-supplied polling object.
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'}
    def list(
            self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
        """Gets all subnets in a virtual network.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Subnet
        :rtype:
         ~azure.mgmt.network.v2017_06_01.models.SubnetPaged[~azure.mgmt.network.v2017_06_01.models.Subnet]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Paging callback invoked by SubnetPaged for each page: the first call
        # (next_link is None) builds the list URL; subsequent calls follow the
        # server-provided continuation link verbatim.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # The continuation link already embeds all query parameters.
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.SubnetPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # NOTE(review): header_dict is created but never populated before
            # being passed through — mirrors the upstream codegen output.
            header_dict = {}
            client_raw_response = models.SubnetPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'}
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_06_01/operations/subnets_operations.py | Python | mit | 17,678 |
#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
import os
import json
from boundary import MetricCreateBatch
from boundary import MetricDelete
from metric_test import MetricTest
from boundary import MetricExport
from cli_test import CLITest
from cli_runner import CLIRunner
class MetricCreateBatchTest(TestCase):
    """Exercises the metric batch-create CLI command end to end."""

    def setUp(self):
        self.cli = MetricCreateBatch()
        # Shared fixture: JSON file with the metric definitions the batch
        # command imports.
        self.filename = os.path.join(os.path.dirname(__file__), 'metric_import_data.json')

    def test_get_description(self):
        CLITest.check_description(self, self.cli)

    def test_cli_help(self):
        CLITest.check_cli_help(self, self.cli)

    def test_create_metric_batch(self):
        # Import the batch of metric definitions, then export them back and
        # verify one representative metric round-tripped with the expected
        # field values.
        runner_create = CLIRunner(MetricCreateBatch())
        runner_create.get_output(['-f', self.filename])
        runner_export = CLIRunner(MetricExport())
        export = runner_export.get_output(['-p', 'TEST_METRIC_IMPORT'])
        metrics = json.loads(export)
        MetricTest.metric_assert(self,
                                 metrics['TEST_METRIC_IMPORT_A'],
                                 'My Number of Files',
                                 'My Files',
                                 'My Number Of Files',
                                 'number',
                                 'SUM',
                                 2000,
                                 False)
        # TODO: delete the imported metrics (MetricDelete) once batch
        # cleanup is supported, so repeated runs start from a clean slate.
| boundary/pulse-api-cli | tests/unit/boundary/metric_create_batch_test.py | Python | apache-2.0 | 2,094 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Packaging script for the ``pwd`` distribution; running this module invokes
# setup() immediately (standard setuptools entry point).
# Prefer setuptools; fall back to plain distutils when it is unavailable
# (the classifiers below still advertise Python 2.6 support).
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Long description is assembled from the README plus the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')
requirements = [
    # TODO: put package requirements here
]
test_requirements = [
    # TODO: put package test requirements here
]
# NOTE(review): the distribution/package name 'pwd' shadows the POSIX
# stdlib ``pwd`` module once installed — verify this is intentional.
setup(
    name='pwd',
    version='0.3.5',
    description="pwd",
    long_description=readme + '\n\n' + history,
    author="Wes Turner",
    author_email='wes@wrd.nu',
    url='https://github.com/westurner/pwd',
    packages=[
        'pwd',
    ],
    package_dir={'pwd':
                 'pwd'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='pwd',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| westurner/pwd | setup.py | Python | bsd-3-clause | 1,441 |
import os
import subprocess
import time
import pytest
from kazoo.testing import KazooTestHarness
from kazoo.exceptions import (
AuthFailedError,
NoAuthError,
)
from kazoo.tests.util import CI_ZK_VERSION
class TestLegacySASLDigestAuthentication(KazooTestHarness):
    """DIGEST-MD5 SASL auth supplied through the legacy ``auth_data`` API."""
    def setUp(self):
        # Skip (not fail) when the optional SASL dependency is missing.
        try:
            import puresasl # NOQA
        except ImportError:
            pytest.skip("PureSASL not available.")
        # Tell the test ZooKeeper server which JAAS section to load.
        os.environ["ZOOKEEPER_JAAS_AUTH"] = "digest"
        self.setup_zookeeper()
        if CI_ZK_VERSION:
            version = CI_ZK_VERSION
        else:
            version = self.client.server_version()
        if not version or version < (3, 4):
            pytest.skip("Must use Zookeeper 3.4 or above")
    def tearDown(self):
        self.teardown_zookeeper()
    def test_connect_sasl_auth(self):
        from kazoo.security import make_acl
        # Credentials must match the server's JAAS config — assumed to
        # define user "jaasuser"; verify against the harness setup.
        username = "jaasuser"
        password = "jaas_password"
        acl = make_acl("sasl", credential=username, all=True)
        sasl_auth = "%s:%s" % (username, password)
        client = self._get_client(auth_data=[("sasl", sasl_auth)])
        client.start()
        try:
            client.create("/1", acl=(acl,))
            # give ZK a chance to copy data to other node
            time.sleep(0.1)
            # The unauthenticated harness client must be denied by the ACL.
            with pytest.raises(NoAuthError):
                self.client.get("/1")
        finally:
            client.delete("/1")
            client.stop()
            client.close()
    def test_invalid_sasl_auth(self):
        # Bad credentials must surface as AuthFailedError during start().
        client = self._get_client(auth_data=[("sasl", "baduser:badpassword")])
        with pytest.raises(AuthFailedError):
            client.start()
class TestSASLDigestAuthentication(KazooTestHarness):
    """DIGEST-MD5 SASL auth supplied through the ``sasl_options`` API."""
    def setUp(self):
        # Skip (not fail) when the optional SASL dependency is missing.
        try:
            import puresasl # NOQA
        except ImportError:
            pytest.skip("PureSASL not available.")
        # Tell the test ZooKeeper server which JAAS section to load.
        os.environ["ZOOKEEPER_JAAS_AUTH"] = "digest"
        self.setup_zookeeper()
        if CI_ZK_VERSION:
            version = CI_ZK_VERSION
        else:
            version = self.client.server_version()
        if not version or version < (3, 4):
            pytest.skip("Must use Zookeeper 3.4 or above")
    def tearDown(self):
        self.teardown_zookeeper()
    def test_connect_sasl_auth(self):
        from kazoo.security import make_acl
        # Credentials must match the server's JAAS config — assumed to
        # define user "jaasuser"; verify against the harness setup.
        username = "jaasuser"
        password = "jaas_password"
        acl = make_acl("sasl", credential=username, all=True)
        client = self._get_client(
            sasl_options={
                "mechanism": "DIGEST-MD5",
                "username": username,
                "password": password,
            }
        )
        client.start()
        try:
            client.create("/1", acl=(acl,))
            # give ZK a chance to copy data to other node
            time.sleep(0.1)
            # The unauthenticated harness client must be denied by the ACL.
            with pytest.raises(NoAuthError):
                self.client.get("/1")
        finally:
            client.delete("/1")
            client.stop()
            client.close()
    def test_invalid_sasl_auth(self):
        # Bad credentials must surface as AuthFailedError during start().
        client = self._get_client(
            sasl_options={
                "mechanism": "DIGEST-MD5",
                "username": "baduser",
                "password": "badpassword",
            }
        )
        with pytest.raises(AuthFailedError):
            client.start()
class TestSASLGSSAPIAuthentication(KazooTestHarness):
    """GSSAPI (Kerberos) SASL authentication against a JAAS-configured ZK."""
    def setUp(self):
        # All of these preconditions are environmental; skip (rather than
        # fail) when the SASL/Kerberos stack is not available.
        try:
            import puresasl # NOQA
        except ImportError:
            pytest.skip("PureSASL not available.")
        try:
            import kerberos # NOQA
        except ImportError:
            pytest.skip("Kerberos support not available.")
        if not os.environ.get("KRB5_TEST_ENV"):
            pytest.skip("Test Kerberos environ not setup.")
        # Tell the test ZooKeeper server to use its GSSAPI JAAS section.
        os.environ["ZOOKEEPER_JAAS_AUTH"] = "gssapi"
        self.setup_zookeeper()
        if CI_ZK_VERSION:
            version = CI_ZK_VERSION
        else:
            version = self.client.server_version()
        if not version or version < (3, 4):
            pytest.skip("Must use Zookeeper 3.4 or above")
    def tearDown(self):
        self.teardown_zookeeper()
    def test_connect_gssapi_auth(self):
        from kazoo.security import make_acl
        # Ensure we have a client ticket
        subprocess.check_call(
            [
                "kinit",
                "-kt",
                os.path.expandvars("${KRB5_TEST_ENV}/client.keytab"),
                "client",
            ]
        )
        acl = make_acl("sasl", credential="client@KAZOOTEST.ORG", all=True)
        client = self._get_client(sasl_options={"mechanism": "GSSAPI"})
        client.start()
        try:
            client.create("/1", acl=(acl,))
            # give ZK a chance to copy data to other node
            time.sleep(0.1)
            # The unauthenticated harness client must be denied by the ACL.
            with pytest.raises(NoAuthError):
                self.client.get("/1")
        finally:
            client.delete("/1")
            client.stop()
            client.close()
    def test_invalid_gssapi_auth(self):
        # Request a post-datated ticket, so that it is currently invalid.
        subprocess.check_call(
            [
                "kinit",
                "-kt",
                os.path.expandvars("${KRB5_TEST_ENV}/client.keytab"),
                "-s",
                "30min",
                "client",
            ]
        )
        client = self._get_client(sasl_options={"mechanism": "GSSAPI"})
        with pytest.raises(AuthFailedError):
            client.start()
| python-zk/kazoo | kazoo/tests/test_sasl.py | Python | apache-2.0 | 5,528 |
"""Untappd plugin forms."""
from django import forms
# Text input widget rendered full width (Bootstrap 2 "input-block-level").
WIDE_TEXT = forms.TextInput(attrs={"class": "input-block-level"})
class SiteSettingsForm(forms.Form):
    """Site-wide Untappd API credentials (both fields optional)."""
    client_id = forms.CharField(
        required=False, widget=WIDE_TEXT, help_text="Untappd API Client ID."
    )
    client_secret = forms.CharField(
        required=False, widget=WIDE_TEXT, help_text="Untappd API Client Secret"
    )
class UserSettingsForm(forms.Form):
    """Per-user Untappd preferences."""
    enable_checkins = forms.BooleanField(
        initial=True, required=False, help_text="Check in when you join a session."
    )
| Kegbot/kegbot-server | pykeg/contrib/untappd/forms.py | Python | gpl-2.0 | 568 |
"""
Created on Mar 13, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import functools
from models.User import User
from tornado.options import options
def authenticated(method):
    """Require a valid, non-hijacked session before running the handler.

    Redirects to the login page when the session is missing or invalid, to
    a locked-account page when the user is locked, and tears the session
    down whenever validation fails.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # A session must exist and must have been created from this client
        # IP (a mismatch suggests a stolen session cookie).
        if self.session is not None:
            if self.session.ip_address == self.request.remote_ip:
                if (
                    self.request.remote_ip
                    not in self.application.settings["blacklisted_ips"]
                ):
                    user = self.get_current_user()
                    if user is None:
                        # Session references a user that no longer exists.
                        self.session.delete()
                        self.clear_all_cookies()
                        self.redirect(self.application.settings["login_url"])
                    elif not user.locked:
                        return method(self, *args, **kwargs)
                    else:
                        # Account exists but has been administratively locked.
                        self.session.delete()
                        self.clear_all_cookies()
                        self.redirect("/403?locked=true")
                else:
                    # Blacklisted client: drop the session, force re-login.
                    self.session.delete()
                    self.clear_all_cookies()
                    self.redirect(self.application.settings["login_url"])
            else:
                # IP changed mid-session; treat as a possible hijack attempt.
                logging.warning(
                    "Session hijack attempt from %s?" % (self.request.remote_ip,)
                )
                self.session.delete()
                self.clear_all_cookies()
                self.redirect(self.application.settings["login_url"])
        else:
            self.redirect(self.application.settings["login_url"])
    return wrapper
def game_started(method):
    """Redirect non-admin users to /gamestatus while the game is stopped.

    NOTE(review): the wrapped handler still executes after the redirect is
    issued — this mirrors the original control flow; confirm it is intended.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        started = self.application.settings["game_started"]
        if not started:
            current = self.get_current_user()
            is_admin = current is not None and current.is_admin()
            if not is_admin:
                self.redirect("/gamestatus")
        return method(self, *args, **kwargs)
    return wrapper
def restrict_ip_address(method):
    """Allow the request only when it originates from a configured admin IP.

    An empty ``admin_ips`` list disables the restriction entirely; otherwise
    unknown clients are logged and sent to the forbidden page.
    """
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        admin_ips = self.application.settings["admin_ips"]
        if not admin_ips or self.request.remote_ip in admin_ips:
            return method(self, *args, **kwargs)
        logging.warning(
            "Attempted unauthorized access from %s to %s"
            % (self.request.remote_ip, self.request.uri)
        )
        self.redirect(self.application.settings["forbidden_url"])
    return wrapper
def blacklist_ips(method):
    """Serve the public login page instead of the handler for blacklisted IPs."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        banned = self.application.settings["blacklisted_ips"]
        if self.request.remote_ip in banned:
            self.render("public/login.html", errors=None)
            return None
        return method(self, *args, **kwargs)
    return wrapper
def authorized(permission):
    """Decorator factory: require *permission* on the session's user.

    Missing session, unknown handle, or missing permission all log the
    attempt and redirect to the forbidden page.
    """
    def _decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            session = self.session
            if session is not None:
                user = User.by_handle(session["handle"])
                if user is not None and user.has_permission(permission):
                    return method(self, *args, **kwargs)
            logging.warning(
                "Attempted unauthorized access from %s to %s"
                % (self.request.remote_ip, self.request.uri)
            )
            self.redirect(self.application.settings["forbidden_url"])
        return wrapper
    return _decorator
def debug(method):
    """Log entry to and exit from the wrapped method at DEBUG level.

    Assumes the first positional argument is the receiving instance
    (i.e. the decorator is applied to methods, not free functions).
    """
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        owner = args[0].__class__.__name__
        logging.debug("Call to -> %s.%s()" % (owner, method.__name__))
        result = method(*args, **kwargs)
        logging.debug("Return from <- %s.%s()" % (owner, method.__name__))
        return result
    return wrapper
def has_item(name):
    """Decorator factory: require that the current user's team owns *name*.

    Users without the item (or no user at all) are logged and redirected
    to the forbidden page.
    """
    def _decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            user = self.get_current_user()
            if user is None or not user.has_item(name):
                logging.warning(
                    "Attempted unauthorized access from %s to %s"
                    % (self.request.remote_ip, self.request.uri)
                )
                self.redirect(self.application.settings["forbidden_url"])
                return None
            return method(self, *args, **kwargs)
        return wrapper
    return _decorator
def use_bots(method):
    """Render a 404 page unless the bot feature is enabled in server options."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not options.use_bots:
            self.render("public/404.html")
            return None
        return method(self, *args, **kwargs)
    return wrapper
def use_black_market(method):
    """Render a 404 page unless the black market feature is enabled in options."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        if not options.use_black_market:
            self.render("public/404.html")
            return None
        return method(self, *args, **kwargs)
    return wrapper
| moloch--/RootTheBox | libs/SecurityDecorators.py | Python | apache-2.0 | 6,035 |
"""add_staging_tables
Revision ID: 50faca665f9c
Revises: 7833b2378161
Create Date: 2016-08-01 13:52:53.038526
"""
# revision identifiers, used by Alembic.
# ``down_revision`` is the migration this one applies on top of.
revision = '50faca665f9c'
down_revision = '7833b2378161'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to this module's engine-specific upgrade function."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to this module's engine-specific downgrade function."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Create the data-broker staging tables and their indexes.

    Creates: appropriation, award_financial, award_financial_assistance,
    cgac, object_class, object_class_program_activity, program_activity,
    sf_133 and tas_lookup.  The body was auto-generated by Alembic, so
    the column lists are kept verbatim; do not hand-edit individual
    columns without regenerating.
    """
    ### commands auto generated by Alembic - please adjust! ###
    # Appropriation staging rows, keyed by submission/job and indexed on
    # the TAS string.
    op.create_table('appropriation',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('appropriation_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('adjustments_to_unobligated_cpe', sa.Numeric(), nullable=True),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('borrowing_authority_amount_cpe', sa.Numeric(), nullable=True),
    sa.Column('budget_authority_appropria_cpe', sa.Numeric(), nullable=True),
    sa.Column('budget_authority_available_cpe', sa.Numeric(), nullable=True),
    sa.Column('budget_authority_unobligat_fyb', sa.Numeric(), nullable=True),
    sa.Column('contract_authority_amount_cpe', sa.Numeric(), nullable=True),
    sa.Column('deobligations_recoveries_r_cpe', sa.Numeric(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('gross_outlay_amount_by_tas_cpe', sa.Numeric(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('obligations_incurred_total_cpe', sa.Numeric(), nullable=True),
    sa.Column('other_budgetary_resources_cpe', sa.Numeric(), nullable=True),
    sa.Column('spending_authority_from_of_cpe', sa.Numeric(), nullable=True),
    sa.Column('status_of_budgetary_resour_cpe', sa.Numeric(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.Column('unobligated_balance_cpe', sa.Numeric(), nullable=True),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('appropriation_id')
    )
    op.create_index(op.f('ix_appropriation_job_id'), 'appropriation', ['job_id'], unique=False)
    op.create_index(op.f('ix_appropriation_submission_id'), 'appropriation', ['submission_id'], unique=False)
    op.create_index(op.f('ix_appropriation_tas'), 'appropriation', ['tas'], unique=False)
    # Award-level financial rows; indexed on fain/piid/uri award ids and
    # on the (tas, object_class, program_activity_code) triple.
    op.create_table('award_financial',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('award_financial_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('by_direct_reimbursable_fun', sa.Text(), nullable=True),
    sa.Column('deobligations_recov_by_awa_cpe', sa.Numeric(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('fain', sa.Text(), nullable=True),
    sa.Column('gross_outlay_amount_by_awa_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlay_amount_by_awa_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_fyb', sa.Numeric(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('object_class', sa.Text(), nullable=True),
    sa.Column('obligations_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('obligations_incurred_byawa_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('parent_award_id', sa.Text(), nullable=True),
    sa.Column('piid', sa.Text(), nullable=True),
    sa.Column('program_activity_code', sa.Text(), nullable=True),
    sa.Column('program_activity_name', sa.Text(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.Column('transaction_obligated_amou', sa.Numeric(), nullable=True),
    sa.Column('uri', sa.Text(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl483100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl483200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl490200_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl493100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('award_financial_id')
    )
    op.create_index(op.f('ix_award_financial_fain'), 'award_financial', ['fain'], unique=False)
    op.create_index(op.f('ix_award_financial_job_id'), 'award_financial', ['job_id'], unique=False)
    op.create_index(op.f('ix_award_financial_piid'), 'award_financial', ['piid'], unique=False)
    op.create_index(op.f('ix_award_financial_submission_id'), 'award_financial', ['submission_id'], unique=False)
    op.create_index('ix_award_financial_tas_oc_pa', 'award_financial', ['tas', 'object_class', 'program_activity_code'], unique=False)
    op.create_index(op.f('ix_award_financial_uri'), 'award_financial', ['uri'], unique=False)
    # Financial-assistance award rows; indexed on fain and uri.
    op.create_table('award_financial_assistance',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('award_financial_assistance_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('action_date', sa.Text(), nullable=True),
    sa.Column('action_type', sa.Text(), nullable=True),
    sa.Column('assistance_type', sa.Text(), nullable=True),
    sa.Column('award_description', sa.Text(), nullable=True),
    sa.Column('awardee_or_recipient_legal', sa.Text(), nullable=True),
    sa.Column('awardee_or_recipient_uniqu', sa.Text(), nullable=True),
    sa.Column('awarding_agency_code', sa.Text(), nullable=True),
    sa.Column('awarding_agency_name', sa.Text(), nullable=True),
    sa.Column('awarding_office_code', sa.Text(), nullable=True),
    sa.Column('awarding_office_name', sa.Text(), nullable=True),
    sa.Column('awarding_sub_tier_agency_c', sa.Text(), nullable=True),
    sa.Column('awarding_sub_tier_agency_n', sa.Text(), nullable=True),
    sa.Column('award_modification_amendme', sa.Text(), nullable=True),
    sa.Column('business_funds_indicator', sa.Text(), nullable=True),
    sa.Column('business_types', sa.Text(), nullable=True),
    sa.Column('cfda_number', sa.Text(), nullable=True),
    sa.Column('cfda_title', sa.Text(), nullable=True),
    sa.Column('correction_late_delete_ind', sa.Text(), nullable=True),
    sa.Column('face_value_loan_guarantee', sa.Numeric(), nullable=True),
    sa.Column('fain', sa.Text(), nullable=True),
    sa.Column('federal_action_obligation', sa.Numeric(), nullable=True),
    sa.Column('fiscal_year_and_quarter_co', sa.Text(), nullable=True),
    sa.Column('funding_agency_code', sa.Text(), nullable=True),
    sa.Column('funding_agency_name', sa.Text(), nullable=True),
    sa.Column('funding_office_name', sa.Text(), nullable=True),
    sa.Column('funding_office_code', sa.Text(), nullable=True),
    sa.Column('funding_sub_tier_agency_co', sa.Text(), nullable=True),
    sa.Column('funding_sub_tier_agency_na', sa.Text(), nullable=True),
    sa.Column('legal_entity_address_line1', sa.Text(), nullable=True),
    sa.Column('legal_entity_address_line2', sa.Text(), nullable=True),
    sa.Column('legal_entity_address_line3', sa.Text(), nullable=True),
    sa.Column('legal_entity_city_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_city_name', sa.Text(), nullable=True),
    sa.Column('legal_entity_congressional', sa.Text(), nullable=True),
    sa.Column('legal_entity_country_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_county_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_county_name', sa.Text(), nullable=True),
    sa.Column('legal_entity_foreign_city', sa.Text(), nullable=True),
    sa.Column('legal_entity_foreign_posta', sa.Text(), nullable=True),
    sa.Column('legal_entity_foreign_provi', sa.Text(), nullable=True),
    sa.Column('legal_entity_state_code', sa.Text(), nullable=True),
    sa.Column('legal_entity_state_name', sa.Text(), nullable=True),
    sa.Column('legal_entity_zip5', sa.Text(), nullable=True),
    sa.Column('legal_entity_zip_last4', sa.Text(), nullable=True),
    sa.Column('non_federal_funding_amount', sa.Numeric(), nullable=True),
    sa.Column('original_loan_subsidy_cost', sa.Numeric(), nullable=True),
    sa.Column('period_of_performance_curr', sa.Text(), nullable=True),
    sa.Column('period_of_performance_star', sa.Text(), nullable=True),
    sa.Column('place_of_performance_city', sa.Text(), nullable=True),
    sa.Column('place_of_performance_code', sa.Text(), nullable=True),
    sa.Column('place_of_performance_congr', sa.Text(), nullable=True),
    sa.Column('place_of_perform_country_c', sa.Text(), nullable=True),
    sa.Column('place_of_perform_county_na', sa.Text(), nullable=True),
    sa.Column('place_of_performance_forei', sa.Text(), nullable=True),
    sa.Column('place_of_perform_state_nam', sa.Text(), nullable=True),
    sa.Column('place_of_performance_zip4a', sa.Text(), nullable=True),
    sa.Column('record_type', sa.Integer(), nullable=True),
    sa.Column('sai_number', sa.Text(), nullable=True),
    sa.Column('total_funding_amount', sa.Numeric(), nullable=True),
    sa.Column('uri', sa.Text(), nullable=True),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('award_financial_assistance_id')
    )
    op.create_index(op.f('ix_award_financial_assistance_fain'), 'award_financial_assistance', ['fain'], unique=False)
    op.create_index(op.f('ix_award_financial_assistance_job_id'), 'award_financial_assistance', ['job_id'], unique=False)
    op.create_index(op.f('ix_award_financial_assistance_submission_id'), 'award_financial_assistance', ['submission_id'], unique=False)
    op.create_index(op.f('ix_award_financial_assistance_uri'), 'award_financial_assistance', ['uri'], unique=False)
    # CGAC agency lookup; cgac_code is unique.
    op.create_table('cgac',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('cgac_id', sa.Integer(), nullable=False),
    sa.Column('cgac_code', sa.Text(), nullable=False),
    sa.Column('agency_name', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('cgac_id')
    )
    op.create_index(op.f('ix_cgac_cgac_code'), 'cgac', ['cgac_code'], unique=True)
    # Object-class lookup; object_class_code is unique.
    op.create_table('object_class',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('object_class_id', sa.Integer(), nullable=False),
    sa.Column('object_class_code', sa.Text(), nullable=False),
    sa.Column('object_class_name', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('object_class_id')
    )
    op.create_index(op.f('ix_object_class_object_class_code'), 'object_class', ['object_class_code'], unique=True)
    # Object class / program activity staging rows; indexed on the
    # (tas, object_class, program_activity_code) triple.
    op.create_table('object_class_program_activity',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('object_class_program_activity_id', sa.Integer(), nullable=False),
    sa.Column('submission_id', sa.Integer(), nullable=False),
    sa.Column('job_id', sa.Integer(), nullable=False),
    sa.Column('row_number', sa.Integer(), nullable=False),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('by_direct_reimbursable_fun', sa.Text(), nullable=True),
    sa.Column('deobligations_recov_by_pro_cpe', sa.Numeric(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('gross_outlay_amount_by_pro_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlay_amount_by_pro_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_delivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_cpe', sa.Numeric(), nullable=True),
    sa.Column('gross_outlays_undelivered_fyb', sa.Numeric(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('object_class', sa.Text(), nullable=True),
    sa.Column('obligations_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('obligations_incurred_by_pr_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('obligations_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('program_activity_code', sa.Text(), nullable=True),
    sa.Column('program_activity_name', sa.Text(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480100_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl480200_undelivered_or_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl483100_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl483200_undelivered_or_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl487200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl488200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490100_delivered_orde_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl490200_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl490800_authority_outl_fyb', sa.Numeric(), nullable=True),
    sa.Column('ussgl493100_delivered_orde_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497100_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl497200_downward_adjus_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498100_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('ussgl498200_upward_adjustm_cpe', sa.Numeric(), nullable=True),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('is_first_quarter', sa.Boolean(), server_default='False', nullable=False),
    sa.PrimaryKeyConstraint('object_class_program_activity_id')
    )
    op.create_index(op.f('ix_object_class_program_activity_job_id'), 'object_class_program_activity', ['job_id'], unique=False)
    op.create_index(op.f('ix_object_class_program_activity_submission_id'), 'object_class_program_activity', ['submission_id'], unique=False)
    op.create_index('ix_oc_pa_tas_oc_pa', 'object_class_program_activity', ['tas', 'object_class', 'program_activity_code'], unique=False)
    # Program-activity lookup with a unique composite index.
    op.create_table('program_activity',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('program_activity_id', sa.Integer(), nullable=False),
    sa.Column('budget_year', sa.Text(), nullable=False),
    sa.Column('agency_id', sa.Text(), nullable=False),
    sa.Column('allocation_transfer_id', sa.Text(), nullable=True),
    sa.Column('account_number', sa.Text(), nullable=False),
    sa.Column('program_activity_code', sa.Text(), nullable=False),
    sa.Column('program_activity_name', sa.Text(), nullable=False),
    sa.PrimaryKeyConstraint('program_activity_id')
    )
    op.create_index('ix_pa_tas_pa', 'program_activity', ['budget_year', 'agency_id', 'allocation_transfer_id', 'account_number', 'program_activity_code', 'program_activity_name'], unique=True)
    # SF-133 report line amounts; (tas, line) is unique.
    op.create_table('sf_133',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('sf133_id', sa.Integer(), nullable=False),
    sa.Column('agency_identifier', sa.Text(), nullable=False),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availa', sa.Text(), nullable=True),
    sa.Column('ending_period_of_availabil', sa.Text(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=False),
    sa.Column('sub_account_code', sa.Text(), nullable=False),
    sa.Column('tas', sa.Text(), nullable=False),
    sa.Column('fiscal_year', sa.Text(), nullable=True),
    sa.Column('period', sa.Text(), nullable=True),
    sa.Column('line', sa.Integer(), nullable=False),
    sa.Column('amount', sa.Numeric(), server_default='0', nullable=False),
    sa.PrimaryKeyConstraint('sf133_id')
    )
    op.create_index('ix_sf_133_tas', 'sf_133', ['tas', 'line'], unique=True)
    # TAS component lookup; the full component tuple is uniquely indexed,
    # plus one single-column index per component.
    op.create_table('tas_lookup',
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('tas_id', sa.Integer(), nullable=False),
    sa.Column('allocation_transfer_agency', sa.Text(), nullable=True),
    sa.Column('agency_identifier', sa.Text(), nullable=True),
    sa.Column('beginning_period_of_availability', sa.Text(), nullable=True),
    sa.Column('ending_period_of_availability', sa.Text(), nullable=True),
    sa.Column('availability_type_code', sa.Text(), nullable=True),
    sa.Column('main_account_code', sa.Text(), nullable=True),
    sa.Column('sub_account_code', sa.Text(), nullable=True),
    sa.PrimaryKeyConstraint('tas_id')
    )
    op.create_index('ix_tas', 'tas_lookup', ['allocation_transfer_agency', 'agency_identifier', 'beginning_period_of_availability', 'ending_period_of_availability', 'availability_type_code', 'main_account_code', 'sub_account_code'], unique=True)
    op.create_index(op.f('ix_tas_lookup_agency_identifier'), 'tas_lookup', ['agency_identifier'], unique=False)
    op.create_index(op.f('ix_tas_lookup_allocation_transfer_agency'), 'tas_lookup', ['allocation_transfer_agency'], unique=False)
    op.create_index(op.f('ix_tas_lookup_availability_type_code'), 'tas_lookup', ['availability_type_code'], unique=False)
    op.create_index(op.f('ix_tas_lookup_beginning_period_of_availability'), 'tas_lookup', ['beginning_period_of_availability'], unique=False)
    op.create_index(op.f('ix_tas_lookup_ending_period_of_availability'), 'tas_lookup', ['ending_period_of_availability'], unique=False)
    op.create_index(op.f('ix_tas_lookup_main_account_code'), 'tas_lookup', ['main_account_code'], unique=False)
    op.create_index(op.f('ix_tas_lookup_sub_account_code'), 'tas_lookup', ['sub_account_code'], unique=False)
    ### end Alembic commands ###
def downgrade_data_broker():
    """Drop the staging tables created by :func:`upgrade_data_broker`.

    Indexes are dropped before their table, and tables are removed in the
    reverse order of creation.  Auto-generated by Alembic; keep the order
    intact.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_tas_lookup_sub_account_code'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_main_account_code'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_ending_period_of_availability'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_beginning_period_of_availability'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_availability_type_code'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_allocation_transfer_agency'), table_name='tas_lookup')
    op.drop_index(op.f('ix_tas_lookup_agency_identifier'), table_name='tas_lookup')
    op.drop_index('ix_tas', table_name='tas_lookup')
    op.drop_table('tas_lookup')
    op.drop_index('ix_sf_133_tas', table_name='sf_133')
    op.drop_table('sf_133')
    op.drop_index('ix_pa_tas_pa', table_name='program_activity')
    op.drop_table('program_activity')
    op.drop_index('ix_oc_pa_tas_oc_pa', table_name='object_class_program_activity')
    op.drop_index(op.f('ix_object_class_program_activity_submission_id'), table_name='object_class_program_activity')
    op.drop_index(op.f('ix_object_class_program_activity_job_id'), table_name='object_class_program_activity')
    op.drop_table('object_class_program_activity')
    op.drop_index(op.f('ix_object_class_object_class_code'), table_name='object_class')
    op.drop_table('object_class')
    op.drop_index(op.f('ix_cgac_cgac_code'), table_name='cgac')
    op.drop_table('cgac')
    op.drop_index(op.f('ix_award_financial_assistance_uri'), table_name='award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_assistance_submission_id'), table_name='award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_assistance_job_id'), table_name='award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_assistance_fain'), table_name='award_financial_assistance')
    op.drop_table('award_financial_assistance')
    op.drop_index(op.f('ix_award_financial_uri'), table_name='award_financial')
    op.drop_index('ix_award_financial_tas_oc_pa', table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_submission_id'), table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_piid'), table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_job_id'), table_name='award_financial')
    op.drop_index(op.f('ix_award_financial_fain'), table_name='award_financial')
    op.drop_table('award_financial')
    op.drop_index(op.f('ix_appropriation_tas'), table_name='appropriation')
    op.drop_index(op.f('ix_appropriation_submission_id'), table_name='appropriation')
    op.drop_index(op.f('ix_appropriation_job_id'), table_name='appropriation')
    op.drop_table('appropriation')
    ### end Alembic commands ###
| chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend | dataactcore/migrations/versions/50faca665f9c_add_staging_tables.py | Python | cc0-1.0 | 24,606 |
from chiplotle.hpgl.abstract.hpglprimitive import _HPGLPrimitive
from chiplotle.geometry.core.coordinatearray import CoordinateArray
class _TwoPoint(_HPGLPrimitive):
    '''Abstract class for commands with 2 coordinate pairs: x1, y1, x2, y2.'''

    def __init__(self, coords=None):
        self.coords = coords
        # Validation happens after assignment so the setter has already
        # normalized the input into a CoordinateArray.
        if self.coords and len(self.coords) != 2:
            raise ValueError('Only two coordinate pairs allowed.')

    def _get_coords(self):
        return self._coords

    def _set_coords(self, arg):
        # Normalize any coordinate-pair input into a CoordinateArray.
        self._coords = CoordinateArray(arg)

    # NOTE: this replaces the Python-2-only ``@apply`` +
    # ``property(**locals())`` idiom, which breaks on Python 3 because the
    # ``apply`` builtin was removed.  The resulting property (fget/fset,
    # no fdel) is behaviorally identical.
    coords = property(_get_coords, _set_coords)

    @property
    def format(self):
        '''HPGL string for this command:
        ``<name><x1>,<y1>,<x2>,<y2><terminator>`` when coordinates are
        set, otherwise just ``<name><terminator>``.'''
        if self.coords:
            coords = self.coords[0].xy + self.coords[1].xy
            coords = ','.join(str(c) for c in coords)
            return '%s%s%s' % (self._name, coords, _HPGLPrimitive._terminator)
        else:
            return '%s%s' % (self._name, _HPGLPrimitive._terminator)
| drepetto/chiplotle | chiplotle/hpgl/abstract/twopoint.py | Python | gpl-3.0 | 994 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-06 08:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rework ProductOrder image-size fields: drop the crop width/height
    pair, record the source image's original dimensions instead, and make
    the crop origin coordinates optional (null/blank allowed)."""
    dependencies = [
        ('hkm', '0003_productorder_product_name'),
    ]
    operations = [
        # The cropped-image dimensions are no longer stored.
        migrations.RemoveField(
            model_name='productorder',
            name='crop_image_height',
        ),
        migrations.RemoveField(
            model_name='productorder',
            name='crop_image_width',
        ),
        # Instead, keep the original image's height/width (nullable).
        migrations.AddField(
            model_name='productorder',
            name='original_height',
            field=models.IntegerField(blank=True, null=True, verbose_name='Original image height'),
        ),
        migrations.AddField(
            model_name='productorder',
            name='original_width',
            field=models.IntegerField(blank=True, null=True, verbose_name='Original image width'),
        ),
        # Crop origin becomes optional.
        migrations.AlterField(
            model_name='productorder',
            name='crop_x',
            field=models.IntegerField(blank=True, null=True, verbose_name='Crop x coordinate from left'),
        ),
        migrations.AlterField(
            model_name='productorder',
            name='crop_y',
            field=models.IntegerField(blank=True, null=True, verbose_name='Crop y coordinate from top'),
        ),
    ]
| andersinno/kuvaselaamo | hkm/migrations/0004_auto_20170206_1023.py | Python | mit | 1,395 |
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Names of the known work queues.  The "-ews" entries appear to be
# per-platform early-warning-system bots (chromium/qt/mac/gtk) alongside
# the style and commit queues -- confirm against the queue server's
# consumers before renaming any entry, since these strings are identifiers.
queues = [
    "style-queue",
    "chromium-ews",
    "qt-ews",
    "mac-ews",
    "gtk-ews",
    "commit-queue",
]
| cattleprod/samsung-kernel-gt-i9100 | external/webkit/WebKitTools/QueueStatusServer/model/queues.py | Python | gpl-2.0 | 1,648 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.data.experimental` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
See [Importing Data](https://tensorflow.org/guide/datasets) for an overview.
@@AutoShardPolicy
@@Counter
@@CheckpointInputPipelineHook
@@CsvDataset
@@DatasetStructure
@@DistributeOptions
@@ExternalStatePolicy
@@MapVectorizationOptions
@@OptimizationOptions
@@Optional
@@OptionalStructure
@@RaggedTensorStructure
@@RandomDataset
@@Reducer
@@SparseTensorStructure
@@SqlDataset
@@StatsAggregator
@@StatsOptions
@@Structure
@@TFRecordWriter
@@TensorArrayStructure
@@TensorStructure
@@ThreadingOptions
@@assert_cardinality
@@bucket_by_sequence_length
@@bytes_produced_stats
@@cardinality
@@choose_from_datasets
@@copy_to_device
@@dense_to_ragged_batch
@@dense_to_sparse_batch
@@distribute
@@enable_debug_mode
@@enumerate_dataset
@@from_variant
@@get_next_as_optional
@@get_single_element
@@get_structure
@@group_by_reducer
@@group_by_window
@@ignore_errors
@@latency_stats
@@load
@@make_batched_features_dataset
@@make_csv_dataset
@@make_saveable_from_iterator
@@map_and_batch
@@map_and_batch_with_legacy_function
@@parallel_interleave
@@parse_example_dataset
@@prefetch_to_device
@@rejection_resample
@@sample_from_datasets
@@save
@@scan
@@shuffle_and_repeat
@@snapshot
@@take_while
@@to_variant
@@unbatch
@@unique
@@AUTOTUNE
@@INFINITE_CARDINALITY
@@SHARD_HINT
@@UNKNOWN_CARDINALITY
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.data.experimental import service
from tensorflow.python.data.experimental.ops.batching import dense_to_ragged_batch
from tensorflow.python.data.experimental.ops.batching import dense_to_sparse_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch
from tensorflow.python.data.experimental.ops.batching import map_and_batch_with_legacy_function
from tensorflow.python.data.experimental.ops.batching import unbatch
from tensorflow.python.data.experimental.ops.cardinality import assert_cardinality
from tensorflow.python.data.experimental.ops.cardinality import cardinality
from tensorflow.python.data.experimental.ops.cardinality import INFINITE as INFINITE_CARDINALITY
from tensorflow.python.data.experimental.ops.cardinality import UNKNOWN as UNKNOWN_CARDINALITY
from tensorflow.python.data.experimental.ops.counter import Counter
from tensorflow.python.data.experimental.ops.distribute import SHARD_HINT
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.experimental.ops.distribute_options import DistributeOptions
from tensorflow.python.data.experimental.ops.distribute_options import ExternalStatePolicy
from tensorflow.python.data.experimental.ops.enumerate_ops import enumerate_dataset
from tensorflow.python.data.experimental.ops.error_ops import ignore_errors
from tensorflow.python.data.experimental.ops.get_single_element import get_single_element
from tensorflow.python.data.experimental.ops.grouping import bucket_by_sequence_length
from tensorflow.python.data.experimental.ops.grouping import group_by_reducer
from tensorflow.python.data.experimental.ops.grouping import group_by_window
from tensorflow.python.data.experimental.ops.grouping import Reducer
from tensorflow.python.data.experimental.ops.interleave_ops import choose_from_datasets
from tensorflow.python.data.experimental.ops.interleave_ops import parallel_interleave
from tensorflow.python.data.experimental.ops.interleave_ops import sample_from_datasets
from tensorflow.python.data.experimental.ops.io import load
from tensorflow.python.data.experimental.ops.io import save
from tensorflow.python.data.experimental.ops.iterator_ops import CheckpointInputPipelineHook
from tensorflow.python.data.experimental.ops.iterator_ops import make_saveable_from_iterator
from tensorflow.python.data.experimental.ops.optimization_options import MapVectorizationOptions
from tensorflow.python.data.experimental.ops.optimization_options import OptimizationOptions
from tensorflow.python.data.experimental.ops.parsing_ops import parse_example_dataset
from tensorflow.python.data.experimental.ops.prefetching_ops import copy_to_device
from tensorflow.python.data.experimental.ops.prefetching_ops import prefetch_to_device
from tensorflow.python.data.experimental.ops.random_ops import RandomDataset
from tensorflow.python.data.experimental.ops.readers import CsvDataset
from tensorflow.python.data.experimental.ops.readers import make_batched_features_dataset
from tensorflow.python.data.experimental.ops.readers import make_csv_dataset
from tensorflow.python.data.experimental.ops.readers import SqlDataset
from tensorflow.python.data.experimental.ops.resampling import rejection_resample
from tensorflow.python.data.experimental.ops.scan_ops import scan
from tensorflow.python.data.experimental.ops.shuffle_ops import shuffle_and_repeat
from tensorflow.python.data.experimental.ops.snapshot import snapshot
from tensorflow.python.data.experimental.ops.stats_aggregator import StatsAggregator
from tensorflow.python.data.experimental.ops.stats_ops import bytes_produced_stats
from tensorflow.python.data.experimental.ops.stats_ops import latency_stats
from tensorflow.python.data.experimental.ops.stats_options import StatsOptions
from tensorflow.python.data.experimental.ops.take_while_ops import take_while
from tensorflow.python.data.experimental.ops.threading_options import ThreadingOptions
from tensorflow.python.data.experimental.ops.unique import unique
from tensorflow.python.data.experimental.ops.writers import TFRecordWriter
from tensorflow.python.data.ops.dataset_ops import AUTOTUNE
from tensorflow.python.data.ops.dataset_ops import DatasetSpec as DatasetStructure
from tensorflow.python.data.ops.dataset_ops import enable_debug_mode
from tensorflow.python.data.ops.dataset_ops import from_variant
from tensorflow.python.data.ops.dataset_ops import get_structure
from tensorflow.python.data.ops.dataset_ops import to_variant
from tensorflow.python.data.ops.iterator_ops import get_next_as_optional
from tensorflow.python.data.ops.optional_ops import Optional
from tensorflow.python.data.ops.optional_ops import OptionalSpec as OptionalStructure
from tensorflow.python.data.util.structure import _RaggedTensorStructure as RaggedTensorStructure
from tensorflow.python.data.util.structure import _SparseTensorStructure as SparseTensorStructure
from tensorflow.python.data.util.structure import _TensorArrayStructure as TensorArrayStructure
from tensorflow.python.data.util.structure import _TensorStructure as TensorStructure
from tensorflow.python.framework.type_spec import TypeSpec as Structure
# pylint: enable=unused-import
from tensorflow.python.util.all_util import remove_undocumented
# Symbols listed here are kept in the public namespace even though they
# carry no docstring; remove_undocumented strips every other
# undocumented name from this module's public API.
_allowed_symbols = [
    "service",
]
remove_undocumented(__name__, _allowed_symbols)
| petewarden/tensorflow | tensorflow/python/data/experimental/__init__.py | Python | apache-2.0 | 7,881 |
# Python 3 program for soundscape generation. (C) P.B.L. Meijer 2015
# Direct port of the hificode.c C program
# Last update: October 6, 2015; released under the Creative
# Commons Attribution 4.0 International License (CC BY 4.0),
# see http://www.seeingwithsound.com/im2sound.htm for details
#
# Beware that this program runs excruciatingly slowly under Python,
# while the PyPy python JIT compiler does not (yet) support OpenCV
import math
import os
import struct
import sys
import wave
import cv2 as cv
import numpy as np
# User-tunable conversion parameters.  NOTE(review): most flags are
# C-style 0/1 ints while use_exponential is a real bool; the Soundscape
# class below keeps its own keyword-argument copies of these values.
file_name = 'hificode.wav' # User-defined parameters
min_frequency = 500 # Lowest frequency (Hz) in soundscape
max_frequency = 5000 # Highest frequency (Hz)
sample_frequency = 44100 # Sample frequency (Hz)
image_to_sound_conversion_time = 1.05 # Image to sound conversion time (s)
use_exponential = False # Linear|Exponential=0|1 distribution
hifi = 1 # 8-bit|16-bit=0|1 sound quality
stereo = 1 # Mono|Stereo=0|1 sound selection
delay = 1 # Nodelay|Delay=0|1 model (stereo=1)
relative_fade = 1 # Relative fade No|Yes=0|1 (stereo=1)
diffraction = 1 # Diffraction No|Yes=0|1 (stereo=1)
use_b_spline = 1 # Rectangular|B-spline=0|1 time window
gray_levels = 0 # 16|2-level=0|1 gray format in P[][]
use_camera = 1 # Use OpenCV camera input No|Yes=0|1
use_screen = 1 # Screen view for debugging No|Yes=0|1
class Soundscape(object):
    """Configuration and derived state for image-to-sound conversion.

    An instance mirrors the module-level globals used by main(): it
    precomputes sample counts, the per-row frequency/phase tables and
    the pixel matrix, but performs no I/O itself.
    """

    # Linear congruential generator coefficients (same values as the
    # module-level rnd() helper uses).
    IR = 0
    IA = 9301
    IC = 49297
    IM = 233280
    TwoPi = 6.283185307179586476925287
    WHITE = 1.00
    BLACK = 0.00

    def __init__(self, file_name='hificode.wav', min_frequency=500, max_frequency=5000, sample_frequency=44100,
                 image_to_sound_conversion_time=1.05, is_exponential=False, hifi=True, stereo=True, delay=True,
                 relative_fade=True, diffraction=True, use_b_spline=True, gray_levels=16, use_camera=True,
                 use_screen=True):
        """Precompute conversion parameters.

        :param file_name: output WAV file path.
        :type file_name: str
        :param min_frequency: lowest soundscape frequency (Hz).
        :type min_frequency: int
        :param max_frequency: highest soundscape frequency (Hz).
        :type max_frequency: int
        :param sample_frequency: WAV sample rate (Hz).
        :type sample_frequency: int
        :param image_to_sound_conversion_time: seconds of audio per image.
        :type image_to_sound_conversion_time: float
        :param is_exponential: exponential (True) or linear (False)
            frequency distribution over the rows.
        :type is_exponential: bool
        :param hifi: 16-bit (True) or 8-bit (False) samples.
        :type hifi: bool
        :param stereo: stereo (True) or mono (False) output.
        :type stereo: bool
        :param delay: apply the binaural time-delay model (stereo only).
        :type delay: bool
        :param relative_fade: apply the relative fade model (stereo only).
        :type relative_fade: bool
        :param diffraction: apply the azimuth diffraction model (stereo only).
        :type diffraction: bool
        :param use_b_spline: B-spline (True) or rectangular (False) time window.
        :type use_b_spline: bool
        :param gray_levels: number of gray levels in the pixel matrix.
        :type gray_levels: int
        :param use_camera: use OpenCV camera input (176x64 grid) instead of
            the fixed 64x64 grid.
        :type use_camera: bool
        :param use_screen: show debug windows.
        :type use_screen: bool
        """
        self.file_name = file_name
        self.min_frequency = min_frequency
        self.max_frequency = max_frequency
        self.sample_frequency = sample_frequency
        self.image_to_sound_conversion_time = image_to_sound_conversion_time
        self.is_exponential = is_exponential
        self.hifi = hifi
        self.stereo = stereo
        self.delay = delay
        self.relative_fade = relative_fade
        self.diffraction = diffraction
        self.use_b_spline = use_b_spline
        self.gray_levels = gray_levels
        self.use_camera = use_camera
        self.use_screen = use_screen
        # Bool arithmetic: mirrors the module-level HIST = (1+hifi)*(1+stereo).
        self.hist = (1 + self.hifi) * (1 + self.stereo)
        if use_camera:
            self.num_columns = 176
            self.num_rows = 64
        else:
            self.num_columns = 64
            self.num_rows = 64
        self.k = 0
        self.b = 0
        # Total samples per image, forced even by the 2 * int(0.5 * ...) form.
        self.num_frames = 2 * int(0.5 * self.sample_frequency * self.image_to_sound_conversion_time)
        self.frames_per_column = int(self.num_frames / self.num_columns)
        # Sample offset / magnitude for 16-bit (signed) vs 8-bit (unsigned).
        self.sso = 0 if self.hifi else 128
        self.ssm = 32768 if self.hifi else 128
        self.scale = 0.5 / math.sqrt(self.num_rows)
        self.dt = 1.0 / self.sample_frequency
        self.v = 340.0  # v = speed of sound (m/s)
        self.hs = 0.20  # hs = characteristic acoustical size of head (m)
        # Fixed: np.float was deprecated in NumPy 1.20 and removed in 1.24,
        # so dtype=np.float raised AttributeError on current NumPy.
        # np.float64 is the exact dtype the old alias resolved to.
        self.w = np.arange(self.num_rows, dtype=np.float64)
        self.phi0 = np.zeros(self.num_rows, dtype=np.float64)
        self.A = np.zeros((self.num_columns, self.num_rows), dtype=np.uint8)
# Coefficients used in rnd() -- a classic linear congruential generator.
# NOTE(review): these duplicate the Soundscape class attributes above;
# the procedural main() path below uses these module-level copies.
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
# Bytes per sample frame: doubled for 16-bit and doubled again for stereo.
HIST = (1 + hifi) * (1 + stereo)
WHITE = 1.00
BLACK = 0.00
# Pixel grid: camera input is resized to 176x64, otherwise 64x64 is used.
if use_camera:
    num_columns = 176
    num_rows = 64
else:
    num_columns = 64
    num_rows = 64
# if gray_levels:
# else:
# Platform-dependent beeper: winsound on Windows, the external 'beep'
# utility elsewhere.  Both variants take (frequency, duration).
try:
    # noinspection PyUnresolvedReferences
    import winsound
except ImportError:
    def playsound(frequency, duration):
        # sudo dnf -y install beep
        os.system('beep -f %s -l %s' % (frequency, duration))
else:
    def playsound(frequency, duration):
        winsound.Beep(frequency, duration)
# NOTE(review): main() calls playSound("hificode.wav") -- a file-playing
# helper that only exists here as commented-out code; only the
# (frequency, duration) beeper above is actually defined.
# def playSound(file):
#     if sys.platform == "win32":
#         winsound.PlaySound(file, winsound.SND_FILENAME) # Windows only
#         # os.system('start %s' %file) # Windows only
#     elif sys.platform.startswith('linux'):
#         print("No audio player called for Linux")
#     else:
#         print("No audio player called for your platform")
def wi(file_object, i):
    """Write *i* to *file_object* as a 16-bit little-endian integer.

    The value is split into low/high bytes exactly as the original C
    code did; each byte is masked to 0..255 before packing.
    """
    low = int(i % 256)
    high = int((i - low) / 256)
    file_object.write(struct.pack('BB', low & 0xff, high & 0xff))
def wl(fp, l):
    """Write *l* to *fp* as a 32-bit little-endian integer.

    Emits the low 16-bit word first, then the high word, via wi().
    """
    # Fixed: the original used true division ('/'), which in Python 3
    # produces a float for the high word; it only worked because wi()
    # truncates, and would lose precision for values >= 2**53.
    # divmod keeps everything in exact integer arithmetic.
    i1, i0 = divmod(int(l), 65536)
    wi(fp, i0)  # low word first (little-endian)
    wi(fp, i1)  # high word
def rnd():
    """Return a pseudo-random float in [0, 1).

    Implements a linear congruential generator whose state lives in the
    module-level IR; IA, IC and IM are the multiplier, increment and
    modulus.  Only IR is mutated, so only it needs the global statement.
    """
    global IR
    IR = (IR * IA + IC) % IM
    return IR / (1.0 * IM)
def main():
    """Grab camera frames and sonify each one into hificode.wav.

    Loops until Escape is pressed in a debug window.  All tuning comes
    from the module-level configuration globals near the top of the
    file; each captured frame is resized to num_columns x num_rows,
    converted to the pixel matrix A, then rendered to a WAV file.
    """
    current_frame = 0
    b = 0
    num_frames = 2 * int(0.5 * sample_frequency * image_to_sound_conversion_time)
    frames_per_column = int(num_frames / num_columns)
    sso = 0 if hifi else 128
    ssm = 32768 if hifi else 128
    scale = 0.5 / math.sqrt(num_rows)
    dt = 1.0 / sample_frequency
    v = 340.0 # v = speed of sound (m/s)
    hs = 0.20 # hs = characteristic acoustical size of head (m)
    # NOTE(review): np.float was deprecated in NumPy 1.20 and removed in
    # 1.24 -- this line raises AttributeError on current NumPy; should
    # be np.float64 (cf. the Soundscape class above).
    w = np.arange(num_rows, dtype=np.float)
    phi0 = np.zeros(num_rows)
    A = np.zeros((num_columns, num_rows), dtype=np.uint8)
    # w = [0 for i in range(num_rows)]
    # phi0 = [0 for i in range(num_rows)]
    # A = [[0 for j in range(num_columns)] for i in range(num_rows)] # num_rows x num_columns pixel matrix
    # Set lin|exp (0|1) frequency distribution and random initial phase
    freq_ratio = max_frequency / float(min_frequency)
    if use_exponential:
        # NOTE(review): the vectorized assignment below is immediately
        # overwritten element-by-element by the loop -- one of the two
        # is redundant.
        w = TwoPi * min_frequency * np.power(freq_ratio, w / (num_rows - 1))
        for i in range(0, num_rows):
            w[i] = TwoPi * min_frequency * pow(freq_ratio, 1.0 * i / (num_rows - 1))
    else:
        for i in range(0, num_rows):
            w[i] = TwoPi * min_frequency + TwoPi * (max_frequency - min_frequency) * i / (
                num_rows - 1)
    for i in range(0, num_rows): phi0[i] = TwoPi * rnd()
    cam_id = 0 # First available OpenCV camera
    # Optionally override ID from command line parameter: python hificode_OpenCV.py cam_id
    if len(sys.argv) > 1:
        cam_id = int(sys.argv[1])
    try:
        # noinspection PyArgumentList
        cap = cv.VideoCapture(cam_id)
        if not cap.isOpened():
            raise ValueError('camera ID')
    except ValueError:
        print("Could not open camera", cam_id)
        raise
    # Setting standard capture size, may fail; resize later
    cap.read() # Dummy read needed with some devices
    # noinspection PyUnresolvedReferences
    cap.set(cv.CAP_PROP_FRAME_WIDTH, 176)
    # noinspection PyUnresolvedReferences
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, 144)
    if use_screen: # Screen views only for debugging
        cv.namedWindow('Large', cv.WINDOW_AUTOSIZE)
        cv.namedWindow('Small', cv.WINDOW_AUTOSIZE)
    key = 0
    while key != 27: # Escape key
        ret, frame = cap.read()
        if not ret:
            # Sometimes initial frames fail
            print("Capture failed\n")
            key = cv.waitKey(100)
            continue
        tmp = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        # NOTE(review): shape[0] is rows and shape[1] is columns, so this
        # comparison looks transposed -- confirm against the intended
        # (num_columns, num_rows) capture geometry.
        if frame.shape[1] != num_rows or frame.shape[0] != num_columns:
            # cv.resize(tmp, gray, Size(num_columns,num_rows))
            gray = cv.resize(tmp, (num_columns, num_rows), interpolation=cv.INTER_AREA)
        else:
            gray = tmp
        if use_screen: # Screen views only for debugging
            cv.imwrite("hificodeLarge.jpg", frame)
            cv.imshow('Large', frame)
            cv.moveWindow('Large', 20, 20)
            cv.imwrite("hificodeSmall.jpg", gray)
            cv.imshow('Small', gray)
            cv.moveWindow('Small', 220, 20)
            key = cv.waitKey(10)
        if use_camera: # Set live camera image
            mVal = gray / 16
            A[mVal == 0] = 0
            # NOTE(review): A has dtype uint8 while 10**((mVal-15)/10) is
            # <= 1.0 for mVal <= 15, so these assignments truncate to 0
            # or 1 -- confirm the intended dtype/scaling against the
            # original C program.
            A[mVal > 0] = np.power(10.0, (mVal[mVal > 0] - 15) / 10.0)
        # Write 8/16-bit mono/stereo .wav file
        with open(file_name, 'wb') as nf:
            # NOTE(review): wave.open() on a 'wb' file object yields a
            # Wave_write, but the wi()/struct calls below use fp.write,
            # which Wave_write does not provide -- these writes should
            # probably target nf directly or use writeframesraw().
            fp = wave.open(nf)
            fp.setnchannels(2 if stereo else 1)
            fp.setframerate(sample_frequency)
            fp.setsampwidth(2 if hifi else 1)
            # tau1/tau2: time constants of the 2nd-order low-pass filter
            # applied to the raw oscillator sum below.
            tau1 = 0.5 / w[num_rows - 1]
            tau2 = 0.25 * (tau1 * tau1)
            y = yl = yr = z = zl = zr = 0.0
            # Mono rendering path.
            while current_frame < num_frames and not stereo:
                if use_b_spline:
                    q = 1.0 * (current_frame % frames_per_column) / (frames_per_column - 1)
                    q2 = 0.5 * q * q
                j = int(current_frame / frames_per_column)
                j = num_columns - 1 if j > num_columns - 1 else j
                s = 0.0
                t = current_frame * dt
                if current_frame < num_frames / (5 * num_columns):
                    s = (2.0 * rnd() - 1.0) / scale # "click"
                else:
                    for i in range(0, num_rows):
                        if use_b_spline: # Quadratic B-spline for smooth C1 time window
                            if j == 0:
                                a = (1.0 - q2) * A[i][j] + q2 * A[i][j + 1]
                            elif j == num_columns - 1:
                                a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q2) * A[i][j]
                            else:
                                a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q * q) * A[i][j] + q2 * A[i][j + 1]
                        else:
                            a = A[i][j] # Rectangular time window
                        s += a * math.sin(w[i] * t + phi0[i])
                yp = y
                y = tau1 / dt + tau2 / (dt * dt)
                y = (s + y * yp + tau2 / dt * z) / (1.0 + y)
                z = (y - yp) / dt
                l = sso + 0.5 + scale * ssm * y # y = 2nd order filtered s
                if l >= sso - 1 + ssm: l = sso - 1 + ssm
                if l < sso - ssm: l = sso - ssm
                ss = int(l) & 0xFFFFFFFF # Make unsigned int
                if hifi:
                    wi(fp, ss)
                else:
                    fp.write(struct.pack('B', ss & 0xff))
                current_frame += 1
            # Stereo rendering path: per-sample binaural attenuation,
            # delay and diffraction models applied per channel.
            while current_frame < num_frames and stereo:
                if use_b_spline:
                    q = 1.0 * (current_frame % frames_per_column) / (frames_per_column - 1)
                    q2 = 0.5 * q * q
                j = int(current_frame / frames_per_column)
                j = num_columns - 1 if j > num_columns - 1 else j
                r = 1.0 * current_frame / (num_frames - 1) # Binaural attenuation/delay parameter
                theta = (r - 0.5) * TwoPi / 3
                x = 0.5 * hs * (theta + math.sin(theta))
                tl = tr = current_frame * dt
                if delay:
                    tr += x / v # Time delay model
                x = abs(x)
                sl = sr = 0.0
                hrtfl = hrtfr = 1.0
                for i in range(0, num_rows):
                    if diffraction:
                        # First order frequency-dependent azimuth diffraction model
                        hrtf = 1.0 if (TwoPi * v / w[i] > x) else TwoPi * v / (x * w[i])
                        if theta < 0.0:
                            hrtfl = 1.0
                            hrtfr = hrtf
                        else:
                            hrtfl = hrtf
                            hrtfr = 1.0
                    if relative_fade:
                        # Simple frequency-independent relative fade model
                        hrtfl *= (1.0 - 0.7 * r)
                        hrtfr *= (0.3 + 0.7 * r)
                    if use_b_spline:
                        if j == 0:
                            a = (1.0 - q2) * A[i][j] + q2 * A[i][j + 1]
                        elif j == num_columns - 1:
                            a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q2) * A[i][j]
                        else:
                            a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q * q) * A[i][j] + q2 * A[i][j + 1]
                    else:
                        a = A[i][j]
                    sl += hrtfl * a * math.sin(w[i] * tl + phi0[i])
                    sr += hrtfr * a * math.sin(w[i] * tr + phi0[i])
                sl = (2.0 * rnd() - 1.0) / scale if (current_frame < num_frames / (5 * num_columns)) else sl # Left "click"
                if tl < 0.0: sl = 0.0;
                if tr < 0.0: sr = 0.0;
                # Same 2nd-order low-pass filter as the mono path, kept
                # independently per channel.
                ypl = yl
                yl = tau1 / dt + tau2 / (dt * dt)
                yl = (sl + yl * ypl + tau2 / dt * zl) / (1.0 + yl)
                zl = (yl - ypl) / dt
                ypr = yr
                yr = tau1 / dt + tau2 / (dt * dt)
                yr = (sr + yr * ypr + tau2 / dt * zr) / (1.0 + yr)
                zr = (yr - ypr) / dt
                l = sso + 0.5 + scale * ssm * yl
                if l >= sso - 1 + ssm: l = sso - 1 + ssm
                if l < sso - ssm: l = sso - ssm
                ss = int(l) & 0xFFFFFFFF
                # Left channel
                if hifi:
                    wi(fp, ss)
                else:
                    fp.write(struct.pack('B', ss & 0xff))
                l = sso + 0.5 + scale * ssm * yr
                if l >= sso - 1 + ssm: l = sso - 1 + ssm
                if l < sso - ssm: l = sso - ssm
                ss = int(l) & 0xFFFFFFFF
                # Right channel
                if hifi:
                    wi(fp, ss)
                else:
                    fp.write(struct.pack('B', ss & 0xff))
                current_frame += 1
            fp.close()
            # NOTE(review): playSound is undefined -- the helper defined
            # above is playsound(frequency, duration); this line raises
            # NameError at runtime (the file-playing playSound is only
            # present as commented-out code).
            playSound("hificode.wav") # Play the soundscape
            current_frame = 0 # Reset sample count
    cap.release()
    cv.destroyAllWindows()
    return 0
main()
| joshainglis/python-soundscape | soundscape.py | Python | mit | 14,885 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTabulate(PythonPackage):
    """Pretty-print tabular data"""

    # Upstream project and the PyPI source tarball Spack's fetcher uses.
    homepage = "https://bitbucket.org/astanin/python-tabulate"
    pypi = "tabulate/tabulate-0.8.6.tar.gz"

    # Known releases, newest first; sha256 pins the exact PyPI artifact.
    version('0.8.9', sha256='eb1d13f25760052e8931f2ef80aaf6045a6cceb47514db8beab24cded16f13a7')
    version('0.8.7', sha256='db2723a20d04bcda8522165c73eea7c300eda74e0ce852d9022e0159d7895007')
    version('0.8.6', sha256='5470cc6687a091c7042cee89b2946d9235fe9f6d49c193a4ae2ac7bf386737c8')
    version('0.8.5', sha256='d0097023658d4dea848d6ae73af84532d1e86617ac0925d1adf1dd903985dac3')
    version('0.8.3', sha256='8af07a39377cee1103a5c8b3330a421c2d99b9141e9cc5ddd2e3263fea416943')
    version('0.7.7', sha256='83a0b8e17c09f012090a50e1e97ae897300a72b35e0c86c0b53d3bd2ae86d8c6')

    # setuptools is only needed to build/install, not at runtime.
    depends_on('py-setuptools', type='build')
| LLNL/spack | var/spack/repos/builtin/packages/py-tabulate/package.py | Python | lgpl-2.1 | 1,021 |
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup for pip package."""
import setuptools
_VERSION = '0.0.1'
def _parse_requirements(requirements_txt_path):
parse_line = lambda l: l.split('#')[0].strip()
with open(requirements_txt_path) as f:
return [parse_line(l) for l in f]
# Packaging metadata for the DMVR library.
setuptools.setup(
    name='dmvr',
    version=_VERSION,
    url='https://github.com/deepmind/dmvr',
    license='Apache 2.0',
    author='DeepMind',
    description=(
        'DMVR is a library for reading and processing multimodal datasets.'),
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author_email='dmvr-dev-os@google.com',
    # Contained modules and scripts.
    packages=setuptools.find_namespace_packages(exclude=['*_test.py']),
    install_requires=_parse_requirements('requirements.txt'),
    tests_require=_parse_requirements('requirements-test.txt'),
    # Fixed: setuptools expects 'python_requires'; the previous
    # 'requires_python' spelling is an unknown option that setuptools
    # ignores (with a warning), so the >=3.6 constraint never reached
    # installers.
    python_requires='>=3.6',
    include_package_data=True,
    zip_safe=False,
    # PyPI package information.
    classifiers=[
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Libraries',
    ],
)
| deepmind/dmvr | setup.py | Python | apache-2.0 | 2,126 |
import re
from pyrem_torq.utility import split_to_strings_iter
from _prepscript_default_defs import build_decoder
import _prepscript_util as _pu
import pyrem_torq.expression as _pte
from nodeformatter import *
# One "--option: description" entry per line; parsed by _Options below.
# NOTE(review): the name is misspelled ("optiion") but is referenced
# with the same spelling by _Options -- renaming requires both sites.
_optiion_description = """
--annotation: keep annotations.
--array_initialization: keeps array initialization code.
--field: keeps field definitions.
--import: keep import statements.
--interface: keeps interface definitions.
--javadoc: keeps javadocs.
--metadata: keep annotation definitions.
--package: keeps package declarations.
"""
class _Options(object):
    """Parsed command-line options of the Java preprocessor.

    Class attributes hold the known option strings (derived from
    _optiion_description); instances expose one boolean flag per option.
    Raises SystemError on the first unknown option string.
    """
    descriptions = tuple(s.strip() for s in (_optiion_description[1:-1].splitlines()))
    strs = tuple(od.split(":")[0] for od in descriptions)

    def __init__(self, optionStrs):
        optionStrs = list(optionStrs)
        unknown = [o for o in optionStrs if o not in _Options.strs]
        if unknown:
            raise SystemError("java: unknown option %s" % unknown[0])
        self.normalizedStrs = sorted(optionStrs)
        enabled = set(optionStrs)
        self.annotMod = "--annotation" in enabled
        self.annotDef = "--metadata" in enabled
        self.arryInit = "--array_initialization" in enabled
        self.field = "--field" in enabled
        self.oImport = "--import" in enabled
        self.interf = "--interface" in enabled
        self.javaDoc = "--javadoc" in enabled
        self.package = "--package" in enabled
# (token-name, keyword) pairs parsed from "name<-keyword" entries;
# longer keywords appear before their prefixes (e.g. r_const after
# r_continue would mis-match, hence the explicit ordering).  The [:-1]
# drops the empty entry produced by the trailing ';'.
_reservedWordDescriptions = [tuple(v.strip().split("<-")) for v in re.compile(";").split("""
r_abstract<-abstract;r_assert<-assert;
r_bool<-boolean;r_break<-break;r_byte<-byte;
r_case<-case;r_catch<-catch;m_charAt<-charAt;r_char<-char;r_class<-class;m_clone<-clone;m_compareTo<-compareTo;r_continue<-continue;r_const<-const;
r_default<-default;m_dispose<-dispose;r_double<-double;r_do<-do;
r_else<-else;r_enum<-enum;m_equals<-equals;r_extends<-extends;
r_false<-false;r_finally<-finally;r_final<-final;r_float<-float;r_for<-for;
m_getClass<-getClass;m_get<-get;r_goto<-goto;
m_hashCode<-hashCode;m_hasNext<-hasNext;
r_if<-if;r_implements<-implements;r_import<-import;r_instanceof<-instanceof;r_interface<-interface;r_int<-int;m_iterator<-iterator;
m_length<-length;r_long<-long;
r_native<-native;r_new<-new;m_next<-next;r_null<-null;
r_package<-package;r_private<-private;r_protected<-protected;r_public<-public;
r_return<-return;m_run<-run;
r_short<-short;m_size<-size;r_static<-static;r_strictfp<-strictfp;r_switch<-switch;r_synchronized<-synchronized;
m_toArray<-toArray;m_toString<-toString;
r_throws<-throws;r_throw<-throw;r_transient<-transient;r_true<-true;r_try<-try;
r_void<-void;r_volatile<-volatile;
r_while<-while;
"""[1:-1])][:-1]
def build_whitelist(options):
    """Return the token names of every reserved word / method marker."""
    return [rd[0] for rd in _reservedWordDescriptions]
def build_exprs(options):
    """Build the ordered pyrem_torq expression pipeline for Java sources.

    Each entry in the returned list is either a TorqExpression or a
    ("description", expression) pair; they are applied in order to the
    token sequence produced by build_tokenizer().  The *options* strings
    (see _optiion_description) switch individual passes on or off.
    """
    comp = _pu.expr_compile
    search = _pte.Search.build
    assign_marker_expr = _pte.assign_marker_expr
    def replaces_from_locals(ld): return dict(( k, v ) for k, v in ld.items() if isinstance(v, _pte.TorqExpression))
    opts = _Options(options)
    exprs = []
    # Reusable sub-expressions for line ends, identifiers, comments and
    # literals, referenced by name from the patterns below.
    eolExpr = comp(r'("\r" | "\n" | "\r\n");')
    wordLikeExpr = comp(r'ri"^[a-z_]";')
    extractJavadocOrMultilineCommentExpr = comp("""
    javadoc <- "/", "*", "*", *(+r"^[^*]" | any^("*", "/")), "*", "/";
    multiline_comment <- "/", "*", *(+r"^[^*]" | any^("*", "/")), "*", "/";
    """)
    extractLiteralsExpr = comp(r"""
    l_string <- ?"L", "\"", *("\\", any | any^("\"" | @eolExpr)), "\"";
    l_char <- ?"L", "'", *("\\", any | any^("'" | @eolExpr)), "'";
    l_int <- ri"^0x[a-f0-9]+$";
    l_int <- r"^\d+$", req^(ri"^e" | i"f");
    l_float <- r"^[0-9][0-9.]+$", ?(ri"^e" | i"e", ?("-" | "+"), r"^\d");
    """, replaces=replaces_from_locals(locals()))
    # Pass: lexical markup -- comments, literals, every operator and
    # punctuation token of Java (longest operators first).
    exprs.append(search(comp(r"""
    r"^\s";
    word <- @wordLikeExpr;
    @extractJavadocOrMultilineCommentExpr;
    (singleline_comment <- "/", "/", *any^(@eolExpr)), req(@eolExpr);
    @extractLiteralsExpr;
    semicolon <- ";";
    comma <- ",";
    (LB <- "{") | (RB <- "}");
    (LP <- "(") | (RP <- ")");
    (LK <- "[") | (RK <- "]");
    # 4 char operator
    op_signed_rshift_assign <- ">", ">", ">", "=";
    # 3 char operators
    (op_lshift_assign <- "<", "<", "=") | (op_rshift_assign <- ">", ">", "=");
    op_signed_rshift <- ">", ">", ">";
    # 2 char operators
    op_lshift <- "<", "<";
    (OG <- ">"), (non_splitted <-), (OG <- ">"); # right shift operator is regarded as two ">"s at this time
    (op_increment <- "+", "+") | (op_decrement <- "-", "-");
    (op_le <- "<", "=") | (op_ge <- ">", "=");
    (op_eq <- "=", "=") | (op_ne <- "!", "=");
    (op_add_assign <- "+", "=") | (op_sub_assign <- "-", "=");
    (op_mul_assign <- "*", "=") | (op_div_assign <- "/", "=");
    (op_mod_assign <- "%", "=") | (op_and_assign <- "&", "=");
    (op_xor_assign <- "^", "=") | (op_or_assign <- "|", "=");
    (op_logical_and <- "&", "&") | (op_logical_or <- "|", "|");
    # single char operators
    op_star <- "*"; # may mean mul or wildcard
    (op_div <- "/") | (op_mod <- "%");
    (op_plus <- "+") | (op_minus <- "-"); # may mean add(sub) or sign plus(minus)
    op_amp <- "&";
    op_logical_neg <- "!";
    op_complement <- "~";
    (op_or <- "|") | (op_xor <- "^");
    op_assign <- "=";
    (OL <- "<") | (OG <- ">"); # may mean less(greater) than or template parameter
    atmark <- "@";
    (ques <- "?") | (colon <- ":") | (dot <- ".");
    """, replaces=replaces_from_locals(locals()))))
    exprs.append(( "remove whitespace", search(comp(r"""
    (null <- multiline_comment | singleline_comment | "\\", *r"[ \t]", @eolExpr | @eolExpr | r"^\s");
    """, replaces=replaces_from_locals(locals()))) ))
    expr = _pte.Search.build(_pu.ReservedwordNode.build(_reservedWordDescriptions))
    exprs.append(( 'identify reserved words', expr ))
    exprs.append(search(comp("""
    # normalizes type names
    (r_int <- r_long | r_short) | (r_double <- r_float) | (l_bool <- r_true | r_false)
    | (l_string <- <>word, <>dot, (<>word :: "getString"), <>LP, <>l_string, <>RP); # support for externalized string
    """)))
    exprs.append(search(comp("""
    # extracts blocks, params, and indices
    any^(LB | RB | LP | RP | LK | RK)
    | (block <- LB, *@0, RB)
    | (param <- LP, *@0, RP)
    | (index <- LK, *@0, RK);
    """)))
    # Pass: disambiguate generics -- keep template parameter lists on
    # types, drop wildcard/bound-only ones, and re-fuse ">>" that the
    # lexer split into two OG tokens.
    exprs.append(search(comp("""
    word, *(dot, word), (template_param <-
        OL,
        ?(ques, ((word :: "super") | r_extends)), @0,
        *((comma | op_amp), ?(ques, ((word :: "super") | r_extends)), @0),
        OG, ?<>non_splitted
    )
    | (null <-
        OL,
        (word, *(dot, word), ((word, ::, "super") | r_extends), @0 | @0),
        *((comma | op_amp), (word, *(dot, word), ((word :: "super") | r_extends), @0 | @0)),
        OG, ?<>non_splitted
    )
    | (op_rshift <- <>OG, <>non_splitted, <>OG)
    | <>non_splitted
    | word, *(dot, word), *index | ques, *index
    | (block :: ~@0) | (param :: ~@0) | (index :: ~@0); # recurse into block, param, and index
    """)))
    exprs.append(('id identification', search(comp("""
    ?(null <- (word :: "this"), dot),
    (id <- <>word, *(<>dot, <>word, req^(param)), ?(template_param :: *<>any_node))
    | (id <- (<>word :: "this"))
    | (l_string <- <>l_string, +(<>op_plus, <>l_string))
    | (block :: ~@0) | (param :: ~@0) | (index :: ~@0); # recurse into block, index, and param
    """)) ))
    # Optional passes: strip package and/or import statements unless the
    # corresponding option asked to keep them.
    if not opts.oImport:
        if not opts.package:
            exprs.append(('remove package/import',
                search(comp('null <- r_package, id, semicolon | r_import, id, ?(dot, op_star), semicolon;')) ))
        else:
            exprs.append(('remove import',
                search(comp('r_import, id, ?(dot, op_star), semicolon;')) ))
    else:
        if not opts.package:
            exprs.append(('remove package', search(comp('null <- r_package, id, semicolon')) ))
    extractAnnotationExpr = comp('(annot <- atmark, id, ?param);')
    # Pass: drop modifiers, extends/implements/throws clauses; wrap
    # annotation and interface definitions for later keep-or-hide.
    exprs.append(search(comp("""
    (null <- r_private | r_public | r_protected | r_synchronized | r_final | r_abstract | r_strictfp | r_volatile | r_transient)
    | (null <- r_static, req^(LB))
    | (null <- +(r_extends, id, *(comma, id) | r_implements, id, *(comma, id)))
    | (null <- r_throws, id, *(comma, id))
    | (anotation_block <- (def_block <- ?javadoc, atmark, r_interface, id, block))
    | (interface_block <- (def_block <- *(javadoc | @extractAnnotationExpr),
            r_interface, id, ?(r_extends, id, *(comma, id)), block))
    | @extractAnnotationExpr
    | (block :: ~@0) | (param :: ~@0); # recurse into block and param
    """, replaces=replaces_from_locals(locals()))))
    someLiteralExpr = comp("(l_bool | l_string | l_int | l_char | l_float);")
    eRemoveAryInit = """
    op_assign, (initialization_block <- req(block)), (null <- block), semicolon
    | index, (initialization_block <- req(block)), (null <- block)
    | """
    exprs.append(search(comp("""%(eRemoveAryInit)s
    (value_list <- (@someLiteralExpr | id), +(comma, (@someLiteralExpr | id), ?comma))
    | (block :: ~@0) # recurse into block
    | (param :: ~@0) | (index :: ~@0); # recurse into expression
    """ % { 'eRemoveAryInit' : eRemoveAryInit }, replaces=replaces_from_locals(locals()))))
    exprs.append(search(comp("""
    any^(id | param | index | l_float | l_int | block), (null <- op_minus) # remove unary minus
    | (method_like <- m_charAt | m_compareTo | m_dispose | m_equals | m_getClass | m_get | m_hashCode | m_hasNext | m_iterator | m_length | m_next | m_run | m_size | m_toArray | m_toString)
    | (block :: ~@0) # recurse into block
    | (param :: ~@0) | (index :: ~@0); # recurse into expression
    """)))
    # Recursive pass: peel redundant parentheses and collapse repeated
    # semicolons; built via a marker so the expression can self-recurse.
    def removeRedundantParenExpr():
        tbl = _pte.ExprDict()
        someAssignOpExpr = comp("(op_assign | op_add_assign | op_sub_assign | op_mul_assign | op_div_assign | op_mod_assign | op_and_assign | op_xor_assign | op_or_assign | op_lshift_assign | op_rshift_assign | op_signed_rshift_assign);")
        tbl["someAssignOpExpr"] = someAssignOpExpr
        eRemoveParenExpr = comp("""
        (<>param :: (null <- LP), req(param, RP), @0, (null <- RP))
        | (<>param :: (null <- LP), *(req^(RP), @er), (null <- RP));
        """)
        tbl["eRemoveParenExpr"] = eRemoveParenExpr
        er = comp("""
        (r_return | @someAssignOpExpr), req(param, semicolon), @eRemoveParenExpr , semicolon, ?(null <- +semicolon)
        | semicolon, ?(null <- +semicolon)
        | (param :: LP, req(param, RP), @eRemoveParenExpr, RP) | (param :: ~@0)
        | (index :: LP, req(param, RP), @eRemoveParenExpr, RP) | (index :: ~@0)
        | (block :: ~@0)
        | any;
        """, replaces=tbl)
        tbl["er"] = er
        return [0,]*er
    exprs.append(("remove redundant paren/semicolon", removeRedundantParenExpr()))
    someTypeKeywordExpr = comp("(r_bool | r_byte | r_char | r_double | r_float | r_int | r_short | r_object | r_string);")
    exprs.append(( "remove delegation/getter/setter/empty method", search(comp("""
    (null <- (r_void | @someTypeKeywordExpr | id), *index,
        (id | method_like), param, ((block :: LB, ?r_return, id, dot, id, param, semicolon, RB) | (block :: LB, RB)))
    | (null <- (@someTypeKeywordExpr | id), *index,
        (id | method_like), (param :: LP, RP), (block :: LB, r_return, (id | @someLiteralExpr, semicolon, RB))
    | (null <- r_void, (id | method_like), param, (block :: LB, id, op_assign, id, semicolon, RB))
    | r_return, (param :: (null <- LP), *any^(RP)), (null <- RP)), semicolon
    | (null <- r_assert, *any^(semicolon | eof), semicolon)
    | (block :: ~@0) | (param :: ~@0); # recurse into block and param
    """, replaces=replaces_from_locals(locals()))) ))
    someCompoundStmtKeywordExpr = comp("(r_if | r_while | r_for | r_do | r_try | r_catch | r_switch);")
    # Pass: force every compound-statement body to be an explicit block
    # (braces inserted where Java allowed a bare statement).
    shouldBeBlockExpr = comp("((block :: ~@0) | (block <- (LB<-), @er, (RB<-)));")
    er = search(comp("""
    r_if, param, @shouldBeBlockExpr, *(r_else, r_if, param, @shouldBeBlockExpr), ?(r_else, @shouldBeBlockExpr)
    | r_else, @shouldBeBlockExpr
    | (r_while | r_for) , param, @shouldBeBlockExpr
    | r_do, @shouldBeBlockExpr, r_while, param, semicolon
    | r_try, (block :: ~@0), *((r_catch, param | r_finally), (block :: ~@0))
    | (r_catch, ?param | r_finally), (block :: ~@0)
    | +((r_case, (id | @someLiteralExpr) | r_default), colon),
    (
        (block :: ~@0), ?(null <- r_break, semicolon)
        | (block <- ((LB<-), *(req^(r_break | r_case | r_default), @0), (RB<-))),
        ?(null <- r_break, semicolon) # enclose each case clause by block
    )
    | r_switch, (block :: ~@0)
    | (r_return | r_break | r_continue | op_assign), *any^(block | LB | semicolon), semicolon
    | *any^(block | LB | semicolon | @someCompoundStmtKeywordExpr), semicolon
    | (block :: ~@0) | (param :: ~@0); # recurse into block and param
    """, replaces=replaces_from_locals(locals())))
    assign_marker_expr(shouldBeBlockExpr, 'er', er)
    exprs.append(er)
    del shouldBeBlockExpr
    del er
    exprs.append(( "simple-statement identification", search(comp("""
    r_if, param, block, *(r_else, r_if, param, block), ?(r_else, block)
    | r_else, block
    | (r_while | r_for | r_switch), param, block
    | r_do, block, r_while, param, semicolon
    | r_try, block, *((r_catch, param | r_finally), block)
    | (r_catch, ?param | r_finally), block
    | (simple_statement <- *(javadoc | annot), *any^(block | LB | semicolon | @someCompoundStmtKeywordExpr | r_finally| r_case | r_default), semicolon)
    | (block :: ~@0) | (param :: ~@0); # recurse into block and param
    """, replaces=replaces_from_locals(locals()))) ))
    # Optional pass: drop field definitions (simple statements directly
    # inside a class body) unless --field asked to keep them.
    if not opts.field:
        eRemoveFieldExpr = comp('(null <- simple_statement) | @er;')
        er = comp('r_class, id, (block :: ~@eRemoveFieldExpr) | (block :: ~@0) | (param :: ~@0) | any;', replaces=replaces_from_locals(locals()))
        assign_marker_expr(eRemoveFieldExpr, 'er', er)
        exprs.append(("remove field definitions", [0,]*er))
    exprs.append(( "definition-block identification", search(comp("""
    (def_block <- *(javadoc | annot), r_class, id, (block :: ~@0))
    | (def_block <- *(javadoc | annot), (r_void | @someTypeKeywordExpr | id), *(index :: LK, RK), (id | method_like), param, (block :: ~@0))
    | (def_block <- *(javadoc | annot), id, param, (block :: ~@0)) # constructor
    | (block :: ~@0) | (param :: ~@0); # recurse into block and param
    """, replaces=replaces_from_locals(locals()))) ))
    exprs.append(( "control-token insertion", search(comp("""
    (id | @someTypeKeywordExpr), id, (param :: ~@0), *(comma, id, ?(param :: ~@0)), semicolon # perhaps a variable decl&init.
    | (r_for | r_while), (c_loop<-)
    | (r_if | r_switch | ques), (c_cond<-)
    | (id | method_like), (c_func<-), (param :: ~@0)
    | (<>simple_statement :: ~@0) # recuse into simple_statement and expand the simple_statement
    | (def_block :: ~@0) | (block :: ~@0) | (param :: ~@0) | (index :: ~@0) | (simple_statement :: ~@0); # recurse into block, param, index
    """, replaces=replaces_from_locals(locals()))) ))
    return exprs
# Default node-name -> formatter mapping used by build_nodeformattable().
__nodefmtTbl = {
    'code' : NodeFlatten(), # top
    'id' : NodeFormatString('id|%s'),
    'block' : NodeFlatten(), 'LB' : NodeString('(brace'), 'RB' : NodeString(')brace'),
    'word' : NodeFlatten(),
    'param' : NodeFlatten(), 'LP' : NodeString('(paren'), 'RP' : NodeString(')paren'),
    'index' : NodeFlatten(), 'LK' : NodeString('(braket'), 'RK' : NodeString(')braket'),
    'semicolon' : NodeString('suffix:semicolon'),
    'def_block' : NodeRecurse('(def_block', ')def_block'),
    'value_list' : NodeFlatten(),
}
# Each literal token is rendered as "<name>|<text>".
__someLiteral = "l_bool,l_string,l_int,l_char,l_float"
__nodefmtTbl.update(( li, NodeFormatString(li + "|%s") ) for li in __someLiteral.split(","))
def build_nodeformattable(options):
    """Build the node-name -> formatter table, honoring *options*.

    Node names absent from the table fall back to a plain NodeString of
    the name itself, memoized into the table on first lookup.  The
    annotation/interface/javadoc entries are kept or hidden according to
    the parsed options.
    """
    opts = _Options(options)

    class _DefaultingNodeTable(dict):
        def __missing__(self, key):
            formatter = NodeString(key)
            self[key] = formatter
            return formatter

    table = _DefaultingNodeTable(__nodefmtTbl)
    table['annotation_block'] = NodeFlatten() if opts.annotDef else NodeHide()
    table['interface_block'] = NodeFlatten() if opts.interf else NodeHide()
    table['annot'] = NodeRecurse('(annot', ')annot') if opts.annotMod else NodeHide()
    table['javadoc'] = NodeFormatString("javadoc|%s") if opts.javaDoc else NodeHide()
    return table
# build_decoder is default one
def build_tokenizer(options):
    """Return a tokenizer closure splitting Java source text into strings."""
    # Matches hex literals, numbers, identifiers, runs of blanks, CRLF, or any
    # single character (DOTALL so '.' also consumes bare newlines).
    textSplitPattern = re.compile(r"0x[0-9a-f]+([.][0-9a-f]+)?|[0-9.]+|[a-z_]\w*|[ \t]+|\r\n|.", re.DOTALL | re.IGNORECASE)
    def tkn(inputText, inputFilePath=None):
        # The stream starts with a 'code' marker node and ends with 'eof'.
        seq = [ 'code' ]; seq.extend(split_to_strings_iter(inputText, textSplitPattern))
        seq.append([ 'eof' ]) # insert eof node at the end of sequence
        return seq
    return tkn
def normalized_option_strs(options):
    """Return the canonical string form of the given preprocessor options."""
    return _Options(options).normalizedStrs
def get_option_description():
    """Return a (copied) list of the human-readable option descriptions."""
    return list(_Options.descriptions)
def get_version():
    """Return this preprocessor's (major, minor) version tuple."""
    major, minor = 0, 1
    return (major, minor)
def get_target_file_predicate(options):
    """Return a predicate accepting paths whose final extension is '.java'."""
    def pred(filePath):
        # Equivalent to splitting at the last '.' and comparing: endswith
        # matches exactly when the trailing extension (dot included) is
        # ".java", and is False for dot-less names.
        return filePath.endswith(".java")
    return pred
if __name__ == '__main__':
    # Invoked directly: print a short usage message with the option list
    # (Python 2 print statement; this module is a plugin for prep.py).
    r = [ 'prep_java.py, preprocess script for java source files.',
            ' to run this script, use "prep.py java"',
            'options' ]
    for s in _Options.descriptions:
        r.append("  " + s)
    print "\n".join(r)
| viswimmer1/PythonGenerator | data/python_files/28486512/prep_java.py | Python | gpl-2.0 | 18,061 |
from setuptools import setup
# Packaging metadata for MySQLdatagen.
# Installs the 'MySQLdatagen' package plus the bin/generate-row script;
# runtime deps are the MySQL bindings, configparser and fake-factory.
setup(name="MySQLdatagen",
      version="0.1",
      description="A package to help generate data for a mysql database",
      url="https://github.com/zombiecollin/MySQLdatagen",
      author="zombiecollin",
      license="MIT",
      packages=['MySQLdatagen'],
      install_requires=[
          'MYSQL-python',
          'configparser',
          'fake-factory',
      ],
      scripts=[
          'bin/generate-row',
      ],
      zip_safe=False)
| zombiecollin/MySQLdatagen | setup.py | Python | mit | 398 |
from datetime import timedelta
from unittest.mock import patch, MagicMock
from libfaketime import fake_time
import pytest
def test_purge(SETTINGS):
    """purge() keeps entries younger than CACHE_LIFE and evicts older ones."""
    SETTINGS.CACHE_LIFE = 10
    SETTINGS.REPOSITORIES = 'owner/repository1 owner/repository2'
    from jenkins_epo.cache import MemoryCache
    cache = MemoryCache()
    with fake_time('2012-12-21 00:00:00 UTC') as time:
        cache.set('key', 'data')
        cache.purge()
        # Entry is fresh: purge must not drop it.
        data = cache.get('key')
        assert 'data' == data
        # Jump well past the cache lifetime; the entry must now be evicted.
        time.tick(timedelta(seconds=1800))
        cache.purge()
        with pytest.raises(KeyError):
            cache.get('key')
@patch('jenkins_epo.cache.fcntl')
@patch('jenkins_epo.cache.shelve.open')
def test_corruptions(dbopen, fcntl):
    """FileCache survives a corrupt DB on open and broken entries on get/purge."""
    from jenkins_epo.cache import FileCache
    # First shelve.open attempt fails (corrupt file); the retry succeeds.
    dbopen.side_effect = [Exception(), MagicMock()]
    my = FileCache()
    my.open()
    with pytest.raises(KeyError):
        # A storage-level failure must surface as a plain cache miss.
        my.storage.__getitem__.side_effect = Exception()
        my.get('key')
    my.storage.keys.return_value = ['key']
    my.purge()
@patch('jenkins_epo.cache.fcntl')
@patch('jenkins_epo.cache.shelve.open')
def test_close(dbopen, fcntl):
    """close() must flush the underlying shelve via sync()."""
    from jenkins_epo.cache import FileCache
    my = FileCache()
    my.close()
    assert my.storage.sync.mock_calls
@patch('jenkins_epo.cache.os.unlink')
@patch('jenkins_epo.cache.FileCache.close')
def test_destroy(close, unlink):
    """destroy() removes the backing file and closes the storage."""
    from jenkins_epo.cache import FileCache
    my = FileCache()
    my.destroy()
    assert unlink.mock_calls
    assert close.mock_calls
| novafloss/jenkins-epo | tests/test_cache.py | Python | gpl-3.0 | 1,527 |
from doajtest.helpers import DoajTestCase
from portality import models
import time
class TestTick(DoajTestCase):
    """Journal 'tick' flag rules.

    A journal is ticked when it is in DOAJ and either was created after the
    tick threshold date or has a related application accepted after it.
    """

    def setUp(self):
        super(TestTick, self).setUp()
        # In DOAJ, created after the threshold: expected ticked.
        self.j_correct = models.Journal(created_date="2014-06-28T11:26:42Z")
        self.j_correct.set_in_doaj(True)
        self.j_correct.save()
        # Explicitly out of DOAJ: never ticked.
        self.j_not_in_doaj_excplicit = models.Journal(created_date="2014-06-28T11:26:42Z")
        self.j_not_in_doaj_excplicit.set_in_doaj(False)
        self.j_not_in_doaj_excplicit.save()
        # in_doaj never set (defaults to not in DOAJ): never ticked.
        self.j_not_in_doaj_implicit = models.Journal(created_date="2014-06-28T11:26:42Z")
        self.j_not_in_doaj_implicit.save()
        # In DOAJ but created before the threshold: not ticked.
        self.j_too_old = models.Journal(created_date="2012-06-28T11:26:42Z")
        self.j_too_old.set_in_doaj(True)
        self.j_too_old.save()
        # Too old AND out of DOAJ: not ticked.
        self.j_too_old_not_in = models.Journal(created_date="2012-06-28T11:26:42Z")
        self.j_too_old_not_in.set_in_doaj(False)
        self.j_too_old_not_in.save()
        # Suggestions (applications) are never ticked.
        self.sugg = models.Suggestion(created_date="2014-06-28T11:26:42Z")
        self.sugg.save()
        # update request tests
        # Old journal whose update request was also accepted before the
        # threshold: not ticked.
        self.j_old_cd_old_reapp = models.Journal(created_date="2012-06-28T11:26:42Z")
        self.j_old_cd_old_reapp.add_related_application("123456789", date_accepted="2012-07-28T11:26:42Z")
        self.j_old_cd_old_reapp.set_in_doaj(True)
        self.j_old_cd_old_reapp.save()
        # Old journal with an update request accepted after the threshold:
        # ticked.
        self.j_old_cd_new_reapp = models.Journal(created_date="2012-06-28T11:26:42Z")
        self.j_old_cd_new_reapp.add_related_application("123456789", date_accepted="2015-01-01T11:26:42Z")
        self.j_old_cd_new_reapp.set_in_doaj(True)
        self.j_old_cd_new_reapp.save()
        # Same, but out of DOAJ: not ticked.
        self.j_old_cd_new_reapp_out = models.Journal(created_date="2012-06-28T11:26:42Z")
        self.j_old_cd_new_reapp_out.add_related_application("123456789", date_accepted="2015-01-01T11:26:42Z")
        self.j_old_cd_new_reapp_out.set_in_doaj(False)
        self.j_old_cd_new_reapp_out.save()
        # Refresh the type to force changes in the index, then wait for it to be done
        models.Journal.refresh()
        models.Suggestion.refresh()
        time.sleep(2)

    def tearDown(self):
        super(TestTick, self).tearDown()

    def test_01_tick(self):
        # See setUp for the rationale behind each expectation.
        assert self.j_correct.is_ticked()
        assert not self.j_not_in_doaj_excplicit.is_ticked()
        assert not self.j_not_in_doaj_implicit.is_ticked()
        assert not self.j_too_old.is_ticked()
        assert not self.j_too_old_not_in.is_ticked()
        assert not self.j_old_cd_old_reapp.is_ticked()
        assert self.j_old_cd_new_reapp.is_ticked()
        assert not self.j_old_cd_new_reapp_out.is_ticked()
| DOAJ/doaj | doajtest/unit/test_tick.py | Python | apache-2.0 | 2,665 |
#!/usr/bin/python3
import sqlite3
import os, sys, time, datetime, random, string
import urllib.request, urllib.error
import configparser
from flask import Flask, request, session, redirect
from flask import render_template, g, flash, url_for
from contextlib import closing
from .modules import Pagi
from pxeat import app
from config import *
def prt_help():
    """Print usage instructions for launching the PXEAT service."""
    usage = "To start the service:\n\n\t" + sys.argv[0] + " server\n"
    deploy = ("Listen to \"localhost:5000\" by default, \n"
              "Deploy on production (Apache, Nginx...) with \"pxeat.wsgi\"")
    print(usage)
    print(deploy)
def chk_args():
    """Parse argv: 'server' validates prerequisites, '--initdb' creates the DB.

    Exits the process (after printing help or an error) for anything else.
    """
    if len(sys.argv) == 2:
        if sys.argv[1] == 'server':
            # Refuse to start unless DB, PXE file and config all exist.
            if not os.path.isfile(DATABASE):
                print("Database is not available!\nCreate with --initdb")
                sys.exit()
            if not os.path.isfile(PXE_FILE):
                print("PXE file is not available!\nPlease check the configuration")
                sys.exit()
            if not os.path.isfile("./config.py"):
                print("PXEAT Config file is missing!")
                sys.exit()
        elif sys.argv[1] == '--initdb':
            init_db()
        else:
            prt_help()
            sys.exit()
    else:
        prt_help()
        sys.exit()
# Defaults
items_num = int(ITEMS_NUM)  # number of PXE entries kept in the generated menu
# Initial values for the submission form, in field order:
# [title, repo_url, repo_kernel, repo_initrd, inst_method, comment]
form_default = ["", \
        "http://", \
        REPO_KERNEL_DEFAULT, \
        REPO_INITRD_DEFAULT, \
        "def", \
        ""]
loader_dir = TFTP_ROOT + LOADER_PATH
# Downloaded kernel/initrd pairs are stored as "<id>-0" / "<id>-1".
postfix_kernelfn = '-0'
postfix_initrdfn = '-1'
# Module-level scratch dict shared between the /confirm and /add requests.
items = {}
def chk_input(chk_string, chk_type):
    """Validate one form field value.

    chk_type selects the rule:
      'pxe_title' -- must be non-empty
      'file_path' -- must start with '/' and not end with '/'
      'repo_url'  -- must be http:// or https:// with a non-empty remainder
    Raises ValueError with a user-facing message on failure, returns None on
    success.  An unknown chk_type aborts the process (programming error).
    """
    if chk_type == 'pxe_title':
        if chk_string == '':
            raise ValueError("The title can not be empty!")
        return
    elif chk_type == 'file_path':
        if chk_string[0] != '/' or chk_string[-1] == '/':
            raise ValueError("Path format is invalid!")
        return
    elif chk_type == 'repo_url':
        chk_elements = chk_string.split('//')
        # Fix: a URL without '//' used to crash with IndexError below;
        # report it as an invalid format instead.
        if len(chk_elements) < 2:
            raise ValueError("Invalid format!"+\
                " (Only support http:// or https://)")
        if chk_elements[1] == '':
            raise ValueError("The repository can not be empty!")
        elif chk_elements[0] not in ['http:','https:']:
            raise ValueError("Invalid format!"+\
                " (Only support http:// or https://)")
        return
    else:
        sys.exit("chk_type error!")
def grab_file(base_url, file_path, saved_file):
    """Download base_url + file_path into saved_file.

    Returns None on success, or an error string (HTML fragment) for the
    caller to display; never raises.
    """
    errmsg0 = "<br />Something wrong, please contact the administrator."
    errmsg1 = "<br />Something wrong, please check the repository link \
            and kernel/initrd file path."
    dbginfo_local = "Debug info: Configuration error! \
    Failed to open/write local kernel&initrd file. \
    Check your \'LOADER_PATH\' setting in config file. \
    Make sure the path exist and you have permission to write.\n\
    Current path: " + saved_file
    file_url = base_url + file_path
    try:
        f = urllib.request.urlopen(file_url)
    except urllib.error.HTTPError as e:
        return str(e.code) + " " + str(e.reason) + str(errmsg1)
    except Exception:
        # Fix: narrowed from a bare except so Ctrl-C is not swallowed.
        return str(errmsg0)
    try:
        # Fix: context managers guarantee both the HTTP response and the
        # local file are closed even when the write fails part-way; the
        # original leaked both handles on error.
        with f, open(saved_file, "wb") as local_file:
            local_file.write(f.read())
    except Exception:
        print(dbginfo_local)
        return str(errmsg0)
def boot_opts_gen(opt_flag):
    """Return the kernel boot options for the chosen install method.

    'vnc' and 'ssh' append a serial console plus the matching remote-install
    arguments (with the configured password); anything else gets the plain
    defaults.
    """
    if opt_flag == "vnc":
        extra = " console=ttyS0 vnc=1 vncpassword=" + VNC_PASSWD
    elif opt_flag == "ssh":
        extra = " console=ttyS0 usessh=1 sshpassword=" + SSH_PASSWD
    else:
        extra = ""
    return DEFAULT_BOOT_OPTS + extra
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    return sqlite3.connect(DATABASE)
def init_db():
    """(Re)create the schema by running schema.sql on a fresh connection."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.before_request
def before_request():
    # Open a per-request DB connection, stored on flask's g object.
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    # Close the per-request DB connection, if one was opened.
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
@app.route('/')
def form():
    """Render the submission form pre-filled with the default field values."""
    field_names = ['title', \
                   'repo_url', \
                   'repo_kernel', \
                   'repo_initrd', \
                   'inst_method', \
                   'comment']
    # form_default holds the defaults in the same field order.
    default_val = dict(zip(field_names, form_default))
    return render_template('form.html', default_val=default_val)
@app.route('/history/', defaults={'page': 1})
@app.route('/history/page/<int:page>')
def history(page):
    """Paginated listing of all previously submitted PXE entries."""
    count = g.db.execute('select count(*) from pxeitems').fetchone()[0]
    per_page = 10
    pagination = Pagi(page, per_page, count)
    try:
        # Newest entries first.
        cur = g.db.execute('select id,\
                pxe_title,\
                repo_url,\
                repo_kernel,\
                repo_initrd,\
                pxe_comment,\
                unix_time,\
                inst_flag from pxeitems order by id desc')
    except sqlite3.Error as e:
        return render_template('failed.html', \
                failed_msg = "Database error: "+str(e))
    # NOTE(review): the whole table is fetched and sliced in Python;
    # LIMIT/OFFSET in SQL would avoid that — confirm table stays small.
    history_entries = [ dict(pxe_id=row[0], \
            pxe_title=row[1], \
            repo_url=row[2], \
            repo_kernel=row[3], \
            repo_initrd=row[4], \
            pxe_comment=row[5], \
            unix_time=datetime.datetime.fromtimestamp(int(row[6])), \
            inst_flag=row[7]) \
            for row in cur.fetchall()[(page-1)*per_page:page*per_page]\
            ]
    if not history_entries and page != 1:
        #Shoud do something here other than pass or abort(404)
        pass
    return render_template('history.html',\
            pagination=pagination,\
            history_entries=history_entries)
@app.route('/clone/<int:clone_id>')
def clone(clone_id):
    """Pre-fill the submission form with the values of an existing entry."""
    row = g.db.execute('select pxe_title,\
            repo_url,\
            repo_kernel,\
            repo_initrd,\
            inst_flag,\
            pxe_comment from pxeitems where id=?',[clone_id]).fetchone()
    # Map the selected columns onto the form field names, in query order.
    default_val = {}
    for i,k in enumerate(['title', \
            'repo_url', \
            'repo_kernel', \
            'repo_initrd', \
            'inst_method', \
            'comment']):
        default_val[k] = row[i]
    flash(u'Cloned Entry!','green')
    return render_template('form.html', default_val=default_val)
@app.route('/about')
@app.route('/about/')
def about():
    """Static 'about' page."""
    return render_template('about.html')
# For the pagination
def url_for_other_page(page):
    """Build the current endpoint's URL with only the page number swapped."""
    args = request.view_args.copy()
    args['page'] = page
    return url_for(request.endpoint, **args)
# Expose the helper to Jinja templates (used by the pagination macro).
app.jinja_env.globals['url_for_other_page'] = url_for_other_page
@app.route('/confirm', methods=['POST'])
def confirm_entry():
    """Validate the submitted form and render a preview of the PXE entry.

    Validated values are stashed in the module-level `items` dict for the
    follow-up POST to /add.
    """
    #Input checking
    try:
        for x,y in [[request.form['pxe_title'],'pxe_title'], \
                [request.form['repo_url'], 'repo_url'], \
                [request.form['repo_kernel'], 'file_path'], \
                [request.form['repo_initrd'], 'file_path']]:
            chk_input(x,y)
    except ValueError as e:
        flash(e.args[0],'error')
        return redirect(url_for('form'))
    # Assign to the dictionary
    items['repo_kernel'] = request.form['repo_kernel']
    items['repo_url'] = request.form['repo_url']
    items['repo_initrd'] = request.form['repo_initrd']
    items['pxe_title'] = request.form['pxe_title']
    items['pxe_comment'] = request.form['pxe_comment']
    items['inst_flag'] = request.form['inst_method']
    # Generate a random string
    # (non-cryptographic suffix; only used to make loader file names unique)
    items['random_str'] = ''.join(random.choice(string.ascii_lowercase) for _ in range(4))
    items['unix_time'] = ''
    # Show the entry which will be generated on the confirm page
    gen_format = ["menu label ^a - " + items['pxe_title'], \
            "kernel " + LOADER_PATH + "[random]" + postfix_kernelfn, \
            "append initrd=" + LOADER_PATH + "[random]" + postfix_initrdfn + " " + \
            boot_opts_gen(items['inst_flag']) + " " + \
            "install=" + items['repo_url']]
    return render_template('confirm.html', cfm_entries=items, cfm_fmt=gen_format)
@app.route('/add', methods=['POST'])
def add_entry():
    """Commit the previewed entry.

    Downloads the kernel/initrd pair, records the entry in the database,
    regenerates the PXE menu file and prunes loader files that fell out of
    the menu.  Any failure cleans up what was done so far and renders the
    failure page.
    """
    items['unix_time'] = str(int(time.time()))
    id_random = items['unix_time'] + items['random_str']
    # Get kernel and initrd file
    for f_name,i in [[items['repo_kernel'], postfix_kernelfn],\
            [items['repo_initrd'], postfix_initrdfn]]:
        ret = grab_file(items['repo_url'],\
                f_name,\
                loader_dir + id_random + i)
        if ret:
            return render_template('failed.html',\
                    failed_msg = f_name + ": " + str(ret))
        else:
            pass
    # Add new entry to database
    try:
        g.db.execute('INSERT INTO pxeitems (\
                pxe_title, \
                repo_url, \
                repo_kernel, \
                repo_initrd, \
                pxe_comment, \
                unix_time, \
                random_str, \
                inst_flag) values (?, ?, ?, ?, ?, ?, ?, ?)', \
                [items['pxe_title'], \
                items['repo_url'], \
                items['repo_kernel'], \
                items['repo_initrd'], \
                items['pxe_comment'], \
                items['unix_time'], \
                items['random_str'], \
                items['inst_flag']\
                ])
    except sqlite3.Error as e:
        #Remove downloaded files here
        for i in (postfix_kernelfn, postfix_initrdfn):
            os.remove(TFTP_ROOT + LOADER_PATH + id_random + i)
        return render_template('failed.html', \
                failed_msg = "Database error: " + str(e))
    g.db.commit()
    # Fetch first items_num of entires from the database
    cur = g.db.execute('SELECT pxe_title,\
            repo_url,\
            repo_kernel,\
            repo_initrd,\
            inst_flag FROM pxeitems order by id desc')
    pxe_entries = [ dict(pxe_title=row[0], \
            repo_url=row[1], \
            repo_kernel=row[2], \
            repo_initrd=row[3], \
            inst_flag=row[4]) for row in cur.fetchall()[:items_num]\
            ]
    # Write the entries to PXE configure file
    try:
        fpxe = open(PXE_FILE,'w')
    except IOError as e:
        # Roll back: remove the downloaded pair and the DB row just added.
        for i in ("0","1"):
            os.remove(TFTP_ROOT + LOADER_PATH + id_random + "-" + i)
        g.db.execute('DELETE FROM pxeitems WHERE id = (SELECT max(id) FROM pxeitems)')
        return render_template('failed.html', failed_msg = e)
    fpxe.write(PXE_HEADER + '\n')
    pxe_index = 'a'
    # NOTE(review): every menu entry references the NEWEST kernel/initrd
    # pair (items['unix_time']+items['random_str']); older entries' own
    # files are never referenced — confirm this is intended.
    for pxe_entry in pxe_entries:
        fpxe.write('label {0}\n menu label ^{0} - {1}\n menu indent 2\n kernel {2}\n append initrd={3} {4} install={5}\n\n'.format(\
                pxe_index,\
                pxe_entry['pxe_title'],\
                LOADER_PATH + items['unix_time'] + items['random_str'] + postfix_kernelfn, \
                LOADER_PATH + items['unix_time'] + items['random_str'] + postfix_initrdfn, \
                boot_opts_gen(pxe_entry['inst_flag']),items['repo_url']))
        pxe_index = chr(ord(pxe_index)+1)
    fpxe.write(PXE_FOOTER + '\n')
    # Fix: was `fpxe.close` (missing parentheses), so the file was never
    # explicitly closed/flushed.
    fpxe.close()
    # Remove the out of service kernel&initrd files
    for root, dirs, files in os.walk(loader_dir, topdown=False):
        names=sorted(files,reverse=True)
        for i in names[items_num*2:]:
            os.remove(os.path.join(root,i))
    flash(u'New entry was successfully posted','green')
    return redirect(url_for('form'))
| wnereiz/pxeat | pxeat/views.py | Python | gpl-3.0 | 12,105 |
# -*- coding: utf-8 -*-
from moha.vm import code as Code
class W_Root(object):
    """Base class for all wrapped (boxed) VM values."""
    def str(self):
        # Printable form; overridden by every concrete value type.
        return ''
class Type(object):
    """Plain wrapper around a type value."""
    def __init__(self, typeval):
        self.typeval = typeval
class Null(W_Root):
    """The VM's null value; one shared instance (module-level ``null``)."""
    def str(self):
        return 'null'
    def __repr__(self):
        return 'null'
    @classmethod
    def singleton(cls):
        # Always hand out the shared instance so identity checks work.
        return null
null = Null()
class Boolean(W_Root):
    """Wrapped VM boolean; ``true``/``false`` below are the shared instances."""
    def __init__(self, boolval):
        self.boolval = boolval
    def str(self):
        if self.boolval:
            return 'true'
        return 'false'
    def eq(self, other):
        # A non-boolean never compares equal to a boolean.
        if isinstance(other, Boolean):
            return Boolean.from_raw(self.boolval == other.boolval)
        return Boolean.from_raw(False)
    def is_true(self):
        return self.boolval
    @classmethod
    def from_raw(cls, b):
        # Map a host-level bool onto the shared singletons.
        if b:
            return true
        return false
true = Boolean(True)
false = Boolean(False)
def index_string(string, index):
    # Builtin-method impl bound into String: <s>.index(i) -> 1-char String.
    return string.index(index)
def length_string(string):
    # Builtin-method impl bound into String: <s>.length() -> Integer.
    return string.length()
class Object(W_Root):
    """Generic VM object: a string-keyed dictionary of wrapped values."""
    def __init__(self):
        self.dictionary = {}
    def build_map(self, data):
        """Populate from a flat [key1, value1, key2, value2, ...] list.

        Fix: the original indexed data[i]/data[i+1], which only covered the
        first half of the list and paired each value with the following key;
        keys sit at even offsets with their value immediately after.
        """
        size = len(data) // 2
        for i in range(size):
            self.dictionary[data[2 * i].str()] = data[2 * i + 1]
    def get(self, key):
        return self.dictionary[key.str()]
    def set(self, key, value):
        self.dictionary[key.str()] = value
    def has(self, key):
        return Boolean.from_raw(key.str() in self.dictionary)
    def delete(self, key):
        del self.dictionary[key.str()]
    def str(self):
        # Python 2 iteritems: this VM targets RPython/Python 2.
        return '{%s}' % ','.join(['%s:%s' % (key, value.str()) for key, value in self.dictionary.iteritems()])
class String(Object):
    """Wrapped string; exposes index/length as builtin Function members."""
    def __init__(self, strval):
        self.strval = strval
        # NOTE: deliberately does not call Object.__init__; the dictionary
        # holds only the builtin methods.
        self.dictionary = {'index': Function(None, None, instancefunc_2=index_string),
                           'length': Function(None, None, instancefunc_1=length_string),
                          }
    def index(self, i):
        # i is a wrapped Integer; returns a one-character String.
        char = self.strval[int(i.intval)]
        return String(char)
    def length(self):
        return Integer(len(self.strval))
    def eq(self, other):
        # Compares against the other value's printable form.
        return Boolean.from_raw(self.strval == other.str())
    def add(self, other):
        # String concatenation; the other operand is stringified.
        return String(self.strval + other.str())
    def __repr__(self):
        return "%s" % self.strval
    def str(self):
        return str(self.strval)
# Builtin-method impls bound into Array instances below.
def push_array(array, elem):
    return array.push(elem)
def pop_array(array):
    return array.pop()
def index_array(array, index):
    return array.index(index)
def length_array(array):
    return array.length()
class Array(Object):
    """Wrapped array; push/pop/index/length are exposed as builtin members."""
    def __init__(self, array=None):
        self.array = array or []
        # NOTE: deliberately does not call Object.__init__; the dictionary
        # holds only the builtin methods.
        self.dictionary = {'push': Function(None, None, instancefunc_2=push_array),
                           'pop': Function(None, None, instancefunc_1=pop_array),
                           'index': Function(None, None, instancefunc_2=index_array),
                           'length': Function(None, None, instancefunc_1=length_array),
                          }
    def get(self, i):
        # Integer key -> element access; anything else -> builtin method.
        if isinstance(i, Integer):
            return self.index(i)
        return self.dictionary[i.str()]
    def copy(self, array):
        # Append every item of a host-level list into this array.
        for item in array:
            self.array.append(item)
    def index(self, i):
        return self.array[int(i.intval)]
    def push(self, elem):
        self.array.append(elem)
        return Null.singleton()
    def pop(self):
        return self.array.pop()
    def has(self, elem):
        return Boolean.from_raw(elem in self.array)
    def eq(self, other):
        # Deep element-wise comparison via each element's own eq().
        if not isinstance(other, Array):
            return Boolean.from_raw(False)
        if len(other.array) != len(self.array):
            return Boolean.from_raw(False)
        for index, elem in enumerate(self.array):
            if not elem.eq(other.array[index]).is_true():
                return Boolean.from_raw(False)
        return Boolean.from_raw(True)
    def length(self):
        return Integer(len(self.array))
    def str(self):
        return '[%s]' % ','.join([a.str() for a in self.array])
    def set(self, key, value):
        # key is a wrapped Integer index.
        self.array[key.intval] = value
class Integer(W_Root):
    """Wrapped integer with arithmetic and comparison helpers.

    Arithmetic raises Exception("wrong type") when the other operand is not
    an Integer; comparisons return wrapped Booleans.
    """
    def __init__(self, intval):
        self.intval = int(intval)
    def __repr__(self):
        return '%d' % self.intval
    def str(self):
        # Fix: str() was defined twice in this class; only the last
        # definition was effective, so the dead duplicate is removed.
        return str(self.intval)
    def neg(self):
        return Integer(-1 * self.intval)
    def add(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        return Integer(self.intval + other.intval)
    def mul(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        return Integer(self.intval * other.intval)
    def div(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        # '/' is integer division under the Python 2 semantics this VM
        # (RPython) is written for.
        return Integer(self.intval / other.intval)
    def mod(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        return Integer(self.intval % other.intval)
    def lt(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        return Boolean.from_raw(self.intval < other.intval)
    def gt(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        return Boolean.from_raw(self.intval > other.intval)
    def eq(self, other):
        if not isinstance(other, Integer):
            raise Exception("wrong type")
        return Boolean.from_raw(self.intval == other.intval)
    def is_true(self):
        return self.intval != 0
class Float(W_Root):
    """Wrapped float; mirrors Integer but only implements add/neg/compare."""
    def __init__(self, floatval):
        assert(isinstance(floatval, float))
        self.floatval = floatval
    def __repr__(self):
        return "%f" % self.floatval
    def str(self):
        # Fix: str() was defined twice in this class; only this last
        # definition was effective, so the dead duplicate (which returned
        # the "%f"-formatted repr) is removed.
        return str(self.floatval)
    def neg(self):
        return Float(-1 * self.floatval)
    def add(self, other):
        if not isinstance(other, Float):
            raise Exception("wrong type")
        return Float(self.floatval + other.floatval)
    def lt(self, other):
        if not isinstance(other, Float):
            raise Exception("wrong type")
        return Boolean.from_raw(self.floatval < other.floatval)
    def gt(self, other):
        if not isinstance(other, Float):
            raise Exception("wrong type")
        return Boolean.from_raw(self.floatval > other.floatval)
    def eq(self, other):
        if not isinstance(other, Float):
            raise Exception("wrong type")
        return Boolean.from_raw(self.floatval == other.floatval)
class Function(W_Root):
    """Wrapped callable.

    Exactly one of the sources is normally set: VM bytecode, an interpreter
    function, or a builtin instancefunc_N taking N positional arguments
    (the receiver first).  `obj` is the bound receiver, if any.
    """
    def __init__(self, bytecode=None, interpfunc=None, instancefunc_0=None, instancefunc_1=None, instancefunc_2=None, instancefunc_3=None, obj=None):
        self.bytecode = bytecode
        self.interpfunc = interpfunc
        self.obj = obj
        self.instancefunc_0 = instancefunc_0
        self.instancefunc_1 = instancefunc_1
        self.instancefunc_2 = instancefunc_2
        self.instancefunc_3 = instancefunc_3
    def __repr__(self):
        return '<func>'
    def str(self):
        return '<func>'
class CallableArgs(W_Root):
    """Wrapper holding a call's argument list."""
    def __init__(self, args):
        self.args = args
class Module(W_Root):
    """Wrapped module: exposes a frame's variables by name."""
    def __init__(self, frame):
        self.frame = frame
    def get(self, varname):
        # Translate the wrapped name into the frame's variable slot index.
        index = self.frame.bytecode.vars.keys_to_index[varname.str()]
        return self.frame.vars[index]
class Sys(W_Root):
    """Interpreter-level environment info (cwd, executable, env path)."""
    def __init__(self):
        self.data = {}
    def get_cwd(self):
        return self.data['cwd']
    def set_cwd(self, path):
        self.data['cwd'] = path
    def set_executable(self, path):
        self.data['executable'] = path
    def get_executable(self):
        return self.data['executable']
    def set_env_path(self, path):
        self.data['env_path'] = path
    def get_env_path(self):
        return self.data['env_path']
    def get_bin_path(self):
        # Binaries live under <env_path>/bin.
        return '%s/bin' % self.data['env_path']
    def get_libs_path(self):
        # Libraries live under <env_path>/libs.
        env_path = self.get_env_path()
        return '%s/libs' % env_path
class Bytecode(object):
    """Compiled code object: opcode stream plus constants/vars/names tables.

    `code` is a flat list of alternating (opcode, argument) ints; `_immutable_fields_`
    is an RPython JIT hint.
    """
    _immutable_fields_ = ['code', 'constants[*]', 'numvars']
    def __init__(self, code, constants, vars, names):
        self.code = code
        self.constants = constants
        self.vars = vars
        self.names = names
        self.numvars = self.vars.size()
    def __repr__(self):
        return '<bytecode>'
    def dump(self):
        """Return a human-readable disassembly, one instruction per line."""
        lines = []
        i = 0
        # Instructions are 2 ints wide: opcode followed by its argument.
        for i in range(0, len(self.code), 2):
            _code = self.code[i]
            arg = self.code[i + 1]
            line = ""
            attrname = Code.pretty(_code)
            line += "%d %s %d" % (i, attrname, arg)
            # Annotate operands that reference the constant/variable tables.
            if attrname == 'LOAD_CONST':
                line += " (%s)" % self.constants[arg]
            elif attrname == 'LOAD_VAR' or attrname == 'STORE_VAR':
                line += " (%s)" % self.vars.keys[arg]
            lines.append(line)
        return '\n'.join(lines)
| mohalang/moha | moha/vm/objects.py | Python | apache-2.0 | 9,139 |
# Django Evolution mutation sequence for this app; each entry names a
# sibling evolution module (presumably 'is_private' adds the corresponding
# account field — confirm against the is_private module).
SEQUENCE = [
    'is_private',
]
| chazy/reviewboard | reviewboard/accounts/evolutions/__init__.py | Python | mit | 33 |
# -*- coding: utf-8 -*-
#:-----------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para enlaces a torrent y magnet versión mínima
# adaptadtado a livestream
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urllib
def get_video_url( page_url ):
    """Wrap a torrent/magnet link into a Pulsar plugin play URL (Python 2)."""
    # quote_plus so the whole magnet/torrent URI survives as one query value.
    data = urllib.quote_plus(page_url)
    video_url = 'plugin://plugin.video.pulsar/play?uri='+data
    return video_url
| gypogypo/plugin.video.gypo | servers/pulsar.py | Python | gpl-3.0 | 511 |
# -*- coding: utf-8 -*-
import os
import unittest
import dukpy
from diffreport import report_diff
from dukpy.lessc import LessCompilerError
class TestTranspilers(unittest.TestCase):
    """Exercise the dukpy-bundled transpilers (CoffeeScript, Babel,
    TypeScript, JSX, LESS) against known expected outputs."""

    def test_coffee(self):
        """CoffeeScript compiles to the expected JS closure."""
        ans = dukpy.coffee_compile('''
    fill = (container, liquid = "coffee") ->
        "Filling the #{container} with #{liquid}..."
''')
        assert ans == '''(function() {
  var fill;
  fill = function(container, liquid) {
    if (liquid == null) {
      liquid = "coffee";
    }
    return "Filling the " + container + " with " + liquid + "...";
  };
}).call(this);
'''

    def test_babel(self):
        """Babel lowers an ES6 class to a function-based equivalent."""
        ans = dukpy.babel_compile('''
class Point {
    constructor(x, y) {
        this.x = x;
        this.y = y;
    }
    toString() {
        return '(' + this.x + ', ' + this.y + ')';
    }
}
''')
        assert '''var Point = function () {
  function Point(x, y) {
''' in ans['code'], ans['code']

    def test_typescript(self):
        """TypeScript output uses the System.register module wrapper."""
        ans = dukpy.typescript_compile('''
class Greeter {
    constructor(public greeting: string) { }
    greet() {
        return "<h1>" + this.greeting + "</h1>";
    }
};
var greeter = new Greeter("Hello, world!");
''')
        expected = """System.register([], function(exports_1) {
    var Greeter, greeter;
    return {
        setters:[],
        execute: function() {
            var Greeter = (function () {
                function Greeter(greeting) {
                    this.greeting = greeting;
                }
                Greeter.prototype.greet = function () {
                    return "<h1>" + this.greeting + "</h1>";
                };
                return Greeter;
            })();
            ;
            var greeter = new Greeter("Hello, world!");
        }
    }
});"""
        assert expected in ans, report_diff(expected, ans)

    def test_jsx(self):
        """A JSX element becomes a React.createElement call."""
        ans = dukpy.jsx_compile('var react_hello = <h1>Hello, world!</h1>;')
        expected = """"use strict";
var react_hello = React.createElement(\n  "h1",\n  null,\n  "Hello, world!"\n);"""
        assert expected == ans, report_diff(expected, ans)

    def test_jsx6(self):
        """JSX + ES6 classes compile together (Babel _createClass helper)."""
        ans = dukpy.jsx_compile('''
import Component from 'react';

class HelloWorld extends Component {
  render() {
    return (
      <div className="helloworld">
        Hello {this.props.data.name}
      </div>
    );
  }
}
''')
        assert '_createClass(HelloWorld,' in ans, ans

    def test_less(self):
        """LESS imports, mixins and colour functions produce expected CSS."""
        ans = dukpy.less_compile('''
@import "files/colors.less";

.box-shadow(@style, @c) when (iscolor(@c)) {
  -webkit-box-shadow: @style @c;
  box-shadow: @style @c;
}
.box-shadow(@style, @alpha: 50%) when (isnumber(@alpha)) {
  .box-shadow(@style, rgba(0, 0, 0, @alpha));
}
.box {
  color: saturate(@green, 5%);
  border-color: lighten(@green, 30%);
  div { .box-shadow(0 0 5px, 30%) }
}
''', options={'paths': [os.path.dirname(__file__)]})
        expected = '''box {
  color: #7cb029;
  border-color: #c2e191;
}
.box div {
  -webkit-box-shadow: 0 0 5px rgba(0, 0, 0, 0.3);
  box-shadow: 0 0 5px rgba(0, 0, 0, 0.3);
}'''
        assert expected in ans, report_diff(expected, ans)

    def test_less_errors(self):
        """A missing @import surfaces as a LessCompilerError."""
        with self.assertRaises(LessCompilerError) as err:
            dukpy.less_compile('@import "files/missing.less";')
        assert "files/missing.less' wasn't found." in str(err.exception)
| amol-/dukpy | tests/test_transpilers.py | Python | mit | 3,375 |
# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2015 Rémi Duraffort
# This file is part of DataTag.
#
# DataTag is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DataTag is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with DataTag. If not, see <http://www.gnu.org/licenses/>
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as v_auth
from django.core.urlresolvers import reverse_lazy
from DataTag.views import account as v_account
from DataTag.views import category as v_category
from DataTag.views import main as v_main
from DataTag.views import media as v_media
from DataTag.views import tag as v_tag
from DataTag.views.account import DTAuthenticationForm, DTPasswordChangeForm
# URL routing table for DataTag: index, auth (django.contrib.auth views with
# custom templates/forms), account management, media serving, tag and
# category browsing.
urlpatterns = [
    # Main view
    url(r'^$', v_main.index, name='index'),
    # Authentication
    url(r'^accounts/login/$', v_auth.login, {'template_name': 'DataTag/account/login.html', 'authentication_form': DTAuthenticationForm}, name='accounts.login'),
    url(r'^accounts/logout/$', v_auth.logout, {'template_name': 'DataTag/account/logged_out.html'}, name='accounts.logout'),
    url(r'^accounts/password/change/$', v_auth.password_change, {'template_name': 'DataTag/account/password_change.html', 'password_change_form': DTPasswordChangeForm, 'post_change_redirect': reverse_lazy('accounts.password_change_done')}, name='accounts.password_change'),
    # Account
    url(r'^accounts/register/$', v_account.register, name='accounts.register'),
    url(r'^accounts/profile/$', v_account.profile, name='accounts.profile'),
    url(r'^accounts/profile/update/$', v_account.update, name='accounts.profile.update'),
    url(r'^accounts/password/change/done/$', v_account.password_change_done, name='accounts.password_change_done'),
    # Medias
    url(r'^medias/(?P<path>.*$)', v_media.get_media, name='media'),
    # Tags
    url(r'^tags(?P<path>/.+)/$', v_tag.details, name='tags.details'),
    url(r'^browse/$', v_tag.browse, {'path': ''}, name='tags.browse.root'),
    url(r'^browse(?P<path>/.*)/$', v_tag.browse, name='tags.browse'),
    # Categories
    url(r'^categories/$', v_category.browse, name='categories.browse'),
    url(r'^categories/(?P<name>[^/]+)/$', v_category.details, {'path': ''}, name='categories.details.root'),
    url(r'^categories/(?P<name>[^/]+)(?P<path>/.*)/$', v_category.details, name='categories.details'),
]
| ivoire/DataTag | DataTag/urls.py | Python | agpl-3.0 | 2,869 |
from app import app, gl
from database import db, rds
import json
from flask import jsonify
from flask import stream_with_context
from flask import render_template
from flask import url_for
from flask import redirect
from flask import Response
from flask import request
from flask import make_response
from flask import abort
import requests
from helpers import *
import datetime
from config import APP_ROOT, OPENCPUURL
import flask
import ast
import urlparse
import re
import os
import sys
# TODO: split up this
@app.route('/get_comparison_upload/', methods=['POST'])
def get_comparison_upload():
    """Upload region features CSV to OpenCPU, then fetch comparison data
    for the posted targetRegion (Python 2; timing printed for debugging)."""
    start = datetime.datetime.now()
    # Stage the CSV on the OpenCPU server first; `d` references the upload.
    with open(os.path.join(APP_ROOT, 'static', 'data', 'region_features.csv')) as f:
        files = {'data': f.read()}
    endpoint = 'ocpu/library/rlines/R/store_csv/'
    d = ocpu_wrapper(url=endpoint, files=files)
    d.perform()
    input_data = json.loads(request.data)
    data = {
        'target.region':str(input_data['targetRegion']) ,
        'comparison.vars':['completeness','b01001001','counts'],
        'input_data':d,
    }# Convert input data from unicode
    header = { 'content-type': 'application/x-www-form-urlencoded' } # Set header for ocpu
    endpoint = 'ocpu/library/rlines/R/get_features_data/'
    print 'About to create ocpu object in %s' % str(datetime.datetime.now() - start)
    d = ocpu_wrapper(url=endpoint, data=dict_to_r_args(data), header=header)
    print '  ocpu object created in %s' % str(datetime.datetime.now() - start)
    res = d.perform()
    print 'Main query performed in %s' % str(datetime.datetime.now() - start)
    if not res.ok:
        # Propagate the OpenCPU failure status to the client.
        return abort(res.status_code)
    req = d.get_ocpu_response()
    return make_response(req.content), req.status_code
    # Note: jsonify() will not take a list of dicts
    #return jsonify(d.get_result_object())
@app.route('/get_diffindiff_upload/', methods=['POST'])
def get_diffindiff_upload():
    """
    This is currently a toy function which takes a file ID and does the diff-in-diff call here
    """
    start = datetime.datetime.now()
    input_data = json.loads(request.data)
    # Stage the counts CSV on the OpenCPU server; `d` references the upload.
    with open(os.path.join(APP_ROOT, 'static', 'data', 'counts.csv')) as f:
        files = {'data': f.read()}
    endpoint = 'ocpu/library/rlines/R/store_csv/'
    d = ocpu_wrapper(url=endpoint, files=files)
    d.perform()
    # Load in the counts.csv file
    data = {
        'target.region':str(input_data['targetRegion']),
        'comparison.region.set':[str(i) for i in input_data['comparisonRegionSet']],
        'event.date':str(input_data['eventDate']),
        'input_data':d,
    }# Convert input data from unicode
    # Optional tuning flags are forwarded only when present.
    if input_data.has_key('logged'):
        data['logged'] = input_data['logged']
    if input_data.has_key('normalize'):
        data['normalize'] = input_data['normalize']
    header = { 'content-type': 'application/x-www-form-urlencoded' } # Set header for ocpu
    endpoint = 'ocpu/library/rlines/R/diffindiff_data/'
    print('About to create ocpu object in %s' % str(datetime.datetime.now() - start))
    print(dict_to_r_args(data))
    d = ocpu_wrapper(url=endpoint, data=dict_to_r_args(data), header=header)
    print('ocpu object created in %s' % str(datetime.datetime.now() - start))
    res = d.perform()
    print('Main query performed in %s' % str(datetime.datetime.now() - start))
    if not res.ok:
        abort(res.status_code)
    req = d.get_ocpu_response()
    return make_response(req.content), req.status_code
# TODO: refactor geotag into its own module
@app.route('/rest/geotag/suggest/<q>')
def table_suggest(q):
    """Proxy Census Reporter's table search; return {'results': [...]} JSON.

    An unparseable upstream body yields an empty result list rather than a
    server error.
    """
    endpoint = 'http://api.censusreporter.org/1.0/table/search?q={query}'
    r = requests.get(endpoint.format(query=q))
    try:
        results = r.json()
    except ValueError:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  requests raises ValueError (JSONDecodeError
        # is a subclass) for an invalid JSON body.
        results = []
    return jsonify({'results': results})
@app.route('/geotag/export/')
def exporter():
    """
    Export additional census information for geotagged locations.
    """
    tables = request.args.get('tables')
    print tables
    census_base = 'http://api.censusreporter.org/1.0/data/show/latest?table_ids={t}&geo_ids={g}'
    q = db.session.execute('''SELECT geo_id
                            FROM location_data
                            WHERE geo_id IS NOT NULL''')
    # hacky list of places that don't show up in the census reporter
    ignore = {'31000US42060','31400US3562020764','31400US4790013644',
            '31000US31100','31400US3562020764','31000US39100','31000US26180',
            '31000US31100','33000US442','31000US43860'}
    ids = [x[0] for x in q if x[0] not in ignore]
    #############
    # : HTTP GET is capped at 8kb requests, geo_ids are ~12 bytes each
    # : Slightly conservative limit of 512 geo_ids per request
    #############
    n = 512
    id_splits = [ids[i:i+n] for i in xrange(0, len(ids), n)]
    results = []
    for id_chunk in id_splits:
        id_str = ','.join(id_chunk)
        query_url = census_base.format(t=tables, g=id_str)
        r = requests.get(query_url)
        resp = r.json()
        results.append(resp)
    # TODO: merge results
    # NOTE(review): the body is JSON but the mimetype says text/csv —
    # confirm whether the consumer relies on that mismatch.
    return Response(json.dumps(results), mimetype='text/csv')
@app.route('/')
def geotagger():
    """
    interactive geotagging home page

    Splits location_data rows into three buckets:
      * trashed  -- rows flagged discard (labelled tagged/untagged)
      * untagged -- rows with no population (no geo match yet)
      * tagged   -- geolocated rows, keyed by geo_name (quotes stripped)
    """
    q = db.session.execute('''SELECT loc_id, location, count, geo_name,
                                     pop, lon, lat, discard
                              FROM location_data''')
    trashed, ut, t = [], [], {}
    for row in q:
        rd = {k: v for k, v in row.items()}
        if rd['discard']:
            # filter out discarded place names
            label = 'untagged' if not rd['pop'] else 'tagged'
            trashed.append((rd['location'], rd['count'], rd['loc_id'], label))
        elif not rd['pop']:
            # place names without geolocation
            ut.append((rd['location'], rd['count'], rd['loc_id']))
        else:
            # replace all single quotes
            # (geo_name is embedded in the template's JSON payload)
            loc_stripped = re.sub("'", '', rd['geo_name'])
            t[loc_stripped] = {
                'count': rd['count'],
                'ratio': float(rd['count'])/rd['pop'],
                'lng': rd['lon'],
                'lat': rd['lat'],
                'id': rd['loc_id']
            }
    # most-mentioned locations first
    ut = sorted(ut, key=lambda x: x[1], reverse=True)
    trashed = sorted(trashed, key=lambda x: x[1], reverse=True)
    return render_template('geotag.html', untagged=ut, trashed=trashed, tagged=json.dumps(t))
@app.route('/geotag/discard/<loc_id>')
def geotag_discard(loc_id):
    """Flag one location row as discarded so it is hidden from tagging."""
    db.session.execute('''UPDATE location_data SET discard=true
                          WHERE loc_id=:ld''',
                       {'ld': loc_id})
    db.session.commit()
    # return blank page
    return ''
@app.route('/geotag/update/', methods=['GET', 'POST'])
def geotag_update():
    """Attach census geodata (id, name, population, lon/lat) to a location.

    Expects form fields loc_id, geo_id, geo_name, population, longitude
    and latitude; also clears any previous discard flag on the row.
    """
    print request.form
    loc_id = request.form['loc_id']
    gid = request.form['geo_id']
    name = request.form['geo_name']
    pop = request.form['population']
    lon = request.form['longitude']
    lat = request.form['latitude']
    db.session.execute('''UPDATE location_data SET geo_id=:gid,
                                                   geo_name=:name,
                                                   pop=:pop,
                                                   discard=false,
                                                   lat=:lat,
                                                   lon=:lon
                          WHERE loc_id=:ld''',
                       dict(gid=gid, name=name, pop=pop, ld=loc_id, lon=lon,
                            lat=lat))
    db.session.commit()
    # return blank page
    return ''
@app.route('/rest/overview/suggest/<q>')
def autocomplete_name(q):
    """Autocomplete place names through the Census Reporter geo search.

    Returns ``{'results': [...]}`` where each entry carries name,
    full_geoid, population and location; an empty list when the
    upstream call fails or its payload is malformed.
    """
    endpoint = 'http://api.censusreporter.org/1.0/geo/elasticsearch?q={query}'
    r = requests.get(endpoint.format(query=q))
    results = []
    try:
        d = r.json()['results']
        for row in d:
            results.append(
                {
                    'name': row['display_name'],
                    'full_geoid': row['full_geoid'],
                    'population': row['population'],
                    'location': row['location']
                })
    except (ValueError, KeyError, IndexError):
        # BUG FIX: the previous clause read `except KeyError, IndexError`,
        # which in Python 2 means "catch KeyError and bind it to the name
        # IndexError" -- IndexError itself was never caught.  ValueError
        # additionally covers a non-JSON upstream response.
        pass
    return jsonify({'results': results})
@app.route('/rest/overview/timeseries/')
def time_series():
    """Return the daily event counts as a JSON time series.

    Epoch-second days are scaled to milliseconds for the client-side
    charting code.
    """
    rows = db.session.execute('SELECT * FROM temporal ORDER BY day')
    series = []
    for row in rows:
        record = dict(row.items())
        series.append({
            'day': record['day']*1000,
            'count': record['count']
        })
    return jsonify({'results': series})
# TODO: move all the rest functions into a blueprint
@app.route('/rest/overview/locationtime/')
def location_time():
    """
    return locationtime data

    Builds, per location, a record with lat/lon, population and display
    name (looked up in the redis '_hits' hash) plus a list of per-day
    counts.  The full payload is cached in redis under 'locationtime'.
    Rows for the same location must be adjacent in the query result.
    """
    resp = rds.get('locationtime')
    if resp:
        # serve the cached JSON payload
        return Response(resp, status=200, mimetype='application/json')
    q = db.session.execute('SELECT * FROM locationtime')
    result = []
    timeseries = []
    current_location = 'unset'
    r = {}
    for row in q:
        rd = {k: v for k, v in row.items()}
        # construct the data structure
        # it consists of a location, lat, lon,
        # and a list of timeseries. Same locations
        # are always adjacent in the database.
        if current_location != rd['location']:
            current_location = rd['location']
            a = gl.parse(current_location)
            city, state = a['place'] or '', a['state'] or ''
            city_state = u'{} {}'.format(city, state)
            place_data_str = rds.hget('_hits', city_state)
            place_data = {}
            if place_data_str:
                # '_hits' values are python-literal strings
                place_data = ast.literal_eval(place_data_str)
            if r:
                # flush the previous location's accumulated series
                r['timeseries'] = timeseries[:]
                result.append(r.copy())
            timeseries = []
            r = {
                'lat': str(rd['lat']),
                'lon': str(rd['lon']),
                'location': rd['location'],
                'timeseries': [],
                'pop': place_data.get('population', None),
                'display_name': place_data.get('display_name', None)
            }
        else:
            timeseries.append({
                'count': str(rd['count']),
                'day': str(rd['day']*1000)
            })
    # NOTE(review): the final location's record is never appended to
    # `result` after the loop ends, and the first row of each location is
    # not added to its timeseries (the append happens only in the `else`
    # branch) -- confirm whether this is intended.
    obj = json.dumps({'results': result})
    rds.set('locationtime', obj)
    return Response(obj, status=200, mimetype='application/json')
@app.route('/<path:path>')
def map(path):
    # Catch-all route: serve any unmatched path as a static file.
    # NOTE: this view shadows the builtin `map` at module scope.
    return app.send_static_file(path)
# Generic proxy for OpenCPU endpoints
@app.route('/ocpu/<path:path>', methods=['GET', 'POST'])
def ocpu(path):
    """Forward /ocpu/* requests verbatim to the backing OpenCPU server."""
    # OCPU is hosted on port 80, but this server might not be
    endpoint = urlparse.urljoin(OPENCPUURL, 'ocpu/' + path)
    request_args = dict(
        method=request.method,
        url=endpoint,
        allow_redirects=True,
        # Required for OpenCPU
        headers={'Content-Type': 'application/x-www-form-urlencoded'},
        timeout=10000
    )
    if request.method == 'POST':
        # pass the raw request body through unchanged
        request_args['data'] = request.get_data()
    resp = requests.request(**request_args)
    # relay OpenCPU's body and status code to the caller
    return make_response(resp.content), resp.status_code
| giantoak/tempus | app/views.py | Python | mit | 11,432 |
import sublime
import sublime_plugin
from ... import utils
# TestBox BDD functions whose first (or named `title`) argument is the
# spec description shown in the outline panel.
testbox_function_names = [
    "describe",
    "it",
    "feature",
    "scenario",
    "story",
    "given",
    "when",
    "then",
]
# Sublime scope selectors used to locate function calls, string
# arguments and named parameters in CFML syntax highlighting.
func_call_scope = "meta.function-call.cfml variable.function.cfml"
string_scope = "meta.function-call.cfml meta.function-call.parameters.cfml meta.string"
func_param_name_scope = "meta.function-call.cfml meta.function-call.parameters.cfml entity.other.function-parameter.cfml"
class TestboxSpecOutlineCommand(sublime_plugin.TextCommand):
    """Show a quick panel outlining the TestBox specs in the current view.

    Collects describe/it/... calls whose description string can be
    located, indents entries by nesting depth, and pre-selects the spec
    closest to the caret.  Picking an entry jumps to its description;
    cancelling restores the original selection and viewport.
    """

    def run(self, edit):
        # sanity check
        if not self.view.match_selector(0, "embedding.cfml"):
            return
        self.outline = []
        self.outline_regions = []
        # (index into outline_regions, distance from the caret)
        self.selected_index = 0, None
        self.current_regions = [r for r in self.view.sel()]
        self.viewport_position = self.view.viewport_position()
        # collect title parameters
        title_params = {}
        for r in self.view.find_by_selector(func_param_name_scope):
            if self.view.substr(r).lower() == "title":
                title_params[r.begin()] = r
        # collect strings
        string_descriptions = {
            r.begin(): r for r in self.view.find_by_selector(string_scope)
        }
        # build outline
        for r in self.view.find_by_selector(func_call_scope):
            func_name = self.view.substr(r).lower()
            # continue if not a testbox func call
            if func_name not in testbox_function_names:
                continue
            # find the `(` after the func name
            next_pt = utils.get_next_character(self.view, r.end())
            # find the next pt - could be string start or named title param
            next_pt = utils.get_next_character(self.view, next_pt + 1)
            # check for named title param
            if next_pt in title_params:
                # get `=`
                next_pt = utils.get_next_character(
                    self.view, title_params[next_pt].end()
                )
                # look for title string
                next_pt = utils.get_next_character(self.view, next_pt + 1)
            if next_pt in string_descriptions:
                # looks like a something we care about
                string_region = string_descriptions[next_pt]
                # nesting depth = number of enclosing function bodies
                depth = (
                    self.view.scope_name(r.begin()).count("meta.function.body.cfml") - 1
                )
                indent = " " * depth
                # strip the quotes from the description string
                self.outline.append(
                    indent + func_name + ": " + self.view.substr(string_region)[1:-1]
                )
                self.outline_regions.append(string_region)
                distance = self.distance(string_region)
                if self.selected_index[1] is None or distance < self.selected_index[1]:
                    self.selected_index = len(self.outline_regions) - 1, distance
        if len(self.outline) == 0:
            return
        self.view.window().show_quick_panel(
            self.outline,
            self.on_done,
            selected_index=self.selected_index[0],
            on_highlight=self.on_highlight,
        )

    def distance(self, region):
        """Distance in points from the primary caret to *region* (0 if inside)."""
        pt = self.current_regions[0].begin()
        if region.contains(pt):
            return 0
        if pt < region.begin():
            return region.begin() - pt
        return pt - region.end()

    def refresh(self):
        # workaround the selection updates not being drawn
        self.view.add_regions("force_refresh", [])
        self.view.erase_regions("force_refresh")

    def on_done(self, i):
        """Quick-panel commit handler; restores state when cancelled."""
        if i == -1:
            # nothing selected, reset
            self.view.sel().clear()
            self.view.sel().add_all(self.current_regions)
            self.view.set_viewport_position(self.viewport_position)

    def on_highlight(self, i):
        """Move the selection and viewport to the highlighted spec."""
        self.view.sel().clear()
        self.view.sel().add(self.outline_regions[i])
        self.view.show_at_center(self.outline_regions[i])
        self.refresh()
| jcberquist/sublimetext-cfml | src/plugins_/testbox/testbox_spec_outline.py | Python | mit | 4,033 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""Script to generate libyal m4 files."""
import argparse
import logging
import os
import sys
from yaldevtools import configuration
def Main():
    """The main program function.

    Parses command line options, validates them, loads the project
    configuration and (eventually) generates the m4 file.

    Returns:
      bool: True if successful or False if not.
    """
    parser = argparse.ArgumentParser(description=(
        'Generates a m4 file for a libyal library.'))
    parser.add_argument(
        'configuration_file', action='store', metavar='CONFIGURATION_FILE',
        default='source.conf', help='The source generation configuration file.')
    parser.add_argument(
        '-o', '--output', dest='output_directory', action='store',
        metavar='OUTPUT_DIRECTORY', default=None,
        help='path of the output files to write to.')
    parser.add_argument(
        '-p', '--projects', dest='projects_directory', action='store',
        metavar='PROJECTS_DIRECTORY', default=None,
        help='path of the projects.')
    options = parser.parse_args()

    # Validate the command line arguments before doing any real work.
    if not options.configuration_file:
        print('Configuration file missing.')
        print('')
        parser.print_help()
        print('')
        return False

    if not os.path.exists(options.configuration_file):
        print('No such configuration file: {0:s}.'.format(
            options.configuration_file))
        print('')
        return False

    if options.output_directory and not os.path.exists(options.output_directory):
        print('No such output directory: {0:s}.'.format(options.output_directory))
        print('')
        return False

    logging.basicConfig(
        level=logging.INFO, format='[%(levelname)s] %(message)s')

    project_configuration = configuration.ProjectConfiguration()
    project_configuration.ReadFromFile(options.configuration_file)

    # scripts/ lives one level below the libyal source tree.
    libyal_directory = os.path.dirname(os.path.dirname(
        os.path.abspath(__file__)))

    # Default the projects directory to the parent of the libyal tree.
    projects_directory = options.projects_directory
    if not projects_directory:
        projects_directory = os.path.dirname(libyal_directory)

    # TODO: generate m4 file
    return False
if __name__ == '__main__':
    # Exit status 0 on success, 1 on failure.
    sys.exit(0 if Main() else 1)
'''
Created on May 7, 2014
@author: cmills
'''
from client_test import TestTASRAppClient
import unittest
import tasr.registered_schema
import tasr.client_legacy
import copy
import httmock
# Module-level test app, shared by every test case below.
# NOTE(review): tasr.app is never imported directly in this module;
# presumably it is pulled in transitively by client_test/tasr imports
# above -- confirm.
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
class TestTASRLegacyClientMethods(TestTASRAppClient):
    """Tests for the legacy (topic-based) TASR client functions.

    Each test routes client HTTP calls to the in-process test app via
    httmock, and flushes the backing redis before and after so runs are
    isolated.
    """

    def setUp(self):
        super(TestTASRLegacyClientMethods, self).setUp()
        self.event_type = "gold"
        fix_rel_path = "schemas/%s.avsc" % (self.event_type)
        self.avsc_file = self.get_fixture_file(fix_rel_path, "r")
        self.schema_str = self.avsc_file.read()
        # client settings
        self.host = 'localhost'  # should match netloc below
        self.port = 8080  # should match netloc below
        # clear out all the keys before beginning -- careful!
        APP.ASR.redis.flushdb()

    def tearDown(self):
        # this clears out redis after each test -- careful!
        APP.ASR.redis.flushdb()

    ########################################################################
    # registration tests
    ########################################################################
    def bare_register_schema_skeleton(self, schema_str):
        '''register_schema_for_topic() - skeleton test'''
        # whitespace gets normalized, so do that locally to the submitted
        # schema string so we have an accurate target for comparison
        ras = tasr.registered_schema.RegisteredAvroSchema()
        ras.schema_str = schema_str
        canonical_schema_str = ras.canonical_schema_str
        with httmock.HTTMock(self.route_to_testapp):
            func = tasr.client_legacy.register_schema
            rs = func(self.event_type, schema_str, self.host, self.port)
            self.assertEqual(canonical_schema_str, rs.schema_str,
                             'Schema string modified!')
            self.assertIn(self.event_type, rs.group_names,
                          'Topic not in registered schema object.')
            self.assertIn(self.event_type, rs.ts_dict.keys(),
                          'Topic not in registration timestamps.')
            return rs

    def test_bare_register_schema(self):
        '''register_schema_for_topic() - as expected'''
        self.bare_register_schema_skeleton(self.schema_str)

    def test_bare_reg_fail_on_empty_schema(self):
        '''register_schema_for_topic() - fail on empty schema'''
        try:
            self.bare_register_schema_skeleton(None)
            self.fail('should have thrown a TASRError')
        except tasr.client_legacy.TASRError as te:
            self.assertTrue(te, 'Missing TASRError')

    def test_bare_reg_fail_on_invalid_schema(self):
        '''register_schema_for_topic() - fail on invalid schema'''
        try:
            bad_schema = '%s }' % self.schema_str
            self.bare_register_schema_skeleton(bad_schema)
            self.fail('should have thrown a ValueError')
        except tasr.client_legacy.TASRError:
            self.fail('should never have hit TASR, expected ValueError')
        except ValueError:
            pass

    def test_bare_reg_and_rereg(self):
        '''register_schema_for_topic() - multi calls, same schema'''
        rs1 = self.bare_register_schema_skeleton(self.schema_str)
        rs2 = self.bare_register_schema_skeleton(self.schema_str)
        self.assertEqual(rs1, rs2, 'reg and rereg schemas unequal!')

    ########################################################################
    # topic retrieval tests for TASR API
    ########################################################################
    def test_bare_get_get_all_topics_with_none_present(self):
        '''get_all_topics()'''
        with httmock.HTTMock(self.route_to_testapp):
            group_metas = tasr.client_legacy.get_all_topics(self.host, self.port)
            self.assertEqual(0, len(group_metas), 'expected no groups')

    def test_bare_get_get_all_topics_with_one_present(self):
        '''get_all_topics()'''
        self.bare_register_schema_skeleton(self.schema_str)
        with httmock.HTTMock(self.route_to_testapp):
            group_metas = tasr.client_legacy.get_all_topics(self.host, self.port)
            self.assertListEqual(group_metas.keys(), [self.event_type, ],
                                 'unexpected groups: %s' % group_metas.keys())

    ########################################################################
    # schema retrieval tests for TASR API
    ########################################################################
    def bare_get_for_id_str_skeleton(self, id_str):
        with httmock.HTTMock(self.route_to_testapp):
            func = tasr.client_legacy.schema_for_id_str
            rs = func(id_str, self.host, self.port)
            self.assertIn(id_str, (rs.sha256_id, rs.md5_id), 'ID missing')
            return rs

    def bare_get_for_topic_skeleton(self, topic, version):
        with httmock.HTTMock(self.route_to_testapp):
            func = tasr.client_legacy.get_schema_version
            return func(topic, version, self.host, self.port)

    def test_bare_reg_and_get_by_md5_id(self):
        '''get_reg_schema_for_id_str() - with md5 ID'''
        reg_rs = self.bare_register_schema_skeleton(self.schema_str)
        get_rs = self.bare_get_for_id_str_skeleton(reg_rs.md5_id)
        self.assertEqual(reg_rs, get_rs, 'got unexpected schema')

    def test_bare_reg_and_get_by_sha256_id(self):
        '''get_reg_schema_for_id_str() - with sha256 ID'''
        reg_rs = self.bare_register_schema_skeleton(self.schema_str)
        get_rs = self.bare_get_for_id_str_skeleton(reg_rs.sha256_id)
        self.assertEqual(reg_rs, get_rs, 'got unexpected schema')

    def test_bare_reg_and_get_non_existent_version(self):
        '''get_reg_schema_for_topic() - bad version'''
        reg_rs = self.bare_register_schema_skeleton(self.schema_str)
        bad_ver = reg_rs.current_version(self.event_type) + 1
        try:
            # BUG FIX: previously passed self.schema_str as the topic, so
            # this asked for an unknown topic rather than (as the docstring
            # says) a known topic with a non-existent version.
            self.bare_get_for_topic_skeleton(self.event_type, bad_ver)
            self.fail('Should have thrown an TASRError')
        except tasr.client_legacy.TASRError as te:
            self.assertTrue(te, 'Missing TASRError')

    def test_bare_reg_50_and_get_by_version(self):
        '''get_schema_for_topic() - multiple versions'''
        schemas = []
        for v in range(1, 50):
            ver_schema_str = copy.copy(self.schema_str)
            ver_schema_str = ver_schema_str.replace('tagged.events',
                                                    'tagged.events.%s' % v, 1)
            # whitespace gets normalized, so do that locally to the submitted
            # schema string so we have an accurate target for comparison
            ras = tasr.registered_schema.RegisteredAvroSchema()
            ras.schema_str = ver_schema_str
            canonical_ver_schema_str = ras.canonical_schema_str
            schemas.append(canonical_ver_schema_str)
            # reg with the non-canonicalized schema string
            rs = self.bare_register_schema_skeleton(ver_schema_str)
            self.assertEqual(canonical_ver_schema_str, rs.schema_str,
                             'Schema string modified!')
            self.assertIn(self.event_type, rs.group_names,
                          'Topic not in registered schema object.')
        # now pull them by version and check they match what we sent originally
        for v in range(1, 50):
            rs = self.bare_get_for_topic_skeleton(self.event_type, v)
            self.assertEqual(schemas[v - 1], rs.canonical_schema_str,
                             'Unexpected version.')

    def test_bare_reg_regmod_reg_then_get_ver_1(self):
        '''get_schema_for_topic() - non-sequential re-reg'''
        alt_schema_str = copy.copy(self.schema_str)
        alt_schema_str = alt_schema_str.replace('tagged.events',
                                                'tagged.events.alt', 1)
        rs1 = self.bare_register_schema_skeleton(self.schema_str)
        self.bare_register_schema_skeleton(alt_schema_str)
        rs3 = self.bare_register_schema_skeleton(self.schema_str)
        self.assertEqual(3, rs3.current_version(self.event_type),
                         'unexpected version')
        # now get version 1 -- should be same schema, and should list
        # requested version as "current"
        rs = self.bare_get_for_topic_skeleton(self.event_type, 1)
        self.assertEqual(rs1.canonical_schema_str, rs.canonical_schema_str,
                         'Unexpected schema string change between v1 and v3.')
        self.assertEqual(1, rs.current_version(self.event_type),
                         'Expected different current version value.')
if __name__ == "__main__":
    # Build and run the suite for this module's single test case.
    suite = unittest.TestLoader().loadTestsFromTestCase(
        TestTASRLegacyClientMethods)
    unittest.TextTestRunner(verbosity=2).run(suite)
| ifwe/tasr | test/pyunit/test_client_legacy_methods.py | Python | apache-2.0 | 8,858 |
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2017 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib import resolver_proxy
from resources.lib import download
from resources.lib.kodi_utils import get_kodi_version, get_selected_item_art, get_selected_item_label, get_selected_item_info
from resources.lib.menu_utils import item_post_treatment
import inputstreamhelper
import htmlement
import re
import json
import time
import urlquick
from kodi_six import xbmc
from kodi_six import xbmcgui
# Working for Python 2/3
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
# TO DO
# Add geoblock (info in JSON)
# Add Quality Mode
# RTBF Auvio endpoints.  %s placeholders are documented per constant.
URL_EMISSIONS_AUVIO = 'https://www.rtbf.be/auvio/emissions'
URL_JSON_EMISSION_BY_ID = 'https://www.rtbf.be/api/media/video?' \
    'method=getVideoListByEmissionOrdered&args[]=%s'
# emission_id
URL_CATEGORIES = 'https://www.rtbf.be/news/api/menu?site=media'
URL_SUB_CATEGORIES = 'https://www.rtbf.be/news/api/block?data[0][uuid]=%s&data[0][type]=widget&data[0][settings][id]=%s'
# data-uuid and part of data-uuid
URL_VIDEO_BY_ID = 'https://www.rtbf.be/auvio/embed/media?id=%s&autoplay=1'
# Video Id
URL_ROOT_IMAGE_RTBF = 'https://ds1.static.rtbf.be'
# live planning list; %s is the scraped partner key
URL_JSON_LIVE = 'https://www.rtbf.be/api/partner/generic/live/' \
    'planninglist?target_site=media&origin_site=media&category_id=0&' \
    'start_date=&offset=0&limit=15&partner_key=%s&v=8'
# current live programme; %s are channel and partner key
URL_JSON_LIVE_CHANNEL = 'http://www.rtbf.be/api/partner/generic/live/' \
    'planningcurrent?v=8&channel=%s&target_site=mediaz&partner_key=%s'
# Widevine license proxy; %s is the urlencoded customdata header
URL_LICENCE_KEY = 'https://wv-keyos.licensekeyserver.com/|%s|R{SSM}|'
URL_TOKEN = 'https://www.rtbf.be/api/partner/generic/drm/encauthxml?planning_id=%s&partner_key=%s'
# partener_key
URL_ROOT_LIVE = 'https://www.rtbf.be/auvio/direct#/'
def get_partener_key():
    """Scrape the RTBF partner key from the Auvio live page.

    The key is embedded as a ``partner_key`` literal in one of the
    javascript files referenced by the page, so each script is fetched
    in turn until a match is found.  Returns an empty string when no
    key could be located.
    """
    page = urlquick.get(URL_ROOT_LIVE, max_age=-1)
    script_urls = re.findall(
        r'<script type="text\/javascript" src="(.*?)">', page.text)
    for script_url in script_urls:
        script = urlquick.get(script_url)
        matches = re.findall('partner_key: \'(.+?)\'', script.text)
        if matches:
            return matches[0]
    return ''
def format_hours(date, **kwargs):
    """Extract the 'HH:MM' clock time from an ISO-8601 datetime string."""
    time_part = date.split('T')[1]
    return time_part[:5]
def format_day(date, **kwargs):
    """Extract the date part of an ISO-8601 datetime string as 'YYYY/MM/DD'."""
    day_part = date.split('T')[0]
    return day_part.replace('-', '/')
@Route.register
def list_categories(plugin, item_id, **kwargs):
    """Top-level Auvio menu: an 'all programs' entry plus one item per
    category from the RTBF menu API."""
    item = Listitem()
    item.label = plugin.localize(30717)
    item.set_callback(list_programs, item_id=item_id)
    item_post_treatment(item)
    yield item
    resp = urlquick.get(URL_CATEGORIES)
    json_parser = json.loads(resp.text)
    for item_datas in json_parser["item"]:
        if item_datas["@attributes"]["id"] == 'category':
            for category_datas in item_datas["item"]:
                # keep only entries whose id looks like 'category-<n>'
                if 'category-' in category_datas["@attributes"]["id"]:
                    category_title = category_datas["@attributes"]["name"]
                    category_url = category_datas["@attributes"]["url"]
                    item = Listitem()
                    item.label = category_title
                    item.set_callback(list_sub_categories,
                                      item_id=item_id,
                                      category_url=category_url)
                    item_post_treatment(item)
                    yield item
@Route.register
def list_programs(plugin, item_id, **kwargs):
    """List all Auvio programs scraped from the emissions page."""
    resp = urlquick.get(URL_EMISSIONS_AUVIO)
    root = resp.parse()
    for program_datas in root.iterfind(
            ".//article[@class='rtbf-media-item rtbf-media-item--program-wide col-xxs-12 col-xs-6 col-md-4 col-lg-3 ']"
    ):
        program_title = program_datas.find('.//a').get('title')
        program_image = ''
        # data-srcset is a space-separated srcset; pick a jpg candidate,
        # stripping the leading "<width>w," descriptor when present.
        list_program_image_datas = program_datas.find('.//img').get(
            'data-srcset').split(' ')
        for program_image_data in list_program_image_datas:
            if 'jpg' in program_image_data:
                if ',' in program_image_data:
                    program_image = program_image_data.split(',')[1]
                else:
                    program_image = program_image_data
        program_id = program_datas.get('data-id')
        item = Listitem()
        item.label = program_title
        item.art['thumb'] = item.art['landscape'] = program_image
        item.set_callback(list_videos_program,
                          item_id=item_id,
                          program_id=program_id)
        item_post_treatment(item)
        yield item
@Route.register
def list_videos_program(plugin, item_id, program_id, **kwargs):
    """List the videos of one program via the getVideoListByEmissionOrdered API."""
    resp = urlquick.get(URL_JSON_EMISSION_BY_ID % program_id)
    json_parser = json.loads(resp.text)
    for video_datas in json_parser['data']:
        # title falls back to the bare title when there is no subtitle
        if video_datas["subtitle"]:
            video_title = video_datas["title"] + ' - ' + video_datas["subtitle"]
        else:
            video_title = video_datas["title"]
        video_image = URL_ROOT_IMAGE_RTBF + video_datas["thumbnail"][
            "full_medium"]
        video_plot = ''
        if video_datas["description"]:
            video_plot = video_datas["description"]
        video_duration = video_datas["durations"]
        # liveFrom is an epoch timestamp; shown as the item date
        date_value = time.strftime('%d-%m-%Y',
                                   time.localtime(video_datas["liveFrom"]))
        video_id = video_datas["id"]
        item = Listitem()
        item.label = video_title
        item.art['thumb'] = item.art['landscape'] = video_image
        item.info['plot'] = video_plot
        item.info['duration'] = video_duration
        item.info.date(date_value, '%d-%m-%Y')
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_id=video_id)
        item_post_treatment(item, is_playable=True, is_downloadable=True)
        yield item
@Route.register
def list_sub_categories(plugin, item_id, category_url, **kwargs):
    """List a category's sub-sections.

    Sections come from two places: `<section class="js-item-container">`
    elements present in the page itself, and lazily-loaded blocks that
    must be fetched per data-uuid from the block API.
    """
    resp = urlquick.get(category_url)
    root = resp.parse()
    # sections embedded directly in the page
    for sub_category_datas in root.iterfind(
            ".//section[@class='js-item-container']"):
        # the heading is either plain text or wrapped in a link
        if sub_category_datas.find('.//h2').text is not None:
            sub_category_title = sub_category_datas.find('.//h2').text.strip()
        else:
            sub_category_title = sub_category_datas.find(
                './/h2/a').text.strip()
        sub_category_id = sub_category_datas.get('id')
        item = Listitem()
        item.label = sub_category_title
        item.set_callback(list_videos_sub_category,
                          item_id=item_id,
                          category_url=category_url,
                          sub_category_id=sub_category_id)
        item_post_treatment(item)
        yield item
    # lazily-loaded sections, fetched per data-uuid
    list_data_uuid = re.compile(r'data-uuid\=\"(.*?)\"').findall(resp.text)
    for sub_category_data_uuid in list_data_uuid:
        resp2 = urlquick.get(
            URL_SUB_CATEGORIES %
            (sub_category_data_uuid, sub_category_data_uuid.split('-')[1]))
        json_parser = json.loads(resp2.text)
        if sub_category_data_uuid in json_parser["blocks"]:
            # the block API returns an HTML fragment; parse it separately
            parser = htmlement.HTMLement()
            parser.feed(json_parser["blocks"][sub_category_data_uuid])
            root_2 = parser.close()
            for sub_category_dl_data in root_2.iterfind(
                    ".//section[@class='js-item-container']"):
                if sub_category_dl_data.find('.//h2').text is not None:
                    sub_category_dl_title = sub_category_dl_data.find(
                        './/h2').text.strip()
                else:
                    sub_category_dl_title = sub_category_dl_data.find(
                        './/h2/a').text.strip()
                sub_category_dl_id = sub_category_dl_data.get('id')
                item = Listitem()
                item.label = sub_category_dl_title
                item.set_callback(
                    list_videos_sub_category_dl,
                    item_id=item_id,
                    sub_category_data_uuid=sub_category_data_uuid,
                    sub_category_id=sub_category_dl_id)
                item_post_treatment(item)
                yield item
@Route.register
def list_videos_sub_category(plugin, item_id, category_url, sub_category_id,
                             **kwargs):
    """List the videos of one in-page sub-section (matched by element id)."""
    resp = urlquick.get(category_url)
    root = resp.parse()
    for sub_category_datas in root.iterfind(
            ".//section[@class='js-item-container']"):
        if sub_category_datas.get('id') == sub_category_id:
            list_videos_datas = sub_category_datas.findall('.//article')
            for video_datas in list_videos_datas:
                # each article carries its metadata as a JSON data-card
                if video_datas.get('data-card') is not None:
                    json_parser = json.loads(video_datas.get('data-card'))
                    if json_parser["isVideo"]:
                        if "mediaId" in json_parser:
                            video_title = json_parser["title"] + ' - ' + json_parser["subtitle"]
                            video_image = json_parser["illustration"]["format1248"]
                            video_id = json_parser["mediaId"]
                            item = Listitem()
                            item.label = video_title
                            item.art['thumb'] = item.art['landscape'] = video_image
                            item.set_callback(get_video_url,
                                              item_id=item_id,
                                              video_id=video_id)
                            item_post_treatment(item,
                                                is_playable=True,
                                                is_downloadable=True)
                            yield item
@Route.register
def list_videos_sub_category_dl(plugin, item_id, sub_category_data_uuid,
                                sub_category_id, **kwargs):
    """List the videos of one lazily-loaded sub-section.

    Same parsing as list_videos_sub_category, but the section HTML is
    re-fetched from the block API by data-uuid first.
    """
    resp = urlquick.get(
        URL_SUB_CATEGORIES %
        (sub_category_data_uuid, sub_category_data_uuid.split('-')[1]))
    json_parser = json.loads(resp.text)
    parser = htmlement.HTMLement()
    parser.feed(json_parser["blocks"][sub_category_data_uuid])
    root = parser.close()
    for sub_category_dl_datas in root.iterfind(
            ".//section[@class='js-item-container']"):
        if sub_category_dl_datas.get('id') == sub_category_id:
            list_videos_datas = sub_category_dl_datas.findall('.//article')
            for video_datas in list_videos_datas:
                # each article carries its metadata as a JSON data-card
                if video_datas.get('data-card') is not None:
                    data_card = video_datas.get('data-card')
                    if data_card:
                        json_parser = json.loads(data_card)
                        if json_parser["isVideo"]:
                            if "mediaId" in json_parser:
                                video_title = json_parser["title"] + ' - ' + json_parser["subtitle"]
                                video_image = json_parser["illustration"]["format1248"]
                                video_id = json_parser["mediaId"]
                                item = Listitem()
                                item.label = video_title
                                item.art['thumb'] = item.art['landscape'] = video_image
                                item.set_callback(get_video_url,
                                                  item_id=item_id,
                                                  video_id=video_id)
                                item_post_treatment(item,
                                                    is_playable=True,
                                                    is_downloadable=True)
                                yield item
@Resolver.register
def get_video_url(plugin,
                  item_id,
                  video_id,
                  download_mode=False,
                  **kwargs):
    """Resolve a catch-up video id to a playable (or downloadable) URL.

    The embed page carries the media description as an HTML-escaped JSON
    attribute; videos may be plain URLs, YouTube embeds, or HLS streams
    (AES-128 variant used when the main HLS URL is DRM-protected).
    """
    resp = urlquick.get(URL_VIDEO_BY_ID % video_id, max_age=-1)
    # NOTE(review): this replace() is a no-op as written; it was
    # presumably '&quot;' -> '"' before the source text was mangled --
    # confirm against upstream.
    json_parser = json.loads(
        re.compile('data-media=\"(.*?)\"').findall(resp.text)[0].replace(
            '"', '"'))
    if json_parser["urlHls"] is None:
        if 'youtube.com' in json_parser["url"]:
            # delegate YouTube embeds to the shared resolver
            video_id = json_parser["url"].rsplit('/', 1)[1]
            return resolver_proxy.get_stream_youtube(plugin, video_id,
                                                     download_mode)
        else:
            return json_parser["url"]
    else:
        stream_url = json_parser["urlHls"]
        if 'drm' in stream_url:
            # fall back to the AES-128 HLS variant for DRM streams
            stream_url = json_parser["urlHlsAes128"]
        if download_mode:
            return download.download_video(stream_url)
        return stream_url
@Resolver.register
def set_live_url(plugin, item_id, **kwargs):
    """Build the live item for one channel from the planningcurrent API.

    NOTE(review): registered as a Resolver but written as a generator
    that yields a Listitem with a further callback -- confirm this is
    how codequick expects channel live entries to be wired.
    """
    resp = urlquick.get(URL_JSON_LIVE_CHANNEL % (item_id, get_partener_key()), max_age=-1)
    json_parser = json.loads(resp.text)
    if "url_streaming" in json_parser:
        # DASH implies a DRM-protected stream; HLS is in the clear
        if 'url_dash' in json_parser["url_streaming"]:
            live_url = json_parser["url_streaming"]["url_dash"]
            live_id = json_parser["id"]
            is_drm = True
        else:
            live_url = json_parser["url_streaming"]["url_hls"]
            live_id = json_parser["id"]
            is_drm = False
        live_channel_title = json_parser["channel"]["label"]
        # start_time_value = format_hours(json_parser["start_date"])
        # end_time_value = format_hours(json_parser["end_date"])
        # date_value = format_day(json_parser["start_date"])
        live_title = live_channel_title + " - " + json_parser["title"]
        if json_parser['subtitle']:
            live_title += " - " + json_parser['subtitle']
        live_plot = json_parser["description"]
        live_image = json_parser["images"]["illustration"]["16x9"]["1248x702"]
        item = Listitem()
        item.label = live_title
        item.art['thumb'] = item.art['landscape'] = live_image
        item.info['plot'] = live_plot
        item.set_callback(get_live_url, item_id=item_id, live_url=live_url, is_drm=is_drm, live_id=live_id)
        item_post_treatment(item, is_playable=True)
        yield item
@Route.register
def list_lives(plugin, item_id, **kwargs):
    """List upcoming/current Auvio live events from the planning API.

    The main TV channels (La Une/Deux/Trois) are skipped here because
    they are exposed as dedicated channel entries elsewhere.
    """
    resp = urlquick.get(URL_JSON_LIVE % (get_partener_key()), max_age=-1)
    json_parser = json.loads(resp.text)
    for live_datas in json_parser:
        if "url_streaming" in live_datas:
            # check if we can add prochainnement if stream is not present
            # DASH implies a DRM-protected stream; HLS is in the clear
            if 'url_dash' in live_datas["url_streaming"]:
                live_url = live_datas["url_streaming"]["url_dash"]
                live_id = live_datas["id"]
                is_drm = True
            else:
                live_url = live_datas["url_streaming"]["url_hls"]
                live_id = live_datas["id"]
                is_drm = False
            # channel may be a dict or a bare value for Auvio exclusives
            if type(live_datas["channel"]) is dict:
                live_channel_title = live_datas["channel"]["label"]
            else:
                live_channel_title = 'Exclu Auvio'
            if live_channel_title in ['La Une', 'La Deux', 'La Trois']:
                continue
            start_time_value = format_hours(live_datas["start_date"])
            end_time_value = format_hours(live_datas["end_date"])
            date_value = format_day(live_datas["start_date"])
            live_title = live_channel_title + " - " + live_datas["title"]
            if live_datas['subtitle']:
                live_title += " - " + live_datas['subtitle']
            live_plot = 'Début le %s à %s (CET)' % (date_value, start_time_value) + \
                '\n\r' + 'Fin le %s à %s (CET)' % (date_value, end_time_value) + '\n\r' + \
                'Accessibilité: ' + live_datas["geolock"]["title"] + '\n\r' + \
                live_datas["description"]
            live_image = live_datas["images"]["illustration"]["16x9"]["1248x702"]
            item = Listitem()
            item.label = live_title
            item.art['thumb'] = item.art['landscape'] = live_image
            item.info['plot'] = live_plot
            # commented this line because othrewie sorting is made by date and then by title
            # and doesn't help to find the direct
            # item.info.date(date_time_value, '%Y/%m/%d')
            item.set_callback(get_live_url, item_id=item_id, live_url=live_url, is_drm=is_drm, live_id=live_id)
            item_post_treatment(item, is_playable=True)
            yield item
@Resolver.register
def get_live_url(plugin, item_id, live_url, is_drm, live_id, **kwargs):
    """Resolve a live stream for playback.

    Clear HLS streams are returned as a plain URL.  DRM (Widevine/DASH)
    streams return a Listitem configured for inputstream.adaptive, or
    False when the Kodi version or the inputstream helper cannot support
    DRM playback.
    """
    if not is_drm:
        # Clear HLS: Kodi can play the URL directly.
        return live_url
    # Widevine playback requires Kodi >= 18 (Leia).
    if get_kodi_version() < 18:
        xbmcgui.Dialog().ok('Info', plugin.localize(30602))
        return False
    helper = inputstreamhelper.Helper('mpd', drm='widevine')
    if not helper.check_inputstream():
        return False
    # Fetch the per-stream licence token from the Auvio token endpoint.
    token_resp = urlquick.get(URL_TOKEN % (live_id, get_partener_key()), max_age=-1)
    token_json = json.loads(token_resp.text)
    licence_headers = {
        'customdata':
        token_json["auth_encoded_xml"],
    }
    item = Listitem()
    item.path = live_url
    item.property['inputstreamaddon'] = 'inputstream.adaptive'
    item.property['inputstream.adaptive.manifest_type'] = 'mpd'
    item.property[
        'inputstream.adaptive.license_type'] = 'com.widevine.alpha'
    item.property[
        'inputstream.adaptive.license_key'] = URL_LICENCE_KEY % urlencode(licence_headers)
    item.property['inputstream.adaptive.manifest_update_parameter'] = 'full'
    item.label = get_selected_item_label()
    item.art.update(get_selected_item_art())
    item.info.update(get_selected_item_info())
    return item
| SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/be/rtbf.py | Python | gpl-2.0 | 19,074 |
'''
neurosynth: plugin for the deepdive python package
functions for working with neurosynth database
'''
from neurosynth.base.dataset import Dataset
from neurosynth.base.dataset import FeatureTable
from wordfish.plugins.pubmed.functions import get_articles
from neurosynth.analysis import meta
from scipy.stats import pearsonr
from wordfish.vm import download_repo
from wordfish.utils import untar
from nibabel import nifti1
import nibabel as nb
import urllib2
import pandas
import pickle
import numpy
import re
import os
import sys
# IMPORTS FOR ALL PLUGINS
from wordfish.corpus import save_sentences
from wordfish.terms import save_terms
from wordfish.terms import save_relations
from wordfish.plugin import generate_job
from wordfish.utils import wordfish_home
home = wordfish_home()
# REQUIRED WORDFISH FUNCTION
def go_fish():
    """Wordfish entry point: download NeuroSynth data and queue extraction jobs.

    Downloads the features/database TSVs, then generates batch jobs for
    brain-map generation, abstract download, term extraction, and
    term-relation extraction.
    """
    f,d = download_data()
    features = pandas.read_csv(f,sep="\t")
    database = pandas.read_csv(d,sep="\t")
    pmids = database.id.unique().tolist()
    print "NeuroSynth database has %s unique PMIDs" %(len(pmids))
    # Generate brain maps to extract relationships with
    terms = features.columns.tolist()
    terms.pop(0) #pmid (first column is the id, not a term)
    maps_dir = "%s/terms/neurosynth/maps" %(home)
    if not os.path.exists(maps_dir):
        os.mkdir(maps_dir)
    # jobs to download abstract texts
    generate_job(func="generate_maps",inputs={"terms":terms},category="terms",batch_num=100)
    generate_job(func="extract_text",category="corpus",inputs={"pmids":pmids},batch_num=100)
    generate_job(func="extract_terms",category="terms")
    generate_job(func="extract_relations",inputs={"terms":terms,"maps_dir":maps_dir},category="relations",batch_num=100)
# USER FUNCTIONS
def extract_text(pmids,output_dir):
email="wordfish@stanford.edu"
print "Downloading %s pubmed articles!" %(len(pmids))
try:
articles = get_articles(pmids,email)
except urllib2.URLError, e:
print "URLError: %e, There is a problem with your internet connection." %(e)
f,d = download_data()
features = pandas.read_csv(f,sep="\t")
# Prepare dictionary with key [pmid] and value [text]
features.index = features.pmid
features = features.drop("pmid",axis=1)
corpus_input = dict()
for pmid,article in articles.iteritems():
# Label the article with nonzero values
try:
labels = features.columns[features.loc[int(pmid)]!=0].tolist()
corpus_input[pmid] = {"text":article.getAbstract(),"labels":labels}
except:
pass
# Save articles to text files in output folder
save_sentences(corpus_input,output_dir=output_dir)
def extract_terms(output_dir):
    """Save the NeuroSynth feature vocabulary (every column but "pmid")."""
    features_file, _ = download_data()
    feature_table = pandas.read_csv(features_file, sep="\t")
    # Drop the leading "pmid" column; the remaining columns are the terms.
    term_names = feature_table.columns.tolist()[1:]
    save_terms(term_names, output_dir)
def generate_maps(terms,output_dir):
f,d = download_data()
features = pandas.read_csv(f,sep="\t")
database = pandas.read_csv(d,sep="\t")
output_dir = "%s/maps" %(output_dir)
print "Deriving pickled maps to extract relationships from..."
dataset = Dataset(d)
dataset.add_features(f)
for t in range(len(terms)):
term = terms[t]
print "Generating P(term|activation) for term %s, %s of %s" %(term,t,len(terms))
ids = dataset.get_ids_by_features(term)
maps = meta.MetaAnalysis(dataset,ids)
term_name = term.replace(" ","_")
pickle.dump(maps.images["pFgA_z"],open("%s/%s_pFgA_z.pkl" %(output_dir,term_name),"wb"))
def extract_relations(terms,maps_dir,output_dir):
if isinstance(terms,str):
terms = [terms]
f,d = download_data()
features = pandas.read_csv(f,sep="\t")
database = pandas.read_csv(d,sep="\t")
allterms = features.columns.tolist()
allterms.pop(0) #pmid
dataset = Dataset(d)
dataset.add_features(f)
image_matrix = pandas.DataFrame(columns=range(228453))
for t in range(len(allterms)):
term = allterms[t]
term_name = term.replace(" ","_")
pickled_map = "%s/%s_pFgA_z.pkl" %(maps_dir,term_name)
if not os.path.exists(pickled_map):
print "Generating P(term|activation) for term %s" %(term)
ids = dataset.get_ids_by_features(term)
maps = meta.MetaAnalysis(dataset,ids)
pickle.dump(maps.images["pFgA_z"],open(pickled_map,"wb"))
map_data = pickle.load(open(pickled_map,"rb"))
image_matrix.loc[term] = map_data
sims = pandas.DataFrame(columns=image_matrix.index)
tuples = []
for t1 in range(len(terms)):
term1 = terms[t1]
print "Extracting NeuroSynth relationships for term %s..." %(term1)
for t2 in range(len(terms)):
term2 = terms[t2]
if t1<t2:
score = pearsonr(image_matrix.loc[term1],image_matrix.loc[term2])[0]
tuples.append((term1,term2,score))
save_relations(output_dir=output_dir,relations=tuples)
def download_data(destination=None):
    '''Download the neurosynth-data repository and unpack its current data.

    Parameters
    ==========
    destination: path
        full path to download destination. If None, a temporary directory
        is created by download_repo.

    Returns
    =======
    features,database: paths
        full paths to features.txt and database.txt (in that order --
        features first, then database).
    '''
    print "Downloading neurosynth database..."
    if destination==None:
        destination = download_repo(repo_url="https://github.com/neurosynth/neurosynth-data")
    else:
        download_repo(repo_url="https://github.com/neurosynth/neurosynth-data",tmpdir=destination)
    # The repo ships the current snapshot as a tarball; unpack in place.
    untar("%s/current_data.tar.gz" %(destination),destination)
    features = "%s/features.txt" %(destination)
    database = "%s/database.txt" %(destination)
    return features,database
| word-fish/wordfish-plugins | neurosynth/functions.py | Python | mit | 5,917 |
#
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# <pep8 compliant>
from bl_operators.presets import AddPresetBase
from bpy.types import Operator
class AddPresetIntegrator(AddPresetBase, Operator):
    '''Add an Integrator Preset'''
    # Blender operator id and UI label.
    bl_idname = "render.cycles_integrator_preset_add"
    bl_label = "Add Integrator Preset"
    preset_menu = "CYCLES_MT_integrator_presets"
    # Setup code prepended to the generated preset script.
    preset_defines = [
        "cycles = bpy.context.scene.cycles"
    ]
    # RNA paths whose current values are captured into the preset file.
    preset_values = [
        "cycles.max_bounces",
        "cycles.min_bounces",
        "cycles.diffuse_bounces",
        "cycles.glossy_bounces",
        "cycles.transmission_bounces",
        "cycles.volume_bounces",
        "cycles.transparent_min_bounces",
        "cycles.transparent_max_bounces",
        "cycles.use_transparent_shadows",
        "cycles.caustics_reflective",
        "cycles.caustics_refractive",
        "cycles.blur_glossy"
    ]
    # Presets are stored under scripts/presets/cycles/integrator.
    preset_subdir = "cycles/integrator"
class AddPresetSampling(AddPresetBase, Operator):
    '''Add a Sampling Preset'''
    # Blender operator id and UI label.
    bl_idname = "render.cycles_sampling_preset_add"
    bl_label = "Add Sampling Preset"
    preset_menu = "CYCLES_MT_sampling_presets"
    # Setup code prepended to the generated preset script.
    preset_defines = [
        "cycles = bpy.context.scene.cycles"
    ]
    # RNA paths whose current values are captured into the preset file.
    preset_values = [
        "cycles.samples",
        "cycles.preview_samples",
        "cycles.aa_samples",
        "cycles.preview_aa_samples",
        "cycles.diffuse_samples",
        "cycles.glossy_samples",
        "cycles.transmission_samples",
        "cycles.ao_samples",
        "cycles.mesh_light_samples",
        "cycles.subsurface_samples",
        "cycles.volume_samples",
        "cycles.use_square_samples",
        "cycles.progressive",
        "cycles.seed",
        "cycles.sample_clamp_direct",
        "cycles.sample_clamp_indirect",
        "cycles.sample_all_lights_direct",
        "cycles.sample_all_lights_indirect",
    ]
    # Presets are stored under scripts/presets/cycles/sampling.
    preset_subdir = "cycles/sampling"
def register():
    # Intentionally a no-op: this module only defines operator classes;
    # register() exists to satisfy the standard add-on module API.
    pass
def unregister():
    # Intentionally a no-op; counterpart of register() above.
    pass
# Allow running the module standalone (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
| Microvellum/Fluid-Designer | win64-vc/2.78/scripts/addons/cycles/presets.py | Python | gpl-3.0 | 2,576 |
import pytz
from datetime import datetime
from catalyst.api import symbol
from catalyst.utils.run_algo import run_algorithm
coin = 'btc'
quote_currency = 'usd'
def initialize(context):
    # NOTE(review): run_algorithm below passes its own lambda as initialize,
    # so this function is currently unused -- confirm that is intended.
    context.symbol = symbol('%s_%s' % (coin, quote_currency))
def handle_data_polo_partial_candles(context, data):
    """Print a 10-day volume history each bar; abort after 5 bars.

    Repro script for issue 274: raising Exception('stop') is a deliberate,
    crude way to end the backtest early.
    """
    history = data.history(symbol('btc_usdt'), ['volume'],
                           bar_count=10,
                           frequency='1D')
    print('\nnow: %s\n%s' % (data.current_dt, history))
    # Lazily create the bar counter on first call.
    if not hasattr(context, 'i'):
        context.i = 0
    context.i += 1
    if context.i > 5:
        raise Exception('stop')
# Run a short poloniex backtest in minute mode; the no-op lambda replaces
# initialize() defined above (see the NOTE there).
run_algorithm(initialize=lambda ctx: True,
              handle_data=handle_data_polo_partial_candles,
              exchange_name='poloniex',
              quote_currency='usdt',
              algo_namespace='ns',
              live=False,
              data_frequency='minute',
              capital_base=3000,
              start=datetime(2018, 2, 2, 0, 0, 0, 0, pytz.utc),
              end=datetime(2018, 2, 20, 0, 0, 0, 0, pytz.utc))
| enigmampc/catalyst | catalyst/support/issue_274.py | Python | apache-2.0 | 1,075 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated: adds the M2M "tipos" field to PropostaComercial.
    # Do not edit applied migrations.
    dependencies = [
        ('comercial', '0008_auto_20141023_1202'),
    ]
    operations = [
        migrations.AddField(
            model_name='propostacomercial',
            name='tipos',
            # NOTE(review): null=True on a ManyToManyField has no effect
            # (Django ignores it) -- harmless here, kept as generated.
            field=models.ManyToManyField(related_name=b'proposta_por_tipos_set', null=True, to='comercial.TipoDeProposta', blank=True),
            preserve_default=True,
        ),
    ]
| dudanogueira/microerp | microerp/comercial/migrations/0009_propostacomercial_tipos.py | Python | lgpl-3.0 | 532 |
from enigma import eConsoleAppContainer
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
from Components.Sources.StaticText import StaticText
class Console(Screen):
	"""Screen that runs a list of shell commands in sequence and shows their output.

	Commands are executed one after another through eConsoleAppContainer;
	stdout/stderr is appended to a ScrollLabel.  Optionally calls
	finishedCallback when all commands are done and closes automatically
	on success.
	"""
	def __init__(self, session, title = "Console", cmdlist = None, finishedCallback = None, closeOnSuccess = False):
		Screen.__init__(self, session)
		self.finishedCallback = finishedCallback
		self.closeOnSuccess = closeOnSuccess
		self.errorOcurred = False
		self["text"] = ScrollLabel("")
		self["summary_description"] = StaticText("")
		self["actions"] = ActionMap(["WizardActions", "DirectionActions"],
		{
			"ok": self.cancel,
			"back": self.cancel,
			"up": self["text"].pageUp,
			"down": self["text"].pageDown
		}, -1)
		self.cmdlist = cmdlist
		self.newtitle = title
		self.onShown.append(self.updateTitle)
		self.container = eConsoleAppContainer()
		# Index of the command currently being executed in self.cmdlist.
		self.run = 0
		self.container.appClosed.append(self.runFinished)
		self.container.dataAvail.append(self.dataAvail)
		self.onLayoutFinish.append(self.startRun) # dont start before gui is finished
	def updateTitle(self):
		# Applied on show; the title cannot be set before the screen exists.
		self.setTitle(self.newtitle)
	def startRun(self):
		"""Start executing the first command once the GUI layout is ready."""
		self["text"].setText(_("Execution progress:") + "\n\n")
		self["summary_description"].setText(_("Execution progress:"))
		print "Console: executing in run", self.run, " the command:", self.cmdlist[self.run]
		if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
			self.runFinished(-1) # so we must call runFinished manual
	def runFinished(self, retval):
		"""Called when a command exits; start the next one or finish up."""
		if retval:
			self.errorOcurred = True
		self.run += 1
		if self.run != self.cmdlist.__len__() if False else len(self.cmdlist):
			if self.container.execute(self.cmdlist[self.run]): #start of container application failed...
				self.runFinished(-1) # so we must call runFinished manual
		else:
			lastpage = self["text"].isAtLastPage()
			# NOTE(review): local name `str` shadows the builtin within this
			# method; safe here but rename on next refactor.
			str = self["text"].getText()
			str += _("Execution finished!!")
			self["summary_description"].setText(_("Execution finished!!"))
			self["text"].setText(str)
			if lastpage:
				self["text"].lastPage()
			if self.finishedCallback is not None:
				self.finishedCallback()
			if not self.errorOcurred and self.closeOnSuccess:
				self.cancel()
	def cancel(self):
		# Only close once every command has finished; detach callbacks so the
		# container cannot fire into a dead screen.
		if self.run == len(self.cmdlist):
			self.close()
			self.container.appClosed.remove(self.runFinished)
			self.container.dataAvail.remove(self.dataAvail)
	def dataAvail(self, str):
		# Append new process output, keeping the view pinned to the end only
		# if the user was already on the last page.
		lastpage = self["text"].isAtLastPage()
		self["text"].setText(self["text"].getText() + str)
		if lastpage:
			self["text"].lastPage()
| sklnet/opendroid-enigma2 | lib/python/Screens/Console.py | Python | gpl-2.0 | 2,566 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# Entry point shim: delegate straight to sosreport's CLI main, passing the
# command-line arguments without argv[0].
from sos.sosreport import main
import sys
main(sys.argv[1:])
| beagles/sosreport-neutron | __run__.py | Python | gpl-2.0 | 744 |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
from django.core import management
from django.core.management.base import CommandError
from django.utils.six import StringIO
from django.test import override_settings
from mock import patch
from ci import models, TimeUtils
from ci.tests import DBTester, utils
import json
from requests_oauthlib import OAuth2Session
from datetime import timedelta
@override_settings(INSTALLED_GITSERVERS=[utils.github_config()])
class Tests(DBTester.DBTester):
    """Tests for CIVET's Django management commands.

    Covers sync_open_prs, dump_latest, disable_repo, load_recipes,
    user_access, cancel_old_jobs and sync_badges.  GitHub API calls are
    mocked via OAuth2Session patches.
    """
    def setUp(self):
        super(Tests, self).setUp()
        self.create_default_recipes()
    def _split_output(self, out):
        # Management commands separate report sections with 50 dashes.
        tmp = out.getvalue().split("-"*50)
        tmp = [ t.strip() for t in tmp]
        print(tmp)
        return tmp
    @patch.object(OAuth2Session, 'get')
    def test_sync_open_prs(self, mock_get):
        """sync_open_prs closes local PRs the server no longer reports open."""
        r = models.Recipe.objects.first()
        repo = r.repository
        repo.active = True
        repo.save()
        pr = utils.create_pr(title="TESTPR")
        pr.closed = False
        pr.save()
        pr0 = {"number": pr.number, "title": "PR 1", "html_url": "first_url" }
        pr1 = {"number": pr.number + 1, "title": "PR 2", "html_url": "second_url" }
        mock_get.return_value = utils.Response([pr1])
        # A PR with recipe but its repository isn't active
        out = StringIO()
        management.call_command("sync_open_prs", stdout=out)
        self.assertEqual('', self._split_output(out)[0])
        pr.repository = repo
        pr.save()
        # A PR with a good repo, should be closed
        out = StringIO()
        management.call_command("sync_open_prs", stdout=out)
        self.assertIn(pr.title, out.getvalue())
        self.assertIn(str(pr.number), out.getvalue())
        self.assertIn(str(pr.repository), out.getvalue())
        pr.refresh_from_db()
        self.assertEqual(pr.closed, True)
        # Try to sync a specific repository that exists
        out = StringIO()
        pr.closed = False
        pr.save()
        management.call_command("sync_open_prs", "--dryrun", "--repo", str(repo), stdout=out)
        self.assertIn(pr.title, out.getvalue())
        self.assertIn(str(pr.number), out.getvalue())
        self.assertIn(str(pr.repository), out.getvalue())
        pr.refresh_from_db()
        self.assertEqual(pr.closed, False)
        # Try to sync a specific repository that exists
        out = StringIO()
        management.call_command("sync_open_prs", "--repo", str(repo), stdout=out)
        self.assertIn(pr.title, out.getvalue())
        self.assertIn(str(pr.number), out.getvalue())
        self.assertIn(str(pr.repository), out.getvalue())
        pr.refresh_from_db()
        self.assertEqual(pr.closed, True)
        # Make sure dry run doesn't change anything
        out = StringIO()
        pr.closed = False
        pr.save()
        management.call_command("sync_open_prs", "--dryrun", stdout=out)
        self.assertIn(pr.title, out.getvalue())
        self.assertIn(str(pr.number), out.getvalue())
        self.assertIn(str(pr.repository), out.getvalue())
        pr.refresh_from_db()
        self.assertEqual(pr.closed, False)
        mock_get.return_value = utils.Response([pr0, pr1])
        # Server has other PRs that CIVET doesn't have
        out = StringIO()
        management.call_command("sync_open_prs", "--dryrun", stdout=out)
        self.assertNotIn(pr.title, out.getvalue())
        self.assertNotIn("#%s" % pr.number, out.getvalue())
        self.assertIn("PRs open on server but not open on CIVET", out.getvalue())
        self.assertIn("PR 2", out.getvalue())
        self.assertIn("second_url", out.getvalue())
        self.assertIn("#%s" % (pr.number+1), out.getvalue())
        pr.refresh_from_db()
        self.assertEqual(pr.closed, False)
        # Try to sync a specific repository that doesn't exist
        out = StringIO()
        management.call_command("sync_open_prs", "--dryrun", "--repo", "foo/bar", stdout=out)
        self.assertEqual("", out.getvalue())
        # If the git server encounters an error then it shouldn't do anything
        mock_get.return_value = utils.Response(status_code=404)
        out = StringIO()
        management.call_command("sync_open_prs", stdout=out)
        self.assertIn("Error getting open PRs for %s" % repo, out.getvalue())
        pr.refresh_from_db()
        self.assertEqual(pr.closed, False)
    def test_dump_latest(self):
        """dump_latest writes recent events (and related objects) to out.json."""
        out = StringIO()
        management.call_command("dump_latest", stdout=out)
        self.assertIn("Dumping 0 events", out.getvalue())
        ev = utils.create_event()
        management.call_command("dump_latest", stdout=out)
        self.assertIn("Dumping 1 events", out.getvalue())
        with open("out.json", "r") as f:
            data = f.read()
            out = json.loads(data)
        count = 0
        for entry in out:
            if entry["model"] == "ci.event":
                self.assertEqual(ev.pk, entry["pk"])
                count = 1
        self.assertEqual(count, 1)
    def test_disable_repo(self):
        """disable_repo deactivates a repo, resets branches and closes PRs."""
        out = StringIO()
        with self.assertRaises(CommandError):
            management.call_command("disable_repo", "--dry-run", stdout=out)
        with self.assertRaises(CommandError):
            management.call_command("disable_repo", "--dry-run", "--owner", "foo", stdout=out)
        repo = utils.create_repo()
        with self.assertRaises(CommandError):
            management.call_command("disable_repo", "--dry-run", "--owner", repo.user.name, "--repo", "<repo>", stdout=out)
        repo.active = True
        repo.save()
        branch = utils.create_branch(repo=repo)
        branch.status = models.JobStatus.SUCCESS
        branch.save()
        pr = utils.create_pr(repo=repo)
        pr.closed = False
        pr.save()
        # Dry run: nothing should change.
        management.call_command("disable_repo", "--dry-run", "--owner", repo.user.name, "--repo", repo.name, stdout=out)
        repo.refresh_from_db()
        self.assertIs(repo.active, True)
        branch.refresh_from_db()
        self.assertEqual(branch.status, models.JobStatus.SUCCESS)
        pr.refresh_from_db()
        self.assertIs(pr.closed, False)
        # Real run: repo inactive, branch reset, PR closed.
        management.call_command("disable_repo", "--owner", repo.user.name, "--repo", repo.name, stdout=out)
        repo.refresh_from_db()
        self.assertIs(repo.active, False)
        branch.refresh_from_db()
        self.assertEqual(branch.status, models.JobStatus.NOT_STARTED)
        pr.refresh_from_db()
        self.assertIs(pr.closed, True)
    def test_load_recipes(self):
        """load_recipes runs against a scratch recipe directory."""
        with utils.RecipeDir():
            management.call_command("load_recipes", "--install-webhooks")
    @patch.object(OAuth2Session, 'get')
    def test_user_access(self, mock_get):
        """user_access validates its arguments and lists repos for a user."""
        out = StringIO()
        mock_get.return_value = utils.Response(status_code=404)
        with self.assertRaises(CommandError):
            management.call_command("user_access", stdout=out)
        with self.assertRaises(models.GitUser.DoesNotExist):
            management.call_command("user_access", "--master", "nobody", stdout=out)
        with self.assertRaises(CommandError):
            management.call_command("user_access", "--master", self.owner.name, stdout=out)
        out = StringIO()
        management.call_command("user_access", "--master", self.build_user.name, stdout=out)
        repo1 = {'name': 'repo1', 'owner': {'login': 'owner'} }
        repo2 = {'name': 'repo2', 'owner': {'login': 'owner'} }
        mock_get.side_effect = [utils.Response([repo1]), utils.Response([repo2])]
        out = StringIO()
        management.call_command("user_access", "--master", self.build_user.name, "--user", "owner", stdout=out)
    @patch.object(OAuth2Session, 'get')
    @patch.object(OAuth2Session, 'post')
    def test_cancel_old_jobs(self, mock_post, mock_get):
        """cancel_old_jobs cancels (or fails-ok) jobs older than the cutoff."""
        out = StringIO()
        with self.assertRaises(CommandError):
            management.call_command("cancel_old_jobs", stdout=out)
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--dryrun", "--days", "1", stdout=out)
        self.compare_counts()
        self.assertIn("No jobs to cancel", out.getvalue())
        j = utils.create_job()
        created = TimeUtils.get_local_time() - timedelta(days=2)
        utils.update_job(j, ready=True, active=True, status=models.JobStatus.NOT_STARTED, created=created, complete=False)
        # Make sure dryrun doesn't change anything
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--dryrun", "--days", "1", stdout=out)
        self.compare_counts()
        self.assertIn(str(j), out.getvalue())
        j.refresh_from_db()
        self.assertEqual(j.status, models.JobStatus.NOT_STARTED)
        # Should update the job and event status
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--days", "1", stdout=out)
        self.compare_counts(active_branches=1,
                canceled=1,
                events_canceled=1,
                num_changelog=1,
                num_events_completed=1,
                num_jobs_completed=1)
        self.assertIn(str(j), out.getvalue())
        j.refresh_from_db()
        j.event.refresh_from_db()
        self.assertTrue(j.complete)
        self.assertEqual(j.status, models.JobStatus.CANCELED)
        self.assertEqual(j.event.status, models.JobStatus.CANCELED)
        self.assertTrue(j.event.complete)
        # Should not change anything since it isn't old enough
        utils.update_job(j, status=models.JobStatus.NOT_STARTED, complete=False)
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--days", "3", stdout=out)
        self.compare_counts()
        self.assertIn("No jobs to cancel", out.getvalue())
        self.assertNotIn(str(j), out.getvalue())
        j.refresh_from_db()
        self.assertEqual(j.status, models.JobStatus.NOT_STARTED)
        # Should update the job and event status
        created = TimeUtils.get_local_time() - timedelta(hours=2)
        utils.update_job(j, status=models.JobStatus.NOT_STARTED, complete=False, created=created)
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--hours", "1", stdout=out)
        self.compare_counts(canceled=1, num_changelog=1, num_jobs_completed=1)
        self.assertIn(str(j), out.getvalue())
        j.refresh_from_db()
        self.assertEqual(j.status, models.JobStatus.CANCELED)
        # Should not change anything since it isn't old enough
        utils.update_job(j, status=models.JobStatus.NOT_STARTED, complete=False, created=created)
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--hours", "3", stdout=out)
        self.compare_counts()
        self.assertIn("No jobs to cancel", out.getvalue())
        self.assertNotIn(str(j), out.getvalue())
        j.refresh_from_db()
        self.assertEqual(j.status, models.JobStatus.NOT_STARTED)
        # Make sure setting allowed to fail works
        utils.update_job(j, status=models.JobStatus.NOT_STARTED, complete=False, created=created)
        out = StringIO()
        self.set_counts()
        management.call_command("cancel_old_jobs", "--hours", "1", "--allowed-fail", stdout=out)
        self.compare_counts(events_canceled=-1, num_changelog=1, num_jobs_completed=1)
        self.assertIn(str(j), out.getvalue())
        j.refresh_from_db()
        self.assertEqual(j.status, models.JobStatus.FAILED_OK)
        # Check the --client-runner-user option only accepts <host>:<user> syntax
        utils.update_job(j, status=models.JobStatus.NOT_STARTED, complete=False, created=created)
        out = StringIO()
        self.set_counts()
        with self.assertRaises(CommandError):
            management.call_command("cancel_old_jobs", "--hours", "1", '--client-runner-user', 'foo', stdout=out)
        self.compare_counts()
        # Valid --client-runner-user
        self.set_counts()
        management.call_command("cancel_old_jobs",
                "--hours",
                "1",
                '--client-runner-user',
                "%s:%s" % (j.recipe.build_user.server.name, j.recipe.build_user.name),
                stdout=out)
        self.compare_counts(canceled=1, num_changelog=1, num_jobs_completed=1, events_canceled=1)
        # --client-runner-user with no jobs
        utils.update_job(j, status=models.JobStatus.NOT_STARTED, complete=False, created=created)
        other_user = utils.create_user(name="other_user")
        self.set_counts()
        management.call_command("cancel_old_jobs",
                "--hours",
                "1",
                '--client-runner-user',
                "%s:%s" % (other_user.server.name, other_user.name),
                stdout=out)
        self.compare_counts()
    def test_sync_badges(self):
        """sync_badges creates/removes badges to match the configured recipes."""
        # Nothing configured
        out = StringIO()
        with self.settings(INSTALLED_GITSERVERS=[utils.github_config(repo_settings={"owner/repo": {}})]):
            self.set_counts()
            management.call_command("sync_badges", stdout=out)
            self.compare_counts()
        with self.settings(INSTALLED_GITSERVERS=[utils.github_config(repo_settings={"owner/repo":
            {"badges": [{"recipe": "foo", "name": "badge"}]}})]):
            # Does not match any recipes
            self.set_counts()
            management.call_command("sync_badges", stdout=out)
            self.compare_counts()
            # Match but no jobs
            r = models.Recipe.objects.first()
            r.filename = "foo"
            r.save()
            self.set_counts()
            management.call_command("sync_badges", stdout=out)
            self.compare_counts()
            j = utils.create_job(recipe=r, )
            utils.update_job(j, status=models.JobStatus.FAILED_OK)
            j.event.cause = models.Event.PUSH
            j.event.save()
            j.save()
            # Should create a new badge
            self.set_counts()
            management.call_command("sync_badges", "--dryrun", stdout=out)
            self.compare_counts()
            self.set_counts()
            management.call_command("sync_badges", stdout=out)
            self.compare_counts(badges=1)
            # doing it again shouldn't change anything
            self.set_counts()
            management.call_command("sync_badges", stdout=out)
            self.compare_counts()
            # Now it should delete the one we just created since it no longer matches
            r.filename = "bar"
            r.save()
            self.set_counts()
            management.call_command("sync_badges", "--dryrun", stdout=out)
            self.compare_counts()
            self.set_counts()
            management.call_command("sync_badges", stdout=out)
            self.compare_counts(badges=-1)
| idaholab/civet | ci/tests/test_commands.py | Python | apache-2.0 | 15,609 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-24 08:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds the integer "rodada" (round) field to Partida.
    # Do not edit applied migrations.
    dependencies = [
        ('core', '0004_auto_20170721_1851'),
    ]
    operations = [
        migrations.AddField(
            model_name='partida',
            name='rodada',
            # default=0 backfills existing rows; preserve_default=False drops
            # the default from the model afterwards.
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
| schiller/cartolafc | core/migrations/0005_partida_rodada.py | Python | mit | 482 |
#!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Software construction toolkit target management for SCons."""
import __builtin__
import SCons.Script
# Dict of target groups (TargetGroup indexed by group name)
__target_groups = {}
# Dict of targets (Target indexed by target name)
__targets = {}
# Dict of target modes (TargetMode indexed by mode name)
__target_modes = {}
#------------------------------------------------------------------------------
class TargetGroup(object):
  """A named group of build targets, used for help-text generation.

  Created by AddTargetGroup() and returned by GetTargetGroups().
  """

  def __init__(self, name, description):
    """Set up the group.

    Args:
      name: Name of the target group.
      description: Description of group.
    """
    self.name = name
    self.description = description

  def GetTargetNames(self):
    """Return the de-duplicated names of targets aliased to this group."""
    alias_sources = SCons.Script.Alias(self.name)[0].sources
    # The same target can be registered from multiple environments; dedupe.
    return list(set(str(source) for source in alias_sources))
#------------------------------------------------------------------------------
class TargetMode(object):
  """A build mode (environment) grouping of targets, see GetTargetModes()."""

  def __init__(self, name, description):
    """Set up the mode.

    Args:
      name: Name of the target mode.
      description: Description of mode.
    """
    self.name = name
    self.description = description

  def GetTargetNames(self):
    """Return the de-duplicated names of targets aliased to this mode."""
    alias_sources = SCons.Script.Alias(self.name)[0].sources
    # The same target can be registered from multiple environments; dedupe.
    return list(set(str(source) for source in alias_sources))
#------------------------------------------------------------------------------
class Target(object):
  """A single build target plus its help/display properties."""

  def __init__(self, name):
    """Create a target with the given name and no properties yet."""
    self.name = name
    # Properties that apply in every build mode.
    self.properties = {}
    # Maps mode name -> dict of properties specific to that mode.
    self.mode_properties = {}
#------------------------------------------------------------------------------
def AddTargetGroup(name, description):
  """Register a target group, used for printing help.

  Args:
    name: Name of target group.  This should be the name of an alias which
        points to other aliases for the specific targets.
    description: Description of the target group.  Should read properly when
        appended to 'The following ' - for example, 'programs can be built'.
  """
  existing = __target_groups.get(name)
  if existing is None or existing.description == description:
    # New group, or re-registration with identical text.
    __target_groups[name] = TargetGroup(name, description)
  else:
    # Same group registered with different text; warn and update in place.
    print ('Warning: Changing description of target group "%s" from "%s" to '
           '"%s"' % (name, existing.description, description))
    existing.description = description
def GetTargetGroups():
  """Gets the dict of target groups.

  Returns:
    The dict of TargetGroup objects, indexed by group name.

    This dict is not fully populated until after BuildEnvironments() has been
    called.
  """
  return __target_groups
def GetTargetModes():
  """Gets the dict of target modes.

  Returns:
    The dict of TargetMode objects, indexed by mode name.

    This dict is not fully populated until after BuildEnvironments() has been
    called.
  """
  # TODO: Better to rename this to # GetTargetBuildEnvironments()?  That's a
  # more description name.
  return __target_modes
def GetTargets():
  """Gets the dict of targets.

  Returns:
    The dict of Target objects, indexed by target name.

    This dict is not fully populated until after BuildEnvironments() has been
    called.
  """
  return __targets
def SetTargetProperty(self, target_name, all_modes=False, **kwargs):
  """Sets one or more properties for a target.

  Args:
    self: Environment context.
    target_name: Name of the target.
    all_modes: If True, property applies to all modes.  If False, it applies
        only to the current mode (determined by self['BUILD_TYPE']).
    kwargs: Keyword args are used to set properties.  Properties will be
        converted to strings via env.subst().

  For example:
    foo_test = env.Program(...)[0]
    env.SetTargetProperty('foo_test', all_modes=True, DESCRIPTION='Foo test')
    env.SetTargetProperty('foo_test', EXE=foo_test)
  """
  # Fix: the docstring example previously read "global=True" -- 'global' is
  # not this function's parameter (it is all_modes) and is a reserved word,
  # so the example would not even parse if copied.

  # Create the target record on first use.
  if target_name not in __targets:
    __targets[target_name] = Target(target_name)
  target = __targets[target_name]

  if all_modes:
    add_to_dict = target.properties
  else:
    # Properties for the current build mode only; create the per-mode dict
    # lazily.
    mode = self.get('BUILD_TYPE')
    add_to_dict = target.mode_properties.setdefault(mode, {})

  # Convert values to strings in the context of the calling environment.
  for k, v in kwargs.items():
    add_to_dict[k] = self.subst(str(v))
def AddTargetHelp():
  """Adds SCons help for the targets, groups, and modes.

  This is called automatically by BuildEnvironments()."""
  help_text = ''
  for group in GetTargetGroups().values():
    items = group.GetTargetNames()
    items.sort()
    if items:
      help_text += '\nThe following %s:' % group.description
      colwidth = max(map(len, items)) + 2
      # Fix: use '//' (floor division) so the column math stays integral.
      # Plain '/' yields a float under Python 3, which breaks range() below;
      # '//' behaves identically for these ints under Python 2.
      cols = 77 // colwidth
      if cols < 1:
        cols = 1  # If target names are really long, one per line
      rows = (len(items) + cols - 1) // cols
      for row in range(0, rows):
        help_text += '\n  '
        for i in range(row, len(items), rows):
          help_text += '%-*s' % (colwidth, items[i])
      help_text += '\n  %s (do all of the above)\n' % group.name

  SCons.Script.Help(help_text)
def SetTargetDescription(self, target_name, description):
  """Convenience wrapper to set a target's global DESCRIPTION property.

  Args:
    self: Environment context.
    target_name: Name of the target.
    description: Description of the target.
  """
  # Delegates to SetTargetProperty with all_modes=True so the description
  # shows up regardless of build mode.
  props = {'DESCRIPTION': description}
  self.SetTargetProperty(target_name, all_modes=True, **props)
def AddTargetMode(env):
  """Adds the environment as a target mode.

  Args:
    env: Environment context.

  Called via env.Defer() for each build mode.
  """
  # Save the build mode and description into the module-level mode registry
  # so help text and target queries can enumerate all build modes later.
  mode = env.get('BUILD_TYPE')
  __target_modes[mode] = TargetMode(mode, env.get('BUILD_TYPE_DESCRIPTION'))
#------------------------------------------------------------------------------
def generate(env):
  # NOTE: SCons requires the use of this name, which fails gpylint.
  """SCons entry point for this tool."""
  env = env  # Silence gpylint
  # Publish query/registration functions as builtins so SConscripts can call
  # them without importing this tool module.
  __builtin__.AddTargetGroup = AddTargetGroup
  __builtin__.AddTargetHelp = AddTargetHelp
  __builtin__.GetTargetGroups = GetTargetGroups
  __builtin__.GetTargetModes = GetTargetModes
  __builtin__.GetTargets = GetTargets
  # Attach per-environment methods.
  env.AddMethod(SetTargetDescription)
  env.AddMethod(SetTargetProperty)
  # Defer per-mode setup until each mode's environment is fully configured.
  env.Defer(AddTargetMode)
| plxaye/chromium | src/native_client_sdk/src/site_scons/site_tools/component_targets.py | Python | apache-2.0 | 8,395 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
import erpnext
import calendar
from erpnext.accounts.utils import get_fiscal_year
from frappe.utils import getdate, nowdate, add_days, flt
from erpnext.hr.doctype.salary_structure.salary_structure import make_salary_slip
from erpnext.hr.doctype.process_payroll.test_process_payroll import get_salary_component_account
from erpnext.hr.doctype.process_payroll.process_payroll import get_month_details
class TestSalarySlip(unittest.TestCase):
def setUp(self):
make_earning_salary_component(["Basic Salary", "Allowance", "HRA"])
make_deduction_salary_component(["Professional Tax", "TDS"])
for dt in ["Leave Application", "Leave Allocation", "Salary Slip"]:
frappe.db.sql("delete from `tab%s`" % dt)
self.make_holiday_list()
frappe.db.set_value("Company", erpnext.get_default_company(), "default_holiday_list", "Salary Slip Test Holiday List")
def tearDown(self):
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 0)
frappe.set_user("Administrator")
def test_salary_slip_with_holidays_included(self):
no_of_days = self.get_no_of_days()
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 1)
self.make_employee("test_employee@salary.com")
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "status", "Active")
ss = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("test_employee@salary.com", "Monthly"))
self.assertEquals(ss.total_working_days, no_of_days[0])
self.assertEquals(ss.payment_days, no_of_days[0])
self.assertEquals(ss.earnings[0].amount, 25000)
self.assertEquals(ss.earnings[1].amount, 3000)
self.assertEquals(ss.deductions[0].amount, 5000)
self.assertEquals(ss.deductions[1].amount, 5000)
self.assertEquals(ss.gross_pay, 40500)
self.assertEquals(ss.net_pay, 29918)
def test_salary_slip_with_holidays_excluded(self):
no_of_days = self.get_no_of_days()
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 0)
self.make_employee("test_employee@salary.com")
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "status", "Active")
ss = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("test_employee@salary.com", "Monthly"))
self.assertEquals(ss.total_working_days, no_of_days[0] - no_of_days[1])
self.assertEquals(ss.payment_days, no_of_days[0] - no_of_days[1])
self.assertEquals(ss.earnings[0].amount, 25000)
self.assertEquals(ss.earnings[0].default_amount, 25000)
self.assertEquals(ss.earnings[1].amount, 3000)
self.assertEquals(ss.deductions[0].amount, 5000)
self.assertEquals(ss.deductions[1].amount, 5000)
self.assertEquals(ss.gross_pay, 40500)
self.assertEquals(ss.net_pay, 29918)
def test_payment_days(self):
no_of_days = self.get_no_of_days()
# Holidays not included in working days
frappe.db.set_value("HR Settings", None, "include_holidays_in_total_working_days", 1)
# set joinng date in the same month
self.make_employee("test_employee@salary.com")
if getdate(nowdate()).day >= 15:
date_of_joining = getdate(add_days(nowdate(),-10))
relieving_date = getdate(add_days(nowdate(),-10))
elif getdate(nowdate()).day < 15 and getdate(nowdate()).day >= 5:
date_of_joining = getdate(add_days(nowdate(),-3))
relieving_date = getdate(add_days(nowdate(),-3))
elif getdate(nowdate()).day < 5 and not getdate(nowdate()).day == 1:
date_of_joining = getdate(add_days(nowdate(),-1))
relieving_date = getdate(add_days(nowdate(),-1))
elif getdate(nowdate()).day == 1:
date_of_joining = getdate(nowdate())
relieving_date = getdate(nowdate())
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"test_employee@salary.com"}, "name"), "date_of_joining", date_of_joining)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"test_employee@salary.com"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee",
{"employee_name":"test_employee@salary.com"}, "name"), "status", "Active")
ss = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("test_employee@salary.com", "Monthly"))
self.assertEquals(ss.total_working_days, no_of_days[0])
self.assertEquals(ss.payment_days, (no_of_days[0] - getdate(date_of_joining).day + 1))
# set relieving date in the same month
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "date_of_joining", (add_days(nowdate(),-60)))
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "relieving_date", relieving_date)
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "status", "Left")
ss.save()
self.assertEquals(ss.total_working_days, no_of_days[0])
self.assertEquals(ss.payment_days, getdate(relieving_date).day)
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "relieving_date", None)
frappe.db.set_value("Employee", frappe.get_value("Employee", {"employee_name":"test_employee@salary.com"}, "name"), "status", "Active")
def test_employee_salary_slip_read_permission(self):
self.make_employee("test_employee@salary.com")
salary_slip_test_employee = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("test_employee@salary.com", "Monthly"))
frappe.set_user("test_employee@salary.com")
self.assertTrue(salary_slip_test_employee.has_permission("read"))
def test_email_salary_slip(self):
frappe.db.sql("delete from `tabEmail Queue`")
hr_settings = frappe.get_doc("HR Settings", "HR Settings")
hr_settings.email_salary_slip_to_employee = 1
hr_settings.save()
self.make_employee("test_employee@salary.com")
ss = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("test_employee@salary.com", "Monthly"))
ss.submit()
email_queue = frappe.db.sql("""select name from `tabEmail Queue`""")
self.assertTrue(email_queue)
def test_loan_repayment_salary_slip(self):
from erpnext.hr.doctype.employee_loan.test_employee_loan import create_loan_type, create_employee_loan
employee = self.make_employee("test_employee@salary.com")
create_loan_type("Car Loan", 500000, 6.4)
employee_loan = create_employee_loan(employee, "Car Loan", 11000, "Repay Over Number of Periods", 20)
employee_loan.repay_from_salary = 1
employee_loan.submit()
ss = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip("test_employee@salary.com", "Monthly"))
ss.submit()
self.assertEquals(ss.loan_repayment, 582)
self.assertEquals(ss.net_pay, (flt(ss.gross_pay) - (flt(ss.total_deduction) + flt(ss.loan_repayment))))
def test_payroll_frequency(self):
fiscal_year = get_fiscal_year(nowdate(), company="_Test Company")[0]
month = "%02d" % getdate(nowdate()).month
m = get_month_details(fiscal_year, month)
for payroll_frequncy in ["Monthly", "Bimonthly", "Fortnightly", "Weekly", "Daily"]:
self.make_employee(payroll_frequncy + "_test_employee@salary.com")
ss = frappe.get_doc("Salary Slip",
self.make_employee_salary_slip(payroll_frequncy + "_test_employee@salary.com", payroll_frequncy))
if payroll_frequncy == "Monthly":
self.assertEqual(ss.end_date, m['month_end_date'])
elif payroll_frequncy == "Bimonthly":
if getdate(ss.start_date).day <= 15:
self.assertEqual(ss.end_date, m['month_mid_end_date'])
else:
self.assertEqual(ss.end_date, m['month_end_date'])
elif payroll_frequncy == "Fortnightly":
self.assertEqual(ss.end_date, getdate(add_days(nowdate(),13)))
elif payroll_frequncy == "Weekly":
self.assertEqual(ss.end_date, getdate(add_days(nowdate(),6)))
elif payroll_frequncy == "Daily":
self.assertEqual(ss.end_date, getdate(nowdate()))
def make_employee(self, user):
if not frappe.db.get_value("User", user):
frappe.get_doc({
"doctype": "User",
"email": user,
"first_name": user,
"new_password": "password",
"user_roles": [{"doctype": "UserRole", "role": "Employee"}]
}).insert()
if not frappe.db.get_value("Employee", {"user_id": user}):
employee = frappe.get_doc({
"doctype": "Employee",
"naming_series": "EMP-",
"employee_name": user,
"company": erpnext.get_default_company(),
"user_id": user,
"date_of_birth": "1990-05-08",
"date_of_joining": "2013-01-01",
"department": frappe.get_all("Department", fields="name")[0].name,
"gender": "Female",
"company_email": user,
"prefered_contact_email": "Company Email",
"prefered_email": user,
"status": "Active",
"employment_type": "Intern"
}).insert()
return employee.name
else:
return frappe.get_value("Employee", {"employee_name":user}, "name")
def make_holiday_list(self):
fiscal_year = get_fiscal_year(nowdate(), company="_Test Company")
if not frappe.db.get_value("Holiday List", "Salary Slip Test Holiday List"):
holiday_list = frappe.get_doc({
"doctype": "Holiday List",
"holiday_list_name": "Salary Slip Test Holiday List",
"from_date": fiscal_year[1],
"to_date": fiscal_year[2],
"weekly_off": "Sunday"
}).insert()
holiday_list.get_weekly_off_dates()
holiday_list.save()
def make_employee_salary_slip(self, user, payroll_frequency):
employee = frappe.db.get_value("Employee", {"user_id": user})
salary_structure = make_salary_structure(payroll_frequency + " Salary Structure Test for Salary Slip", payroll_frequency, employee)
salary_slip = frappe.db.get_value("Salary Slip", {"employee": frappe.db.get_value("Employee", {"user_id": user})})
if not salary_slip:
salary_slip = make_salary_slip(salary_structure, employee = employee)
salary_slip.employee_name = frappe.get_value("Employee", {"name":frappe.db.get_value("Employee", {"user_id": user})}, "employee_name")
salary_slip.payroll_frequency = payroll_frequency
salary_slip.posting_date = nowdate()
salary_slip.insert()
# salary_slip.submit()
salary_slip = salary_slip.name
return salary_slip
def make_activity_for_employee(self):
activity_type = frappe.get_doc("Activity Type", "_Test Activity Type")
activity_type.billing_rate = 50
activity_type.costing_rate = 20
activity_type.wage_rate = 25
activity_type.save()
def get_no_of_days(self):
no_of_days_in_month = calendar.monthrange(getdate(nowdate()).year,
getdate(nowdate()).month)
no_of_holidays_in_month = len([1 for i in calendar.monthcalendar(getdate(nowdate()).year,
getdate(nowdate()).month) if i[6] != 0])
return [no_of_days_in_month[1], no_of_holidays_in_month]
def make_earning_salary_component(salary_components):
    """Ensure an Earning-type Salary Component exists for each given name."""
    for component_name in salary_components:
        if frappe.db.exists('Salary Component', component_name):
            continue
        component = frappe.get_doc({
            "doctype": "Salary Component",
            "salary_component": component_name,
            "type": "Earning"
        })
        component.insert()
        # Make sure the component has a GL account to post against.
        get_salary_component_account(component_name)
def make_deduction_salary_component(salary_components):
    """Ensure a Deduction-type Salary Component exists for each given name."""
    for component_name in salary_components:
        if frappe.db.exists('Salary Component', component_name):
            continue
        component = frappe.get_doc({
            "doctype": "Salary Component",
            "salary_component": component_name,
            "type": "Deduction"
        })
        component.insert()
        # Make sure the component has a GL account to post against.
        get_salary_component_account(component_name)
def make_salary_structure(sal_struct, payroll_frequency, employee):
    """Create (or extend) the named test Salary Structure; return its name.

    If the structure does not exist it is created with the default earnings /
    deductions components and the given employee.  If it exists but the
    employee is not yet assigned, the employee is appended.
    """
    if not frappe.db.exists('Salary Structure', sal_struct):
        # First use: build the structure from the default test components.
        frappe.get_doc({
            "doctype": "Salary Structure",
            "name": sal_struct,
            "company": erpnext.get_default_company(),
            "from_date": nowdate(),
            "employees": get_employee_details(employee),
            "earnings": get_earnings_component(),
            "deductions": get_deductions_component(),
            "payroll_frequency": payroll_frequency,
            "payment_account": frappe.get_value('Account', {'account_type': 'Cash', 'company': erpnext.get_default_company(), 'is_group': 0}, "name")
        }).insert()
    elif not frappe.db.get_value("Salary Structure Employee", {'parent': sal_struct, 'employee': employee}, 'name'):
        # Structure exists but this employee is not yet assigned to it.
        # NOTE(review): base/variable here (32000/3200) differ from the
        # defaults in get_employee_details() (50000/5000) -- confirm intent.
        doc = frappe.get_doc("Salary Structure", sal_struct)
        doc.append("employees", {
            "employee": employee,
            "employee_name": employee,
            "base": 32000,
            "variable": 3200
        })
        doc.save()
        sal_struct = doc.name
    return sal_struct
def get_employee_details(employee):
    """Return the default employees child-table row for a test Salary Structure."""
    row = {
        "employee": employee,
        "base": 50000,
        "variable": 5000
    }
    return [row]
def get_earnings_component():
    """Return the default earnings rows for the test Salary Structure.

    The 'condition'/'formula' strings are evaluated later by the Salary Slip
    engine against the structure's base/variable and component abbreviations.
    """
    basic_high = {
        "salary_component": 'Basic Salary',
        "abbr": 'BS',
        "condition": 'base > 10000',
        "formula": 'base*.5',
        "idx": 1
    }
    basic_low = {
        "salary_component": 'Basic Salary',
        "abbr": 'BS',
        "condition": 'base < 10000',
        "formula": 'base*.2',
        "idx": 2
    }
    hra = {
        "salary_component": 'HRA',
        "abbr": 'H',
        "amount": 3000,
        "idx": 3
    }
    allowance = {
        "salary_component": 'Allowance',
        "abbr": 'A',
        "condition": 'H < 10000',
        "formula": 'BS*.5',
        "idx": 4
    }
    return [basic_high, basic_low, hra, allowance]
def get_deductions_component():
    """Return the default deductions rows for the test Salary Structure."""
    prof_tax = {
        "salary_component": 'Professional Tax',
        "abbr": 'PT',
        "condition": 'base > 10000',
        "formula": 'base*.1',
        "idx": 1
    }
    tds = {
        "salary_component": 'TDS',
        "abbr": 'T',
        "formula": 'base*.1',
        "idx": 2
    }
    tds_intern = {
        "salary_component": 'TDS',
        "abbr": 'T',
        "condition": 'employment_type=="Intern"',
        "formula": 'base*.1',
        "idx": 3
    }
    return [prof_tax, tds, tds_intern]
from guizero import App, Box, Text
from common_test import (
schedule_after_test,
schedule_repeat_test,
destroy_test,
display_test,
color_test,
size_pixel_test,
size_fill_test,
events_test,
cascading_enable_test,
cascading_properties_test,
inheriting_properties_test,
add_tk_widget_test,
grid_layout_test,
auto_layout_test
)
def test_default_values():
    """A Box created with no options reports auto layout and no grid/align."""
    app = App()
    box = Box(app)
    assert box.master == app
    assert box.layout == "auto"
    assert box.grid is None
    assert box.align is None
    # NOTE(review): this asserts on the App's description, not the Box's --
    # possibly should be box.description; confirm intent.
    assert app.description > ""
    app.destroy()
def test_alt_values():
    """Explicit layout/grid/align/size options are reflected back."""
    app = App(layout="grid")
    box = Box(app, layout="grid", grid=[0, 1], align="top", width=10, height=11)
    assert box.layout == "grid"
    assert box.grid[0] == 0
    assert box.grid[1] == 1
    assert box.align == "top"
    assert box.width == 10
    assert box.height == 11
    app.destroy()
def test_border():
    """Border toggles between off (0), on (1), and explicit widths/colors."""
    app = App()
    box = Box(app)
    # Default: no border.
    assert not box.border
    assert box.border == 0
    # True maps to a 1-pixel black border.
    box.border = True
    assert box.border
    assert box.border == 1
    assert box._get_tk_config("highlightbackground") == "black"
    box.border = False
    assert not box.border
    # Numeric assignment sets the width directly.
    box.border = 10
    assert box.border
    assert box.border == 10
    # set_border() sets width and color together.
    box.set_border(11, "red")
    assert box.border
    assert box.border == 11
    assert box._get_tk_config("highlightbackground") == "red"
    app.destroy()
def test_after_schedule():
    """Box supports .after() scheduling (shared harness check)."""
    app = App()
    box = Box(app)
    schedule_after_test(app, box)
    app.destroy()
def test_repeat_schedule():
    """Box supports .repeat() scheduling (shared harness check)."""
    app = App()
    box = Box(app)
    schedule_repeat_test(app, box)
    app.destroy()
def test_destroy():
    """Box can be destroyed (shared harness check)."""
    app = App()
    box = Box(app)
    destroy_test(box)
    app.destroy()
def test_display():
    """Box can be hidden and shown (shared harness check)."""
    app = App()
    box = Box(app)
    display_test(box)
    app.destroy()
def test_color():
    """Box colors behave per the shared harness check."""
    app = App()
    box = Box(app)
    color_test(box)
    app.destroy()
def test_size():
    """Box sizing works for both pixel and fill modes (shared harness)."""
    app = App()
    box = Box(app)
    size_pixel_test(box)
    size_fill_test(box)
    app.destroy()
def test_enable():
    """Enabling/disabling cascades through App and Box (shared harness)."""
    app = App()
    box = Box(app)
    text = Text(box)
    cascading_enable_test(app)
    cascading_enable_test(box)
    app.destroy()
def test_events():
    """Box supports event binding (shared harness check)."""
    app = App()
    box = Box(app)
    events_test(box)
    app.destroy()
def test_cascading_properties():
    """Properties set on a Box cascade to children (shared harness)."""
    app = App()
    box = Box(app)
    cascading_properties_test(box)
    app.destroy()
def test_inheriting_properties():
    """A Box inherits properties from its parent (shared harness)."""
    app = App()
    box = Box(app)
    inheriting_properties_test(box)
    app.destroy()
def test_add_tk_widget():
    """Raw tk widgets can be added to the App (shared harness)."""
    app = App()
    add_tk_widget_test(app)
    app.destroy()
def test_auto_layout():
    """Auto layout places a Box per the shared harness expectations."""
    app = App()
    widget = Box(app)
    auto_layout_test(widget, None)
    app.destroy()
def test_grid_layout():
    """Grid layout honours [x, y], [x, y, xspan, yspan] and align."""
    app = App(layout="grid")
    widget = Box(app, grid=[1, 2])
    grid_layout_test(widget, 1, 2, 1, 1, None)
    spanning = Box(app, grid=[1, 2, 3, 4])
    grid_layout_test(spanning, 1, 2, 3, 4, None)
    aligned = Box(app, grid=[1, 2], align="top")
    grid_layout_test(aligned, 1, 2, 1, 1, "top")
    app.destroy()
| lawsie/guizero | tests/test_box.py | Python | bsd-3-clause | 2,906 |
from sympy import I, sqrt, log, exp, sin, asin, factorial
from sympy.core import Symbol, S, Rational, Integer, Dummy, Wild, Pow
from sympy.core.facts import InconsistentAssumptions
from sympy import simplify
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises, XFAIL
def test_symbol_unset():
    """A real+integer symbol derives imaginary/noninteger facts; is_number stays False."""
    s = Symbol('x', real=True, integer=True)
    assert s.is_real is True
    assert s.is_integer is True
    assert s.is_imaginary is False
    assert s.is_noninteger is False
    assert s.is_number is False
def test_zero():
z = Integer(0)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is True
assert z.is_nonnegative is True
assert z.is_even is True
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_one():
z = Integer(1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_number is True
assert z.is_composite is False # issue 8807
def test_negativeone():
z = Integer(-1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is True
assert z.is_nonpositive is True
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_infinity():
oo = S.Infinity
assert oo.is_commutative is True
assert oo.is_integer is None
assert oo.is_rational is None
assert oo.is_algebraic is None
assert oo.is_transcendental is None
assert oo.is_real is True
assert oo.is_complex is True
assert oo.is_noninteger is None
assert oo.is_irrational is None
assert oo.is_imaginary is False
assert oo.is_positive is True
assert oo.is_negative is False
assert oo.is_nonpositive is False
assert oo.is_nonnegative is True
assert oo.is_even is None
assert oo.is_odd is None
assert oo.is_finite is False
assert oo.is_infinite is True
assert oo.is_comparable is True
assert oo.is_prime is False
assert oo.is_composite is None
assert oo.is_number is True
def test_neg_infinity():
mm = S.NegativeInfinity
assert mm.is_commutative is True
assert mm.is_integer is None
assert mm.is_rational is None
assert mm.is_algebraic is None
assert mm.is_transcendental is None
assert mm.is_real is True
assert mm.is_complex is True
assert mm.is_noninteger is None
assert mm.is_irrational is None
assert mm.is_imaginary is False
assert mm.is_positive is False
assert mm.is_negative is True
assert mm.is_nonpositive is True
assert mm.is_nonnegative is False
assert mm.is_even is None
assert mm.is_odd is None
assert mm.is_finite is False
assert mm.is_infinite is True
assert mm.is_comparable is True
assert mm.is_prime is False
assert mm.is_composite is False
assert mm.is_number is True
def test_nan():
nan = S.NaN
assert nan.is_commutative is True
assert nan.is_integer is None
assert nan.is_rational is None
assert nan.is_algebraic is None
assert nan.is_transcendental is None
assert nan.is_real is None
assert nan.is_complex is None
assert nan.is_noninteger is None
assert nan.is_irrational is None
assert nan.is_imaginary is None
assert nan.is_positive is None
assert nan.is_negative is None
assert nan.is_nonpositive is None
assert nan.is_nonnegative is None
assert nan.is_even is None
assert nan.is_odd is None
assert nan.is_finite is None
assert nan.is_infinite is None
assert nan.is_comparable is False
assert nan.is_prime is None
assert nan.is_composite is None
assert nan.is_number is True
def test_pos_rational():
r = Rational(3, 4)
assert r.is_commutative is True
assert r.is_integer is False
assert r.is_rational is True
assert r.is_algebraic is True
assert r.is_transcendental is False
assert r.is_real is True
assert r.is_complex is True
assert r.is_noninteger is True
assert r.is_irrational is False
assert r.is_imaginary is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
assert r.is_nonnegative is True
assert r.is_even is False
assert r.is_odd is False
assert r.is_finite is True
assert r.is_infinite is False
assert r.is_comparable is True
assert r.is_prime is False
assert r.is_composite is False
r = Rational(1, 4)
assert r.is_nonpositive is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonnegative is True
r = Rational(5, 4)
assert r.is_negative is False
assert r.is_positive is True
assert r.is_nonpositive is False
assert r.is_nonnegative is True
r = Rational(5, 3)
assert r.is_nonnegative is True
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
def test_neg_rational():
r = Rational(-3, 4)
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-1, 4)
assert r.is_nonpositive is True
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-5, 4)
assert r.is_negative is True
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_nonnegative is False
r = Rational(-5, 3)
assert r.is_nonnegative is False
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonpositive is True
def test_pi():
z = S.Pi
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_E():
z = S.Exp1
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_I():
z = S.ImaginaryUnit
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_real is False
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is True
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is False
assert z.is_prime is False
assert z.is_composite is False
def test_symbol_real():
# issue 3848
a = Symbol('a', real=False)
assert a.is_real is False
assert a.is_integer is False
assert a.is_negative is False
assert a.is_positive is False
assert a.is_nonnegative is False
assert a.is_nonpositive is False
assert a.is_zero is False
def test_symbol_zero():
x = Symbol('x', zero=True)
assert x.is_positive is False
assert x.is_nonpositive
assert x.is_negative is False
assert x.is_nonnegative
assert x.is_zero is True
assert x.is_nonzero is False
assert x.is_finite is True
def test_symbol_positive():
x = Symbol('x', positive=True)
assert x.is_positive is True
assert x.is_nonpositive is False
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_positive():
x = -Symbol('x', positive=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is True
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_symbol_nonpositive():
x = Symbol('x', nonpositive=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_nonpositive():
x = -Symbol('x', nonpositive=True)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive():
x = Symbol('x', positive=False)
assert x.is_positive is False
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsepositive():
x = -Symbol('x', positive=False)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive_real():
x = Symbol('x', positive=False, real=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsepositive_real():
x = -Symbol('x', positive=False, real=True)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsenonnegative():
x = Symbol('x', nonnegative=False)
assert x.is_positive is False
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
@XFAIL
def test_neg_symbol_falsenonnegative():
x = -Symbol('x', nonnegative=False)
assert x.is_positive is None
assert x.is_nonpositive is False # this currently returns None
assert x.is_negative is False # this currently returns None
assert x.is_nonnegative is None
assert x.is_zero is False # this currently returns None
assert x.is_nonzero is True # this currently returns None
def test_symbol_falsenonnegative_real():
x = Symbol('x', nonnegative=False, real=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is True
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_falsenonnegative_real():
x = -Symbol('x', nonnegative=False, real=True)
assert x.is_positive is True
assert x.is_nonpositive is False
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is False
assert x.is_nonzero is True
def test_prime():
    """is_prime over small integers, including non-positive values."""
    for value in (-1, -2, -4, 0, 1, 4):
        assert S(value).is_prime is False
    for value in (2, 17):
        assert S(value).is_prime is True
def test_composite():
    """is_composite over small integers, plus an integer symbol that may be 1."""
    for value in (-1, -2, -4, 0, 2, 17):
        assert S(value).is_composite is False
    assert S(4).is_composite is True
    x = Dummy(integer=True, positive=True, prime=False)
    assert x.is_composite is None  # x could be 1
    assert (x + 1).is_composite is None
def test_prime_symbol():
x = Symbol('x', prime=True)
assert x.is_prime is True
assert x.is_integer is True
assert x.is_positive is True
assert x.is_negative is False
assert x.is_nonpositive is False
assert x.is_nonnegative is True
x = Symbol('x', prime=False)
assert x.is_prime is False
assert x.is_integer is None
assert x.is_positive is None
assert x.is_negative is None
assert x.is_nonpositive is None
assert x.is_nonnegative is None
def test_symbol_noncommutative():
x = Symbol('x', commutative=True)
assert x.is_complex is None
x = Symbol('x', commutative=False)
assert x.is_integer is False
assert x.is_rational is False
assert x.is_algebraic is False
assert x.is_irrational is False
assert x.is_real is False
assert x.is_complex is False
def test_other_symbol():
x = Symbol('x', integer=True)
assert x.is_integer is True
assert x.is_real is True
x = Symbol('x', integer=True, nonnegative=True)
assert x.is_integer is True
assert x.is_nonnegative is True
assert x.is_negative is False
assert x.is_positive is None
x = Symbol('x', integer=True, nonpositive=True)
assert x.is_integer is True
assert x.is_nonpositive is True
assert x.is_positive is False
assert x.is_negative is None
x = Symbol('x', odd=True)
assert x.is_odd is True
assert x.is_even is False
assert x.is_integer is True
x = Symbol('x', odd=False)
assert x.is_odd is False
assert x.is_even is None
assert x.is_integer is None
x = Symbol('x', even=True)
assert x.is_even is True
assert x.is_odd is False
assert x.is_integer is True
x = Symbol('x', even=False)
assert x.is_even is False
assert x.is_odd is None
assert x.is_integer is None
x = Symbol('x', integer=True, nonnegative=True)
assert x.is_integer is True
assert x.is_nonnegative is True
x = Symbol('x', integer=True, nonpositive=True)
assert x.is_integer is True
assert x.is_nonpositive is True
with raises(AttributeError):
x.is_real = False
x = Symbol('x', algebraic=True)
assert x.is_transcendental is False
x = Symbol('x', transcendental=True)
assert x.is_algebraic is False
assert x.is_rational is False
assert x.is_integer is False
# --- Hash stability and Add/Mul assumption deduction -----------------------
def test_issue_3825():
    """catch: hash instability"""
    x = Symbol("x")
    y = Symbol("y")
    a1 = x + y
    a2 = y + x
    # Querying an assumption must not perturb the cached hash.
    a2.is_comparable

    h1 = hash(a1)
    h2 = hash(a2)
    assert h1 == h2


def test_issue_4822():
    z = (-1)**Rational(1, 3)*(1 - I*sqrt(3))
    assert z.is_real in [True, None]


def test_hash_vs_typeinfo():
    """seemingly different typeinfo, but in fact equal"""
    # the following two are semantically equal
    x1 = Symbol('x', even=True)
    x2 = Symbol('x', integer=True, odd=False)
    assert hash(x1) == hash(x2)
    assert x1 == x2


def test_hash_vs_typeinfo_2():
    """different typeinfo should mean !eq"""
    # the following two are semantically different
    x = Symbol('x')
    x1 = Symbol('x', even=True)
    assert x != x1
    assert hash(x) != hash(x1)  # This might fail with very low probability


def test_hash_vs_eq():
    """catch: different hash for equal objects"""
    a = 1 + S.Pi  # important: do not fold it into a Number instance
    ha = hash(a)  # it should be Add/Mul/... to trigger the bug

    a.is_positive  # this uses .evalf() and deduces it is positive
    assert a.is_positive is True

    # be sure that hash stayed the same
    assert ha == hash(a)

    # now b should be the same expression
    b = a.expand(trig=True)
    hb = hash(b)

    assert a == b
    assert ha == hb


def test_Add_is_pos_neg():
    # these cover lines not covered by the rest of tests in core
    n = Symbol('n', negative=True, infinite=True)
    nn = Symbol('n', nonnegative=True, infinite=True)
    np = Symbol('n', nonpositive=True, infinite=True)
    p = Symbol('p', positive=True, infinite=True)
    r = Dummy(real=True, finite=False)
    x = Symbol('x')
    xf = Symbol('xb', finite=True)
    assert (n + p).is_positive is None
    assert (n + x).is_positive is None
    assert (p + x).is_positive is None
    assert (n + p).is_negative is None
    assert (n + x).is_negative is None
    assert (p + x).is_negative is None

    # An infinite term dominates any finite one.
    assert (n + xf).is_positive is False
    assert (p + xf).is_positive is True
    assert (n + xf).is_negative is True
    assert (p + xf).is_negative is False

    assert (x - S.Infinity).is_negative is None  # issue 7798
    # issue 8046, 16.2
    assert (p + nn).is_positive
    assert (n + np).is_negative
    assert (p + r).is_positive is None


def test_Add_is_imaginary():
    nn = Dummy(nonnegative=True)
    assert (I*nn + I).is_imaginary  # issue 8046, 17


def test_Add_is_algebraic():
    a = Symbol('a', algebraic=True)
    # NOTE(review): b is Symbol('a') (same name/assumptions as a), so b == a
    # and a + b collapses to 2*a — confirm this is intentional upstream.
    b = Symbol('a', algebraic=True)
    na = Symbol('na', algebraic=False)
    nb = Symbol('nb', algebraic=False)
    x = Symbol('x')
    assert (a + b).is_algebraic
    assert (na + nb).is_algebraic is None
    assert (a + na).is_algebraic is False
    assert (a + x).is_algebraic is None
    assert (na + x).is_algebraic is None


def test_Mul_is_algebraic():
    a = Symbol('a', algebraic=True)
    b = Symbol('a', algebraic=True)
    na = Symbol('na', algebraic=False)
    an = Symbol('an', algebraic=True, nonzero=True)
    nb = Symbol('nb', algebraic=False)
    x = Symbol('x')
    assert (a*b).is_algebraic
    assert (na*nb).is_algebraic is None
    # a could be zero, so a*na may be rational (0); nonzero an settles it.
    assert (a*na).is_algebraic is None
    assert (an*na).is_algebraic is False
    assert (a*x).is_algebraic is None
    assert (na*x).is_algebraic is None
# --- Pow/Mul assumption deduction and assumption sanitization ---------------
def test_Pow_is_algebraic():
    e = Symbol('e', algebraic=True)

    assert Pow(1, e, evaluate=False).is_algebraic
    assert Pow(0, e, evaluate=False).is_algebraic

    a = Symbol('a', algebraic=True)
    na = Symbol('na', algebraic=False)
    ia = Symbol('ia', algebraic=True, irrational=True)
    ib = Symbol('ib', algebraic=True, irrational=True)
    r = Symbol('r', rational=True)
    x = Symbol('x')
    assert (a**r).is_algebraic
    assert (a**x).is_algebraic is None
    assert (na**r).is_algebraic is False
    assert (ia**r).is_algebraic
    assert (ia**ib).is_algebraic is False
    assert (a**e).is_algebraic is None

    # Gelfond-Schneider constant:
    assert Pow(2, sqrt(2), evaluate=False).is_algebraic is False

    assert Pow(S.GoldenRatio, sqrt(3), evaluate=False).is_algebraic is False


def test_Mul_is_infinite():
    x = Symbol('x')
    f = Symbol('f', finite=True)
    i = Symbol('i', infinite=True)
    z = Dummy(zero=True)
    nzf = Dummy(finite=True, zero=False)
    from sympy import Mul
    assert (x*f).is_finite is None
    assert (x*i).is_finite is None
    assert (f*i).is_finite is False
    assert (x*f*i).is_finite is None
    assert (z*i).is_finite is False
    assert (nzf*i).is_finite is False
    assert (z*f).is_finite is True
    assert Mul(0, f, evaluate=False).is_finite is True
    assert Mul(0, i, evaluate=False).is_finite is False

    assert (x*f).is_infinite is None
    assert (x*i).is_infinite is None
    assert (f*i).is_infinite is None
    assert (x*f*i).is_infinite is None
    # zero * infinite gives nan, so compare against S.NaN.is_infinite.
    assert (z*i).is_infinite is S.NaN.is_infinite
    assert (nzf*i).is_infinite is True
    assert (z*f).is_infinite is False
    assert Mul(0, f, evaluate=False).is_infinite is False
    assert Mul(0, i, evaluate=False).is_infinite is S.NaN.is_infinite


def test_special_is_rational():
    i = Symbol('i', integer=True)
    i2 = Symbol('i2', integer=True)
    ni = Symbol('ni', integer=True, nonzero=True)
    r = Symbol('r', rational=True)
    rn = Symbol('r', rational=True, nonzero=True)
    nr = Symbol('nr', irrational=True)
    x = Symbol('x')
    assert sqrt(3).is_rational is False
    assert (3 + sqrt(3)).is_rational is False
    assert (3*sqrt(3)).is_rational is False
    assert exp(3).is_rational is False
    assert exp(ni).is_rational is False
    assert exp(rn).is_rational is False
    assert exp(x).is_rational is None
    assert exp(log(3), evaluate=False).is_rational is True
    assert log(exp(3), evaluate=False).is_rational is True
    assert log(3).is_rational is False
    assert log(ni + 1).is_rational is False
    assert log(rn + 1).is_rational is False
    assert log(x).is_rational is None
    assert (sqrt(3) + sqrt(5)).is_rational is None
    assert (sqrt(3) + S.Pi).is_rational is False
    assert (x**i).is_rational is None
    assert (i**i).is_rational is True
    assert (i**i2).is_rational is None
    assert (r**i).is_rational is None
    assert (r**r).is_rational is None
    assert (r**x).is_rational is None
    assert (nr**i).is_rational is None  # issue 8598
    assert (nr**Symbol('z', zero=True)).is_rational
    assert sin(1).is_rational is False
    assert sin(ni).is_rational is False
    assert sin(rn).is_rational is False
    assert sin(x).is_rational is None
    assert asin(r).is_rational is False
    assert sin(asin(3), evaluate=False).is_rational is True


@XFAIL
def test_issue_6275():
    x = Symbol('x')
    # both zero or both Muls...but neither "change would be very appreciated.
    # This is similar to x/x => 1 even though if x = 0, it is really nan.
    assert isinstance(x*0, type(0*S.Infinity))
    if 0*S.Infinity is S.NaN:
        b = Symbol('b', finite=None)
        assert (b*0).is_zero is None


def test_sanitize_assumptions():
    # issue 6666
    for cls in (Symbol, Dummy, Wild):
        # Truthy/falsy assumption values are coerced to booleans.
        x = cls('x', real=1, positive=0)
        assert x.is_real is True
        assert x.is_positive is False
        assert cls('', real=True, positive=None).is_positive is None
        raises(ValueError, lambda: cls('', commutative=None))
    raises(ValueError, lambda: Symbol._sanitize(dict(commutative=None)))


def test_special_assumptions():
    e = -3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2
    assert simplify(e < 0) is S.false
    assert simplify(e > 0) is S.false
    assert (e == 0) is False  # it's not a literal 0
    assert e.equals(0) is True
# --- Regression tests tied to specific issue numbers ------------------------
def test_inconsistent():
    # cf. issues 5795 and 5545
    raises(InconsistentAssumptions, lambda: Symbol('x', real=True,
           commutative=False))


def test_issue_6631():
    assert ((-1)**(I)).is_real is True
    assert ((-1)**(I*2)).is_real is True
    assert ((-1)**(I/2)).is_real is True
    assert ((-1)**(I*S.Pi)).is_real is True
    assert (I**(I + 2)).is_real is True


def test_issue_2730():
    assert (1/(1 + I)).is_real is False


def test_issue_4149():
    assert (3 + I).is_complex
    assert (3 + I).is_imaginary is False
    assert (3*I + S.Pi*I).is_imaginary
    # as Zero.is_imaginary is False, see issue 7649
    y = Symbol('y', real=True)
    assert (3*I + S.Pi*I + y*I).is_imaginary is None
    p = Symbol('p', positive=True)
    assert (3*I + S.Pi*I + p*I).is_imaginary
    n = Symbol('n', negative=True)
    assert (-3*I - S.Pi*I + n*I).is_imaginary

    i = Symbol('i', imaginary=True)
    assert ([(i**a).is_imaginary for a in range(4)] ==
            [False, True, False, True])

    # tests from the PR #7887:
    e = S("-sqrt(3)*I/2 + 0.866025403784439*I")
    assert e.is_real is False
    assert e.is_imaginary


def test_issue_2920():
    n = Symbol('n', negative=True)
    assert sqrt(n).is_imaginary


def test_issue_7899():
    x = Symbol('x', real=True)
    assert (I*x).is_real is None  # x could be zero
    assert ((x - I)*(x - 1)).is_zero is None
    assert ((x - I)*(x - 1)).is_real is None


@XFAIL
def test_issue_7993():
    x = Dummy(integer=True)
    y = Dummy(noninteger=True)
    assert (x - y).is_zero is False


def test_issue_8075():
    raises(InconsistentAssumptions, lambda: Dummy(zero=True, finite=False))
    raises(InconsistentAssumptions, lambda: Dummy(zero=True, infinite=True))


def test_issue_8642():
    x = Symbol('x', real=True, integer=False)
    assert (x*2).is_integer is None


def test_issues_8632_8633_8638_8675_8992():
    p = Dummy(integer=True, positive=True)
    nn = Dummy(integer=True, nonnegative=True)
    assert (p - S.Half).is_positive
    assert (p - 1).is_nonnegative
    assert (nn + 1).is_positive
    assert (-p + 1).is_nonpositive
    assert (-nn - 1).is_negative
    prime = Dummy(prime=True)
    assert (prime - 2).is_nonnegative  # smallest prime is 2
    assert (prime - 3).is_nonnegative is None
    even = Dummy(positive=True, even=True)
    assert (even - 2).is_nonnegative

    p = Dummy(positive=True)
    assert (p/(p + 1) - 1).is_negative
    assert ((p + 2)**3 - S.Half).is_positive
    n = Dummy(negative=True)
    assert (n - 3).is_nonpositive


def test_issue_9115():
    n = Dummy('n', integer=True, nonnegative=True)
    assert (factorial(n) >= 1) == True
    assert (factorial(n) < 1) == False


def test_issue_9165():
    z = Symbol('z', zero=True)
    f = Symbol('f', finite=False)
    assert 0/z == S.NaN
    assert 0*(1/z) == S.NaN
    assert 0*f == S.NaN
| sahilshekhawat/sympy | sympy/core/tests/test_assumptions.py | Python | bsd-3-clause | 27,164 |
from pathlib import Path
import unittest
from saw_client import *
from saw_client.llvm import Contract, elem, field, i32, alias_ty
# like test_nested_struct.py but using cute __getitem__ indexing on SetupVals
class FContract1(Contract):
    def specification(self):
        """Spec for f(): allocate a struct.t, pick a fresh i32 for the nested
        'b' field (addressed by *field name*), and assert f returns it."""
        tp = self.alloc(alias_ty('struct.t'))
        b = self.fresh_var(i32, "b")
        self.points_to(tp['n']['b'], b)
        self.execute_func(tp)
        self.returns(b)


class FContract2(Contract):
    def specification(self):
        """Same contract as FContract1, but the nested field is addressed by
        positional index (tp[1][1]) instead of by name."""
        tp = self.alloc(alias_ty('struct.t'))
        b = self.fresh_var(i32, "b")
        self.points_to(tp[1][1], b)
        self.execute_func(tp)
        self.returns(b)


class LLVMNestedStructTest(unittest.TestCase):
    def test_llvm_struct(self):
        # Start (or reset) the SAW server, then verify 'f' against both
        # formulations of the same contract.
        connect(reset_server=True)
        if __name__ == "__main__": view(LogResults())
        bcname = str(Path('tests','saw','test-files', 'nested_struct.bc'))
        mod = llvm_load_module(bcname)
        result = llvm_verify(mod, 'f', FContract1())
        self.assertIs(result.is_success(), True)
        result = llvm_verify(mod, 'f', FContract2())
        self.assertIs(result.is_success(), True)


if __name__ == "__main__":
    unittest.main()
| GaloisInc/saw-script | saw-remote-api/python/tests/saw/test_nested_struct2.py | Python | bsd-3-clause | 1,218 |
import os
from optparse import OptionParser
import pandas as pd
from sklearn.preprocessing import normalize
from ..util import file_handling as fh
from ..util import dirs
def main():
    """Command-line entry point.

    Usage: %prog project lda_output_dir — sets up the project's directory
    layout, then imports the LDA output files from lda_output_dir.
    """
    usage = "%prog project lda_output_dir"
    parser = OptionParser(usage=usage)
    #parser.add_option('--keyword', dest='key', default=None,
    #                  help='Keyword argument: default=%default')
    #parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
    #                  help='Keyword argument: default=%default')

    (options, args) = parser.parse_args()
    project_name = args[0]
    lda_dir = args[1]

    dirs.make_base_dir(project_name)
    process_lda_output(lda_dir)
def process_lda_output(lda_dir):
    """Normalize LDA output files and store them in the project layout.

    Reads ``document_topics.csv`` and ``vocab_topics.csv`` from *lda_dir*
    (tab-separated, no header row, first column is the index, last column
    dropped), L1-normalizes them into document-topic and topic-word
    distributions, and writes the results into ``dirs.lda_dir``.
    """
    input_file = os.path.join(lda_dir, 'document_topics.csv')
    # header=None means "no header row".  The original passed header=-1,
    # an old pandas spelling that modern pandas rejects outright.
    df_temp = pd.read_csv(input_file, header=None, index_col=0, sep='\t')
    # get index
    items = df_temp.index.tolist()
    # remove last column because excel
    counts = df_temp.values[:, :-1]
    # normalize by row: each document becomes a distribution over topics
    doc_vectors = normalize(counts, norm='l1', axis=1)
    # create column names
    n_rows, n_cols = doc_vectors.shape
    column_names = ['topic' + str(i) for i in range(n_cols)]
    # save vectors
    fh.pickle_data(doc_vectors, os.path.join(dirs.lda_dir, 'lda.pkl'))
    # save headers
    headers = {'items': items, 'features': column_names}
    fh.write_to_json(headers, os.path.join(dirs.lda_dir, 'lda.json'), sort_keys=False)

    # also copy and normalize the vocab-topic file
    input_file = os.path.join(lda_dir, 'vocab_topics.csv')
    df_temp = pd.read_csv(input_file, header=None, index_col=0, sep='\t')
    # get index
    vocab = df_temp.index.tolist()
    # remove last column because excel
    counts = df_temp.values[:, :-1]
    # normalize by column: each topic becomes a distribution over words
    topic_vectors = normalize(counts, norm='l1', axis=0)
    # create column names
    n_rows, n_cols = topic_vectors.shape
    column_names = ['topic' + str(i) for i in range(n_cols)]
    df = pd.DataFrame(topic_vectors, index=vocab, columns=column_names)
    df.to_csv(os.path.join(dirs.lda_dir, 'topic_words.csv'))
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
| dallascard/guac | core/lda/lda_postprocessing.py | Python | apache-2.0 | 2,175 |
# -*- coding: utf-8 -*-
'''Setup script.

All metadata (name, version, author, description, license, requirements)
is derived from the package module itself and the files that sit next to
this script, then handed to setuptools.
'''

import os

from setuptools import setup, find_packages

# Anchor everything to this script's directory so relative reads work
# no matter where the script is invoked from.
WORK_DIR = os.path.dirname(os.path.abspath(__file__))
os.chdir(WORK_DIR)
os.sys.path.insert(1, WORK_DIR)

# The package is named after the directory containing this script, and is
# imported to pull its dunder metadata.
PKG_NAME = os.path.basename(WORK_DIR)
PKG_MOD = __import__(PKG_NAME)

# __author__ has the form "Author Name <email@host>".
PKG_AUTHOR_NAME, PKG_AUTHOR_EMAIL = PKG_MOD.__author__.rsplit(' ', 1)
PKG_AUTHOR_EMAIL = PKG_AUTHOR_EMAIL.strip('<>')
PKG_VERSION = PKG_MOD.__version__
PKG_CLASSIFIERS = PKG_MOD.__classifiers__

# Use context managers so file handles are closed deterministically
# (the original open(...).readlines() calls leaked the handles).
with open(os.path.join(WORK_DIR, 'README.rst'), 'r') as readme_fp:
    PKG_INFO = readme_fp.readlines()
PKG_DESC_SHORT = PKG_INFO[0]
PKG_DESC_LONG = ''.join(PKG_INFO)

with open(os.path.join(WORK_DIR, 'LICENSE'), 'r') as license_fp:
    PKG_LICENSE_FULL = license_fp.readlines()
PKG_LICENSE_NAME = PKG_LICENSE_FULL[0].strip()

with open(os.path.join(WORK_DIR, 'requirements.txt')) as reqs_fp:
    PKG_REQS = reqs_fp.readlines()

setup(
    name=PKG_NAME,
    version=PKG_VERSION,
    author=PKG_AUTHOR_NAME,
    author_email=PKG_AUTHOR_EMAIL,
    url='https://github.com/soutys/' + PKG_NAME,
    maintainer=PKG_AUTHOR_NAME,
    maintainer_email=PKG_AUTHOR_EMAIL,
    description=PKG_DESC_SHORT,
    long_description=PKG_DESC_LONG,
    classifiers=PKG_CLASSIFIERS,
    install_requires=PKG_REQS,
    packages=find_packages(),
    license=PKG_LICENSE_NAME,
    keywords='system services monitoring',
    test_suite='tests',
    entry_points={
        'console_scripts': [
            PKG_NAME + ' = ' + PKG_NAME + '.cmd:main',
        ],
    },
)

# vim: ts=4:sw=4:et:fdm=indent:ff=unix
| soutys/metricol | setup.py | Python | mit | 1,465 |
#!/usr/bin/env python
#coding:utf-8
# Author: mozman (python version)
# Purpose: avl tree module (Julienne Walker's unbounded none recursive algorithm)
# source: http://eternallyconfuzzled.com/tuts/datastructures/jsw_tut_avl.aspx
# Created: 01.05.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
# Conclusion of Julienne Walker
# AVL trees are about as close to optimal as balanced binary search trees can
# get without eating up resources. You can rest assured that the O(log N)
# performance of binary search trees is guaranteed with AVL trees, but the extra
# bookkeeping required to maintain an AVL tree can be prohibitive, especially
# if deletions are common. Insertion into an AVL tree only requires one single
# or double rotation, but deletion could perform up to O(log N) rotations, as
# in the example of a worst case AVL (ie. Fibonacci) tree. However, those cases
# are rare, and still very fast.
# AVL trees are best used when degenerate sequences are common, and there is
# little or no locality of reference in nodes. That basically means that
# searches are fairly random. If degenerate sequences are not common, but still
# possible, and searches are random then a less rigid balanced tree such as red
# black trees or Andersson trees are a better solution. If there is a significant
# amount of locality to searches, such as a small cluster of commonly searched
# items, a splay tree is theoretically better than all of the balanced trees
# because of its move-to-front design.
from __future__ import absolute_import
from .abctree import ABCTree
from array import array
__all__ = ['AVLTree']
MAXSTACK = 32
class Node(object):
    """Internal tree node holding a key/value pair, two child links and the
    cached AVL balance/height bookkeeping field."""
    __slots__ = ['left', 'right', 'balance', 'key', 'value']

    def __init__(self, key=None, value=None):
        # A freshly created node is a leaf: no children, balance of 0.
        self.key = key
        self.value = value
        self.left = None
        self.right = None
        self.balance = 0

    def __getitem__(self, key):
        """N[0] -> left child, N[1] -> right child."""
        if key == 0:
            return self.left
        return self.right

    def __setitem__(self, key, value):
        """N[0] = node sets the left child; N[1] = node sets the right."""
        if key == 0:
            self.left = value
        else:
            self.right = value

    def free(self):
        """Drop every reference held by this node so cycles can be reclaimed."""
        self.key = None
        self.value = None
        self.left = None
        self.right = None
def height(node):
    """Return the cached subtree height stored on *node*; an empty subtree
    (None) counts as -1."""
    if node is None:
        return -1
    return node.balance
def jsw_single(root, direction):
    # Single rotation of *root* towards *direction* (0/1 index into the
    # children, matching Node.__getitem__): the child on the opposite side
    # is promoted and *root* becomes its child on *direction*'s side.
    other_side = 1 - direction
    save = root[other_side]
    root[other_side] = save[direction]
    save[direction] = root
    # Recompute the cached heights (stored in .balance) bottom-up: first
    # the demoted root, then the promoted node, which now sits above it.
    rlh = height(root.left)
    rrh = height(root.right)
    slh = height(save[other_side])
    root.balance = max(rlh, rrh) + 1
    save.balance = max(slh, root.balance) + 1
    # Return the new subtree root so the caller can re-link its parent.
    return save
def jsw_double(root, direction):
    """Double rotation: rotate the opposite child first, then rotate *root*
    towards *direction*, returning the new subtree root."""
    opposite = 1 - direction
    root[opposite] = jsw_single(root[opposite], opposite)
    return jsw_single(root, direction)
class AVLTree(ABCTree):
    """
    AVLTree implements a balanced binary tree with a dict-like interface.

    see: http://en.wikipedia.org/wiki/AVL_tree

    In computer science, an AVL tree is a self-balancing binary search tree, and
    it is the first such data structure to be invented. In an AVL tree, the
    heights of the two child subtrees of any node differ by at most one;
    therefore, it is also said to be height-balanced. Lookup, insertion, and
    deletion all take O(log n) time in both the average and worst cases, where n
    is the number of nodes in the tree prior to the operation. Insertions and
    deletions may require the tree to be rebalanced by one or more tree rotations.

    The AVL tree is named after its two inventors, G.M. Adelson-Velskii and E.M.
    Landis, who published it in their 1962 paper "An algorithm for the
    organization of information."

    AVLTree() -> new empty tree.
    AVLTree(mapping) -> new tree initialized from a mapping
    AVLTree(seq) -> new tree initialized from seq [(k1, v1), (k2, v2), ... (kn, vn)]

    see also abctree.ABCTree() class.
    """
    def _new_node(self, key, value):
        """Create a new tree node."""
        self._count += 1  # bookkeeping used for len(tree)
        return Node(key, value)

    def insert(self, key, value):
        """T.insert(key, value) <==> T[key] = value, insert key, value into tree."""
        if self._root is None:
            self._root = self._new_node(key, value)
        else:
            # Iterative insert: record the search path in parallel stacks so
            # the rebalancing pass can walk back towards the root.
            node_stack = []  # node stack
            dir_stack = array('I')  # direction stack
            done = False
            top = 0
            node = self._root
            # search for an empty link, save path
            while True:
                if key == node.key:  # update existing item
                    node.value = value
                    return
                direction = 1 if key > node.key else 0
                dir_stack.append(direction)
                node_stack.append(node)
                if node[direction] is None:
                    break
                node = node[direction]

            # Insert a new node at the bottom of the tree
            node[direction] = self._new_node(key, value)

            # Walk back up the search path
            top = len(node_stack) - 1
            while (top >= 0) and not done:
                direction = dir_stack[top]
                other_side = 1 - direction
                top_node = node_stack[top]
                left_height = height(top_node[direction])
                right_height = height(top_node[other_side])

                # Terminate or rebalance as necessary */
                if left_height - right_height == 0:
                    done = True
                if left_height - right_height >= 2:
                    a = top_node[direction][direction]
                    b = top_node[direction][other_side]

                    # Single vs. double rotation depends on which grandchild
                    # subtree is taller.
                    if height(a) >= height(b):
                        node_stack[top] = jsw_single(top_node, other_side)
                    else:
                        node_stack[top] = jsw_double(top_node, other_side)

                    # Fix parent
                    if top != 0:
                        node_stack[top - 1][dir_stack[top - 1]] = node_stack[top]
                    else:
                        self._root = node_stack[0]
                    done = True

                # Update balance factors
                top_node = node_stack[top]
                left_height = height(top_node[direction])
                right_height = height(top_node[other_side])

                top_node.balance = max(left_height, right_height) + 1
                top -= 1

    def remove(self, key):
        """T.remove(key) <==> del T[key], remove item <key> from tree."""
        if self._root is None:
            raise KeyError(str(key))
        else:
            # Fixed-size stacks (MAXSTACK deep) record the search path for
            # the rebalancing pass after the removal.
            node_stack = [None] * MAXSTACK  # node stack
            dir_stack = array('I', [0] * MAXSTACK)  # direction stack
            top = 0
            node = self._root

            while True:
                # Terminate if not found
                if node is None:
                    raise KeyError(str(key))
                elif node.key == key:
                    break

                # Push direction and node onto stack
                direction = 1 if key > node.key else 0
                dir_stack[top] = direction
                node_stack[top] = node
                node = node[direction]
                top += 1

            # Remove the node
            if (node.left is None) or (node.right is None):
                # Which child is not null?
                direction = 1 if node.left is None else 0

                # Fix parent
                if top != 0:
                    node_stack[top - 1][dir_stack[top - 1]] = node[direction]
                else:
                    self._root = node[direction]
                node.free()
                self._count -= 1
            else:
                # Find the inorder successor
                heir = node.right

                # Save the path
                dir_stack[top] = 1
                node_stack[top] = node
                top += 1

                while heir.left is not None:
                    dir_stack[top] = 0
                    node_stack[top] = heir
                    top += 1
                    heir = heir.left

                # Swap data: copy the successor's payload into *node* rather
                # than relinking the node itself.
                node.key = heir.key
                node.value = heir.value

                # Unlink successor and fix parent
                xdir = 1 if node_stack[top - 1].key == node.key else 0
                node_stack[top - 1][xdir] = heir.right
                heir.free()
                self._count -= 1

            # Walk back up the search path
            top -= 1
            while top >= 0:
                direction = dir_stack[top]
                other_side = 1 - direction
                top_node = node_stack[top]
                left_height = height(top_node[direction])
                right_height = height(top_node[other_side])
                b_max = max(left_height, right_height)

                # Update balance factors
                top_node.balance = b_max + 1

                # Terminate or rebalance as necessary
                if (left_height - right_height) == -1:
                    break
                if (left_height - right_height) <= -2:
                    a = top_node[other_side][direction]
                    b = top_node[other_side][other_side]

                    if height(a) <= height(b):
                        node_stack[top] = jsw_single(top_node, direction)
                    else:
                        node_stack[top] = jsw_double(top_node, direction)

                    # Fix parent
                    if top != 0:
                        node_stack[top - 1][dir_stack[top - 1]] = node_stack[top]
                    else:
                        self._root = node_stack[0]
                top -= 1
| zdvresearch/fast15-paper-extras | cache-simulator/cache_model_evaluation/bintrees/avltree.py | Python | mit | 10,082 |
# -*- coding: utf-8 -*-
"""
Test formatters:
* behave.formatter.tags.TagsCountFormatter
* behave.formatter.tags.TagsLocationFormatter
"""
from __future__ import absolute_import
from .test_formatter import FormatterTests as FormatterTest
from .test_formatter import MultipleFormattersTests as MultipleFormattersTest
# -----------------------------------------------------------------------------
# FORMATTER TESTS: With TagCountFormatter
# -----------------------------------------------------------------------------
# Each class below parameterizes the shared test suites from test_formatter
# purely by class attribute: FormatterTest runs one formatter,
# MultipleFormattersTest runs a combination of formatters.
class TestTagsCountFormatter(FormatterTest):
    formatter_name = "tags"


# -----------------------------------------------------------------------------
# FORMATTER TESTS: With TagLocationFormatter
# -----------------------------------------------------------------------------
class TestTagsLocationFormatter(FormatterTest):
    formatter_name = "tags.location"


# -----------------------------------------------------------------------------
# MULTI-FORMATTER TESTS: With TagCountFormatter
# -----------------------------------------------------------------------------
class TestPrettyAndTagsCount(MultipleFormattersTest):
    formatters = ["pretty", "tags"]


class TestPlainAndTagsCount(MultipleFormattersTest):
    formatters = ["plain", "tags"]


class TestJSONAndTagsCount(MultipleFormattersTest):
    formatters = ["json", "tags"]


class TestRerunAndTagsCount(MultipleFormattersTest):
    formatters = ["rerun", "tags"]


# -----------------------------------------------------------------------------
# MULTI-FORMATTER TESTS: With TagLocationFormatter
# -----------------------------------------------------------------------------
class TestPrettyAndTagsLocation(MultipleFormattersTest):
    formatters = ["pretty", "tags.location"]


class TestPlainAndTagsLocation(MultipleFormattersTest):
    formatters = ["plain", "tags.location"]


class TestJSONAndTagsLocation(MultipleFormattersTest):
    formatters = ["json", "tags.location"]


class TestRerunAndTagsLocation(MultipleFormattersTest):
    formatters = ["rerun", "tags.location"]


class TestTagsCountAndTagsLocation(MultipleFormattersTest):
    formatters = ["tags", "tags.location"]
| jenisys/behave | tests/unit/test_formatter_tags.py | Python | bsd-2-clause | 2,181 |
"""Shared exceptions among the various command line scripts installed here."""
from __future__ import absolute_import, division, print_function, unicode_literals
class MismatchError(Exception):
    """Raised when required component information does not match what was found."""

    def __init__(self, subsystem, expected, found):
        message = "Information mismatch in '%s', expected: '%s', found: '%s'" % (subsystem, expected, found)
        # Pass the message to Exception so str(exc) and generic handlers
        # (e.g. the print(str(exception)) fallback in handle_exception) show
        # it; the original zero-argument super() call left str(exc) empty.
        super(MismatchError, self).__init__(message)
        self.message = message
class ExternalError(Exception):
    """Raised when an external service fails.

    Attributes:
        service: name of the failing service.
        message: human-readable description of the failure.
    """

    def __init__(self, service, message):
        # Forward the message to Exception so str(exc) is informative
        # (the original base-class call passed no arguments).
        super(ExternalError, self).__init__(message)
        self.message = message
        self.service = service
class InternalError(Exception):
    """Raised for errors that indicate a bug in the script itself."""

    def __init__(self, message):
        # Forward the message to Exception so str(exc) is informative
        # (the original base-class call passed no arguments).
        super(InternalError, self).__init__(message)
        self.message = message
class GenericError(Exception):
    """Generic failure carrying its own process exit code (default 1)."""

    def __init__(self, message, return_value=1):
        # Forward the message to Exception so str(exc) is informative
        # (the original base-class call passed no arguments).
        super(GenericError, self).__init__(message)
        self.message = message
        self.return_value = return_value
def handle_exception(exception):
    """Helper function for handling exceptions raised in main.

    Prints a category-specific error report for *exception* and returns the
    process exit code to use (1-5, or a GenericError's own return_value).
    """
    print()
    if isinstance(exception, MismatchError):
        print("ERROR: There is a mismatch in required component information of the environment")
        print(exception.message)
        retval = 1
    elif isinstance(exception, InternalError):
        print("ERROR: An internal error has occurred. This indicates a bug in this script.")
        print(exception.message)
        retval = 2
    elif isinstance(exception, ExternalError):
        print("ERROR: An external service failed")
        print("Service: %s" % exception.service)
        print(exception.message)
        retval = 3
    elif isinstance(exception, KeyboardInterrupt):
        print()
        print("ERROR: Interrupted by Ctrl-C")
        retval = 4
    elif isinstance(exception, GenericError):
        print("ERROR: %s" % exception.message)
        retval = exception.return_value
    else:
        print("ERROR: An unknown exception occurred")
        print(str(exception))
        # BUGFIX: the original never assigned retval on this branch, so
        # reaching "return retval" raised UnboundLocalError for any
        # exception type not matched above.  Use a distinct exit code.
        retval = 5

    print()
    return retval
| iotile/typedargs | .multipackage/scripts/shared_errors.py | Python | lgpl-3.0 | 2,032 |
import numpy
from ._gouraud_triangles import lib as _gouraud
from ._gouraud_triangles import ffi as _ffi
def _cast(ptype, array):
    # Reinterpret the numpy array's data pointer as the given cffi pointer
    # type (no copy; the array must stay alive while the pointer is in use).
    return _ffi.cast(ptype, array.ctypes.data)
def draw_mask(image_shape, geometry, antialias=False):
    """Draw a mask (0-255) from a given celiagg path. Requires celiagg installed.

    Note: to produce a True/False mask from the output, simply do the following:
        mask = draw_mask(image_shape, geometry)
        bool_mask = mask > 0

    Parameters:
        image_shape: shape of the resulting image
        geometry: celiagg VertexSource class, such as celiagg.Path or
            celiagg.BSpline, containing geometry to draw
        antialias: if False (default), output contains only 0 and 255. If True,
            output will be antialiased (better for visualization).

    Returns: mask array of dtype numpy.uint8
    """
    # Imported lazily so this module loads even when celiagg is absent.
    import celiagg
    image = numpy.zeros(image_shape, dtype=numpy.uint8, order='F')
    # NB celiagg uses (h, w) C-order convention for image shapes, so give it the transpose
    canvas = celiagg.CanvasG8(image.T)
    state = celiagg.GraphicsState(drawing_mode=celiagg.DrawingMode.DrawFill, anti_aliased=antialias)
    # White fill on the single-channel canvas produces 255 inside the shape.
    fill = celiagg.SolidPaint(1,1,1)
    transform = celiagg.Transform()
    canvas.draw_shape(geometry, transform, state, fill=fill)
    return image
def gouraud_triangle_strip(triangle_strip, vertex_vals, shape, accumulate=False, background=0):
    """Return a triangle strip Gouraud-shaded based on values at each vertex.

    Parameters:
        triangle_strip: shape (n, 2) array of vertices describing a strip of
            connected triangles (such that vertices (0,1,2) describe the first
            triangle, vertices (1,2,3) describe the second, and so forth).
        vertex_vals: shape (n,) or (n, m) array of values associated with each
            vertex. In case of shape (n, m) this indicates m distinct sets of
            values for each vertex; as such m distinct output images will be
            produced.
        shape: shape of the output image(s).
        accumulate: if True, output values will be added atop one another in
            in cases where triangles overlap. (Useful for finding such cases.)
        background: value of output in non-drawn regions

    Returns: single image (if vertex_vals is 1-dim) or list of images (if
        vertex_vals is > 1-dim), where each image contains the interpolation
        of the values at each vertex.
    """
    # The C extension expects contiguous float32 input buffers.
    triangle_strip = numpy.asarray(triangle_strip, dtype=numpy.float32, order='C')
    vertex_vals = numpy.asarray(vertex_vals, dtype=numpy.float32, order='C')
    assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2
    assert vertex_vals.ndim in (1, 2)
    unpack_out = False
    if vertex_vals.ndim == 1:
        # Promote to (n, 1) so the C code always sees 2-d value arrays;
        # remember to strip the extra axis from the output afterwards.
        vertex_vals = vertex_vals[:, numpy.newaxis]
        unpack_out = True
    assert len(vertex_vals) == len(triangle_strip)
    num_vertices = len(triangle_strip)
    out = numpy.empty(tuple(shape)+vertex_vals.shape[1:], dtype=numpy.float32, order='F')
    out.fill(background)
    _gouraud.gouraud_triangle_strip(num_vertices,
        _cast('float *', triangle_strip),
        _cast('float *', vertex_vals),
        _cast('float *', out),
        out.shape, out.strides, accumulate)
    if unpack_out:
        return out[:,:,0]
    else:
        # Move the per-value-set axis first: one image per value set.
        return out.transpose((2,0,1))
def mask_triangle_strip(triangle_strip, shape):
    """Return a triangle strip rasterized into a boolean mask.

    Mask is guaranteed to be identical to the region drawn by gouraud_triangle_strip,
    which is not necessarily exactly the case for draw_mask() (which uses a
    slightly different algorithm internally).

    Parameters:
        triangle_strip: shape (n, 2) array of vertices describing a strip of
            connected triangles (such that vertices (0,1,2) describe the first
            triangle, vertices (1,2,3) describe the second, and so forth).
        shape: shape of the output image(s).

    Returns: bool image of specified shape.
    """
    # The C extension expects a contiguous float32 vertex buffer.
    triangle_strip = numpy.asarray(triangle_strip, dtype=numpy.float32, order='C')
    assert triangle_strip.ndim == 2 and triangle_strip.shape[1] == 2 and len(triangle_strip) > 2
    num_vertices = len(triangle_strip)
    # Fortran order to match the strides convention used by the C rasterizer.
    out = numpy.zeros(tuple(shape), dtype=bool, order='F')
    _gouraud.mask_triangle_strip(num_vertices,
        _cast('float *', triangle_strip),
        _cast('char *', out),
        out.shape, out.strides)
    return out
# NOTE(review): "gourad" (sic) matches the name of the C entry point in the
# _gouraud cffi module; renaming would break that binding and any callers.
def gourad_centerline_strip(left, center, right, left_v, center_v, right_v, shape, accumulate=False, background=0):
    """Gouraud-shade a polygon defined by a centerline and left and right edges.
    Parameters:
        left, right, center: shape (n, 2) arrays of vertices describing a
            polygon.
        left_v, center_v, right_v: shape (n,) or (n, m) arrays of values associated
            with each vertex. In case of shape (n, m) this indicates m distinct
            sets of values for each vertex; as such m distinct output images will be
            produced.
        shape: shape of the output image(s).
        accumulate: if True, output values will be added atop one another in
            in cases where triangles overlap. (Useful for finding such cases.)
        background: value of output in non-drawn regions
    Returns: single image (if vertex_vals is 1-dim) or list of images (if
        vertex_vals is > 1-dim), where each image contains the interpolation
        of the values at each vertex.
    """
    # Coerce everything to C-contiguous float32, the layout the C code expects.
    left = numpy.asarray(left, dtype=numpy.float32, order='C')
    center = numpy.asarray(center, dtype=numpy.float32, order='C')
    right = numpy.asarray(right, dtype=numpy.float32, order='C')
    left_v = numpy.asarray(left_v, dtype=numpy.float32, order='C')
    center_v = numpy.asarray(center_v, dtype=numpy.float32, order='C')
    right_v = numpy.asarray(right_v, dtype=numpy.float32, order='C')
    # The three edges and the three value arrays must be congruent.
    assert left.shape == center.shape and center.shape == right.shape
    assert left_v.shape == center_v.shape and center_v.shape == right_v.shape
    assert left.ndim == 2 and left.shape[1] == 2 and len(left) > 1
    assert left_v.ndim in (1, 2)
    assert len(left) == len(left_v)
    unpack_out = False
    if left_v.ndim == 1:
        # Promote 1-dim values to a single value-set so the C code sees (n, m).
        left_v = left_v[:, numpy.newaxis]
        center_v = center_v[:, numpy.newaxis]
        right_v = right_v[:, numpy.newaxis]
        unpack_out = True
    num_points = len(left)
    # Fortran-ordered output buffer, pre-filled with the background value.
    out = numpy.empty(tuple(shape)+left_v.shape[1:], dtype=numpy.float32, order='F')
    out.fill(background)
    _gouraud.gourad_centerline_strip(num_points,
        _cast('float *', left),
        _cast('float *', center),
        _cast('float *', right),
        _cast('float *', left_v),
        _cast('float *', center_v),
        _cast('float *', right_v),
        _cast('float *', out),
        out.shape, out.strides, accumulate)
    if unpack_out:
        # Single value-set: return one image rather than a length-1 stack.
        return out[:,:,0]
    else:
        # Move the value-set axis first so the result iterates as m images.
        return out.transpose((2,0,1))
| zplab/zplib | zplib/image/draw.py | Python | mit | 7,001 |
import json
import logging
from uuid import UUID
import zlib
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, SuspiciousOperation
from django.http import JsonResponse
from django.views.generic import View
from zentral.contrib.inventory.conf import macos_version_from_build
from zentral.contrib.inventory.exceptions import EnrollmentSecretVerificationFailed
from zentral.contrib.inventory.models import MachineTag, MetaMachine, PrincipalUserSource
from zentral.contrib.inventory.utils import commit_machine_snapshot_and_trigger_events, verify_enrollment_secret
from zentral.contrib.santa.events import post_enrollment_event, process_events, post_preflight_event
from zentral.contrib.santa.models import Configuration, EnrolledMachine, Enrollment, MachineRule
from zentral.utils.certificates import parse_dn
from zentral.utils.http import user_agent_and_ip_address_from_request
logger = logging.getLogger('zentral.contrib.santa.views.api')
class BaseSyncView(View):
    """Common machinery for all santa sync endpoints.

    Subclasses implement do_post(); this base class handles request
    authentication (enrollment secret from the URL, optional client
    certificate), payload decoding and enrolled-machine lookup/caching.
    """
    # When True, (enrolled machine, tag ids) are looked up from the cache
    # before hitting the database. PreflightView disables this to force a
    # fresh read at the start of each sync cycle.
    use_enrolled_machine_cache = True
    def _get_client_cert_dn(self):
        # Return the client certificate DN forwarded by the TLS proxy,
        # parsed into a dict, or None when no certificate was presented.
        dn = self.request.META.get("HTTP_X_SSL_CLIENT_S_DN")
        if dn:
            return parse_dn(dn)
        else:
            return None
    def _get_json_data(self, request):
        # Decode the request body, transparently inflating zlib/deflate
        # compressed payloads (santa compresses large uploads).
        payload = request.body
        if not payload:
            return None
        try:
            if request.META.get('HTTP_CONTENT_ENCODING', None) in ("zlib", "deflate"):
                payload = zlib.decompress(payload)
            return json.loads(payload)
        except ValueError:
            # json.JSONDecodeError is a ValueError subclass
            raise SuspiciousOperation("Could not read JSON data")
    def get_enrolled_machine(self):
        # Database lookup of the enrolled machine matching both the URL
        # secret and the hardware UUID. Returns None when not enrolled.
        try:
            enrolled_machine = EnrolledMachine.objects.select_related(
                "enrollment__secret",
                "enrollment__configuration"
            ).get(
                enrollment__secret__secret=self.enrollment_secret_secret,
                hardware_uuid=self.hardware_uuid
            )
        except EnrolledMachine.DoesNotExist:
            pass
        else:
            # Enforce mutual-TLS when the configuration requires it.
            if enrolled_machine.enrollment.configuration.client_certificate_auth and not self.client_cert_dn:
                raise PermissionDenied("Missing client certificate")
            return enrolled_machine
    def post(self, request, *args, **kwargs):
        # URL kwargs
        self.enrollment_secret_secret = kwargs["enrollment_secret"]
        try:
            # Normalize the machine id to the canonical UUID string form.
            self.hardware_uuid = str(UUID(kwargs["machine_id"]))
        except ValueError:
            raise PermissionDenied("Invalid machine id")
        self.client_cert_dn = self._get_client_cert_dn()
        self.user_agent, self.ip = user_agent_and_ip_address_from_request(request)
        self.request_data = self._get_json_data(request)
        # NOTE(review): the "tests/santa/fixtures/" prefix looks like a
        # leftover/misnamed cache namespace — confirm it is intentional.
        self.cache_key = f"tests/santa/fixtures/{self.enrollment_secret_secret}{self.hardware_uuid}"
        self.enrolled_machine = None
        self.tag_ids = []
        if self.use_enrolled_machine_cache:
            try:
                # cache.get returns None on a miss; unpacking None raises
                # TypeError, which is treated as "not cached".
                self.enrolled_machine, self.tag_ids = cache.get(self.cache_key)
            except TypeError:
                pass
            else:
                # Re-check certificate auth even on a cache hit.
                if self.enrolled_machine.enrollment.configuration.client_certificate_auth and not self.client_cert_dn:
                    raise PermissionDenied("Missing client certificate")
        if not self.enrolled_machine:
            self.enrolled_machine = self.get_enrolled_machine()
            if not self.enrolled_machine:
                raise PermissionDenied("Machine not enrolled")
            meta_machine = MetaMachine(self.enrolled_machine.serial_number)
            self.tag_ids = [t.id for t in meta_machine.tags]
            cache.set(self.cache_key, (self.enrolled_machine, self.tag_ids), 600)  # TODO cache timeout hardcoded
        return JsonResponse(self.do_post())
class PreflightView(BaseSyncView):
    """First request of a santa sync cycle.

    Enrolls (or updates) the machine, commits an inventory snapshot and
    returns the sync server configuration to the agent.
    """
    # Preflight must always read fresh DB state; it repopulates the cache.
    use_enrolled_machine_cache = False
    def _get_primary_user(self):
        # primary user: normalize whitespace, treat empty as missing
        primary_user = self.request_data.get('primary_user')
        if primary_user:
            primary_user = primary_user.strip()
        if primary_user:
            return primary_user
        return None
    def _get_enrolled_machine_defaults(self):
        # Field values for EnrolledMachine derived from the preflight payload.
        defaults = {
            'serial_number': self.request_data['serial_num'],
            'primary_user': self._get_primary_user(),
            'client_mode': Configuration.MONITOR_MODE,
            'santa_version': self.request_data['santa_version'],
            'binary_rule_count': self.request_data.get('binary_rule_count'),
            'certificate_rule_count': self.request_data.get('certificate_rule_count'),
            'compiler_rule_count': self.request_data.get('compiler_rule_count'),
            'transitive_rule_count': self.request_data.get('transitive_rule_count'),
        }
        # client mode: default to MONITOR, only LOCKDOWN is recognized besides it
        req_client_mode = self.request_data.get('client_mode')
        if req_client_mode == "LOCKDOWN":
            defaults['client_mode'] = Configuration.LOCKDOWN_MODE
        elif req_client_mode != "MONITOR":
            logger.error(f"Unknown client mode: {req_client_mode}")
        return defaults
    def _enroll_machine(self):
        # Create (or update) the EnrolledMachine for this hardware UUID after
        # verifying the enrollment secret. Sets self.enrollment_action.
        try:
            enrollment = (Enrollment.objects.select_related("configuration", "secret")
                                            .get(secret__secret=self.enrollment_secret_secret))
        except Enrollment.DoesNotExist:
            raise PermissionDenied("Unknown enrollment secret")
        if enrollment.configuration.client_certificate_auth and not self.client_cert_dn:
            raise PermissionDenied("Missing client certificate")
        try:
            verify_enrollment_secret(
                "santa_enrollment", self.enrollment_secret_secret,
                self.user_agent, self.ip,
                serial_number=self.request_data["serial_num"],
                udid=self.hardware_uuid,
            )
        except EnrollmentSecretVerificationFailed:
            raise PermissionDenied("Wrong enrollment secret")
        # get or create enrolled machine
        enrolled_machine, _ = EnrolledMachine.objects.update_or_create(
            enrollment=enrollment,
            hardware_uuid=self.hardware_uuid,
            defaults=self._get_enrolled_machine_defaults(),
        )
        # apply enrollment secret tags
        for tag in enrollment.secret.tags.all():
            MachineTag.objects.get_or_create(serial_number=enrolled_machine.serial_number, tag=tag)
        # delete other enrolled machines with the same hardware UUID
        # (the machine switched enrollment): that is a re-enrollment
        other_enrolled_machines = (EnrolledMachine.objects.exclude(pk=enrolled_machine.pk)
                                                          .filter(hardware_uuid=self.hardware_uuid))
        if other_enrolled_machines.count():
            self.enrollment_action = 're-enrollment'
            other_enrolled_machines.delete()
        else:
            self.enrollment_action = 'enrollment'
        # post event
        post_enrollment_event(
            enrolled_machine.serial_number, self.user_agent, self.ip,
            {'configuration': enrollment.configuration.serialize_for_event(),
             'action': self.enrollment_action}
        )
        return enrolled_machine
    def get_enrolled_machine(self):
        # Enroll on first contact; otherwise sync the stored record with the
        # values reported in this preflight, saving only when changed.
        self.enrollment_action = None
        enrolled_machine = super().get_enrolled_machine()
        if not enrolled_machine:
            enrolled_machine = self._enroll_machine()
        else:
            enrolled_machine_changed = False
            for attr, val in self._get_enrolled_machine_defaults().items():
                if getattr(enrolled_machine, attr) != val:
                    setattr(enrolled_machine, attr, val)
                    enrolled_machine_changed = True
            if enrolled_machine_changed:
                enrolled_machine.save()
        return enrolled_machine
    def _commit_machine_snapshot(self):
        # os version
        build = self.request_data["os_build"]
        os_version = dict(zip(('major', 'minor', 'patch'),
                              (int(s) for s in self.request_data['os_version'].split('.'))))
        os_version.update({'name': 'macOS', 'build': build})
        try:
            # Refine the version from the build number when possible.
            os_version.update(macos_version_from_build(build))
        except ValueError:
            pass
        # tree: inventory machine snapshot for this source
        tree = {'source': {'module': 'zentral.contrib.santa',
                           'name': 'Santa'},
                'reference': self.hardware_uuid,
                'serial_number': self.enrolled_machine.serial_number,
                'os_version': os_version,
                'system_info': {'computer_name': self.request_data['hostname']},
                'public_ip_address': self.ip,
                }
        # tree primary user
        primary_user = self._get_primary_user()
        if primary_user:
            tree['principal_user'] = {
                'source': {'type': PrincipalUserSource.SANTA_MACHINE_OWNER},
                'unique_id': primary_user,
                'principal_name': primary_user,
            }
        # tree business unit
        business_unit = self.enrolled_machine.enrollment.secret.get_api_enrollment_business_unit()
        if business_unit:
            tree['business_unit'] = business_unit.serialize()
        commit_machine_snapshot_and_trigger_events(tree)
    def do_post(self):
        post_preflight_event(self.enrolled_machine.serial_number,
                             self.user_agent,
                             self.ip,
                             self.request_data)
        self._commit_machine_snapshot()
        response_dict = self.enrolled_machine.enrollment.configuration.get_sync_server_config(
            self.enrolled_machine.santa_version
        )
        # clean sync? drop all machine rules so the agent starts from scratch
        if self.request_data.get("request_clean_sync") is True or self.enrollment_action is not None:
            MachineRule.objects.filter(enrolled_machine=self.enrolled_machine).delete()
            response_dict["clean_sync"] = True
        return response_dict
class RuleDownloadView(BaseSyncView):
    """Serve the next batch of rules to the syncing santa agent."""

    def do_post(self):
        """Return a rule batch plus, when more remain, an acknowledgment cursor."""
        cursor_in = self.request_data.get("cursor")
        batch, cursor_out = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, self.tag_ids, cursor_in
        )
        response = {"rules": batch}
        if cursor_out:
            # If a cursor is present in the response, santa will make an extra
            # request, which acknowledges the rules — there is always one extra
            # query to validate the last batch. This is more robust than keeping
            # the cursor on the enrolled machine and updating the cache to pass
            # it to the Postflight view to validate the last batch.
            response["cursor"] = cursor_out
        return response
class EventUploadView(BaseSyncView):
    """Receive execution events uploaded by the santa agent."""

    def do_post(self):
        """Process uploaded events; request bundle binaries we do not know yet."""
        missing_bundle_hashes = process_events(
            self.enrolled_machine,
            self.user_agent,
            self.ip,
            self.request_data,
        )
        if missing_bundle_hashes:
            return {"event_upload_bundle_binaries": missing_bundle_hashes}
        return {}
class PostflightView(BaseSyncView):
    # Last request of a sync cycle: invalidate the cached (machine, tag ids)
    # entry so the next sync cycle re-reads fresh database state.
    def do_post(self):
        cache.delete(self.cache_key)
        return {}
| zentralopensource/zentral | zentral/contrib/santa/views/api.py | Python | apache-2.0 | 11,377 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 University of Liège
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys, subprocess, platform

# Configure and build the 'louis' project out-of-tree in ../louisB,
# then return to the source directory. Any failure is printed and the
# script waits for ENTER so a double-clicked console window stays open.
try:
    # create build dir next to the source tree
    os.chdir('..')
    if not os.path.isdir('louisB'):
        os.mkdir('louisB')
    os.chdir('louisB')
    # cmake configure: pick the generator for the host platform.
    # platform.system() compares the OS name directly, unlike membership in
    # the whole uname() tuple, which could false-positive on e.g. a node
    # name equal to "Windows".
    if platform.system() == 'Windows':
        subprocess.call(r'cmake -G "Visual Studio 11 Win64" ..\louis', shell=True)
    else:
        subprocess.call('cmake -G"Eclipse CDT4 - Unix Makefiles" -DCMAKE_ECLIPSE_VERSION=4.6 -DCMAKE_ECLIPSE_GENERATE_SOURCE_PROJECT=TRUE ../louis', shell=True)
    # build (Release config for multi-config generators such as VS)
    subprocess.call('cmake --build . --config Release', shell=True)
    os.chdir('../louis')
except Exception as e:
    print(e)
    print("<press ENTER to quit>"); input()
"""
:Created: 16 August 2015
:Author: Lucas Connors
"""
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.views.static import serve
from contest.models import Vote
from contest.views import (
ContestDetailsView,
HomeView,
SponsorDetailsView,
contest_upload_photo,
contest_vote_entry,
)
from users.decorators import redirect_authenticated
from users.views import LoginView, ProfileView, RegisterView, logout
urlpatterns = [
    # Django admin site
    url(r"^admin/", admin.site.urls),
    # Authentication: register/login redirect away when already logged in
    url(
        r"^register/?$", redirect_authenticated(RegisterView.as_view()), name="register"
    ),
    url(r"^login/?$", redirect_authenticated(LoginView.as_view()), name="login"),
    url(r"^logout/?$", logout, name="logout"),
    # Sponsor pages
    url(
        r"^sponsor/details/(?P<slug>[\w_-]+)/?$",
        SponsorDetailsView.as_view(),
        name="sponsor_details",
    ),
    # Voting: same view, the vote type is fixed via the extra-kwargs dict
    url(
        r"^contest/details/(?P<contest_slug>[\w_-]+)/entry/(?P<entry_id>\d+)/upvote/?$",
        contest_vote_entry,
        {"vote_type": Vote.UPVOTE},
        name="contest_upvote_entry",
    ),
    url(
        r"^contest/details/(?P<contest_slug>[\w_-]+)/entry/(?P<entry_id>\d+)/downvote/?$",
        contest_vote_entry,
        {"vote_type": Vote.DOWNVOTE},
        name="contest_downvote_entry",
    ),
    # Contest pages
    url(
        r"^contest/details/(?P<slug>[\w_-]+)/?$",
        ContestDetailsView.as_view(),
        name="contest_details",
    ),
    url(
        r"^contest/upload/(?P<slug>[\w_-]+)/?$",
        contest_upload_photo,
        name="contest_upload_photo",
    ),
    # User profile (login required) and home page
    url(r"^profile/?$", login_required(ProfileView.as_view()), name="profile"),
    url(r"^$", HomeView.as_view(), name="home"),
]
# Add media folder to urls when DEBUG = True
# (in production the web server is expected to serve MEDIA_ROOT directly)
if settings.DEBUG:
    urlpatterns.append(
        url(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT})
    )
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add SyncJob.entity_name (auto-generated migration; do not hand-edit)."""
    dependencies = [
        ('djconnectwise', '0018_auto_20170505_1531'),
    ]
    operations = [
        migrations.AddField(
            model_name='syncjob',
            name='entity_name',
            # Empty-string default backfills existing rows;
            # preserve_default=False marks the default as one-off only.
            field=models.CharField(max_length=100, default=''),
            preserve_default=False,
        ),
    ]
| KerkhoffTechnologies/django-connectwise | djconnectwise/migrations/0019_syncjob_entity_name.py | Python | mit | 461 |
# pylint: disable=missing-docstring,import-error,unused-import,assignment-from-no-return
# pylint: disable=invalid-name, too-few-public-methods, useless-object-inheritance
from __future__ import print_function
from UNINFERABLE import uninferable_func
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
my_single_dispatch = singledispatch
class FakeSingleDispatch(object):  # stand-in that mimics singledispatch's decorator API
    @staticmethod
    def register(function):  # no-op registration: hands the function back unchanged
        return function
    def __call__(self, function):  # using the instance as a decorator is also a no-op
        return function
fake_singledispatch_decorator = FakeSingleDispatch()  # fake decorator exercised below
@singledispatch
def func(arg):  # generic base implementation: returns the argument unchanged
    return arg
@func.register(str)
def _(arg):  # str overload; the dispatcher owns it, so the '_' name is conventional
    return 42
@func.register(float)
@func.register(int)
def _(arg):  # one implementation registered for both float and int
    return 42
@my_single_dispatch
def func2(arg):  # base implementation registered through the aliased decorator
    return arg
@func2.register(int)
def _(arg):  # int overload for func2
    return 42
@singledispatch
def with_extra_arg(arg, verbose=False):  # generic implementation with an extra keyword
    if verbose:
        print(arg)
    return arg
@with_extra_arg.register(str)
def _(arg, verbose=False):  # str overload; its unused local is the test subject
    unused = 42  # [unused-variable]
    return arg[::-1]
@fake_singledispatch_decorator
def not_single_dispatch(arg):  # [unused-argument]
    return 'not yet implemented'  # fake decorator: no dispatch semantics apply
@fake_singledispatch_decorator.register(str)
def bad_single_dispatch(arg):  # [unused-argument]
    return 42  # fake register(): pylint must not treat this as a real overload
@fake_singledispatch_decorator.register(str)
def bad_single_dispatch(arg):  # [unused-argument, function-redefined]
    return 24  # deliberate redefinition to trigger function-redefined
| ruchee/vimrc | vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/singledispatch_functions_py3.py | Python | mit | 1,470 |
__version__ = '3.1.7'
from .lib.main import Sequent
from .lib.sequent_types import SequentError, RunMode, StepReplay, StepStatus
from .lib.utils import or_
# Step status constants, re-exported from StepStatus for convenience.
STEP_READY = StepStatus.ready  # step is ready
STEP_ACTIVE = StepStatus.active  # step is active
STEP_SUCCESS = StepStatus.success  # step succeeded
STEP_FAILURE = StepStatus.failure  # step failed
STEP_COMPLETE = StepStatus.complete  # step complete with success or failure
# Replay policies applied to a step when a flow is re-run (see StepReplay).
STEP_SKIP = StepReplay.skip  # skip step if previously succeeded
STEP_RERUN = StepReplay.rerun  # reruns step regardless if previously succeeded
# Flow run modes (see RunMode).
RUN_RESTART = RunMode.restart  # run flow from start
RUN_RECOVER = RunMode.recover  # reruns failed steps
# Note: internal use only
RUN_CONTINUE = RunMode.continue_  # continue from where it left in previous loop
| Acrisel/sequent | sequent/sequent/__init__.py | Python | mit | 794 |
# Download the Python helper library from twilio.com/docs/python/install
import os
from twilio.rest import Client
import json  # NOTE(review): unused in this snippet — confirm before removing
# Find your credentials at twilio.com/console
# To set up environmental variables, see http://twil.io/secure
api_key_sid = os.environ['TWILIO_API_KEY']
api_key_secret = os.environ['TWILIO_API_KEY_SECRET']
client = Client(api_key_sid, api_key_secret)
# Compose the recorded room into a single 1280x720 mp4: all audio tracks are
# mixed ('*'), and video tracks are laid out on a 3x3 "chess table" grid with
# the listed cells excluded, reusing freed cells for the newest tracks.
composition = client.video.compositions.create(
    room_sid = 'RMXXXX',
    audio_sources = '*',
    video_layout = {
        "chess_table":{
            "x_pos": 10,
            "y_pos": 0,
            "width": 1260,
            "height": 720,
            "max_rows": 3,
            "max_columns": 3,
            "reuse": "show_newest",
            "cells_excluded": [1,3,5,7],
            "video_sources":["*"]
        }
    },
    # Twilio POSTs composition status updates to this URL.
    status_callback = 'http://my.server.org/callbacks',
    resolution = '1280x720',
    format='mp4')
print('Created composition with SID=%s' % (composition.sid))
| TwilioDevEd/api-snippets | video/rest/compositions/compose-chess/compose-chess.6.x.py | Python | mit | 1,166 |
# -*- coding: utf-8 -*-
from apps.registro.models.ExtensionAulicaConexionInternet import ExtensionAulicaConexionInternet
from apps.registro.models.TipoConexion import TipoConexion
from django.core.exceptions import ValidationError
from django import forms
class ExtensionAulicaConexionInternetForm(forms.ModelForm):
    """Internet-connection data of an 'extensión áulica'.

    The connection-detail fields are mandatory only when the form reports
    that a connection exists (``tiene_conexion``).
    """
    tipo_conexion = forms.ModelChoiceField(queryset=TipoConexion.objects.all().order_by('nombre'), required=False)
    verificado = forms.BooleanField(required=False)

    class Meta:
        model = ExtensionAulicaConexionInternet
        exclude = ['extension_aulica']

    def __chequear_si_tiene_conexion(self, field):
        """Treat *field* as required iff there is a connection; otherwise None."""
        if not self.cleaned_data['tiene_conexion']:
            return None
        value = self.cleaned_data[field]
        if value is None or value == '':
            raise ValidationError('Este campo es obligatorio.')
        return value

    def clean_tipo_conexion(self):
        return self.__chequear_si_tiene_conexion('tipo_conexion')

    def clean_proveedor(self):
        return self.__chequear_si_tiene_conexion('proveedor')

    def clean_costo(self):
        return self.__chequear_si_tiene_conexion('costo')

    def clean_cantidad(self):
        return self.__chequear_si_tiene_conexion('cantidad')
| MERegistro/meregistro | meregistro/apps/registro/forms/ExtensionAulicaConexionInternetForm.py | Python | bsd-3-clause | 1,222 |
# Teaching demo: the three common ways of importing names from a module.
import math
print(math.pi)  # qualified access through the module object
from math import *
print(pi)  # wildcard import: pi is now a local name (discouraged in real code)
from math import cos
print(cos(pi))  # explicit single-name import
| lstorchi/teaching | modules/testimport.py | Python | gpl-3.0 | 95 |
def save():
    """Insert a sample user-device binding with an empty device reference.

    Manual test helper: prints the looked-up device document and the id
    returned by the insert.
    """
    import datetime
    from bson import ObjectId
    from util.mongodbutil import get_db

    db = get_db()
    # look up the physical device record first
    device_doc = db.ebc_device.find_one({"_id": "10279010865051"})
    print(device_doc)
    binding = {
        "_id": "2_oYazTsp2lBjzD_dNoFX17eVGIIcIACCF23576547",
        "user_id": "1_oYazTsp2lBjzD_dNoFX17eVGIIcI",
        "mac": "ACCF23576547",
        "device": None,
        "room_id": ObjectId("5565a90a808df7fd642ec8dc"),
        "alias": "23432432",
        "create_date": datetime.datetime.utcnow(),
    }
    print(db.ebc_user_device.insert(binding))
def read():
    """Fetch a binding by user_id and resolve its 'device' reference manually."""
    from util.mongodbutil import get_db

    db = get_db()
    # the stored "device" field is a DBRef-like object; follow it by hand
    device_ref = db.ebc_user_device.find_one({"user_id": "1_oYazTsp2lBjzD_dNoFX17eVGIIcI"})["device"]
    device_doc = db[device_ref.collection].find_one({"_id": device_ref.id})
    print(device_doc)
def insert_data():
    """Insert a sample user-device binding that references a device by id."""
    import datetime
    from bson import ObjectId
    from util.mongodbutil import get_db

    db = get_db()
    binding = {
        "_id": "2_oYazTsp2lBjzD_dNoFX17eVGIIcIACCF233C84E7",
        "user_id": "2_oYazTsp2lBjzD_dNoFX17eVGIIcI",
        "mac": "ACCF233C84E7",
        "device_id": "10279010865051",
        "room_id": ObjectId("5565a90a808df7fd642ec8f6"),
        "alias": "110",
        "create_date": datetime.datetime.utcnow(),
    }
    db.ebc_user_device.insert(binding)

insert_data()  # runs on import/execution — this module is a manual test script
"""Lead interface for scattering models
The only way to create leads is using the :meth:`.Model.attach_lead` method.
The classes represented here are the final product of that process, listed
in :attr:`.Model.leads`.
"""
import numpy as np
import matplotlib.pyplot as plt
from math import pi
from scipy.sparse import csr_matrix
from . import _cpp
from . import pltutils, results
from .system import (System, plot_sites, plot_hoppings, structure_plot_properties,
decorate_structure_plot)
__all__ = ['Lead']
def _center(pos, shift):
"""Return the 2D center position of `pos + shift`"""
x = np.concatenate((pos[0], pos[0] + shift[0]))
y = np.concatenate((pos[1], pos[1] + shift[1]))
return (x.max() + x.min()) / 2, (y.max() + y.min()) / 2
class Lead:
    """Describes a single lead connected to a :class:`.Model`

    Leads can only be created using :meth:`.Model.attach_lead`
    and accessed using :attr:`.Model.leads`.
    """
    def __init__(self, impl: _cpp.Lead, index, lattice):
        self.impl = impl        # C++ lead implementation
        self.index = index      # position of this lead in Model.leads
        self.lattice = lattice  # lattice shared with the parent model
    @property
    def indices(self) -> np.ndarray:
        """Main system indices (1d array) to which this lead is connected"""
        return self.impl.indices
    @property
    def system(self) -> System:
        """Structural information, see :class:`.System`"""
        return System(self.impl.system, self.lattice)
    @property
    def h0(self) -> csr_matrix:
        """Unit cell Hamiltonian as :class:`~scipy.sparse.csr_matrix`"""
        return self.impl.h0
    @property
    def h1(self) -> csr_matrix:
        """Hamiltonian which connects two unit cells, :class:`~scipy.sparse.csr_matrix`"""
        return self.impl.h1
    def calc_bands(self, start=-pi, end=pi, step=0.05):
        """Calculate the band structure of an infinite lead

        Parameters
        ----------
        start, end : float
            Points in reciprocal space which form the path for the band calculation.
        step : float
            Calculation step length in reciprocal space units. Lower `step` values
            will return more detailed results.

        Returns
        -------
        :class:`~pybinding.results.Bands`
        """
        from scipy.linalg import eigh
        h0 = self.h0.todense()
        h1 = self.h1.todense()
        h1t = np.conj(h1.T)
        def eigenvalues(k):
            # Bloch Hamiltonian of the periodic lead: h0 + h1*e^{ik} + h1^dag*e^{-ik}
            h = h0 + h1 * np.exp(1j * k) + h1t * np.exp(-1j * k)
            return eigh(h, eigvals_only=True)
        k_path = results.make_path(start, end, step=step).flatten()
        bands = [eigenvalues(k) for k in k_path]
        return results.Bands(k_path, np.vstack(bands))
    def plot(self, lead_length=6, **kwargs):
        """Plot the sites, hoppings and periodic boundaries of the lead

        Parameters
        ----------
        lead_length : int
            Number of times to repeat the lead's periodic boundaries.
        **kwargs
            Additional plot arguments as specified in :func:`.structure_plot_properties`.
        """
        pos = self.system.positions
        sub = self.system.sublattices
        inner_hoppings = self.system.hoppings.tocoo()
        boundary = self.system.boundaries[0]
        outer_hoppings = boundary.hoppings.tocoo()
        props = structure_plot_properties(**kwargs)
        props['site'].setdefault('radius', self.system.lattice.site_radius_for_plot())
        # Repeat the unit cell `lead_length` times, fading out with distance.
        blend_gradient = np.linspace(0.5, 0.1, lead_length)
        for i, blend in enumerate(blend_gradient):
            offset = i * boundary.shift
            plot_sites(pos, sub, offset=offset, blend=blend, **props['site'])
            plot_hoppings(pos, inner_hoppings, offset=offset, blend=blend, **props['hopping'])
            plot_hoppings(pos, outer_hoppings, offset=offset - boundary.shift, blend=blend,
                          boundary=(1, boundary.shift), **props['boundary'])
        # Label the lead past the end of the drawn cells.
        label_pos = _center(pos, lead_length * boundary.shift * 1.5)
        pltutils.annotate_box("lead {}".format(self.index), label_pos, bbox=dict(alpha=0.7))
        decorate_structure_plot(**props)
    def plot_contact(self, line_width=1.6, arrow_length=0.5,
                     shade_width=0.3, shade_color='#d40a0c'):
        """Plot the shape and direction of the lead contact region

        Parameters
        ----------
        line_width : float
            Width of the line representing the lead contact.
        arrow_length : float
            Size of the direction arrow as a fraction of the contact line length.
        shade_width : float
            Width of the shaded area as a fraction of the arrow length.
        shade_color : str
            Color of the shaded area.
        """
        lead_spec = self.impl.spec
        vectors = self.lattice.vectors
        if len(lead_spec.shape.vertices) != 2 or len(vectors) != 2:
            raise RuntimeError("This only works for 2D systems")
        # contact line vertices
        a, b = (v[:2] for v in lead_spec.shape.vertices)
        def plot_contact_line():
            # Not using plt.plot() because it would reset axis limits
            plt.gca().add_patch(plt.Polygon([a, b], color='black', lw=line_width))
        def rescale_lattice_vector(vec):
            # Scale the lattice vector so the arrow is a fraction of the line length.
            line_length = np.linalg.norm(a - b)
            scale = arrow_length * line_length / np.linalg.norm(vec)
            return vec[:2] * scale
        def plot_arrow(xy, vec, spec, head_width=0.08, head_length=0.2):
            vnorm = np.linalg.norm(vec)
            plt.arrow(xy[0], xy[1], *vec, color='black', alpha=0.9, length_includes_head=True,
                      head_width=vnorm * head_width, head_length=vnorm * head_length)
            # Label with the lattice vector the lead extends along, e.g. "-a1".
            label = r"${}a_{}$".format("-" if spec.sign < 0 else "", spec.axis + 1)
            pltutils.annotate_box(label, xy + vec / 5, fontsize='large',
                                  bbox=dict(lw=0, alpha=0.6))
        def plot_polygon(w):
            # Shade a band of width 2*w centered on the contact line.
            plt.gca().add_patch(plt.Polygon([a - w, a + w, b + w, b - w],
                                            color=shade_color, alpha=0.25, lw=0))
        plot_contact_line()
        v = rescale_lattice_vector(vectors[lead_spec.axis] * lead_spec.sign)
        plot_arrow(xy=(a + b) / 2, vec=v, spec=lead_spec)
        plot_polygon(w=shade_width * v)
        pltutils.despine(trim=True)
        pltutils.add_margin()
    def plot_bands(self, start=-pi, end=pi, step=0.05, **kwargs):
        """Plot the band structure of an infinite lead

        Parameters
        ----------
        start, end : float
            Points in reciprocal space which form the path for the band calculation.
        step : float
            Calculation step length in reciprocal space units. Lower `step` values
            will return more detailed results.
        **kwargs
            Forwarded to :meth:`.Bands.plot`.
        """
        bands = self.calc_bands(start, end, step)
        bands.plot(**kwargs)
        plt.title("lead {}".format(self.index))
class Leads:
    """Sequence-style wrapper over the C++ lead collection.

    Supports indexing and ``len``; each access wraps the C++ lead
    in a fresh :class:`Lead` facade.
    """
    def __init__(self, impl: _cpp.Leads, lattice):
        self.impl = impl        # C++ lead collection
        self.lattice = lattice  # lattice shared with the parent model
    def __getitem__(self, index):
        return Lead(self.impl[index], index, self.lattice)
    def __len__(self):
        return len(self.impl)
| dean0x7d/pybinding | pybinding/leads.py | Python | bsd-2-clause | 7,279 |
# -*-coding: utf-8 -*-
"""
Copyright (c) 2015 by rapidhere, RANTTU. INC. All Rights Reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
predefined licenses headers
ref from http://opensource.org
the converter
"""
__author__ = "rapidhere"
from fpjs.ast import ES5Parser
from fpjs.ast.absyn import *
from fpjs.ast.token import *
from fakesyn import *
import const
from scope import Scope
from contextlib import contextmanager
class Converter(object):
    def __init__(self):
        # Each converter owns its own ES5 parser instance
        self.parser = ES5Parser()
    def load(self, content):
        """Feed JavaScript source text to the parser and reset the variable scope."""
        self.parser.load(content)
        self.var_scope = Scope()
    @contextmanager
    def scope_context(self, ast):
        """Enter a new variable scope for *ast*, leaving it on exit.

        For a Program the scope is built from every top-level statement;
        for a function it is built from the function body.
        NOTE: `ast == Program` relies on the absyn nodes comparing equal
        to their node class (see fpjs.ast.absyn).
        """
        self.var_scope.enter_scope()
        if ast == Program:
            for stat in ast:
                self.build_scope(stat)
        elif ast == FunctionExpression or ast == FunctionStatement:
            self.build_scope(ast.body_statement)
        else:
            raise AssertionError("cannot build scope for ast: " + ast.__class__.__name__)
        yield
        self.var_scope.leave_scope()
    def convert(self, print_ast=False, print_conv=False):
        """Parse the loaded source and return its functional-JS conversion.

        print_ast / print_conv optionally dump the AST and the converted text.
        """
        ast = self.parser.parse()
        assert ast == Program
        if print_ast:
            ast.ast_print()
        ret = self.wrap_runner(ast)
        if print_conv:
            # Python 2 print statement: this module targets Python 2
            print ret
        return ret
    def build_scope(self, ast):
        """Recursively collect var and function declarations into the current scope.

        Mirrors JavaScript hoisting: declarations inside nested blocks and
        loop/if bodies belong to the enclosing function (or program) scope.
        """
        if ast == IfStatement:
            self.build_scope(ast.true_statement)
            if ast.false_statement:
                self.build_scope(ast.false_statement)
        elif (ast == WhileStatement or
                ast == DoWhileStatement or
                ast == ForStatement):
            self.build_scope(ast.body_statement)
        elif ast == VariableStatement:
            for var in ast:
                # first declaration wins; redeclarations are ignored
                if var.var_id not in self.var_scope:
                    self.var_scope[var.var_id] = var
        elif ast == BlockStatement:
            for stat in ast:
                self.build_scope(stat)
        elif ast == FunctionStatement:
            self.var_scope[ast.id] = ast
    def build_scope_wrap_begin(self):
        # Open an arrow-function wrapper whose parameters are all scoped variables
        return "((" + ",".join(self.var_scope) + ")=>"
    def build_scope_wrap_end(self):
        # Close and immediately invoke the wrapper opened by build_scope_wrap_begin
        return ")()"
    def wrap_runner(self, ast):
        """Wrap the converted program in the runtime's runner boilerplate."""
        ret = const.CODE_FRAGMENT.RUNNER_WRAP_BEGIN
        ret += self.convert_program(ast)
        ret += const.CODE_FRAGMENT.RUNNER_WRAP_END
        return ret
    def convert_program(self, prog):
        """Convert the top-level program inside its own variable scope wrapper."""
        with self.scope_context(prog):
            ret = self.build_scope_wrap_begin()
            ret += self._convert_multiple_statements(iter(prog))
            ret += self.build_scope_wrap_end()
        return ret
    def _convert_multiple_statements(self, stats):
        """Convert a statement sequence into one comma expression.

        Function declarations are hoisted to the front. Control-flow
        statements (if/while/do-while/for, return, break, continue) end the
        sequence: the remaining statements become the continuation ("after")
        closure passed to the with-after converter.
        """
        rstats = []
        # materialize the iterator so the remainder can be sliced below
        func_stats = [s for s in stats]
        stats = []
        # filter function statement, put it in front (hoisting)
        for stat in func_stats:
            if stat == FunctionStatement:
                rstats.append("%s=%s" % (stat.id.value, self.convert_function_statement(stat)))
            else:
                stats.append(stat)
        idx = 0
        for stat in stats:
            idx += 1
            if (stat == IfStatement or
                    stat == WhileStatement or
                    stat == DoWhileStatement or
                    stat == ForStatement):
                # everything after the control statement becomes a thunk
                after = "(()=>%s)" % self._convert_multiple_statements(stats[idx:])
                rstats.append(self._convert_with_after_statement(stat, after))
                break
            elif stat == ReturnStatement:
                rstats.append(self.convert_return_statement(stat))
                break
            elif stat == BreakStatement:
                rstats.append(self.convert_break_statement(stat))
                break
            elif stat == ContinueStatement:
                rstats.append(self.convert_continue_statement(stat))
                break
            elif stat == FakeBreakStatement:
                # synthetic marker inserted by convert_if_statement
                rstats.append("__A()")
                break
            elif stat == FakeContinueStatement:
                # synthetic marker inserted by the loop converters
                rstats.append("__WN(__W,__WA)")
                break
            elif stat != FunctionStatement:
                rstats.append(self.convert_statement(stat))
        if rstats:
            return "(" + ",".join(rstats) + ")"
        else:
            return "undefined"
    def convert_statement(self, stat):
        """Dispatch a simple (non-control-flow) statement to its converter."""
        if stat == ExpressionStatement:
            return self.convert_expression(stat.expression)
        elif stat == VariableStatement:
            return self.convert_variable_statement(stat)
        elif stat == BlockStatement:
            return self.convert_block_statement(stat)
        elif stat == FunctionStatement:
            return self.convert_function_statement(stat)
        raise NotImplementedError("unsupported ast yet: " + stat.__class__.__name__)
    def convert_function_statement(self, stat):
        """Convert a function declaration into an arrow function with its own scope."""
        with self.scope_context(stat):
            ret = "(" + ",".join([arg_id.value for arg_id in stat.arguments]) + ")=>"
            ret += self.build_scope_wrap_begin()
            ret += self.convert_statement(stat.body_statement)
            ret += self.build_scope_wrap_end()
        return ret
    def convert_break_statement(self, stat):
        # __WA is the runtime's "after loop" continuation: calling it breaks out
        return "__WA()"
    def convert_continue_statement(self, stat):
        # __WN schedules the next loop iteration (__W) with the after-loop continuation
        return "__WN(__W,__WA)"
    def convert_block_statement(self, stat):
        """Convert a `{ ... }` block: simply its statements as one comma expression."""
        return self._convert_multiple_statements(iter(stat))
    def convert_return_statement(self, stat):
        # A return simply evaluates to its expression: the enclosing arrow
        # function wrapper yields the last expression value.
        return self.convert_expression(stat.expression)
    def convert_variable_statement(self, stat):
        """Convert `var` declarations to plain assignments (declarations are hoisted)."""
        ret = []
        for var in stat:
            if var.init is not None:
                ret.append("%s=%s" % (var.var_id.value, self.convert_expression(var.init)))
        if ret:
            return "(" + ",".join(ret) + ")"
        return "undefined"
def _convert_with_after_statement(self, stat, after):
ret = None
if stat == IfStatement:
ret = self.convert_if_statement(stat, after)
elif stat == WhileStatement:
ret = self.convert_while_statement(stat, after)
elif stat == DoWhileStatement:
ret = self.convert_do_while_statement(stat, after)
elif stat == ForStatement:
ret = self.convert_for_statement(stat, after)
if not ret:
raise AssertionError("not a with-after statement or not implemented: " + stat.__class__.__name__)
return ret
def convert_if_statement(self, stat, after):
stat.true_statement.append(FakeBreakStatement())
stat.false_statement.append(FakeBreakStatement())
return const.CODE_FRAGMENT.IF_ELSE_FRAGMENT % (
self._convert_multiple_statements(iter(stat.true_statement)),
self._convert_multiple_statements(iter(stat.false_statement)),
self.convert_expression(stat.test_expression),
after)
def convert_while_statement(self, stat, after):
stat.body_statement.append(FakeContinueStatement())
return const.CODE_FRAGMENT.WHILE_FRAGMENT % (
self.convert_expression(stat.test_expression),
self._convert_multiple_statements(iter(stat.body_statement)),
after,
const.CODE_FRAGMENT.WN_WHILE_FRAGMENT)
def convert_do_while_statement(self, stat, after):
stat.body_statement.append(FakeContinueStatement())
wnext = const.CODE_FRAGMENT.WN_DO_WHILE_FRAGMENT % self.convert_expression(stat.test_expression)
return const.CODE_FRAGMENT.DO_WHILE_FRAGMENT % (
self._convert_multiple_statements(iter(stat.body_statement)),
after,
wnext)
def convert_for_statement(self, stat, after):
stat.body_statement.append(FakeContinueStatement())
return const.CODE_FRAGMENT.FOR_FRAGMENT % (
self.convert_expression(stat.init_expression),
self._convert_multiple_statements(iter(stat.body_statement)),
after,
const.CODE_FRAGMENT.WN_FOR_FRAGMENT % (
self.convert_expression(stat.increment_expression),
self.convert_expression(stat.test_expression)))
def convert_expression(self, exp):
if not exp:
return "undefined"
if exp == CallExpression:
return self.convert_call_expression(exp)
elif exp == PrimaryExpression:
return self.convert_primary_expression(exp)
elif exp == BinaryExpression:
return self.convert_binary_expression(exp)
elif exp == UnaryExpression:
return self.convert_unary_expression(exp)
elif exp == FunctionExpression:
return self.convert_function_expression(exp)
elif exp == MemberExpression:
return self.convert_member_expression(exp)
elif exp == MultipleExpression:
return self.convert_multiple_expression(exp)
elif exp == AssignmentExpression:
return self.convert_assign_expression(exp)
raise NotImplementedError("unsupported ast yet: " + exp.__class__.__name__)
def convert_function_expression(self, exp):
with self.scope_context(exp):
ret = "(" + ",".join([arg_id.value for arg_id in exp.arguments]) + ")=>"
ret += self.build_scope_wrap_begin()
ret += self.convert_statement(exp.body_statement)
ret += self.build_scope_wrap_end()
return ret
def convert_assign_expression(self, exp):
return (self.convert_expression(exp.left_hand) +
self.convert_token(exp.operator) +
self.convert_expression(exp.right_hand))
def convert_multiple_expression(self, exp):
ret = "("
ret += ",".join([self.convert_expression(e) for e in exp])
ret += ")"
return ret
def convert_call_expression(self, exp):
callee = self.convert_expression(exp.callee)
if exp.callee == FunctionExpression:
callee = "(%s)" % callee
return callee + self.convert_args(exp.arguments)
def convert_binary_expression(self, exp):
ret = "(%s)" % self.convert_expression(exp.left)
ret += self.convert_token(exp.operator)
ret += "(%s)" % self.convert_expression(exp.right)
return ret
def convert_unary_expression(self, exp):
expr = self.convert_expression(exp.expression)
tok = self.convert_token(exp.operator)
if exp.expression == PrimaryExpression:
return "%s%s" % (tok, expr)
else:
return "%s(%s)" % (tok, expr)
def convert_member_expression(self, exp):
group = self.convert_expression(exp.group)
if exp.identifier == ES5Id:
if exp.group == PrimaryExpression:
pattern = "%s.%s"
else:
pattern = "(%s).%s"
return pattern % (group, self.convert_token(exp.identifier))
else:
return "%s[%s]" % (group, self.convert_expression(exp.identifier))
def convert_primary_expression(self, exp):
if exp.value == ObjectLiteral:
return self.convert_object_literal(exp.value)
elif exp.value == ArrayLiteral:
return self.convert_array_literal(exp.value)
else:
return self.convert_token(exp.value)
def convert_args(self, args):
ret = "("
arg_rets = []
for arg in args:
arg_rets.append(self.convert_expression(arg))
ret += ",".join(arg_rets) + ")"
return ret
def convert_token(self, tok):
if tok == ES5String:
return '"%s"' % tok.value
return tok.value
def convert_object_literal(self, o):
props = "["
for k, v in o.iteritems():
props += "[%s,%s]," % (self.convert_token(k), self.convert_expression(v))
props = props[:-1] + "]"
return const.CODE_FRAGMENT.OBJECT_CONSTRUCTOR_FRAGMENT % props
def convert_array_literal(self, arr):
ret = "["
trim = False
for v in arr:
if v is None:
ret += ","
trim = False
else:
ret += "%s," % self.convert_expression(v)
trim = True
if trim:
ret = ret[:-1]
ret += "]"
return ret
| rapidhere/fpjs | fpjs/conv/converter.py | Python | lgpl-3.0 | 13,074 |
#!/usr/bin/env python3
from decimal import Decimal
import http.client
import subprocess
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import *
from test_framework.messages import *
from test_framework.mininode import *
from test_framework.script import *
from test_framework.qtum import *
class QtumBitcoreTest(BitcoinTestFramework):
    """Functional test of the Bitcore-style index RPCs on a Qtum node.

    Exercises getaddresstxids/getaddressdeltas/getaddressbalance/
    getaddressutxos/getaddressmempool, getblockhashes, getspentinfo and
    getdgpinfo against a node running with -addrindex=1.
    """
    def set_test_params(self):
        # Node 0 has the address index enabled; node 1 runs without it.
        self.num_nodes = 2
        self.extra_args = [['-addrindex=1'], ['-addrindex=0']]
        self.setup_clean_chain = True
    def skip_test_if_missing_module(self):
        self.skip_if_no_bitcore()
        self.skip_if_no_wallet()
    def unknown_segwit_address_test(self):
        """Mine a block spending to an unknown (v16) witness program.

        Builds two chained transactions paying to an unrecognized segwit
        script and submits them in a hand-assembled block; submitblock
        returning None indicates the block was accepted.
        """
        unspent = self.nodes[0].listunspent()[0]
        # Witness program version 16 with a sha256 payload -- no standard address form.
        script_pubkey = CScript([CScriptOp(OP_16), sha256(b"\x00")])
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), b"")]
        # Spend the full input minus 100000 satoshis as fee.
        tx.vout = [CTxOut(int(unspent['amount']*100000000 - 100000), script_pubkey)]
        tx = rpc_sign_transaction(self.nodes[0], tx)
        self.nodes[0].sendrawtransaction(bytes_to_hex_str(tx.serialize()))
        tx.rehash()
        # Second transaction chains off the first one's single output.
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx2.vout = [CTxOut(int(unspent['amount']*100000000 - 200000), script_pubkey)]
        # NOTE(review): rehashes tx a second time while tx2 is never
        # rehashed or signed -- presumably this was meant to be tx2.rehash();
        # confirm against the framework's block-assembly requirements.
        tx.rehash()
        tip = self.nodes[0].getblock(self.nodes[0].getbestblockhash())
        block = create_block(int(self.nodes[0].getbestblockhash(), 16), create_coinbase(self.nodes[0].getblockcount()+1), tip['time'])
        block.vtx.append(tx)
        block.vtx.append(tx2)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        # submitblock returns None when the block is accepted.
        assert_equal(self.nodes[0].submitblock(bytes_to_hex_str(block.serialize())), None)
    def run_test(self):
        node = self.nodes[0]
        # Generate a 1000-block chain with mocktime in the past so block
        # timestamps stay valid; the last 10 blocks get distinct times.
        t = int(time.time())-10000
        node.setmocktime(t)
        node.generate(990)
        for i in range(10):
            node.setmocktime(t+9900+i)
            node.generate(1)
        node.setmocktime(0)
        # NOTE(review): duplicate setmocktime(0) call; presumably redundant.
        node.setmocktime(0)
        self.unknown_segwit_address_test()
        confirmed_address = node.getnewaddress()
        mempool_address = node.getnewaddress()
        expected_address_txids = []
        # Ten confirmed payments to confirmed_address...
        for i in range(10):
            expected_address_txids.append(node.sendtoaddress(confirmed_address, 10))
            time.sleep(0.1)
        node.generate(1)
        # ...plus one payment that stays in the mempool.
        mempool_txid = node.sendtoaddress(mempool_address, 19999)
        # check dgp info (governance parameters: gas limit, block size, min gas price)
        ret = node.getdgpinfo()
        assert_equal(ret, {'blockgaslimit': 40000000, 'maxblocksize': 2000000, 'mingasprice': 40})
        # The address index must report exactly the ten confirmed txids.
        ret = node.getaddresstxids({'addresses': [confirmed_address]})
        assert_equal(set(ret), set(expected_address_txids))
        ret = node.getaddressdeltas({'addresses': [confirmed_address]})
        assert_equal(len(ret), 10)
        # Balance is reported in satoshis: 10 payments of 10 coins each.
        ret = node.getaddressbalance({'addresses': [confirmed_address]})
        assert_equal(ret['balance'], 10000000000)
        ret = node.getaddressutxos({'addresses': [confirmed_address]})
        # The unconfirmed payment must be visible via the mempool index.
        ret = node.getaddressmempool({'addresses': [mempool_address]})
        assert_equal(ret[0]['txid'], mempool_txid)
        assert_equal(len(ret), 1)
        # getblockhashes returns the hashes of blocks between two timestamps.
        new_block = node.getblock(node.getblockhash(994))
        old_block = node.getblock(node.getblockhash(991))
        ret = node.getblockhashes(new_block['time'], old_block['time'])
        assert_equal(set(ret), set(node.getblockhash(991+i) for i in range(3)))
        time.sleep(1)
        # getspentinfo maps a spent outpoint to the transaction that spent it.
        txinfo = node.decoderawtransaction(node.gettransaction(expected_address_txids[0])['hex'])
        spent_prevout = txinfo['vin'][0]
        ret = node.getspentinfo({"txid": spent_prevout['txid'], "index": spent_prevout['vout']})
        assert_equal(ret, {"txid": expected_address_txids[0], "index": 0, "height": 1002})
        self.sync_all()
if __name__ == '__main__':
QtumBitcoreTest().main() | qtumproject/qtum | test/functional/qtum_rpc_bitcore.py | Python | mit | 3,988 |
from django.contrib.sitemaps import Sitemap
from .models import Concert, Artist, Venue
class ConcertSitemap(Sitemap):
    """Sitemap section covering every Concert, refreshed daily."""
    priority = 0.5
    changefreq = "daily"

    def items(self):
        return Concert.objects.all()

    def lastmod(self, obj):
        return obj.modify_date
class ArtistSiteMap(Sitemap):
    """Sitemap section covering every Artist, refreshed daily."""
    priority = 0.5
    changefreq = "daily"

    def items(self):
        return Artist.objects.all()

    def lastmod(self, obj):
        return obj.modify_date
class VenueSiteMap(Sitemap):
    """Sitemap section covering every Venue, refreshed daily."""
    priority = 0.5
    changefreq = "daily"

    def items(self):
        return Venue.objects.all()

    def lastmod(self, obj):
        return obj.modify_date
# Registry passed to django.contrib.sitemaps views: section name -> Sitemap class.
sitemaps = {
    "concert": ConcertSitemap,
    "artist": ArtistSiteMap,
    "venue": VenueSiteMap,
}
| suriya/rasikapriya | rasikapriya/sitemaps.py | Python | mit | 770 |
from stt_watson.SttWatsonAbstractListener import SttWatsonAbstractListener
class SttWatsonLogListener(SttWatsonAbstractListener):
    """Listener that logs every Watson speech-to-text event to stdout."""

    def __init__(self):
        pass

    def listenHypothesis(self, hypothesis):
        # Final hypothesis for a completed utterance.
        print("Hypothesis: {0}".format(hypothesis))

    def listenPayload(self, payload):
        # Raw text message received from the service.
        print(u"Text message received: {0}".format(payload))

    def listenInterimHypothesis(self, interimHypothesis):
        # Partial (still-changing) hypothesis.
        print("Interim hypothesis: {0}".format(interimHypothesis))
| HomeHabbit/stt-watson | stt_watson/SttWatsonLogListener.py | Python | mit | 490 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.