code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for choosing which member of a replica set to read from."""
import random
from pymongo.errors import ConfigurationError
class ReadPreference:
    """An enum that defines the read preference modes supported by PyMongo.

    Used in three cases:

    :class:`~pymongo.mongo_client.MongoClient` connected to a single host:

    * `PRIMARY`: Queries are allowed if the host is standalone or the replica
      set primary.
    * All other modes allow queries to standalone servers, to the primary, or
      to secondaries.

    :class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a
    sharded cluster of replica sets:

    * `PRIMARY`: Queries are sent to the primary of a shard.
    * `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
      otherwise a secondary.
    * `SECONDARY`: Queries are distributed among shard secondaries. An error
      is raised if no secondaries are available.
    * `SECONDARY_PREFERRED`: Queries are distributed among shard secondaries,
      or the primary if no secondary is available.
    * `NEAREST`: Queries are distributed among all members of a shard.

    :class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`:

    * `PRIMARY`: Queries are sent to the primary of the replica set.
    * `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
      otherwise a secondary.
    * `SECONDARY`: Queries are distributed among secondaries. An error
      is raised if no secondaries are available.
    * `SECONDARY_PREFERRED`: Queries are distributed among secondaries,
      or the primary if no secondary is available.
    * `NEAREST`: Queries are distributed among all members.
    """

    # The integer values double as indexes into _mongos_modes below,
    # so their ordering must stay stable.
    PRIMARY = 0
    PRIMARY_PREFERRED = 1
    SECONDARY = 2
    # Legacy alias for SECONDARY (same value), kept for backward compatibility.
    SECONDARY_ONLY = 2
    SECONDARY_PREFERRED = 3
    NEAREST = 4
# For formatting error messages.  SECONDARY_ONLY shares SECONDARY's value
# and therefore also maps to the 'SECONDARY' name.
modes = {
    ReadPreference.PRIMARY: 'PRIMARY',
    ReadPreference.PRIMARY_PREFERRED: 'PRIMARY_PREFERRED',
    ReadPreference.SECONDARY: 'SECONDARY',
    ReadPreference.SECONDARY_PREFERRED: 'SECONDARY_PREFERRED',
    ReadPreference.NEAREST: 'NEAREST',
}
# Read-preference mode names in the wire format expected by mongos,
# indexed by the integer mode values defined on ReadPreference.
_mongos_modes = [
    'primary',
    'primaryPreferred',
    'secondary',
    'secondaryPreferred',
    'nearest',
]


def mongos_mode(mode):
    """Translate an integer ReadPreference mode into its mongos name."""
    return _mongos_modes[mode]


def mongos_enum(enum):
    """Translate a mongos mode name back into its integer mode value."""
    return _mongos_modes.index(enum)
def select_primary(members):
    """Return the first member flagged as primary, or None if there is none."""
    return next((m for m in members if m.is_primary), None)
def select_member_with_tags(members, tags, secondary_only, latency):
    """Pick a random member matching *tags* within *latency* ms of the fastest.

    Members that are neither primary nor secondary (e.g. RECOVERING) are
    skipped; when *secondary_only* is true the primary is skipped as well.
    Returns None when no member qualifies.
    """
    eligible = []
    for member in members:
        if secondary_only and member.is_primary:
            continue
        # Skip members in RECOVERING or similar non-readable states.
        if not (member.is_primary or member.is_secondary):
            continue
        if member.matches_tags(tags):
            eligible.append(member)
    if not eligible:
        return None
    # Ping times are in seconds; ``latency`` is expressed in milliseconds.
    pings = [member.get_avg_ping_time() for member in eligible]
    best = min(pings)
    window = latency / 1000.
    nearby = [m for m, ping in zip(eligible, pings) if ping - best <= window]
    return random.choice(nearby)
def select_member(
        members,
        mode=ReadPreference.PRIMARY,
        tag_sets=None,
        latency=15
):
    """Return a Member or None.

    Dispatches on *mode*: PRIMARY returns only the primary (tags are not
    allowed); the *_PREFERRED modes recurse with their fallback mode;
    SECONDARY and NEAREST try each tag set in order and return the first
    member found within *latency* ms of the fastest eligible member.

    :raises ConfigurationError: if PRIMARY is combined with non-default
        tag sets, or *mode* is not a known read preference.
    """
    if tag_sets is None:
        tag_sets = [{}]

    # For brevity
    PRIMARY = ReadPreference.PRIMARY
    PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED
    SECONDARY = ReadPreference.SECONDARY
    SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED
    NEAREST = ReadPreference.NEAREST

    if mode == PRIMARY:
        if tag_sets != [{}]:
            raise ConfigurationError("PRIMARY cannot be combined with tags")
        return select_primary(members)

    elif mode == PRIMARY_PREFERRED:
        # Recurse.
        candidate_primary = select_member(members, PRIMARY, [{}], latency)
        if candidate_primary:
            return candidate_primary
        else:
            return select_member(members, SECONDARY, tag_sets, latency)

    elif mode == SECONDARY:
        # Try each tag set in turn until one matches at least one secondary.
        for tags in tag_sets:
            candidate = select_member_with_tags(members, tags, True, latency)
            if candidate:
                return candidate
        return None

    elif mode == SECONDARY_PREFERRED:
        # Recurse.
        candidate_secondary = select_member(
            members, SECONDARY, tag_sets, latency)
        if candidate_secondary:
            return candidate_secondary
        else:
            return select_member(members, PRIMARY, [{}], latency)

    elif mode == NEAREST:
        # Like SECONDARY but the primary is also a candidate.
        for tags in tag_sets:
            candidate = select_member_with_tags(members, tags, False, latency)
            if candidate:
                return candidate
        # Ran out of tags.
        return None

    else:
        raise ConfigurationError("Invalid mode %s" % repr(mode))
# Commands that may be sent to replica-set secondaries, depending on
# ReadPreference and tags. All other commands are always run on the primary.
secondary_ok_commands = frozenset([
    "group", "aggregate", "collstats", "dbstats", "count", "distinct",
    "geonear", "geosearch", "geowalk", "mapreduce", "getnonce", "authenticate",
    "text", "parallelcollectionscan"
])
class MovingAverage(object):
    """Immutable tracker of a five-sample moving average of ping times."""

    def __init__(self, samples):
        # Keep only the most recent five samples.
        self.samples = samples[-5:]
        assert self.samples
        self.average = sum(self.samples) / float(len(self.samples))

    def clone_with(self, sample):
        """Return a new MovingAverage that also includes *sample*."""
        return MovingAverage(self.samples + [sample])

    def get(self):
        """Return the current average."""
        return self.average
| otherness-space/myProject003 | my_project_003/lib/python2.7/site-packages/pymongo/read_preferences.py | Python | mit | 6,472 |
class Solution:
    """LeetCode "3Sum": find all unique triplets that sum to zero."""

    # @return a list of lists of length 3, [[val1,val2,val3]]
    def threeSum(self, num):
        """Return all unique zero-sum triplets drawn from *num*.

        Sorts *num* in place, then for each anchor element walks two
        pointers inward from both ends.  O(n^2) time, O(1) extra space.
        """
        if len(num) <= 2:
            return []
        triplets = []
        target = 0
        num.sort()
        anchor = 0
        while anchor < len(num) - 2:
            lo = anchor + 1
            hi = len(num) - 1
            while lo < hi:
                total = num[anchor] + num[lo] + num[hi]
                if total < target:
                    lo += 1
                elif total > target:
                    hi -= 1
                else:
                    triplets.append([num[anchor], num[lo], num[hi]])
                    lo += 1
                    hi -= 1
                    # The following loops skip duplicate values so the same
                    # triplet is never emitted twice.
                    while lo < hi and num[lo] == num[lo - 1]:
                        lo += 1
                    while lo < hi and num[hi] == num[hi + 1]:
                        hi -= 1
            # Advance the anchor past any run of equal values.
            while anchor < len(num) - 2 and num[anchor] == num[anchor + 1]:
                anchor += 1
            anchor += 1
        return triplets
| jasonleaster/LeetCode | 3Sum/3sum_opt_2.py | Python | gpl-2.0 | 988 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-25 13:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Player and Team models
    # and links Player to Team (nullable FK) and to the user model
    # (one-to-one).  Do not edit the generated operations by hand.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Player',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('birth_year', models.IntegerField(default=1988)),
                ('number', models.IntegerField(blank=True, default=0, null=True)),
                ('status', models.CharField(choices=[('f', 'Free'), ('p', 'Pending'), ('a', 'Active')], default='f', max_length=2)),
                ('active', models.BooleanField(default=False)),
            ],
            options={
                'ordering': ('number',),
            },
        ),
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=32, null=True, unique=True)),
                ('city', models.CharField(blank=True, max_length=32, null=True)),
                ('active', models.BooleanField(default=False)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Foreign keys are added after both models exist.
        migrations.AddField(
            model_name='player',
            name='team',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='team.Team'),
        ),
        migrations.AddField(
            model_name='player',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| vollov/lotad | team/migrations/0001_initial.py | Python | mit | 1,996 |
### script for writing meta information of datasets into master.csv
### for node property prediction datasets.
import pandas as pd

# All dataset archives are hosted under this prefix.
_URL_PREFIX = 'http://snap.stanford.edu/ogb/data/nodeproppred/'


def _dataset_meta(num_tasks, num_classes, eval_metric, task_type,
                  download_name, version, add_inverse_edge, has_node_attr,
                  has_edge_attr, split, additional_node_files='None',
                  additional_edge_files='None', is_hetero=False, binary=False):
    """Build one meta-information record for a node-property dataset.

    Returns a dict whose keys are exactly the columns written to master.csv.
    ``add_inverse_edge``: for undirected graphs only one edge direction is
    stored, and this flag requests adding the inverse edges at
    pre-processing time.
    """
    return {
        'num tasks': num_tasks,
        'num classes': num_classes,
        'eval metric': eval_metric,
        'task type': task_type,
        'download_name': download_name,
        'version': version,
        'url': _URL_PREFIX + download_name + '.zip',
        'add_inverse_edge': add_inverse_edge,
        'has_node_attr': has_node_attr,
        'has_edge_attr': has_edge_attr,
        'split': split,
        'additional node files': additional_node_files,
        'additional edge files': additional_edge_files,
        'is hetero': is_hetero,
        'binary': binary,
    }


dataset_dict = {}
dataset_list = []  # kept for backward compatibility; unused below

### protein function prediction task
dataset_dict['ogbn-proteins'] = _dataset_meta(
    112, 2, 'rocauc', 'binary classification', 'proteins', 1,
    add_inverse_edge=True, has_node_attr=False, has_edge_attr=True,
    split='species', additional_node_files='node_species')

### product category prediction task
dataset_dict['ogbn-products'] = _dataset_meta(
    1, 47, 'acc', 'multiclass classification', 'products', 1,
    add_inverse_edge=True, has_node_attr=True, has_edge_attr=False,
    split='sales_ranking')

### arxiv category prediction task
dataset_dict['ogbn-arxiv'] = _dataset_meta(
    1, 40, 'acc', 'multiclass classification', 'arxiv', 1,
    add_inverse_edge=False, has_node_attr=True, has_edge_attr=False,
    split='time', additional_node_files='node_year')

### paper venue prediction task (heterogeneous MAG graph)
dataset_dict['ogbn-mag'] = _dataset_meta(
    1, 349, 'acc', 'multiclass classification', 'mag', 2,
    add_inverse_edge=False, has_node_attr=True, has_edge_attr=False,
    split='time', additional_node_files='node_year',
    additional_edge_files='edge_reltype', is_hetero=True)

### paper category prediction in huge paper citation network
dataset_dict['ogbn-papers100M'] = _dataset_meta(
    1, 172, 'acc', 'multiclass classification', 'papers100M-bin', 1,
    add_inverse_edge=False, has_node_attr=True, has_edge_attr=False,
    split='time', additional_node_files='node_year', binary=True)

df = pd.DataFrame(dataset_dict)
# saving the dataframe
df.to_csv('master.csv')
def sphere(solution):
    """Return the sphere benchmark value of *solution*.

    f(x) = sum_i x_i ** 2, minimized (0) at the origin.  Accepts any
    iterable of numbers (the previous version required an indexable
    sequence and used a manual index loop).
    """
    return sum(x * x for x in solution)
#!/usr/bin/env python
#-*-coding:utf-8-*-
#
# @author Meng G.
# 2016-03-28 restructed
# WSGI entry point: export the Flask app object under the name servers expect.
from sqip import app as application

if __name__ == '__main__':
    # Development-only server: debug mode on, listening on all interfaces.
    application.debug = True
    application.run(host="0.0.0.0")
"""Tests for the siren component."""
| lukas-hetzenecker/home-assistant | tests/components/siren/__init__.py | Python | apache-2.0 | 37 |
"""
Random walker segmentation algorithm
from *Random walks for image segmentation*, Leo Grady, IEEE Trans
Pattern Anal Mach Intell. 2006 Nov;28(11):1768-83.
Installing pyamg and using the 'cg_mg' mode of random_walker improves
significantly the performance.
"""
import warnings
import numpy as np
from scipy import sparse, ndimage
# executive summary for next code block: try to import umfpack from
# scipy, but make sure not to raise a fuss if it fails since it's only
# needed to speed up a few cases.
# See discussions at:
# https://groups.google.com/d/msg/scikit-image/FrM5IGP6wh4/1hp-FtVZmfcJ
# http://stackoverflow.com/questions/13977970/ignore-exceptions-printed-to-stderr-in-del/13977992?noredirect=1#comment28386412_13977992
try:
    from scipy.sparse.linalg.dsolve import umfpack
    old_del = umfpack.UmfpackContext.__del__

    def new_del(self):
        # Wrap the original destructor to swallow the AttributeError it can
        # raise during interpreter shutdown.
        try:
            old_del(self)
        except AttributeError:
            pass
    umfpack.UmfpackContext.__del__ = new_del
    UmfpackContext = umfpack.UmfpackContext()
except:
    # UMFPACK is optional (it only speeds up 'cg' mode); any import or setup
    # failure simply disables it (bare except is deliberate, see links above).
    UmfpackContext = None

try:
    from pyamg import ruge_stuben_solver
    amg_loaded = True
except ImportError:
    # pyamg is optional; it is required only for the 'cg_mg' mode.
    amg_loaded = False

from scipy.sparse.linalg import cg
from ..util import img_as_float
from ..filters import rank_order
#-----------Laplacian--------------------
def _make_graph_edges_3d(n_x, n_y, n_z):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction
n_z: integer
The size of the grid in the z direction
Returns
-------
edges : (2, N) ndarray
with the total number of edges::
N = n_x * n_y * (nz - 1) +
n_x * (n_y - 1) * nz +
(n_x - 1) * n_y * nz
Graph edges with each column describing a node-id pair.
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_weights_3d(data, spacing, beta=130, eps=1.e-6,
multichannel=False):
# Weight calculation is main difference in multispectral version
# Original gradient**2 replaced with sum of gradients ** 2
gradients = 0
for channel in range(0, data.shape[-1]):
gradients += _compute_gradients_3d(data[..., channel],
spacing) ** 2
# All channels considered together in this standard deviation
beta /= 10 * data.std()
if multichannel:
# New final term in beta to give == results in trivial case where
# multiple identical spectra are passed.
beta /= np.sqrt(data.shape[-1])
gradients *= beta
weights = np.exp(- gradients)
weights += eps
return weights
def _compute_gradients_3d(data, spacing):
gr_deep = np.abs(data[:, :, :-1] - data[:, :, 1:]).ravel() / spacing[2]
gr_right = np.abs(data[:, :-1] - data[:, 1:]).ravel() / spacing[1]
gr_down = np.abs(data[:-1] - data[1:]).ravel() / spacing[0]
return np.r_[gr_deep, gr_right, gr_down]
def _make_laplacian_sparse(edges, weights):
"""
Sparse implementation
"""
pixel_nb = edges.max() + 1
diag = np.arange(pixel_nb)
i_indices = np.hstack((edges[0], edges[1]))
j_indices = np.hstack((edges[1], edges[0]))
data = np.hstack((-weights, -weights))
lap = sparse.coo_matrix((data, (i_indices, j_indices)),
shape=(pixel_nb, pixel_nb))
connect = - np.ravel(lap.sum(axis=1))
lap = sparse.coo_matrix(
(np.hstack((data, connect)), (np.hstack((i_indices, diag)),
np.hstack((j_indices, diag)))),
shape=(pixel_nb, pixel_nb))
return lap.tocsr()
def _clean_labels_ar(X, labels, copy=False):
X = X.astype(labels.dtype)
if copy:
labels = np.copy(labels)
labels = np.ravel(labels)
labels[labels == 0] = X
return labels
def _buildAB(lap_sparse, labels):
"""
Build the matrix A and rhs B of the linear system to solve.
A and B are two block of the laplacian of the image graph.
"""
labels = labels[labels >= 0]
indices = np.arange(labels.size)
unlabeled_indices = indices[labels == 0]
seeds_indices = indices[labels > 0]
# The following two lines take most of the time in this function
B = lap_sparse[unlabeled_indices][:, seeds_indices]
lap_sparse = lap_sparse[unlabeled_indices][:, unlabeled_indices]
nlabels = labels.max()
rhs = []
for lab in range(1, nlabels + 1):
mask = (labels[seeds_indices] == lab)
fs = sparse.csr_matrix(mask)
fs = fs.transpose()
rhs.append(B * fs)
return lap_sparse, rhs
def _mask_edges_weights(edges, weights, mask):
    """
    Remove edges of the graph connected to masked nodes, as well as
    corresponding weights of the edges.
    """
    # Per-edge endpoint masks, in the same deep/right/down edge order used
    # by _make_graph_edges_3d.
    mask0 = np.hstack((mask[:, :, :-1].ravel(), mask[:, :-1].ravel(),
                       mask[:-1].ravel()))
    mask1 = np.hstack((mask[:, :, 1:].ravel(), mask[:, 1:].ravel(),
                       mask[1:].ravel()))
    # Keep only edges whose endpoints are both inside the mask.
    ind_mask = np.logical_and(mask0, mask1)
    edges, weights = edges[:, ind_mask], weights[ind_mask]
    max_node_index = edges.max()
    # Reassign edges labels to 0, 1, ... edges_number - 1
    order = np.searchsorted(np.unique(edges.ravel()),
                            np.arange(max_node_index + 1))
    edges = order[edges.astype(np.int64)]
    return edges, weights
def _build_laplacian(data, spacing, mask=None, beta=50,
                     multichannel=False):
    """Construct the sparse Laplacian of the image graph of *data*.

    Edge weights decrease with the local gradient (scaled by ``beta``);
    when *mask* is given, edges touching masked-out nodes are dropped.
    """
    shape_x, shape_y, shape_z = data.shape[:3]
    edges = _make_graph_edges_3d(shape_x, shape_y, shape_z)
    weights = _compute_weights_3d(data, spacing, beta=beta, eps=1.e-10,
                                  multichannel=multichannel)
    if mask is not None:
        edges, weights = _mask_edges_weights(edges, weights, mask)
    laplacian = _make_laplacian_sparse(edges, weights)
    del edges, weights
    return laplacian
#----------- Random walker algorithm --------------------------------
def random_walker(data, labels, beta=130, mode='bf', tol=1.e-3, copy=True,
                  multichannel=False, return_full_prob=False, spacing=None):
    """Random walker algorithm for segmentation from markers.

    Random walker algorithm is implemented for gray-level or multichannel
    images.

    Parameters
    ----------
    data : array_like
        Image to be segmented in phases. Gray-level `data` can be two- or
        three-dimensional; multichannel data can be three- or four-
        dimensional (multichannel=True) with the highest dimension denoting
        channels. Data spacing is assumed isotropic unless the `spacing`
        keyword argument is used.
    labels : array of ints, of same shape as `data` without channels dimension
        Array of seed markers labeled with different positive integers
        for different phases. Zero-labeled pixels are unlabeled pixels.
        Negative labels correspond to inactive pixels that are not taken
        into account (they are removed from the graph). If labels are not
        consecutive integers, the labels array will be transformed so that
        labels are consecutive. In the multichannel case, `labels` should have
        the same shape as a single channel of `data`, i.e. without the final
        dimension denoting channels.
    beta : float
        Penalization coefficient for the random walker motion
        (the greater `beta`, the more difficult the diffusion).
    mode : string, available options {'cg_mg', 'cg', 'bf'}
        Mode for solving the linear system in the random walker algorithm.
        If no preference given, automatically attempt to use the fastest
        option available ('cg_mg' from pyamg >> 'cg' with UMFPACK > 'bf').

        - 'bf' (brute force): an LU factorization of the Laplacian is
          computed. This is fast for small images (<1024x1024), but very slow
          and memory-intensive for large images (e.g., 3-D volumes).
        - 'cg' (conjugate gradient): the linear system is solved iteratively
          using the Conjugate Gradient method from scipy.sparse.linalg. This is
          less memory-consuming than the brute force method for large images,
          but it is quite slow.
        - 'cg_mg' (conjugate gradient with multigrid preconditioner): a
          preconditioner is computed using a multigrid solver, then the
          solution is computed with the Conjugate Gradient method. This mode
          requires that the pyamg module (http://pyamg.org/) is
          installed. For images of size > 512x512, this is the recommended
          (fastest) mode.
    tol : float
        tolerance to achieve when solving the linear system, in
        cg' and 'cg_mg' modes.
    copy : bool
        If copy is False, the `labels` array will be overwritten with
        the result of the segmentation. Use copy=False if you want to
        save on memory.
    multichannel : bool, default False
        If True, input data is parsed as multichannel data (see 'data' above
        for proper input format in this case)
    return_full_prob : bool, default False
        If True, the probability that a pixel belongs to each of the labels
        will be returned, instead of only the most likely label.
    spacing : iterable of floats
        Spacing between voxels in each spatial dimension. If `None`, then
        the spacing between pixels/voxels in each dimension is assumed 1.

    Returns
    -------
    output : ndarray
        * If `return_full_prob` is False, array of ints of same shape as
          `data`, in which each pixel has been labeled according to the marker
          that reached the pixel first by anisotropic diffusion.
        * If `return_full_prob` is True, array of floats of shape
          `(nlabels, data.shape)`. `output[label_nb, i, j]` is the probability
          that label `label_nb` reaches the pixel `(i, j)` first.

    See also
    --------
    skimage.morphology.watershed: watershed segmentation
        A segmentation algorithm based on mathematical morphology
        and "flooding" of regions from markers.

    Notes
    -----
    Multichannel inputs are scaled with all channel data combined. Ensure all
    channels are separately normalized prior to running this algorithm.

    The `spacing` argument is specifically for anisotropic datasets, where
    data points are spaced differently in one or more spatial dimensions.
    Anisotropic data is commonly encountered in medical imaging.

    The algorithm was first proposed in *Random walks for image
    segmentation*, Leo Grady, IEEE Trans Pattern Anal Mach Intell.
    2006 Nov;28(11):1768-83.

    The algorithm solves the diffusion equation at infinite times for
    sources placed on markers of each phase in turn. A pixel is labeled with
    the phase that has the greatest probability to diffuse first to the pixel.

    The diffusion equation is solved by minimizing x.T L x for each phase,
    where L is the Laplacian of the weighted graph of the image, and x is
    the probability that a marker of the given phase arrives first at a pixel
    by diffusion (x=1 on markers of the phase, x=0 on the other markers, and
    the other coefficients are looked for). Each pixel is attributed the label
    for which it has a maximal value of x. The Laplacian L of the image
    is defined as:

    - L_ii = d_i, the number of neighbors of pixel i (the degree of i)
    - L_ij = -w_ij if i and j are adjacent pixels

    The weight w_ij is a decreasing function of the norm of the local gradient.
    This ensures that diffusion is easier between pixels of similar values.

    When the Laplacian is decomposed into blocks of marked and unmarked
    pixels::

        L = M B.T
            B A

    with first indices corresponding to marked pixels, and then to unmarked
    pixels, minimizing x.T L x for one phase amount to solving::

        A x = - B x_m

    where x_m = 1 on markers of the given phase, and 0 on other markers.
    This linear system is solved in the algorithm using a direct method for
    small images, and an iterative method for larger images.

    Examples
    --------
    >>> np.random.seed(0)
    >>> a = np.zeros((10, 10)) + 0.2 * np.random.rand(10, 10)
    >>> a[5:8, 5:8] += 1
    >>> b = np.zeros_like(a)
    >>> b[3, 3] = 1  # Marker for first phase
    >>> b[6, 6] = 2  # Marker for second phase
    >>> random_walker(a, b)
    array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 2, 2, 2, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=int32)
    """
    # Parse input data
    if mode is None:
        # NOTE(review): the documented auto-selection only triggers when
        # mode=None is passed explicitly, since the default is 'bf'.
        if amg_loaded:
            mode = 'cg_mg'
        elif UmfpackContext is not None:
            mode = 'cg'
        else:
            mode = 'bf'

    if UmfpackContext is None and mode == 'cg':
        warnings.warn('"cg" mode will be used, but it may be slower than '
                      '"bf" because SciPy was built without UMFPACK. Consider'
                      ' rebuilding SciPy with UMFPACK; this will greatly '
                      'accelerate the conjugate gradient ("cg") solver. '
                      'You may also install pyamg and run the random_walker '
                      'function in "cg_mg" mode (see docstring).')

    # Nothing to segment: every pixel already carries a (non-zero) label.
    if (labels != 0).all():
        warnings.warn('Random walker only segments unlabeled areas, where '
                      'labels == 0. No zero valued areas in labels were '
                      'found. Returning provided labels.')

        if return_full_prob:
            # Find and iterate over valid labels
            unique_labels = np.unique(labels)
            unique_labels = unique_labels[unique_labels > 0]

            out_labels = np.empty(labels.shape + (len(unique_labels),),
                                  dtype=np.bool)
            for n, i in enumerate(unique_labels):
                out_labels[..., n] = (labels == i)

        else:
            out_labels = labels
        return out_labels

    # This algorithm expects 4-D arrays of floats, where the first three
    # dimensions are spatial and the final denotes channels. 2-D images have
    # a singleton placeholder dimension added for the third spatial dimension,
    # and single channel images likewise have a singleton added for channels.
    # The following block ensures valid input and coerces it to the correct
    # form.
    if not multichannel:
        if data.ndim < 2 or data.ndim > 3:
            raise ValueError('For non-multichannel input, data must be of '
                             'dimension 2 or 3.')
        dims = data.shape  # To reshape final labeled result
        data = np.atleast_3d(img_as_float(data))[..., np.newaxis]
    else:
        if data.ndim < 3:
            raise ValueError('For multichannel input, data must have 3 or 4 '
                             'dimensions.')
        dims = data[..., 0].shape  # To reshape final labeled result
        data = img_as_float(data)
        if data.ndim == 3:  # 2D multispectral, needs singleton in 3rd axis
            data = data[:, :, np.newaxis, :]

    # Spacing kwarg checks
    if spacing is None:
        spacing = np.asarray((1.,) * 3)
    elif len(spacing) == len(dims):
        if len(spacing) == 2:  # Need a dummy spacing for singleton 3rd dim
            spacing = np.r_[spacing, 1.]
        else:  # Convert to array
            spacing = np.asarray(spacing)
    else:
        raise ValueError('Input argument `spacing` incorrect, should be an '
                         'iterable with one number per spatial dimension.')

    if copy:
        labels = np.copy(labels)
    label_values = np.unique(labels)

    # Reorder label values to have consecutive integers (no gaps)
    if np.any(np.diff(label_values) != 1):
        mask = labels >= 0
        labels[mask] = rank_order(labels[mask])[0].astype(labels.dtype)
    labels = labels.astype(np.int32)

    # If the array has pruned zones, be sure that no isolated pixels
    # exist between pruned zones (they could not be determined)
    if np.any(labels < 0):
        filled = ndimage.binary_propagation(labels > 0, mask=labels >= 0)
        labels[np.logical_and(np.logical_not(filled), labels == 0)] = -1
        del filled
    labels = np.atleast_3d(labels)

    # Pruned (negative) pixels are removed from the graph via the mask.
    if np.any(labels < 0):
        lap_sparse = _build_laplacian(data, spacing, mask=labels >= 0,
                                      beta=beta, multichannel=multichannel)
    else:
        lap_sparse = _build_laplacian(data, spacing, beta=beta,
                                      multichannel=multichannel)
    lap_sparse, B = _buildAB(lap_sparse, labels)

    # We solve the linear system
    # lap_sparse X = B
    # where X[i, j] is the probability that a marker of label i arrives
    # first at pixel j by anisotropic diffusion.
    if mode == 'cg':
        X = _solve_cg(lap_sparse, B, tol=tol,
                      return_full_prob=return_full_prob)
    if mode == 'cg_mg':
        if not amg_loaded:
            warnings.warn(
                """pyamg (http://pyamg.org/)) is needed to use
                this mode, but is not installed. The 'cg' mode will be used
                instead.""")
            X = _solve_cg(lap_sparse, B, tol=tol,
                          return_full_prob=return_full_prob)
        else:
            X = _solve_cg_mg(lap_sparse, B, tol=tol,
                             return_full_prob=return_full_prob)
    if mode == 'bf':
        X = _solve_bf(lap_sparse, B,
                      return_full_prob=return_full_prob)

    # Clean up results
    if return_full_prob:
        labels = labels.astype(np.float)
        X = np.array([_clean_labels_ar(Xline, labels, copy=True).reshape(dims)
                      for Xline in X])

        # Force seed pixels to probability 1 for their own label, 0 elsewhere.
        for i in range(1, int(labels.max()) + 1):
            mask_i = np.squeeze(labels == i)
            X[:, mask_i] = 0
            X[i - 1, mask_i] = 1
    else:
        # +1 maps solver phase indices (0-based) back to label values.
        X = _clean_labels_ar(X + 1, labels).reshape(dims)
    return X
def _solve_bf(lap_sparse, B, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i. An LU decomposition
of lap_sparse is computed first. For each pixel, the label i
corresponding to the maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
solver = sparse.linalg.factorized(lap_sparse.astype(np.double))
X = np.array([solver(np.array((-B[i]).todense()).ravel())
for i in range(len(B))])
if not return_full_prob:
X = np.argmax(X, axis=0)
return X
def _solve_cg(lap_sparse, B, tol, return_full_prob=False):
"""
solves lap_sparse X_i = B_i for each phase i, using the conjugate
gradient method. For each pixel, the label i corresponding to the
maximal X_i is returned.
"""
lap_sparse = lap_sparse.tocsc()
X = []
for i in range(len(B)):
x0 = cg(lap_sparse, -B[i].todense(), tol=tol)[0]
X.append(x0)
if not return_full_prob:
X = np.array(X)
X = np.argmax(X, axis=0)
return X
def _solve_cg_mg(lap_sparse, B, tol, return_full_prob=False):
    """Solve lap_sparse X_i = B_i for every phase i with conjugate gradient,
    preconditioned by a ruge-stuben multigrid solver (pyamg).

    Returns the per-phase probability vectors (as a list) when
    ``return_full_prob`` is true, otherwise the argmax label per pixel.
    """
    ml = ruge_stuben_solver(lap_sparse)
    preconditioner = ml.aspreconditioner(cycle='V')
    X = [cg(lap_sparse, -rhs.todense(), tol=tol, M=preconditioner,
            maxiter=30)[0]
         for rhs in B]
    if not return_full_prob:
        X = np.argmax(np.array(X), axis=0)
    return X
| bennlich/scikit-image | skimage/segmentation/random_walker_segmentation.py | Python | bsd-3-clause | 20,432 |
#!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fnmatch
import optparse
import os
import sys
REPOSITORY_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'build/android/gyp/util'))
sys.path.append(os.path.join(REPOSITORY_ROOT, 'net/tools/net_docs'))
import build_utils
import net_docs
from markdown.postprocessors import Postprocessor
from markdown.extensions import Extension
class CronetPostprocessor(Postprocessor):
    """Markdown postprocessor that escapes '@Override' in the generated HTML
    so javadoc does not mistake the '@' for a javadoc tag."""

    def run(self, text):
        # BUG FIX: the previous code replaced '@Override' with itself,
        # a no-op.  Use the HTML entity for '@' instead.
        return text.replace('@Override', '&#64;Override')
class CronetExtension(Extension):
    """Markdown extension that installs CronetPostprocessor at the end of
    the postprocessor chain."""

    def extendMarkdown(self, md, md_globals):
        md.postprocessors.add('CronetPostprocessor',
                              CronetPostprocessor(md), '_end')
def GenerateJavadoc(options):
    """Run the ant 'doc' target to build javadoc into <output-dir>/javadoc.

    The output directory is deleted and recreated on every run.
    """
    output_dir = os.path.abspath(os.path.join(options.output_dir, 'javadoc'))
    working_dir = os.path.join(options.input_dir, 'android/java')
    overview_file = os.path.abspath(options.overview_file)

    build_utils.DeleteDirectory(output_dir)
    build_utils.MakeDirectory(output_dir)
    javadoc_cmd = ['ant', '-Dsource.dir=src', '-Ddoc.dir=' + output_dir,
                   '-Doverview=' + overview_file, 'doc']
    build_utils.CheckOutput(javadoc_cmd, cwd=working_dir)
def main():
  """Parses command-line flags, builds the net docs and the javadoc."""
  option_parser = optparse.OptionParser()
  # Declared in a table so each flag is added identically.
  for flag, description in (
      ('--output-dir', 'Directory to put javadoc'),
      ('--input-dir', 'Root of cronet source'),
      ('--overview-file', 'Path of the overview page'),
      ('--readme-file', 'Path of the README.md')):
    option_parser.add_option(flag, help=description)
  options, _ = option_parser.parse_args()
  net_docs.ProcessDocs([options.readme_file], options.input_dir,
                       options.output_dir, extensions=[CronetExtension()])
  GenerateJavadoc(options)


if __name__ == '__main__':
  sys.exit(main())
| vadimtk/chrome4sdp | components/cronet/tools/generate_javadoc.py | Python | bsd-3-clause | 1,986 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from heat.common import template_format
from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron import qos
from heat.engine import rsrc_defn
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
qos_policy_template = '''
heat_template_version: 2016-04-08
description: This template to define a neutron qos policy.
resources:
my_qos_policy:
type: OS::Neutron::QoSPolicy
properties:
name: test_policy
description: a policy for test
shared: true
tenant_id: d66c74c01d6c41b9846088c1ad9634d0
'''
bandwidth_limit_rule_template = '''
heat_template_version: 2016-04-08
description: This template to define a neutron bandwidth limit rule.
resources:
my_bandwidth_limit_rule:
type: OS::Neutron::QoSBandwidthLimitRule
properties:
policy: 477e8273-60a7-4c41-b683-fdb0bc7cd151
max_kbps: 1000
max_burst_kbps: 1000
tenant_id: d66c74c01d6c41b9846088c1ad9634d0
'''
class NeutronQoSPolicyTest(common.HeatTestCase):
    """Tests for the OS::Neutron::QoSPolicy resource plugin.

    The neutron client is a MagicMock, so each test only verifies that the
    resource handlers issue the expected client calls and map the results
    back onto the resource.
    """

    def setUp(self):
        """Build a stack from qos_policy_template with a mocked client."""
        super(NeutronQoSPolicyTest, self).setUp()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()
        tpl = template_format.parse(qos_policy_template)
        self.stack = stack.Stack(
            self.ctx,
            'neutron_qos_policy_test',
            template.Template(tpl)
        )
        # All neutron API traffic goes through this mock.
        self.neutronclient = mock.MagicMock()
        self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
                         return_value=True)
        self.my_qos_policy = self.stack['my_qos_policy']
        self.my_qos_policy.client = mock.MagicMock(
            return_value=self.neutronclient)

    def test_resource_mapping(self):
        """The plugin maps OS::Neutron::QoSPolicy to qos.QoSPolicy."""
        mapping = qos.resource_mapping()
        self.assertEqual(qos.QoSPolicy, mapping['OS::Neutron::QoSPolicy'])
        self.assertIsInstance(self.my_qos_policy, qos.QoSPolicy)

    def test_qos_policy_handle_create(self):
        """Create passes the template properties and stores the new id."""
        policy = {
            'policy': {
                'name': 'test_policy',
                'description': 'a policy for test',
                'id': '9c1eb3fe-7bba-479d-bd43-1d497e53c384',
                'rules': [],
                'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0',
                'shared': True
            }
        }
        create_props = {'name': 'test_policy',
                        'description': 'a policy for test',
                        'shared': True,
                        'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'}
        self.neutronclient.create_qos_policy.return_value = policy
        self.my_qos_policy.handle_create()
        self.assertEqual('9c1eb3fe-7bba-479d-bd43-1d497e53c384',
                         self.my_qos_policy.resource_id)
        self.neutronclient.create_qos_policy.assert_called_once_with(
            {'policy': create_props}
        )

    def test_qos_policy_handle_delete(self):
        """Delete issues delete_qos_policy with the stored resource id."""
        policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        self.my_qos_policy.resource_id = policy_id
        self.neutronclient.delete_qos_policy.return_value = None
        self.assertIsNone(self.my_qos_policy.handle_delete())
        self.neutronclient.delete_qos_policy.assert_called_once_with(
            self.my_qos_policy.resource_id)

    def test_qos_policy_handle_delete_not_found(self):
        """A NotFound from neutron is swallowed (delete is idempotent)."""
        policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        self.my_qos_policy.resource_id = policy_id
        not_found = self.neutronclient.NotFound
        self.neutronclient.delete_qos_policy.side_effect = not_found
        self.assertIsNone(self.my_qos_policy.handle_delete())
        self.neutronclient.delete_qos_policy.assert_called_once_with(
            self.my_qos_policy.resource_id)

    def test_qos_policy_handle_delete_resource_id_is_none(self):
        """No client call is made when the resource was never created."""
        self.my_qos_policy.resource_id = None
        self.assertIsNone(self.my_qos_policy.handle_delete())
        self.assertEqual(0, self.neutronclient.delete_qos_policy.call_count)

    def test_qos_policy_handle_update(self):
        """Update forwards the changed properties to update_qos_policy."""
        policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        self.my_qos_policy.resource_id = policy_id
        props = {
            'name': 'new_name',
            'description': 'test',
            'shared': False
        }
        update_snippet = rsrc_defn.ResourceDefinition(
            self.my_qos_policy.name,
            self.my_qos_policy.type(),
            props)
        self.my_qos_policy.handle_update(json_snippet=update_snippet,
                                         tmpl_diff={},
                                         prop_diff=props)
        self.neutronclient.update_qos_policy.assert_called_once_with(
            policy_id, {'policy': props})

    def test_qos_policy_get_attr(self):
        """Attributes are resolved via show_qos_policy (once per FnGetAtt)."""
        self.my_qos_policy.resource_id = 'test policy'
        policy = {
            'policy': {
                'name': 'test_policy',
                'description': 'a policy for test',
                'id': '9c1eb3fe-7bba-479d-bd43-1d497e53c384',
                'rules': [],
                'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0',
                'shared': True
            }
        }
        self.neutronclient.show_qos_policy.return_value = policy
        self.assertEqual([], self.my_qos_policy.FnGetAtt('rules'))
        self.assertEqual(policy['policy'],
                         self.my_qos_policy.FnGetAtt('show'))
        self.neutronclient.show_qos_policy.assert_has_calls(
            [mock.call(self.my_qos_policy.resource_id)] * 2)
class NeutronQoSBandwidthLimitRuleTest(common.HeatTestCase):
    """Tests for the OS::Neutron::QoSBandwidthLimitRule resource plugin.

    The neutron client is a MagicMock; each test checks the client calls
    made by the rule's handlers. Rule API calls are scoped to the parent
    policy id resolved in setUp.
    """

    def setUp(self):
        """Build a stack from bandwidth_limit_rule_template with mocks."""
        super(NeutronQoSBandwidthLimitRuleTest, self).setUp()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()
        tpl = template_format.parse(bandwidth_limit_rule_template)
        self.stack = stack.Stack(
            self.ctx,
            'neutron_bandwidth_limit_rule_test',
            template.Template(tpl)
        )
        self.neutronclient = mock.MagicMock()
        self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
                         return_value=True)
        self.bandwidth_limit_rule = self.stack['my_bandwidth_limit_rule']
        self.bandwidth_limit_rule.client = mock.MagicMock(
            return_value=self.neutronclient)
        # The plugin resolves the parent policy name/id through neutronV20.
        self.find_mock = self.patchobject(
            neutron.neutronV20,
            'find_resourceid_by_name_or_id')
        self.policy_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        self.find_mock.return_value = self.policy_id

    def test_resource_mapping(self):
        """The plugin maps OS::Neutron::QoSBandwidthLimitRule correctly."""
        mapping = qos.resource_mapping()
        self.assertEqual(2, len(mapping))
        self.assertEqual(qos.QoSBandwidthLimitRule,
                         mapping['OS::Neutron::QoSBandwidthLimitRule'])
        self.assertIsInstance(self.bandwidth_limit_rule,
                              qos.QoSBandwidthLimitRule)

    def test_rule_handle_create(self):
        """Create posts the rule under the parent policy, stores the id."""
        rule = {
            'bandwidth_limit_rule': {
                'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
                'max_kbps': 1000,
                'max_burst_kbps': 1000,
                'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
            }
        }
        create_props = {'max_kbps': 1000,
                        'max_burst_kbps': 1000,
                        'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'}
        self.neutronclient.create_bandwidth_limit_rule.return_value = rule
        self.bandwidth_limit_rule.handle_create()
        self.assertEqual('cf0eab12-ef8b-4a62-98d0-70576583c17a',
                         self.bandwidth_limit_rule.resource_id)
        self.neutronclient.create_bandwidth_limit_rule.assert_called_once_with(
            self.policy_id,
            {'bandwidth_limit_rule': create_props})

    def test_rule_handle_delete(self):
        """Delete removes the rule from its parent policy."""
        rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
        self.bandwidth_limit_rule.resource_id = rule_id
        self.neutronclient.delete_bandwidth_limit_rule.return_value = None
        self.assertIsNone(self.bandwidth_limit_rule.handle_delete())
        self.neutronclient.delete_bandwidth_limit_rule.assert_called_once_with(
            rule_id, self.policy_id)

    def test_rule_handle_delete_not_found(self):
        """A NotFound from neutron is swallowed (delete is idempotent)."""
        rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
        self.bandwidth_limit_rule.resource_id = rule_id
        not_found = self.neutronclient.NotFound
        self.neutronclient.delete_bandwidth_limit_rule.side_effect = not_found
        self.assertIsNone(self.bandwidth_limit_rule.handle_delete())
        self.neutronclient.delete_bandwidth_limit_rule.assert_called_once_with(
            rule_id, self.policy_id)

    def test_rule_handle_delete_resource_id_is_none(self):
        """No client call is made when the rule was never created."""
        self.bandwidth_limit_rule.resource_id = None
        self.assertIsNone(self.bandwidth_limit_rule.handle_delete())
        # BUG FIX: the original asserted on
        # self.neutronclient.bandwidth_limit_rule.call_count, a different
        # (auto-created) mock attribute that is always 0, so the assertion
        # could never fail. Check the method that handle_delete would call.
        self.assertEqual(
            0, self.neutronclient.delete_bandwidth_limit_rule.call_count)

    def test_rule_handle_update(self):
        """Update forwards the changed properties, scoped to the policy."""
        rule_id = 'cf0eab12-ef8b-4a62-98d0-70576583c17a'
        self.bandwidth_limit_rule.resource_id = rule_id
        prop_diff = {
            'max_kbps': 500,
            'max_burst_kbps': 400
        }
        self.bandwidth_limit_rule.handle_update(
            json_snippet={},
            tmpl_diff={},
            prop_diff=prop_diff)
        self.neutronclient.update_bandwidth_limit_rule.assert_called_once_with(
            rule_id,
            self.policy_id,
            {'bandwidth_limit_rule': prop_diff})

    def test_rule_get_attr(self):
        """Attributes are resolved via show_bandwidth_limit_rule."""
        self.bandwidth_limit_rule.resource_id = 'test rule'
        rule = {
            'bandwidth_limit_rule': {
                'id': 'cf0eab12-ef8b-4a62-98d0-70576583c17a',
                'max_kbps': 1000,
                'max_burst_kbps': 1000,
                'tenant_id': 'd66c74c01d6c41b9846088c1ad9634d0'
            }
        }
        self.neutronclient.show_bandwidth_limit_rule.return_value = rule
        self.assertEqual(rule['bandwidth_limit_rule'],
                         self.bandwidth_limit_rule.FnGetAtt('show'))
        self.neutronclient.show_bandwidth_limit_rule.assert_called_once_with(
            self.bandwidth_limit_rule.resource_id, self.policy_id)
| dragorosson/heat | heat/tests/openstack/neutron/test_qos.py | Python | apache-2.0 | 10,867 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.common.types import bidding
from google.ads.googleads.v8.common.types import custom_parameter
from google.ads.googleads.v8.common.types import frequency_cap
from google.ads.googleads.v8.common.types import (
real_time_bidding_setting as gagc_real_time_bidding_setting,
)
from google.ads.googleads.v8.common.types import (
targeting_setting as gagc_targeting_setting,
)
from google.ads.googleads.v8.enums.types import (
ad_serving_optimization_status as gage_ad_serving_optimization_status,
)
from google.ads.googleads.v8.enums.types import (
advertising_channel_sub_type as gage_advertising_channel_sub_type,
)
from google.ads.googleads.v8.enums.types import (
advertising_channel_type as gage_advertising_channel_type,
)
from google.ads.googleads.v8.enums.types import app_campaign_app_store
from google.ads.googleads.v8.enums.types import (
app_campaign_bidding_strategy_goal_type,
)
from google.ads.googleads.v8.enums.types import asset_field_type
from google.ads.googleads.v8.enums.types import (
bidding_strategy_type as gage_bidding_strategy_type,
)
from google.ads.googleads.v8.enums.types import brand_safety_suitability
from google.ads.googleads.v8.enums.types import campaign_experiment_type
from google.ads.googleads.v8.enums.types import campaign_serving_status
from google.ads.googleads.v8.enums.types import campaign_status
from google.ads.googleads.v8.enums.types import (
location_source_type as gage_location_source_type,
)
from google.ads.googleads.v8.enums.types import (
negative_geo_target_type as gage_negative_geo_target_type,
)
from google.ads.googleads.v8.enums.types import optimization_goal_type
from google.ads.googleads.v8.enums.types import (
payment_mode as gage_payment_mode,
)
from google.ads.googleads.v8.enums.types import (
positive_geo_target_type as gage_positive_geo_target_type,
)
from google.ads.googleads.v8.enums.types import (
vanity_pharma_display_url_mode as gage_vanity_pharma_display_url_mode,
)
from google.ads.googleads.v8.enums.types import (
vanity_pharma_text as gage_vanity_pharma_text,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v8.resources",
marshal="google.ads.googleads.v8",
manifest={"Campaign",},
)
class Campaign(proto.Message):
    r"""A campaign.

    Attributes:
        resource_name (str):
            Immutable. The resource name of the campaign. Campaign
            resource names have the form:
            ``customers/{customer_id}/campaigns/{campaign_id}``
        id (int):
            Output only. The ID of the campaign.
        name (str):
            The name of the campaign.
            This field is required and should not be empty
            when creating new campaigns.
            It must not contain any null (code point 0x0),
            NL line feed (code point 0xA) or carriage return
            (code point 0xD) characters.
        status (google.ads.googleads.v8.enums.types.CampaignStatusEnum.CampaignStatus):
            The status of the campaign.
            When a new campaign is added, the status
            defaults to ENABLED.
        serving_status (google.ads.googleads.v8.enums.types.CampaignServingStatusEnum.CampaignServingStatus):
            Output only. The ad serving status of the
            campaign.
        ad_serving_optimization_status (google.ads.googleads.v8.enums.types.AdServingOptimizationStatusEnum.AdServingOptimizationStatus):
            The ad serving optimization status of the
            campaign.
        advertising_channel_type (google.ads.googleads.v8.enums.types.AdvertisingChannelTypeEnum.AdvertisingChannelType):
            Immutable. The primary serving target for ads within the
            campaign. The targeting options can be refined in
            ``network_settings``.
            This field is required and should not be empty when creating
            new campaigns.
            Can be set only when creating campaigns. After the campaign
            is created, the field can not be changed.
        advertising_channel_sub_type (google.ads.googleads.v8.enums.types.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType):
            Immutable. Optional refinement to
            ``advertising_channel_type``. Must be a valid sub-type of
            the parent channel type.
            Can be set only when creating campaigns. After campaign is
            created, the field can not be changed.
        tracking_url_template (str):
            The URL template for constructing a tracking
            URL.
        url_custom_parameters (Sequence[google.ads.googleads.v8.common.types.CustomParameter]):
            The list of mappings used to substitute custom parameter
            tags in a ``tracking_url_template``, ``final_urls``, or
            ``mobile_final_urls``.
        real_time_bidding_setting (google.ads.googleads.v8.common.types.RealTimeBiddingSetting):
            Settings for Real-Time Bidding, a feature
            only available for campaigns targeting the Ad
            Exchange network.
        network_settings (google.ads.googleads.v8.resources.types.Campaign.NetworkSettings):
            The network settings for the campaign.
        hotel_setting (google.ads.googleads.v8.resources.types.Campaign.HotelSettingInfo):
            Immutable. The hotel setting for the
            campaign.
        dynamic_search_ads_setting (google.ads.googleads.v8.resources.types.Campaign.DynamicSearchAdsSetting):
            The setting for controlling Dynamic Search
            Ads (DSA).
        shopping_setting (google.ads.googleads.v8.resources.types.Campaign.ShoppingSetting):
            The setting for controlling Shopping
            campaigns.
        targeting_setting (google.ads.googleads.v8.common.types.TargetingSetting):
            Setting for targeting related features.
        geo_target_type_setting (google.ads.googleads.v8.resources.types.Campaign.GeoTargetTypeSetting):
            The setting for ads geotargeting.
        local_campaign_setting (google.ads.googleads.v8.resources.types.Campaign.LocalCampaignSetting):
            The setting for local campaign.
        app_campaign_setting (google.ads.googleads.v8.resources.types.Campaign.AppCampaignSetting):
            The setting related to App Campaign.
        labels (Sequence[str]):
            Output only. The resource names of labels
            attached to this campaign.
        experiment_type (google.ads.googleads.v8.enums.types.CampaignExperimentTypeEnum.CampaignExperimentType):
            Output only. The type of campaign: normal,
            draft, or experiment.
        base_campaign (str):
            Output only. The resource name of the base campaign of a
            draft or experiment campaign. For base campaigns, this is
            equal to ``resource_name``.
            This field is read-only.
        campaign_budget (str):
            The budget of the campaign.
        bidding_strategy_type (google.ads.googleads.v8.enums.types.BiddingStrategyTypeEnum.BiddingStrategyType):
            Output only. The type of bidding strategy.
            A bidding strategy can be created by setting either the
            bidding scheme to create a standard bidding strategy or the
            ``bidding_strategy`` field to create a portfolio bidding
            strategy.
            This field is read-only.
        accessible_bidding_strategy (str):
            Output only. Resource name of AccessibleBiddingStrategy, a
            read-only view of the unrestricted attributes of the
            attached portfolio bidding strategy identified by
            'bidding_strategy'. Empty, if the campaign does not use a
            portfolio strategy. Unrestricted strategy attributes are
            available to all customers with whom the strategy is shared
            and are read from the AccessibleBiddingStrategy resource. In
            contrast, restricted attributes are only available to the
            owner customer of the strategy and their managers.
            Restricted attributes can only be read from the
            BiddingStrategy resource.
        start_date (str):
            The date when campaign started.
        end_date (str):
            The last day of the campaign.
        final_url_suffix (str):
            Suffix used to append query parameters to
            landing pages that are served with parallel
            tracking.
        frequency_caps (Sequence[google.ads.googleads.v8.common.types.FrequencyCapEntry]):
            A list that limits how often each user will
            see this campaign's ads.
        video_brand_safety_suitability (google.ads.googleads.v8.enums.types.BrandSafetySuitabilityEnum.BrandSafetySuitability):
            Output only. 3-Tier Brand Safety setting for
            the campaign.
        vanity_pharma (google.ads.googleads.v8.resources.types.Campaign.VanityPharma):
            Describes how unbranded pharma ads will be
            displayed.
        selective_optimization (google.ads.googleads.v8.resources.types.Campaign.SelectiveOptimization):
            Selective optimization setting for this
            campaign, which includes a set of conversion
            actions to optimize this campaign towards.
        optimization_goal_setting (google.ads.googleads.v8.resources.types.Campaign.OptimizationGoalSetting):
            Optimization goal setting for this campaign,
            which includes a set of optimization goal types.
        tracking_setting (google.ads.googleads.v8.resources.types.Campaign.TrackingSetting):
            Output only. Campaign-level settings for
            tracking information.
        payment_mode (google.ads.googleads.v8.enums.types.PaymentModeEnum.PaymentMode):
            Payment mode for the campaign.
        optimization_score (float):
            Output only. Optimization score of the
            campaign.
            Optimization score is an estimate of how well a
            campaign is set to perform. It ranges from 0%
            (0.0) to 100% (1.0), with 100% indicating that
            the campaign is performing at full potential.
            This field is null for unscored campaigns.
            See "About optimization score" at
            https://support.google.com/google-
            ads/answer/9061546.
            This field is read-only.
        excluded_parent_asset_field_types (Sequence[google.ads.googleads.v8.enums.types.AssetFieldTypeEnum.AssetFieldType]):
            The asset field types that should be excluded
            from this campaign. Asset links with these field
            types will not be inherited by this campaign
            from the upper level.
        bidding_strategy (str):
            Portfolio bidding strategy used by campaign.
        commission (google.ads.googleads.v8.common.types.Commission):
            Commission is an automatic bidding strategy
            in which the advertiser pays a certain portion
            of the conversion value.
        manual_cpc (google.ads.googleads.v8.common.types.ManualCpc):
            Standard Manual CPC bidding strategy.
            Manual click-based bidding where user pays per
            click.
        manual_cpm (google.ads.googleads.v8.common.types.ManualCpm):
            Standard Manual CPM bidding strategy.
            Manual impression-based bidding where user pays
            per thousand impressions.
        manual_cpv (google.ads.googleads.v8.common.types.ManualCpv):
            Output only. A bidding strategy that pays a
            configurable amount per video view.
        maximize_conversions (google.ads.googleads.v8.common.types.MaximizeConversions):
            Standard Maximize Conversions bidding
            strategy that automatically maximizes number of
            conversions while spending your budget.
        maximize_conversion_value (google.ads.googleads.v8.common.types.MaximizeConversionValue):
            Standard Maximize Conversion Value bidding
            strategy that automatically sets bids to
            maximize revenue while spending your budget.
        target_cpa (google.ads.googleads.v8.common.types.TargetCpa):
            Standard Target CPA bidding strategy that
            automatically sets bids to help get as many
            conversions as possible at the target cost-per-
            acquisition (CPA) you set.
        target_impression_share (google.ads.googleads.v8.common.types.TargetImpressionShare):
            Target Impression Share bidding strategy. An
            automated bidding strategy that sets bids to
            achieve a desired percentage of impressions.
        target_roas (google.ads.googleads.v8.common.types.TargetRoas):
            Standard Target ROAS bidding strategy that
            automatically maximizes revenue while averaging
            a specific target return on ad spend (ROAS).
        target_spend (google.ads.googleads.v8.common.types.TargetSpend):
            Standard Target Spend bidding strategy that
            automatically sets your bids to help get as many
            clicks as possible within your budget.
        percent_cpc (google.ads.googleads.v8.common.types.PercentCpc):
            Standard Percent Cpc bidding strategy where
            bids are a fraction of the advertised price for
            some good or service.
        target_cpm (google.ads.googleads.v8.common.types.TargetCpm):
            A bidding strategy that automatically
            optimizes cost per thousand impressions.
    """

    # NOTE(review): this class is generated from the Google Ads proto
    # definitions. The proto field tag numbers below must stay in sync with
    # campaign.proto — do not renumber or reorder fields by hand.

    class NetworkSettings(proto.Message):
        r"""The network settings for the campaign.

        Attributes:
            target_google_search (bool):
                Whether ads will be served with google.com
                search results.
            target_search_network (bool):
                Whether ads will be served on partner sites in the Google
                Search Network (requires ``target_google_search`` to also be
                ``true``).
            target_content_network (bool):
                Whether ads will be served on specified
                placements in the Google Display Network.
                Placements are specified using the Placement
                criterion.
            target_partner_search_network (bool):
                Whether ads will be served on the Google
                Partner Network. This is available only to some
                select Google partner accounts.
        """

        target_google_search = proto.Field(proto.BOOL, number=5, optional=True,)
        target_search_network = proto.Field(
            proto.BOOL, number=6, optional=True,
        )
        target_content_network = proto.Field(
            proto.BOOL, number=7, optional=True,
        )
        target_partner_search_network = proto.Field(
            proto.BOOL, number=8, optional=True,
        )

    class HotelSettingInfo(proto.Message):
        r"""Campaign-level settings for hotel ads.

        Attributes:
            hotel_center_id (int):
                Immutable. The linked Hotel Center account.
        """

        hotel_center_id = proto.Field(proto.INT64, number=2, optional=True,)

    class SelectiveOptimization(proto.Message):
        r"""Selective optimization setting for this campaign, which
        includes a set of conversion actions to optimize this campaign
        towards.

        Attributes:
            conversion_actions (Sequence[str]):
                The selected set of conversion actions for
                optimizing this campaign.
        """

        conversion_actions = proto.RepeatedField(proto.STRING, number=2,)

    class DynamicSearchAdsSetting(proto.Message):
        r"""The setting for controlling Dynamic Search Ads (DSA).

        Attributes:
            domain_name (str):
                Required. The Internet domain name that this
                setting represents, e.g., "google.com" or
                "www.google.com".
            language_code (str):
                Required. The language code specifying the
                language of the domain, e.g., "en".
            use_supplied_urls_only (bool):
                Whether the campaign uses advertiser supplied
                URLs exclusively.
            feeds (Sequence[str]):
                The list of page feeds associated with the
                campaign.
        """

        domain_name = proto.Field(proto.STRING, number=6,)
        language_code = proto.Field(proto.STRING, number=7,)
        use_supplied_urls_only = proto.Field(
            proto.BOOL, number=8, optional=True,
        )
        feeds = proto.RepeatedField(proto.STRING, number=9,)

    class ShoppingSetting(proto.Message):
        r"""The setting for Shopping campaigns. Defines the universe of
        products that can be advertised by the campaign, and how this
        campaign interacts with other Shopping campaigns.

        Attributes:
            merchant_id (int):
                Immutable. ID of the Merchant Center account.
                This field is required for create operations.
                This field is immutable for Shopping campaigns.
            sales_country (str):
                Immutable. Sales country of products to
                include in the campaign. This field is required
                for Shopping campaigns. This field is immutable.
                This field is optional for non-Shopping
                campaigns, but it must be equal to 'ZZ' if set.
            campaign_priority (int):
                Priority of the campaign. Campaigns with
                numerically higher priorities take precedence
                over those with lower priorities. This field is
                required for Shopping campaigns, with values
                between 0 and 2, inclusive.
                This field is optional for Smart Shopping
                campaigns, but must be equal to 3 if set.
            enable_local (bool):
                Whether to include local products.
        """

        merchant_id = proto.Field(proto.INT64, number=5, optional=True,)
        sales_country = proto.Field(proto.STRING, number=6, optional=True,)
        campaign_priority = proto.Field(proto.INT32, number=7, optional=True,)
        enable_local = proto.Field(proto.BOOL, number=8, optional=True,)

    class TrackingSetting(proto.Message):
        r"""Campaign-level settings for tracking information.

        Attributes:
            tracking_url (str):
                Output only. The url used for dynamic
                tracking.
        """

        tracking_url = proto.Field(proto.STRING, number=2, optional=True,)

    class LocalCampaignSetting(proto.Message):
        r"""Campaign setting for local campaigns.

        Attributes:
            location_source_type (google.ads.googleads.v8.enums.types.LocationSourceTypeEnum.LocationSourceType):
                The location source type for this local
                campaign.
        """

        location_source_type = proto.Field(
            proto.ENUM,
            number=1,
            enum=gage_location_source_type.LocationSourceTypeEnum.LocationSourceType,
        )

    class GeoTargetTypeSetting(proto.Message):
        r"""Represents a collection of settings related to ads
        geotargeting.

        Attributes:
            positive_geo_target_type (google.ads.googleads.v8.enums.types.PositiveGeoTargetTypeEnum.PositiveGeoTargetType):
                The setting used for positive geotargeting in
                this particular campaign.
            negative_geo_target_type (google.ads.googleads.v8.enums.types.NegativeGeoTargetTypeEnum.NegativeGeoTargetType):
                The setting used for negative geotargeting in
                this particular campaign.
        """

        positive_geo_target_type = proto.Field(
            proto.ENUM,
            number=1,
            enum=gage_positive_geo_target_type.PositiveGeoTargetTypeEnum.PositiveGeoTargetType,
        )
        negative_geo_target_type = proto.Field(
            proto.ENUM,
            number=2,
            enum=gage_negative_geo_target_type.NegativeGeoTargetTypeEnum.NegativeGeoTargetType,
        )

    class AppCampaignSetting(proto.Message):
        r"""Campaign-level settings for App Campaigns.

        Attributes:
            bidding_strategy_goal_type (google.ads.googleads.v8.enums.types.AppCampaignBiddingStrategyGoalTypeEnum.AppCampaignBiddingStrategyGoalType):
                Represents the goal which the bidding
                strategy of this app campaign should optimize
                towards.
            app_id (str):
                Immutable. A string that uniquely identifies
                a mobile application.
            app_store (google.ads.googleads.v8.enums.types.AppCampaignAppStoreEnum.AppCampaignAppStore):
                Immutable. The application store that
                distributes this specific app.
        """

        bidding_strategy_goal_type = proto.Field(
            proto.ENUM,
            number=1,
            enum=app_campaign_bidding_strategy_goal_type.AppCampaignBiddingStrategyGoalTypeEnum.AppCampaignBiddingStrategyGoalType,
        )
        app_id = proto.Field(proto.STRING, number=4, optional=True,)
        app_store = proto.Field(
            proto.ENUM,
            number=3,
            enum=app_campaign_app_store.AppCampaignAppStoreEnum.AppCampaignAppStore,
        )

    class VanityPharma(proto.Message):
        r"""Describes how unbranded pharma ads will be displayed.

        Attributes:
            vanity_pharma_display_url_mode (google.ads.googleads.v8.enums.types.VanityPharmaDisplayUrlModeEnum.VanityPharmaDisplayUrlMode):
                The display mode for vanity pharma URLs.
            vanity_pharma_text (google.ads.googleads.v8.enums.types.VanityPharmaTextEnum.VanityPharmaText):
                The text that will be displayed in display
                URL of the text ad when website description is
                the selected display mode for vanity pharma
                URLs.
        """

        vanity_pharma_display_url_mode = proto.Field(
            proto.ENUM,
            number=1,
            enum=gage_vanity_pharma_display_url_mode.VanityPharmaDisplayUrlModeEnum.VanityPharmaDisplayUrlMode,
        )
        vanity_pharma_text = proto.Field(
            proto.ENUM,
            number=2,
            enum=gage_vanity_pharma_text.VanityPharmaTextEnum.VanityPharmaText,
        )

    class OptimizationGoalSetting(proto.Message):
        r"""Optimization goal setting for this campaign, which includes a
        set of optimization goal types.

        Attributes:
            optimization_goal_types (Sequence[google.ads.googleads.v8.enums.types.OptimizationGoalTypeEnum.OptimizationGoalType]):
                The list of optimization goal types.
        """

        optimization_goal_types = proto.RepeatedField(
            proto.ENUM,
            number=1,
            enum=optimization_goal_type.OptimizationGoalTypeEnum.OptimizationGoalType,
        )

    # --- Field descriptors (tag numbers come from the .proto definition) ---

    resource_name = proto.Field(proto.STRING, number=1,)
    id = proto.Field(proto.INT64, number=59, optional=True,)
    name = proto.Field(proto.STRING, number=58, optional=True,)
    status = proto.Field(
        proto.ENUM,
        number=5,
        enum=campaign_status.CampaignStatusEnum.CampaignStatus,
    )
    serving_status = proto.Field(
        proto.ENUM,
        number=21,
        enum=campaign_serving_status.CampaignServingStatusEnum.CampaignServingStatus,
    )
    ad_serving_optimization_status = proto.Field(
        proto.ENUM,
        number=8,
        enum=gage_ad_serving_optimization_status.AdServingOptimizationStatusEnum.AdServingOptimizationStatus,
    )
    advertising_channel_type = proto.Field(
        proto.ENUM,
        number=9,
        enum=gage_advertising_channel_type.AdvertisingChannelTypeEnum.AdvertisingChannelType,
    )
    advertising_channel_sub_type = proto.Field(
        proto.ENUM,
        number=10,
        enum=gage_advertising_channel_sub_type.AdvertisingChannelSubTypeEnum.AdvertisingChannelSubType,
    )
    tracking_url_template = proto.Field(proto.STRING, number=60, optional=True,)
    url_custom_parameters = proto.RepeatedField(
        proto.MESSAGE, number=12, message=custom_parameter.CustomParameter,
    )
    real_time_bidding_setting = proto.Field(
        proto.MESSAGE,
        number=39,
        message=gagc_real_time_bidding_setting.RealTimeBiddingSetting,
    )
    network_settings = proto.Field(
        proto.MESSAGE, number=14, message=NetworkSettings,
    )
    hotel_setting = proto.Field(
        proto.MESSAGE, number=32, message=HotelSettingInfo,
    )
    dynamic_search_ads_setting = proto.Field(
        proto.MESSAGE, number=33, message=DynamicSearchAdsSetting,
    )
    shopping_setting = proto.Field(
        proto.MESSAGE, number=36, message=ShoppingSetting,
    )
    targeting_setting = proto.Field(
        proto.MESSAGE,
        number=43,
        message=gagc_targeting_setting.TargetingSetting,
    )
    geo_target_type_setting = proto.Field(
        proto.MESSAGE, number=47, message=GeoTargetTypeSetting,
    )
    local_campaign_setting = proto.Field(
        proto.MESSAGE, number=50, message=LocalCampaignSetting,
    )
    app_campaign_setting = proto.Field(
        proto.MESSAGE, number=51, message=AppCampaignSetting,
    )
    labels = proto.RepeatedField(proto.STRING, number=61,)
    experiment_type = proto.Field(
        proto.ENUM,
        number=17,
        enum=campaign_experiment_type.CampaignExperimentTypeEnum.CampaignExperimentType,
    )
    base_campaign = proto.Field(proto.STRING, number=56, optional=True,)
    campaign_budget = proto.Field(proto.STRING, number=62, optional=True,)
    bidding_strategy_type = proto.Field(
        proto.ENUM,
        number=22,
        enum=gage_bidding_strategy_type.BiddingStrategyTypeEnum.BiddingStrategyType,
    )
    accessible_bidding_strategy = proto.Field(proto.STRING, number=71,)
    start_date = proto.Field(proto.STRING, number=63, optional=True,)
    end_date = proto.Field(proto.STRING, number=64, optional=True,)
    final_url_suffix = proto.Field(proto.STRING, number=65, optional=True,)
    frequency_caps = proto.RepeatedField(
        proto.MESSAGE, number=40, message=frequency_cap.FrequencyCapEntry,
    )
    video_brand_safety_suitability = proto.Field(
        proto.ENUM,
        number=42,
        enum=brand_safety_suitability.BrandSafetySuitabilityEnum.BrandSafetySuitability,
    )
    vanity_pharma = proto.Field(proto.MESSAGE, number=44, message=VanityPharma,)
    selective_optimization = proto.Field(
        proto.MESSAGE, number=45, message=SelectiveOptimization,
    )
    optimization_goal_setting = proto.Field(
        proto.MESSAGE, number=54, message=OptimizationGoalSetting,
    )
    tracking_setting = proto.Field(
        proto.MESSAGE, number=46, message=TrackingSetting,
    )
    payment_mode = proto.Field(
        proto.ENUM,
        number=52,
        enum=gage_payment_mode.PaymentModeEnum.PaymentMode,
    )
    optimization_score = proto.Field(proto.DOUBLE, number=66, optional=True,)
    excluded_parent_asset_field_types = proto.RepeatedField(
        proto.ENUM,
        number=69,
        enum=asset_field_type.AssetFieldTypeEnum.AssetFieldType,
    )
    # The following fields belong to the 'campaign_bidding_strategy' oneof:
    # at most one of them can be set at a time.
    bidding_strategy = proto.Field(
        proto.STRING, number=67, oneof="campaign_bidding_strategy",
    )
    commission = proto.Field(
        proto.MESSAGE,
        number=49,
        oneof="campaign_bidding_strategy",
        message=bidding.Commission,
    )
    manual_cpc = proto.Field(
        proto.MESSAGE,
        number=24,
        oneof="campaign_bidding_strategy",
        message=bidding.ManualCpc,
    )
    manual_cpm = proto.Field(
        proto.MESSAGE,
        number=25,
        oneof="campaign_bidding_strategy",
        message=bidding.ManualCpm,
    )
    manual_cpv = proto.Field(
        proto.MESSAGE,
        number=37,
        oneof="campaign_bidding_strategy",
        message=bidding.ManualCpv,
    )
    maximize_conversions = proto.Field(
        proto.MESSAGE,
        number=30,
        oneof="campaign_bidding_strategy",
        message=bidding.MaximizeConversions,
    )
    maximize_conversion_value = proto.Field(
        proto.MESSAGE,
        number=31,
        oneof="campaign_bidding_strategy",
        message=bidding.MaximizeConversionValue,
    )
    target_cpa = proto.Field(
        proto.MESSAGE,
        number=26,
        oneof="campaign_bidding_strategy",
        message=bidding.TargetCpa,
    )
    target_impression_share = proto.Field(
        proto.MESSAGE,
        number=48,
        oneof="campaign_bidding_strategy",
        message=bidding.TargetImpressionShare,
    )
    target_roas = proto.Field(
        proto.MESSAGE,
        number=29,
        oneof="campaign_bidding_strategy",
        message=bidding.TargetRoas,
    )
    target_spend = proto.Field(
        proto.MESSAGE,
        number=27,
        oneof="campaign_bidding_strategy",
        message=bidding.TargetSpend,
    )
    percent_cpc = proto.Field(
        proto.MESSAGE,
        number=34,
        oneof="campaign_bidding_strategy",
        message=bidding.PercentCpc,
    )
    target_cpm = proto.Field(
        proto.MESSAGE,
        number=41,
        oneof="campaign_bidding_strategy",
        message=bidding.TargetCpm,
    )
| googleads/google-ads-python | google/ads/googleads/v8/resources/types/campaign.py | Python | apache-2.0 | 30,111 |
#!/usr/bin/env python
# file: fixfn.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Copyright © 2021 R.F. Smith <rsmith@xs4all.nl>
# SPDX-License-Identifier: MIT
# Created: 2021-12-26T09:19:01+0100
# Last modified: 2021-12-26T19:34:37+0100
"""Fix filenames by removing whitespace and ID numbers from filenames and
making them lower case."""
import argparse
import logging
import os
import re
import shutil
import sys
__version__ = "2021.12.26"
def main():
    """
    Entry point for fixfn.py: clean up the filenames given on the
    command line (remove IDs, dashes and whitespace, lower-case them)
    and rename the files accordingly.
    """
    args = setup()
    for path in args.files:
        # We only want to modify the filename itself, not its directory.
        origpath, fn = os.path.split(path)
        # Remove IDs from e.g. youtube at the end of the filename.
        newfn = re.sub(r"-\[?[0-9a-zA-Z_-]{6,11}\]?\.", ".", fn)
        if newfn != fn:
            logging.info(f"removed ID from “{fn}”")
        # Remove all dash-like Unicode characters surrounded by whitespace.
        # See https://jkorpela.fi/dashes.html
        dashes = (
            r"\s+[\-\u058A\u05BE\u1806\u2010-\u2015\u2053\u207B\u208B\u2212"
            r"\u2E3A\u2E3B\uFE58\uFE63\uFF0D]+\s+"
        )
        newfn, n = re.subn(dashes, "-", newfn)
        logging.info(f"replaced {n} instances of dashes in “{fn}”")
        # BUG FIX: the whitespace pattern must be a raw string; "\s" in a
        # plain string literal is an invalid escape sequence (deprecated,
        # a SyntaxError in future Python versions).
        newfn, m = re.subn(r"\s+", args.replacement, newfn)
        logging.info(f"replaced {m} instances of whitespace in “{fn}”")
        # Remove “waves” (_-_).
        newfn, p = re.subn("_-_", args.replacement, newfn)
        # Consistency fix: report wave replacements like the other passes;
        # the count ``p`` was previously computed but never used.
        logging.info(f"replaced {p} instances of waves in “{fn}”")
        if not args.nolower:
            newfn = newfn.lower()
        else:
            logging.info(f"not converting “{newfn}” to lower case")
        newpath = os.path.join(origpath, newfn)
        if newpath == path:
            logging.info(f"path “{path}” not modified")
            continue
        if args.dryrun:
            logging.info(f"“{path}” would be moved to “{newpath}”")
            continue
        try:
            shutil.move(path, newpath)
            logging.info(f"moved “{path}” to “{newpath}”")
        except PermissionError as e:
            logging.error(e)
def setup():
    """Program initialization: parse the command line and configure logging.

    :returns: the namespace with the parsed arguments
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("-v", "--version", action="version", version=__version__)
    repdefault, logdefault = "_", "warning"
    parser.add_argument(
        "-r", "--replacement", default=repdefault,
        help=f"character to replace whitespace with (defaults to “{repdefault}”)")
    parser.add_argument(
        "--log", default=logdefault,
        choices=["debug", "info", "warning", "error"],
        help=f"logging level (defaults to “{logdefault}”)")
    parser.add_argument(
        "-d", "--dry-run", dest="dryrun", action="store_true",
        help="perform a trial run with no changes made")
    parser.add_argument(
        "-n", "--no-lower", dest="nolower", action="store_true",
        help="do not convert to lower case")
    parser.add_argument(
        "files", metavar="file", nargs="*", help="one or more files to process")
    args = parser.parse_args(sys.argv[1:])
    # A dry run should report what would happen, so raise the log level to
    # at least "info" unless the user explicitly asked for debug output.
    if args.dryrun and args.log.upper() != "DEBUG":
        args.log = "info"
    logging.basicConfig(
        level=getattr(logging, args.log.upper(), None),
        format="%(levelname)s: %(message)s")
    if args.dryrun:
        logging.info("performing dry run")
    logging.debug(f"command line arguments = {sys.argv}")
    logging.debug(f"parsed arguments = {args}")
    return args
# Allow use both as an importable module and as a command-line script.
if __name__ == "__main__":
    main()
| rsmith-nl/scripts | fixfn.py | Python | mit | 3,698 |
#!/usr/bin/env python
################################################################################
#
# print_dependencies.py
#
#
# Copyright (c) 10/9/2009 Leo Goodstadt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
"""
print_dependencies.py
provides support for dependency trees
"""
#
# Number of pre-canned colour schemes
#
CNT_COLOUR_SCHEMES = 8
import types
import sys
try:
from StringIO import StringIO
except:
from io import StringIO
from .adjacent_pairs_iterate import adjacent_pairs_iterate
from collections import defaultdict
def _get_name (node):
"""
Get name for node
use display_name or _name
"""
if hasattr(node, "display_name"):
return node.display_name
elif hasattr(node, "_name"):
return node._name
else:
raise Exception("Unknown node type [%s] has neither _name or display_name" % str(node))
#_________________________________________________________________________________________
# Helper functions for dot format
#_________________________________________________________________________________________
def attributes_to_str(attributes, name):
    """Render *attributes* as a dot-format attribute list ``[k=v, ...];``.

    Mutates *attributes* in place: fills in ``label`` (from *name* when no
    explicit label is given), splices in the pseudo-attributes
    ``label_prefix`` / ``label_suffix`` and re-quotes the result.
    """
    # remove the ugly __main__. qualifier from task names
    clean_name = name.replace("__main__.", "")
    # an explicitly supplied label overrides the node name
    if "label" not in attributes:
        attributes["label"] = clean_name.replace(" before ", "\\nbefore ").replace(", ", ",\n")
    label = attributes["label"]
    # strip surrounding <...> (HTML label) or "..." quoting
    html_label = label[0] == '<'
    if html_label:
        label = label[1:-1]
    elif label[0] == '"':
        label = label[1:-1]
    # splice in the optional prefix/suffix, consuming those pseudo-attributes
    label = (attributes.pop("label_prefix", "")
             + label
             + attributes.pop("label_suffix", ""))
    # restore the appropriate quoting
    attributes["label"] = ('<' + label + '>') if html_label else ('"' + label + '"')
    body = ", ".join("%s=%s" % item for item in sorted(attributes.items()))
    return "[" + body + "];\n"
#_________________________________________________________________________________________
#
# get_arrow_str_for_legend_key
#_________________________________________________________________________________________
def get_arrow_str_for_legend_key(from_task_type, to_task_type, n1, n2, colour_scheme):
    """Return the dot-format arrow line(s) joining two legend key nodes."""
    arrow = "%s -> %s[color=%s, arrowtype=normal];\n"
    if "Vicious cycle" in (from_task_type, to_task_type):
        # vicious cycles are shown as a pair of opposing arrows
        line_colour = colour_scheme["Vicious cycle"]["linecolor"]
        return arrow % (n1, n2, line_colour) + arrow % (n2, n1, line_colour)
    will_run = ("Final target", "Task to run",
                "Up-to-date task forced to rerun",
                "Explicitly specified task")
    if from_task_type in will_run:
        return arrow % (n1, n2, colour_scheme["Task to run"]["linecolor"])
    # "Up-to-date task" / "Down stream" / "Up-to-date Final target" -- and,
    # defensively, anything unexpected -- use the up-to-date colour
    return arrow % (n1, n2, colour_scheme["Up-to-date"]["linecolor"])
#_________________________________________________________________________________________
#
# get_default_colour_scheme
#_________________________________________________________________________________________
def get_default_colour_scheme(default_colour_scheme_index=0):
    """
    A selection of default colour schemes "inspired" by entries in
    http://kuler.adobe.com/#create/fromacolor
    """
    # Palette per scheme index, as a tuple of
    # (bluey_outline, bluey, greeny_outline, greeny, orangey,
    #  orangey_outline, ruddy).  ``None`` for orangey_outline means
    # "same as greeny_outline".
    palettes = {
        0: ('"#0044A0"', '"#EBF3FF"', '"#006000"', '"#B8CC6E"',
            '"#EFA03B"', None, '"#FF3232"'),
        1: ('"#000DDF"', 'transparent', '"#4B8C2E"', '"#9ED983"',
            '"#D98100"', '"#D9D911"', '"#D93611"'),
        2: ('"#4A64A5"', 'transparent', '"#4A92A5"', '"#99D1C1"',
            '"#D2C24A"', None, '"#A54A64"'),
        3: ('"#BFB5FF"', 'transparent', '"#7D8A2E"', '"#C9D787"',
            '"#FFF1DC"', None, '"#FF3E68"'),
        4: ('"#004460"', 'transparent', '"#4B6000"', '"#B8CC6E"',
            '"#FFF0A3"', None, '"#F54F29"'),
        5: ('"#1122FF"', '"#AABBFF"', '"#007700"', '"#44FF44"',
            '"#EFA03B"', '"#FFCC3B"', '"#FF0000"'),
        6: ('"#0044A0"', '"#EBF3FF"', 'black', '"#6cb924"',
            '"#ece116"', None, '"#FF3232"'),
    }
    # any other index falls back to this palette
    fallback = ('"#87BAE4"', 'transparent', '"#87B379"', '"#D3FAE3"',
                '"#FDBA40"', None, '"#b9495e"')
    (bluey_outline, bluey, greeny_outline, greeny,
     orangey, orangey_outline, ruddy) = palettes.get(default_colour_scheme_index, fallback)
    if orangey_outline is None:
        orangey_outline = greeny_outline
    scheme = defaultdict(dict)
    scheme["Vicious cycle"].update(linecolor=ruddy, fillcolor=ruddy,
                                   fontcolor='white', color="white", dashed=0)
    scheme["Pipeline"]["fontcolor"] = ruddy
    scheme["Key"].update(fontcolor="black", fillcolor='"#F6F4F4"')
    scheme["Task to run"].update(linecolor=bluey_outline, fillcolor=bluey,
                                 fontcolor=bluey_outline, color=bluey_outline, dashed=0)
    scheme["Up-to-date"]["linecolor"] = "gray"
    scheme["Final target"].update(fillcolor=orangey, fontcolor="black",
                                  color="black", dashed=0)
    scheme["Up-to-date task"].update(fillcolor=greeny, fontcolor=greeny_outline,
                                     color=greeny_outline, dashed=0)
    scheme["Down stream"].update(fillcolor="white", fontcolor="gray",
                                 color="gray", dashed=0)
    scheme["Explicitly specified task"].update(fillcolor="transparent", fontcolor="black",
                                               color="black", dashed=0)
    scheme["Up-to-date task forced to rerun"].update(fillcolor='transparent',
                                                     fontcolor=bluey_outline,
                                                     color=bluey_outline, dashed=1)
    scheme["Up-to-date Final target"].update(fillcolor=orangey, fontcolor=orangey_outline,
                                             color=orangey_outline, dashed=0)
    # scheme 6 additionally overrides a handful of entries
    if default_colour_scheme_index == 6:
        scheme["Vicious cycle"]["fontcolor"] = 'black'
        scheme["Task to run"]["fillcolor"] = '"#5f52ee"'
        scheme["Task to run"]["fontcolor"] = "lightgrey"
        scheme["Up-to-date Final target"]["fontcolor"] = '"#EFA03B"'
    return scheme
#_________________________________________________________________________________________
#
# get_dot_format_for_task_type
#_________________________________________________________________________________________
def get_dot_format_for_task_type(task_type, attributes, colour_scheme, used_formats):
    """Fill *attributes* with the colours and style for *task_type*.

    Also records the task type in *used_formats* so the legend only shows
    the types actually drawn.
    """
    used_formats.add(task_type)
    entry = colour_scheme[task_type]
    attributes.update(
        fontcolor=entry["fontcolor"],
        fillcolor=entry["fillcolor"],
        color=entry["color"],
        style="dashed" if entry["dashed"] else "filled",
    )
#_________________________________________________________________________________________
#
# write_legend_key
#_________________________________________________________________________________________
def write_legend_key (stream, used_task_types, minimal_key_legend, colour_scheme, key_name = "Key:", subgraph_index = 1):
    """
    Write the legend/key subgraph of the dependency-tree graph to *stream*.

    Draws one legend node per task type in *used_task_types* (or all types
    when *minimal_key_legend* is false) and joins consecutive keys with
    arrows in the appropriate colours.  Does nothing if no task types were
    used.
    """
    if not len(used_task_types):
        return
    stream.write( 'subgraph clusterkey%d\n' % subgraph_index)
    stream.write( '{\n')
    stream.write( 'rank="min";\n')
    stream.write( 'style=filled;\n')
    stream.write( 'fontsize=20;\n')
    stream.write( 'color=%s;\n' % (colour_scheme["Key"]["fillcolor"]))
    stream.write( 'label = "%s";\n' % key_name)
    stream.write( 'fontcolor = %s;' % (colour_scheme["Key"]["fontcolor"]))
    stream.write( 'node[margin="0.2,0.2", fontsize="14"];\n')
    #
    #   Only include used task types
    #
    all_task_types = [
        "Vicious cycle"                     ,
        "Down stream"                       ,
        "Up-to-date task"                   ,
        "Explicitly specified task"         ,
        "Task to run"                       ,
        "Up-to-date task forced to rerun"   ,
        "Up-to-date Final target"           ,
        "Final target"                      ,]
    # NOTE: this |= mutates the *caller's* set when the full legend is wanted
    if not minimal_key_legend:
        used_task_types |= set(all_task_types)
    # display names: same entries, with line breaks for the longer labels
    wrapped_task_types = [
        "Vicious cycle"                     ,
        "Down stream"                       ,
        "Up-to-date task"                   ,
        "Explicitly specified task"         ,
        "Task to run"                       ,
        "Up-to-date task\\nforced to rerun" ,
        "Up-to-date\\nFinal target"         ,
        "Final target"                      ,]
    wrapped_task_types = dict(zip(all_task_types, wrapped_task_types))
    # write a single legend node (key) in dot format for the given task type
    def outputkey (key, task_type, stream):
        ignore_used_task_types = set()
        attributes = dict()
        attributes["shape"]="box3d"
        #attributes["shape"] = "rect"
        get_dot_format_for_task_type (task_type, attributes, colour_scheme, ignore_used_task_types)
        #attributes["fontsize"] = '15'
        stream.write(key + attributes_to_str(attributes, wrapped_task_types[task_type]))
    # keep the legend entries in canonical (all_task_types) order
    sorted_used_task_types = []
    for t in all_task_types:
        if t in used_task_types:
            sorted_used_task_types.append(t)
    # print first key type
    outputkey("k1_%d" % subgraph_index, sorted_used_task_types[0], stream)
    for i, (from_task_type, to_task_type) in enumerate(adjacent_pairs_iterate(sorted_used_task_types)):
        from_key = 'k%d_%d' % (i + 1, subgraph_index)
        to_key = 'k%d_%d' % (i + 2, subgraph_index)
        # write key
        outputkey(to_key, to_task_type, stream)
        # connection between keys
        stream.write(get_arrow_str_for_legend_key (from_task_type, to_task_type, from_key, to_key, colour_scheme))
    stream.write("}\n")
#_________________________________________________________________________________________
# write_colour_scheme_demo_in_dot_format
#_________________________________________________________________________________________
def write_colour_scheme_demo_in_dot_format(stream):
    """Write a dot graph demonstrating every pre-canned colour scheme."""
    preamble = (
        'digraph "Colour schemes"\n{\n'
        'size="8,11";\n'
        'splines=true;\n'
        'fontsize="30";\n'
        'ranksep = 0.3;\n'
        'node[fontsize="20"];\n'
    )
    stream.write(preamble)
    for scheme_index in range(CNT_COLOUR_SCHEMES):
        scheme = get_default_colour_scheme(scheme_index)
        # NOTE: set("test") is deliberate here -- it yields the character
        # set {'t', 'e', 's'}, which is non-empty, forcing the full legend
        # to be drawn for every scheme.
        write_legend_key(stream, set("test"), False, scheme,
                         "Colour Scheme %d" % scheme_index, scheme_index)
    stream.write("}\n")
#_________________________________________________________________________________________
# write_flowchart_in_dot_format
#_________________________________________________________________________________________
def write_flowchart_in_dot_format( jobs_to_run,
                                   up_to_date_jobs,
                                   dag_violating_edges,
                                   dag_violating_nodes,
                                   byte_stream,
                                   target_jobs,
                                   forced_to_run_jobs = [],
                                   all_jobs = None,
                                   vertical = True,
                                   skip_uptodate_tasks = False,
                                   no_key_legend = False,
                                   minimal_key_legend = True,
                                   user_colour_scheme = None,
                                   pipeline_name = "Pipeline:"):
    """
    Write the pipeline dependency graph to *byte_stream* in dot format.

    jobs_to_run          : pipeline jobs which are not up to date or have
                           dependencies which are not up to date
    vertical             : print flowchart vertically
    minimal_key_legend   : only print used task types in key legend
    user_colour_scheme   : dictionary for overriding default colours.
                           Colours can be names e.g. "black" or quoted hex
                           e.g. '"#F6F4F4"'.  Default values will be used
                           unless specified.

        key = "colour_scheme_index" : index of default colour scheme

        key = "Final target" / "Explicitly specified task" / "Task to run" /
              "Down stream" / "Up-to-date Final target" /
              "Up-to-date task forced to rerun" / "Up-to-date task" /
              "Vicious cycle"
            Specifies colours for each task type.
            Subkeys = "fillcolor", "fontcolor", "color", "dashed" (0/1)

        key = "Vicious cycle" / "Task to run" / "Up-to-date"
            Subkey = "linecolor"
            Specifies colours for arrows between tasks.

        key = "Pipeline"
            Subkey = "fontcolor"  -- flowchart title colour

        key = "Key"
            Subkeys = "fontcolor", "fillcolor"  -- legend colours

    Note: ``forced_to_run_jobs = []`` is a mutable default, but it is only
    read (membership tests), never mutated, so it is harmless here.
    """
    # merge any user overrides on top of the selected default colour scheme
    if user_colour_scheme is None:
        colour_scheme = get_default_colour_scheme()
    else:
        if "colour_scheme_index" in user_colour_scheme:
            colour_scheme_index = user_colour_scheme["colour_scheme_index"]
        else:
            colour_scheme_index = 0
        colour_scheme = get_default_colour_scheme(colour_scheme_index)
        for k, v in user_colour_scheme.items():
            # unknown top-level keys are silently ignored
            if k not in colour_scheme:
                continue
            if isinstance(v, dict):
                colour_scheme[k].update(v)
            else:
                colour_scheme[k] = v
    up_to_date_jobs = set(up_to_date_jobs)
    #
    #   cases where child points back to ancestor
    #
    dag_violating_dependencies = set(dag_violating_edges)
    stream = StringIO()
    stream.write( 'digraph "%s"\n{\n' % pipeline_name)
    stream.write( 'size="8,11";\n')
    stream.write( 'splines=true;\n')
    stream.write( 'fontsize="30";\n')
    stream.write( 'ranksep = 0.3;\n')
    stream.write( 'node[fontsize="20"];\n')
    stream.write( 'graph[clusterrank="local"];\n')
    if not vertical:
        stream.write( 'rankdir="LR";\n')
    stream.write( 'subgraph clustertasks\n'
                  "{\n")
    stream.write( 'rank="min";\n')
    stream.write( 'fontcolor = %s;\n' % colour_scheme["Pipeline"]["fontcolor"])
    stream.write( 'label = "%s";\n' % pipeline_name)
    #if vertical:
    #    stream.write( 'edge[minlen=2];\n')
    # node/edge strings to be emitted after the main pass (so they do not
    # constrain the graph layout)
    delayed_task_strings = list()
    vicious_cycle_task_strings = list()
    #
    #   all jobs should be specified
    #       this is a bad fall-back
    #       because there is no guarantee that we are printing what we want to print
    if all_jobs is None:
        # NOTE(review): ``node`` is not defined anywhere in this module, so
        # this fall-back raises NameError at runtime -- callers must always
        # pass ``all_jobs``.  TODO: confirm the intended source of ``node``.
        all_jobs = node.all_nodes
    used_task_types = set()
    #
    #   defined duplicately in graph. Bad practice
    #
    _one_to_one = 0
    _many_to_many = 1
    _one_to_many = 2
    _many_to_one = 3
    for n in all_jobs:
        attributes = dict()
        attributes["shape"]="box3d"
        #attributes["shape"] = "rect"
        # one-to-many / many-to-one tasks get distinctive node shapes
        if hasattr(n, "single_multi_io"):
            if n.single_multi_io == _one_to_many:
                attributes["shape"] = "house"
                attributes["peripheries"] = 2
            elif n.single_multi_io == _many_to_one:
                attributes["shape"] = "invhouse"
                attributes["height"] = 1.1
                attributes["peripheries"] = 2
        #
        #   circularity violating DAG: highlight in red
        #
        if n in dag_violating_nodes:
            get_dot_format_for_task_type ("Vicious cycle", attributes, colour_scheme, used_task_types)
            vicious_cycle_task_strings.append('t%d' % n._node_index + attributes_to_str(attributes, _get_name(n)))
        #
        #   these jobs will be run
        #
        elif n in jobs_to_run:
            #
            #   up to date but forced to run: outlined in blue
            #
            if n in forced_to_run_jobs:
                get_dot_format_for_task_type ("Explicitly specified task", attributes, colour_scheme, used_task_types)
            #
            #   final target: outlined in orange
            #
            elif n in target_jobs:
                get_dot_format_for_task_type("Final target", attributes, colour_scheme, used_task_types)
            #
            #   up to date dependency but forced to run: outlined in green
            #
            elif n in up_to_date_jobs:
                get_dot_format_for_task_type ("Up-to-date task forced to rerun" , attributes, colour_scheme, used_task_types)
            else:
                get_dot_format_for_task_type ("Task to run", attributes, colour_scheme, used_task_types)
            #
            #   graphviz attributes override other definitions
            #       presume the user knows what she is doing!
            #
            if(hasattr(n,'graphviz_attributes')):
                for k in n.graphviz_attributes:
                    attributes[k]=n.graphviz_attributes[k]
            stream.write('t%d' % n._node_index + attributes_to_str(attributes, _get_name(n)))
        else:
            #
            #   these jobs are up to date and will not be re-run
            #
            if not skip_uptodate_tasks:
                if n in target_jobs:
                    get_dot_format_for_task_type ("Up-to-date Final target" , attributes, colour_scheme, used_task_types)
                elif n in up_to_date_jobs:
                    get_dot_format_for_task_type ("Up-to-date task" , attributes, colour_scheme, used_task_types)
                #
                #   these jobs will be ignored: gray with gray dependencies
                #
                else:
                    get_dot_format_for_task_type ("Down stream" , attributes, colour_scheme, used_task_types)
                    delayed_task_strings.append('t%d' % n._node_index + attributes_to_str(attributes, _get_name(n)))
                    for o in n._get_inward():
                        delayed_task_strings.append('t%d -> t%d[color=%s, arrowtype=normal];\n' % (o._node_index, n._node_index, colour_scheme["Up-to-date"]["linecolor"]))
                    continue
                #
                #   graphviz attributes override other definitions
                #       presume the user knows what she is doing!
                #
                if(hasattr(n,'graphviz_attributes')):
                    for k in n.graphviz_attributes:
                        attributes[k]=n.graphviz_attributes[k]
                stream.write('t%d' % n._node_index + attributes_to_str(attributes, _get_name(n)))
        #
        #   write edges
        #
        # ``unconstrained`` flips to True once a "task to run" edge has been
        # written; later up-to-date edges for the same node are then delayed
        # so they do not constrain the layout.
        unconstrained = False
        for o in sorted(n._get_inward(), reverse=True, key = lambda x: x._node_index):
            #
            #   circularity violating DAG: highlight in red: should never be a constraint
            #       in drawing the graph
            #
            if (n, o) in dag_violating_dependencies:
                constraint_str = ", constraint=false" if o._node_index > n._node_index else ""
                vicious_cycle_task_strings.append('t%d -> t%d[color=%s %s];\n' % (o._node_index, n._node_index, colour_scheme["Vicious cycle"]["linecolor"], constraint_str))
                continue
            elif not o in jobs_to_run or not n in jobs_to_run:
                if not skip_uptodate_tasks:
                    edge_str = 't%d -> t%d[color=%s, arrowtype=normal];\n' % (o._node_index, n._node_index, colour_scheme["Up-to-date"]["linecolor"])
                    if unconstrained:
                        delayed_task_strings.append(edge_str)
                    else:
                        stream.write(edge_str)
            else:
                stream.write('t%d -> t%d[color=%s];\n' % (o._node_index, n._node_index, colour_scheme["Task to run"]["linecolor"]))
                unconstrained = True
    for l in delayed_task_strings:
        stream.write(l)
    #
    #   write vicious cycle at end so not constraint in drawing graph
    #
    for l in vicious_cycle_task_strings:
        stream.write(l)
    stream.write( '}\n')
    if not no_key_legend:
        write_legend_key (stream, used_task_types, minimal_key_legend, colour_scheme)
    stream.write("}\n")
    ss = stream.getvalue().encode()
    byte_stream.write(ss)
    stream.close()
| jigneshvasoya/ruffus | ruffus/print_dependencies.py | Python | mit | 26,918 |
"""myproject
"""
__author__ = 'myproject:author_name'
__email__ = 'myproject:author_email'
#----------------------------------------------------------------------
def hello_world(extend_hello=False):
    """Print a hello-world greeting.

    :param extend_hello: when True, append ' Beautiful World!' to the greeting
    :returns: None
    :rtype: None
    """
    # BUG FIX: the original used the Python-2-only ``print`` statement,
    # which is a SyntaxError on Python 3.  With a single parenthesised
    # argument, ``print(...)`` behaves identically on both versions.
    print('Hello World!{}'.format(' Beautiful World!' if extend_hello else ''))
| diszgaurav/projecture | projecture/projects/python/myproject/myproject/myproject.py | Python | mit | 353 |
# App Engine configuration hook: executed by the runtime at instance start-up.
from google.appengine.ext import vendor
# Add any third-party libraries installed in the "lib" folder to sys.path.
vendor.add('lib')
| alexeikostevich/python-blog | appengine_config.py | Python | unlicense | 108 |
import functools
import logging
import json
from django.http import HttpResponse, HttpResponseForbidden, Http404
from django.core import exceptions as django_exceptions
from django.contrib.auth.models import User
# Slightly modified copy of:
# https://github.com/ASKBOT/askbot-devel/blob/85a833860e8915474abbbcb888ab99a1c2300e2c/askbot/utils/decorators.py
def get_only(view_func):
    """Decorator restricting a view to GET requests.

    Any other HTTP method raises django's ``PermissionDenied``.
    """
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        if request.method == 'GET':
            return view_func(request, *args, **kwargs)
        raise django_exceptions.PermissionDenied(
            'request method %s is not supported for this function' %
            request.method
        )
    return wrapper
def post_only(view_func):
    """Decorator restricting a view to POST requests.

    Any other HTTP method raises django's ``PermissionDenied``.
    """
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        if request.method == 'POST':
            return view_func(request, *args, **kwargs)
        raise django_exceptions.PermissionDenied(
            'request method %s is not supported for this function' %
            request.method
        )
    return wrapper
def ajax_only(view_func):
    """Decorator for JSON AJAX views (Python 2 syntax -- note the
    ``except Exception, e`` clause below, which is a SyntaxError on
    Python 3).

    Non-AJAX requests get a 404.  Exceptions raised by the view are turned
    into a JSON payload ``{'message': ..., 'success': 0}``; a successful
    dict result is returned as JSON with ``'success'`` set to 1.
    """
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        if not request.is_ajax():
            raise Http404
        try:
            data = view_func(request, *args, **kwargs)
            # a view returning None means "empty payload"
            if data is None:
                data = {}
        except Exception, e:
            #todo: also check field called "message"
            # Django ValidationError carries a ``messages`` list; render
            # multiple messages as an HTML list, a single one verbatim.
            if hasattr(e, 'messages'):
                if len(e.messages) > 1:
                    message = u'<ul>' + \
                        u''.join(
                            map(lambda v: u'<li>%s</li>' % v, e.messages)
                        ) + \
                        u'</ul>'
                else:
                    message = e.messages[0]
            else:
                message = unicode(e)
            if message == '':
                message = 'Oops, apologies - there was some error'
            logging.debug(message)
            data = {
                'message': message,
                'success': 0
            }
            return HttpResponse(json.dumps(data), content_type='application/json')
        if isinstance(data, HttpResponse):#is this used?
            data.mimetype = 'application/json'
            return data
        else:
            data['success'] = 1
            json_data = json.dumps(data)
            return HttpResponse(json_data, content_type='application/json')
    return wrapper
# NOTE(review): this looks like a Django management-command ``handle``
# method pasted at module level -- ``Site``, ``reverse``, ``mail`` and
# ``ValidationError`` are not imported in this module, so calling it raises
# NameError.  TODO: move it into its Command class (or delete it).
def handle(self, *args, **options):
    # Email every active student-profile user a link to update their profile.
    domain = Site.objects.get_current().domain
    full_url = "https://{}{}".format(domain,
                                     reverse('edit_common_profile'))
    for user in User.objects.filter(common_profile__profile_type='S',
                                    is_active=True).exclude(email=""):
        try:
            mail.send([user.email],
                      template="update_common_profile",
                      context={'user': user, 'full_url': full_url})
            self.stdout.write(u'Emailed {}.'.format(user.email))
        except ValidationError:
            # e.g. an invalid address -- report and continue with the rest
            self.stdout.write(u'Error with {}'.format(user.email))
# NOTE(review): exact duplicate of ``get_only`` defined at the top of this
# module; this second definition simply rebinds the same name to an
# identical implementation.  Candidate for removal.
def get_only(view_func):
    @functools.wraps(view_func)
    def wrapper(request, *args, **kwargs):
        if request.method != 'GET':
            raise django_exceptions.PermissionDenied(
                'request method %s is not supported for this function' % \
                request.method
            )
        return view_func(request, *args, **kwargs)
    return wrapper
| osamak/wikiproject-med | core/decorators.py | Python | agpl-3.0 | 3,638 |
# IDE intention-test fixture ("after" state): the loop target is annotated
# with a trailing '# type:' comment.  The code itself must stay untouched.
def func():
    for var in 'spam': # type: [str]
        var
| jwren/intellij-community | python/testData/intentions/PyAnnotateVariableTypeIntentionTest/typeCommentLocalForTarget_after.py | Python | apache-2.0 | 62 |
'''
Created on May 19, 2015
@author: joep
'''
import pygame
from game.Game import Game
# Start the game; guarantee pygame shuts down cleanly on any error.
if __name__ == "__main__":
    pygame.init()
    try:
        game = Game()
        game.run()
    except:
        # Tear the pygame window down before propagating the error
        # (the bare except deliberately also catches KeyboardInterrupt).
        pygame.quit()
        raise
# -*- coding: utf-8 -*-
import inspect
import os
import pytest
from zirkon.toolbox.compose import ArgumentStore, Composer, compose
class Alpha(object):
    """Simple sample object holding an ``x``/``y`` pair (``y`` defaults to 10)."""
    def __init__(self, x, y=10):
        self.x = x
        self.y = y
    def __repr__(self):
        return "{}(x={!r}, y={!r})".format(type(self).__name__, self.x, self.y)
class Beta(Alpha):
    """Alpha variant constructed from a single ``(x, y)`` pair."""
    def __init__(self, xy):
        # forward to Alpha.__init__ first, then re-assign by unpacking
        # (mirrors the original's redundant double assignment)
        super().__init__(xy[0], xy[1])
        self.x, self.y = xy
    @classmethod
    def build(cls, xx, yy):
        """Alternate constructor from two separate coordinates."""
        return cls((xx, yy))
    def __repr__(self):
        return "{}(x={!r}, y={!r})".format(type(self).__name__, self.x, self.y)
def gamma(a, b, c):
    """Return the three arguments collected into a list."""
    return list((a, b, c))
def fab(a, b):
    """Return ``[a, b]``."""
    return [*(a, b)]
def fbc(b, c):
    """Return ``[b, c]``."""
    return [*(b, c)]
def _check_objects(objects):
    """Assert the composer built the expected Alpha/Beta/list instances."""
    assert isinstance(objects[0], Alpha)
    assert (objects[0].x, objects[0].y) == (11, 10)
    assert isinstance(objects[1], Beta)
    assert (objects[1].x, objects[1].y) == (13, 14)
    assert isinstance(objects[2], Beta)
    assert (objects[2].x, objects[2].y) == (23, 24)
    assert isinstance(objects[3], list)
    assert objects[3] == [100, 200, 300]
def _check_actual_arguments(actual_arguments):
kwlist = list(actual_arguments.items())
assert kwlist[0] == ('x', 11)
assert kwlist[1] == ('xy', (13, 14))
assert kwlist[2] == ('xx', 23)
assert kwlist[3] == ('yy', 24)
assert kwlist[4] == ('a', 100)
assert kwlist[5] == ('b', 200)
assert kwlist[6] == ('c', 300)
def _check_actual_arguments_y(actual_arguments):
kwlist = list(actual_arguments.items())
assert kwlist[0] == ('x', 11)
assert kwlist[1] == ('y', 10)
assert kwlist[2] == ('xy', (13, 14))
assert kwlist[3] == ('xx', 23)
assert kwlist[4] == ('yy', 24)
assert kwlist[5] == ('a', 100)
assert kwlist[6] == ('b', 200)
assert kwlist[7] == ('c', 300)
@pytest.fixture()
def composer():
    # Composer over the sample factories: Alpha, Beta, Beta.build and gamma.
    return Composer(Alpha, Beta, Beta.build, gamma)
def fsub(item_min, item_max, item_type):
    """Return ``[item_type, item_min, item_max]``."""
    return list((item_type, item_min, item_max))
@pytest.fixture()
def subcomposer():
    # Composer over the single sub-factory fsub.
    return Composer(fsub)
def _check_sub_objects(objects):
assert isinstance(objects[0], list)
assert objects[0][0] == 'int'
assert objects[0][1] == 5
assert objects[0][2] == 10
def _check_actual_sub_arguments(actual_arguments):
kwlist = list(actual_arguments.items())
assert kwlist[0] == ('item_min', 5)
assert kwlist[1] == ('item_max', 10)
assert kwlist[2] == ('item_type', 'int')
# --- ArgumentStore unit tests ---------------------------------------------
def test_ArgumentStore():
    # a freshly created store is empty
    argument_store = ArgumentStore()
    assert not tuple(argument_store)
def test_ArgumentStore_get_used():
    # get() returns the value and marks the argument as used
    argument_store = ArgumentStore({'x': 10})
    assert tuple(argument_store)
    assert not argument_store.get_used('x')
    assert argument_store.get('x') == 10
    assert argument_store.get_used('x')
def test_ArgumentStore_set_used():
    # set_used() flags an argument without reading its value
    argument_store = ArgumentStore({'x': 10})
    assert not argument_store.get_used('x')
    argument_store.set_used('x')
    assert argument_store.get_used('x')
def test_ArgumentStore_update():
    # update() adds values for names, including ones flagged beforehand
    argument_store = ArgumentStore()
    argument_store.set_used('x')
    argument_store.update({'x': 10, 'y': 20})
    assert argument_store.get('x') == 10
    assert argument_store.get('y') == 20
def test_ArgumentStore_eq():
    # two stores with equal contents compare equal
    as1 = ArgumentStore({'x': 10})
    as2 = ArgumentStore({'x': 10})
    assert as1 == as2
def test_ArgumentStore_eq_dct():
    # a store also compares equal to a plain dict with the same contents
    as1 = ArgumentStore({'x': 10})
    dct2 = {'x': 10}
    assert as1 == dct2
# --- Composer direct-call tests -------------------------------------------
def test_Composer_empty():
    # a Composer with no factories can be constructed
    Composer()
def test_Composer_simple_ok(composer):
    # keyword arguments are routed to each factory in definition order
    actual_arguments, objects = composer(a=100, x=11, c=300, xy=(13, 14), yy=24, b=200, xx=23)
    _check_actual_arguments(actual_arguments)
    _check_objects(objects)
def test_Composer_simple_missing(composer):
    # omitting a required factory argument (xx for Beta.build) raises TypeError
    with pytest.raises(TypeError) as exc_info:
        actual_arguments, objects = composer(x=11, xy=(13, 14), yy=24, a=100, b=200, c=300, y=10)
    assert str(exc_info.value) == "build: missing required argument xx"
def test_Composer_simple_unexpected(composer):
    # unknown keyword arguments raise TypeError naming the offenders
    with pytest.raises(TypeError) as exc_info:
        actual_arguments, objects = composer(x=11, xy=(13, 14), xx=23, zz=45, yy=24, a=100, b=200, c=300)
    assert str(exc_info.value) == "unexpected arguments: zz=45"
def test_Composer_partial_unexpected_arg(composer):
    """partial() tolerates extra arguments; verify_argument_store then
    reports the unused ones.

    BUG FIX: this test was originally named ``test_Composer_partial`` --
    identical to the function defined immediately below it -- so it was
    shadowed and never collected by pytest.  Renamed so both tests run.
    """
    arguments = dict(x=11, xy=(13, 14), xx=23, zz=45, yy=24, a=100, b=200, c=300)
    argument_store = ArgumentStore(arguments)
    actual_arguments, objects = composer.partial(argument_store)
    _check_actual_arguments(actual_arguments)
    _check_objects(objects)
    with pytest.raises(TypeError) as exc_info:
        composer.verify_argument_store(argument_store)
    assert str(exc_info.value) == "unexpected arguments: zz=45"
def test_Composer_partial(composer, subcomposer):
arguments = dict(x=11, xy=(13, 14), xx=23, item_max=10, item_min=5, zz=45, item_type='int', yy=24, a=100, b=200, c=300)
argument_store = ArgumentStore(arguments)
actual_arguments, objects = composer.partial(argument_store)
_check_actual_arguments(actual_arguments)
_check_objects(objects)
sub_actual_arguments, sub_objects = subcomposer.partial(argument_store)
_check_actual_sub_arguments(sub_actual_arguments)
_check_sub_objects(sub_objects)
with pytest.raises(TypeError) as exc_info:
composer.verify_argument_store(argument_store)
assert str(exc_info.value) == "unexpected arguments: zz=45"
def test_Composer_sub(composer):
    """A sub-composer consumes arguments carrying the 'sub_' prefix."""
    arguments = dict(x=11, xy=(13, 14), xx=23, yy=24, a=100, b=200, c=300, sub_a=111, sub_b=222, sub_c=333)
    argument_store = ArgumentStore(arguments)
    actual_arguments, objects = composer.partial(argument_store)
    _check_objects(objects)
    sub_composer = Composer(fab, fbc)
    sub_actual_arguments, sub_objects = sub_composer.partial(argument_store, prefix='sub_')
    assert sub_objects[0] == [111, 222]
    assert sub_objects[1] == [222, 333]
    # Everything was consumed, so verification passes without raising.
    composer.verify_argument_store(argument_store)
def test_Composer_sub_missing(composer):
    """A missing prefixed argument ('sub_b') raises TypeError from fab."""
    arguments = dict(x=11, xy=(13, 14), xx=23, yy=24, a=100, b=200, c=300, sub_a=111, sub_c=333)
    argument_store = ArgumentStore(arguments)
    actual_arguments, objects = composer.partial(argument_store)
    _check_objects(objects)
    sub_composer = Composer(fab, fbc)
    with pytest.raises(TypeError) as exc_info:
        sub_actual_arguments, sub_objects = sub_composer.partial(argument_store, prefix='sub_')
    assert str(exc_info.value) == "fab: missing required argument b"
def test_Composer_sub_unexpected(composer):
    """Unconsumed arguments, prefixed or not, are all reported by verify."""
    arguments = dict(x=11, xy=(13, 14), xx=23, yy=24, zz=45, a=100, b=200, c=300, sub_a=111, sub_b=222, sub_c=333, sub_d=444)
    argument_store = ArgumentStore(arguments)
    actual_arguments, objects = composer.partial(argument_store)
    _check_objects(objects)
    sub_composer = Composer(fab, fbc)
    sub_actual_arguments, sub_objects = sub_composer.partial(argument_store, prefix='sub_')
    assert sub_objects[0] == [111, 222]
    assert sub_objects[1] == [222, 333]
    # Both the unprefixed 'zz' and the prefixed 'sub_d' are left over.
    with pytest.raises(TypeError) as exc_info:
        composer.verify_argument_store(argument_store)
    assert str(exc_info.value) == "unexpected arguments: sub_d=444, zz=45"
def test_ArgumentStore_split_merge():
    """split() extracts prefixed arguments; merge() folds usage info back."""
    d = dict(min_len=1, max_len=5, default=[5, 6, 7], item_min=3, item_max=18)
    argument_store = ArgumentStore(d)
    # split() strips the 'item_' prefix from the extracted keys.
    sub_argument_store = argument_store.split(prefix='item_')
    l = list(sub_argument_store.items())
    l.sort(key=lambda x: x[0])
    assert l[0][0] == 'max'
    assert l[0][1] == 18
    assert l[1][0] == 'min'
    assert l[1][1] == 3
    assert sub_argument_store.get('min') == 3
    assert argument_store.get('max_len') == 5
    argument_store.merge(sub_argument_store, prefix='item_')
    assert argument_store.get('default') == [5, 6, 7]
    # Only values read via get() count as used: 'min' was fetched from the
    # sub-store, 'max' was only seen through items().
    assert argument_store.get_used('item_min')
    assert not argument_store.get_used('item_max')
    assert not argument_store.get_used('min_len')
    assert argument_store.get_used('max_len')
    assert argument_store.get_used('default')
def test_composer_empty():
    """compose() can be called with no functions."""
    compose()
| simone-campagna/daikon | tests/unit/toolbox/test_compose.py | Python | apache-2.0 | 8,106 |
import warnings
from functools import wraps
def deprecated(func):
    """
    Decorator that emits a DeprecationWarning every time ``func`` is called.

    The wrapped callable forwards all positional and keyword arguments to
    ``func`` and returns its result unchanged; ``functools.wraps`` keeps
    the original function's metadata intact.
    """
    @wraps(func)
    def emit_warning_then_call(*args, **kwargs):
        # stacklevel=2 attributes the warning to the caller, not this wrapper.
        warnings.warn(
            "'{}' is deprecated".format(func.__name__),
            category=DeprecationWarning,
            stacklevel=2,
        )
        return func(*args, **kwargs)

    return emit_warning_then_call
| nerandell/vyked | vyked/utils/decorators.py | Python | mit | 356 |
import csv
from time import strftime
from django.core.urlresolvers import reverse_lazy
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView, DeleteView
from registration.models import Registration
from projects.models import Project, Team
from awards.models import (
LocalAward,
Nomination,
)
from .models import Location, Lead
from .forms import (
LocationForm,
CheckInFormSet,
SponsorFormSet,
LeadFormSet,
ResourceFormSet,
AwardFormSet,
NominationFormSet
)
def get_teams(location):
    """Return the ids of all teams whose members are registered at *location*."""
    teams = []
    for registration in Registration.objects.filter(location=location):
        # QuerySet.filter() never raises DoesNotExist (only get() does), so
        # the original try/except around a discarded filter() call was dead
        # code and issued the same query twice; iterate directly instead.
        for team in Team.objects.filter(user=registration.user):
            teams.append(team.id)
    return teams
def get_projects(location):
    """Return the project ids of all teams registered at *location*."""
    projects = []
    # Call get_teams() once; the original called it twice (truthiness check
    # plus iteration), doubling the registration/team queries.
    for team_id in get_teams(location):
        team = Team.objects.get(id=team_id)
        # team.project_id is the same value the original obtained by
        # re-fetching the Project row just to read back its id.
        projects.append(team.project_id)
    return projects
class Detail(DetailView):
    """Detail page for a Location, with its teams, awards and nominations."""
    model = Location
    def get_context_data(self, **kwargs):
        """Add related teams, local awards and nominations to the context."""
        context = super(Detail, self).get_context_data(**kwargs)
        context['related_teams'] = Team.objects.filter(
            id__in=get_teams(context['object'])).order_by('project')
        context['awards'] = LocalAward.objects.filter(
            location=context['object']
        )
        context['nomination'] = Nomination.objects.filter(location=self.object)
        return context
class List(ListView):
    """List all public (non-private) locations."""
    queryset = Location.objects.filter(private=False)
class Edit(UpdateView):
    """Edit a Location together with its sponsor, lead, resource, award and
    nomination inline formsets.

    Access is restricted (in ``render_to_response``) to the location's
    leads and superusers.
    """
    model = Location
    form_class = LocationForm
    def form_valid(self, form):
        """
        If the form is valid, save the associated model.
        """
        # All five formsets must validate together with the main form
        # before anything is saved.
        context = self.get_context_data()
        sponsor_form = context['sponsor_form']
        lead_form = context['lead_form']
        resource_form = context['resource_form']
        award_form = context['localaward_form']
        nomination_form = context['nomination_form']
        if (form.is_valid() and
            sponsor_form.is_valid() and
            lead_form.is_valid() and
            resource_form.is_valid() and
            award_form.is_valid() and
            nomination_form.is_valid()):
            # Save the location first, then point every formset at it.
            self.object = form.save()
            sponsor_form.instance = self.object
            sponsor_form.save()
            lead_form.instance = self.object
            lead_form.save()
            resource_form.instance = self.object
            resource_form.save()
            award_form.instance = self.object
            award_form.save()
            nomination_form.instance = self.object
            nomination_form.save()
            # messages.success(self.request, 'Your form has been saved!')
            return super(Edit, self).form_valid(form)
        else:
            return self.render_to_response(self.get_context_data(form=form))
    def get_context_data(self, **kwargs):
        """Build the five formsets, bound to POST data when present."""
        context = super(Edit, self).get_context_data(**kwargs)
        context['leads'] = Lead.objects.filter(location=self.object)
        # initial_awards = []
        # for i in LocalAward.objects.filter(location=self.object):
        #     initial_awards.append({'title': i.title, 'project': i.project.id})
        if self.request.POST:
            context['sponsor_form'] = SponsorFormSet(
                self.request.POST,
                self.request.FILES,
                prefix='sponsor',
                instance=self.object)
            context['lead_form'] = LeadFormSet(
                self.request.POST,
                prefix='lead',
                instance=self.object)
            context['resource_form'] = ResourceFormSet(
                self.request.POST,
                prefix='resource',
                instance=self.object)
            # Award/nomination formsets limit choices to this location's
            # projects via the extra `projects` argument.
            context['localaward_form'] = AwardFormSet(
                self.request.POST,
                projects=get_projects(self.object),
                instance=self.object,
                prefix='localaward')
            context['nomination_form'] = NominationFormSet(
                self.request.POST,
                projects=get_projects(self.object),
                instance=self.object,
                prefix='nomination')
        else:
            context['sponsor_form'] = SponsorFormSet(
                instance=self.object,
                prefix='sponsor')
            context['lead_form'] = LeadFormSet(
                instance=self.object,
                prefix='lead')
            context['resource_form'] = ResourceFormSet(
                instance=self.object,
                prefix='resource')
            context['localaward_form'] = AwardFormSet(
                instance=self.object,
                projects=get_projects(self.object),
                prefix='localaward')
            context['nomination_form'] = NominationFormSet(
                projects=get_projects(self.object),
                instance=self.object,
                prefix='nomination')
        return context
    def render_to_response(self, context, **response_kwargs):
        # Authorization gate: only this location's leads or superusers may
        # see the edit page.
        lead = Lead.objects.filter(
            location=self.object, lead=self.request.user)
        if lead or self.request.user.is_superuser:
            return super(Edit, self).render_to_response(context,
                                                        **response_kwargs)
        else:
            raise PermissionDenied
class Attendees(TemplateView):
    """List leads and registered attendees for a location.

    Restricted to the location's leads and superusers.
    """
    template_name = 'locations/location_attendees.html'
    model = Location
    def get_context_data(self, **kwargs):
        context = super(Attendees, self).get_context_data(**kwargs)
        location = Location.objects.get(slug=kwargs['slug'])
        context['leads'] = Lead.objects.filter(location=location)
        context['attendees'] = Registration.objects.filter(location=location)
        # Authorization is enforced here by raising from get_context_data
        # (after the queries above have already run).
        lead = Lead.objects.filter(location=location, lead=self.request.user)
        if lead or self.request.user.is_superuser:
            return context
        else:
            raise PermissionDenied
class Streaming(DetailView):
    """Streaming page for a single location."""
    model = Location
    template_name = 'locations/location_streaming.html'
class Sponsors(ListView):
    """List public locations on the sponsors template."""
    queryset = Location.objects.filter(private=False)
    # NOTE(review): `locations` also includes private locations and is not
    # referenced elsewhere in this class — confirm whether the template
    # relies on it before removing.
    locations = Location.objects.all()
    template_name = 'locations/location_sponsors.html'
class Visualize(ListView):
    """Visualization page over all public locations."""
    queryset = Location.objects.filter(private=False)
    template_name = 'locations/location_visualize.html'
class CSV(View):
    """Download the attendee list for a location as a CSV attachment.

    Only the location's leads and superusers may download the list.
    """

    def get(self, request, *args, **kwargs):
        location = Location.objects.get(slug=kwargs['slug'])
        # Authorize first: the original built the entire CSV before checking
        # permissions, doing all the query/encode work for requests that
        # were going to be rejected anyway. The outcome for unauthorized
        # users is unchanged (PermissionDenied).
        lead = Lead.objects.filter(
            location=location, lead=self.request.user)
        if not (lead or self.request.user.is_superuser):
            raise PermissionDenied
        attendees = Registration.objects.filter(location=location)
        # `content_type` replaces the `mimetype` argument, which was removed
        # in Django 1.7; the resulting response header is identical.
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = (
            'attachment; filename=%s %s Attendees.csv' % (
                strftime("%Y-%m-%dT%H:%M:%SZ"),
                location.slug))
        writer = csv.writer(response)
        writer.writerow([
            'Slug',
            'First Name',
            'Last Name',
            'Email',
            'Backgrounds',
            "City of Residence",
            "State/Province of Residence",
            "Country of Residence"])
        for registration in attendees:
            try:
                profile = registration.user.get_profile()
                backgrounds = ','.join(
                    [background.title
                     for background
                     in profile.background.all()]
                )
                city = profile.city
                state = profile.state
                country = profile.country
            except Exception:
                # Best-effort: a missing/broken profile falls back to blanks
                # rather than failing the whole export. (Was a bare
                # `except:`, which also swallowed SystemExit and
                # KeyboardInterrupt.)
                backgrounds = "Not Provided"
                city = ""
                state = ""
                country = ""
            writer.writerow([
                location.slug,
                registration.user.first_name.encode('utf-8'),
                registration.user.last_name.encode('utf-8'),
                registration.user.email.encode('utf-8'),
                backgrounds,
                city.encode('utf-8'),
                state.encode('utf-8'),
                country.encode('utf-8')])
        return response
class Related(View):
    """Return the absolute URLs of all projects related to a location."""
    def get(self, request, *args, **kwargs):
        location = Location.objects.get(slug=kwargs['slug'])
        projects = get_projects(location)
        # NOTE(review): HttpResponse concatenates the generator's items with
        # no separator, so the URLs are emitted back-to-back — confirm that
        # consumers expect this format.
        return HttpResponse(
            Project.objects.get(id=i).get_absolute_url() for i in projects
        )
| nasa/39A | spaceapps/locations/views.py | Python | apache-2.0 | 9,416 |
from django.db import models
class Phylum(models.Model):
    """Taxonomic phylum: the top level of the hierarchy modelled here."""
    name = models.CharField(max_length=50)
    def __unicode__(self):
        return self.name
class Class(models.Model):
    """Taxonomic class, belonging to a Phylum."""
    name = models.CharField(max_length=50)
    phylum = models.ForeignKey(Phylum)
    def __unicode__(self):
        return self.name
class Order(models.Model):
    """Taxonomic order, belonging to a Class.

    The field is named ``classe`` because ``class`` is a Python keyword.
    """
    name = models.CharField(max_length=50)
    classe = models.ForeignKey(Class)
    def __unicode__(self):
        return self.name
class Family(models.Model):
    """Taxonomic family, belonging to an Order."""
    name = models.CharField(max_length=50)
    order = models.ForeignKey(Order)
    def __unicode__(self):
        return self.name
class Genus(models.Model):
    """Taxonomic genus, belonging to a Family."""
    name = models.CharField(max_length=50)
    family = models.ForeignKey(Family)
    def __unicode__(self):
        return self.name
class Species(models.Model):
    """Taxonomic species, belonging to a Genus."""
    name = models.CharField(max_length=50)
    genus = models.ForeignKey(Genus)
    def __unicode__(self):
        return self.name
class Animal(models.Model):
    """An individual animal entry, classified down to its Species."""
    name = models.CharField(max_length=50)
    image = models.ImageField(upload_to="images", blank=True)
    description = models.TextField(blank=True)
    species = models.ForeignKey(Species)
    # View counter, incremented elsewhere; starts at zero.
    views = models.IntegerField(default=0)
    admin_notes = models.TextField(blank=True)

    def __unicode__(self):
        # Bug fix: stray non-Python residue was fused onto this return
        # line, making it a syntax error; restore the plain return.
        return self.name
#!/usr/bin/env python3
#
# Debian Changes Bot
# Copyright (C) 2008 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (C) 2015 Sebastian Ramacher <sramacher@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os
import requests
import requests_mock
import io
from DebianDevelChangesBot.datasources import NewQueue
class TestDatasourceTestingNewQueue(unittest.TestCase):
    """Tests for the NewQueue datasource, backed by a recorded fixture."""
    def setUp(self):
        # Serve the recorded NEW-queue listing for any GET of NewQueue.URL.
        fixture = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "fixtures", "new_queue.txt"
        )
        with open(fixture, encoding="utf-8") as f:
            data = f.read()
        self.mocker = requests_mock.Mocker()
        self.mocker.start()
        self.mocker.register_uri("GET", NewQueue.URL, text=data)
        session = requests.Session()
        self.datasource = NewQueue(session)
    def tearDown(self):
        # Undo the global requests mocking installed in setUp.
        self.mocker.stop()
    def is_new(self, package, version):
        # Helper: refresh the datasource from the (mocked) URL, then query.
        self.datasource.update()
        return self.datasource.is_new(package, version)
    def testURL(self):
        """
        Check we have a sane URL.
        """
        self.assertTrue(len(self.datasource.URL) > 5)
        self.assertTrue(self.datasource.URL.startswith("http"))
    def testInterval(self):
        """
        Check we have a sane update interval.
        """
        self.assertTrue(self.datasource.INTERVAL > 60)
    def testTop(self):
        # First entry in the fixture is found.
        self.assertTrue(self.is_new("ezmlm-idx", "6.0.1-1"))
    def testBottom(self):
        # Last entry in the fixture is found.
        self.assertTrue(self.is_new("libxml-sax-expatxs-perl", "1.31-1"))
    def testMultipleVersions(self):
        # A package may be queued with several versions at once.
        self.assertTrue(self.is_new("libgcal", "0.8.1-1"))
        self.assertTrue(self.is_new("libgcal", "0.8.1-2"))
    def testInvalidVersion(self):
        self.assertFalse(self.is_new("rcpp", "0.5.2.invalid"))
    def testNotInQueue(self):
        self.assertFalse(self.is_new("package-not-in-new-queue", "version-foo"))
    def testByhand(self):
        self.assertFalse(self.is_new("loadlin", "1.6c.really1.6c.nobin-2"))
    def testExperimental(self):
        self.assertTrue(self.is_new("ooo-build", "3.0.0.9+r14588-1"))
# Allow running this test module directly, outside a test runner.
if __name__ == "__main__":
    unittest.main()
| sebastinas/debian-devel-changes-bot | tests/test_datasources_new_queue.py | Python | agpl-3.0 | 2,826 |
# Finds the maximum difference in array element
def max_diff_array(array):
    """Return the difference between the largest and smallest elements.

    Returns 0 for an empty or single-element array, since there is no pair
    of elements to differ.
    """
    # Bug fix: the original returned the array object itself (not a number)
    # for a single element. It also tracked extremes by hand in variables
    # shadowing the builtins min/max, and left `max_diff` unused; the
    # builtin max()/min() express the same single pass directly.
    if len(array) < 2:
        return 0
    return max(array) - min(array)


print(max_diff_array([15, 20, 2, 3, 1, 16, 4]))
| bkpathak/HackerRank-Problems | collections/array/max_diff.py | Python | mit | 381 |
#!/usr/bin/env python-sirius
import argparse
import calendar
import datetime
from pyjob import MATCH_RULE, handle_request, match_clients, MatchClientsErr
def main():
    """Parse the command line and update pyjob client configurations.

    Fetches the current configurations from the server for the selected
    clients, applies the requested changes (calendar, niceness, shutdown,
    MoreJobs, default process count, removal) and sends them back.
    """
    # configuration of the parser for the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c', '--clients', dest='clients', type=str,
        help="list of hosts to interact with. "
        "[format: client1,client2,... default: 'this']. "
        "Use 'all' to get all clients. " + MATCH_RULE)
    parser.add_argument(
        '-n', '--niceness', dest='niceness', type=int,
        help="Niceness of the jobs submitted by the clients. "
        "[default: 'current value']")
    parser.add_argument(
        '--shutdown', dest='shut', type=str,
        help="If true shutdown the clients. ")
    parser.add_argument(
        '--MoreJobs', dest='More', type=str,
        help="If false, the clients won't ask for new jobs.")
    parser.add_argument(
        '--defnumproc', dest='defproc', type=int,
        help="Default number of processes the clients can run."
        "It means that for the set of (W,H,M) not specified "
        "in the calendar this will be the number of jobs each"
        " client will run. [default: current value]")
    parser.add_argument(
        '--remove', dest='remove', action='store_true', default=False,
        help="This option removes the client's configurations"
        "from the server's list. If the client is 'on', as soon"
        "as it makes contact to the server, the configurations"
        "will be restored.")
    group = parser.add_argument_group("Calendar Options")
    group.add_argument(
        '--calendar', dest='calendar', type=str,
        help="If this option is given, the calendar of the "
        "clients will be set according the following options"
        " to the (W,H,M) specifications for the number of "
        "processes to run. [Possible values: "
        "'append', 'set' and 'empty'] [default: 'append']")
    group.add_argument(
        '-W', '--weekday', dest='week', type=str,
        help="list of week days to set the calendar. "
        "[format: day1,day2,... default is the weekday of today]")
    group.add_argument(
        '-i', '--initial', dest='initial', type=str,
        help="Initial time to set the calendar. [format H:M default 00:00]")
    group.add_argument(
        '-f', '--final', dest='final', type=str,
        help="Final time set the calendar. [format H:M default 23:59]")
    group.add_argument(
        '-N', '--num_proc', dest='np', type=int,
        help="Integer which specify the number of processes to "
        "set to the calendar) [no Default Value]")
    opts = parser.parse_args()
    # Fetch the current configurations of the targeted clients.
    try:
        if opts.clients == 'all':
            clients = opts.clients
            ok, ConfigsReceived = handle_request('GET_CONFIGS', 'all')
        elif opts.clients is None:
            ok, ConfigsReceived = handle_request('GET_CONFIGS', 'this')
        else:
            clients = set(opts.clients.split(","))
            ConfigsReceived = match_clients(clients)
            ok = True
        if not ok:
            raise MatchClientsErr('Could not get configs of server.')
    except MatchClientsErr as err:
        print(err)
        return
    # Clients whose stored configuration should be removed on the server.
    RmClie = {}
    if opts.remove:
        RmClie = set(ConfigsReceived.keys())
    # Build the (weekday, hour, minute) -> process-count calendar, if asked.
    calendars = {}
    if opts.calendar in {'append', 'set', 'empty'}:
        if opts.np is None and opts.calendar != 'empty':
            print(
                'Calendar not submitted: must specify -N or --num_proc option')
            return
        else:
            np = opts.np
        if opts.week is not None:
            # Week days are matched by case-insensitive prefix against the
            # locale day names in calendar.day_name.
            week = opts.week.split(',')
            days = tuple(
                x for x in calendar.day_name for y in week
                if x.lower().startswith(y.lower()))
            if len(days) != len(week):
                print("Problem with at least one week day specified")
                return
        else:
            days = (calendar.day_name[datetime.datetime.now().weekday()],)
        # Parse and validate the initial (default 00:00) and final
        # (default 23:59) times of the interval.
        IH, IM = 0, 0
        initial = None
        if (opts.initial is not None):
            initial = tuple(int(x) for x in opts.initial.split(':'))
            if len(initial) != 2 or not ((-1 < initial[0] < 24) and
                                         (-1 < initial[1] < 60)):
                print("Problem with specification of initial time")
                return
            IH, IM = initial
        FH, FM = 23, 59
        final = None
        if (opts.final is not None):
            final = tuple(int(x) for x in opts.final.split(':'))
            if len(final) != 2 or not ((-1 < final[0] < 24) and
                                       (-1 < final[1] < 60) ):
                print("Problem with specification of final time")
                return
            FH, FM = final
        if ((initial is not None) and (final is not None)) and (initial > final):
            print('Initial time must be smaller than the final.')
            return
        # Every minute of every selected day inside [initial, final] gets
        # the requested process count.
        interval = tuple((H, M) for H in range(IH, FH+1)
                         for M in range(0, 60)
                         if (IH, IM) <= (H, M) <= (FH, FM))
        calendars = {(x, y, z): np for x in days for (y, z) in interval}
    else:
        if opts.calendar is not None:
            print("Wrong value for --calendar option:", opts.calendar)
            return
        if any((opts.initial, opts.final, opts.week)):
            print("Option --calendar must be given to set the calendar")
            return
    # Apply the calendar action to every fetched configuration.
    for k in ConfigsReceived.keys():
        if opts.calendar == 'append':
            ConfigsReceived[k].Calendar.update(calendars)
        elif opts.calendar == 'set':
            ConfigsReceived[k].Calendar = calendars
        elif opts.calendar == 'empty':
            ConfigsReceived[k].Calendar = dict()
    if opts.niceness is not None:
        # Clamp niceness to the valid [-20, 20] range.
        niceness = (-20 if -20 > opts.niceness else
                    20 if 20 < opts.niceness else opts.niceness )
        for k in ConfigsReceived.keys():
            ConfigsReceived[k].niceness = niceness
    if opts.shut is not None:
        if not opts.shut:
            print('Option -s must be True or False')
            return
        # 'bla' acts as a sentinel for "neither true nor false matched".
        shut = (True if 'true'.startswith(opts.shut.lower()) else
                False if 'false'.startswith(opts.shut.lower()) else 'bla' )
        if shut == 'bla':
            print('Option -s must be True or False')
            return
        for k in ConfigsReceived.keys():
            ConfigsReceived[k].shutdown = shut
    if opts.More is not None:
        if not opts.More:
            print('Option -s must be True or False')
            return
        More = (True if 'true'.startswith(opts.More.lower()) else
                False if 'false'.startswith(opts.More.lower()) else 'bla' )
        if More == 'bla':
            print('Option -s must be True or False')
            return
        for k in ConfigsReceived.keys():
            ConfigsReceived[k].MoreJobs = More
    if opts.defproc is not None:
        defproc = opts.defproc
        for k in ConfigsReceived.keys():
            ConfigsReceived[k].defNumJobs = defproc
    # Push the updated configurations (and removals) back to the server.
    ok, clients = handle_request('SET_CONFIGS', ConfigsReceived, RmClie)
    if ok:
        print('Success. Configurations will be set! for \n',
              ', '.join(tuple(ConfigsReceived)))
    else:
        print("It seems that these clients are not in the server's list;",
              ', '.join(clients))
if __name__ == '__main__':
    main()
| lnls-fac/job_manager | scripts/pyjob_configs_set.py | Python | mit | 7,621 |
from django.contrib import admin
from django.template.response import SimpleTemplateResponse
from django.utils.translation import ugettext_lazy as _
from .forms import InteractivePointForm
from .models import InteractivePoint
IS_POPUP_VAR = '_popup'
ACTION_VAR = '_action'
POINT_ID_VAR = '_point_id'
class InteractivePointAdmin(admin.ModelAdmin):
    """Admin for InteractivePoint.

    Hidden from the admin index (via ``get_model_perms``) and designed to
    be driven from popup windows: add/change/delete responses render a
    small template reporting the action back to the opener.
    """
    name = _("Interactive Point")
    change_form_template = "interactive_point_change_form.html"
    filter_horizontal = ['pages']
    form = InteractivePointForm
    model = InteractivePoint

    def get_model_perms(self, request):
        # Return empty perms dict thus hiding the model from admin index
        return {}

    def response_add(self, request, value, post_url_continue=None):
        if IS_POPUP_VAR in request.POST:
            return self.response_popup(request, value)
        else:
            # Bug fix: the superclass result was computed but not returned,
            # so non-popup requests got a None response.
            return super().response_add(request, value, post_url_continue)

    def response_change(self, request, value):
        if IS_POPUP_VAR in request.POST:
            return self.response_popup(request, value)
        else:
            # Bug fix: propagate the superclass HttpResponse (was dropped).
            return super().response_change(request, value)

    def response_delete(self, request, value):
        if IS_POPUP_VAR in request.GET:
            return self.response_popup(request, value)
        else:
            # Bug fix: propagate the superclass HttpResponse (was dropped).
            # NOTE(review): Django >= 1.9 expects
            # response_delete(request, obj_display, obj_id) — verify the
            # call signature against the project's Django version.
            return super().response_delete(request, value)

    def response_popup(self, request, value):
        """Render the popup response template with the action and object id."""
        action = request.GET.get(ACTION_VAR)
        id = request.GET.get(POINT_ID_VAR)
        if id is None:
            id = value.pk
        return SimpleTemplateResponse('admin/interactive_point_popup_response.html', {
            'id': id,
            'value': value,
            'action': action
        })


admin.site.register(InteractivePoint, InteractivePointAdmin)
| geometalab/djangocms-interactiveimage | admin.py | Python | mit | 1,767 |
# Copyright 2015 ClusterHQ Inc. See LICENSE file for details.
"""
Run the client installation tests.
"""
import os
import shutil
import sys
import tempfile
import yaml
from characteristic import attributes
import docker
from effect import TypeDispatcher, sync_performer, perform
from twisted.python.usage import Options, UsageError
from twisted.python.filepath import FilePath
import flocker
from flocker.common.version import make_rpm_version
from flocker.provision import PackageSource
from flocker.provision._effect import Sequence, perform_sequence
from flocker.provision._install import (
ensure_minimal_setup,
task_cli_pkg_install,
task_cli_pkg_test,
task_cli_pip_prereqs,
task_cli_pip_install,
task_cli_pip_test,
)
from flocker.provision._ssh import (
Run, Sudo, Put, Comment, perform_sudo, perform_put)
@attributes(['image', 'package_manager'])
class DockerImage(object):
    """Holder for Docker image information.

    :ivar image: Docker image name, e.g. ``'centos:7'``.
    :ivar package_manager: the distribution's package manager name
        (``'yum'``, ``'apt'`` or ``'dnf'``), passed to the provisioning
        tasks in ``main``.
    """
# Supported distribution name -> Docker image / package manager mapping.
DOCKER_IMAGES = {
    'centos-7': DockerImage(image='centos:7', package_manager='yum'),
    'debian-8': DockerImage(image='debian:8', package_manager='apt'),
    'fedora-22': DockerImage(image='fedora:22', package_manager='dnf'),
    'ubuntu-14.04': DockerImage(image='ubuntu:14.04', package_manager='apt'),
    'ubuntu-15.04': DockerImage(image='ubuntu:15.04', package_manager='apt'),
}
# pip installs are tested on every supported distribution...
PIP_DISTRIBUTIONS = DOCKER_IMAGES.keys()
# ...but OS packages for the client are only available on Ubuntu.
PACKAGED_CLIENT_DISTRIBUTIONS = ('ubuntu-14.04', 'ubuntu-15.04')
class ScriptBuilder(TypeDispatcher):
    """
    Convert an Effect sequence to a shell script.

    The effects are those defined in flocker.provision._effect and
    flocker.provision._ssh._model.
    """
    def __init__(self, effects):
        # Accumulate script lines; 'set -ex' echoes commands and aborts on
        # the first failure.
        self.lines = [
            '#!/bin/bash',
            'set -ex'
        ]
        # Dispatch each effect type to the performer that renders it.
        TypeDispatcher.__init__(self, {
            Run: self.perform_run,
            Sudo: perform_sudo,
            Put: perform_put,
            Comment: self.perform_comment,
            Sequence: perform_sequence
        })
        # Walk the effect tree, appending to self.lines as a side effect.
        perform(self, effects)
        # Add blank line to terminate script with a newline
        self.lines.append('')
        self._script = '\n'.join(self.lines)
    @sync_performer
    def perform_run(self, dispatcher, intent):
        """
        For Run effects, add the command line.
        """
        self.lines.append(intent.command)
    @sync_performer
    def perform_comment(self, dispatcher, intent):
        """
        For Comment effects, prefix the comment with #
        """
        self.lines.append('# ' + intent.comment)
    def script(self):
        """
        Return the generated shell script.
        """
        return self._script
def make_script_file(dir, effects):
    """
    Create a shell script file from a sequence of effects.

    :param bytes dir: The directory in which to create the script.
    :param Effect effects: An effect which contains the commands,
        typically a Sequence containing multiple commands.

    :return: The base filename of the script.
    """
    builder = ScriptBuilder(effects)
    fd, filename = tempfile.mkstemp(dir=dir, text=True)
    os.write(fd, builder.script())
    os.close(fd)
    # 0o555 (r-xr-xr-x) is the same value as the legacy ``0555`` literal,
    # but is valid syntax on Python 3 as well as Python 2.6+.
    os.chmod(filename, 0o555)
    return os.path.basename(filename)
class DockerRunner:
    """
    Run commands in a Docker container.
    """
    def __init__(self, image):
        # Pin the Docker remote API version for client/daemon compatibility.
        self.docker = docker.Client(version='1.18')
        self.image = image
    def start(self):
        """
        Start the Docker container.
        """
        # Scripts are shared with the container through a read-only bind
        # mount of this temporary directory at /mnt/script.
        self.tmpdir = tempfile.mkdtemp()
        try:
            self.docker.pull(self.image)
            container = self.docker.create_container(
                image=self.image, command='/bin/bash', tty=True,
                volumes=['/mnt/script'],
            )
            self.container_id = container[u'Id']
            self.docker.start(
                self.container_id,
                binds={
                    self.tmpdir: {'bind': '/mnt/script', 'ro': True},
                }
            )
        except:
            # Don't leak the temporary directory if startup fails.
            os.rmdir(self.tmpdir)
            raise
    def stop(self):
        """
        Stop the Docker container.
        """
        self.docker.stop(self.container_id)
        shutil.rmtree(self.tmpdir)
    def execute(self, commands, out=sys.stdout):
        """
        Execute a set of commands in the Docker container.

        The set of commands provided to one call of ``execute`` will be
        executed in a single session. This means commands will see the
        environment created by previous commands.

        The output of the commands is sent to the ``out`` file object,
        which must have a ``write`` method.

        :param Effect commands: An Effect containing the commands to run,
            probably a Sequence of Effects, one for each command to run.
        :param out: Where to send command output. Any object with a
            ``write`` method.
        :return int: The exit status of the commands.  If all commands
            succeed, this will be zero. If any command fails, this will
            be non-zero.
        """
        # Render the effects to a shell script visible inside the container.
        script_file = make_script_file(self.tmpdir, commands)
        script = '/mnt/script/{}'.format(script_file)
        session = self.docker.exec_create(self.container_id, script)
        session_id = session[u'Id']
        # Stream output as the script runs.
        for output in self.docker.exec_start(session, stream=True):
            out.write(output)
        return self.docker.exec_inspect(session_id)[u'ExitCode']
class RunOptions(Options):
    """Command-line options for the client installation test runner."""
    description = "Run the client tests."
    optParameters = [
        ['distribution', None, None,
         'The target distribution. '
         'One of {}. With --pip, one of {}'.format(
             ', '.join(PACKAGED_CLIENT_DISTRIBUTIONS),
             ', '.join(PIP_DISTRIBUTIONS))],
        ['branch', None, None, 'Branch to grab packages from'],
        ['flocker-version', None, flocker.__version__,
         'Version of flocker to install'],
        ['build-server', None, 'http://build.clusterhq.com/',
         'Base URL of build server for package downloads'],
        # XXX - remove the remaining flags once Buildbot is updated (FLOC-2813)
        ['provider', None, None, 'No longer used.'],
        ['config-file', None, None, 'No longer used.'],
    ]
    optFlags = [
        ['pip', None, 'Install using pip rather than packages.'],
    ]
    synopsis = ('Usage: run-client-tests --distribution <distribution> '
                '[--branch <branch>] [--flocker-version <version>] '
                '[--build-server <url>] [--pip]')
    def __init__(self, top_level):
        """
        :param FilePath top_level: The top-level of the flocker repository.
        """
        Options.__init__(self)
        self.top_level = top_level
    def postOptions(self):
        # Validate options and derive the PackageSource used by the
        # installation tasks.
        if self['distribution'] is None:
            raise UsageError("Distribution required.")
        if self['config-file'] is not None:
            config_file = FilePath(self['config-file'])
            self['config'] = yaml.safe_load(config_file.getContent())
        else:
            self['config'] = {}
        if self['flocker-version']:
            rpm_version = make_rpm_version(self['flocker-version'])
            os_version = "%s-%s" % (rpm_version.version, rpm_version.release)
            # Drop the '.dirty' marker appended for uncommitted trees.
            if os_version.endswith('.dirty'):
                os_version = os_version[:-len('.dirty')]
        else:
            os_version = None
        self['package_source'] = PackageSource(
            version=self['flocker-version'],
            os_version=os_version,
            branch=self['branch'],
            build_server=self['build-server'],
        )
        # Packaged-client installs are only available on some distributions.
        if self['pip']:
            supported = PIP_DISTRIBUTIONS
        else:
            supported = PACKAGED_CLIENT_DISTRIBUTIONS
        if self['distribution'] not in supported:
            raise UsageError(
                "Distribution %r not supported. Available distributions: %s"
                % (self['distribution'], ', '.join(supported)))
def main(args, base_path, top_level):
    """
    Run the client installation steps inside a Docker container.

    :param list args: The arguments passed to the script.
    :param FilePath base_path: The executable being run.
    :param FilePath top_level: The top-level of the Flocker repository.
    """
    options = RunOptions(top_level=top_level)
    try:
        options.parseOptions(args)
    except UsageError as e:
        sys.exit("%s: %s\n" % (base_path.basename(), e))
    distribution = options['distribution']
    package_manager = DOCKER_IMAGES[distribution].package_manager
    package_source = options['package_source']
    # Choose the install/test steps: pip-in-virtualenv or OS packages.
    if options['pip']:
        virtualenv = 'flocker-client'
        steps = [
            ensure_minimal_setup(package_manager),
            task_cli_pip_prereqs(package_manager),
            task_cli_pip_install(virtualenv, package_source),
            task_cli_pip_test(virtualenv),
        ]
    else:
        steps = [
            ensure_minimal_setup(package_manager),
            task_cli_pkg_install(distribution, package_source),
            task_cli_pkg_test(),
        ]
    runner = DockerRunner(DOCKER_IMAGES[distribution].image)
    runner.start()
    try:
        # Stop at the first failing step, exiting with its status; the
        # container is cleaned up either way.
        for commands in steps:
            status = runner.execute(commands)
            if status != 0:
                sys.exit(status)
    finally:
        runner.stop()
| agonzalezro/flocker | admin/client.py | Python | apache-2.0 | 9,340 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a set of VMs each running a Salt minion daemon in a Docker container.
"""
IMAGE = ('https://www.googleapis.com/compute/v1/projects/debian-cloud'
         '/global/images/family/debian-9')


def GenerateConfig(context):
  """Build the final configuration: one instance resource per minion."""
  minion_count = context.properties['minionCount']
  resources = [GenerateInstanceConfig(context, index)
               for index in range(minion_count)]
  return {'resources': resources}


def GenerateInstanceConfig(context, replica):
  """Build the resource dict for a single Salt-minion instance."""
  deployment = context.env['deployment']
  project = context.env['project']
  zone = context.properties['zone']
  instance_name = deployment + '-' + context.env['name'] + '-' + str(replica)
  compute_base = 'https://www.googleapis.com/compute/v1/projects/'
  machine_type = (compute_base + project + '/zones/' + zone
                  + '/machineTypes/f1-micro')
  network = compute_base + project + '/global/networks/default'
  # Startup script: add the saltstack Debian repository, install the Salt
  # minion, point it at the configured master, and start it in debug mode.
  startup_script = ('#! /bin/bash\n'
                    'sudo echo \'deb http://debian.saltstack.com'
                    '/debian jessie-saltstack main\' >> '
                    '/etc/apt/sources.list\n'
                    'sudo wget -q -O- http://debian.saltstack.com/'
                    'debian-salt-team-joehealy.gpg.key | '
                    'sudo apt-key add -\n'
                    'sudo apt-get update\n'
                    'sudo apt-get -y install salt-minion\n'
                    'sudo sed -i \'s/#master: salt/master: ' +
                    context.properties['master'] +
                    '/\' /etc/salt/minion\n'
                    'sudo salt-minion -l debug')
  return {
      'type': 'compute.v1.instance',
      'name': instance_name,
      'properties': {
          'zone': zone,
          'machineType': machine_type,
          'disks': [{
              'deviceName': 'boot',
              'type': 'PERSISTENT',
              'boot': True,
              'autoDelete': True,
              'initializeParams': {
                  'sourceImage': IMAGE
              }
          }],
          'networkInterfaces': [{
              'network': network,
              'accessConfigs': [{
                  'name': 'External NAT',
                  'type': 'ONE_TO_ONE_NAT'
              }]
          }],
          'tags': {
              'items': ['http-server']
          },
          'metadata': {
              'items': [{
                  'key': 'startup-script',
                  'value': startup_script
              }]
          }
      }
  }
| aljim/deploymentmanager-samples | examples/v2/saltstack/python/minion.py | Python | apache-2.0 | 3,271 |
# Generated by Django 2.2.6 on 2019-11-13 17:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2).

    Relaxes ``Poi.fk_campus`` to be optional (``blank=True, null=True``)
    while keeping CASCADE deletion from the related Campus.
    """

    dependencies = [
        ('poi_manager', '0003_auto_20191018_0832'),
    ]

    operations = [
        migrations.AlterField(
            model_name='poi',
            name='fk_campus',
            # NOTE: on_delete=CASCADE means deleting a Campus deletes its POIs.
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='buildings.Campus'),
        ),
    ]
| indrz/indrz | indrz/poi_manager/migrations/0004_auto_20191113_1843.py | Python | gpl-3.0 | 500 |
#
# Hiiktuu constraints.
#
########################################################################
#
# This file is part of the HLTDI L^3 project
# for parsing, generation, translation, and computer-assisted
# human translation.
#
# Copyright (C) 2014, HLTDI <gasser@cs.indiana.edu>
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =========================================================================
# 2014.03.27
# -- Created. Initially just copied from l3xdg/constraint.py.
# 2014.03.29
# -- Fixed cant_precede() so it works with IVars (determined and not).
# 2014.04.03
# -- Created ComplexSetConvexity
# 2014.04.05
# -- Created ComplexUnionSelection
# 2014.04.15
# -- Constraint types used so far:
# UnionSelection, PrecedenceSelection, ComplexUnionSelection,
# ComplexSetConvexity, Union, Disjoint, Inclusion
# 2014.04.26
# -- Fixed several bugs in SetPrecedence (needed for TL sequencing).
# 2014.04.30
# -- Eliminated lots of unused constraints.
# Fixed complex constraints so that sub-constraints are not recorded
# in their variables.
# 2014.05.04-5
# -- AgrSelection constraint.
# 2014.05.05
# -- ComplexAgrSelection constraint.
# Generalization of three complex constraints to ComplexConstraint class.
# 2014.05.08
# -- Added Order constraint for ordering indices of sequences, replacing multiple
# SetPrecedence constraints, and including one additional condition not in
# SetPrecedence.
# 2014.05.11
# -- Complex constraints make selection variables for indices out of main sel
# selection variables (groups in Hiiktuu) non-essential once the constraint
# is entailed.
from .variable import *
# This is imported in another branch too...
from .features import *
import itertools
class Constraint:
    """Abstract base class for all propagators.

    A constraint ranges over a sequence of variables; subclasses must
    implement fails(), is_entailed(), and infer().  run() drives one
    propagation step and reports one of the three outcome constants below.
    """

    # Constants for the outcome of running the constraint.
    failed = 0
    entailed = 1
    sleeping = 2

    # Weight threshold below which a constraint counts as "lenient" (soft).
    lenience = .5

    def __init__(self, variables, problem=None, record=True, weight=1):
        """
        variables: the variables this constraint ranges over
        problem: the problem the constraint belongs to (may be None)
        record: whether to register this constraint on its variables
        weight: constraint weight; below `lenience` the constraint is soft
        """
        self.variables = variables
        self.problem = problem
        self.weight = weight
        if record:
            for var in variables:
                # Determined (constant) variables never change, so there is
                # no point registering this constraint to be woken by them.
                if isinstance(var, DetVar):
                    continue
                var.constraints.append(self)
        self.name = ''

    def __repr__(self):
        return self.name

    def is_lenient(self):
        """Is this a soft constraint (weight below the lenience threshold)?"""
        return self.weight < Constraint.lenience

    def set_weight(self, weight):
        self.weight = weight

    def get_var(self):
        """The single variable for this constraint."""
        return self.variables[0]

    # Each Constraint type must implement fails(), is_entailed(), and infer().
    def fails(self, dstore=None):
        raise NotImplementedError("{} is an abstract class".format(self.__class__.__name__))

    def is_entailed(self, dstore=None):
        raise NotImplementedError("{} is an abstract class".format(self.__class__.__name__))

    def infer(self, dstore=None, verbosity=0, tracevar=None):
        """Should return state and variables that change."""
        raise NotImplementedError("{} is an abstract class".format(self.__class__.__name__))

    def determine(self, dstore=None, verbosity=0, tracevar=()):
        """Try to determine each variable, returning the set if any determined.

        BUG FIX: tracevar previously defaulted to None, so a direct call with
        verbosity > 0 crashed on `variable in tracevar` (TypeError).  An empty
        tuple is a safe, immutable default with the same meaning.
        """
        det = set()
        for variable in self.variables:
            if not variable.is_determined(dstore=dstore) and \
               variable.determined(dstore=dstore, constraint=self, verbosity=verbosity) is not False:
                if verbosity and variable in tracevar:
                    print(' {} determining {} at {}'.format(self, variable, variable.get_value(dstore=dstore)))
                det.add(variable)
        return det

    def run(self, dstore=None, verbosity=0, tracevar=()):
        """Run this constraint during constraint satisfaction.

        tracevar defaults to an empty tuple rather than a mutable list
        (never mutated here, but the mutable default was an anti-pattern).
        """
        if verbosity > 1:
            print(' Running {}'.format(self))
        determined = self.determine(dstore=dstore, verbosity=verbosity, tracevar=tracevar)
        # Try to determine the variables; if any are determined, go to sleep and return
        # the set of newly determined variables.
        if determined:
            if verbosity > 1:
                print(' Determined variables', determined)
            return Constraint.sleeping, determined
        # Otherwise see if the constraint fails. If it does fail and return the empty set.
        if self.fails(dstore=dstore):
            if verbosity > 1:
                print(' Failed!')
            elif verbosity:
                print('{} failed; weight: {}'.format(self, self.weight))
            return Constraint.failed, set()
        # Otherwise see if the constraint is entailed. If it is, succeed and return the empty set.
        if self.is_entailed(dstore=dstore):
            if verbosity > 1:
                print(' Entailed')
            return Constraint.entailed, set()
        # Otherwise try inferring variable values. Either succeed or sleep and return any changed
        # variables.
        return self.infer(dstore=dstore, verbosity=verbosity, tracevar=tracevar)

    @staticmethod
    def string_set(s):
        """Convenient print name for a set (abbreviated when large)."""
        if len(s) > 10:
            return '{{{0}...{1}}}'.format(min(s), max(s))
        else:
            return '{}'.format(set.__repr__(s))

    def print_vars(self):
        '''Print out components of constraint variables.'''
        for v in self.variables:
            print('{} :: {}'.format(v, v.dstores))
## Primitive basic constraints
# Integer domains
class Member(Constraint):
    """Constrain an int (IVar) variable's domain against a fixed domain."""

    def __init__(self, var, domain, problem=None, record=True):
        """
        var: an IVar
        domain: a set of ints
        """
        Constraint.__init__(self, (var,), problem=problem, record=record)
        self.domain = domain
        self.name = '{0}<{1}'.format(self.get_var(), Constraint.string_set(self.domain))

    def fails(self, dstore=None):
        """Fail if the constraint's domain is not a subset of the variable's
        current domain.

        NOTE(review): this is stronger than classic membership propagation,
        which would fail only on an empty intersection — confirm intent.
        """
        if not self.domain.issubset(self.get_var().get_domain(dstore=dstore)):
            return True
        return False

    def is_entailed(self, dstore=None):
        """Is the variable's domain a subset of the constraint's domain?"""
        if self.get_var().get_domain(dstore=dstore).issubset(self.domain):
            return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=()):
        """The variable's values are restricted to the intersection of
        their current values and the constraint's domain.

        BUG FIX: tracevar previously defaulted to None; with verbosity <= 1
        the expression `var in tracevar` was evaluated and raised TypeError
        when infer() was called directly.  An empty tuple is safe.
        """
        var = self.get_var()
        if var.strengthen(self.domain, dstore=dstore, constraint=(verbosity>1 or var in tracevar) and self):
            return Constraint.entailed, {var}
        return Constraint.entailed, set()
# Set domains
class Superset(Constraint):
    """Set variable is constrained to be a superset of a constant subset."""

    def __init__(self, var, subset, problem=None, record=True):
        """
        var: a SVar
        subset: a set of ints
        """
        Constraint.__init__(self, (var,), problem=problem, record=record)
        self.subset = subset
        self.name = '{0} >= {1}'.format(self.get_var(), Constraint.string_set(self.subset))

    def fails(self, dstore=None):
        """Is the constraint subset not a subset of the var's upper bound?"""
        if not self.subset.issubset(self.get_var().get_upper(dstore=dstore)):
            return True
        return False

    def is_entailed(self, dstore=None):
        """Is the variable's lower bound a superset of the constraint's subset?"""
        if self.get_var().get_lower(dstore=dstore).issuperset(self.subset):
            return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=()):
        """The variable's values are restricted to be a superset of the union
        of the current lower bound and subset.

        BUG FIX: tracevar previously defaulted to None; with verbosity <= 1
        the expression `var in tracevar` was evaluated and raised TypeError
        when infer() was called directly.  An empty tuple is safe.
        """
        var = self.get_var()
        if var.strengthen_lower(self.subset, dstore=dstore,
                                constraint=(verbosity>1 or var in tracevar) and self):
            return Constraint.entailed, {var}
        return Constraint.entailed, set()
class Subset(Constraint):
    """Set variable is constrained to be a subset of a constant superset."""

    def __init__(self, var, superset, problem=None, record=True):
        """
        var: a SVar
        superset: a set of ints
        """
        Constraint.__init__(self, (var,), problem=problem, record=record)
        self.superset = superset
        self.name = '{0} c= {1}'.format(self.get_var(), Constraint.string_set(self.superset))

    def fails(self, dstore=None):
        """Is the var's lower bound not a subset of the constraint superset?"""
        if not self.get_var().get_lower(dstore=dstore).issubset(self.superset):
            return True
        return False

    def is_entailed(self, dstore=None):
        """Is the variable's upper bound a subset of the constraint's superset?"""
        if self.get_var().get_upper(dstore=dstore).issubset(self.superset):
            return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=()):
        """The variable's values are restricted to be a subset of the intersection
        of the current upper bound and superset.

        BUG FIX: tracevar previously defaulted to None; with verbosity <= 1
        the expression `var in tracevar` was evaluated and raised TypeError
        when infer() was called directly.  An empty tuple is safe.
        """
        var = self.get_var()
        if var.strengthen_upper(self.superset, dstore=dstore, constraint=(verbosity>1 or var in tracevar) and self):
            return Constraint.entailed, {var}
        return Constraint.entailed, set()
# Set domain variables only
class SetConvexity(Constraint):
    """There must not be any 'holes' in the (single) set variable, which represents
    the positions of the descendants of a node as well as that of the node itself."""

    def __init__(self, var, problem=None, weight=1, record=True):
        """Only one variable, so a special constructor."""
        Constraint.__init__(self, [var], problem=problem, weight=weight, record=record)
        self.var = self.variables[0]
        self.name = '{0} <>'.format(self.var)

    def fails(self, dstore=None):
        """Four ways to fail."""
        # (1) If the variable is determined, its value itself must be convex.
        if self.var.determined(dstore=dstore, constraint=self) is not False:
            val = self.var.get_value(dstore=dstore)
            # There can't be any holes
            if val:
                val_range = set(range(min(val), max(val)+1))
                if val_range - val:
                    return True
        lower_card = self.var.get_lower_card(dstore=dstore)
        lower = self.var.get_lower(dstore=dstore)
        upper = self.var.get_upper(dstore=dstore)
        if lower:
            # Necessary range includes all values between the minimum and the maximum (inclusive)
            # of the lower bound
            neces_range = set(range(min(lower), max(lower)+1))
            # (2) some value in the necessary range is not even possible
            if neces_range - upper:
                # If there's some value in necessary range not in upper bound...
                return True
            # Possible values that are not in necessary range
            possible = upper - neces_range
            # (3) If there's a gap separating max necessary and min possible and too many possible
            # values would need to be discarded...
            if possible and neces_range:
                min_poss = min(possible)
                max_neces = max(neces_range)
                if min_poss - max_neces > 1:
                    if len(upper) - len(possible) < lower_card:
                        return True
        # (4) The upper bound must contain a run of consecutive integers at
        # least lower_card long; otherwise no convex value of sufficient
        # cardinality exists.
        if lower_card <= 1:
            return False
        upper_ordered = list(upper)
        upper_ordered.sort()
        last = upper_ordered[0]
        count = 1
        for pos in upper_ordered[1:]:
            if count >= lower_card:
                return False
            if pos - last > 1:
                # Gap in the upper bound: restart the run count.
                count = 1
                last = pos
            else:
                count += 1
                last = pos
        if count >= lower_card:
            return False
        return True

    def is_entailed(self, dstore=None):
        """If the variable is determined, or if the lower bound is convex,
        and the upper bound only adds a single value below or above the lower bound."""
        if self.var.determined(dstore=dstore, constraint=self) is not False:
            return True
        lower = self.var.get_lower(dstore=dstore)
        upper = self.var.get_upper(dstore=dstore)
        if not lower:
            return False
        min_lower = min(lower)
        max_lower = max(lower)
        # Lower bound is convex...
        if not set(range(min_lower, max_lower+1)) - lower:
            # ...and the upper bound extends it by at most one value each side,
            # so any extension remains convex.
            if min_lower - min(upper) <= 1 and max(upper) - max_lower <= 1:
                return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        changed = set()
        # If the variable's lower bound is non-empty, every value between
        # the min and max of the lower bound must be in the variable, and
        # there can't be any gaps in the upper bound either.
        v = self.var
        lower = v.get_lower(dstore=dstore)
        if len(lower) > 0:
            upper = v.get_upper(dstore=dstore)
            min_low = min(lower)
            max_low = max(lower)
            # Make the lower bound everything between the min and max
            if v.strengthen_lower(set(range(min_low, max_low+1)),
                                  dstore=dstore, constraint=(verbosity>1 or v in tracevar) and self):
                changed.add(v)
                return Constraint.sleeping, changed
            # Look for gaps in the upper bound
            # Starting at the max of the lower bound: anything above the first
            # hole cannot be part of a convex set containing the lower bound.
            max_up = max(upper)
            x = max_low+1
            while x in upper and x < max_up:
                x += 1
            if x < max_up:
                if v.discard_upper(set(range(x, max_up+1)),
                                   dstore=dstore, constraint=(verbosity>1 or v in tracevar) and self):
                    changed.add(v)
                    return Constraint.sleeping, changed
            # Starting at the min of the lower bound (mirror image downward)...
            min_up = min(upper)
            x = min_low-1
            while x in upper and x > min_up:
                x -= 1
            if x > min_up + 1:
                if v.discard_upper(set(range(min_up, x)),
                                   dstore=dstore, constraint=(verbosity>1 or v in tracevar) and self):
                    changed.add(v)
                    return Constraint.sleeping, changed
        return Constraint.sleeping, changed
class SupersetIntersection(Constraint):
    """Set var S1 is superset of intersection of set vars S2 and S3.

    variables: [S1, S2, S3]
    """

    def __init__(self, variables, problem=None, weight=1, record=True):
        Constraint.__init__(self, variables, problem=problem,
                            weight=weight, record=record)
        self.name = '{0} >= {1} ^ {2}'.format(self.variables[0], self.variables[1], self.variables[2])

    def fails(self, dstore=None):
        """Is the intersection of the lower bounds of S2 and S3 not a subset of
        the upper bound of S1?"""
        s1 = self.variables[0]
        s2 = self.variables[1]
        s3 = self.variables[2]
        # Elements that *must* be in both S2 and S3 must at least be possible in S1.
        s2_inters_s3 = s2.get_lower(dstore=dstore) & s3.get_lower(dstore=dstore)
        if not s2_inters_s3 <= s1.get_upper(dstore=dstore):
            return True
        # Fail on cardinalities: S1 can never hold the required intersection.
        if s1.get_upper_card(dstore=dstore) < len(s2_inters_s3):
            return True
        return False

    def is_entailed(self, dstore=None):
        """Is the intersection of the upper bounds of S2 and S3 already a subset of
        the lower bound of S1?"""
        s1 = self.variables[0]
        s2 = self.variables[1]
        s3 = self.variables[2]
        if s2.get_upper(dstore=dstore) & s3.get_upper(dstore=dstore) <= s1.get_lower(dstore=dstore):
            return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        changed = set()
        # Intersection of lower bound of S2 and S3 is subset of lower bound of S1.
        s1 = self.variables[0]
        s2 = self.variables[1]
        s3 = self.variables[2]
        if s1.strengthen_lower(s2.get_lower(dstore=dstore) & s3.get_lower(dstore=dstore),
                               dstore=dstore, constraint=(verbosity>1 or s1 in tracevar) and self):
            changed.add(s1)
        # Upper bound of S2 and S3 excludes elements which are in the lower bounds of S3 and S2, respectively,
        # but not in the upper bound of S1.
        s1_up = s1.get_upper(dstore=dstore)
        s2_not_s1 = s2.get_lower(dstore=dstore) - s1_up
        s3_not_s1 = s3.get_lower(dstore=dstore) - s1_up
        # Iterate over copies: discard_upper mutates the bound being iterated.
        for x in s3.get_upper(dstore=dstore).copy():
            if x in s2_not_s1:
                if s3.discard_upper(x, dstore=dstore, constraint=(verbosity>1 or s3 in tracevar) and self):
                    changed.add(s3)
        for x in s2.get_upper(dstore=dstore).copy():
            if x in s3_not_s1:
                if s2.discard_upper(x, dstore=dstore, constraint=(verbosity>1 or s2 in tracevar) and self):
                    changed.add(s2)
        # Inference based on cardinalities (from Müller, p. 104)
        s2Us3_card = len(s2.get_upper(dstore=dstore) | s3.get_upper(dstore=dstore))
        s1_up_card = s1.get_upper_card(dstore=dstore)
        s2_low_card = s2.get_lower_card(dstore=dstore)
        s3_low_card = s3.get_lower_card(dstore=dstore)
        if s1.strengthen_lower_card(s2_low_card + s3_low_card - s2Us3_card,
                                    dstore=dstore, constraint=(verbosity>1 or s1 in tracevar) and self):
            changed.add(s1)
        if s2.strengthen_upper_card(s2Us3_card + s1_up_card - s3_low_card,
                                    dstore=dstore, constraint=(verbosity>1 or s2 in tracevar) and self):
            changed.add(s2)
        if s3.strengthen_upper_card(s2Us3_card + s1_up_card - s2_low_card,
                                    dstore=dstore, constraint=(verbosity>1 or s3 in tracevar) and self):
            changed.add(s3)
        if verbosity > 1 and changed:
            print(' Variables {} changed'.format(changed))
        return Constraint.sleeping, changed
class SubsetUnion(Constraint):
    """Set var S1 is subset of union of set vars S2 and S3.

    variables: [S1, S2, S3]
    """

    def __init__(self, variables, problem=None, propagate=True,
                 weight=1, record=True):
        # NOTE(review): `propagate` is accepted but never used in this class.
        Constraint.__init__(self, variables, problem=problem, weight=weight, record=record)
        self.name = '{0} c= {1} U {2}'.format(self.variables[0], self.variables[1], self.variables[2])

    def fails(self, dstore=None):
        """Is the union of the upper bounds of S2 and S3 (the biggest it can be)
        not a superset of the lower bound of S1?"""
        s1 = self.variables[0]
        s2 = self.variables[1]
        s3 = self.variables[2]
        s2_union_s3 = s2.get_upper(dstore=dstore) | s3.get_upper(dstore=dstore)
        if not s2_union_s3 >= s1.get_lower(dstore=dstore):
            return True
        # Fail on cardinalities: S1 must contain more elements than could
        # possibly be available in S2 U S3.
        if s1.get_lower_card(dstore=dstore) > len(s2_union_s3):
            return True
        return False

    def is_entailed(self, dstore=None):
        """Is the union of the lower bounds of S2 and S3 already a superset of
        the upper bound of S1?"""
        s1 = self.variables[0]
        s2 = self.variables[1]
        s3 = self.variables[2]
        if s2.get_lower(dstore=dstore) | s3.get_lower(dstore=dstore) >= s1.get_upper(dstore=dstore):
            return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        changed = set()
        # S1 must be a subset of the union of the upper bounds of S2 and S3
        s1 = self.variables[0]
        s2 = self.variables[1]
        s3 = self.variables[2]
        if s1.strengthen_upper(s2.get_upper(dstore=dstore) | s3.get_upper(dstore=dstore),
                               dstore=dstore, constraint=(verbosity>1 or s1 in tracevar) and self):
            changed.add(s1)
        # S2's and S3's lower bounds must contain elements that are in the lower bound of S1 but not
        # S3 and S2, respectively (note: Müller has *lower* bounds of S3 and S2 (Eq. 11.17, p. 105),
        # but this seems too strong).
        s1_not_s2 = s1.get_lower(dstore=dstore) - s2.get_upper(dstore=dstore)
        s1_not_s3 = s1.get_lower(dstore=dstore) - s3.get_upper(dstore=dstore)
        if s3.strengthen_lower(s1_not_s2, dstore=dstore, constraint=(verbosity>1 or s3 in tracevar) and self):
            changed.add(s3)
        if s2.strengthen_lower(s1_not_s3, dstore=dstore, constraint=(verbosity>1 or s2 in tracevar) and self):
            changed.add(s2)
        # Inference based on cardinalities (from Müller, p. 105, but there's apparently
        # a typo; in Eq. 11.19, n1 should be the upper, not the lower bound of S1)
        if s1.strengthen_upper_card(s2.get_upper_card(dstore=dstore) + s3.get_upper_card(dstore=dstore),
                                    dstore=dstore, constraint=(verbosity>1 or s1 in tracevar) and self):
            changed.add(s1)
        if s2.strengthen_lower_card(s1.get_lower_card(dstore=dstore) - s3.get_lower_card(dstore=dstore),
                                    dstore=dstore, constraint=(verbosity>1 or s2 in tracevar) and self):
            changed.add(s2)
        if s3.strengthen_lower_card(s1.get_lower_card(dstore=dstore) - s2.get_lower_card(dstore=dstore),
                                    dstore=dstore, constraint=(verbosity>1 or s3 in tracevar) and self):
            changed.add(s3)
        if verbosity > 1 and changed:
            print(' Variables {} changed'.format(changed))
        return Constraint.sleeping, changed
class SetPrecedence(Constraint):
    """All elements of set variable 1 must precede all elements of set variable 2."""

    def __init__(self, variables, problem=None, weight=1, record=True):
        Constraint.__init__(self, variables, problem=problem,
                            weight=weight, record=record)
        self.name = '{0} << {1}'.format(self.variables[0], self.variables[1])

    # Also used in PrecedenceSelection
    @staticmethod
    def must_precede(svar1, svar2, dstore=None):
        """Is the highest value that can occur in svar1 < the lowest value that can occur in svar2?"""
        v1_upper = svar1.get_upper(dstore=dstore)
        v2_upper = svar2.get_upper(dstore=dstore)
        return v1_upper and v2_upper and (max(v1_upper) < min(v2_upper))

    @staticmethod
    def cant_precede(var1, var2, dstore=None):
        """Is the highest value that must occur in var1 >= the lowest value that must occur in var2?

        Handles both IVars (use the upper bound / domain) and set vars
        (use the lower bound).
        """
        # NOTE(review): this print looks like leftover debug output — it fires
        # whenever var1's upper bound is empty, not only on actual failure.
        if not var1.get_upper(dstore=dstore):
            print("Can't precede {}, {}".format(var1, var2))
        # Lower
        if isinstance(var1, IVar):
            v1 = min(var1.get_upper(dstore=dstore))
        elif not var1.get_lower(dstore=dstore):
            # Set var with empty lower bound: nothing is required yet.
            return False
        else:
            v1 = max(var1.get_lower(dstore=dstore))
        # Upper
        if isinstance(var2, IVar):
            v2 = max(var2.get_upper(dstore=dstore))
        elif not var2.get_lower(dstore=dstore):
            return False
        else:
            v2 = min(var2.get_lower(dstore=dstore))
        return v1 >= v2
        # return v1_lower and v2_lower and (max(v1_lower) >= min(v2_lower))

    def fails(self, dstore=None):
        """Fail if any of set1's lower bound > any of set2's lower bound."""
        return SetPrecedence.cant_precede(self.variables[0], self.variables[1], dstore=dstore)

    def is_entailed(self, dstore=None):
        """Entailed if everything that can be in set1 precedes anything that can be in set2."""
        return SetPrecedence.must_precede(self.variables[0], self.variables[1], dstore=dstore)

    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        changed = set()
        state = Constraint.sleeping
        v1 = self.variables[0]
        v1_low = v1.get_lower(dstore=dstore)
        v2 = self.variables[1]
        v2_low = v2.get_lower(dstore=dstore)
        # If the lower bound on v1 is not empty, v2 must be a subset of
        # {min(MAX, max(v1 + 1)), ..., MAX}
        if v1_low:
            v2_up_new = range(min([v1.max, max(v1_low) + 1]), v2.max+1)
            if v2.strengthen_upper(v2_up_new, dstore=dstore,
                                   constraint=(verbosity>1 or v2 in tracevar) and self):
                changed.add(v2)
                return state, changed
        # If the lower bound on v2 is not empty, v1 must be a subset of
        # {0, ..., max(0, min(v2_low) - 1)}
        if v2_low:
            v1_up_new = range(0, max([0, min(v2_low) - 1]) + 1)
            if v1.strengthen_upper(v1_up_new, dstore=dstore,
                                   constraint=(verbosity>1 or v1 in tracevar) and self):
                changed.add(v1)
                return state, changed
        # Remove all elements from v1 >= highest possible element in v2
        v1_up = v1.get_upper(dstore=dstore)
        v2_up = v2.get_upper(dstore=dstore)
        # NOTE(review): this *mutates* v2.max (the variable's recorded maximum)
        # rather than using a local — confirm this side effect is intended.
        v2.max = max(v2_up)
        v1_over = set(itertools.filterfalse(lambda x: x < v2.max, v1_up))
        if v1_over:
            if v1.discard_upper(v1_over, dstore=dstore,
                                constraint=(verbosity>1 or v1 in tracevar) and self):
                changed.add(v1)
                return state, changed
        return state, changed
class Order(Constraint):
    """N int variables, whose values are positions from 0 to n-1."""

    def __init__(self, variables, problem=None, weight=1, record=True):
        Constraint.__init__(self, variables, problem=problem,
                            weight=weight, record=record)
        # Positions run 0 .. n-1 for n variables.
        self.max_value = len(variables) - 1
        self.name = '{0} <...<'.format(self.variables)

    def fails(self, dstore=None):
        """Fail if any determined variable has a value greater than maximum (n-1)
        or if two determined variables have the same value or if any value
        in the range is not in the domain of any variable."""
        det_values = []
        for v in self.variables:
            if v.determined(dstore=dstore, constraint=self) is not False:
                val = v.get_value(dstore=dstore)
                # Determined value is wrapped in a (singleton) collection.
                val = list(val)[0]
                if val > self.max_value or val in det_values:
                    return True
                det_values.append(val)
        # Every position 0..n-1 must be possible for at least one variable.
        for i in range(len(self.variables)):
            found = False
            for v in self.variables:
                if i in v.get_upper(dstore=dstore):
                    found = True
                    break
            if not found:
                return True
        return False

    def is_entailed(self, dstore=None):
        """Entailed if all variables are determined."""
        for v in self.variables:
            # Some variable is not determined
            if v.determined(dstore=dstore, constraint=self) is False:
                return False
        return True

    def infer(self, dstore=None, verbosity=0, tracevar=()):
        """If some value is only in the domain of one variable, determine that
        variable at that value.

        BUG FIX: the tracevar membership test used `vb`, the (stale) inner-loop
        variable, instead of `v`, the variable actually being determined.
        Also: tracevar default changed from a mutable list to a tuple.
        """
        changed = set()
        state = Constraint.sleeping
        for i in range(len(self.variables)):
            v = None
            for vb in self.variables:
                if i in vb.get_upper(dstore=dstore):
                    if v:
                        # Already a variable that has this value
                        v = None
                        break
                    else:
                        v = vb
            if v:
                if v.determine(i, dstore=dstore,
                               constraint=(verbosity>1 or v in tracevar) and self):
                    changed.add(v)
                    return state, changed
        return state, changed
# Selection constraint propagators
class Selection(Constraint):
    """Superclass for most selection constraints.
    mainvar: set domain var or int domain var (set var for primitive propagators)
    seqvars: set domain vars, int domain vars, constant sets, or constant ints
    (set var for primitive propagators)
    selvar: set domain var or int domain var (set var for primitive propagators)
    """

    def __init__(self, mainvar=None, selvar=None, seqvars=None,
                 problem=None, weight=1, record=True):
        Constraint.__init__(self, [mainvar, selvar] + seqvars, problem=problem,
                            weight=weight, record=record)
        self.selvar = selvar
        self.mainvar = mainvar
        self.seqvars = seqvars

    def is_entailed(self, dstore=None):
        """Entailed only if all vars are determined.
        """
        if self.mainvar.determined(dstore=dstore, constraint=self) is not False \
           and self.selvar.determined(dstore=dstore, constraint=self) is not False \
           and all([v.determined(dstore=dstore, constraint=self) is not False for v in self.seqvars]):
            return True
        return False

    def infer(self, dstore=None, verbosity=0, tracevar=None):
        """Some rules are common to all Selection subclasses.

        Returns (state, changed-variables) when one of the common rules
        applies; returns False to tell the calling subclass to proceed with
        its own inference.

        NOTE(review): with verbosity <= 1 the expression `selvar in tracevar`
        is evaluated, so callers must pass a sequence for tracevar (run()
        does); the default None would raise TypeError on a direct call.
        """
        changed = set()
        state = Constraint.sleeping
        seqvars = self.seqvars
        selvar = self.selvar
        mainvar = self.mainvar
        # If there is only one seqvar, then the main var is constrained to be that value
        # and the selection var has to be {0} or 0
        if len(seqvars) == 1:
            # since there's only one seq var to select, the selection variable has to
            # be {0} or 0
            if selvar.determine(0, dstore=dstore,
                                constraint=(verbosity>1 or selvar in tracevar) and self):
                changed.add(selvar)
            seqvar = seqvars[0]
            if seqvar.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
                if mainvar.determine(seqvar.get_value(dstore=dstore), dstore=dstore,
                                     constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                state = Constraint.entailed
            else:
                # Seqvar not yet determined: main var is at least bounded by it.
                if mainvar.strengthen_lower(seqvar.get_lower(dstore=dstore), dstore=dstore,
                                            constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                if mainvar.strengthen_upper(seqvar.get_upper(dstore=dstore), dstore=dstore,
                                            constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
            ## if mainvar.determined(dstore=dstore, verbosity=verbosity) is not False:
            ##     state = Constraint.entailed
            if changed:
                if verbosity > 1:
                    print(' Variables {} changed'.format(changed))
            return state, changed
        # If all of the seqvars are equal to one another and determined and the selection variable must
        # be non-empty, then the main var can be determined (as long as the seqvar value is in its domain)
        if all([v.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False for v in seqvars]) and \
           selvar.get_lower_card(dstore=dstore) > 0 and seqvars[0].all_equal(seqvars[1:], dstore=dstore):
            seq0_val = seqvars[0].get_value(dstore=dstore)
            if mainvar.determine(seq0_val, dstore=dstore, constraint=(verbosity>1 or mainvar in tracevar) and self):
                changed.add(mainvar)
                state = Constraint.entailed
            if verbosity > 1 and changed:
                print(' Variables {} changed'.format(changed))
            return state, changed
        # If the upper bound of selvar includes values that are greater than the length of selvars,
        # then those values can be eliminated from the upper bound.
        # (Disabled inference kept for reference.)
        # selupper = selvar.get_upper(dstore=dstore)
        # n_seqvars = len(seqvars)
        # to_remove = {index for index in selupper if index >= n_seqvars}
        # if to_remove:
        #     if selvar.discard_upper(to_remove, dstore=dstore, constraint=(verbosity>1 or mainvar in tracevar) and self):
        #         changed.add(selvar)
        # if changed:
        #     if verbosity > 1:
        #         print(' Variables {} changed'.format(changed))
        #     return state, changed
        # No common rule applied: let the subclass continue its own inference.
        return False

    @staticmethod
    def format_seq(seq):
        """Angle-bracketed print form of a sequence of sets."""
        string = '< '
        for i, elem in enumerate(seq):
            if i != 0:
                string += ', '
            if elem == set():
                string += '{}'
            else:
                string += elem.__repr__()
        return string + ' >'

    @staticmethod
    def format_list(seq):
        """Square-bracketed print form of a sequence of sets."""
        string = '['
        for i, elem in enumerate(seq):
            if i != 0:
                string += ', '
            if elem == set():
                string += '{}'
            else:
                string += elem.__repr__()
        return string + ']'
class UnionSelection(Selection):
    '''All variables can be set vars; seq vars can also be int vars.
    Select the union of the selected sets: mainvar is constrained to equal
    the union of the seqvars whose indices appear in selvar.'''
    def __init__(self, mainvar=None, selvar=None, seqvars=None, problem=None, weight=1,
                 maxset=None, record=True):
        """mainvar: the result set variable.
        selvar: set variable over indices into seqvars.
        seqvars: candidate set (or int) variables.
        maxset: cap on the selectable value set (defaults to ALL).
        """
        Selection.__init__(self, mainvar=mainvar, selvar=selvar, seqvars=seqvars,
                           problem=problem, weight=weight, record=record)
        self.maxset = maxset or ALL
        self.name = '{0} = U{1} [{2}]'.format(self.mainvar, self.format_seq(self.seqvars), self.selvar)
    def sel_lower(self, dstore=None):
        """Values that must be selected: the union of the lower bounds of the
        seq vars indexed by the lower bound of selvar.  Returns False when the
        selection lower bound indexes past the end of seqvars."""
        seq_len = len(self.seqvars)
        selvar_lower = self.selvar.get_lower(dstore=dstore)
        if any([i >= seq_len for i in selvar_lower]):
            return False
        res = set()
        for i in selvar_lower:
            if i < len(self.seqvars):
                res.update(self.seqvars[i].get_lower(dstore=dstore))
        return res
    def fails(self, dstore=None):
        """Fail
        (1) if the lower bound of sel var has indices beyond the length of seq vars
        (2) if the upper bound of mainvar excludes a value that must be in it
        (3) if the lower bound of mainvar includes values that cannot be in the union
        of selected seq vars (upper bounds of seq vars selected by upper bound of
        sel var)
        """
        sel_low = self.sel_lower(dstore=dstore)
        if sel_low is False:
            # Out-of-range selection index: should arguably fail outright, but
            # treat as "nothing forced" and let the other checks decide
            sel_low = set()
        # (2) required values that mainvar's upper bound cannot accommodate
        if len(sel_low - self.mainvar.get_upper(dstore=dstore)) > 0:
            return True
        # (3) If the values that must be included in mainvar include values that are
        # excluded from those that can be selected, fail
        mainlower = self.mainvar.get_lower(dstore=dstore)
        selupper = self.selvar.get_upper(dstore=dstore)
        maxsel = set()
        for index, seq in enumerate(self.seqvars):
            if index in selupper:
                sequpper = seq.get_upper(dstore=dstore)
                maxsel.update(sequpper)
        if mainlower - maxsel:
            return True
        return False
    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        """Propagate between selvar, seqvars, and mainvar.
        Returns a (state, changed-variables) pair, returning as soon as any
        single inference changes a variable."""
        seqvars = self.seqvars
        selvar = self.selvar
        mainvar = self.mainvar
        changed = set()
        state = Constraint.sleeping
        # Generic Selection-level inferences first; a non-None result is final
        sel_infer = Selection.infer(self, dstore=dstore, verbosity=verbosity, tracevar=tracevar)
        if sel_infer:
            return sel_infer
        ## Some variable(s) determined
        # Selection variable
        if selvar.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
            # If the selection var is determined, check whether the indexed sequence vars
            # are also
            all_determined = True
            selval = selvar.get_value(dstore=dstore)
            selseqs = [seqvars[index] for index in selval if index < len(seqvars)]
            result = set()
            for seq in selseqs:
                if seq.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
                    result.update(seq.get_lower(dstore=dstore))
                else:
                    all_determined = False
            if all_determined:
                # Everything selected is determined, so determine the main var
                if mainvar.determine(result, dstore=dstore,
                                     constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                state = Constraint.entailed
                if verbosity > 1 and changed:
                    print('  Variables {} changed'.format(changed))
                return state, changed
            # Also check whether the main var is determined, in which case the seq vars
            # can possibly be constrained
            if mainvar.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
                mainvalue = mainvar.get_value(dstore=dstore)
                # Remove any values from upper bounds that are not in main var
                for seq in selseqs:
                    seq_up = seq.get_upper(dstore=dstore)
                    # unused_up is everything in seq's upper bound that's not in mainvalue
                    unused_up = seq_up - mainvalue
                    if unused_up:
                        if len(seq_up - unused_up) < seq.get_lower_card(dstore=dstore):
                            # Discarding would shrink seq below its lower cardinality: fail
                            if verbosity:
                                if seq in tracevar:
                                    s = '  {} attempting to discard {} from upper bound of {}, making it too small'
                                    print(s.format(self, unused_up, seq))
                                print('{} failed'.format(self))
                            return Constraint.failed, set()
                        if seq.discard_upper(unused_up, dstore=dstore,
                                             constraint=(verbosity>1 or seq in tracevar) and self):
                            changed.add(seq)
                return state, changed
            # Even if seqvars are not determined, we may be able to constrain mainvar (as long as it's not
            # already determined)
            else:
                # NOTE(review): assumes selseqs is non-empty here -- max() would raise otherwise
                main_lowcard = max([seq.get_lower_card(dstore=dstore) for seq in selseqs])
                if mainvar.strengthen_lower_card(main_lowcard, dstore=dstore,
                                                 constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                    return state, changed
                # mainvar can contain at most the union of what the selected seqs can contain
                seq_uppers = [seq.get_upper(dstore=dstore) for seq in selseqs]
                seq_up_union = set().union(*seq_uppers)
                if mainvar.strengthen_upper(seq_up_union, dstore=dstore,
                                            reduce=True,
                                            constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                    return state, changed
                if mainvar.strengthen_upper_card(len(seq_up_union), dstore=dstore,
                                                 constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                    return state, changed
                # mainvar must contain at least the union of what the selected seqs must contain
                seq_lowers = [seq.get_lower(dstore=dstore) for seq in selseqs]
                if mainvar.strengthen_lower(set().union(*seq_lowers), dstore=dstore,
                                            constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                    return state, changed
        # Main variable not yet determined: constrain it from the seq vars
        if mainvar.determined(dstore=dstore, verbosity=verbosity, constraint=self) is False:
            # The main variable must be a subset of the union of the upper bounds of all
            # sequence variables indexed by the upper bound of the selection variable.
            selupper = selvar.get_upper(dstore=dstore)
            for j in selupper:
                if j >= len(seqvars):
                    print(self, 'seqvars', seqvars, 'too short for', selupper, 'of variable', selvar)
            seq_uppers = [seqvars[j].get_upper(dstore=dstore) for j in selupper if j < len(seqvars)]
            if mainvar.strengthen_upper(set().union(*seq_uppers), dstore=dstore,
                                        reduce=True,
                                        constraint=(verbosity>1 or mainvar in tracevar) and self):
                changed.add(mainvar)
                return state, changed
            # The main variable must be a superset of the union of the lower bounds of all
            # sequence variables indexed by the lower bound of the selection variable.
            seq_lowers = [seqvars[j].get_lower(dstore=dstore) for j in selvar.get_lower(dstore=dstore)]
            if mainvar.strengthen_lower(set().union(*seq_lowers), dstore=dstore,
                                        constraint=(verbosity>1 or mainvar in tracevar) and self):
                changed.add(mainvar)
                return state, changed
        ## Neither selection variable nor main variable determined
        # If the lower bound of some seqvar is not a subset of mainvar's upperbound,
        # then exclude its index from selection var (remove it from the selection var's
        # upper bound)
        for j in selvar.get_upper(dstore=dstore).copy():
            # Consider only indices for seq vars that are in the upper bound of selection variable
            if j >= len(seqvars):
                continue
            seqvar = seqvars[j]
            seqlower = seqvar.get_lower(dstore=dstore)
            mainupper = mainvar.get_upper(dstore=dstore)
            if not seqlower <= mainupper:
                # If pattern is True and seqlower and mainupper unify, it's still OK
                # This should fail if by discarding j, the cardinality of upper has gone below lower card
                if len(selvar.get_upper(dstore=dstore) - {j}) < selvar.get_lower_card(dstore=dstore):
                    if verbosity:
                        if selvar in tracevar:
                            s = '  {} attempting to discard {} from upper bound of {}, making it too small'
                            print(s.format(self, j, selvar))
                        print('{} failed'.format(self))
                    return Constraint.failed, set()
                if selvar.discard_upper(j, dstore=dstore, constraint=(verbosity>1 or selvar in tracevar) and self):
                    changed.add(selvar)
                    return state, changed
        # If excluding any index from selection var's upper bound in figuring the
        # union of upper bounds of indexed sequence variables causes the lower bound
        # of the main variable to contain elements not in the union,
        # then add those elements to the excluded sequence
        # variable and the excluded index to the selection variable's lower bound
        selvar_upper = selvar.get_upper(dstore=dstore)
        for j in selvar_upper:
            if j >= len(seqvars):
                continue
            # Consider only indices in the upper bound of selection variable
            # Exclude j
            indices = selvar_upper - {j}
            # Get the union of the upper bounds of the indexed sequence variables
            seqvar_union = set().union(*[seqvars[i].get_upper(dstore=dstore) for i in indices if i < len(seqvars)])
            # Does the lower bound of the main variable have any elements not in the union?
            main_union_diff = mainvar.get_lower(dstore=dstore) - seqvar_union
            if len(main_union_diff) > 0:
                # Yes; add the difference to the excluded seq var's lower bound
                if seqvars[j].strengthen_lower(main_union_diff, dstore=dstore,
                                               constraint=(verbosity>1 or seqvars[j] in tracevar) and self):
                    changed.add(seqvars[j])
                    return state, changed
                # and add the index to selection var's lower bound
                if selvar.strengthen_lower({j}, dstore=dstore,
                                           constraint=(verbosity>1 or selvar in tracevar) and self):
                    changed.add(selvar)
                    return state, changed
        # For all seq vars in the lower bound of selvar, exclude any elements that are not in the
        # upper bound of mainvar (not in Duchier??)
        selvar_lower = selvar.get_lower(dstore=dstore)
        mainvar_upper = mainvar.get_upper(dstore=dstore)
        seqvar_upper = set().union(*[seqvars[i].get_upper(dstore=dstore) for i in selvar_lower if i < len(seqvars)])
        seq_main_diff = seqvar_upper - mainvar_upper
        if seq_main_diff:
            for j in selvar_lower:
                if j >= len(seqvars):
                    continue
                seq = seqvars[j]
                if seq.discard_upper(seq_main_diff, dstore=dstore,
                                     constraint=(verbosity>1 or seq in tracevar) and self):
                    changed.add(seq)
                    return state, changed
        if verbosity > 1 and changed:
            print('  Variables {} changed'.format(changed))
        return state, changed
class ComplexConstraint(Constraint):
    """Each value of the selection variable (potentially) selects a simple
    constraint; subclasses fill self.constraints with one constraint (or
    None) per selectable index."""
    def __init__(self, selvar=None, selvars=None, othervars=None,
                 problem=None, weight=1, record=True):
        all_vars = [selvar] + selvars + othervars
        Constraint.__init__(self, all_vars, problem=problem, weight=weight,
                            record=record)
        # Meta-selection variable: its values index self.constraints.
        self.selvar = selvar
        # Per-index selection variables used by the indexed constraints.
        self.selvars = selvars
        # Filled in by subclasses: one constraint (or None) per index.
        self.constraints = []
    def fails(self, dstore=None):
        """Fail as soon as any constraint indexed by the lower bound of
        selvar fails."""
        lower = self.selvar.get_lower(dstore=dstore)
        return any(self.constraints[i] and self.constraints[i].fails(dstore=dstore)
                   for i in lower)
    def is_entailed(self, dstore=None):
        """Entailed when every constraint indexed by the upper bound of selvar
        is entailed.  As a side effect, selection variables whose index can no
        longer be selected are dropped from the store's essential
        undetermined-variable set."""
        upper = self.selvar.get_upper(dstore=dstore)
        if any(self.constraints[i] and not self.constraints[i].is_entailed(dstore=dstore)
               for i in upper):
            return False
        for i, sv in enumerate(self.selvars):
            if i not in upper and sv in dstore.ess_undet:
                dstore.ess_undet.remove(sv)
        return True
    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        """Run infer() on every constraint indexed by the lower bound of
        selvar; for constraints indexed only by the upper bound, drop their
        index from selvar when they fail."""
        selvar = self.selvar
        upper = selvar.get_upper(dstore=dstore)
        lower = selvar.get_lower(dstore=dstore)
        for index, constraint in enumerate(self.constraints):
            if not constraint:
                continue
            if index in lower:
                state, changed = constraint.infer(dstore=dstore)
                # Propagate the first change immediately
                if changed:
                    return state, changed
            elif index in upper and constraint.fails(dstore=dstore):
                # A constraint still selectable via the upper bound failed:
                # try to rule its index out
                if selvar.discard_upper(index, dstore=dstore,
                                        constraint=(verbosity>1 or selvar in tracevar) and self):
                    return Constraint.sleeping, {selvar}
        return Constraint.sleeping, set()
class ComplexUnionSelection(ComplexConstraint):
    """Each value of the selection variable (potentially) selects a union
    selection constraint; each of the selection variables (selvars) serves
    as the selection variable of one of those sub-constraints."""
    def __init__(self, selvar=None, mainvars=None, seqvars=None, selvars=None,
                 problem=None, weight=1, record=True):
        ComplexConstraint.__init__(self, selvar, selvars, seqvars + mainvars,
                                   problem=problem, weight=weight, record=record)
        self.seqvars = seqvars
        self.mainvars = mainvars
        self.name = '{} = U{} [{}] [[{}]]'.format(Selection.format_seq(mainvars),
                                                  Selection.format_seq(seqvars),
                                                  Selection.format_seq(selvars),
                                                  selvar)
        # One UnionSelection per (selvar, mainvar) pair, or None when the
        # position has nothing selectable.
        for sel, main in zip(selvars, mainvars):
            if sel.get_upper():
                # record=False: don't register the sub-constraint in its variables
                self.constraints.append(UnionSelection(main, sel, seqvars,
                                                       record=False, weight=1, maxset=None))
            else:
                self.constraints.append(None)
class ComplexSetConvexity(ComplexConstraint):
    """Each value of the selection variable (potentially) selects a set
    convexity constraint over one of the convexity variables."""
    def __init__(self, selvar=None, convexvars=None, problem=None, weight=1, record=True):
        ComplexConstraint.__init__(self, selvar, selvars=convexvars, othervars=[],
                                   problem=problem, weight=weight, record=record)
        self.convexvars = convexvars
        self.name = '{} <> [{}]'.format(Selection.format_seq(self.convexvars), self.selvar)
        # One SetConvexity per convexity variable; record=False keeps the
        # sub-constraints out of the variables' own constraint lists.
        self.constraints.extend(SetConvexity(cv, weight=weight, record=False)
                                for cv in convexvars)
class IntersectionSelection(Selection):
    '''All variables are set vars. Select the intersection of the selected sets:
    mainvar is constrained to equal the intersection of the seqvars whose
    indices appear in selvar.'''
    def __init__(self, mainvar=None, selvar=None, seqvars=None, problem=None, weight=1,
                 record=True):
        Selection.__init__(self, mainvar=mainvar, selvar=selvar, seqvars=seqvars,
                           problem=problem, weight=weight, record=record)
        self.name = '{0} = ^{1} [{2}]'.format(self.mainvar, self.format_seq(self.seqvars), self.selvar)
    def sel_upper(self, dstore=None):
        """Upper bound on values that *can* be selected: the intersection of
        the upper bounds of the seq vars indexed by the lower bound of selvar.
        Returns True when nothing can be concluded (empty selection lower
        bound) and False when an index is out of range."""
        seq_len = len(self.seqvars)
        selvar_lower = self.selvar.get_lower(dstore=dstore)
        if not selvar_lower:
            # There's nothing we can say about what must be selected
            return True
        if any([i >= seq_len for i in selvar_lower]):
            return False
        selvar_lower = list(selvar_lower)
        res = self.seqvars[selvar_lower[0]].get_upper(dstore=dstore)
        for i in selvar_lower[1:]:
            res = res & self.seqvars[i].get_upper(dstore=dstore)
        return res
    def fails(self, dstore=None):
        """Fail if the lower bound of sel var has indices beyond the length of seq vars
        or if the lower bound of mainvar is a superset of the values that can be included."""
        sel_upper = self.sel_upper(dstore=dstore)
        if sel_upper is False:
            return True
        if sel_upper is True:
            return False
        if sel_upper < self.mainvar.get_lower(dstore=dstore):
            # Lower bound of mainvar includes values that can't be selected
            return True
        return False
    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        """Propagate between selvar, seqvars, and mainvar; return a
        (state, changed-variables) pair.
        tracevar now defaults to [] (was None, which raised TypeError on the
        'var in tracevar' tests below whenever verbosity <= 1), matching the
        other constraints in this module."""
        seqvars = self.seqvars
        selvar = self.selvar
        mainvar = self.mainvar
        changed = set()
        state = Constraint.sleeping
        # Generic Selection-level inferences first; a non-None result is final
        sel_infer = Selection.infer(self, dstore=dstore, verbosity=verbosity, tracevar=tracevar)
        if sel_infer:
            return sel_infer
        if selvar.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
            # If the selection var is determined, check whether the indexed sequence vars
            # are also
            all_determined = True
            selval = selvar.get_value(dstore=dstore)
            to_intersect = []
            for index in selval:
                seq = seqvars[index]
                if seq.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
                    # Intersect result with lower bound of seq
                    to_intersect.append(seq.get_lower(dstore=dstore))
                else:
                    all_determined = False
            if all_determined:
                # Intersect the sets found in lower bounds
                if to_intersect:
                    inters = to_intersect[0].intersection(*to_intersect[1:])
                else:
                    inters = set()
                # If so, determine the main var using the accumulated intersection
                if mainvar.determine(inters, dstore=dstore, constraint=(verbosity>1 or mainvar in tracevar) and self):
                    changed.add(mainvar)
                state = Constraint.entailed
                if verbosity > 1 and changed:
                    print('  Variables {} changed'.format(changed))
                return state, changed
            # Also check whether the main var is determined, in which case the seq vars
            # can possibly be constrained
            if mainvar.determined(dstore=dstore, verbosity=verbosity, constraint=self) is not False:
                mainvalue = mainvar.get_value(dstore=dstore)
                # Selected seq vars
                selseqs = [seqvars[i] for i in selval]
                # Lower bounds of selected seq vars
                seqlower = list([seq.get_lower(dstore=dstore) for seq in selseqs])
                # Upper bounds of selected seq vars
                sequpper = [seq.get_upper(dstore=dstore) for seq in selseqs]
                # Intersection of lower bounds
                seqinters = seqlower[0].intersection(*seqlower[1:])
                # Unaccounted for elements in main value; these have to appear in all seq vars
                unaccounted = mainvalue - seqinters
                for seq in selseqs:
                    if seq.strengthen_lower(unaccounted, dstore=dstore,
                                            constraint=(verbosity>1 or seq in tracevar) and self):
                        changed.add(seq)
        # The main variable must be a superset of the intersection of the lower bounds of all
        # sequence variables indexed by the upper bound of the selection variable.
        seq_lowers = list([seqvars[j].get_lower(dstore=dstore) for j in selvar.get_upper(dstore=dstore)])
        if mainvar.strengthen_lower(seq_lowers[0].intersection(*seq_lowers[1:]), dstore=dstore,
                                    constraint=(verbosity>1 or mainvar in tracevar) and self):
            changed.add(mainvar)
        sellower = selvar.get_lower(dstore=dstore)
        if len(sellower) > 0:
            # The main variable must be a subset of the intersection of the upper bounds of all
            # sequence variables indexed by the lower bound of the selection variable.
            seq_uppers = list([seqvars[j].get_upper(dstore=dstore) for j in sellower])
            if mainvar.strengthen_upper(seq_uppers[0].intersection(*seq_uppers[1:]), dstore=dstore,
                                        constraint=(verbosity>1 or mainvar in tracevar) and self):
                changed.add(mainvar)
        # If the upper bound of some seqvar is not a superset of mainvar's lower bound,
        # then exclude its index from selection var (remove it from the selection var's
        # upper bound)
        for j in selvar.get_upper(dstore=dstore).copy():
            # Consider only indices that are in the upper bound of selection variable
            seqvar = seqvars[j]
            if not seqvar.get_upper(dstore=dstore) >= mainvar.get_lower(dstore=dstore):
                if selvar.discard_upper(j, dstore=dstore, constraint=(verbosity>1 or selvar in tracevar) and self):
                    changed.add(selvar)
        # If excluding any index from selection var's LOWER bound in figuring the
        # INTERSECTION of LOWER bounds of indexed sequence variables causes INTERSECTION to
        # contain elements not in the UPPER bound of the main variable,
        # then EXCLUDE those elements from the upper bound of the excluded seq var
        # and add the excluded index to the selection variable's lower bound
        for j in sellower:
            # Consider only indices in the lower bound of selection variable
            # Exclude j
            indices = sellower - {j}
            # Get the intersection of the lower bounds of the indexed sequence variables
            seq_lowers = list([seqvars[i].get_lower(dstore=dstore) for i in indices])
            if not seq_lowers:
                continue
            seqvar_inters = seq_lowers[0].intersection(*seq_lowers[1:])
            # Does this intersection have any values not in the upper bound of the main var
            inters_main_diff = seqvar_inters - mainvar.get_upper(dstore=dstore)
            if len(inters_main_diff) > 0:
                # Yes; exclude the values in the intersection from the excluded seq var's upper bound
                for val in inters_main_diff:
                    if seqvars[j].discard_upper(val, dstore=dstore,
                                                constraint=(verbosity>1 or seqvars[j] in tracevar) and self):
                        changed.add(seqvars[j])
                # and add the index to selection var's lower bound
                if selvar.strengthen_lower({j}, dstore=dstore, constraint=(verbosity>1 or selvar in tracevar) and self):
                    changed.add(selvar)
        if verbosity > 1 and changed:
            print('  Variables {} changed'.format(changed))
        return state, changed
class AgrSelection(Constraint):
    """Selection variable is a determined set of tuples of 3 or more elements:
    (index1, index2, (feat1, feat2), ...)
    where (feat1, feat2) means feat1 for the element with index1 must agree with
    the feat2 for the element with index2 (indices in index space 1).
    Each of the sequence variables maps indices in the selection variable (index space 1)
    onto another set of indices (index space 2 gnode->snode in Hiiktuu).
    Each of the feature variables is a list of features objects or dicts associated
    with an element in index space 2.
    """
    def __init__(self, featvars=None, selvar=None, seqvars=None, problem=None, weight=1,
                 record=True):
        Constraint.__init__(self, featvars + [selvar] + seqvars, problem=problem,
                            weight=weight, record=record)
        self.featvars = featvars
        self.selvar = selvar
        self.seqvars = seqvars
        self.name = '[{0}] = <{1}> {2}'.format(self.featvars, self.seqvars, AgrSelection.selvar_string(selvar.get_value()))
    @staticmethod
    def selvar_string(value):
        """Readable rendering of a set of agreement tuples:
        [{(i0,i1):f0=f1,...};...]."""
        constraints = []
        for attribs in value:
            s = "{{({},{}):".format(attribs[0], attribs[1])
            agrs = []
            for f1, f2 in attribs[2:]:
                agrs.append("{}={}".format(f1, f2))
            s += ','.join(agrs) + "}"
            constraints.append(s)
        return '[' + ';'.join(constraints) + ']'
    def fail_featvars(self, fv0, fv1, agr, dstore=None):
        """Do feat vars fv0 and fv1 fail to satisfy agr constraints?
        True when too few features in the upper bounds can agree to satisfy
        either variable's lower cardinality."""
        fv0upper = fv0.get_upper(dstore=dstore)
        fv1upper = fv1.get_upper(dstore=dstore)
        fv0lowcard = fv0.get_lower_card(dstore=dstore)
        fv1lowcard = fv1.get_lower_card(dstore=dstore)
        n_agr0, n_agr1 = Features.n_agree(fv0upper, fv1upper, agr)
        return n_agr0 < fv0lowcard or n_agr1 < fv1lowcard
    def fails(self, dstore=None):
        """Fail if any combinations in the lower bounds of any pairs of seqvars indexed
        in selvar index pairs of featvars which don't have enough agreeing features in their
        upper bounds."""
        for attribs in self.selvar.get_lower(dstore=dstore):
            i0, i1, agr = attribs[0], attribs[1], attribs[2:]
            seq0, seq1 = self.seqvars[i0], self.seqvars[i1]
            seq0lower = seq0.get_lower(dstore=dstore)
            seq1lower = seq1.get_lower(dstore=dstore)
            for s0 in seq0lower:
                for s1 in seq1lower:
                    fv0, fv1 = self.featvars[s0], self.featvars[s1]
                    if self.fail_featvars(fv0, fv1, agr, dstore=dstore):
                        return True
        return False
    def is_entailed(self, dstore=None):
        """Entailed if all combinations in the upper bounds of seqvars indexed in
        in selvar index pairs of featvars whose upper bounds agree on the relevant
        features."""
        for attribs in self.selvar.get_upper(dstore=dstore):
            i0, i1, agr = attribs[0], attribs[1], attribs[2:]
            seq0, seq1 = self.seqvars[i0], self.seqvars[i1]
            seq0upper = seq0.get_upper(dstore=dstore)
            seq1upper = seq1.get_upper(dstore=dstore)
            for s0 in seq0upper:
                for s1 in seq1upper:
                    feat0, feat1 = self.featvars[s0], self.featvars[s1]
                    for f0 in feat0.get_upper(dstore=dstore):
                        for f1 in feat1.get_upper(dstore=dstore):
                            if not f0.agrees(f1, agr):
                                return False
        return True
    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        """Remove feats from featvars if they have to agree because of lower bound
        of seqvars. Remove indices from seqvars if the indexed featvars can't agree.
        Returns as soon as any single inference changes a variable."""
        changed = set()
        state = Constraint.sleeping
        for attribs in self.selvar.get_lower(dstore=dstore):
            # Try to eliminate features from upper bounds of featvars
            # THERE IS A MUCH MORE EFFICIENT WAY TO DO THIS
            i0, i1, agr = attribs[0], attribs[1], attribs[2:]
            seq0, seq1 = self.seqvars[i0], self.seqvars[i1]
            seq0lower = seq0.get_lower(dstore=dstore)
            seq1lower = seq1.get_lower(dstore=dstore)
            for s0 in seq0lower:
                for s1 in seq1lower:
                    feat0, feat1 = self.featvars[s0], self.featvars[s1]
                    feat0undec = feat0.get_undecided(dstore=dstore)
                    feat1lower = feat1.get_lower(dstore=dstore)
                    # Undecided features of feat0 that agree with nothing required in feat1
                    failfeat0 = Features.agree_with_none1(feat0undec, feat1lower, agr)
                    if failfeat0:
                        if feat0.discard_upper(failfeat0, dstore=dstore,
                                               constraint=(verbosity>1 or feat0 in tracevar) and self):
                            changed.add(feat0)
                            return state, changed
                    feat1undec = feat1.get_undecided(dstore=dstore)
                    feat0lower = feat0.get_lower(dstore=dstore)
                    # Undecided features of feat1 that agree with nothing required in feat0
                    failfeat1 = Features.agree_with_none2(feat0lower, feat1undec, agr)
                    if failfeat1:
                        if feat1.discard_upper(failfeat1, dstore=dstore,
                                               constraint=(verbosity>1 or feat1 in tracevar) and self):
                            changed.add(feat1)
                            return state, changed
            # Try to eliminate indices from upper bounds of seqvars
            seq0upper = seq0.get_undecided(dstore=dstore)
            seq1lower = seq1.get_lower(dstore=dstore)
            for s0 in seq0upper:
                for s1 in seq1lower:
                    feat0, feat1 = self.featvars[s0], self.featvars[s1]
                    if self.fail_featvars(feat0, feat1, agr, dstore=dstore):
                        # Fixed: the trace test previously checked feat0, but
                        # seq0 is the variable being modified here
                        if seq0.discard_upper(s0, dstore=dstore,
                                              constraint=(verbosity>1 or seq0 in tracevar) and self):
                            changed.add(seq0)
                            return state, changed
            seq0lower = seq0.get_lower(dstore=dstore)
            seq1upper = seq1.get_undecided(dstore=dstore)
            for s0 in seq0lower:
                for s1 in seq1upper:
                    feat0, feat1 = self.featvars[s0], self.featvars[s1]
                    if self.fail_featvars(feat0, feat1, agr, dstore=dstore):
                        # Fixed: the trace test previously checked feat0, but
                        # seq1 is the variable being modified here
                        if seq1.discard_upper(s1, dstore=dstore,
                                              constraint=(verbosity>1 or seq1 in tracevar) and self):
                            changed.add(seq1)
                            return state, changed
        return state, changed
class ComplexAgrSelection(ComplexConstraint):
    """Generalize AgrSelection to multiple values for the selection variables
    (selvars), which are selected by a meta-selection variable (selvar).
    Works similarly to ComplexUnionSelection."""
    def __init__(self, selvar=None, featvars=None, selvars=None, seqvars=None,
                 problem=None, weight=1, record=True):
        ComplexConstraint.__init__(self, selvar, selvars, seqvars + featvars,
                                   problem=problem, weight=weight, record=record)
        self.featvars = featvars
        self.selvars = selvars
        self.seqvars = seqvars
        sel_strings = ';;'.join(AgrSelection.selvar_string(v.get_value()) for v in selvars)
        self.name = '[{}] = <{}> [{}] [[{}]]'.format(featvars, seqvars,
                                                     sel_strings,
                                                     selvar)
        # Each of the selvars is a determined set variable containing a set of
        # triple+ agreement constraints; build one AgrSelection per non-empty one.
        for sel in selvars:
            if sel.get_upper():
                # record=False keeps the sub-constraint out of its variables
                self.constraints.append(AgrSelection(featvars, sel, seqvars, record=False, weight=1))
            else:
                # No constraint necessary for this selvar position
                self.constraints.append(None)
class PrecedenceSelection(Constraint):
    """
    PrecedenceSelection is different enough from UnionSelection and IntersectionSelection
    in that it doesn't inherit from the Selection class.
    posvars: set vars consisting of int positions (determined for analysis, not for generation)
    selvar: a set var consisting of pairs of indices within posvars, each
    pair specifying a precedence constraint between the corresponding sets
    """
    def __init__(self, selvar=None, posvars=None, problem=None,
                 weight=1, maxset=None, record=True):
        Constraint.__init__(self, [selvar] + posvars, problem=problem,
                            weight=weight, record=record)
        self.selvar = selvar
        self.posvars = posvars
        # Maximum set of tuples for the particular problem (normally depends on number of arc
        # labels for LP dimension)
        self.maxset = maxset or ALL
        self.name = '<< {} [{}]'.format(Selection.format_seq(self.posvars), self.selvar)
    def is_entailed(self, dstore=None):
        """Entailed if all variables are determined or
        if all pairs of indices that *could* be included correspond to
        constraints that are already satisfied.
        """
        if self.selvar.determined(dstore=dstore, constraint=self) is not False \
           and all([v.determined(dstore=dstore, constraint=self) is not False for v in self.posvars]):
            return True
        selupper = self.selvar.get_upper(dstore=dstore)
        for first, second in selupper:
            var1 = self.posvars[first]
            var2 = self.posvars[second]
            if not SetPrecedence.must_precede(var1, var2, dstore=dstore):
                return False
        return True
    def fails(self, dstore=None):
        """Fail if the lower bound of selvar includes pairs representing variables
        that necessarily violate the precedence constraint."""
        sellower = self.selvar.get_lower(dstore=dstore)
        for first, second in sellower:
            # elements in first variable must precede those in second
            # as in SetPrecedence
            var1 = self.posvars[first]
            var2 = self.posvars[second]
            if SetPrecedence.cant_precede(var1, var2, dstore=dstore):
                return True
        return False
    def infer(self, dstore=None, verbosity=0, tracevar=[]):
        """Prune selvar pairs whose precedence can no longer hold, then prune
        the position variables of every pair that must hold.  (The unused
        strengthen_sellow accumulator in the original was removed.)"""
        changed = set()
        state = Constraint.sleeping
        ## Constrain the selection variable based on the position variables.
        # For each pair of indices in the sel var's upper bound, check to see
        # whether the corresponding precedence constraint can't succeed
        discard_selup = set()
        for first, second in self.selvar.get_upper(dstore=dstore):
            var1 = self.posvars[first]
            var2 = self.posvars[second]
            if SetPrecedence.cant_precede(var1, var2, dstore=dstore):
                discard_selup.add((first, second))
        if discard_selup:
            if self.selvar.discard_upper(discard_selup, dstore=dstore,
                                         constraint=(verbosity>1 or self.selvar in tracevar) and self):
                changed.add(self.selvar)
                return state, changed
        ## Constrain the position variables based on the selection variable.
        # For each pair of indices in the sel var's lower bound, check to see
        # whether indices can be excluded from the upper bounds of the corresponding
        # position variables.
        for first, second in self.selvar.get_lower(dstore=dstore):
            v1 = self.posvars[first]
            v2 = self.posvars[second]
            v1_low = v1.get_lower(dstore=dstore)
            v2_low = v2.get_lower(dstore=dstore)
            v1_up = v1.get_upper(dstore=dstore)
            v2_up = v2.get_upper(dstore=dstore)
            # If the lower bound on v1 is not empty, v2 must be a subset of
            # {min(MAX, max(v1 + 1)), ..., MAX}
            if v1_low and v1_up and v2_up:
                v2_up_new = range(min([max(v1_up), max(v1_low) + 1]), max(v2_up)+1)
                if v2.strengthen_upper(v2_up_new, dstore=dstore,
                                       constraint=(verbosity>1 or v2 in tracevar) and self):
                    changed.add(v2)
                    return state, changed
            # If the lower bound on v2 is not empty, v1 must be a subset of
            # {0, ..., max(0, min(v2_low) - 1)}
            if v2_low:
                v1_up_new = range(0, max([0, min(v2_low) - 1]) + 1)
                if v1.strengthen_upper(v1_up_new, dstore=dstore,
                                       constraint=(verbosity>1 or v1 in tracevar) and self):
                    changed.add(v1)
                    return state, changed
            v1_up = v1.get_upper(dstore=dstore)
            v2_up = v2.get_upper(dstore=dstore)
            if v1_up and v2_up and v1.get_lower_card(dstore=dstore) and v2.get_lower_card(dstore=dstore):
                # Eliminate elements of v1 upper that are >= max of v2 upper
                lowenough1 = {x for x in v1_up if x < max(v2_up)}
                if v1.strengthen_upper(lowenough1, dstore=dstore,
                                       constraint=(verbosity>1 or v1 in tracevar) and self):
                    changed.add(v1)
                    return state, changed
                # Eliminate elements of v2 upper that are <= min of v1 upper
                # (v1_up might have changed so assign again and check whether it's empty)
                v1_up = v1.get_upper(dstore=dstore)
                if v1_up:
                    highenough2 = {x for x in v2_up if x > min(v1_up)}
                    if v2.strengthen_upper(highenough2, dstore=dstore,
                                           constraint=(verbosity>1 or v2 in tracevar) and self):
                        changed.add(v2)
                        return state, changed
        return state, changed
### Constraints derived from other Constraints
class DerivedConstraint:
    """Abstract base class for constraints that are equivalent to a
    conjunction of primitive or derived constraints.
    Subclasses override init_constraints() to build self.constraints, the
    list of concrete constraints that jointly implement the relation."""
    def __init__(self, variables, problem=None, weight=1, record=True):
        # Record the construction parameters so init_constraints() (and the
        # constraints it creates) can read them.
        self.variables = variables
        self.problem = problem
        self.weight = weight
        self.record = record
        # Delegate to the subclass to build the equivalent constraint list.
        self.init_constraints()
    def init_constraints(self):
        """Build self.constraints; subclasses must override this."""
        raise NotImplementedError("{} is an abstract class".format(self.__class__.__name__))
    def set_weight(self, weight):
        """Assign weight to this constraint and propagate it to every
        constituent constraint."""
        self.weight = weight
        for constituent in self.constraints:
            constituent.weight = weight
class Inclusion(DerivedConstraint):
    """Set inclusion:
    S1 c= S2, implemented as S1 c= S2 U S3 where S3 is the constant empty set.
    """
    def init_constraints(self):
        # Padding the variable pair with EMPTY turns the inclusion into a
        # single SubsetUnion constraint.
        padding = EMPTY
        self.constraints = [SubsetUnion(self.variables + [padding],
                                        problem=self.problem,
                                        weight=self.weight, record=self.record)]
class Disjoint(DerivedConstraint):
    """S1 || S2 || S3...: every pair of variables must have an empty
    intersection, expressed as 0 >= Si ^ Sj for each pair.
    The variables must not share any elements."""
    def init_constraints(self):
        target = EMPTY
        self.constraints = []
        # EMPTY variables are trivially disjoint from everything; skip them.
        candidates = [v for v in self.variables if v != EMPTY]
        for i, first in enumerate(candidates):
            for second in candidates[i+1:]:
                self.constraints.append(
                    SupersetIntersection([target, first, second],
                                         problem=self.problem,
                                         weight=self.weight, record=self.record))
# A dict of DetSVars for different values so these don't get recreated each time
# Union is instantiated
DetVarD = {n: DetVar('sel' + str(n), set(range(n))) for n in range(1, 20)}
class Union(DerivedConstraint):
    """S0 = S1 U S2 U ... :
    S0 = U<S1,...,Sn>[0,...n-1]."""
    # def __repr__():
    #     ...
    def init_constraints(self):
        # Select *all* of S1..Sn (indices 0..n-1) so the union selection
        # constrains S0 to be exactly their union.
        nvar = len(self.variables) - 1
        # Reuse a cached selection variable when one exists for this arity.
        selvar = DetVarD.get(nvar) or DetVar('sel', set(range(nvar)))
        self.constraints = [UnionSelection(self.variables[0], selvar, self.variables[1:],
                                           problem=self.problem,
                                           weight=self.weight,
                                           record=self.record)]
class Intersection(DerivedConstraint):
    """S0 = S1 ^ S2 ^ ... :
    S0 = ^<S1,...,Sn>[0,...n-1]."""
    def init_constraints(self):
        # Select *all* of S1..Sn so the intersection selection constrains
        # S0 to be exactly their intersection.
        nvar = len(self.variables) - 1
        # Reuse a cached selection variable when one exists for this arity.
        selvar = DetVarD.get(nvar) or DetVar('sel', set(range(nvar)))
        self.constraints = [IntersectionSelection(self.variables[0], selvar, self.variables[1:],
                                                  problem=self.problem,
                                                  weight=self.weight,
                                                  record=self.record)]
class Partition(DerivedConstraint):
    """S0 = S1 U+ ... U+ Sn:
    S0 = S1 U ... U Sn and S1 || ... || Sn.
    The first variable is the union of the remaining variables, which must not share any elements.
    """
    def init_constraints(self):
        # A partition is a union whose parts are pairwise disjoint; reuse the
        # constraint lists built by the two simpler derived constraints.
        self.constraints = Union(self.variables, problem=self.problem, weight=self.weight, record=self.record).constraints
        self.constraints.extend(Disjoint(self.variables[1:], problem=self.problem, weight=self.weight, record=self.record).constraints)
| LowResourceLanguages/hltdi-l3 | hiiktuu/constraint.py | Python | gpl-3.0 | 80,949 |
from soilpy.core.soil import *
import math
class SoilProfile:
    """
    Soil manager class.

    Maintains an ordered list of soil layers and computes the lateral earth
    pressure coefficient for the configured state:
    'a' = active, 'p' = passive, 'n' = neutral/at-rest (default).
    """
    def __init__(self, s_p_c='n'):
        self.soil_layer_list = []
        # One of 'a', 'p', 'n'; selects the formula used by
        # calculate_soil_pressure_coefficient().
        self.soil_pressure_coefficient = s_p_c

    # Add checks, such that the soil layer is under the previous one
    def add_soil_layer(self, s_l):
        """Add a soil layer, splitting it at the water table when needed."""
        # If s_l is a soil layer
        if isinstance(s_l, SoilLayer):
            # If the water level is above the soil layer and this is the first
            # layer added, insert a filler layer from the water level down to
            # the top of the new layer.
            # (Fixed: was `len(...) is 0`; identity comparison with an int is
            # a CPython implementation detail, use == instead.)
            if s_l.water_layer.level > s_l.top_level and len(self.soil_layer_list) == 0:
                self.soil_layer_list.append(SoilLayer(Soil(10, 10, 0), s_l.water_layer, s_l.water_layer.level, s_l.top_level))
            # If the water level lies inside the layer, split it at the water
            # table so each part has a uniform saturation state.
            if s_l.water_layer.level < s_l.top_level and s_l.water_layer.level > s_l.bottom_level:
                # Top (above water table) part
                self.soil_layer_list.append(SoilLayer(s_l.soil, s_l.water_layer, s_l.top_level, s_l.water_layer.level))
                # Bottom (submerged) part
                self.soil_layer_list.append(SoilLayer(s_l.soil, s_l.water_layer, s_l.water_layer.level, s_l.bottom_level))
            else:
                self.soil_layer_list.append(s_l)

    # TODO change the name of the soil angle
    def calculate_soil_pressure_coefficient(self, a):
        """
        Calculate the lateral earth pressure coefficient of the soil.
        :param a: angle of internal friction (phi), in degrees
        :return: pressure coefficient for the configured state
        """
        if self.soil_pressure_coefficient == "a":
            # Rankine active: Ka = tan^2(45 - phi/2)
            return math.pow(math.tan(((45 - (a / 2)) / 180) * math.pi), 2)
        elif self.soil_pressure_coefficient == "p":
            # Rankine passive: Kp = tan^2(45 + phi/2).
            # (Bug fix: this branch previously duplicated the active formula
            # with 45 - phi/2, which is wrong for the passive state.)
            return math.pow(math.tan(((45 + (a / 2)) / 180) * math.pi), 2)
        elif self.soil_pressure_coefficient == "n":
            # Jaky at-rest: K0 = 1 - sin(phi)
            return 1. - math.sin((a / 180) * math.pi)
else:
print("ERROR, soil pressure coefficient is not defined!") | RikHendriks/soilpy | soilpy/core/soil/soilprofile.py | Python | mit | 1,961 |
#!/usr/bin/python3
""" Updates all houses current sensors """
import urllib.request, urllib.error, urllib.parse
import json
import mysql.connector
from mysql.connector import errorcode
import time
# Connection settings; 'xxxx' values are scrubbed placeholders that must be
# filled in before the script can run.
connectionConfig = {
    'user': 'xxxx',
    'password': 'xxxx',
    'host': 'xxxx',
    'database': 'xxxx'
}
try:
    sensor_connection = mysql.connector.connect(**connectionConfig)
except mysql.connector.Error as err:
    # NOTE(review): on a failed connect this only prints; the cursor
    # creation below then raises NameError on sensor_connection -- confirm
    # whether the script should exit here instead.
    if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
        print("Something is wrong with your username or password")
    elif err.errno == errorcode.ER_BAD_DB_ERROR:
        print("Database does not exist")
    else:
        print(err)
# Separate buffered cursors for house listing, edits and existence checks.
cursor_house = sensor_connection.cursor(buffered=True)
cursor_edit = sensor_connection.cursor(buffered=True)
cursor_query = sensor_connection.cursor(buffered=True)
## SQL Queries
##
# All known houses and the server each one reports to.
house_getter_sql = (
    "SELECT houseID, houseServer FROM houseInfo"
)
# Does a (houseID, sensorALTID) pair already exist?
sensor_check_sql = (
    "SELECT houseID, sensorALTID FROM sensorInfo "
    "WHERE houseID=%(houseID)s AND sensorALTID=%(sensorALTID)s "
)
# Insert a newly discovered sensor.
sensor_insert_sql = (
    "INSERT INTO sensorInfo "
    "(houseID, sensorID, sensorALTID, sensorName, sensorCategory, lastUpdate) "
    "VALUES (%(houseID)s, %(sensorID)s, %(sensorALTID)s, "
    "%(sensorName)s, %(sensorCategory)s, %(lastUpdate)s)"
)
# Refresh mutable metadata of an existing sensor, keyed by (houseID, altid).
sensor_update_sql = (
    "UPDATE sensorInfo "
    "SET "
    "sensorID=%(sensorID)s, "
    "sensorName=%(sensorName)s, "
    "lastUpdate=%(lastUpdate)s "
    "WHERE "
    "houseID=%(houseID)s "
    "AND "
    "sensorALTID=%(sensorALTID)s"
)
## Scripts
def sensor_insert():
    """ Inserts newly discovered sensors"""
    # For every known house, fetch the current sensor list from its server
    # and insert any (houseID, altid) pair not yet present in sensorInfo.
    cursor_house.execute(house_getter_sql)
    for (houseID, houseServer) in cursor_house:
        # NOTE(review): 'uuuu/pppp' look like scrubbed path/credential
        # segments -- confirm the real deployment URL before running.
        urlIns = ("http://{}/uuuu/pppp/{}/data_request?id=sdata"
                  .format(houseServer, houseID))
        sensor_ins = urllib.request.urlopen(urlIns).read().decode("utf-8")
        try:
            sensorDataInsert = json.loads(sensor_ins)
        except ValueError:
            # Malformed / non-JSON response: skip this house entirely.
            continue
        for i in sensorDataInsert['devices']:
            cursor_query.execute(sensor_check_sql, {'houseID': houseID,
                                                    'sensorALTID':i['altid']})
            Query = cursor_query.fetchall()
            if not Query:
                # Unknown sensor for this house: insert it with a timestamp.
                cursor_edit.execute(sensor_insert_sql,
                                    {'houseID':houseID,
                                     'sensorID':i['id'],
                                     'sensorALTID':i['altid'],
                                     'sensorName':i['name'],
                                     'sensorCategory':i['category'],
                                     'lastUpdate':time.strftime('%Y-%m-%d %H:%M:%S')})
                sensor_connection.commit()
def sensor_update():
    """ Updates existing sensors for changes"""
    # Refresh id/name/timestamp of every sensor reported by each house server.
    cursor_house.execute(house_getter_sql)
    for (houseID, houseServer) in cursor_house:
        # NOTE(review): this URL still embeds 'refitvera/Smartdata2013' while
        # sensor_insert() uses the scrubbed 'uuuu/pppp' -- confirm which path
        # is correct and whether this one should also be redacted.
        urlUp = ("http://{}/refitvera/Smartdata2013/{}/data_request?id=sdata"
                 .format(houseServer, houseID))
        sensor_upd = urllib.request.urlopen(urlUp).read().decode("utf-8")
        try:
            sensorDataInsert = json.loads(sensor_upd)
        except ValueError:
            # Malformed / non-JSON response: skip this house entirely.
            continue
        for i in sensorDataInsert['devices']:
            # NOTE(review): 'sensorCategory' is supplied but not referenced by
            # sensor_update_sql -- confirm whether it should be updated too.
            cursor_edit.execute(sensor_update_sql,
                                {'houseID':houseID,
                                 'sensorID':i['id'],
                                 'sensorALTID':i['altid'],
                                 'sensorName':i['name'],
                                 'sensorCategory':i['category'],
                                 'lastUpdate':time.strftime('%Y-%m-%d %H:%M:%S')})
            sensor_connection.commit()
if __name__ == '__main__':
    # Discover new sensors first, then refresh metadata for existing ones.
    sensor_insert()
    sensor_update()
    sensor_connection.close()
| David-Murray/REFIT | SensorUpdater.py | Python | mit | 3,971 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast, os
import jsonrpclib
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer, SimpleJSONRPCRequestHandler
from util import json_decode, DaemonThread
from wallet import WalletStorage, Wallet
from commands import known_commands, Commands
from simple_config import SimpleConfig
def lockfile(config):
    """Return the path of the daemon lockfile inside the config directory."""
    daemon_path = os.path.join(config.path, 'daemon')
    return daemon_path
def get_daemon(config):
    """Return a JSON-RPC proxy to a running daemon, or None.

    Reads the (host, port) tuple from the lockfile; returns None when the
    lockfile is missing/unreadable or the daemon does not answer a ping.
    """
    try:
        with open(lockfile(config)) as f:
            host, port = ast.literal_eval(f.read())
    except:
        return
    server = jsonrpclib.Server('http://%s:%d' % (host, port))
    # check if daemon is running
    try:
        server.ping()
        return server
    except:
        pass
class RequestHandler(SimpleJSONRPCRequestHandler):
    """JSON-RPC request handler that also answers CORS preflight requests."""
    def do_OPTIONS(self):
        # CORS preflight: reply 200 with the Allow-* headers added below.
        self.send_response(200)
        self.end_headers()
    def end_headers(self):
        # Allow browser-based clients from any origin to call the RPC server.
        self.send_header("Access-Control-Allow-Headers",
                         "Origin, X-Requested-With, Content-Type, Accept")
        self.send_header("Access-Control-Allow-Origin", "*")
        SimpleJSONRPCRequestHandler.end_headers(self)
class Daemon(DaemonThread):
    """JSON-RPC daemon thread exposing wallet commands over HTTP.

    Registers every known command plus control endpoints (ping / daemon /
    gui / run_cmdline) on a SimpleJSONRPCServer and records its listening
    address in a lockfile so other processes can find it (see get_daemon()).
    """
    def __init__(self, config, network, gui=None):
        DaemonThread.__init__(self)
        self.config = config
        self.network = network
        self.gui = gui
        self.wallets = {}
        # Without a GUI the daemon opens the configured wallet itself;
        # with a GUI, wallet management is left to the GUI windows.
        if gui is None:
            self.wallet = self.load_wallet(config)
        else:
            self.wallet = None
        self.cmd_runner = Commands(self.config, self.wallet, self.network)
        # rpcport 0 lets the OS pick a free port; the actual (host, port)
        # is written to the lockfile below for clients to discover.
        host = config.get('rpchost', 'localhost')
        port = config.get('rpcport', 0)
        self.server = SimpleJSONRPCServer((host, port), requestHandler=RequestHandler, logRequests=False)
        with open(lockfile(config), 'w') as f:
            f.write(repr(self.server.socket.getsockname()))
        # Short timeout so run() can poll is_running() between requests.
        self.server.timeout = 0.1
        for cmdname in known_commands:
            self.server.register_function(getattr(self.cmd_runner, cmdname), cmdname)
        self.server.register_function(self.run_cmdline, 'run_cmdline')
        self.server.register_function(self.ping, 'ping')
        self.server.register_function(self.run_daemon, 'daemon')
        self.server.register_function(self.run_gui, 'gui')
    def ping(self):
        """Liveness probe used by get_daemon()."""
        return True
    def run_daemon(self, config):
        """Handle the 'daemon' RPC: subcommand start / stop / status."""
        sub = config.get('subcommand')
        assert sub in ['start', 'stop', 'status']
        if sub == 'start':
            # Receiving this RPC at all means a daemon is already running.
            response = "Daemon already running"
        elif sub == 'status':
            p = self.network.get_parameters()
            response = {
                'path': self.network.config.path,
                'server': p[0],
                'blockchain_height': self.network.get_local_height(),
                'server_height': self.network.get_server_height(),
                'nodes': self.network.get_interfaces(),
                'connected': self.network.is_connected(),
                'auto_connect': p[4],
                'wallets': dict([ (k, w.is_up_to_date()) for k, w in self.wallets.items()]),
            }
        elif sub == 'stop':
            self.stop()
            response = "Daemon stopped"
        return response
    def run_gui(self, config_options):
        """Handle the 'gui' RPC: open a new window in an attached GUI."""
        config = SimpleConfig(config_options)
        if self.gui:
            if hasattr(self.gui, 'new_window'):
                path = config.get_wallet_path()
                self.gui.new_window(path, config.get('url'))
                response = "ok"
            else:
                response = "error: current GUI does not support multiple windows"
        else:
            response = "Error: Electrum is running in daemon mode. Please stop the daemon first."
        return response
    def load_wallet(self, config):
        """Open (or reuse) the wallet at the configured path.

        New wallets get their synchronization threads started and are
        cached in self.wallets keyed by path.
        """
        path = config.get_wallet_path()
        if path in self.wallets:
            wallet = self.wallets[path]
        else:
            storage = WalletStorage(path)
            wallet = Wallet(storage)
            wallet.start_threads(self.network)
            self.wallets[path] = wallet
        return wallet
    def run_cmdline(self, config_options):
        """Execute a CLI command forwarded over RPC and return its result."""
        password = config_options.get('password')
        config = SimpleConfig(config_options)
        cmdname = config.get('cmd')
        cmd = known_commands[cmdname]
        wallet = self.load_wallet(config) if cmd.requires_wallet else None
        # arguments passed to function
        args = map(lambda x: config.get(x), cmd.params)
        # decode json arguments
        args = map(json_decode, args)
        # options
        # NOTE: relies on Python 2 map() returning lists (+= concatenates).
        args += map(lambda x: config.get(x), cmd.options)
        cmd_runner = Commands(config, wallet, self.network)
        cmd_runner.password = password
        func = getattr(cmd_runner, cmd.name)
        result = func(*args)
        return result
    def run(self):
        """Serve requests until stopped, then remove the lockfile."""
        while self.is_running():
            self.server.handle_request()
        os.unlink(lockfile(self.config))
    def stop(self):
        """Stop all wallet threads and then the daemon thread itself."""
        for k, wallet in self.wallets.items():
            wallet.stop_threads()
        DaemonThread.stop(self)
| ulrichard/electrum | lib/daemon.py | Python | gpl-3.0 | 5,794 |
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.config import cfg
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
# Fallback device naming used when an instance has no root_device_name set.
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
                     'ephemeral0': 'sda2',
                     'root': DEFAULT_ROOT_DEV_NAME,
                     'swap': 'sda3'}
# Fields understood by the legacy block-device-mapping format.
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
                         'virtual_name', 'snapshot_id',
                         'volume_id', 'volume_size', 'no_device',
                         'connection_info'])
# Fields understood by the new-style block-device-mapping format.
bdm_new_fields = set(['source_type', 'destination_type',
                     'guest_format', 'device_type', 'disk_bus', 'boot_index',
                     'device_name', 'delete_on_termination', 'snapshot_id',
                     'volume_id', 'volume_size', 'image_id', 'no_device',
                     'connection_info'])
# Fields that exist only in the database representation.
bdm_db_only_fields = set(['id', 'instance_uuid'])
# Standard soft-delete/timestamp columns inherited from the db model.
bdm_db_inherited_fields = set(['created_at', 'updated_at',
                               'deleted_at', 'deleted'])
# New-format fields that are internal and never accepted from the API.
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
                              'image_id', 'connection_info'])
# The API uses a generic 'uuid' in place of the typed *_id fields.
bdm_new_api_only_fields = set(['uuid'])
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
                      bdm_new_api_only_fields)
class BlockDeviceDict(dict):
    """Represents a Block Device Mapping in Nova."""
    _fields = bdm_new_fields
    _db_only_fields = (bdm_db_only_fields |
                       bdm_db_inherited_fields)
    # Every mapping must at least say where its data comes from.
    _required_fields = set(['source_type'])
    def __init__(self, bdm_dict=None, do_not_default=None, **kwargs):
        """Validate bdm_dict (merged with kwargs) and populate self.

        Fields named in do_not_default are omitted entirely instead of
        being defaulted to None.
        """
        super(BlockDeviceDict, self).__init__()
        bdm_dict = bdm_dict or {}
        bdm_dict.update(kwargs)
        do_not_default = do_not_default or set()
        self._validate(bdm_dict)
        if bdm_dict.get('device_name'):
            bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
        # NOTE (ndipanov): Never default db fields
        self.update(
            dict((field, None)
                 for field in self._fields - do_not_default))
        self.update(list(bdm_dict.iteritems()))
    def _validate(self, bdm_dict):
        """Basic data format validations."""
        dict_fields = set(key for key, _ in bdm_dict.iteritems())
        # Check that there are no bogus fields
        if not (dict_fields <=
                (self._fields | self._db_only_fields)):
            raise exception.InvalidBDMFormat(
                details=_("Some fields are invalid."))
        if bdm_dict.get('no_device'):
            # A 'no_device' entry suppresses a device; nothing more to check.
            return
        # Check that all required fields are there
        if (self._required_fields and
                not ((dict_fields & self._required_fields) ==
                      self._required_fields)):
            raise exception.InvalidBDMFormat(
                details=_("Some required fields are missing"))
        if 'delete_on_termination' in bdm_dict:
            # Accept string booleans ('true'/'false', etc.) from the API.
            bdm_dict['delete_on_termination'] = strutils.bool_from_string(
                bdm_dict['delete_on_termination'])
        if bdm_dict.get('device_name') is not None:
            validate_device_name(bdm_dict['device_name'])
        validate_and_default_volume_size(bdm_dict)
        if bdm_dict.get('boot_index'):
            try:
                bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
            except ValueError:
                raise exception.InvalidBDMFormat(
                    details=_("Boot index is invalid."))
    @classmethod
    def from_legacy(cls, legacy_bdm):
        """Build a new-format BDM from a legacy-format dict.

        Raises InvalidBDMFormat when the legacy dict matches none of the
        recognized shapes (swap/ephemeral, snapshot, volume, no_device).
        """
        copy_over_fields = bdm_legacy_fields & bdm_new_fields
        copy_over_fields |= (bdm_db_only_fields |
                             bdm_db_inherited_fields)
        # NOTE (ndipanov): These fields cannot be computed
        # from legacy bdm, so do not default them
        # to avoid overwriting meaningful values in the db
        non_computable_fields = set(['boot_index', 'disk_bus',
                                     'guest_format', 'device_type'])
        new_bdm = dict((fld, val) for fld, val in legacy_bdm.iteritems()
                       if fld in copy_over_fields)
        virt_name = legacy_bdm.get('virtual_name')
        if is_swap_or_ephemeral(virt_name):
            # swap/ephemeralN become locally-destined blank devices.
            new_bdm['source_type'] = 'blank'
            new_bdm['delete_on_termination'] = True
            new_bdm['destination_type'] = 'local'
            if virt_name == 'swap':
                new_bdm['guest_format'] = 'swap'
            else:
                new_bdm['guest_format'] = CONF.default_ephemeral_format
        elif legacy_bdm.get('snapshot_id'):
            new_bdm['source_type'] = 'snapshot'
            new_bdm['destination_type'] = 'volume'
        elif legacy_bdm.get('volume_id'):
            new_bdm['source_type'] = 'volume'
            new_bdm['destination_type'] = 'volume'
        elif legacy_bdm.get('no_device'):
            # NOTE (ndipanov): Just keep the BDM for now,
            pass
        else:
            raise exception.InvalidBDMFormat(
                details=_("Unrecognized legacy format."))
        return cls(new_bdm, non_computable_fields)
    @classmethod
    def from_api(cls, api_dict):
        """Transform the API format of data to the internally used one.
        Only validate if the source_type field makes sense.
        """
        if not api_dict.get('no_device'):
            source_type = api_dict.get('source_type')
            device_uuid = api_dict.get('uuid')
            if source_type not in ('volume', 'image', 'snapshot', 'blank'):
                raise exception.InvalidBDMFormat(
                    details=_("Invalid source_type field."))
            elif source_type == 'blank' and device_uuid:
                raise exception.InvalidBDMFormat(
                    details=_("Invalid device UUID."))
            elif source_type != 'blank':
                if not device_uuid:
                    raise exception.InvalidBDMFormat(
                        details=_("Missing device UUID."))
                # Rename the generic API 'uuid' to the typed *_id field.
                api_dict[source_type + '_id'] = device_uuid
        api_dict.pop('uuid', None)
        return cls(api_dict)
    def legacy(self):
        """Return this mapping in the legacy dict format.

        Raises InvalidBDMForLegacy for mappings with no legacy
        representation (image source with a local destination).
        """
        copy_over_fields = bdm_legacy_fields - set(['virtual_name'])
        copy_over_fields |= (bdm_db_only_fields |
                             bdm_db_inherited_fields)
        legacy_block_device = dict((field, self.get(field))
            for field in copy_over_fields if field in self)
        source_type = self.get('source_type')
        destination_type = self.get('destination_type')
        no_device = self.get('no_device')
        if source_type == 'blank':
            if self['guest_format'] == 'swap':
                legacy_block_device['virtual_name'] = 'swap'
            else:
                # NOTE (ndipanov): Always label as 0, it is up to
                # the calling routine to re-enumerate them
                legacy_block_device['virtual_name'] = 'ephemeral0'
        elif source_type in ('volume', 'snapshot') or no_device:
            legacy_block_device['virtual_name'] = None
        elif source_type == 'image':
            if destination_type != 'volume':
                # NOTE(ndipanov): Image bdms with local destination
                # have no meaning in the legacy format - raise
                raise exception.InvalidBDMForLegacy()
            legacy_block_device['virtual_name'] = None
        return legacy_block_device
    def get_image_mapping(self):
        """Return the mapping without db- and connection-specific fields,
        suitable for embedding in image metadata."""
        drop_fields = (set(['connection_info', 'device_name']) |
                       self._db_only_fields)
        mapping_dict = dict(self)
        for fld in drop_fields:
            mapping_dict.pop(fld, None)
        return mapping_dict
def is_safe_for_update(block_device_dict):
    """Determine if passed dict is a safe subset for update.

    Safe subset in this case means a safe subset of both legacy
    and new versions of data, that can be passed to an UPDATE query
    without any transformation.
    """
    updatable = bdm_new_fields | bdm_db_inherited_fields | bdm_db_only_fields
    return set(block_device_dict.keys()) <= updatable
def create_image_bdm(image_ref, boot_index=0):
    """Create a block device dict based on the image_ref.
    This is useful in the API layer to keep the compatibility
    with having an image_ref as a field in the instance requests
    """
    bdm_spec = dict(source_type='image',
                    image_id=image_ref,
                    delete_on_termination=True,
                    boot_index=boot_index,
                    device_type='disk',
                    destination_type='local')
    return BlockDeviceDict(bdm_spec)
def snapshot_from_bdm(snapshot_id, template):
    """Create a basic volume snapshot BDM from a given template bdm."""
    snapshot_dict = {'source_type': 'snapshot',
                     'destination_type': 'volume',
                     'snapshot_id': snapshot_id}
    # Carry the device placement hints over from the template mapping.
    for field in ('disk_bus', 'device_type', 'boot_index'):
        snapshot_dict[field] = template.get(field)
    return BlockDeviceDict(snapshot_dict)
def legacy_mapping(block_device_mapping):
    """Transform a list of block devices of an instance back to the
    legacy data format.
    """
    legacy_block_device_mapping = []
    for bdm in block_device_mapping:
        try:
            legacy_block_device = BlockDeviceDict(bdm).legacy()
        except exception.InvalidBDMForLegacy:
            # Mappings with no legacy representation are silently dropped.
            continue
        legacy_block_device_mapping.append(legacy_block_device)
    # Re-enumerate the ephemeral devices
    # NOTE(review): the [:-1] slice assumes a single-digit suffix (legacy()
    # always emits 'ephemeral0'); a name like 'ephemeral10' would be
    # mangled here -- confirm inputs can never carry multi-digit indexes.
    for i, dev in enumerate(dev for dev in legacy_block_device_mapping
                            if dev['virtual_name'] and
                            is_ephemeral(dev['virtual_name'])):
        dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
    return legacy_block_device_mapping
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
                        root_device_name=None, no_root=False):
    """Transform a legacy list of block devices to the new data format.

    Also decides which device is the boot device (boot_index 0) unless
    no_root is set, in which case all mappings get boot_index -1.
    """
    new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
                for legacy_bdm in legacy_block_device_mapping]
    # NOTE (ndipanov): We will not decide which device is root here - we assume
    # that it will be supplied later. This is useful for having the root device
    # as part of the image defined mappings that are already in the v2 format.
    if no_root:
        for bdm in new_bdms:
            bdm['boot_index'] = -1
        return new_bdms
    image_bdm = None
    volume_backed = False
    # Try to assign boot_device
    if not root_device_name and not image_uuid:
        # NOTE (ndipanov): If there is no root_device, pick the first non
        #                  blank one.
        non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
        if non_blank:
            non_blank[0]['boot_index'] = 0
    else:
        # Mark the mapping whose device name matches the root device as
        # bootable; everything else is explicitly non-bootable.
        for bdm in new_bdms:
            if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
                    root_device_name is not None and
                    (strip_dev(bdm.get('device_name')) ==
                     strip_dev(root_device_name))):
                bdm['boot_index'] = 0
                volume_backed = True
            elif not bdm['no_device']:
                bdm['boot_index'] = -1
            else:
                bdm['boot_index'] = None
    # An image-backed instance boots from a synthesized image mapping
    # unless a volume already claimed the boot slot.
    if not volume_backed and image_uuid:
        image_bdm = create_image_bdm(image_uuid, boot_index=0)
    return ([image_bdm] if image_bdm else []) + new_bdms
def properties_root_device_name(properties):
    """Return the root device name recorded in image metadata, or None."""
    # register_image's command line can override <machine>.manifest.xml,
    # so an explicit 'root_device_name' property wins outright.
    if 'root_device_name' in properties:
        return properties['root_device_name']
    # Otherwise take the last 'root' entry from the S3-style mappings
    # (see image_service.s3.s3create()).
    device = None
    for mapping in properties.get('mappings', []):
        if mapping['virtual'] == 'root':
            device = mapping['device']
    return device
def validate_device_name(value):
    """Raise InvalidBDMFormat unless value is a usable device name
    (1-255 characters, no spaces)."""
    try:
        # NOTE (ndipanov): Do not allow empty device names
        #                  until assigning default values
        #                  is supported by nova.compute
        utils.check_string_length(value, 'Device name',
                                  min_length=1, max_length=255)
    except exception.InvalidInput:
        raise exception.InvalidBDMFormat(
            details=_("Device name empty or too long."))
    if ' ' in value:
        raise exception.InvalidBDMFormat(
            details=_("Device name contains spaces."))
def validate_and_default_volume_size(bdm):
    """Coerce a truthy volume_size to a non-negative int, in place.

    Raises InvalidBDMFormat when the value is not a valid integer >= 0.
    """
    if bdm.get('volume_size'):
        try:
            bdm['volume_size'] = utils.validate_integer(
                bdm['volume_size'], 'volume_size', min_value=0)
        except exception.InvalidInput:
            raise exception.InvalidBDMFormat(
                details=_("Invalid volume_size."))
# 'ephemeral0'..'ephemeral9', or a multi-digit index without a leading zero.
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')


def is_ephemeral(device_name):
    """True if device_name is a well-formed ephemeral disk name."""
    return bool(_ephemeral.match(device_name))


def ephemeral_num(ephemeral_name):
    """Return the integer index of an ephemeral disk name."""
    match = _ephemeral.match(ephemeral_name)
    assert match
    return int(match.group(1))


def is_swap_or_ephemeral(device_name):
    """True for 'swap' and any ephemeral disk name."""
    if not device_name:
        return device_name  # preserve falsy passthrough ('' / None)
    return device_name == 'swap' or is_ephemeral(device_name)
def new_format_is_swap(bdm):
    """True if bdm (new-format) describes a local blank swap disk."""
    return (bdm.get('source_type') == 'blank' and
            bdm.get('destination_type') == 'local' and
            bdm.get('guest_format') == 'swap')
def new_format_is_ephemeral(bdm):
    """True if bdm (new-format) describes a local blank non-swap disk."""
    if bdm.get('source_type') != 'blank':
        return False
    if bdm.get('destination_type') != 'local':
        return False
    return bdm.get('guest_format') != 'swap'
def get_root_bdm(bdms):
    """Return the bdm marked as the boot device (boot_index 0), or None.

    Uses the next() builtin with a default instead of the Python-2-only
    generator .next() method, so no StopIteration handling is needed and
    the code works on both Python 2.6+ and Python 3.
    """
    return next((bdm for bdm in bdms if bdm.get('boot_index', -1) == 0), None)
def get_bdms_to_connect(bdms, exclude_root_mapping=False):
    """Will return non-root mappings, when exclude_root_mapping is true.
    Otherwise all mappings will be returned.
    """
    for bdm in bdms:
        # A root mapping has boot_index 0; drop it only when asked to.
        if not exclude_root_mapping or bdm.get('boot_index', -1) != 0:
            yield bdm
def mappings_prepend_dev(mappings):
    """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
    for mapping in mappings:
        if not is_swap_or_ephemeral(mapping['virtual']):
            continue
        if not mapping['device'].startswith('/'):
            mapping['device'] = '/dev/' + mapping['device']
    return mappings
_dev = re.compile(r'^/dev/')


def strip_dev(device_name):
    """Remove a leading '/dev/' (falsy input is returned unchanged)."""
    if not device_name:
        return device_name
    return _dev.sub('', device_name)


def prepend_dev(device_name):
    """Make sure there is a leading '/dev/' (falsy input passes through)."""
    if not device_name:
        return device_name
    return '/dev/' + strip_dev(device_name)


_pref = re.compile(r'^((x?v|s)d)')


def strip_prefix(device_name):
    """Remove both a leading '/dev/' and any 'sd'/'vd'/'xvd' disk prefix."""
    return _pref.sub('', strip_dev(device_name))


_nums = re.compile(r'\d+')


def get_device_letter(device_name):
    """Return just the device letter(s), e.g. '/dev/sda1' -> 'a'."""
    # Digits are dropped so partition names like '/dev/sda1' still map
    # to the disk letter (NOTE(vish) in the original).
    return _nums.sub('', strip_prefix(device_name))
def instance_block_mapping(instance, bdms):
    """Build the EC2-style metadata device mapping for an instance.

    NOTE(review): entries of bdms are accessed via attributes
    (bdm.destination_type), i.e. BlockDeviceMapping objects rather than
    the plain dicts used by the helpers above -- confirm callers.
    """
    root_device_name = instance['root_device_name']
    # NOTE(clayg): remove this when xenapi is setting default_root_device
    if root_device_name is None:
        if driver.compute_driver_matches('xenapi.XenAPIDriver'):
            root_device_name = '/dev/xvda'
        else:
            return _DEFAULT_MAPPINGS
    mappings = {}
    mappings['ami'] = strip_dev(root_device_name)
    mappings['root'] = root_device_name
    default_ephemeral_device = instance.get('default_ephemeral_device')
    if default_ephemeral_device:
        mappings['ephemeral0'] = default_ephemeral_device
    default_swap_device = instance.get('default_swap_device')
    if default_swap_device:
        mappings['swap'] = default_swap_device
    ebs_devices = []
    blanks = []
    # 'ephemeralN', 'swap' and ebs
    for bdm in bdms:
        # ebs volume case
        if bdm.destination_type == 'volume':
            ebs_devices.append(bdm.device_name)
            continue
        if bdm.source_type == 'blank':
            blanks.append(bdm)
    # NOTE(yamahata): I'm not sure how ebs device should be numbered.
    #                 Right now sort by device name for deterministic
    #                 result.
    if ebs_devices:
        ebs_devices.sort()
        for nebs, ebs in enumerate(ebs_devices):
            mappings['ebs%d' % nebs] = ebs
    # Blank devices split into at most one swap plus numbered ephemerals.
    swap = [bdm for bdm in blanks if bdm.guest_format == 'swap']
    if swap:
        mappings['swap'] = swap.pop().device_name
    ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap']
    if ephemerals:
        for num, eph in enumerate(ephemerals):
            mappings['ephemeral%d' % num] = eph.device_name
    return mappings
def match_device(device):
    """Matches device name and returns prefix, suffix."""
    parsed = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
    if parsed is None:
        return None
    return parsed.groups()
def volume_in_mapping(mount_device, block_device_info):
    """True if mount_device is already claimed by any volume, usable swap,
    or ephemeral device in block_device_info (compared without '/dev/')."""
    block_device_list = [strip_dev(vol['mount_device'])
                         for vol in
                         driver.block_device_info_get_mapping(
                             block_device_info)]
    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        block_device_list.append(strip_dev(swap['device_name']))
    block_device_list += [strip_dev(ephemeral['device_name'])
                          for ephemeral in
                          driver.block_device_info_get_ephemerals(
                              block_device_info)]
    LOG.debug("block_device_list %s", block_device_list)
    return strip_dev(mount_device) in block_device_list
def get_bdm_ephemeral_disk_size(block_device_mappings):
    """Total volume_size of all new-format ephemeral disks."""
    total = 0
    for bdm in block_device_mappings:
        if new_format_is_ephemeral(bdm):
            total += bdm.get('volume_size', 0)
    return total
def get_bdm_swap_list(block_device_mappings):
    """Return the new-format bdms that describe swap disks."""
    return list(filter(new_format_is_swap, block_device_mappings))
def get_bdm_local_disk_num(block_device_mappings):
    """Count the bdms whose destination is the local disk."""
    return sum(1 for bdm in block_device_mappings
               if bdm.get('destination_type') == 'local')
| ChinaMassClouds/copenstack-server | openstack/src/nova-2014.2/nova/block_device.py | Python | gpl-2.0 | 19,621 |
import asyncio
import aioredis
def main():
    """Demonstrate a raw aioredis connection: SET a key, GET it back both
    decoded and raw, then close the connection."""
    loop = asyncio.get_event_loop()
    @asyncio.coroutine
    def go():
        # Connection-wide encoding makes execute() return str by default.
        conn = yield from aioredis.create_connection(
            ('localhost', 6379), encoding='utf-8')
        ok = yield from conn.execute('set', 'my-key', 'some value')
        assert ok == 'OK', ok
        str_value = yield from conn.execute('get', 'my-key')
        # encoding=None overrides the connection default -> raw bytes.
        raw_value = yield from conn.execute('get', 'my-key', encoding=None)
        assert str_value == 'some value'
        assert raw_value == b'some value'
        print('str value:', str_value)
        print('raw value:', raw_value)
        # optionally close connection
        conn.close()
    loop.run_until_complete(go())
| iho/aioredis | examples/connection.py | Python | mit | 757 |
#!/usr/bin/env python
# another thinly disguised shell script written in Python
import sys
import os
import glob
import subprocess
# TOPDIR root of RPM build tree typically /usr/src/redhat or /home/xxx/.rpm
#TOPDIR = '/usr/src/redhat'
TOPDIR = os.path.join(os.environ['HOME'], '.rpm')
# where the SVN exe source directory is
#SRCDIR = '/usr/local/src'
SRCDIR = os.path.abspath('../../..')
# get the version/revision
sys.path.insert(0, os.path.join(SRCDIR, 'exe'))
from exe.engine import version
# find the first release that doesn't exist
clrelease = 1
while 1:
    files = glob.glob(os.path.join(TOPDIR, 'RPMS/i386',
        'exe-%s-%d.*.i386.rpm' % (version.version, clrelease)))
    if len(files) == 0:
        break
    clrelease += 1
print "Making version: %s release: %s" % (version.version, clrelease)
# create the source tarball
os.chdir(SRCDIR)
tarball = os.path.join(TOPDIR, 'SOURCES', 'exe-' + version.version + '-source.tgz')
try:
    # NOTE(review): subprocess.call returns the exit status; a negative value
    # means death-by-signal only, so nonzero tar failures fall through
    # silently -- confirm whether `ret != 0` was intended.
    ret = subprocess.call('tar -czf %s --wildcards-match-slash --exclude=.git --exclude="*.svn*" --exclude "*.pyc" --exclude="*.tmp" --exclude="*~" --exclude="dist/*" --exclude="build/*" --exclude="pyxpcom/*" exe' %
                          tarball, shell = True)
    if ret < 0:
        print >>sys.stderr, "Unable to make tarball signal", -ret
        sys.exit(ret)
except OSError, e:
    print >>sys.stderr, "Execution of tar failed:", e
try:
    # Same signal-only check applies to the rpmbuild invocation below.
    ret = subprocess.call('rpmbuild -tb --define="clversion %s" --define="clrelease %s" %s' %
                          (version.version, clrelease, tarball), shell = True)
    if ret < 0:
        print >>sys.stderr, "Unable to run rpmbuild, signal", -ret
        sys.exit(ret)
except OSError, e:
    print >>sys.stderr, "Execution of rpmbuild failed:", e
| skython/eXe | installs/rpm/make.py | Python | gpl-2.0 | 1,747 |
# -*- coding: utf-8 -*-
__author__ = 'Paweł Sołtysiak'
import pandas as pd
import scipy.io.arff as arff
from sklearn import cross_validation
from sklearn.decomposition import PCA
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
# Load the waveform ARFF dataset, split off the class column, project with
# PCA and scatter-plot the first two principal components per class.
waveformData, waveformMeta = arff.loadarff(u'../Datasets/waveform-5000.arff')
df = pd.DataFrame(waveformData)
desc = df.values[:, -1]
df = df.drop('class', axis=1)
pca = PCA()
Y = pca.fit_transform(df.values)
for d in np.unique(desc):
    plt.plot(Y[d == desc, 0], Y[d == desc, 1], '.')
# Same procedure for the congressional-votes dataset (plotted on the same
# axes as the waveform data above).
voteData, voteMeta = arff.loadarff(u'../Datasets/vote.arff')
df = pd.DataFrame(voteData)
desc = df.values[:, -1]
# NOTE(review): DataFrame.replace returns a new frame; the result is printed
# but never assigned, so 'y' values are NOT converted before PCA -- confirm.
print df.replace('y', True)
df = df.drop('Class', axis=1)
pca = PCA()
Y = pca.fit_transform(df.values)
for d in np.unique(desc):
    plt.plot(Y[d == desc, 0], Y[d == desc, 1], '.')
plt.show() | soltys/ZUT_Algorytmy_Eksploracji_Danych | DataVisualization/app.py | Python | mit | 849 |
# Copyright (c) 2019 kamyu. All rights reserved.
#
# Google Code Jam 2016 World Finals - Problem E. Radioactive Islands
# https://code.google.com/codejam/contest/7234486/dashboard#s=p4
#
# Time: O(X/H), X is the const range of x for integral
# , H is the dx parameter for integral
# Space: O(1)
#
# Calculus of Variations: the fastest one
#
from sys import float_info
from math import sqrt
def D(C, x, y):
    """Radiation dose rate at (x, y) from point sources at offsets C on the
    y-axis: the sum of 1/d^2 terms, or +inf when (x, y) sits on a source."""
    total = 0.0
    for center in C:
        dist_sq = x**2 + (y - center)**2
        if dist_sq < float_info.epsilon:
            # Effectively on top of an island: the dose diverges.
            return float("inf")
        total += 1.0 / dist_sq
    return total
# Euler-Lagrange equation for finding minima of F(a, b) = sum(f(x, y, y') * dx)
def fp(C, x, y, yp):
    """Second derivative y'' along the minimal-dose path (Euler-Lagrange).

    With f = s * sqrt(1 + y'^2), where s = 1 + sum(1/d_i^2), equating
    df/dy to d(df/dy')/dx and solving for y'' gives
        y'' = 2 * t * (sx * y' - syp * t) / s,   t = 1 + y'^2,
    with syp = sum((y-c)/d^4) and sx = sum((x + (y-c)*y')/d^4); the algebra
    is spelled out in the original derivation comment this replaces.
    """
    t = 1.0 + yp**2
    s = 1.0
    syp = 0.0
    sx = 0.0
    for center in C:
        dist_sq = x**2 + (y - center)**2
        s += 1.0 / dist_sq
        syp += (y - center) / dist_sq**2
        sx += (x + (y - center) * yp) / dist_sq**2
    return 2.0 * t * (sx * yp - syp * t) / s
# Runge-Kutta methods:
# - https://en.wikipedia.org/wiki/List_of_Runge%E2%80%93Kutta_methods
# RK2 for 2nd-order ODE:
# - https://math.stackexchange.com/questions/1134540/second-order-runge-kutta-method-for-solving-second-order-ode
# RK4 for 2nd-order ODE:
# - https://math.stackexchange.com/questions/2615672/solve-fourth-order-ode-using-fourth-order-runge-kutta-method
# - http://homepages.cae.wisc.edu/~blanchar/eps/ivp/ivp.htm
# - https://stackoverflow.com/questions/52334558/runge-kutta-4th-order-method-to-solve-second-order-odes
# - https://publications.waset.org/1175/pdf
def F(C, x, y, yp):
    """Integrate the total dose along the path starting at (x, y) with
    initial slope yp, stepped forward to X_END with step H using forward
    Euler (RK1) on the 2nd-order ODE y'' = fp(C, x, y, y').

    Returns (dose, final y); dose is inf as soon as the path leaves
    [MIN_Y_BOUND, MAX_Y_BOUND].
    """
    dose = 0.0
    for _ in xrange(int((X_END-X_START)/H)): # more accurate than [while x < X_END]
        if not (MIN_Y_BOUND <= y <= MAX_Y_BOUND):
            return float("inf"), y
        # dose = sum(f(x, y, y') * dx = (1 + sum(1 / (x^2 + (y-ci)^2))) * sqrt(1 + y'^2) * dx)), where dx = H
        dose += H * (1.0+D(C, x, y)) * sqrt(1.0 + yp**2)
        # applying RK1 (forward Euler) for 2nd-order ODE is enough,
        # besides, RK2 (explicit midpoint) is also fine,
        # but RK4 is unstable (fine with H = 0.01 but other may not) for some cases due to large y'
        k1 = H * yp
        l1 = H * fp(C, x, y, yp)
        '''
        k2 = H * (yp + l1/2.0)
        l2 = H * fp(C, x+H/2.0, y+k1/2.0, yp+l1/2.0)
        k3 = H * (yp + l2/2.0)
        l3 = H * fp(C, x+H/2.0, y+k2/2.0, yp+l2/2.0)
        k4 = H * (yp + l3)
        l4 = H * fp(C, x+H, y+k3, yp+l3)
        '''
        x += H
        y += k1 # RK2: y += k2, RK4: y += (k1 + 2.0*k2 + 2.0*k3 + k4)/6.0
        yp += l1 # RK2: yp += l2, RK4: yp += (l1 + 2.0*l2 + 2.0*l3 + l4)/6.0
    return dose, y
def binary_search(A, B, C, left, right):
    """Bisect the initial slope within [left, right] so the path launched
    from (X_START, A) ends (approximately) at y = B, and return its dose.

    Relies on the final y of F being monotonic in the initial slope inside
    each candidate bracket (presumably true between adjacent island-aimed
    slopes -- the bisection below depends on it). Iterates until the
    half-bracket width drops below machine epsilon.
    """
    dose = float("inf")
    while abs(right-left)/2.0 > float_info.epsilon:
        mid = (left+right)/2.0
        dose, y = F(C, X_START, A, mid)
        # overshoot above B => steer the slope bracket downward, else upward
        if y >= B:
            right = mid
        else:
            left = mid
    return dose
def radioactive_islands():
    """Read one test case from stdin and return the minimum total dose of a
    path from (X_START, A) toward y = B at X_END, avoiding islands at C.
    """
    # N (island count) is parsed but unused: C is read in full on the next line.
    N, A, B = map(float, raw_input().strip().split())
    C = map(float, raw_input().strip().split())
    # Candidate initial slopes: the global extremes plus the slope aiming
    # straight at each island center; the optimum lies inside one of the
    # brackets formed by adjacent candidates.
    slopes = [MIN_SLOPE, MAX_SLOPE]
    slopes.extend((c-A)/(X_C-X_START) for c in C)
    slopes.sort()
    result = float("inf")
    # bisect each adjacent slope bracket and keep the cheapest path found
    for i in xrange(len(slopes)-1):
        result = min(result, binary_search(A, B, C, slopes[i], slopes[i+1]))
    return result
# Integration / search constants (tuned for this problem's fixed geometry).
H = 0.01 # tuned by experiment, works for RK1, RK2, RK4, besides, H = 0.5 only works for RK1
MIN_Y_BOUND, MAX_Y_BOUND = -13.0, 13.0 # verified by experiment
X_START, X_END = -10.0, 10.0
MIN_A, MAX_A = -10.0, 10.0
MIN_C, MAX_C = -10.0, 10.0
# islands all sit on the vertical line x = X_C
X_C = 0.0
MIN_SLOPE, MAX_SLOPE = (MIN_C-MAX_A)/(X_C-X_START), (MAX_C-MIN_A)/(X_C-X_START)
# Python 2 entry point: one "Case #i: answer" line per test case.
for case in xrange(input()):
    print "Case #%d: %s" % (case+1, radioactive_islands())
| kamyu104/GoogleCodeJam-2016 | World Finals/radioactive-islands2.py | Python | mit | 4,568 |
import logging
import tempfile
from sklearn.linear_model import LinearRegression
from mrfitty.base import AdaptiveEnergyRangeBuilder, FixedEnergyRangeBuilder
from mrfitty.combination_fit import AllCombinationFitTask
# Shared logging setup: route test logging to a file so pytest output stays clean.
logging_level = logging.INFO
logging.basicConfig(level=logging_level, filename="test_arsenic_fit.log")
log = logging.getLogger(name=__name__)
# NOTE(review): the triple-quoted string below is a no-op expression, not the
# module docstring (it follows executable statements); move it to the very top
# of the file if it is meant to document the module.
"""
These are smoke tests for the fitting code. Configuration code is not tested.
"""
def test_arsenic_1(caplog, arsenic_references, arsenic_unknowns):
    """
    Test fits for known arsenic data and reference_spectra using AdaptiveEnergyRangeBuilder.
    :param caplog: logging fixture
    :param arsenic_references: list of arsenic reference spectra from mr-fitty/src/example/arsenic
    :param arsenic_unknowns: list of arsenic unknown spectra from mr-fitty/src/example/arsenic
    :return:
    """
    caplog.set_level(logging_level)
    unknown = arsenic_unknowns[0]
    fit_task = AllCombinationFitTask(
        ls=LinearRegression,
        energy_range_builder=AdaptiveEnergyRangeBuilder(),
        reference_spectrum_list=arsenic_references,
        unknown_spectrum_list=[unknown],
        best_fits_plot_limit=1,
        component_count_range=range(1, 3 + 1),
    )
    # fit inside a throwaway directory so plot PDFs never leak into the repo
    with tempfile.TemporaryDirectory() as plots_pdf_dp:
        fit_task.fit_all(plots_pdf_dp=plots_pdf_dp)
    best_fit = fit_task.fit_table[unknown].best_fit
    # all interpolated arrays must line up with the incident energy grid
    reference_shape = best_fit.interpolant_incident_energy.shape
    assert reference_shape == best_fit.fit_spectrum_b.shape
    assert reference_shape == best_fit.unknown_spectrum_b.shape
    assert reference_shape == best_fit.residuals.shape
    assert len(best_fit.reference_spectra_seq) == 3
def test_arsenic_2(caplog, arsenic_references, arsenic_unknowns):
    """
    Test fits for a single reference against all reference_spectra using FixedEnergyRangeBuilder.
    :param caplog: logging fixture
    :param arsenic_references: list of arsenic reference spectra from mr-fitty/src/example/arsenic
    :param arsenic_unknowns: list of arsenic unknown spectra from mr-fitty/src/example/arsenic
    :return:
    """
    caplog.set_level(logging_level)
    unknown = arsenic_unknowns[0]
    # pin the fit window instead of letting the builder adapt it
    fixed_range = FixedEnergyRangeBuilder(energy_start=11850.0, energy_stop=12090.0)
    fit_task = AllCombinationFitTask(
        ls=LinearRegression,
        energy_range_builder=fixed_range,
        reference_spectrum_list=arsenic_references,
        unknown_spectrum_list=[unknown],
        best_fits_plot_limit=1,
        component_count_range=range(1, 3 + 1),
    )
    with tempfile.TemporaryDirectory() as plots_pdf_dp:
        fit_task.fit_all(plots_pdf_dp=plots_pdf_dp)
    best_fit = fit_task.fit_table[unknown].best_fit
    assert 2 <= len(best_fit.reference_spectra_seq) <= 3
| jklynch/mr-fitty | mrfitty/tests/test_arsenic_fit.py | Python | mit | 3,115 |
"""Mongodb implementations of authorization queries."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package mongo package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ..primitives import Id
from ...abstract_osid.authorization import queries as abc_authorization_queries
from dlkit.abstract_osid.osid import errors
from dlkit.mongo.osid import queries as osid_queries
class AuthorizationQuery(abc_authorization_queries.AuthorizationQuery, osid_queries.OsidRelationshipQuery):
    """The query for authorizations."""
    # NOTE(review): every matcher/clearer below is an OSID-contract stub that
    # raises errors.Unimplemented -- query-term support has not yet been
    # written for this mongo implementation.
    @utilities.arguments_not_none
    def match_explicit_authorizations(self, match):
        """Matches explicit authorizations.
        arg: match (boolean): ``true`` to match explicit
                authorizations, ``false`` to match implicit
                authorizations
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_explicit_authorizations_terms(self):
        """Clears the explicit authorization query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    explicit_authorizations_terms = property(fdel=clear_explicit_authorizations_terms)
    @utilities.arguments_not_none
    def match_related_authorization_id(self, id_, match):
        """Adds an ``Id`` to match explicit or implicitly related authorizations depending on
        ``matchExplicitAuthorizations()``.
        Multiple ``Ids`` can be added to perform a boolean ``OR`` among
        them.
        arg: id (osid.id.Id): ``Id`` to match
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise: NullArgument - ``id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_related_authorization_id_terms(self):
        """Clears the related authorization ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    related_authorization_id_terms = property(fdel=clear_related_authorization_id_terms)
    def supports_related_authorization_query(self):
        """Tests if an ``AuthorizationQuery`` is available.
        return: (boolean) - ``true`` if an authorization query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_related_authorization_query(self, match):
        """Gets the authorization query.
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        return: (osid.authorization.AuthorizationQuery) - the
                ``AuthorizationQuery``
        raise: Unimplemented -
                ``supports_related_authorization_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_related_authorization_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    def clear_related_authorization_terms(self):
        """Clears the related authorization query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    related_authorization_terms = property(fdel=clear_related_authorization_terms)
    @utilities.arguments_not_none
    def match_resource_id(self, resource_id, match):
        """Matches the resource identified by the given ``Id``.
        arg: resource_id (osid.id.Id): the ``Id`` of the ``Resource``
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        raise: NullArgument - ``resource_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_resource_id_terms(self):
        """Clears the resource ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    resource_id_terms = property(fdel=clear_resource_id_terms)
    def supports_resource_query(self):
        """Tests if a ``ResourceQuery`` is available.
        return: (boolean) - ``true`` if a resource query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_resource_query(self, match):
        """Gets the resource query.
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        return: (osid.resource.ResourceQuery) - the ``ResourceQuery``
        raise: Unimplemented - ``supports_resource_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_resource_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def match_any_resource(self, match):
        """Matches authorizations that have any resource.
        arg: match (boolean): ``true`` to match authorizations with
                any resource, ``false`` to match authorizations with no
                resource
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_resource_terms(self):
        """Clears the resource query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    resource_terms = property(fdel=clear_resource_terms)
    @utilities.arguments_not_none
    def match_trust_id(self, trust_id, match):
        """Matches the trust identified by the given ``Id``.
        arg: trust_id (osid.id.Id): the ``Id`` of the ``Trust``
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        raise: NullArgument - ``trust_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def match_any_trust_id(self, match):
        """Matches authorizations that have any trust defined.
        arg: match (boolean): ``true`` to match authorizations with
                any trust, ``false`` to match authorizations with no
                trusts
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_trust_id_terms(self):
        """Clears the trust ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    trust_id_terms = property(fdel=clear_trust_id_terms)
    @utilities.arguments_not_none
    def match_agent_id(self, agent_id, match):
        """Matches the agent identified by the given ``Id``.
        arg: agent_id (osid.id.Id): the Id of the ``Agent``
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        raise: NullArgument - ``agent_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_agent_id_terms(self):
        """Clears the agent ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    agent_id_terms = property(fdel=clear_agent_id_terms)
    def supports_agent_query(self):
        """Tests if an ``AgentQuery`` is available.
        return: (boolean) - ``true`` if an agent query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_agent_query(self, match):
        """Gets the agent query.
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        return: (osid.authentication.AgentQuery) - the ``AgentQuery``
        raise: Unimplemented - ``supports_agent_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_agent_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def match_any_agent(self, match):
        """Matches authorizations that have any agent.
        arg: match (boolean): ``true`` to match authorizations with
                any agent, ``false`` to match authorizations with no
                agent
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_agent_terms(self):
        """Clears the agent query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    agent_terms = property(fdel=clear_agent_terms)
    @utilities.arguments_not_none
    def match_function_id(self, function_id, match):
        """Matches the function identified by the given ``Id``.
        arg: function_id (osid.id.Id): the Id of the ``Function``
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        raise: NullArgument - ``function_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_function_id_terms(self):
        """Clears the function ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    function_id_terms = property(fdel=clear_function_id_terms)
    def supports_function_query(self):
        """Tests if a ``FunctionQuery`` is available.
        return: (boolean) - ``true`` if a function query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_function_query(self, match):
        """Gets the function query.
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        return: (osid.authorization.FunctionQuery) - the
                ``FunctionQuery``
        raise: Unimplemented - ``supports_function_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_function_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    def clear_function_terms(self):
        """Clears the function query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    function_terms = property(fdel=clear_function_terms)
    @utilities.arguments_not_none
    def match_qualifier_id(self, qualifier_id, match):
        """Matches the qualifier identified by the given ``Id``.
        arg: qualifier_id (osid.id.Id): the Id of the ``Qualifier``
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        raise: NullArgument - ``qualifier_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_qualifier_id_terms(self):
        """Clears the qualifier ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    qualifier_id_terms = property(fdel=clear_qualifier_id_terms)
    def supports_qualifier_query(self):
        """Tests if a ``QualifierQuery`` is available.
        return: (boolean) - ``true`` if a qualifier query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_qualifier_query(self, match):
        """Gets the qualifier query.
        arg: match (boolean): ``true`` if a positive match, ``false``
                for a negative match
        return: (osid.authorization.QualifierQuery) - the
                ``QualifierQuery``
        raise: Unimplemented - ``supports_qualifier_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_qualifier_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    def clear_qualifier_terms(self):
        """Clears the qualifier query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    qualifier_terms = property(fdel=clear_qualifier_terms)
    @utilities.arguments_not_none
    def match_vault_id(self, vault_id, match):
        """Sets the vault ``Id`` for this query.
        arg: vault_id (osid.id.Id): a vault ``Id``
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise: NullArgument - ``vault_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_vault_id_terms(self):
        """Clears the vault ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    vault_id_terms = property(fdel=clear_vault_id_terms)
    def supports_vault_query(self):
        """Tests if a ``VaultQuery`` is available.
        return: (boolean) - ``true`` if a vault query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def get_vault_query(self):
        """Gets the query for a vault.
        Multiple retrievals produce a nested ``OR`` term.
        return: (osid.authorization.VaultQuery) - the vault query
        raise: Unimplemented - ``supports_vault_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_vault_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    vault_query = property(fget=get_vault_query)
    def clear_vault_terms(self):
        """Clears the vault query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    vault_terms = property(fdel=clear_vault_terms)
    @utilities.arguments_not_none
    def get_authorization_query_record(self, authorization_record_type):
        """Gets the authorization query record corresponding to the given ``Authorization`` record ``Type``.
        Multiple retrievals produce a nested ``OR`` term.
        arg: authorization_record_type (osid.type.Type): an
                authorization record type
        return: (osid.authorization.records.AuthorizationQueryRecord) -
                the authorization query record
        raise: NullArgument - ``authorization_record_type`` is ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
                ``has_record_type(authorization_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class VaultQuery(abc_authorization_queries.VaultQuery, osid_queries.OsidCatalogQuery):
    """This is the query for searching vaults.
    Each method specifies an ``AND`` term while multiple invocations of
    the same method produce a nested ``OR``.
    """
    # Only the clear_*_terms methods are implemented (delegating to the
    # OsidQuery term store); matchers and sub-query accessors raise
    # errors.Unimplemented.
    def __init__(self, runtime):
        self._runtime = runtime
        # Gather all record types registered for this implementation so the
        # query can expose the supported record Type ids.
        record_type_data_sets = self._get_registry('_RECORD_TYPES')
        self._all_supported_record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_ids = []
        for data_set in record_type_data_sets:
            self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
        osid_queries.OsidCatalogQuery.__init__(self, runtime)
    @utilities.arguments_not_none
    def match_function_id(self, function_id, match):
        """Sets the function ``Id`` for this query.
        arg: function_id (osid.id.Id): a function ``Id``
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise: NullArgument - ``function_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_function_id_terms(self):
        """Clears the function ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('functionId')
    function_id_terms = property(fdel=clear_function_id_terms)
    def supports_function_query(self):
        """Tests if a ``FunctionQuery`` is available.
        return: (boolean) - ``true`` if a function query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def get_function_query(self):
        """Gets the query for a function.
        Multiple retrievals produce a nested ``OR`` term.
        return: (osid.authorization.FunctionQuery) - the function query
        raise: Unimplemented - ``supports_function_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_function_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    function_query = property(fget=get_function_query)
    @utilities.arguments_not_none
    def match_any_function(self, match):
        """Matches vaults that have any function.
        arg: match (boolean): ``true`` to match vaults with any
                function mapping, ``false`` to match vaults with no
                function mapping
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_function_terms(self):
        """Clears the function query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('function')
    function_terms = property(fdel=clear_function_terms)
    @utilities.arguments_not_none
    def match_qualifier_id(self, qualifier_id, match):
        """Sets the qualifier ``Id`` for this query.
        arg: qualifier_id (osid.id.Id): a qualifier ``Id``
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise: NullArgument - ``qualifier_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_qualifier_id_terms(self):
        """Clears the qualifier ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('qualifierId')
    qualifier_id_terms = property(fdel=clear_qualifier_id_terms)
    def supports_qualifier_query(self):
        """Tests if a ``QualifierQuery`` is available.
        return: (boolean) - ``true`` if a qualifier query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def get_qualifier_query(self):
        """Gets the query for a qualifier.
        Multiple retrievals produce a nested ``OR`` term.
        return: (osid.authorization.QualifierQuery) - the qualifier
                query
        raise: Unimplemented - ``supports_qualifier_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_qualifier_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    qualifier_query = property(fget=get_qualifier_query)
    @utilities.arguments_not_none
    def match_any_qualifier(self, match):
        """Matches vaults that have any qualifier.
        arg: match (boolean): ``true`` to match vaults with any
                qualifier mapping, ``false`` to match vaults with no
                qualifier mapping
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_qualifier_terms(self):
        """Clears the qualifier query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('qualifier')
    qualifier_terms = property(fdel=clear_qualifier_terms)
    @utilities.arguments_not_none
    def match_authorization_id(self, authorization_id, match):
        """Sets the authorization ``Id`` for this query.
        arg: authorization_id (osid.id.Id): an authorization ``Id``
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise: NullArgument - ``authorization_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_authorization_id_terms(self):
        """Clears the authorization ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('authorizationId')
    authorization_id_terms = property(fdel=clear_authorization_id_terms)
    def supports_authorization_query(self):
        """Tests if an ``AuthorizationQuery`` is available.
        return: (boolean) - ``true`` if an authorization query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def get_authorization_query(self):
        """Gets the query for an authorization.
        Multiple retrievals produce a nested ``OR`` term.
        return: (osid.authorization.AuthorizationQuery) - the
                authorization query
        raise: Unimplemented - ``supports_authorization_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_authorization_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    authorization_query = property(fget=get_authorization_query)
    @utilities.arguments_not_none
    def match_any_authorization(self, match):
        """Matches vaults that have any authorization.
        arg: match (boolean): ``true`` to match vaults with any
                authorization mapping, ``false`` to match vaults with no
                authorization mapping
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_authorization_terms(self):
        """Clears the authorization query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('authorization')
    authorization_terms = property(fdel=clear_authorization_terms)
    @utilities.arguments_not_none
    def match_ancestor_vault_id(self, vault_id, match):
        """Sets the vault ``Id`` for this query to match vaults that have the specified vault as an ancestor.
        arg: vault_id (osid.id.Id): a vault ``Id``
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise: NullArgument - ``vault_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_ancestor_vault_id_terms(self):
        """Clears the ancestor vault ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('ancestorVaultId')
    ancestor_vault_id_terms = property(fdel=clear_ancestor_vault_id_terms)
    def supports_ancestor_vault_query(self):
        """Tests if a ``VaultQuery`` is available.
        return: (boolean) - ``true`` if a vault query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def get_ancestor_vault_query(self):
        """Gets the query for a vault.
        Multiple retrievals produce a nested ``OR`` term.
        return: (osid.authorization.VaultQuery) - the vault query
        raise: Unimplemented - ``supports_ancestor_vault_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_ancestor_vault_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    ancestor_vault_query = property(fget=get_ancestor_vault_query)
    @utilities.arguments_not_none
    def match_any_ancestor_vault(self, match):
        """Matches vaults that have any ancestor.
        arg: match (boolean): ``true`` to match vaults with any
                ancestor, ``false`` to match root vaults
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_ancestor_vault_terms(self):
        """Clears the ancestor vault query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('ancestorVault')
    ancestor_vault_terms = property(fdel=clear_ancestor_vault_terms)
    @utilities.arguments_not_none
    def match_descendant_vault_id(self, vault_id, match):
        """Sets the vault ``Id`` for this query to match vaults that have the specified vault as a descendant.
        arg: vault_id (osid.id.Id): a vault ``Id``
        arg: match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise: NullArgument - ``vault_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_descendant_vault_id_terms(self):
        """Clears the descendant vault ``Id`` query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('descendantVaultId')
    descendant_vault_id_terms = property(fdel=clear_descendant_vault_id_terms)
    def supports_descendant_vault_query(self):
        """Tests if a ``VaultQuery`` is available.
        return: (boolean) - ``true`` if a vault query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def get_descendant_vault_query(self):
        """Gets the query for a vault.
        Multiple retrievals produce a nested ``OR`` term.
        return: (osid.authorization.VaultQuery) - the vault query
        raise: Unimplemented - ``supports_descendant_vault_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
        ``supports_descendant_vault_query()`` is ``true``.*
        """
        raise errors.Unimplemented()
    descendant_vault_query = property(fget=get_descendant_vault_query)
    @utilities.arguments_not_none
    def match_any_descendant_vault(self, match):
        """Matches vaults that have any descendant.
        arg: match (boolean): ``true`` to match vaults with any
                descendant, ``false`` to match leaf vaults
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    def clear_descendant_vault_terms(self):
        """Clears the descendant vault query terms.
        *compliance: mandatory -- This method must be implemented.*
        """
        self._clear_terms('descendantVault')
    descendant_vault_terms = property(fdel=clear_descendant_vault_terms)
    @utilities.arguments_not_none
    def get_vault_query_record(self, vault_record_type):
        """Gets the vault query record corresponding to the given ``Vault`` record ``Type``.
        Multiple record retrievals produce a nested ``OR`` term.
        arg: vault_record_type (osid.type.Type): a vault record type
        return: (osid.authorization.records.VaultQueryRecord) - the
                vault query record
        raise: NullArgument - ``vault_record_type`` is ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported - ``has_record_type(vault_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
| birdland/dlkit-doc | dlkit/mongo/authorization/queries.py | Python | mit | 29,579 |
"""Tests for resources."""
# pylint: disable=invalid-name
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from model_mommy import mommy
import json
from open_connect.media.tests import get_in_memory_image_file
from open_connect.connect_core.utils.basetests import ConnectTestMixin
from open_connect.resources.models import Resource
from open_connect.resources.tests import ResourceMixin
class TestResourceCreateView(ConnectTestMixin, TestCase):
    """Tests for ResourceCreateView."""
    @staticmethod
    def _grant_add_permission(user):
        """Attach the resources.add_resource permission to `user`."""
        add_perm = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        user.user_permissions.add(add_perm)
    def _post_new_resource(self, name, groups):
        """POST the resource-creation form and return the response."""
        return self.client.post(
            reverse('create_resource'),
            {
                'attachment': get_in_memory_image_file(),
                'name': name,
                'groups': [group.pk for group in groups]
            }
        )
    def test_form_valid(self):
        """A group owner with add permission can create a resource."""
        owner = self.create_user()
        self._grant_add_permission(owner)
        first_group = mommy.make('groups.Group')
        second_group = mommy.make('groups.Group')
        first_group.owners.add(owner)
        second_group.owners.add(owner)
        self.login(owner)
        response = self._post_new_resource(
            'fjkliogaer89u032fjkl', [first_group, second_group])
        self.assertRedirects(response, reverse('resources'))
        created = Resource.objects.get(name='fjkliogaer89u032fjkl')
        self.assertEqual(created.content_type, 'image/png')
        self.assertEqual(created.created_by, owner)
        self.assertEqual(created.groups.count(), 2)
    def test_permission_required_to_create_resource(self):
        """User needs permission to create new resources."""
        member = self.create_user()
        group = mommy.make('groups.Group')
        group.owners.add(member)
        self.login(member)
        response = self._post_new_resource('Test resource', [group])
        self.assertEqual(response.status_code, 403)
    def test_user_has_permission_but_is_not_group_owner(self):
        """Users can only create resources for groups they own."""
        non_owner = self.create_user()
        self._grant_add_permission(non_owner)
        group = mommy.make('groups.Group')
        self.login(non_owner)
        response = self._post_new_resource('Test resource', [group])
        expected_error = (
            u'Select a valid choice. '
            u'%s is not one of the available choices.' % group.pk
        )
        self.assertEqual(
            response.context_data['form'].errors,
            {'groups': [expected_error]}
        )
    def test_user_has_super_create_permission(self):
        """Users with elevated permissions can create resources anywhere."""
        super_user = self.create_user()
        self._grant_add_permission(super_user)
        anywhere_perm = Permission.objects.get_by_natural_key(
            'can_add_resource_anywhere', 'resources', 'resource')
        super_user.user_permissions.add(anywhere_perm)
        group = mommy.make('groups.Group')
        self.login(super_user)
        response = self._post_new_resource('fjkliogaer89u032fjkl', [group])
        self.assertRedirects(response, reverse('resources'))
        created = Resource.objects.get(name='fjkliogaer89u032fjkl')
        self.assertTrue(created.groups.filter(pk=group.pk).exists())
class TestResourceUpdateView(ConnectTestMixin, TestCase):
    """Tests for ResourceUpdateView.

    ``setUp`` builds one resource owned by ``self.user`` and attached to
    ``self.group``; individual tests then vary who performs the update.
    """
    def setUp(self):
        """Prepare common items."""
        self.group = mommy.make('groups.Group')
        self.user = self.create_user()
        self.resource = Resource.objects.create(
            attachment=get_in_memory_image_file(),
            name='test resource',
            created_by=self.user
        )
        self.resource.groups.add(self.group)
        # Resources are addressed by uuid, not pk, in the update URL.
        self.url = reverse(
            'update_resource', kwargs={'uuid': self.resource.uuid})
    def test_form_valid(self):
        """A valid POST by the owner updates both name and attachment."""
        permission = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        self.user.user_permissions.add(permission)
        self.group.owners.add(self.user)
        self.login(self.user)
        old_attachment = self.resource.attachment
        response = self.client.post(
            self.url,
            {
                'attachment': get_in_memory_image_file(),
                'name': 'agafdsafdsagewa',
                'groups': [self.group.pk]
            }
        )
        self.assertRedirects(response, reverse('resources'))
        # Re-fetch to see the persisted state, not the cached instance.
        resource = Resource.objects.get(pk=self.resource.pk)
        self.assertEqual(resource.name, 'agafdsafdsagewa')
        self.assertNotEqual(resource.attachment, old_attachment)
    def test_form_valid_not_updating_file(self):
        """Should be able to submit form without updating the actual file."""
        permission = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        self.user.user_permissions.add(permission)
        self.group.owners.add(self.user)
        self.login(self.user)
        # No 'attachment' key: the existing file must be kept.
        response = self.client.post(
            self.url,
            {
                'name': 'agafdsafdsagewa',
                'groups': [self.group.pk]
            }
        )
        self.assertRedirects(response, reverse('resources'))
        resource = Resource.objects.get(pk=self.resource.pk)
        self.assertEqual(resource.name, 'agafdsafdsagewa')
        self.assertIsNotNone(resource.attachment)
    def test_only_owner_can_modify_resource(self):
        """Only the creator of a resource should be able to modify it.

        This user has the model permission and owns the group, but did not
        create the resource, so the update is forbidden.
        """
        user = self.create_user()
        permission = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        user.user_permissions.add(permission)
        self.group.owners.add(user)
        self.login(user)
        response = self.client.post(
            self.url,
            {
                'attachment': get_in_memory_image_file(),
                'name': 'agafdsafdsagewa',
                'groups': [self.group.pk]
            }
        )
        self.assertEqual(response.status_code, 403)
    def test_user_has_super_create_permission_can_modify_any_resource(self):
        """A user with elevated permissions can edit any resource."""
        user = self.create_user()
        add_permission = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        anywhere_permission = Permission.objects.get_by_natural_key(
            'can_add_resource_anywhere', 'resources', 'resource')
        user.user_permissions.add(add_permission, anywhere_permission)
        self.login(user)
        old_attachment = self.resource.attachment
        response = self.client.post(
            self.url,
            {
                'attachment': get_in_memory_image_file(),
                'name': 'agafdsafdsagewa',
                'groups': [self.group.pk]
            }
        )
        self.assertRedirects(response, reverse('resources'))
        resource = Resource.objects.get(pk=self.resource.pk)
        self.assertEqual(resource.name, 'agafdsafdsagewa')
        self.assertNotEqual(resource.attachment, old_attachment)
class TestResourceListView(ConnectTestMixin, TestCase):
    """Tests for ResourceListView.

    Each test builds two resources in two groups and asserts which one
    survives the view's visibility rules and search filters.
    """
    def test_limited_to_files_for_user(self):
        """Queryset should only include files for groups a user belongs to."""
        user = self.create_user()
        group1 = mommy.make('groups.Group')
        user.add_to_group(group1.pk)
        group2 = mommy.make('groups.Group')
        resource1 = mommy.make('resources.Resource', groups=[group1])
        resource2 = mommy.make('resources.Resource', groups=[group2])
        self.login(user)
        response = self.client.get(reverse('resources'))
        self.assertIn(resource1, response.context_data['resources'])
        self.assertNotIn(resource2, response.context_data['resources'])
    def test_with_query(self):
        """Test seaching by name query.

        The ``query`` parameter should match against resource names.
        """
        user = self.create_user()
        group1 = mommy.make('groups.Group')
        user.add_to_group(group1.pk)
        group2 = mommy.make('groups.Group')
        user.add_to_group(group2.pk)
        resource1 = mommy.make(
            'resources.Resource', groups=[group1], name='cool thing')
        resource2 = mommy.make(
            'resources.Resource', groups=[group2], name='no way')
        self.login(user)
        response = self.client.get(
            reverse('resources'),
            {'query': 'cool'}
        )
        self.assertIn(resource1, response.context_data['resources'])
        self.assertNotIn(resource2, response.context_data['resources'])
    def test_with_query_tag(self):
        """Test searching by tag.

        The same ``query`` parameter should also match resource tags.
        """
        user = self.create_user()
        group1 = mommy.make('groups.Group')
        user.add_to_group(group1.pk)
        group2 = mommy.make('groups.Group')
        user.add_to_group(group2.pk)
        resource1 = mommy.make(
            'resources.Resource', groups=[group1])
        resource1.tags.add('borg')
        resource2 = mommy.make(
            'resources.Resource', groups=[group2])
        self.login(user)
        response = self.client.get(
            reverse('resources'),
            {'query': 'borg'}
        )
        self.assertIn(resource1, response.context_data['resources'])
        self.assertNotIn(resource2, response.context_data['resources'])
    def test_with_group(self):
        """Test searching by group via the ``group_id`` parameter."""
        user = self.create_user()
        group1 = mommy.make('groups.Group')
        user.add_to_group(group1.pk)
        group2 = mommy.make('groups.Group')
        user.add_to_group(group2.pk)
        resource1 = mommy.make(
            'resources.Resource', groups=[group1])
        resource2 = mommy.make(
            'resources.Resource', groups=[group2])
        self.login(user)
        response = self.client.get(
            reverse('resources'),
            {'group_id': group2.pk}
        )
        self.assertNotIn(resource1, response.context_data['resources'])
        self.assertIn(resource2, response.context_data['resources'])
    def test_with_file_type(self):
        """Test searching by file type.

        ``file_type`` matches the major part of the stored MIME
        content_type (e.g. 'video' matches 'video/avi').
        """
        user = self.create_user()
        group1 = mommy.make('groups.Group')
        user.add_to_group(group1.pk)
        group2 = mommy.make('groups.Group')
        user.add_to_group(group2.pk)
        resource1 = mommy.make(
            'resources.Resource',
            groups=[group1],
            content_type='application/pdf'
        )
        resource2 = mommy.make(
            'resources.Resource', groups=[group2], content_type='video/avi')
        self.login(user)
        response = self.client.get(
            reverse('resources'),
            {'file_type': 'video'}
        )
        self.assertNotIn(resource1, response.context_data['resources'])
        self.assertIn(resource2, response.context_data['resources'])
class TestResourceDownloadView(ResourceMixin, ConnectTestMixin, TestCase):
    """Tests for ResourceDownloadView."""
    def test_get_redirect_url(self):
        """Test get_redirect_url."""
        downloadable = self.create_resource()
        self.login(downloadable.created_by)
        target = reverse('resource', kwargs={'slug': downloadable.slug})
        response = self.client.get(target)
        # Not using assertRedirects because the file will not be found.
        # Just need to verify that it would redirect to the right place.
        expected = 'http://testserver{url}'.format(
            url=downloadable.attachment.url)
        self.assertEqual(response['location'], expected)
    def test_user_not_in_group_group_is_private(self):
        """Nonmember shouldn't be able to download resource in private group."""
        hidden_group = mommy.make('groups.Group', private=True)
        downloadable = self.create_resource(groups=[hidden_group])
        outsider = self.create_user()
        self.login(outsider)
        target = reverse('resource', kwargs={'slug': downloadable.slug})
        response = self.client.get(target)
        self.assertEqual(response.status_code, 403)
class TestResourceDeleteView(ConnectTestMixin, TestCase):
    """Tests for ResourceDeleteView.

    ``setUp`` creates one resource owned by ``self.user``; tests then
    verify who may delete it and what the response looks like.
    """
    def setUp(self):
        """Prepare common items."""
        self.group = mommy.make('groups.Group')
        self.user = self.create_user()
        self.resource = Resource.objects.create(
            attachment=get_in_memory_image_file(),
            name='test resource',
            created_by=self.user
        )
        self.resource.groups.add(self.group)
        self.url = reverse(
            'delete_resource', kwargs={'uuid': self.resource.uuid})
    def test_only_owner_can_delete_resource(self):
        """Only the creator of a resource should be able to delete it.

        Model permission plus group ownership is still not enough for a
        user who did not create the resource.
        """
        user = self.create_user()
        permission = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        user.user_permissions.add(permission)
        self.group.owners.add(user)
        self.login(user)
        response = self.client.post(self.url)
        self.assertEqual(response.status_code, 403)
    def test_user_has_super_create_permission_can_delete_any_resource(self):
        """A user with elevated permissions can delete any resource."""
        user = self.create_user()
        add_permission = Permission.objects.get_by_natural_key(
            'add_resource', 'resources', 'resource')
        anywhere_permission = Permission.objects.get_by_natural_key(
            'can_add_resource_anywhere', 'resources', 'resource')
        user.user_permissions.add(add_permission, anywhere_permission)
        self.login(user)
        response = self.client.post(self.url)
        self.assertEqual(
            json.loads(response.content),
            {'success': True, 'message': 'The resource has been deleted.'}
        )
        # Deletion is soft: the row remains, flagged with status 'deleted',
        # and is only visible through the with_deleted() manager.
        resource = Resource.objects.with_deleted().get(pk=self.resource.pk)
        self.assertEqual(resource.status, 'deleted')
    def test_response_is_json(self):
        """After deleting a Resource, return a JSON response."""
        self.login(self.user)
        response = self.client.post(self.url)
        self.assertEqual(
            json.loads(response.content),
            {'success': True, 'message': 'The resource has been deleted.'}
        )
| lpatmo/actionify_the_news | open_connect/resources/tests/test_views.py | Python | mit | 15,074 |
import json
from cffi import FFI
ffi = FFI()
ffi.cdef("""
struct jv_refcnt;
typedef struct {
unsigned char kind_flags;
unsigned char pad_;
unsigned short offset; /* array offsets */
int size;
...;
} jv;
typedef struct jq_state jq_state;
typedef void (*jq_err_cb)(void *, jv);
jq_state *jq_init(void);
int jq_compile(jq_state *, const char* str);
void jq_start(jq_state *, jv value, int flags);
jv jq_next(jq_state *);
jv jv_null(void);
jv jv_string(const char*);
const char* jv_string_value(jv);
void jv_free(jv);
jv jv_dump_string(jv, int flags);
typedef enum {
JV_PARSE_EXPLODE_TOPLEVEL_ARRAY = 1
} jv_parser_flags;
struct jv_parser;
struct jv_parser* jv_parser_new(jv_parser_flags);
void jv_parser_set_buf(struct jv_parser*, const char*, int, int);
jv jv_parser_next(struct jv_parser*);
void jv_parser_free(struct jv_parser*);
int jv_is_valid(jv x);
""")
jqlib = ffi.verify("#include <jq.h>\n#include <jv.h>",
libraries=['jq'])
class JQ(object):
    """A compiled jq program.

    Wraps a ``jq_state`` holding the compiled filter; ``transform`` feeds
    JSON input through it and collects the outputs.
    """

    def __init__(self, program):
        """Compile ``program`` (a jq filter as text) for later use.

        Raises:
            Exception: if ``jq_init`` cannot allocate an interpreter.
            ValueError: if the program does not compile.
        """
        program_bytes = program.encode("utf8")
        self.jq = jqlib.jq_init()
        if not self.jq:
            raise Exception("jq_init failed")
        compiled = jqlib.jq_compile(self.jq, program_bytes)
        if not compiled:
            raise ValueError("program was not valid")

    def transform(self, input, raw_input=False, raw_output=False, multiple_output=False):
        """Run the program over ``input`` and return the result(s).

        input: a Python object (JSON-encoded first) or, with
            ``raw_input``, an already-encoded JSON string.
        raw_output: return the outputs joined as one newline-separated
            string instead of decoding them.
        multiple_output: return a list of all decoded outputs; otherwise
            only the first output is decoded and returned.
        """
        string_input = input if raw_input else json.dumps(input)
        bytes_input = string_input.encode("utf8")
        # _string_to_strings now returns Python-owned bytes objects, so
        # no C memory is referenced past this point.
        result_strings = [b.decode("utf8")
                          for b in self._string_to_strings(bytes_input)]
        if raw_output:
            return "\n".join(result_strings)
        elif multiple_output:
            return [json.loads(s) for s in result_strings]
        else:
            return json.loads(next(iter(result_strings)))

    def _string_to_strings(self, input):
        """Parse ``input`` bytes as JSON values and run each through jq.

        Returns a list of UTF-8 byte strings, one per program output.
        """
        parser = jqlib.jv_parser_new(0)
        jqlib.jv_parser_set_buf(parser, input, len(input), 0)
        results = []
        while True:
            value = jqlib.jv_parser_next(parser)
            if not jqlib.jv_is_valid(value):
                # An invalid jv signals end-of-input (or a parse error);
                # it still carries a reference that must be released.
                jqlib.jv_free(value)
                break
            self._process(value, results)
        jqlib.jv_parser_free(parser)
        return results

    def _process(self, value, output):
        """Run one parsed jv through the program, appending dumps to output.

        ``jq_start`` consumes the reference to ``value``.
        """
        jq_flags = 0
        jqlib.jq_start(self.jq, value, jq_flags)
        dumpopts = 0
        while True:
            result = jqlib.jq_next(self.jq)
            if not jqlib.jv_is_valid(result):
                jqlib.jv_free(result)
                return
            dumped = jqlib.jv_dump_string(result, dumpopts)
            # Copy the C string into Python-owned bytes *before* freeing
            # the jv.  The previous code appended the raw char* returned
            # by jv_string_value and then freed the jv, leaving a
            # dangling pointer that was dereferenced later in transform()
            # (use-after-free).
            output.append(ffi.string(jqlib.jv_string_value(dumped)))
            jqlib.jv_free(dumped)
jq = JQ | kkszysiu/jq.py | jq.py | Python | mit | 3,032 |
from builtins import filter
import re
import six
import datetime
import pytz
import pydantic
import requests
import typing_extensions
from jinja2 import Template
from bugwarrior import config
from bugwarrior.services import IssueService, Issue
import logging
log = logging.getLogger(__name__)
class PagureConfig(config.ServiceConfig, prefix='pagure'):
    """Configuration schema for the ``[pagure]`` bugwarrior section."""
    # strictly required
    service: typing_extensions.Literal['pagure']
    base_url: config.StrippedTrailingSlashUrl
    # conditionally required: at least one of tag/repo must be set
    # (enforced by require_tag_or_repo below)
    tag: str = ''
    repo: str = ''
    # optional
    include_repos: config.ConfigList = config.ConfigList([])
    exclude_repos: config.ConfigList = config.ConfigList([])
    import_tags: bool = False
    tag_template: str = '{{label}}'
    @pydantic.root_validator
    def require_tag_or_repo(cls, values):
        """Reject configurations that set neither ``tag`` nor ``repo``."""
        if not values['tag'] and not values['repo']:
            raise ValueError(
                'section requires one of:\npagure.tag\npagure.repo')
        return values
class PagureIssue(Issue):
    """Maps a Pagure issue or pull request onto a taskwarrior task."""
    # UDA (user-defined attribute) field names used in taskwarrior.
    TITLE = 'paguretitle'
    DATE_CREATED = 'paguredatecreated'
    URL = 'pagureurl'
    REPO = 'pagurerepo'
    TYPE = 'paguretype'
    ID = 'pagureid'

    # Declarations for the taskwarrior UDAs written by this service.
    UDAS = {
        TITLE: {
            'type': 'string',
            'label': 'Pagure Title',
        },
        DATE_CREATED: {
            'type': 'date',
            'label': 'Pagure Created',
        },
        REPO: {
            'type': 'string',
            'label': 'Pagure Repo Slug',
        },
        URL: {
            'type': 'string',
            'label': 'Pagure URL',
        },
        TYPE: {
            'type': 'string',
            'label': 'Pagure Type',
        },
        ID: {
            'type': 'numeric',
            'label': 'Pagure Issue/PR #',
        },
    }
    # URL alone is not unique across issue vs. PR numbering, so the pair
    # identifies a task.
    UNIQUE_KEY = (URL, TYPE,)

    def _normalize_label_to_tag(self, label):
        """Replace every non-alphanumeric character with '_'."""
        return re.sub(r'[^a-zA-Z0-9]', '_', label)

    def to_taskwarrior(self):
        """Build the taskwarrior field dict for this record."""
        # Pull requests get a fixed high priority; issues fall back to the
        # service-wide default.
        if self.extra['type'] == 'pull_request':
            priority = 'H'
        else:
            priority = self.origin['default_priority']
        return {
            'project': self.extra['project'],
            'priority': priority,
            'annotations': self.extra.get('annotations', []),
            'tags': self.get_tags(),
            self.URL: self.record['html_url'],
            self.REPO: self.record['repo'],
            self.TYPE: self.extra['type'],
            self.TITLE: self.record['title'],
            self.ID: self.record['id'],
            # Pagure returns epoch seconds as a string; store as aware UTC.
            self.DATE_CREATED: datetime.datetime.fromtimestamp(
                int(self.record['date_created']), pytz.UTC),
        }

    def get_tags(self):
        """Render the record's Pagure tags through the tag template.

        Returns an empty list unless ``import_tags`` is enabled.
        """
        tags = []
        if not self.origin['import_tags']:
            return tags
        context = self.record.copy()
        tag_template = Template(self.origin['tag_template'])
        for tagname in self.record.get('tags', []):
            context.update({'label': self._normalize_label_to_tag(tagname) })
            tags.append(tag_template.render(context))
        return tags

    def get_default_description(self):
        """Build the default one-line task description."""
        return self.build_default_description(
            title=self.record['title'],
            url=self.get_processed_url(self.record['html_url']),
            number=self.record['id'],
            cls=self.extra['type'],
        )
class PagureService(IssueService):
    """Bugwarrior service pulling open issues and PRs from a Pagure server."""
    ISSUE_CLASS = PagureIssue
    CONFIG_SCHEMA = PagureConfig

    def __init__(self, *args, **kw):
        super(PagureService, self).__init__(*args, **kw)
        # One HTTP session reused for all API calls.
        self.session = requests.Session()

    def get_service_metadata(self):
        """Values passed through to each issue's ``origin`` dict."""
        return {
            'import_tags': self.config.import_tags,
            'tag_template': self.config.tag_template,
        }

    def get_issues(self, repo, keys):
        """ Grab all the issues

        ``keys`` is a (url-path, json-key) pair, e.g. ('issues', 'issues')
        or ('pull-requests', 'requests').  Returns (repo, record) tuples.
        """
        key1, key2 = keys
        key3 = key1[:-1]  # Just the singular form of key1
        url = self.config.base_url + "/api/0/" + repo + "/" + key1
        response = self.session.get(url, params=dict(status='Open'))
        if not bool(response):
            error = response.json()
            code = error['error_code']
            # A repo with its tracker disabled is not an error condition.
            if code == 'ETRACKERDISABLED':
                return []
            else:
                raise IOError('Failed to talk to %r %r' % (url, error))
        issues = []
        for result in response.json()[key2]:
            idx = six.text_type(result['id'])
            # The API does not return a web URL, so build one.
            result['html_url'] = "/".join([
                self.config.base_url, repo, key3, idx])
            issues.append((repo, result))
        return issues

    def annotations(self, issue, issue_obj):
        """Build taskwarrior annotations from the record's comments."""
        url = issue['html_url']
        return self.build_annotations(
            ((
                c['user']['name'],
                c['comment'],
            ) for c in issue['comments']),
            issue_obj.get_processed_url(url)
        )

    def get_owner(self, issue):
        """Return the assignee's username, or None when unassigned."""
        if issue[1]['assignee']:
            return issue[1]['assignee']['name']

    def filter_repos(self, repo):
        """Apply exclude_repos (first) and include_repos to a repo name."""
        if repo in self.config.exclude_repos:
            return False
        if self.config.include_repos:
            if repo in self.config.include_repos:
                return True
            else:
                return False
        return True

    def issues(self):
        """Yield PagureIssue objects for every matching repo."""
        if self.config.tag:
            # Tag mode: discover all projects carrying the tag.
            url = (self.config.base_url +
                   "/api/0/projects?tags=" + self.config.tag)
            response = self.session.get(url)
            if not bool(response):
                raise IOError('Failed to talk to %r %r' % (url, response))
            all_repos = [r['name'] for r in response.json()['projects']]
        else:
            all_repos = [self.config.repo]
        repos = filter(self.filter_repos, all_repos)
        issues = []
        for repo in repos:
            issues.extend(self.get_issues(repo, ('issues', 'issues')))
            issues.extend(self.get_issues(repo, ('pull-requests', 'requests')))
        log.debug(" Found %i issues.", len(issues))
        issues = list(filter(self.include, issues))
        log.debug(" Pruned down to %i issues.", len(issues))
        for repo, issue in issues:
            # Stuff this value into the upstream dict for:
            # https://pagure.com/ralphbean/bugwarrior/issues/159
            issue['repo'] = repo
            issue_obj = self.get_issue_for_record(issue)
            extra = {
                'project': repo,
                # Only pull requests carry a 'branch' key in the payload.
                'type': 'pull_request' if 'branch' in issue else 'issue',
                'annotations': self.annotations(issue, issue_obj)
            }
            issue_obj.update_extra(extra)
            yield issue_obj
| pombredanne/bugwarrior | bugwarrior/services/pagure.py | Python | gpl-3.0 | 6,661 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
surrounded_regions.py
~~~~~~~~~~~~~~
A brief description goes here.
"""
class Solution:
    # @param board, a 2D array
    # Capture all regions by modifying the input board in-place.
    # Do not return any value.
    def solve(self, board):
        """Flip every 'O' region not connected to the border to 'X'.

        ``board`` is a list of equal-length strings of 'X'/'O'.  Rows are
        rewritten in place (strings are immutable, so changed rows are
        replaced) and the board is also returned for convenience.

        Flood-fills from every border 'O' to find the "safe" cells; any
        remaining 'O' is surrounded and becomes 'X'.  Cells are marked
        visited when *enqueued* (the old version marked on dequeue, so the
        same cell could be pushed many times).
        """
        if not board:
            return []
        rows, cols = len(board), len(board[0])

        # Seed the flood fill with every 'O' on the outer border.
        safe = set()
        for j in range(cols):
            if board[0][j] == 'O':
                safe.add((0, j))
            if board[rows - 1][j] == 'O':
                safe.add((rows - 1, j))
        for i in range(rows):
            if board[i][0] == 'O':
                safe.add((i, 0))
            if board[i][cols - 1] == 'O':
                safe.add((i, cols - 1))

        # Flood fill: everything reachable from the border stays 'O'.
        queue = list(safe)
        while queue:
            x, y = queue.pop()
            for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                if (0 <= nx < rows and 0 <= ny < cols
                        and board[nx][ny] == 'O' and (nx, ny) not in safe):
                    safe.add((nx, ny))
                    queue.append((nx, ny))

        # Rewrite rows, capturing every 'O' that is not border-connected.
        for i in range(rows):
            if 'O' in board[i]:
                board[i] = ''.join(
                    c if (c != 'O' or (i, j) in safe) else 'X'
                    for j, c in enumerate(board[i]))
        return board
def main(argv):
    """Smoke-test Solution.solve on sample boards, printing timings.

    For each board: prints the row count, the column count when the
    board is non-empty, and the wall-clock seconds solve() took.
    ``argv`` is accepted for CLI symmetry but unused.
    """
    import time
    s = Solution()
    boards = [
        ["XXX",
         "XOX",
         "XXX"],
        [
            "XXXOX",
            "XOOOX",
            "XXOXX",
            "XOXOX",
            "XOXXX",
        ],
        [],
    ]
    # The unused helper mprint was removed; single-argument print() is
    # valid on both Python 2 and 3, whereas the old `print x` statement
    # form was a SyntaxError under Python 3.
    for b in boards:
        print(len(b))
        if b:
            print(len(b[0]))
        t1 = time.time()
        s.solve(b)
        t2 = time.time()
        print(t2 - t1)


if __name__ == '__main__':
    import sys
    main(sys.argv)
| luozhaoyu/leetcode | surrounded_regions.py | Python | mit | 2,985 |
# URL routes for the miiCard API test harness.
# NOTE(review): django.conf.urls.defaults and patterns() only exist in
# old Django versions (removed in 1.6/1.10 respectively).
from django.conf.urls.defaults import patterns, include, url

# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()

urlpatterns = patterns('',
    # Landing page of the harness.
    url(r'^$', 'TestHarness.views.home', name='home'),
    # Endpoints exercising individual API wrapper calls.
    url(r'^assuranceimage$', 'TestHarness.views.assuranceimage', name='assuranceimage'),
    url(r'^cardimage$', 'TestHarness.views.cardimage', name='cardimage'),
    url(r'^sha1$', 'TestHarness.views.sha1', name='sha1')
)
| miiCard/api-wrappers-python-test | src/urls.py | Python | bsd-3-clause | 478 |
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data
from sklearn.linear_model.base import sparse_center_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    # LinearRegression must recover an exact fit on trivial data, and
    # cope with a degenerate single-sample dataset.
    def fit_and_check(samples, targets, coef, intercept, predictions):
        model = LinearRegression()
        model.fit(samples, targets)
        assert_array_almost_equal(model.coef_, coef)
        assert_array_almost_equal(model.intercept_, intercept)
        assert_array_almost_equal(model.predict(samples), predictions)

    # two points on the line y = x
    fit_and_check([[1], [2]], [1, 2], [1], [0], [1, 2])
    # a single sample: coefficient collapses to zero
    fit_and_check([[1]], [0], [0], [0], [0])
def test_linear_regression_sample_weights():
    """Weighted fits must match the closed-form weighted least squares."""
    # TODO: loop over sparse data as well
    rng = np.random.RandomState(0)

    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):

        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)

        for intercept in (True, False):

            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_

            assert_equal(reg.coef_.shape, (X.shape[1], ))  # sanity checks
            assert_greater(reg.score(X, y), 0.5)

            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
            else:
                # prepend a constant column so the intercept is the first
                # entry of the closed-form solution
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)

            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))

            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Sample weights must be either scalar or 1D.

    Scalar and 1-D weights fit cleanly; weights with an extra dimension
    must make ``fit`` raise ValueError.
    """
    # local import: assert_raises is not in this module's import block
    from sklearn.utils.testing import assert_raises

    n_sampless = [2, 3]
    n_featuress = [3, 2]

    rng = np.random.RandomState(42)

    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.
        # column and row vectors: both are 2-D and therefore invalid
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]

        reg = LinearRegression()

        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)

        # The test's name promises a ValueError for >1-D weights, but
        # previously nothing here exercised that path at all.
        assert_raises(ValueError, reg.fit, X, y, sample_weights_not_OK)
        assert_raises(ValueError, reg.fit, X, y, sample_weights_not_OK_2)
def test_fit_intercept():
    """coef_ shapes must not depend on fit_intercept.

    Fitting with and without an intercept, on 2- and 3-feature data,
    should produce coefficient arrays of identical shape and ndim.
    """
    # Test assertions on betas shape.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)

    lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)

    assert_equal(lr2_with_intercept.coef_.shape,
                 lr2_without_intercept.coef_.shape)
    assert_equal(lr3_with_intercept.coef_.shape,
                 lr3_without_intercept.coef_.shape)
    assert_equal(lr2_without_intercept.coef_.ndim,
                 lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    """LinearRegression must also fit sparse (identity) design matrices."""
    # Test that linear regression also works with sparse data
    random_state = check_random_state(random_state)
    for i in range(10):
        n = 100
        # identity design: the fit should reproduce beta exactly
        X = sparse.eye(n, n)
        beta = random_state.rand(n)
        y = X * beta[:, np.newaxis]

        ols = LinearRegression()
        ols.fit(X, y.ravel())
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)

        assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
    """Stacking the same target twice must duplicate the 1-D solution."""
    # Test multiple-outcome linear regressions
    X, y = make_regression(random_state=random_state)

    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    reg = LinearRegression(fit_intercept=True)
    reg.fit((X), Y)
    assert_equal(reg.coef_.shape, (2, n_features))
    Y_pred = reg.predict(X)
    reg.fit(X, y)
    y_pred = reg.predict(X)
    # each column of the 2-output prediction equals the 1-output one
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Same duplicate-target check as above, but with sparse input."""
    # Test multiple-outcome linear regressions with sparse data
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    """center_data: no-op, center-only, and center+normalize paths."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)

    # fit_intercept=False: data must pass through untouched
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # centering only
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # centering + normalization (y is never scaled)
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    """y centering must work column-wise for multi-output targets,
    for both the dense and the sparse centering helpers."""
    n_samples = 200
    n_features = 3
    n_outputs = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
    for center, X in args:
        # fit_intercept=False: y untouched
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)

        # normalize affects X only, never y
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    """With sample_weight, means become weighted averages."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    # XXX: if normalize=True, should we expect a weighted standard deviation?
    #      Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)

    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    """sparse_center_data must leave sparse X uncentered (only scaled),
    while still reporting the means it would have subtracted."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    # fit_intercept=False: nothing happens
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True: means are reported but X is NOT centered
    # (centering would densify the sparse matrix); only y is centered.
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))

    # normalize=True additionally divides X by the reported std
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    # sparse_center_data must hand CSR input back still in CSR format.
    dense, target = make_regression()
    dense[dense < 2.5] = 0.0
    sparse_X = sparse.csr_matrix(dense)
    centered = sparse_center_data(sparse_X, target, True)[0]
    assert_equal(centered.getformat(), 'csr')
def test_rescale_data():
    # _rescale_data must scale rows of X and entries of y by
    # sqrt(sample_weight).
    rng = np.random.RandomState(0)
    n_samples, n_features = 200, 2
    weights = 1.0 + rng.rand(n_samples)
    data = rng.rand(n_samples, n_features)
    target = rng.rand(n_samples)
    got_X, got_y = _rescale_data(data, target, weights)
    root_w = np.sqrt(weights)
    assert_array_almost_equal(got_X, data * root_w[:, np.newaxis])
    assert_array_almost_equal(got_y, target * root_w)
| kashif/scikit-learn | sklearn/linear_model/tests/test_base.py | Python | bsd-3-clause | 12,955 |
#! /usr/bin/env python
# -*- coding: UTF8 -*-
from collections import *
from Bio import SeqIO
import os
import time
from Bio.Blast import NCBIXML
from sys import stdout
import sqlite3
import csv
import sys
import datetime
import hashlib
import multiprocessing
import traceback
# --- Run configuration -------------------------------------------------
# First CLI argument: path to the input; the basename names this run.
# NOTE(review): the value flows unescaped into the blast shell command
# below (executed later via the shell) -- a path containing shell
# metacharacters would break or inject into that command.  Confirm inputs
# are controlled before exposing this script.
qq = str(sys.argv[1])
indiv = qq.split("/")[-1]
rundir = "run_" + indiv
step = 1000  # reads processed per blast batch
cpu = 8 # dont touch this ! For developmental purpose only
hsp_read_percent = 70  # -qcov_hsp_perc passed to blastn
wordsize = 9  # -word_size passed to blastn
################################
##DEV PARAMETERS DONT TOUCH !!!!
lenhash = 50 # desactivated dont touch !!
hashseq = True # desactivated dont touch !!
keep_multihits = True
crosspe = "Y" # Y or N
savesequences = "Y"
# Cross-species runs add permissive scoring options (-reward/-penalty/
# -gapopen/-gapextend); otherwise blastn defaults are used.
if crosspe == "Y":
    blastlunch = str("""../../bin/ncbi-blast/bin/blastn -task blastn -num_threads """ + str(cpu) + """ -query ./""" + rundir + """/tmp.fasta -db ./db/db -out ./""" + rundir + """/out.txt -qcov_hsp_perc """ + str(hsp_read_percent) + """ -evalue 0.05 -window_size 0 -outfmt "6 qseqid stitle pident sstrand evalue score qseq nident" -dust yes -soft_masking yes -lcase_masking -reward 1 -penalty -1 -gapopen 1 -gapextend 2 -word_size """ + str(wordsize))
else:
    blastlunch = str("""../../bin/ncbi-blast/bin/blastn -task blastn -num_threads """ + str(cpu) + """ -query ./""" + rundir + """/tmp.fasta -db ./db/db -out ./""" + rundir + """/out.txt -qcov_hsp_perc """ + str(hsp_read_percent) + """ -evalue 0.05 -window_size 0 -outfmt "6 qseqid stitle pident sstrand evalue score qseq nident" -dust yes -soft_masking yes -lcase_masking -word_size """ + str(wordsize))
class Logger(object):
    """Tee object: forwards writes to the current stdout and a log file.

    Installed as ``sys.stdout`` later in the script so every print is
    mirrored into *filename* (opened in append mode, so repeated runs
    accumulate in the same file).
    """
    def __init__(self, filename="Default.log"):
        # Capture the real stdout at construction time.
        self.terminal = sys.stdout
        self.log = open(filename, "a")

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        """Flush both sinks.

        Required by the file-object contract: without this, any
        ``sys.stdout.flush()`` (e.g. ``print(..., flush=True)`` or the
        interpreter's shutdown flush) raises AttributeError once this
        object replaces sys.stdout.
        """
        self.terminal.flush()
        self.log.flush()
# Redirect stdout through the Logger tee so every print below is also
# written to a dated per-sample log file under ./logs/.
os.system("mkdir -p ./logs")
logfile = str("./logs/Voskhod_" + indiv + "_" + str(datetime.datetime.now().strftime("%d-%m-%y")) + "_log.txt")
sys.stdout = Logger(logfile)
print """
.
/|\ .
/ | \ ./|\,
,-' \|/ `-. <-=O=->
<'--==<O>==--`> '\|/`
`-. /|\ ,-' '
\ | /
\|/
'
_ __ __ __ __
| | / /____ _____ / /__ / /_ ____ ____/ /
| | / // __ \ / ___// //_// __ \ / __ \ / __ /
| |/ // /_/ /(__ )/ ,< / / / // /_/ // /_/ /
|___/ \____//____//_/|_|/_/ /_/ \____/ \__,_/
¤ Automatic Blast / Reads tagger
Version 20170721
Voskhod Pipeline version V1.2
Part of the Voskhod project
https://github.com/egeeamu/voskhod
(GPL-3.0)
Arnaud Ungaro contact@arnaud-ungaro.fr
"""
#crosspe = "Y"
# indiv = str(raw_input("Enter project name: "))
# step = int(raw_input("Enter stepping (100-10000): "))
# cpu = int(raw_input("Enter core to use (1-n): "))
# wordsize = int(raw_input("Enter word_size (4-30): "))
# crosspe = str(raw_input("Cross-Species ? (Y/N): ")).upper()
# Gather host/environment details for the log header. Each popen output is
# split on newlines and pop(-1) drops the empty string after the final "\n".
serv = os.popen("""hostname""").read().split("\n")
serv.pop(-1)
dat = os.popen("""date""").read().split("\n")
dat.pop(-1)
distrib = os.popen("""cat /etc/issue""").read().split("\n")
distrib.pop(-1)
kernel = os.popen("""uname -a""").read().split("\n")
kernel.pop(-1)
user = os.popen("""whoami""").read().split("\n")
user.pop(-1)
pwd = os.popen("""pwd""").read().split("\n")
pwd.pop(-1)
cpu2 = os.popen('''cat /proc/cpuinfo | grep "model name" | cut -d ":" -f2''').read().split("\n")
cpu2.pop(-1)
# Extract the version token from the first line of `blastn -version`.
blastversion = os.popen("""../../bin/ncbi-blast/bin/blastn -version""").read().split("\n")
blastversion.pop(-1)
blastversion = blastversion[0].split(" ")[1]
#blastpath = os.popen("""whereis blastn""").read().split("\n")
#blastpath.pop(-1)
# Echo environment and run parameters into the log.
print "\nIndiv: " + indiv
print "Blast: " + str(blastversion)
#print "Blast: " + str(blastpath).replace("[", "").replace("]", "").replace("'", "").split(" ")[-1]
print "File: " + str(qq)
print "Dir: " + str(pwd[0])
print "Server: " + str(serv[0])
print "Distro: " + str(distrib[0])
print "Kernel: " + str(kernel[0])
print "CPU: " + str(cpu2[0])
print "Date: " + str(dat[0])
print "User: " + str(user[0])
print "\nParameters:"
print "Indiv: " + str(indiv)
print "Stepping: " + str(step)
print "Core(s): " + str(cpu)
print "Word-Size: " + str(wordsize)
print "Cross-Spe: " + str(crosspe)
print "Hsp/Read percent:" + str(hsp_read_percent)
print "Hash's length: " + str(lenhash)
print "Blast commands :" + str(blastlunch)
# First pass over the FASTQ: count the reads (needed for progress/ETA) and
# print a tick every 100k reads as a liveness indicator.
countreads_start = 0
print "\nChecking input file.."
for record in SeqIO.parse(qq, "fastq"):
    if countreads_start % 100000 == 0 and countreads_start != 0:
        print "."
    countreads_start += 1
print "\n"
# Never let a batch exceed one fifth of the input, so small files still
# produce several progress updates.
if step > int(countreads_start / 5.):
    step = int(countreads_start / 5.)
# Start from a clean slate: remove the scratch dir and any previous result DB.
os.system('rm -rf ./' + rundir)
os.system('rm -rf ./results/table_data_' + indiv + '.db')
os.system('mkdir ./' + rundir)
# Output database: one row per read with its best BLAST annotation.
conn2 = sqlite3.connect('./results/table_data_' + indiv + '.db')
c2 = conn2.cursor()
c2.execute('''CREATE TABLE result (sample varchar, read_name varchar, read_sequence varchar, gene_id varchar, species varchar, read_size int, read_hsp_identity int, identity float, evalue float, gene_name varchar, strand int, hsp varchar, hsp_size int, quality varchar, transcript_id varchar, score float, multi_hits varchar)''')
# Durability is traded for speed: a crash may corrupt the result DB, which
# is acceptable because the run is simply restarted.
c2.execute('''PRAGMA synchronous = OFF''')
c2.execute('''PRAGMA journal_mode = OFF''')
c2.execute('''PRAGMA cache_size = 4096''')
conn2.commit()
# Reference database mapping transcripts to genes/species (read-only here).
conn = sqlite3.connect("./data_input/cdna_infos.db")
c = conn.cursor()
c.execute('''PRAGMA synchronous = OFF''')
c.execute('''PRAGMA journal_mode = OFF''')
c.execute('''PRAGMA cache_size = 4096''')
conn.commit()
# Cache transcript metadata in memory: transcript_id -> {ids, gene name, species}.
# Avoids one SQL lookup per BLAST hit in the main loop.
dico_cdnainfo = OrderedDict()
c.execute("""SELECT DISTINCT transcript_id,gene_id,gene_name,species,transcript_sequence FROM RESULT""")
conn.commit()
match = c.fetchone()
print "Reading cdna_info.db.. "
while match is not None:
    Ensdart = str(match[0])
    Ensdarg = str(match[1])
    Gene_name = str(match[2])
    Specie = str(match[3])
    # NOTE(review): Seq is fetched but never used; the SELECT could drop
    # transcript_sequence to save memory/IO.
    Seq = str(match[4])
    dico_cdnainfo[Ensdart] = {"Ensdart": Ensdart, "Ensdarg": Ensdarg, "Gene_name": Gene_name, "Specie": Specie}
    match = c.fetchone()
print "Analysis of " + str(countreads_start) + " reads starting.."
# Per-batch read buffer: read_name -> annotation dict (cleared after each batch).
dico_reads = OrderedDict()
pctage2 = -1.  # last printed progress percentage (prevents duplicate lines)
# Running sums for the progress statistics (mean HSP size, mean read size,
# mean identity, sequences/second).
hspadd = 0.
readsizeadd = 0.
counthsp = 0.
countloop = 0.
identityadd = 0
identitycount = 0
countreads = 0        # reads buffered in the current batch
countreads_total = 0  # reads processed overall
count_miss = 0        # reads with no BLAST hit
timestart = round((time.time()), 0)
timestart2 = round((time.time()), 0)
seqsec = 0.
counterror = 0
# Main loop: buffer reads into batches of `step`, write each batch as FASTA,
# BLAST it against ./db/db, parse the tabular output, keep the best-scoring
# hit per read (ties recorded as "multihits"), then flush the batch into the
# sqlite result table.
for record in SeqIO.parse(qq, "fastq"):
    countreads += 1
    countreads_total += 1
    # Re-encode Phred qualities as a space-separated string for storage.
    liste_qual = record.letter_annotations["phred_quality"]
    qual_fastaq = ""
    for i in liste_qual:
        qual_fastaq += str(i) + " "
    qual_fastaq = qual_fastaq[:-1]
    sq = str(record.seq)
    readsize = len(sq)
    # MD5 of the first `lenhash` bases, kept as a cheap duplicate marker.
    if hashseq == True:
        seqqhash = sq[0:lenhash]
        hashsqq = hashlib.md5(seqqhash).hexdigest()
    else:
        hashsqq = ""
    Reads_name = str(record.description).replace(" ", "_").replace("\t", "_")
    #Reads_name = str(countreads_total)
    dico_reads[Reads_name] = {"indiv": indiv, "read_name": Reads_name, "read": sq, "ensdarg": "", "specie": "", "read_size": readsize, "read_hsp_identity": "", "identity": 0, "evalue": "", "gene_name": "", "sens": "", "hsp": "", "hsp_size": 0, "qual": qual_fastaq, "ensdart": "", "score": 0.0, "md5_hash": hashsqq, "multihits": []}
    # A full batch is buffered: BLAST it and store the annotations.
    if countreads == step:
        os.system("rm -rf ./" + rundir + "/")
        os.system("mkdir ./" + rundir + "/")
        fasta = str("./" + rundir + "/tmp.fasta")
        fastaw = open(fasta, "a+b")
        for i in dico_reads:
            readname = dico_reads[i]["read_name"]
            read = dico_reads[i]["read"]
            # Skip reads whose sequence is more than 25% ambiguous bases (N).
            npres = read.upper().count("N")
            ratioofn = float(npres) / len(read)
            if ratioofn <= 0.25:
                fastaw.write(">" + str(readname) + "\n" + read + "\n")
            else:
                print "Too many N in read (" + str(npres) + ") : " + str(readname) + " " + str(read)
        fastaw.close()
        # NOTE(review): os.system() does not raise on a non-zero exit status,
        # so this except can only catch OS-level failures, not BLAST errors.
        try:
            os.system(blastlunch)
        except Exception:
            print "ERROR WITH BLAST NEAR READ " + str(countreads_total)
            print blastlunch
            counterror += 1
        # Tabular BLAST output, columns per -outfmt:
        # qseqid stitle pident sstrand evalue score qseq nident
        outputtsv = list(csv.reader(open('./' + rundir + '/out.txt', 'rb'), delimiter='\t'))
        try:
            for i in outputtsv:
                try:
                    query = str(i[0])
                    precscore = dico_reads[query]["score"]
                    score = int(i[5])
                    # Hits tying the current best score are kept as multi-hits.
                    if score >= precscore and keep_multihits == True:
                        Ensdartmulti = str(i[1])
                        Ensdargmulti = dico_cdnainfo[Ensdartmulti]["Ensdarg"]
                        Speciemulti = dico_cdnainfo[Ensdartmulti]["Specie"]
                        mutihitsstr = Speciemulti + "+" + Ensdartmulti + "+" + Ensdargmulti
                        dico_reads[query]["multihits"].append(mutihitsstr)
                        #print Ensdartmulti,Ensdargmulti,Speciemulti
                    # Strictly better score: replace the stored best annotation.
                    if score > precscore:
                        hsp_blast = str(i[6]).replace("-", "")
                        hsp_size = len(hsp_blast)
                        Evalue = float(i[4])
                        sqsens = str(i[3]).replace("minus", "-1").replace("plus", "1")
                        Ensdart = str(i[1])
                        identity = float(i[2]) / 100.
                        Ensdarg = dico_cdnainfo[Ensdart]["Ensdarg"]
                        Specie = dico_cdnainfo[Ensdart]["Specie"]
                        Gene_name = dico_cdnainfo[Ensdart]["Gene_name"]
                        read_size = dico_reads[query]["read_size"]
                        dico_reads[query]["ensdarg"] = Ensdarg
                        dico_reads[query]["specie"] = Specie
                        dico_reads[query]["read_hsp_identity"] = int(i[7])
                        dico_reads[query]["identity"] = identity
                        dico_reads[query]["evalue"] = Evalue
                        dico_reads[query]["gene_name"] = Gene_name
                        dico_reads[query]["sens"] = sqsens
                        dico_reads[query]["hsp"] = hsp_blast
                        dico_reads[query]["hsp_size"] = hsp_size
                        dico_reads[query]["ensdart"] = Ensdart
                        dico_reads[query]["score"] = score
                        #dico_reads[query]["md5_hash"] = ""
                        identitycount += 1
                        identityadd += identity
                except:
                    print "ERROR PARSING NEAR READ " + str(countreads_total)
                    counterror += 1
        except Exception:
            print "ERROR PARSING NEAR READ " + str(countreads_total)
            counterror += 1
        # Flush the whole batch into the sqlite result table.
        for i in dico_reads:
            try:
                multihits = "|".join(dico_reads[i]["multihits"])
                read_name = dico_reads[i]["read_name"]
                ensdarg = dico_reads[i]["ensdarg"]
                specie = dico_reads[i]["specie"]
                read_size = dico_reads[i]["read_size"]
                read_hsp_identity = dico_reads[i]["read_hsp_identity"]
                identity = dico_reads[i]["identity"]
                evalue = dico_reads[i]["evalue"]
                gene_name = dico_reads[i]["gene_name"]
                sens = dico_reads[i]["sens"]
                hsp_size = dico_reads[i]["hsp_size"]
                ensdart = dico_reads[i]["ensdart"]
                score = dico_reads[i]["score"]
                md5_hash = dico_reads[i]["md5_hash"]
                #md5_hash = ""
                if savesequences == "Y":
                    read = dico_reads[i]["read"]
                    hsp = dico_reads[i]["hsp"]
                    qual = dico_reads[i]["qual"]
                else:
                    read = ""
                    hsp = ""
                    qual = ""
                if float(hsp_size) > 0:
                    hspadd += hsp_size
                    counthsp += 1
                countloop += 1
                readsizeadd += read_size
                # Unmatched reads get blank numeric columns in the table.
                if ensdarg == "":
                    identity = ""
                    hsp_size = ""
                    score = ""
                    read_size = ""
                if ensdarg == "":
                    count_miss +=1
                # NOTE(review): the row is assembled by string concatenation; a
                # double-quote inside any value breaks the statement. Prefer a
                # parameterized query: c2.execute("INSERT ... VALUES (?,...)", params).
                c2.execute('''INSERT INTO result VALUES ("''' + str(indiv) + '''","''' + str(read_name) + '''","''' + str(read) + '''","''' + str(ensdarg) + '''","''' + str(specie) + '''","''' + str(read_size) + '''","''' + str(read_hsp_identity) + '''","''' + str(identity) + '''","''' + str(evalue) + '''","''' + str(gene_name) + '''","''' + str(sens) + '''","''' + str(hsp) + '''","''' + str(hsp_size) + '''","''' + str(qual) + '''","''' + str(ensdart) + '''","''' + str(score) + '''","''' + str(multihits) + '''")''')
            except:
                counterror += 1
                print "ERROR DICO " + str(i)
        dico_reads.clear()
        conn2.commit()
        # Batch statistics / progress line.
        # NOTE(review): raises ZeroDivisionError when no read has matched yet
        # (identitycount, counthsp or countloop still 0 after early batches).
        identity = round(identityadd/identitycount, 3)
        lenghtnz = len(str(countreads_total))
        nsr = countreads_start - countreads_total
        tmpsr = round(nsr / (float(countreads_total) / (time.time() - timestart)), 0)
        timeremaining = "Remaining time : " + str(datetime.timedelta(seconds=tmpsr))
        seqsec = str(round(float(countreads_total) / (time.time() - timestart), 1))
        seqsecinstant = str(round(float(countreads) / (time.time() - timestart2), 1))
        timestart2 = round((time.time()), 0)
        pctage = round((float(((countreads_total / float(countreads_start)) * 100))), 1)
        prt = "[" + str(int(countreads_total)).center(lenghtnz) + "/" + str(int(countreads_start)).center(lenghtnz) + " - " + '%.1f' % pctage + " %]" + " Identity: " + '%.1f' % (identity * 100) + " % Identified: " + str( 100 - round(count_miss / float(countreads_total) * 100., 1)) + " % " + timeremaining + " Seq/s MEAN: " + seqsec + " Seq/s : " + seqsecinstant + " mean_READ : " + str(round(readsizeadd / countloop, 3)) + " mean_HSP : " + str(round(hspadd / counthsp, 3))
        countreads = 0
        # Pre-advance the counter so the final, partial batch still triggers
        # the `countreads == step` flush above.
        if (countreads_total + step) > countreads_start:
            countreads = (countreads_total + step) - countreads_start
        os.system("rm -rf ./" + rundir + "/")
        if pctage2 != pctage:
            print prt
            pctage2 = pctage
        #stdout.write("\r%s" % str(prt))
        #stdout.flush()
        #stdout.write("\n")
print "\n"
conn2.commit()
dat = os.popen("""date""").read().split("\n")
dat.pop(-1)
# Final summary. NOTE(review): same ZeroDivisionError risk as above if no
# read ever matched (counthsp / countloop == 0).
print "\n[DONE] - " + str(dat)
print "Speed: " + str(seqsec)
print "Mean HSP: " + str(round(hspadd / counthsp, 3))
print "Mean Read: " + str(round(readsizeadd / countloop, 3))
print "Total time :" + str(datetime.timedelta(seconds=(round(time.time() - timestart, 0))))
print "Total erros :" + str(counterror)
| egeeamu/voskhod | bin/voskhod_validate_assembly.py | Python | gpl-3.0 | 14,974 |
# -*- coding: utf-8 -*-
import re
from openerp import netsvc
from openerp.osv import osv, fields
class value_mapping_field(osv.osv):
    """Field-level value mapping used by the ETL manager.

    One record describes how the values of a single field are translated
    between a source model and a target model; the concrete value pairs
    live in the related ``etl.value_mapping_field_detail`` /
    ``etl.value_mapping_field_value`` records.
    """
    _name = 'etl.value_mapping_field'
    _description = 'value_mapping_field'
    _columns = {
        'name': fields.char(string='Field Name', required=True),
        'type': fields.selection([(u'id', u'Id'), (u'char', u'Char (not implemented yet)'), (u'selection', u'Selection')], string='Type', required=True),
        'source_model_id': fields.many2one('etl.external_model', string='Source Model'),
        'target_model_id': fields.many2one('etl.external_model', string='Target Model'),
        'log': fields.text(string='log'),
        'value_mapping_field_detail_ids': fields.one2many('etl.value_mapping_field_detail', 'value_mapping_field_id', string='Details'),
        'value_mapping_field_value_ids': fields.one2many('etl.value_mapping_field_value', 'value_mapping_field_id', string='Mapping Values'),
        'manager_id': fields.many2one('etl.manager', string='manager_id', ondelete='cascade', required=True),
    }
    _defaults = {
        # Default the parent manager from the caller's context (set by the
        # manager form when creating mappings inline).
        'manager_id': lambda self, cr, uid, context=None: context and context.get('manager_id', False),
    }
    _constraints = [
    ]
value_mapping_field()  # legacy pre-7.0 OpenERP idiom: instantiation registers the model
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| shingonoide/odoo-etl | addons/etl/value_mapping_field.py | Python | agpl-3.0 | 1,338 |
from nipype.testing import assert_equal
from nipype.interfaces.fsl.model import FILMGLS, FILMGLSInputSpec
def test_filmgls():
    """Regression test for the FILMGLS input-spec trait metadata.

    Two expected maps are kept because the command-line syntax changed
    between FSL releases: ``input_map`` matches the old single-dash style
    (``FILMGLSInputSpec``) and ``input_map2`` the newer ``--flag=value``
    style. The spec actually attached to the instance decides which map
    is asserted against, trait attribute by trait attribute.
    """
    # Expected metadata for the legacy (single-dash) interface.
    input_map = dict(args = dict(argstr='%s',),
     autocorr_estimate_only = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-ac',),
     autocorr_noestimate = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-noest',),
     brightness_threshold = dict(argstr='-epith %d',),
     design_file = dict(argstr='%s',),
     environ = dict(usedefault=True,),
     fit_armodel = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-ar',),
     full_data = dict(argstr='-v',),
     ignore_exception = dict(usedefault=True,),
     in_file = dict(mandatory=True,argstr='%s',),
     mask_size = dict(argstr='-ms %d',),
     multitaper_product = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-mt %d',),
     output_pwdata = dict(argstr='-output_pwdata',),
     output_type = dict(),
     results_dir = dict(usedefault=True,argstr='-rn %s',),
     smooth_autocorr = dict(argstr='-sa',),
     threshold = dict(argstr='%f',),
     tukey_window = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='-tukey %d',),
     use_pava = dict(argstr='-pava',),
     )
    # Expected metadata for the newer (--flag=value) interface.
    input_map2 = dict(args = dict(argstr='%s',),
     autocorr_estimate_only = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--ac',),
     autocorr_noestimate = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--noest',),
     brightness_threshold = dict(argstr='--epith=%d',),
     design_file = dict(argstr='--pd=%s',),
     environ = dict(usedefault=True,),
     fit_armodel = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--ar',),
     full_data = dict(argstr='-v',),
     ignore_exception = dict(usedefault=True,),
     in_file = dict(mandatory=True,argstr='--in=%s',),
     mask_size = dict(argstr='--ms=%d',),
     multitaper_product = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--mt=%d',),
     output_pwdata = dict(argstr='--outputPWdata',),
     output_type = dict(),
     results_dir = dict(argstr='--rn=%s',usedefault=True,),
     smooth_autocorr = dict(argstr='--sa',),
     terminal_output = dict(),
     threshold = dict(usedefault=True,argstr='--thr=%f',),
     tukey_window = dict(xor=['autocorr_estimate_only', 'fit_armodel', 'tukey_window', 'multitaper_product', 'use_pava', 'autocorr_noestimate'],argstr='--tukey=%d',),
     use_pava = dict(argstr='--pava',),
     )
    instance = FILMGLS()
    # Pick the map matching whichever input spec this FSL install provides;
    # yield one assertion per (trait, metadata-key) pair (nose-style generator).
    if isinstance(instance.inputs, FILMGLSInputSpec):
        for key, metadata in input_map.items():
            for metakey, value in metadata.items():
                yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
    else:
        for key, metadata in input_map2.items():
            for metakey, value in metadata.items():
                yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
| JohnGriffiths/nipype | nipype/interfaces/fsl/tests/test_FILMGLS.py | Python | bsd-3-clause | 4,142 |
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Remoting tests.
@since: 0.1.0
"""
| ethankennerly/hotel-vs-gozilla | pyamf/tests/remoting/__init__.py | Python | mit | 116 |
import os.path
import struct
class Register:
    """A monitored memory-mapped 32-bit register.

    Serialized to/from one whitespace-separated line of registers.txt:
    ``name alias address`` where alias ``_`` means "no alias".
    """
    def __init__ (self, name, alias, address):
        self.name, self.address = name, address
        # '_' is the on-disk placeholder for "alias equals the name".
        self.alias = name if alias=="_" else alias
        self.changed = False
    @staticmethod
    def from_str(s):
        """Parse one 'name alias address' line from registers.txt."""
        name, alias, address = s.split()
        return Register(name, alias, address)
    def __str__ (self):
        # Inverse of from_str().
        # NOTE(review): unlike Field.__str__, this ends with '\n' while the
        # writer in SvdRegisters.monitor appends another '\n'; harmless only
        # because the reader filters out empty lines.
        alias = "_" if self.alias == self.name else self.alias
        return "{} {} {}\n".format(self.name, alias, self.address)
    @property
    def gdbvalue(self):
        """Current register content read from the inferior's memory.

        Reads 4 bytes at the register address, decodes them little-endian
        unsigned, and returns the result as a gdb.Value.
        """
        inferior = gdb.selected_inferior()
        memory = inferior.read_memory(int(self.address, 0), 4)
        m = struct.unpack("<L", memory)[0]
        return gdb.parse_and_eval(str(m))
    def format_value (self, FORMAT):
        """Render the register value as "BIN", "DECIMAL" or hex (default)."""
        value = self.gdbvalue
        int_value = to_unsigned(value, value.type.sizeof)
        try:
            if value.type.code in [gdb.TYPE_CODE_INT, gdb.TYPE_CODE_PTR]:
                if FORMAT == "BIN":
                    # Zero-padded to the full width, grouped in bytes with '_'.
                    value_format = '{{:0{}b}}'.format(8 * value.type.sizeof)
                    fvalue = value_format.format(int_value)
                    fvalue = '_'.join([ fvalue[i:i+8] for i in range(0, len(fvalue), 8) ])
                elif FORMAT == "DECIMAL":
                    value_format = '{}'
                    fvalue = value_format.format(int_value)
                else:
                    value_format = '0x{{:0{}x}}'.format(2 * value.type.sizeof)
                    fvalue = value_format.format(int_value)
                return fvalue
        except (gdb.error, ValueError):
            pass
        # Fallback: let gdb stringify values we could not format ourselves.
        return str(value)
    def set_value (self, value):
        """Overwrite the whole register with *value* (range-checked)."""
        oldvalue = self.gdbvalue
        if oldvalue.type.code == gdb.TYPE_CODE_INT:
            width = oldvalue.type.sizeof * 8
            if 0 <= value < (2 ** width):
                run("set *{0} = {1}".format(self.address, value))
    @staticmethod
    def find_recursive(rs, name, path, baseaddr):
        """Resolve *path* (e.g. ['CR1'] or ['CR1', 'PE']) inside SVD registers *rs*.

        Returns a Register for a plain register path, a Field when the path
        names a bit-field, and (implicitly) None when nothing matches.
        """
        for r in rs:
            if r.name == path[0]:
                raddr = format_address(baseaddr + r.address_offset)
                if len(path) == 1:
                    return Register(name, name, raddr)
                else:
                    for f in r.fields:
                        if f.name == path[1]:
                            return Field(name, name, raddr, f.bit_offset, f.bit_width)
class Field (Register):
    """A bit-field inside a memory-mapped register.

    Inherits the ``gdbvalue`` memory read from :class:`Register` and adds a
    bit offset/width so only the selected bits are displayed or written.
    Serialized as ``name alias address boffset bwidth`` in registers.txt.
    """
    def __init__ (self, name, alias, address, boffset, bwidth):
        self.name, self.address, self.boffset, self.bwidth = name, address, boffset, bwidth
        self.alias = name if alias=="_" else alias
        self.changed = False
    @staticmethod
    def from_str(s):
        """Parse one 'name alias address boffset bwidth' line."""
        name, alias, address, boffset, bwidth = s.split()
        return Field(name, alias, address, int(boffset), int(bwidth))
    def __str__ (self):
        # Inverse of from_str(); '_' marks "alias equals name".
        alias = "_" if self.alias == self.name else self.alias
        return "{} {} {} {} {}".format(self.name, alias, self.address, self.boffset, self.bwidth)
    def format_value (self, FORMAT):
        """Render only this field's bits as "BIN", "DECIMAL" or hex (default)."""
        value = self.gdbvalue
        try:
            if value.type.code in [gdb.TYPE_CODE_INT, gdb.TYPE_CODE_PTR]:
                int_value = to_unsigned(value, value.type.sizeof)
                # Shift the field down and mask off anything above bwidth bits.
                int_value = (int_value >> self.boffset) & (0xffff_ffff >> (32 - self.bwidth))
                if FORMAT == "BIN":
                    value_format = '0b{{:0{}b}}'.format(self.bwidth)
                elif FORMAT == "DECIMAL":
                    value_format = '{}'
                else:
                    value_format = '0x{:x}'
                return value_format.format(int_value)
        except (gdb.error, ValueError):
            pass
        return str(value)
    def set_value (self, value):
        """Write *value* into this field, preserving the register's other bits."""
        oldvalue = self.gdbvalue
        if oldvalue.type.code == gdb.TYPE_CODE_INT:
            int_value = to_unsigned(oldvalue, oldvalue.type.sizeof)
            if 0 <= value < (2 ** self.bwidth):
                clean_mask = (0xffff_ffff >> (32 - self.bwidth))
                # Fix: perform the read-modify-write on the plain unsigned int.
                # The previous code computed int_value and then ignored it,
                # doing the bitwise arithmetic on the raw gdb.Value instead.
                newvalue = int_value & ~(clean_mask << self.boffset) | (value << self.boffset)
                run("set *{0} = {1}".format(self.address, newvalue))
class SvdRegisters (Dashboard.Module):
    """Show the CPU registers and their values."""
    # Persistent list of monitored registers: first line is the SVD file path,
    # each following line one serialized Register (3 tokens) or Field (5 tokens).
    FILE = "registers.txt"
    def __init__ (self):
        self.table = {}              # alias -> last Register/Field seen (for change detection)
        self.FORMAT = "HEX"          # current display format: HEX / BIN / DECIMAL
        self.FORMAT_CHANGED = False  # suppresses false "changed" highlights after a format switch
        # NOTE(review): SHOW_CHANGED is set by the `changed` command but never
        # read anywhere -- the changed-values section is always rendered.
        self.SHOW_CHANGED = False
        self.svd_device = None       # lazily parsed cmsis_svd device tree
    def label (self):
        return 'SVD Registers'
    def lines (self, term_width, term_height, style_changed):
        # fetch registers status
        out = []
        registers = []
        if os.path.isfile(SvdRegisters.FILE):
            with open(SvdRegisters.FILE, 'r') as f:
                lines = [l.strip() for l in f.readlines()]
                lines = [l for l in lines if l]
            changed_list = []
            for reg_info in lines[1:]:
                # fetch register and update the table
                # NOTE(review): a malformed line (neither 3 nor 5 tokens) leaves
                # `r` bound to the previous iteration's object.
                reg_split = reg_info.split()
                if len(reg_split) == 3:
                    r = Register.from_str(reg_info)
                elif len(reg_split) == 5:
                    r = Field.from_str(reg_info)
                r.value = r.format_value(self.FORMAT)
                old_r = self.table.get(r.alias, None)
                # A value differing from the previous refresh counts as changed,
                # unless the display format itself just changed.
                r.changed = old_r and (old_r.value != r.value) and not self.FORMAT_CHANGED
                self.table[r.alias] = r
                registers.append(r)
                if r.changed:
                    changed_list.append((r, old_r))
            # split registers in rows and columns
            max_name = max(len(r.alias) for r in registers)
            max_value = max(len(r.value) for r in registers)
            max_width = max_name + max_value + 2
            per_line = int((term_width + 1) / max_width) or 1
            # redistribute extra space among columns
            extra = int((term_width + 1 - max_width * per_line) / per_line)
            if per_line == 1:
                # center when there is only one column
                max_name += int(extra / 2)
                max_value += int(extra / 2)
            else:
                max_value += extra
            # format registers info
            partial = []
            for r in registers:
                styled_name = ansi(r.alias.rjust(max_name), R.style_low)
                value_style = R.style_selected_1 if r.changed else ''
                styled_value = ansi(r.value.ljust(max_value), value_style)
                partial.append(styled_name + ' ' + styled_value)
            for i in range(0, len(partial), per_line):
                out.append(' '.join(partial[i:i + per_line]).rstrip())
            # Append an "old -> new" section for registers that changed.
            if changed_list:
                out.append('- '*(term_width//2))
                for r, old_r in changed_list:
                    out.append('{} {} -> {}'.format(ansi(r.alias.rjust(max_name), R.style_low),
                                                    ansi(old_r.value, ''), ansi(r.value, '')))
        else:
            raise Exception("{} is missing. Add it".format(SvdRegisters.FILE))
        self.FORMAT_CHANGED = False
        return out
    # Display-format commands (the argument is accepted but unused).
    def hex (self, arg):
        self.FORMAT = "HEX"
        self.FORMAT_CHANGED = True
    def bin (self, arg):
        self.FORMAT = "BIN"
        self.FORMAT_CHANGED = True
    def decimal (self, arg):
        self.FORMAT = "DECIMAL"
        self.FORMAT_CHANGED = True
    def changed (self, arg):
        self.SHOW_CHANGED = True
    def monitor (self, arg):
        """Add a register/field (e.g. 'USART1.CR1.UE [alias]') to the watch list."""
        # Lazily parse the SVD file named on the first line of registers.txt.
        if not self.svd_device:
            try:
                from cmsis_svd.parser import SVDParser
            except:
                raise Exception("Cannot import SVDParser. Check 'cmsis_svd' library installed")
            if os.path.isfile(SvdRegisters.FILE):
                try:
                    with open(SvdRegisters.FILE, 'r') as f:
                        lines = [l.strip() for l in f.readlines()]
                    parser = SVDParser.for_xml_file(lines[0])
                    self.svd_device = parser.get_device()
                except:
                    raise Exception("Cannot load or parse SVD file")
            else:
                raise Exception("{} is missing. Add it".format(SvdRegisters.FILE))
        if self.svd_device and arg:
            args = arg.split()
            name = args[0]
            if name not in self.table:
                r = self.find_register(name)
                if r:
                    # Optional second argument is the display alias.
                    r.alias = args[1] if len(args) > 1 else "_"
                    with open(SvdRegisters.FILE, "a") as f:
                        f.write(str(r)+"\n")
                else:
                    raise Exception("Register {} not found".format(name))
            else:
                raise Exception("Register {} already exists".format(name))
    def find_register (self, name):
        """Resolve 'PERIPH.REG[.FIELD]' in the SVD tree; None if not found."""
        path = name.split(".")
        pname = path[0]
        pfound = False
        for p in self.svd_device.peripherals:
            if p.name == pname:
                pfound = True
                if len(path) > 1:
                    return Register.find_recursive(p.registers, name, path[1:], p.base_address)
        if pfound == False:
            raise Exception("Peripheral {} not found".format(pname))
    def remove (self, arg):
        """Drop a register (matched by name or alias) from file and table."""
        if os.path.isfile(SvdRegisters.FILE):
            with open(SvdRegisters.FILE, 'r') as f:
                lines = f.readlines()
            # Keep the SVD path line; filter entries whose name/alias matches.
            newlines = [l for l in lines[1:] if arg not in l.split()[:2]]
            with open(SvdRegisters.FILE, 'w') as f:
                f.write(lines[0]+"".join(newlines))
        if arg in self.table:
            del self.table[arg]
    def set_value (self, arg):
        """`set <name> <int>`: write a new value into a monitored register."""
        if arg:
            args = arg.split()
            if len(args) == 2:
                name, value = args[0], int(args[1])
                if name in self.table:
                    r = self.table[name]
                    r.set_value(value)
                else:
                    raise Exception("Register {} not found".format(name))
    def commands (self):
        # gdb-dashboard command table: maps subcommand name -> handler + help.
        return {
            'hex': {
                'action': self.hex,
                'doc': 'Set hexidemical format.'
            },
            'bin': {
                'action': self.bin,
                'doc': 'Set binary format.'
            },
            'decimal': {
                'action': self.decimal,
                'doc': 'Set decimal format.'
            },
            'changed': {
                'action': self.changed,
                'doc': 'Show old value of changed registers.'
            },
            'add': {
                'action': self.monitor,
                'doc': 'Add register to monitored.'
            },
            'monitor': {
                'action': self.monitor,
                'doc': 'Add register to monitored.'
            },
            'remove': {
                'action': self.remove,
                'doc': 'Remove register from monitored.'
            },
            'set': {
                'action': self.set_value,
                'doc': 'Change register value.'
            },
        }
| burrbull/gdb-dashboard-svdregisters | svdregisters.py | Python | apache-2.0 | 11,204 |
# coding=utf-8
import os
import unittest
import numpy as np
from pkg_resources import resource_filename
from compliance_checker.base import BaseCheck, GenericFile, Result
from compliance_checker.suite import CheckSuite
# Paths to the test datasets bundled with the compliance_checker package,
# resolved at import time so the tests work from an installed package too.
static_files = {
    "2dim": resource_filename("compliance_checker", "tests/data/2dim-grid.nc"),
    "bad_region": resource_filename("compliance_checker", "tests/data/bad_region.nc"),
    "bad_data_type": resource_filename(
        "compliance_checker", "tests/data/bad_data_type.nc"
    ),
    "test_cdl": resource_filename("compliance_checker", "tests/data/test_cdl.cdl"),
    "test_cdl_nc": resource_filename(
        "compliance_checker", "tests/data/test_cdl_nc_file.nc"
    ),
    "empty": resource_filename("compliance_checker", "tests/data/non-comp/empty.file"),
    "ru07": resource_filename(
        "compliance_checker", "tests/data/ru07-20130824T170228_rt0.nc"
    ),
    "netCDF4": resource_filename(
        "compliance_checker", "tests/data/test_cdl_nc4_file.cdl"
    ),
}
class TestSuite(unittest.TestCase):
    """End-to-end tests of CheckSuite: loading datasets (netCDF and CDL),
    running checker suites, skipping/limiting checks, and formatting output."""
    # @see
    # http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/
    def setUp(self):
        # Fresh suite per test with every registered checker available.
        self.cs = CheckSuite()
        self.cs.load_all_available_checkers()
    def shortDescription(self):
        return None
    # override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests
    # ion.module:TestClassName.test_function_name
    def __repr__(self):
        name = self.id()
        name = name.split(".")
        if name[0] not in ["ion", "pyon"]:
            return "%s (%s)" % (name[-1], ".".join(name[:-1]))
        else:
            return "%s ( %s )" % (
                name[-1],
                ".".join(name[:-2]) + ":" + ".".join(name[-2:]),
            )
    __str__ = __repr__
    def test_suite(self):
        # BWA: what's the purpose of this test? Just to see if the suite
        # runs without errors?
        ds = self.cs.load_dataset(static_files["2dim"])
        self.cs.run(ds, [], "acdd")
    def test_unicode_formatting(self):
        # Output containing non-ASCII characters must render without raising.
        ds = self.cs.load_dataset(static_files["bad_region"])
        score_groups = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in score_groups.items():
            groups, errors = rpair
            score_list, points, out_of = self.cs.standard_output(
                ds.filepath(), limit, checker, groups
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(groups, limit, points, out_of, checker)
    def test_generate_dataset_netCDF4(self):
        """
        Tests that suite.generate_dataset works with cdl file with netCDF4
        features.
        """
        # create netCDF4 file
        ds_name = self.cs.generate_dataset(static_files["netCDF4"])
        # check if correct name is return
        assert ds_name == static_files["netCDF4"].replace(".cdl", ".nc")
        # check if netCDF4 file was created
        assert os.path.isfile(static_files["netCDF4"].replace(".cdl", ".nc"))
    def test_include_checks(self):
        # Restricting run_all to one check name runs exactly that check.
        ds = self.cs.load_dataset(static_files["bad_data_type"])
        score_groups = self.cs.run_all(ds, ["cf:1.7"], ["check_standard_name"])
        checks_run = score_groups["cf:1.7"][0]
        assert len(checks_run) == 1
        first_check = checks_run[0]
        assert first_check.name == "§3.3 Standard Name"
        assert first_check.value[0] < first_check.value[1]
    def test_skip_checks(self):
        """Tests that checks are properly skipped when specified"""
        ds = self.cs.load_dataset(static_files["2dim"])
        # exclude title from the check attributes
        score_groups = self.cs.run_all(ds, ["acdd"], skip_checks=["check_high"])
        assert all(
            sg.name not in {"Conventions", "title", "keywords", "summary"}
            for sg in score_groups["acdd"][0]
        )
    def test_skip_check_level(self):
        """Checks level limited skip checks"""
        # Suffixes :A/:M/:L limit the skip to All/Medium/Low priority results.
        ds = self.cs.load_dataset(static_files["ru07"])
        score_groups = self.cs.run(
            ds,
            [
                "check_flags:A",
                "check_convention_possibly_var_attrs:M",
                "check_standard_name:L",
            ],
            "cf",
        )
        name_set = {sg.name for sg in score_groups["cf"][0]}
        # flattened set of messages
        msg_set = {msg for sg in score_groups["cf"][0] for msg in sg.msgs}
        expected_excluded_names = {
            u"§3.5 flag_meanings for lat",
            u"§3.5 flag_meanings for lon",
            u"§3.5 lat is a valid flags variable",
            u"§3.5 lat is a valid flags variable",
            u"§3.5 lon is a valid flags variable",
        }
        self.assertTrue(len(expected_excluded_names & name_set) == 0)
        # should skip references
        ref_msg = u"references global attribute should be a non-empty string"
        self.assertTrue(ref_msg not in msg_set)
        # check_standard_name is high priority, but we requested only low,
        # so the standard_name check should still exist
        standard_name_hdr = u"§3.3 Standard Name"
        self.assertTrue(standard_name_hdr in name_set)
    def test_group_func(self):
        # This is checking for issue #183, where group_func results in
        # IndexError: list index out of range
        ds = self.cs.load_dataset(static_files["bad_data_type"])
        score_groups = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in score_groups.items():
            groups, errors = rpair
            score_list, points, out_of = self.cs.standard_output(
                ds.filepath(), limit, checker, groups
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(groups, limit, points, out_of, checker)
    def test_score_grouping(self):
        # Testing the grouping of results for output, which can fail
        # if some assumptions are not met, e.g. if a Result object has
        # a value attribute of unexpected type
        res = [
            Result(BaseCheck.MEDIUM, True, "one"),
            Result(BaseCheck.MEDIUM, (1, 3), "one"),
            Result(BaseCheck.MEDIUM, None, "one"),
            Result(BaseCheck.MEDIUM, True, "two"),
            Result(BaseCheck.MEDIUM, np.isnan(1), "two"),  # value is type numpy.bool_
        ]
        score = self.cs.scores(res)
        self.assertEqual(score[0].name, "one")
        self.assertEqual(score[0].value, (2, 4))
        self.assertEqual(score[1].name, "two")
        self.assertEqual(score[1].value, (1, 2))
    def test_cdl_file(self):
        # Testing whether you can run compliance checker on a .cdl file
        # Load the cdl file
        ds = self.cs.load_dataset(static_files["test_cdl"])
        vals = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in vals.items():
            groups, errors = rpair
            score_list, cdl_points, cdl_out_of = self.cs.standard_output(
                ds.filepath(), limit, checker, groups
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(
                groups, limit, cdl_points, cdl_out_of, checker
            )
        ds.close()
        # Ok now load the nc file that it came from
        ds = self.cs.load_dataset(static_files["test_cdl_nc"])
        vals = self.cs.run(ds, [], "cf")
        limit = 2
        for checker, rpair in vals.items():
            groups, errors = rpair
            score_list, nc_points, nc_out_of = self.cs.standard_output(
                ds.filepath(), limit, checker, groups
            )
            # This asserts that print is able to generate all of the unicode output
            self.cs.standard_output_generation(
                groups, limit, nc_points, nc_out_of, checker
            )
        ds.close()
        # Clean up the .nc generated from the .cdl after the test finishes.
        nc_file_path = static_files["test_cdl"].replace(".cdl", ".nc")
        self.addCleanup(os.remove, nc_file_path)
        # Ok the scores should be equal!
        self.assertEqual(nc_points, cdl_points)
        self.assertEqual(nc_out_of, cdl_out_of)
    def test_load_local_dataset_GenericFile(self):
        # Non-netCDF input files fall back to the GenericFile wrapper.
        resp = self.cs.load_local_dataset(static_files["empty"])
        assert isinstance(resp, GenericFile) == True
    def test_standard_output_score_header(self):
        """
        Check that the output score header only checks the number of
        of potential issues, rather than the weighted score
        """
        ds = self.cs.load_dataset(static_files["bad_region"])
        score_groups = self.cs.run(ds, [], "cf")
        limit = 2
        groups, errors = score_groups["cf"]
        score_list, all_passed, out_of = self.cs.standard_output(
            ds.filepath(), limit, "cf", groups
        )
        assert all_passed < out_of
    def test_netCDF4_features(self):
        """
        Check if a proper netCDF4 file with netCDF4-datatypes is created.
        """
        # create and open dataset
        ds = self.cs.load_dataset(static_files["netCDF4"])
        # check if netCDF type of global attributes is correct
        assert isinstance(ds.global_att_of_type_int, np.int32)
        # check if netCDF4 type of global attributes is correct
        assert isinstance(ds.global_att_of_type_int64, np.int64)
        # check if netCDF type of variable is correct
        assert ds["tas"].dtype is np.dtype("float32")
        # check if netCDF4 type of variable is correct
        assert ds["mask"].dtype is np.dtype("int64")
| ocefpaf/compliance-checker | compliance_checker/tests/test_suite.py | Python | apache-2.0 | 9,710 |
__author__ = 'bromix'
from .client import Client
from .provider import Provider
| Soullivaneuh/kodi-plugin.audio.soundcloud | resources/lib/content/__init__.py | Python | gpl-2.0 | 81 |
"""
WSGI config for learning_dokku project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "learning_dokku.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| sherzberg/learning-dokku | learning_dokku/wsgi.py | Python | mit | 403 |
import argparse
from datetime import datetime
from time import time
import itertools
import os
import re
import pprint
from sys import exit
from docker.Container import Container
from docker.ContainerCollection import ContainerCollection
from stats.CpuAcct import CpuAcctStat, CpuAcctPerCore, ThrottledCpu
from stats.MemStat import MemStat
from stats.BlkioStat import BlkioStat
from stats.NetIoStat import NetIoStat
pp = pprint.PrettyPrinter(indent=4, width=40, depth=None, stream=None)
def main():
    """Interactively collect docker cgroup statistics and write them to CSV files.

    One sample of CPU / memory / block-I/O / network-I/O statistics is taken
    per <enter> press; ctrl-d ends sampling and flushes the CSV output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', help='Name for the output files', required=False,
                        default='ds-' + datetime.now().strftime('%Y-%m-%d'))
    parser.add_argument('-d', '--delta', help='Calculate and output deltas between samples', required=False,
                        action='store_true')
    parser.add_argument('arg', nargs='*') # use '+' for 1 or more args (instead of 0 or more)
    parsed = parser.parse_args()
    #print('Result:', vars(parsed))
    # outputFileNameBase is read later by writeStatistics()
    global outputFileNameBase
    outputFileNameBase = parsed.output
    calculateDeltas = parsed.delta
    print 'Output files: ' + outputFileNameBase + "*.csv"
    cpuSamples = []
    memorySamples = []
    blkioSamples = []
    netioSamples = []
    runningContainers = ContainerCollection()
    runningContainers.getRunningContainers()
    print('Found {0} running containers'.format(len(runningContainers.containers())))
    allContainers = ContainerCollection()
    allContainers.getAllContainers()
    print('Found total {0} containers'.format(len(allContainers.containers())))
    sampleNumber = 0
    while (True):
        sampleName = "Sample_" + str(sampleNumber)
        try:
            # EOF (ctrl-d) raises EOFError and ends the sampling loop
            tmp = raw_input('Next sample? (ctrl-d to exit) [' + sampleName + ']: ')
            if tmp != '':
                sampleName = tmp
            cpuSample = collectCpuSample(sampleName, runningContainers)
            cpuSamples.append(cpuSample)
            memorySample = collectMemorySample(sampleName, runningContainers)
            memorySamples.append(memorySample)
            blkioSample = collectBlkioSample(sampleName, runningContainers)
            blkioSamples.append(blkioSample)
            netioSample = collectNetioSample(sampleName, runningContainers)
            netioSamples.append(netioSample)
            sampleNumber += 1
        except EOFError as e:
            break
        finally:
            pass
    if calculateDeltas:
        # delta output needs at least one predecessor sample
        if len(cpuSamples) < 2:
            print('\nAt least two samples are needed when deltas are calculated.')
            return
    writeStatistics('cpu', cpuSamples, writeCpuStatisticsHeader, writeCpuSample, calculateDeltas)
    writeStatistics('mem', memorySamples, writeMemoryStatisticsHeader, writeMemorySample, calculateDeltas)
    writeStatistics('blkio', blkioSamples, writeBlkioStatisticsHeader, writeBlkioSample, calculateDeltas)
    writeStatistics('netio', netioSamples, writeNetioStatisticsHeader, writeNetioSample, calculateDeltas)
    exit()
def writeStatistics(statisticsType, samples, headerFunction, sampleWriteFunction, calculateDeltas):
    """Write all collected `samples` of one statistics type to a fresh CSV file.

    :param statisticsType: short tag ('cpu', 'mem', ...) used in the file name
    :param samples: list of sample dicts as built by the collect*Sample functions
    :param headerFunction: callable(outputFile, sample) writing the CSV header
    :param sampleWriteFunction: callable(outputFile, sample, prevSample) writing rows
    :param calculateDeltas: when True, each row holds the delta to the previous sample
    """
    outputFileName = uniqueFileName(outputFileNameBase + '-' + statisticsType + '.csv')
    if calculateDeltas:
        print('\nWriting sample deltas of {0} statistics to {1} ...'.format(statisticsType, outputFileName))
    else:
        print('\nWriting {0} statistics to {1} ...'.format(statisticsType, outputFileName))
    with open(outputFileName, 'w') as outputFile:
        prevSample = None
        for sampleIndex, sample in enumerate(samples):
            if sampleIndex == 0:
                # header columns are derived from the first sample's content
                headerFunction(outputFile, sample)
            if not calculateDeltas:
                sampleWriteFunction(outputFile, sample, None)
            else:
                # the first sample has no predecessor (prevSample is None),
                # so its absolute values are written; later rows get deltas
                sampleWriteFunction(outputFile, sample, prevSample)
                prevSample = sample
def uniqueFileName(file):
    '''Return `file` unchanged if it does not exist yet; otherwise append a
    counter (_1, _2, ...) before the extension until an unused name is found.'''
    if not os.path.isfile(file):
        return file
    # raw strings: '\.' in a non-raw literal is an invalid escape (warning on
    # modern Pythons); the patterns themselves are unchanged
    if re.match(r'.+\.[a-zA-Z0-9]+$', os.path.basename(file)):
        # file has an extension: insert the counter right before it
        name_func = lambda f, i: re.sub(r'(\.[a-zA-Z0-9]+)$', r'_%i\1' % i, f)
    else:
        # no extension: simply append the counter
        name_func = lambda f, i: ''.join([f, '_%i' % i])
    for new_file_name in (name_func(file, i) for i in itertools.count(1)):
        if not os.path.exists(new_file_name):
            return new_file_name
def collectCpuSample(sampleName, runningContainers):
    """Take a point-in-time CPU snapshot of every running container.

    Returns a dict with 'name', 'timestamp' and 'containers'; 'containers'
    maps container *name* to a dict of cpuacct/percore/throttled stat objects.
    """
    sample = {}
    sample['name'] = sampleName
    sample['timestamp'] = time()
    sample['containers'] = {}
    for container in runningContainers.containers():
        sampleSet = {}
        sampleSet['cpuacct'] = CpuAcctStat(container.id, container.name)
        sampleSet['percore'] = CpuAcctPerCore(container.id, container.name)
        sampleSet['throttled'] = ThrottledCpu(container.id, container.name)
        sample['containers'][container.name] = sampleSet
    # pp.pprint(sample)
    return sample
def writeCpuStatisticsHeader(outputFile,sample):
    """Write the CSV header row for CPU statistics, one 'Core N' column per core."""
    outputFile.write("Sample;Timestamp;Container;User Jiffies;System Jiffies;")
    outputFile.write("Enforcement Intervals;Group Throttiling Count;Throttled Time Total;")
    # Pick first container and first sample and nbr of cores from there
    # NOTE(review): `.keys()[0]` only works on Python 2, where keys() is a list.
    firstContainer = sample['containers'].keys()[0]
    perCoreSample = sample['containers'][firstContainer]['percore']
    for i in range(0,len(perCoreSample.perCore)):
        outputFile.write("Core {0};".format(i))
    outputFile.write("\n")
def writeCpuSample(outputFile, sample, prevSample):
    """Append one CPU statistics row per container.

    Writes absolute counters, or deltas against `prevSample` when it is given
    (the timestamp column then holds the elapsed time between the samples).
    """
    for (container, sampleSet) in sample['containers'].iteritems():
        cpuacct = sampleSet['cpuacct']
        userJiffies = number(getattr(cpuacct, 'userJiffies', None))
        systemJiffies = number(getattr(cpuacct, 'systemJiffies', None))
        timeStamp = sample['timestamp']
        if prevSample:
            timeStamp -= prevSample['timestamp']
            prevCpuacct = prevSample['containers'][container]['cpuacct']
            prevUserJiffies = number(getattr(prevCpuacct, 'userJiffies', None))
            prevSystemJiffies = number(getattr(prevCpuacct, 'systemJiffies', None))
            # a falsy (None or 0) previous value is skipped; subtracting 0
            # would be a no-op anyway
            if prevUserJiffies:
                userJiffies -= prevUserJiffies
            if prevSystemJiffies:
                systemJiffies -= prevSystemJiffies
        outputFile.write("{0};{1};{2};{3};{4};".format(sample['name'], int(timeStamp),
                                           container,
                                           userJiffies,
                                           systemJiffies))
        throttled = sampleSet['throttled']
        enforcementIntervals = number(getattr(throttled, 'enforcementIntervals', None))
        groupThrottilingCount = number(getattr(throttled, 'groupThrottilingCount', None))
        throttledTimeTotal = number(getattr(throttled, 'throttledTimeTotal', None))
        if prevSample:
            prevThrottled = prevSample['containers'][container]['throttled']
            prevEnforcementIntervals = number(getattr(prevThrottled, 'enforcementIntervals', None))
            prevGroupThrottilingCount = number(getattr(prevThrottled, 'groupThrottilingCount', None))
            prevThrottledTimeTotal = number(getattr(prevThrottled, 'throttledTimeTotal', None))
            if prevEnforcementIntervals:
                enforcementIntervals -= prevEnforcementIntervals
            if prevGroupThrottilingCount:
                groupThrottilingCount -= prevGroupThrottilingCount
            if prevThrottledTimeTotal:
                throttledTimeTotal -= prevThrottledTimeTotal
        outputFile.write("{0};{1};{2}".format(enforcementIntervals,
                                              groupThrottilingCount,
                                              throttledTimeTotal))
        # per-core nanosecond counters, in the same core order as the header
        perCoreSample = sampleSet['percore']
        prevPerCoreSample = None
        if prevSample:
            prevPerCoreSample = prevSample['containers'][container]['percore']
        for i, coreNs in enumerate(perCoreSample.perCore):
            ns = number(coreNs)
            if prevPerCoreSample:
                ns -= number(prevPerCoreSample.perCore[i])
            outputFile.write(";{0}".format(ns))
        outputFile.write("\n")
def collectMemorySample(sampleName, runningContainers):
    """Take a point-in-time memory snapshot of every running container."""
    sampledAt = time()
    # NOTE: keyed by the container object itself; writeMemorySample relies on
    # this and reads container.name from the key.
    perContainer = {}
    for container in runningContainers.containers():
        perContainer[container] = MemStat(container.id, container.name)
    return {
        'name': sampleName,
        'timestamp': sampledAt,
        'containers': perContainer,
    }
def writeMemoryStatisticsHeader(outputFile, sample):
    """Write the CSV header for memory statistics; columns come from the first sample."""
    outputFile.write("Sample;Timestamp;Container")
    # Take rest of the headers from sample. Content of memory statistics seem to vary in different versions
    # Pick first container and first sample
    # NOTE(review): `.keys()[0]` only works on Python 2, where keys() is a list.
    firstContainer = sample['containers'].keys()[0]
    memSample = sample['containers'][firstContainer]
    # sorted() fixes the column order; writeMemorySample sorts identically
    for key in sorted(memSample.values.keys()):
        outputFile.write(";{0}".format(key))
    outputFile.write("\n")
def writeMemorySample(outputFile, sample, prevSample):
    """Append one memory statistics row per container (deltas when prevSample given)."""
    # sample['containers'] is keyed by the container object itself (see
    # collectMemorySample), hence container.name below.
    for (container, memStat) in sample['containers'].iteritems():
        prevMemStat = None
        timeStamp = sample['timestamp']
        if prevSample:
            timeStamp -= prevSample['timestamp']
            prevMemStat = prevSample['containers'][container]
        outputFile.write("{0};{1};{2}".format(sample['name'], int(timeStamp), container.name))
        # must iterate in the same sorted order used for the header row
        for key in sorted(memStat.values.keys()):
            value = number(memStat.values[key])
            if prevMemStat:
                value -= number(prevMemStat.values[key])
            outputFile.write(";{0}".format(value))
        outputFile.write("\n")
def collectBlkioSample(sampleName, runningContainers):
    """Take a point-in-time block-I/O snapshot of every running container."""
    sampledAt = time()
    # keyed by the container object itself (writeBlkioSample reads .name from it)
    perContainer = {}
    for container in runningContainers.containers():
        perContainer[container] = BlkioStat(container.id, container.name)
    return {
        'name': sampleName,
        'timestamp': sampledAt,
        'containers': perContainer,
    }
def writeBlkioStatisticsHeader(outputFile, sample):
    """Write the fixed CSV header for block-I/O statistics (`sample` is unused)."""
    outputFile.write("Sample;Timestamp;Container;Device;Read count;Write count;Async count;Sync count;Read bytes;Write bytes;Async bytes;Sync bytes;\n")
def writeBlkioSample(outputFile, sample, prevSample):
    """Append one block-I/O row per container device (deltas when prevSample given)."""
    for (container, blkioSample) in sample['containers'].iteritems():
        blkioDevices = blkioSample.devices
        prevBlkioDevices = None
        timeStamp = sample['timestamp']
        if prevSample:
            timeStamp -= prevSample['timestamp']
            prevBlkioDevices = prevSample['containers'][container].devices
        # one CSV row per device of this container
        for device in blkioDevices:
            outputFile.write("{0};{1};{2};".format(sample['name'], int(timeStamp), container.name))
            outputFile.write("{0}({1})".format(blkioDevices[device]['name'], blkioDevices[device]['type']))
            # operation counts, in the same order as the header columns
            for operation in ('Read', 'Write', 'Async', 'Sync'):
                value = number(blkioDevices[device]['operations'][operation])
                if prevBlkioDevices:
                    value -= number(prevBlkioDevices[device]['operations'][operation])
                outputFile.write(";{0}".format(value))
            # byte counts, same operation order
            for operation in ('Read', 'Write', 'Async', 'Sync'):
                value = number(blkioDevices[device]['bytes'][operation])
                if prevBlkioDevices:
                    value -= number(prevBlkioDevices[device]['bytes'][operation])
                outputFile.write(";{0}".format(value))
            outputFile.write("\n")
def collectNetioSample(sampleName, runningContainers):
    """Take a point-in-time network-I/O snapshot of every running container."""
    sampledAt = time()
    # keyed by the container object itself (writeNetioSample reads .name from it)
    perContainer = {}
    for container in runningContainers.containers():
        perContainer[container] = NetIoStat(container.id, container.name)
    return {
        'name': sampleName,
        'timestamp': sampledAt,
        'containers': perContainer,
    }
def writeNetioStatisticsHeader(outputFile, sample):
    """Write the fixed CSV header for network-I/O statistics (`sample` is unused)."""
    outputFile.write("Sample;Timestamp;Container;Interface;Received bytes;Received packets;Sent bytes;Sent packets\n")
def writeNetioSample(outputFile, sample, prevSample):
    """Append one network-I/O row per container interface.

    Writes absolute counters, or deltas against `prevSample` when it is given.
    """
    for (container, netioSample) in sample['containers'].iteritems():
        interfaces = netioSample.interfaces
        prevInterfaces = None
        timeStamp = sample['timestamp']
        if prevSample:
            timeStamp -= prevSample['timestamp']
            prevInterfaces = prevSample['containers'][container].interfaces
        for interface in interfaces.keys():
            outputFile.write("{0};{1};{2};".format(sample['name'], int(timeStamp), container.name))
            receivedBytes = number(interfaces[interface]['received']['bytes'])
            transmittedBytes = number(interfaces[interface]['transmitted']['bytes'])
            receivedPackets = number(interfaces[interface]['received']['packets'])
            transmittedPackets = number(interfaces[interface]['transmitted']['packets'])
            if prevInterfaces:
                receivedBytes -= number(prevInterfaces[interface]['received']['bytes'])
                transmittedBytes -= number(prevInterfaces[interface]['transmitted']['bytes'])
                receivedPackets -= number(prevInterfaces[interface]['received']['packets'])
                # BUG FIX: this was a plain assignment (`=`), which discarded
                # the delta and wrote the previous sample's absolute packet
                # count instead of the difference.
                transmittedPackets -= number(prevInterfaces[interface]['transmitted']['packets'])
            outputFile.write("{0};{1};{2};{3};{4}\n".format(interface,
                                                            receivedBytes,
                                                            receivedPackets,
                                                            transmittedBytes,
                                                            transmittedPackets))
def number(s):
    """Convert `s` to an int if possible, else a float; values with no numeric
    interpretation at all (e.g. None) yield None. Non-numeric strings raise
    ValueError from the float conversion."""
    try:
        value = int(s)
    except ValueError:
        # not an int literal (e.g. "2.5"); try float -- its ValueError propagates
        value = float(s)
    except TypeError:
        # e.g. None: no numeric interpretation
        value = None
    return value
if __name__ == "__main__":
main()
| sofkaski/dockerstat | dockerstat/dockerstat.py | Python | mit | 14,074 |
import sys
__version_info__ = (0, 4, 3)
__version__ = '.'.join(map(str, __version_info__))
# NOTE(review): 'ALL' looks like it was intended to be '__all__'; as written it
# does not restrict `from ... import *` -- confirm intent before renaming.
ALL = ['udict']
# py2/py3 compatibility
if sys.version_info.major == 2:
    def iteritems(d):
        # py2: lazy iterator over (key, value) pairs
        return d.iteritems()
else:
    def iteritems(d):
        # py3: items() returns a lazy view already
        return d.items()
# For internal use only as a value that can be used as a default
# and should never exist in a dict.
_MISSING = object()
class udict(dict):
    """
    A dict that supports attribute-style access and hierarchical keys.

    See `__getitem__` for details of how hierarchical keys are handled,
    and `__getattr__` for details on attribute-style access.

    Subclasses may define a '__missing__' method (it must be an instance method
    defined on the class and not just an instance variable) that accepts one
    parameter. If such a method is defined, then a call to `my_udict[key]`
    (or the equivalent `my_udict.__getitem__(key)`) that fails will call
    the '__missing__' method with the key as the parameter and return the
    result of that call (or raise any exception the call raises).
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize a new `udict` using `dict.__init__`.

        When passing in a dict arg, this won't do any special
        handling of values that are dicts. They will remain plain dicts inside
        the `udict`. For a recursive init that will convert all
        dict values in a dict to udicts, use `udict.fromdict`.

        Likewise, dotted keys will not be treated specially, so something
        like `udict({'a.b': 'a.b'})` is equivalent to `ud = udict()` followed
        by `setattr(ud, 'a.b', 'a.b')`.
        """
        dict.__init__(self, *args, **kwargs)

    def __getitem__(self, key):
        """
        Get mapped value for given `key`, or raise `KeyError` if no such
        mapping.

        The `key` may be any value that is valid for a plain `dict`. If the
        `key` is a dotted key (a string like 'a.b' containing one or more
        '.' characters), then the key will be split on '.' and interpreted
        as a sequence of `__getitem__` calls. For example,
        `d.__getitem__('a.b')` would be interpreted as (approximately)
        `d.__getitem__('a').__getitem__('b')`. If the key is not a dotted
        it is treated normally.

        :exceptions:
        - KeyError: if there is no such key on a dict (or object that supports
          `__getitem__`) at any level of the dotted-key traversal.
        - TypeError: if key is not hashable or if an object at some point
          in the dotted-key traversal does not support `__getitem__`.
        """
        if not isinstance(key, str) or '.' not in key:
            return dict.__getitem__(self, key)
        obj, token = _descend(self, key)
        return _get(obj, token)

    def __setitem__(self, key, value):
        """
        Set `value` for given `key`.

        See `__getitem__` for details of how `key` is intepreted if it is a
        dotted key and for exceptions that may be raised.
        """
        if not isinstance(key, str) or '.' not in key:
            return dict.__setitem__(self, key, value)
        obj, token = _descend(self, key)
        return dict.__setitem__(obj, token, value)

    def __delitem__(self, key):
        """
        Remove mapping for `key` in self.

        See `__getitem__` for details of how `key` is intepreted if it is a
        dotted key and for exceptions that may be raised.
        """
        if not isinstance(key, str) or '.' not in key:
            dict.__delitem__(self, key)
            return
        obj, token = _descend(self, key)
        del obj[token]

    def __getattr__(self, key):
        # no special treatement for dotted keys, but we need to use
        # 'get' rather than '__getitem__' in order to avoid using
        # '__missing__' if key is not in dict
        val = dict.get(self, key, _MISSING)
        if val is _MISSING:
            raise AttributeError("no attribute '%s'" % (key,))
        return val

    def __setattr__(self, key, value):
        # normal setattr behavior, except we put it in the dict
        # instead of setting an attribute (i.e., dotted keys are
        # treated as plain keys)
        dict.__setitem__(self, key, value)

    def __delattr__(self, key):
        try:
            # no special handling of dotted keys
            dict.__delitem__(self, key)
        except KeyError as e:
            raise AttributeError("no attribute '%s'" % (e.args[0]))

    def __reduce__(self):
        # pickle the contents of a udict as a list of items;
        # __getstate__ and __setstate__ aren't needed
        constructor = self.__class__
        instance_args = (list(iteritems(self)),)
        return constructor, instance_args

    def get(self, key, default=None):
        """
        Return the value for `key` (dotted keys supported), or `default`
        if the key is absent at any traversal level.
        """
        # We can't use self[key] to support `get` here, because a missing key
        # should return the `default` and should not use a `__missing__`
        # method if one is defined (as happens for self[key]).
        if not isinstance(key, str) or '.' not in key:
            return dict.get(self, key, default)
        try:
            obj, token = _descend(self, key)
            return _get(obj, token)
        except KeyError:
            return default

    @classmethod
    def fromkeys(cls, seq, value=None):
        # Fixed: the first parameter of a classmethod receives the class and
        # is conventionally named `cls` (it was misleadingly named `self`).
        # Using `cls(...)` instead of a hard-coded `udict(...)` also keeps the
        # subclass type, matching `fromdict` below.
        return cls((elem, value) for elem in seq)

    @classmethod
    def fromdict(cls, mapping):
        """
        Create a new `udict` from the given `mapping` dict.

        The resulting `udict` will be equivalent to the input
        `mapping` dict but with all dict instances (recursively)
        converted to an `udict` instance. If you don't want
        this behavior (i.e., you want sub-dicts to remain plain dicts),
        use `udict(mapping)` instead.
        """
        ud = cls()
        for k in mapping:
            v = dict.__getitem__(mapping, k) # okay for py2/py3
            if isinstance(v, dict):
                v = cls.fromdict(v)
            dict.__setitem__(ud, k, v)
        return ud

    def todict(self):
        """
        Create a plain `dict` from this `udict`.

        The resulting `dict` will be equivalent to this `udict`
        but with every `udict` value (recursively) converted to
        a plain `dict` instance.
        """
        d = dict()
        for k in self:
            v = dict.__getitem__(self, k)
            if isinstance(v, udict):
                v = v.todict()
            d[k] = v
        return d

    def copy(self):
        """
        Return a shallow copy of this `udict`.

        For a deep copy, use `udict.fromdict` (as long as there aren't
        plain dict values that you don't want converted to `udict`).
        """
        return udict(self)

    def setdefault(self, key, default=None):
        """
        If `key` is in the dictionary, return its value.
        If not, insert `key` with a value of `default` and return `default`,
        which defaults to `None`.
        """
        val = self.get(key, _MISSING)
        if val is _MISSING:
            val = default
            self[key] = default
        return val

    def __contains__(self, key):
        # dotted keys are supported via self.get
        return self.get(key, _MISSING) is not _MISSING

    def pop(self, key, *args):
        if not isinstance(key, str) or '.' not in key:
            return dict.pop(self, key, *args)
        try:
            obj, token = _descend(self, key)
        except KeyError:
            # a missing intermediate level behaves like a missing key
            if args:
                return args[0]
            raise
        else:
            return dict.pop(obj, token, *args)

    def __dir__(self):
        """
        Expose the expected instance and class attributes and methods
        for the builtin `dir` method, as well as the top-level keys that
        are stored.
        """
        return sorted(set(dir(udict)) | set(self.keys()))
# helper to do careful and consistent `obj[name]`
def _get(obj, name):
"""
Get the indexable value with given `name` from `obj`, which may be
a `dict` (or subclass) or a non-dict that has a `__getitem__` method.
"""
try:
# try to get value using dict's __getitem__ descriptor first
return dict.__getitem__(obj, name)
except TypeError:
# if it's a dict, then preserve the TypeError
if isinstance(obj, dict):
raise
# otherwise try one last time, relying on __getitem__ if any
return obj[name]
# helper for common use case of traversing a path like 'a.b.c.d'
# to get the 'a.b.c' object and do something to it with the 'd' token
def _descend(obj, key):
    """
    Walk `obj` along the dotted `key` (which must contain at least one '.'),
    consuming every path segment except the last.

    A `__getitem__` would do `dict.__getitem__(value, token)` with the
    result, and a `__setitem__` would do `dict.__setitem__(value, token, v)`.

    :returns:
    (value, token) - `value` is the object reached after consuming all but
    the final segment, and `token` is that final, not-yet-consumed segment.
    """
    segments = key.split('.')
    if len(segments) < 2:
        raise ValueError(key)
    node = obj
    for segment in segments[:-1]:
        node = _get(node, segment)
    return node, segments[-1]
| eukaryote/uberdict | uberdict/__init__.py | Python | mit | 9,340 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging, os, sets
from rapid_app import settings_app
from rapid_app.models import ManualDbHandler, PrintTitleDev
log = logging.getLogger(__name__)
class UpdateTitlesHelper( object ):
    """ Manages views.update_production_easyA_titles() work. """

    def __init__(self):
        self.db_handler = ManualDbHandler()

    def run_update( self, request ):
        """ Calls the backup and update code.
            Called by views.update_production_easyA_titles() """
        log.debug( 'calling update_older_backup()' )
        self.update_older_backup()
        log.debug( 'calling update_backup()' )
        self.update_backup()
        log.debug( 'calling update_production_table()' )
        self.update_production_table()
        return

    def update_older_backup( self ):
        """ Copies data from backup table to older backup table.
            Called by run_update() """
        result = self.db_handler.run_sql( sql=unicode(os.environ['RAPID__BACKUP_COUNT_SQL']), connection_url=settings_app.DB_CONNECTION_URL )
        if result[0][0] > 10000:  # result is like `[(27010,)]`; don't backup if the count is way off
            self.db_handler.run_sql(
                sql=unicode(os.environ['RAPID__BACKUP_OLDER_DELETE_SQL']), connection_url=settings_app.DB_CONNECTION_URL )
            if 'sqlite' in settings_app.DB_CONNECTION_URL:
                self.db_handler.run_sql( sql='VACUUM;', connection_url=settings_app.DB_CONNECTION_URL )
            self.db_handler.run_sql(
                sql=unicode(os.environ['RAPID__BACKUP_OLDER_INSERT_SQL']), connection_url=settings_app.DB_CONNECTION_URL )
        else:
            log.info( 'not backing up because count is only, ```{}```'.format(result) )
        return

    def update_backup( self ):
        """ Copies data from production table to backup table.
            Called by run_update() """
        result = self.db_handler.run_sql( sql=unicode(os.environ['RAPID__PRODUCTION_COUNT_SQL']), connection_url=settings_app.DB_CONNECTION_URL )
        if result[0][0] > 10000:  # result is like `[(27010,)]`; don't backup if the count is way off
            self.db_handler.run_sql(
                sql=unicode(os.environ['RAPID__BACKUP_DELETE_SQL']), connection_url=settings_app.DB_CONNECTION_URL )
            if 'sqlite' in settings_app.DB_CONNECTION_URL:
                self.db_handler.run_sql( sql='VACUUM;', connection_url=settings_app.DB_CONNECTION_URL )
            self.db_handler.run_sql(
                sql=unicode(os.environ['RAPID__BACKUP_INSERT_SQL']), connection_url=settings_app.DB_CONNECTION_URL )
        else:
            log.info( 'not backing up because count is only, ```{}```'.format(result) )
        return

    def update_production_table( self ):
        """ Runs update-production sql.
            Called by run_update() """
        ( rapid_keys, easya_keys, key_int ) = self._setup_vars()  # setup
        rapid_keys = self._populate_rapid_keys( rapid_keys )  # get rapid keys
        easya_keys = self._populate_easya_keys( easya_keys, key_int )  # get easyA keys
        ( rapid_not_in_easya, easya_not_in_rapid ) = self._intersect_keys( rapid_keys, easya_keys)  # intersect sets
        self._add_rapid_entries( rapid_not_in_easya )  # insert new rapid records
        self._remove_easya_entries( easya_not_in_rapid )  # run easyA deletions
        return

    def _setup_vars( self ):
        """ Preps vars.
            Called by update_production_table() """
        rapid_keys = []
        easya_keys = []
        tuple_keys = { 'key': 0, 'issn': 1, 'start': 2, 'end': 3, 'location': 4, 'call_number': 5 }
        key_int = tuple_keys['key']  # only using zero now, might use other tuple-elements later
        return ( rapid_keys, easya_keys, key_int )

    def _populate_rapid_keys( self, rapid_keys ):
        """ Preps list of rapid keys.
            Called by update_production_table() """
        for title in PrintTitleDev.objects.all():
            rapid_keys.append( title.key )
        log.debug( 'len rapid_keys, {}'.format(len(rapid_keys)) )
        return rapid_keys

    def _populate_easya_keys( self, easya_keys, key_int ):
        """ Preps list of easya keys.
            Called by update_production_table() """
        sql = 'SELECT * FROM `{}`'.format( unicode(os.environ['RAPID__TITLES_TABLE_NAME']) )
        result = self.db_handler.run_sql( sql=sql, connection_url=settings_app.DB_CONNECTION_URL )
        for row_tuple in result:
            easya_keys.append( row_tuple[key_int] )
        log.debug( 'len easya_keys, {}'.format(len(easya_keys)) )
        return easya_keys

    def _intersect_keys( self, rapid_keys, easya_keys):
        """ Runs set work.
            Called by update_production_table() """
        ## builtin `set` replaces the long-deprecated `sets.Set` (the `sets`
        ## module was removed in python 3); set-difference behavior is identical
        rapid_not_in_easya = list( set(rapid_keys) - set(easya_keys) )
        easya_not_in_rapid = list( set(easya_keys) - set(rapid_keys) )
        log.debug( 'rapid_not_in_easya, {}'.format(rapid_not_in_easya) )
        log.debug( 'easya_not_in_rapid, {}'.format(easya_not_in_rapid) )
        return ( rapid_not_in_easya, easya_not_in_rapid )

    def _add_rapid_entries( self, rapid_not_in_easya ):
        """ Runs inserts of new records.
            Called by update_production_table() """
        ## NOTE(review): the sql is built via string-formatting; values come from
        ## the internal PrintTitleDev table, but parameterized queries would be safer.
        for rapid_key in rapid_not_in_easya:
            rapid_title = PrintTitleDev.objects.get( key=rapid_key )
            sql = '''
                INSERT INTO `{destination_table}` ( `key`, `issn`, `start`, `end`, `location`, `call_number` )
                VALUES ( '{key}', '{issn}', '{start}', '{end}', '{building}', '{call_number}' );
                '''.format( destination_table=unicode(os.environ['RAPID__TITLES_TABLE_NAME']), key=rapid_title.key, issn=rapid_title.issn, start=rapid_title.start, end=rapid_title.end, building=rapid_title.building, call_number=rapid_title.call_number )
            self.db_handler.run_sql( sql=sql, connection_url=settings_app.DB_CONNECTION_URL )
        log.debug( 'rapid additions to easyA complete' )
        return

    def _remove_easya_entries( self, easya_not_in_rapid ):
        """ Runs deletion of old records.
            Called by update_production_table() """
        for easya_key in easya_not_in_rapid:
            sql = '''
                DELETE FROM `{destination_table}`
                WHERE `key` = '{easya_key}'
                LIMIT 1;
                '''.format( destination_table=unicode(os.environ['RAPID__TITLES_TABLE_NAME']), easya_key=easya_key )
            self.db_handler.run_sql( sql=sql, connection_url=settings_app.DB_CONNECTION_URL )
        log.debug( 'easyA deletions complete' )
        return
# def update_production_table( self ):
# """ Runs update-production sql.
# TODO: a more elegant way to do this would be to query both tables, do a set intersection, and then do the appropriate small loop of additions and deletes.
# Called by run_update() """
# ## load all new data to memory
# titles = PrintTitleDev.objects.all()
# ## iterate through source-set adding new records if needed
# for entry in titles:
# sql = '''
# SELECT * FROM `{destination_table}`
# WHERE `key` = '{key}'
# AND `issn` = '{issn}'
# AND `start` = {start}
# AND `end` = {end}
# AND `location` = '{location}'
# AND `call_number` = '{call_number}';
# '''.format( destination_table=unicode(os.environ['RAPID__TITLES_TABLE_NAME']), key=entry.key, issn=entry.issn, start=entry.start, end=entry.end, location=entry.location, call_number=entry.call_number )
# result = self.db_handler.run_sql( sql=sql, connection_url=settings_app.DB_CONNECTION_URL )
# if result == None:
# sql = '''
# INSERT INTO `{destination_table}` ( `key`, `issn`, `start`, `end`, `location`, `call_number` )
# VALUES ( '{key}', '{issn}', '{start}', '{end}', '{location}', '{call_number}' );
# '''.format( destination_table=unicode(os.environ['RAPID__TITLES_TABLE_NAME']), key=entry.key, issn=entry.issn, start=entry.start, end=entry.end, location=entry.location, call_number=entry.call_number )
# self.db_handler.run_sql( sql=sql, connection_url=settings_app.DB_CONNECTION_URL )
# ## iterate through destination-set deleting records if they're not in the source
# sql = '''SELECT * FROM `{}`;'''.format( unicode(os.environ['RAPID__TITLES_TABLE_NAME']) )
# result = self.db_handler.run_sql( sql=sql, connection_url=settings_app.DB_CONNECTION_URL )
# tuple_keys = {
# 'key': 0, 'issn': 1, 'start': 2, 'end': 3, 'location': 4, 'call_number': 5 }
# for tuple_entry in result:
# match = PrintTitleDev.objects.filter(
# key=tuple_keys['key'], issn=tuple_keys['issn'], start=int(tuple_keys['start']), end=int(tuple_keys['end']), building=tuple_keys['location'], call_number=tuple_keys['call_number'] )
# if match == []:
# sql = '''
# DELETE * FROM `{destination_table}`
# WHERE `key` = '{key}'
# AND `issn` = '{issn}'
# AND `start` = {start}
# AND `end` = {end}
# AND `location` = '{location}'
# AND `call_number` = '{call_number}'
# LIMIT 1;
# '''.format( destination_table=unicode(os.environ['RAPID__TITLES_TABLE_NAME']), key=entry.key, issn=entry.issn, start=entry.start, end=entry.end, location=entry.location, call_number=entry.call_number )
# return
# end class UpdateTitlesHelper
| birkin/rapid_exports | rapid_app/lib/viewhelper_updatedb.py | Python | mit | 9,897 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
module: win_disk_image
short_description: Manage ISO/VHD/VHDX mounts on Windows hosts
version_added: '2.3'
description:
- Manages mount behavior for a specified ISO, VHD, or VHDX image on a Windows host. When C(state) is C(present),
the image will be mounted under a system-assigned drive letter, which will be returned in the C(mount_path) value
of the module result.
- Requires Windows 8+ or Windows Server 2012+.
options:
image_path:
description:
- Path to an ISO, VHD, or VHDX image on the target Windows host (the file cannot reside on a network share)
type: str
required: yes
state:
description:
- Whether the image should be present as a drive-letter mount or not.
type: str
choices: [ absent, present ]
default: present
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
# Run installer from mounted ISO, then unmount
- name: Ensure an ISO is mounted
win_disk_image:
image_path: C:\install.iso
state: present
register: disk_image_out
- name: Run installer from mounted ISO
win_package:
path: '{{ disk_image_out.mount_paths[0] }}setup\setup.exe'
product_id: 35a4e767-0161-46b0-979f-e61f282fee21
state: present
- name: Unmount ISO
win_disk_image:
image_path: C:\install.iso
state: absent
'''
RETURN = r'''
mount_path:
description: Filesystem path where the target image is mounted, this has been deprecated in favour of C(mount_paths).
returned: when C(state) is C(present)
type: str
sample: F:\
mount_paths:
description: A list of filesystem paths mounted from the target image.
returned: when C(state) is C(present)
type: list
sample: [ 'E:\', 'F:\' ]
'''
| alxgu/ansible | lib/ansible/modules/windows/win_disk_image.py | Python | gpl-3.0 | 2,042 |
try:
import mock
except ImportError:
from unittest import mock
from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from django.utils import timezone
from export_csv.exceptions import NoModelFoundException
from export_csv.views import ExportCSV
from .models import Customer
class ExportCSVTests(TestCase):
    def setUp(self):
        # Two active customers give every export test a non-trivial queryset.
        Customer.objects.create(name='name1', address='address1',
                                is_active=True, last_updated=timezone.now())
        Customer.objects.create(name='name2', address='address2',
                                is_active=True, last_updated=timezone.now())
def setup_view(self, view, request, model=None, field_names=None,
filename=None, add_col_names=False, col_names=None,
*args, **kwargs):
"""Mimic :func:`as_view` callable but returns view instance.
``args`` and ``kwargs`` are the same you would pass to ``reverse()``.
"""
view.request = request
view.model = model
view.field_names = field_names
view.filename = filename
view.add_col_names = add_col_names
view.col_names = col_names
view.args = args
view.kwargs = kwargs
return view
    def test_get_queryset_pass(self):
        """A view configured with a model yields a queryset of that model."""
        request = RequestFactory().get("")
        view = ExportCSV()
        view = self.setup_view(view, request, model=Customer)
        queryset = view.get_queryset()
        self.assertEqual(queryset.model, Customer)
    def test_get_queryset_exception(self):
        """Without a model, get_queryset raises NoModelFoundException."""
        request = RequestFactory().get("")
        view = ExportCSV()
        view = self.setup_view(view, request)
        self.assertRaises(NoModelFoundException, view.get_queryset)
    def test_get_field_names_custom(self):
        """Explicitly configured field_names are returned unchanged."""
        request = RequestFactory().get("")
        view = ExportCSV()
        field_names = ['name', 'address', 'is_active']
        view = self.setup_view(view, request, field_names=field_names)
        returned_field_names = view.get_field_names()
        self.assertEqual(field_names, returned_field_names)
    def test_get_field_names_model(self):
        """Without explicit field_names, fields are derived from the model."""
        request = RequestFactory().get("")
        view = ExportCSV()
        view = self.setup_view(view, request, model=Customer)
        expected_field_names = ['name', 'address', 'is_active', 'last_updated']
        field_names = view.get_field_names()
        # compare as sets: field ordering is not part of the contract here
        self.assertEqual(set(field_names), set(expected_field_names))
def test_get_field_names_exception(self):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request)
self.assertRaises(NoModelFoundException, view.get_field_names)
@mock.patch('export_csv.views.ExportCSV.get_field_names')
def test_get_field_verbose_names_custom(self, mock_get_field_names):
request = RequestFactory().get("")
view = ExportCSV()
field_names = ['name', 'address', 'is_active']
expected_verbose_names = ['name', 'address', 'Is Active']
view = self.setup_view(view, request, model=Customer,
field_names=field_names)
mock_get_field_names.return_value = field_names
result_verbose_names = view._get_field_verbose_names()
self.assertEqual(expected_verbose_names, result_verbose_names)
@mock.patch('export_csv.views.ExportCSV.get_field_names')
def test_get_field_verbose_names_model(self, mock_get_field_names):
request = RequestFactory().get("")
view = ExportCSV()
expected_verbose_names = ['name', 'address', 'Is Active',
'last updated']
view = self.setup_view(view, request, model=Customer)
mock_get_field_names.return_value = ['name', 'address', 'is_active',
'last_updated']
result_verbose_names = view._get_field_verbose_names()
self.assertEqual(set(expected_verbose_names),
set(result_verbose_names))
@mock.patch('export_csv.views.ExportCSV.get_field_names')
def test_get_field_verbose_names_exception(self, mock_get_field_names):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request, model=None)
mock_get_field_names.return_value = None
self.assertRaises(NoModelFoundException, view._get_field_verbose_names)
@mock.patch('export_csv.views.ExportCSV._get_field_verbose_names')
def test_get_col_names_model(self, mock_get_field_verbose_names):
request = RequestFactory().get("")
view = ExportCSV()
expected_col_names = ['name', 'address', 'Is Active',
'last updated']
view = self.setup_view(view, request, model=Customer)
mock_get_field_verbose_names.return_value = expected_col_names
result_col_names = view.get_col_names()
self.assertEqual(set(expected_col_names), set(result_col_names))
@mock.patch('export_csv.views.ExportCSV._get_field_verbose_names')
def test_get_col_names_custom(self, mock_get_field_verbose_names):
request = RequestFactory().get("")
view = ExportCSV()
expected_col_names = ['name', 'address', 'Is Active',
'last updated']
view = self.setup_view(view, request, model=Customer,
col_names=expected_col_names)
mock_get_field_verbose_names.return_value = None
result_col_names = view.get_col_names()
self.assertEqual(set(expected_col_names), set(result_col_names))
def test_get_col_names_exception(self):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request, col_names="string")
self.assertRaises(TypeError, view.get_col_names)
def test_get_filename_custom(self):
request = RequestFactory().get("")
view = ExportCSV()
expected_filename = "custom_filename.csv"
view = self.setup_view(view, request, filename=expected_filename)
result_filename = view.get_filename()
self.assertEqual(set(expected_filename), set(result_filename))
def test_get_filename_model(self):
request = RequestFactory().get("")
view = ExportCSV()
expected_filename = "customer_list.csv"
view = self.setup_view(view, request, model=Customer)
result_filename = view.get_filename()
self.assertEqual(set(expected_filename), set(result_filename))
def test_get_filename_exception(self):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request)
self.assertRaises(NoModelFoundException, view.get_filename)
def test_create_csv(self):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request, model=Customer)
response = view._create_csv()
self.assertEqual(200, response.status_code)
self.assertEqual('text/csv', response['Content-Type'])
def test_create_csv_col_names(self):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request, model=Customer,
add_col_names=True)
response = view._create_csv()
self.assertEqual(200, response.status_code)
self.assertEqual('text/csv', response['Content-Type'])
def clean_name(self, value):
return str(value).upper()
def test_create_csv_col_names_clean_name(self):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request, model=Customer,
add_col_names=True)
view.clean_name = self.clean_name
response = view._create_csv()
self.assertEqual(200, response.status_code)
self.assertEqual('text/csv', response['Content-Type'])
@mock.patch('export_csv.views.ExportCSV._create_csv')
def test_get(self, mock_create_csv):
request = RequestFactory().get("")
view = ExportCSV()
view = self.setup_view(view, request, model=Customer)
mock_create_csv.return_value = HttpResponse(status=200, content_type='text/csv')
response = view.get(request=request)
self.assertEqual(200, response.status_code)
self.assertEqual('text/csv', response['Content-Type'])
| narenchoudhary/django-export-csv | tests/test_views.py | Python | bsd-3-clause | 8,479 |
# -*- coding: utf-8 -*-
## Copyright © 2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from __future__ import division,absolute_import
from django.views.generic import ListView,DetailView,CreateView,UpdateView,DeleteView
from django.forms import ModelForm
from rainman.models import Group,Site
from irrigator.views import FormMixin,SiteParamMixin,get_profile
from rainman.utils import get_request
class GroupForm(ModelForm):
    """ModelForm for a rainman ``Group``.

    The owning ``site`` is excluded from the form and filled in server-side
    on creation.
    """

    class Meta:
        model = Group
        exclude = ('site',)

    def save(self,commit=True):
        # On creation, take the site from the auxiliary request data
        # (presumably injected by SiteParamMixin — confirm); on update the
        # existing site is left untouched.
        if self.instance.id is None:
            self.instance.site = self.aux_data['site']
        return super(GroupForm,self).save(commit)

    def limit_choices(self,site=None,valve=None):
        # Restrict the selectable valves to those visible to the requesting
        # user's profile, optionally narrowed to a single valve or site.
        gu = get_profile(get_request())
        v = gu.all_valves
        if valve is not None:
            v = v.filter(id=valve.id)
        if site is not None:
            v = v.filter(controller__site__id=site.id)
        self.fields['valves'].queryset = v
class GroupMixin(FormMixin):
    """Shared configuration for all Group views: the model, the template
    context name, and a queryset limited to groups the requesting user's
    profile may access."""
    model = Group
    context_object_name = "group"

    def get_queryset(self):
        # Only expose groups listed in the user's profile.
        gu = get_profile(self.request)
        return super(GroupMixin,self).get_queryset().filter(id__in=gu.groups)
class GroupsView(GroupMixin,SiteParamMixin,ListView):
    """List all groups visible to the user, optionally filtered by site."""
    context_object_name = "group_list"
    pass

class GroupView(GroupMixin,DetailView):
    """Detail page for a single group."""
    pass
class GroupNewView(GroupMixin,SiteParamMixin,CreateView):
    """Create a new group; valve choices are limited to what the user
    may see for the (optional) site passed in the URL."""
    form_class = GroupForm
    success_url="/group/%(id)s"

    def get_form(self, form_class):
        form = super(GroupNewView,self).get_form(form_class)
        # aux_data is provided by SiteParamMixin (site/valve URL params).
        form.limit_choices(**self.aux_data)
        return form

class GroupEditView(GroupMixin,SiteParamMixin,UpdateView):
    """Edit an existing group; same valve-choice restriction as creation."""
    form_class = GroupForm
    success_url="/group/%(id)s"

    def get_form(self, form_class):
        form = super(GroupEditView,self).get_form(form_class)
        form.limit_choices(**self.aux_data)
        return form
class GroupDeleteView(GroupMixin,DeleteView):
    """Delete a group, then redirect to the owning site's detail page."""
    def post(self,*a,**k):
        group = self.get_object()
        # Remember the site before the group row disappears.
        self.success_url="/site/%d" % (group.site.id,)
        # Bug fix: the super() call previously named DeleteView, which makes
        # the MRO search start *after* DeleteView and silently skips any
        # post() override of the classes between GroupDeleteView and
        # DeleteView. Name this class, as cooperative super() requires.
        return super(GroupDeleteView,self).post(*a,**k)
    pass
| smurfix/HomEvenT | irrigation/irrigator/views/group.py | Python | gpl-3.0 | 2,500 |
#!/usr/bin/env python
import unittest
from ..TestCase import TestCase
class TestDomainCreate(TestCase):
    """EPP ``domain:create`` round-trip tests.

    ``assertRequest`` renders a request from a parameter dict and compares
    it to the literal XML; ``assertResponse`` parses the literal XML and
    compares the extracted fields to the expected dict. The XML fixtures
    follow RFC 5731 (EPP domain mapping).
    """

    def test_render_domain_create_request_min(self):
        # Minimal create: only the domain name (authInfo is emitted empty).
        self.assertRequest('''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
	<command>
		<create>
			<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
				<domain:name>example.com</domain:name>
				<domain:authInfo>
					<domain:pw/>
				</domain:authInfo>
			</domain:create>
		</create>
		<clTRID>XXXX-11</clTRID>
	</command>
</epp>''', {
            'command': 'domain:create',
            'name': 'example.com',
            'clTRID': 'XXXX-11',
        })

    def test_render_domain_create_request(self):
        # Full create: period, registrant, name servers, contacts, password.
        self.assertRequest('''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0">
	<command>
		<create>
			<domain:create xmlns:domain="urn:ietf:params:xml:ns:domain-1.0">
				<domain:name>example.com</domain:name>
				<domain:period unit="y">2</domain:period>
				<domain:registrant>jd1234</domain:registrant>
				<domain:ns>
					<domain:hostObj>ns1.example.net</domain:hostObj>
					<domain:hostObj>ns2.example.net</domain:hostObj>
				</domain:ns>
				<domain:contact type="admin">sh8013</domain:contact>
				<domain:contact type="tech">sh8014</domain:contact>
				<domain:contact type="billing">sh8015</domain:contact>
				<domain:authInfo>
					<domain:pw>2fooBAR</domain:pw>
				</domain:authInfo>
			</domain:create>
		</create>
		<clTRID>XXXX-11</clTRID>
	</command>
</epp>''', {
            'command': 'domain:create',
            'name': 'example.com',
            'period': 2,
            'registrant': 'jd1234',
            'nss': [
                'ns1.example.net',
                'ns2.example.net'
            ],
            'admin': 'sh8013',
            'tech': 'sh8014',
            'billing': 'sh8015',
            'pw': '2fooBAR',
            'clTRID': 'XXXX-11',
        })

    def test_parse_domain_create_response(self):
        # A successful (1000) creData response must surface the creation
        # and expiry dates, transaction ids and result metadata.
        self.assertResponse({
            'clTRID': 'XXXX-11',
            'crDate': '2018-10-04T13:17:43.0Z',
            'exDate': '2020-10-04T13:17:43.0Z',
            'name': 'silverfire.me',
            'result_code': '1000',
            'result_lang': 'en-US',
            'result_msg': 'Command completed successfully',
            'svTRID': 'SRW-425500000011131514'
        }, '''<?xml version="1.0" ?>
<epp xmlns="urn:ietf:params:xml:ns:epp-1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="urn:ietf:params:xml:ns:epp-1.0 epp-1.0.xsd">
	<response>
		<result code="1000">
			<msg lang="en-US">Command completed successfully</msg>
		</result>
		<resData>
			<domain:creData xmlns:domain="urn:ietf:params:xml:ns:domain-1.0" xsi:schemaLocation="urn:ietf:params:xml:ns:domain-1.0 domain-1.0.xsd">
				<domain:name>silverfire.me</domain:name>
				<domain:crDate>2018-10-04T13:17:43.0Z</domain:crDate>
				<domain:exDate>2020-10-04T13:17:43.0Z</domain:exDate>
			</domain:creData>
		</resData>
		<trID>
			<clTRID>XXXX-11</clTRID>
			<svTRID>SRW-425500000011131514</svTRID>
		</trID>
	</response>
</epp>
''')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| hiqdev/reppy | tests/modules/domain/test_domain_create.py | Python | bsd-3-clause | 3,702 |
import random
from datetime import datetime
import demistomock as demisto # noqa: F401
import requests
from CommonServerPython import * # noqa: F401
# Fetch a random NASA image matching the search term and return it either
# as a CommandResults entry or as widget-formatted output.
args = demisto.args()
search = args.get('search', 'nebula')
widget_type = args.get('widgetType')

# The NASA image API takes an inclusive year range; search from 1920 up to
# the current (UTC) year so recent imagery is included.
end_year = datetime.utcnow().year

# Single, correctly-worded failure message (the original read
# "I couldn't a photo" — missing the verb).
ERROR_MSG = "Hmmm, I couldn't find a photo. Try refreshing?"

headers = {
    "Accept": "application/json"
}
params = {
    "q": search,
    "media_type": "image",
    "year_start": 1920,
    "year_end": end_year
}

res = requests.request(
    'GET',
    'https://images-api.nasa.gov/search',
    params=params,
    headers=headers
)

if res.status_code != 200:
    demisto.results(ERROR_MSG)
else:
    items = res.json().get('collection', {}).get('items', [])
    if not items:
        # Bug fix: random.randint(0, -1) raised ValueError when the search
        # returned no hits; report the friendly error instead.
        demisto.results(ERROR_MSG)
    else:
        random_list_entry = random.choice(items)
        entry_data = random_list_entry.get('data')[0]
        title = entry_data.get('title')
        description = entry_data.get('description')
        url = random_list_entry.get('href')

        # The href points at the asset listing for the chosen entry.
        res = requests.request(
            'GET',
            url,
            headers=headers
        )
        if res.status_code != 200:
            demisto.results(ERROR_MSG)
        else:
            # Drop the metadata entry; everything else is a media URL.
            asset_urls = [x for x in res.json() if "metadata.json" not in x]
            if asset_urls:
                image_url = random.choice(asset_urls)
                md = f"\n[{title}]({image_url}) - {description}"
            else:
                # Bug fix: image_url was previously left undefined here and
                # then referenced below, raising NameError.
                image_url = None
                md = ""

            if not widget_type:
                command_results = CommandResults(
                    outputs_prefix="NASA.Image",
                    outputs_key_field="title",
                    outputs={
                        "title": title,
                        "description": description,
                        "image": image_url
                    },
                    readable_output=md
                )
                return_results(command_results)
            elif widget_type == "text":
                demisto.results(md)
            elif widget_type == "number":
                demisto.results(42)
| demisto/content | Packs/RandomImages_VideosAndAudio/Scripts/RandomPhotoNasa/RandomPhotoNasa.py | Python | mit | 2,155 |
from django.shortcuts import render
from web.models import Candidato, IdeaFuerza, Cita, Documento, Noticia
# Create your views here.
def index(request):
    """Render the landing page with every content section.

    Collects the three idea-fuerza page areas (``m``, ``p``, ``a``) plus
    quotes, candidates, documents and news, and computes the grid width
    each idea section needs.
    """

    def columns_for(ideas):
        # Split Bootstrap's 12-column grid evenly across the items;
        # 0 when the section is empty (avoids ZeroDivisionError).
        # NOTE(review): on Python 3 ``12 / len(...)`` yields a float
        # (e.g. 2.4 for 5 items) — confirm the template expects that.
        return 12 / len(ideas) if len(ideas) > 0 else 0

    # Deduplicated: the filter/len/divide pattern was previously
    # copy-pasted once per section.
    ideasfuerza_m = IdeaFuerza.objects.filter(seccion='m').order_by('orden')
    ideasfuerza_p = IdeaFuerza.objects.filter(seccion='p').order_by('orden')
    ideasfuerza_a = IdeaFuerza.objects.filter(seccion='a').order_by('orden')

    return render(request, 'web/index.html', {
        'ideasfuerza_m': ideasfuerza_m,
        'm_columns': columns_for(ideasfuerza_m),
        'ideasfuerza_p': ideasfuerza_p,
        'p_columns': columns_for(ideasfuerza_p),
        'ideasfuerza_a': ideasfuerza_a,
        'a_columns': columns_for(ideasfuerza_a),
        'citas': Cita.objects.order_by('orden'),
        'candidatos': Candidato.objects.order_by('orden'),
        'documentos': Documento.objects.order_by('orden'),
        'noticias': Noticia.objects.order_by('-fecha'),
    })
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""tests for the astroid inference capabilities
"""
import sys
from functools import partial
import unittest
import six
from astroid import InferenceError, builder, nodes
from astroid.inference import infer_end as inference_infer_end
from astroid.bases import YES, Instance, BoundMethod, UnboundMethod,\
path_wrapper, BUILTINS
from astroid import test_utils
from astroid.tests import resources
def get_node_of_class(start_from, klass):
    """Return the first node of type *klass* found under *start_from*."""
    matches = start_from.nodes_of_class(klass)
    return next(matches)
# Module-wide AstroidBuilder used by the tests below. NOTE: this rebinding
# deliberately shadows the imported ``builder`` module with an instance.
builder = builder.AstroidBuilder()

# Builtin exceptions live in the ``exceptions`` module on Python 2 and in
# the builtins module itself on Python 3.
if sys.version_info < (3, 0):
    EXC_MODULE = 'exceptions'
else:
    EXC_MODULE = BUILTINS
class InferenceUtilsTest(unittest.TestCase):
    """Tests for inference helper utilities."""

    def test_path_wrapper(self):
        # path_wrapper must propagate InferenceError raised by the wrapped
        # generator and yield pass-through results (infer_end) unchanged.
        def infer_default(self, *args):
            raise InferenceError
        infer_default = path_wrapper(infer_default)
        infer_end = path_wrapper(inference_infer_end)
        with self.assertRaises(InferenceError):
            next(infer_default(1))
        self.assertEqual(next(infer_end(1)), 1)
# Generic element-container assertion: ``node`` must infer to ``node_type``
# (Tuple/List/Set) whose element values equal ``elts``. Bound onto the test
# class via ``partialmethod`` below.
def _assertInferElts(node_type, self, node, elts):
    infered = next(node.infer())
    self.assertIsInstance(infered, node_type)
    self.assertEqual(sorted(elt.value for elt in infered.elts),
                     elts)
def partialmethod(func, arg):
    """similar to functools.partial but return a plain function instead of a
    class so the returned value may be turned into a method.
    """
    def _bound(*args, **kwargs):
        # Prepend the pre-bound first argument, forward everything else.
        return func(arg, *args, **kwargs)
    return _bound
class InferenceTest(resources.SysPathSetup, unittest.TestCase):
    """End-to-end inference tests over a shared module fixture (CODE)."""

    # additional assertInfer* method for builtin types
    def assertInferConst(self, node, expected):
        infered = next(node.infer())
        self.assertIsInstance(infered, nodes.Const)
        self.assertEqual(infered.value, expected)

    def assertInferDict(self, node, expected):
        infered = next(node.infer())
        self.assertIsInstance(infered, nodes.Dict)
        # Compare (key, value) pairs irrespective of ordering.
        elts = set([(key.value, value.value)
                    for (key, value) in infered.items])
        self.assertEqual(sorted(elts), sorted(expected.items()))

    # Container assertions share one implementation, bound per node type.
    assertInferTuple = partialmethod(_assertInferElts, nodes.Tuple)
    assertInferList = partialmethod(_assertInferElts, nodes.List)
    assertInferSet = partialmethod(_assertInferElts, nodes.Set)

    # Shared fixture parsed once at class-definition time; most tests below
    # look names up in the resulting module node.
    CODE = '''
        class C(object):
            "new style"
            attr = 4
            def meth1(self, arg1, optarg=0):
                var = object()
                print ("yo", arg1, optarg)
                self.iattr = "hop"
                return var
            def meth2(self):
                self.meth1(*self.meth3)
            def meth3(self, d=attr):
                b = self.attr
                c = self.iattr
                return b, c
        ex = Exception("msg")
        v = C().meth1(1)
        m_unbound = C.meth1
        m_bound = C().meth1
        a, b, c = ex, 1, "bonjour"
        [d, e, f] = [ex, 1.0, ("bonjour", v)]
        g, h = f
        i, (j, k) = "glup", f
        a, b= b, a # Gasp !
    '''
    ast = test_utils.build_module(CODE, __name__)
    # --- simple name inference over the shared CODE fixture ---

    def test_module_inference(self):
        infered = self.ast.infer()
        obj = next(infered)
        self.assertEqual(obj.name, __name__)
        self.assertEqual(obj.root().name, __name__)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_class_inference(self):
        infered = self.ast['C'].infer()
        obj = next(infered)
        self.assertEqual(obj.name, 'C')
        self.assertEqual(obj.root().name, __name__)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_function_inference(self):
        infered = self.ast['C']['meth1'].infer()
        obj = next(infered)
        self.assertEqual(obj.name, 'meth1')
        self.assertEqual(obj.root().name, __name__)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_builtin_name_inference(self):
        # ``var = object()`` -> the name resolves to the builtin class.
        infered = self.ast['C']['meth1']['var'].infer()
        var = next(infered)
        self.assertEqual(var.name, 'object')
        self.assertEqual(var.root().name, BUILTINS)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_tupleassign_name_inference(self):
        # a, b, c = ex, 1, "bonjour"
        infered = self.ast['a'].infer()
        exc = next(infered)
        self.assertIsInstance(exc, Instance)
        self.assertEqual(exc.name, 'Exception')
        self.assertEqual(exc.root().name, EXC_MODULE)
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['b'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, 1)
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['c'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, "bonjour")
        self.assertRaises(StopIteration, partial(next, infered))
    def test_listassign_name_inference(self):
        # [d, e, f] = [ex, 1.0, ("bonjour", v)]
        infered = self.ast['d'].infer()
        exc = next(infered)
        self.assertIsInstance(exc, Instance)
        self.assertEqual(exc.name, 'Exception')
        self.assertEqual(exc.root().name, EXC_MODULE)
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['e'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, 1.0)
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['f'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Tuple)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_advanced_tupleassign_name_inference1(self):
        # g, h = f  (unpacking an inferred tuple)
        infered = self.ast['g'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, "bonjour")
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['h'].infer()
        var = next(infered)
        self.assertEqual(var.name, 'object')
        self.assertEqual(var.root().name, BUILTINS)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_advanced_tupleassign_name_inference2(self):
        # i, (j, k) = "glup", f  (nested unpacking)
        infered = self.ast['i'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, u"glup")
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['j'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, "bonjour")
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast['k'].infer()
        var = next(infered)
        self.assertEqual(var.name, 'object')
        self.assertEqual(var.root().name, BUILTINS)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_swap_assign_inference(self):
        # a, b = b, a — the *second* binding of each name is checked,
        # hence the explicit locals[...][1] indexing.
        infered = self.ast.locals['a'][1].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, 1)
        self.assertRaises(StopIteration, partial(next, infered))
        infered = self.ast.locals['b'][1].infer()
        exc = next(infered)
        self.assertIsInstance(exc, Instance)
        self.assertEqual(exc.name, 'Exception')
        self.assertEqual(exc.root().name, EXC_MODULE)
        self.assertRaises(StopIteration, partial(next, infered))
    def test_getattr_inference1(self):
        infered = self.ast['ex'].infer()
        exc = next(infered)
        self.assertIsInstance(exc, Instance)
        self.assertEqual(exc.name, 'Exception')
        self.assertEqual(exc.root().name, EXC_MODULE)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_getattr_inference2(self):
        # self.meth1 inside meth2 resolves to the method definition.
        infered = get_node_of_class(self.ast['C']['meth2'], nodes.Getattr).infer()
        meth1 = next(infered)
        self.assertEqual(meth1.name, 'meth1')
        self.assertEqual(meth1.root().name, __name__)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_getattr_inference3(self):
        # b = self.attr -> class attribute value 4
        infered = self.ast['C']['meth3']['b'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, 4)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_getattr_inference4(self):
        # c = self.iattr -> instance attribute set in meth1
        infered = self.ast['C']['meth3']['c'].infer()
        const = next(infered)
        self.assertIsInstance(const, nodes.Const)
        self.assertEqual(const.value, "hop")
        self.assertRaises(StopIteration, partial(next, infered))

    def test_callfunc_inference(self):
        # v = C().meth1(1) -> meth1 returns object()
        infered = self.ast['v'].infer()
        meth1 = next(infered)
        self.assertIsInstance(meth1, Instance)
        self.assertEqual(meth1.name, 'object')
        self.assertEqual(meth1.root().name, BUILTINS)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_unbound_method_inference(self):
        infered = self.ast['m_unbound'].infer()
        meth1 = next(infered)
        self.assertIsInstance(meth1, UnboundMethod)
        self.assertEqual(meth1.name, 'meth1')
        self.assertEqual(meth1.parent.frame().name, 'C')
        self.assertRaises(StopIteration, partial(next, infered))

    def test_bound_method_inference(self):
        infered = self.ast['m_bound'].infer()
        meth1 = next(infered)
        self.assertIsInstance(meth1, BoundMethod)
        self.assertEqual(meth1.name, 'meth1')
        self.assertEqual(meth1.parent.frame().name, 'C')
        self.assertRaises(StopIteration, partial(next, infered))

    def test_args_default_inference1(self):
        # optarg may be its default (0) or anything a caller passes (YES).
        optarg = test_utils.get_name_node(self.ast['C']['meth1'], 'optarg')
        infered = optarg.infer()
        obj1 = next(infered)
        self.assertIsInstance(obj1, nodes.Const)
        self.assertEqual(obj1.value, 0)
        obj1 = next(infered)
        self.assertIs(obj1, YES, obj1)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_args_default_inference2(self):
        # d defaults to the class attribute value 4, else unknown.
        infered = self.ast['C']['meth3'].ilookup('d')
        obj1 = next(infered)
        self.assertIsInstance(obj1, nodes.Const)
        self.assertEqual(obj1.value, 4)
        obj1 = next(infered)
        self.assertIs(obj1, YES, obj1)
        self.assertRaises(StopIteration, partial(next, infered))
    def test_inference_restrictions(self):
        # A plain positional argument with no default infers to YES only.
        infered = test_utils.get_name_node(self.ast['C']['meth1'], 'arg1').infer()
        obj1 = next(infered)
        self.assertIs(obj1, YES, obj1)
        self.assertRaises(StopIteration, partial(next, infered))

    def test_ancestors_inference(self):
        # Redefining A(A) must resolve the base to the *previous* A.
        code = '''
            class A(object): #@
                pass
            class A(A): #@
                pass
        '''
        a1, a2 = test_utils.extract_node(code, __name__)
        a2_ancestors = list(a2.ancestors())
        self.assertEqual(len(a2_ancestors), 2)
        self.assertIs(a2_ancestors[0], a1)

    def test_ancestors_inference2(self):
        code = '''
            class A(object): #@
                pass
            class B(A): #@
                pass
            class A(B): #@
                pass
        '''
        a1, b, a2 = test_utils.extract_node(code, __name__)
        a2_ancestors = list(a2.ancestors())
        self.assertEqual(len(a2_ancestors), 3)
        self.assertIs(a2_ancestors[0], b)
        self.assertIs(a2_ancestors[1], a1)

    def test_f_arg_f(self):
        # The parameter f shadows the function name inside its own body.
        code = '''
            def f(f=1):
                return f
            a = f()
        '''
        ast = test_utils.build_module(code, __name__)
        a = ast['a']
        a_infered = a.infered()
        self.assertEqual(a_infered[0].value, 1)
        self.assertEqual(len(a_infered), 1)

    def test_exc_ancestors(self):
        code = '''
        def f():
            raise __(NotImplementedError)
        '''
        error = test_utils.extract_node(code, __name__)
        nie = error.infered()[0]
        self.assertIsInstance(nie, nodes.Class)
        nie_ancestors = [c.name for c in nie.ancestors()]
        # StandardError only exists on Python 2.
        if sys.version_info < (3, 0):
            self.assertEqual(nie_ancestors, ['RuntimeError', 'StandardError', 'Exception', 'BaseException', 'object'])
        else:
            self.assertEqual(nie_ancestors, ['RuntimeError', 'Exception', 'BaseException', 'object'])
    def test_except_inference(self):
        # Each except-handler binds its name to an instance of the caught
        # exception class.
        code = '''
            try:
                print (hop)
            except NameError as ex:
                ex1 = ex
            except Exception as ex:
                ex2 = ex
                raise
        '''
        ast = test_utils.build_module(code, __name__)
        ex1 = ast['ex1']
        ex1_infer = ex1.infer()
        ex1 = next(ex1_infer)
        self.assertIsInstance(ex1, Instance)
        self.assertEqual(ex1.name, 'NameError')
        self.assertRaises(StopIteration, partial(next, ex1_infer))
        ex2 = ast['ex2']
        ex2_infer = ex2.infer()
        ex2 = next(ex2_infer)
        self.assertIsInstance(ex2, Instance)
        self.assertEqual(ex2.name, 'Exception')
        self.assertRaises(StopIteration, partial(next, ex2_infer))

    def test_del1(self):
        # Deleting an undefined name cannot be inferred.
        code = '''
            del undefined_attr
        '''
        delete = test_utils.extract_node(code, __name__)
        self.assertRaises(InferenceError, delete.infer)

    def test_del2(self):
        # After ``del a``, uses of a fail until it is rebound.
        code = '''
            a = 1
            b = a
            del a
            c = a
            a = 2
            d = a
        '''
        ast = test_utils.build_module(code, __name__)
        n = ast['b']
        n_infer = n.infer()
        infered = next(n_infer)
        self.assertIsInstance(infered, nodes.Const)
        self.assertEqual(infered.value, 1)
        self.assertRaises(StopIteration, partial(next, n_infer))
        n = ast['c']
        n_infer = n.infer()
        self.assertRaises(InferenceError, partial(next, n_infer))
        n = ast['d']
        n_infer = n.infer()
        infered = next(n_infer)
        self.assertIsInstance(infered, nodes.Const)
        self.assertEqual(infered.value, 2)
        self.assertRaises(StopIteration, partial(next, n_infer))
def test_builtin_types(self):
code = '''
l = [1]
t = (2,)
d = {}
s = ''
s2 = '_'
'''
ast = test_utils.build_module(code, __name__)
n = ast['l']
infered = next(n.infer())
self.assertIsInstance(infered, nodes.List)
self.assertIsInstance(infered, Instance)
self.assertEqual(infered.getitem(0).value, 1)
self.assertIsInstance(infered._proxied, nodes.Class)
self.assertEqual(infered._proxied.name, 'list')
self.assertIn('append', infered._proxied.locals)
n = ast['t']
infered = next(n.infer())
self.assertIsInstance(infered, nodes.Tuple)
self.assertIsInstance(infered, Instance)
self.assertEqual(infered.getitem(0).value, 2)
self.assertIsInstance(infered._proxied, nodes.Class)
self.assertEqual(infered._proxied.name, 'tuple')
n = ast['d']
infered = next(n.infer())
self.assertIsInstance(infered, nodes.Dict)
self.assertIsInstance(infered, Instance)
self.assertIsInstance(infered._proxied, nodes.Class)
self.assertEqual(infered._proxied.name, 'dict')
self.assertIn('get', infered._proxied.locals)
n = ast['s']
infered = next(n.infer())
self.assertIsInstance(infered, nodes.Const)
self.assertIsInstance(infered, Instance)
self.assertEqual(infered.name, 'str')
self.assertIn('lower', infered._proxied.locals)
n = ast['s2']
infered = next(n.infer())
self.assertEqual(infered.getitem(0).value, '_')
def test_builtin_types(self):
code = 's = {1}'
ast = test_utils.build_module(code, __name__)
n = ast['s']
infered = next(n.infer())
self.assertIsInstance(infered, nodes.Set)
self.assertIsInstance(infered, Instance)
self.assertEqual(infered.name, 'set')
self.assertIn('remove', infered._proxied.locals)
    # ``unicode`` only exists on Python 2.
    @test_utils.require_version(maxver='3.0')
    def test_unicode_type(self):
        code = '''u = u""'''
        ast = test_utils.build_module(code, __name__)
        n = ast['u']
        infered = next(n.infer())
        self.assertIsInstance(infered, nodes.Const)
        self.assertIsInstance(infered, Instance)
        self.assertEqual(infered.name, 'unicode')
        self.assertIn('lower', infered._proxied.locals)

    def test_descriptor_are_callable(self):
        code = '''
            class A:
                statm = staticmethod(open)
                clsm = classmethod('whatever')
        '''
        ast = test_utils.build_module(code, __name__)
        statm = next(ast['A'].igetattr('statm'))
        self.assertTrue(statm.callable())
        clsm = next(ast['A'].igetattr('clsm'))
        self.assertTrue(clsm.callable())

    def test_bt_ancestor_crash(self):
        # Self-referencing class name: Warning(Warning) must walk the
        # builtin exception hierarchy without recursing forever.
        code = '''
            class Warning(Warning):
                pass
        '''
        ast = test_utils.build_module(code, __name__)
        w = ast['Warning']
        ancestors = w.ancestors()
        ancestor = next(ancestors)
        self.assertEqual(ancestor.name, 'Warning')
        self.assertEqual(ancestor.root().name, EXC_MODULE)
        ancestor = next(ancestors)
        self.assertEqual(ancestor.name, 'Exception')
        self.assertEqual(ancestor.root().name, EXC_MODULE)
        ancestor = next(ancestors)
        self.assertEqual(ancestor.name, 'BaseException')
        self.assertEqual(ancestor.root().name, EXC_MODULE)
        ancestor = next(ancestors)
        self.assertEqual(ancestor.name, 'object')
        self.assertEqual(ancestor.root().name, BUILTINS)
        self.assertRaises(StopIteration, partial(next, ancestors))

    def test_qqch(self):
        # Dynamic module loading: result may be a module or unknown (YES).
        code = '''
            from astroid.modutils import load_module_from_name
            xxx = load_module_from_name('__pkginfo__')
        '''
        ast = test_utils.build_module(code, __name__)
        xxx = ast['xxx']
        self.assertSetEqual({n.__class__ for n in xxx.infered()},
                            {nodes.Const, YES.__class__})
    def test_method_argument(self):
        # Plain parameters infer to YES; *args to Tuple; **kwargs to Dict.
        code = '''
            class ErudiEntitySchema:
                """a entity has a type, a set of subject and or object relations"""
                def __init__(self, e_type, **kwargs):
                    kwargs['e_type'] = e_type.capitalize().encode()
                def meth(self, e_type, *args, **kwargs):
                    kwargs['e_type'] = e_type.capitalize().encode()
                    print(args)
            '''
        ast = test_utils.build_module(code, __name__)
        arg = test_utils.get_name_node(ast['ErudiEntitySchema']['__init__'], 'e_type')
        self.assertEqual([n.__class__ for n in arg.infer()],
                         [YES.__class__])
        arg = test_utils.get_name_node(ast['ErudiEntitySchema']['__init__'], 'kwargs')
        self.assertEqual([n.__class__ for n in arg.infer()],
                         [nodes.Dict])
        arg = test_utils.get_name_node(ast['ErudiEntitySchema']['meth'], 'e_type')
        self.assertEqual([n.__class__ for n in arg.infer()],
                         [YES.__class__])
        arg = test_utils.get_name_node(ast['ErudiEntitySchema']['meth'], 'args')
        self.assertEqual([n.__class__ for n in arg.infer()],
                         [nodes.Tuple])
        arg = test_utils.get_name_node(ast['ErudiEntitySchema']['meth'], 'kwargs')
        self.assertEqual([n.__class__ for n in arg.infer()],
                         [nodes.Dict])

    def test_tuple_then_list(self):
        # Rebinding tags = list(tags) narrows the default tuple to a list.
        code = '''
            def test_view(rql, vid, tags=()):
                tags = list(tags)
                __(tags).append(vid)
        '''
        name = test_utils.extract_node(code, __name__)
        it = name.infer()
        tags = next(it)
        self.assertIsInstance(tags, nodes.List)
        self.assertEqual(tags.elts, [])
        with self.assertRaises(StopIteration):
            next(it)
    def test_mulassign_inference(self):
        # A function returning one of several tuples: every branch's value
        # must be produced for both the tuple and its unpacked names.
        code = '''
            def first_word(line):
                """Return the first word of a line"""
                return line.split()[0]
            def last_word(line):
                """Return last word of a line"""
                return line.split()[-1]
            def process_line(word_pos):
                """Silly function: returns (ok, callable) based on argument.
                For test purpose only.
                """
                if word_pos > 0:
                    return (True, first_word)
                elif word_pos < 0:
                    return  (True, last_word)
                else:
                    return (False, None)
            if __name__ == '__main__':
                line_number = 0
                for a_line in file('test_callable.py'):
                    tupletest  = process_line(line_number)
                    (ok, fct)  = process_line(line_number)
                    if ok:
                        fct(a_line)
        '''
        ast = test_utils.build_module(code, __name__)
        self.assertEqual(len(list(ast['process_line'].infer_call_result(
            None))), 3)
        self.assertEqual(len(list(ast['tupletest'].infer())), 3)
        values = ['Function(first_word)', 'Function(last_word)', 'Const(NoneType)']
        self.assertEqual([str(infered)
                          for infered in ast['fct'].infer()], values)

    def test_float_complex_ambiguity(self):
        # ``something`` may be a float or a complex; both must be inferred.
        code = '''
            def no_conjugate_member(magic_flag):  #@
                """should not raise E1101 on something.conjugate"""
                if magic_flag:
                    something = 1.0
                else:
                    something = 1.0j
                if isinstance(something, float):
                    return something
                return __(something).conjugate()
        '''
        func, retval = test_utils.extract_node(code, __name__)
        self.assertEqual(
            [i.value for i in func.ilookup('something')],
            [1.0, 1.0j])
        self.assertEqual(
            [i.value for i in retval.infer()],
            [1.0, 1.0j])
def test_lookup_cond_branches(self):
code = '''
def no_conjugate_member(magic_flag):
"""should not raise E1101 on something.conjugate"""
something = 1.0
if magic_flag:
something = 1.0j
return something.conjugate()
'''
ast = test_utils.build_module(code, __name__)
self.assertEqual([i.value for i in
test_utils.get_name_node(ast, 'something', -1).infer()], [1.0, 1.0j])
    def test_simple_subscript(self):
        """Constant folding of list/tuple/dict subscripts, including a negative index."""
        code = '''
        a = [1, 2, 3][0]
        b = (1, 2, 3)[1]
        c = (1, 2, 3)[-1]
        d = a + b + c
        print (d)
        e = {'key': 'value'}
        f = e['key']
        print (f)
        '''
        ast = test_utils.build_module(code, __name__)
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'a', -1).infer()], [1])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'b', -1).infer()], [2])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'c', -1).infer()], [3])
        # d is the sum of the three folded constants above.
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'd', -1).infer()], [6])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'f', -1).infer()], ['value'])
    #def test_simple_tuple(self):
        #"""test case for a simple tuple value"""
        ## XXX tuple inference is not implemented ...
        #code = """
        #a = (1,)
        #b = (22,)
        #some = a + b
        #"""
        #ast = builder.string_build(code, __name__, __file__)
        #self.assertEqual(ast['some'].infer.next().as_string(), "(1, 22)")
    def test_simple_for(self):
        """Loop variables (plain and tuple-unpacked, incl. comprehensions) infer to the iterated values."""
        code = '''
        for a in [1, 2, 3]:
            print (a)
        for b,c in [(1,2), (3,4)]:
            print (b)
            print (c)
        print ([(d,e) for e,d in ([1,2], [3,4])])
        '''
        ast = test_utils.build_module(code, __name__)
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'a', -1).infer()], [1, 2, 3])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'b', -1).infer()], [1, 3])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'c', -1).infer()], [2, 4])
        # d/e come from the list comprehension, where the unpack order is swapped.
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'd', -1).infer()], [2, 4])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'e', -1).infer()], [1, 3])
    def test_simple_for_genexpr(self):
        """Same unpacking inference as test_simple_for, but inside a generator expression."""
        code = '''
        print ((d,e) for e,d in ([1,2], [3,4]))
        '''
        ast = test_utils.build_module(code, __name__)
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'd', -1).infer()], [2, 4])
        self.assertEqual([i.value for i in
                          test_utils.get_name_node(ast, 'e', -1).infer()], [1, 3])
    def test_builtin_help(self):
        """The builtin ``help`` name infers to an Instance of ``_Helper``."""
        code = '''
        help()
        '''
        # XXX failing since __builtin__.help assignment has
        # been moved into a function...
        node = test_utils.extract_node(code, __name__)
        infered = list(node.func.infer())
        self.assertEqual(len(infered), 1, infered)
        self.assertIsInstance(infered[0], Instance)
        self.assertEqual(infered[0].name, "_Helper")
    def test_builtin_open(self):
        """``open`` infers to the builtin; on pypy it is modelled as the ``file`` class."""
        code = '''
        open("toto.txt")
        '''
        node = test_utils.extract_node(code, __name__).func
        infered = list(node.infer())
        self.assertEqual(len(infered), 1)
        if hasattr(sys, 'pypy_version_info'):
            self.assertIsInstance(infered[0], nodes.Class)
            self.assertEqual(infered[0].name, 'file')
        else:
            self.assertIsInstance(infered[0], nodes.Function)
            self.assertEqual(infered[0].name, 'open')
    def test_callfunc_context_func(self):
        """A call argument flows through a function to the call result: mirror(1) -> 1."""
        code = '''
        def mirror(arg=None):
            return arg
        un = mirror(1)
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast.igetattr('un'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Const)
        self.assertEqual(infered[0].value, 1)
    def test_callfunc_context_lambda(self):
        """Same argument flow as test_callfunc_context_func, but through a lambda."""
        code = '''
        mirror = lambda x=None: x
        un = mirror(1)
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast.igetattr('mirror'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Lambda)
        infered = list(ast.igetattr('un'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Const)
        self.assertEqual(infered[0].value, 1)
    def test_factory_method(self):
        """``cls()`` inside a classmethod infers to an instance of the *calling* subclass."""
        code = '''
        class Super(object):
            @classmethod
            def instance(cls):
                return cls()
        class Sub(Super):
            def method(self):
                print ('method called')
        sub = Sub.instance()
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast.igetattr('sub'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], Instance)
        # proxied class is Sub, not Super, because the call went through Sub.
        self.assertEqual(infered[0]._proxied.name, 'Sub')
    def test_import_as(self):
        """``import x as y`` / ``from x import a as b`` bind the alias to the real object."""
        code = '''
        import os.path as osp
        print (osp.dirname(__file__))
        from os.path import exists as e
        assert e(__file__)
        from new import code as make_code
        print (make_code)
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast.igetattr('osp'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Module)
        self.assertEqual(infered[0].name, 'os.path')
        infered = list(ast.igetattr('e'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Function)
        self.assertEqual(infered[0].name, 'exists')
        # The remainder exercises the py2-only <new> module.
        if sys.version_info >= (3, 0):
            self.skipTest('<new> module has been removed')
        infered = list(ast.igetattr('make_code'))
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], Instance)
        self.assertEqual(str(infered[0]),
                         'Instance of %s.type' % BUILTINS)
def _test_const_infered(self, node, value):
infered = list(node.infer())
self.assertEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Const)
self.assertEqual(infered[0].value, value)
def test_unary_not(self):
for code in ('a = not (1,); b = not ()',
'a = not {1:2}; b = not {}'):
ast = builder.string_build(code, __name__, __file__)
self._test_const_infered(ast['a'], False)
self._test_const_infered(ast['b'], True)
def test_binary_op_int_add(self):
ast = builder.string_build('a = 1 + 2', __name__, __file__)
self._test_const_infered(ast['a'], 3)
def test_binary_op_int_sub(self):
ast = builder.string_build('a = 1 - 2', __name__, __file__)
self._test_const_infered(ast['a'], -1)
def test_binary_op_float_div(self):
ast = builder.string_build('a = 1 / 2.', __name__, __file__)
self._test_const_infered(ast['a'], 1 / 2.)
def test_binary_op_str_mul(self):
ast = builder.string_build('a = "*" * 40', __name__, __file__)
self._test_const_infered(ast['a'], "*" * 40)
def test_binary_op_bitand(self):
ast = builder.string_build('a = 23&20', __name__, __file__)
self._test_const_infered(ast['a'], 23&20)
def test_binary_op_bitor(self):
ast = builder.string_build('a = 23|8', __name__, __file__)
self._test_const_infered(ast['a'], 23|8)
def test_binary_op_bitxor(self):
ast = builder.string_build('a = 23^9', __name__, __file__)
self._test_const_infered(ast['a'], 23^9)
def test_binary_op_shiftright(self):
ast = builder.string_build('a = 23 >>1', __name__, __file__)
self._test_const_infered(ast['a'], 23>>1)
def test_binary_op_shiftleft(self):
ast = builder.string_build('a = 23 <<1', __name__, __file__)
self._test_const_infered(ast['a'], 23<<1)
    def test_binary_op_list_mul(self):
        """``[[]] * 2`` infers to a two-element list, regardless of operand order."""
        for code in ('a = [[]] * 2', 'a = 2 * [[]]'):
            ast = builder.string_build(code, __name__, __file__)
            infered = list(ast['a'].infer())
            self.assertEqual(len(infered), 1)
            self.assertIsInstance(infered[0], nodes.List)
            self.assertEqual(len(infered[0].elts), 2)
            self.assertIsInstance(infered[0].elts[0], nodes.List)
            self.assertIsInstance(infered[0].elts[1], nodes.List)
    def test_binary_op_list_mul_none(self):
        'test correct handling on list multiplied by None'
        # Multiplying a list by a non-int yields the unknown sentinel (YES),
        # not an exception.
        ast = builder.string_build('a = [1] * None\nb = [1] * "r"')
        infered = ast['a'].infered()
        self.assertEqual(len(infered), 1)
        self.assertEqual(infered[0], YES)
        infered = ast['b'].infered()
        self.assertEqual(len(infered), 1)
        self.assertEqual(infered[0], YES)
    def test_binary_op_tuple_add(self):
        """Tuple concatenation folds to a single Tuple node with both elements."""
        ast = builder.string_build('a = (1,) + (2,)', __name__, __file__)
        infered = list(ast['a'].infer())
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Tuple)
        self.assertEqual(len(infered[0].elts), 2)
        self.assertEqual(infered[0].elts[0].value, 1)
        self.assertEqual(infered[0].elts[1].value, 2)
    def test_binary_op_custom_class(self):
        """A user-defined ``__mul__`` participates in inference; both branch results show up."""
        code = '''
        class myarray:
            def __init__(self, array):
                self.array = array
            def __mul__(self, x):
                return myarray([2,4,6])
            def astype(self):
                return "ASTYPE"
        def randint(maximum):
            if maximum is not None:
                return myarray([1,2,3]) * 2
            else:
                return int(5)
        x = randint(1)
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast.igetattr('x'))
        self.assertEqual(len(infered), 2)
        value = [str(v) for v in infered]
        # The __name__ trick here makes it work when invoked directly
        # (__name__ == '__main__') and through pytest (__name__ ==
        # 'unittest_inference')
        self.assertEqual(value, ['Instance of %s.myarray' % __name__,
                                 'Instance of %s.int' % BUILTINS])
    def test_nonregr_lambda_arg(self):
        """Calling a lambda default argument infers both YES and its None return."""
        code = '''
        def f(g = lambda: None):
                __(g()).x
        '''
        callfuncnode = test_utils.extract_node(code)
        infered = list(callfuncnode.infer())
        self.assertEqual(len(infered), 2, infered)
        # Drop the unknown sentinel; the remaining result is the lambda's None.
        infered.remove(YES)
        self.assertIsInstance(infered[0], nodes.Const)
        self.assertIsNone(infered[0].value)
    def test_nonregr_getitem_empty_tuple(self):
        """Subscripting an empty tuple with an unknown index yields YES, not a crash."""
        code = '''
        def f(x):
            a = ()[x]
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast['f'].ilookup('a'))
        self.assertEqual(len(infered), 1)
        self.assertEqual(infered[0], YES)
    def test_nonregr_instance_attrs(self):
        """non regression for instance_attrs infinite loop : pylint / #4"""
        code = """
        class Foo(object):
            def set_42(self):
                self.attr = 42
        class Bar(Foo):
            def __init__(self):
                self.attr = 41
        """
        ast = test_utils.build_module(code, __name__)
        foo_class = ast['Foo']
        bar_class = ast['Bar']
        bar_self = ast['Bar']['__init__']['self']
        assattr = bar_class.instance_attrs['attr'][0]
        self.assertEqual(len(foo_class.instance_attrs['attr']), 1)
        self.assertEqual(len(bar_class.instance_attrs['attr']), 1)
        self.assertEqual(bar_class.instance_attrs, {'attr': [assattr]})
        # call 'instance_attr' via 'Instance.getattr' to trigger the bug:
        instance = bar_self.infered()[0]
        instance.getattr('attr')
        # instance_attrs must not have grown as a side effect of getattr.
        self.assertEqual(len(bar_class.instance_attrs['attr']), 1)
        self.assertEqual(len(foo_class.instance_attrs['attr']), 1)
        self.assertEqual(bar_class.instance_attrs, {'attr': [assattr]})
    def test_python25_generator_exit(self):
        """Inference must not leak a 'generator ignored GeneratorExit' message to stderr."""
        sys.stderr = six.StringIO()
        data = "b = {}[str(0)+''].a"
        ast = builder.string_build(data, __name__, __file__)
        list(ast['b'].infer())
        output = sys.stderr.getvalue()
        # I have no idea how to test for this in another way...
        self.assertNotIn("RuntimeError", output, "Exception exceptions.RuntimeError: 'generator ignored GeneratorExit' in <generator object> ignored")
        # restore the real stderr captured above
        sys.stderr = sys.__stderr__
    def test_python25_relative_import(self):
        """An explicit-relative import resolves against the pretended package modname."""
        data = "from ...logilab.common import date; print (date)"
        # !! FIXME also this relative import would not work 'in real' (no __init__.py in test/)
        # the test works since we pretend we have a package by passing the full modname
        ast = builder.string_build(data, 'astroid.test.unittest_inference', __file__)
        infered = next(test_utils.get_name_node(ast, 'date').infer())
        self.assertIsInstance(infered, nodes.Module)
        self.assertEqual(infered.name, 'logilab.common.date')
    def test_python25_no_relative_import(self):
        """With absolute_import activated, an implicit-relative name fails to resolve (YES)."""
        ast = resources.build_file('data/package/absimport.py')
        self.assertTrue(ast.absolute_import_activated(), True)
        infered = next(test_utils.get_name_node(ast, 'import_package_subpackage_module').infer())
        # failed to import since absolute_import is activated
        self.assertIs(infered, YES)
    def test_nonregr_absolute_import(self):
        """``import string`` inside data/absimp/string.py resolves to the stdlib module."""
        ast = resources.build_file('data/absimp/string.py', 'data.absimp.string')
        self.assertTrue(ast.absolute_import_activated(), True)
        infered = next(test_utils.get_name_node(ast, 'string').infer())
        self.assertIsInstance(infered, nodes.Module)
        self.assertEqual(infered.name, 'string')
        self.assertIn('ascii_letters', infered.locals)
    def test_mechanize_open(self):
        """Inference of mechanize.Browser attributes (skipped mid-way, see below)."""
        try:
            import mechanize # pylint: disable=unused-variable
        except ImportError:
            self.skipTest('require mechanize installed')
        data = '''
        from mechanize import Browser
        print(Browser)
        b = Browser()
        '''
        ast = test_utils.build_module(data, __name__)
        browser = next(test_utils.get_name_node(ast, 'Browser').infer())
        self.assertIsInstance(browser, nodes.Class)
        bopen = list(browser.igetattr('open'))
        # everything after this skipTest is currently dead code, kept for reference
        self.skipTest('the commit said: "huum, see that later"')
        self.assertEqual(len(bopen), 1)
        self.assertIsInstance(bopen[0], nodes.Function)
        self.assertTrue(bopen[0].callable())
        b = next(test_utils.get_name_node(ast, 'b').infer())
        self.assertIsInstance(b, Instance)
        bopen = list(b.igetattr('open'))
        self.assertEqual(len(bopen), 1)
        self.assertIsInstance(bopen[0], BoundMethod)
        self.assertTrue(bopen[0].callable())
    def test_property(self):
        """Accessing a @property infers the getter's return value, not the function object."""
        code = '''
        from smtplib import SMTP
        class SendMailController(object):
            @property
            def smtp(self):
                return SMTP(mailhost, port)
            @property
            def me(self):
                return self
        my_smtp = SendMailController().smtp
        my_me = SendMailController().me
        '''
        decorators = set(['%s.property' % BUILTINS])
        ast = test_utils.build_module(code, __name__)
        self.assertEqual(ast['SendMailController']['smtp'].decoratornames(),
                         decorators)
        propinfered = list(ast.body[2].value.infer())
        self.assertEqual(len(propinfered), 1)
        propinfered = propinfered[0]
        self.assertIsInstance(propinfered, Instance)
        self.assertEqual(propinfered.name, 'SMTP')
        self.assertEqual(propinfered.root().name, 'smtplib')
        self.assertEqual(ast['SendMailController']['me'].decoratornames(),
                         decorators)
        propinfered = list(ast.body[3].value.infer())
        self.assertEqual(len(propinfered), 1)
        propinfered = propinfered[0]
        self.assertIsInstance(propinfered, Instance)
        # 'me' returns self, so the instance's class lives in this test module.
        self.assertEqual(propinfered.name, 'SendMailController')
        self.assertEqual(propinfered.root().name, __name__)
    def test_im_func_unwrap(self):
        """``method.im_func`` unwraps to the underlying Function node (py2 semantics)."""
        code = '''
        class EnvBasedTC:
            def pactions(self):
                pass
        pactions = EnvBasedTC.pactions.im_func
        print (pactions)
        class EnvBasedTC2:
            pactions = EnvBasedTC.pactions.im_func
            print (pactions)
        '''
        ast = test_utils.build_module(code, __name__)
        pactions = test_utils.get_name_node(ast, 'pactions')
        infered = list(pactions.infer())
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Function)
        # same unwrap, but looked up inside the class body scope
        pactions = test_utils.get_name_node(ast['EnvBasedTC2'], 'pactions')
        infered = list(pactions.infer())
        self.assertEqual(len(infered), 1)
        self.assertIsInstance(infered[0], nodes.Function)
def test_augassign(self):
code = '''
a = 1
a += 2
print (a)
'''
ast = test_utils.build_module(code, __name__)
infered = list(test_utils.get_name_node(ast, 'a').infer())
self.assertEqual(len(infered), 1)
self.assertIsInstance(infered[0], nodes.Const)
self.assertEqual(infered[0].value, 3)
    def test_nonregr_func_arg(self):
        """Calling an unknown function argument yields the YES sentinel, not a crash."""
        code = '''
        def foo(self, bar):
            def baz():
                pass
            def qux():
                return baz
            spam = bar(None, qux)
            print (spam)
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(test_utils.get_name_node(ast['foo'], 'spam').infer())
        self.assertEqual(len(infered), 1)
        self.assertIs(infered[0], YES)
    def test_nonregr_func_global(self):
        """A global rebound inside a method is seen through an accessor function."""
        code = '''
        active_application = None
        def get_active_application():
          global active_application
          return active_application
        class Application(object):
          def __init__(self):
             global active_application
             active_application = self
        class DataManager(object):
          def __init__(self, app=None):
             self.app = get_active_application()
          def test(self):
             p = self.app
             print (p)
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(Instance(ast['DataManager']).igetattr('app'))
        self.assertEqual(len(infered), 2, infered) # None / Instance(Application)
        infered = list(test_utils.get_name_node(ast['DataManager']['test'], 'p').infer())
        self.assertEqual(len(infered), 2, infered)
        # for/else: fail only if the loop finishes without finding the instance
        for node in infered:
            if isinstance(node, Instance) and node.name == 'Application':
                break
        else:
            self.fail('expected to find an instance of Application in %s' % infered)
    def test_list_inference(self):
        """#20464"""
        # A list built by concatenation with module-level lists still infers
        # to a list instance.
        code = '''
        import optparse
        A = []
        B = []
        def test():
          xyz = [
            "foobar=%s" % options.ca,
          ] + A + B
          if options.bind is not None:
            xyz.append("bind=%s" % options.bind)
          return xyz
        def main():
          global options
          parser = optparse.OptionParser()
          (options, args) = parser.parse_args()
        Z = test()
        '''
        ast = test_utils.build_module(code, __name__)
        infered = list(ast['Z'].infer())
        self.assertEqual(len(infered), 1, infered)
        self.assertIsInstance(infered[0], Instance)
        self.assertIsInstance(infered[0]._proxied, nodes.Class)
        self.assertEqual(infered[0]._proxied.name, 'list')
    def test__new__(self):
        """Attributes set in a custom __new__ are found on instances, not on the class."""
        code = '''
        class NewTest(object):
            "doc"
            def __new__(cls, arg):
                self = object.__new__(cls)
                self.arg = arg
                return self
        n = NewTest()
        '''
        ast = test_utils.build_module(code, __name__)
        # 'arg' is an instance attribute, so class-level lookup must fail.
        self.assertRaises(InferenceError, list, ast['NewTest'].igetattr('arg'))
        n = next(ast['n'].infer())
        infered = list(n.igetattr('arg'))
        self.assertEqual(len(infered), 1, infered)
    def test_two_parents_from_same_module(self):
        """Two base classes imported from the same module both appear as ancestors."""
        code = '''
        from data import nonregr
        class Xxx(nonregr.Aaa, nonregr.Ccc):
            "doc"
        '''
        ast = test_utils.build_module(code, __name__)
        parents = list(ast['Xxx'].ancestors())
        self.assertEqual(len(parents), 3, parents) # Aaa, Ccc, object
    def test_pluggable_inference(self):
        """namedtuple calls are understood: the generated class exposes the field names."""
        code = '''
        from collections import namedtuple
        A = namedtuple('A', ['a', 'b'])
        B = namedtuple('B', 'a b')
        '''
        ast = test_utils.build_module(code, __name__)
        aclass = ast['A'].infered()[0]
        self.assertIsInstance(aclass, nodes.Class)
        self.assertIn('a', aclass.instance_attrs)
        self.assertIn('b', aclass.instance_attrs)
        # same result for the space-separated field-name form
        bclass = ast['B'].infered()[0]
        self.assertIsInstance(bclass, nodes.Class)
        self.assertIn('a', bclass.instance_attrs)
        self.assertIn('b', bclass.instance_attrs)
    def test_infer_arguments(self):
        """Call arguments flow correctly through instance, class and static methods."""
        code = '''
        class A(object):
            def first(self, arg1, arg2):
                return arg1
            @classmethod
            def method(cls, arg1, arg2):
                return arg2
            @classmethod
            def empty(cls):
                return 2
            @staticmethod
            def static(arg1, arg2):
                return arg1
            def empty_method(self):
                return []
        x = A().first(1, [])
        y = A.method(1, [])
        z = A.static(1, [])
        empty = A.empty()
        empty_list = A().empty_method()
        '''
        ast = test_utils.build_module(code, __name__)
        int_node = ast['x'].infered()[0]
        self.assertIsInstance(int_node, nodes.Const)
        self.assertEqual(int_node.value, 1)
        list_node = ast['y'].infered()[0]
        self.assertIsInstance(list_node, nodes.List)
        int_node = ast['z'].infered()[0]
        self.assertIsInstance(int_node, nodes.Const)
        self.assertEqual(int_node.value, 1)
        empty = ast['empty'].infered()[0]
        self.assertIsInstance(empty, nodes.Const)
        self.assertEqual(empty.value, 2)
        empty_list = ast['empty_list'].infered()[0]
        self.assertIsInstance(empty_list, nodes.List)
    def test_infer_variable_arguments(self):
        """``*args`` infers as a Tuple and ``**kwargs`` as a Dict, both owned by the function's arguments."""
        code = '''
        def test(*args, **kwargs):
            vararg = args
            kwarg = kwargs
        '''
        ast = test_utils.build_module(code, __name__)
        func = ast['test']
        vararg = func.body[0].value
        kwarg = func.body[1].value
        kwarg_infered = kwarg.infered()[0]
        self.assertIsInstance(kwarg_infered, nodes.Dict)
        self.assertIs(kwarg_infered.parent, func.args)
        vararg_infered = vararg.infered()[0]
        self.assertIsInstance(vararg_infered, nodes.Tuple)
        self.assertIs(vararg_infered.parent, func.args)
    def test_infer_nested(self):
        """A name imported in a nested function scope is found when inferring a call there."""
        code = """
        def nested():
            from threading import Thread
            class NestedThread(Thread):
                def __init__(self):
                    Thread.__init__(self)
        """
        # Test that inferring Thread.__init__ looks up in
        # the nested scope.
        ast = test_utils.build_module(code, __name__)
        callfunc = next(ast.nodes_of_class(nodes.CallFunc))
        func = callfunc.func
        infered = func.infered()[0]
        self.assertIsInstance(infered, UnboundMethod)
    def test_instance_binary_operations(self):
        """``a - b`` is unknown (no __sub__); ``a * b`` uses the class __mul__ -> 42."""
        code = """
        class A(object):
            def __mul__(self, other):
                return 42
        a = A()
        b = A()
        sub = a - b
        mul = a * b
        """
        ast = test_utils.build_module(code, __name__)
        sub = ast['sub'].infered()[0]
        mul = ast['mul'].infered()[0]
        self.assertIs(sub, YES)
        self.assertIsInstance(mul, nodes.Const)
        self.assertEqual(mul.value, 42)
    def test_instance_binary_operations_parent(self):
        """__mul__ inherited from a parent class applies to subclass instances."""
        code = """
        class A(object):
            def __mul__(self, other):
                return 42
        class B(A):
            pass
        a = B()
        b = B()
        sub = a - b
        mul = a * b
        """
        ast = test_utils.build_module(code, __name__)
        sub = ast['sub'].infered()[0]
        mul = ast['mul'].infered()[0]
        self.assertIs(sub, YES)
        self.assertIsInstance(mul, nodes.Const)
        self.assertEqual(mul.value, 42)
    def test_instance_binary_operations_multiple_methods(self):
        """When both parent and subclass define __mul__, the most derived override wins."""
        code = """
        class A(object):
            def __mul__(self, other):
                return 42
        class B(A):
            def __mul__(self, other):
                return [42]
        a = B()
        b = B()
        sub = a - b
        mul = a * b
        """
        ast = test_utils.build_module(code, __name__)
        sub = ast['sub'].infered()[0]
        mul = ast['mul'].infered()[0]
        self.assertIs(sub, YES)
        self.assertIsInstance(mul, nodes.List)
        self.assertIsInstance(mul.elts[0], nodes.Const)
        self.assertEqual(mul.elts[0].value, 42)
    def test_infer_call_result_crash(self):
        """``type.__new__()`` with no arguments must not crash inference; result is YES."""
        code = """
        class A(object):
            def __mul__(self, other):
                return type.__new__()
        a = A()
        b = A()
        c = a * b
        """
        ast = test_utils.build_module(code, __name__)
        node = ast['c']
        self.assertEqual(node.infered(), [YES])
    def test_infer_empty_nodes(self):
        """EmptyNode inference yields the YES sentinel instead of crashing."""
        # Should not crash when trying to infer EmptyNodes.
        node = nodes.EmptyNode()
        self.assertEqual(node.infered(), [YES])
    def test_infinite_loop_for_decorators(self):
        """A self-returning decorator must not send inference into an infinite loop."""
        # Issue https://bitbucket.org/logilab/astroid/issue/50
        # A decorator that returns itself leads to an infinite loop.
        code = """
        def decorator():
            def wrapper():
                return decorator()
            return wrapper
        @decorator()
        def do_a_thing():
            pass
        """
        ast = test_utils.build_module(code, __name__)
        node = ast['do_a_thing']
        self.assertEqual(node.type, 'function')
    def test_no_infinite_ancestor_loop(self):
        """Rebinding a base-class name to the class itself must not loop ancestors()."""
        klass = test_utils.extract_node("""
            import datetime
            def method(self):
                datetime.datetime = something()
            class something(datetime.datetime):  #@
                pass
        """)
        self.assertIn(
            'object',
            [base.name for base in klass.ancestors()])
    def test_stop_iteration_leak(self):
        """Self-referential attribute inference raises InferenceError, not StopIteration."""
        code = """
            class Test:
                def __init__(self):
                    self.config = {0: self.config[0]}
                    self.config[0].test() #@
        """
        ast = test_utils.extract_node(code, __name__)
        expr = ast.func.expr
        self.assertRaises(InferenceError, next, expr.infer())
    def test_tuple_builtin_inference(self):
        """tuple() calls with iterable arguments infer to concrete Tuple nodes."""
        code = """
        var = (1, 2)
        tuple() #@
        tuple([1]) #@
        tuple({2}) #@
        tuple("abc") #@
        tuple({1: 2}) #@
        tuple(var) #@
        tuple(tuple([1])) #@
        tuple(None) #@
        tuple(1) #@
        tuple(1, 2) #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferTuple(ast[0], [])
        self.assertInferTuple(ast[1], [1])
        self.assertInferTuple(ast[2], [2])
        self.assertInferTuple(ast[3], ["a", "b", "c"])
        self.assertInferTuple(ast[4], [1])
        self.assertInferTuple(ast[5], [1, 2])
        self.assertInferTuple(ast[6], [1])
        # invalid calls (non-iterable or too many args) fall back to a plain
        # tuple instance
        for node in ast[7:]:
            infered = next(node.infer())
            self.assertIsInstance(infered, Instance)
            self.assertEqual(infered.qname(), "{}.tuple".format(BUILTINS))
    def test_set_builtin_inference(self):
        """set() calls with iterable arguments infer to concrete (deduplicated) sets."""
        code = """
        var = (1, 2)
        set() #@
        set([1, 2, 1]) #@
        set({2, 3, 1}) #@
        set("abcab") #@
        set({1: 2}) #@
        set(var) #@
        set(tuple([1])) #@
        set(set(tuple([4, 5, set([2])]))) #@
        set(None) #@
        set(1) #@
        set(1, 2) #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferSet(ast[0], [])
        self.assertInferSet(ast[1], [1, 2])
        self.assertInferSet(ast[2], [1, 2, 3])
        self.assertInferSet(ast[3], ["a", "b", "c"])
        self.assertInferSet(ast[4], [1])
        self.assertInferSet(ast[5], [1, 2])
        self.assertInferSet(ast[6], [1])
        # invalid calls fall back to a plain set instance
        for node in ast[7:]:
            infered = next(node.infer())
            self.assertIsInstance(infered, Instance)
            self.assertEqual(infered.qname(), "{}.set".format(BUILTINS))
    def test_list_builtin_inference(self):
        """list() calls with iterable arguments infer to concrete List nodes."""
        code = """
        var = (1, 2)
        list() #@
        list([1, 2, 1]) #@
        list({2, 3, 1}) #@
        list("abcab") #@
        list({1: 2}) #@
        list(var) #@
        list(tuple([1])) #@
        list(list(tuple([4, 5, list([2])]))) #@
        list(None) #@
        list(1) #@
        list(1, 2) #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferList(ast[0], [])
        self.assertInferList(ast[1], [1, 1, 2])
        self.assertInferList(ast[2], [1, 2, 3])
        self.assertInferList(ast[3], ["a", "a", "b", "b", "c"])
        self.assertInferList(ast[4], [1])
        self.assertInferList(ast[5], [1, 2])
        self.assertInferList(ast[6], [1])
        # invalid calls fall back to a plain list instance
        for node in ast[7:]:
            infered = next(node.infer())
            self.assertIsInstance(infered, Instance)
            self.assertEqual(infered.qname(), "{}.list".format(BUILTINS))
    @test_utils.require_version('3.0')
    def test_builtin_inference_py3k(self):
        """On py3, iterating bytes yields ints, so b"abc" converts to [97, 98, 99]."""
        code = """
        list(b"abc") #@
        tuple(b"abc") #@
        set(b"abc") #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferList(ast[0], [97, 98, 99])
        self.assertInferTuple(ast[1], [97, 98, 99])
        self.assertInferSet(ast[2], [97, 98, 99])
    def test_dict_inference(self):
        """dict() calls (pairs, kwargs, mappings, merges) infer to concrete dicts."""
        code = """
        dict() #@
        dict(a=1, b=2, c=3) #@
        dict([(1, 2), (2, 3)]) #@
        dict([[1, 2], [2, 3]]) #@
        dict([(1, 2), [2, 3]]) #@
        dict([('a', 2)], b=2, c=3) #@
        dict({1: 2}) #@
        dict({'c': 2}, a=4, b=5) #@
        def func():
            return dict(a=1, b=2)
        func() #@
        var = {'x': 2, 'y': 3}
        dict(var, a=1, b=2) #@
        dict([1, 2, 3]) #@
        dict([(1, 2), (1, 2, 3)]) #@
        dict({1: 2}, {1: 2}) #@
        dict({1: 2}, (1, 2)) #@
        dict({1: 2}, (1, 2), a=4) #@
        dict([(1, 2), ([4, 5], 2)]) #@
        dict([None,  None]) #@
        def using_unknown_kwargs(**kwargs):
            return dict(**kwargs)
        using_unknown_kwargs(a=1, b=2) #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferDict(ast[0], {})
        self.assertInferDict(ast[1], {'a': 1, 'b': 2, 'c': 3})
        for i in range(2, 5):
            self.assertInferDict(ast[i], {1: 2, 2: 3})
        self.assertInferDict(ast[5], {'a': 2, 'b': 2, 'c': 3})
        self.assertInferDict(ast[6], {1: 2})
        self.assertInferDict(ast[7], {'c': 2, 'a': 4, 'b': 5})
        self.assertInferDict(ast[8], {'a': 1, 'b': 2})
        self.assertInferDict(ast[9], {'x': 2, 'y': 3, 'a': 1, 'b': 2})
        # malformed/unknowable calls fall back to a plain dict instance
        for node in ast[10:]:
            infered = next(node.infer())
            self.assertIsInstance(infered, Instance)
            self.assertEqual(infered.qname(), "{}.dict".format(BUILTINS))
    def test_str_methods(self):
        """str methods infer to their result type: decode->unicode, most->str, index/find/count->int."""
        code = """
        ' '.decode() #@
        ' '.encode() #@
        ' '.join('abcd') #@
        ' '.replace('a', 'b') #@
        ' '.format('a') #@
        ' '.capitalize() #@
        ' '.title() #@
        ' '.lower() #@
        ' '.upper() #@
        ' '.swapcase() #@
        ' '.strip() #@
        ' '.rstrip() #@
        ' '.lstrip() #@
        ' '.rjust() #@
        ' '.ljust() #@
        ' '.center() #@
        ' '.index() #@
        ' '.find() #@
        ' '.count() #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferConst(ast[0], u'')
        # indices 1-15 are str-returning methods; 16-18 (index/find/count) return ints
        for i in range(1, 16):
            self.assertInferConst(ast[i], '')
        for i in range(16, 19):
            self.assertInferConst(ast[i], 0)
    def test_unicode_methods(self):
        """unicode methods infer to their result type: encode->str, most->unicode, index/find/count->int."""
        code = """
        u' '.encode() #@
        u' '.decode() #@
        u' '.join('abcd') #@
        u' '.replace('a', 'b') #@
        u' '.format('a') #@
        u' '.capitalize() #@
        u' '.title() #@
        u' '.lower() #@
        u' '.upper() #@
        u' '.swapcase() #@
        u' '.strip() #@
        u' '.rstrip() #@
        u' '.lstrip() #@
        u' '.rjust() #@
        u' '.ljust() #@
        u' '.center() #@
        u' '.index() #@
        u' '.find() #@
        u' '.count() #@
        """
        ast = test_utils.extract_node(code, __name__)
        self.assertInferConst(ast[0], '')
        # indices 1-15 are unicode-returning methods; 16-18 return ints
        for i in range(1, 16):
            self.assertInferConst(ast[i], u'')
        for i in range(16, 19):
            self.assertInferConst(ast[i], 0)
    def test_scope_lookup_same_attributes(self):
        """A method named like an imported module must not shadow it in base-class lookup."""
        code = '''
        import collections
        class Second(collections.Counter):
            def collections(self):
                return "second"
        '''
        ast = test_utils.build_module(code, __name__)
        bases = ast['Second'].bases[0]
        inferred = next(bases.infer())
        self.assertTrue(inferred)
        self.assertIsInstance(inferred, nodes.Class)
        self.assertEqual(inferred.qname(), 'collections.Counter')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/astroid/tests/unittest_inference.py | Python | agpl-3.0 | 59,256 |
"""
The MIT License (MIT)
Copyright (c) Datos IO, Inc. 2015.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import argparse
import textwrap
import traceback
from common.common import report, capture_exception_and_abort
from common.geppetto import Geppetto
def parse_args():
    """Build and evaluate the command-line interface.

    Returns the parsed ``argparse.Namespace`` with attributes
    ``test_file``, ``config`` and ``email`` (``email`` may be None).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument('-t', '--test_file', required=True, help="Test file.")
    cli.add_argument('-c', '--config', required=True, help="Configuration file.")
    cli.add_argument('-e', '--email', help="Email to send results to.")
    return cli.parse_args()
def do_welcome():
    """Print the startup banner: ASCII-art title, the wrapped license
    disclaimer framed by '*' rules, and a (currently empty) usage blurb."""
    title = """
   _____                      _   _
  / ____|                    | | | |
 | |  __  ___ _ __  _ __  ___| |_| |_ ____
 | | |_ |/ _ \ '_ \| '_ \ / _ \ __| __/ _ |
 | |__| |  __/ |_) | |_) |  __/ |_| || (_) |
  \_____|\___| .__/| .__/ \___|\__|\__\___/
             | |   | |
             |_|   |_|     The Cloud Maestro
    """
    disclaimer = """THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
    framed = '%s\n%s\n%s' % ('*' * 70, textwrap.fill(disclaimer, 70), '*' * 70,)
    usage = """ """
    for chunk in (title, framed, usage):
        print(chunk)
def main():
    """Entry point: import the TestRun class and CONFIG_DICT named on the
    command line, print the banner, then build and run the Geppetto test.

    Exits with status 1 if either the test file or the config file cannot
    be imported.
    """
    args = parse_args()
    # Import the test file.
    try:
        test_file_name = args.test_file
        # 'path/to/test.py' -> 'path.to.test' for __import__.
        test_file = test_file_name[:-3].replace('/', '.')
        mod = __import__(test_file, fromlist=['TestRun'])
        TestRun = getattr(mod, 'TestRun')
    except Exception:
        # except Exception (not bare except) so SystemExit/KeyboardInterrupt
        # still propagate.
        report('Unable to load TestRun() from file: %s' % args.test_file, 'critical', no_date=True)
        # BUG FIX: this was print(traceback.print_exc()), which printed the
        # trace and then an extra 'None' (print_exc() returns None).
        traceback.print_exc()
        sys.exit(1)
    # Import the config file.
    try:
        config_file_name = args.config
        config_file = config_file_name[:-3].replace('/', '.')
        mod = __import__(config_file, fromlist=['CONFIG_DICT'])
        config_dict = getattr(mod, 'CONFIG_DICT')
    except Exception:
        report("Unable to import the config file: %s" % args.config, 'critical', no_date=True)
        traceback.print_exc()
        sys.exit(1)
    do_welcome()
    class GeppettoExecutableTest(TestRun):
        """Concrete TestRun bound to this invocation's config and file names."""
        def __init__(self):
            Geppetto.__init__(self)
            TestRun.set_init_params(self, config_dict, args, test_file_name, config_file_name)
        @capture_exception_and_abort
        def run(self):
            TestRun.run(self)
    g = GeppettoExecutableTest()
    g.run()
# Script entry point.
if __name__ == '__main__':
    main()
| datosio/geppetto | run.py | Python | mit | 3,889 |
# -*- coding: utf-8 -*-
import warnings
import numpy as np
import pandas as pd
from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
import pytest
class TestIndexingSlow(object):
    """Slow, randomized stress tests for DataFrame/MultiIndex indexing."""
    @pytest.mark.slow
    @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
    def test_multiindex_get_loc(self): # GH7724, GH2646
        with warnings.catch_warnings(record=True):
            # test indexing into a multi-index before & past the lexsort depth
            from numpy.random import randint, choice, randn
            cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
            # Compare mi.loc[prefix-of-key] against a boolean-mask ground
            # truth computed on the flat frame, for every prefix of `key`.
            def validate(mi, df, key):
                mask = np.ones(len(df)).astype('bool')
                # test for all partials of this key
                for i, k in enumerate(key):
                    mask &= df.iloc[:, i] == k
                    if not mask.any():
                        assert key[:i + 1] not in mi.index
                        continue
                    assert key[:i + 1] in mi.index
                    right = df[mask].copy()
                    if i + 1 != len(key):  # partial key
                        right.drop(cols[:i + 1], axis=1, inplace=True)
                        right.set_index(cols[i + 1:-1], inplace=True)
                        tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
                    else:  # full key
                        right.set_index(cols[:-1], inplace=True)
                        if len(right) == 1:  # single hit
                            right = Series(right['jolia'].values,
                                           name=right.index[0],
                                           index=['jolia'])
                            tm.assert_series_equal(mi.loc[key[:i + 1]],
                                                   right)
                        else:  # multi hit
                            tm.assert_frame_equal(mi.loc[key[:i + 1]], right)
            def loop(mi, df, keys):
                for key in keys:
                    validate(mi, df, key)
            n, m = 1000, 50
            vals = [randint(0, 10, n), choice(
                list('abcdefghij'), n), choice(
                    pd.date_range('20141009', periods=10).tolist(), n), choice(
                        list('ZYXWVUTSRQ'), n), randn(n)]
            vals = list(map(tuple, zip(*vals)))
            # bunch of keys for testing
            keys = [randint(0, 11, m), choice(
                list('abcdefghijk'), m), choice(
                    pd.date_range('20141009', periods=11).tolist(), m), choice(
                        list('ZYXWVUTSRQP'), m)]
            keys = list(map(tuple, zip(*keys)))
            keys += list(map(lambda t: t[:-1], vals[::n // m]))
            # covers both unique index and non-unique index
            df = DataFrame(vals, columns=cols)
            a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
            for frame in a, b:
                for i in range(5):  # lexsort depth
                    df = frame.copy() if i == 0 else frame.sort_values(
                        by=cols[:i])
                    mi = df.set_index(cols[:-1])
                    assert not mi.index.lexsort_depth < i
                    loop(mi, df, keys)
    @pytest.mark.slow
    def test_large_dataframe_indexing(self):
        # GH10692: appending one row via .loc on a million-row frame
        result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
        result.loc[len(result)] = len(result) + 1
        expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
        tm.assert_frame_equal(result, expected)
    @pytest.mark.slow
    def test_large_mi_dataframe_indexing(self):
        # GH10645: membership test on a large MultiIndex must not blow up
        result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
        assert (not (10 ** 6, 0) in result)
| cython-testbed/pandas | pandas/tests/indexing/test_indexing_slow.py | Python | bsd-3-clause | 3,774 |
import unittest, time, sys, random
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e
# Number of import/StoreView trials run by the test below.
ITERATIONS = 20
# NOTE(review): not referenced in this file's visible code — presumably an
# import flag (1 = delete source after parse); confirm against h2o_import.
DELETE_ON_DONE = 1
# Also exercise exec (which read-locks all existing keys) between trials.
DO_EXEC = True
# Use the uncompressed .dat file instead of the gzipped variant.
DO_UNCOMPRESSED = False
class Basic(unittest.TestCase):
    """Repeatedly import the same file and check StoreView/exec stability."""
    def tearDown(self):
        # Fail the test if any node logged errors during the run.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # assume we're at 0xdata with it's hdfs namenode
        global localhost
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(2, java_heap_GB=3)
        else:
            # all hdfs info is done thru the hdfs_config michal's ec2 config sets up?
            h2o_hosts.build_cloud_with_hosts()
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_parse_manyfiles_1(self):
        h2o.beta_features = True
        # these will be used as directory imports/parse
        csvDirname = "manyfiles-nflx-gz"
        timeoutSecs = 600
        trial = 0
        for iteration in range(ITERATIONS):
            if DO_UNCOMPRESSED:
                csvFilename = "a_1.dat"
            else:
                csvFilename = "file_1.dat.gz"
            csvPathname = csvDirname + "/" + csvFilename
            trialStart = time.time()
            # import*****************************************
            # Unique key per trial so each import creates a fresh destination.
            hex_key = csvFilename + "_" + str(trial) + ".hex"
            start = time.time()
            # the import has to overwrite existing keys. no parse
            h2i.import_only(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key,
                timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=120, doSummary=False)
            elapsed = time.time() - start
            print "import", trial, "end ", 'took', elapsed, 'seconds',\
                "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
            # STOREVIEW***************************************
            print "\nTrying StoreView after the import"
            for node in h2o.nodes:
                h2o_cmd.runStoreView(node=node, timeoutSecs=30, view=10000)
            # exec does read lock on all existing keys
            if DO_EXEC:
                # fails
                execExpr="A.hex=c(0,1)"
                # execExpr="A.hex=0;"
                h2e.exec_expr(execExpr=execExpr, timeoutSecs=20)
                h2o_cmd.runInspect(key='A.hex')
                print "\nTrying StoreView after the exec "
                h2o_cmd.runStoreView(timeoutSecs=30, view=10000)
            # for node in h2o.nodes:
            #     h2o_cmd.runStoreView(node=node, timeoutSecs=30, view=10000)
            print "Trial #", trial, "completed in", time.time() - trialStart, "seconds."
            trial += 1
if __name__ == '__main__':
    # Delegate to h2o's unittest runner (handles cloud setup/teardown hooks).
    h2o.unit_main()
| janezhango/BigDataMachineLearning | py/testdir_single_jvm/test_import_only_loop.py | Python | apache-2.0 | 2,844 |
# coding: utf-8
import pickle
from universal import *
from SequenceModel import USE_BASELINE
# Minimum information content (bits) required of a PPM's outermost columns
# in PFMSelector.select before a longer PPM may be chosen.
MIN_INFO = 0.25
TARGT_IC = 0.50 # mean column IC after normalizing a PWM
P_FACTOR = 1E+6 # just a very large number, or mu = -13.8
# NOTE(review): MAX_ITER / CI_LEVEL are not used in this file's visible code;
# presumably consumed by downstream fitting routines — confirm.
MAX_ITER = 1000
CI_LEVEL = 0.95
# (u, d) coefficient triples for Weights(); a None entry in d expands to the
# number of sequences at call time.
FRAC1 = (1, 0, 1), (1, 0, None)
FRAC2 = (1, 0, 1), (1, 1, None)
def Weights(e_facs, p_fac, n_fac, u=(1, 1, 1), d=(1, 1, 1)):
    """Normalized per-sequence weights.

    Sequence ``i`` receives ``u . [e_facs[i], p_fac, n_fac]`` divided by the
    shared denominator ``d . [sum(e_facs), p_fac, n_fac]``.  A ``None`` entry
    in *d* is replaced by ``len(e_facs)`` before the dot product is taken.
    """
    top_coeffs = np.asarray(u)
    bottom_coeffs = np.asarray(
        [len(e_facs) if term is None else term for term in d])
    numerators = [top_coeffs.dot([e_fac, p_fac, n_fac]) for e_fac in e_facs]
    denominator = bottom_coeffs.dot([sum(e_facs), p_fac, n_fac])
    return np.divide(numerators, denominator)
class ModelBase(object):
    """Minimal interface shared by the enrichment models in this module.

    Subclasses override :meth:`Prediction` and the factor accessors to supply
    real estimates; this base class predicts a flat 1.0 for every sequence.
    """

    def SetData(self, seqs):
        """Store the distinct sequences drawn from *seqs*."""
        self.uniqs = list(set(seqs))

    def EFactors(self):
        """Map each stored unique sequence to its predicted enrichment."""
        return {seq: value for seq, value in self.Prediction()}

    def PFactor(self):
        """Chemical-potential factor; a very large constant by default."""
        return P_FACTOR

    def NFactor(self):
        """Non-specific-binding factor; zero by default."""
        return 0.0

    def Prediction(self):
        """Yield ``(sequence, enrichment)`` pairs; flat 1.0 baseline here."""
        return ((seq, 1.0) for seq in self.uniqs)

    def CalWeights(self, u=(1, 1, 1), d=(1, 1, 1)):
        """Return a closure mapping a batch of sequences to Weights()."""
        enrichment = self.EFactors()
        p_factor = self.PFactor()
        n_factor = self.NFactor()

        def Func(seqs):
            return Weights([enrichment[seq] for seq in seqs],
                           p_factor, n_factor, u, d)
        return Func

    def Consensus(self):
        """Record and return the (placeholder) consensus sequence."""
        self.consensus = 'T0'
        return self.consensus

    def Init(self, _PFM, extra=()):
        """Build the initial parameter vector from a PFM plus extras."""
        params = PFM.Shrink(_PFM, self.consensus)
        params.extend(extra)
        params.append(-np.log(P_FACTOR))  # chemical potential mu
        params.append(0.0)  # A = exp(0) = 1
        if USE_BASELINE:
            params.append(-np.log(P_FACTOR))  # non-specific baseline term
        return np.array(params)
class Model(ModelBase):
    """Enrichment model backed by a pickled regression model.

    Wraps a pickled ``BaseModel`` plus a PWM parameter vector and exposes
    error/gradient/Hessian evaluation along with the factor accessors that
    :meth:`ModelBase.CalWeights` consumes.
    """
    def __init__(self, model_file, Encode, pwm=None):
        """
        :param model_file: str
        :param Encode: Encoder.Encode
        :param pwm: array_like | None
        :return:
        """
        self.model = pickle.load(open(model_file))
        """:type: BaseModel"""
        self.Encode = Encode
        # Fall back to the fitted parameters stored with the pickled model.
        self.pwm = self.model.R.xf if pwm is None else np.array(pwm, float)
    def ReadData(self, path, bias=1.0):
        """
        :param path: str
        :param bias: float | array_like
        :return:
        """
        self.data, self.targ = self.model.read_data(open(path), targets=True)
        self.closure = self.model.closure(
            self.data, self.targ, instance_weight=bias
        )
    def SetData(self, seqs):
        """
        :param seqs: iterable
        :return:
        """
        uniqs = list(set(seqs))
        # Encode each unique sequence into a column of the design matrix.
        codes = np.mat(map(self.Encode, uniqs)).T
        self.data = uniqs, codes
    def Reparameterize(self, pwm=None):
        # Delegates to the pickled model's prior; presumably maps the
        # fit-space parameters back to model space — confirm in SequenceModel.
        if pwm is None:
            pwm = self.pwm
        return self.model.prior.reparameterize(pwm)
    def Error(self, pwm=None):
        # Objective value at `pwm` (defaults to the stored parameters).
        if pwm is None:
            pwm = self.pwm
        return self.closure.error(pwm)
    def Hessian(self, pwm=None, h=1e-6):
        # Numerical Hessian with step size `h`.
        if pwm is None:
            pwm = self.pwm
        return self.closure.hessian(pwm, h)
    def InvCov(self, pwm=None, h=1e-6):
        # Inverse covariance of the parameter estimates: H / sigma^2,
        # with sigma^2 estimated from the residual error over m dof.
        H = self.Hessian(pwm, h)
        m = len(self.targ.A.T) - len(self.model.model)
        sigma2 = self.Error(pwm) / m # unbiased estimate of sigma^2
        return H / sigma2
    def Gradient(self, pwm=None):
        if pwm is None:
            pwm = self.pwm
        return self.closure.gradient(pwm)
    def Prediction(self, cutoff=0.0):
        # Predicted counts per unique sequence, floored at `cutoff`.
        uniqs, codes = self.data
        pc = self.model.PC(self.model.model, self.pwm, self.model.prior)
        counts, = pc(codes).A # pc(codes) is 1-by-n matrix
        return itt.izip(uniqs, np.maximum(counts, cutoff))
    def OnlyPWM(self):
        # The leading model.I entries of the reparameterized vector are the
        # PWM proper; the trailing entries are potential/baseline terms.
        return self.Reparameterize()[:self.model.I]
    def EFactors(self):
        """
        :return: dict
        """
        pwm = self.OnlyPWM()
        uniqs, codes = self.data # codes is np.matrix
        energies = codes.T.A.dot(pwm) # pwm is negative of actual PWM
        return dict(itt.izip(uniqs, np.exp(energies)))
    def PFactor(self):
        potential = self.Reparameterize()[self.model.I] # chem potential
        return np.exp(-potential)
    def NFactor(self): # non-specific binding
        return self.Reparameterize()[self.model.I + 2] if USE_BASELINE else 0.0
class Model2(Model):
    """Variant of :class:`Model` that computes the chemical-potential and
    non-specific factors once at construction instead of on every call."""
    def __init__(self, model_file, Encode, pwm=None):
        super(Model2, self).__init__(model_file, Encode, pwm)
        self.Unravel()
    def Unravel(self):
        # Split the trailing reparameterized entries into a positive-binding
        # factor and a non-specific remainder.
        if USE_BASELINE:
            U, A, b = self.Reparameterize()[self.model.I:] # A > 0, b > 0
            # exp(-U) is apportioned between specific (A) and baseline (b).
            self.p_factor = np.exp(-U) * A / (A + b)
            self.n_factor = np.exp(-U) - self.p_factor
        else:
            U = self.Reparameterize()[self.model.I]
            self.p_factor = np.exp(-U)
            self.n_factor = 0.0
    def PFactor(self):
        return self.p_factor
    def NFactor(self): # non-specific binding
        return self.n_factor
class PPMModel(ModelBase):
    """Model whose predictions come from a fixed position-probability matrix."""
    def __init__(self, _PPM, width, pseudo, labels=BASES, extra=()):
        # PFM.Predictor yields a per-sequence score lookup plus the PPM it
        # actually used (see PFM module for the exact contract).
        self.pred, self._PPM = PFM.Predictor(_PPM, width, labels, pseudo)
        self.consensus = PFM.Consensus(self._PPM, labels)
        # Initial parameter vector for downstream optimization.
        self.x0 = self.Init(self._PPM, extra)
    def Consensus(self):
        return self.consensus
    def Prediction(self):
        # Yield the precomputed PPM score for every stored unique sequence.
        for seq in self.uniqs:
            yield seq, self.pred[seq]
def MakeLib(path, width, templ, both=True):
    """Load a count file into a ContigLibrary and contig it to *width*.

    *templ* supplies the sequence template via its ``format`` method; when
    *both* is true, contigs are built from both strands.
    """
    counted = ContigLibrary(templ.format).ReadCount(path)
    counted.Contig(width, both)
    return counted
def ContigCnt(lib, model, start=0, stop=None, u=None, d=None):
    """Weight every record's contigs with *model* and return a new library.

    :param lib: ContigLibrary whose records expose ``contigs``/``SetWeights``
    :param model: ModelBase-style instance supplying ``CalWeights``
    :param start: slice start applied to every contig before weighting
    :param stop: slice stop applied to every contig before weighting
    :param u: numerator coefficient triple for Weights(); ``None`` falls back
        to the (1, 1, 1) default used by ``ModelBase.CalWeights``
    :param d: denominator coefficient triple; same fallback as *u*
    """
    # Passing None through to Weights() would make np.array(None) a 0-d
    # object array and break its coefficient handling; substitute the
    # documented CalWeights defaults instead.
    if u is None:
        u = (1, 1, 1)
    if d is None:
        d = (1, 1, 1)
    model.SetData(contig[start:stop] for contig in lib.Iter())
    CalWeights = model.CalWeights(u, d)
    for rec in lib.itervalues():
        rec.SetWeights(CalWeights([seq[start:stop] for seq in rec.contigs]))
    return lib.New()
class PFMSelector(object):
    """Choose among candidate PPMs, preferring longer, well-aligned ones."""

    def __init__(self, PPMs, core_len=5):
        self.PPMs = sorted(PPMs, key=len)
        # The core can never exceed the shortest candidate.
        self.core_len = min(len(self.PPMs[0]), core_len)

    def get_base_PFM(self, pseudo=0.0):
        """Return the PPM whose core has the highest mean column IC."""
        def core_score(ppm):
            return self.core_MCIC(ppm, self.core_len, pseudo)
        return max(self.PPMs, key=core_score)

    def select(self, limit, min_info=MIN_INFO):
        """Pick the longest PPM that aligns with the base PPM (on either
        strand) and keeps informative outermost columns; the base PPM is
        returned when no longer candidate qualifies."""
        base = self.get_base_PFM()
        chosen = base
        for candidate in self.PPMs:
            if len(candidate) <= len(base):
                continue
            aligned = (
                self.is_aligned(base, candidate, self.core_len, limit)
                or self.is_aligned(base, self.rc_PFM(candidate),
                                   self.core_len, limit)
            )
            if not aligned:
                continue
            entropies = PFM.Entropy(candidate)
            # Edge columns must still carry at least min_info bits.
            if 2.0 - max(entropies[0], entropies[-1]) > min_info:
                chosen = candidate
        return chosen

    @classmethod
    def core_MCIC(cls, _PFM, width, pseudo=0.0):
        """Mean column information content over the central *width* columns."""
        return 2.0 - np.mean(PFM.Entropy(PFM.Slice(_PFM, width, pseudo)))

    @classmethod
    def is_aligned(cls, xPFM, yPFM, width, limit):
        """True if any pair of width-sized PWM windows correlates above
        *limit* (mean per-column squared Pearson r)."""
        best = max(
            cls.mean_col_r2(x_window, y_window)
            for x_window in Subset(PFM.PFM_PWM(xPFM), width)
            for y_window in Subset(PFM.PFM_PWM(yPFM), width)
        )
        return best > limit

    @classmethod
    def mean_col_r2(cls, xPFM, yPFM):
        """Mean squared Pearson correlation of position-matched columns."""
        assert len(xPFM) == len(yPFM)
        return np.mean([stats.pearsonr(col_x, col_y)[0] ** 2
                        for col_x, col_y in zip(xPFM, yPFM)])

    @classmethod
    def rc_PFM(cls, _PFM):
        """Reverse the PFM along both axes — the reverse complement when the
        base columns are ordered so reversal maps each base to its
        complement (e.g. A,C,G,T); confirm the ordering convention."""
        return np.fliplr(_PFM[::-1])
| sx-ruan/BEESEM | tools.py | Python | gpl-3.0 | 7,388 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'zhwei'
from django.db import models
from django.db.models.signals import post_save
from django.core.urlresolvers import reverse
from ..utils.models import TimeStampedModel
from ..utils.choices import DOCTOR_CHOICES, get_full_desc
from ..accounts.models import User
from ..patient.models import SEX_CHOICES, Patient
class Hospital(models.Model):
    """Hospital profile attached one-to-one to a ``User`` account."""
    user = models.OneToOneField(User, verbose_name='医院名称')  # hospital name (account)
    local = models.CharField(verbose_name='所在地区', max_length=20)  # region
    is_community = models.BooleanField(verbose_name='是否治疗中心')  # treatment-center flag
    level = models.CharField(verbose_name='医院等级', max_length=20)  # hospital grade
    class Meta:
        # Was '医生' (doctor) — an apparent copy-paste from the Doctor model
        # below; this model represents a hospital ('医院'), and the label is
        # what the Django admin displays for it.
        verbose_name = '医院'
    def __unicode__(self):
        return self.user.username
class Doctor(models.Model):
    """Doctor profile attached one-to-one to a ``User`` account."""
    user = models.OneToOneField(User, verbose_name='医生姓名')  # doctor name (account)
    hospital = models.ForeignKey(Hospital, verbose_name='所在医院', blank=True, null=True)  # employing hospital
    sex = models.CharField(verbose_name='性别', default='2', choices=SEX_CHOICES, max_length=1)  # default '2' = private
    age = models.CharField(verbose_name='年龄', max_length=2, blank=True, null=True)
    job_title = models.CharField(verbose_name='职称', max_length=50, blank=True, null=True,
                                 choices=DOCTOR_CHOICES)
    class Meta:
        verbose_name = '医生'
    def get_sex(self):
        # '1' = male ('男'), '0' = female ('女'), anything else is private.
        if self.sex == '1': return '男'
        elif self.sex == '0': return '女'
        else: return '隐私'
    def get_patients(self):
        """Yield this doctor's current patients via open treatment records
        (those with no end_date yet)."""
        record_list = self.doctorrecord_set.filter(end_date=None, doctor=self)
        for r in record_list:
            yield r.patient
    def get_job_title(self):
        """Return the human-readable description of the doctor's job title."""
        return get_full_desc(self.job_title, choices=DOCTOR_CHOICES)
    def __unicode__(self):
        return self.user.username
class Record(TimeStampedModel):
    """A patient's condition record (symptoms plus examination results).

    More: ``unique_for_??`` could be used here to limit how frequently a
    record may be created.
    """
    patient = models.ForeignKey(Patient, verbose_name='病人', blank=True)
    doctor = models.ForeignKey(Doctor, verbose_name='检查医师')
    # Record type: free description vs. examination result data.
    type = models.CharField(verbose_name='记录类型', max_length=50,
                            choices=(("description","病情描述"),("result","检查结果"),),
                            help_text='病情描述、检查结果数据')
    # Recent physical condition, from very poor to clearly improved.
    physical_state = models.CharField(verbose_name='最近身体状况', max_length=50,
                                      choices=(
                                          ("badly","非常不好"),
                                          ("worse","不太好"),
                                          ("stable","稳定"),
                                          ("better","有改善"),
                                          ("nice","改善明显"),
                                      ))
    # Recent mental state, from always pessimistic to always optimistic.
    mental_state = models.CharField(verbose_name='最近精神状态', max_length=50,
                                    choices=(
                                        ("zshbg","总是很悲观"),
                                        ("jchbg","经常很悲观"),
                                        ("oeng","偶尔难过"),
                                        ("ddlg","大多数时间乐观"),
                                        ("zslg","总是很乐观"),
                                    ))
    # Current living situation (working normally … unable to self-care).
    living_state = models.CharField(verbose_name='目前生活状况', max_length=50,
                                    choices=(
                                        ("zcgz","基本正常工作"),
                                        ("gzcl","工作很吃力"),
                                        ("sbzl","不工作生活自理"),
                                        ("shbnzl","生活不能自理"),
                                    ))
    # Present symptoms and signs (chest tightness, chest pain, etc.).
    present_symptoms = models.CharField(verbose_name='目前症状及体征', max_length=50,
                                        choices=(
                                            ("xmqd","胸闷气短"),
                                            ("xt","胸痛"),
                                            ("kx","咳血"),
                                            ("pwjghxkn","平卧即感呼吸困难"),
                                            ("zg","紫绀"),
                                            ("xj","心悸"),
                                            ("jrtt","肌肉疼痛"),
                                            ("xzhfbfz","下肢或腹部浮肿"),
                                            ("exot","恶心呕吐"),
                                            ("other","其他症状(请描述)"),
                                        ),)
    # NYHA-style cardiac function class I-IV.
    present_heart_func = models.CharField(verbose_name='目前心功能', max_length=20,
                                          choices=(
                                              ("1","I级"),
                                              ("2","II级"),
                                              ("3","III级"),
                                              ("4","IV级"),
                                          ),)
    # Examination item performed (liver/kidney/heart function, ultrasound…).
    check_item = models.CharField(verbose_name='检查项目', max_length=20,
                                  choices=(
                                      ("ggn","肝功能"),
                                      ("sgn","肾功能"),
                                      ("xgn","心功能"),
                                      ("cc","彩超"),
                                      ("other","其他(请列明)"),
                                  ))
    # Measured values; units documented in each field's help_text.
    pap = models.CharField(verbose_name='肺动脉压力', max_length=10,
                           help_text='单位:mmHg')
    heart_fail_value = models.CharField(verbose_name='心衰值', max_length=10)
    walk_distance_in6 = models.PositiveIntegerField(verbose_name='6分钟步行距离',
                                                    help_text='单位:(米)')
    bp = models.CharField(verbose_name='血压', max_length=20,
                          help_text='单位:mmHg')
    glu = models.CharField(verbose_name='血糖', max_length=20,
                           help_text='单位:mmol/L,正常值:3.61~6.11mmol/L')
    chol = models.CharField(verbose_name='胆固醇', max_length=20,
                            help_text='单位:,正常值:2.4--5.5mmol/L')
    ua = models.CharField(verbose_name='尿酸', max_length=20,
                          help_text='单位:umol/L,正常值:男149~416umol/L;女89~357umol/L;')
    class Meta:
        verbose_name = '病情记录'
    def __unicode__(self):
        return self.patient.user.username + "'s 病情记录"
class DoctorRecord(models.Model):
    """A patient's treatment-history entry.

    Records which doctors have treated the patient over time; an entry with
    no ``end_date`` represents ongoing treatment.
    """
    patient = models.ForeignKey(Patient, verbose_name='患者', blank=True)
    doctor = models.ForeignKey(Doctor, verbose_name='医生')
    from_date = models.DateTimeField(verbose_name='开始日期', auto_now_add=True)
    from_description = models.TextField(verbose_name='就医说明', max_length=500)
    end_date = models.DateTimeField(verbose_name='结束日期', blank=True, null=True)
    end_description = models.TextField(verbose_name='结束说明', max_length=500, blank=True, null=True)
    class Meta:
        verbose_name = '患者就医记录'
    def status(self):
        # Treatment is considered finished once an end date has been set.
        if self.end_date:return '已结束'
        else:return '正在接受治疗'
    def __unicode__(self):
        return self.patient.user.username + ' Doctor History'
| sdutlinux/pahchina | pahchina/apps/medical/models.py | Python | mit | 7,784 |
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.iam.managed_policy
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
class ManagedPolicy(Watcher):
    """Watcher that enumerates IAM managed policies per account."""
    index = 'policy'
    i_am_singular = 'Managed Policy'
    i_am_plural = 'Managed Policies'

    def __init__(self, accounts=None, debug=False):
        super(ManagedPolicy, self).__init__(accounts=accounts, debug=debug)

    def slurp(self):
        """
        :returns: item_list - list of Managed Policies.
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()

        item_list = []
        exception_map = {}
        from security_monkey.common.sts_connect import connect
        for account in self.accounts:
            all_policies = []
            try:
                # boto3 IAM resource; policies.all() paginates transparently.
                iam = connect(account, 'iam_boto3')
                for policy in iam.policies.all():
                    all_policies.append(policy)
            except Exception as e:
                # Record connection failures per-account and keep going with
                # the remaining accounts.
                exc = BotoConnectionIssue(str(e), 'iamuser', account, None)
                self.slurp_exception((self.index, account, 'universal'), exc, exception_map)
                continue

            for policy in all_policies:
                # Honor the watcher-level ignore list by policy name.
                if self.check_ignore_list(policy.policy_name):
                    continue
                # Snapshot of the policy; attachments are flattened to ARNs,
                # dates are stringified so the config is JSON-serializable.
                item_config = {
                    'name': policy.policy_name,
                    'arn': policy.arn,
                    'create_date': str(policy.create_date),
                    'update_date': str(policy.update_date),
                    'default_version_id': policy.default_version_id,
                    'attachment_count': policy.attachment_count,
                    'attached_users': [a.arn for a in policy.attached_users.all()],
                    'attached_groups': [a.arn for a in policy.attached_groups.all()],
                    'attached_roles': [a.arn for a in policy.attached_roles.all()],
                    'policy': policy.default_version.document
                }

                app.logger.debug("Slurping %s (%s) from %s" % (self.i_am_singular, policy.policy_name, account))
                item_list.append(
                    ManagedPolicyItem(account=account, name=policy.policy_name, config=item_config)
                )

        return item_list, exception_map
class ManagedPolicyItem(ChangeItem):
    """One managed-policy snapshot tracked for change detection."""

    def __init__(self, account=None, name=None, config=None):
        """
        :param account: account the policy belongs to
        :param name: policy name
        :param config: dict of policy attributes; defaults to empty
        """
        # The previous `config={}` default was a mutable default argument,
        # shared by every instance constructed without an explicit config.
        super(ManagedPolicyItem, self).__init__(
            index=ManagedPolicy.index,
            region='universal',
            account=account,
            name=name,
            new_config=config if config is not None else {})
| Yelp/security_monkey | security_monkey/watchers/iam/managed_policy.py | Python | apache-2.0 | 3,561 |
"""
Revision ID: e4eedba5965f
Revises: 93fce807f225
Create Date: 2021-06-01 15:17:01.968058
"""
from alembic import op
import sqlalchemy as sa
import rdr_service.model.utils
from rdr_service.genomic_enums import GenomicReportState
# revision identifiers, used by Alembic.
revision = 'e4eedba5965f'
down_revision = '93fce807f225'
branch_labels = None
depends_on = None
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade (upgrade_rdr / upgrade_metrics)."""
    globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade (downgrade_rdr / downgrade_metrics)."""
    globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
    """Create genomic_member_report_state and backfill it from
    genomic_set_member rows in workflow states 23/24/25."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('genomic_member_report_state',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('genomic_set_member_id', sa.Integer(), nullable=False),
    sa.Column('genomic_report_state', rdr_service.model.utils.Enum(GenomicReportState), nullable=True),
    sa.Column('created', sa.DateTime(), nullable=True),
    sa.Column('modified', sa.DateTime(), nullable=True),
    sa.Column('module', sa.String(length=80), nullable=True),
    sa.ForeignKeyConstraint(['genomic_set_member_id'], ['genomic_set_member.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.execute(
        "Insert into genomic_member_report_state (genomic_set_member_id) Select id From genomic_set_member "
        "Where genomic_set_member.genomic_workflow_state in (23,24,25)"
    )
    # should be > 68K records
    # Map workflow states 23/24/25 to report states 1/2/3 — presumably the
    # GEM-report members of GenomicWorkflowState/GenomicReportState; confirm
    # against rdr_service.genomic_enums.
    op.execute(
        "Update genomic_member_report_state INNER Join genomic_set_member "
        "On genomic_set_member.id = genomic_member_report_state.genomic_set_member_id "
        "Set module = 'gem', "
        "genomic_set_member_id = genomic_set_member.id, "
        "genomic_report_state = "
        "CASE WHEN genomic_set_member.genomic_workflow_state = 23 THEN 1 "
        "WHEN genomic_set_member.genomic_workflow_state = 24 THEN 2 "
        "WHEN genomic_set_member.genomic_workflow_state = 25 THEN 3 END, "
        # The original string ended at "NOW()" with no trailing space, so the
        # concatenated SQL read "NOW()Where ..." and relied on the SQL lexer
        # to split the tokens; make the separator explicit.
        "genomic_member_report_state.created=NOW() "
        "Where genomic_set_member.genomic_workflow_state in (23,24,25)"
    )
    # should be > 68K records
    # ### end Alembic commands ###
def downgrade_rdr():
    # ### commands auto generated by Alembic - please adjust! ###
    # Dropping the table discards the backfilled report states (data loss).
    op.drop_table('genomic_member_report_state')
    # ### end Alembic commands ###
def upgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
def downgrade_metrics():
    # ### commands auto generated by Alembic - please adjust! ###
    # No metrics-database changes in this revision.
    pass
    # ### end Alembic commands ###
| all-of-us/raw-data-repository | rdr_service/alembic/versions/e4eedba5965f_.py | Python | bsd-3-clause | 2,602 |
"""Module requiring Paste to test dependencies download of pip wheel."""
__version__ = "3.1.4"
| sbidoul/pip | tests/data/packages/requiresPaste/requiresPaste.py | Python | mit | 96 |
# -*- coding: utf-8 -*-
"""Provides deal object for PMP-E and Global Deals."""
from __future__ import absolute_import
from ..entity import Entity
class Deal(Entity):
    """T1 deal entity for PMP-E and Global Deals (collection ``deals``)."""
    collection = 'deals'
    resource = 'deal'
    # Related entities that may be expanded alongside a deal.
    _relations = {
        'advertiser',
        'publisher',
        'supply_source',
    }
    # Enumerated field values; the second argument is the default.
    _deal_sources = Entity._enum({'USER', 'INTERNAL'}, 'INTERNAL')
    _media_types = Entity._enum({'DISPLAY', 'VIDEO'}, 'DISPLAY')
    _price_methods = Entity._enum({'CPM'}, 'CPM')
    _price_types = Entity._enum({'FIXED', 'FLOOR'}, None)
    # Deserialization map: API field -> converter (None = keep as string).
    _pull = {
        'advertiser_id': int,
        'created_on': Entity._strpt,
        'currency_code': None,
        'deal_identifier': None,
        'deal_source': None,
        'description': None,
        'end_datetime': Entity._strpt,
        'id': int,
        'media_type': None,
        'name': None,
        'partner_sourced': Entity._int_to_bool,
        'price': float,
        'price_method': None,
        'price_type': None,
        'publisher_id': int,
        'start_datetime': Entity._strpt,
        'status': Entity._int_to_bool,
        'supply_source_id': int,
        'updated_on': Entity._strpt,
        'version': int,
        'zone_name': None,
    }
    # Serialization map: starts from _pull and overrides fields that need
    # enum validation or reverse conversion when writing back to the API.
    _push = _pull.copy()
    _push.update({
        'deal_source': _deal_sources,
        'end_datetime': Entity._strft,
        'media_type': _media_types,
        'partner_sourced': int,
        'price_method': _price_methods,
        'price_type': _price_types,
        'start_datetime': Entity._strft,
        'status': int,
    })
    def __init__(self, session, properties=None, **kwargs):
        super(Deal, self).__init__(session, properties, **kwargs)
| Cawb07/t1-python | terminalone/models/deal.py | Python | bsd-3-clause | 1,720 |
"""
WSGI config for grading_controller project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
# NOTE(review): the value must be an importable Django settings module;
# typical layouts use a dotted path like "project.settings" — confirm that
# plain "aws" resolves in this deployment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aws")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Entry point for the query report.

    :param filters: frappe._dict with ``supplier``, ``from_date``, ``to_date``
    :return: (columns, data) as expected by frappe's report runner
    """
    # msgprint only flashes a message and the report would still be computed
    # over the invalid range; throw aborts with the same message, matching
    # the standard validation pattern for frappe reports.
    if filters.from_date >= filters.to_date:
        frappe.throw(_("To Date must be greater than From Date"))

    data = []
    columns = get_columns()
    get_data(data, filters)

    return columns, data
def get_columns():
    """Column definitions for the report, in display order."""
    def column(label, fieldtype, fieldname, width, **extra):
        # Keep the key order of the original literals:
        # label, fieldtype, fieldname, (options/hidden), width.
        spec = {
            "label": _(label),
            "fieldtype": fieldtype,
            "fieldname": fieldname,
        }
        spec.update(extra)
        spec["width"] = width
        return spec

    return [
        column("Purchase Order", "Link", "purchase_order", 150,
               options="Purchase Order"),
        column("Date", "Date", "date", 150, hidden=1),
        column("Supplier", "Link", "supplier", 150, options="Supplier"),
        column("Item Code", "Data", "rm_item_code", 100),
        column("Required Quantity", "Float", "r_qty", 100),
        column("Transferred Quantity", "Float", "t_qty", 100),
        column("Pending Quantity", "Float", "p_qty", 100),
    ]
def get_data(data, filters):
    """Append one row per (purchase order, raw-material item) whose
    transferred quantity differs from the required quantity.

    Mutates and returns *data*.
    """
    orders = get_po(filters)
    order_names = [order.name for order in orders]
    transferred_map = frappe._dict(get_transferred_quantity(order_names))
    supplied_items = get_purchase_order_item_supplied(order_names)

    for order in orders:
        for item in supplied_items:
            if item.parent != order.name or order.name not in transferred_map:
                continue
            transferred = transferred_map.get(order.name).get(item.rm_item_code)
            if item.required_qty == transferred:
                # Fully transferred — nothing pending for this item.
                continue
            transferred = transferred or 0
            data.append({
                'purchase_order': item.parent,
                'date': order.transaction_date,
                'supplier': order.supplier,
                'rm_item_code': item.rm_item_code,
                'r_qty': item.required_qty,
                't_qty': transferred,
                'p_qty': item.required_qty - transferred,
            })
    return data
def get_po(filters):
    """Submitted subcontracted Purchase Orders for the supplier/date window."""
    conditions = [
        ["is_subcontracted", "=", "Yes"],
        ["supplier", "=", filters.supplier],
        ["transaction_date", "<=", filters.to_date],
        ["transaction_date", ">=", filters.from_date],
        ["docstatus", "=", 1],
    ]
    return frappe.get_all("Purchase Order", filters=conditions,
                          fields=["name", "transaction_date", "supplier"])
def get_transferred_quantity(po_name):
    """Total quantity sent to subcontractors, per purchase order and item.

    :param po_name: list of Purchase Order names
    :return: ``{purchase_order: {item_code: total_qty}}`` built from the
        submitted 'Send to Subcontractor' stock entries
    """
    entries = get_stock_entry(po_name)
    details = get_stock_entry_detail([entry.name for entry in entries])
    totals = {}
    for entry in entries:
        for detail in details:
            if detail.parent != entry.name:
                continue
            # Tag the detail row with its PO, as the original code did.
            detail["Purchase_order"] = entry.purchase_order
            per_item = totals.setdefault(entry.purchase_order, {})
            per_item[detail.item_code] = (
                per_item.get(detail.item_code, 0) + detail.qty)
    return totals
def get_stock_entry(po):
    """Submitted 'Send to Subcontractor' stock entries for the given POs."""
    conditions = [
        ('purchase_order', 'IN', po),
        ('stock_entry_type', '=', 'Send to Subcontractor'),
        ('docstatus', '=', 1),
    ]
    return frappe.get_all("Stock Entry", filters=conditions,
                          fields=["name", "purchase_order"])
def get_stock_entry_detail(se):
    """Item rows (parent, item_code, qty) of the given stock entries."""
    return frappe.get_all(
        "Stock Entry Detail",
        filters=[["parent", "in", se]],
        fields=["parent", "item_code", "qty"])
def get_purchase_order_item_supplied(po):
    """Raw-material requirement rows attached to the given purchase orders."""
    return frappe.get_all(
        "Purchase Order Item Supplied",
        filters=[('parent', 'IN', po)],
        fields=['parent', 'rm_item_code', 'required_qty'])
| Zlash65/erpnext | erpnext/buying/report/subcontracted_raw_materials_to_be_transferred/subcontracted_raw_materials_to_be_transferred.py | Python | gpl-3.0 | 3,849 |
import math
from js_helper import TestCase
INFINITY = float('inf')  # expected Python value for JavaScript Infinity
NEG_INFINITY = float('-inf')  # expected Python value for JavaScript -Infinity
class TestMathFuncs(TestCase):
    """Check that the JS interpreter's Math.* builtins match expectations.

    Tests are generators: each yielded tuple is (callable, args...) run by
    the nose-style test runner.
    """
    def do_func(self, func):
        """Return a helper asserting Math.<func>(<params>) == output."""
        def wrap(params, output):
            self.do_expr("Math.%s(%s)" % (func, params), output)
        return wrap

    def do_expr(self, expr, output):
        """Evaluate `expr` in a fresh script and assert its value."""
        self.setUp()
        self.run_script("var x = %s" % expr)
        self.assert_var_eq("x", output)

    def test_abs(self):
        """Test that the abs() function works properly."""
        yield self.do_func("abs"), "-5", 5
        yield self.do_func("abs"), "5", 5
        yield self.do_func("abs"), "-Infinity", INFINITY

    def test_exp(self):
        """Test that the exp() function works properly."""
        yield self.do_func("exp"), "null", 1
        yield self.do_func("exp"), "false", 1
        yield self.do_expr, "Math.exp(1) == Math.E", True
        yield self.do_expr, "Math.exp('1') == Math.E", True
        yield self.do_func("exp"), "'0'", 1
        yield self.do_func("exp"), "0", 1
        yield self.do_func("exp"), "-0", 1
        yield self.do_expr, "Math.exp(Infinity) == Infinity", True
        yield self.do_expr, "Math.exp(-Infinity) == 0", True

    def test_ceil(self):
        """Test that the ceil() function works properly."""
        yield self.do_func("ceil"), "null", 0
        yield self.do_func("ceil"), "void 0", 0
        yield self.do_func("ceil"), "true", 1
        yield self.do_func("ceil"), "false", 0
        yield self.do_func("ceil"), "'1.1'", 2
        yield self.do_func("ceil"), "'-1.1'", -1
        yield self.do_func("ceil"), "'0.1'", 1
        yield self.do_func("ceil"), "'-0.1'", 0
        yield self.do_func("ceil"), "0", 0
        # "j": -0,
        yield self.do_expr, "Math.ceil(-0) == -Math.floor(0)", True
        yield self.do_func("ceil"), "Infinity", INFINITY
        yield (self.do_expr, "Math.ceil(Infinity) == -Math.floor(-Infinity)",
               True)
        yield self.do_func("ceil"), "-Infinity", NEG_INFINITY
        yield self.do_func("ceil"), "0.0000001", 1
        yield self.do_func("ceil"), "-0.0000001", 0

    def test_floor(self):
        """Test that the floor() function works properly."""
        yield self.do_func("floor"), "null", 0
        yield self.do_func("floor"), "void 0", 0
        yield self.do_func("floor"), "true", 1
        yield self.do_func("floor"), "false", 0
        yield self.do_func("floor"), "'1.1'", 1
        yield self.do_func("floor"), "'-1.1'", -2
        yield self.do_func("floor"), "'0.1'", 0
        yield self.do_func("floor"), "'-0.1'", -1
        yield self.do_func("floor"), "0", 0
        # "j": -0,
        yield self.do_expr, "Math.floor(-0) == -Math.ceil(0)", True
        yield self.do_func("floor"), "Infinity", INFINITY
        yield (self.do_expr, "Math.floor(Infinity) == -Math.ceil(-Infinity)",
               True)
        yield self.do_func("floor"), "-Infinity", NEG_INFINITY
        yield self.do_func("floor"), "0.0000001", 0
        yield self.do_func("floor"), "-0.0000001", -1

    def test_trig(self):
        """Test the trigonometric functions."""
        yield self.do_func("cos"), "0", 1
        yield self.do_func("cos"), "Math.PI", -1
        yield self.do_func("sin"), "0", 0
        yield self.do_func("sin"), "Math.PI", 0
        yield self.do_func("tan"), "0", 0
        yield self.do_func("tan"), "Math.PI / 4", 1
        yield self.do_func("acos"), "1", 0
        yield self.do_func("asin"), "0", 0
        yield self.do_func("atan"), "0", 0
        yield self.do_expr, "Math.acos(0) == Math.PI / 2", True
        yield self.do_expr, "Math.acos(-1) == Math.PI", True
        yield self.do_expr, "Math.asin(1) == Math.PI / 2", True
        yield self.do_expr, "Math.asin(-1) == Math.PI / -2", True
        yield self.do_expr, "Math.atan(1) == Math.PI / 4", True
        yield self.do_expr, "Math.atan(Infinity) == Math.PI / 2", True
        yield self.do_expr, "Math.atan2(1, 0) == Math.PI / 2", True
        yield self.do_func("atan2"), "0, 0", 0
        yield self.do_expr, "Math.atan2(0, -1) == Math.PI", True

    def test_sqrt(self):
        """Test that the sqrt() function works properly."""
        yield self.do_func("sqrt"), "10", round(math.sqrt(10), 5)
        yield self.do_func("sqrt"), "4", 2
        yield self.do_func("sqrt"), "3 * 3 + 4 * 4", 5

    def test_round(self):
        """Test that the round() function works properly."""
        yield self.do_func("round"), "'0.99999'", 1
        yield self.do_func("round"), "0", 0
        yield self.do_func("round"), "0.49", 0
        yield self.do_func("round"), "0.5", 1
        yield self.do_func("round"), "0.51", 1
        yield self.do_func("round"), "-0.49", 0
        # JS rounds -0.5 toward +Infinity, unlike Python's round().
        yield self.do_func("round"), "-0.5", 0
        yield self.do_func("round"), "-0.51", -1
        yield self.do_expr, "Math.round(Infinity) == Infinity", True
        yield self.do_expr, "Math.round(-Infinity) == -Infinity", True

    def test_random(self):
        """Test that the random() function works "properly"."""
        # The analyzer stubs Math.random to a deterministic 0.5.
        yield self.do_func("random"), "", 0.5

    def test_pow(self):
        """Test that the pow() function works properly."""
        yield self.do_func("pow"), "true, false", 1
        yield self.do_func("pow"), "2, 32", 4294967296
        yield self.do_func("pow"), "1.0000001, Infinity", INFINITY
        yield self.do_func("pow"), "1.0000001, -Infinity", 0
        yield self.do_func("pow"), "123, 0", 1

    def test_log(self):
        """Test that the log() function works properly."""
        yield self.do_func("log"), "1", 0
        yield self.do_func("log"), "0", NEG_INFINITY
        yield self.do_func("log"), "Infinity", INFINITY
        # log of a negative number is NaN, surfaced here as None.
        yield self.do_func("log"), "-1", None

    def test_min_max(self):
        """Test that the min() and max() function works properly."""
        yield self.do_func("min"), "Infinity, -Infinity", NEG_INFINITY
        yield self.do_func("min"), "1, -1", -1
        yield self.do_func("max"), "Infinity, -Infinity", INFINITY
        yield self.do_func("max"), "1, -1", 1

    def test_math_infinity(self):
        """Test for known tracebacks regarding math."""
        self.run_script("""
        var x = Infinity;
        x >>= 10;
        var y = Infinity;
        var z = 10 >> y;
        """)
        # We really don't care about the output here.
| mattbasta/perfalator | tests/js/test_math.py | Python | bsd-3-clause | 6,365 |
# Copyright 2018 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, models, _
from odoo.exceptions import UserError
class ProductTemplate(models.Model):
    _inherit = "product.template"
    @api.constrains('uom_id')
    def _check_orderpoint_procure_uom(self):
        """Reject a UoM change when any reordering rule for one of this
        template's variants uses a procurement UoM from a different
        category (the quantities would no longer be convertible)."""
        for rec in self:
            # limit=1: one conflicting rule is enough to block the change.
            orderpoint = self.env['stock.warehouse.orderpoint'].search([
                ('procure_uom_id.category_id', '!=',
                 rec.uom_id.category_id.id),
                ('product_id', 'in', rec.product_variant_ids.ids)], limit=1)
            if orderpoint:
                raise UserError(
                    _("At least one reordering rule for this product has a "
                      "different Procurement unit of measure category."))
| Vauxoo/stock-logistics-warehouse | stock_orderpoint_uom/models/product_template.py | Python | agpl-3.0 | 869 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowcollector(base_resource) :
    """ Configuration for AppFlow collector resource.

    Generated NITRO SDK wrapper: the class name and the underscored
    attribute names are reflected into the NITRO wire payload by the
    service's payload formatter, so they must not be renamed.
    """
    def __init__(self) :
        # Backing fields for the wire attributes exposed via properties below.
        # ___count is only populated by count()/count_filtered() queries.
        self._name = ""
        self._ipaddress = ""
        self._port = 0
        self._netprofile = ""
        self._newname = ""
        self.___count = 0

    @property
    def name(self) :
        """Name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at
        (@), equals (=), and hyphen (-) characters.
        Only four collectors can be configured.
        The following requirement applies only to the NetScaler CLI:
        If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow collector" or 'my appflow collector').<br/>Minimum length = 1<br/>Maximum length = 127.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """Set the collector name (see the getter for the naming rules).<br/>Minimum length = 1<br/>Maximum length = 127
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def ipaddress(self) :
        """IPv4 address of the collector.
        """
        try :
            return self._ipaddress
        except Exception as e:
            raise e

    @ipaddress.setter
    def ipaddress(self, ipaddress) :
        """IPv4 address of the collector.
        """
        try :
            self._ipaddress = ipaddress
        except Exception as e:
            raise e

    @property
    def port(self) :
        """UDP port on which the collector listens.<br/>Default value: 4739.
        """
        try :
            return self._port
        except Exception as e:
            raise e

    @port.setter
    def port(self, port) :
        """UDP port on which the collector listens.<br/>Default value: 4739
        """
        try :
            self._port = port
        except Exception as e:
            raise e

    @property
    def netprofile(self) :
        """Netprofile to associate with the collector. The IP address defined in the profile is used as the source IP address for AppFlow traffic for this collector. If you do not set this parameter, the NetScaler IP (NSIP) address is used as the source IP address.<br/>Maximum length = 128.
        """
        try :
            return self._netprofile
        except Exception as e:
            raise e

    @netprofile.setter
    def netprofile(self, netprofile) :
        """Netprofile to associate with the collector. The IP address defined in the profile is used as the source IP address for AppFlow traffic for this collector. If you do not set this parameter, the NetScaler IP (NSIP) address is used as the source IP address.<br/>Maximum length = 128
        """
        try :
            self._netprofile = netprofile
        except Exception as e:
            raise e

    @property
    def newname(self) :
        """New name for the collector. Must begin with an ASCII alphabetic or underscore (_) character, and must
        contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at(@), equals (=), and hyphen (-) characters.
        The following requirement applies only to the NetScaler CLI:
        If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my appflow coll" or 'my appflow coll').<br/>Minimum length = 1.
        """
        try :
            return self._newname
        except Exception as e:
            raise e

    @newname.setter
    def newname(self, newname) :
        """New name for the collector (used by rename(); see the getter for the naming rules).<br/>Minimum length = 1
        """
        try :
            self._newname = newname
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """Convert a raw NITRO response into appflowcollector objects.

        Raises nitro_exception for any non-zero error code; error code 444
        additionally clears the client session before raising.
        """
        try :
            result = service.payload_formatter.string_to_resource(appflowcollector_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # 444 means the session expired; drop it so the caller can re-login.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.appflowcollector
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """Return the value of the object identifier argument (the name), or None.
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        """Use this API to add appflowcollector.

        Accepts a single resource or a list of resources.
        NOTE(review): for an empty list input, `result` is never assigned
        and the return raises UnboundLocalError — appears to be a latent
        bug in the generated SDK; confirm before relying on that path.
        """
        try :
            if type(resource) is not list :
                addresource = appflowcollector()
                addresource.name = resource.name
                addresource.ipaddress = resource.ipaddress
                addresource.port = resource.port
                addresource.netprofile = resource.netprofile
                return addresource.add_resource(client)
            else :
                if (resource and len(resource) > 0) :
                    addresources = [ appflowcollector() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        addresources[i].name = resource[i].name
                        addresources[i].ipaddress = resource[i].ipaddress
                        addresources[i].port = resource[i].port
                        addresources[i].netprofile = resource[i].netprofile
                result = cls.add_bulk_request(client, addresources)
            return result
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        """Use this API to delete appflowcollector.

        Accepts a name string, a resource object, or a list of either.
        """
        try :
            if type(resource) is not list :
                deleteresource = appflowcollector()
                if type(resource) !=  type(deleteresource):
                    # Treat a non-resource argument as the collector name.
                    deleteresource.name = resource
                else :
                    deleteresource.name = resource.name
                return deleteresource.delete_resource(client)
            else :
                if type(resource[0]) != cls :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ appflowcollector() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i]
                else :
                    if (resource and len(resource) > 0) :
                        deleteresources = [ appflowcollector() for _ in range(len(resource))]
                        for i in range(len(resource)) :
                            deleteresources[i].name = resource[i].name
                result = cls.delete_bulk_request(client, deleteresources)
            return result
        except Exception as e :
            raise e

    @classmethod
    def rename(cls, client, resource, new_name) :
        """Use this API to rename a appflowcollector resource.

        `resource` may be an appflowcollector object or a name string.
        """
        try :
            renameresource = appflowcollector()
            if type(resource) == cls :
                renameresource.name = resource.name
            else :
                renameresource.name = resource
            return renameresource.rename_resource(client,new_name)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, client, name="", option_="") :
        """Use this API to fetch all the appflowcollector resources that are configured on netscaler.

        With no name: fetch all. With a single name or a list of names:
        fetch the matching resource(s).
        """
        try :
            if not name :
                obj = appflowcollector()
                response = obj.get_resources(client, option_)
            else :
                if type(name) != cls :
                    if type(name) is not list :
                        obj = appflowcollector()
                        obj.name = name
                        response = obj.get_resource(client, option_)
                    else :
                        if name and len(name) > 0 :
                            response = [appflowcollector() for _ in range(len(name))]
                            obj = [appflowcollector() for _ in range(len(name))]
                            for i in range(len(name)) :
                                obj[i] = appflowcollector()
                                obj[i].name = name[i]
                                response[i] = obj[i].get_resource(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def get_filtered(cls, client, filter_) :
        """Use this API to fetch filtered set of appflowcollector resources.
        filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = appflowcollector()
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            return response
        except Exception as e :
            raise e

    @classmethod
    def count(cls, client) :
        """Use this API to count the appflowcollector resources configured on NetScaler.
        """
        try :
            obj = appflowcollector()
            option_ = options()
            option_.count = True
            response = obj.get_resources(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e

    @classmethod
    def count_filtered(cls, client, filter_) :
        """Use this API to count the filtered set of appflowcollector resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = appflowcollector()
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(client, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e :
            raise e
class appflowcollector_response(base_response) :
    """Container for a NITRO API response carrying appflowcollector resources.

    The original __init__ assigned ``self.appflowcollector`` twice — an empty
    list that was immediately overwritten at the end of the method. The dead
    first assignment is removed; the externally visible attributes and their
    final values are unchanged.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the resource list so the payload formatter can fill it in.
        self.appflowcollector = [appflowcollector() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowcollector.py | Python | apache-2.0 | 10,774 |
#!/usr/bin/env python
from nextfeed import nextfeed2
with nextfeed2(db='live') as (id, feed):
print "Got %s:%s." % (feed, id)
while True:
pass
| timverhoeven/python | test.py | Python | gpl-2.0 | 161 |
from datetime import date, datetime
from django.conf.urls import url
from django.conf.urls.i18n import i18n_patterns
from django.contrib.sitemaps import Sitemap, GenericSitemap, FlatPageSitemap, views
from django.http import HttpResponse
from django.utils import timezone
from django.views.decorators.cache import cache_page
from django.contrib.sitemaps.tests.base import I18nTestModel, TestModel
class SimpleSitemap(Sitemap):
    """Minimal sitemap: a single item at a fixed location."""
    changefreq = "never"
    priority = 0.5
    location = '/location/'
    # Evaluated once at import time, so lastmod reflects process start.
    lastmod = datetime.now()

    def items(self):
        # One anonymous item; location/lastmod come from the class attributes.
        return [object()]
class SimpleI18nSitemap(Sitemap):
    """Sitemap with i18n enabled: emits one URL per (item, language) pair."""
    changefreq = "never"
    priority = 0.5
    i18n = True

    def items(self):
        return I18nTestModel.objects.all()
class EmptySitemap(Sitemap):
    """Sitemap with no items, for exercising the empty-page error path."""
    changefreq = "never"
    priority = 0.5
    location = '/location/'

    def items(self):
        return []
class FixedLastmodSitemap(SimpleSitemap):
    # A fixed, naive datetime makes the Last-Modified output deterministic.
    lastmod = datetime(2013, 3, 13, 10, 0, 0)
class FixedLastmodMixedSitemap(Sitemap):
    """Sitemap mixing items that do and do not carry a lastmod attribute."""
    changefreq = "never"
    priority = 0.5
    location = '/location/'
    loop = 0

    def items(self):
        dated = TestModel()
        dated.lastmod = datetime(2013, 3, 13, 10, 0, 0)
        undated = TestModel()
        return [dated, undated]
class DateSiteMap(SimpleSitemap):
    # lastmod as a plain date (no time component).
    lastmod = date(2013, 3, 13)
class TimezoneSiteMap(SimpleSitemap):
    # Timezone-aware lastmod (fixed UTC-5 offset).
    lastmod = datetime(2013, 3, 13, 10, 0, 0, tzinfo=timezone.get_fixed_timezone(-300))
def testmodelview(request, id):
    """Stub view: a reverse() target for TestModel items; response body unused."""
    return HttpResponse()
# Registries mapping section name -> Sitemap class/instance; each is passed
# as the 'sitemaps' kwarg to the index/sitemap views wired up below.
simple_sitemaps = {
    'simple': SimpleSitemap,
}

simple_i18nsitemaps = {
    'simple': SimpleI18nSitemap,
}

empty_sitemaps = {
    'empty': EmptySitemap,
}

fixed_lastmod_sitemaps = {
    'fixed-lastmod': FixedLastmodSitemap,
}

fixed_lastmod__mixed_sitemaps = {
    'fixed-lastmod-mixed': FixedLastmodMixedSitemap,
}

generic_sitemaps = {
    # GenericSitemap built over the full TestModel queryset.
    'generic': GenericSitemap({'queryset': TestModel.objects.all()}),
}

flatpage_sitemaps = {
    'flatpages': FlatPageSitemap,
}
# URLconf exercising every sitemap variant above: plain/custom templates,
# empty sets, lastmod flavors, generic/flatpage sitemaps, and cached views.
urlpatterns = [
    url(r'^simple/index\.xml$', views.index, {'sitemaps': simple_sitemaps}),
    url(r'^simple/custom-index\.xml$', views.index,
        {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap_index.xml'}),
    url(r'^simple/sitemap-(?P<section>.+)\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
    url(r'^simple/sitemap\.xml$', views.sitemap, {'sitemaps': simple_sitemaps}),
    url(r'^simple/i18n\.xml$', views.sitemap, {'sitemaps': simple_i18nsitemaps}),
    url(r'^simple/custom-sitemap\.xml$', views.sitemap,
        {'sitemaps': simple_sitemaps, 'template_name': 'custom_sitemap.xml'}),
    url(r'^empty/sitemap\.xml$', views.sitemap, {'sitemaps': empty_sitemaps}),
    url(r'^lastmod/sitemap\.xml$', views.sitemap, {'sitemaps': fixed_lastmod_sitemaps}),
    url(r'^lastmod-mixed/sitemap\.xml$', views.sitemap, {'sitemaps': fixed_lastmod__mixed_sitemaps}),
    url(r'^lastmod/date-sitemap.xml$', views.sitemap,
        {'sitemaps': {'date-sitemap': DateSiteMap}}),
    url(r'^lastmod/tz-sitemap.xml$', views.sitemap,
        {'sitemaps': {'tz-sitemap': TimezoneSiteMap}}),
    url(r'^generic/sitemap\.xml$', views.sitemap, {'sitemaps': generic_sitemaps}),
    url(r'^flatpages/sitemap\.xml$', views.sitemap, {'sitemaps': flatpage_sitemaps}),
    # Cached variants (1-second cache) for the cache-interaction tests.
    url(r'^cached/index\.xml$', cache_page(1)(views.index),
        {'sitemaps': simple_sitemaps, 'sitemap_url_name': 'cached_sitemap'}),
    url(r'^cached/sitemap-(?P<section>.+)\.xml', cache_page(1)(views.sitemap),
        {'sitemaps': simple_sitemaps}, name='cached_sitemap')
]

# Language-prefixed route used by the i18n sitemap items.
urlpatterns += i18n_patterns(
    url(r'^i18n/testmodel/(?P<id>\d+)/$', testmodelview, name='i18n_testmodel'),
)
| olasitarska/django | django/contrib/sitemaps/tests/urls/http.py | Python | bsd-3-clause | 3,714 |
#
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
#
"""JavaScript 1.7 keywords"""
# Reserved words of the JavaScript 1.7 grammar, used to distinguish
# identifiers from keywords during tokenization. A set literal replaces the
# original set([...]) call (flake8-comprehensions C405): same contents, no
# throwaway list allocation.
keywords = {
    "break",
    "case", "catch", "const", "continue",
    "debugger", "default", "delete", "do",
    "else",
    "false", "finally", "for", "function",
    "if", "in", "instanceof",
    "let",
    "new", "null",
    "return",
    "switch",
    "this", "throw", "true", "try", "typeof",
    "var", "void",
    "yield",
    "while", "with",
}
| zynga/jasy | jasy/js/tokenize/Lang.py | Python | mit | 458 |
#!/usr/bin/env python
import argparse
import json
import time
import logging
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import RPi.GPIO as GPIO
# Command-line interface: the AWS IoT endpoint plus the three TLS
# credential files required for a mutually-authenticated connection.
parser = argparse.ArgumentParser(description='Lightbulb control unit.')
parser.add_argument('-e', '--endpoint', required=True, help='The AWS Iot endpoint.')
parser.add_argument('-r', '--rootCA', required=True, help='Root CA file path.')
parser.add_argument('-c', '--cert', required=True, help='Certificate file path.')
parser.add_argument('-k', '--key', required=True, help='Private key file path.')
args = parser.parse_args()
def lightbulbShadowCallback_Update(payload, responseStatus, token):
    """Shadow-update callback: print the outcome of an update request.

    The three statuses are mutually exclusive, so an elif chain is
    equivalent to the original run of independent ifs.
    """
    if responseStatus == "timeout":
        print("Update request " + token + " time out!")
    elif responseStatus == "accepted":
        payloadDict = json.loads(payload)
        print("~~~~~~~~~~~~~~~~~~~~~~~")
        print("Update request with token: " + token + " accepted!")
        print("property: " + str(payloadDict["state"]["desired"]["color"]))
        print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    elif responseStatus == "rejected":
        print("Update request " + token + " rejected!")
def lightBulbShadowCallback_Delete(payload, responseStatus, token):
    """Shadow-delete callback: print the outcome of a delete request."""
    if responseStatus == "timeout":
        print("Delete request " + token + " time out!")
    elif responseStatus == "accepted":
        print("~~~~~~~~~~~~~~~~~~~~~~~")
        print("Delete request with token: " + token + " accepted!")
        print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
    elif responseStatus == "rejected":
        print("Delete request " + token + " rejected!")
# Configure logging
# Verbose AWS IoT SDK output to stderr with timestamps; useful when
# debugging shadow traffic.
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTShadowClient
lightBulbShadowClient = AWSIoTMQTTShadowClient("controlUnitClient")
lightBulbShadowClient.configureEndpoint(args.endpoint, 8883)  # MQTT over TLS
lightBulbShadowClient.configureCredentials(args.rootCA, args.key, args.cert)

# AWSIoTMQTTShadowClient configuration
lightBulbShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
lightBulbShadowClient.configureConnectDisconnectTimeout(10)  # 10 sec
lightBulbShadowClient.configureMQTTOperationTimeout(5)  # 5 sec

# Connect to AWS IoT
lightBulbShadowClient.connect()

# Create a deviceShadow with persistent subscription
ControlUnit = lightBulbShadowClient.createShadowHandlerWithName("rpi-sense-hat", True)

# Delete shadow JSON doc
# Start from a clean shadow so stale desired state is not replayed.
ControlUnit.shadowDelete(lightBulbShadowCallback_Delete, 5)
# Update shadow
def updateShadow(color):
    """Push the desired lightbulb color into the device shadow."""
    payload = '{"state":{"desired":{"color":"%s"}}}' % color
    ControlUnit.shadowUpdate(payload, lightbulbShadowCallback_Update, 5)
# BCM pin numbers of the three color buttons.
RED = 9
GREEN = 10
BLUE = 11

GPIO.setmode(GPIO.BCM)
GPIO.setup(RED, GPIO.IN)
GPIO.setup(GREEN, GPIO.IN)
GPIO.setup(BLUE, GPIO.IN)

# Last button seen pressed; suppresses repeat shadow updates while held.
lastButton = None
while True:
    # input() == False means pressed — assumes active-low wiring with
    # pull-ups; TODO confirm against the actual circuit.
    if (lastButton != RED and GPIO.input(RED) == False):
        lastButton = RED
        updateShadow("red")
    if (lastButton != GREEN and GPIO.input(GREEN) == False):
        lastButton = GREEN
        updateShadow("green")
    if (lastButton != BLUE and GPIO.input(BLUE)== False):
        lastButton = BLUE
        updateShadow("blue")
    # 50 ms poll interval keeps CPU usage low while staying responsive.
    time.sleep(0.05);
| stephenjelfs/aws-iot-gddev2016 | controlUnit.py | Python | mit | 3,371 |
"""
homeassistant.components.mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MQTT component, using paho-mqtt.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/mqtt/
"""
import json
import logging
import os
import socket
import time
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util as util
from homeassistant.helpers import validate_config
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt"
MQTT_CLIENT = None
DEFAULT_PORT = 1883
DEFAULT_KEEPALIVE = 60
DEFAULT_QOS = 0
SERVICE_PUBLISH = 'publish'
EVENT_MQTT_MESSAGE_RECEIVED = 'MQTT_MESSAGE_RECEIVED'
DEPENDENCIES = []
REQUIREMENTS = ['paho-mqtt==1.1', 'jsonpath-rw==1.4.0']
CONF_BROKER = 'broker'
CONF_PORT = 'port'
CONF_CLIENT_ID = 'client_id'
CONF_KEEPALIVE = 'keepalive'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_CERTIFICATE = 'certificate'
ATTR_TOPIC = 'topic'
ATTR_PAYLOAD = 'payload'
ATTR_QOS = 'qos'
MAX_RECONNECT_WAIT = 300 # seconds
def publish(hass, topic, payload, qos=None):
    """ Send an MQTT message by invoking the publish service. """
    service_data = {ATTR_TOPIC: topic, ATTR_PAYLOAD: payload}
    if qos is not None:
        service_data[ATTR_QOS] = qos
    hass.services.call(DOMAIN, SERVICE_PUBLISH, service_data)
def subscribe(hass, topic, callback, qos=DEFAULT_QOS):
    """ Subscribe to a topic and relay matching messages to callback. """
    def _relay(event):
        """ Forward events whose topic matches the subscription. """
        event_topic = event.data[ATTR_TOPIC]
        if _match_topic(topic, event_topic):
            callback(event_topic, event.data[ATTR_PAYLOAD],
                     event.data[ATTR_QOS])

    hass.bus.listen(EVENT_MQTT_MESSAGE_RECEIVED, _relay)
    MQTT_CLIENT.subscribe(topic, qos)
def setup(hass, config):
    """ Get the MQTT protocol service.

    Validates the config, creates the global MQTT client, hooks the client
    lifecycle to Home Assistant start/stop events, and registers the
    'publish' service. Returns False if config is invalid or the broker
    is unreachable.
    """
    if not validate_config(config, {DOMAIN: ['broker']}, _LOGGER):
        return False

    conf = config[DOMAIN]

    broker = conf[CONF_BROKER]
    port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
    client_id = util.convert(conf.get(CONF_CLIENT_ID), str)
    keepalive = util.convert(conf.get(CONF_KEEPALIVE), int, DEFAULT_KEEPALIVE)
    username = util.convert(conf.get(CONF_USERNAME), str)
    password = util.convert(conf.get(CONF_PASSWORD), str)
    certificate = util.convert(conf.get(CONF_CERTIFICATE), str)

    # For cloudmqtt.com, secured connection, auto fill in certificate
    if certificate is None and 19999 < port < 30000 and \
       broker.endswith('.cloudmqtt.com'):
        certificate = os.path.join(os.path.dirname(__file__),
                                   'addtrustexternalcaroot.crt')

    global MQTT_CLIENT
    try:
        MQTT_CLIENT = MQTT(hass, broker, port, client_id, keepalive, username,
                           password, certificate)
    except socket.error:
        _LOGGER.exception("Can't connect to the broker. "
                          "Please check your settings and the broker "
                          "itself.")
        return False

    def stop_mqtt(event):
        """ Stop MQTT component. """
        MQTT_CLIENT.stop()

    def start_mqtt(event):
        """ Launch MQTT component when Home Assistant starts up. """
        MQTT_CLIENT.start()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mqtt)

    def publish_service(call):
        """ Handle MQTT publish service calls. """
        msg_topic = call.data.get(ATTR_TOPIC)
        payload = call.data.get(ATTR_PAYLOAD)
        qos = call.data.get(ATTR_QOS, DEFAULT_QOS)
        # Silently ignore malformed service calls missing topic or payload.
        if msg_topic is None or payload is None:
            return
        MQTT_CLIENT.publish(msg_topic, payload, qos)

    hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mqtt)

    hass.services.register(DOMAIN, SERVICE_PUBLISH, publish_service)

    return True
# pylint: disable=too-few-public-methods
class _JsonFmtParser(object):
    """ Extract a value from a JSON payload using a jsonpath expression. """
    def __init__(self, jsonpath):
        import jsonpath_rw
        self._expr = jsonpath_rw.parse(jsonpath)

    def __call__(self, payload):
        matches = self._expr.find(json.loads(payload))
        if matches:
            return matches[0].value
        # No match: hand the raw payload back unchanged.
        return payload
# pylint: disable=too-few-public-methods
class FmtParser(object):
    """ Dispatch payload parsing based on a format specifier.

    A 'json:<path>' format delegates to the jsonpath parser; anything
    else (including None) leaves the payload untouched.
    """
    def __init__(self, fmt):
        if fmt and fmt.startswith('json:'):
            self._parse = _JsonFmtParser(fmt[5:])
        else:
            self._parse = lambda payload: payload

    def __call__(self, payload):
        return self._parse(payload)
# This is based on one of the paho-mqtt examples:
# http://git.eclipse.org/c/paho/org.eclipse.paho.mqtt.python.git/tree/examples/sub-class.py
# pylint: disable=too-many-arguments
class MQTT(object):
    """ Implements messaging service for MQTT.

    Thin wrapper around a paho-mqtt client. State shared with the
    module-level paho callbacks via user_data_set():
      topics:   subscribed topic -> granted qos (None while the subscribe
                is still in flight)
      progress: in-flight message id -> topic awaiting (un)subscribe ack
    """
    def __init__(self, hass, broker, port, client_id, keepalive, username,
                 password, certificate):
        import paho.mqtt.client as mqtt

        self.userdata = {
            'hass': hass,
            'topics': {},
            'progress': {},
        }

        if client_id is None:
            self._mqttc = mqtt.Client()
        else:
            self._mqttc = mqtt.Client(client_id)

        self._mqttc.user_data_set(self.userdata)

        if username is not None:
            self._mqttc.username_pw_set(username, password)
        if certificate is not None:
            self._mqttc.tls_set(certificate)

        self._mqttc.on_subscribe = _mqtt_on_subscribe
        self._mqttc.on_unsubscribe = _mqtt_on_unsubscribe
        self._mqttc.on_connect = _mqtt_on_connect
        self._mqttc.on_disconnect = _mqtt_on_disconnect
        self._mqttc.on_message = _mqtt_on_message

        # Blocking connect; raises socket.error on failure (handled in setup).
        self._mqttc.connect(broker, port, keepalive)

    def publish(self, topic, payload, qos):
        """ Publish a MQTT message. """
        self._mqttc.publish(topic, payload, qos)

    def start(self):
        """ Run the MQTT client (background network loop thread). """
        self._mqttc.loop_start()

    def stop(self):
        """ Stop the MQTT client. """
        self._mqttc.loop_stop()

    def subscribe(self, topic, qos):
        """ Subscribe to a topic (no-op if already subscribed/pending). """
        if topic in self.userdata['topics']:
            return
        result, mid = self._mqttc.subscribe(topic, qos)
        _raise_on_error(result)
        # Track the pending subscription until the broker acks it.
        self.userdata['progress'][mid] = topic
        self.userdata['topics'][topic] = None

    def unsubscribe(self, topic):
        """ Unsubscribe from topic. """
        result, mid = self._mqttc.unsubscribe(topic)
        _raise_on_error(result)
        self.userdata['progress'][mid] = topic
def _mqtt_on_message(mqttc, userdata, msg):
    """ Message callback: re-fire the MQTT message on the HA event bus. """
    event_data = {
        ATTR_TOPIC: msg.topic,
        ATTR_QOS: msg.qos,
        ATTR_PAYLOAD: msg.payload.decode('utf-8'),
    }
    userdata['hass'].bus.fire(EVENT_MQTT_MESSAGE_RECEIVED, event_data)
def _mqtt_on_connect(mqttc, userdata, flags, result_code):
    """ On connect, resubscribe to all topics we were subscribed to. """
    if result_code != 0:
        reasons = {
            1: 'Incorrect protocol version',
            2: 'Invalid client identifier',
            3: 'Server unavailable',
            4: 'Bad username or password',
            5: 'Not authorised',
        }
        _LOGGER.error('Unable to connect to the MQTT broker: %s',
                      reasons.get(result_code, 'Unknown reason'))
        mqttc.disconnect()
        return

    old_topics = userdata['topics']
    userdata['topics'] = {}
    userdata['progress'] = {}

    for topic, qos in old_topics.items():
        # qos is None while a subscribe is still in flight; skip those.
        if qos is not None:
            mqttc.subscribe(topic, qos)
def _mqtt_on_subscribe(mqttc, userdata, mid, granted_qos):
""" Called when subscribe successful. """
topic = userdata['progress'].pop(mid, None)
if topic is None:
return
userdata['topics'][topic] = granted_qos
def _mqtt_on_unsubscribe(mqttc, userdata, mid, granted_qos):
""" Called when subscribe successful. """
topic = userdata['progress'].pop(mid, None)
if topic is None:
return
userdata['topics'].pop(topic, None)
def _mqtt_on_disconnect(mqttc, userdata, result_code):
    """ Called when being disconnected.

    On an unexpected disconnect, retry forever with exponential backoff
    capped at MAX_RECONNECT_WAIT seconds.
    """
    # When disconnected because of calling disconnect()
    if result_code == 0:
        return

    tries = 0
    wait_time = 0

    while True:
        try:
            if mqttc.reconnect() == 0:
                _LOGGER.info('Successfully reconnected to the MQTT server')
                break
        except socket.error:
            # Broker still unreachable; fall through to the backoff sleep.
            pass

        wait_time = min(2**tries, MAX_RECONNECT_WAIT)
        _LOGGER.warning(
            'Disconnected from MQTT (%s). Trying to reconnect in %ss',
            result_code, wait_time)
        # It is ok to sleep here as we are in the MQTT thread.
        time.sleep(wait_time)
        tries += 1
def _raise_on_error(result):
""" Raise error if error result. """
if result != 0:
raise HomeAssistantError('Error talking to MQTT: {}'.format(result))
def _match_topic(subscription, topic):
""" Returns if topic matches subscription. """
if subscription.endswith('#'):
return (subscription[:-2] == topic or
topic.startswith(subscription[:-1]))
sub_parts = subscription.split('/')
topic_parts = topic.split('/')
return (len(sub_parts) == len(topic_parts) and
all(a == b for a, b in zip(sub_parts, topic_parts) if a != '+'))
| badele/home-assistant | homeassistant/components/mqtt/__init__.py | Python | mit | 9,508 |
##############################################
#
# ChriCar Beteiligungs- und Beratungs- GmbH
# created 2009-07-11 12:22:09+02
##############################################
import room
| VitalPet/c2c-rd-addons | chricar_room/__init__.py | Python | agpl-3.0 | 186 |
#Copyright (c) 2011-2012 Litle & Co.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
class TestAuth(unittest.TestCase):
    """Functional authorization tests against the Litle/Vantiv sandbox.

    These hit the remote service configured in `config`, so they need
    network access. The deprecated `assertEquals` alias (removed in
    Python 3.12) is replaced by `assertEqual` throughout; all request
    payloads and expected responses are unchanged.
    """

    def testSimpleAuthWithCard(self):
        """A plain card authorization is approved with response code 000."""
        authorization = litleXmlFields.authorization()
        authorization.orderId = '1234'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'

        card = litleXmlFields.cardType()
        card.number = "4100000000000000"
        card.expDate = "1210"
        card.type = 'VI'
        authorization.card = card

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        self.assertEqual("000", response.response)

    def testSimpleAuthWithPaypal(self):
        """A PayPal-funded authorization is approved."""
        authorization = litleXmlFields.authorization()
        authorization.orderId = '12344'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'

        paypal = litleXmlFields.payPal()
        paypal.payerId = "1234"
        paypal.token = "1234"
        paypal.transactionId = '123456'
        authorization.paypal = paypal

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        self.assertEqual("Approved", response.message)

    def testSimpleAuthWithSecondaryAmountAndApplepay(self):
        """Apple Pay with a secondary amount: sandbox declines with
        'Insufficient Funds' and echoes the transaction amount."""
        authorization = litleXmlFields.authorization()
        authorization.orderId = '1234'
        authorization.amount = 110
        authorization.orderSource = 'ecommerce'
        authorization.secondaryAmount = '10'

        applepay = litleXmlFields.applepayType()
        applepay.data = "4100000000000000"
        applepay.signature = "sign"
        applepay.version = '1'

        header = litleXmlFields.applepayHeaderType()
        header.applicationData = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
        header.ephemeralPublicKey = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
        header.publicKeyHash = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
        header.transactionId = '1024'
        applepay.header = header
        authorization.applepay = applepay

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        self.assertEqual("Insufficient Funds", response.message)
        self.assertEqual(110, response.applepayResponse.transactionAmount)

    def testPosWithoutCapabilityAndEntryMode(self):
        """POS data missing capability/entryMode is rejected client-side."""
        authorization = litleXmlFields.authorization()
        authorization.orderId = '123456'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'

        pos = litleXmlFields.pos()
        pos.cardholderId = "pin"
        authorization.pos = pos

        card = litleXmlFields.cardType()
        card.number = "4100000000000002"
        card.expDate = "1210"
        card.type = 'VI'
        card.cardValidationNum = '1213'
        authorization.card = card

        litle = litleOnlineRequest(config)
        with self.assertRaises(Exception):
            litle.sendRequest(authorization)

    def testAccountUpdate(self):
        """The 4100100... test card triggers an account-updater response
        echoing the original card number."""
        authorization = litleXmlFields.authorization()
        authorization.orderId = '12344'
        authorization.amount = 106
        authorization.orderSource = 'ecommerce'

        card = litleXmlFields.cardType()
        card.number = "4100100000000000"
        card.expDate = "1210"
        card.type = 'VI'
        card.cardValidationNum = '1213'
        authorization.card = card

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        self.assertEqual("4100100000000000", response.accountUpdater.originalCardInfo.number)

    def testTrackData(self):
        """A magstripe (track data) retail authorization is approved."""
        authorization = litleXmlFields.authorization()
        authorization.id = 'AX54321678'
        authorization.reportGroup = 'RG27'
        authorization.orderId = '12z58743y1'
        authorization.amount = 12522
        authorization.orderSource = 'retail'

        billToAddress = litleXmlFields.contact()
        billToAddress.zip = '95032'
        authorization.billToAddress = billToAddress

        card = litleXmlFields.cardType()
        card.track = "%B40000001^Doe/JohnP^06041...?;40001=0604101064200?"
        authorization.card = card

        pos = litleXmlFields.pos()
        pos.capability = 'magstripe'
        pos.entryMode = 'completeread'
        pos.cardholderId = 'signature'
        authorization.pos = pos

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        self.assertEqual('Approved', response.message)

    def testListOfTaxAmounts(self):
        """Enhanced data with multiple detailTax entries is accepted."""
        authorization = litleXmlFields.authorization()
        authorization.id = '12345'
        authorization.reportGroup = 'Default'
        authorization.orderId = '67890'
        authorization.amount = 10000
        authorization.orderSource = 'ecommerce'

        enhanced = litleXmlFields.enhancedData()
        dt1 = litleXmlFields.detailTax()
        dt1.taxAmount = 100
        enhanced.detailTax.append(dt1)
        dt2 = litleXmlFields.detailTax()
        dt2.taxAmount = 200
        enhanced.detailTax.append(dt2)
        authorization.enhancedData = enhanced

        card = litleXmlFields.cardType()
        card.number = '4100000000000001'
        card.expDate = '1215'
        card.type = 'VI'
        authorization.card = card

        litleXml = litleOnlineRequest(config)
        response = litleXml.sendRequest(authorization)
        self.assertEqual('Approved', response.message)
def suite():
    """Build the test suite for TestAuth.

    The original created an empty TestSuite that was immediately
    overwritten by the loader's result; the dead assignment is removed.
    """
    return unittest.TestLoader().loadTestsFromTestCase(TestAuth)
if __name__ =='__main__':
    # Run the functional tests when this module is executed directly.
    unittest.main()
from __future__ import print_function
'''
Parses the files in the input directory using bllip parser.
'''
import logging
import sys
import argparse
import os.path
import glob
from bllipparser import RerankingParser
from bllipbioc.bllip_wrapper import init_model, parse_bioc
from bioc import *
__author__ = 'Yifan Peng'
def parse_argv():
    """Parse command-line arguments for the batch parser.

    :return: list of input path names (may contain shell glob patterns)
    :rtype: list
    """
    arg_parser = argparse.ArgumentParser(
        description="Parses the input files using bllip parser.")
    arg_parser.add_argument("-v", "--verbose", action="store_true",
                            help="print debug information")
    arg_parser.add_argument("pathnames", nargs='+',
                            help="input files (support pattens by the Unix "
                                 "shell)")
    parsed = arg_parser.parse_args()
    if parsed.verbose:
        logging.basicConfig(level=logging.DEBUG)
    logging.info('Pathnames: %s' % parsed.pathnames)
    return parsed.pathnames
if __name__ == "__main__":
    # Batch driver: parse every BioC file matched by the command-line
    # glob patterns with the BLLIP reranking parser.
    pathnames = parse_argv()
    model_dir = init_model()
    logging.info('loading model %s ...' % model_dir)
    rrp = RerankingParser.from_unified_model_dir(model_dir)
    for pathname in pathnames:
        # Expand shell-style patterns ourselves (they may have been quoted).
        inputfilenames = glob.glob(pathname)
        for inputfilename in inputfilenames:
            if not os.path.isfile(inputfilename):
                sys.stderr.write('Cannot find input file: %s\n' % inputfilename)
                continue
            print('Process file: %s' % inputfilename)
            basename = os.path.basename(inputfilename)
            inputdir = os.path.dirname(inputfilename)
            filename, file_extension = os.path.splitext(basename)
            # Output is written next to the input as <name>-ptb.xml.
            outputfilename = os.path.join(inputdir, filename + '-ptb.xml')
            # create lck
            # A .lck marker file signals that output is being produced.
            lckfilename = outputfilename + ".lck"
            logging.info('create lck file: %s' % lckfilename)
            open(lckfilename, 'w').close()
            # parse file
            # `parse` comes from `from bioc import *` and loads a collection.
            collection = parse(inputfilename)
            collection.clear_infons()
            collection.infons['tool'] = 'Bllip'
            collection.infons['process'] = 'parse'
            parse_bioc(rrp, collection)
            collection.tobiocfile(outputfilename)
            # remove lck
            logging.info('remove lck file: %s' % lckfilename)
            os.remove(lckfilename)
| yfpeng/pengyifan-bllip | bllip_biocbatch.py | Python | bsd-3-clause | 2,342 |
import os
import sys
# Add parent directory to path to make test aware of other modules
pardir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(pardir)
from extras.data_audit_wrapper import IP_verified
from safe.common.testing import DATADIR, UNITDATA
if __name__ == '__main__':
    # Run the intellectual-property audit over both data sets that ship
    # with InaSAFE.
    # Verify external data provided with InaSAFE
    IP_verified(DATADIR)
    # Verify bundled test data
    IP_verified(UNITDATA)
| ingenieroariel/inasafe | scripts/data_IP_audit.py | Python | gpl-3.0 | 446 |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from bottle import Bottle
from .context import Context
class Server(Bottle):
    """
    Serves web requests
    """

    def __init__(self,
                 context=None,
                 httpd=None,
                 route=None,
                 routes=None,
                 check=False):
        """
        Serves web requests

        :param context: global context for this process
        :type context: Context

        :param httpd: actual WSGI server

        :param route: a route to add to this instance
        :type route: Route

        :param routes: multiple routes to add to this instance
        :type routes: list of Route

        :param check: True to check configuration settings
        :type check: bool
        """
        self.context = Context() if context is None else context
        self.httpd = Bottle() if httpd is None else httpd
        self._routes = {}
        if route is not None:
            self.add_route(route)
        if routes is not None:
            self.add_routes(routes)
        if check:
            self.configure()

    def configure(self, settings=None):
        """
        Checks settings of the server

        :param settings: a dictionary with some statements for this instance
        :type settings: dict

        This function reads key ``server`` and below, and update
        the context accordingly::

            >>>server.configure({'server': {
                   'binding': '10.4.2.5',
                   'port': 5000,
                   'debug': True,
                   }})

        This can also be written in a more compact form::

            >>>server.configure({'server.port': 5000})

        """
        # Fixed: the original used a mutable default (``settings={}``),
        # which is shared across calls; ``None`` sentinel avoids that.
        if settings is None:
            settings = {}
        self.context.apply(settings)
        self.context.check('server.binding', '0.0.0.0')
        self.context.check('server.url', 'http://no.server', filter=True)
        self.context.check('server.port', 8080)
        self.context.check('server.debug', False)

    @property
    def routes(self):
        """
        Lists all routes

        :return: a list of route paths, or []

        Example::

            >>>server.routes
            ['/hello', '/world']

        """
        return sorted(self._routes.keys())

    def route(self, route):
        """
        Gets one route by path

        :param route: the path of the route to look up
        :return: the related route, or None
        """
        return self._routes.get(route, None)

    def add_routes(self, items):
        """
        Adds web routes

        :param items: a list of additional routes
        :type items: list of Route
        """
        for item in items:
            self.add_route(item)

    def add_route(self, item):
        """
        Adds one web route

        :param item: one additional route
        :type item: Route
        """
        self._routes[item.route] = item
        # Wire every HTTP verb of this path to the route object's handlers.
        self.httpd.route(item.route, method="GET", callback=item.get)
        self.httpd.route(item.route, method="POST", callback=item.post)
        self.httpd.route(item.route, method="PUT", callback=item.put)
        self.httpd.route(item.route, method="DELETE", callback=item.delete)

    def run(self):
        """
        Serves requests
        """
        logging.info(u'Starting web server')
        for route in self.routes:
            logging.debug(u'- {}'.format(route))
        # Fixed: read 'server.binding' -- the key that configure()
        # establishes; the original read the never-set 'server.address'.
        # NOTE(review): the fallback port here is 80 while configure()
        # defaults to 8080 -- confirm which default is intended.
        self.httpd.run(host=self.context.get('server.binding', '0.0.0.0'),
                       port=self.context.get('server.port', 80),
                       debug=self.context.get('server.debug', False),
                       server='paste')
        logging.info(u'Web server has been stopped')
| bernard357/shellbot | shellbot/server.py | Python | apache-2.0 | 4,414 |
import sqlite3
import os
import time
import socket
import pythonwhois as pywhois
def findAll(s, ch):
    """Return the indices of every occurrence of character *ch* in *s*."""
    positions = []
    for index, letter in enumerate(s):
        if letter == ch:
            positions.append(index)
    return positions
# Driver: look up the owning company of each domain via WHOIS and record
# the domain -> company mapping in SQLite.  (Python 2 script.)
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
filename=os.path.join(res_dir,'domains_owners')
fhand=open(filename)
#with open(filename) as fhand:
db = os.path.join(res_dir,'imagesFirst.sqlite')
print db
#with sqlite3.connect(db) as conn:
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS Companies')
cur.execute('DROP TABLE IF EXISTS DomainCompany')
cur.execute('CREATE TABLE IF NOT EXISTS Companies (id INTEGER PRIMARY KEY AUTOINCREMENT, company TEXT UNIQUE, country CHARACTER(2))')
# NOTE(review): this creates table 'Domain2Company', but every later
# statement reads/writes 'DomainCompany' -- one of the two names looks
# wrong; confirm against the actual schema.
cur.execute('CREATE TABLE IF NOT EXISTS Domain2Company (domainTwoPart_id INTEGER, company_id INTEGER, \
FOREIGN KEY (domainTwoPart_id) REFERENCES DomainsTwoPart(id), FOREIGN KEY (company_id) REFERENCES Companies(id))')
# Resume support: skip input rows already recorded in a previous run.
cur.execute('SELECT count(domain_id) FROM DomainCompany')
last_id = cur.fetchone()[0]
print last_id
#max_id = 20
firstLine = True
i=0
for line in fhand:
    # Skip the CSV header row.
    if firstLine:
        firstLine = False
        continue
    i+=1; print i
    #if i>max_id: break
    if i<=last_id: continue
    splits = line.rstrip().split(",")
    count = splits[0]; domain = splits[1]; domain_id = splits[2]
    # NOTE(review): `count` is a string here, so `count == 1` can never be
    # true (str/int compare unequal in Python 2); '1' was probably intended.
    if count == 1: break
    company = None
    country = None
    # Walk up the domain hierarchy (dropping the leading label) until WHOIS
    # yields a tech contact, or fewer than two labels remain.
    while True:
        try:
            w=pywhois.get_whois(domain)
            # Bare except below: any missing key in the WHOIS response
            # triggers a retry one label up.
            try:
                company = w['contacts']['tech']['organization']
                country = w['contacts']['tech']['country']
                break
            except:
                points = findAll(domain,'.')
                no_points = len(points)
                if no_points<2: break
                point = points[0]
                domain = domain[point+1:]
        except Exception as e:
            print "Exception: ", e
            break
    # Throttle between WHOIS queries.
    time.sleep(5)
    if company is None: company = 'None'
    # Reuse an existing company row if present, otherwise insert one.
    cur.execute('SELECT id FROM Companies WHERE company = ?',(company,))
    data=cur.fetchone()
    if data is not None:
        company_id=data[0]
        cur.execute('INSERT INTO DomainCompany (domain_id,company_id) VALUES (?,?)',(domain_id,company_id))
    else:
        cur.execute('INSERT INTO Companies (company,country) VALUES (?,?)',(company,country))
        company_id = cur.lastrowid
        cur.execute('INSERT INTO DomainCompany (domain_id,company_id) VALUES (?,?)',(domain_id,company_id))
    # Commit periodically so progress survives a crash.
    if i % 100 == 0: conn.commit()
conn.commit()
conn.close()
fhand.close()
'''
import whois
import socket
domain = 'hm.baidu.com'
while True:
print domain
try:
print 1
w=whois.whois(domain)
except whois.parser.PywhoisError:
print 2
points = findAll(domain,'.')
no_points = len(points)
print 'no_points:',no_points
if no_points<2: break
point = points[0]
domain = domain[point+1:]
except socket.error:
print 3
w={}
break
with open(filename) as fhand:
db = os.path.join(res_dir,'images.sqlite')
conn = sqlite3.connect(db)
with conn:
cur = conn.cursor()
cur.execute('DROP TABLE IF EXISTS Companies')
cur.execute('DROP TABLE IF EXISTS DomainCompany')
cur.execute('CREATE TABLE IF NOT EXISTS Companies (id INTEGER PRIMARY KEY AUTOINCREMENT, company TEXT UNIQUE, country CHARACTER(2))')
cur.execute('CREATE TABLE IF NOT EXISTS DomainCompany (domain_id INTEGER, company_id INTEGER, \
FOREIGN KEY (domain_id) REFERENCES Domains(id), FOREIGN KEY (company_id) REFERENCES Companies(id))')
firstLine = True
for line in fhand:
if firstLine:
firstLine = False
continue
splits = line.split(",")
if len(splits)<4: continue
domain_id = splits[2]
company = splits[3].split(' - ')[-1]
country = splits[4]
print company
cur.execute('SELECT id FROM Companies WHERE company = ?',(company,))
data=cur.fetchone()
if data is not None:
company_id=data[0]
print company_id
cur.execute('INSERT INTO DomainCompany (domain_id,company_id) VALUES (?,?)',(domain_id,company_id))
continue
else:
print company, country
cur.execute('INSERT INTO Companies (company,country) VALUES (?,?)',(company,country))
company_id = cur.lastrowid
cur.execute('INSERT INTO DomainCompany (domain_id,company_id) VALUES (?,?)',(domain_id,company_id))
'''
| natasasdj/OpenWPM | analysis/13_companies_sqlite.py | Python | gpl-3.0 | 4,762 |
import scrapy
from scrapy import Request
class CWACResultsSpider(scrapy.Spider):
    """Scrape candidate-wise results for all 60 assembly constituencies."""

    name = "cw-all-candidates"

    def start_requests(self):
        # One request per constituency, numbered 1..60.  When the spider is
        # run with endpoint=archive, hit the Wayback Machine snapshot.
        for seat in range(1, 61):
            if self.endpoint == 'archive':
                url = 'https://web.archive.org/web/20160823114553/http://eciresults.nic.in/ConstituencywiseS03%s.htm?ac=%s' % (seat, seat)
            else:
                url = 'http://eciresults.nic.in/ConstituencywiseS14%s.htm?ac=%s' % (seat, seat)
            yield Request(url, callback=self.parse)

    def parse(self, response):
        rows = response.css('#div1 > table > tr')
        # Row 0 holds "<state> - <constituency>"; row 1 holds the status.
        header = rows[0].css('td::text').extract_first().split(' - ')
        status = rows[1].css('td::text').extract_first()
        # Candidate rows start at index 3; the last row is a footer.
        for row in rows[3:len(rows) - 1]:
            cells = row.css('td::text')
            yield {
                'state': header[0],
                'constituency': header[1],
                'candidate': cells[0].extract(),
                'party': cells[1].extract(),
                'votes': cells[2].extract(),
                'status': status,
            }
class CWTrendsSpider(scrapy.Spider):
    """Scrape constituency-wise leading/trailing trend pages."""

    name = "cw-trends"

    def start_requests(self):
        # The index page has no numeric suffix; pages 1..5 follow it.
        suffixes = [''] + [str(n) for n in range(1, 6)]
        for suffix in suffixes:
            if self.endpoint == 'archive':
                url = 'https://web.archive.org/web/20160823114553/http://eciresults.nic.in/StatewiseS03%s.htm' % suffix
            else:
                url = 'http://eciresults.nic.in/StatewiseS14%s.htm' % suffix
            yield Request(url, callback=self.parse)

    def parse(self, response):
        rows = response.css('#divACList > table > tr')
        # Data rows start at index 4; the last row is a footer.
        for row in rows[4:len(rows) - 1]:
            cells = row.css('td::text')
            yield {
                'constituency': cells[0].extract(),
                'const. no.': cells[1].extract(),
                'leading candidate': cells[2].extract(),
                'leading party': cells[3].extract(),
                'trailing candidate': cells[4].extract(),
                'trailing party': cells[5].extract(),
                'margin': cells[6].extract(),
                'status': cells[7].extract(),
            }
| factly/election-results-2017 | manipur/manipur/spiders/results_spider.py | Python | mit | 2,491 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single user's profile page, looked up by username."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = 'username'
    slug_url_kwarg = 'username'
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the authenticated user to their own detail page."""
    permanent = False  # temporary (302) redirect

    def get_redirect_url(self):
        # Always target the detail page of the requesting user.
        return reverse('users:detail',
                       kwargs={'username': self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own name and phone."""
    fields = ['name', 'phone']

    # we already imported User in the view code above, remember?
    model = User

    # send the user back to their own page after a successful update
    def get_success_url(self):
        return reverse('users:detail',
                       kwargs={'username': self.request.user.username})

    def get_object(self):
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all users (login required)."""
    model = User
    # These next two lines tell the view to index lookups by username
    # NOTE(review): slug settings are DetailView machinery and appear to be
    # unused by ListView -- confirm whether they can be removed.
    slug_field = 'username'
    slug_url_kwarg = 'username'
| bilgorajskim/soman | server/soman/users/views.py | Python | mit | 1,466 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
from django.core.management.base import CommandError
from pootle_app.management.commands import PootleCommand
from pootle_app.models import Directory
from pootle_language.models import Language
from pootle_project.models import Project
from pootle_translationproject.models import TranslationProject
# Maps a model class name to the tuple of attribute names emitted for each
# instance of that class by Command.dumped() in this module.
DUMPED = {
    'TranslationProject': ('pootle_path', 'disabled'),
    'Store': ('translation_project', 'pootle_path', 'name', 'state'),
    'Directory': ('name', 'parent', 'pootle_path'),
    'Unit': ('source', 'target', 'target_wordcount',
             'developer_comment', 'translator_comment', 'locations',
             'isobsolete', 'isfuzzy', 'istranslated'),
    'UnitSource': ('source_wordcount', ),
    'Suggestion': ('target_f', 'user_id'),
    'Language': ('code', 'fullname', 'pootle_path'),
    'Project': ('code', 'fullname', 'checkstyle',
                'source_language', 'ignoredfiles',
                'screenshot_search_prefix', 'disabled')
}
class Command(PootleCommand):
    """Management command that dumps Pootle data or statistics as text."""
    help = "Dump data."

    def add_arguments(self, parser):
        """Register --stats, --data and --stop-level on top of base options."""
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--stats',
            action='store_true',
            dest='stats',
            default=False,
            help='Dump stats',
        )
        parser.add_argument(
            '--data',
            action='store_true',
            dest='data',
            default=False,
            help='Data all data',
        )
        parser.add_argument(
            '--stop-level',
            action='store',
            dest='stop_level',
            default=-1,
            type=int,
            help="Depth of data to retrieve",
        )

    def handle_all(self, **options):
        """Entry point when no project/language filter was given."""
        if not self.projects and not self.languages:
            if options['stats']:
                self.dump_stats(stop_level=options['stop_level'])
                return
            if options['data']:
                self.dump_all(stop_level=options['stop_level'])
                return
            raise CommandError("Set --data or --stats option.")
        else:
            super(Command, self).handle_all(**options)

    def handle_translation_project(self, tp, **options):
        """Dump a single translation project (either stats or raw data)."""
        if options['stats']:
            res = {}
            self._dump_stats(tp.directory, res,
                             stop_level=options['stop_level'])
            return
        if options['data']:
            self._dump_item(tp.directory, 0, stop_level=options['stop_level'])
            return
        raise CommandError("Set --data or --stats option.")

    def dump_stats(self, stop_level):
        """Dump statistics for every project."""
        res = {}
        for prj in Project.objects.all():
            self._dump_stats(prj, res, stop_level=stop_level)

    def _dump_stats(self, item, res, stop_level):
        """Recursively collect and print stats for *item* and its children.

        ``stop_level`` limits the recursion depth; -1 means unlimited.
        """
        key = item.pootle_path
        item.initialize_children()
        if stop_level != 0 and item.children:
            if stop_level > 0:
                stop_level = stop_level - 1
            for child in item.children:
                self._dump_stats(child, res,
                                 stop_level=stop_level)
        res[key] = (item.data_tool.get_stats(include_children=False))
        # The output row references the latest submission/unit by id
        # when those entries are present in the stats dict.
        if res[key]['last_submission']:
            if 'id' in res[key]['last_submission']:
                last_submission_id = res[key]['last_submission']['id']
            else:
                last_submission_id = None
        else:
            last_submission_id = None
        if res[key]['last_created_unit']:
            if 'id' in res[key]['last_created_unit']:
                last_updated_id = res[key]['last_created_unit']['id']
            else:
                last_updated_id = None
        else:
            last_updated_id = None
        out = u"%s %s,%s,%s,%s,%s,%s,%s" % \
            (key, res[key]['total'], res[key]['translated'],
             res[key]['fuzzy'], res[key]['suggestions'],
             res[key]['critical'],
             last_submission_id, last_updated_id)
        self.stdout.write(out)

    def dump_all(self, stop_level):
        """Dump raw data for the whole directory tree."""
        root = Directory.objects.root
        self._dump_item(root, 0, stop_level=stop_level)

    def _dump_item(self, item, level, stop_level):
        """Print *item* plus type-specific extras, then recurse into children."""
        self.stdout.write(self.dumped(item))
        if isinstance(item, Directory):
            pass
        elif isinstance(item, Language):
            self.stdout.write(self.dumped(item.language))
        elif isinstance(item, TranslationProject):
            # NOTE(review): the bare except silently swallows any failure
            # here, and accessing ``item.translationproject`` on what is
            # already a TranslationProject looks suspicious -- confirm the
            # intended attribute.
            try:
                self.stdout.write(self.dumped(item.translationproject))
            except:
                pass
        elif isinstance(item, Project):
            pass
            # self.stdout.write(self.dumped(item))
        else:
            # item should be a Store
            for unit in item.units:
                self.stdout.write(self.dumped(unit))
                for sg in unit.get_suggestions():
                    self.stdout.write(self.dumped(sg))
        if stop_level != level:
            item.initialize_children()
            if item.children:
                for child in item.children:
                    self._dump_item(child, level + 1, stop_level=stop_level)

    def dumped(self, item):
        """Render one model instance as ``id:Type<TAB>field=value...``
        using the attribute lists declared in DUMPED."""
        def get_param(param):
            # Call zero-argument callables so methods and plain
            # attributes are both supported; newlines are escaped so each
            # record stays on one output line.
            p = getattr(item, param)
            res = p() if callable(p) else p
            res = u"%s" % res
            res = res.replace('\n', '\\n')
            return (param, res)
        return u"%d:%s\t%s" % \
            (
                item.id,
                item._meta.object_name,
                "\t".join(
                    u"%s=%s" % (k, v)
                    for k, v in map(get_param, DUMPED[item._meta.object_name])
                )
            )
| unho/pootle | pootle/apps/pootle_app/management/commands/dump.py | Python | gpl-3.0 | 6,105 |
from __future__ import absolute_import
from . import finders
from . import teams
from . import players
from . import boxscores
from . import winProb
from . import pbp
from .players import Player
from .seasons import Season
from .teams import Team
from .boxscores import BoxScore
from .finders import GamePlayFinder, PlayerSeasonFinder
# Root URL for all pro-football-reference scraping in this subpackage.
BASE_URL = 'http://www.pro-football-reference.com'

# modules/variables to expose
# (the closing bracket had dataset-export metadata fused onto it; removed)
__all__ = [
    'BASE_URL',
    'finders', 'GamePlayFinder', 'PlayerSeasonFinder',
    'boxscores', 'BoxScore',
    'players', 'Player',
    'seasons', 'Season',
    'teams', 'Team',
    'winProb',
    'pbp',
]
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=line-too-long, invalid-name, old-style-class, multiple-statements, too-many-branches
"""
This module contains the PyChop2 class which allows calculation of the resolution and flux of
direct geometry time-of-flight inelastic neutron spectrometers.
"""
from __future__ import (absolute_import, division, print_function)
from .ISISFermi import ISISFermi
from .ISISDisk import ISISDisk
import warnings
class PyChop2:
    """
    PyChop2 is a class to calculate the energy resolution of direct geometry time-of-flight spectrometers
    based on calculations of the time spread of neutron bunches. It currently has separate implementations
    for instruments using Fermi choppers and disk choppers for monochromation.
    """

    # Implementation class per instrument name (Fermi vs. disk choppers).
    __Classes = {
        'LET': ISISDisk,          # LET default
        'LETHIFLUX': ISISDisk,    # LET, high flux configuration
        'LETINTERMED': ISISDisk,  # LET, medium flux configuration
        'LETHIRES': ISISDisk,     # LET, low flux configuration
        'MAPS': ISISFermi,
        'MARI': ISISFermi,
        'MERLIN': ISISFermi}
    # Implementation class per instrument for multi-rep calculations.
    __MultiRepClasses = {
        'LET': ISISDisk,          # LET default
        'LETHIFLUX': ISISDisk,    # LET, high flux configuration
        'LETINTERMED': ISISDisk,  # LET, medium flux configuration
        'LETHIRES': ISISDisk,     # LET, low flux configuration
        'MERLIN': ISISDisk,
        'MAPS': ISISDisk,
        'MARI': ISISDisk}

    def __init__(self, instname, *args):
        warnings.warn("The PyChop2 class is deprecated and will be removed in the next Mantid version. "
                      "Please use the Instrument class or the official PyChop CLI interface.", DeprecationWarning)
        instname = instname.upper()
        if instname not in self.__Classes.keys():
            raise ValueError('Instrument %s not recognised' % (instname))
        self.object = self.__Classes[instname](instname, *args)
        self.instname = instname

    def allowedInstruments(self):
        """
        ! Returns a list of currently implemented instruments
        """
        return self.__Classes.keys()

    def setInstrument(self, *args):
        """
        ! Sets the instrument to calculate for
        """
        # Re-instantiate only when the implementation class changes;
        # otherwise delegate to the existing object.
        if self.__Classes[args[0]] != self.__Classes[self.instname]:
            self.object = self.__Classes[args[0]](*args)
        else:
            self.object.setInstrument(*args)
        self.instname = args[0]

    def setChopper(self, *args):
        """
        ! Sets the chopper rotor (Fermi instruments) or instrument configuration (disks instruments)
        """
        self.object.setChopper(*args)

    def getChopper(self):
        """
        ! Returns the currently set chopper rotor or instrument configuration
        """
        return self.object.getChopper()

    def setFrequency(self, *args, **kwargs):
        """
        ! Sets the chopper frequency(ies)
        """
        self.object.setFrequency(*args, **kwargs)

    def getFrequency(self):
        """
        ! Returns (a list of) the current chopper frequency(ies)
        """
        return self.object.getFrequency()

    def setEi(self, *args):
        """
        ! Sets the desired or focused incident energy
        """
        self.object.setEi(*args)

    def getEi(self, *args):
        """
        ! Returns the currently set desired or focused incident energy
        """
        return self.object.getEi(*args)

    def getObject(self):
        """
        ! Returns the object instance which actually handles the calculation.
        ! This object's type is a subclass specific to Fermi or Disk instruments and will have
        ! additional methods specific to the class.
        """
        return self.object

    def getResolution(self, *args):
        """
        ! Returns the energy resolution as a function of energy transfer
        !
        ! .getResolution() - if Ei is set, calculates for [0.05Ei,0.95Ei] in 20 steps
        ! .getResolution(Etrans) - if Ei is set, calculates for Etrans energy transfer
        ! .getResolution(Etrans, Ei) - calculates for an Ei different from that set previously
        """
        return self.object.getResolution(*args)

    def getFlux(self, *args):
        """
        ! Returns (an estimate of) the neutron flux at the sample at the set Ei in n/cm^2/s
        """
        return self.object.getFlux(*args)

    def getResFlux(self, *args):
        """
        ! Returns a tuple of the (resolution, flux)
        """
        return self.object.getResFlux(*args)

    def getWidths(self, *args):
        """
        ! Returns the individual time widths that go into the calculated energy widths as a dict
        """
        return self.object.getWidths(*args)

    def __getMultiRepObject(self):
        """
        Private method to obtain multi-rep information
        """
        if self.instname not in self.__MultiRepClasses.keys():
            raise ValueError('Instrument %s does not support multirep mode')
        # Reuse the main object when it already is the multi-rep class;
        # otherwise build a mirror object with the same settings.
        if self.__MultiRepClasses[self.instname] == self.__Classes[self.instname]:
            obj = self.object
        else:
            obj = self.__MultiRepClasses[self.instname](self.instname)
            obj.setChopper(self.object.getChopper())
            obj.setFrequency(self.object.getFrequency(), Chopper2Phase=self.object.diskchopper_phase)
            obj.setEi(self.object.getEi())
        return obj

    def getAllowedEi(self, *args):
        """
        ! For instruments which support multi-rep mode, returns a list of allowed incident energies
        """
        return self.__getMultiRepObject().getAllowedEi(*args)

    def getMultiRepResolution(self, *args):
        """
        ! For instruments which support multi-rep mode, returns the resolution for each rep
        """
        return self.__getMultiRepObject().getMultiRepResolution(*args)

    def getMultiRepFlux(self, *args):
        """
        ! For instruments which support multi-rep mode, returns the flux for each rep
        """
        return self.__getMultiRepObject().getMultiRepFlux(*args)

    def getMultiWidths(self, *args):
        """
        ! Returns the individual time widths that go into the calculated energy widths as a dict
        """
        return self.__getMultiRepObject().getMultiWidths(*args)

    def plotMultiRepFrame(self, *args):
        """
        ! For instruments which support multi-rep mode, plots the time-distance diagram
        """
        return self.__getMultiRepObject().plotFrame(*args)

    @classmethod
    def calculate(cls, *args, **kwargs):
        """
        ! Calculates the resolution and flux directly (without setting up a PyChop2 object)
        !
        ! PyChop2.calculate('mari', 's', 250., 55.) # Instname, Chopper Type, Freq, Ei in order
        ! PyChop2.calculate('let', 180, 2.2) # For LET, chopper type is not needed.
        ! PyChop2.calculate('let', [160., 80.], 1.) # For LET, specify resolution and pulse remover freq
        ! PyChop2.calculate('let', 'High flux', 80, 2.2) # LET default is medium flux configuration
        ! PyChop2.calculate(inst='mari', chtyp='s', freq=250., ei=55.) # With keyword arguments
        ! PyChop2.calculate(inst='let', variant='High resolution', freq=[160., 80.], ei=2.2)
        !
        ! For LET, the allowed variant names are:
        !   'With Chopper 3'
        !   'Without Chopper 3'
        ! You have to use these strings exactly.
        !
        ! By default this function returns the elastic resolution and flux only.
        ! If you want the inelastic resolution, specify the inelastic energy transfer
        ! as either the last positional argument, or as a keyword argument, e.g.:
        !
        ! PyChop2.calculate('merlin', 'g', 450., 60., range(55))
        ! PyChop2.calculate('maps', 'a', 450., 600., etrans=np.linspace(0,550,55))
        !
        ! The results are returned as tuple: (resolution, flux)
        """
        if len(args) > 0:
            if not isinstance(args[0], str):
                raise ValueError('The first argument must be the instrument name')
            instname = args[0].upper()
        elif 'inst' in kwargs.keys():
            instname = kwargs['inst'].upper()
        else:
            raise RuntimeError('You must specify the instrument name')
        obj = cls(instname)
        argdict = {}
        if 'LET' not in instname:
            argname = ['inst', 'chtyp', 'freq', 'ei', 'etrans']
            lna = (len(argname) if len(args) > len(argname) else len(args))
            for ind in range(1, lna):
                argdict[argname[ind]] = args[ind]
            for ind in kwargs.keys():
                if ind in argname:
                    argdict[ind] = kwargs[ind]
            for ind in range(1, 4):
                if argname[ind] not in argdict:
                    # Fixed: the original wrote 'Parameter ''%s'' ...' where
                    # the doubled quotes are adjacent-string concatenation,
                    # so the quotes never appeared in the message.
                    raise RuntimeError("Parameter '%s' must be specified" % (argname[ind]))
            obj.setChopper(argdict['chtyp'], argdict['freq'])
            obj.setEi(argdict['ei'])
        else:
            if 'variant' in kwargs.keys():
                argdict['variant'] = kwargs['variant']
            if len(args) > 1 and isinstance(args[1], str):
                argname = ['inst', 'variant', 'freq', 'ei', 'etrans']
            else:
                argname = ['inst', 'freq', 'ei', 'etrans']
            lna = (len(argname) if len(args) > len(argname) else len(args))
            for ind in range(1, lna):
                argdict[argname[ind]] = args[ind]
            for ind in kwargs.keys():
                if ind in argname:
                    argdict[ind] = kwargs[ind]
            if 'variant' in argdict.keys():
                obj.setChopper(argdict['variant'])
            if 'freq' not in argdict.keys() or 'ei' not in argdict.keys():
                raise RuntimeError('The chopper frequency and focused incident energy must be specified')
            obj.setFrequency(argdict['freq'])
            obj.setEi(argdict['ei'])
        etrans = argdict['etrans'] if 'etrans' in argdict.keys() else 0.
        return obj.getResolution(etrans), obj.getFlux()
| mganeva/mantid | scripts/PyChop/PyChop2.py | Python | gpl-3.0 | 10,393 |
#!/usr/bin/env python
# Core
from __future__ import print_function
from decimal import *
from functools import wraps
import logging
import math
import pprint
import random
import re
import time
import ConfigParser
# Third-Party
import argh
from clint.textui import progress
import funcy
import html2text
from PIL import Image
from splinter import Browser
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import \
TimeoutException, UnexpectedAlertPresentException, WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import selenium.webdriver.support.expected_conditions as EC
import selenium.webdriver.support.ui as ui
# Local
# Begin code
# Decimal context used for money-like values: two significant digits,
# always rounding down.
getcontext().prec = 2
getcontext().rounding = ROUND_DOWN

logging.basicConfig(
    format='%(lineno)s - %(message)s',
    level=logging.INFO
)

random.seed()

pp = pprint.PrettyPrinter(indent=4)

# Site endpoints, keyed by logical action name (see url_for_action below).
base_url = 'http://www.MyPayingAds.com/'
action_path = dict(
    login='themes/common/login.php',
    view_ads='member/viewad.php',
    dashboard='member/memberoverview.php',
    repurchase_balance_transfer='balance_transfer.php',
    purchase_balance_transfer='pbalance_transfer.php',
    buy_pack='member/shares.php'
)

# Handy durations, in seconds.
one_minute = 60
three_minutes = 3 * one_minute
ten_minutes = 10 * one_minute
one_hour = 3600
def url_for_action(action):
    """Return the absolute URL for a named site action."""
    path = action_path[action]
    return "{0}/{1}".format(base_url, path)
def loop_forever():
    """Spin until the process is killed (note: 100% CPU busy-wait)."""
    while True:
        pass
def clear_input_box(box):
    """Empty a text input by jumping to end-of-line and backspacing.

    :param box: splinter element wrapper supporting ``.type()``
    :return: the same element, for chaining
    """
    box.type(Keys.CONTROL + "e")  # readline-style jump to end of line
    for _ in xrange(100):
        box.type(Keys.BACKSPACE)
    return box
# http://stackoverflow.com/questions/16807258/selenium-click-at-certain-position
def click_element_with_offset(driver, elem, x, y):
    """Move the mouse to (x, y) relative to *elem* and click there."""
    chain = ActionChains(driver)
    echo_print("Moving to x position", x)
    echo_print("Moving to y position", y)
    chain.move_to_element_with_offset(elem, x, y)
    print("OK now see where the mouse is...")
    chain.click()
    chain.perform()
def page_source(browser):
    """Return the HTML source of the page currently loaded in *browser*."""
    return browser.driver.page_source
def wait_visible(driver, locator, by=By.XPATH, timeout=30):
    """Wait until the element at *locator* becomes visible.

    :param driver: selenium webdriver
    :param locator: locator string (XPath by default)
    :param by: selenium locator strategy
    :param timeout: seconds to wait before giving up
    :return: the located element, or False on timeout
    """
    waiter = ui.WebDriverWait(driver, timeout)
    try:
        visible = waiter.until(EC.visibility_of_element_located((by, locator)))
    except TimeoutException:
        logging.info("TimeoutException in wait_visible.")
        return False
    if visible:
        logging.info("Found element.")
        return driver.find_element(by, locator)
def maybe_accept_alert(driver):
    """Accept a confirmation popup if one appears within 3 seconds."""
    try:
        logging.warn("Probing for alert.")
        ui.WebDriverWait(driver, 3).until(
            EC.alert_is_present(),
            'Timed out waiting for PA creation ' +
            'confirmation popup to appear.')
        driver.switch_to_alert().accept()
        print("alert accepted")
    except TimeoutException:
        print("no alert")
def trap_unexpected_alert(func):
    """Decorator: convert webdriver failures in *func* into status code 254."""
    @wraps(func)
    def guarded(self):
        try:
            return func(self)
        except UnexpectedAlertPresentException:
            print("Caught unexpected alert.")
            return 254
        except WebDriverException:
            print("Caught webdriver exception.")
            return 254
    return guarded
def trap_alert(func):
    """Decorator: accept an unexpected alert (return 254) or absorb any
    other webdriver error (return 253) raised by *func*."""
    @wraps(func)
    def guarded(self):
        # UnexpectedAlertPresentException subclasses WebDriverException,
        # so it must be caught first.
        try:
            return func(self)
        except UnexpectedAlertPresentException:
            logging.info("Caught UnexpectedAlertPresentException.")
            self.browser.driver.switch_to_alert().accept()
            return 254
        except WebDriverException:
            print("Caught webdriver exception.")
            return 253
    return guarded
def get_element_html(driver, elem):
    """Return the innerHTML of *elem* via injected JavaScript."""
    script = "return arguments[0].innerHTML;"
    return driver.execute_script(script, elem)
def get_outer_html(driver, elem):
    """Return the outerHTML of *elem* via injected JavaScript."""
    script = "return arguments[0].outerHTML;"
    return driver.execute_script(script, elem)
def echo_print(text, elem):
    """Print a labelled value in the form ``label=value.``."""
    message = "{0}={1}.".format(text, elem)
    print(message)
import inspect
def retrieve_name(var):
    """Best-effort reverse lookup of *var* in the caller's local namespace.

    Returns every local variable name in the *direct caller's* frame that is
    bound to the same object (identity comparison), so aliases all appear
    and interned small ints/strings may yield surprising extras.
    """
    callers_local_vars = inspect.currentframe().f_back.f_locals.items()
    return [var_name for var_name, var_val in callers_local_vars if var_val is var]
# https://stackoverflow.com/questions/10848900/how-to-take-partial-screenshot-frame-with-selenium-webdriver/26225137#26225137?newreg=8807b51813c4419abbb37ab2fe696b1a
def element_screenshot(driver, element, filename):
    """Save a screenshot of just *element* to *filename*.

    Splinter wrappers are unwrapped to the underlying selenium element
    before reading location/size.
    """
    if type(element).__name__ == 'WebDriverElement':
        element = element._element
    left = element.location['x']
    upper = element.location['y']
    right = left + element.size['width']
    bottom = upper + element.size['height']
    bounding_box = map(int, (left, upper, right, bottom))
    echo_print('Bounding Box', bounding_box)
    return bounding_box_screenshot(driver, bounding_box, filename)
def bounding_box_screenshot(driver, bounding_box, filename):
    """Take a full-page screenshot, then overwrite *filename* with the
    region described by *bounding_box*.

    :param bounding_box: (left, upper, right, lower) pixel coordinates
    :return: the saved PIL image
    """
    driver.save_screenshot(filename)
    base_image = Image.open(filename)
    cropped_image = base_image.crop(bounding_box)
    # NOTE(review): the full screenshot is resized to the crop's size and
    # the crop then pasted over it at (0, 0); saving `cropped_image`
    # directly looks equivalent -- confirm the intent of the resize+paste.
    base_image = base_image.resize(
        [int(i) for i in cropped_image.size])
    base_image.paste(cropped_image, (0, 0))
    base_image.save(filename)
    return base_image
class Entry(object):
def __init__(self, username, password, browser, pack_value):
    """Bind site credentials and a splinter browser to this session.

    :param username: MyPayingAds account name
    :param password: account password
    :param browser: splinter Browser used for all page interaction
    :param pack_value: price of one ad pack
    """
    self._username = username
    self._password = password
    self.browser = browser
    self._pack_value = pack_value
def login(self):
print("Logging in...")
self.browser_visit('login')
self.browser.find_by_name('user_name').first.type(self._username)
self.browser.find_by_name('password').first.type(
"{0}\t\n".format(self._password))
# self.browser.find_by_xpath("//input[@value='LOGIN']").first.click()
logging.info("Waiting for login ad...")
link_elem = wait_visible(self.browser.driver, "//input[@name='skipad']", timeout=60)
if link_elem:
print("Skip ad found.")
link_elem.click()
else:
print("Logging in again.")
self.login()
logging.info("Login complete.")
def browser_visit(self, action_label):
try:
print("Visiting URL for {0}".format(action_label))
self.browser.visit(url_for_action(action_label))
except TimeoutException:
logging.info("Page load timeout.")
pass
except UnexpectedAlertPresentException:
logging.info("Caught UnexpectedAlertPresentException.")
logging.warn("Attempting to dismiss alert")
alert = self.browser.driver.switch_to_alert()
alert.dismiss()
return 254
except WebDriverException:
logging.info("Caught webdriver exception.")
return 253
def view_ads(self, surf_amount):
logging.warn("Visiting viewads")
for i in xrange(1, surf_amount + 1):
while True:
print("Viewing ad {0}".format(i))
result = self.view_ad()
if result == 0:
break
self.browser_visit('dashboard')
@trap_alert
def view_ad(self):
self.browser_visit('view_ads')
ads = self.browser.find_by_xpath('//a[@class="bannerlink"]')
# print(ads)
ads[3].click()
self.browser.driver.switch_to_window(self.browser.driver.window_handles[-1])
elem = wait_visible(self.browser.driver, '//div[@class="counter-text"]')
print("may close elem={0}".format(elem))
self.browser.driver.close()
self.browser.driver.switch_to_window(self.browser.driver.window_handles[0])
return 0
def wait_on_ad(self):
time_to_wait_on_ad = random.randrange(40, 50)
for _ in progress.bar(range(time_to_wait_on_ad)):
time.sleep(1)
def collect_stats(self):
self.browser_visit('dashboard')
ad_pack_elem = self.browser.find_by_xpath("//p[@class='number-pack']")
ad_packs = int(ad_pack_elem.text)
main_account_balance_elem = self.browser.find_by_xpath("//p[@style='font-size:41px;']")
main_account_balance = Decimal(main_account_balance_elem.text[1:])
account_balance_elem = self.browser.find_by_xpath("//div[@class='account-blance']")
account_balance_html = get_outer_html(self.browser.driver, account_balance_elem._element)
account_balance_text = html2text.HTML2Text().handle(account_balance_html)
floating_point_regexp = re.compile('\d+\.\d+')
main, purchase, repurchase = [Decimal(f) for f in floating_point_regexp.findall(account_balance_text)]
self._balance = dict(
main=main, purchase=purchase, repurchase=repurchase, ad_packs=ad_packs
)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(self._balance)
@staticmethod
def packs_to_purchase(bread, pack_value):
return int(math.floor(bread/pack_value))
def get_balance(self):
account_balance_elem = wait_visible(self.browser.driver, "rightbar", by=By.ID)
account_balance_html = get_outer_html(self.browser.driver, account_balance_elem)
account_balance_text = html2text.HTML2Text().handle(account_balance_html)
# dollar amount samples:
# $4.28
# $0
# no known samples for something like 28 cents. Not sure if it is
# $0.28 or $.28
floating_point_regexp = re.compile('\$(\d+(\.\d+)?)')
floats = [Decimal(f[0]) for f in floating_point_regexp.findall(account_balance_text)]
cash, repurchase = floats[20:22]
self._balance = dict(
cash=cash, repurchase=repurchase
)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(self._balance)
def exhaustive_buy(self):
pack_values = [1, 3, 5, 7, 10, 15, 20, 30, 40, 50]
pack_values.reverse()
for pack_value in pack_values[pack_values.index(self._pack_value)::]:
self.buy_pack(pack_value)
def buy_pack(self, pack_value):
self.browser_visit('buy_pack')
balance = self.get_balance()
total_balance = self._balance['cash'] + self._balance['repurchase']
packs_to_buy = int(total_balance / pack_value)
logging.info("Buying {0} packs of value {1}".format(packs_to_buy, pack_value))
pack_value_to_index = {
1: 0,
3: 1,
5: 2,
7: 3,
10: 4,
15: 5,
20: 6,
30: 7,
40: 8,
50: 9,
}
if packs_to_buy < 1:
return
buy_form = self.browser.find_by_xpath("//form[@method='post']")
form = buy_form[pack_value_to_index[pack_value]]
pack_input = "{0}\t\t ".format(packs_to_buy)
form.find_by_id('position').type(pack_input)
button = wait_visible(self.browser.driver, 'paynow', By.ID)
if button:
button.click()
time.sleep(2)
f = self.browser.find_by_xpath("//div[@class='disputbox1']")
buttons = f.find_by_id('paynow')
buttons.first.click()
def calc_account_balance(self):
time.sleep(1)
logging.warn("visiting dashboard")
self.browser_visit('dashboard')
logging.warn("finding element by xpath")
elem = self.browser.find_by_xpath(
'/html/body/table[2]/tbody/tr/td[2]/table/tbody/tr/td[2]/table[6]/tbody/tr/td/table/tbody/tr[2]/td/h2[2]/font/font'
)
print("Elem Text: {}".format(elem.text))
self.account_balance = Decimal(elem.text[1:])
print("Available Account Balance: {}".format(self.account_balance))
def calc_credit_packs(self):
time.sleep(1)
logging.warn("visiting dashboard")
self.browser_visit('dashboard')
logging.warn("finding element by xpath")
elem = self.browser.find_by_xpath(
"//font[@color='#009900']"
)
print("Active credit packs = {0}".format(elem[0].text))
# for i, e in enumerate(elem):
# print("{0}, {1}".format(i, e.text))
def solve_captcha(self):
time.sleep(3)
t = page_source(self.browser).encode('utf-8').strip()
# print("Page source {0}".format(t))
captcha = funcy.re_find(
"""ctx.strokeText\('(\d+)'""", t)
# print("CAPTCHA = {0}".format(captcha))
self.browser.find_by_name('codeSb').fill(captcha)
time.sleep(6)
button = self.browser.find_by_name('Submit')
button.click()
def main(conf,
         surf=False, buy_pack=False, exhaustive_buy=False, stay_up=False,
         pack_value=7, surf_amount=10, random_delay=False
         ):
    """CLI entry point: read credentials from *conf*, log in, and run the
    actions selected by the boolean flags."""
    parser = ConfigParser.ConfigParser()
    parser.read(conf)
    username = parser.get('login', 'username')
    password = parser.get('login', 'password')

    if random_delay:
        # Stagger start-up so repeated runs do not hit the site in lockstep.
        time.sleep(random.randrange(1, 5) * one_minute)

    with Browser() as browser:
        browser.driver.set_window_size(1200, 1100)
        browser.driver.set_window_position(600, 0)
        browser.driver.set_page_load_timeout(30)

        entry = Entry(username, password, browser, pack_value)
        entry.login()
        if exhaustive_buy:
            entry.exhaustive_buy()
        if buy_pack:
            entry.buy_pack(pack_value)
        if surf:
            entry.view_ads(surf_amount)
        if stay_up:
            loop_forever()


if __name__ == '__main__':
    argh.dispatch_command(main)
| metaperl/mpa | src/main.py | Python | artistic-2.0 | 13,764 |
# Credentials for the email account used to send report emails.
email_address = ""
email_password = ""
# Enter the login information for the EPNM API Account
API_username = ""
API_password = ""
| cisco-gve/epnm_alarm_report | web_ui/opensesame.py | Python | apache-2.0 | 203 |
#!/usr/bin/env python
import os

# Start coverage collection as early as possible (before the app imports)
# so import-time statements are counted.  FLASK_COVERAGE is set by the
# `test --coverage` command before it re-execs this script.
COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.coverage(branch=True, include='app/*')
    COV.start()

# Load KEY=VALUE pairs from a local .env file into the environment.
if os.path.exists('.env'):
    print('Importing environment from .env...')
    for line in open('.env'):
        var = line.strip().split('=')
        if len(var) == 2:
            os.environ[var[0]] = var[1]

from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from flask_debugtoolbar import DebugToolbarExtension

# FLASK_CONFIG selects the configuration profile; falls back to 'default'.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
toolbar = DebugToolbarExtension(app)
def make_shell_context():
    """Objects pre-imported into the ``manage.py shell`` session."""
    return {
        'app': app, 'db': db, 'User': User, 'Follow': Follow,
        'Role': Role, 'Permission': Permission, 'Post': Post,
        'Comment': Comment,
    }


manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test(coverage=False):
    """Run the unit tests."""
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        # Re-exec ourselves with coverage enabled so the collector
        # (started at import time, see module top) sees the whole app.
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    suite = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(suite)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        report_dir = os.path.join(
            os.path.abspath(os.path.dirname(__file__)), 'tmp/coverage')
        COV.html_report(directory=report_dir)
        print('HTML version: file://%s/index.html' % report_dir)
        COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
    """Start the application under the code profiler."""
    from werkzeug.contrib.profiler import ProfilerMiddleware
    # Wrap the WSGI app so every request is profiled; `length` caps the
    # number of functions shown, `profile_dir` optionally dumps .prof files.
    app.wsgi_app = ProfilerMiddleware(
        app.wsgi_app, restrictions=[length], profile_dir=profile_dir)
    app.run()
@manager.command
def deploy():
    """Run deployment tasks."""
    from flask.ext.migrate import upgrade
    from app.models import Role, User

    # Bring the database schema up to the latest migration.
    upgrade()
    # Seed the role table.
    Role.insert_roles()
    # Users follow themselves so their own posts appear in their feed.
    User.add_self_follows()


if __name__ == '__main__':
    manager.run()
| bobcolner/material-girl | manage.py | Python | mit | 2,480 |
"""
Question:
The API: int read4(char *buf) reads 4 characters at a time from a file.
The return value is the actual number of characters read. For example, it returns 3 if there
are only 3 characters left in the file.
By using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.
Note: The read function will only be called once for each test case.
"""
class Solution:
    """Read n characters from a file using the 4-chars-at-a-time read4 API."""

    def read4(self, buf):
        """Stub for the provided read4 API.

        The real API fills ``buf`` with up to 4 characters and returns the
        number actually read; this placeholder just reports ``len(buf)``.
        """
        n = len(buf)
        return n

    def read(self, buf, n):
        """Read up to *n* characters into ``buf``; return the count read.

        Repeatedly calls ``self.read4`` on a 4-slot scratch buffer and
        copies the characters over, stopping at EOF (read4 returning 0)
        or once *n* characters have been gathered.  The original body
        called the unqualified name ``read4`` (a NameError -- it is a
        method) and appended whole buffers instead of characters.
        """
        total = 0
        while total < n:
            chunk = [''] * 4
            count = self.read4(chunk)
            if count == 0:
                break  # EOF
            # Never copy more than the caller asked for.
            count = min(count, n - total)
            buf[total:total + count] = chunk[:count]
            total += count
        return total
| linyaoli/acm | others/intermediate/read4.py | Python | gpl-2.0 | 710 |
# coding: utf-8
"""
sickle.tests.test_sickle
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2015 Mathias Loesch
"""
import os
import unittest
from nose.tools import raises
from sickle import Sickle
this_dir, this_filename = os.path.split(__file__)
class TestCase(unittest.TestCase):
    """Sickle constructor argument validation."""

    def test_invalid_http_method(self):
        with self.assertRaises(ValueError):
            Sickle("http://localhost", http_method="DELETE")

    def test_wrong_protocol_version(self):
        with self.assertRaises(ValueError):
            Sickle("http://localhost", protocol_version="3.0")

    def test_invalid_iterator(self):
        with self.assertRaises(TypeError):
            Sickle("http://localhost", iterator=None)
| avorio/sickle | sickle/tests/test_sickle.py | Python | bsd-3-clause | 660 |
"""Exceptions used by basic support utilities."""
__author__ = "Ian Goodfellow"
import sys
from pylearn2.utils.common_strings import environment_variable_essay
from theano.compat import six
class EnvironmentVariableError(Exception):
    """
    Raised when a required environment variable is not defined.

    Parameters
    ----------
    *args: list
        Arguments passed to Exception()
    """
    def __init__(self, *args):
        Exception.__init__(self, *args)
# This exception lives here (rather than datasets.exc) because
# string_utils needs it and moving it would create a circular import.
class NoDataPathError(EnvironmentVariableError):
    """
    Exception raised when PYLEARN2_DATA_PATH is required but has not been
    defined.
    """
    def __init__(self):
        message = data_path_essay + environment_variable_essay
        super(NoDataPathError, self).__init__(message)
# Message shown to users who have not configured PYLEARN2_DATA_PATH.
data_path_essay = """\
You need to define your PYLEARN2_DATA_PATH environment variable. If you are
using a computer at LISA, this should be set to /data/lisa/data.
"""
def reraise_as(new_exc):
    """
    Re-raise current exception as a new exception.

    Parameters
    ----------
    new_exc : Exception isinstance
        The new error to be raised e.g. (ValueError("New message"))
        or a string that will be prepended to the original exception
        message

    Notes
    -----
    Note that when reraising exceptions, the arguments of the original
    exception are cast to strings and appended to the error message. If
    you want to retain the original exception arguments, please use:

    >>> except Exception as e:
    ...     reraise_as(NewException("Extra information", *e.args))

    Examples
    --------
    >>> try:
    ...     do_something_crazy()
    ... except Exception:
    ...     reraise_as(UnhandledException("Informative message"))
    """
    # Grab the exception currently being handled; must be called from
    # inside an ``except`` block for these to be meaningful.
    orig_exc_type, orig_exc_value, orig_exc_traceback = sys.exc_info()

    # A bare string becomes an instance of the original exception type.
    if isinstance(new_exc, six.string_types):
        new_exc = orig_exc_type(new_exc)

    if hasattr(new_exc, 'args'):
        if len(new_exc.args) > 0:
            # We add all the arguments to the message, to make sure that this
            # information isn't lost if this exception is reraised again
            new_message = ', '.join(str(arg) for arg in new_exc.args)
        else:
            new_message = ""
        new_message += '\n\nOriginal exception:\n\t' + orig_exc_type.__name__
        if hasattr(orig_exc_value, 'args') and len(orig_exc_value.args) > 0:
            if getattr(orig_exc_value, 'reraised', False):
                # Already wrapped once: keep only the combined message so
                # nested reraise_as calls don't repeat the argument list.
                new_message += ': ' + str(orig_exc_value.args[0])
            else:
                new_message += ': ' + ', '.join(str(arg)
                                                for arg in orig_exc_value.args)
        new_exc.args = (new_message,) + new_exc.args[1:]

    # Preserve the chain (PEP 3134) and mark the wrapper so the
    # `reraised` check above can detect nested wrapping.
    new_exc.__cause__ = orig_exc_value
    new_exc.reraised = True
    six.reraise(type(new_exc), new_exc, orig_exc_traceback)
| CIFASIS/pylearn2 | pylearn2/utils/exc.py | Python | bsd-3-clause | 3,029 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.